| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# -*- coding: utf8 -*-
#
# Copyright 2011 Kyrre Ness Sjøbæk
# This file is part of AcdOpti.
#
# AcdOpti is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AcdOpti is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AcdOpti. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk
import os
from InfoFrameComponent import InfoFrameComponent
from acdOpti.AcdOptiExceptions import AcdOptiException_cubitTemplateFile_CUBITerror,\
AcdOptiException_runConfig_createFail,\
AcdOptiException_meshInstance_generateFail
from RunConfig import RunConfig
import acdOpti.AcdOptiCommandWrapper as AcdOptiCommandWrapper
from acdOpti.AcdOptiSettings import AcdOptiSettings
class MeshInstance(InfoFrameComponent):
meshInstance = None
__topLabels = None
__labelCollection = None
__entryCollection = None
__checkCollection = None
__tableWidget = None
__scrolledWindow = None
__meshTemplateNameLabel = None
__meshBadIndicator = None
__clearLockdownButton = None
__cloneButton = None
__runConfigButton = None
__exportButton = None
__paraviewButton = None
__generateButton = None
def __init__(self,frameManager,meshInstance):
print "MeshInstance::__init__()"
InfoFrameComponent.__init__(self, frameManager)
self.meshInstance = meshInstance
#Create GUI
self.__topLabels = []
tlab = gtk.Label("Tag name")
self.__topLabels.append(tlab)
tlab = gtk.Label("Value")
self.__topLabels.append(tlab)
tlab = gtk.Label("Use default")
self.__topLabels.append(tlab)
self.__meshTemplateNameLabel = gtk.Label("Name of mesh template: \"" + self.meshInstance.meshTemplate.instName + "\"")
self.__meshBadIndicator = gtk.Label("Mesh bad (ISOTEs): " + str(self.meshInstance.meshBad))
self.__clearLockdownButton = gtk.Button(label="Clear lockdown")
self.__clearLockdownButton.connect("clicked", self.event_button_clearLockdown, None)
self.__cloneButton = gtk.Button(label="Clone this mesh instance (deep copy)")
self.__cloneButton.connect("clicked", self.event_button_clone, None)
self.__runConfigButton = gtk.Button(label="Attach a runconfig...")
self.__runConfigButton.connect("clicked", self.event_button_runConfig, None)
self.__exportButton = gtk.Button(label="Export CUBIT journal to file...")
self.__exportButton.connect("clicked", self.event_button_export, None)
self.__paraviewButton = gtk.Button(label="Run ParaView...")
self.__paraviewButton.connect("clicked", self.event_button_paraview)
self.__generateButton = gtk.Button(label="Run CUBIT to generate mesh")
self.__generateButton.connect("clicked", self.event_button_generate, None)
self.updateTable()
self.__scrolledWindow = gtk.ScrolledWindow()
self.__scrolledWindow.set_policy(gtk.POLICY_NEVER,gtk.POLICY_AUTOMATIC)
self.__scrolledWindow.add_with_viewport(self.__tableWidget)
self.__scrolledWindow.set_shadow_type(gtk.SHADOW_NONE)
self.baseWidget = gtk.VBox()
self.baseWidget.pack_start(self.__meshTemplateNameLabel, expand=False)
self.baseWidget.pack_start(self.__meshBadIndicator, expand=False)
self.baseWidget.pack_start(self.__scrolledWindow, expand=True)
self.baseWidget.pack_start(self.__clearLockdownButton, expand=False)
self.baseWidget.pack_start(self.__cloneButton, expand=False)
self.baseWidget.pack_start(self.__runConfigButton, expand=False)
self.baseWidget.pack_start(self.__exportButton, expand=False)
self.baseWidget.pack_start(self.__paraviewButton, expand=False)
self.baseWidget.pack_start(self.__generateButton, expand=False)
self.baseWidget.show_all()
def updateTable(self):
"""
Fills the __tableWidget
"""
print "MeshInstance::updateTable()"
numEntries = self.meshInstance.meshTemplate.paramDefaults_len()
lockdown = self.meshInstance.lockdown
#Initialize __tableWidget
if not self.__tableWidget:
self.__tableWidget=gtk.Table(numEntries+1, 3, False)
self.__tableWidget.set_row_spacings(3)
self.__tableWidget.set_col_spacings(3)
self.__tableWidget.attach(self.__topLabels[0],
0,1,0,1,
xoptions=gtk.FILL,yoptions=gtk.FILL)
self.__tableWidget.attach(self.__topLabels[1],
1,2,0,1,
xoptions=gtk.FILL|gtk.EXPAND,yoptions=gtk.FILL)
self.__tableWidget.attach(self.__topLabels[2],
2,3,0,1,
xoptions=gtk.FILL,yoptions=gtk.FILL)
self.__labelCollection = {}
self.__entryCollection = {}
self.__checkCollection = {}
else:
#Clear anything that might be there from before
for k in self.meshInstance.meshTemplate.paramDefaults_getKeys():
self.__tableWidget.remove(self.__labelCollection[k])
self.__tableWidget.remove(self.__entryCollection[k])
self.__tableWidget.remove(self.__checkCollection[k])
self.__labelCollection.clear()
self.__entryCollection.clear()
self.__checkCollection.clear()
#Create and attach the table entries
for (k,i) in zip(sorted(self.meshInstance.meshTemplate.paramDefaults_getKeys()),
xrange(numEntries)):
self.__labelCollection[k]=lab=gtk.Label(k)
self.__tableWidget.attach(lab,0,1,i+1,i+2, xoptions=gtk.FILL, yoptions=gtk.FILL)
self.__entryCollection[k]=ent=gtk.Entry()
if k in self.meshInstance.templateOverrides_getKeys():
ent.set_text(self.meshInstance.templateOverrides_get(k))
if lockdown:
ent.set_sensitive(False)
else:
ent.set_text(self.meshInstance.meshTemplate.paramDefaults_get(k))
ent.set_sensitive(False)
self.__tableWidget.attach(ent,1,2,i+1,i+2, xoptions=gtk.FILL|gtk.EXPAND, yoptions=gtk.FILL)
self.__checkCollection[k]=check=gtk.CheckButton()
if k in self.meshInstance.templateOverrides_getKeys():
check.set_active(False)
else:
check.set_active(True)
if lockdown:
check.set_sensitive(False)
check.connect("toggled", self.event_check_toggled, k) #Connect after set_active() so the initial state change doesn't fire the handler
self.__tableWidget.attach(check,2,3,i+1,i+2, xoptions=gtk.FILL, yoptions=gtk.FILL)
self.__tableWidget.show_all()
#Update the meshBad label
self.__meshBadIndicator.set_text("Mesh bad (ISOTEs): " + str(self.meshInstance.meshBad))
#Update the lockdown button
if lockdown:
self.__clearLockdownButton.set_sensitive(True)
self.__generateButton.set_sensitive(False)
else:
self.__clearLockdownButton.set_sensitive(False)
self.__generateButton.set_sensitive(True)
self.frameManager.mainWindow.updateProjectExplorer()
def updateMeshInstance(self):
"""
Copies information from the on-screen form into the meshInstance.
Does NOT ask the meshInstance to write itself to file.
If the meshInstance is in lockdown, do nothing.
"""
print "MeshInstance::updateMeshInstance()"
if self.meshInstance.lockdown:
return
for k in self.meshInstance.templateOverrides_getKeys():
self.meshInstance.templateOverrides_insert(k, self.__entryCollection[k].get_text())
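#Illustrative note (not part of the original source): templateOverrides_* acts
#as a per-instance dictionary layered over the template's paramDefaults, e.g.
#(key and value hypothetical):
#  self.meshInstance.templateOverrides_insert("meshSize", "0.5") #override the default
#  self.meshInstance.templateOverrides_get("meshSize")           #-> "0.5"
#  self.meshInstance.templateOverrides_del("meshSize")           #revert to paramDefaults_get("meshSize")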
def event_delete(self):
print "MeshInstance::event_delete()"
#Save to the meshInstance
self.updateMeshInstance()
#Ask the meshInstance to write itself to disk
self.meshInstance.write()
def event_button_export(self,widget,data=None):
print "MeshInstance::event_button_export()"
self.updateMeshInstance()
(journal, extraKeys) = self.meshInstance.generateCubitJou()
#Check for extra keys
if len(extraKeys):
dia = gtk.Dialog("Extra keys in template", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_NO, gtk.RESPONSE_NO,
gtk.STOCK_YES, gtk.RESPONSE_YES))
dia.set_default_response(gtk.RESPONSE_YES)
dia.vbox.pack_start(gtk.image_new_from_stock(
gtk.STOCK_DIALOG_QUESTION,
gtk.ICON_SIZE_DIALOG))
dia.vbox.pack_start(gtk.Label("Extra keys found in template, continue?\n" + str(extraKeys) ))
dia.show_all()
response = dia.run()
dia.destroy()
if not response == gtk.RESPONSE_YES:
#Stop now
return
#Ask where to save
chooser = gtk.FileChooserDialog(title="Export file",
parent=self.getBaseWindow(),
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("CUBIT journal file .jou")
filter.add_mime_type("text/plain")
filter.add_pattern("*.jou")
chooser.add_filter(filter)
response = chooser.run()
if response == gtk.RESPONSE_OK:
fname = chooser.get_filename()
if not fname.endswith(".jou"):
fname += ".jou"
if os.path.isfile(fname):
dia = gtk.Dialog("File already exists", chooser,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_NO, gtk.RESPONSE_NO,
gtk.STOCK_YES, gtk.RESPONSE_YES))
dia.set_default_response(gtk.RESPONSE_YES)
dia.vbox.pack_start(gtk.image_new_from_stock\
(gtk.STOCK_DIALOG_QUESTION,
gtk.ICON_SIZE_DIALOG))
dia.vbox.pack_start(gtk.Label("File already exists, overwrite?"))
dia.show_all()
response2 = dia.run()
dia.destroy()
if not response2 == gtk.RESPONSE_YES:
#Stop now!
print "MeshInstance::event_button_export()::AbortOverwrite"
chooser.destroy() #I'm too lazy to implement a proper event loop
return
#File name free OR user clicked YES to overwrite
chooser.destroy()
print "MeshInstance::event_button_export()::write"
ofile = open(fname,'w')
ofile.write(journal)
ofile.close()
else:
chooser.destroy()
def event_button_generate(self,widget,data=None):
print "MeshInstance::event_button_generate()"
self.updateMeshInstance()
try:
self.meshInstance.generateMesh()
except AcdOptiException_cubitTemplateFile_CUBITerror as e:
self.makePing()
md = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR,
gtk.BUTTONS_CLOSE, "Error during execution of CUBIT script, offending command:\n" + str(e.args[2]))
md.run()
md.destroy()
except AcdOptiException_meshInstance_generateFail as e:
self.makePing()
md = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR,
gtk.BUTTONS_CLOSE, "There was a problem generating the mesh:\n" + str(e.args[0]))
md.run()
md.destroy()
self.updateTable()
self.makePing()
def event_check_toggled(self, widget, data):
print "MeshInstance::event_check_toggled(), data =", data
if widget.get_active():
#Checked
dia = gtk.Dialog("Entry unchecked", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_NO, gtk.RESPONSE_NO,
gtk.STOCK_YES, gtk.RESPONSE_YES))
dia.set_default_response(gtk.RESPONSE_YES)
dia.vbox.pack_start(gtk.image_new_from_stock(
gtk.STOCK_DIALOG_QUESTION,
gtk.ICON_SIZE_DIALOG))
dia.vbox.pack_start(gtk.Label("Delete override \"" + data + "\" ?"))
dia.show_all()
response = dia.run()
if response == gtk.RESPONSE_YES:
#Delete
dia.destroy()
self.meshInstance.templateOverrides_del(data)
self.__entryCollection[data].set_sensitive(False)
self.__entryCollection[data].set_text(self.meshInstance.meshTemplate.paramDefaults_get(data))
else:
#Abort
dia.destroy()
self.__checkCollection[data].set_active(False)
else:
#Unchecked
self.meshInstance.templateOverrides_insert(data, self.meshInstance.meshTemplate.paramDefaults_get(data))
self.__entryCollection[data].set_sensitive(True)
def event_button_clearLockdown(self, widget, data=None):
print "MeshInstance::event_button_clearLockdown()"
self.meshInstance.clearLockdown()
self.updateTable()
self.frameManager.mainWindow.updateProjectExplorer()
def event_button_runConfig(self, widget,data=None):
print "MeshInstance::event_button_runConfig()"
name = ""
while True:
dia = gtk.Dialog("Please enter name of new runconfig:", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dia.set_default_response(gtk.RESPONSE_OK)
nameBox = gtk.Entry()
nameBox.set_text(name)
nameBox.show()
dia.vbox.pack_start(nameBox)
dia.show_all()
response = dia.run()
name = nameBox.get_text()
dia.destroy()
if response == gtk.RESPONSE_OK:
#Check for whitespace
print "got: \"" + name + "\""
if " " in name:
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name cannot contain whitespace")
mDia.run()
mDia.destroy()
#OK, try to add it...
else:
if name in self.meshInstance.runConfigs:
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name already in use")
mDia.run()
mDia.destroy()
continue
else:
self.meshInstance.addRunConfig(name, "Hopper", "omega3P")
break #Done!
#Response cancel or close
else:
break
# END if response...
# END while True
self.updateTable()
self.frameManager.mainWindow.updateProjectExplorer()
def event_button_paraview(self, widget,data=None):
print "MeshInstance::event_button_paraview()"
paraViewPath = AcdOptiSettings().getSetting("paraviewpath")
AcdOptiCommandWrapper.runProgramInFolder(paraViewPath, self.meshInstance.folder)
def event_button_clone(self, widget, data=None):
print "MeshInstance::event_button_clone()"
#Ask for the new geomInstance name
dia = gtk.Dialog("Please enter name of new mesh instance:", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dia.set_default_response(gtk.RESPONSE_OK)
nameBox = gtk.Entry()
nameBox.set_text(self.meshInstance.instName + "_clone")
dia.vbox.pack_start(nameBox)
dia.show_all()
response = dia.run()
cloneName = nameBox.get_text()
dia.destroy()
if response == gtk.RESPONSE_OK:
#Check for whitespace
if " " in cloneName:
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name cannot contain whitespace")
mDia.run()
mDia.destroy()
elif cloneName == "":
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name cannot be empty")
mDia.run()
mDia.destroy()
elif cloneName in self.meshInstance.geometryInstance.meshInsts:
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name already in use")
mDia.run()
mDia.destroy()
#Everything OK: Try to attach the MeshInstance!
else:
#self.geomInstance.template.cloneGeomInstance(self.geomInstance.instName, cloneName)
self.meshInstance.geometryInstance.cloneMeshInstance(self.meshInstance,cloneName)
self.frameManager.mainWindow.updateProjectExplorer()
| kyrsjo/AcdOpti | src/acdOptiGui/infoFrames/MeshInstance.py | Python | gpl-3.0 | 20,096 | ["ParaView"] | 9a0274a85ae645989e8fa54bae5aedb9fbecdb9be2928f9bb797590823dfa761 |
#! /usr/bin/env python2.7
#Opens VMD with overlaid fields
import os
import numpy as np
import csv
import sys
import shutil
import glob
from .mdpostproc import MD_PostProc
from .mdfields import (MD_mField, MD_vField, MD_TField,
MD_momField, MD_dField)
from .headerdata import MDHeaderData
from .writecolormap import WriteColorMap
sys.path.insert(0,'../')
from misclib import Chdir
from .vmd_reformat import VmdReformat
class VMDFields:
"""
Class to reformat mass, momentum, temperature, etc.
fields and run VMD with molecules coloured by these fields
"""
def __init__(self, fieldobj, fdir=None, scriptdir=None):
if fdir == None:
self.fdir = fieldobj.fdir
else:
self.fdir = os.path.join(fdir, '')
if scriptdir == None:
self.scriptdir = os.path.dirname(os.path.realpath(__file__))+"/"
else:
self.scriptdir = scriptdir
self.fieldobj = fieldobj
self.pwd = os.path.join(os.path.dirname(__file__))
self.vmdfile = 'vmd_out.dcd'
#Read Header
self.header = fieldobj.Raw.header #MDHeaderData(fdir)
#Check the simulation progress file to see if the run
#is still going or has finished prematurely (crashed)
with open(self.fdir+'simulation_progress', 'r') as f:
simulation_time = f.readline()
for i in dir(self.header):
print(i, i.find('initialstep') == 0)
if (i.find('Nsteps') == 0):
self.Nsteps = int(vars(self.header)[i])
if (i.find('initialstep') == 0):
self.initialstep = int(vars(self.header)[i])
if (int(simulation_time)-self.initialstep < self.Nsteps):
self.Nsteps = int(simulation_time)
self.finished = False
else:
self.finished = True
#Get vmd skip
vmd_skip_found = False
for i in dir(self.header):
if (i.find('vmd_skip') == 0):
self.vmd_skip = int(vars(self.header)[i])
vmd_skip_found = True
if (not vmd_skip_found):
self.vmd_skip = 1
#Get VMD intervals from header
self.starts = []; self.ends = []
for i in dir(self.header):
if (i.find('vmd_start') == 0):
start = int(vars(self.header)[i])
if (start < int(simulation_time)-self.initialstep):
self.starts.append(start)
if (i.find('vmd_end') == 0):
end = int(vars(self.header)[i])
if (end < float(simulation_time)-self.initialstep):
self.ends.append(end)
#If part way through an interval, set maximum to last iteration run
if (len(self.starts) > len(self.ends)):
self.ends.append(int(simulation_time)-self.initialstep)
#Shift by initialrecord
#Get averaging time per record
self.Nave = str(self.fieldobj.plotfreq)
def copy_tclfiles(self):
"""
Create VMD vol_data folder
"""
self.vmd_dir = self.fdir + '/vmd/'
self.vol_dir = self.vmd_dir + '/vol_data/'
if not os.path.exists(self.vol_dir):
os.makedirs(self.vol_dir)
def listdir_nohidden(path):
return glob.glob(os.path.join(path, '*'))
#Copy tcl scripts to vmd folder
self.vmdtcl = self.scriptdir + '/vmd_tcl/'
if listdir_nohidden(self.vmdtcl) == []:
sys.exit("Error in copy_tclfiles -- Directory "
+ self.vmdtcl +
" is empty or not found ")
for filepath in listdir_nohidden(self.vmdtcl):
filename = filepath.split('/')[-1]
print(filepath, self.vmd_dir+ '/' +filename)
shutil.copyfile(filepath, self.vmd_dir+ '/' +filename )
def reformat(self):
# If simulation has not finished and temp is newer than out
# call reformat to update vmd_out.dcd
reformat = False
if not self.finished:
if (os.path.isfile(self.fdir+self.vmdfile)):
filetime = os.path.getmtime(self.fdir+self.vmdfile)
else:
filetime = 0.
if (os.path.isfile(self.fdir+self.vmdfile.replace('out','temp'))):
temptime = os.path.getmtime(self.fdir+self.vmdfile.replace('out','temp'))
else:
temptime = 0.
if temptime > filetime:
print('Attempting to reformat vmd_out.dcd from vmd_temp.dcd')
reformat = True
else:
if not os.path.isfile(self.fdir+self.vmdfile):
print(self.fdir+self.vmdfile)
print('Run has finished but vmd_out.dcd is missing')
sys.exit(1)
if reformat:
self.reformat_vmdtemp()
def write_vmd_header(self):
#Write VMD intervals
print('Writing VMD Header')
with open(self.vol_dir + '/vmd_header','w+') as f:
f.write(self.header.tplot + '\n')
f.write(self.header.delta_t + '\n')
f.write(self.Nave + '\n')
f.write(str(self.vmd_skip) + '\n')
def write_vmd_intervals(self):
#Write VMD intervals
print('Writing VMD intervals data')
self.starts.sort(); self.ends.sort()
self.vmdintervals = zip(self.starts, self.ends)
with open(self.vol_dir + '/vmd_intervals','w+') as f:
for i in self.vmdintervals:
f.write(str(i[0]) + '\n' + str(i[1]) + '\n')
#Write range of dx files based on VMD intervals
def write_dx_range(self, component=0, clims=None):
#Clean previous files
outdir = self.fdir+"./vmd/vol_data/"
filelist = [ f for f in os.listdir(outdir + ".") if f.endswith(".dx") ]
for f in filelist:
os.remove(outdir+f)
#Write range of files in all intervals
print('Writing dx files intervals data',self.vmdintervals)
clims_array = []
for i in self.vmdintervals:
fieldrecstart = i[0]/(int(self.header.tplot)*int(self.Nave))
fieldrecend = i[1]/(int(self.header.tplot)*int(self.Nave))
if (fieldrecend > self.fieldobj.maxrec):
fieldrecend = self.fieldobj.maxrec
#If limits are not specified, store time history for all intervals and average
if (clims == None):
clims_array.append(self.fieldobj.write_dx_file(fieldrecstart,
fieldrecend,
component=component, norm=True))
elif (len(clims) != 2):
quit("Error in write_dx_range - clims should be tuple length 2 of form (cmin,cmax)")
else:
dummy = self.fieldobj.write_dx_file(fieldrecstart,
fieldrecend,
component=component)
#Write maximum and minimum values for colourbar
if (clims == None):
clims = np.max(clims_array,axis=0)
#clims[1] = np.min(clims_array,axis=1)
with open(self.vol_dir + '/colour_range','w+') as f:
f.write(str(clims[0]) + '\n' + str(clims[1]) + '\n')
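# Worked example (illustrative, values assumed): with tplot=10 and Nave=100,
# a VMD interval of iterations (0, 5000) maps to field records
#   fieldrecstart = 0    / (10*100) = 0
#   fieldrecend   = 5000 / (10*100) = 5
# so dx files are written for records 0 through 5 of the field object.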
def writecolormap(self,cmap='RdYlBu_r'):
cmap_writer = WriteColorMap(cmap,1024)
cmap_writer.write(self.vol_dir)
def reformat_vmdtemp(self):
"""
If run has not finished, attempt to build and
run fortran code to reorder temp files to a
useful form
"""
print("Attempting to reformat " + self.vmdfile.replace('out','temp') +
" in " + self.fdir + " to " + self.vmdfile )
VMDreformobj = VmdReformat(os.path.abspath(self.fdir)+'/',
fname=self.vmdfile.replace('out','temp'),
scriptdir=self.scriptdir)
VMDreformobj.reformat()
if __name__ == "__main__":
fdir='../../MD_dCSE/src_code/results/'
fieldtypes = {'mbins','vbins','Tbins',
'density','momentum','CV_config',
'CV_kinetic','CV_total'}
ppObj = MD_PostProc(fdir)
if(len(sys.argv) == 1):
print("No field type specified, options include: "
+ str(fieldtypes) + " Setting default vbins")
objtype = 'vbins'
component = 0
elif(sys.argv[1] in ['--help', '-help', '-h']):
print("Available field types include")
print(ppObj)
sys.exit()
else:
objtype = sys.argv[1]
if(len(sys.argv) == 2):
print("No components direction specified, setting default = 0")
component = 0
else:
component = sys.argv[2]
try:
fobj = ppObj.plotlist[objtype]
except KeyError:
print("Field not recognised == available field types include")
print(ppObj)
sys.exit()
except:
raise
vmdobj = VMDFields(fobj,fdir)
vmdobj.reformat()
vmdobj.write_vmd_header()
vmdobj.write_vmd_intervals()
vmdobj.write_dx_range(component=component)
vmdobj.writecolormap('RdYlBu')
with Chdir(fdir + './vmd/'):
print(fdir)
command = "vmd -e " + "./plot_MD_field.vmd"
os.system(command)
| edwardsmith999/pyDataView | postproclib/vmdfields.py | Python | gpl-3.0 | 9,470 | ["VMD"] | d10d50814daca1a60ef453f3230fe9d4b2bf76df78a94aec4ebc97ed1f39a1fa |
# coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2016 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------
Name: NAMDownload.py
Description: Downloads the most up to date data from the NOAA site by getting the present date.
Script works as follows:
Gets the present date and time in UTC
Uses the OPeNDAP to NetCDF tool from Multidimension Supplemental Tools
Downloads the specified variables into NetCDF format files and saves them in the relative location, based on where the script file is located.
The present data is removed from the Mosaic Dataset
The new data is then loaded into the Mosaic Dataset
History:
9/21/2015 - ab - original coding
6/10/2016 - mf - Updates for dimension and formatting
9/12/2016 - mf - fix for Python3 not liking leading zeros
'''
#Import modules
#import arceditor
import arcpy
import os
import sys
import traceback
import datetime
from arcpy import env
from datetime import datetime
from datetime import time
from datetime import timedelta
#Gets the current directory where the script is sitting so that everything else can work off relative paths.
currentFolder = os.path.dirname(__file__)
topFolder = os.path.dirname(currentFolder)
#Names of folders to be added to topFolder generated above
gdb = "Geodatabase"
NetCDFData = "NetCDFdata"
tls = "Tools"
env.workspace = os.path.join(topFolder, gdb, r"MAOWdata.gdb")
env.scratchWorkspace = env.workspace
#Declaration of variables used later
opVariables = "rh2m;tcdcclm;tmpsfc;hgtclb;vissfc;ugrd10m;vgrd10m;ugrdmwl;vgrdmwl;snodsfc;gustsfc;apcpsfc"
windVariables = "ugrd10m;vgrd10m"
geoExtent = "-126 32 -114 43"
timeDimension = "time '2016-01-01 00:00:00' '2016-12-31 00:00:00'"
# Processing flags
REMOVE_EXISTING_RASTERS = True
DEBUG = True # Extra messaging while debugging
def makeOutputFilePath(topFolder, NetCDFData, stringDateNow, paramFN):
'''Set output file paths for op weather and wind'''
opDataFileName = "nam%s%s.nc" % (stringDateNow, paramFN)
outputOpDataFile = os.path.join(topFolder, NetCDFData, opDataFileName)
windDataFileName = "nam%s%sWind.nc" % (stringDateNow, paramFN)
outputWindDataFile = os.path.join(topFolder, NetCDFData, windDataFileName)
return [outputOpDataFile, outputWindDataFile]
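# For example (illustrative, date and suffix assumed):
# makeOutputFilePath(topFolder, "NetCDFdata", "20160912", "1hr00z") returns
# [<topFolder>/NetCDFdata/nam201609121hr00z.nc, <topFolder>/NetCDFdata/nam201609121hr00zWind.nc]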
def makeSourceURLPath(stringDateNow, paramDL):
'''make the URL to the source forecast data'''
return r"http://nomads.ncep.noaa.gov/dods/nam/nam%s/nam%s" % (stringDateNow, paramDL)
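# For example: makeSourceURLPath("20160912", "1hr_00z") builds
# http://nomads.ncep.noaa.gov/dods/nam/nam20160912/nam1hr_00z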
def download(stringDateNow, stringTimeNow, paramFN, paramDL):
'''Download NetCDF data and add to mosaic dataset'''
if DEBUG: print ("datetime to use: %s, %s" % (stringDateNow, stringTimeNow))
#Import required Multidimensional tools
tbxMST = os.path.join(topFolder, tls, r"MultidimensionSupplementalTools\Multidimension Supplemental Tools.pyt")
if DEBUG: print ("Importing %s" % tbxMST)
arcpy.ImportToolbox(tbxMST)
# Get target NetCDF data file names
outputOpDataFile, outputWindDataFile = makeOutputFilePath(topFolder, NetCDFData, stringDateNow, paramFN)
if os.path.exists(outputOpDataFile):
print("removing existing %s" % outputOpDataFile)
os.remove(outputOpDataFile)
if os.path.exists(outputWindDataFile):
print("removing existing %s" % outputWindDataFile)
os.remove(outputWindDataFile)
# Get source URL path
in_url = makeSourceURLPath(stringDateNow, paramDL)
#Run OPeNDAP to NetCDF tool
if DEBUG:
print("in_url: %s" % in_url)
print("variable: %s" % opVariables)
print("dimension: %s" % timeDimension )
print ("OPeNDAP Tool run for Operational Weather variables...")
arcpy.OPeNDAPtoNetCDF_mds(in_url, opVariables, outputOpDataFile, geoExtent, timeDimension, "BY_VALUE")
#Run OPeNDAP to NetCDF tool
print ("OPeNDAP Tool run for Wind variables...")
arcpy.OPeNDAPtoNetCDF_mds(in_url, windVariables, outputWindDataFile, geoExtent, timeDimension, "BY_VALUE")
targetOpDataMosaic = os.path.join(topFolder, gdb, r"OperationalWeather.gdb\OperationalData")
targetWindDataMosaic = os.path.join(topFolder, gdb, r"OperationalWeather.gdb\OperationalWind")
# Remove Rasters From Mosaic Dataset
if REMOVE_EXISTING_RASTERS:
print ("Removing existing rasters from Operational Weather...")
arcpy.RemoveRastersFromMosaicDataset_management(targetOpDataMosaic, "OBJECTID >=0", "NO_BOUNDARY", "NO_MARK_OVERVIEW_ITEMS",
"NO_DELETE_OVERVIEW_IMAGES", "NO_DELETE_ITEM_CACHE", "REMOVE_MOSAICDATASET_ITEMS",
"NO_CELL_SIZES")
print ("Removing existing rasters from Wind...")
arcpy.RemoveRastersFromMosaicDataset_management(targetWindDataMosaic, "OBJECTID >= 0", "UPDATE_BOUNDARY", "MARK_OVERVIEW_ITEMS",
"DELETE_OVERVIEW_IMAGES", "DELETE_ITEM_CACHE", "REMOVE_MOSAICDATASET_ITEMS",
"UPDATE_CELL_SIZES")
# Add Rasters To Mosaic Dataset
print ("Adding new rasters from Operational Weather...")
arcpy.AddRastersToMosaicDataset_management(targetOpDataMosaic, "NetCDF", outputOpDataFile, "UPDATE_CELL_SIZES", "UPDATE_BOUNDARY",
"NO_OVERVIEWS", "", "0", "1500", "", "*.nc", "SUBFOLDERS", "ALLOW_DUPLICATES",
"NO_PYRAMIDS", "NO_STATISTICS", "NO_THUMBNAILS", "", "NO_FORCE_SPATIAL_REFERENCE")
print ("Adding new rasters from Wind...")
arcpy.AddRastersToMosaicDataset_management(targetWindDataMosaic, "NetCDF", outputWindDataFile, "UPDATE_CELL_SIZES", "UPDATE_BOUNDARY",
"NO_OVERVIEWS", "", "0", "1500", "", "*.nc", "SUBFOLDERS", "ALLOW_DUPLICATES",
"NO_PYRAMIDS", "NO_STATISTICS", "NO_THUMBNAILS", "", "NO_FORCE_SPATIAL_REFERENCE")
return
def main():
'''Decide which time period to download'''
try:
now_time = time(int(datetime.utcnow().strftime("%H")), int(datetime.utcnow().strftime("%M")), int(datetime.utcnow().strftime("%S")))
print("UTC time is (now_time): %s" % now_time)
patternDate = '%Y%m%d'
patternTime = '%H:%M:%S'
stringDateNow = datetime.utcnow().strftime(patternDate)
stringTimeNow = datetime.utcnow().strftime(patternTime)
if now_time >= time(2,50,00) and now_time < time(8,50,00):
print("Going to download 1hr_00z...")
download(stringDateNow, stringTimeNow,"1hr00z", "1hr_00z")
elif now_time >= time(8,50,00) and now_time < time(14,50,00):
print("Going to download 1hr_06z...")
download(stringDateNow, stringTimeNow,"1hr06z", "1hr_06z")
elif now_time >= time(14,50,00) and now_time < time(21,00,00):
print("Going to download 1hr_12z...")
download(stringDateNow, stringTimeNow,"1hr12z", "1hr_12z")
elif (now_time >= time(21,00,00) and now_time <= time(23,59,59)):
print("Going to download 1hr_18z...")
download(stringDateNow, stringTimeNow,"1hr18z", "1hr_18z")
elif (now_time >= time(00,00,00) and now_time <= time(2,49,59)):
# Get yesterday's forecast, because today's isn't
# published yet:
stringDateNow = (datetime.utcnow() - timedelta(days=1)).strftime(patternDate)
print("Going to download 1hr_18z for %s..." % stringDateNow)
download(stringDateNow, stringTimeNow,"1hr18z", "1hr_18z")
print("Done.")
except:
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
pymsg = "ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
print(pymsg + "\n")
sys.exit(1)
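# Summary of the window logic above (UTC):
# 02:50-08:50 -> 1hr_00z, 08:50-14:50 -> 1hr_06z, 14:50-21:00 -> 1hr_12z,
# 21:00-23:59 -> 1hr_18z, 00:00-02:50 -> yesterday's 1hr_18z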
# MAIN =============================================
if __name__ == "__main__":
main()
| jfrygeo/solutions-geoprocessing-toolbox | suitability/toolboxes/scripts/NAMDownload.py | Python | apache-2.0 | 8,637 | ["NetCDF"] | dce6cea09f66f09a61560b61646a03ccc5a2783a2b1323bb94b98623376e0ff0 |
#!/usr/bin/env python3
import sys
import os
import re
try:
import requests
import requests_cache
except Exception as e:
print("ERROR", "- You need to install missing dependencies: pip install requests requests-cache\n")
raise
assert sys.version_info.major == 3, "the script requires Python 3"
__author__ = "Juan Miguel Cejuela (@juanmirocks)"
__help__ = """
A bit of a hack that uses the NCBI Global Alignment API to align two proteins (Needleman-Wunsch algorithm)
Also (optionally) parse out a column of the tabular output, e.g. column 2 == sequence identity percentage.
HTTP requests are cached; a cache file is saved in the user's home folder.
Example call: ./ncbi_global_align.py P08100 P02699 2
See: https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastp&BLAST_PROGRAMS=blastp&PAGE_TYPE=BlastSearch&BLAST_SPEC=GlobalAln&LINK_LOC=blasttab&LAST_PAGE=blastn&BLAST_INIT=GlobalAln
Api doc: https://ncbi.github.io/blast-cloud/dev/api.html
WARNING: this script uses options that are not documented in the API; will likely break. Tested as of: 2017-04-14
"""
# ----------------------------------------------------------------------------
CACHE_FILE_NAME = os.path.join(os.path.expanduser('~'), "TMP_CACHE_NCBI_GLOBAL_ALIGN") # home folder
requests_cache.install_cache(
cache_name=CACHE_FILE_NAME,
backend="sqlite",
expire_after=(7 * 24 * 60 * 60), # 1 week
allowable_methods=('GET', "POST"))
# ----------------------------------------------------------------------------
POST_URL = "https://blast.ncbi.nlm.nih.gov/BlastAlign.cgi?CMD=Put&PROGRAM=blastp&BLAST_SPEC=GlobalAln"
GET_URL = "https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Get&FORMAT_TYPE=Tabular"
def post(seq1, seq2, is_alignment_commutative=True):
params = {}
if is_alignment_commutative and seq2 < seq1:
seq1, seq2 = seq2, seq1 # Order parameters to benefit more from cache
params["QUERY"] = seq1
params["SUBJECTS"] = seq2
response = requests.post(POST_URL, params=params)
assert response.ok, response
rid_search = re.search('RID = (\\S+)', response.text)
assert rid_search, "No RID found (job id / result id)"
rid = rid_search.group(1)
if not response.from_cache:
print("Called NCBI API -- RID:", rid, params)
return rid
def get(rid, column=None):
params = {}
params["RID"] = rid
response = requests.get(GET_URL, params=params)
assert response.ok, response
real_body = ""
in_pre = False
for line in response.text.splitlines():
if line.lower() == "<pre>":
in_pre = True
elif line.lower() == "</pre>":
break
elif in_pre and not line.startswith("#"):
real_body += line
try:
assert len(real_body) > 0
if column is None:
return real_body
else:
return real_body.split("\t")[column]
except Exception as e:
raise AssertionError(("No valid response output", response.text), e)
def global_align(seq1, seq2, column=None):
rid = post(seq1, seq2)
return get(rid, column)
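# Example usage (illustrative; live NCBI access assumed, see module help above):
#   global_align("P08100", "P02699")            # full tabular body
#   global_align("P08100", "P02699", column=2)  # sequence identity percentage only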
# ----------------------------------------------------------------------------
if __name__ == "__main__":
try:
assert len(sys.argv) in {3, 4}
column = None if len(sys.argv) == 3 else int(sys.argv[3])
ret = global_align(seq1=sys.argv[1], seq2=sys.argv[2], column=column)
print(ret)
except Exception:
print(__help__)
print()
raise
| juanmirocks/LocText | loctext/util/ncbi_global_align.py | Python | apache-2.0 | 3,601 | ["BLAST"] | 39a9f264a4bcc1f5e791ae76f44b9039a27ed240b85142ec834b4224c5856f01 |
"""
Some algorithms from the original skyline implementation are commented out and
the best combination of algorithms for NAB is included below.
"""
from datetime import datetime, timedelta
import numpy as np
import pandas
def tail_avg(timeseries):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
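# Illustrative input shape (not part of the original module): every detector
# takes a list of (timestamp, value) pairs, e.g.
#   ts = [(datetime(2016, 1, 1, 0, m), v) for m, v in enumerate([5.0, 5.0, 6.0])]
#   tail_avg(ts) # -> (5.0 + 5.0 + 6.0) / 3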
def median_absolute_deviation(timeseries):
"""
A timeseries is anomalous if the deviation of its latest datapoint with
respect to the median is X times larger than the median of deviations.
"""
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
test_statistic = demedianed.iloc[-1] / median_deviation
# Completely arbitrary... triggers if the median deviation is
# 6 times bigger than the median
if test_statistic > 6:
return True
else:
return False
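# Example (illustrative): for a series that is flat except for a final spike,
# demedianed.iloc[-1] dwarfs the median of deviations, so test_statistic
# exceeds 6 and the point is flagged as anomalous.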
# The method below is excluded because it is computationally inefficient
# def grubbs(timeseries):
# """
# A timeseries is anomalous if the Z score is greater than the Grubb's
# score.
# """
# series = np.array([x[1] for x in timeseries])
# stdDev = np.std(series)
# mean = np.mean(series)
# tail_average = tail_avg(timeseries)
# z_score = (tail_average - mean) / stdDev
# len_series = len(series)
# threshold = scipy.stats.t.isf(.05 / (2 * len_series), len_series - 2)
# threshold_squared = threshold * threshold
# grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(
# threshold_squared / (len_series - 2 + threshold_squared))
# return z_score > grubbs_score
def first_hour_average(timeseries):
"""
Calculate the simple average over one hour, one day ago.
A timeseries is anomalous if the average of the last three datapoints
is outside of three standard deviations of this value.
"""
day = timedelta(days=1)
hour = timedelta(hours=1)
last_hour_threshold = timeseries[-1][0] - (day - hour)
startTime = last_hour_threshold - hour
series = pandas.Series([x[1] for x in timeseries
if x[0] >= startTime
and x[0] < last_hour_threshold])
mean = (series).mean()
stdDev = (series).std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_average(timeseries):
"""
A timeseries is anomalous if the absolute value of the average of the latest
three datapoints minus the moving average is greater than three standard
deviations of the average. This does not exponentially weight the MA and so
is better for detecting anomalies with respect to the entire series.
"""
series = pandas.Series([x[1] for x in timeseries])
mean = series.mean()
stdDev = series.std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_moving_average(timeseries):
"""
A timeseries is anomalous if the absolute value of the average of the latest
three datapoints minus the moving average is greater than three standard
deviations of the moving average. This is better for finding anomalies with
respect to the short term trends.
"""
series = pandas.Series([x[1] for x in timeseries])
expAverage = series.ewm(ignore_na=False, min_periods=0, adjust=True, com=50).mean()
stdDev = series.ewm(ignore_na=False, min_periods=0, adjust=True, com=50).std(bias=False)
return abs(series.iloc[-1] - expAverage.iloc[-1]) > 3 * stdDev.iloc[-1]
def mean_subtraction_cumulation(timeseries):
"""
A timeseries is anomalous if the value of the next datapoint in the
series is farther than three standard deviations out in cumulative terms
after subtracting the mean from each data point.
"""
series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
series = series - series[0:len(series) - 1].mean()
stdDev = series[0:len(series) - 1].std()
return abs(series.iloc[-1]) > 3 * stdDev
def least_squares(timeseries):
"""
A timeseries is anomalous if the average of the last three datapoints
on a projected least squares model is greater than three sigma.
"""
x = np.array(
[(t[0] - datetime(1970, 1, 1)).total_seconds() for t in timeseries])
y = np.array([t[1] for t in timeseries])
A = np.vstack([x, np.ones(len(x))]).T
results = np.linalg.lstsq(A, y)
m, c = results[0]
errors = []
for i, value in enumerate(y):
projected = m * x[i] + c
error = value - projected
errors.append(error)
if len(errors) < 3:
return False
std_dev = np.std(errors)
t = (errors[-1] + errors[-2] + errors[-3]) / 3
return abs(t) > std_dev * 3 and round(std_dev) != 0 and round(t) != 0
def histogram_bins(timeseries):
"""
A timeseries is anomalous if the average of the last three datapoints falls
into a histogram bin with less than 20 other datapoints (you'll need to tweak
that number depending on your data)
Returns: the size of the bin which contains the tail_avg. Smaller bin size
means more anomalous.
"""
series = np.array([x[1] for x in timeseries])
t = tail_avg(timeseries)
h = np.histogram(series, bins=15)
bins = h[1]
for index, bin_size in enumerate(h[0]):
if bin_size <= 20:
# Is it in the first bin?
if index == 0:
if t <= bins[0]:
return True
# Is it in the current bin?
elif t >= bins[index] and t < bins[index + 1]:
return True
return False
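# Example (illustrative): with bins=15 over a series spanning 0-150, each bin
# is 10 wide; if the tail_avg lands in a bin holding 20 or fewer points, the
# function returns True (anomalous), otherwise False.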
# The method below is excluded because it is computationally inefficient
# def ks_test(timeseries):
# """
# A timeseries is anomalous if 2 sample Kolmogorov-Smirnov test indicates
# that data distribution for last 10 minutes is different from last hour.
# It produces false positives on non-stationary series so Augmented
# Dickey-Fuller test applied to check for stationarity.
# """
# hour_ago = time() - 3600
# ten_minutes_ago = time() - 600
# reference = scipy.array(
# [x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
# probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
# if reference.size < 20 or probe.size < 20:
# return False
# ks_d, ks_p_value = scipy.stats.ks_2samp(reference, probe)
# if ks_p_value < 0.05 and ks_d > 0.5:
# adf = sm.tsa.stattools.adfuller(reference, 10)
# if adf[1] < 0.05:
# return True
# return False
# The method below is excluded because it has no effect on the final skyline
# scores for NAB
# def is_anomalously_anomalous(metric_name, ensemble, datapoint):
# """
# This method runs a meta-analysis on the metric to determine whether the
# metric has a past history of triggering.
# TODO: weight intervals based on datapoint
# """
# # We want the datapoint to avoid triggering twice on the same data
# new_trigger = [time(), datapoint]
# # Get the old history
# raw_trigger_history = redis_conn.get("trigger_history." + metric_name)
# if not raw_trigger_history:
# redis_conn.set("trigger_history." + metric_name, packb(
# [(time(), datapoint)]))
# return True
# trigger_history = unpackb(raw_trigger_history)
# # Are we (probably) triggering on the same data?
# if (new_trigger[1] == trigger_history[-1][1] and
# new_trigger[0] - trigger_history[-1][0] <= 300):
# return False
# # Update the history
# trigger_history.append(new_trigger)
# redis_conn.set("trigger_history." + metric_name, packb(trigger_history))
# # Should we surface the anomaly?
# trigger_times = [x[0] for x in trigger_history]
# intervals = [
# trigger_times[i + 1] - trigger_times[i]
# for i, v in enumerate(trigger_times)
# if (i + 1) < len(trigger_times)
# ]
# series = pandas.Series(intervals)
# mean = series.mean()
# stdDev = series.std()
# return abs(intervals[-1] - mean) > 3 * stdDev
# def run_selected_algorithm(timeseries, metric_name):
# """
# Filter timeseries and run selected algorithm.
# """
# # Get rid of short series
# if len(timeseries) < MIN_TOLERABLE_LENGTH:
# raise TooShort()
# # Get rid of stale series
# if time() - timeseries[-1][0] > STALE_PERIOD:
# raise Stale()
# # Get rid of boring series
# if len(
# set(
# item[1] for item in timeseries[
# -MAX_TOLERABLE_BOREDOM:])) == BOREDOM_SET_SIZE:
# raise Boring()
# try:
# ensemble = [globals()[algorithm](timeseries) for algorithm in ALGORITHMS]
# threshold = len(ensemble) - CONSENSUS
# if ensemble.count(False) <= threshold:
# if ENABLE_SECOND_ORDER:
# if is_anomalously_anomalous(metric_name, ensemble, timeseries[-1][1]):
# return True, ensemble, timeseries[-1][1]
# else:
# return True, ensemble, timeseries[-1][1]
# return False, ensemble, timeseries[-1][1]
# except:
# logging.error("Algorithm error: " + traceback.format_exc())
# return False, [], 1
| rhyolight/NAB | nab/detectors/skyline/algorithms.py | Python | agpl-3.0 | 9,540 | ["ADF"] | 976ef016b2439c35ac1b13a073b4d4cb657c71c547283a39f659e432fe8c8237 |
#!/usr/bin/env python
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for executing tests.
This module can be executed from the command line using the following
approaches::
python -m robot.run
python path/to/robot/run.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module is also used by the installed ``robot`` start-up script.
This module also provides :func:`run` and :func:`run_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
import sys
# Allows running as a script. __name__ check needed with multiprocessing:
# https://github.com/robotframework/robotframework/issues/1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.conf import RobotSettings
from robot.model import ModelModifier
from robot.output import LOGGER, pyloggingconf
from robot.reporting import ResultWriter
from robot.running import TestSuiteBuilder
from robot.utils import Application, unic
USAGE = """Robot Framework -- A generic test automation framework
Version: <VERSION>
Usage: robot [options] data_sources
or: python -m robot [options] data_sources
or: python path/to/robot [options] data_sources
or: java -jar robotframework.jar [options] data_sources
Robot Framework is a Python-based keyword-driven test automation framework for
acceptance level testing and acceptance test-driven development (ATDD). It has
an easy-to-use tabular syntax for creating test cases and its testing
capabilities can be extended by test libraries implemented either with Python
or Java. Users can also create new higher level keywords from existing ones
using the same simple syntax that is used for creating test cases.
The easiest way to execute tests is using the `robot` script created as part
of the normal installation. Alternatively it is possible to execute the `robot`
module directly using `python -m robot`, where `python` can be replaced with
any supported Python interpreter like `jython`, `ipy` or `python3`. Yet another
alternative is running the `robot` directory like `python path/to/robot`.
Finally, there is a standalone JAR distribution available.
Data sources given to Robot Framework are either test case files or directories
containing them and/or other directories. Single test case file creates a test
suite containing all the test cases in it and a directory containing test case
files creates a higher level test suite with test case files or other
directories as sub test suites. If multiple data sources are given, a virtual
top level suite containing suites generated from given data sources is created.
By default Robot Framework creates an XML output file and a log and a report in
HTML format, but this can be configured using various options listed below.
Outputs in HTML format are for human consumption and XML output for integration
with other systems. XML outputs can also be combined and otherwise further
processed with Rebot tool. Run `rebot --help` for more information.
Robot Framework is open source software released under Apache License 2.0.
For more information about the framework and the rich ecosystem around it
see http://robotframework.org/.
Options
=======
-N --name name Set the name of the top level test suite. Underscores
in the name are converted to spaces. Default name is
created from the name of the executed data source.
-D --doc documentation Set the documentation of the top level test suite.
Underscores in the documentation are converted to
spaces and it may also contain simple HTML formatting
(e.g. *bold* and http://url/).
-M --metadata name:value * Set metadata of the top level suite. Underscores
in the name and value are converted to spaces. Value
can contain same HTML formatting as --doc.
Example: --metadata version:1.2
-G --settag tag * Sets given tag(s) to all executed test cases.
-t --test name * Select test cases to run by name or long name. Name
is case and space insensitive and it can also be a
simple pattern where `*` matches anything and `?`
matches any char. If using `*` and `?` in the console
is problematic see --escape and --argumentfile.
-s --suite name * Select test suites to run by name. When this option
is used with --test, --include or --exclude, only
test cases in matching suites and also matching other
filtering criteria are selected. Name can be a simple
pattern similarly as with --test and it can contain
parent name separated with a dot. For example
`-s X.Y` selects suite `Y` only if its parent is `X`.
-i --include tag * Select test cases to run by tag. Similarly as name
with --test, tag is case and space insensitive and it
is possible to use patterns with `*` and `?` as
wildcards. Tags and patterns can also be combined
together with `AND`, `OR`, and `NOT` operators.
Examples: --include foo --include bar*
--include fooANDbar*
-e --exclude tag * Select test cases not to run by tag. These tests are
not run even if included with --include. Tags are
matched using the rules explained with --include.
-R --rerunfailed output Select failed tests from an earlier output file to be
re-executed. Equivalent to selecting same tests
individually using --test option.
-c --critical tag * Tests having given tag are considered critical. If no
critical tags are set, all tags are critical. Tags
can be given as a pattern like with --include.
-n --noncritical tag * Tests with given tag are not critical even if they
have a tag set with --critical. Tag can be a pattern.
-v --variable name:value * Set variables in the test data. Only scalar
variables with string value are supported and name is
given without `${}`. See --escape for how to use
special characters and --variablefile for a more
powerful variable setting mechanism.
Examples:
--variable str:Hello => ${str} = `Hello`
-v hi:Hi_World -E space:_ => ${hi} = `Hi World`
-v x: -v y:42 => ${x} = ``, ${y} = `42`
-V --variablefile path * Python or YAML file to read variables from.
Possible arguments to the variable file can be given
after the path using colon or semicolon as separator.
Examples: --variablefile path/vars.yaml
--variablefile environment.py:testing
-d --outputdir dir Where to create output files. The default is the
directory where tests are run from and the given path
is considered relative to that unless it is absolute.
-o --output file XML output file. Given path, similarly as paths given
to --log, --report, --xunit, and --debugfile, is
relative to --outputdir unless given as an absolute
path. Other output files are created based on XML
output files after the test execution and XML outputs
can also be further processed with Rebot tool. Can be
disabled by giving a special value `NONE`. In this
case, also log and report are automatically disabled.
Default: output.xml
-l --log file HTML log file. Can be disabled by giving a special
value `NONE`. Default: log.html
Examples: `--log mylog.html`, `-l NONE`
-r --report file HTML report file. Can be disabled with `NONE`
similarly as --log. Default: report.html
-x --xunit file xUnit compatible result file. Not created unless this
option is specified.
--xunitskipnoncritical Mark non-critical tests on xUnit output as skipped.
-b --debugfile file Debug file written during execution. Not created
unless this option is specified.
-T --timestampoutputs When this option is used, timestamp in a format
`YYYYMMDD-hhmmss` is added to all generated output
files between their basename and extension. For
example `-T -o output.xml -r report.html -l none`
creates files like `output-20070503-154410.xml` and
`report-20070503-154410.html`.
--splitlog Split log file into smaller pieces that open in
browser transparently.
--logtitle title Title for the generated test log. The default title
is `<Name Of The Suite> Test Log`. Underscores in
the title are converted into spaces in all titles.
--reporttitle title Title for the generated test report. The default
title is `<Name Of The Suite> Test Report`.
--reportbackground colors Background colors to use in the report file.
Either `all_passed:critical_passed:failed` or
`passed:failed`. Both color names and codes work.
Examples: --reportbackground green:yellow:red
--reportbackground #00E:#E00
-L --loglevel level Threshold level for logging. Available levels: TRACE,
DEBUG, INFO (default), WARN, NONE (no logging). Use
syntax `LOGLEVEL:DEFAULT` to define the default
visible log level in log files.
Examples: --loglevel DEBUG
--loglevel DEBUG:INFO
--suitestatlevel level How many levels to show in `Statistics by Suite`
in log and report. By default all suite levels are
shown. Example: --suitestatlevel 3
--tagstatinclude tag * Include only matching tags in `Statistics by Tag`
and `Test Details` in log and report. By default all
tags set in test cases are shown. Given `tag` can
also be a simple pattern (see e.g. --test).
--tagstatexclude tag * Exclude matching tags from `Statistics by Tag` and
`Test Details`. This option can be used with
--tagstatinclude similarly as --exclude is used with
--include.
--tagstatcombine tags:name * Create combined statistics based on tags.
These statistics are added into `Statistics by Tag`
and matching tests into `Test Details`. If optional
`name` is not given, name of the combined tag is got
from the specified tags. Tags are combined using the
rules explained in --include.
Examples: --tagstatcombine requirement-*
--tagstatcombine tag1ANDtag2:My_name
--tagdoc pattern:doc * Add documentation to tags matching given pattern.
Documentation is shown in `Test Details` and also as
a tooltip in `Statistics by Tag`. Pattern can contain
characters `*` (matches anything) and `?` (matches
any char). Documentation can contain formatting
similarly as with --doc option.
Examples: --tagdoc mytag:My_documentation
--tagdoc regression:*See*_http://info.html
--tagdoc owner-*:Original_author
--tagstatlink pattern:link:title * Add external links into `Statistics by
Tag`. Pattern can contain characters `*` (matches
anything) and `?` (matches any char). Characters
matching to wildcard expressions can be used in link
and title with syntax %N, where N is index of the
match (starting from 1). In title underscores are
automatically converted to spaces.
Examples: --tagstatlink mytag:http://my.domain:Link
--tagstatlink bug-*:http://tracker/id=%1:Bug_Tracker
--removekeywords all|passed|for|wuks|name:<pattern>|tag:<pattern> *
Remove keyword data from the generated log file.
Keywords containing warnings are not removed except
in `all` mode.
all: remove data from all keywords
passed: remove data only from keywords in passed
test cases and suites
for: remove passed iterations from for loops
wuks: remove all but the last failing keyword
inside `BuiltIn.Wait Until Keyword Succeeds`
name:<pattern>: remove data from keywords that match
the given pattern. The pattern is matched
against the full name of the keyword (e.g.
'MyLib.Keyword', 'resource.Second Keyword'),
is case, space, and underscore insensitive,
and may contain `*` and `?` as wildcards.
Examples: --removekeywords name:Lib.HugeKw
--removekeywords name:myresource.*
tag:<pattern>: remove data from keywords that match
the given pattern. Tags are case and space
insensitive and it is possible to use
patterns with `*` and `?` as wildcards.
Tags and patterns can also be combined
together with `AND`, `OR`, and `NOT`
operators.
Examples: --removekeywords foo
--removekeywords fooANDbar*
--flattenkeywords for|foritem|name:<pattern>|tag:<pattern> *
Flattens matching keywords in the generated log file.
Matching keywords get all log messages from their
child keywords and children are discarded otherwise.
for: flatten for loops fully
foritem: flatten individual for loop iterations
name:<pattern>: flatten matched keywords using same
matching rules as with
`--removekeywords name:<pattern>`
tag:<pattern>: flatten matched keywords using same
matching rules as with
`--removekeywords tag:<pattern>`
--listener class * A class for monitoring test execution. Gets
notifications e.g. when a test case starts and ends.
Arguments to the listener class can be given after
the name using colon or semicolon as a separator.
Examples: --listener MyListenerClass
--listener path/to/Listener.py:arg1:arg2
--warnonskippedfiles If this option is used, skipped test data files will
cause a warning that is visible in the console output
and the log file. By default skipped files only cause
an info level syslog message.
--nostatusrc Sets the return code to zero regardless of failures
in test cases. Error codes are returned normally.
--runemptysuite Executes tests also if the top level test suite is
empty. Useful e.g. with --include/--exclude when it
is not an error that no test matches the condition.
--dryrun Verifies test data and runs tests so that library
keywords are not executed.
-X --exitonfailure Stops test execution if any critical test fails.
--exitonerror Stops test execution if any error occurs when parsing
test data, importing libraries, and so on.
--skipteardownonexit Causes teardowns to be skipped if test execution is
stopped prematurely.
--randomize all|suites|tests|none Randomizes the test execution order.
all: randomizes both suites and tests
suites: randomizes suites
tests: randomizes tests
none: no randomization (default)
Use syntax `VALUE:SEED` to give a custom random seed.
The seed must be an integer.
Examples: --randomize all
--randomize tests:1234
--prerunmodifier class * Class to programmatically modify the test suite
structure before execution.
--prerebotmodifier class * Class to programmatically modify the result
model before creating reports and logs.
--console type How to report execution on the console.
verbose: report every suite and test (default)
dotted: only show `.` for passed test, `f` for
failed non-critical tests, and `F` for
failed critical tests
quiet: no output except for errors and warnings
none: no output whatsoever
-. --dotted Shortcut for `--console dotted`.
--quiet Shortcut for `--console quiet`.
-W --consolewidth chars Width of the monitor output. Default is 78.
-C --consolecolors auto|on|ansi|off Use colors on console output or not.
auto: use colors when output not redirected (default)
on: always use colors
ansi: like `on` but use ANSI colors also on Windows
off: disable colors altogether
Note that colors do not work with Jython on Windows.
-K --consolemarkers auto|on|off Show markers on the console when top level
keywords in a test case end. Values have the same
semantics as with --consolecolors.
-P --pythonpath path * Additional locations (directories, ZIPs, JARs) where
to search test libraries and other extensions when
they are imported. Multiple paths can be given by
separating them with a colon (`:`) or by using this
option several times. Given path can also be a glob
pattern matching multiple paths but then it normally
must be escaped or quoted.
Examples:
--pythonpath libs/
--pythonpath /opt/testlibs:mylibs.zip:yourlibs
-E star:STAR -P lib/STAR.jar -P mylib.jar
-E --escape what:with * Escape characters which are problematic in console.
`what` is the name of the character to escape and
`with` is the string to escape it with. Note that
all given arguments, incl. data sources, are escaped
so escape characters ought to be selected carefully.
<--------------------ESCAPES------------------------>
Examples:
--escape space:_ --metadata X:Value_with_spaces
-E space:SP -E quot:Q -v var:QhelloSPworldQ
-A --argumentfile path * Text file to read more arguments from. Use special
path `STDIN` to read contents from the standard input
stream. File can have both options and data sources
one per line. Contents do not need to be escaped but
spaces in the beginning and end of lines are removed.
Empty lines and lines starting with a hash character
(#) are ignored.
Example file:
| --include regression
| --name Regression Tests
| # This is a comment line
| my_tests.html
| path/to/test/directory/
Examples:
--argumentfile argfile.txt --argumentfile STDIN
-h -? --help Print usage instructions.
--version Print version information.
Options that are marked with an asterisk (*) can be specified multiple times.
For example, `--test first --test third` selects test cases with name `first`
and `third`. If an option accepts a value but is not marked with an asterisk,
the last given value has precedence. For example, `--log A.html --log B.html`
creates log file `B.html`. Options accepting no values can be disabled by
using the same option again with `no` prefix added or dropped. The last option
has precedence regardless of how many times options are used. For example,
`--dryrun --dryrun --nodryrun --nostatusrc --statusrc` would not activate the
dry-run mode and would return normal status rc.
Long option format is case-insensitive. For example, --SuiteStatLevel is
equivalent to but easier to read than --suitestatlevel. Long options can
also be shortened as long as they are unique. For example, `--logti Title`
works while `--lo log.html` does not because the former matches only --logtitle
but the latter matches --log, --loglevel and --logtitle.
Environment Variables
=====================
ROBOT_OPTIONS Space separated list of default options to be placed
in front of any explicit options on the command line.
ROBOT_SYSLOG_FILE Path to a file where Robot Framework writes internal
information about parsing test case files and running
tests. Can be useful when debugging problems. If not
set, or set to a special value `NONE`, writing to the
syslog file is disabled.
ROBOT_SYSLOG_LEVEL Log level to use when writing to the syslog file.
Available levels are the same as with --loglevel
command line option and the default is INFO.
ROBOT_INTERNAL_TRACES When set to any non-empty value, Robot Framework's
internal methods are included in error tracebacks.
Examples
========
# Simple test run with `robot` without options.
$ robot tests.robot
# Using options.
$ robot --include smoke --name Smoke_Tests path/to/tests.robot
# Executing `robot` module using Python.
$ python -m robot test_directory
# Running the `robot` directory with Jython.
$ jython /opt/robot tests.robot
# Executing multiple test case files and using case-insensitive long options.
$ robot --SuiteStatLevel 2 --Metadata Version:3 tests/*.robot more/tests.robot
# Setting default options and syslog file before running tests.
$ export ROBOT_OPTIONS="--critical regression --suitestatlevel 2"
$ export ROBOT_SYSLOG_FILE=/tmp/syslog.txt
$ robot tests.robot
"""
class RobotFramework(Application):
def __init__(self):
Application.__init__(self, USAGE, arg_limits=(1,),
env_options='ROBOT_OPTIONS', logger=LOGGER)
def main(self, datasources, **options):
settings = RobotSettings(options)
LOGGER.register_console_logger(**settings.console_output_config)
LOGGER.info('Settings:\n%s' % unic(settings))
suite = TestSuiteBuilder(settings['SuiteNames'],
settings['WarnOnSkipped']).build(*datasources)
suite.configure(**settings.suite_config)
if settings.pre_run_modifiers:
suite.visit(ModelModifier(settings.pre_run_modifiers,
settings.run_empty_suite, LOGGER))
with pyloggingconf.robot_handler_enabled(settings.log_level):
result = suite.run(settings)
LOGGER.info("Tests execution ended. Statistics:\n%s"
% result.suite.stat_message)
if settings.log or settings.report or settings.xunit:
writer = ResultWriter(settings.output if settings.log
else result)
writer.write_results(settings.get_rebot_settings())
return result.return_code
def validate(self, options, arguments):
return self._filter_options_without_value(options), arguments
def _filter_options_without_value(self, options):
return dict((name, value) for name, value in options.items()
if value not in (None, []))
def run_cli(arguments):
"""Command line execution entry point for running tests.
:param arguments: Command line arguments as a list of strings.
For programmatic usage the :func:`run` function is typically better. It has
a better API for that usage and does not call :func:`sys.exit` like this
function.
Example::
from robot import run_cli
run_cli(['--include', 'tag', 'path/to/tests.html'])
"""
RobotFramework().execute_cli(arguments)
def run(*datasources, **options):
"""Executes given Robot Framework data sources with given options.
Data sources are paths to files and directories, similarly as when running
the `robot` command from the command line. Options are given as keyword
arguments and their names are the same as the long command line options,
just without hyphens.
Options that can be given on the command line multiple times can be
passed as lists like `include=['tag1', 'tag2']`. If such an option is used
only once, it can also be given as a single string like `include='tag'`.
Additionally, the listener, prerunmodifier and prerebotmodifier options
support passing values as instances in addition to module names. For example,
`run('tests.robot', listener=Listener(), prerunmodifier=Modifier())`.
To capture stdout and/or stderr streams, pass open file objects in as
special keyword arguments `stdout` and `stderr`, respectively.
The return code is the same as when running on the command line.
Example::
from robot import run
run('path/to/tests.html', include=['tag1', 'tag2'])
with open('stdout.txt', 'w') as stdout:
run('t1.txt', 't2.txt', report='r.html', log='NONE', stdout=stdout)
Equivalent command line usage::
robot --include tag1 --include tag2 path/to/tests.html
robot --report r.html --log NONE t1.txt t2.txt > stdout.txt
"""
return RobotFramework().execute(*datasources, **options)
if __name__ == '__main__':
run_cli(sys.argv[1:])
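# Editor's sketch (not part of the original module): minimal programmatic
# use of ``run``. The test file path is hypothetical; per the docstring
# above, the return code matches normal command line semantics, so zero
# means every critical test passed.
#
#   from robot import run
#
#   rc = run('path/to/tests.robot', include=['smoke'], log='NONE')
#   if rc == 0:
#       print('all critical tests passed')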
|
jaloren/robotframework
|
src/robot/run.py
|
Python
|
apache-2.0
| 29,254
|
[
"VisIt"
] |
9dbff30f68c75cd12dcef36e323afae8567c54bcc511f5b1bbb8b3a2d85ff81f
|
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'OptimizeResult', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import sys
import numpy
from scipy._lib.six import callable, xrange
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
from scipy._lib._util import getargspec_no_self as _getargspec
from scipy._lib._util import MapWrapper
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev': 'Maximum number of function evaluations has '
'been exceeded.',
'maxiter': 'Maximum number of iterations has been '
'exceeded.',
'pr_loss': 'Desired error not necessarily achieved due '
'to precision loss.'}
class MemoizeJac(object):
""" Decorator that caches the value gradient of function each time it
is called. """
def __init__(self, fun):
self.fun = fun
self.jac = None
self.x = None
def __call__(self, x, *args):
self.x = numpy.asarray(x).copy()
fg = self.fun(x, *args)
self.jac = fg[1]
return fg[0]
def derivative(self, x, *args):
if self.jac is not None and numpy.all(x == self.x):
return self.jac
else:
self(x, *args)
return self.jac
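# Editor's sketch (not part of the original module): MemoizeJac expects the
# wrapped callable to return a ``(value, gradient)`` tuple; the toy
# objective below is hypothetical.
#
#   >>> def f_and_grad(x):
#   ...     return x[0] ** 2, numpy.array([2.0 * x[0]])
#   >>> memo = MemoizeJac(f_and_grad)
#   >>> float(memo(numpy.array([3.0])))        # evaluates the function
#   9.0
#   >>> memo.derivative(numpy.array([3.0]))    # reuses the cached gradient
#   array([ 6.])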
class OptimizeResult(dict):
""" Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun, jac, hess: ndarray
Values of objective function, its Jacobian and its Hessian (if
available). The Hessians may be approximations, see the documentation
of the function in question.
hess_inv : object
Inverse of the objective function's Hessian; may be an approximation.
Not available for all solvers. The type of this attribute may be
either np.ndarray or scipy.sparse.linalg.LinearOperator.
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
There may be additional attributes not listed above depending on the
specific solver. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
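# Editor's sketch (not part of the original module): because OptimizeResult
# subclasses dict with attribute accessors, the same fields can be read
# either way.
#
#   >>> res = OptimizeResult(x=numpy.ones(2), success=True, nit=3)
#   >>> res.success          # attribute access...
#   True
#   >>> res['nit']           # ...and mapping access reach the same data
#   3
#   >>> sorted(res.keys())
#   ['nit', 'success', 'x']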
class OptimizeWarning(UserWarning):
pass
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in SciPy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
"""Test whether `x` is either a scalar or an array scalar.
"""
return np.size(x) == 1
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
if ord == Inf:
return numpy.amax(numpy.abs(x))
elif ord == -Inf:
return numpy.amin(numpy.abs(x))
else:
return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
"""
The Rosenbrock function.
The function computed is::
sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
Parameters
----------
x : array_like
1-D array of points at which the Rosenbrock function is to be computed.
Returns
-------
f : float
The value of the Rosenbrock function.
See Also
--------
rosen_der, rosen_hess, rosen_hess_prod
Examples
--------
>>> from scipy.optimize import rosen
>>> X = 0.1 * np.arange(10)
>>> rosen(X)
76.56
"""
x = asarray(x)
r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
axis=0)
return r
def rosen_der(x):
"""
The derivative (i.e. gradient) of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the derivative is to be computed.
Returns
-------
rosen_der : (N,) ndarray
The gradient of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_hess, rosen_hess_prod
Examples
--------
>>> from scipy.optimize import rosen_der
>>> X = 0.1 * np.arange(9)
>>> rosen_der(X)
array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. ])
"""
x = asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = (200 * (xm - xm_m1**2) -
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
der[-1] = 200 * (x[-1] - x[-2]**2)
return der
def rosen_hess(x):
"""
The Hessian matrix of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_der, rosen_hess_prod
Examples
--------
>>> from scipy.optimize import rosen_hess
>>> X = 0.1 * np.arange(4)
>>> rosen_hess(X)
array([[-38., 0., 0., 0.],
[ 0., 134., -40., 0.],
[ 0., -40., 130., -80.],
[ 0., 0., -80., 200.]])
"""
x = atleast_1d(x)
H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1)
diagonal = numpy.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
H = H + numpy.diag(diagonal)
return H
def rosen_hess_prod(x, p):
"""
Product of the Hessian matrix of the Rosenbrock function with a vector.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
p : array_like
1-D array, the vector to be multiplied by the Hessian matrix.
Returns
-------
rosen_hess_prod : ndarray
The Hessian matrix of the Rosenbrock function at `x` multiplied
by the vector `p`.
See Also
--------
rosen, rosen_der, rosen_hess
Examples
--------
>>> from scipy.optimize import rosen_hess_prod
>>> X = 0.1 * np.arange(9)
>>> p = 0.5 * np.arange(9)
>>> rosen_hess_prod(X, p)
array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.])
"""
x = atleast_1d(x)
Hp = numpy.zeros(len(x), dtype=x.dtype)
Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
Hp[1:-1] = (-400 * x[:-2] * p[:-2] +
(202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
400 * x[1:-1] * p[2:])
Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1]
return Hp
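# Editor's sketch (not part of the original module): rosen_hess_prod should
# agree with forming the full Hessian and multiplying, which makes a cheap
# consistency check between the two implementations above.
#
#   >>> X = 0.1 * numpy.arange(9)
#   >>> p = 0.5 * numpy.arange(9)
#   >>> numpy.allclose(rosen_hess_prod(X, p), numpy.dot(rosen_hess(X), p))
#   True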
def wrap_function(function, args):
ncalls = [0]
if function is None:
return ncalls, None
def function_wrapper(*wrapper_args):
ncalls[0] += 1
return function(*(wrapper_args + args))
return ncalls, function_wrapper
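# Editor's sketch (not part of the original module): wrap_function is how
# the routines below count objective evaluations; the shared one-element
# list acts as a mutable call counter.
#
#   >>> ncalls, counted_rosen = wrap_function(rosen, ())
#   >>> _ = counted_rosen(numpy.zeros(3))
#   >>> _ = counted_rosen(numpy.ones(3))
#   >>> ncalls[0]
#   2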
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None, initial_simplex=None):
"""
Minimize a function using the downhill simplex algorithm.
This algorithm only uses function values, not derivatives or second
derivatives.
Parameters
----------
func : callable func(x,*args)
The objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func, i.e. ``f(x,*args)``.
xtol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
ftol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : number, optional
Maximum number of function evaluations to make.
full_output : bool, optional
Set to True if fopt and warnflag outputs are desired.
disp : bool, optional
Set to True to print convergence messages.
retall : bool, optional
Set to True to return list of solutions at each iteration.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
initial_simplex : array_like of shape (N + 1, N), optional
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the j-th vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
Returns
-------
xopt : ndarray
Parameter that minimizes function.
fopt : float
Value of function at minimum: ``fopt = func(xopt)``.
iter : int
Number of iterations performed.
funcalls : int
Number of function calls made.
warnflag : int
1 : Maximum number of function evaluations made.
2 : Maximum number of iterations reached.
allvecs : list
Solution at each iteration.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Nelder-Mead' `method` in particular.
Notes
-----
Uses a Nelder-Mead simplex algorithm to find the minimum of a function of
one or more variables.
This algorithm has a long history of successful use in applications.
But it will usually be slower than an algorithm that uses first or
second derivative information. In practice it can have poor
performance in high-dimensional problems and is not robust to
minimizing complicated functions. Additionally, there currently is no
complete theory describing when the algorithm will successfully
converge to the minimum, or how fast it will if it does. Both the ftol and
xtol criteria must be met for convergence.
Examples
--------
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fmin(f, 1)
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 17
Function evaluations: 34
>>> minimum[0]
-8.8817841970012523e-16
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
.. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
Respectable", in Numerical Analysis 1995, Proceedings of the
1995 Dundee Biennial Conference in Numerical Analysis, D.F.
Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
Harlow, UK, pp. 191-208.
"""
opts = {'xatol': xtol,
'fatol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'return_all': retall,
'initial_simplex': initial_simplex}
res = _minimize_neldermead(func, x0, args, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_neldermead(func, x0, args=(), callback=None,
maxiter=None, maxfev=None, disp=False,
return_all=False, initial_simplex=None,
xatol=1e-4, fatol=1e-4, adaptive=False,
**unknown_options):
"""
Minimization of a scalar function of one or more variables using the
Nelder-Mead algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Will default to ``N*200``, where ``N`` is the number of
variables, if neither `maxiter` nor `maxfev` is set. If both
`maxiter` and `maxfev` are set, minimization stops at
whichever limit is reached first.
initial_simplex : array_like of shape (N + 1, N)
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the j-th vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
xatol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
fatol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
adaptive : bool, optional
Adapt algorithm parameters to dimensionality of problem. Useful for
high-dimensional minimization [1]_.
References
----------
.. [1] Gao, F. and Han, L.
Implementing the Nelder-Mead simplex algorithm with adaptive
parameters. 2012. Computational Optimization and Applications.
51:1, pp. 259-277
"""
if 'ftol' in unknown_options:
warnings.warn("ftol is deprecated for Nelder-Mead,"
" use fatol instead. If you specified both, only"
" fatol is used.",
DeprecationWarning)
if (np.isclose(fatol, 1e-4) and
not np.isclose(unknown_options['ftol'], 1e-4)):
# only ftol was probably specified, use it.
fatol = unknown_options['ftol']
unknown_options.pop('ftol')
if 'xtol' in unknown_options:
warnings.warn("xtol is deprecated for Nelder-Mead,"
" use xatol instead. If you specified both, only"
" xatol is used.",
DeprecationWarning)
if (np.isclose(xatol, 1e-4) and
not np.isclose(unknown_options['xtol'], 1e-4)):
# only xtol was probably specified, use it.
xatol = unknown_options['xtol']
unknown_options.pop('xtol')
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
fcalls, func = wrap_function(func, args)
if adaptive:
dim = float(len(x0))
rho = 1
chi = 1 + 2/dim
psi = 0.75 - 1/(2*dim)
sigma = 1 - 1/dim
else:
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
nonzdelt = 0.05
zdelt = 0.00025
x0 = asfarray(x0).flatten()
if initial_simplex is None:
N = len(x0)
sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
sim[0] = x0
for k in range(N):
y = numpy.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt)*y[k]
else:
y[k] = zdelt
sim[k + 1] = y
else:
sim = np.asfarray(initial_simplex).copy()
if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
raise ValueError("`initial_simplex` should be an array of shape (N+1,N)")
if len(x0) != sim.shape[1]:
raise ValueError("Size of `initial_simplex` is not consistent with `x0`")
N = sim.shape[1]
if retall:
allvecs = [sim[0]]
# If neither is set, then set both to the default
if maxiter is None and maxfun is None:
maxiter = N * 200
maxfun = N * 200
elif maxiter is None:
# Convert remaining Nones to np.inf, unless the other is np.inf, in
# which case use the default to avoid unbounded iteration
if maxfun == np.inf:
maxiter = N * 200
else:
maxiter = np.inf
elif maxfun is None:
if maxiter == np.inf:
maxfun = N * 200
else:
maxfun = np.inf
one2np1 = list(range(1, N + 1))
fsim = numpy.zeros((N + 1,), float)
for k in range(N + 1):
fsim[k] = func(sim[k])
ind = numpy.argsort(fsim)
fsim = numpy.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = numpy.take(sim, ind, 0)
iterations = 1
while (fcalls[0] < maxfun and iterations < maxiter):
if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and
numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):
break
xbar = numpy.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
fsim[j] = func(sim[j])
ind = numpy.argsort(fsim)
sim = numpy.take(sim, ind, 0)
fsim = numpy.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
iterations += 1
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = numpy.min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print('Warning: ' + msg)
elif iterations >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print('Warning: ' + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
print(" Function evaluations: %d" % fcalls[0])
result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x, final_simplex=(sim, fsim))
if retall:
result['allvecs'] = allvecs
return result
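# Editor's sketch (not part of the original module): this private routine
# is normally reached through ``scipy.optimize.minimize`` with
# ``method='Nelder-Mead'``, passing the options documented above.
#
#   >>> from scipy.optimize import minimize
#   >>> res = minimize(rosen, [1.3, 0.7, 0.8, 1.9, 1.2],
#   ...                method='Nelder-Mead', options={'xatol': 1e-8})
#   >>> bool(res.success), bool(numpy.allclose(res.x, 1.0))
#   (True, True)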
def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
"""
See ``approx_fprime``. An optional initial function value arg is added.
"""
if f0 is None:
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
df = (f(*((xk + d,) + args)) - f0) / d[k]
if not np.isscalar(df):
try:
df = df.item()
except (ValueError, AttributeError):
raise ValueError("The user-provided "
"objective function must "
"return a scalar value.")
grad[k] = df
ei[k] = 0.0
return grad
def approx_fprime(xk, f, epsilon, *args):
"""Finite-difference approximation of the gradient of a scalar function.
Parameters
----------
xk : array_like
The coordinate vector at which to determine the gradient of `f`.
f : callable
The function of which to determine the gradient (partial derivatives).
Should take `xk` as first argument, other arguments to `f` can be
supplied in ``*args``. Should return a scalar, the value of the
function at `xk`.
epsilon : array_like
Increment to `xk` to use for determining the function gradient.
If a scalar, uses the same finite difference delta for all partial
derivatives. If an array, should contain one value per element of
`xk`.
\\*args : args, optional
Any other arguments that are to be passed to `f`.
Returns
-------
grad : ndarray
The partial derivatives of `f` with respect to `xk`.
See Also
--------
check_grad : Check correctness of gradient function against approx_fprime.
Notes
-----
The function gradient is determined by the forward finite difference
formula::
f(xk[i] + epsilon[i]) - f(xk[i])
f'[i] = ---------------------------------
epsilon[i]
The main use of `approx_fprime` is in scalar function optimizers like
`fmin_bfgs`, to determine numerically the Jacobian of a function.
Examples
--------
>>> from scipy import optimize
>>> def func(x, c0, c1):
... "Coordinate vector `x` should be an array of size two."
... return c0 * x[0]**2 + c1*x[1]**2
>>> x = np.ones(2)
>>> c0, c1 = (1, 200)
>>> eps = np.sqrt(np.finfo(float).eps)
>>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
array([ 2. , 400.00004198])
"""
return _approx_fprime_helper(xk, f, epsilon, args=args)
def check_grad(func, grad, x0, *args, **kwargs):
"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable ``func(x0, *args)``
Function whose derivative is to be checked.
grad : callable ``grad(x0, *args)``
Gradient of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \\*args, optional
Extra arguments passed to `func` and `grad`.
epsilon : float, optional
Step size used for the finite difference approximation. It defaults to
``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.
Returns
-------
err : float
The square root of the sum of squares (i.e. the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
Examples
--------
>>> def func(x):
... return x[0]**2 - 0.5 * x[1]**3
>>> def grad(x):
... return [2 * x[0], -1.5 * x[1]**2]
>>> from scipy.optimize import check_grad
>>> check_grad(func, grad, [1.5, -1.5])
2.9802322387695312e-08
"""
step = kwargs.pop('epsilon', _epsilon)
if kwargs:
raise ValueError("Unknown keyword arguments: %r" %
(list(kwargs.keys()),))
return sqrt(sum((grad(x0, *args) -
approx_fprime(x0, func, step, *args))**2))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
f2 = fprime(*((x0 + epsilon*p,) + args))
f1 = fprime(*((x0,) + args))
return (f2 - f1) / epsilon
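# Editor's sketch (not part of the original module): approx_fhess_p
# forward-differences the gradient along ``p``, so it should track the
# exact Rosenbrock Hessian-vector product to finite-difference accuracy.
#
#   >>> X = 0.1 * numpy.arange(5)
#   >>> p = 0.5 * numpy.arange(5)
#   >>> Hp = approx_fhess_p(X, p, rosen_der, _epsilon)
#   >>> numpy.allclose(Hp, rosen_hess_prod(X, p), atol=1e-3)
#   True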
class _LineSearchError(RuntimeError):
pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
**kwargs):
"""
Same as line_search_wolfe1, but falls back to line_search_wolfe2 if a
suitable step length is not found, and raises an exception if the
fallback does not find one either.
Raises
------
_LineSearchError
If no suitable step size is found
"""
extra_condition = kwargs.pop('extra_condition', None)
ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
**kwargs)
if ret[0] is not None and extra_condition is not None:
xp1 = xk + ret[0] * pk
if not extra_condition(ret[0], xp1, ret[3], ret[5]):
# Reject step if extra_condition fails
ret = (None,)
if ret[0] is None:
# line search failed: try a different one.
with warnings.catch_warnings():
warnings.simplefilter('ignore', LineSearchWarning)
kwargs2 = {}
for key in ('c1', 'c2', 'amax'):
if key in kwargs:
kwargs2[key] = kwargs[key]
ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
extra_condition=extra_condition,
**kwargs2)
if ret[0] is None:
raise _LineSearchError()
return ret
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
retall=0, callback=None):
"""
Minimize a function using the BFGS algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable f'(x,*args), optional
Gradient of f.
args : tuple, optional
Extra arguments passed to f and fprime.
gtol : float, optional
Gradient norm must be less than gtol before successful termination.
norm : float, optional
Order of norm (Inf is max, -Inf is min)
epsilon : int or ndarray, optional
If fprime is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function to call after each
iteration. Called as callback(xk), where xk is the
current parameter vector.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True,return fopt, func_calls, grad_calls, and warnflag
in addition to xopt.
disp : bool, optional
Print convergence message if True.
retall : bool, optional
Return a list of results at each iteration if True.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. f(xopt) == fopt.
fopt : float
Minimum value.
gopt : ndarray
Value of gradient at minimum, f'(xopt), which should be near 0.
Bopt : ndarray
Value of 1/f''(xopt), i.e. the inverse hessian matrix.
func_calls : int
Number of function_calls made.
grad_calls : int
Number of gradient calls made.
warnflag : integer
1 : Maximum number of iterations exceeded.
2 : Gradient and/or function calls not changing.
allvecs : list
The value of xopt at each iteration. Only returned if retall is True.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'BFGS' `method` in particular.
Notes
-----
Optimize the function, f, whose gradient is given by fprime
using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (BFGS)
References
----------
Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of a scalar function of one or more variables using the
BFGS algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if x0.ndim == 0:
x0.shape = (1,)
if maxiter is None:
maxiter = len(x0) * 200
func_calls, f = wrap_function(f, args)
old_fval = f(x0)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
N = len(x0)
I = numpy.eye(N, dtype=int)
Hk = I
# Sets the initial step guess to dx ~ 1
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
xk = x0
if retall:
allvecs = [x0]
warnflag = 0
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
pk = -numpy.dot(Hk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk,
old_fval, old_old_fval, amin=1e-100, amax=1e100)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
xkp1 = xk + alpha_k * pk
if retall:
allvecs.append(xkp1)
sk = xkp1 - xk
xk = xkp1
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
gfk = gfkp1
if callback is not None:
callback(xk)
k += 1
gnorm = vecnorm(gfk, ord=norm)
if (gnorm <= gtol):
break
if not numpy.isfinite(old_fval):
# We correctly found +-Inf as optimal value, or something went
# wrong.
warnflag = 2
break
try:  # this was handled in Numeric; let it remain for extra safety
rhok = 1.0 / (numpy.dot(yk, sk))
except ZeroDivisionError:
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
if isinf(rhok): # this is patch for numpy
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *
sk[numpy.newaxis, :])
fval = old_fval
if np.isnan(fval):
# This can happen if the first call to f returned NaN;
# the loop is then never entered.
warnflag = 2
if warnflag == 2:
msg = _status_message['pr_loss']
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
else:
msg = _status_message['success']
if disp:
print("%s%s" % ("Warning: " if warnflag != 0 else "", msg))
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
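# Editor's sketch (not part of the original module): the public wrapper
# defined above exercises this routine; with the exact gradient, BFGS
# recovers the Rosenbrock minimum at the vector of ones.
#
#   >>> xopt = fmin_bfgs(rosen, numpy.array([1.3, 0.7, 0.8]),
#   ...                  fprime=rosen_der, disp=0)
#   >>> numpy.allclose(xopt, 1.0, atol=1e-4)
#   True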
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using a nonlinear conjugate gradient algorithm.
Parameters
----------
f : callable, ``f(x, *args)``
Objective function to be minimized. Here `x` must be a 1-D array of
the variables that are to be changed in the search for a minimum, and
`args` are the other (fixed) parameters of `f`.
x0 : ndarray
A user-supplied initial estimate of `xopt`, the optimal value of `x`.
It must be a 1-D array of values.
fprime : callable, ``fprime(x, *args)``, optional
A function that returns the gradient of `f` at `x`. Here `x` and `args`
are as described above for `f`. The returned value must be a 1-D array.
Defaults to None, in which case the gradient is approximated
numerically (see `epsilon`, below).
args : tuple, optional
Parameter values passed to `f` and `fprime`. Must be supplied whenever
additional fixed parameters are needed to completely specify the
functions `f` and `fprime`.
gtol : float, optional
Stop when the norm of the gradient is less than `gtol`.
norm : float, optional
Order to use for the norm of the gradient
(``-np.Inf`` is min, ``np.Inf`` is max).
epsilon : float or ndarray, optional
Step size(s) to use when `fprime` is approximated numerically. Can be a
scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the
floating point machine precision. Usually ``sqrt(eps)`` is about
1.5e-8.
maxiter : int, optional
Maximum number of iterations to perform. Default is ``200 * len(x0)``.
full_output : bool, optional
If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
addition to `xopt`. See the Returns section below for additional
information on optional return values.
disp : bool, optional
If True, return a convergence message, followed by `xopt`.
retall : bool, optional
If True, add to the returned values the results of each iteration.
callback : callable, optional
An optional user-supplied function, called after each iteration.
Called as ``callback(xk)``, where ``xk`` is the current value of `x0`.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float, optional
Minimum value found, f(xopt). Only returned if `full_output` is True.
func_calls : int, optional
The number of function_calls made. Only returned if `full_output`
is True.
grad_calls : int, optional
The number of gradient calls made. Only returned if `full_output` is
True.
warnflag : int, optional
Integer value with warning status, only returned if `full_output` is
True.
0 : Success.
1 : The maximum number of iterations was exceeded.
2 : Gradient and/or function calls were not changing. May indicate
that precision was lost, i.e., the routine did not converge.
allvecs : list of ndarray, optional
List of arrays, containing the results at each iteration.
Only returned if `retall` is True.
See Also
--------
minimize : common interface to all `scipy.optimize` algorithms for
unconstrained and constrained minimization of multivariate
functions. It provides an alternative way to call
``fmin_cg``, by specifying ``method='CG'``.
Notes
-----
This conjugate gradient algorithm is based on that of Polak and Ribiere
[1]_.
Conjugate gradient methods tend to work better when:
1. `f` has a unique global minimizing point, and no local minima or
other stationary points,
2. `f` is, at least locally, reasonably well approximated by a
quadratic function of the variables,
3. `f` is continuous and has a continuous gradient,
4. `fprime` is not too large, e.g., has a norm less than 1000,
5. The initial guess, `x0`, is reasonably close to `f` 's global
minimizing point, `xopt`.
References
----------
.. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
Examples
--------
Example 1: seek the minimum value of the expression
``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values
of the parameters and an initial guess ``(u, v) = (0, 0)``.
>>> args = (2, 3, 7, 8, 9, 10) # parameter values
>>> def f(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f
>>> def gradf(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... gu = 2*a*u + b*v + d # u-component of the gradient
... gv = b*u + 2*c*v + e # v-component of the gradient
... return np.asarray((gu, gv))
>>> x0 = np.asarray((0, 0)) # Initial guess.
>>> from scipy import optimize
>>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 4
Function evaluations: 8
Gradient evaluations: 8
>>> res1
array([-1.80851064, -0.25531915])
Example 2: solve the same problem using the `minimize` function.
(This `myopts` dictionary shows all of the available options,
although in practice only non-default values would be needed.
The returned value will be a dictionary.)
>>> opts = {'maxiter' : None, # default value.
... 'disp' : True, # non-default value.
... 'gtol' : 1e-5, # default value.
... 'norm' : np.inf, # default value.
... 'eps' : 1.4901161193847656e-08} # default value.
>>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,
... method='CG', options=opts)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 4
Function evaluations: 8
Gradient evaluations: 8
>>> res2.x # minimum found
array([-1.80851064, -0.25531915])
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of a scalar function of one or more variables using the
conjugate gradient algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if maxiter is None:
maxiter = len(x0) * 200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
xk = x0
# Sets the initial step guess to dx ~ 1
old_fval = f(xk)
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
if retall:
allvecs = [xk]
warnflag = 0
pk = -gfk
gnorm = vecnorm(gfk, ord=norm)
sigma_3 = 0.01
while (gnorm > gtol) and (k < maxiter):
deltak = numpy.dot(gfk, gfk)
cached_step = [None]
def polak_ribiere_powell_step(alpha, gfkp1=None):
xkp1 = xk + alpha * pk
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
pkp1 = -gfkp1 + beta_k * pk
gnorm = vecnorm(gfkp1, ord=norm)
return (alpha, xkp1, pkp1, gfkp1, gnorm)
def descent_condition(alpha, xkp1, fp1, gfkp1):
# Polak-Ribiere+ needs an explicit check of a sufficient
# descent condition, which is not guaranteed by strong Wolfe.
#
# See Gilbert & Nocedal, "Global convergence properties of
# conjugate gradient methods for optimization",
# SIAM J. Optimization 2, 21 (1992).
cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1)
alpha, xk, pk, gfk, gnorm = cached_step
# Accept step if it leads to convergence.
if gnorm <= gtol:
return True
# Accept step if sufficient descent condition applies.
return numpy.dot(pk, gfk) <= -sigma_3 * numpy.dot(gfk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
old_old_fval, c2=0.4, amin=1e-100, amax=1e100,
extra_condition=descent_condition)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
# Reuse already computed results if possible
if alpha_k == cached_step[0]:
alpha_k, xk, pk, gfk, gnorm = cached_step
else:
alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1)
if retall:
allvecs.append(xk)
if callback is not None:
callback(xk)
k += 1
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
else:
msg = _status_message['success']
if disp:
print("%s%s" % ("Warning: " if warnflag != 0 else "", msg))
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
callback=None):
"""
Unconstrained minimization of a function using the Newton-CG method.
Parameters
----------
f : callable ``f(x, *args)``
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable ``f'(x, *args)``
Gradient of f.
fhess_p : callable ``fhess_p(x, p, *args)``, optional
Function which computes the Hessian of f times an
arbitrary vector, p.
fhess : callable ``fhess(x, *args)``, optional
Function to compute the Hessian matrix of f.
args : tuple, optional
Extra arguments passed to f, fprime, fhess_p, and fhess
(the same set of extra arguments is supplied to all of
these functions).
epsilon : float or ndarray, optional
If fhess is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function which is called after
each iteration. Called as callback(xk), where xk is the
current parameter vector.
avextol : float, optional
Convergence is assumed when the average relative error in
the minimizer falls below this amount.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True, return the optional outputs.
disp : bool, optional
If True, print convergence message.
retall : bool, optional
If True, return a list of results at each iteration.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float
Value of the function at xopt, i.e. ``fopt = f(xopt)``.
fcalls : int
Number of function calls made.
gcalls : int
Number of gradient calls made.
hcalls : int
Number of hessian calls made.
warnflag : int
Warnings generated by the algorithm.
1 : Maximum number of iterations exceeded.
allvecs : list
The result at each iteration, if retall is True (see below).
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Newton-CG' `method` in particular.
Notes
-----
Only one of `fhess_p` or `fhess` needs to be given. If `fhess`
is provided, then `fhess_p` will be ignored. If neither `fhess`
nor `fhess_p` is provided, then the Hessian product will be
approximated using finite differences on `fprime`. `fhess_p`
must compute the Hessian times an arbitrary vector.
Newton-CG methods are also called truncated Newton methods. This
function differs from scipy.optimize.fmin_tnc because
1. scipy.optimize.fmin_ncg is written purely in python using numpy
and scipy while scipy.optimize.fmin_tnc calls a C function.
2. scipy.optimize.fmin_ncg is only for unconstrained minimization
while scipy.optimize.fmin_tnc is for unconstrained minimization
or box constrained minimization. (Box constraints give
lower and upper bounds for each variable separately.)
References
----------
Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
"""
opts = {'xtol': avextol,
'eps': epsilon,
'maxiter': maxiter,
'disp': disp,
'return_all': retall}
res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['nfev'], res['njev'],
res['nhev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of a scalar function of one or more variables using the
Newton-CG algorithm.
Note that the `jac` parameter (Jacobian) is required.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
maxiter : int
Maximum number of iterations to perform.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is required for Newton-CG method')
f = fun
fprime = jac
fhess_p = hessp
fhess = hess
avextol = xtol
epsilon = eps
retall = return_all
def terminate(warnflag, msg):
if disp:
print(msg)
print(" Current function value: %f" % old_fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % fcalls[0])
print(" Gradient evaluations: %d" % gcalls[0])
print(" Hessian evaluations: %d" % hcalls)
fval = old_fval
result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0],
njev=gcalls[0], nhev=hcalls, status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
x0 = asarray(x0).flatten()
fcalls, f = wrap_function(f, args)
gcalls, fprime = wrap_function(fprime, args)
hcalls = 0
if maxiter is None:
maxiter = len(x0)*200
cg_maxiter = 20*len(x0)
xtol = len(x0) * avextol
update = [2 * xtol]
xk = x0
if retall:
allvecs = [xk]
k = 0
gfk = None
old_fval = f(x0)
old_old_fval = None
float64eps = numpy.finfo(numpy.float64).eps
while numpy.add.reduce(numpy.abs(update)) > xtol:
if k >= maxiter:
msg = "Warning: " + _status_message['maxiter']
return terminate(1, msg)
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - grad f(xk) starting from 0.
b = -fprime(xk)
maggrad = numpy.add.reduce(numpy.abs(b))
eta = numpy.min([0.5, numpy.sqrt(maggrad)])
termcond = eta * maggrad
xsupi = zeros(len(x0), dtype=x0.dtype)
ri = -b
psupi = -ri
i = 0
dri0 = numpy.dot(ri, ri)
if fhess is not None:  # compute the full Hessian once
A = fhess(*(xk,) + args)
hcalls = hcalls + 1
for k2 in xrange(cg_maxiter):
if numpy.add.reduce(numpy.abs(ri)) <= termcond:
break
if fhess is None:
if fhess_p is None:
Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
else:
Ap = fhess_p(xk, psupi, *args)
hcalls = hcalls + 1
else:
Ap = numpy.dot(A, psupi)
# check curvature
Ap = asarray(Ap).squeeze() # get rid of matrices...
curv = numpy.dot(psupi, Ap)
if 0 <= curv <= 3 * float64eps:
break
elif curv < 0:
if (i > 0):
break
else:
# fall back to steepest descent direction
xsupi = dri0 / (-curv) * b
break
alphai = dri0 / curv
xsupi = xsupi + alphai * psupi
ri = ri + alphai * Ap
dri1 = numpy.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
i = i + 1
dri0 = dri1 # update numpy.dot(ri,ri) for next time.
else:
# curvature keeps increasing, bail out
msg = ("Warning: CG iterations didn't converge. The Hessian is not "
"positive definite.")
return terminate(3, msg)
pk = xsupi # search direction is solution to system.
gfk = -b # gradient at xk
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, fprime, xk, pk, gfk,
old_fval, old_old_fval)
except _LineSearchError:
# Line search failed to find a better solution.
msg = "Warning: " + _status_message['pr_loss']
return terminate(2, msg)
update = alphak * pk
xk = xk + update # upcast if necessary
if callback is not None:
callback(xk)
if retall:
allvecs.append(xk)
k += 1
else:
msg = _status_message['success']
return terminate(0, msg)
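# Editor's sketch (not part of the original module): Newton-CG needs the
# gradient and, optionally, a Hessian-vector product; with the exact
# Rosenbrock derivatives it converges to the vector of ones.
#
#   >>> xopt = fmin_ncg(rosen, numpy.array([1.3, 0.7, 0.8]), rosen_der,
#   ...                 fhess_p=rosen_hess_prod, disp=0)
#   >>> numpy.allclose(xopt, 1.0, atol=1e-4)
#   True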
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
full_output=0, disp=1):
"""Bounded minimization for scalar functions.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized (must accept and return scalars).
x1, x2 : float or array scalar
The optimization bounds.
args : tuple, optional
Extra arguments passed to function.
xtol : float, optional
The convergence tolerance.
maxfun : int, optional
Maximum number of function evaluations allowed.
full_output : bool, optional
If True, return optional outputs.
disp : int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
Returns
-------
xopt : ndarray
Parameters (over given interval) which minimize the
objective function.
fval : number
The function value at the minimum point.
ierr : int
An error flag (0 if converged, 1 if maximum number of
function calls reached).
numfunc : int
The number of function calls made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Bounded' `method` in particular.
Notes
-----
Finds a local minimizer of the scalar function `func` in the
interval x1 < xopt < x2 using Brent's method. (See `brent`
for auto-bracketing).
Examples
--------
`fminbound` finds the minimum of the function in the given range.
The following examples illustrate this.
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fminbound(f, -1, 2)
>>> minimum
0.0
>>> minimum = optimize.fminbound(f, 1, 2)
>>> minimum
1.0000059608609866
"""
options = {'xatol': xtol,
'maxiter': maxfun,
'disp': disp}
res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
if full_output:
return res['x'], res['fun'], res['status'], res['nfev']
else:
return res['x']
def _minimize_scalar_bounded(func, bounds, args=(),
xatol=1e-5, maxiter=500, disp=0,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
disp: int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
xatol : float
Absolute error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
# Test bounds are of correct form
if len(bounds) != 2:
raise ValueError('bounds must have two elements.')
x1, x2 = bounds
if not (is_array_scalar(x1) and is_array_scalar(x2)):
raise ValueError("Optimisation bounds must be scalars"
" or array scalars.")
if x1 > x2:
raise ValueError("The lower bound exceeds the upper bound.")
flag = 0
header = ' Func-count x f(x) Procedure'
step = ' initial'
sqrt_eps = sqrt(2.2e-16)
golden_mean = 0.5 * (3.0 - sqrt(5.0))
a, b = x1, x2
fulc = a + golden_mean * (b - a)
nfc, xf = fulc, fulc
rat = e = 0.0
x = xf
fx = func(x, *args)
num = 1
fmin_data = (1, xf, fx)
ffulc = fnfc = fx
xm = 0.5 * (a + b)
tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if disp > 2:
print(" ")
print(header)
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
golden = 1
# Check for parabolic fit
if numpy.abs(e) > tol1:
golden = 0
r = (xf - nfc) * (fx - ffulc)
q = (xf - fulc) * (fx - fnfc)
p = (xf - fulc) * q - (xf - nfc) * r
q = 2.0 * (q - r)
if q > 0.0:
p = -p
q = numpy.abs(q)
r = e
e = rat
# Check for acceptability of parabola
if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and
(p < q * (b - xf))):
rat = (p + 0.0) / q
x = xf + rat
step = ' parabolic'
if ((x - a) < tol2) or ((b - x) < tol2):
si = numpy.sign(xm - xf) + ((xm - xf) == 0)
rat = tol1 * si
else: # do a golden section step
golden = 1
if golden: # Do a golden-section step
if xf >= xm:
e = a - xf
else:
e = b - xf
rat = golden_mean*e
step = ' golden'
si = numpy.sign(rat) + (rat == 0)
x = xf + si * numpy.max([numpy.abs(rat), tol1])
fu = func(x, *args)
num += 1
fmin_data = (num, x, fu)
if disp > 2:
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
if fu <= fx:
if x >= xf:
a = xf
else:
b = xf
fulc, ffulc = nfc, fnfc
nfc, fnfc = xf, fx
xf, fx = x, fu
else:
if x < xf:
a = x
else:
b = x
if (fu <= fnfc) or (nfc == xf):
fulc, ffulc = nfc, fnfc
nfc, fnfc = x, fu
elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
fulc, ffulc = x, fu
xm = 0.5 * (a + b)
tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if num >= maxfun:
flag = 1
break
fval = fx
if disp > 0:
_endprint(x, flag, fval, maxfun, xatol, disp)
result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
message={0: 'Solution found.',
1: 'Maximum number of function calls '
'reached.'}.get(flag, ''),
x=xf, nfev=num)
return result
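# Editor's sketch (not part of the original module): on [3, 4] the cosine
# has its minimum at pi, which the bounded Brent iteration above locates
# to within the requested tolerance.
#
#   >>> xopt, fval, ierr, numfunc = fminbound(numpy.cos, 3.0, 4.0,
#   ...                                       full_output=True, disp=0)
#   >>> bool(abs(xopt - numpy.pi) < 1e-4), ierr
#   (True, 0)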
class Brent:
#need to rethink design of __init__
def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
full_output=0):
self.func = func
self.args = args
self.tol = tol
self.maxiter = maxiter
self._mintol = 1.0e-11
self._cg = 0.3819660
self.xmin = None
self.fval = None
self.iter = 0
self.funcalls = 0
# need to rethink design of set_bracket (new options, etc)
def set_bracket(self, brack=None):
self.brack = brack
def get_bracket_info(self):
#set up
func = self.func
args = self.args
brack = self.brack
### BEGIN core bracket_info code ###
### carefully DOCUMENT any CHANGES in core ##
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be "
"length 2 or 3 sequence.")
### END core bracket_info code ###
return xa, xb, xc, fa, fb, fc, funcalls
def optimize(self):
# set up for optimization
func = self.func
xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
_mintol = self._mintol
_cg = self._cg
#################################
#BEGIN CORE ALGORITHM
#################################
x = w = v = xb
fw = fv = fx = func(*((x,) + self.args))
if (xa < xc):
a = xa
b = xc
else:
a = xc
b = xa
deltax = 0.0
funcalls += 1
iter = 0
while (iter < self.maxiter):
tol1 = self.tol * numpy.abs(x) + _mintol
tol2 = 2.0 * tol1
xmid = 0.5 * (a + b)
# check for convergence
if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
break
# XXX In the first iteration, rat is only bound in the true case
# of this conditional. This used to cause an UnboundLocalError
# (gh-4140). It should be set before the if (but to what?).
if (numpy.abs(deltax) <= tol1):
if (x >= xmid):
deltax = a - x # do a golden section step
else:
deltax = b - x
rat = _cg * deltax
else: # do a parabolic step
tmp1 = (x - w) * (fx - fv)
tmp2 = (x - v) * (fx - fw)
p = (x - v) * tmp2 - (x - w) * tmp1
tmp2 = 2.0 * (tmp2 - tmp1)
if (tmp2 > 0.0):
p = -p
tmp2 = numpy.abs(tmp2)
dx_temp = deltax
deltax = rat
# check parabolic fit
if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
(numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):
rat = p * 1.0 / tmp2 # if parabolic step is useful.
u = x + rat
if ((u - a) < tol2 or (b - u) < tol2):
if xmid - x >= 0:
rat = tol1
else:
rat = -tol1
else:
if (x >= xmid):
deltax = a - x # if it's not do a golden section step
else:
deltax = b - x
rat = _cg * deltax
if (numpy.abs(rat) < tol1): # update by at least tol1
if rat >= 0:
u = x + tol1
else:
u = x - tol1
else:
u = x + rat
fu = func(*((u,) + self.args)) # calculate new output value
funcalls += 1
if (fu > fx): # if it's bigger than current
if (u < x):
a = u
else:
b = u
if (fu <= fw) or (w == x):
v = w
w = u
fv = fw
fw = fu
elif (fu <= fv) or (v == x) or (v == w):
v = u
fv = fu
else:
if (u >= x):
a = x
else:
b = x
v = w
w = x
x = u
fv = fw
fw = fx
fx = fu
iter += 1
#################################
#END CORE ALGORITHM
#################################
self.xmin = x
self.fval = fx
self.iter = iter
self.funcalls = funcalls
def get_result(self, full_output=False):
if full_output:
return self.xmin, self.fval, self.iter, self.funcalls
else:
return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
"""
Given a function of one variable and a possible bracket, return
the local minimum of the function isolated to a fractional precision
of tol.
Parameters
----------
func : callable f(x,*args)
Objective function.
args : tuple, optional
Additional arguments (if present).
brack : tuple, optional
Either a triple ``(xa, xb, xc)`` where ``xa < xb < xc`` and
``func(xb) < func(xa)`` and ``func(xb) < func(xc)``, or a pair
``(xa, xb)`` used as a starting interval for a downhill bracket
search (see `bracket`). Providing the pair ``(xa, xb)`` does not
guarantee that the obtained solution will satisfy ``xa <= x <= xb``.
tol : float, optional
Stop if the change between iterations is less than `tol`.
full_output : bool, optional
If True, return all output args (xmin, fval, iter,
funcalls).
maxiter : int, optional
Maximum number of iterations in solution.
Returns
-------
xmin : ndarray
Optimum point.
fval : float
Optimum value.
iter : int
Number of iterations.
funcalls : int
Number of objective function evaluations made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Brent' `method` in particular.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
Does not ensure that the minimum lies in the range specified by
`brack`. See `fminbound`.
Examples
--------
We illustrate the behaviour of the function when `brack` is of
size 2 and 3 respectively. In the case where `brack` is of the
form (xa,xb), we can see for the given values, the output need
not necessarily lie in the range (xa,xb).
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.brent(f,brack=(1,2))
>>> minimum
0.0
>>> minimum = optimize.brent(f,brack=(-1,0.5,2))
>>> minimum
-2.7755575615628914e-17
"""
options = {'xtol': tol,
'maxiter': maxiter}
res = _minimize_scalar_brent(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nit'], res['nfev']
else:
return res['x']
def _minimize_scalar_brent(func, brack=None, args=(),
xtol=1.48e-8, maxiter=500,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
"""
_check_unknown_options(unknown_options)
tol = xtol
if tol < 0:
raise ValueError('tolerance should be >= 0, got %r' % tol)
brent = Brent(func=func, args=args, tol=tol,
full_output=True, maxiter=maxiter)
brent.set_bracket(brack)
brent.optimize()
x, fval, nit, nfev = brent.get_result(full_output=True)
return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev,
success=nit < maxiter)
def golden(func, args=(), brack=None, tol=_epsilon,
full_output=0, maxiter=5000):
"""
Return the minimum of a function of one variable using golden section
method.
Given a function of one variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable func(x,*args)
Objective function to minimize.
args : tuple, optional
Additional arguments (if present), passed to func.
brack : tuple, optional
Triple ``(a, b, c)``, where ``a < b < c`` and ``func(b) < func(a)``
and ``func(b) < func(c)``. If the bracket consists of two numbers
``(a, c)``, then they are assumed to be a starting interval for a
downhill bracket search (see `bracket`); this does not always
mean that the obtained solution will satisfy ``a <= x <= c``.
tol : float, optional
x tolerance stop criterion
full_output : bool, optional
If True, return optional outputs.
maxiter : int
Maximum number of iterations to perform.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Golden' `method` in particular.
Notes
-----
Uses an analog of the bisection method to decrease the bracketed
interval.
Examples
--------
We illustrate the behaviour of the function when `brack` is of
size 2 and 3 respectively. In the case where `brack` is of the
form (xa,xb), we can see for the given values, the output need
not necessarily lie in the range ``(xa, xb)``.
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.golden(f, brack=(1, 2))
>>> minimum
1.5717277788484873e-162
>>> minimum = optimize.golden(f, brack=(-1, 0.5, 2))
>>> minimum
-1.5717277788484873e-162
"""
options = {'xtol': tol, 'maxiter': maxiter}
res = _minimize_scalar_golden(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nfev']
else:
return res['x']
def _minimize_scalar_golden(func, brack=None, args=(),
xtol=_epsilon, maxiter=5000, **unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
tol = xtol
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
_gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0))
_gC = 1.0 - _gR
x3 = xc
x0 = xa
if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
x1 = xb
x2 = xb + _gC * (xc - xb)
else:
x2 = xb
x1 = xb - _gC * (xb - xa)
f1 = func(*((x1,) + args))
f2 = func(*((x2,) + args))
funcalls += 2
nit = 0
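# Golden-section iteration: shrink the bracket (x0, x3) while keeping the
# interior points x1 and x2 in golden-ratio proportion.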
for i in range(maxiter):
if numpy.abs(x3 - x0) <= tol * (numpy.abs(x1) + numpy.abs(x2)):
break
if (f2 < f1):
x0 = x1
x1 = x2
x2 = _gR * x1 + _gC * x3
f1 = f2
f2 = func(*((x2,) + args))
else:
x3 = x2
x2 = x1
x1 = _gR * x2 + _gC * x0
f2 = f1
f1 = func(*((x1,) + args))
funcalls += 1
nit += 1
if (f1 < f2):
xmin = x1
fval = f1
else:
xmin = x2
fval = f2
return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit,
success=nit < maxiter)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
"""
Bracket the minimum of the function.
Given a function and distinct initial points, search in the
downhill direction (as defined by the initial points) and return
new points xa, xb, xc that bracket the minimum of the function:
f(xa) > f(xb) < f(xc). This does not always mean that the obtained
solution will satisfy xa <= x <= xb.
Parameters
----------
func : callable f(x,*args)
Objective function to minimize.
xa, xb : float, optional
Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
args : tuple, optional
Additional arguments (if present), passed to `func`.
grow_limit : float, optional
Maximum grow limit. Defaults to 110.0
maxiter : int, optional
Maximum number of iterations to perform. Defaults to 1000.
Returns
-------
xa, xb, xc : float
Bracket.
fa, fb, fc : float
Objective function values in bracket.
funcalls : int
Number of function evaluations made.
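Examples
--------
A small illustrative sketch; the quadratic below is arbitrary test data,
not from the original docstring:
>>> def f(x):
...     return 10*x**2 + 3*x + 5
>>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=0, xb=1)
>>> f(xb) < f(xa) and f(xb) < f(xc)
True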
"""
_gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0
_verysmall_num = 1e-21
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
if (fa < fb): # Switch so fa > fb
xa, xb = xb, xa
fa, fb = fb, fa
xc = xb + _gold * (xb - xa)
fc = func(*((xc,) + args))
funcalls = 3
iter = 0
while (fc < fb):
tmp1 = (xb - xa) * (fb - fc)
tmp2 = (xb - xc) * (fb - fa)
val = tmp2 - tmp1
if numpy.abs(val) < _verysmall_num:
denom = 2.0 * _verysmall_num
else:
denom = 2.0 * val
w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
wlim = xb + grow_limit * (xc - xb)
if iter > maxiter:
raise RuntimeError("Too many iterations.")
iter += 1
if (w - xc) * (xb - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xa = xb
xb = w
fa = fb
fb = fw
return xa, xb, xc, fa, fb, fc, funcalls
elif (fw > fb):
xc = w
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(wlim - xc) >= 0.0:
w = wlim
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(xc - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xb = xc
xc = w
w = xc + _gold * (xc - xb)
fb = fc
fc = fw
fw = func(*((w,) + args))
funcalls += 1
else:
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
xa = xb
xb = xc
xc = w
fa = fb
fb = fc
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
def _linesearch_powell(func, p, xi, tol=1e-3):
"""Line-search algorithm using fminbound.
Find the minimium of the function ``func(x0+ alpha*direc)``.
"""
def myfunc(alpha):
return func(p + alpha*xi)
alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
xi = alpha_min*xi
return squeeze(fret), p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None):
"""
Minimize a function using modified Powell's method.
This method only uses function values, not derivatives.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func.
xtol : float, optional
Line-search error tolerance.
ftol : float, optional
Relative error in ``func(xopt)`` acceptable for convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : int, optional
Maximum number of function evaluations to make.
full_output : bool, optional
If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and
``warnflag`` are returned.
disp : bool, optional
If True, print convergence messages.
retall : bool, optional
If True, return a list of the solution at each iteration.
callback : callable, optional
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
direc : ndarray, optional
Initial fitting step and parameter order set as an (N, N) array, where N
is the number of fitting parameters in `x0`. Defaults to step size 1.0
fitting all parameters simultaneously (``np.ones((N, N))``). To
prevent initial consideration of values in a step or to change initial
step size, set to 0 or desired step size in the Jth position in the Mth
block, where J is the position in `x0` and M is the desired evaluation
step, with steps being evaluated in index order. Step size and ordering
will change freely as minimization proceeds.
Returns
-------
xopt : ndarray
Parameter which minimizes `func`.
fopt : number
Value of function at minimum: ``fopt = func(xopt)``.
direc : ndarray
Current direction set.
iter : int
Number of iterations.
funcalls : int
Number of function calls made.
warnflag : int
Integer warning flag:
1 : Maximum number of function evaluations.
2 : Maximum number of iterations.
allvecs : list
List of solutions at each iteration.
See also
--------
minimize: Interface to unconstrained minimization algorithms for
multivariate functions. See the 'Powell' method in particular.
Notes
-----
Uses a modification of Powell's method to find the minimum of
a function of N variables. Powell's method is a conjugate
direction method.
The algorithm has two loops. The outer loop merely iterates over the inner
loop. The inner loop minimizes over each current direction in the direction
set. At the end of the inner loop, if certain conditions are met, the
direction that gave the largest decrease is dropped and replaced with the
difference between the current estimated x and the estimated x from the
beginning of the inner-loop.
The technical conditions for replacing the direction of greatest
increase amount to checking that
1. No further gain can be made along the direction of greatest increase
from that iteration.
2. The direction of greatest increase accounted for a sufficiently
large fraction of the decrease in the function value from that
iteration of the inner loop.
References
----------
Powell M.J.D. (1964) An efficient method for finding the minimum of a
function of several variables without calculating derivatives,
Computer Journal, 7 (2):155-162.
Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
Numerical Recipes (any edition), Cambridge University Press
Examples
--------
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fmin_powell(f, -1)
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 2
Function evaluations: 18
>>> minimum
array(0.0)
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'direc': direc,
'return_all': retall}
res = _minimize_powell(func, x0, args, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_powell(func, x0, args=(), callback=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, direc=None, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Will default to ``N*1000``, where ``N`` is the number of
variables, if neither `maxiter` or `maxfev` is set. If both
`maxiter` and `maxfev` are set, minimization will stop at the
first reached.
direc : ndarray
Initial set of direction vectors for the Powell method.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = wrap_function(func, args)
x = asarray(x0).flatten()
if retall:
allvecs = [x]
N = len(x)
# If neither are set, then set both to default
if maxiter is None and maxfun is None:
maxiter = N * 1000
maxfun = N * 1000
elif maxiter is None:
# Convert remaining Nones to np.inf, unless the other is np.inf, in
# which case use the default to avoid unbounded iteration
if maxfun == np.inf:
maxiter = N * 1000
else:
maxiter = np.inf
elif maxfun is None:
if maxiter == np.inf:
maxfun = N * 1000
else:
maxfun = np.inf
if direc is None:
direc = eye(N, dtype=float)
else:
direc = asarray(direc, dtype=float)
fval = squeeze(func(x))
x1 = x.copy()
iter = 0
ilist = list(range(N))
while True:
fx = fval
bigind = 0
delta = 0.0
for i in ilist:
direc1 = direc[i]
fx2 = fval
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol * 100)
if (fx2 - fval) > delta:
delta = fx2 - fval
bigind = i
iter += 1
if callback is not None:
callback(x)
if retall:
allvecs.append(x)
bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20
if 2.0 * (fx - fval) <= bnd:
break
if fcalls[0] >= maxfun:
break
if iter >= maxiter:
break
# Construct the extrapolated point
direc1 = x - x1
x2 = 2*x - x1
x1 = x.copy()
fx2 = squeeze(func(x2))
if (fx > fx2):
t = 2.0*(fx + fx2 - 2.0*fval)
temp = (fx - fval - delta)
t *= temp*temp
temp = fx - fx2
t -= delta*temp*temp
if t < 0.0:
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol*100)
direc[bigind] = direc[-1]
direc[-1] = direc1
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print("Warning: " + msg)
elif iter >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iter)
print(" Function evaluations: %d" % fcalls[0])
x = squeeze(x)
result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x)
if retall:
result['allvecs'] = allvecs
return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
disp=False, workers=1):
"""Minimize a function over a given range by brute force.
Uses the "brute force" method, i.e. computes the function's value
at each point of a multidimensional grid of points, to find the global
minimum of the function.
The function is evaluated everywhere in the range with the datatype of the
first call to the function, as enforced by the ``vectorize`` NumPy
function. The value and type of the function evaluation returned when
``full_output=True`` are affected in addition by the ``finish`` argument
(see Notes).
The brute force approach is inefficient because the number of grid points
grows exponentially with dimension - the number of grid points to evaluate
is ``Ns ** len(x)``. Consequently, even with coarse grid spacing,
moderately sized problems can take a long time to run, and/or run into
memory limitations.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the
form ``f(x, *args)``, where ``x`` is the argument in
the form of a 1-D array and ``args`` is a tuple of any
additional fixed parameters needed to completely specify
the function.
ranges : tuple
Each component of the `ranges` tuple must be either a
"slice object" or a range tuple of the form ``(low, high)``.
The program uses these to create the grid of points on which
the objective function will be computed. See `Note 2` for
more detail.
args : tuple, optional
Any additional fixed parameters needed to completely specify
the function.
Ns : int, optional
Number of grid points along the axes, if not otherwise
specified. See `Note 2`.
full_output : bool, optional
If True, return the evaluation grid and the objective function's
values on it.
finish : callable, optional
An optimization function that is called with the result of brute force
minimization as initial guess. `finish` should take `func` and
the initial guess as positional arguments, and take `args` as
keyword arguments. It may additionally take `full_output`
and/or `disp` as keyword arguments. Use None if no "polishing"
function is to be used. See Notes for more details.
disp : bool, optional
Set to True to print convergence messages from the `finish` callable.
workers : int or map-like callable, optional
If `workers` is an int the grid is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`).
Supply `-1` to use all cores available to the Process.
Alternatively supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the grid in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable.
.. versionadded:: 1.3.0
Returns
-------
x0 : ndarray
A 1-D array containing the coordinates of a point at which the
objective function had its minimum value. (See `Note 1` for
which point is returned.)
fval : float
Function value at the point `x0`. (Returned when `full_output` is
True.)
grid : tuple
Representation of the evaluation grid. It has the same
length as `x0`. (Returned when `full_output` is True.)
Jout : ndarray
Function values at each point of the evaluation
grid, `i.e.`, ``Jout = func(*grid)``. (Returned
when `full_output` is True.)
See Also
--------
basinhopping, differential_evolution
Notes
-----
*Note 1*: The program finds the gridpoint at which the lowest value
of the objective function occurs. If `finish` is None, that is the
point returned. When the global minimum occurs within (or not very far
outside) the grid's boundaries, and the grid is fine enough, that
point will be in the neighborhood of the global minimum.
However, users often employ some other optimization program to
"polish" the gridpoint values, `i.e.`, to seek a more precise
(local) minimum near `brute's` best gridpoint.
The `brute` function's `finish` option provides a convenient way to do
that. Any polishing program used must take `brute's` output as its
initial guess as a positional argument, and take `brute's` input values
for `args` as keyword arguments, otherwise an error will be raised.
It may additionally take `full_output` and/or `disp` as keyword arguments.
`brute` assumes that the `finish` function returns either an
`OptimizeResult` object or a tuple in the form:
``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
value of the argument, ``Jmin`` is the minimum value of the objective
function, "..." may be some other returned values (which are not used
by `brute`), and ``statuscode`` is the status code of the `finish` program.
Note that when `finish` is not None, the values returned are those
of the `finish` program, *not* the gridpoint ones. Consequently,
while `brute` confines its search to the input grid points,
the `finish` program's results usually will not coincide with any
gridpoint, and may fall outside the grid's boundary. Thus, if a
minimum only needs to be found over the provided grid points, make
sure to pass in `finish=None`.
*Note 2*: The grid of points is a `numpy.mgrid` object.
For `brute` the `ranges` and `Ns` inputs have the following effect.
Each component of the `ranges` tuple can be either a slice object or a
two-tuple giving a range of values, such as (0, 5). If the component is a
slice object, `brute` uses it directly. If the component is a two-tuple
range, `brute` internally converts it to a slice object that interpolates
`Ns` points from its low-value to its high-value, inclusive.
Examples
--------
We illustrate the use of `brute` to seek the global minimum of a function
of two variables that is given as the sum of a positive-definite
quadratic and two deep "Gaussian-shaped" craters. Specifically, define
the objective function `f` as the sum of three other functions,
``f = f1 + f2 + f3``. We suppose each of these has a signature
``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
are as defined below.
>>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
>>> def f1(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
>>> def f2(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
>>> def f3(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
>>> def f(z, *params):
... return f1(z, *params) + f2(z, *params) + f3(z, *params)
Thus, the objective function may have local minima near the minimum
of each of the three functions of which it is composed. To
use `fmin` to polish its gridpoint result, we may then continue as
follows:
>>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
>>> from scipy import optimize
>>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
... finish=optimize.fmin)
>>> resbrute[0] # global minimum
array([-1.05665192, 1.80834843])
>>> resbrute[1] # function value at global minimum
-3.4085818767
Note that if `finish` had been set to None, we would have gotten the
gridpoint [-1.0 1.75] where the rounded function value is -2.892.
"""
N = len(ranges)
if N > 40:
raise ValueError("Brute Force not possible with more "
"than 40 variables.")
lrange = list(ranges)
for k in range(N):
if not isinstance(lrange[k], slice):
if len(lrange[k]) < 3:
lrange[k] = tuple(lrange[k]) + (complex(Ns),)
lrange[k] = slice(*lrange[k])
if (N == 1):
lrange = lrange[0]
grid = np.mgrid[lrange]
# obtain an array of parameters that is iterable by a map-like callable
inpt_shape = grid.shape
if (N > 1):
grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T
wrapped_func = _Brute_Wrapper(func, args)
# iterate over input arrays, possibly in parallel
with MapWrapper(pool=workers) as mapper:
Jout = np.array(list(mapper(wrapped_func, grid)))
if (N == 1):
grid = (grid,)
Jout = np.squeeze(Jout)
elif (N > 1):
Jout = np.reshape(Jout, inpt_shape[1:])
grid = np.reshape(grid.T, inpt_shape)
Nshape = shape(Jout)
indx = argmin(Jout.ravel(), axis=-1)
Nindx = zeros(N, int)
xmin = zeros(N, float)
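# Unravel the flat argmin index into per-axis indices on the grid.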
for k in range(N - 1, -1, -1):
thisN = Nshape[k]
Nindx[k] = indx % Nshape[k]
indx = indx // thisN
for k in range(N):
xmin[k] = grid[k][tuple(Nindx)]
Jmin = Jout[tuple(Nindx)]
if (N == 1):
grid = grid[0]
xmin = xmin[0]
if callable(finish):
# set up kwargs for `finish` function
finish_args = _getargspec(finish).args
finish_kwargs = dict()
if 'full_output' in finish_args:
finish_kwargs['full_output'] = 1
if 'disp' in finish_args:
finish_kwargs['disp'] = disp
elif 'options' in finish_args:
# pass 'disp' as `options`
# (e.g. if `finish` is `minimize`)
finish_kwargs['options'] = {'disp': disp}
# run minimizer
res = finish(func, xmin, args=args, **finish_kwargs)
if isinstance(res, OptimizeResult):
xmin = res.x
Jmin = res.fun
success = res.success
else:
xmin = res[0]
Jmin = res[1]
success = res[-1] == 0
if not success:
if disp:
print("Warning: Either final optimization did not succeed "
"or `finish` does not return `statuscode` as its last "
"argument.")
if full_output:
return xmin, Jmin, grid, Jout
else:
return xmin
class _Brute_Wrapper(object):
"""
Object to wrap user cost function for optimize.brute, allowing picklability
"""
def __init__(self, f, args):
self.f = f
self.args = [] if args is None else args
def __call__(self, x):
# flatten needed for one dimensional case.
return self.f(np.asarray(x).flatten(), *self.args)
def show_options(solver=None, method=None, disp=True):
"""
Show documentation for additional options of optimization solvers.
These are method-specific options that can be supplied through the
``options`` dict.
Parameters
----------
solver : str
Type of optimization solver. One of 'minimize', 'minimize_scalar',
'root', or 'linprog'.
method : str, optional
If not given, shows all methods of the specified solver. Otherwise,
show only the options for the specified method. Valid values
corresponds to methods' names of respective solver (e.g. 'BFGS' for
'minimize').
disp : bool, optional
Whether to print the result rather than returning it.
Returns
-------
text
Either None (for disp=True) or the text string (disp=False)
Notes
-----
The solver-specific methods are:
`scipy.optimize.minimize`
- :ref:`Nelder-Mead <optimize.minimize-neldermead>`
- :ref:`Powell <optimize.minimize-powell>`
- :ref:`CG <optimize.minimize-cg>`
- :ref:`BFGS <optimize.minimize-bfgs>`
- :ref:`Newton-CG <optimize.minimize-newtoncg>`
- :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
- :ref:`TNC <optimize.minimize-tnc>`
- :ref:`COBYLA <optimize.minimize-cobyla>`
- :ref:`SLSQP <optimize.minimize-slsqp>`
- :ref:`dogleg <optimize.minimize-dogleg>`
- :ref:`trust-ncg <optimize.minimize-trustncg>`
`scipy.optimize.root`
- :ref:`hybr <optimize.root-hybr>`
- :ref:`lm <optimize.root-lm>`
- :ref:`broyden1 <optimize.root-broyden1>`
- :ref:`broyden2 <optimize.root-broyden2>`
- :ref:`anderson <optimize.root-anderson>`
- :ref:`linearmixing <optimize.root-linearmixing>`
- :ref:`diagbroyden <optimize.root-diagbroyden>`
- :ref:`excitingmixing <optimize.root-excitingmixing>`
- :ref:`krylov <optimize.root-krylov>`
- :ref:`df-sane <optimize.root-dfsane>`
`scipy.optimize.minimize_scalar`
- :ref:`brent <optimize.minimize_scalar-brent>`
- :ref:`golden <optimize.minimize_scalar-golden>`
- :ref:`bounded <optimize.minimize_scalar-bounded>`
`scipy.optimize.linprog`
- :ref:`simplex <optimize.linprog-simplex>`
- :ref:`interior-point <optimize.linprog-interior-point>`
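Examples
--------
A minimal usage sketch (the retrieved text depends on the installed
version, so only its type is checked here):
>>> from scipy.optimize import show_options
>>> text = show_options(solver='minimize', method='Nelder-Mead', disp=False)
>>> isinstance(text, str)
True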
"""
import textwrap
doc_routines = {
'minimize': (
('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),
('cg', 'scipy.optimize.optimize._minimize_cg'),
('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),
('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),
('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),
('newton-cg', 'scipy.optimize.optimize._minimize_newtoncg'),
('powell', 'scipy.optimize.optimize._minimize_powell'),
('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),
('tnc', 'scipy.optimize.tnc._minimize_tnc'),
('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
),
'root': (
('hybr', 'scipy.optimize.minpack._root_hybr'),
('lm', 'scipy.optimize._root._root_leastsq'),
('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
('anderson', 'scipy.optimize._root._root_anderson_doc'),
('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
('krylov', 'scipy.optimize._root._root_krylov_doc'),
('df-sane', 'scipy.optimize._spectral._root_df_sane'),
),
'root_scalar': (
('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'),
('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'),
('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'),
('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'),
('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'),
('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'),
('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'),
('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'),
),
'linprog': (
('simplex', 'scipy.optimize._linprog._linprog_simplex'),
('interior-point', 'scipy.optimize._linprog._linprog_ip'),
),
'minimize_scalar': (
('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),
('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),
('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),
),
}
if solver is None:
text = ["\n\n\n========\n", "minimize\n", "========\n"]
text.append(show_options('minimize', disp=False))
text.extend(["\n\n===============\n", "minimize_scalar\n",
"===============\n"])
text.append(show_options('minimize_scalar', disp=False))
text.extend(["\n\n\n====\n", "root\n",
"====\n"])
text.append(show_options('root', disp=False))
text.extend(['\n\n\n=======\n', 'linprog\n',
'=======\n'])
text.append(show_options('linprog', disp=False))
text = "".join(text)
else:
solver = solver.lower()
if solver not in doc_routines:
raise ValueError('Unknown solver %r' % (solver,))
if method is None:
text = []
for name, _ in doc_routines[solver]:
text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
text.append(show_options(solver, name, disp=False))
text = "".join(text)
else:
method = method.lower()
methods = dict(doc_routines[solver])
if method not in methods:
raise ValueError("Unknown method %r" % (method,))
name = methods[method]
# Import function object
parts = name.split('.')
mod_name = ".".join(parts[:-1])
__import__(mod_name)
obj = getattr(sys.modules[mod_name], parts[-1])
# Get doc
doc = obj.__doc__
if doc is not None:
text = textwrap.dedent(doc).strip()
else:
text = ""
if disp:
print(text)
return
else:
return text
def main():
import time
times = []
algor = []
x0 = [0.8, 1.2, 0.7]
print("Nelder-Mead Simplex")
print("===================")
start = time.time()
x = fmin(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Nelder-Mead Simplex\t')
print()
print("Powell Direction Set Method")
print("===========================")
start = time.time()
x = fmin_powell(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Powell Direction Set Method.')
print()
print("Nonlinear CG")
print("============")
start = time.time()
x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
print(x)
times.append(time.time() - start)
algor.append('Nonlinear CG \t')
print()
print("BFGS Quasi-Newton")
print("=================")
start = time.time()
x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('BFGS Quasi-Newton\t')
print()
print("BFGS approximate gradient")
print("=========================")
start = time.time()
x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
print(x)
times.append(time.time() - start)
algor.append('BFGS without gradient\t')
print()
print("Newton-CG with Hessian product")
print("==============================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with hessian product')
print()
print("Newton-CG with full Hessian")
print("===========================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with full hessian')
print()
print("\nMinimizing the Rosenbrock function of order 3\n")
print(" Algorithm \t\t\t Seconds")
print("===========\t\t\t =========")
for k in range(len(algor)):
print(algor[k], "\t -- ", times[k])
if __name__ == "__main__":
main()
| gertingold/scipy | scipy/optimize/optimize.py | Python | bsd-3-clause | 108,314 | ["Gaussian"] | 526c4433b42676ef8bc3124eef7a7a3d45755167c53c242a0184a4df9c85e93e |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 13:10:52 2012
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import svds
from scipy.optimize import fminbound
import warnings
from statsmodels.tools.tools import Bunch
from statsmodels.tools.sm_exceptions import (
IterationLimitWarning, iteration_limit_doc)
def clip_evals(x, value=0):
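# Clip the eigenvalues of the symmetric matrix `x` from below at `value`
# and reconstruct the matrix; `clipped` reports whether any eigenvalue
# was changed.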
evals, evecs = np.linalg.eigh(x)
clipped = np.any(evals < value)
x_new = np.dot(evecs * np.maximum(evals, value), evecs.T)
return x_new, clipped
def corr_nearest(corr, threshold=1e-15, n_fact=100):
'''
Find the nearest correlation matrix that is positive semi-definite.
The function iteratively adjusts the correlation matrix by clipping the
eigenvalues of a difference matrix. The diagonal elements are set to one.
Parameters
----------
corr : ndarray, (k, k)
initial correlation matrix
threshold : float
clipping threshold for smallest eigenvalue, see Notes
n_fact : int or float
factor to determine the maximum number of iterations. The maximum
number of iterations is the integer part of the number of columns in
the correlation matrix times n_fact.
Returns
-------
corr_new : ndarray, (optional)
corrected correlation matrix
Notes
-----
The smallest eigenvalue of the corrected correlation matrix is
approximately equal to the ``threshold``.
If ``threshold=0``, then the smallest eigenvalue of the correlation matrix
might be negative, but zero to within numerical error, for example in the
range of -1e-16.
Assumes the input correlation matrix is symmetric.
Stops after the first step if the correlation matrix is already positive
semi-definite or positive definite, so that the smallest eigenvalue is above
threshold. In this case, the returned array is not the original, but
is equal to it within numerical precision.
See Also
--------
corr_clipped
cov_nearest
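Examples
--------
A small illustrative sketch; the matrix below is arbitrary test data
with a negative eigenvalue:
>>> import numpy as np
>>> corr = np.array([[1.0, 0.95, -0.95],
...                  [0.95, 1.0, 0.95],
...                  [-0.95, 0.95, 1.0]])
>>> corr_psd = corr_nearest(corr, threshold=1e-15, n_fact=100)
>>> np.allclose(np.diag(corr_psd), 1.0)
True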
'''
k_vars = corr.shape[0]
if k_vars != corr.shape[1]:
raise ValueError("matrix is not square")
diff = np.zeros(corr.shape)
x_new = corr.copy()
diag_idx = np.arange(k_vars)
for ii in range(int(len(corr) * n_fact)):
x_adj = x_new - diff
x_psd, clipped = clip_evals(x_adj, value=threshold)
if not clipped:
x_new = x_psd
break
diff = x_psd - x_adj
x_new = x_psd.copy()
x_new[diag_idx, diag_idx] = 1
else:
warnings.warn(iteration_limit_doc, IterationLimitWarning)
return x_new
def corr_clipped(corr, threshold=1e-15):
'''
Find a near correlation matrix that is positive semi-definite
This function clips the eigenvalues, replacing eigenvalues smaller than
the threshold by the threshold. The new matrix is normalized, so that the
diagonal elements are one.
Compared to corr_nearest, the distance between the original correlation
matrix and the positive definite correlation matrix is larger; however,
it is much faster since it only computes the eigenvalues once.
Parameters
----------
corr : ndarray, (k, k)
initial correlation matrix
threshold : float
clipping threshold for smallest eigenvalue, see Notes
Returns
-------
corr_new : ndarray, (optional)
corrected correlation matrix
Notes
-----
The smallest eigenvalue of the corrected correlation matrix is
approximately equal to the ``threshold``. In examples, the
smallest eigenvalue can be smaller than the threshold by a factor of 10,
e.g. a threshold of 1e-8 can result in a smallest eigenvalue in the
range between 1e-9 and 1e-8.
If ``threshold=0``, then the smallest eigenvalue of the correlation matrix
might be negative, but zero to within numerical error, for example in the
range of -1e-16.
Assumes the input correlation matrix is symmetric. The diagonal elements
of the returned correlation matrix are set to one.
If the correlation matrix is already positive semi-definite given the
threshold, then the original correlation matrix is returned.
``corr_clipped`` is 40 or more times faster than ``corr_nearest`` in a
simple example, but has a slightly larger approximation error.
See Also
--------
corr_nearest
cov_nearest
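Examples
--------
A small illustrative sketch with the same kind of arbitrary indefinite
input as above:
>>> import numpy as np
>>> corr = np.array([[1.0, 0.95, -0.95],
...                  [0.95, 1.0, 0.95],
...                  [-0.95, 0.95, 1.0]])
>>> corr_psd = corr_clipped(corr, threshold=1e-15)
>>> np.linalg.eigvalsh(corr_psd).min() > -1e-10
True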
'''
x_new, clipped = clip_evals(corr, value=threshold)
if not clipped:
return corr
# cov2corr
x_std = np.sqrt(np.diag(x_new))
x_new = x_new / x_std / x_std[:, None]
return x_new
def cov_nearest(cov, method='clipped', threshold=1e-15, n_fact=100,
return_all=False):
"""
Find the nearest covariance matrix that is positive (semi-) definite
This leaves the diagonal, i.e. the variance, unchanged.
Parameters
----------
cov : ndarray, (k,k)
initial covariance matrix
method : str
if "clipped", then the faster but less accurate ``corr_clipped`` is
used.if "nearest", then ``corr_nearest`` is used
threshold : float
clipping threshold for smallest eigenvalue, see Notes
n_fact : int or float
factor to determine the maximum number of iterations in
``corr_nearest``. See its doc string
return_all : bool
if False (default), then only the covariance matrix is returned.
If True, then correlation matrix and standard deviation are
additionally returned.
Returns
-------
cov_ : ndarray
corrected covariance matrix
corr_ : ndarray, (optional)
corrected correlation matrix
std_ : ndarray, (optional)
standard deviation
Notes
-----
This converts the covariance matrix to a correlation matrix. Then, it
finds the nearest correlation matrix that is positive semidefinite and
converts it back to a covariance matrix using the initial standard deviation.
The smallest eigenvalue of the intermediate correlation matrix is
approximately equal to the ``threshold``.
If ``threshold=0``, then the smallest eigenvalue of the correlation matrix
might be negative, but zero to within numerical error, for example in the
range of -1e-16.
Assumes input covariance matrix is symmetric.
See Also
--------
corr_nearest
corr_clipped
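Examples
--------
A hedged sketch showing that the variances (the diagonal) are preserved;
the input matrix is arbitrary test data:
>>> import numpy as np
>>> cov = np.array([[2.0, 1.9, -1.9],
...                 [1.9, 2.0, 1.9],
...                 [-1.9, 1.9, 2.0]])
>>> cov_psd = cov_nearest(cov, method='clipped')
>>> np.allclose(np.diag(cov_psd), np.diag(cov))
True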
"""
from statsmodels.stats.moment_helpers import cov2corr, corr2cov
cov_, std_ = cov2corr(cov, return_std=True)
if method == 'clipped':
corr_ = corr_clipped(cov_, threshold=threshold)
else: # method == 'nearest'
corr_ = corr_nearest(cov_, threshold=threshold, n_fact=n_fact)
cov_ = corr2cov(corr_, std_)
if return_all:
return cov_, corr_, std_
else:
return cov_
def _nmono_linesearch(obj, grad, x, d, obj_hist, M=10, sig1=0.1,
sig2=0.9, gam=1e-4, maxiter=100):
"""
Implements the non-monotone line search of Grippo et al. (1986),
as described in Birgin, Martinez and Raydan (2013).
Parameters
----------
obj : real-valued function
The objective function, to be minimized
grad : vector-valued function
The gradient of the objective function
x : array_like
The starting point for the line search
d : array_like
The search direction
obj_hist : array_like
Objective function history (must contain at least one value)
M : positive int
Number of previous function points to consider (see references
for details).
sig1 : real
Tuning parameter, see references for details.
sig2 : real
Tuning parameter, see references for details.
gam : real
Tuning parameter, see references for details.
maxiter : int
The maximum number of iterations; returns Nones if convergence
does not occur by this point
Returns
-------
alpha : real
The step value
x : array_like
The function argument at the final step
obval : float
The function value at the final step
g : array_like
The gradient at the final step
Notes
-----
The basic idea is to take a big step in the direction of the
gradient, even if the function value is not decreased (but there
is a maximum allowed increase in terms of the recent history of
the iterates).
References
----------
Grippo L, Lampariello F, Lucidi S (1986). A Nonmonotone Line
Search Technique for Newton's Method. SIAM Journal on Numerical
Analysis, 23, 707-716.
E. Birgin, J.M. Martinez, and M. Raydan. Spectral projected
gradient methods: Review and perspectives. Journal of Statistical
Software (preprint).
"""
alpha = 1.
last_obval = obj(x)
obj_max = max(obj_hist[-M:])
for iter in range(maxiter):
obval = obj(x + alpha*d)
g = grad(x)
gtd = (g * d).sum()
if obval <= obj_max + gam*alpha*gtd:
return alpha, x + alpha*d, obval, g
a1 = -0.5*alpha**2*gtd / (obval - last_obval - alpha*gtd)
if (sig1 <= a1) and (a1 <= sig2*alpha):
alpha = a1
else:
alpha /= 2.
last_obval = obval
return None, None, None, None
def _spg_optim(func, grad, start, project, maxiter=1e4, M=10,
ctol=1e-3, maxiter_nmls=200, lam_min=1e-30,
lam_max=1e30, sig1=0.1, sig2=0.9, gam=1e-4):
"""
Implements the spectral projected gradient method for minimizing a
differentiable function on a convex domain.
Parameters
----------
func : real valued function
The objective function to be minimized.
grad : real array-valued function
The gradient of the objective function
start : array_like
The starting point
project : function
In-place projection of the argument to the domain
of func.
... See notes regarding additional arguments
Returns
-------
rslt : Bunch
rslt.params is the final iterate, other fields describe
convergence status.
Notes
-----
This can be an effective heuristic algorithm for problems where no
guaranteed algorithm for computing a global minimizer is known.
There are a number of tuning parameters, but these generally
should not be changed except for `maxiter` (positive integer) and
`ctol` (small positive real). See the Birgin et al reference for
more information about the tuning parameters.
References
----------
E. Birgin, J.M. Martinez, and M. Raydan. Spectral projected
gradient methods: Review and perspectives. Journal of Statistical
Software (preprint). Available at:
http://www.ime.usp.br/~egbirgin/publications/bmr5.pdf
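Examples
--------
A minimal sketch (not from the original source): minimize ||x||^2 over
the box [1, 2]^2, projecting in place with ``np.clip``.
>>> import numpy as np
>>> def func(x):
...     return (x ** 2).sum()
>>> def grad(x):
...     return 2.0 * x
>>> def project(x):
...     np.clip(x, 1.0, 2.0, out=x)
>>> rslt = _spg_optim(func, grad, np.array([1.5, 1.5]), project)
>>> np.allclose(rslt.params, [1.0, 1.0])
True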
"""
lam = min(10*lam_min, lam_max)
params = start.copy()
gval = grad(params)
obj_hist = [func(params), ]
for itr in range(int(maxiter)):
# Check convergence
df = params - gval
project(df)
df -= params
if np.max(np.abs(df)) < ctol:
return Bunch(**{"Converged": True, "params": params,
"objective_values": obj_hist,
"Message": "Converged successfully"})
# The line search direction
d = params - lam*gval
project(d)
d -= params
# Carry out the nonmonotone line search
alpha, params1, fval, gval1 = _nmono_linesearch(
func,
grad,
params,
d,
obj_hist,
M=M,
sig1=sig1,
sig2=sig2,
gam=gam,
maxiter=maxiter_nmls)
if alpha is None:
return Bunch(**{"Converged": False, "params": params,
"objective_values": obj_hist,
"Message": "Failed in nmono_linesearch"})
obj_hist.append(fval)
s = params1 - params
y = gval1 - gval
sy = (s*y).sum()
if sy <= 0:
lam = lam_max
else:
ss = (s*s).sum()
lam = max(lam_min, min(ss/sy, lam_max))
params = params1
gval = gval1
return Bunch(**{"Converged": False, "params": params,
"objective_values": obj_hist,
"Message": "spg_optim did not converge"})
def _project_correlation_factors(X):
"""
Project a matrix into the domain of matrices whose row-wise sums
of squares are less than or equal to 1.
The input matrix is modified in-place.
"""
nm = np.sqrt((X*X).sum(1))
ii = np.flatnonzero(nm > 1)
if len(ii) > 0:
X[ii, :] /= nm[ii][:, None]
class FactoredPSDMatrix:
"""
Representation of a positive semidefinite matrix in factored form.
The representation is constructed based on a vector `diag` and
rectangular matrix `root`, such that the PSD matrix represented by
the class instance is Diag + root * root', where Diag is the
square diagonal matrix with `diag` on its main diagonal.
Parameters
----------
diag : 1d array_like
See above
root : 2d array_like
See above
Notes
-----
The matrix is represented internally in the form Diag^{1/2}(I +
factor * scales * factor')Diag^{1/2}, where `Diag` and `scales`
are diagonal matrices, and `factor` is an orthogonal matrix.
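Examples
--------
A minimal sketch with arbitrary illustrative data:
>>> import numpy as np
>>> diag = np.array([1.0, 1.0, 1.0])
>>> root = np.array([[0.5], [0.5], [0.5]])
>>> fm = FactoredPSDMatrix(diag, root)
>>> np.allclose(fm.to_matrix(), np.diag(diag) + np.dot(root, root.T))
True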
"""
def __init__(self, diag, root):
self.diag = diag
self.root = root
root = root / np.sqrt(diag)[:, None]
u, s, vt = np.linalg.svd(root, 0)
self.factor = u
self.scales = s**2
def to_matrix(self):
"""
Returns the PSD matrix represented by this instance as a full
(square) matrix.
"""
return np.diag(self.diag) + np.dot(self.root, self.root.T)
def decorrelate(self, rhs):
"""
Decorrelate the columns of `rhs`.
Parameters
----------
rhs : array_like
A 2 dimensional array with the same number of rows as the
PSD matrix represented by the class instance.
Returns
-------
C^{-1/2} * rhs, where C is the covariance matrix represented
by this class instance.
Notes
-----
The returned matrix has the identity matrix as its row-wise
population covariance matrix.
This function exploits the factor structure for efficiency.
"""
# I + factor * qval * factor' is the inverse square root of
# the covariance matrix in the homogeneous case where diag =
# 1.
qval = -1 + 1 / np.sqrt(1 + self.scales)
# Decorrelate in the general case.
rhs = rhs / np.sqrt(self.diag)[:, None]
rhs1 = np.dot(self.factor.T, rhs)
rhs1 *= qval[:, None]
rhs1 = np.dot(self.factor, rhs1)
rhs += rhs1
return rhs
def solve(self, rhs):
"""
Solve a linear system of equations with factor-structured
coefficients.
Parameters
----------
rhs : array_like
A 2 dimensional array with the same number of rows as the
PSD matrix represented by the class instance.
Returns
-------
C^{-1} * rhs, where C is the covariance matrix represented
by this class instance.
Notes
-----
This function exploits the factor structure for efficiency.
"""
qval = -self.scales / (1 + self.scales)
dr = np.sqrt(self.diag)
rhs = rhs / dr[:, None]
mat = qval[:, None] * np.dot(self.factor.T, rhs)
rhs = rhs + np.dot(self.factor, mat)
return rhs / dr[:, None]
def logdet(self):
"""
Returns the logarithm of the determinant of a
factor-structured matrix.
"""
logdet = np.sum(np.log(self.diag))
logdet += np.sum(np.log(self.scales))
logdet += np.sum(np.log(1 + 1 / self.scales))
return logdet
def corr_nearest_factor(corr, rank, ctol=1e-6, lam_min=1e-30,
lam_max=1e30, maxiter=1000):
"""
Find the nearest correlation matrix with factor structure to a
given square matrix.
Parameters
----------
corr : square array
The target matrix (to which the nearest correlation matrix is
sought). Must be square, but need not be positive
semidefinite.
rank : int
The rank of the factor structure of the solution, i.e., the
number of linearly independent columns of X.
ctol : positive real
Convergence criterion.
lam_min : float
Tuning parameter for spectral projected gradient optimization
(smallest allowed step in the search direction).
lam_max : float
Tuning parameter for spectral projected gradient optimization
(largest allowed step in the search direction).
maxiter : int
Maximum number of iterations in spectral projected gradient
optimization.
Returns
-------
rslt : Bunch
rslt.corr is a FactoredPSDMatrix defining the estimated
correlation structure. Other fields of `rslt` contain
returned values from spg_optim.
Notes
-----
A correlation matrix has factor structure if it can be written in
the form I + XX' - diag(XX'), where X is n x k with linearly
independent columns, and with each row having sum of squares at
most equal to 1. The approximation is made in terms of the
Frobenius norm.
This routine is useful when one has an approximate correlation
matrix that is not positive semidefinite, and there is need to
estimate the inverse, square root, or inverse square root of the
population correlation matrix. The factor structure allows these
tasks to be done without constructing any n x n matrices.
This is a non-convex problem with no known guaranteed globally
convergent algorithm for computing the solution. Borsdorf, Higham
and Raydan (2010) compared several methods for this problem and
found the spectral projected gradient (SPG) method (used here) to
perform best.
The input matrix `corr` can be a dense numpy array or any scipy
sparse matrix. The latter is useful if the input matrix is
obtained by thresholding a very large sample correlation matrix.
If `corr` is sparse, the calculations are optimized to save
memory, so no working matrix with more than 10^6 elements is
constructed.
References
----------
.. [*] R. Borsdorf, N. Higham, M. Raydan (2010). Computing a nearest
correlation matrix with factor structure. SIAM J Matrix Anal Appl,
31:5, 2603-2622.
http://eprints.ma.man.ac.uk/1523/01/covered/MIMS_ep2009_87.pdf
Examples
--------
Hard thresholding a correlation matrix may result in a matrix that
is not positive semidefinite. We can approximate a hard
thresholded correlation matrix with a PSD matrix as follows, where
`corr` is the input correlation matrix.
>>> import numpy as np
>>> from statsmodels.stats.correlation_tools import corr_nearest_factor
>>> np.random.seed(1234)
>>> b = 1.5 - np.random.rand(10, 1)
>>> x = np.random.randn(100,1).dot(b.T) + np.random.randn(100,10)
>>> corr = np.corrcoef(x.T)
>>> corr = corr * (np.abs(corr) >= 0.3)
>>> rslt = corr_nearest_factor(corr, 3)
"""
p, _ = corr.shape
# Starting values (following the PCA method in BHR).
u, s, vt = svds(corr, rank)
X = u * np.sqrt(s)
nm = np.sqrt((X**2).sum(1))
ii = np.flatnonzero(nm > 1e-5)
X[ii, :] /= nm[ii][:, None]
# Zero the diagonal
corr1 = corr.copy()
if type(corr1) == np.ndarray:
np.fill_diagonal(corr1, 0)
elif sparse.issparse(corr1):
corr1.setdiag(np.zeros(corr1.shape[0]))
corr1.eliminate_zeros()
corr1.sort_indices()
else:
raise ValueError("Matrix type not supported")
# The gradient, from lemma 4.1 of BHR.
def grad(X):
gr = np.dot(X, np.dot(X.T, X))
if type(corr1) == np.ndarray:
gr -= np.dot(corr1, X)
else:
gr -= corr1.dot(X)
gr -= (X*X).sum(1)[:, None] * X
return 4*gr
# The objective function (sum of squared deviations between fitted
# and observed arrays).
def func(X):
if type(corr1) == np.ndarray:
M = np.dot(X, X.T)
np.fill_diagonal(M, 0)
M -= corr1
fval = (M*M).sum()
return fval
else:
fval = 0.
# Control the size of intermediates
max_ws = 1e6
bs = int(max_ws / X.shape[0])
ir = 0
while ir < X.shape[0]:
ir2 = min(ir+bs, X.shape[0])
u = np.dot(X[ir:ir2, :], X.T)
ii = np.arange(u.shape[0])
u[ii, ir+ii] = 0
u -= np.asarray(corr1[ir:ir2, :].todense())
fval += (u*u).sum()
ir += bs
return fval
rslt = _spg_optim(func, grad, X, _project_correlation_factors, ctol=ctol,
lam_min=lam_min, lam_max=lam_max, maxiter=maxiter)
root = rslt.params
diag = 1 - (root**2).sum(1)
soln = FactoredPSDMatrix(diag, root)
rslt.corr = soln
del rslt.params
return rslt
def cov_nearest_factor_homog(cov, rank):
"""
Approximate an arbitrary square matrix with a factor-structured
matrix of the form k*I + XX'.
Parameters
----------
cov : array_like
The input array, must be square but need not be positive
semidefinite
rank : int
The rank of the fitted factor structure
Returns
-------
A FactoredPSDMatrix instance containing the fitted matrix
Notes
-----
This routine is useful if one has an estimated covariance matrix
that is not SPD, and the ultimate goal is to estimate the inverse,
square root, or inverse square root of the true covariance
matrix. The factor structure allows these tasks to be performed
without constructing any n x n matrices.
The calculations use the fact that if k is known, then X can be
determined from the eigen-decomposition of cov - k*I, which can
in turn be easily obtained from the eigen-decomposition of `cov`.
Thus the problem can be reduced to a 1-dimensional search for k
that does not require repeated eigen-decompositions.
If the input matrix is sparse, then cov - k*I is also sparse, so
the eigen-decomposition can be done efficiently using sparse
routines.
The one-dimensional search for the optimal value of k is not
convex, so a local minimum could be obtained.
Examples
--------
Hard thresholding a covariance matrix may result in a matrix that
is not positive semidefinite. We can approximate a hard
thresholded covariance matrix with a PSD matrix as follows:
>>> import numpy as np
>>> np.random.seed(1234)
>>> b = 1.5 - np.random.rand(10, 1)
>>> x = np.random.randn(100,1).dot(b.T) + np.random.randn(100,10)
>>> cov = np.cov(x)
>>> cov = cov * (np.abs(cov) >= 0.3)
>>> rslt = cov_nearest_factor_homog(cov, 3)
"""
m, n = cov.shape
Q, Lambda, _ = svds(cov, rank)
if sparse.issparse(cov):
QSQ = np.dot(Q.T, cov.dot(Q))
ts = cov.diagonal().sum()
tss = cov.dot(cov).diagonal().sum()
else:
QSQ = np.dot(Q.T, np.dot(cov, Q))
ts = np.trace(cov)
tss = np.trace(np.dot(cov, cov))
def fun(k):
Lambda_t = Lambda - k
v = tss + m*(k**2) + np.sum(Lambda_t**2) - 2*k*ts
v += 2*k*np.sum(Lambda_t) - 2*np.sum(np.diag(QSQ) * Lambda_t)
return v
# Get the optimal decomposition
k_opt = fminbound(fun, 0, 1e5)
Lambda_opt = Lambda - k_opt
fac_opt = Q * np.sqrt(Lambda_opt)
diag = k_opt * np.ones(m, dtype=np.float64) # - (fac_opt**2).sum(1)
return FactoredPSDMatrix(diag, fac_opt)
def corr_thresholded(data, minabs=None, max_elt=1e7):
r"""
Construct a sparse matrix containing the thresholded row-wise
correlation matrix from a data array.
Parameters
----------
data : array_like
The data from which the row-wise thresholded correlation
matrix is to be computed.
minabs : non-negative real
The threshold value; correlation coefficients smaller in
magnitude than minabs are set to zero. If None, defaults
to 1 / sqrt(n), see Notes for more information.
max_elt : int, optional
The maximum number of elements in any intermediate (dense) working
matrix; see Notes.
Returns
-------
cormat : sparse.coo_matrix
The thresholded correlation matrix, in COO format.
Notes
-----
This is an alternative to C = np.corrcoef(data); C \*= (np.abs(C)
>= minabs), suitable for very tall data matrices.
If the data are jointly Gaussian, the marginal sampling
distributions of the elements of the sample correlation matrix are
approximately Gaussian with standard deviation 1 / sqrt(n). The
default value of ``minabs`` is thus equal to 1 standard error, which
will set to zero approximately 68% of the estimated correlation
coefficients for which the population value is zero.
No intermediate matrix with more than ``max_elt`` values will be
    constructed.  However, memory use could still be high if a large
number of correlation values exceed `minabs` in magnitude.
The thresholded matrix is returned in COO format, which can easily
be converted to other sparse formats.
Examples
--------
Here X is a tall data matrix (e.g. with 100,000 rows and 50
columns). The row-wise correlation matrix of X is calculated
and stored in sparse form, with all entries smaller than 0.3
treated as 0.
>>> import numpy as np
>>> np.random.seed(1234)
>>> b = 1.5 - np.random.rand(10, 1)
>>> x = np.random.randn(100,1).dot(b.T) + np.random.randn(100,10)
>>> cmat = corr_thresholded(x, 0.3)
"""
nrow, ncol = data.shape
if minabs is None:
        minabs = 1. / np.sqrt(ncol)
# Row-standardize the data
data = data.copy()
data -= data.mean(1)[:, None]
sd = data.std(1, ddof=1)
ii = np.flatnonzero(sd > 1e-5)
data[ii, :] /= sd[ii][:, None]
ii = np.flatnonzero(sd <= 1e-5)
data[ii, :] = 0
# Number of rows to process in one pass
bs = int(np.floor(max_elt / nrow))
ipos_all, jpos_all, cor_values = [], [], []
ir = 0
while ir < nrow:
ir2 = min(data.shape[0], ir + bs)
cm = np.dot(data[ir:ir2, :], data.T) / (ncol - 1)
cma = np.abs(cm)
ipos, jpos = np.nonzero(cma >= minabs)
ipos_all.append(ipos + ir)
jpos_all.append(jpos)
cor_values.append(cm[ipos, jpos])
ir += bs
ipos = np.concatenate(ipos_all)
jpos = np.concatenate(jpos_all)
cor_values = np.concatenate(cor_values)
cmat = sparse.coo_matrix((cor_values, (ipos, jpos)), (nrow, nrow))
return cmat
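# A minimal usage sketch (synthetic data, illustrative only): threshold the
# row-wise correlations at 0.5 and convert the COO result to CSR, which is
# cheap and convenient for row slicing.
def _example_corr_thresholded():
    np.random.seed(0)
    x = np.random.randn(1000, 20)
    cmat = corr_thresholded(x, minabs=0.5, max_elt=1e5)
    return cmat.tocsr()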
class MultivariateKernel(object):
"""
Base class for multivariate kernels.
An instance of MultivariateKernel implements a `call` method having
signature `call(x, loc)`, returning the kernel weights comparing `x`
(a 1d ndarray) to each row of `loc` (a 2d ndarray).
"""
def call(self, x, loc):
raise NotImplementedError
def set_bandwidth(self, bw):
"""
Set the bandwidth to the given vector.
Parameters
----------
bw : array_like
A vector of non-negative bandwidth values.
"""
self.bw = bw
self._setup()
def _setup(self):
# Precompute the squared bandwidth values.
self.bwk = np.prod(self.bw)
self.bw2 = self.bw * self.bw
def set_default_bw(self, loc, bwm=None):
"""
        Set default bandwidths based on the domain values.
Parameters
----------
loc : array_like
Values from the domain to which the kernel will
be applied.
bwm : scalar, optional
A non-negative scalar that is used to multiply
the default bandwidth.
"""
sd = loc.std(0)
q25, q75 = np.percentile(loc, [25, 75], axis=0)
iqr = (q75 - q25) / 1.349
bw = np.where(iqr < sd, iqr, sd)
bw *= 0.9 / loc.shape[0] ** 0.2
if bwm is not None:
bw *= bwm
# The final bandwidths
self.bw = np.asarray(bw, dtype=np.float64)
self._setup()
class GaussianMultivariateKernel(MultivariateKernel):
"""
The Gaussian (squared exponential) multivariate kernel.
"""
def call(self, x, loc):
return np.exp(-(x - loc)**2 / (2 * self.bw2)).sum(1) / self.bwk
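# A minimal sketch of the kernel API (illustrative only): choose default
# bandwidths from the data, then compute the weights comparing one point to
# every row of `loc`; the weights are largest for rows nearest that point.
def _example_gaussian_kernel():
    np.random.seed(0)
    loc = np.random.normal(size=(50, 2))
    ker = GaussianMultivariateKernel()
    ker.set_default_bw(loc)
    return ker.call(np.r_[0.0, 0.0], loc)  # shape (50,)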
def kernel_covariance(exog, loc, groups, kernel=None, bw=None):
"""
Use kernel averaging to estimate a multivariate covariance function.
The goal is to estimate a covariance function C(x, y) =
cov(Z(x), Z(y)) where x, y are vectors in R^p (e.g. representing
locations in time or space), and Z(.) represents a multivariate
process on R^p.
The data used for estimation can be observed at arbitrary values of the
position vector, and there can be multiple independent observations
from the process.
Parameters
----------
exog : array_like
The rows of exog are realizations of the process obtained at
specified points.
loc : array_like
The rows of loc are the locations (e.g. in space or time) at
which the rows of exog are observed.
groups : array_like
The values of groups are labels for distinct independent copies
of the process.
kernel : MultivariateKernel instance, optional
An instance of MultivariateKernel, defaults to
GaussianMultivariateKernel.
bw : array_like or scalar
A bandwidth vector, or bandwidth multiplier. If a 1d array, it
contains kernel bandwidths for each component of the process, and
must have length equal to the number of columns of exog. If a scalar,
bw is a bandwidth multiplier used to adjust the default bandwidth; if
None, a default bandwidth is used.
Returns
-------
A real-valued function C(x, y) that returns an estimate of the covariance
between values of the process located at x and y.
References
----------
.. [1] Genton M, W Kleiber (2015). Cross covariance functions for
           multivariate geostatistics. Statistical Science 30(2).
https://arxiv.org/pdf/1507.08017.pdf
"""
exog = np.asarray(exog)
loc = np.asarray(loc)
groups = np.asarray(groups)
if loc.ndim == 1:
loc = loc[:, None]
v = [exog.shape[0], loc.shape[0], len(groups)]
if min(v) != max(v):
msg = "exog, loc, and groups must have the same number of rows"
raise ValueError(msg)
# Map from group labels to the row indices in each group.
ix = {}
for i, g in enumerate(groups):
if g not in ix:
ix[g] = []
ix[g].append(i)
for g in ix.keys():
ix[g] = np.sort(ix[g])
if kernel is None:
kernel = GaussianMultivariateKernel()
if bw is None:
kernel.set_default_bw(loc)
elif np.isscalar(bw):
kernel.set_default_bw(loc, bwm=bw)
else:
kernel.set_bandwidth(bw)
def cov(x, y):
kx = kernel.call(x, loc)
ky = kernel.call(y, loc)
cm, cw = 0., 0.
for g, ii in ix.items():
m = len(ii)
j1, j2 = np.indices((m, m))
j1 = ii[j1.flat]
j2 = ii[j2.flat]
w = kx[j1] * ky[j2]
# TODO: some other form of broadcasting may be faster than
# einsum here
cm += np.einsum("ij,ik,i->jk", exog[j1, :], exog[j2, :], w)
cw += w.sum()
if cw < 1e-10:
msg = ("Effective sample size is 0. The bandwidth may be too " +
"small, or you are outside the range of your data.")
warnings.warn(msg)
return np.nan * np.ones_like(cm)
return cm / cw
return cov
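# A minimal end-to-end sketch (simulated data, illustrative only): two
# independent copies of a trivariate process observed at random 2-d
# locations; the returned function estimates the 3 x 3 cross-covariance
# between the process values at any two points.
def _example_kernel_covariance():
    np.random.seed(0)
    loc = np.random.uniform(size=(100, 2))
    exog = np.random.normal(size=(100, 3))
    groups = np.repeat([0, 1], 50)
    c = kernel_covariance(exog, loc, groups)
    return c(np.r_[0.5, 0.5], np.r_[0.6, 0.4])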
|
bashtage/statsmodels
|
statsmodels/stats/correlation_tools.py
|
Python
|
bsd-3-clause
| 32,295
|
[
"Gaussian"
] |
b66b89c6a4110499b90df6f14ccf60135f02c4f5b1e23bc54b91b16eeada1b34
|
# This file is part of Androguard.
#
# Copyright (C) 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file is a simplified version of writer.py that outputs an AST instead of source code."""
import struct
from androguard.decompiler.dad import basic_blocks, instruction, opcode_ins
def array_access(arr, ind):
return ['ArrayAccess', [arr, ind]]
def array_creation(tn, params, dim):
return ['ArrayCreation', [tn] + params, dim]
def array_initializer(params, tn=None):
return ['ArrayInitializer', params, tn]
def assignment(lhs, rhs, op=''):
return ['Assignment', [lhs, rhs], op]
def binary_infix(op, left, right):
return ['BinaryInfix', [left, right], op]
def cast(tn, arg):
return ['Cast', [tn, arg]]
def field_access(triple, left):
return ['FieldAccess', [left], triple]
def literal(result, tt):
return ['Literal', result, tt]
def local(name):
return ['Local', name]
def method_invocation(triple, name, base, params):
if base is None:
return ['MethodInvocation', params, triple, name, False]
return ['MethodInvocation', [base] + params, triple, name, True]
def parenthesis(expr):
return ['Parenthesis', [expr]]
def typen(baset, dim):
return ['TypeName', (baset, dim)]
def unary_prefix(op, left):
return ['Unary', [left], op, False]
def unary_postfix(left, op):
return ['Unary', [left], op, True]
def var_decl(typen, var):
return [typen, var]
def dummy(*args):
return ['Dummy', args]
################################################################################
def expression_stmt(expr):
return ['ExpressionStatement', expr]
def local_decl_stmt(expr, decl):
return ['LocalDeclarationStatement', expr, decl]
def return_stmt(expr):
return ['ReturnStatement', expr]
def throw_stmt(expr):
return ['ThrowStatement', expr]
def jump_stmt(keyword):
return ['JumpStatement', keyword, None]
def loop_stmt(isdo, cond_expr, body):
type_ = 'DoStatement' if isdo else 'WhileStatement'
return [type_, None, cond_expr, body]
def try_stmt(tryb, pairs):
return ['TryStatement', None, tryb, pairs]
def if_stmt(cond_expr, scopes):
return ['IfStatement', None, cond_expr, scopes]
def switch_stmt(cond_expr, ksv_pairs):
return ['SwitchStatement', None, cond_expr, ksv_pairs]
# Create empty statement block (statements to be appended later)
# Note, the code below assumes this can be modified in place
def statement_block():
return ['BlockStatement', None, []]
# Add a statement to the end of a statement block
def _append(sb, stmt):
assert (sb[0] == 'BlockStatement')
if stmt is not None:
sb[2].append(stmt)
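# A minimal sketch (illustrative only): build an empty block and append the
# statement `v0 = v1` to it in place, using the constructors above. This is
# the same pattern the JSONWriter context manager uses further down.
def _example_statement_block():
    sb = statement_block()
    _append(sb, expression_stmt(assignment(local('v0'), local('v1'))))
    return sb  # ['BlockStatement', None, [['ExpressionStatement', ...]]]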
################################################################################
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
}
def parse_descriptor(desc):
dim = 0
while desc and desc[0] == '[':
desc = desc[1:]
dim += 1
if desc in TYPE_DESCRIPTOR:
return typen('.' + TYPE_DESCRIPTOR[desc], dim)
if desc and desc[0] == 'L' and desc[-1] == ';':
return typen(desc[1:-1], dim)
# invalid descriptor (probably None)
return dummy(str(desc))
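# A few illustrative descriptor conversions (minimal sketch): primitive,
# array, and class descriptors all map to TypeName nodes.
def _example_parse_descriptor():
    assert parse_descriptor('I') == typen('.int', 0)
    assert parse_descriptor('[[D') == typen('.double', 2)
    assert parse_descriptor('Ljava/lang/String;') == typen('java/lang/String', 0)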
# Note: the literal_foo functions (and dummy) are also imported by decompile.py
def literal_string(s):
    # We return an escaped string in ASCII encoding
return literal(s.encode('unicode_escape').decode("ascii"), ('java/lang/String', 0))
def literal_class(desc):
return literal(parse_descriptor(desc), ('java/lang/Class', 0))
def literal_bool(b):
return literal(str(b).lower(), ('.boolean', 0))
def literal_int(b):
return literal(str(b), ('.int', 0))
def literal_hex_int(b):
return literal(hex(b), ('.int', 0))
def literal_long(b):
return literal(str(b) + 'L', ('.long', 0))
def literal_float(f):
return literal(str(f) + 'f', ('.float', 0))
def literal_double(f):
return literal(str(f), ('.double', 0))
def literal_null():
return literal('null', ('.null', 0))
def visit_decl(var, init_expr=None):
t = parse_descriptor(var.get_type())
v = local('v{}'.format(var.name))
return local_decl_stmt(init_expr, var_decl(t, v))
def visit_arr_data(value):
data = value.get_data()
tab = []
elem_size = value.element_width
if elem_size == 4:
for i in range(0, value.size * 4, 4):
tab.append(struct.unpack('<i', data[i:i + 4])[0])
else: # FIXME: other cases
for i in range(value.size):
tab.append(data[i])
return array_initializer(list(map(literal_int, tab)))
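# A minimal sketch of the elem_size == 4 branch above (illustrative only):
# fill-array-data payloads store little-endian int32 values, decoded here
# with the same struct format the function uses.
def _example_array_data_decoding():
    data = struct.pack('<3i', 1, -2, 300)
    return [struct.unpack('<i', data[i:i + 4])[0] for i in range(0, 12, 4)]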
def write_inplace_if_possible(lhs, rhs):
if isinstance(
rhs, instruction.BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
exp_rhs = rhs.var_map[rhs.arg2]
# post increment/decrement
if rhs.op in '+-' and isinstance(
exp_rhs, instruction.Constant) and exp_rhs.get_int_value() == 1:
return unary_postfix(visit_expr(lhs), rhs.op * 2)
# compound assignment
return assignment(visit_expr(lhs), visit_expr(exp_rhs), op=rhs.op)
return assignment(visit_expr(lhs), visit_expr(rhs))
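# A minimal sketch (illustrative only) of the three shapes the function
# above can emit, built directly from the AST constructors: `v0++` for
# `v0 = v0 + 1`, `v0 += 5` for `v0 = v0 + 5`, and a plain assignment
# otherwise. The real inputs are DAD IR instructions, not these literals.
def _example_inplace_forms():
    post_inc = unary_postfix(local('v0'), '++')
    compound = assignment(local('v0'), literal_int(5), op='+')
    plain = assignment(local('v0'), local('v1'))
    return post_inc, compound, plain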
def visit_expr(op):
if isinstance(op, instruction.ArrayLengthExpression):
expr = visit_expr(op.var_map[op.array])
return field_access([None, 'length', None], expr)
if isinstance(op, instruction.ArrayLoadExpression):
array_expr = visit_expr(op.var_map[op.array])
index_expr = visit_expr(op.var_map[op.idx])
return array_access(array_expr, index_expr)
if isinstance(op, instruction.ArrayStoreInstruction):
array_expr = visit_expr(op.var_map[op.array])
index_expr = visit_expr(op.var_map[op.index])
rhs = visit_expr(op.var_map[op.rhs])
return assignment(array_access(array_expr, index_expr), rhs)
if isinstance(op, instruction.AssignExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.rhs
if lhs is None:
return visit_expr(rhs)
return write_inplace_if_possible(lhs, rhs)
if isinstance(op, instruction.BaseClass):
if op.clsdesc is None:
assert (op.cls == "super")
return local(op.cls)
return parse_descriptor(op.clsdesc)
if isinstance(op, instruction.BinaryExpression):
lhs = op.var_map.get(op.arg1)
rhs = op.var_map.get(op.arg2)
expr = binary_infix(op.op, visit_expr(lhs), visit_expr(rhs))
if not isinstance(op, instruction.BinaryCompExpression):
expr = parenthesis(expr)
return expr
if isinstance(op, instruction.CheckCastExpression):
lhs = op.var_map.get(op.arg)
return parenthesis(cast(parse_descriptor(op.clsdesc), visit_expr(lhs)))
if isinstance(op, instruction.ConditionalExpression):
lhs = op.var_map.get(op.arg1)
rhs = op.var_map.get(op.arg2)
return binary_infix(op.op, visit_expr(lhs), visit_expr(rhs))
if isinstance(op, instruction.ConditionalZExpression):
arg = op.var_map[op.arg]
if isinstance(arg, instruction.BinaryCompExpression):
arg.op = op.op
return visit_expr(arg)
expr = visit_expr(arg)
atype = arg.get_type()
if atype == 'Z':
if op.op == opcode_ins.Op.EQUAL:
expr = unary_prefix('!', expr)
elif atype in 'VBSCIJFD':
expr = binary_infix(op.op, expr, literal_int(0))
else:
expr = binary_infix(op.op, expr, literal_null())
return expr
if isinstance(op, instruction.Constant):
if op.type == 'Ljava/lang/String;':
return literal_string(op.cst)
elif op.type == 'Z':
return literal_bool(op.cst == 0)
elif op.type in 'ISCB':
return literal_int(op.cst2)
elif op.type in 'J':
return literal_long(op.cst2)
elif op.type in 'F':
return literal_float(op.cst)
elif op.type in 'D':
return literal_double(op.cst)
elif op.type == 'Ljava/lang/Class;':
return literal_class(op.clsdesc)
return dummy('??? Unexpected constant: ' + str(op.type))
if isinstance(op, instruction.FillArrayExpression):
array_expr = visit_expr(op.var_map[op.reg])
rhs = visit_arr_data(op.value)
return assignment(array_expr, rhs)
if isinstance(op, instruction.FilledArrayExpression):
tn = parse_descriptor(op.type)
params = [visit_expr(op.var_map[x]) for x in op.args]
return array_initializer(params, tn)
if isinstance(op, instruction.InstanceExpression):
triple = op.clsdesc[1:-1], op.name, op.ftype
expr = visit_expr(op.var_map[op.arg])
return field_access(triple, expr)
if isinstance(op, instruction.InstanceInstruction):
triple = op.clsdesc[1:-1], op.name, op.atype
lhs = field_access(triple, visit_expr(op.var_map[op.lhs]))
rhs = visit_expr(op.var_map[op.rhs])
return assignment(lhs, rhs)
if isinstance(op, instruction.InvokeInstruction):
base = op.var_map[op.base]
params = [op.var_map[arg] for arg in op.args]
params = list(map(visit_expr, params))
if op.name == '<init>':
if isinstance(base, instruction.ThisParam):
keyword = 'this' if base.type[1:-1] == op.triple[0] else 'super'
return method_invocation(op.triple, keyword, None, params)
elif isinstance(base, instruction.NewInstance):
return ['ClassInstanceCreation', op.triple, params,
parse_descriptor(base.type)]
else:
assert (isinstance(base, instruction.Variable))
# fallthrough to create dummy <init> call
return method_invocation(op.triple, op.name, visit_expr(base), params)
# for unmatched monitor instructions, just create dummy expressions
if isinstance(op, instruction.MonitorEnterExpression):
return dummy("monitor enter(", visit_expr(op.var_map[op.ref]), ")")
if isinstance(op, instruction.MonitorExitExpression):
return dummy("monitor exit(", visit_expr(op.var_map[op.ref]), ")")
if isinstance(op, instruction.MoveExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.var_map.get(op.rhs)
return write_inplace_if_possible(lhs, rhs)
if isinstance(op, instruction.MoveResultExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.var_map.get(op.rhs)
return assignment(visit_expr(lhs), visit_expr(rhs))
if isinstance(op, instruction.NewArrayExpression):
tn = parse_descriptor(op.type[1:])
expr = visit_expr(op.var_map[op.size])
return array_creation(tn, [expr], 1)
# create dummy expression for unmatched newinstance
if isinstance(op, instruction.NewInstance):
return dummy("new ", parse_descriptor(op.type))
if isinstance(op, instruction.Param):
if isinstance(op, instruction.ThisParam):
return local('this')
return local('p{}'.format(op.v))
if isinstance(op, instruction.StaticExpression):
triple = op.clsdesc[1:-1], op.name, op.ftype
return field_access(triple, parse_descriptor(op.clsdesc))
if isinstance(op, instruction.StaticInstruction):
triple = op.clsdesc[1:-1], op.name, op.ftype
lhs = field_access(triple, parse_descriptor(op.clsdesc))
rhs = visit_expr(op.var_map[op.rhs])
return assignment(lhs, rhs)
if isinstance(op, instruction.SwitchExpression):
return visit_expr(op.var_map[op.src])
if isinstance(op, instruction.UnaryExpression):
lhs = op.var_map.get(op.arg)
if isinstance(op, instruction.CastExpression):
expr = cast(parse_descriptor(op.clsdesc), visit_expr(lhs))
else:
expr = unary_prefix(op.op, visit_expr(lhs))
return parenthesis(expr)
if isinstance(op, instruction.Variable):
# assert(op.declared)
return local('v{}'.format(op.name))
return dummy('??? Unexpected op: ' + type(op).__name__)
def visit_ins(op, isCtor=False):
if isinstance(op, instruction.ReturnInstruction):
expr = None if op.arg is None else visit_expr(op.var_map[op.arg])
return return_stmt(expr)
elif isinstance(op, instruction.ThrowExpression):
return throw_stmt(visit_expr(op.var_map[op.ref]))
elif isinstance(op, instruction.NopExpression):
return None
# Local var decl statements
if isinstance(op, (instruction.AssignExpression, instruction.MoveExpression,
instruction.MoveResultExpression)):
lhs = op.var_map.get(op.lhs)
rhs = op.rhs if isinstance(
op, instruction.AssignExpression) else op.var_map.get(op.rhs)
if isinstance(lhs, instruction.Variable) and not lhs.declared:
lhs.declared = True
expr = visit_expr(rhs)
return visit_decl(lhs, expr)
# skip this() at top of constructors
if isCtor and isinstance(op, instruction.AssignExpression):
op2 = op.rhs
if op.lhs is None and isinstance(op2, instruction.InvokeInstruction):
if op2.name == '<init>' and len(op2.args) == 0:
if isinstance(op2.var_map[op2.base], instruction.ThisParam):
return None
# MoveExpression is skipped when lhs = rhs
if isinstance(op, instruction.MoveExpression):
if op.var_map.get(op.lhs) is op.var_map.get(op.rhs):
return None
return expression_stmt(visit_expr(op))
class JSONWriter:
def __init__(self, graph, method):
self.graph = graph
self.method = method
self.visited_nodes = set()
self.loop_follow = [None]
self.if_follow = [None]
self.switch_follow = [None]
self.latch_node = [None]
self.try_follow = [None]
self.next_case = None
self.need_break = True
self.constructor = False
self.context = []
    # This class is a context manager, so it can be used like
    #     with self as foo:
    #         ...
    # which pushes a statement block onto the context stack and assigns it
    # to foo; within the with block, all added instructions go into foo.
def __enter__(self):
self.context.append(statement_block())
return self.context[-1]
def __exit__(self, *args):
self.context.pop()
return False
# Add a statement to the current context
def add(self, val):
_append(self.context[-1], val)
def visit_ins(self, op):
self.add(visit_ins(op, isCtor=self.constructor))
# Note: this is a mutating operation
def get_ast(self):
m = self.method
flags = m.access
if 'constructor' in flags:
flags.remove('constructor')
self.constructor = True
params = m.lparams[:]
if 'static' not in m.access:
params = params[1:]
# DAD doesn't create any params for abstract methods
if len(params) != len(m.params_type):
assert ('abstract' in flags or 'native' in flags)
assert (not params)
params = list(range(len(m.params_type)))
paramdecls = []
for ptype, name in zip(m.params_type, params):
t = parse_descriptor(ptype)
v = local('p{}'.format(name))
paramdecls.append(var_decl(t, v))
if self.graph is None:
body = None
else:
with self as body:
self.visit_node(self.graph.entry)
return {
'triple': m.triple,
'flags': flags,
'ret': parse_descriptor(m.type),
'params': paramdecls,
'comments': [],
'body': body,
}
def _visit_condition(self, cond):
if cond.isnot:
cond.cond1.neg()
left = parenthesis(self.get_cond(cond.cond1))
right = parenthesis(self.get_cond(cond.cond2))
op = '&&' if cond.isand else '||'
res = binary_infix(op, left, right)
return res
def get_cond(self, node):
if isinstance(node, basic_blocks.ShortCircuitBlock):
return self._visit_condition(node.cond)
elif isinstance(node, basic_blocks.LoopBlock):
return self.get_cond(node.cond)
else:
assert (type(node) == basic_blocks.CondBlock)
assert (len(node.ins) == 1)
return visit_expr(node.ins[-1])
def visit_node(self, node):
if node in (self.if_follow[-1], self.switch_follow[-1],
self.loop_follow[-1], self.latch_node[-1],
self.try_follow[-1]):
return
if not node.type.is_return and node in self.visited_nodes:
return
self.visited_nodes.add(node)
for var in node.var_to_declare:
if not var.declared:
self.add(visit_decl(var))
var.declared = True
node.visit(self)
def visit_loop_node(self, loop):
isDo = cond_expr = body = None
follow = loop.follow['loop']
if loop.looptype.is_pretest:
if loop.true is follow:
loop.neg()
loop.true, loop.false = loop.false, loop.true
isDo = False
cond_expr = self.get_cond(loop)
elif loop.looptype.is_posttest:
isDo = True
self.latch_node.append(loop.latch)
elif loop.looptype.is_endless:
isDo = False
cond_expr = literal_bool(True)
with self as body:
self.loop_follow.append(follow)
if loop.looptype.is_pretest:
self.visit_node(loop.true)
else:
self.visit_node(loop.cond)
self.loop_follow.pop()
if loop.looptype.is_pretest:
pass
elif loop.looptype.is_posttest:
self.latch_node.pop()
cond_expr = self.get_cond(loop.latch)
else:
self.visit_node(loop.latch)
assert (cond_expr is not None and isDo is not None)
self.add(loop_stmt(isDo, cond_expr, body))
if follow is not None:
self.visit_node(follow)
def visit_cond_node(self, cond):
cond_expr = None
scopes = []
follow = cond.follow['if']
if cond.false is cond.true:
self.add(expression_stmt(self.get_cond(cond)))
self.visit_node(cond.true)
return
if cond.false is self.loop_follow[-1]:
cond.neg()
cond.true, cond.false = cond.false, cond.true
if self.loop_follow[-1] in (cond.true, cond.false):
cond_expr = self.get_cond(cond)
with self as scope:
self.add(jump_stmt('break'))
scopes.append(scope)
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.add(if_stmt(cond_expr, scopes))
elif follow is not None:
if cond.true in (follow, self.next_case) or \
cond.num > cond.true.num:
# or cond.true.num > cond.false.num:
cond.neg()
cond.true, cond.false = cond.false, cond.true
self.if_follow.append(follow)
if cond.true: # in self.visited_nodes:
cond_expr = self.get_cond(cond)
with self as scope:
self.visit_node(cond.true)
scopes.append(scope)
is_else = not (follow in (cond.true, cond.false))
if is_else and cond.false not in self.visited_nodes:
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.if_follow.pop()
self.add(if_stmt(cond_expr, scopes))
self.visit_node(follow)
else:
cond_expr = self.get_cond(cond)
with self as scope:
self.visit_node(cond.true)
scopes.append(scope)
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.add(if_stmt(cond_expr, scopes))
def visit_switch_node(self, switch):
lins = switch.get_ins()
for ins in lins[:-1]:
self.visit_ins(ins)
switch_ins = switch.get_ins()[-1]
cond_expr = visit_expr(switch_ins)
ksv_pairs = []
follow = switch.follow['switch']
cases = switch.cases
self.switch_follow.append(follow)
default = switch.default
for i, node in enumerate(cases):
if node in self.visited_nodes:
continue
cur_ks = switch.node_to_case[node][:]
if i + 1 < len(cases):
self.next_case = cases[i + 1]
else:
self.next_case = None
if node is default:
cur_ks.append(None)
default = None
with self as body:
self.visit_node(node)
if self.need_break:
self.add(jump_stmt('break'))
else:
self.need_break = True
ksv_pairs.append((cur_ks, body))
if default not in (None, follow):
with self as body:
self.visit_node(default)
ksv_pairs.append(([None], body))
self.add(switch_stmt(cond_expr, ksv_pairs))
self.switch_follow.pop()
self.visit_node(follow)
def visit_statement_node(self, stmt):
sucs = self.graph.sucs(stmt)
for ins in stmt.get_ins():
self.visit_ins(ins)
if len(sucs) == 1:
if sucs[0] is self.loop_follow[-1]:
self.add(jump_stmt('break'))
elif sucs[0] is self.next_case:
self.need_break = False
else:
self.visit_node(sucs[0])
def visit_try_node(self, try_node):
with self as tryb:
self.try_follow.append(try_node.follow)
self.visit_node(try_node.try_start)
pairs = []
for catch_node in try_node.catch:
if catch_node.exception_ins:
ins = catch_node.exception_ins
assert (isinstance(ins, instruction.MoveExceptionExpression))
var = ins.var_map[ins.ref]
var.declared = True
ctype = var.get_type()
name = 'v{}'.format(var.name)
else:
ctype = catch_node.catch_type
name = '_'
catch_decl = var_decl(parse_descriptor(ctype), local(name))
with self as body:
self.visit_node(catch_node.catch_start)
pairs.append((catch_decl, body))
self.add(try_stmt(tryb, pairs))
self.visit_node(self.try_follow.pop())
def visit_return_node(self, ret):
self.need_break = False
for ins in ret.get_ins():
self.visit_ins(ins)
def visit_throw_node(self, throw):
for ins in throw.get_ins():
self.visit_ins(ins)
|
reox/androguard
|
androguard/decompiler/dad/dast.py
|
Python
|
apache-2.0
| 23,908
|
[
"VisIt"
] |
a8bd397bac60144caeb1ae967a5d38bf1146d4b105fc6e42dc749f25bf6d7ad2
|
#!/usr/bin/env python
# encoding: utf-8
"""Wrapper of ELLIPSE."""
import os
import gc
import sys
import copy
import warnings
import argparse
import subprocess
import numpy as np
from scipy.stats import sigmaclip
# Astropy related
from astropy.io import fits
from astropy.table import Table, Column
from kungpao import io
from kungpao import utils
# Matplotlib default settings
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import Ellipse
plt.rc('text', usetex=True)
use_py3 = sys.version_info > (3, 0)
__all__ = ['correctPositionAngle', 'convIso2Ell', 'imageMaskNaN',
'defaultEllipse', 'easierEllipse', 'writeEllipPar',
'ellipRemoveIndef', 'readEllipseOut', 'ellipseGetGrowthCurve',
           'ellipseGetR50', 'ellipseGetAvgCen', 'ellipseGetAvgGeometry',
           'ellipseFixNegIntens',
'ellipseGetOuterBoundary', 'ellipsePlotSummary',
'saveEllipOut', 'galSBP',]
# ------------------------------------------------------------------------- #
# About the Colormaps
IMG_CMAP = plt.get_cmap('viridis')
IMG_CMAP.set_bad(color='black')
COM = '#' * 100
SEP = '-' * 100
WAR = '!' * 100
# ------------------------------------------------------------------------- #
def correctPositionAngle(ellipOut, paNorm=False, dPA=75.0):
"""
    Correct the position angle profile for large jumps.

    Parameters:
        ellipOut : output table from Ellipse.
        paNorm : correct the normalized PA column ('pa_norm') instead of 'pa'.
        dPA : jump (in degrees) between adjacent isophotes that triggers
            a +/- 180 degree correction.
"""
if paNorm:
posAng = ellipOut['pa_norm']
else:
posAng = ellipOut['pa']
for i in range(1, len(posAng)):
if (posAng[i] - posAng[i - 1]) >= dPA:
posAng[i] -= 180.0
elif (posAng[i] - posAng[i - 1] <= (-1.0 * dPA)):
posAng[i] += 180.0
if paNorm:
ellipOut['pa_norm'] = posAng
else:
ellipOut['pa'] = posAng
return ellipOut
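# A minimal sketch (synthetic values, illustrative only): a +155 degree jump
# at the third isophote is folded back by 180 degrees so that the profile
# stays continuous.
def _examplePositionAngle():
    fakeOut = {'pa': np.array([10.0, 15.0, 170.0, 175.0])}
    fixed = correctPositionAngle(fakeOut, paNorm=False, dPA=75.0)
    return fixed['pa']  # array([ 10.,  15., -10.,  -5.])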
def convIso2Ell(ellTab, xpad=0.0, ypad=0.0):
"""
    Convert an Ellipse isophote table into matplotlib Ellipse patches.

    Parameters:
        ellTab : table with 'x0', 'y0', 'pa', 'sma', and 'ell' columns.
        xpad, ypad : offsets subtracted from the isophote centers.
"""
x = ellTab['x0'] - xpad
y = ellTab['y0'] - ypad
pa = ellTab['pa']
a = ellTab['sma'] * 2.0
b = ellTab['sma'] * 2.0 * (1.0 - ellTab['ell'])
ells = [Ellipse(xy=np.array([x[i], y[i]]),
width=np.array(b[i]),
height=np.array(a[i]),
angle=np.array(pa[i]))
for i in range(x.shape[0])]
return ells
def imageMaskNaN(inputImage, inputMask, verbose=False):
"""
    Assign NaN to the masked region of an image.

    Under PyRAF, the .pl mask file does not seem to work; this is a workaround.
"""
newImage = inputImage.replace('.fits', '_nan.fits')
if verbose:
print(" ## %s ---> %s " % (inputImage, newImage))
if os.path.islink(inputImage):
imgOri = os.readlink(inputImage)
else:
imgOri = inputImage
if not os.path.isfile(imgOri):
raise Exception("Can not find the FITS image: %s" % imgOri)
else:
imgArr = fits.open(imgOri)[0].data
imgHead = fits.open(imgOri)[0].header
if os.path.islink(inputMask):
mskOri = os.readlink(inputMask)
else:
mskOri = inputMask
if not os.path.isfile(mskOri):
raise Exception("Can not find the FITS mask: %s" % mskOri)
else:
mskArr = fits.open(mskOri)[0].data
imgArr[mskArr > 0] = np.nan
newHdu = fits.PrimaryHDU(imgArr, header=imgHead)
hduList = fits.HDUList([newHdu])
    hduList.writeto(newImage, overwrite=True)
del imgArr
del mskArr
return newImage
def defaultEllipse(x0, y0, maxsma, ellip0=0.05, pa0=0.0, sma0=6.0, minsma=0.0,
linear=False, step=0.08, recenter=True, conver=0.05,
hcenter=True, hellip=True, hpa=True, minit=10, maxit=250,
olthresh=0.75, mag0=27.0, integrmode='median', usclip=2.5,
lsclip=3.0, nclip=2, fflag=0.5, harmonics=False):
"""
    The default settings for an Ellipse run.

    Parameters: the keyword arguments map one-to-one onto the geompar,
    controlpar, samplepar, and magpar parameter sets of x_isophote.e.
"""
ellipConfig = np.recarray((1,), dtype=[('x0', float), ('y0', float),
('ellip0', float), ('pa0', float),
('sma0', float), ('minsma', float),
('maxsma', float), ('linear', bool),
('step', float), ('recenter', bool),
                                           ('conver', float), ('hcenter', bool),
('hellip', bool), ('hpa', bool),
('minit', int), ('maxit', int),
('olthresh', float),
('mag0', float),
('integrmode', 'a10'),
('usclip', float),
('lsclip', float), ('nclip', int),
('fflag', float),
('harmonics', bool)])
# Default setting for Ellipse Run
ellipConfig['x0'] = x0
ellipConfig['y0'] = y0
ellipConfig['ellip0'] = ellip0
ellipConfig['pa0'] = pa0
ellipConfig['sma0'] = sma0
ellipConfig['minsma'] = minsma
ellipConfig['maxsma'] = maxsma
ellipConfig['linear'] = linear
ellipConfig['step'] = step
ellipConfig['recenter'] = recenter
ellipConfig['conver'] = conver
ellipConfig['hcenter'] = hcenter
ellipConfig['hellip'] = hellip
ellipConfig['hpa'] = hpa
ellipConfig['minit'] = minit
ellipConfig['maxit'] = maxit
ellipConfig['olthresh'] = olthresh
ellipConfig['mag0'] = mag0
ellipConfig['integrmode'] = integrmode
ellipConfig['usclip'] = usclip
ellipConfig['lsclip'] = lsclip
ellipConfig['nclip'] = nclip
ellipConfig['fflag'] = fflag
ellipConfig['harmonics'] = harmonics
return ellipConfig
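# A minimal usage sketch (hypothetical numbers, illustrative only): a default
# configuration centred on (400, 400), reaching sma = 200 pixels, with the
# center, ellipticity, and position angle left free to vary.
def _exampleDefaultConfig():
    cfg = defaultEllipse(400.0, 400.0, 200.0, ellip0=0.2, pa0=45.0,
                         hcenter=False, hellip=False, hpa=False)
    return cfg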
def easierEllipse(ellipConfig, degree=3, verbose=True,
dRad=0.90, dStep=0.008, dFlag=0.03):
"""Make the Ellipse run easier."""
if verbose:
print("### Maxsma %6.1f --> %6.1f" % (ellipConfig['maxsma'],
ellipConfig['maxsma'] *
dRad))
ellipConfig['maxsma'] *= dRad
if degree > 3:
if verbose:
print("### Step %6.2f --> %6.2f" % (ellipConfig['step'],
ellipConfig['step'] +
dStep))
ellipConfig['step'] += dStep
if degree > 4:
if verbose:
print("### Flag %6.2f --> %6.2f" % (ellipConfig['fflag'],
ellipConfig['fflag'] +
dFlag))
ellipConfig['fflag'] += dFlag
return ellipConfig
def writeEllipPar(cfg, image, outBin, outPar, inEllip=None):
"""Write a parameter file for x_isophote.e."""
if os.path.isfile(outPar):
os.remove(outPar)
f = open(outPar, 'w')
# ----------------------------------------------------------------- #
f.write('\n')
# ----------------------------------------------------------------- #
"""Ellipse parameters"""
f.write('ellipse.input = "%s" \n' % image.strip())
f.write('ellipse.output = "%s" \n' % outBin.strip())
f.write('ellipse.dqf = ".c1h" \n')
f.write('ellipse.interactive = no \n')
f.write('ellipse.device = "red" \n')
f.write('ellipse.icommands = "" \n')
f.write('ellipse.gcommands = "" \n')
f.write('ellipse.masksz = 5 \n')
f.write('ellipse.region = no \n')
f.write('ellipse.memory = yes \n')
f.write('ellipse.verbose = no \n')
f.write('ellipse.mode = "al" \n')
# Used for force photometry mode
if inEllip is None:
f.write('ellipse.inellip = "" \n')
else:
f.write('ellipse.inellip = "%s" \n' % inEllip.strip())
# ----------------------------------------------------------------- #
"""Sampling parameters"""
intMode = cfg['integrmode'][0]
if use_py3:
intMode = intMode.decode('UTF-8')
intMode = intMode.lower().strip()
if intMode == 'median':
f.write('samplepar.integrmode = "median" \n')
elif intMode == 'mean':
f.write('samplepar.integrmode = "mean" \n')
elif intMode == 'bi-linear':
f.write('samplepar.integrmode = "bi-linear" \n')
else:
raise Exception(
"### Only 'mean', 'median', and 'bi-linear' are available !")
f.write('samplepar.usclip = %5.2f \n' % cfg['usclip'])
f.write('samplepar.lsclip = %5.2f \n' % cfg['lsclip'])
f.write('samplepar.nclip = %2d \n' % cfg['nclip'])
f.write('samplepar.fflag = %6.4f \n' % cfg['fflag'])
f.write('samplepar.sdevice = "none" \n')
f.write('samplepar.tsample = "none" \n')
f.write('samplepar.absangle = yes \n')
if cfg['harmonics']:
f.write('samplepar.harmonics = "1 2 3 4" \n')
else:
f.write('samplepar.harmonics = "none" \n')
f.write('samplepar.mode = "al" \n')
# ----------------------------------------------------------------- #
"""Control parameters"""
f.write('controlpar.conver = %5.2f \n' % cfg['conver'])
f.write('controlpar.minit = %3d \n' % cfg['minit'])
f.write('controlpar.maxit = %3d \n' % cfg['maxit'])
if cfg['hcenter']:
f.write('controlpar.hcenter = yes \n')
else:
f.write('controlpar.hcenter = no \n')
if cfg['hellip']:
f.write('controlpar.hellip = yes \n')
else:
f.write('controlpar.hellip = no \n')
if cfg['hpa']:
f.write('controlpar.hpa = yes \n')
else:
f.write('controlpar.hpa = no \n')
f.write('controlpar.wander = INDEF \n')
f.write('controlpar.maxgerr = 0.5 \n')
f.write('controlpar.olthresh = %4.2f \n' % cfg['olthresh'])
f.write('controlpar.soft = yes \n')
f.write('controlpar.mode = "al" \n')
# ----------------------------------------------------------------- #
"""Geometry parameters"""
if (cfg['x0'] > 0) and (cfg['y0'] > 0):
f.write('geompar.x0 = %8.2f \n' % cfg['x0'])
f.write('geompar.y0 = %8.2f \n' % cfg['y0'])
else:
raise Exception("Make sure that the input X0 and Y0 are meaningful !",
cfg['x0'], cfg['y0'])
if (cfg['ellip0'] >= 0.0) and (cfg['ellip0'] < 1.0):
f.write('geompar.ellip0 = %5.2f \n' % cfg['ellip0'])
else:
raise Exception("Make sure that the input Ellipticity is meaningful !",
cfg['ellip0'])
if (cfg['pa0'] >= -90.0) and (cfg['pa0'] <= 90.0):
f.write('geompar.pa0 = %5.2f \n' % cfg['pa0'])
else:
raise Exception("Make sure that the input Position Angle is meaningful !",
cfg['pa0'])
f.write('geompar.sma0 = %8.2f \n' % cfg['sma0'])
f.write('geompar.minsma = %8.1f \n' % cfg['minsma'])
f.write('geompar.maxsma = %8.1f \n' % cfg['maxsma'])
f.write('geompar.step = %5.2f \n' % cfg['step'])
if cfg['linear']:
f.write('geompar.linear = yes \n')
else:
f.write('geompar.linear = no \n')
if cfg['recenter']:
f.write('geompar.recenter = yes \n')
else:
f.write('geompar.recenter = no \n')
f.write('geompar.maxrit = INDEF \n')
f.write('geompar.xylearn = yes \n')
f.write('geompar.physical = yes \n')
f.write('geompar.mode = "al" \n')
# ----------------------------------------------------------------- #
"""Magnitude parameters"""
f.write('magpar.mag0 = %6.2f \n' % cfg['mag0'])
f.write('magpar.refer = 1. \n')
f.write('magpar.zerolevel = 0. \n')
f.write('magpar.mode = "al" \n')
# ----------------------------------------------------------------- #
f.close()
if os.path.isfile(outPar):
return True
else:
return False
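# A minimal usage sketch (hypothetical file names, illustrative only): write
# the x_isophote.e parameter file for a single free-geometry Ellipse run.
def _exampleWritePar():
    cfg = defaultEllipse(400.0, 400.0, 200.0)
    return writeEllipPar(cfg, 'galaxy_img.fits', 'galaxy_ellip.bin',
                         'galaxy_ellip.par')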
def ellipRemoveIndef(outTabName, replace='NaN'):
"""
    Remove the INDEF values from the Ellipse output table.

    Parameters:
        outTabName : ASCII table dumped from the Ellipse binary output.
        replace : string that substitutes every INDEF (default 'NaN').
"""
if os.path.exists(outTabName):
subprocess.call(
['sed', '-i_back', 's/INDEF/' + replace + '/g', outTabName])
if os.path.isfile(outTabName.replace('.tab', '_back.tab')):
os.remove(outTabName.replace('.tab', '_back.tab'))
else:
raise Exception('Can not find the input catalog!')
return outTabName
def readEllipseOut(outTabName, pix=1.0, zp=27.0, exptime=1.0, bkg=0.0,
harmonics=False, galR=None, minSma=2.0, dPA=75.0,
rFactor=0.2, fRatio1=0.20, fRatio2=0.60, useTflux=False):
"""
    Read the Ellipse output into an astropy Table.

    Parameters:
        outTabName : ASCII table dumped from the Ellipse binary output.
        pix : pixel scale (arcsec/pixel).
        zp : photometric zeropoint.
        exptime : exposure time (seconds).
        bkg : background level subtracted from the intensity.
"""
# Replace the 'INDEF' in the table
ellipRemoveIndef(outTabName)
ellipseOut = Table.read(outTabName, format='ascii.no_header')
# Rename all the columns
ellipseOut.rename_column('col1', 'sma')
ellipseOut.rename_column('col2', 'intens')
ellipseOut.rename_column('col3', 'int_err')
ellipseOut.rename_column('col4', 'pix_var')
ellipseOut.rename_column('col5', 'rms')
ellipseOut.rename_column('col6', 'ell')
ellipseOut.rename_column('col7', 'ell_err')
ellipseOut.rename_column('col8', 'pa')
ellipseOut.rename_column('col9', 'pa_err')
ellipseOut.rename_column('col10', 'x0')
ellipseOut.rename_column('col11', 'x0_err')
ellipseOut.rename_column('col12', 'y0')
ellipseOut.rename_column('col13', 'y0_err')
ellipseOut.rename_column('col14', 'grad')
ellipseOut.rename_column('col15', 'grad_err')
ellipseOut.rename_column('col16', 'grad_r_err')
ellipseOut.rename_column('col17', 'rsma')
ellipseOut.rename_column('col18', 'mag')
ellipseOut.rename_column('col19', 'mag_lerr')
ellipseOut.rename_column('col20', 'mag_uerr')
ellipseOut.rename_column('col21', 'tflux_e')
ellipseOut.rename_column('col22', 'tflux_c')
ellipseOut.rename_column('col23', 'tmag_e')
ellipseOut.rename_column('col24', 'tmag_c')
ellipseOut.rename_column('col25', 'npix_e')
ellipseOut.rename_column('col26', 'npix_c')
ellipseOut.rename_column('col27', 'a3')
ellipseOut.rename_column('col28', 'a3_err')
ellipseOut.rename_column('col29', 'b3')
ellipseOut.rename_column('col30', 'b3_err')
ellipseOut.rename_column('col31', 'a4')
ellipseOut.rename_column('col32', 'a4_err')
ellipseOut.rename_column('col33', 'b4')
ellipseOut.rename_column('col34', 'b4_err')
ellipseOut.rename_column('col35', 'ndata')
ellipseOut.rename_column('col36', 'nflag')
ellipseOut.rename_column('col37', 'niter')
ellipseOut.rename_column('col38', 'stop')
ellipseOut.rename_column('col39', 'a_big')
ellipseOut.rename_column('col40', 'sarea')
if harmonics:
ellipseOut.rename_column('col41', 'A1')
ellipseOut.rename_column('col42', 'A1_err')
ellipseOut.rename_column('col43', 'B1')
ellipseOut.rename_column('col44', 'B1_err')
ellipseOut.rename_column('col45', 'A2')
ellipseOut.rename_column('col46', 'A2_err')
ellipseOut.rename_column('col47', 'B2')
ellipseOut.rename_column('col48', 'B2_err')
ellipseOut.rename_column('col49', 'A3')
ellipseOut.rename_column('col50', 'A3_err')
ellipseOut.rename_column('col51', 'B3')
ellipseOut.rename_column('col52', 'B3_err')
ellipseOut.rename_column('col53', 'A4')
ellipseOut.rename_column('col54', 'A4_err')
ellipseOut.rename_column('col55', 'B4')
ellipseOut.rename_column('col56', 'B4_err')
# Normalize the PA
ellipseOut = correctPositionAngle(ellipseOut, paNorm=False, dPA=dPA)
ellipseOut.add_column(
Column(name='pa_norm', data=np.array(
[utils.normalize_angle(pa, lower=-90, upper=90.0, b=True)
for pa in ellipseOut['pa']])))
# Apply a photometric zeropoint to the magnitude
ellipseOut['mag'] += zp
ellipseOut['tmag_e'] += zp
ellipseOut['tmag_c'] += zp
# Convert the intensity into surface brightness
pixArea = (pix ** 2.0)
# Surface brightness
# Fixed the negative intensity
intensOri = (ellipseOut['intens'])
intensSub = (ellipseOut['intens'] - bkg)
# intensOri[intensOri <= 0] = np.nan
# intensSub[intensSub <= 0] = np.nan
# Surface brightness
sbpOri = zp - 2.5 * np.log10(intensOri / (pixArea * exptime))
sbpSub = zp - 2.5 * np.log10(intensSub / (pixArea * exptime))
ellipseOut.add_column(Column(name='sbp_ori', data=sbpOri))
ellipseOut.add_column(Column(name='sbp_sub', data=sbpSub))
ellipseOut.add_column(Column(name='sbp', data=sbpSub))
ellipseOut.add_column(Column(name='intens_sub', data=intensSub))
# Also save the background level
ellipseOut.add_column(
Column(name='intens_bkg', data=(ellipseOut['sma'] * 0.0 + bkg)))
# Not so accurate estimates of surface brightness error
sbp_low = zp - 2.5 * np.log10((intensSub + ellipseOut['int_err']) /
(pixArea * exptime))
sbp_err = (sbpSub - sbp_low)
sbp_upp = (sbpSub + sbp_err)
ellipseOut.add_column(Column(name='sbp_err', data=sbp_err))
ellipseOut.add_column(Column(name='sbp_low', data=sbp_low))
ellipseOut.add_column(Column(name='sbp_upp', data=sbp_upp))
# Convert the unit of radius into arcsecs
ellipseOut.add_column(Column(name='sma_asec',
data=(ellipseOut['sma'] * pix)))
ellipseOut.add_column(Column(name='rsma_asec',
data=(ellipseOut['sma'] * pix) ** 0.25))
# Curve of Growth
cogOri, maxSma, maxFlux = ellipseGetGrowthCurve(ellipseOut,
bkgCor=False,
useTflux=useTflux)
ellipseOut.add_column(Column(name='growth_ori', data=cogOri))
cogSub, maxSma, maxFlux = ellipseGetGrowthCurve(ellipseOut,
bkgCor=True)
ellipseOut.add_column(Column(name='growth_sub', data=cogSub))
# Get the average X0, Y0, Q, and PA
if galR is None:
galR = np.max(ellipseOut['sma']) * rFactor
avgX, avgY = ellipseGetAvgCen(ellipseOut, galR, minSma=minSma)
"""
Try to select a region around R50 to get the average geometry
"""
radTemp = ellipseOut['sma'][(cogSub >= (maxFlux * fRatio1)) &
(cogSub <= (maxFlux * fRatio2))]
avgQ, avgPA = ellipseGetAvgGeometry(ellipseOut, np.nanmax(radTemp),
minSma=np.nanmin(radTemp))
# Save as new column
ellipseOut.add_column(Column(name='avg_x0',
data=(ellipseOut['sma'] * 0.0 + avgX)))
ellipseOut.add_column(Column(name='avg_y0',
data=(ellipseOut['sma'] * 0.0 + avgY)))
ellipseOut.add_column(Column(name='avg_q',
data=(ellipseOut['sma'] * 0.0 + avgQ)))
ellipseOut.add_column(Column(name='avg_pa',
data=(ellipseOut['sma'] * 0.0 + avgPA)))
return ellipseOut
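# A minimal usage sketch (hypothetical file name, illustrative only): read an
# Ellipse ASCII table with an HSC-like pixel scale and zeropoint.
def _exampleReadEllipse():
    return readEllipseOut('galaxy_ellip_1.tab', pix=0.168, zp=27.0,
                          exptime=1.0, bkg=0.0)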
def ellipseGetGrowthCurve(ellipOut, bkgCor=False, intensArr=None,
useTflux=False):
"""
    Extract the curve of growth from the Ellipse output.

    Parameters:
        ellipOut : output table from Ellipse.
        bkgCor : use the background-corrected intensity ('intens_sub').
        intensArr : optional intensity array that overrides the table column.
        useTflux : return the 'tflux_e' column instead of integrating.
"""
if not useTflux:
# The area in unit of pixels covered by an elliptical isophote
ellArea = np.pi * ((ellipOut['sma'] ** 2.0) * (1.0 - ellipOut['ell']))
# The area in unit covered by the "ring"
# isoArea = np.append(ellArea[0], [ellArea[1:] - ellArea[:-1]])
# The total flux inside the "ring"
if intensArr is None:
if bkgCor:
intensUse = ellipOut['intens_sub']
else:
intensUse = ellipOut['intens']
else:
intensUse = intensArr
try:
isoFlux = np.append(
ellArea[0], [ellArea[1:] - ellArea[:-1]]) * intensUse
except Exception:
isoFlux = np.append(
ellArea[0], [ellArea[1:] - ellArea[:-1]]) * ellipOut['intens']
# Get the growth Curve
if use_py3:
curveOfGrowth = np.asarray(
list(map(lambda x: np.nansum(isoFlux[0:x + 1]), range(isoFlux.shape[0]))))
else:
curveOfGrowth = np.asarray(
map(lambda x: np.nansum(isoFlux[0:x + 1]), range(isoFlux.shape[0])))
else:
curveOfGrowth = ellipOut['tflux_e']
indexMax = np.argmax(curveOfGrowth)
maxIsoSma = ellipOut['sma'][indexMax]
maxIsoFlux = curveOfGrowth[indexMax]
return curveOfGrowth, maxIsoSma, maxIsoFlux
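# A minimal sketch (synthetic table, illustrative only): three circular
# isophotes of constant unit intensity give a cumulative flux of pi * sma**2.
def _exampleGrowthCurve():
    fake = {'sma': np.array([1.0, 2.0, 3.0]),
            'ell': np.zeros(3),
            'intens': np.ones(3)}
    cog, maxSma, maxFlux = ellipseGetGrowthCurve(fake)
    return cog  # ~ [3.14, 12.57, 28.27]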
def ellipseGetR50(ellipseRsma, isoGrowthCurve, simple=True):
"""Estimate R50 fom Ellipse output."""
if len(ellipseRsma) != len(isoGrowthCurve):
raise Exception("The x and y should have the same size!",
(len(ellipseRsma), len(isoGrowthCurve)))
else:
if simple:
isoRsma50 = ellipseRsma[np.nanargmin(
np.abs(isoGrowthCurve - 50.0))]
else:
isoRsma50 = (np.interp([50.0], isoGrowthCurve, ellipseRsma))[0]
return isoRsma50
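# A minimal sketch (synthetic curve in percent of total flux, illustrative
# only): the 50% level falls between the 2nd and 3rd radii; the simple mode
# picks the nearest sampled radius, while interpolation refines it to 1.75.
def _exampleGetR50():
    rsma = np.array([1.0, 1.5, 2.0, 2.5])
    cog = np.array([10.0, 40.0, 60.0, 100.0])
    return ellipseGetR50(rsma, cog, simple=False)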
def ellipseGetAvgCen(ellipseOut, outRad, minSma=2.0):
"""Get the Average X0/Y0."""
try:
xUse = ellipseOut['x0'][(ellipseOut['sma'] <= outRad) &
(np.isfinite(ellipseOut['x0_err'])) &
(np.isfinite(ellipseOut['y0_err']))]
yUse = ellipseOut['y0'][(ellipseOut['sma'] <= outRad) &
(np.isfinite(ellipseOut['x0_err'])) &
(np.isfinite(ellipseOut['y0_err']))]
iUse = ellipseOut['intens'][(ellipseOut['sma'] <= outRad) &
(np.isfinite(ellipseOut['x0_err'])) &
(np.isfinite(ellipseOut['y0_err']))]
except Exception:
xUse = ellipseOut['x0'][(ellipseOut['sma'] <= outRad)]
yUse = ellipseOut['y0'][(ellipseOut['sma'] <= outRad)]
iUse = ellipseOut['intens'][(ellipseOut['sma'] <= outRad)]
avgCenX = utils.numpy_weighted_mean(xUse, weights=iUse)
avgCenY = utils.numpy_weighted_mean(yUse, weights=iUse)
return avgCenX, avgCenY
def ellipseGetAvgGeometry(ellipseOut, outRad, minSma=2.0):
"""Get the Average Q and PA."""
tfluxE = ellipseOut['tflux_e']
ringFlux = np.append(tfluxE[0], [tfluxE[1:] - tfluxE[:-1]])
try:
eUse = ellipseOut['ell'][(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= minSma) &
(np.isfinite(ellipseOut['ell_err'])) &
(np.isfinite(ellipseOut['pa_err']))]
pUse = ellipseOut['pa_norm'][(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= minSma) &
(np.isfinite(ellipseOut['ell_err'])) &
(np.isfinite(ellipseOut['pa_err']))]
fUse = ringFlux[(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= minSma) &
(np.isfinite(ellipseOut['ell_err'])) &
(np.isfinite(ellipseOut['pa_err']))]
except Exception:
try:
eUse = ellipseOut['ell'][(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= 0.5) &
(np.isfinite(ellipseOut['ell_err'])) &
(np.isfinite(ellipseOut['pa_err']))]
pUse = ellipseOut['pa_norm'][(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= 0.5) &
(np.isfinite(ellipseOut['ell_err'])) &
(np.isfinite(ellipseOut['pa_err']))]
fUse = ringFlux[(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= 0.5) &
(np.isfinite(ellipseOut['ell_err'])) &
(np.isfinite(ellipseOut['pa_err']))]
except Exception:
eUse = ellipseOut['ell'][(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= 0.5)]
pUse = ellipseOut['pa_norm'][(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= 0.5)]
fUse = ringFlux[(ellipseOut['sma'] <= outRad) &
(ellipseOut['sma'] >= 0.5)]
avgQ = 1.0 - utils.numpy_weighted_mean(eUse, weights=fUse)
avgPA = utils.numpy_weighted_mean(pUse, weights=fUse)
return avgQ, avgPA
def ellipseFixNegIntens(ellipseOut):
"""Replace the negative value from the intensity."""
ellipseNew = copy.deepcopy(ellipseOut)
ellipseNew['intens'][ellipseNew['intens'] < 0.0] = np.nan
return ellipseNew
def ellipseGetOuterBoundary(ellipseOut, ratio=1.2, margin=0.2, polyOrder=12,
                            median=False, threshold=None):
"""Get the outer boundary of the output 1-D profile."""
try:
medianErr = np.nanmean(ellipseOut['int_err'])
if threshold is not None:
thre = threshold
else:
thre = medianErr
negRad = ellipseOut['rsma'][np.where(ellipseOut['intens'] <= thre)]
if (negRad is np.nan) or (len(negRad) < 3):
try:
uppIntens = np.nanmax(ellipseOut['intens']) * 0.01
indexUse = np.where(ellipseOut['intens'] <= uppIntens)
except Exception:
uppIntens = np.nanmax(ellipseOut['intens']) * 0.03
indexUse = np.where(ellipseOut['intens'] <= uppIntens)
radUse = ellipseOut['rsma'][indexUse]
# Try fit a polynomial first
try:
intensFit = utils.polyFit(ellipseOut['rsma'][indexUse],
ellipseOut['intens'][indexUse],
order=polyOrder)
negRad = radUse[np.where(intensFit <= medianErr)]
except Exception:
negRad = radUse[-5:-1] if len(radUse) >= 5 else radUse
print("!!! DANGEROUS : Outer boundary is not safe !!!")
if median:
outRsma = np.nanmedian(negRad)
else:
outRsma = np.nanmean(negRad)
return (outRsma ** 4.0) * ratio
except Exception as err:
print(err)
return None
def ellipsePlotSummary(ellipOut, image, maxRad=None, mask=None, radMode='rsma',
outPng='ellipse_summary.png', zp=27.0, threshold=None,
showZoom=False, useZscale=True, pngSize=16,
verbose=False, outRatio=1.2, oriName=None,
imgType='_imgsub', dpi=80):
"""
    Make a summary plot of the Ellipse run.

    Parameters:
        ellipOut : output table from Ellipse.
        image : FITS image from which the profile was extracted.
        maxRad : optional maximum radius to plot.
        mask : optional FITS mask that is blanked out in the image panel.
        radMode : radius scale for the x-axis ('rsma', 'sma', or 'log').
"""
""" Left side: SBP """
reg1 = [0.08, 0.07, 0.45, 0.33]
reg2 = [0.08, 0.40, 0.45, 0.15]
reg3 = [0.08, 0.55, 0.45, 0.15]
reg4 = [0.08, 0.70, 0.45, 0.15]
reg5 = [0.08, 0.85, 0.45, 0.14]
""" Right side: Curve of growth & IsoMap """
reg6 = [0.60, 0.07, 0.38, 0.29]
reg7 = [0.60, 0.36, 0.38, 0.15]
reg8 = [0.60, 0.55, 0.38, 0.39]
fig = plt.figure(figsize=(pngSize, pngSize))
""" Left """
ax1 = fig.add_axes(reg1)
ax2 = fig.add_axes(reg2)
ax3 = fig.add_axes(reg3)
ax4 = fig.add_axes(reg4)
ax5 = fig.add_axes(reg5)
""" Right """
ax6 = fig.add_axes(reg6)
ax7 = fig.add_axes(reg7)
ax8 = fig.add_axes(reg8)
""" Image """
img = fits.open(image)[0].data
imgX, imgY = img.shape
imgMsk = copy.deepcopy(img)
if useZscale:
try:
imin, imax = utils.zscale(imgMsk, contrast=0.25, samples=500)
except Exception:
imin, imax = np.nanmin(imgMsk), np.nanmax(imgMsk)
else:
imin = np.percentile(np.ravel(imgMsk), 0.01)
imax = np.percentile(np.ravel(imgMsk), 0.95)
if mask is not None:
msk = fits.open(mask)[0].data
imgMsk[msk > 0] = np.nan
""" Find the proper outer boundary """
sma = ellipOut['sma']
radOuter = ellipseGetOuterBoundary(ellipOut,
ratio=outRatio,
threshold=threshold)
    if (radOuter is None) or (not np.isfinite(radOuter)):
if verbose:
print(WAR)
print(" XX radOuter is NaN, use 0.80 * max(SMA) instead !")
radOuter = np.nanmax(sma) * 0.80
indexUse = np.where(ellipOut['sma'] <= (radOuter * 1.3))
if verbose:
print(SEP)
print("### OutRadius : ", radOuter)
""" Get growth curve """
curveOri = ellipOut['growth_ori']
curveSub = ellipOut['growth_sub']
curveCor = ellipOut['growth_cor']
growthCurveOri = -2.5 * np.log10(curveOri) + zp
growthCurveSub = -2.5 * np.log10(curveSub) + zp
growthCurveCor = -2.5 * np.log10(curveCor) + zp
maxIsoFluxO = np.nanmax(ellipOut['growth_ori'][indexUse])
magFluxOri100 = -2.5 * np.log10(maxIsoFluxO) + zp
if verbose:
print("### MagTot OLD : ", magFluxOri100)
maxIsoFluxS = np.nanmax(ellipOut['growth_sub'][indexUse])
magFluxSub100 = -2.5 * np.log10(maxIsoFluxS) + zp
if verbose:
print("### MagTot SUB : ", magFluxSub100)
maxIsoFluxC = np.nanmax(ellipOut['growth_cor'][indexUse])
magFlux50 = -2.5 * np.log10(maxIsoFluxC * 0.50) + zp
magFlux100 = -2.5 * np.log10(maxIsoFluxC) + zp
if verbose:
print("### MagTot NEW : ", magFlux100)
indMaxFlux = np.nanargmax(ellipOut['growth_cor'][indexUse])
maxIsoSbp = ellipOut['sbp_sub'][indMaxFlux]
if verbose:
print("### MaxIsoSbp : ", maxIsoSbp)
""" Type of Radius """
    if radMode == 'rsma':
rad = ellipOut['rsma']
radStr = '$R^{1/4}\ (\mathrm{pix}^{1/4})$'
minRad = 0.41 if 0.41 >= np.nanmin(
ellipOut['rsma']) else np.nanmin(ellipOut['rsma'])
imgR50 = (imgX / 2.0) ** 0.25
radOut = (radOuter * 1.2) ** 0.25
radOut = radOut if radOut <= imgR50 else imgR50
if maxRad is None:
maxRad = np.nanmax(rad)
maxSma = np.nanmax(ellipOut['sma'])
else:
maxSma = maxRad
maxRad = maxRad ** 0.25
    elif radMode == 'sma':
rad = ellipOut['sma']
radStr = '$R\ (\mathrm{pix})$'
minRad = 0.05 if 0.05 >= np.nanmin(
ellipOut['sma']) else np.nanmin(ellipOut['sma'])
imgR50 = (imgX / 2.0)
radOut = (radOuter * 1.2)
radOut = radOut if radOut <= imgR50 else imgR50
if maxRad is None:
maxRad = maxSma = np.nanmax(rad)
else:
maxSma = maxRad
    elif radMode == 'log':
rad = ellipOut['sma']
rad = np.log10(rad)
radStr = '$\log\ (R/\mathrm{pixel})$'
minRad = 0.01 if 0.01 >= np.log10(
np.nanmin(ellipOut['sma'])) else np.log10(
np.nanmin(ellipOut['sma']))
imgR50 = np.log10(imgX / 2.0)
radOut = np.log10(radOuter * 1.2)
radOut = radOut if radOut <= imgR50 else imgR50
if maxRad is None:
maxRad = np.nanmax(rad)
maxSma = np.nanmax(ellipOut['sma'])
else:
maxSma = maxRad
maxRad = np.log10(maxRad)
else:
print(WAR)
raise Exception('### Wrong type of Radius: sma, rsma, log')
""" ax1 SBP """
ax1.minorticks_on()
ax1.invert_yaxis()
ax1.tick_params(axis='both', which='major', labelsize=22, pad=8)
ax1.set_xlabel(radStr, fontsize=30)
ax1.set_ylabel('${\mu}\ (\mathrm{mag}/\mathrm{arcsec}^2)$', fontsize=28)
sbp_ori = ellipOut['sbp_ori']
sbp_cor = ellipOut['sbp_cor']
sbp_err = ellipOut['sbp_err']
ax1.fill_between(rad[indexUse],
(sbp_cor[indexUse] - sbp_err[indexUse]),
(sbp_cor[indexUse] + sbp_err[indexUse]),
facecolor='r', alpha=0.3)
ax1.plot(rad[indexUse], sbp_ori[indexUse],
'--', color='k', linewidth=3.0)
"""
ax1.plot(rad[indexUse], sbp_sub[indexUse], '-',
color='b', linewidth=3.5)
"""
ax1.plot(rad[indexUse], sbp_cor[indexUse],
'-', color='r', linewidth=3.0)
sbpBuffer = 0.75
minSbp = np.nanmin(ellipOut['sbp_low'][indexUse]) - sbpBuffer
maxSbp = np.nanmax(ellipOut['sbp_upp'][indexUse]) + sbpBuffer
"""
maxSbp = maxIsoSbp + sbpBuffer
"""
maxSbp = maxSbp if maxSbp >= 29.0 else 28.9
maxSbp = maxSbp if maxSbp <= 32.0 else 31.9
ax1.set_xlim(minRad, radOut)
ax1.set_ylim(maxSbp, minSbp)
ax1.text(0.49, 0.86,
'$\mathrm{mag}_{\mathrm{tot,cor}}=%5.2f$' % magFlux100,
fontsize=30, transform=ax1.transAxes)
""" ax2 Ellipticity """
ax2.minorticks_on()
ax2.tick_params(axis='both', which='major', labelsize=20, pad=8)
ax2.yaxis.set_major_locator(MaxNLocator(prune='lower'))
ax2.yaxis.set_major_locator(MaxNLocator(prune='upper'))
ax2.locator_params(axis='y', tight=True, nbins=4)
ax2.set_ylabel('$e$', fontsize=30)
if verbose:
print("### AvgEll", (1.0 - ellipOut['avg_q'][0]))
ax2.axhline((1.0 - ellipOut['avg_q'][0]),
color='k', linestyle='--', linewidth=2)
ax2.fill_between(rad[indexUse],
ellipOut['ell'][indexUse] + ellipOut['ell_err'][indexUse],
ellipOut['ell'][indexUse] - ellipOut['ell_err'][indexUse],
facecolor='r', alpha=0.25)
ax2.plot(rad[indexUse], ellipOut['ell'][
indexUse], '-', color='r', linewidth=2.0)
ax2.xaxis.set_major_formatter(NullFormatter())
ax2.set_xlim(minRad, radOut)
ellBuffer = 0.02
minEll = np.nanmin(ellipOut['ell'][indexUse] -
ellipOut['ell_err'][indexUse])
maxEll = np.nanmax(ellipOut['ell'][indexUse] +
ellipOut['ell_err'][indexUse])
ax2.set_ylim(minEll - ellBuffer, maxEll + ellBuffer)
""" ax3 PA """
ax3.minorticks_on()
ax3.tick_params(axis='both', which='major', labelsize=20, pad=8)
ax3.yaxis.set_major_locator(MaxNLocator(prune='lower'))
ax3.yaxis.set_major_locator(MaxNLocator(prune='upper'))
ax3.locator_params(axis='y', tight=True, nbins=4)
ax3.set_ylabel('$\mathrm{PA}\ (\mathrm{deg})$', fontsize=23)
medPA = np.nanmedian(ellipOut['pa_norm'][indexUse])
avgPA = ellipOut['avg_pa'][0]
if (avgPA - medPA >= 85.0) and (avgPA <= 92.0):
avgPA -= 180.0
elif (avgPA - medPA <= -85.0) and (avgPA >= -92.0):
avgPA += 180.0
if verbose:
print("### AvgPA", avgPA)
ax3.axhline(avgPA, color='k', linestyle='--', linewidth=3.0)
ax3.fill_between(rad[indexUse],
ellipOut['pa_norm'][indexUse] +
ellipOut['pa_err'][indexUse],
ellipOut['pa_norm'][indexUse] -
ellipOut['pa_err'][indexUse],
facecolor='r', alpha=0.25)
ax3.plot(rad[indexUse], ellipOut['pa_norm'][
indexUse], '-', color='r', linewidth=2.0)
ax3.xaxis.set_major_formatter(NullFormatter())
ax3.set_xlim(minRad, radOut)
paBuffer = 4.0
minPA = np.nanmin(ellipOut['pa_norm'][indexUse] -
ellipOut['pa_err'][indexUse])
maxPA = np.nanmax(ellipOut['pa_norm'][indexUse] +
ellipOut['pa_err'][indexUse])
minPA = minPA if minPA >= -110.0 else -100.0
maxPA = maxPA if maxPA <= 110.0 else 100.0
ax3.set_ylim(minPA - paBuffer, maxPA + paBuffer)
""" ax4 X0/Y0 """
ax4.minorticks_on()
ax4.tick_params(axis='both', which='major', labelsize=20, pad=8)
ax4.yaxis.set_major_locator(MaxNLocator(prune='lower'))
ax4.yaxis.set_major_locator(MaxNLocator(prune='upper'))
ax4.locator_params(axis='y', tight=True, nbins=4)
ax4.set_ylabel('$\mathrm{X}_{0}\ \mathrm{or}\ $' +
'$\mathrm{Y}_{0}\ (\mathrm{pix})$', fontsize=23)
if verbose:
print("### AvgX0", ellipOut['avg_x0'][0])
print("### AvgY0", ellipOut['avg_y0'][0])
ax4.axhline(ellipOut['avg_x0'][0], linestyle='--',
color='r', alpha=0.6, linewidth=3.0)
ax4.fill_between(rad[indexUse],
ellipOut['x0'][indexUse] + ellipOut['x0_err'][indexUse],
ellipOut['x0'][indexUse] - ellipOut['x0_err'][indexUse],
facecolor='r', alpha=0.25)
ax4.plot(rad[indexUse], ellipOut['x0'][indexUse], '-', color='r',
linewidth=2.0, label='X0')
ax4.axhline(ellipOut['avg_y0'][0], linestyle='--',
color='b', alpha=0.6, linewidth=3.0)
ax4.fill_between(rad[indexUse],
ellipOut['y0'][indexUse] + ellipOut['y0_err'][indexUse],
ellipOut['y0'][indexUse] - ellipOut['y0_err'][indexUse],
facecolor='b', alpha=0.25)
ax4.plot(rad[indexUse], ellipOut['y0'][indexUse], '-', color='b',
linewidth=2.0, label='Y0')
ax4.xaxis.set_major_formatter(NullFormatter())
ax4.set_xlim(minRad, radOut)
xBuffer = 3.0
minX0 = np.nanmin(ellipOut['x0'][indexUse])
maxX0 = np.nanmax(ellipOut['x0'][indexUse])
minY0 = np.nanmin(ellipOut['y0'][indexUse])
maxY0 = np.nanmax(ellipOut['y0'][indexUse])
minCen = minX0 if minX0 <= minY0 else minY0
maxCen = maxX0 if maxX0 >= maxY0 else maxY0
ax4.set_ylim(minCen - xBuffer, maxCen + xBuffer)
""" ax5 A4/B4 """
ax5.minorticks_on()
ax5.tick_params(axis='both', which='major', labelsize=20, pad=8)
ax5.yaxis.set_major_locator(MaxNLocator(prune='lower'))
ax5.yaxis.set_major_locator(MaxNLocator(prune='upper'))
ax5.locator_params(axis='y', tight=True, nbins=4)
ax5.set_ylabel('$a_4\ \mathrm{or}\ b_4$', fontsize=23)
ax5.axhline(0.0, linestyle='-', color='k', alpha=0.3)
ax5.fill_between(rad[indexUse],
ellipOut['a4'][indexUse] + ellipOut['a4_err'][indexUse],
ellipOut['a4'][indexUse] - ellipOut['a4_err'][indexUse],
facecolor='r', alpha=0.25)
ax5.plot(rad[indexUse], ellipOut['a4'][indexUse], '-', color='r',
linewidth=2.0, label='A4')
ax5.fill_between(rad[indexUse],
ellipOut['b4'][indexUse] + ellipOut['b4_err'][indexUse],
ellipOut['b4'][indexUse] - ellipOut['b4_err'][indexUse],
facecolor='b', alpha=0.25)
ax5.plot(rad[indexUse], ellipOut['b4'][indexUse], '-', color='b',
linewidth=2.0, label='B4')
ax5.xaxis.set_major_formatter(NullFormatter())
ax5.set_xlim(minRad, radOut)
abBuffer = 0.02
minA4 = np.nanmin(ellipOut['a4'][indexUse])
minB4 = np.nanmin(ellipOut['b4'][indexUse])
maxA4 = np.nanmax(ellipOut['a4'][indexUse])
maxB4 = np.nanmax(ellipOut['b4'][indexUse])
minAB = minA4 if minA4 <= minB4 else minB4
maxAB = maxA4 if maxA4 >= maxB4 else maxB4
ax5.set_ylim(minAB - abBuffer, maxAB + abBuffer)
""" ax6 Growth Curve """
ax6.minorticks_on()
ax6.tick_params(axis='both', which='major', labelsize=16, pad=8)
ax6.yaxis.set_major_locator(MaxNLocator(prune='lower'))
ax6.yaxis.set_major_locator(MaxNLocator(prune='upper'))
ax6.set_xlabel(radStr, fontsize=30)
ax6.set_ylabel('$\mathrm{Curve\ of\ Growth}\ (\mathrm{mag})$',
fontsize=20)
ax6.axhline(magFlux100, linestyle='-', color='k', alpha=0.5, linewidth=2,
label='$\mathrm{mag}_{100}$')
ax6.axhline(magFlux50, linestyle='--', color='k', alpha=0.5, linewidth=2,
label='$\mathrm{mag}_{50}$')
"""
ax6.axvline(imgR50, linestyle='-', color='g', alpha=0.4, linewidth=2.5)
"""
ax6.plot(rad, growthCurveOri, '--', color='k', linewidth=3.5,
label='$\mathrm{CoG}_{\mathrm{old}}$')
ax6.plot(rad, growthCurveSub, '-.', color='b', linewidth=3.5,
label='$\mathrm{CoG}_{\mathrm{sub}}$')
ax6.plot(rad, growthCurveCor, '-', color='r', linewidth=4.0,
label='$\mathrm{CoG}_{\mathrm{cor}}$')
ax6.axvline(radOut, linestyle='-', color='g', alpha=0.6, linewidth=5.0)
ax6.legend(loc=[0.55, 0.06], shadow=True, fancybox=True,
fontsize=18)
minCurve = (magFlux100 - 0.9)
maxCurve = (magFlux100 + 2.9)
curveUse = growthCurveOri[np.isfinite(growthCurveOri)]
radTemp = rad[np.isfinite(growthCurveOri)]
radInner = radTemp[curveUse <= maxCurve][0]
"""
ax6.set_xlim(minRad, maxRad)
"""
ax6.set_xlim((radInner - 0.02), (maxRad + 0.2))
ax6.set_ylim(maxCurve, minCurve)
""" ax7 Intensity Curve """
ax7.minorticks_on()
ax7.tick_params(axis='both', which='major', labelsize=16, pad=10)
ax7.yaxis.set_major_locator(MaxNLocator(prune='lower'))
ax7.yaxis.set_major_locator(MaxNLocator(prune='upper'))
ax7.locator_params(axis='y', tight=True, nbins=4)
"""
ax7.axvline(imgR50, linestyle='-', color='k', alpha=0.4,
linewidth=2.5)
"""
bkgVal = ellipOut['intens_bkg'][0]
ax7.axhline(0.0, linestyle='-', color='k', linewidth=2.5, alpha=0.8)
ax7.axhline(bkgVal, linestyle='--', color='c', linewidth=2.5, alpha=0.6)
ax7.fill_between(rad,
(rad * 0.0 - 1.0 * np.nanmedian(ellipOut['int_err'])),
(rad * 0.0 + 1.0 * np.nanmedian(ellipOut['int_err'])),
facecolor='k', edgecolor='none', alpha=0.15)
ax7.fill_between(rad, ellipOut['intens_cor'] - ellipOut['int_err'],
ellipOut['intens_cor'] + ellipOut['int_err'],
facecolor='r', alpha=0.2)
ax7.plot(rad, ellipOut['intens'], '--', color='k', linewidth=3.0)
ax7.plot(rad, ellipOut['intens_sub'], '-.', color='b', linewidth=3.0)
ax7.plot(rad, ellipOut['intens_cor'], '-', color='r', linewidth=3.5)
""" TODO: Could be problematic """
indexOut = np.where(ellipOut['intens'] <= (0.003 *
np.nanmax(ellipOut['intens'])))
minOut = np.nanmin(ellipOut['intens'][indexOut] -
ellipOut['int_err'][indexOut])
maxOut = np.nanmax(ellipOut['intens'][indexOut] +
ellipOut['int_err'][indexOut])
sepOut = (maxOut - minOut) / 4.0
minY = (minOut - sepOut) if (minOut - sepOut) >= 0.0 else (-1.0 * sepOut)
ax7.xaxis.set_major_formatter(NullFormatter())
ax7.set_xlim((radInner - 0.02), (maxRad + 0.2))
ax7.set_ylim((minY - sepOut), maxOut)
ax7.axvline(radOut, linestyle='-', color='g', alpha=0.6, linewidth=5.0)
""" ax8 IsoPlot """
if oriName is not None:
oriFile = os.path.basename(oriName)
imgTitle = oriFile.replace('.fits', '')
else:
imgFile = os.path.basename(image)
imgTitle = imgFile.replace('.fits', '')
if imgType is not None:
imgTitle = imgTitle.replace(imgType, '')
ax8.tick_params(axis='both', which='major', labelsize=20)
ax8.yaxis.set_major_locator(MaxNLocator(prune='lower'))
ax8.yaxis.set_major_locator(MaxNLocator(prune='upper'))
ax8.set_title(imgTitle, fontsize=25, fontweight='bold')
ax8.title.set_position((0.5, 1.05))
galX0 = ellipOut['avg_x0'][0]
galY0 = ellipOut['avg_y0'][0]
imgSizeX, imgSizeY = img.shape
if (galX0 > maxSma) and (galY0 > maxSma) and showZoom:
        zoomReg = imgMsk[int(galX0 - maxSma):int(galX0 + maxSma),
                         int(galY0 - maxSma):int(galY0 + maxSma)]
# Define the new center of the cropped images
xPad = (imgSizeX / 2.0 - maxSma)
yPad = (imgSizeY / 2.0 - maxSma)
else:
zoomReg = imgMsk
xPad = 0
yPad = 0
# Show the image
ax8.imshow(np.arcsinh(zoomReg), interpolation="none",
vmin=imin, vmax=imax, cmap=IMG_CMAP, origin='lower')
# Get the Shapes
ellipIso = convIso2Ell(ellipOut, xpad=xPad, ypad=yPad)
# Overlay the ellipses on the image
for ii, e in enumerate(ellipIso):
if len(ellipIso) >= 30:
if (ii >= 6) and (ii <= 30) and (ii % 5 == 0):
ax8.add_artist(e)
e.set_clip_box(ax8.bbox)
e.set_alpha(0.4)
e.set_edgecolor('r')
e.set_facecolor('none')
e.set_linewidth(1.0)
elif (ii > 30):
ax8.add_artist(e)
e.set_clip_box(ax8.bbox)
e.set_alpha(0.8)
e.set_edgecolor('r')
e.set_facecolor('none')
e.set_linewidth(2.0)
else:
if (ii >= 6):
ax8.add_artist(e)
e.set_clip_box(ax8.bbox)
e.set_alpha(0.8)
e.set_edgecolor('r')
e.set_facecolor('none')
fig.savefig(outPng, dpi=dpi)
plt.close(fig)
return
def saveEllipOut(ellipOut, prefix, ellipCfg=None, verbose=True,
pkl=True, cfg=False):
"""
Save the Ellipse output to file.
Parameters:
"""
outPkl = prefix + '.pkl'
outCfg = prefix + '.cfg'
""" Save a Pickle file """
if pkl:
io.save_to_pickle(ellipOut, outPkl)
if not os.path.isfile(outPkl):
raise Exception("### Something is wrong with the .pkl file")
""" Save the current configuration to a .pkl file """
if cfg:
if ellipCfg is not None:
io.save_to_pickle(ellipCfg, outCfg)
if not os.path.isfile(outCfg):
raise Exception("### Something is wrong with the .pkl file")
def galSBP(image, mask=None, galX=None, galY=None, inEllip=None,
maxSma=None, iniSma=6.0, galR=20.0, galQ=0.9, galPA=0.0,
pix=0.168, bkg=0.00, stage=3, minSma=0.0,
gain=3.0, expTime=1.0, zpPhoto=27.0,
maxTry=4, minIt=20, maxIt=200, outRatio=1.2,
ellipStep=0.12, uppClip=3.0, lowClip=3.0,
nClip=2, fracBad=0.5, intMode="mean",
plMask=True, conver=0.05, recenter=True,
verbose=True, linearStep=False, saveOut=True, savePng=True,
olthresh=0.5, harmonics=False, outerThreshold=None,
updateIntens=True, psfSma=6.0, suffix='', useZscale=True,
hdu=0, imgType='_imgsub', useTflux=False,
isophote=None, xttools=None):
"""
Running Ellipse to Extract 1-D profile.
stage = 1: All Free
2: Center Fixed
3: All geometry fixd
4: Force Photometry, must have inEllip
:returns: TODO
"""
gc.collect()
verStr = 'yes' if verbose else 'no'
""" Minimum starting radius for Ellipsein pixel """
minIniSma = 10.0
pixArea = (pix ** 2.0)
""" Check input files """
if os.path.islink(image):
imgOri = os.readlink(image)
else:
imgOri = image
if not os.path.isfile(imgOri):
raise Exception("### Can not find the input image: %s !" % imgOri)
"""
Check if x_isophote.e and x_ttools.e exist if necessary
"""
if (not os.path.isfile(isophote)) or (not os.path.isfile(isophote)):
raise Exception("Can not find x_isophote.e: %s" % isophote)
if (not os.path.isfile(xttools)) or (not os.path.isfile(xttools)):
raise Exception("Can not find x_ttools.e: %s" % xttools)
"""
New approach, save the HDU into a temp fits file
"""
data = (fits.open(imgOri))[hdu].data
imgHdu = fits.PrimaryHDU(data)
imgHduList = fits.HDUList([imgHdu])
while True:
imgTemp = 'temp_' + utils.random_string() + '.fits'
if not os.path.isfile(imgTemp):
imgHduList.writeto(imgTemp)
break
""" Conver the .fits mask to .pl file if necessary """
if mask is not None:
if os.path.islink(mask):
mskOri = os.readlink(mask)
else:
mskOri = mask
if not os.path.isfile(mskOri):
try:
os.remove(imgTemp)
except Exception:
pass
raise Exception("### Can not find the input mask: %s !" % mskOri)
if plMask:
plFile = maskFits2Pl(imgTemp, mskOri)
plFile2 = maskFits2Pl(imgTemp, mskOri, replace=True)
if not os.path.isfile(plFile):
try:
os.remove(imgTemp)
except Exception:
pass
raise Exception("### Can not find the mask: %s !" % plFile)
if not os.path.isfile(plFile2):
try:
os.remove(imgTemp)
except Exception:
pass
raise Exception("### Can not find the mask: %s !" % plFile2)
imageUse = imgTemp
else:
imageNew = imageMaskNaN(imgTemp, mskOri, verbose=verbose)
if not os.path.isfile(imageNew):
try:
os.remove(imgTemp)
except Exception:
pass
raise Exception(
"### Can not find the NaN-Masked image: %s" % imageNew)
imageUse = imageNew
else:
imageUse = imgTemp
mskOri = None
""" Estimate the maxSMA if none is provided """
if (maxSma is None) or (galX is None) or (galY is None):
dimX, dimY = data.shape
imgSize = dimX if (dimX >= dimY) else dimY
imgR = (imgSize / 2.0)
imgX = (dimX / 2.0)
imgY = (dimY / 2.0)
if maxSma is None:
maxSma = imgR * 1.6
if galX is None:
galX = imgX
if galY is None:
galY = imgY
""" Inisital radius for Ellipse """
iniSma = iniSma if iniSma >= minIniSma else minIniSma
if verbose:
print(SEP)
print("### galX, galY : ", galX, galY)
print("### galR : ", galR)
print("### iniSma, maxSma : ", iniSma, maxSma)
print("### Stage : ", stage)
print("### Step : ", ellipStep)
""" Check the stage """
if stage == 1:
hcenter, hellip, hpa = False, False, False
elif stage == 2:
hcenter, hellip, hpa = True, False, False
elif stage == 3:
hcenter, hellip, hpa = True, True, True
elif stage == 4:
hcenter, hellip, hpa = True, True, True
if (inEllip is None) or (not os.path.isfile(inEllip)):
try:
os.remove(imgTemp)
except Exception:
pass
try:
os.remove(plFile)
os.remove(plFile2)
except Exception:
pass
raise Exception(
"### Can not find the input ellip file: %s !" % inEllip)
else:
try:
os.remove(imgTemp)
except Exception:
pass
try:
os.remove(plFile)
os.remove(plFile2)
except Exception:
pass
raise Exception("### Available step: 1 , 2 , 3 , 4")
""" Get the default Ellipse settings """
if verbose:
print("## Set up the Ellipse configuration")
galEll = (1.0 - galQ)
ellipCfg = defaultEllipse(galX, galY, maxSma, ellip0=galEll, pa0=galPA,
sma0=iniSma, minsma=minSma, linear=linearStep,
step=ellipStep, recenter=recenter,
conver=conver, hcenter=hcenter, hellip=hellip,
hpa=hpa, minit=minIt, maxit=maxIt,
olthresh=olthresh, mag0=zpPhoto,
integrmode=intMode, usclip=uppClip,
lsclip=lowClip, nclip=nClip, fflag=fracBad,
harmonics=harmonics)
""" Name of the output files """
if suffix == '':
        suffix = '_ellip_' + str(stage).strip()
elif suffix[-1] != '_':
suffix = '_ellip_' + suffix + '_' + str(stage).strip()
else:
suffix = '_ellip_' + suffix + str(stage).strip()
outBin = image.replace('.fits', suffix + '.bin')
outTab = image.replace('.fits', suffix + '.tab')
outCdf = image.replace('.fits', suffix + '.cdf')
if isophote is not None:
outPar = outBin.replace('.bin', '.par')
""" Call the STSDAS.ANALYSIS.ISOPHOTE package """
if isophote is None:
if verbose:
print("## Call STSDAS.ANALYSIS.ISOPHOTE() ")
iraf.stsdas()
iraf.analysis()
iraf.isophote()
""" Start the Ellipse Run """
attempts = 0
while attempts < maxTry:
if verbose:
print("## Start the Ellipse Run: Attempt ", (attempts + 1))
#try:
""" Config the parameters for ellipse """
if isophote is None:
unlearnEllipse()
setupEllipse(ellipCfg)
else:
parOk = writeEllipPar(ellipCfg, imageUse, outBin, outPar,
inEllip=inEllip)
if not parOk:
raise Exception("XXX Cannot find %s" % outPar)
""" Ellipse run """
# Check and remove outputs from the previous Ellipse run
if os.path.exists(outBin):
os.remove(outBin)
if os.path.exists(outTab):
os.remove(outTab)
if os.path.exists(outCdf):
os.remove(outCdf)
# Start the Ellipse fitting
if verbose:
print(SEP)
print("### Origin Image : %s" % imgOri)
print("### Input Image : %s" % imageUse)
print("### Output Binary : %s" % outBin)
if isophote is None:
if stage != 4:
iraf.ellipse(input=imageUse, output=outBin, verbose=verStr)
else:
print("### Input Binary : %s" % inEllip)
iraf.ellipse(input=imageUse, output=outBin,
inellip=inEllip, verbose=verStr)
else:
if os.path.isfile(outPar):
ellCommand = isophote + " ellipse "
ellCommand += ' @%s' % outPar.strip()
os.system(ellCommand)
else:
raise Exception("XXX Can not find par file %s" % outPar)
# Check if the Ellipse run is finished
if not os.path.isfile(outBin):
raise Exception("XXX Can not find the outBin: %s!" % outBin)
else:
            # Remove the existing .tab and .cdf files
if os.path.isfile(outTab):
os.remove(outTab)
if os.path.isfile(outCdf):
os.remove(outCdf)
if xttools is None:
iraf.unlearn('tdump')
iraf.tdump.columns = ''
iraf.tdump(outBin, datafil=outTab, cdfile=outCdf)
else:
tdumpCommand = xttools + ' tdump '
tdumpCommand += ' table=%s ' % outBin.strip()
tdumpCommand += ' datafile=%s ' % outTab.strip()
tdumpCommand += ' cdfile=%s ' % outCdf.strip()
tdumpCommand += ' pfile=STDOUT pwidth=-1 '
tdumpCommand += ' columns="" rows="-" mode="al"'
tdumpOut = os.system(tdumpCommand)
if tdumpOut != 0:
raise Exception("XXX Can not convert the binary tab")
# Read in the Ellipse output tab
ellipOut = readEllipseOut(outTab, zp=zpPhoto, pix=pix,
exptime=expTime, bkg=bkg,
harmonics=harmonics,
minSma=psfSma, useTflux=useTflux)
# Get the outer boundary of the isophotes
radOuter = ellipseGetOuterBoundary(ellipOut,
ratio=outRatio)
sma = ellipOut['sma']
if radOuter is None:
print("XXX radOuter is NaN, use 0.8 * max(SMA) instead !")
radOuter = np.nanmax(sma) * 0.8
"""
Update the Intensity
Note that this avgBkg is different with the input bkg value
"""
if updateIntens:
indexBkg = np.where(ellipOut['sma'] > radOuter)
if indexBkg[0].shape[0] > 0:
try:
intens1 = ellipOut['intens'][indexBkg]
clipArr, clipL, clipU = sigmaclip(intens1,
2.5, 2.0)
avgOut = np.nanmedian(clipArr)
intens2 = ellipOut['intens_sub'][indexBkg]
clipArr, clipL, clipU = sigmaclip(intens2,
2.5, 2.0)
avgBkg = np.nanmedian(clipArr)
if not np.isfinite(avgBkg):
avgBkg = 0.0
avgOut = 0.0
except Exception:
avgOut = 0.0
avgBkg = 0.0
else:
avgOut = 0.0
avgBkg = 0.0
else:
avgOut = 0.0
avgBkg = 0.0
if verbose:
print(SEP)
print("### Input background value : ", bkg)
print("### 1-D SBP background value : ", avgOut)
print("### Current outer background : ", avgBkg)
""" Do not correct this ? """
ellipOut.add_column(
Column(name='avg_bkg', data=(sma * 0.0 + avgBkg)))
intensCor = (ellipOut['intens_sub'] - avgBkg)
ellipOut.add_column(Column(name='intens_cor', data=intensCor))
sbpCor = zpPhoto - 2.5 * np.log10(intensCor / (pixArea *
expTime))
ellipOut.add_column(Column(name='sbp_cor', data=sbpCor))
""" Update the curve of growth """
cogCor, mm, ff = ellipseGetGrowthCurve(
ellipOut, intensArr=intensCor, useTflux=useTflux)
ellipOut.add_column(Column(name='growth_cor', data=(cogCor)))
""" Update the outer radius """
radOuter = ellipseGetOuterBoundary(ellipOut, ratio=outRatio)
if not np.isfinite(radOuter):
if verbose:
print(" XXX radOuter is NaN, use 0.80 * max(SMA) !")
radOuter = np.nanmax(sma) * 0.80
ellipOut.add_column(
Column(name='rad_outer', data=(sma*0.0 + radOuter)))
""" Update the total magnitude """
indexUse = np.where(ellipOut['sma'] <= (radOuter * outRatio))
maxIsoFluxO = np.nanmax(ellipOut['growth_ori'][indexUse])
maxIsoFluxS = np.nanmax(ellipOut['growth_sub'][indexUse])
maxIsoFluxC = np.nanmax(ellipOut['growth_cor'][indexUse])
magFluxTotC = -2.5 * np.log10(maxIsoFluxC) + zpPhoto
ellipOut.add_column(
Column(name='mag_tot', data=(sma*0.0 + magFluxTotC)))
magFluxTotO = -2.5 * np.log10(maxIsoFluxO) + zpPhoto
ellipOut.add_column(
Column(name='mag_tot_ori', data=(sma*0.0 + magFluxTotO)))
magFluxTotS = -2.5 * np.log10(maxIsoFluxS) + zpPhoto
ellipOut.add_column(
Column(name='mag_tot_sub', data=(sma*0.0 + magFluxTotS)))
""" Save a summary figure """
if savePng:
outPng = image.replace('.fits', suffix + '.png')
try:
ellipsePlotSummary(ellipOut, imgTemp, maxRad=None,
mask=mskOri, outPng=outPng,
threshold=outerThreshold,
useZscale=useZscale,
oriName=image, verbose=verbose,
imgType=imgType)
except Exception:
warnings.warn("XXX Can not generate: %s" % outPng)
""" Save the results """
if saveOut:
outPre = image.replace('.fits', suffix)
saveEllipOut(ellipOut, outPre, ellipCfg=ellipCfg, verbose=verbose)
gc.collect()
break
#except Exception as error:
# print("### ELLIPSE RUN FAILED IN ATTEMPT: %2d" % attempts)
# print("### Error Information : ", error)
# if verbose:
# print("### !!! Make the Ellipse Run A Little Bit Easier !")
# ellipCfg = easierEllipse(ellipCfg, degree=attempts)
# attempts += 1
# ellipOut = None
gc.collect()
if not os.path.isfile(outBin):
ellipOut = None
print("### ELLIPSE RUN FAILED AFTER %3d ATTEMPTS!!!" % maxTry)
"""
Remove the temp files
"""
try:
os.remove(imgTemp)
except Exception:
pass
try:
os.remove(plFile)
os.remove(plFile2)
except Exception:
pass
"""
Remove some outputs to save space
"""
try:
os.remove(outCdf)
except Exception:
pass
try:
os.remove(outTab + '_back')
except Exception:
pass
return ellipOut, outBin
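# Illustrative use of galSBP (file names and binary locations are
# hypothetical):
#
#     ellOut, outBin = galSBP('galaxy_i.fits', mask='galaxy_i_msk.fits',
#                             galX=401.0, galY=401.0, maxSma=300.0,
#                             stage=3, pix=0.168, zpPhoto=27.0,
#                             isophote='/path/to/x_isophote.e',
#                             xttools='/path/to/x_ttools.e')
#
# A stage=4 (forced photometry) run would additionally pass inEllip with
# the .bin output of an earlier fit.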
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("image", help="Name of the input image")
parser.add_argument("--suffix",
help="Suffix of the output files",
default='')
parser.add_argument("--mask", dest='mask',
help="Name of the input mask",
default=None)
parser.add_argument("--intMode", dest='intMode',
help="Method for integration",
default='mean')
parser.add_argument('--x0', dest='galX',
help='Galaxy center in X-dimension',
type=float, default=None)
parser.add_argument('--y0', dest='galY',
help='Galaxy center in Y-dimension',
type=float, default=None)
parser.add_argument('--inEllip', dest='inEllip',
help='Input Ellipse table',
default=None)
parser.add_argument('--expTime', dest='expTime',
help='Exposure time of the image',
type=float, default=1.0)
parser.add_argument('--minSma', dest='minSma',
help='Minimum radius for Ellipse Run',
type=float, default=0.0)
parser.add_argument('--maxSma', dest='maxSma',
help='Maximum radius for Ellipse Run',
type=float, default=None)
parser.add_argument('--iniSma', dest='iniSma',
help='Initial radius for Ellipse Run',
type=float, default=10.0)
parser.add_argument('--galR', dest='galR',
help='Typical size of the galaxy',
type=float, default=20.0)
parser.add_argument('--galQ', dest='galQ',
help='Typical axis ratio of the galaxy',
type=float, default=0.9)
parser.add_argument('--galPA', dest='galPA',
help='Typical PA of the galaxy',
type=float, default=0.0)
parser.add_argument('--stage', dest='stage',
help='Stage of Ellipse Run',
type=int, default=3, choices=range(1, 5))
parser.add_argument('--hdu', dest='hdu',
help='HDU of data to run on',
type=int, default=0)
parser.add_argument('--pix', dest='pix',
help='Pixel Scale',
type=float, default=0.168)
parser.add_argument('--bkg', dest='bkg',
help='Background level',
type=float, default=0.0)
parser.add_argument('--step', dest='step',
help='Step size',
type=float, default=0.12)
parser.add_argument('--uppClip', dest='uppClip',
help='Upper limit for clipping',
type=float, default=3.0)
parser.add_argument('--lowClip', dest='lowClip',
                        help='Lower limit for clipping',
type=float, default=3.0)
parser.add_argument('--nClip', dest='nClip',
                        help='Number of sigma-clipping iterations',
type=int, default=2)
parser.add_argument('--olthresh', dest='olthresh',
help='Central locator threshold',
type=float, default=0.50)
parser.add_argument('--zpPhoto', dest='zpPhoto',
help='Photometric zeropoint',
type=float, default=27.0)
parser.add_argument('--outThre', dest='outerThreshold',
help='Outer threshold',
type=float, default=None)
parser.add_argument('--fracBad', dest='fracBad',
                        help='Acceptable fraction of flagged (bad) points',
type=float, default=0.5)
parser.add_argument('--maxTry', dest='maxTry',
help='Maximum number of ellipse run',
type=int, default=4)
parser.add_argument('--minIt', dest='minIt',
help='Minimum number of iterations',
type=int, default=20)
parser.add_argument('--maxIt', dest='maxIt',
help='Maximum number of iterations',
type=int, default=150)
parser.add_argument('--plot', dest='plot', action="store_true",
help='Generate summary plot', default=True)
parser.add_argument('--verbose', dest='verbose', action="store_true",
default=True)
parser.add_argument('--linear', dest='linear', action="store_true",
default=False)
parser.add_argument('--save', dest='save', action="store_true",
default=True)
parser.add_argument('--plmask', dest='plmask', action="store_true",
default=True)
parser.add_argument('--updateIntens', dest='updateIntens',
action="store_true", default=True)
parser.add_argument("--isophote", dest='isophote',
help="Location of the x_isophote.e file",
default=None)
parser.add_argument("--xttools", dest='xttools',
help="Location of the x_ttools.e file",
default=None)
args = parser.parse_args()
galSBP(args.image, mask=args.mask,
galX=args.galX, galY=args.galY,
inEllip=args.inEllip,
maxSma=args.maxSma,
iniSma=args.iniSma,
galR=args.galR,
galQ=args.galQ,
galPA=args.galPA,
pix=args.pix,
bkg=args.bkg,
stage=args.stage,
minSma=args.minSma,
gain=3.0,
expTime=args.expTime,
zpPhoto=args.zpPhoto,
maxTry=args.maxTry,
minIt=args.minIt,
maxIt=args.maxIt,
ellipStep=args.step,
uppClip=args.uppClip,
lowClip=args.lowClip,
nClip=args.nClip,
fracBad=args.fracBad,
intMode=args.intMode,
suffix=args.suffix,
plMask=args.plmask,
conver=0.05,
recenter=True,
verbose=args.verbose,
linearStep=args.linear,
saveOut=args.save,
savePng=args.plot,
olthresh=args.olthresh,
harmonics=False,
outerThreshold=args.outerThreshold,
updateIntens=args.updateIntens,
hdu=args.hdu,
isophote=args.isophote,
xttools=args.xttools)
|
dr-guangtou/KungPao
|
kungpao/sbp.py
|
Python
|
gpl-3.0
| 68,174
|
[
"Galaxy"
] |
822d32f737e99dd03fe4c048802cd419bea958008cb5392d4aec4657322cfa2c
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.directives.code import CodeBlock
from sphinx.domains.std import Cmdoption
from sphinx.errors import ExtensionError
from sphinx.util import logging
from sphinx.util.console import bold
from sphinx.writers.html import HTMLTranslator
logger = logging.getLogger(__name__)
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_object_type(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_directive('django-admin-option', Cmdoption)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
app.set_translator('djangohtml', DjangoHTMLTranslator)
app.set_translator('json', DjangoHTMLTranslator)
app.add_node(
ConsoleNode,
html=(visit_console_html, None),
latex=(visit_console_dummy, depart_console_dummy),
man=(visit_console_dummy, depart_console_dummy),
text=(visit_console_dummy, depart_console_dummy),
texinfo=(visit_console_dummy, depart_console_dummy),
)
app.add_directive('console', ConsoleDirective)
app.connect('html-page-context', html_page_context_hook)
app.add_role('default-role-error', default_role_error)
return {'parallel_read_safe': True}
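# Illustrative reST usage of the cross-reference types registered above
# (directive on the definition side, role when linking to it):
#
#   .. setting:: DEBUG          ->  :setting:`DEBUG`
#   .. templatetag:: now        ->  :ttag:`now`
#   .. templatefilter:: date    ->  :tfilter:`date`
#   .. fieldlookup:: exact      ->  :lookup:`exact`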
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
try:
env.get_domain('changeset').note_changeset(node)
except ExtensionError:
# Sphinx < 1.8: Domain 'changeset' is not registered
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(HTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children)
def depart_desc_parameterlist(self, node):
self.body.append(')')
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
super().visit_section(node)
node['ids'] = old_ids
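    # e.g. a section arriving with ids=['models'] is written out with ids
    # ['s-models', 'models'], so custom CSS can target '#s-models' while
    # existing '#models' anchors keep working.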
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env.ref_context['std:program'] = command
title = "django-admin %s" % sig
signode += addnodes.desc_name(title, title)
return command
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super().finish()
logger.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatetag" and k == "ref/templates/builtins"
],
"tfilters": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatefilter" and k == "ref/templates/builtins"
],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
class ConsoleNode(nodes.literal_block):
"""
Custom node to override the visit/depart event handlers at registration
time. Wrap a literal_block object and defer to it.
"""
tagname = 'ConsoleNode'
def __init__(self, litblk_obj):
self.wrapped = litblk_obj
def __getattr__(self, attr):
if attr == 'wrapped':
            return self.__dict__['wrapped']
return getattr(self.wrapped, attr)
def visit_console_dummy(self, node):
"""Defer to the corresponding parent's handler."""
self.visit_literal_block(node)
def depart_console_dummy(self, node):
"""Defer to the corresponding parent's handler."""
self.depart_literal_block(node)
def visit_console_html(self, node):
"""Generate HTML for the console directive."""
if self.builder.name in ('djangohtml', 'json') and node['win_console_text']:
# Put a mark on the document object signaling the fact the directive
# has been used on it.
self.document._console_directive_used_flag = True
uid = node['uid']
self.body.append('''\
<div class="console-block" id="console-block-%(id)s">
<input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" checked>
<label for="c-tab-%(id)s-unix" title="Linux/macOS">/</label>
<input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s">
<label for="c-tab-%(id)s-win" title="Windows"></label>
<section class="c-content-unix" id="c-content-%(id)s-unix">\n''' % {'id': uid})
try:
self.visit_literal_block(node)
except nodes.SkipNode:
pass
self.body.append('</section>\n')
self.body.append('<section class="c-content-win" id="c-content-%(id)s-win">\n' % {'id': uid})
win_text = node['win_console_text']
highlight_args = {'force': True}
linenos = node.get('linenos', False)
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(
win_text, 'doscon', warn=warner, linenos=linenos, **highlight_args
)
self.body.append(highlighted)
self.body.append('</section>\n')
self.body.append('</div>\n')
raise nodes.SkipNode
else:
self.visit_literal_block(node)
class ConsoleDirective(CodeBlock):
"""
A reStructuredText directive which renders a two-tab code block in which
the second tab shows a Windows command line equivalent of the usual
Unix-oriented examples.
"""
required_arguments = 0
# The 'doscon' Pygments formatter needs a prompt like this. '>' alone
# won't do it because then it simply paints the whole command line as a
# gray comment with no highlighting at all.
WIN_PROMPT = r'...\> '
def run(self):
def args_to_win(cmdline):
changed = False
out = []
for token in cmdline.split():
if token[:2] == './':
token = token[2:]
changed = True
elif token[:2] == '~/':
token = '%HOMEPATH%\\' + token[2:]
changed = True
elif token == 'make':
token = 'make.bat'
changed = True
if '://' not in token and 'git' not in cmdline:
out.append(token.replace('/', '\\'))
changed = True
else:
out.append(token)
if changed:
return ' '.join(out)
return cmdline
def cmdline_to_win(line):
if line.startswith('# '):
return 'REM ' + args_to_win(line[2:])
if line.startswith('$ # '):
return 'REM ' + args_to_win(line[4:])
if line.startswith('$ ./manage.py'):
return 'manage.py ' + args_to_win(line[13:])
if line.startswith('$ manage.py'):
return 'manage.py ' + args_to_win(line[11:])
if line.startswith('$ ./runtests.py'):
return 'runtests.py ' + args_to_win(line[15:])
if line.startswith('$ ./'):
return args_to_win(line[4:])
if line.startswith('$ python3'):
return 'py ' + args_to_win(line[9:])
if line.startswith('$ python'):
return 'py ' + args_to_win(line[8:])
if line.startswith('$ '):
return args_to_win(line[2:])
return None
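        # A few illustrative transformations:
        #   '$ python manage.py runserver'  ->  'py manage.py runserver'
        #   '$ ./manage.py migrate'         ->  'manage.py migrate'
        #   '# install the dependencies'    ->  'REM install the dependencies'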
def code_block_to_win(content):
bchanged = False
lines = []
for line in content:
modline = cmdline_to_win(line)
if modline is None:
lines.append(line)
else:
lines.append(self.WIN_PROMPT + modline)
bchanged = True
if bchanged:
return ViewList(lines)
return None
env = self.state.document.settings.env
self.arguments = ['console']
lit_blk_obj = super().run()[0]
# Only do work when the djangohtml HTML Sphinx builder is being used,
# invoke the default behavior for the rest.
if env.app.builder.name not in ('djangohtml', 'json'):
return [lit_blk_obj]
lit_blk_obj['uid'] = str(env.new_serialno('console'))
# Only add the tabbed UI if there is actually a Windows-specific
# version of the CLI example.
win_content = code_block_to_win(self.content)
if win_content is None:
lit_blk_obj['win_console_text'] = None
else:
self.content = win_content
lit_blk_obj['win_console_text'] = super().run()[0].rawsource
# Replace the literal_node object returned by Sphinx's CodeBlock with
# the ConsoleNode wrapper.
return [ConsoleNode(lit_blk_obj)]
def html_page_context_hook(app, pagename, templatename, context, doctree):
# Put a bool on the context used to render the template. It's used to
# control inclusion of console-tabs.css and activation of the JavaScript.
    # This way it's included only from HTML files rendered from reST files where
# the ConsoleDirective is used.
context['include_console_assets'] = getattr(doctree, '_console_directive_used_flag', False)
def default_role_error(
name, rawtext, text, lineno, inliner, options=None, content=None
):
msg = (
"Default role used (`single backticks`): %s. Did you mean to use two "
"backticks for ``code``, or miss an underscore for a `link`_ ?"
% rawtext
)
logger.warning(msg, location=(inliner.document.current_source, lineno))
return [nodes.Text(text)], []
|
ar4s/django
|
docs/_ext/djangodocs.py
|
Python
|
bsd-3-clause
| 13,834
|
[
"VisIt"
] |
66e9b0ab157cad653b13a15830d98eaf5ff6222f273fdababf0052270064aa09
|
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
##
"""
Parallelizes the conversion of PDB files into pickled PDBModel objects.
"""
import Biskit.PVM.hosts as hosts
import Biskit.tools as T
## from Biskit.PVM.TrackingJobMaster import TrackingJobMaster
from Biskit.PVM import TrackingJobMaster
class StructMaster(TrackingJobMaster):
def __init__(self, dat, chunk, hosts, outFolder, skipWat=0, amber=0,
sort=0, **kw):
"""
@param dat: data dictionary
@type dat: dict
@param chunk: chunk size passed to slave
@type chunk: int
@param hosts: list of host-names
@type hosts: [str]
@param outFolder: alternative output folder
@type outFolder: str
"""
niceness = {'default': 0}
slave_script = T.projectRoot() + '/Biskit/StructureSlave.py'
TrackingJobMaster.__init__(self, dat, chunk, hosts, niceness,
slave_script, **kw)
self.options = {}
self.options['out'] = outFolder
self.options['skipRes'] = None
if skipWat:
self.options['skipRes'] = ['WAT','TIP3','H2O','WWW','Na+','Cl-']
if kw.has_key('show_output'):
self.options['report'] = not kw['show_output']
self.options['amber'] = amber
self.options['sort'] = sort
def getInitParameters(self, slave_tid):
"""
        Hand over parameters to the slave once.
@param slave_tid: slave task id
@type slave_tid: int
@return: dictionary with init parameters
@rtype: {param:value}
"""
return self.options
def done(self):
self.exit()
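## Illustrative use (the Test class below is a working example; `pdbDict`
## is a hypothetical {pdb-file: ''} dictionary):
##
##   master = StructMaster( pdbDict, 2, hosts.cpus_all, 'out/',
##                          skipWat=1, show_output=0 )
##   result = master.calculateResult()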
#############
## TESTING
#############
import Biskit.test as BT
class Test(BT.BiskitTest):
"""Test"""
TAGS = [ BT.PVM ]
def prepare(self):
import tempfile
self.out_folder = tempfile.mkdtemp( '_test_StructureMaster' )
def test_StructureMaster(self):
"""StructureMaster test"""
import os
pdbs = {T.testRoot() + '/lig/1A19.pdb':'',
T.testRoot() + '/rec/1A2P.pdb':'',
T.testRoot() + '/com/1BGS.pdb':''}
self.master = StructMaster( pdbs,
2,
hosts=hosts.cpus_all,
outFolder= self.out_folder,
show_output=self.local,
verbose=self.local,
add_hosts=1 )
## run and wait for result
self.r = self.master.calculateResult()
if self.local:
            print 'The converted pdb files have been written to %s' \
                  % self.out_folder
self.assert_( os.path.exists( self.out_folder + '/1A19.model') )
def cleanUp(self):
T.tryRemove( self.out_folder, tree=1 )
if __name__ == '__main__':
BT.localTest()
|
graik/biskit
|
archive_biskit2/Biskit/StructureMaster.py
|
Python
|
gpl-3.0
| 3,752
|
[
"Amber"
] |
f779d1cd71c4268336d0b7259e7c100110732903403050f0c48864d39622170d
|
"""
Test functions for genmod.families.family
"""
import pytest
import statsmodels.genmod.families as F
import statsmodels.genmod.families.links as L
all_links = {
L.Logit, L.logit, L.Power, L.inverse_power, L.sqrt, L.inverse_squared,
L.identity, L.Log, L.log, L.CDFLink, L.probit, L.cauchy, L.LogLog,
L.loglog, L.CLogLog, L.cloglog, L.NegativeBinomial, L.nbinom
}
poisson_links = {L.Log, L.log, L.identity, L.sqrt}
gaussian_links = {L.Log, L.log, L.identity, L.inverse_power}
gamma_links = {L.Log, L.log, L.identity, L.inverse_power}
binomial_links = {
L.Logit, L.logit, L.probit, L.cauchy, L.Log, L.log, L.CLogLog,
L.cloglog, L.LogLog, L.loglog, L.identity
}
inverse_gaussian_links = {
L.inverse_squared, L.inverse_power, L.identity, L.Log, L.log
}
negative_binomial_links = {
L.Log, L.log, L.CLogLog, L.cloglog, L.identity, L.NegativeBinomial,
L.nbinom, L.Power
}
tweedie_links = {L.Log, L.log, L.Power}
link_cases = [
(F.Poisson, poisson_links),
(F.Gaussian, gaussian_links),
(F.Gamma, gamma_links),
(F.Binomial, binomial_links),
(F.InverseGaussian, inverse_gaussian_links),
    (F.NegativeBinomial, negative_binomial_links),
(F.Tweedie, tweedie_links)
]
@pytest.mark.parametrize("family, links", link_cases)
def test_invalid_family_link(family, links):
invalid_links = all_links - links
    for link in invalid_links:
        with pytest.raises(ValueError):
            family(link())
@pytest.mark.parametrize("family, links", link_cases)
def test_family_link(family, links):
for link in links:
assert family(link())
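# Illustrative (hypothetical REPL session): a family accepts each of its
# valid links and rejects any other link with a ValueError:
#
#     F.Poisson(L.log())     # OK
#     F.Poisson(L.probit())  # raises ValueError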
|
bashtage/statsmodels
|
statsmodels/genmod/families/tests/test_family.py
|
Python
|
bsd-3-clause
| 1,603
|
[
"Gaussian"
] |
b669c5f4c667dde4189241a15004fbcaf70a9ba8522c46ccb5b999e8776730a3
|
import platform
from PyQt4.QtCore import *
from PyQt4.QtGui import *
StyleSheet = '''
QLineEdit[valid="false"] {background-color: rgb(255, 80, 80);}
'''
#TODO: add formatting to general, field, run and time options
#TODO: split up classes into separate files
#TODO: set tool tips
class ParamsForm(QDialog):
def __init__(self, params=None, parent=None):
super(ParamsForm, self).__init__(parent)
self.params = {"ndim":4,"dims":[1,1,1,1],"run-type":1,"input_file":"input.dat","output_file":"output.dat"}
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok| QDialogButtonBox.Cancel)
self.connect(buttonBox, SIGNAL("accepted()"),self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),self, SLOT("reject()"))
tabWidget = QTabWidget()
self.generalWidget = GeneralParamsWidget()
tabWidget.addTab(self.generalWidget, "&General")
self.timeWidget = TimeParamsWidget()
tabWidget.addTab(self.timeWidget, "&Time")
self.wfWidget = WfParamsWidget()
tabWidget.addTab(self.wfWidget, "&Wavefunction")
self.fieldWidget = FieldParamsWidget()
tabWidget.addTab(self.fieldWidget, "&Fields")
self.runWidget = RunParamsWidget()
tabWidget.addTab(self.runWidget, "&Runtime")
layout = QVBoxLayout()
layout.addWidget(tabWidget)
layout.addWidget(buttonBox)
self.setLayout(layout)
def runParamsDict(self):
return self.runparams
def paramsDict(self):
return self.params
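    # Illustrative: after the dialog is accepted, a caller would typically
    # read the results via
    #   params = form.paramsDict()        # simulation parameters
    #   runparams = form.runParamsDict()  # local/remote execution settings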
def accept(self):
class DimsError(Exception): pass
#class FileError(Exception): pass
try:
self.params["ndim"] = self.generalWidget.ndimSpinbox.value()
self.params["dims"] = []
for i in unicode(self.generalWidget.dimsLineEdit.text()).split(","):
self.params["dims"].append(int(i))
            #I build a list of ints and then replace it with a text string
            #because I want to validate the values being sent to the c++
            #program; I may comment out the line above depending on how I
            #interface to the c++ routines. The same goes for the fields below.
self.params["run-type"] = self.generalWidget.distBox.currentIndex()+1
self.params["input_file"] = self.generalWidget.inFileLineEdit.text()
self.params["output_file"] = self.generalWidget.outFileLineEdit.text()
self.params["tinitial"] = self.timeWidget.tiSpinBox.value()
self.params["tfinal"] = self.timeWidget.tfSpinBox.value()
self.params["dt"] = float(self.timeWidget.dtLineEdit.text())
self.params["wf-type"] = self.wfWidget.wfTypeBox.currentIndex()+1
self.params["pot-type"] = self.wfWidget.potTypeBox.currentIndex()+1
self.params["smoothing"] = float(self.wfWidget.smoothingLineEdit.text())
self.params["rnuc"] = self.wfWidget.rnucDoubleSpinBox.value()
self.params["theta-nuc"] = self.wfWidget.thetaDoubleSpinBox.value()
self.params["ip"] = self.wfWidget.ipDoubleSpinBox.value()
if self.params["pot-type"] == 2:
self.params["charges"] = self.wfWidget.chargesLineEdit.text()
else:
self.params["charges"] = self.wfWidget.chargeLineEdit.text()
self.params["nfield"] = self.fieldWidget.nfieldSpinBox.value()
self.params["env"] = self.fieldWidget.envComboBox.currentIndex()+1
self.params["omega"] = []
for i in unicode(self.fieldWidget.omegaLineEdit.text()).split(","):
self.params["omega"].append(float(i))
self.params["ef"] = []
for i in unicode(self.fieldWidget.efLineEdit.text()).split(","):
self.params["ef"].append(float(i))
self.params["ce"] = []
for i in unicode(self.fieldWidget.ceLineEdit.text()).split(","):
self.params["ce"].append(float(i))
self.params["fwhm"] = []
for i in unicode(self.fieldWidget.fwhmLineEdit.text()).split(","):
self.params["fwhm"].append(float(i))
self.params["nthreads"] = self.runWidget.nthreadSpinBox.value()
self.runparams={}
if self.runWidget.runLocationComboBox.currentIndex() == 0:
self.local = True
else:
self.local = False
self.runparams["user"] = self.runWidget.userLineEdit.text()
self.runparams["pass"] = self.runWidget.passLineEdit.text()
self.runparams["server"] = self.runWidget.serverLineEdit.text()
self.runparams["binary"] = self.runWidget.binaryLineEdit.text()
self.runparams["script"] = self.runWidget.scriptComboBox.currentIndex()
if not len(self.params["dims"]) == self.params["ndim"]:
raise DimsError, ("The number of trajectories must be the same as the dimensionality")
if not len(self.params["omega"]) == self.params["nfield"]:
raise DimsError, ("The number of frequencies must be the same as the number of fields")
if not len(self.params["ef"]) == self.params["nfield"]:
raise DimsError, ("The number of field strengths must be the same as the number of fields")
if self.params["env"] == 3 or self.params["env"] == 4:
if not len(self.params["ce"]) == self.params["nfield"]:
raise DimsError, ("The number of CE phases must be the same as the number of fields")
if not len(self.params["fwhm"]) == self.params["nfield"]:
raise DimsError, ("The number of FWHMs must be the same as the number of fields")
self.params["dims"] = self.generalWidget.dimsLineEdit.text()
self.params["omega"] = self.fieldWidget.omegaLineEdit.text()
self.params["ef"] = self.fieldWidget.efLineEdit.text()
self.params["ce"] = self.fieldWidget.ceLineEdit.text()
self.params["fwhm"] = self.fieldWidget.fwhmLineEdit.text()
            #these test for the existence of a few files; the files may not exist, so it isn't terribly important
# if not QFile.exists(self.params["input_file"]):
# raise FileError, ("input file does not exist")
# if not QFile.exists(self.params["output_file"]):
# raise FileError, ("output file does not exist")
except DimsError, e:
QMessageBox.warning(self, "Dimensionality Error", unicode(e))
self.generalWidget.dimsLineEdit.selectAll()
self.generalWidget.dimsLineEdit.setFocus()
return
except ValueError, e:
QMessageBox.warning(self, "Value Error", unicode(e))
return
#except FileError, e:
# QMessageBox.warning(self, "File Error", unicode(e))
# return
QDialog.accept(self)
#TODO: set tooltips!!
class GeneralParamsWidget(QWidget):
def __init__(self, parent = None):
super(GeneralParamsWidget, self).__init__(parent)
ndimLabel = QLabel("Number of Dimensions:")
self.ndimSpinbox = QSpinBox()
self.ndimSpinbox.setRange(2,400)
self.ndimSpinbox.setValue(4)
        self.ndimSpinbox.setToolTip("Set the number of dimensions; must be at least 2")
self.ndimSpinbox.setStatusTip(self.ndimSpinbox.toolTip())
self.connect(self.ndimSpinbox, SIGNAL("valueChanged(int)"), self.ndimChange)
dimsLabel = QLabel("Trajectories per Dimension:")
self.dimsLineEdit = QLineEdit("1,1,1,1")
self.dimsLineEdit.setProperty("valid", (True))
self.connect(self.dimsLineEdit, SIGNAL("textChanged(QString)"), self.ndimChange)
distType = {1:"Monte-Carlo",2:"Linear"}
distLabel = QLabel("Distribution Type:")
self.distBox = QComboBox()
for k,v in distType.items():
self.distBox.insertItem(k, v)
inFileLabel = QLabel("Input File Name:")
self.inFileButton = QPushButton("&Open")
self.inFileLineEdit = QLineEdit()
self.inFileLineEdit.setMinimumWidth(200)
self.inFileLineEdit.setMaximumWidth(600)
self.inFileLineEdit.setText("input.dat")
self.connect(self.inFileButton, SIGNAL("clicked()"), lambda who="in": self.changeButton(who))
outFileLabel = QLabel("Output File Name:")
self.outFileButton = QPushButton("&Open")
self.outFileLineEdit = QLineEdit()
self.outFileLineEdit.setMinimumWidth(200)
self.outFileLineEdit.setMaximumWidth(600)
self.outFileLineEdit.setText("output.dat")
self.connect(self.outFileButton, SIGNAL("clicked()"), lambda who="out": self.changeButton(who))
generalLayout = QGridLayout()
generalLayout.addWidget(ndimLabel, 0, 0)
generalLayout.addWidget(self.ndimSpinbox, 1, 0)
generalLayout.addWidget(dimsLabel, 2, 0)
generalLayout.addWidget(self.dimsLineEdit, 3, 0)
generalLayout.addWidget(distLabel, 4, 0)
generalLayout.addWidget(self.distBox, 5, 0)
generalLayout.addWidget(inFileLabel, 0, 1)
generalLayout.addWidget(self.inFileLineEdit, 1, 1)
generalLayout.addWidget(outFileLabel, 2, 1)
generalLayout.addWidget(self.outFileLineEdit, 3, 1)
generalLayout.addWidget(self.inFileButton, 1, 2)
generalLayout.addWidget(self.outFileButton, 3, 2)
self.setLayout(generalLayout)
self.setStyleSheet(StyleSheet)
def ndimChange(self):
if len(unicode(self.dimsLineEdit.text()).split(",")) == self.ndimSpinbox.value():
self.dimsLineEdit.setProperty("valid",QVariant(True))
self.setStyleSheet(StyleSheet)
else:
self.dimsLineEdit.setProperty("valid",QVariant(False))
self.setStyleSheet(StyleSheet)
def changeButton(self,who):
findFile = QFileDialog()
findFile.setFileMode(QFileDialog.AnyFile)
if platform.system() == "Darwin":
findFile.setOption(QFileDialog.DontUseNativeDialog,True)
if not findFile.exec_():
# exit if cancel
return
filenames = findFile.selectedFiles()
filename = filenames.takeFirst()
#filename = QFileDialog.getSaveFileName(self, 'Open file','.')
if who == "in":
self.inFileLineEdit.setText(filename)
else:
self.outFileLineEdit.setText(filename)
class TimeParamsWidget(QWidget):
def __init__(self, parent = None):
super(TimeParamsWidget, self).__init__(parent)
tiLabel = QLabel("Start Time:")
self.tiSpinBox = QSpinBox()
self.tiSpinBox.setRange(-1000000000,1000000000)
self.tiSpinBox.setValue(0)
self.tiSpinBox.setMaximumWidth(80)
tfLabel = QLabel("End Time:")
self.tfSpinBox = QSpinBox()
self.tfSpinBox.setRange(-1000000000,1000000000)
self.tfSpinBox.setValue(100)
self.tfSpinBox.setMaximumWidth(80)
dtLabel = QLabel("Initial Time Step:")
self.dtLineEdit = QLineEdit("0.001")
self.dtLineEdit.setMaximumWidth(80)
tifLayout = QHBoxLayout()
tifLayout.addStretch()
for item in [tiLabel, self.tiSpinBox, tfLabel, self.tfSpinBox]:
tifLayout.addWidget(item)
tifLayout.addStretch()
dtLayout = QHBoxLayout()
dtLayout.addStretch()
for item in [dtLabel, self.dtLineEdit]:
dtLayout.addWidget(item)
dtLayout.addStretch()
layout = QVBoxLayout()
layout.addLayout(tifLayout)
layout.addLayout(dtLayout)
self.setLayout(layout)
class WfParamsWidget(QWidget):
def __init__(self, parent = None):
super(WfParamsWidget, self).__init__(parent)
wfType = {1:"Hydrogen Atom-like", 2:"Hydrogen Molecule-like", 3:"GAMESS-US checkpoint", 4:"Wavefunction on Grid"}
wfTypeLabel = QLabel("Wavefunction type:")
self.wfTypeBox = QComboBox()
for k,v in wfType.items():
self.wfTypeBox.insertItem(k, v)
self.wfTypeBox.setCurrentIndex(0)
potType = wfType
potTypeLabel = QLabel("Potential type:")
self.potTypeBox = QComboBox()
for k,v in potType.items():
self.potTypeBox.insertItem(k, v)
self.potTypeBox.setCurrentIndex(0)
ipLabel = QLabel("Ionization Potential:")
self.ipDoubleSpinBox = QDoubleSpinBox()
self.ipDoubleSpinBox.setRange(0,10)
self.ipDoubleSpinBox.setDecimals(3)
self.ipDoubleSpinBox.setSingleStep(0.001)
self.ipDoubleSpinBox.setValue(0.5)
smoothingLabel = QLabel("Smoothing Parameter:")
self.smoothingLineEdit = QLineEdit("1E-4")
self.smoothingLineEdit.setMaximumWidth(80)
chargeLabel = QLabel("Charge on core:")
self.chargeLineEdit = QLineEdit("1")
self.chargeLineEdit.setMaximumWidth(50)
chargesLabel = QLabel("Charges on cores:")
self.chargesLineEdit = QLineEdit("1,1")
self.chargesLineEdit.setMaximumWidth(50)
rnucLabel = QLabel("Internuclear Seperation:")
self.rnucDoubleSpinBox = QDoubleSpinBox()
self.rnucDoubleSpinBox.setRange(0,10)
self.rnucDoubleSpinBox.setDecimals(3)
self.rnucDoubleSpinBox.setSingleStep(0.001)
self.rnucDoubleSpinBox.setValue(4.0)
self.rnucDoubleSpinBox.setMaximumWidth(80)
thetaLabel = QLabel("Molecular Orientation:")
self.thetaDoubleSpinBox = QDoubleSpinBox()
self.thetaDoubleSpinBox.setRange(0,360)
self.thetaDoubleSpinBox.setDecimals(1)
self.thetaDoubleSpinBox.setSingleStep(0.1)
self.thetaDoubleSpinBox.setValue(0.0)
self.thetaDoubleSpinBox.setMaximumWidth(80)
blankWidget = QWidget()
atomWidget = QWidget()
atomLayout = QHBoxLayout()
#atomLayout.addStretch()
atomLayout.addWidget(chargeLabel)
atomLayout.addWidget(self.chargeLineEdit)
atomLayout.addStretch()
atomWidget.setLayout(atomLayout)
molWidget = QWidget()
molLayout = QVBoxLayout()
molUpLayout = QHBoxLayout()
molUpLayout.addWidget(chargesLabel)
molUpLayout.addWidget(self.chargesLineEdit)
molUpLayout.addWidget(rnucLabel)
molUpLayout.addWidget(self.rnucDoubleSpinBox)
molDownLayout = QHBoxLayout()
molDownLayout.addWidget(thetaLabel)
molDownLayout.addWidget(self.thetaDoubleSpinBox)
molDownLayout.addStretch()
molLayout.addLayout(molUpLayout)
molLayout.addLayout(molDownLayout)
molWidget.setLayout(molLayout)
self.stackedWidget = QStackedWidget()
self.stackedWidget.addWidget(atomWidget)
self.stackedWidget.addWidget(molWidget)
self.stackedWidget.addWidget(blankWidget)
labelLayout = QHBoxLayout()
labelLayout.addWidget(wfTypeLabel)
labelLayout.addWidget(self.wfTypeBox)
itemLayout = QHBoxLayout()
itemLayout.addWidget(potTypeLabel)
itemLayout.addWidget(self.potTypeBox)
smoothLayout = QHBoxLayout()
smoothLayout.addWidget(ipLabel)
smoothLayout.addWidget(self.ipDoubleSpinBox)
smoothLayout.addWidget(smoothingLabel)
smoothLayout.addWidget(self.smoothingLineEdit)
wfLayout = QVBoxLayout()
wfLayout.addLayout(labelLayout)
wfLayout.addLayout(itemLayout)
wfLayout.addLayout(smoothLayout)
wfLayout.addWidget(self.stackedWidget)
self.connect(self.potTypeBox,SIGNAL("currentIndexChanged(QString)"),self.changeStacked)
self.setLayout(wfLayout)
def changeStacked(self, text):
if text == "Hydrogen Atom-like":
self.stackedWidget.setCurrentIndex(0)
elif text == "Hydrogen Molecule-like":
self.stackedWidget.setCurrentIndex(1)
else:
self.stackedWidget.setCurrentIndex(2)
class FieldParamsWidget(QWidget):
def __init__(self, parent = None):
super(FieldParamsWidget, self).__init__(parent)
nfieldLabel = QLabel("Number of fields:")
self.nfieldSpinBox = QSpinBox()
self.nfieldSpinBox.setRange(0,100)
self.nfieldSpinBox.setValue(1)
self.connect(self.nfieldSpinBox, SIGNAL("valueChanged(int)"), self.nfieldChange)
efLabel = QLabel("Strength per Field (a.u.):")
self.efLineEdit = QLineEdit("0.01")
self.efLineEdit.setProperty("valid", (True))
self.connect(self.efLineEdit, SIGNAL("textChanged(QString)"), self.nfieldChange)
fwhmLabel = QLabel("FWHM per Field (a.u.):")
self.fwhmLineEdit = QLineEdit("0.")
self.fwhmLineEdit.setProperty("valid", (True))
self.connect(self.fwhmLineEdit, SIGNAL("textChanged(QString)"), self.nfieldChange)
omegaLabel = QLabel("Frequency per Field (a.u.):")
self.omegaLineEdit = QLineEdit("0.057")
self.omegaLineEdit.setProperty("valid", (True))
self.connect(self.omegaLineEdit, SIGNAL("textChanged(QString)"), self.nfieldChange)
ceLabel = QLabel("CEP per Field (a.u.):")
self.ceLineEdit = QLineEdit("0.")
self.ceLineEdit.setProperty("valid", (True))
self.connect(self.ceLineEdit, SIGNAL("textChanged(QString)"), self.nfieldChange)
envType = {1:"Static", 2:"Constant Cosine", 3:"Numerical", 4:"Gaussian", 5:"Sine Squared"}
envLabel = QLabel("Type of Envelope:")
self.envComboBox = QComboBox()
for k,v in envType.items():
self.envComboBox.insertItem(k, v)
self.envComboBox.setCurrentIndex(1)
blankWidget = QWidget()
envWidget = QWidget()
envLayout = QHBoxLayout()
for item in [fwhmLabel, self.fwhmLineEdit, ceLabel, self.ceLineEdit]:
envLayout.addWidget(item)
envWidget.setLayout(envLayout)
self.stackedWidget = QStackedWidget()
self.stackedWidget.addWidget(blankWidget)
self.stackedWidget.addWidget(envWidget)
layout = QVBoxLayout()
topLayout = QHBoxLayout()
for item in [nfieldLabel, self.nfieldSpinBox, efLabel, self.efLineEdit]:
topLayout.addWidget(item)
midLayout = QHBoxLayout()
for item in [envLabel, self.envComboBox, omegaLabel, self.omegaLineEdit]:
midLayout.addWidget(item)
layout.addLayout(topLayout)
layout.addLayout(midLayout)
layout.addWidget(self.stackedWidget)
self.connect(self.envComboBox,SIGNAL("currentIndexChanged(QString)"),self.changeStacked)
self.setLayout(layout)
def nfieldChange(self):
edits = [self.efLineEdit,
self.ceLineEdit,
self.fwhmLineEdit,
self.omegaLineEdit]
nfields = self.nfieldSpinBox.value()
for i in edits:
if len(unicode(i.text()).split(",")) == nfields:
i.setProperty("valid",QVariant(True))
else:
i.setProperty("valid",QVariant(False))
self.setStyleSheet(StyleSheet)
def changeStacked(self, text):
if text == "Static" or text == "Constant Cosine" or text == "Numerical":
self.stackedWidget.setCurrentIndex(0)
else:
self.stackedWidget.setCurrentIndex(1)
class RunParamsWidget(QWidget):
def __init__(self, parent = None):
super(RunParamsWidget, self).__init__(parent)
nthreadLabel = QLabel("Number of threads:")
self.nthreadSpinBox = QSpinBox()
self.nthreadSpinBox.setRange(1,1000)
self.nthreadSpinBox.setValue(1)
runLocationType = {1:"local", 2:"remote"}
runLocationLabel = QLabel("Simulation Location:")
self.runLocationComboBox = QComboBox()
for k,v in runLocationType.items():
self.runLocationComboBox.insertItem(k, v)
userLabel = QLabel("Username:")
self.userLineEdit = QLineEdit("user")
passLabel = QLabel("Password:")
self.passLineEdit = QLineEdit("password")
self.passLineEdit.setEchoMode(QLineEdit.Password)
serverLabel = QLabel("Server:")
self.serverLineEdit = QLineEdit("example.com")
binaryLabel = QLabel("Binary Name:")
self.binaryLineEdit = QLineEdit("cpp-class")
scriptLabel = QLabel("Script type")
self.scriptComboBox = QComboBox()
self.scriptComboBox.insertItems(1, ["shell script","PBS script"])
#TODO: connect user and password etc. to QSettings to store them.
blankWidget = QWidget()
remoteWidget = QWidget()
remoteLayout = QVBoxLayout()
remoteTopLayout = QHBoxLayout()
for item in [userLabel, self.userLineEdit, passLabel, self.passLineEdit]:
remoteTopLayout.addWidget(item)
remoteBottomLayout = QHBoxLayout()
for item in [serverLabel, self.serverLineEdit, binaryLabel, self.binaryLineEdit, scriptLabel, self.scriptComboBox]:
remoteBottomLayout.addWidget(item)
remoteLayout.addLayout(remoteTopLayout)
remoteLayout.addLayout(remoteBottomLayout)
remoteWidget.setLayout(remoteLayout)
self.stackedWidget = QStackedWidget()
self.stackedWidget.addWidget(blankWidget)
self.stackedWidget.addWidget(remoteWidget)
layout = QVBoxLayout()
topRow = QHBoxLayout()
for item in [nthreadLabel, self.nthreadSpinBox, runLocationLabel, self.runLocationComboBox]:
topRow.addWidget(item)
layout.addLayout(topRow)
layout.addWidget(self.stackedWidget)
self.connect(self.runLocationComboBox,SIGNAL("currentIndexChanged(QString)"),self.changeStacked)
self.setLayout(layout)
def changeStacked(self, text):
if text == "local":
self.stackedWidget.setCurrentIndex(0)
else:
self.stackedWidget.setCurrentIndex(1)
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
#form = GeneralParamsWidget()
form = ParamsForm()
form.show()
app.exec_()
|
rymurr/cpp-class
|
pygui/paramsform.py
|
Python
|
gpl-3.0
| 22,869
|
[
"GAMESS",
"Gaussian"
] |
4b94790c5bb7f0ed9b6dc8897a26a2ca678bff4bc4cb11ca2d86828698446717
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
from setuptools import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png",
"*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]]
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
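# Illustrative: each directory walked under e.g. 'luigi/static' contributes
# one glob pattern per extension, such as 'static/<subdir>/*.js' (relative
# to the luigi package); the patterns feed the package_data argument below.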
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
long_description = readme_note + fobj.read()
install_requires = [
'tornado>=4.0,<5',
# https://pagure.io/python-daemon/issue/18
'python-daemon<2.2.0',
'python-dateutil>=2.7.5,<3',
]
# Note: To support older versions of setuptools, we're explicitly not
# using conditional syntax (i.e. 'enum34>1.1.0;python_version<"3.4"').
# This syntax is a problem for setuptools as recent as `20.1.1`,
# published Feb 16, 2016.
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34>1.1.0')
if os.environ.get('READTHEDOCS', None) == 'True':
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
install_requires.append('sqlalchemy')
    # readthedocs doesn't like python-daemon, see #1342
install_requires.remove('python-daemon<2.2.0')
install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py
setup(
name='luigi',
version='2.8.3',
    description='Workflow mgmt + task scheduling + dependency resolution',
long_description=long_description,
author='The Luigi Authors',
url='https://github.com/spotify/luigi',
license='Apache License 2.0',
packages=[
'luigi',
'luigi.configuration',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
'luigi-deps-tree = luigi.tools.deps_tree:main'
]
},
install_requires=install_requires,
extras_require={
'prometheus': ['prometheus-client==0.5.0'],
'toml': ['toml<2.0.0'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: System :: Monitoring',
],
)
|
rayrrr/luigi
|
setup.py
|
Python
|
apache-2.0
| 3,689
|
[
"VisIt"
] |
5c4a4a5d786b65e6586a8a7ece0f1011a53522eca3492fe8475d5d09b5507a2a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Concatenates the seamed and bandmerged fields into the final catalogue.
This script takes the output from the seaming script and concatenates the
results into the final source catalogue products, which contain one entry
for each unique source and are generated in 5x5 degree tiles. Both a 'light' and
a 'full' version of these tiles are generated.
This step also creates the source designations "JHHMMSS.ss+DDMMSS.s".
"""
from __future__ import division, print_function, unicode_literals
import os
import numpy as np
import datetime
from multiprocessing import Pool
from astropy import log
import constants
from constants import IPHASQC
import util
__author__ = 'Geert Barentsen'
__copyright__ = 'Copyright, The Authors'
__credits__ = ['Geert Barentsen', 'Hywel Farnhill', 'Janet Drew']
###########
# CLASSES
###########
class Concatenator(object):
"""Concatenates field-based data into a partial catalogue."""
def __init__(self, strip, part='a', mode='full'):
assert(part in ['a', 'b'])
assert(mode in ['light', 'full'])
self.strip = strip
self.part = part
self.mode = mode
# Where are the input catalogues?
self.datapath = os.path.join(constants.DESTINATION, 'seamed')
# Where to write the output?
self.destination = os.path.join(constants.DESTINATION,
'concatenated')
# Setup the destination directory
if mode == 'light':
self.destination = os.path.join(self.destination, 'light')
else:
self.destination = os.path.join(self.destination, 'full')
util.setup_dir(self.destination)
util.setup_dir(self.destination+'-compressed')
log.info('Reading data from {0}'.format(self.datapath))
# Limits
self.lon1 = strip
self.lon2 = strip + constants.STRIPWIDTH
self.fieldlist = self.get_fieldlist()
def get_partname(self):
"""Returns the name of this partial catalogue, e.g. '215b'"""
return '{0:03.0f}{1}'.format(self.lon1, self.part)
def get_output_filename(self, gzip=False):
"""Returns the full path of the output file."""
if self.mode == 'light':
suffix = '-light'
else:
suffix = ''
destination = self.destination
extension = 'fits'
if gzip:
destination += '-compressed'
extension += '.gz'
return os.path.join(destination,
'iphas-dr2-{0}{1}.{2}'.format(
self.get_partname(),
suffix,
extension))
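        # Illustration: strip=215, part='b', mode='light', gzip=True yields
        # '<destination>/light-compressed/iphas-dr2-215b-light.fits.gz'.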
def get_fieldlist(self):
# Which are our fields?
# Note: we must allow for border overlaps
if self.part == 'a':
cond_b = IPHASQC['b'] < (0 + constants.FIELD_MAXDIST)
else:
cond_b = IPHASQC['b'] > (0 - constants.FIELD_MAXDIST)
cond_strip = (constants.IPHASQC_COND_RELEASE
& cond_b
& (IPHASQC['l'] >= (self.lon1 - constants.FIELD_MAXDIST))
& (IPHASQC['l'] < (self.lon2 + constants.FIELD_MAXDIST)))
fieldlist = IPHASQC['id'][cond_strip]
log.info('Found {0} fields.'.format(len(fieldlist)))
return fieldlist
def run(self):
"""Performs the concatenation of the strip.
This step will only keep stars with low errbits:
(errBits < 64)
and not uber-saturated:
! (r<12.5 & i<11.5 & ha<12)
and reasonable errors:
(rErr < 0.198 || iErr < 0.198 || haErr < 0.198)
and not noise-like:
(pStar > 0.2 || pGalaxy > 0.2)
and not saturated in all bands:
(NULL_rErrBits || NULL_iErrBits || NULL_haErrBits || ((rErrbits & iErrBits & haErrBits & 8) == 0))
and being the primary detection:
sourceID == primaryID
"""
if self.part == 'a':
cond_latitude = "b < 0"
else:
cond_latitude = "b >= 0"
if self.mode == 'full':
extracmd = """delcols "pSaturated \
rErrBits iErrBits haErrBits errBits \
rPlaneX rPlaneY iPlaneX iPlaneY \
haPlaneX haPlaneY rAxis primaryID \
vignetted truncated badPix" """
else:
# select "nBands == 3"; \
extracmd = """keepcols "name ra dec \
r rErr \
i iErr \
ha haErr \
mergedClass errBits";"""
instring = ''
for field in self.fieldlist:
path = os.path.join(self.datapath,
'strip{0:.0f}'.format(self.strip),
'{0}.fits'.format(field))
instring += 'in={0} '.format(path)
output_filename = self.get_output_filename()
output_filename_gzip = self.get_output_filename(gzip=True)
log.info('Writing data to {0}'.format(output_filename))
version = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
# A bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set
# We also replace a bunch of column descriptions because they cannot be longer than 73 chars.
param = {'stilts': constants.STILTS,
'in': instring,
'icmd': """'clearparams *; \
setparam NAME "IPHAS DR2 Source Catalogue (part """+self.get_partname()+""")"; \
setparam ORIGIN "www.iphas.org"; \
setparam AUTHOR "Geert Barentsen, Hywel Farnhill, Janet Drew"; \
setparam VERSION \""""+version+""""; \
select "(errBits < 64) \
& ! (r<12.5 & i<11.5 & ha<12) \
& (rErr < 0.198 || iErr < 0.198 || haErr < 0.198) \
& (pStar > 0.2 || pGalaxy > 0.2) \
& (NULL_rErrBits || NULL_iErrBits || NULL_haErrBits || ((rErrbits & iErrBits & haErrBits & 8) == 0))
& l >= """+str(self.lon1)+""" \
& l < """+str(self.lon2)+""" \
& """+str(cond_latitude)+""" \
& sourceID == primaryID"; \
addcol -before ra \
-desc "Source designation (JHHMMSS.ss+DDMMSS.s) without IPHAS2 prefix." \
name \
"concat(\\"J\\",
replaceAll(degreesToHms(ra, 2),
\\":\\", \\"\\"),
replaceAll(degreesToDms(dec, 1),
\\":\\", \\"\\")
)"; \
addcol -before rMJD -desc "True if source was blended with a nearby neighbour in the r-band." \
rDeblend "NULL_rErrBits ? false : (rErrBits & 2) > 0";
addcol -before rMJD -desc "True i the peak pixel count exceeded 55000 in r." \
rSaturated "r<13 ? true : NULL_rErrBits ? false : (rErrBits & 8) > 0";
addcol -before iMJD -desc "True if source was blended with a nearby neighbour in the i-band." \
iDeblend "NULL_iErrBits ? false : (iErrBits & 2) > 0";
addcol -before iMJD -desc "True if the peak pixel count exceeded 55000 in i." \
iSaturated "i<12 ? true : NULL_iErrBits ? false : (iErrBits & 8) > 0";
addcol -before haMJD -desc "True if source was blended with a nearby neighbour in H-alpha." \
haDeblend "NULL_haErrBits ? false : (haErrBits & 2) > 0";
addcol -before haMJD -desc "True if the peak pixel count exceeded 55000 in H-alpha." \
haSaturated "ha<12.5 ? true : NULL_haErrBits ? false : (haErrBits & 8) > 0";
replacecol saturated "rSaturated || iSaturated || haSaturated";
colmeta -name a10 reliable;
replacecol a10 "! saturated & nBands == 3 & rErr<0.1 & iErr<0.1 & haErr<0.1 & (abs(r-rAperMag1) < 3*hypot(rErr,rAperMag1Err)+0.03) & (abs(i-iAperMag1) < 3*hypot(iErr,iAperMag1Err)+0.03) & (abs(ha-haAperMag1) < 3*hypot(haErr,haAperMag1Err)+0.03)";
addcol -before fieldID -desc "True if (a10 & pStar > 0.9 & ! deblend & ! brightNeighb)" \
a10point "a10 & pStar > 0.9 & ! deblend & ! brightNeighb";
replacecol -utype S15 fieldID "fieldID";
replacecol -utype S1 fieldGrade "toString(fieldGrade)";
colmeta -desc "True if detected in all bands at 10-sigma plus other criteria." a10;
colmeta -desc "J2000 RA with respect to the 2MASS reference frame." ra;
colmeta -desc "Unique source identification string (run-ccd-detectionnumber)." sourceID;
colmeta -desc "Astrometric fit error (RMS) across the CCD." posErr;
colmeta -desc "1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy." mergedClass;
colmeta -desc "N(0,1) stellarness-of-profile statistic." mergedClassStat;
colmeta -desc "1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy." rClass;
colmeta -desc "1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy." iClass;
colmeta -desc "1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy." haClass;
colmeta -desc "Unique r-band detection identifier (run-ccd-detectionnumber)." rDetectionID;
colmeta -desc "Unique i-band detection identifier (run-ccd-detectionnumber)." iDetectionID;
colmeta -desc "Unique H-alpha detection identifier (run-ccd-detectionnumber)." haDetectionID;
colmeta -desc "CCD pixel coordinate in the r-band exposure." rX;
colmeta -desc "CCD pixel coordinate in the r-band exposure." rY;
colmeta -desc "CCD pixel coordinate in the i-band exposure." iX;
colmeta -desc "CCD pixel coordinate in the i-band exposure." iY;
colmeta -desc "CCD pixel coordinate in the H-alpha exposure." haX;
colmeta -desc "CCD pixel coordinate in the H-alpha exposure." haY;
colmeta -desc "Survey field identifier." fieldID;
colmeta -desc "Probability the source is extended." pGalaxy;
colmeta -desc "Default r mag (Vega) using the 2.3 arcsec aperture." r;
colmeta -desc "Default i mag (Vega) using the 2.3 arcsec aperture." i;
colmeta -desc "Default H-alpha mag (Vega) using the 2.3 arcsec aperture." ha;
colmeta -desc "r mag (Vega) derived from peak pixel height." rPeakMag;
colmeta -desc "i mag (Vega) derived from peak pixel height." iPeakMag;
colmeta -desc "H-alpha mag (Vega) derived from peak pixel height." haPeakMag;
colmeta -desc "r mag (Vega) using the 1.2 arcsec aperture." rAperMag1;
colmeta -desc "i mag (Vega) using the 1.2 arcsec aperture." iAperMag1;
colmeta -desc "H-alpha mag (Vega) using the 1.2 arcsec aperture." haAperMag1;
colmeta -desc "r mag (Vega) using the 3.3 arcsec aperture." rAperMag3;
colmeta -desc "i mag (Vega) using the 3.3 arcsec aperture." iAperMag3;
colmeta -desc "H-alpha mag (Vega) using the 3.3 arcsec aperture." haAperMag3;
colmeta -desc "Internal quality control score of the field. One of A, B, C or D." fieldGrade;
colmeta -desc "Number of repeat observations of this source in the survey." nObs;
colmeta -desc "SourceID of the object in the partner exposure." sourceID2;
colmeta -desc "FieldID of the partner detection." fieldID2;
colmeta -desc "r mag (Vega) in the partner field, obtained within 10 minutes." r2;
colmeta -desc "Uncertainty for r2." rErr2;
colmeta -desc "i mag (Vega) in the partner field, obtained within 10 minutes." i2;
colmeta -desc "Uncertainty for i2." iErr2;
colmeta -desc "H-alpha mag (Vega) in the partner field, obtained within 10 minutes." ha2;
colmeta -desc "Uncertainty for ha2." haErr2;
colmeta -desc "flag brightNeighb (1), deblend (2), saturated (8), vignetting (64)" errBits2;
{0}
'""".format(extracmd),
'out': output_filename}
cmd = '{stilts} tcat {in} icmd={icmd} countrows=true lazy=true out={out}'
mycmd = cmd.format(**param)
log.info(mycmd)
status = os.system(mycmd)
log.info('concat: '+str(status))
# zip
mycmd = 'gzip --stdout {0} > {1}'.format(output_filename, output_filename_gzip)
log.debug(mycmd)
status = os.system(mycmd)
log.info('gzip: '+str(status))
return status
###########
# FUNCTIONS
###########
def concatenate_one(strip,
logfile = os.path.join(constants.LOGDIR, 'concatenation.log')):
with log.log_to_file(logfile):
# Strips are defined by the start longitude
log.info('Concatenating L={0}'.format(strip))
for mode in ['light', 'full']:
for part in ['a', 'b']:
concat = Concatenator(strip, part, mode)
concat.run()
return strip
def concatenate(clusterview):
# Spread the work across the cluster
strips = np.arange(25, 215+1, constants.STRIPWIDTH)
results = clusterview.imap(concatenate_one, strips)
# Print a friendly message once in a while
i = 0
for mystrip in results:
i += 1
log.info('Completed strip {0} ({1}/{2})'.format(mystrip,
i,
len(strips)))
log.info('Concatenating finished')
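# Illustration (assuming constants.STRIPWIDTH == 10, which is not shown here):
# strips would be [25, 35, ..., 215], i.e. 20 jobs spread over the cluster,
# each producing the light and full a/b parts for one longitude strip.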
def merge_light_catalogue():
"""Merge the light tiled catalogues into one big file."""
output_filename = os.path.join(constants.DESTINATION,
'concatenated',
'iphas-dr2-light.fits')
instring = ''
for lon in np.arange(25, 215+1, constants.STRIPWIDTH):
for part in ['a', 'b']:
path = os.path.join(constants.DESTINATION,
'concatenated',
'light',
'iphas-dr2-{0:03d}{1}-light.fits'.format(
lon, part))
instring += 'in={0} '.format(path)
# Warning: a bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set
param = {'stilts': constants.STILTS,
'in': instring,
'out': output_filename}
cmd = '{stilts} tcat {in} countrows=true lazy=true ofmt=colfits-basic out={out}'
mycmd = cmd.format(**param)
log.debug(mycmd)
status = os.system(mycmd)
log.info('concat: '+str(status))
return status
################################
# MAIN EXECUTION (FOR DEBUGGING)
################################
if __name__ == "__main__":
log.setLevel('DEBUG')
concatenate_one(215)
|
barentsen/iphas-dr2
|
dr2/concatenating.py
|
Python
|
mit
| 16,803
|
[
"Galaxy"
] |
36eaa1d124413fb3ef825bbb1b385fc1a4d92132f7559ea00ada692539057d04
|
import os
import sys
from xml.etree import ElementTree as ET
from collections import defaultdict
# Todo: ""
# execute from galaxy root dir
tooldict = defaultdict(list)
def main():
doc = ET.parse("tool_conf.xml")
root = doc.getroot()
# index range 1-1000, current sections/tools divided between 250-750
sectionindex = 250
sectionfactor = int( 500 / len( root.getchildren() ) )
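    # Worked example (illustrative): with 5 top-level children the factor is
    # int(500 / 5) == 100, so sections land at orders 250, 350, 450, 550, 650,
    # evenly spread inside the reserved 250-750 band.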
for rootchild in root.getchildren():
currentsectionlabel = ""
if ( rootchild.tag == "section" ):
sectionname = rootchild.attrib['name']
            # per section tool index range 1-1000, current labels/tools
            # divided between 250 and 750
toolindex = 250
toolfactor = int( 500 / len( rootchild.getchildren() ) )
currentlabel = ""
for sectionchild in rootchild.getchildren():
if ( sectionchild.tag == "tool" ):
addToToolDict(sectionchild, sectionname, sectionindex, toolindex, currentlabel)
toolindex += toolfactor
elif ( sectionchild.tag == "label" ):
currentlabel = sectionchild.attrib["text"]
sectionindex += sectionfactor
elif ( rootchild.tag == "tool" ):
addToToolDict(rootchild, "", sectionindex, None, currentsectionlabel)
sectionindex += sectionfactor
elif ( rootchild.tag == "label" ):
currentsectionlabel = rootchild.attrib["text"]
sectionindex += sectionfactor
# scan galaxy root tools dir for tool-specific xmls
toolconffilelist = getfnl( os.path.join(os.getcwd(), "tools" ) )
# foreach tool xml:
# check if the tags element exists in the tool xml (as child of <tool>)
# if not, add empty tags element for later use
# if this tool is in the above tooldict, add the toolboxposition element to the tool xml
# if not, then nothing.
for toolconffile in toolconffilelist:
hastags = False
hastoolboxpos = False
#parse tool config file into a document structure as defined by the ElementTree
tooldoc = ET.parse(toolconffile)
# get the root element of the toolconfig file
tooldocroot = tooldoc.getroot()
#check tags element, set flag
tagselement = tooldocroot.find("tags")
        if tagselement is not None:  # ElementTree elements are falsy when childless, so test against None
            hastags = True
        # check if toolboxposition element already exists in this toolconfig file
        toolboxposelement = tooldocroot.find("toolboxposition")
        if toolboxposelement is not None:
            hastoolboxpos = True
if ( not ( hastags and hastoolboxpos ) ):
original = open( toolconffile, 'r' )
contents = original.readlines()
original.close()
# the new elements will be added directly below the root tool element
addelementsatposition = 1
# but what's on the first line? Root or not?
if ( contents[0].startswith("<?") ):
addelementsatposition = 2
newelements = []
if ( not hastoolboxpos ):
if ( toolconffile in tooldict ):
for attributes in tooldict[toolconffile]:
# create toolboxposition element
sectionelement = ET.Element("toolboxposition")
sectionelement.attrib = attributes
sectionelement.tail = "\n "
newelements.append( ET.tostring(sectionelement, 'utf-8') )
if ( not hastags ):
# create empty tags element
newelements.append( "<tags/>\n " )
contents = (
contents[ 0:addelementsatposition ] +
newelements +
contents[ addelementsatposition: ] )
            # overwrite the original toolconfig file with the updated contents
newtoolconffile = open ( toolconffile, 'w' )
newtoolconffile.writelines( contents )
newtoolconffile.close()
def addToToolDict(tool, sectionname, sectionindex, toolindex, currentlabel):
toolfile = tool.attrib["file"]
realtoolfile = os.path.join(os.getcwd(), "tools", toolfile)
toolxmlfile = ET.parse(realtoolfile)
localroot = toolxmlfile.getroot()
# define attributes for the toolboxposition xml-tag
attribdict = {}
if ( sectionname ):
attribdict[ "section" ] = sectionname
if ( currentlabel ):
attribdict[ "label" ] = currentlabel
if ( sectionindex ):
attribdict[ "sectionorder" ] = str(sectionindex)
if ( toolindex ):
attribdict[ "order" ] = str(toolindex)
tooldict[ realtoolfile ].append(attribdict)
# Build a list of all toolconf xml files in the tools directory
def getfnl(startdir):
filenamelist = []
for root, dirs, files in os.walk(startdir):
for fn in files:
fullfn = os.path.join(root, fn)
if fn.endswith('.xml'):
try:
doc = ET.parse(fullfn)
except:
print "Oops, bad xml in: ", fullfn
raise
rootelement = doc.getroot()
# here we check if this xml file actually is a tool conf xml!
if rootelement.tag == 'tool':
filenamelist.append(fullfn)
return filenamelist
if __name__ == "__main__":
main()
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/scripts/extract_toolbox_sections.py
|
Python
|
gpl-3.0
| 5,474
|
[
"Galaxy"
] |
7bcb98922bf06ad64817ced054b4d835b78c27d27866ce9b7710fab477f36e15
|
import vtk, qt, slicer
from math import sqrt, cos, sin
from .CircleEffect import AbstractCircleEffect
class AbstractPointToPointEffect(AbstractCircleEffect):
def __init__(self, sliceWidget):
        # keep a flag since events such as sliceNode modified
        # may come during superclass construction, which will
        # invoke our processEvent method
self.initialized = False
AbstractCircleEffect.__init__(self, sliceWidget)
# interaction state variables
self.actionState = None
# initialization
self.xyPoints = vtk.vtkPoints()
self.rasPoints = vtk.vtkPoints()
self.transform = vtk.vtkThinPlateSplineTransform()
self.transform.SetBasisToR()
self.transform.Inverse()
self.auxNodes = []
self.initialized = True
def cleanup(self):
"""
call superclass to clean up actor
"""
AbstractCircleEffect.cleanup(self)
def processEvent(self, caller=None, event=None):
"""
handle events from the render window interactor
"""
if not self.initialized:
return
AbstractCircleEffect.processEvent(self, caller, event)
if event == "LeftButtonReleaseEvent":
if self.actionState is None:
xy = self.interactor.GetEventPosition()
self.addPoint(self.xyToRAS(xy))
self.initTransform()
self.actionState = "placingPoint"
elif self.actionState == "placingPoint":
self.removeAuxNodes()
self.actionState = None
elif event == "MouseMoveEvent":
if self.actionState == "placingPoint":
p = vtk.vtkPoints()
xy = self.interactor.GetEventPosition()
p.InsertNextPoint(self.xyToRAS(xy))
self.transform.SetTargetLandmarks(p)
elif event == 'RightButtonPressEvent' or (event == 'KeyPressEvent' and self.interactor.GetKeySym()=='Escape'):
self.removeAuxNodes()
self.resetPoints()
self.actionState = None
# self.positionActors()
self.sliceView.scheduleRender()
def removeAuxNodes(self):
while len(self.auxNodes):
slicer.mrmlScene.RemoveNode(self.auxNodes.pop())
def initTransform(self):
sourceFiducialNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsFiducialNode')
sourceFiducialNode.GetDisplayNode().SetVisibility(0)
sourceFiducialNode.SetControlPointPositionsWorld(self.rasPoints)
self.transform.SetSourceLandmarks(self.rasPoints)
self.transform.SetTargetLandmarks(self.rasPoints)
transformNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLTransformNode')
transformNode.SetAndObserveTransformFromParent(self.transform)
transformNode.CreateDefaultDisplayNodes()
transformNode.GetDisplayNode().SetVisibility(1)
transformNode.GetDisplayNode().SetVisibility2D(1)
transformNode.GetDisplayNode().SetVisibility3D(0)
transformNode.GetDisplayNode().SetAndObserveGlyphPointsNode(sourceFiducialNode)
self.auxNodes.append(sourceFiducialNode)
self.auxNodes.append(transformNode)
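        # Note (illustrative): source and target landmarks start identical,
        # so the thin plate spline is initially an identity warp;
        # MouseMoveEvent later replaces the target landmarks with the
        # current cursor position to drag the warp interactively.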
def resetPoints(self):
"""return the points to initial state with no points"""
self.xyPoints.Reset()
self.rasPoints.Reset()
def addPoint(self,ras):
"""add a world space point"""
p = self.rasPoints.InsertNextPoint(ras)
|
andreashorn/lead_dbs
|
ext_libs/SlicerNetstim/WarpDrive/WarpDriveLib/Effects/PointToPointEffect.py
|
Python
|
gpl-3.0
| 3,198
|
[
"VTK"
] |
735043f1d7c24807d6d6d9e72722e4be71ae3f24b048ea26bf9f49c8347e36c0
|
from __future__ import print_function
from .objs import *
from .axes import *
__displayname__ = 'Visualization'
def hideAll():
"""This hides all sources/filters on the pipeline from the current view"""
import paraview.simple as pvs
for f in pvs.GetSources().values():
pvs.Hide(f)
return None
hideAll.__displayname__ = 'Hide All'
hideAll.__category__ = 'macro'
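# Usage sketch (illustrative, inside ParaView's Python shell):
#   from pvmacros.vis import hideAll
#   hideAll()  # every pipeline source disappears from the active render view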
|
banesullivan/ParaViewGeophysics
|
pvmacros/vis/__init__.py
|
Python
|
bsd-3-clause
| 390
|
[
"ParaView"
] |
7b2d2bd2e2b45f3d034f549d3c6f6353793ca7ae762c597f739fcccf7e88ace9
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
class Visitor:
def defaultVisit(self, node):
raise Exception, "INTERNAL ERROR: no visitor for node type `%s'"% (
node.__class__.__name__)
def visitTranslationUnit(self, tu):
for cxxInc in tu.cxxIncludes:
cxxInc.accept(self)
for inc in tu.includes:
inc.accept(self)
for su in tu.structsAndUnions:
su.accept(self)
for using in tu.builtinUsing:
using.accept(self)
for using in tu.using:
using.accept(self)
if tu.protocol:
tu.protocol.accept(self)
def visitCxxInclude(self, inc):
pass
def visitInclude(self, inc):
# Note: we don't visit the child AST here, because that needs delicate
# and pass-specific handling
pass
def visitStructDecl(self, struct):
for f in struct.fields:
f.accept(self)
def visitStructField(self, field):
field.typespec.accept(self)
def visitUnionDecl(self, union):
for t in union.components:
t.accept(self)
def visitUsingStmt(self, using):
pass
def visitProtocol(self, p):
for namespace in p.namespaces:
namespace.accept(self)
for spawns in p.spawnsStmts:
spawns.accept(self)
for bridges in p.bridgesStmts:
bridges.accept(self)
for opens in p.opensStmts:
opens.accept(self)
for mgr in p.managers:
mgr.accept(self)
for managed in p.managesStmts:
managed.accept(self)
for msgDecl in p.messageDecls:
msgDecl.accept(self)
for transitionStmt in p.transitionStmts:
transitionStmt.accept(self)
def visitNamespace(self, ns):
pass
def visitSpawnsStmt(self, spawns):
pass
def visitBridgesStmt(self, bridges):
pass
def visitOpensStmt(self, opens):
pass
def visitManager(self, mgr):
pass
def visitManagesStmt(self, mgs):
pass
def visitMessageDecl(self, md):
for inParam in md.inParams:
inParam.accept(self)
for outParam in md.outParams:
outParam.accept(self)
def visitTransitionStmt(self, ts):
ts.state.accept(self)
for trans in ts.transitions:
trans.accept(self)
def visitTransition(self, t):
for toState in t.toStates:
toState.accept(self)
def visitState(self, s):
pass
def visitParam(self, decl):
pass
def visitTypeSpec(self, ts):
pass
def visitDecl(self, d):
pass
class Loc:
def __init__(self, filename='<??>', lineno=0):
assert filename
self.filename = filename
self.lineno = lineno
def __repr__(self):
return '%r:%r'% (self.filename, self.lineno)
def __str__(self):
return '%s:%s'% (self.filename, self.lineno)
Loc.NONE = Loc(filename='<??>', lineno=0)
class _struct:
pass
class Node:
def __init__(self, loc=Loc.NONE):
self.loc = loc
def accept(self, visitor):
visit = getattr(visitor, 'visit'+ self.__class__.__name__, None)
if visit is None:
return getattr(visitor, 'defaultVisit')(self)
return visit(self)
def addAttrs(self, attrsName):
if not hasattr(self, attrsName):
setattr(self, attrsName, _struct())
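# Dispatch illustration: node.accept(v) resolves 'visit' + type(node).__name__
# on the visitor, e.g. a StructDecl routes to v.visitStructDecl, and any node
# without a specific handler falls back to v.defaultVisit.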
class NamespacedNode(Node):
def __init__(self, loc=Loc.NONE, name=None):
Node.__init__(self, loc)
self.name = name
self.namespaces = [ ]
def addOuterNamespace(self, namespace):
self.namespaces.insert(0, namespace)
def qname(self):
return QualifiedId(self.loc, self.name,
[ ns.name for ns in self.namespaces ])
class TranslationUnit(NamespacedNode):
def __init__(self, type, name):
NamespacedNode.__init__(self, name=name)
self.filetype = type
self.filename = None
self.cxxIncludes = [ ]
self.includes = [ ]
self.builtinUsing = [ ]
self.using = [ ]
self.structsAndUnions = [ ]
self.protocol = None
def addCxxInclude(self, cxxInclude): self.cxxIncludes.append(cxxInclude)
def addInclude(self, inc): self.includes.append(inc)
def addStructDecl(self, struct): self.structsAndUnions.append(struct)
def addUnionDecl(self, union): self.structsAndUnions.append(union)
def addUsingStmt(self, using): self.using.append(using)
def setProtocol(self, protocol): self.protocol = protocol
class CxxInclude(Node):
def __init__(self, loc, cxxFile):
Node.__init__(self, loc)
self.file = cxxFile
class Include(Node):
def __init__(self, loc, type, name):
Node.__init__(self, loc)
suffix = 'ipdl'
if type == 'header':
suffix += 'h'
self.file = "%s.%s" % (name, suffix)
class UsingStmt(Node):
def __init__(self, loc, cxxTypeSpec):
Node.__init__(self, loc)
self.type = cxxTypeSpec
# "singletons"
class ASYNC:
pretty = 'async'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def __str__(cls): return cls.pretty
class RPC:
pretty = 'rpc'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def __str__(cls): return cls.pretty
class SYNC:
pretty = 'sync'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def __str__(cls): return cls.pretty
class INOUT:
pretty = 'inout'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def __str__(cls): return cls.pretty
class IN:
pretty = 'in'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def __str__(cls): return cls.pretty
@staticmethod
    def prettySS(ss): return _prettyTable['in'][ss.pretty]
class OUT:
pretty = 'out'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def __str__(cls): return cls.pretty
@staticmethod
def prettySS(ss): return _prettyTable['out'][ss.pretty]
_prettyTable = {
IN : { 'async': 'AsyncRecv',
'sync': 'SyncRecv',
'rpc': 'RpcAnswer' },
OUT : { 'async': 'AsyncSend',
'sync': 'SyncSend',
'rpc': 'RpcCall' }
# inout doesn't make sense here
}
class Namespace(Node):
def __init__(self, loc, namespace):
Node.__init__(self, loc)
self.name = namespace
class Protocol(NamespacedNode):
def __init__(self, loc):
NamespacedNode.__init__(self, loc)
self.sendSemantics = ASYNC
self.spawnsStmts = [ ]
self.bridgesStmts = [ ]
self.opensStmts = [ ]
self.managers = [ ]
self.managesStmts = [ ]
self.messageDecls = [ ]
self.transitionStmts = [ ]
self.startStates = [ ]
class StructField(Node):
def __init__(self, loc, type, name):
Node.__init__(self, loc)
self.typespec = type
self.name = name
class StructDecl(NamespacedNode):
def __init__(self, loc, name, fields):
NamespacedNode.__init__(self, loc, name)
self.fields = fields
class UnionDecl(NamespacedNode):
def __init__(self, loc, name, components):
NamespacedNode.__init__(self, loc, name)
self.components = components
class SpawnsStmt(Node):
def __init__(self, loc, side, proto, spawnedAs):
Node.__init__(self, loc)
self.side = side
self.proto = proto
self.spawnedAs = spawnedAs
class BridgesStmt(Node):
def __init__(self, loc, parentSide, childSide):
Node.__init__(self, loc)
self.parentSide = parentSide
self.childSide = childSide
class OpensStmt(Node):
def __init__(self, loc, side, proto):
Node.__init__(self, loc)
self.side = side
self.proto = proto
class Manager(Node):
def __init__(self, loc, managerName):
Node.__init__(self, loc)
self.name = managerName
class ManagesStmt(Node):
def __init__(self, loc, managedName):
Node.__init__(self, loc)
self.name = managedName
class MessageDecl(Node):
def __init__(self, loc):
Node.__init__(self, loc)
self.name = None
self.sendSemantics = ASYNC
self.direction = None
self.inParams = [ ]
self.outParams = [ ]
self.compress = ''
def addInParams(self, inParamsList):
self.inParams += inParamsList
def addOutParams(self, outParamsList):
self.outParams += outParamsList
def hasReply(self):
return self.sendSemantics is SYNC or self.sendSemantics is RPC
class Transition(Node):
def __init__(self, loc, trigger, msg, toStates):
Node.__init__(self, loc)
self.trigger = trigger
self.msg = msg
self.toStates = toStates
    def __cmp__(self, o):
        c = cmp(self.msg, o.msg)
        if c: return c
        c = cmp(self.trigger, o.trigger)
        if c: return c
        return 0  # equal when both msg and trigger compare equal
def __hash__(self): return hash(str(self))
def __str__(self): return '%s %s'% (self.trigger, self.msg)
@staticmethod
def nameToTrigger(name):
return { 'send': SEND, 'recv': RECV, 'call': CALL, 'answer': ANSWER }[name]
Transition.NULL = Transition(Loc.NONE, None, None, [ ])
class TransitionStmt(Node):
def __init__(self, loc, state, transitions):
Node.__init__(self, loc)
self.state = state
self.transitions = transitions
@staticmethod
def makeNullStmt(state):
return TransitionStmt(Loc.NONE, state, [ Transition.NULL ])
class SEND:
pretty = 'send'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def direction(cls): return OUT
class RECV:
pretty = 'recv'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def direction(cls): return IN
class CALL:
pretty = 'call'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def direction(cls): return OUT
class ANSWER:
pretty = 'answer'
@classmethod
def __hash__(cls): return hash(cls.pretty)
@classmethod
def direction(cls): return IN
class State(Node):
def __init__(self, loc, name, start=False):
Node.__init__(self, loc)
self.name = name
self.start = start
def __eq__(self, o):
return (isinstance(o, State)
and o.name == self.name
and o.start == self.start)
def __hash__(self):
return hash(repr(self))
def __ne__(self, o):
return not (self == o)
def __repr__(self): return '<State %r start=%r>'% (self.name, self.start)
def __str__(self): return '<State %s start=%s>'% (self.name, self.start)
State.ANY = State(Loc.NONE, '[any]', start=True)
State.DEAD = State(Loc.NONE, '[dead]', start=False)
State.DYING = State(Loc.NONE, '[dying]', start=False)
class Param(Node):
def __init__(self, loc, typespec, name):
Node.__init__(self, loc)
self.name = name
self.typespec = typespec
class TypeSpec(Node):
def __init__(self, loc, spec, state=None, array=0, nullable=0,
myChmod=None, otherChmod=None):
Node.__init__(self, loc)
self.spec = spec # QualifiedId
self.state = state # None or State
self.array = array # bool
self.nullable = nullable # bool
self.myChmod = myChmod # None or string
self.otherChmod = otherChmod # None or string
def basename(self):
return self.spec.baseid
def isActor(self):
return self.state is not None
def __str__(self): return str(self.spec)
class QualifiedId: # FIXME inherit from node?
def __init__(self, loc, baseid, quals=[ ]):
assert isinstance(baseid, str)
for qual in quals: assert isinstance(qual, str)
self.loc = loc
self.baseid = baseid
self.quals = quals
def qualify(self, id):
self.quals.append(self.baseid)
self.baseid = id
def __str__(self):
if 0 == len(self.quals):
return self.baseid
return '::'.join(self.quals) +'::'+ self.baseid
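# Example (illustrative): QualifiedId(loc, 'PFoo', ['mozilla', 'ipc']) prints
# as 'mozilla::ipc::PFoo'; calling .qualify('Child') shifts the old baseid into
# quals, giving 'mozilla::ipc::PFoo::Child'.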
# added by type checking passes
class Decl(Node):
def __init__(self, loc):
Node.__init__(self, loc)
self.progname = None # what the programmer typed, if relevant
self.shortname = None # shortest way to refer to this decl
self.fullname = None # full way to refer to this decl
self.loc = loc
self.type = None
self.scope = None
|
wilebeast/FireFox-OS
|
B2G/gecko/ipc/ipdl/ipdl/ast.py
|
Python
|
apache-2.0
| 12,921
|
[
"VisIt"
] |
52363490252db6994c8610bf546dc6f40664e0a6472ea6b3a21beb2195be6836
|
"""Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, PurePoly, poly,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
nth_power_roots_poly,
cancel, reduced, groebner,
GroebnerBasis, is_zero_dimensional,
_torational_factor_list)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
OperationNotSupported,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
UnificationFailed,
RefinementFailed,
GeneratorsNeeded,
GeneratorsError,
PolynomialError,
CoercionFailed,
NotAlgebraic,
DomainError,
OptionError,
FlagError)
from sympy.polys.polyclasses import DMP
from sympy.polys.fields import field
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys.orderings import lex, grlex, grevlex
from sympy import (
S, Integer, Rational, Float, Mul, Symbol, symbols, sqrt, Piecewise,
exp, sin, tanh, expand, oo, I, pi, re, im, RootOf, Eq, Tuple, Expr)
from sympy.core.basic import _aresame
from sympy.core.compatibility import iterable
from sympy.core.mul import _keep_coeff
from sympy.utilities.pytest import raises, XFAIL
x, y, z, p, q, r, s, t, u, v, w, a, b, c, d, e = symbols(
'x,y,z,p,q,r,s,t,u,v,w,a,b,c,d,e')
def _epsilon_eq(a, b):
for x, y in zip(a, b):
if abs(x - y) > 1e-10:
return False
return True
def _strict_eq(a, b):
if type(a) == type(b):
if iterable(a):
if len(a) == len(b):
return all(_strict_eq(c, d) for c, d in zip(a, b))
else:
return False
else:
return isinstance(a, Poly) and a.eq(b, strict=True)
else:
return False
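# Illustration: _epsilon_eq([0.6, 0.4], [0.6 + 1e-12, 0.4]) is True, while
# _strict_eq(Poly(x), Poly(x, domain='QQ')) is False because strict equality
# also compares the coefficient domains (cf. test_Poly__eq__ below).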
def test_Poly_from_dict():
K = FF(3)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{0: 1, 1: 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{(0,): 1, (1,): 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict({(0, 0): 1, (1, 1): 2}, gens=(
x, y), domain=K).rep == DMP([[K(2), K(0)], [K(1)]], K)
assert Poly.from_dict({0: 1, 1: 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict({(1,): sin(y)}, gens=x, composite=False) == \
Poly(sin(y)*x, x, domain='EX')
assert Poly.from_dict({(1,): y}, gens=x, composite=False) == \
Poly(y*x, x, domain='EX')
assert Poly.from_dict({(1, 1): 1}, gens=(x, y), composite=False) == \
Poly(x*y, x, y, domain='ZZ')
assert Poly.from_dict({(1, 0): y}, gens=(x, z), composite=False) == \
Poly(y*x, x, z, domain='EX')
def test_Poly_from_list():
K = FF(3)
assert Poly.from_list([2, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_list([5, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_list([2, 1], gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_list([2, 1], gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_list([2, 1], gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_list([2, 1], gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_list([0, 1.0], gens=x).rep == DMP([RR(1.0)], RR)
assert Poly.from_list([1.0, 0], gens=x).rep == DMP([RR(1.0), RR(0.0)], RR)
raises(MultivariatePolynomialError, lambda: Poly.from_list([[]], gens=(x, y)))
def test_Poly_from_poly():
f = Poly(x + 7, x, domain=ZZ)
g = Poly(x + 2, x, modulus=3)
h = Poly(x + y, x, y, domain=ZZ)
K = FF(3)
assert Poly.from_poly(f) == f
assert Poly.from_poly(f, domain=K).rep == DMP([K(1), K(1)], K)
assert Poly.from_poly(f, domain=ZZ).rep == DMP([1, 7], ZZ)
assert Poly.from_poly(f, domain=QQ).rep == DMP([1, 7], QQ)
assert Poly.from_poly(f, gens=x) == f
assert Poly.from_poly(f, gens=x, domain=K).rep == DMP([K(1), K(1)], K)
assert Poly.from_poly(f, gens=x, domain=ZZ).rep == DMP([1, 7], ZZ)
assert Poly.from_poly(f, gens=x, domain=QQ).rep == DMP([1, 7], QQ)
assert Poly.from_poly(f, gens=y) == Poly(x + 7, y, domain='ZZ[x]')
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=K))
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=ZZ))
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=QQ))
assert Poly.from_poly(f, gens=(x, y)) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(
f, gens=(x, y), domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(
f, gens=(x, y), domain=QQ) == Poly(x + 7, x, y, domain='QQ')
assert Poly.from_poly(
f, gens=(x, y), modulus=3) == Poly(x + 7, x, y, domain='FF(3)')
K = FF(2)
assert Poly.from_poly(g) == g
assert Poly.from_poly(g, domain=ZZ).rep == DMP([1, -1], ZZ)
raises(CoercionFailed, lambda: Poly.from_poly(g, domain=QQ))
assert Poly.from_poly(g, domain=K).rep == DMP([K(1), K(0)], K)
assert Poly.from_poly(g, gens=x) == g
assert Poly.from_poly(g, gens=x, domain=ZZ).rep == DMP([1, -1], ZZ)
raises(CoercionFailed, lambda: Poly.from_poly(g, gens=x, domain=QQ))
assert Poly.from_poly(g, gens=x, domain=K).rep == DMP([K(1), K(0)], K)
K = FF(3)
assert Poly.from_poly(h) == h
assert Poly.from_poly(
h, domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(h, domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(h, gens=x) == Poly(x + y, x, domain=ZZ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=ZZ))
assert Poly.from_poly(
h, gens=x, domain=ZZ[y]) == Poly(x + y, x, domain=ZZ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=QQ))
assert Poly.from_poly(
h, gens=x, domain=QQ[y]) == Poly(x + y, x, domain=QQ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, modulus=3))
assert Poly.from_poly(h, gens=y) == Poly(x + y, y, domain=ZZ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=ZZ))
assert Poly.from_poly(
h, gens=y, domain=ZZ[x]) == Poly(x + y, y, domain=ZZ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=QQ))
assert Poly.from_poly(
h, gens=y, domain=QQ[x]) == Poly(x + y, y, domain=QQ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, modulus=3))
assert Poly.from_poly(h, gens=(x, y)) == h
assert Poly.from_poly(
h, gens=(x, y), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(x, y), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(x, y), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(
h, gens=(y, x)).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(y, x), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(y, x), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(y, x), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(
h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
def test_Poly_from_expr():
raises(GeneratorsNeeded, lambda: Poly.from_expr(S(0)))
raises(GeneratorsNeeded, lambda: Poly.from_expr(S(7)))
F3 = FF(3)
assert Poly.from_expr(x + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(y + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(x + 5, x, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(y + 5, y, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(x + y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
assert Poly.from_expr(x + y, x, y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
assert Poly.from_expr(x + 5).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, y).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, y, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x, y, domain=ZZ).rep == DMP([[1], [5]], ZZ)
assert Poly.from_expr(y + 5, x, y, domain=ZZ).rep == DMP([[1, 5]], ZZ)
def test_Poly__new__():
raises(GeneratorsError, lambda: Poly(x + 1, x, x))
raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[x]))
raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[y]))
raises(OptionError, lambda: Poly(x, x, symmetric=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, domain=QQ))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, gaussian=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, gaussian=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=[sqrt(3)]))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=[sqrt(3)]))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=False))
raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=False))
raises(NotImplementedError, lambda: Poly(x + 1, x, modulus=3, order='grlex'))
raises(NotImplementedError, lambda: Poly(x + 1, x, order='grlex'))
raises(GeneratorsNeeded, lambda: Poly({1: 2, 0: 1}))
raises(GeneratorsNeeded, lambda: Poly([2, 1]))
raises(GeneratorsNeeded, lambda: Poly((2, 1)))
raises(GeneratorsNeeded, lambda: Poly(1))
f = a*x**2 + b*x + c
assert Poly({2: a, 1: b, 0: c}, x) == f
assert Poly(iter([a, b, c]), x) == f
assert Poly([a, b, c], x) == f
assert Poly((a, b, c), x) == f
f = Poly({}, x, y, z)
assert f.gens == (x, y, z) and f.as_expr() == 0
assert Poly(Poly(a*x + b*y, x, y), x) == Poly(a*x + b*y, x)
assert Poly(3*x**2 + 2*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
assert Poly(3*x**2 + 2*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
assert Poly(3*x**2 + 2*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
raises(CoercionFailed, lambda: Poly(3*x**2/5 + 2*x/5 + 1, domain='ZZ'))
assert Poly(
3*x**2/5 + 2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1]
assert _epsilon_eq(
Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(), [0.6, 0.4, 1.0])
assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
assert Poly(
3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
raises(CoercionFailed, lambda: Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ'))
assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1]
assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0]
assert Poly({(2, 1): 1, (1, 2): 2, (1, 1): 3}, x, y) == \
Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y)
assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I)
f = 3*x**5 - x**4 + x**3 - x** 2 + 65538
assert Poly(f, x, modulus=65537, symmetric=True) == \
Poly(3*x**5 - x**4 + x**3 - x** 2 + 1, x, modulus=65537,
symmetric=True)
assert Poly(f, x, modulus=65537, symmetric=False) == \
Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x** 2 + 1, x,
modulus=65537, symmetric=False)
assert Poly(x**2 + x + 1.0).get_domain() == RR
def test_Poly__args():
assert Poly(x**2 + 1).args == (x**2 + 1,)
def test_Poly__gens():
assert Poly((x - p)*(x - q), x).gens == (x,)
assert Poly((x - p)*(x - q), p).gens == (p,)
assert Poly((x - p)*(x - q), q).gens == (q,)
assert Poly((x - p)*(x - q), x, p).gens == (x, p)
assert Poly((x - p)*(x - q), x, q).gens == (x, q)
assert Poly((x - p)*(x - q), x, p, q).gens == (x, p, q)
assert Poly((x - p)*(x - q), p, x, q).gens == (p, x, q)
assert Poly((x - p)*(x - q), p, q, x).gens == (p, q, x)
assert Poly((x - p)*(x - q)).gens == (x, p, q)
assert Poly((x - p)*(x - q), sort='x > p > q').gens == (x, p, q)
assert Poly((x - p)*(x - q), sort='p > x > q').gens == (p, x, q)
assert Poly((x - p)*(x - q), sort='p > q > x').gens == (p, q, x)
assert Poly((x - p)*(x - q), x, p, q, sort='p > q > x').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='x').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='p').gens == (p, x, q)
assert Poly((x - p)*(x - q), wrt='q').gens == (q, x, p)
assert Poly((x - p)*(x - q), wrt=x).gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt=p).gens == (p, x, q)
assert Poly((x - p)*(x - q), wrt=q).gens == (q, x, p)
assert Poly((x - p)*(x - q), x, p, q, wrt='p').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='p', sort='q > x').gens == (p, q, x)
assert Poly((x - p)*(x - q), wrt='q', sort='p > x').gens == (q, p, x)
def test_Poly_zero():
assert Poly(x).zero == Poly(0, x, domain=ZZ)
assert Poly(x/2).zero == Poly(0, x, domain=QQ)
def test_Poly_one():
assert Poly(x).one == Poly(1, x, domain=ZZ)
assert Poly(x/2).one == Poly(1, x, domain=QQ)
def test_Poly__unify():
raises(UnificationFailed, lambda: Poly(x)._unify(y))
F3 = FF(3)
F5 = FF(5)
assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=3))[2:] == (
DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=5))[2:] == (
DMP([[F5(1)], []], F5), DMP([[F5(1), F5(0)]], F5))
assert Poly(y, x, y)._unify(Poly(x, x, modulus=3))[2:] == (DMP([[F3(1), F3(0)]], F3), DMP([[F3(1)], []], F3))
assert Poly(x, x, modulus=3)._unify(Poly(y, x, y))[2:] == (DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
assert Poly(x + 1, x)._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
F, A, B = field("a,b", ZZ)
assert Poly(a*x, x, domain='ZZ[a]')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
(DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))
assert Poly(a*x, x, domain='ZZ(a)')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
(DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))
raises(CoercionFailed, lambda: Poly(Poly(x**2 + x**2*z, y, field=True), domain='ZZ(x)'))
f = Poly(t**2 + t/3 + x, t, domain='QQ(x)')
g = Poly(t**2 + t/3 + x, t, domain='QQ[x]')
assert f._unify(g)[2:] == (f.rep, f.rep)
def test_Poly_free_symbols():
assert Poly(x**2 + 1).free_symbols == set([x])
assert Poly(x**2 + y*z).free_symbols == set([x, y, z])
assert Poly(x**2 + y*z, x).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z)).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z), x).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([x, y, z])
def test_PurePoly_free_symbols():
assert PurePoly(x**2 + 1).free_symbols == set([])
assert PurePoly(x**2 + y*z).free_symbols == set([])
assert PurePoly(x**2 + y*z, x).free_symbols == set([y, z])
assert PurePoly(x**2 + sin(y*z)).free_symbols == set([])
assert PurePoly(x**2 + sin(y*z), x).free_symbols == set([y, z])
assert PurePoly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([y, z])
def test_Poly__eq__():
assert (Poly(x, x) == Poly(x, x)) is True
assert (Poly(x, x, domain=QQ) == Poly(x, x)) is True
assert (Poly(x, x) == Poly(x, x, domain=QQ)) is True
assert (Poly(x, x, domain=ZZ[a]) == Poly(x, x)) is True
assert (Poly(x, x) == Poly(x, x, domain=ZZ[a])) is True
assert (Poly(x*y, x, y) == Poly(x, x)) is False
assert (Poly(x, x, y) == Poly(x, x)) is False
assert (Poly(x, x) == Poly(x, x, y)) is False
assert (Poly(x**2 + 1, x) == Poly(y**2 + 1, y)) is False
assert (Poly(y**2 + 1, y) == Poly(x**2 + 1, x)) is False
f = Poly(x, x, domain=ZZ)
g = Poly(x, x, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
t0 = Symbol('t0')
f = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='QQ[x,t0]')
g = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='ZZ(x,t0)')
assert (f == g) is True
def test_PurePoly__eq__():
assert (PurePoly(x, x) == PurePoly(x, x)) is True
assert (PurePoly(x, x, domain=QQ) == PurePoly(x, x)) is True
assert (PurePoly(x, x) == PurePoly(x, x, domain=QQ)) is True
assert (PurePoly(x, x, domain=ZZ[a]) == PurePoly(x, x)) is True
assert (PurePoly(x, x) == PurePoly(x, x, domain=ZZ[a])) is True
assert (PurePoly(x*y, x, y) == PurePoly(x, x)) is False
assert (PurePoly(x, x, y) == PurePoly(x, x)) is False
assert (PurePoly(x, x) == PurePoly(x, x, y)) is False
assert (PurePoly(x**2 + 1, x) == PurePoly(y**2 + 1, y)) is True
assert (PurePoly(y**2 + 1, y) == PurePoly(x**2 + 1, x)) is True
f = PurePoly(x, x, domain=ZZ)
g = PurePoly(x, x, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
f = PurePoly(x, x, domain=ZZ)
g = PurePoly(y, y, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
def test_PurePoly_Poly():
assert isinstance(PurePoly(Poly(x**2 + 1)), PurePoly) is True
assert isinstance(Poly(PurePoly(x**2 + 1)), Poly) is True
def test_Poly_get_domain():
assert Poly(2*x).get_domain() == ZZ
assert Poly(2*x, domain='ZZ').get_domain() == ZZ
assert Poly(2*x, domain='QQ').get_domain() == QQ
assert Poly(x/2).get_domain() == QQ
raises(CoercionFailed, lambda: Poly(x/2, domain='ZZ'))
assert Poly(x/2, domain='QQ').get_domain() == QQ
assert Poly(0.2*x).get_domain() == RR
def test_Poly_set_domain():
assert Poly(2*x + 1).set_domain(ZZ) == Poly(2*x + 1)
assert Poly(2*x + 1).set_domain('ZZ') == Poly(2*x + 1)
assert Poly(2*x + 1).set_domain(QQ) == Poly(2*x + 1, domain='QQ')
assert Poly(2*x + 1).set_domain('QQ') == Poly(2*x + 1, domain='QQ')
assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1)
assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10)
raises(CoercionFailed, lambda: Poly(x/2 + 1).set_domain(ZZ))
raises(CoercionFailed, lambda: Poly(x + 1, modulus=2).set_domain(QQ))
raises(GeneratorsError, lambda: Poly(x*y, x, y).set_domain(ZZ[y]))
def test_Poly_get_modulus():
assert Poly(x**2 + 1, modulus=2).get_modulus() == 2
raises(PolynomialError, lambda: Poly(x**2 + 1).get_modulus())
def test_Poly_set_modulus():
assert Poly(
x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7)
assert Poly(
x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2)
assert Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2)
raises(CoercionFailed, lambda: Poly(x/2 + 1).set_modulus(2))
def test_Poly_add_ground():
assert Poly(x + 1).add_ground(2) == Poly(x + 3)
def test_Poly_sub_ground():
assert Poly(x + 1).sub_ground(2) == Poly(x - 1)
def test_Poly_mul_ground():
assert Poly(x + 1).mul_ground(2) == Poly(2*x + 2)
def test_Poly_quo_ground():
assert Poly(2*x + 4).quo_ground(2) == Poly(x + 2)
assert Poly(2*x + 3).quo_ground(2) == Poly(x + 1)
def test_Poly_exquo_ground():
assert Poly(2*x + 4).exquo_ground(2) == Poly(x + 2)
raises(ExactQuotientFailed, lambda: Poly(2*x + 3).exquo_ground(2))
def test_Poly_abs():
assert Poly(-x + 1, x).abs() == abs(Poly(-x + 1, x)) == Poly(x + 1, x)
def test_Poly_neg():
assert Poly(-x + 1, x).neg() == -Poly(-x + 1, x) == Poly(x - 1, x)
def test_Poly_add():
assert Poly(0, x).add(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) + Poly(0, x) == Poly(0, x)
assert Poly(1, x).add(Poly(0, x)) == Poly(1, x)
assert Poly(1, x, y) + Poly(0, x) == Poly(1, x, y)
assert Poly(0, x).add(Poly(1, x, y)) == Poly(1, x, y)
assert Poly(0, x, y) + Poly(1, x, y) == Poly(1, x, y)
assert Poly(1, x) + x == Poly(x + 1, x)
assert Poly(1, x) + sin(x) == 1 + sin(x)
assert Poly(x, x) + 1 == Poly(x + 1, x)
assert 1 + Poly(x, x) == Poly(x + 1, x)
def test_Poly_sub():
assert Poly(0, x).sub(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) - Poly(0, x) == Poly(0, x)
assert Poly(1, x).sub(Poly(0, x)) == Poly(1, x)
assert Poly(1, x, y) - Poly(0, x) == Poly(1, x, y)
assert Poly(0, x).sub(Poly(1, x, y)) == Poly(-1, x, y)
assert Poly(0, x, y) - Poly(1, x, y) == Poly(-1, x, y)
assert Poly(1, x) - x == Poly(1 - x, x)
assert Poly(1, x) - sin(x) == 1 - sin(x)
assert Poly(x, x) - 1 == Poly(x - 1, x)
assert 1 - Poly(x, x) == Poly(1 - x, x)
def test_Poly_mul():
assert Poly(0, x).mul(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) * Poly(0, x) == Poly(0, x)
assert Poly(2, x).mul(Poly(4, x)) == Poly(8, x)
assert Poly(2, x, y) * Poly(4, x) == Poly(8, x, y)
assert Poly(4, x).mul(Poly(2, x, y)) == Poly(8, x, y)
assert Poly(4, x, y) * Poly(2, x, y) == Poly(8, x, y)
assert Poly(1, x) * x == Poly(x, x)
assert Poly(1, x) * sin(x) == sin(x)
assert Poly(x, x) * 2 == Poly(2*x, x)
assert 2 * Poly(x, x) == Poly(2*x, x)
def test_Poly_sqr():
assert Poly(x*y, x, y).sqr() == Poly(x**2*y**2, x, y)
def test_Poly_pow():
assert Poly(x, x).pow(10) == Poly(x**10, x)
assert Poly(x, x).pow(Integer(10)) == Poly(x**10, x)
assert Poly(2*y, x, y).pow(4) == Poly(16*y**4, x, y)
assert Poly(2*y, x, y).pow(Integer(4)) == Poly(16*y**4, x, y)
assert Poly(7*x*y, x, y)**3 == Poly(343*x**3*y**3, x, y)
assert Poly(x*y + 1, x, y)**(-1) == (x*y + 1)**(-1)
assert Poly(x*y + 1, x, y)**x == (x*y + 1)**x
def test_Poly_divmod():
f, g = Poly(x**2), Poly(x)
q, r = g, Poly(0, x)
assert divmod(f, g) == (q, r)
assert f // g == q
assert f % g == r
assert divmod(f, x) == (q, r)
assert f // x == q
assert f % x == r
q, r = Poly(0, x), Poly(2, x)
assert divmod(2, g) == (q, r)
assert 2 // g == q
assert 2 % g == r
assert Poly(x)/Poly(x) == 1
assert Poly(x**2)/Poly(x) == x
assert Poly(x)/Poly(x**2) == 1/x
def test_Poly_eq_ne():
assert (Poly(x + y, x, y) == Poly(x + y, x, y)) is True
assert (Poly(x + y, x) == Poly(x + y, x, y)) is False
assert (Poly(x + y, x, y) == Poly(x + y, x)) is False
assert (Poly(x + y, x) == Poly(x + y, x)) is True
assert (Poly(x + y, y) == Poly(x + y, y)) is True
assert (Poly(x + y, x, y) == x + y) is True
assert (Poly(x + y, x) == x + y) is True
assert (Poly(x + y, y) == x + y) is True
assert (Poly(x + y, x, y) != Poly(x + y, x, y)) is False
assert (Poly(x + y, x) != Poly(x + y, x, y)) is True
assert (Poly(x + y, x, y) != Poly(x + y, x)) is True
assert (Poly(x + y, x) != Poly(x + y, x)) is False
assert (Poly(x + y, y) != Poly(x + y, y)) is False
assert (Poly(x + y, x, y) != x + y) is False
assert (Poly(x + y, x) != x + y) is False
assert (Poly(x + y, y) != x + y) is False
assert (Poly(x, x) == sin(x)) is False
assert (Poly(x, x) != sin(x)) is True
def test_Poly_nonzero():
    assert bool(Poly(0, x)) is False
    assert bool(Poly(1, x)) is True
def test_Poly_properties():
assert Poly(0, x).is_zero is True
assert Poly(1, x).is_zero is False
assert Poly(1, x).is_one is True
assert Poly(2, x).is_one is False
assert Poly(x - 1, x).is_sqf is True
assert Poly((x - 1)**2, x).is_sqf is False
assert Poly(x - 1, x).is_monic is True
assert Poly(2*x - 1, x).is_monic is False
assert Poly(3*x + 2, x).is_primitive is True
assert Poly(4*x + 2, x).is_primitive is False
assert Poly(1, x).is_ground is True
assert Poly(x, x).is_ground is False
assert Poly(x + y + z + 1).is_linear is True
assert Poly(x*y*z + 1).is_linear is False
assert Poly(x*y + z + 1).is_quadratic is True
assert Poly(x*y*z + 1).is_quadratic is False
assert Poly(x*y).is_monomial is True
assert Poly(x*y + 1).is_monomial is False
assert Poly(x**2 + x*y).is_homogeneous is True
assert Poly(x**3 + x*y).is_homogeneous is False
assert Poly(x).is_univariate is True
assert Poly(x*y).is_univariate is False
assert Poly(x*y).is_multivariate is True
assert Poly(x).is_multivariate is False
assert Poly(
x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1).is_cyclotomic is False
assert Poly(
x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1).is_cyclotomic is True
def test_Poly_is_irreducible():
assert Poly(x**2 + x + 1).is_irreducible is True
assert Poly(x**2 + 2*x + 1).is_irreducible is False
assert Poly(7*x + 3, modulus=11).is_irreducible is True
assert Poly(7*x**2 + 3*x + 1, modulus=11).is_irreducible is False
def test_Poly_subs():
assert Poly(x + 1).subs(x, 0) == 1
assert Poly(x + 1).subs(x, x) == Poly(x + 1)
assert Poly(x + 1).subs(x, y) == Poly(y + 1)
assert Poly(x*y, x).subs(y, x) == x**2
assert Poly(x*y, x).subs(x, y) == y**2
def test_Poly_replace():
assert Poly(x + 1).replace(x) == Poly(x + 1)
assert Poly(x + 1).replace(y) == Poly(y + 1)
raises(PolynomialError, lambda: Poly(x + y).replace(z))
assert Poly(x + 1).replace(x, x) == Poly(x + 1)
assert Poly(x + 1).replace(x, y) == Poly(y + 1)
assert Poly(x + y).replace(x, x) == Poly(x + y)
assert Poly(x + y).replace(x, z) == Poly(z + y, z, y)
assert Poly(x + y).replace(y, y) == Poly(x + y)
assert Poly(x + y).replace(y, z) == Poly(x + z, x, z)
raises(PolynomialError, lambda: Poly(x + y).replace(x, y))
raises(PolynomialError, lambda: Poly(x + y).replace(z, t))
assert Poly(x + y, x).replace(x, z) == Poly(z + y, z)
assert Poly(x + y, y).replace(y, z) == Poly(x + z, z)
raises(PolynomialError, lambda: Poly(x + y, x).replace(x, y))
raises(PolynomialError, lambda: Poly(x + y, y).replace(y, x))
def test_Poly_reorder():
raises(PolynomialError, lambda: Poly(x + y).reorder(x, z))
assert Poly(x + y, x, y).reorder(x, y) == Poly(x + y, x, y)
assert Poly(x + y, x, y).reorder(y, x) == Poly(x + y, y, x)
assert Poly(x + y, y, x).reorder(x, y) == Poly(x + y, x, y)
assert Poly(x + y, y, x).reorder(y, x) == Poly(x + y, y, x)
assert Poly(x + y, x, y).reorder(wrt=x) == Poly(x + y, x, y)
assert Poly(x + y, x, y).reorder(wrt=y) == Poly(x + y, y, x)
def test_Poly_ltrim():
f = Poly(y**2 + y*z**2, x, y, z).ltrim(y)
assert f.as_expr() == y**2 + y*z**2 and f.gens == (y, z)
raises(PolynomialError, lambda: Poly(x*y**2 + y**2, x, y).ltrim(y))
def test_Poly_has_only_gens():
assert Poly(x*y + 1, x, y, z).has_only_gens(x, y) is True
assert Poly(x*y + z, x, y, z).has_only_gens(x, y) is False
raises(GeneratorsError, lambda: Poly(x*y**2 + y**2, x, y).has_only_gens(t))
def test_Poly_to_ring():
assert Poly(2*x + 1, domain='ZZ').to_ring() == Poly(2*x + 1, domain='ZZ')
assert Poly(2*x + 1, domain='QQ').to_ring() == Poly(2*x + 1, domain='ZZ')
raises(CoercionFailed, lambda: Poly(x/2 + 1).to_ring())
raises(DomainError, lambda: Poly(2*x + 1, modulus=3).to_ring())
def test_Poly_to_field():
assert Poly(2*x + 1, domain='ZZ').to_field() == Poly(2*x + 1, domain='QQ')
assert Poly(2*x + 1, domain='QQ').to_field() == Poly(2*x + 1, domain='QQ')
assert Poly(x/2 + 1, domain='QQ').to_field() == Poly(x/2 + 1, domain='QQ')
assert Poly(2*x + 1, modulus=3).to_field() == Poly(2*x + 1, modulus=3)
assert Poly(2.0*x + 1.0).to_field() == Poly(2.0*x + 1.0)
def test_Poly_to_exact():
assert Poly(2*x).to_exact() == Poly(2*x)
assert Poly(x/2).to_exact() == Poly(x/2)
assert Poly(0.1*x).to_exact() == Poly(x/10)
def test_Poly_retract():
f = Poly(x**2 + 1, x, domain=QQ[y])
assert f.retract() == Poly(x**2 + 1, x, domain='ZZ')
assert f.retract(field=True) == Poly(x**2 + 1, x, domain='QQ')
assert Poly(0, x, y).retract() == Poly(0, x, y)
def test_Poly_slice():
f = Poly(x**3 + 2*x**2 + 3*x + 4)
assert f.slice(0, 0) == Poly(0, x)
assert f.slice(0, 1) == Poly(4, x)
assert f.slice(0, 2) == Poly(3*x + 4, x)
assert f.slice(0, 3) == Poly(2*x**2 + 3*x + 4, x)
assert f.slice(0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
assert f.slice(x, 0, 0) == Poly(0, x)
assert f.slice(x, 0, 1) == Poly(4, x)
assert f.slice(x, 0, 2) == Poly(3*x + 4, x)
assert f.slice(x, 0, 3) == Poly(2*x**2 + 3*x + 4, x)
assert f.slice(x, 0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
def test_Poly_coeffs():
assert Poly(0, x).coeffs() == [0]
assert Poly(1, x).coeffs() == [1]
assert Poly(2*x + 1, x).coeffs() == [2, 1]
assert Poly(7*x**2 + 2*x + 1, x).coeffs() == [7, 2, 1]
assert Poly(7*x**4 + 2*x + 1, x).coeffs() == [7, 2, 1]
assert Poly(x*y**7 + 2*x**2*y**3).coeffs('lex') == [2, 1]
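    # 'grlex' compares total degree first, so x*y**7 (degree 8) precedes x**2*y**3 (degree 5)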
assert Poly(x*y**7 + 2*x**2*y**3).coeffs('grlex') == [1, 2]
def test_Poly_monoms():
assert Poly(0, x).monoms() == [(0,)]
assert Poly(1, x).monoms() == [(0,)]
assert Poly(2*x + 1, x).monoms() == [(1,), (0,)]
assert Poly(7*x**2 + 2*x + 1, x).monoms() == [(2,), (1,), (0,)]
assert Poly(7*x**4 + 2*x + 1, x).monoms() == [(4,), (1,), (0,)]
assert Poly(x*y**7 + 2*x**2*y**3).monoms('lex') == [(2, 3), (1, 7)]
assert Poly(x*y**7 + 2*x**2*y**3).monoms('grlex') == [(1, 7), (2, 3)]
def test_Poly_terms():
assert Poly(0, x).terms() == [((0,), 0)]
assert Poly(1, x).terms() == [((0,), 1)]
assert Poly(2*x + 1, x).terms() == [((1,), 2), ((0,), 1)]
assert Poly(7*x**2 + 2*x + 1, x).terms() == [((2,), 7), ((1,), 2), ((0,), 1)]
assert Poly(7*x**4 + 2*x + 1, x).terms() == [((4,), 7), ((1,), 2), ((0,), 1)]
assert Poly(
x*y**7 + 2*x**2*y**3).terms('lex') == [((2, 3), 2), ((1, 7), 1)]
assert Poly(
x*y**7 + 2*x**2*y**3).terms('grlex') == [((1, 7), 1), ((2, 3), 2)]
def test_Poly_all_coeffs():
assert Poly(0, x).all_coeffs() == [0]
assert Poly(1, x).all_coeffs() == [1]
assert Poly(2*x + 1, x).all_coeffs() == [2, 1]
assert Poly(7*x**2 + 2*x + 1, x).all_coeffs() == [7, 2, 1]
assert Poly(7*x**4 + 2*x + 1, x).all_coeffs() == [7, 0, 0, 2, 1]
def test_Poly_all_monoms():
assert Poly(0, x).all_monoms() == [(0,)]
assert Poly(1, x).all_monoms() == [(0,)]
assert Poly(2*x + 1, x).all_monoms() == [(1,), (0,)]
assert Poly(7*x**2 + 2*x + 1, x).all_monoms() == [(2,), (1,), (0,)]
assert Poly(7*x**4 + 2*x + 1, x).all_monoms() == [(4,), (3,), (2,), (1,), (0,)]
def test_Poly_all_terms():
assert Poly(0, x).all_terms() == [((0,), 0)]
assert Poly(1, x).all_terms() == [((0,), 1)]
assert Poly(2*x + 1, x).all_terms() == [((1,), 2), ((0,), 1)]
assert Poly(7*x**2 + 2*x + 1, x).all_terms() == \
[((2,), 7), ((1,), 2), ((0,), 1)]
assert Poly(7*x**4 + 2*x + 1, x).all_terms() == \
[((4,), 7), ((3,), 0), ((2,), 0), ((1,), 2), ((0,), 1)]
def test_Poly_termwise():
f = Poly(x**2 + 20*x + 400)
g = Poly(x**2 + 2*x + 4)
def func(monom, coeff):
(k,) = monom
return coeff//10**(2 - k)
assert f.termwise(func) == g
def func(monom, coeff):
(k,) = monom
return (k,), coeff//10**(2 - k)
assert f.termwise(func) == g
def test_Poly_length():
assert Poly(0, x).length() == 0
assert Poly(1, x).length() == 1
assert Poly(x, x).length() == 1
assert Poly(x + 1, x).length() == 2
assert Poly(x**2 + 1, x).length() == 2
assert Poly(x**2 + x + 1, x).length() == 3
def test_Poly_as_dict():
assert Poly(0, x).as_dict() == {}
assert Poly(0, x, y, z).as_dict() == {}
assert Poly(1, x).as_dict() == {(0,): 1}
assert Poly(1, x, y, z).as_dict() == {(0, 0, 0): 1}
assert Poly(x**2 + 3, x).as_dict() == {(2,): 1, (0,): 3}
assert Poly(x**2 + 3, x, y, z).as_dict() == {(2, 0, 0): 1, (0, 0, 0): 3}
assert Poly(3*x**2*y*z**3 + 4*x*y + 5*x*z).as_dict() == {(2, 1, 3): 3,
(1, 1, 0): 4, (1, 0, 1): 5}
def test_Poly_as_expr():
assert Poly(0, x).as_expr() == 0
assert Poly(0, x, y, z).as_expr() == 0
assert Poly(1, x).as_expr() == 1
assert Poly(1, x, y, z).as_expr() == 1
assert Poly(x**2 + 3, x).as_expr() == x**2 + 3
assert Poly(x**2 + 3, x, y, z).as_expr() == x**2 + 3
assert Poly(
3*x**2*y*z**3 + 4*x*y + 5*x*z).as_expr() == 3*x**2*y*z**3 + 4*x*y + 5*x*z
f = Poly(x**2 + 2*x*y**2 - y, x, y)
assert f.as_expr() == -y + x**2 + 2*x*y**2
assert f.as_expr({x: 5}) == 25 - y + 10*y**2
assert f.as_expr({y: 6}) == -6 + 72*x + x**2
assert f.as_expr({x: 5, y: 6}) == 379
assert f.as_expr(5, 6) == 379
raises(GeneratorsError, lambda: f.as_expr({z: 7}))
def test_Poly_lift():
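    # lift() eliminates the algebraic element I via a norm, returning a rational
    # polynomial whose roots include those of the original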
assert Poly(x**4 - I*x + 17*I, x, gaussian=True).lift() == \
Poly(x**16 + 2*x**10 + 578*x**8 + x**4 - 578*x**2 + 83521,
x, domain='QQ')
def test_Poly_deflate():
assert Poly(0, x).deflate() == ((1,), Poly(0, x))
assert Poly(1, x).deflate() == ((1,), Poly(1, x))
assert Poly(x, x).deflate() == ((1,), Poly(x, x))
assert Poly(x**2, x).deflate() == ((2,), Poly(x, x))
assert Poly(x**17, x).deflate() == ((17,), Poly(x, x))
assert Poly(
x**2*y*z**11 + x**4*z**11).deflate() == ((2, 1, 11), Poly(x*y*z + x**2*z))
def test_Poly_inject():
f = Poly(x**2*y + x*y**3 + x*y + 1, x)
assert f.inject() == Poly(x**2*y + x*y**3 + x*y + 1, x, y)
assert f.inject(front=True) == Poly(y**3*x + y*x**2 + y*x + 1, y, x)
def test_Poly_eject():
f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
assert f.eject(x) == Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
assert f.eject(y) == Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
ex = x + y + z + t + w
g = Poly(ex, x, y, z, t, w)
assert g.eject(x) == Poly(ex, y, z, t, w, domain='ZZ[x]')
assert g.eject(x, y) == Poly(ex, z, t, w, domain='ZZ[x, y]')
assert g.eject(x, y, z) == Poly(ex, t, w, domain='ZZ[x, y, z]')
assert g.eject(w) == Poly(ex, x, y, z, t, domain='ZZ[w]')
assert g.eject(t, w) == Poly(ex, x, y, z, domain='ZZ[w, t]')
assert g.eject(z, t, w) == Poly(ex, x, y, domain='ZZ[w, t, z]')
raises(DomainError, lambda: Poly(x*y, x, y, domain=ZZ[z]).eject(y))
raises(NotImplementedError, lambda: Poly(x*y, x, y, z).eject(y))
def test_Poly_exclude():
assert Poly(x, x, y).exclude() == Poly(x, x)
assert Poly(x*y, x, y).exclude() == Poly(x*y, x, y)
assert Poly(1, x, y).exclude() == Poly(1, x, y)
def test_Poly__gen_to_level():
assert Poly(1, x, y)._gen_to_level(-2) == 0
assert Poly(1, x, y)._gen_to_level(-1) == 1
assert Poly(1, x, y)._gen_to_level( 0) == 0
assert Poly(1, x, y)._gen_to_level( 1) == 1
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(-3))
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level( 2))
assert Poly(1, x, y)._gen_to_level(x) == 0
assert Poly(1, x, y)._gen_to_level(y) == 1
assert Poly(1, x, y)._gen_to_level('x') == 0
assert Poly(1, x, y)._gen_to_level('y') == 1
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(z))
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level('z'))
def test_Poly_degree():
assert Poly(0, x).degree() == -oo
assert Poly(1, x).degree() == 0
assert Poly(x, x).degree() == 1
assert Poly(0, x).degree(gen=0) == -oo
assert Poly(1, x).degree(gen=0) == 0
assert Poly(x, x).degree(gen=0) == 1
assert Poly(0, x).degree(gen=x) == -oo
assert Poly(1, x).degree(gen=x) == 0
assert Poly(x, x).degree(gen=x) == 1
assert Poly(0, x).degree(gen='x') == -oo
assert Poly(1, x).degree(gen='x') == 0
assert Poly(x, x).degree(gen='x') == 1
raises(PolynomialError, lambda: Poly(1, x).degree(gen=1))
raises(PolynomialError, lambda: Poly(1, x).degree(gen=y))
raises(PolynomialError, lambda: Poly(1, x).degree(gen='y'))
assert Poly(1, x, y).degree() == 0
assert Poly(2*y, x, y).degree() == 0
assert Poly(x*y, x, y).degree() == 1
assert Poly(1, x, y).degree(gen=x) == 0
assert Poly(2*y, x, y).degree(gen=x) == 0
assert Poly(x*y, x, y).degree(gen=x) == 1
assert Poly(1, x, y).degree(gen=y) == 0
assert Poly(2*y, x, y).degree(gen=y) == 1
assert Poly(x*y, x, y).degree(gen=y) == 1
assert degree(1, x) == 0
assert degree(x, x) == 1
assert degree(x*y**2, gen=x) == 1
assert degree(x*y**2, gen=y) == 2
assert degree(x*y**2, x, y) == 1
assert degree(x*y**2, y, x) == 2
raises(ComputationFailed, lambda: degree(1))
def test_Poly_degree_list():
assert Poly(0, x).degree_list() == (-oo,)
assert Poly(0, x, y).degree_list() == (-oo, -oo)
assert Poly(0, x, y, z).degree_list() == (-oo, -oo, -oo)
assert Poly(1, x).degree_list() == (0,)
assert Poly(1, x, y).degree_list() == (0, 0)
assert Poly(1, x, y, z).degree_list() == (0, 0, 0)
assert Poly(x**2*y + x**3*z**2 + 1).degree_list() == (3, 1, 2)
assert degree_list(1, x) == (0,)
assert degree_list(x, x) == (1,)
assert degree_list(x*y**2) == (1, 2)
raises(ComputationFailed, lambda: degree_list(1))
def test_Poly_total_degree():
assert Poly(x**2*y + x**3*z**2 + 1).total_degree() == 5
assert Poly(x**2 + z**3).total_degree() == 3
assert Poly(x*y*z + z**4).total_degree() == 4
assert Poly(x**3 + x + 1).total_degree() == 3
def test_Poly_homogenize():
assert Poly(x**2+y).homogenize(z) == Poly(x**2+y*z)
assert Poly(x+y).homogenize(z) == Poly(x+y, x, y, z)
assert Poly(x+y**2).homogenize(y) == Poly(x*y+y**2)
def test_Poly_homogeneous_order():
assert Poly(0, x, y).homogeneous_order() == -oo
assert Poly(1, x, y).homogeneous_order() == 0
assert Poly(x, x, y).homogeneous_order() == 1
assert Poly(x*y, x, y).homogeneous_order() == 2
assert Poly(x + 1, x, y).homogeneous_order() is None
assert Poly(x*y + x, x, y).homogeneous_order() is None
assert Poly(x**5 + 2*x**3*y**2 + 9*x*y**4).homogeneous_order() == 5
assert Poly(x**5 + 2*x**3*y**3 + 9*x*y**4).homogeneous_order() is None
def test_Poly_LC():
assert Poly(0, x).LC() == 0
assert Poly(1, x).LC() == 1
assert Poly(2*x**2 + x, x).LC() == 2
assert Poly(x*y**7 + 2*x**2*y**3).LC('lex') == 2
assert Poly(x*y**7 + 2*x**2*y**3).LC('grlex') == 1
assert LC(x*y**7 + 2*x**2*y**3, order='lex') == 2
assert LC(x*y**7 + 2*x**2*y**3, order='grlex') == 1
def test_Poly_TC():
assert Poly(0, x).TC() == 0
assert Poly(1, x).TC() == 1
assert Poly(2*x**2 + x, x).TC() == 0
def test_Poly_EC():
assert Poly(0, x).EC() == 0
assert Poly(1, x).EC() == 1
assert Poly(2*x**2 + x, x).EC() == 1
assert Poly(x*y**7 + 2*x**2*y**3).EC('lex') == 1
assert Poly(x*y**7 + 2*x**2*y**3).EC('grlex') == 2
def test_Poly_coeff():
assert Poly(0, x).coeff_monomial(1) == 0
assert Poly(0, x).coeff_monomial(x) == 0
assert Poly(1, x).coeff_monomial(1) == 1
assert Poly(1, x).coeff_monomial(x) == 0
assert Poly(x**8, x).coeff_monomial(1) == 0
assert Poly(x**8, x).coeff_monomial(x**7) == 0
assert Poly(x**8, x).coeff_monomial(x**8) == 1
assert Poly(x**8, x).coeff_monomial(x**9) == 0
assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(1) == 1
assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(x*y**2) == 3
p = Poly(24*x*y*exp(8) + 23*x, x, y)
assert p.coeff_monomial(x) == 23
assert p.coeff_monomial(y) == 0
assert p.coeff_monomial(x*y) == 24*exp(8)
assert p.as_expr().coeff(x) == 24*y*exp(8) + 23
raises(NotImplementedError, lambda: p.coeff(x))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(0))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x*y))
def test_Poly_nth():
assert Poly(0, x).nth(0) == 0
assert Poly(0, x).nth(1) == 0
assert Poly(1, x).nth(0) == 1
assert Poly(1, x).nth(1) == 0
assert Poly(x**8, x).nth(0) == 0
assert Poly(x**8, x).nth(7) == 0
assert Poly(x**8, x).nth(8) == 1
assert Poly(x**8, x).nth(9) == 0
assert Poly(3*x*y**2 + 1, x, y).nth(0, 0) == 1
assert Poly(3*x*y**2 + 1, x, y).nth(1, 2) == 3
def test_Poly_LM():
assert Poly(0, x).LM() == (0,)
assert Poly(1, x).LM() == (0,)
assert Poly(2*x**2 + x, x).LM() == (2,)
assert Poly(x*y**7 + 2*x**2*y**3).LM('lex') == (2, 3)
assert Poly(x*y**7 + 2*x**2*y**3).LM('grlex') == (1, 7)
assert LM(x*y**7 + 2*x**2*y**3, order='lex') == x**2*y**3
assert LM(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_LM_custom_order():
f = Poly(x**2*y**3*z + x**2*y*z**3 + x*y*z + 1)
rev_lex = lambda monom: tuple(reversed(monom))
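    # any callable mapping an exponent tuple to a sort key is accepted as a custom order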
assert f.LM(order='lex') == (2, 3, 1)
assert f.LM(order=rev_lex) == (2, 1, 3)
def test_Poly_EM():
assert Poly(0, x).EM() == (0,)
assert Poly(1, x).EM() == (0,)
assert Poly(2*x**2 + x, x).EM() == (1,)
assert Poly(x*y**7 + 2*x**2*y**3).EM('lex') == (1, 7)
assert Poly(x*y**7 + 2*x**2*y**3).EM('grlex') == (2, 3)
def test_Poly_LT():
assert Poly(0, x).LT() == ((0,), 0)
assert Poly(1, x).LT() == ((0,), 1)
assert Poly(2*x**2 + x, x).LT() == ((2,), 2)
assert Poly(x*y**7 + 2*x**2*y**3).LT('lex') == ((2, 3), 2)
assert Poly(x*y**7 + 2*x**2*y**3).LT('grlex') == ((1, 7), 1)
assert LT(x*y**7 + 2*x**2*y**3, order='lex') == 2*x**2*y**3
assert LT(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_ET():
assert Poly(0, x).ET() == ((0,), 0)
assert Poly(1, x).ET() == ((0,), 1)
assert Poly(2*x**2 + x, x).ET() == ((1,), 1)
assert Poly(x*y**7 + 2*x**2*y**3).ET('lex') == ((1, 7), 1)
assert Poly(x*y**7 + 2*x**2*y**3).ET('grlex') == ((2, 3), 2)
def test_Poly_max_norm():
assert Poly(-1, x).max_norm() == 1
assert Poly( 0, x).max_norm() == 0
assert Poly( 1, x).max_norm() == 1
def test_Poly_l1_norm():
assert Poly(-1, x).l1_norm() == 1
assert Poly( 0, x).l1_norm() == 0
assert Poly( 1, x).l1_norm() == 1
def test_Poly_clear_denoms():
coeff, poly = Poly(x + 2, x).clear_denoms()
assert coeff == 1 and poly == Poly(
x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/2 + 1, x).clear_denoms()
assert coeff == 2 and poly == Poly(
x + 2, x, domain='QQ') and poly.get_domain() == QQ
coeff, poly = Poly(x/2 + 1, x).clear_denoms(convert=True)
assert coeff == 2 and poly == Poly(
x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/y + 1, x).clear_denoms(convert=True)
assert coeff == y and poly == Poly(
x + y, x, domain='ZZ[y]') and poly.get_domain() == ZZ[y]
coeff, poly = Poly(x/3 + sqrt(2), x, domain='EX').clear_denoms()
assert coeff == 3 and poly == Poly(
x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
coeff, poly = Poly(
x/3 + sqrt(2), x, domain='EX').clear_denoms(convert=True)
assert coeff == 3 and poly == Poly(
x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
def test_Poly_rat_clear_denoms():
f = Poly(x**2/y + 1, x)
g = Poly(x**3 + y, x)
assert f.rat_clear_denoms(g) == \
(Poly(x**2 + y, x), Poly(y*x**3 + y**2, x))
f = f.set_domain(EX)
g = g.set_domain(EX)
assert f.rat_clear_denoms(g) == (f, g)
def test_Poly_integrate():
assert Poly(x + 1).integrate() == Poly(x**2/2 + x)
assert Poly(x + 1).integrate(x) == Poly(x**2/2 + x)
assert Poly(x + 1).integrate((x, 1)) == Poly(x**2/2 + x)
assert Poly(x*y + 1).integrate(x) == Poly(x**2*y/2 + x)
assert Poly(x*y + 1).integrate(y) == Poly(x*y**2/2 + y)
assert Poly(x*y + 1).integrate(x, x) == Poly(x**3*y/6 + x**2/2)
assert Poly(x*y + 1).integrate(y, y) == Poly(x*y**3/6 + y**2/2)
assert Poly(x*y + 1).integrate((x, 2)) == Poly(x**3*y/6 + x**2/2)
assert Poly(x*y + 1).integrate((y, 2)) == Poly(x*y**3/6 + y**2/2)
assert Poly(x*y + 1).integrate(x, y) == Poly(x**2*y**2/4 + x*y)
assert Poly(x*y + 1).integrate(y, x) == Poly(x**2*y**2/4 + x*y)
def test_Poly_diff():
assert Poly(x**2 + x).diff() == Poly(2*x + 1)
assert Poly(x**2 + x).diff(x) == Poly(2*x + 1)
assert Poly(x**2 + x).diff((x, 1)) == Poly(2*x + 1)
assert Poly(x**2*y**2 + x*y).diff(x) == Poly(2*x*y**2 + y)
assert Poly(x**2*y**2 + x*y).diff(y) == Poly(2*x**2*y + x)
assert Poly(x**2*y**2 + x*y).diff(x, x) == Poly(2*y**2, x, y)
assert Poly(x**2*y**2 + x*y).diff(y, y) == Poly(2*x**2, x, y)
assert Poly(x**2*y**2 + x*y).diff((x, 2)) == Poly(2*y**2, x, y)
assert Poly(x**2*y**2 + x*y).diff((y, 2)) == Poly(2*x**2, x, y)
assert Poly(x**2*y**2 + x*y).diff(x, y) == Poly(4*x*y + 1)
assert Poly(x**2*y**2 + x*y).diff(y, x) == Poly(4*x*y + 1)
def test_Poly_eval():
assert Poly(0, x).eval(7) == 0
assert Poly(1, x).eval(7) == 1
assert Poly(x, x).eval(7) == 7
assert Poly(0, x).eval(0, 7) == 0
assert Poly(1, x).eval(0, 7) == 1
assert Poly(x, x).eval(0, 7) == 7
assert Poly(0, x).eval(x, 7) == 0
assert Poly(1, x).eval(x, 7) == 1
assert Poly(x, x).eval(x, 7) == 7
assert Poly(0, x).eval('x', 7) == 0
assert Poly(1, x).eval('x', 7) == 1
assert Poly(x, x).eval('x', 7) == 7
raises(PolynomialError, lambda: Poly(1, x).eval(1, 7))
raises(PolynomialError, lambda: Poly(1, x).eval(y, 7))
raises(PolynomialError, lambda: Poly(1, x).eval('y', 7))
assert Poly(123, x, y).eval(7) == Poly(123, y)
assert Poly(2*y, x, y).eval(7) == Poly(2*y, y)
assert Poly(x*y, x, y).eval(7) == Poly(7*y, y)
assert Poly(123, x, y).eval(x, 7) == Poly(123, y)
assert Poly(2*y, x, y).eval(x, 7) == Poly(2*y, y)
assert Poly(x*y, x, y).eval(x, 7) == Poly(7*y, y)
assert Poly(123, x, y).eval(y, 7) == Poly(123, x)
assert Poly(2*y, x, y).eval(y, 7) == Poly(14, x)
assert Poly(x*y, x, y).eval(y, 7) == Poly(7*x, x)
assert Poly(x*y + y, x, y).eval({x: 7}) == Poly(8*y, y)
assert Poly(x*y + y, x, y).eval({y: 7}) == Poly(7*x + 7, x)
assert Poly(x*y + y, x, y).eval({x: 6, y: 7}) == 49
assert Poly(x*y + y, x, y).eval({x: 7, y: 6}) == 48
assert Poly(x*y + y, x, y).eval((6, 7)) == 49
assert Poly(x*y + y, x, y).eval([6, 7]) == 49
    assert Poly(x + 1, domain='ZZ').eval(S(1)/2) == S(3)/2
    assert Poly(x + 1, domain='ZZ').eval(sqrt(2)) == sqrt(2) + 1
raises(ValueError, lambda: Poly(x*y + y, x, y).eval((6, 7, 8)))
raises(DomainError, lambda: Poly(x + 1, domain='ZZ').eval(S(1)/2, auto=False))
# issue 3245
alpha = Symbol('alpha')
result = (2*alpha*z - 2*alpha + z**2 + 3)/(z**2 - 2*z + 1)
f = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, domain='ZZ[alpha]')
assert f.eval((z + 1)/(z - 1)) == result
g = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, y, domain='ZZ[alpha]')
assert g.eval((z + 1)/(z - 1)) == Poly(result, y, domain='ZZ(alpha,z)')
def test_Poly___call__():
f = Poly(2*x*y + 3*x + y + 2*z)
assert f(2) == Poly(5*y + 2*z + 6)
assert f(2, 5) == Poly(2*z + 31)
assert f(2, 5, 7) == 45
def test_parallel_poly_from_expr():
assert parallel_poly_from_expr(
[x - 1, x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr([Poly(
x - 1, x), Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([Poly(
x - 1, x), x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([x - 1, Poly(
x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([Poly(x - 1, x), Poly(
x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr(
[x - 1, x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, x**2 - 1])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, Poly(x**2 - 1, x)])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x**2 - 1, 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr(
[Poly(x**2 - 1, x), 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr([Poly(x, x, y), Poly(y, x, y)], x, y, order='lex')[0] == \
[Poly(x, x, y, domain='ZZ'), Poly(y, x, y, domain='ZZ')]
raises(PolificationFailed, lambda: parallel_poly_from_expr([0, 1]))
def test_pdiv():
f, g = x**2 - y**2, x - y
q, r = x + y, 0
F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
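    # pseudo-division scales f by LC(g)**(deg(f) - deg(g) + 1) before dividing;
    # g = x - y is monic in x, so here pdiv coincides with ordinary division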
assert F.pdiv(G) == (Q, R)
assert F.prem(G) == R
assert F.pquo(G) == Q
assert F.pexquo(G) == Q
assert pdiv(f, g) == (q, r)
assert prem(f, g) == r
assert pquo(f, g) == q
assert pexquo(f, g) == q
assert pdiv(f, g, x, y) == (q, r)
assert prem(f, g, x, y) == r
assert pquo(f, g, x, y) == q
assert pexquo(f, g, x, y) == q
assert pdiv(f, g, (x, y)) == (q, r)
assert prem(f, g, (x, y)) == r
assert pquo(f, g, (x, y)) == q
assert pexquo(f, g, (x, y)) == q
assert pdiv(F, G) == (Q, R)
assert prem(F, G) == R
assert pquo(F, G) == Q
assert pexquo(F, G) == Q
assert pdiv(f, g, polys=True) == (Q, R)
assert prem(f, g, polys=True) == R
assert pquo(f, g, polys=True) == Q
assert pexquo(f, g, polys=True) == Q
assert pdiv(F, G, polys=False) == (q, r)
assert prem(F, G, polys=False) == r
assert pquo(F, G, polys=False) == q
assert pexquo(F, G, polys=False) == q
raises(ComputationFailed, lambda: pdiv(4, 2))
raises(ComputationFailed, lambda: prem(4, 2))
raises(ComputationFailed, lambda: pquo(4, 2))
raises(ComputationFailed, lambda: pexquo(4, 2))
def test_div():
f, g = x**2 - y**2, x - y
q, r = x + y, 0
F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
assert F.div(G) == (Q, R)
assert F.rem(G) == R
assert F.quo(G) == Q
assert F.exquo(G) == Q
assert div(f, g) == (q, r)
assert rem(f, g) == r
assert quo(f, g) == q
assert exquo(f, g) == q
assert div(f, g, x, y) == (q, r)
assert rem(f, g, x, y) == r
assert quo(f, g, x, y) == q
assert exquo(f, g, x, y) == q
assert div(f, g, (x, y)) == (q, r)
assert rem(f, g, (x, y)) == r
assert quo(f, g, (x, y)) == q
assert exquo(f, g, (x, y)) == q
assert div(F, G) == (Q, R)
assert rem(F, G) == R
assert quo(F, G) == Q
assert exquo(F, G) == Q
assert div(f, g, polys=True) == (Q, R)
assert rem(f, g, polys=True) == R
assert quo(f, g, polys=True) == Q
assert exquo(f, g, polys=True) == Q
assert div(F, G, polys=False) == (q, r)
assert rem(F, G, polys=False) == r
assert quo(F, G, polys=False) == q
assert exquo(F, G, polys=False) == q
raises(ComputationFailed, lambda: div(4, 2))
raises(ComputationFailed, lambda: rem(4, 2))
raises(ComputationFailed, lambda: quo(4, 2))
raises(ComputationFailed, lambda: exquo(4, 2))
f, g = x**2 + 1, 2*x - 4
qz, rz = 0, x**2 + 1
qq, rq = x/2 + 1, 5
assert div(f, g) == (qq, rq)
assert div(f, g, auto=True) == (qq, rq)
assert div(f, g, auto=False) == (qz, rz)
assert div(f, g, domain=ZZ) == (qz, rz)
assert div(f, g, domain=QQ) == (qq, rq)
assert div(f, g, domain=ZZ, auto=True) == (qq, rq)
assert div(f, g, domain=ZZ, auto=False) == (qz, rz)
assert div(f, g, domain=QQ, auto=True) == (qq, rq)
assert div(f, g, domain=QQ, auto=False) == (qq, rq)
assert rem(f, g) == rq
assert rem(f, g, auto=True) == rq
assert rem(f, g, auto=False) == rz
assert rem(f, g, domain=ZZ) == rz
assert rem(f, g, domain=QQ) == rq
assert rem(f, g, domain=ZZ, auto=True) == rq
assert rem(f, g, domain=ZZ, auto=False) == rz
assert rem(f, g, domain=QQ, auto=True) == rq
assert rem(f, g, domain=QQ, auto=False) == rq
assert quo(f, g) == qq
assert quo(f, g, auto=True) == qq
assert quo(f, g, auto=False) == qz
assert quo(f, g, domain=ZZ) == qz
assert quo(f, g, domain=QQ) == qq
assert quo(f, g, domain=ZZ, auto=True) == qq
assert quo(f, g, domain=ZZ, auto=False) == qz
assert quo(f, g, domain=QQ, auto=True) == qq
assert quo(f, g, domain=QQ, auto=False) == qq
f, g, q = x**2, 2*x, x/2
assert exquo(f, g) == q
assert exquo(f, g, auto=True) == q
raises(ExactQuotientFailed, lambda: exquo(f, g, auto=False))
raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ))
assert exquo(f, g, domain=QQ) == q
assert exquo(f, g, domain=ZZ, auto=True) == q
raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ, auto=False))
assert exquo(f, g, domain=QQ, auto=True) == q
assert exquo(f, g, domain=QQ, auto=False) == q
f, g = Poly(x**2), Poly(x)
q, r = f.div(g)
assert q.get_domain().is_ZZ and r.get_domain().is_ZZ
r = f.rem(g)
assert r.get_domain().is_ZZ
q = f.quo(g)
assert q.get_domain().is_ZZ
q = f.exquo(g)
assert q.get_domain().is_ZZ
def test_gcdex():
f, g = 2*x, x**2 - 16
s, t, h = x/32, -Rational(1, 16), 1
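    # Bezout identity: s*f + t*g == h == gcd(f, g) over QQ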
F, G, S, T, H = [ Poly(u, x, domain='QQ') for u in (f, g, s, t, h) ]
assert F.half_gcdex(G) == (S, H)
assert F.gcdex(G) == (S, T, H)
assert F.invert(G) == S
assert half_gcdex(f, g) == (s, h)
assert gcdex(f, g) == (s, t, h)
assert invert(f, g) == s
assert half_gcdex(f, g, x) == (s, h)
assert gcdex(f, g, x) == (s, t, h)
assert invert(f, g, x) == s
assert half_gcdex(f, g, (x,)) == (s, h)
assert gcdex(f, g, (x,)) == (s, t, h)
assert invert(f, g, (x,)) == s
assert half_gcdex(F, G) == (S, H)
assert gcdex(F, G) == (S, T, H)
assert invert(F, G) == S
assert half_gcdex(f, g, polys=True) == (S, H)
assert gcdex(f, g, polys=True) == (S, T, H)
assert invert(f, g, polys=True) == S
assert half_gcdex(F, G, polys=False) == (s, h)
assert gcdex(F, G, polys=False) == (s, t, h)
assert invert(F, G, polys=False) == s
assert half_gcdex(100, 2004) == (-20, 4)
assert gcdex(100, 2004) == (-20, 1, 4)
assert invert(3, 7) == 5
raises(DomainError, lambda: half_gcdex(x + 1, 2*x + 1, auto=False))
raises(DomainError, lambda: gcdex(x + 1, 2*x + 1, auto=False))
raises(DomainError, lambda: invert(x + 1, 2*x + 1, auto=False))
def test_revert():
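    # revert(n) computes f**(-1) mod x**n; the cos(x) series inverts to the sec(x) series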
f = Poly(1 - x**2/2 + x**4/24 - x**6/720)
g = Poly(61*x**6/720 + 5*x**4/24 + x**2/2 + 1)
assert f.revert(8) == g
def test_subresultants():
f, g, h = x**2 - 2*x + 1, x**2 - 1, 2*x - 2
F, G, H = Poly(f), Poly(g), Poly(h)
assert F.subresultants(G) == [F, G, H]
assert subresultants(f, g) == [f, g, h]
assert subresultants(f, g, x) == [f, g, h]
assert subresultants(f, g, (x,)) == [f, g, h]
assert subresultants(F, G) == [F, G, H]
assert subresultants(f, g, polys=True) == [F, G, H]
assert subresultants(F, G, polys=False) == [f, g, h]
raises(ComputationFailed, lambda: subresultants(4, 2))
def test_resultant():
f, g, h = x**2 - 2*x + 1, x**2 - 1, 0
F, G = Poly(f), Poly(g)
assert F.resultant(G) == h
assert resultant(f, g) == h
assert resultant(f, g, x) == h
assert resultant(f, g, (x,)) == h
assert resultant(F, G) == h
assert resultant(f, g, polys=True) == h
assert resultant(F, G, polys=False) == h
assert resultant(f, g, includePRS=True) == (h, [f, g, 2*x - 2])
f, g, h = x - a, x - b, a - b
F, G, H = Poly(f), Poly(g), Poly(h)
assert F.resultant(G) == H
assert resultant(f, g) == h
assert resultant(f, g, x) == h
assert resultant(f, g, (x,)) == h
assert resultant(F, G) == H
assert resultant(f, g, polys=True) == H
assert resultant(F, G, polys=False) == h
raises(ComputationFailed, lambda: resultant(4, 2))
def test_discriminant():
f, g = x**3 + 3*x**2 + 9*x - 13, -11664
F = Poly(f)
assert F.discriminant() == g
assert discriminant(f) == g
assert discriminant(f, x) == g
assert discriminant(f, (x,)) == g
assert discriminant(F) == g
assert discriminant(f, polys=True) == g
assert discriminant(F, polys=False) == g
f, g = a*x**2 + b*x + c, b**2 - 4*a*c
F, G = Poly(f), Poly(g)
assert F.discriminant() == G
assert discriminant(f) == g
assert discriminant(f, x, a, b, c) == g
assert discriminant(f, (x, a, b, c)) == g
assert discriminant(F) == G
assert discriminant(f, polys=True) == G
assert discriminant(F, polys=False) == g
raises(ComputationFailed, lambda: discriminant(4))
def test_dispersion():
# We test only the API here. For more mathematical
# tests see the dedicated test file.
fp = poly((x + 1)*(x + 2), x)
assert sorted(fp.dispersionset()) == [0, 1]
assert fp.dispersion() == 1
fp = poly(x**4 - 3*x**2 + 1, x)
gp = fp.shift(-3)
assert sorted(fp.dispersionset(gp)) == [2, 3, 4]
assert fp.dispersion(gp) == 4
def test_gcd_list():
F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
assert gcd_list(F) == x - 1
assert gcd_list(F, polys=True) == Poly(x - 1)
assert gcd_list([]) == 0
assert gcd_list([1, 2]) == 1
assert gcd_list([4, 6, 8]) == 2
gcd = gcd_list([], x)
assert gcd.is_Number and gcd is S.Zero
gcd = gcd_list([], x, polys=True)
assert gcd.is_Poly and gcd.is_zero
raises(ComputationFailed, lambda: gcd_list([], polys=True))
def test_lcm_list():
F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
assert lcm_list(F) == x**5 - x**4 - 2*x**3 - x**2 + x + 2
assert lcm_list(F, polys=True) == Poly(x**5 - x**4 - 2*x**3 - x**2 + x + 2)
assert lcm_list([]) == 1
assert lcm_list([1, 2]) == 2
assert lcm_list([4, 6, 8]) == 24
lcm = lcm_list([], x)
assert lcm.is_Number and lcm is S.One
lcm = lcm_list([], x, polys=True)
assert lcm.is_Poly and lcm.is_one
raises(ComputationFailed, lambda: lcm_list([], polys=True))
def test_gcd():
f, g = x**3 - 1, x**2 - 1
s, t = x**2 + x + 1, x + 1
h, r = x - 1, x**4 + x**3 - x - 1
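    # h = gcd(f, g) and r = lcm(f, g); the cofactors satisfy f == h*s and g == h*t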
F, G, S, T, H, R = [ Poly(u) for u in (f, g, s, t, h, r) ]
assert F.cofactors(G) == (H, S, T)
assert F.gcd(G) == H
assert F.lcm(G) == R
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == r
assert cofactors(f, g, x) == (h, s, t)
assert gcd(f, g, x) == h
assert lcm(f, g, x) == r
assert cofactors(f, g, (x,)) == (h, s, t)
assert gcd(f, g, (x,)) == h
assert lcm(f, g, (x,)) == r
assert cofactors(F, G) == (H, S, T)
assert gcd(F, G) == H
assert lcm(F, G) == R
assert cofactors(f, g, polys=True) == (H, S, T)
assert gcd(f, g, polys=True) == H
assert lcm(f, g, polys=True) == R
assert cofactors(F, G, polys=False) == (h, s, t)
assert gcd(F, G, polys=False) == h
assert lcm(F, G, polys=False) == r
f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
h, s, t = g, 1.0*x + 1.0, 1.0
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == f
assert cofactors(8, 6) == (2, 4, 3)
assert gcd(8, 6) == 2
assert lcm(8, 6) == 24
f, g = x**2 - 3*x - 4, x**3 - 4*x**2 + x - 4
l = x**4 - 3*x**3 - 3*x**2 - 3*x - 4
h, s, t = x - 4, x + 1, x**2 + 1
assert cofactors(f, g, modulus=11) == (h, s, t)
assert gcd(f, g, modulus=11) == h
assert lcm(f, g, modulus=11) == l
f, g = x**2 + 8*x + 7, x**3 + 7*x**2 + x + 7
l = x**4 + 8*x**3 + 8*x**2 + 8*x + 7
h, s, t = x + 7, x + 1, x**2 + 1
assert cofactors(f, g, modulus=11, symmetric=False) == (h, s, t)
assert gcd(f, g, modulus=11, symmetric=False) == h
assert lcm(f, g, modulus=11, symmetric=False) == l
raises(TypeError, lambda: gcd(x))
raises(TypeError, lambda: lcm(x))
def test_gcd_numbers_vs_polys():
assert isinstance(gcd(3, 9), Integer)
assert isinstance(gcd(3*x, 9), Integer)
assert gcd(3, 9) == 3
assert gcd(3*x, 9) == 3
assert isinstance(gcd(S(3)/2, S(9)/4), Rational)
assert isinstance(gcd(S(3)/2*x, S(9)/4), Rational)
assert gcd(S(3)/2, S(9)/4) == S(3)/4
assert gcd(S(3)/2*x, S(9)/4) == 1
assert isinstance(gcd(3.0, 9.0), Float)
assert isinstance(gcd(3.0*x, 9.0), Float)
assert gcd(3.0, 9.0) == 1.0
assert gcd(3.0*x, 9.0) == 1.0
def test_terms_gcd():
assert terms_gcd(1) == 1
assert terms_gcd(1, x) == 1
assert terms_gcd(x - 1) == x - 1
assert terms_gcd(-x - 1) == -x - 1
assert terms_gcd(2*x + 3) == 2*x + 3
assert terms_gcd(6*x + 4) == Mul(2, 3*x + 2, evaluate=False)
assert terms_gcd(x**3*y + x*y**3) == x*y*(x**2 + y**2)
assert terms_gcd(2*x**3*y + 2*x*y**3) == 2*x*y*(x**2 + y**2)
assert terms_gcd(x**3*y/2 + x*y**3/2) == x*y/2*(x**2 + y**2)
assert terms_gcd(x**3*y + 2*x*y**3) == x*y*(x**2 + 2*y**2)
assert terms_gcd(2*x**3*y + 4*x*y**3) == 2*x*y*(x**2 + 2*y**2)
assert terms_gcd(2*x**3*y/3 + 4*x*y**3/5) == 2*x*y/15*(5*x**2 + 6*y**2)
assert terms_gcd(2.0*x**3*y + 4.1*x*y**3) == x*y*(2.0*x**2 + 4.1*y**2)
assert _aresame(terms_gcd(2.0*x + 3), 2.0*x + 3)
assert terms_gcd((3 + 3*x)*(x + x*y), expand=False) == \
(3*x + 3)*(x*y + x)
assert terms_gcd((3 + 3*x)*(x + x*sin(3 + 3*y)), expand=False, deep=True) == \
3*x*(x + 1)*(sin(Mul(3, y + 1, evaluate=False)) + 1)
assert terms_gcd(sin(x + x*y), deep=True) == \
sin(x*(y + 1))
def test_trunc():
f, g = x**5 + 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6, x**5 - x**4 + x**2 - x
F, G = Poly(f), Poly(g)
assert F.trunc(3) == G
assert trunc(f, 3) == g
assert trunc(f, 3, x) == g
assert trunc(f, 3, (x,)) == g
assert trunc(F, 3) == G
assert trunc(f, 3, polys=True) == G
assert trunc(F, 3, polys=False) == g
f, g = 6*x**5 + 5*x**4 + 4*x**3 + 3*x**2 + 2*x + 1, -x**4 + x**3 - x + 1
F, G = Poly(f), Poly(g)
assert F.trunc(3) == G
assert trunc(f, 3) == g
assert trunc(f, 3, x) == g
assert trunc(f, 3, (x,)) == g
assert trunc(F, 3) == G
assert trunc(f, 3, polys=True) == G
assert trunc(F, 3, polys=False) == g
f = Poly(x**2 + 2*x + 3, modulus=5)
assert f.trunc(2) == Poly(x**2 + 1, modulus=5)
def test_monic():
f, g = 2*x - 1, x - S(1)/2
F, G = Poly(f, domain='QQ'), Poly(g)
assert F.monic() == G
assert monic(f) == g
assert monic(f, x) == g
assert monic(f, (x,)) == g
assert monic(F) == G
assert monic(f, polys=True) == G
assert monic(F, polys=False) == g
raises(ComputationFailed, lambda: monic(4))
assert monic(2*x**2 + 6*x + 4, auto=False) == x**2 + 3*x + 2
raises(ExactQuotientFailed, lambda: monic(2*x + 6*x + 1, auto=False))
assert monic(2.0*x**2 + 6.0*x + 4.0) == 1.0*x**2 + 3.0*x + 2.0
assert monic(2*x**2 + 3*x + 4, modulus=5) == x**2 - x + 2
def test_content():
f, F = 4*x + 2, Poly(4*x + 2)
assert F.content() == 2
assert content(f) == 2
raises(ComputationFailed, lambda: content(4))
f = Poly(2*x, modulus=3)
assert f.content() == 1
def test_primitive():
f, g = 4*x + 2, 2*x + 1
F, G = Poly(f), Poly(g)
assert F.primitive() == (2, G)
assert primitive(f) == (2, g)
assert primitive(f, x) == (2, g)
assert primitive(f, (x,)) == (2, g)
assert primitive(F) == (2, G)
assert primitive(f, polys=True) == (2, G)
assert primitive(F, polys=False) == (2, g)
raises(ComputationFailed, lambda: primitive(4))
f = Poly(2*x, modulus=3)
g = Poly(2.0*x, domain=RR)
assert f.primitive() == (1, f)
assert g.primitive() == (1.0, g)
assert primitive(S('-3*x/4 + y + 11/8')) == \
S('(1/8, -6*x + 8*y + 11)')
def test_compose():
f = x**12 + 20*x**10 + 150*x**8 + 500*x**6 + 625*x**4 - 2*x**3 - 10*x + 9
g = x**4 - 2*x + 9
h = x**3 + 5*x
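    # f == g(h(x)): compose(g, h) rebuilds f and decompose(f) recovers [g, h]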
F, G, H = map(Poly, (f, g, h))
assert G.compose(H) == F
assert compose(g, h) == f
assert compose(g, h, x) == f
assert compose(g, h, (x,)) == f
assert compose(G, H) == F
assert compose(g, h, polys=True) == F
assert compose(G, H, polys=False) == f
assert F.decompose() == [G, H]
assert decompose(f) == [g, h]
assert decompose(f, x) == [g, h]
assert decompose(f, (x,)) == [g, h]
assert decompose(F) == [G, H]
assert decompose(f, polys=True) == [G, H]
assert decompose(F, polys=False) == [g, h]
raises(ComputationFailed, lambda: compose(4, 2))
raises(ComputationFailed, lambda: decompose(4))
assert compose(x**2 - y**2, x - y, x, y) == x**2 - 2*x*y
assert compose(x**2 - y**2, x - y, y, x) == -y**2 + 2*x*y
def test_shift():
assert Poly(x**2 - 2*x + 1, x).shift(2) == Poly(x**2 + 2*x + 1, x)
def test_sturm():
f, F = x, Poly(x, domain='QQ')
g, G = 1, Poly(1, x, domain='QQ')
assert F.sturm() == [F, G]
assert sturm(f) == [f, g]
assert sturm(f, x) == [f, g]
assert sturm(f, (x,)) == [f, g]
assert sturm(F) == [F, G]
assert sturm(f, polys=True) == [F, G]
assert sturm(F, polys=False) == [f, g]
raises(ComputationFailed, lambda: sturm(4))
raises(DomainError, lambda: sturm(f, auto=False))
f = Poly(S(1024)/(15625*pi**8)*x**5
- S(4096)/(625*pi**8)*x**4
+ S(32)/(15625*pi**4)*x**3
- S(128)/(625*pi**4)*x**2
+ S(1)/62500*x
- S(1)/625, x, domain='ZZ(pi)')
assert sturm(f) == \
[Poly(x**3 - 100*x**2 + pi**4/64*x - 25*pi**4/16, x, domain='ZZ(pi)'),
Poly(3*x**2 - 200*x + pi**4/64, x, domain='ZZ(pi)'),
Poly((S(20000)/9 - pi**4/96)*x + 25*pi**4/18, x, domain='ZZ(pi)'),
Poly((-3686400000000*pi**4 - 11520000*pi**8 - 9*pi**12)/(26214400000000 - 245760000*pi**4 + 576*pi**8), x, domain='ZZ(pi)')]
def test_gff():
f = x**5 + 2*x**4 - x**3 - 2*x**2
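    # greatest factorial factorization: f == [x]**1 * [x + 2]**4,
    # where [g]**k = g(x)*g(x - 1)*...*g(x - k + 1)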
assert Poly(f).gff_list() == [(Poly(x), 1), (Poly(x + 2), 4)]
assert gff_list(f) == [(x, 1), (x + 2, 4)]
raises(NotImplementedError, lambda: gff(f))
f = x*(x - 1)**3*(x - 2)**2*(x - 4)**2*(x - 5)
assert Poly(f).gff_list() == [(
Poly(x**2 - 5*x + 4), 1), (Poly(x**2 - 5*x + 4), 2), (Poly(x), 3)]
assert gff_list(f) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
raises(NotImplementedError, lambda: gff(f))
def test_sqf_norm():
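    # sqf_norm(f, extension=a) returns (s, f(x - s*a), Norm(f(x - s*a))): f is shifted
    # by s multiples of the extension element until its norm over QQ is square-free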
assert sqf_norm(x**2 - 2, extension=sqrt(3)) == \
(1, x**2 - 2*sqrt(3)*x + 1, x**4 - 10*x**2 + 1)
assert sqf_norm(x**2 - 3, extension=sqrt(2)) == \
(1, x**2 - 2*sqrt(2)*x - 1, x**4 - 10*x**2 + 1)
assert Poly(x**2 - 2, extension=sqrt(3)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(3)*x + 1, x, extension=sqrt(3)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
assert Poly(x**2 - 3, extension=sqrt(2)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
f = x**5 - x**3 - x**2 + 1
g = x**3 + 2*x**2 + 2*x + 1
h = x - 1
p = x**4 + x**3 - x - 1
F, G, H, P = map(Poly, (f, g, h, p))
assert F.sqf_part() == P
assert sqf_part(f) == p
assert sqf_part(f, x) == p
assert sqf_part(f, (x,)) == p
assert sqf_part(F) == P
assert sqf_part(f, polys=True) == P
assert sqf_part(F, polys=False) == p
assert F.sqf_list() == (1, [(G, 1), (H, 2)])
assert sqf_list(f) == (1, [(g, 1), (h, 2)])
assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
assert sqf_list(F) == (1, [(G, 1), (H, 2)])
assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])
assert F.sqf_list_include() == [(G, 1), (H, 2)]
raises(ComputationFailed, lambda: sqf_part(4))
assert sqf(1) == 1
assert sqf_list(1) == (1, [])
assert sqf((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
assert sqf(f) == g*h**2
assert sqf(f, x) == g*h**2
assert sqf(f, (x,)) == g*h**2
d = x**2 + y**2
assert sqf(f/d) == (g*h**2)/d
assert sqf(f/d, x) == (g*h**2)/d
assert sqf(f/d, (x,)) == (g*h**2)/d
assert sqf(x - 1) == x - 1
assert sqf(-x - 1) == -x - 1
assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
assert sqf((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
assert sqf(Poly(x**2 - 2*x + 1)) == (x - 1)**2
f = 3 + x - x*(1 + x) + x**2
assert sqf(f) == 3
f = (x**2 + 2*x + 1)**20000000000
assert sqf(f) == (x + 1)**40000000000
assert sqf_list(f) == (1, [(x + 1, 40000000000)])
def test_factor():
f = x**5 - x**3 - x**2 + 1
u = x + 1
v = x - 1
w = x**2 + x + 1
F, U, V, W = map(Poly, (f, u, v, w))
assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])
assert F.factor_list_include() == [(U, 1), (V, 2), (W, 1)]
assert factor_list(1) == (1, [])
assert factor_list(6) == (6, [])
assert factor_list(sqrt(3), x) == (1, [(3, S.Half)])
assert factor_list((-1)**x, x) == (1, [(-1, x)])
assert factor_list((2*x)**y, x) == (1, [(2, y), (x, y)])
assert factor_list(sqrt(x*y), x) == (1, [(x*y, S.Half)])
assert factor(6) == 6 and factor(6).is_Integer
assert factor_list(3*x) == (3, [(x, 1)])
assert factor_list(3*x**2) == (3, [(x, 2)])
assert factor(3*x) == 3*x
assert factor(3*x**2) == 3*x**2
assert factor((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
assert factor(f) == u*v**2*w
assert factor(f, x) == u*v**2*w
assert factor(f, (x,)) == u*v**2*w
g, p, q, r = x**2 - y**2, x - y, x + y, x**2 + 1
assert factor(f/g) == (u*v**2*w)/(p*q)
assert factor(f/g, x) == (u*v**2*w)/(p*q)
assert factor(f/g, (x,)) == (u*v**2*w)/(p*q)
p = Symbol('p', positive=True)
i = Symbol('i', integer=True)
r = Symbol('r', real=True)
assert factor(sqrt(x*y)).is_Pow is True
assert factor(sqrt(3*x**2 - 3)) == sqrt(3)*sqrt((x - 1)*(x + 1))
assert factor(sqrt(3*x**2 + 3)) == sqrt(3)*sqrt(x**2 + 1)
assert factor((y*x**2 - y)**i) == y**i*(x - 1)**i*(x + 1)**i
assert factor((y*x**2 + y)**i) == y**i*(x**2 + 1)**i
assert factor((y*x**2 - y)**t) == (y*(x - 1)*(x + 1))**t
assert factor((y*x**2 + y)**t) == (y*(x**2 + 1))**t
f = sqrt(expand((r**2 + 1)*(p + 1)*(p - 1)*(p - 2)**3))
g = sqrt((p - 2)**3*(p - 1))*sqrt(p + 1)*sqrt(r**2 + 1)
assert factor(f) == g
assert factor(g) == g
f = sqrt(expand((x - 1)**5*(r**2 + 1)))
g = sqrt(r**2 + 1)*(x - 1)**(S(5)/2)
assert factor(f) == g
assert factor(g) == g
f = Poly(sin(1)*x + 1, x, domain=EX)
assert f.factor_list() == (1, [(f, 1)])
f = x**4 + 1
assert factor(f) == f
assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
assert factor(
f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)
f = x**2 + 2*sqrt(2)*x + 2
assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6
assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
(x + sqrt(2)*y)*(x - sqrt(2)*y)
assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
2*((x + sqrt(2)*y)*(x - sqrt(2)*y))
assert factor(x - 1) == x - 1
assert factor(-x - 1) == -x - 1
assert factor(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
        (x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x**2 + 1)
assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
(x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 +
         x**3 + 65536*x**2 + 1)
f = x/pi + x*sin(x)/pi
g = y/(pi**2 + 2*pi + 1) + y*sin(x)/(pi**2 + 2*pi + 1)
assert factor(f) == x*(sin(x) + 1)/pi
assert factor(g) == y*(sin(x) + 1)/(pi + 1)**2
assert factor(Eq(
x**2 + 2*x + 1, x**3 + 1)) == Eq((x + 1)**2, (x + 1)*(x**2 - x + 1))
f = (x**2 - 1)/(x**2 + 4*x + 4)
assert factor(f) == (x + 1)*(x - 1)/(x + 2)**2
assert factor(f, x) == (x + 1)*(x - 1)/(x + 2)**2
f = 3 + x - x*(1 + x) + x**2
assert factor(f) == 3
assert factor(f, x) == 3
assert factor(1/(x**2 + 2*x + 1/x) - 1) == -((1 - x + 2*x**2 +
x**3)/(1 + 2*x**2 + x**3))
assert factor(f, expand=False) == f
raises(PolynomialError, lambda: factor(f, x, expand=False))
raises(FlagError, lambda: factor(x**2 - 1, polys=True))
assert factor([x, Eq(x**2 - y**2, Tuple(x**2 - z**2, 1/x + 1/y))]) == \
[x, Eq((x - y)*(x + y), Tuple((x - z)*(x + z), (x + y)/x/y))]
    assert not isinstance(Poly(x**3 + x + 1).factor_list()[1][0][0], PurePoly)
    assert isinstance(PurePoly(x**3 + x + 1).factor_list()[1][0][0], PurePoly)
assert factor(sqrt(-x)) == sqrt(-x)
# issue 2818
e = (-2*x*(-x + 1)*(x - 1)*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)*(x**2*(x -
1) - x*(x - 1) - x) - (-2*x**2*(x - 1)**2 - x*(-x + 1)*(-x*(-x + 1) +
x*(x - 1)))*(x**2*(x - 1)**4 - x*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)))
assert factor(e) == 0
# deep option
assert factor(sin(x**2 + x) + x, deep=True) == sin(x*(x + 1)) + x
def test_factor_large():
f = (x**2 + 4*x + 4)**10000000*(x**2 + 1)*(x**2 + 2*x + 1)**1234567
g = ((x**2 + 2*x + 1)**3000*y**2 + (x**2 + 2*x + 1)**3000*2*y + (
x**2 + 2*x + 1)**3000)
assert factor(f) == (x + 2)**20000000*(x**2 + 1)*(x + 1)**2469134
assert factor(g) == (x + 1)**6000*(y + 1)**2
assert factor_list(
f) == (1, [(x + 1, 2469134), (x + 2, 20000000), (x**2 + 1, 1)])
assert factor_list(g) == (1, [(y + 1, 2), (x + 1, 6000)])
f = (x**2 - y**2)**200000*(x**7 + 1)
g = (x**2 + y**2)**200000*(x**7 + 1)
assert factor(f) == \
(x + 1)*(x - y)**200000*(x + y)**200000*(x**6 - x**5 +
x**4 - x**3 + x**2 - x + 1)
assert factor(g, gaussian=True) == \
(x + 1)*(x - I*y)**200000*(x + I*y)**200000*(x**6 - x**5 +
x**4 - x**3 + x**2 - x + 1)
assert factor_list(f) == \
(1, [(x + 1, 1), (x - y, 200000), (x + y, 200000), (x**6 -
x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
assert factor_list(g, gaussian=True) == \
(1, [(x + 1, 1), (x - I*y, 200000), (x + I*y, 200000), (
x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
@XFAIL
def test_factor_noeval():
assert factor(6*x - 10) == 2*(3*x - 5)
assert factor((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
def test_intervals():
assert intervals(0) == []
assert intervals(1) == []
assert intervals(x, sqf=True) == [(0, 0)]
assert intervals(x) == [((0, 0), 1)]
assert intervals(x**128) == [((0, 0), 128)]
assert intervals([x**2, x**4]) == [((0, 0), {0: 2, 1: 4})]
f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))
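    # the real roots are -1/1028 and 85/6; intervals() returns rational intervals isolating each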
assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
assert f.intervals() == [((-1, 0), 1), ((14, 15), 1)]
assert f.intervals(fast=True, sqf=True) == [(-1, 0), (14, 15)]
assert f.intervals(fast=True) == [((-1, 0), 1), ((14, 15), 1)]
assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
[((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
[((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)
assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
assert intervals(f) == [((-1, 0), 1), ((14, 15), 1)]
assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
[((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
[((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
f = Poly((x**2 - 2)*(x**2 - 3)**7*(x + 1)*(7*x + 3)**3)
assert f.intervals() == \
[((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
((-1, -1), 1), ((-1, 0), 3),
((1, S(3)/2), 1), ((S(3)/2, 2), 7)]
assert intervals([x**5 - 200, x**5 - 201]) == \
[((S(75)/26, S(101)/35), {0: 1}), ((S(283)/98, S(26)/9), {1: 1})]
assert intervals([x**5 - 200, x**5 - 201], fast=True) == \
[((S(75)/26, S(101)/35), {0: 1}), ((S(283)/98, S(26)/9), {1: 1})]
assert intervals([x**2 - 200, x**2 - 201]) == \
[((-S(71)/5, -S(85)/6), {1: 1}), ((-S(85)/6, -14), {0: 1}),
((14, S(85)/6), {0: 1}), ((S(85)/6, S(71)/5), {1: 1})]
    assert intervals([x + 1, x + 2, x - 1, x + 1, 1, x - 1, x - 1, (x - 2)**2]) == \
        [((-2, -2), {1: 1}), ((-1, -1), {0: 1, 3: 1}),
         ((1, 1), {2: 1, 5: 1, 6: 1}), ((2, 2), {7: 2})]
f, g, h = x**2 - 2, x**4 - 4*x**2 + 4, x - 1
assert intervals(f, inf=S(7)/4, sqf=True) == []
assert intervals(f, inf=S(7)/5, sqf=True) == [(S(7)/5, S(3)/2)]
assert intervals(f, sup=S(7)/4, sqf=True) == [(-2, -1), (1, S(3)/2)]
assert intervals(f, sup=S(7)/5, sqf=True) == [(-2, -1)]
assert intervals(g, inf=S(7)/4) == []
assert intervals(g, inf=S(7)/5) == [((S(7)/5, S(3)/2), 2)]
assert intervals(g, sup=S(7)/4) == [((-2, -1), 2), ((1, S(3)/2), 2)]
assert intervals(g, sup=S(7)/5) == [((-2, -1), 2)]
assert intervals([g, h], inf=S(7)/4) == []
assert intervals([g, h], inf=S(7)/5) == [((S(7)/5, S(3)/2), {0: 2})]
    assert intervals([g, h], sup=S(7)/4) == \
        [((-2, -1), {0: 2}), ((1, 1), {1: 1}), ((1, S(3)/2), {0: 2})]
assert intervals(
[g, h], sup=S(7)/5) == [((-2, -1), {0: 2}), ((1, 1), {1: 1})]
assert intervals([x + 2, x**2 - 2]) == \
[((-2, -2), {0: 1}), ((-2, -1), {1: 1}), ((1, 2), {1: 1})]
assert intervals([x + 2, x**2 - 2], strict=True) == \
[((-2, -2), {0: 1}), ((-S(3)/2, -1), {1: 1}), ((1, 2), {1: 1})]
f = 7*z**4 - 19*z**3 + 20*z**2 + 17*z + 20
assert intervals(f) == []
real_part, complex_part = intervals(f, all=True, sqf=True)
assert real_part == []
    assert all(re(a) < re(r) < re(b) and im(a) < im(r) < im(b)
               for (a, b), r in zip(complex_part, nroots(f)))
assert complex_part == [(-S(40)/7 - 40*I/7, 0), (-S(40)/7, 40*I/7),
(-40*I/7, S(40)/7), (0, S(40)/7 + 40*I/7)]
real_part, complex_part = intervals(f, all=True, sqf=True, eps=S(1)/10)
assert real_part == []
    assert all(re(a) < re(r) < re(b) and im(a) < im(r) < im(b)
               for (a, b), r in zip(complex_part, nroots(f)))
raises(ValueError, lambda: intervals(x**2 - 2, eps=10**-100000))
raises(ValueError, lambda: Poly(x**2 - 2).intervals(eps=10**-100000))
raises(
ValueError, lambda: intervals([x**2 - 2, x**2 - 3], eps=10**-100000))
def test_refine_root():
f = Poly(x**2 - 2)
assert f.refine_root(1, 2, steps=0) == (1, 2)
assert f.refine_root(-2, -1, steps=0) == (-2, -1)
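    # with steps=None and no eps, a single bisection step is taken (same result as steps=1 below)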
assert f.refine_root(1, 2, steps=None) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=None) == (-S(3)/2, -1)
assert f.refine_root(1, 2, steps=1) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=1) == (-S(3)/2, -1)
assert f.refine_root(1, 2, steps=1, fast=True) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=1, fast=True) == (-S(3)/2, -1)
assert f.refine_root(1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
assert f.refine_root(1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
raises(PolynomialError, lambda: (f**2).refine_root(1, 2, check_sqf=True))
raises(RefinementFailed, lambda: (f**2).refine_root(1, 2))
raises(RefinementFailed, lambda: (f**2).refine_root(2, 3))
f = x**2 - 2
assert refine_root(f, 1, 2, steps=1) == (1, S(3)/2)
assert refine_root(f, -2, -1, steps=1) == (-S(3)/2, -1)
assert refine_root(f, 1, 2, steps=1, fast=True) == (1, S(3)/2)
assert refine_root(f, -2, -1, steps=1, fast=True) == (-S(3)/2, -1)
assert refine_root(f, 1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
assert refine_root(f, 1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
raises(PolynomialError, lambda: refine_root(1, 7, 8, eps=S(1)/100))
raises(ValueError, lambda: Poly(f).refine_root(1, 2, eps=10**-100000))
raises(ValueError, lambda: refine_root(f, 1, 2, eps=10**-100000))
def test_count_roots():
assert count_roots(x**2 - 2) == 2
assert count_roots(x**2 - 2, inf=-oo) == 2
assert count_roots(x**2 - 2, sup=+oo) == 2
assert count_roots(x**2 - 2, inf=-oo, sup=+oo) == 2
assert count_roots(x**2 - 2, inf=-2) == 2
assert count_roots(x**2 - 2, inf=-1) == 1
assert count_roots(x**2 - 2, sup=1) == 1
assert count_roots(x**2 - 2, sup=2) == 2
assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
assert count_roots(x**2 + 2) == 0
assert count_roots(x**2 + 2, inf=-2*I) == 2
assert count_roots(x**2 + 2, sup=+2*I) == 2
assert count_roots(x**2 + 2, inf=-2*I, sup=+2*I) == 2
assert count_roots(x**2 + 2, inf=0) == 0
assert count_roots(x**2 + 2, sup=0) == 0
assert count_roots(x**2 + 2, inf=-I) == 1
assert count_roots(x**2 + 2, sup=+I) == 1
assert count_roots(x**2 + 2, inf=+I/2, sup=+I) == 0
assert count_roots(x**2 + 2, inf=-I, sup=-I/2) == 0
raises(PolynomialError, lambda: count_roots(1))
def test_Poly_root():
f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
assert f.root(0) == -S(1)/2
assert f.root(1) == 2
assert f.root(2) == 2
raises(IndexError, lambda: f.root(3))
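    # x**5 + x + 1 == (x**2 + x + 1)*(x**3 - x**2 + 1), so its roots are indexed
    # through RootOf applied to the cubic factor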
assert Poly(x**5 + x + 1).root(0) == RootOf(x**3 - x**2 + 1, 0)
def test_real_roots():
assert real_roots(x) == [0]
assert real_roots(x, multiple=False) == [(0, 1)]
assert real_roots(x**3) == [0, 0, 0]
assert real_roots(x**3, multiple=False) == [(0, 3)]
assert real_roots(x*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0]
assert real_roots(x*(x**3 + x + 3), multiple=False) == [(RootOf(
x**3 + x + 3, 0), 1), (0, 1)]
assert real_roots(
x**3*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0, 0, 0]
assert real_roots(x**3*(x**3 + x + 3), multiple=False) == [(RootOf(
x**3 + x + 3, 0), 1), (0, 3)]
f = 2*x**3 - 7*x**2 + 4*x + 4
g = x**3 + x + 1
assert Poly(f).real_roots() == [-S(1)/2, 2, 2]
assert Poly(g).real_roots() == [RootOf(g, 0)]
def test_all_roots():
f = 2*x**3 - 7*x**2 + 4*x + 4
g = x**3 + x + 1
assert Poly(f).all_roots() == [-S(1)/2, 2, 2]
assert Poly(g).all_roots() == [RootOf(g, 0), RootOf(g, 1), RootOf(g, 2)]
def test_nroots():
assert Poly(0, x).nroots() == []
assert Poly(1, x).nroots() == []
assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
assert Poly(x**2 + 1, x).nroots() == [-1.0*I, 1.0*I]
roots, error = Poly(x**2 - 1, x).nroots(error=True)
assert roots == [-1.0, 1.0] and error < 1e25
roots, error = Poly(x**2 + 1, x).nroots(error=True)
assert roots == [-1.0*I, 1.0*I] and error < 1e25
roots, error = Poly(x**2/3 - S(1)/3, x).nroots(error=True)
assert roots == [-1.0, 1.0] and error < 1e25
roots, error = Poly(x**2/3 + S(1)/3, x).nroots(error=True)
assert roots == [-1.0*I, 1.0*I] and error < 1e25
assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
assert Poly(
x**2 + 2*I, x, extension=I).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
assert Poly(0.2*x + 0.1).nroots() == [-0.5]
roots = nroots(x**5 + x + 1, n=5)
eps = Float("1e-5")
assert re(roots[0]).epsilon_eq(-0.75487, eps) is True
assert im(roots[0]) == 0.0
assert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is True
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is True
assert re(roots[3]).epsilon_eq(+0.87743, eps) is True
assert im(roots[3]).epsilon_eq(-0.74486, eps) is True
assert re(roots[4]).epsilon_eq(+0.87743, eps) is True
assert im(roots[4]).epsilon_eq(+0.74486, eps) is True
eps = Float("1e-6")
assert re(roots[0]).epsilon_eq(-0.75487, eps) is False
assert im(roots[0]) == 0.0
assert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is False
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is False
assert re(roots[3]).epsilon_eq(+0.87743, eps) is False
assert im(roots[3]).epsilon_eq(-0.74486, eps) is False
assert re(roots[4]).epsilon_eq(+0.87743, eps) is False
assert im(roots[4]).epsilon_eq(+0.74486, eps) is False
raises(DomainError, lambda: Poly(x + y, x).nroots())
raises(MultivariatePolynomialError, lambda: Poly(x + y).nroots())
assert nroots(x**2 - 1) == [-1.0, 1.0]
roots, error = nroots(x**2 - 1, error=True)
assert roots == [-1.0, 1.0] and error < 1e25
assert nroots(x + I) == [-1.0*I]
assert nroots(x + 2*I) == [-2.0*I]
raises(PolynomialError, lambda: nroots(0))
def test_ground_roots():
f = x**6 - 4*x**4 + 4*x**3 - x**2
assert Poly(f).ground_roots() == {S(1): 2, S(0): 2}
assert ground_roots(f) == {S(1): 2, S(0): 2}
def test_nth_power_roots_poly():
f = x**4 - x**2 + 1
f_2 = (x**2 - x + 1)**2
f_3 = (x**2 + 1)**2
f_4 = (x**2 + x + 1)**2
f_12 = (x - 1)**4
assert nth_power_roots_poly(f, 1) == f
raises(ValueError, lambda: nth_power_roots_poly(f, 0))
raises(ValueError, lambda: nth_power_roots_poly(f, x))
assert factor(nth_power_roots_poly(f, 2)) == f_2
assert factor(nth_power_roots_poly(f, 3)) == f_3
assert factor(nth_power_roots_poly(f, 4)) == f_4
assert factor(nth_power_roots_poly(f, 12)) == f_12
raises(MultivariatePolynomialError, lambda: nth_power_roots_poly(
x + y, 2, x, y))
def test_torational_factor_list():
p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
assert _torational_factor_list(p, x) == (-2, [
(-x*(1 + sqrt(2))/2 + 1, 1),
(-x*(1 + sqrt(2)) - 1, 1),
(-x*(1 + sqrt(2)) + 1, 1)])
p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + 2**Rational(1, 4))}))
assert _torational_factor_list(p, x) is None
def test_cancel():
assert cancel(0) == 0
assert cancel(7) == 7
assert cancel(x) == x
assert cancel(oo) == oo
assert cancel((2, 3)) == (1, 2, 3)
assert cancel((1, 0), x) == (1, 1, 0)
assert cancel((0, 1), x) == (1, 0, 1)
f, g, p, q = 4*x**2 - 4, 2*x - 2, 2*x + 2, 1
F, G, P, Q = [ Poly(u, x) for u in (f, g, p, q) ]
assert F.cancel(G) == (1, P, Q)
assert cancel((f, g)) == (1, p, q)
assert cancel((f, g), x) == (1, p, q)
assert cancel((f, g), (x,)) == (1, p, q)
assert cancel((F, G)) == (1, P, Q)
assert cancel((f, g), polys=True) == (1, P, Q)
assert cancel((F, G), polys=False) == (1, p, q)
f = (x**2 - 2)/(x + sqrt(2))
assert cancel(f) == f
assert cancel(f, greedy=False) == x - sqrt(2)
f = (x**2 - 2)/(x - sqrt(2))
assert cancel(f) == f
assert cancel(f, greedy=False) == x + sqrt(2)
assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)
assert cancel((x**2 - y)/(x - y)) == 1/(x - y)*(x**2 - y)
assert cancel((x**2 - y**2)/(x - y), x) == x + y
assert cancel((x**2 - y**2)/(x - y), y) == x + y
assert cancel((x**2 - y**2)/(x - y)) == x + y
assert cancel((x**3 - 1)/(x**2 - 1)) == (x**2 + x + 1)/(x + 1)
assert cancel((x**3/2 - S(1)/2)/(x**2 - 1)) == (x**2 + x + 1)/(2*x + 2)
assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1
f = Poly(x**2 - a**2, x)
g = Poly(x - a, x)
F = Poly(x + a, x)
G = Poly(1, x)
assert cancel((f, g)) == (1, F, G)
f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
g = x**2 - 2
assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
f = Poly(-2*x + 3, x)
g = Poly(-x**9 + x**8 + x**6 - x**5 + 2*x**2 - 3*x + 1, x)
assert cancel((f, g)) == (1, -f, -g)
f = Poly(y, y, domain='ZZ(x)')
g = Poly(1, y, domain='ZZ[x]')
assert f.cancel(
g) == (1, Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
assert f.cancel(g, include=True) == (
Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
f = Poly(5*x*y + x, y, domain='ZZ(x)')
g = Poly(2*x**2*y, y, domain='ZZ(x)')
assert f.cancel(g, include=True) == (
Poly(5*y + 1, y, domain='ZZ(x)'), Poly(2*x*y, y, domain='ZZ(x)'))
f = -(-2*x - 4*y + 0.005*(z - y)**2)/((z - y)*(-z + y + 2))
assert cancel(f).is_Mul == True
P = tanh(x - 3.0)
Q = tanh(x + 3.0)
f = ((-2*P**2 + 2)*(-P**2 + 1)*Q**2/2 + (-2*P**2 + 2)*(-2*Q**2 + 2)*P*Q - (-2*P**2 + 2)*P**2*Q**2 + (-2*Q**2 + 2)*(-Q**2 + 1)*P**2/2 - (-2*Q**2 + 2)*P**2*Q**2)/(2*sqrt(P**2*Q**2 + 0.0001)) \
+ (-(-2*P**2 + 2)*P*Q**2/2 - (-2*Q**2 + 2)*P**2*Q/2)*((-2*P**2 + 2)*P*Q**2/2 + (-2*Q**2 + 2)*P**2*Q/2)/(2*(P**2*Q**2 + 0.0001)**(S(3)/2))
assert cancel(f).is_Mul == True
# issue 3923
A = Symbol('A', commutative=False)
p1 = Piecewise((A*(x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
p2 = Piecewise((A*(x - 1), x > 1), (1/x, True))
assert cancel(p1) == p2
assert cancel(2*p1) == 2*p2
assert cancel(1 + p1) == 1 + p2
assert cancel((x**2 - 1)/(x + 1)*p1) == (x - 1)*p2
assert cancel((x**2 - 1)/(x + 1) + p1) == (x - 1) + p2
p3 = Piecewise(((x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
p4 = Piecewise(((x - 1), x > 1), (1/x, True))
assert cancel(p3) == p4
assert cancel(2*p3) == 2*p4
assert cancel(1 + p3) == 1 + p4
assert cancel((x**2 - 1)/(x + 1)*p3) == (x - 1)*p4
assert cancel((x**2 - 1)/(x + 1) + p3) == (x - 1) + p4
def test_reduced():
f = 2*x**4 + y**2 - x**2 + y**3
G = [x**3 - x, y**3 - y]
Q = [2*x, 1]
r = x**2 + y**2 + y
assert reduced(f, G) == (Q, r)
assert reduced(f, G, x, y) == (Q, r)
H = groebner(G)
assert H.reduce(f) == (Q, r)
Q = [Poly(2*x, x, y), Poly(1, x, y)]
r = Poly(x**2 + y**2 + y, x, y)
assert _strict_eq(reduced(f, G, polys=True), (Q, r))
assert _strict_eq(reduced(f, G, x, y, polys=True), (Q, r))
H = groebner(G, polys=True)
assert _strict_eq(H.reduce(f), (Q, r))
f = 2*x**3 + y**3 + 3*y
G = groebner([x**2 + y**2 - 1, x*y - 2])
Q = [x**2 - x*y**3/2 + x*y/2 + y**6/4 - y**4/2 + y**2/4, -y**5/4 + y**3/2 + 3*y/4]
r = 0
assert reduced(f, G) == (Q, r)
assert G.reduce(f) == (Q, r)
assert reduced(f, G, auto=False)[1] != 0
assert G.reduce(f, auto=False)[1] != 0
assert G.contains(f) is True
assert G.contains(f + 1) is False
assert reduced(1, [1], x) == ([1], 0)
raises(ComputationFailed, lambda: reduced(1, [1]))
def test_groebner():
assert groebner([], x, y, z) == []
assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex') == [1 + x**2, -1 + y**4]
assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex') == [-1 + y**4, z**3, 1 + x**2]
assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex', polys=True) == \
[Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex', polys=True) == \
[Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]
assert groebner([x**3 - 1, x**2 - 1]) == [x - 1]
assert groebner([Eq(x**3, 1), Eq(x**2, 1)]) == [x - 1]
F = [3*x**2 + y*z - 5*x - 1, 2*x + 3*x*y + y**2, x - 3*y + x*z - 2*z**2]
f = z**9 - x**2*y**3 - 3*x*y**2*z + 11*y*z**2 + x**2*z**2 - 5
G = groebner(F, x, y, z, modulus=7, symmetric=False)
assert G == [1 + x + y + 3*z + 2*z**2 + 2*z**3 + 6*z**4 + z**5,
1 + 3*y + y**2 + 6*z**2 + 3*z**3 + 3*z**4 + 3*z**5 + 4*z**6,
1 + 4*y + 4*z + y*z + 4*z**3 + z**4 + z**6,
6 + 6*z + z**2 + 4*z**3 + 3*z**4 + 6*z**5 + 3*z**6 + z**7]
Q, r = reduced(f, G, x, y, z, modulus=7, symmetric=False, polys=True)
assert sum([ q*g for q, g in zip(Q, G.polys)], r) == Poly(f, modulus=7)
F = [x*y - 2*y, 2*y**2 - x**2]
assert groebner(F, x, y, order='grevlex') == \
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
assert groebner(F, y, x, order='grevlex') == \
[x**3 - 2*x**2, -x**2 + 2*y**2, x*y - 2*y]
assert groebner(F, order='grevlex', field=True) == \
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
assert groebner([1], x) == [1]
assert groebner([x**2 + 2.0*y], x, y) == [1.0*x**2 + 2.0*y]
raises(ComputationFailed, lambda: groebner([1]))
assert groebner([x**2 - 1, x**3 + 1], method='buchberger') == [x + 1]
assert groebner([x**2 - 1, x**3 + 1], method='f5b') == [x + 1]
raises(ValueError, lambda: groebner([x, y], method='unknown'))
def test_fglm():
F = [a + b + c + d, a*b + a*d + b*c + b*d, a*b*c + a*b*d + a*c*d + b*c*d, a*b*c*d - 1]
G = groebner(F, a, b, c, d, order=grlex)
B = [
4*a + 3*d**9 - 4*d**5 - 3*d,
4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d,
d**12 - d**8 - d**4 + 1,
]
assert groebner(F, a, b, c, d, order=lex) == B
assert G.fglm(lex) == B
F = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
-72*t*x**7 - 252*t*x**6 + 192*t*x**5 + 1260*t*x**4 + 312*t*x**3 - 404*t*x**2 - 576*t*x + \
108*t - 72*x**7 - 256*x**6 + 192*x**5 + 1280*x**4 + 312*x**3 - 576*x + 96]
G = groebner(F, t, x, order=grlex)
B = [
203577793572507451707*t + 627982239411707112*x**7 - 666924143779443762*x**6 - \
10874593056632447619*x**5 + 5119998792707079562*x**4 + 72917161949456066376*x**3 + \
20362663855832380362*x**2 - 142079311455258371571*x + 183756699868981873194,
9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
]
assert groebner(F, t, x, order=lex) == B
assert G.fglm(lex) == B
F = [x**2 - x - 3*y + 1, -2*x + y**2 + y - 1]
G = groebner(F, x, y, order=lex)
B = [
x**2 - x - 3*y + 1,
y**2 - 2*x + y - 1,
]
assert groebner(F, x, y, order=grlex) == B
assert G.fglm(grlex) == B
def test_is_zero_dimensional():
assert is_zero_dimensional([x, y], x, y) is True
assert is_zero_dimensional([x**3 + y**2], x, y) is False
assert is_zero_dimensional([x, y, z], x, y, z) is True
assert is_zero_dimensional([x, y, z], x, y, z, t) is False
F = [x*y - z, y*z - x, x*y - y]
assert is_zero_dimensional(F, x, y, z) is True
F = [x**2 - 2*x*z + 5, x*y**2 + y*z**3, 3*y**2 - 8*z**2]
assert is_zero_dimensional(F, x, y, z) is True
def test_GroebnerBasis():
F = [x*y - 2*y, 2*y**2 - x**2]
G = groebner(F, x, y, order='grevlex')
H = [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
P = [ Poly(h, x, y) for h in H ]
assert isinstance(G, GroebnerBasis) is True
assert len(G) == 3
assert G[0] == H[0] and not G[0].is_Poly
assert G[1] == H[1] and not G[1].is_Poly
assert G[2] == H[2] and not G[2].is_Poly
assert G[1:] == H[1:] and not any(g.is_Poly for g in G[1:])
assert G[:2] == H[:2] and not any(g.is_Poly for g in G[1:])
assert G.exprs == H
assert G.polys == P
assert G.gens == (x, y)
assert G.domain == ZZ
assert G.order == grevlex
assert G == H
assert G == tuple(H)
assert G == P
assert G == tuple(P)
assert G != []
G = groebner(F, x, y, order='grevlex', polys=True)
assert G[0] == P[0] and G[0].is_Poly
assert G[1] == P[1] and G[1].is_Poly
assert G[2] == P[2] and G[2].is_Poly
assert G[1:] == P[1:] and all(g.is_Poly for g in G[1:])
assert G[:2] == P[:2] and all(g.is_Poly for g in G[1:])
def test_poly():
assert poly(x) == Poly(x, x)
assert poly(y) == Poly(y, y)
assert poly(x + y) == Poly(x + y, x, y)
assert poly(x + sin(x)) == Poly(x + sin(x), x, sin(x))
assert poly(x + y, wrt=y) == Poly(x + y, y, x)
assert poly(x + sin(x), wrt=sin(x)) == Poly(x + sin(x), sin(x), x)
assert poly(x*y + 2*x*z**2 + 17) == Poly(x*y + 2*x*z**2 + 17, x, y, z)
assert poly(2*(y + z)**2 - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - 1, y, z)
assert poly(
x*(y + z)**2 - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - 1, x, y, z)
assert poly(2*x*(
y + z)**2 - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - 1, x, y, z)
assert poly(2*(
y + z)**2 - x - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - x - 1, x, y, z)
assert poly(x*(
y + z)**2 - x - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - x - 1, x, y, z)
assert poly(2*x*(y + z)**2 - x - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*
x*z**2 - x - 1, x, y, z)
assert poly(x*y + (x + y)**2 + (x + z)**2) == \
Poly(2*x*z + 3*x*y + y**2 + z**2 + 2*x**2, x, y, z)
assert poly(x*y*(x + y)*(x + z)**2) == \
Poly(x**3*y**2 + x*y**2*z**2 + y*x**2*z**2 + 2*z*x**2*
y**2 + 2*y*z*x**3 + y*x**4, x, y, z)
assert poly(Poly(x + y + z, y, x, z)) == Poly(x + y + z, y, x, z)
assert poly((x + y)**2, x) == Poly(x**2 + 2*x*y + y**2, x, domain=ZZ[y])
assert poly((x + y)**2, y) == Poly(x**2 + 2*x*y + y**2, y, domain=ZZ[x])
assert poly(1, x) == Poly(1, x)
raises(GeneratorsNeeded, lambda: poly(1))
# issue 3085
assert poly(x + y, x, y) == Poly(x + y, x, y)
assert poly(x + y, y, x) == Poly(x + y, y, x)
def test_keep_coeff():
u = Mul(2, x + 1, evaluate=False)
assert _keep_coeff(S(1), x) == x
assert _keep_coeff(S(-1), x) == -x
assert _keep_coeff(S(1.0), x) == 1.0*x
assert _keep_coeff(S(-1.0), x) == -1.0*x
assert _keep_coeff(S(1), 2*x) == 2*x
assert _keep_coeff(S(2), x/2) == x
assert _keep_coeff(S(2), sin(x)) == 2*sin(x)
assert _keep_coeff(S(2), x + 1) == u
assert _keep_coeff(x, 1/x) == 1
assert _keep_coeff(x + 1, S(2)) == u
@XFAIL
def test_poly_matching_consistency():
# Test for this issue:
# http://code.google.com/p/sympy/issues/detail?id=2415
assert I * Poly(x, x) == Poly(I*x, x)
assert Poly(x, x) * I == Poly(I*x, x)
@XFAIL
def test_issue_2687():
assert expand(factor(expand(
(x - I*y)*(z - I*t)), extension=[I])) == -I*t*x - t*y + x*z - I*y*z
def test_noncommutative():
class foo(Expr):
is_commutative=False
e = x/(x + x*y)
c = 1/( 1 + y)
assert cancel(foo(e)) == foo(c)
assert cancel(e + foo(e)) == c + foo(c)
assert cancel(e*foo(c)) == c*foo(c)
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/sympy/polys/tests/test_polytools.py
|
Python
|
gpl-3.0
| 105,502
|
[
"Gaussian"
] |
78b6ecab67df417e15d1cb969999fa8ae751320ccacba3a576ac104dc53a5b9d
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# read data
#
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/office_ascii.case")
reader.Update()
outline = vtk.vtkStructuredGridOutlineFilter()
# outline SetInputConnection [reader GetOutputPort]
outline.SetInputData(reader.GetOutput().GetBlock(0))
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(mapOutline)
outlineActor.GetProperty().SetColor(0,0,0)
# Create source for streamtubes
streamer = vtk.vtkStreamPoints()
# streamer SetInputConnection [reader GetOutputPort]
streamer.SetInputData(reader.GetOutput().GetBlock(0))
streamer.SetStartPosition(0.1,2.1,0.5)
streamer.SetMaximumPropagationTime(500)
streamer.SetTimeIncrement(0.5)
streamer.SetIntegrationDirectionToForward()
cone = vtk.vtkConeSource()
cone.SetResolution(8)
cones = vtk.vtkGlyph3D()
cones.SetInputConnection(streamer.GetOutputPort())
cones.SetSourceConnection(cone.GetOutputPort())
cones.SetScaleFactor(0.9)
cones.SetScaleModeToScaleByVector()
mapCones = vtk.vtkPolyDataMapper()
mapCones.SetInputConnection(cones.GetOutputPort())
# eval mapCones SetScalarRange [[reader GetOutput] GetScalarRange]
mapCones.SetScalarRange(reader.GetOutput().GetBlock(0).GetScalarRange())
conesActor = vtk.vtkActor()
conesActor.SetMapper(mapCones)
ren1.AddActor(outlineActor)
ren1.AddActor(conesActor)
ren1.SetBackground(0.4,0.4,0.5)
renWin.SetSize(300,300)
iren.Initialize()
# interact with data
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
|
berendkleinhaneveld/VTK
|
IO/EnSight/Testing/Python/EnSightOfficeASCII.py
|
Python
|
bsd-3-clause
| 1,997
|
[
"VTK"
] |
5636553f2a5c872c611d40eb5aaf041f23e1cefabde4329bb3a6a547650f1a6b
|
"""
This file contains miscellaneous utility functions.
"""
import os
import random
from propargs.propargs import PropArgs
from registry.registry import set_propargs
def gaussian(mean, sigma, trim_at_zero=True):
sample = random.gauss(mean, sigma)
if trim_at_zero:
if sample < 0:
sample *= -1
return sample
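# A small usage sketch (values are hypothetical): with trim_at_zero the
# negative tail is reflected onto the positive axis rather than re-sampled:
#   gaussian(0, 1)                      # always >= 0
#   gaussian(0, 1, trim_at_zero=False)  # may be negative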
def get_func_name(f):
# Until Agent.restore and Env.to_json can restore functions from function
# names, strings will be returned as-is.
if isinstance(f, str):
return f
elif f is not None:
return f.__name__
else:
return ""
def get_prop_path(model_name, model_dir="models"):
ihome = os.getenv("INDRA_HOME", " ")
return ihome + "/" + model_dir + "/props/" + model_name + ".props.json"
def init_props(model_nm, props=None, model_dir="models",
skip_user_questions=False):
props_file = get_prop_path(model_nm, model_dir=model_dir)
if props is None:
pa = PropArgs.create_props(model_nm,
ds_file=props_file,
skip_user_questions=skip_user_questions)
else:
pa = PropArgs.create_props(model_nm,
prop_dict=props,
skip_user_questions=skip_user_questions)
# we keep props available in registry:
set_propargs(pa)
return pa
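# Minimal usage sketch (the model name "basic" is hypothetical): this reads
# $INDRA_HOME/models/props/basic.props.json via get_prop_path() and registers
# the resulting PropArgs object with set_propargs():
#   pa = init_props("basic", skip_user_questions=True)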
def get_props(model_nm, props=None, model_dir="models",
skip_user_questions=False):
"""
This name for the function is deprecated: use init_props()
"""
return init_props(model_nm, props=props, model_dir=model_dir,
skip_user_questions=skip_user_questions)
|
gcallah/Indra
|
indra/utils.py
|
Python
|
gpl-3.0
| 1,694
|
[
"Gaussian"
] |
86b4617eb199a5cc1bd8e876dbd7db809af2716eb7b94eb37a2d9b6992321b60
|
"""
Tags Controller: handles tagging/untagging of entities
and provides autocomplete support.
"""
from galaxy import web
from galaxy.web.base.controller import BaseUIController, UsesTagsMixin
from sqlalchemy.sql import select
from sqlalchemy.sql.expression import and_, func
import logging
log = logging.getLogger( __name__ )
class TagsController ( BaseUIController, UsesTagsMixin ):
@web.expose
@web.require_login( "edit item tags" )
def get_tagging_elt_async( self, trans, item_id, item_class, elt_context="" ):
"""
Returns HTML for editing an item's tags.
"""
item = self._get_item( trans, item_class, trans.security.decode_id( item_id ) )
if not item:
return trans.show_error_message( "No item of class %s with id %s " % ( item_class, item_id ) )
return trans.fill_template( "/tagging_common.mako",
tag_type="individual",
user=trans.user,
tagged_item=item,
elt_context=elt_context,
in_form=False,
input_size="22",
tag_click_fn="default_tag_click_fn",
use_toggle_link=False )
@web.expose
@web.require_login( "add tag to an item" )
def add_tag_async( self, trans, item_id=None, item_class=None, new_tag=None, context=None ):
"""
Add tag to an item.
"""
# Apply tag.
item = self._get_item( trans, item_class, trans.security.decode_id( item_id ) )
user = trans.user
self.get_tag_handler( trans ).apply_item_tags( trans, user, item, new_tag.encode( 'utf-8' ) )
trans.sa_session.flush()
# Log.
params = dict( item_id=item.id, item_class=item_class, tag=new_tag )
trans.log_action( user, unicode( "tag" ), context, params )
@web.expose
@web.require_login( "remove tag from an item" )
def remove_tag_async( self, trans, item_id=None, item_class=None, tag_name=None, context=None ):
"""
Remove tag from an item.
"""
# Remove tag.
item = self._get_item( trans, item_class, trans.security.decode_id( item_id ) )
user = trans.user
self.get_tag_handler( trans ).remove_item_tag( trans, user, item, tag_name.encode( 'utf-8' ) )
trans.sa_session.flush()
# Log.
params = dict( item_id=item.id, item_class=item_class, tag=tag_name )
trans.log_action( user, unicode( "untag" ), context, params )
# Retag an item. All previous tags are deleted and new tags are applied.
#@web.expose
@web.require_login( "Apply a new set of tags to an item; previous tags are deleted." )
def retag_async( self, trans, item_id=None, item_class=None, new_tags=None ):
"""
Apply a new set of tags to an item; previous tags are deleted.
"""
# Apply tags.
item = self._get_item( trans, item_class, trans.security.decode_id( item_id ) )
user = trans.user
self.get_tag_handler( trans ).delete_item_tags( trans, item )
self.get_tag_handler( trans ).apply_item_tags( trans, user, item, new_tags.encode( 'utf-8' ) )
trans.sa_session.flush()
@web.expose
@web.require_login( "get autocomplete data for an item's tags" )
def tag_autocomplete_data( self, trans, q=None, limit=None, timestamp=None, item_id=None, item_class=None ):
"""
Get autocomplete data for an item's tags.
"""
# Get item, do security check, and get autocomplete data.
item = None
if item_id is not None:
item = self._get_item( trans, item_class, trans.security.decode_id( item_id ) )
user = trans.user
item_class = self.get_class( item_class )
q = '' if q is None else q
q = q.encode( 'utf-8' )
if q.find( ":" ) == -1:
return self._get_tag_autocomplete_names( trans, q, limit, timestamp, user, item, item_class )
else:
return self._get_tag_autocomplete_values( trans, q, limit, timestamp, user, item, item_class )
def _get_tag_autocomplete_names( self, trans, q, limit, timestamp, user=None, item=None, item_class=None ):
"""
Returns autocomplete data for tag names ordered from most frequently used to
least frequently used.
"""
# Get user's item tags and usage counts.
# Get item's class object and item-tag association class.
if item is None and item_class is None:
raise RuntimeError( "Both item and item_class cannot be None" )
elif item is not None:
item_class = item.__class__
item_tag_assoc_class = self.get_tag_handler( trans ).get_tag_assoc_class( item_class )
# Build select statement.
cols_to_select = [ item_tag_assoc_class.table.c.tag_id, func.count( '*' ) ]
from_obj = item_tag_assoc_class.table.join( item_class.table ).join( trans.app.model.Tag.table )
where_clause = and_( trans.app.model.Tag.table.c.name.like( q + "%" ),
item_tag_assoc_class.table.c.user_id == user.id )
order_by = [ func.count( "*" ).desc() ]
group_by = item_tag_assoc_class.table.c.tag_id
# Do query and get result set.
query = select( columns=cols_to_select,
from_obj=from_obj,
whereclause=where_clause,
group_by=group_by,
order_by=order_by,
limit=limit )
result_set = trans.sa_session.execute( query )
# Create and return autocomplete data.
ac_data = "#Header|Your Tags\n"
for row in result_set:
tag = self.get_tag_handler( trans ).get_tag_by_id( trans, row[0] )
# Exclude tags that are already applied to the item.
if ( item is not None ) and ( self.get_tag_handler( trans ).item_has_tag( trans, trans.user, item, tag ) ):
continue
# Add tag to autocomplete data. Use the most frequent name that user
# has employed for the tag.
tag_names = self._get_usernames_for_tag( trans, trans.user, tag, item_class, item_tag_assoc_class )
ac_data += tag_names[0] + "|" + tag_names[0] + "\n"
return ac_data
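    # Example of the payload built above (tag names are hypothetical); the
    # first line is a header, then one "<completion>|<display>" pair per tag:
    #   #Header|Your Tags
    #   rnaseq|rnaseq
    #   tissue|tissue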
def _get_tag_autocomplete_values( self, trans, q, limit, timestamp, user=None, item=None, item_class=None ):
"""
Returns autocomplete data for tag values ordered from most frequently used to
least frequently used.
"""
tag_name_and_value = q.split( ":" )
tag_name = tag_name_and_value[0]
tag_value = tag_name_and_value[1]
tag = self.get_tag_handler( trans ).get_tag_by_name( trans, tag_name )
# Don't autocomplete if tag doesn't exist.
if tag is None:
return ""
# Get item's class object and item-tag association class.
if item is None and item_class is None:
raise RuntimeError( "Both item and item_class cannot be None" )
elif item is not None:
item_class = item.__class__
item_tag_assoc_class = self.get_tag_handler( trans ).get_tag_assoc_class( item_class )
# Build select statement.
cols_to_select = [ item_tag_assoc_class.table.c.value, func.count( '*' ) ]
from_obj = item_tag_assoc_class.table.join( item_class.table ).join( trans.app.model.Tag.table )
where_clause = and_( item_tag_assoc_class.table.c.user_id == user.id,
trans.app.model.Tag.table.c.id == tag.id,
item_tag_assoc_class.table.c.value.like( tag_value + "%" ) )
order_by = [ func.count("*").desc(), item_tag_assoc_class.table.c.value ]
group_by = item_tag_assoc_class.table.c.value
# Do query and get result set.
query = select( columns=cols_to_select,
from_obj=from_obj,
whereclause=where_clause,
group_by=group_by,
order_by=order_by,
limit=limit )
result_set = trans.sa_session.execute( query )
# Create and return autocomplete data.
ac_data = "#Header|Your Values for '%s'\n" % ( tag_name )
tag_uname = self._get_usernames_for_tag( trans, trans.user, tag, item_class, item_tag_assoc_class )[0]
for row in result_set:
ac_data += tag_uname + ":" + row[0] + "|" + row[0] + "\n"
return ac_data
def _get_usernames_for_tag( self, trans, user, tag, item_class, item_tag_assoc_class ):
"""
        Returns a list of the user names for a tag, ordered from most
        popular to least popular name.
"""
# Build select stmt.
cols_to_select = [ item_tag_assoc_class.table.c.user_tname, func.count( '*' ) ]
where_clause = and_( item_tag_assoc_class.table.c.user_id == user.id,
item_tag_assoc_class.table.c.tag_id == tag.id )
group_by = item_tag_assoc_class.table.c.user_tname
order_by = [ func.count( "*" ).desc() ]
# Do query and get result set.
query = select( columns=cols_to_select,
whereclause=where_clause,
group_by=group_by,
order_by=order_by )
result_set = trans.sa_session.execute( query )
user_tag_names = list()
for row in result_set:
user_tag_names.append( row[0] )
return user_tag_names
def _get_item( self, trans, item_class_name, id ):
"""
Get an item based on type and id.
"""
item_class = self.get_tag_handler( trans ).item_tag_assoc_info[item_class_name].item_class
item = trans.sa_session.query( item_class ).filter( item_class.id == id)[0]
return item
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/controllers/tag.py
|
Python
|
gpl-3.0
| 10,048
|
[
"Galaxy"
] |
c62a47469dc80f2910cbf188a8dfe96620774a82f0ad2440c4db3071987047eb
|
"""
prototype for a pymel ipython configuration
Current Features:
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features:
tab completion of PyNode attributes
color coding of tab complete options:
- to differentiate between methods and attributes
- dag nodes vs depend nodes
- shortNames vs longNames
magic commands
bookmarking of maya's recent project and files
To Use:
place in your PYTHONPATH
add the following line to the 'main' function of $HOME/.ipython/ipy_user_conf.py::
import ipymel
Author: Chad Dombrova
Version: 0.1
"""
from optparse import OptionParser
try:
import maya
except ImportError, e:
print( "ipymel can only be setup if the maya package can be imported" )
raise e
import IPython.ipapi
ip = IPython.ipapi.get()
from IPython.ColorANSI import TermColors, ColorScheme, ColorSchemeTable
from IPython.genutils import page
try:
import readline
except ImportError:
import pyreadline as readline
delim = readline.get_completer_delims()
delim = delim.replace('|', '') # remove pipes
delim = delim.replace(':', '') # remove colon
#delim = delim.replace("'", '') # remove quotes
#delim = delim.replace('"', '') # remove quotes
readline.set_completer_delims(delim)
import inspect, re, glob, os, shlex, sys
from pymel import core
import maya.cmds as cmds
import IPython.Extensions.ipy_completers
_scheme_default = 'Linux'
Colors = TermColors # just a shorthand
# Build a few color schemes
NoColor = ColorScheme(
'NoColor',{
'instance' : Colors.NoColor,
'collapsed' : Colors.NoColor,
'tree' : Colors.NoColor,
'transform' : Colors.NoColor,
'shape' : Colors.NoColor,
'nonunique' : Colors.NoColor,
'nonunique_transform' : Colors.NoColor,
'normal' : Colors.NoColor # color off (usu. Colors.Normal)
} )
LinuxColors = ColorScheme(
'Linux',{
'instance' : Colors.LightCyan,
'collapsed' : Colors.Yellow,
'tree' : Colors.Green,
'transform' : Colors.White,
'shape' : Colors.LightGray,
'nonunique' : Colors.Red,
'nonunique_transform' : Colors.LightRed,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
LightBGColors = ColorScheme(
'LightBG',{
'instance' : Colors.Cyan,
'collapsed' : Colors.LightGreen,
'tree' : Colors.Blue,
'transform' : Colors.DarkGray,
'shape' : Colors.Black,
'nonunique' : Colors.Red,
'nonunique_transform' : Colors.LightRed,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
# Build table of color schemes (needed by the dag_parser)
color_table = ColorSchemeTable([NoColor,LinuxColors,LightBGColors],
_scheme_default)
def finalPipe(obj):
"""
DAG nodes with children should end in a pipe (|), so that each successive pressing
    of TAB will take you further down the DAG hierarchy. This is analogous to TAB
completion of directories, which always places a final slash (/) after a directory.
"""
if cmds.listRelatives( obj ):
return obj + "|"
return obj
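# For example (node names are hypothetical): finalPipe('group1') returns
# 'group1|' when group1 has children, while a childless 'pCube1Shape' is
# returned unchanged.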
def splitDag(obj):
buf = obj.split('|')
tail = buf[-1]
path = '|'.join( buf[:-1] )
return path, tail
def expand( obj ):
"""
    allows for completion of objects that reside within a namespace. For example,
    ``tra*`` will match ``trak:camera`` and ``tram``.
    For now, the search is hardwired to a depth of three recursive namespaces.
TODO:
add some code to determine how deep we should go
"""
return (obj + '*', obj + '*:*', obj + '*:*:*')
def complete_node_with_no_path( node ):
tmpres = cmds.ls( expand(node) )
#print "node_with_no_path", tmpres, node, expand(node)
res = []
for x in tmpres:
x = finalPipe(x.split('|')[-1])
#x = finalPipe(x)
if x not in res:
res.append( x )
#print res
return res
def complete_node_with_attr( node, attr ):
#print "noe_with_attr", node, attr
long_attrs = cmds.listAttr( node )
short_attrs = cmds.listAttr( node , shortNames=1)
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
return [ u'%s.%s' % ( node, a) for a in attrs if a.startswith(attr) ]
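# For example (assuming Maya's default 'persp' camera exists),
# complete_node_with_attr('persp', 'tr') would return plugs such as
# [u'persp.translate', u'persp.translateX', u'persp.translateY', ...].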
def pymel_name_completer(self, event):
def get_children(obj):
path, partialObj = splitDag(obj)
#print "getting children", repr(path), repr(partialObj)
try:
fullpath = cmds.ls( path, l=1 )[0]
if not fullpath: return []
children = cmds.listRelatives( fullpath , f=1, c=1)
if not children: return []
except:
return []
matchStr = fullpath + '|' + partialObj
#print "children", children
#print matchStr, fullpath, path
matches = [ x.replace( fullpath, path, 1) for x in children if x.startswith( matchStr ) ]
#print matches
return matches
#print "\nnode", repr(event.symbol), repr(event.line)
#print "\nbegin"
line = event.symbol
matches = None
#--------------
# Attributes
#--------------
m = re.match( r"""([a-zA-Z_0-9|:.]+)\.(\w*)$""", line)
if m:
node, attr = m.groups()
if node == 'SCENE':
res = cmds.ls( attr + '*' )
if res:
matches = ['SCENE.' + x for x in res if '|' not in x ]
elif node.startswith('SCENE.'):
node = node.replace('SCENE.', '')
matches = ['SCENE.' + x for x in complete_node_with_attr(node, attr) if '|' not in x ]
else:
matches = complete_node_with_attr(node, attr)
#--------------
# Nodes
#--------------
else:
# we don't yet have a full node
if '|' not in line or (line.startswith('|') and line.count('|') == 1):
#print "partial node"
kwargs = {}
if line.startswith('|'):
kwargs['l'] = True
matches = cmds.ls( expand(line), **kwargs )
        # we have a full node, get its children
else:
matches = get_children(line)
if not matches:
raise IPython.ipapi.TryNext
# if we have only one match, get the children as well
if len(matches)==1:
res = get_children(matches[0] + '|')
matches += res
return matches
def pymel_python_completer(self,event):
"""Match attributes or global python names"""
#print "python_matches"
text = event.symbol
#print repr(text)
# Another option, seems to work great. Catches things like ''.<tab>
m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
if not m:
raise IPython.ipapi.TryNext
expr, attr = m.group(1, 3)
#print type(self.Completer), dir(self.Completer)
#print self.Completer.namespace
#print self.Completer.global_namespace
try:
#print "first"
obj = eval(expr, self.Completer.namespace)
except:
try:
#print "second"
obj = eval(expr, self.Completer.global_namespace)
except:
raise IPython.ipapi.TryNext
#print "complete"
if isinstance(obj, (core.nt.DependNode, core.Attribute) ):
#print "isinstance"
node = unicode(obj)
long_attrs = cmds.listAttr( node )
short_attrs = cmds.listAttr( node , shortNames=1)
matches = []
matches = self.Completer.python_matches(text)
#print "here"
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
#print "returning"
matches += [ expr + '.' + at for at in attrs ]
#import colorize
#matches = [ colorize.colorize(x,'magenta') for x in matches ]
return matches
raise IPython.ipapi.TryNext
def buildRecentFileMenu():
if "RecentFilesList" not in core.optionVar:
return
# get the list
RecentFilesList = core.optionVar["RecentFilesList"]
nNumItems = len(RecentFilesList)
RecentFilesMaxSize = core.optionVar["RecentFilesMaxSize"]
# # check if there are too many items in the list
# if (RecentFilesMaxSize < nNumItems):
#
# #if so, truncate the list
# nNumItemsToBeRemoved = nNumItems - RecentFilesMaxSize
#
# #Begin removing items from the head of the array (least recent file in the list)
# for ($i = 0; $i < $nNumItemsToBeRemoved; $i++):
#
# core.optionVar -removeFromArray "RecentFilesList" 0;
#
# RecentFilesList = core.optionVar["RecentFilesList"]
# nNumItems = len($RecentFilesList);
# The RecentFilesTypeList optionVar may not exist since it was
# added after the RecentFilesList optionVar. If it doesn't exist,
# we create it and initialize it with a guess at the file type
if nNumItems > 0 :
if "RecentFilesTypeList" not in core.optionVar:
core.mel.initRecentFilesTypeList( RecentFilesList )
RecentFilesTypeList = core.optionVar["RecentFilesTypeList"]
#toNativePath
# first, check if we are the same.
def open_completer(self, event):
relpath = event.symbol
#print event # dbg
if '-b' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks',{})
return bkms.keys()
if event.symbol == '-':
print "completer"
width_dh = str(len(str(len(ip.user_ns['_sh']) + 1)))
print width_dh
# jump in directory history by number
fmt = '-%0' + width_dh +'d [%s]'
ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_sh'])]
if len(ents) > 1:
return ents
return []
raise IPython.ipapi.TryNext
class TreePager(object):
def __init__(self, colors, options):
self.colors = colors
self.options = options
#print options.depth
def do_level(self, obj, depth, isLast ):
if isLast[-1]:
sep = '`-- '
else:
sep = '|-- '
#sep = '|__ '
depth += 1
branch = ''
for x in isLast[:-1]:
if x:
branch += ' '
else:
branch += '| '
branch = self.colors['tree'] + branch + sep + self.colors['normal']
children = self.getChildren(obj)
name = self.getName(obj)
num = len(children)-1
if children:
if self.options.maxdepth and depth >= self.options.maxdepth:
state = '+'
else:
state = '-'
pre = self.colors['collapsed'] + state + ' '
else:
pre = ' '
yield pre + branch + name + self.colors['normal'] + '\n'
#yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n'
if not self.options.maxdepth or depth < self.options.maxdepth:
for i, x in enumerate(children):
for line in self.do_level(x, depth, isLast+[i==num]):
yield line
def make_tree(self, roots):
num = len(roots)-1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i==num]):
tree += line
return tree
class DagTree(TreePager):
def getChildren(self, obj):
if self.options.shapes:
return obj.getChildren()
else:
return obj.getChildren(type='transform')
def getName(self, obj):
name = obj.nodeName()
if obj.isInstanced():
if isinstance(obj, core.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
id = obj.instanceNumber()
if id != 0:
source = ' -> %s' % obj.getOtherInstances()[0]
else:
source = ''
name = color + name + self.colors['instance'] + ' [' + str(id) + ']' + source
elif not obj.isUniquelyNamed():
if isinstance(obj, core.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
name = color + name
elif isinstance(obj, core.nt.Transform):
# bold
name = self.colors['transform'] + name
else:
name = self.colors['shape'] + name
return name
dag_parser = OptionParser()
dag_parser.add_option("-d", type="int", dest="maxdepth")
dag_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dag_parser.add_option("-s", action="store_true", dest="shapes" )
def magic_dag(self, parameter_s=''):
"""
"""
options, args = dag_parser.parse_args(parameter_s.split())
colors = color_table[self.rc.colors].colors
dagtree = DagTree(colors, options)
if args:
roots = [core.PyNode(args[0])]
else:
roots = core.ls(assemblies=1)
page(dagtree.make_tree(roots))
class DGHistoryTree(TreePager):
def getChildren(self, obj):
source, dest = obj
return source.node().listConnections(plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
def getName(self, obj):
source, dest = obj
name = "%s -> %s" % (source, dest)
return name
def make_tree(self, root):
roots = core.listConnections(root, plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
return TreePager.make_tree(self,roots)
dg_parser = OptionParser()
dg_parser.add_option("-d", type="int", dest="maxdepth")
dg_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dg_parser.add_option("-s", action="store_true", dest="shapes" )
def magic_dghist(self, parameter_s=''):
"""
"""
options, args = dg_parser.parse_args(parameter_s.split())
colors = color_table[self.rc.colors].colors
dgtree = DGHistoryTree(colors, options)
roots = [core.PyNode(args[0])]
page(dgtree.make_tree(roots))
def magic_open(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _sh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
openFile 'dir': changes to directory 'dir'.
openFile -: changes to the last visited directory.
openFile -<n>: changes to the n-th directory in the directory history.
openFile --foo: change to directory that matches 'foo' in history
openFile -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'."""
parameter_s = parameter_s.strip()
#bkms = self.shell.persist.get("bookmarks",{})
oldcwd = os.getcwd()
numcd = re.match(r'(-)(\d+)$',parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = ip.ev('_sh[%d]' % nn )
except IndexError:
print 'The requested directory does not exist in history.'
return
else:
opts = {}
# elif parameter_s.startswith('--'):
# ps = None
# fallback = None
# pat = parameter_s[2:]
# dh = self.shell.user_ns['_sh']
# # first search only by basename (last component)
# for ent in reversed(dh):
# if pat in os.path.basename(ent) and os.path.isdir(ent):
# ps = ent
# break
#
# if fallback is None and pat in ent and os.path.isdir(ent):
# fallback = ent
#
# # if we have no last part match, pick the first full path match
# if ps is None:
# ps = fallback
#
# if ps is None:
# print "No matching entry in directory history"
# return
# else:
# opts = {}
else:
#turn all non-space-escaping backslashes to slashes,
# for c:\windows\directory\names\
parameter_s = re.sub(r'\\(?! )','/', parameter_s)
opts,ps = self.parse_options(parameter_s,'qb',mode='string')
# jump to previous
if ps == '-':
try:
            ps = ip.ev('_sh[-2]')
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# # jump to bookmark if needed
# else:
# if not os.path.exists(ps) or opts.has_key('b'):
# bkms = self.db.get('bookmarks', {})
#
# if bkms.has_key(ps):
# target = bkms[ps]
# print '(bookmark:%s) -> %s' % (ps,target)
# ps = target
# else:
# if opts.has_key('b'):
# raise UsageError("Bookmark '%s' not found. "
# "Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
ip.ex( 'openFile("%s", f=1)' % ps )
# try:
# os.chdir(os.path.expanduser(ps))
# if self.shell.rc.term_title:
# #print 'set term title:',self.shell.rc.term_title # dbg
# platutils.set_term_title('IPy ' + abbrev_cwd())
# except OSError:
# print sys.exc_info()[1]
# else:
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# else:
# os.chdir(self.shell.home_dir)
# if self.shell.rc.term_title:
# platutils.set_term_title("IPy ~")
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
#
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# if not 'q' in opts and self.shell.user_ns['_sh']:
# print self.shell.user_ns['_sh'][-1]
def setup():
ip = IPython.ipapi.get()
ip.set_hook('complete_command', pymel_python_completer , re_key = ".*" )
ip.set_hook('complete_command', pymel_name_completer , re_key = "(.+(\s+|\())|(SCENE\.)" )
ip.set_hook('complete_command', open_completer , str_key = "openf" )
ip.ex("from pymel.core import *")
# if you don't want pymel imported into the main namespace, you can replace the above with something like:
#ip.ex("import pymel as pm")
ip.expose_magic('openf', magic_open)
ip.expose_magic('dag', magic_dag)
ip.expose_magic('dghist', magic_dghist)
# add projects
ip.ex("""
import os.path
for _mayaproj in optionVar.get('RecentProjectsList', []):
_mayaproj = os.path.join( _mayaproj, 'scenes' )
if _mayaproj not in _dh:
_dh.append(_mayaproj)""")
# add files
ip.ex("""
import os.path
_sh=[]
for _mayaproj in optionVar.get('RecentFilesList', []):
if _mayaproj not in _sh:
_sh.append(_mayaproj)""")
def main():
import IPython.Shell
s = IPython.Shell.start()
setup()
s.mainloop()
if __name__ == '__main__':
main()
|
CountZer0/PipelineConstructionSet
|
python/maya/site-packages/pymel-1.0.3/pymel/tools/ipymel.py
|
Python
|
bsd-3-clause
| 20,452
|
[
"VisIt"
] |
8a5d707e8d21e852a88d3d802612034733cb7a45ef991efb0c0ac6518aed7da5
|
########################################################################
# File: MetaQuery.py
# Author: A.T.
# Date: 24.02.2015
# $HeadID$
########################################################################
""" Utilities for managing metadata based queries
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
import DIRAC.Core.Utilities.Time as Time
from types import ListType, DictType, StringTypes, IntType, LongType, FloatType
import json
FILE_STANDARD_METAKEYS = { 'SE': 'VARCHAR',
'CreationDate': 'DATETIME',
'ModificationDate': 'DATETIME',
'LastAccessDate': 'DATETIME',
'User': 'VARCHAR',
'Group': 'VARCHAR',
'Path': 'VARCHAR',
'Name': 'VARCHAR',
'FileName': 'VARCHAR',
'CheckSum': 'VARCHAR',
'GUID': 'VARCHAR',
'UID': 'INTEGER',
'GID': 'INTEGER',
'Size': 'INTEGER',
'Status': 'VARCHAR' }
FILES_TABLE_METAKEYS = { 'Name': 'FileName',
'FileName': 'FileName',
'Size': 'Size',
'User': 'UID',
'Group': 'GID',
'UID': 'UID',
'GID': 'GID',
'Status': 'Status' }
FILEINFO_TABLE_METAKEYS = { 'GUID': 'GUID',
'CheckSum': 'CheckSum',
'CreationDate': 'CreationDate',
'ModificationDate': 'ModificationDate',
'LastAccessDate': 'LastAccessDate' }
class MetaQuery( object ):
def __init__( self, queryDict = None, typeDict = None ):
self.__metaQueryDict = {}
if queryDict is not None:
self.__metaQueryDict = queryDict
self.__metaTypeDict = {}
if typeDict is not None:
self.__metaTypeDict = typeDict
def setMetaQuery( self, queryList, metaTypeDict = None ):
""" Create the metadata query out of the command line arguments
"""
if metaTypeDict is not None:
self.__metaTypeDict = metaTypeDict
metaDict = {}
contMode = False
value = ''
for arg in queryList:
if not contMode:
operation = ''
for op in ['>=','<=','>','<','!=','=']:
if op in arg:
operation = op
break
if not operation:
return S_ERROR( 'Illegal query element %s' % arg )
name,value = arg.split(operation)
if not name in self.__metaTypeDict:
return S_ERROR( "Metadata field %s not defined" % name )
mtype = self.__metaTypeDict[name]
else:
value += ' ' + arg
value = value.replace(contMode,'')
contMode = False
if value[0] in ['"', "'"] and value[-1] not in ['"', "'"]:
contMode = value[0]
continue
if ',' in value:
valueList = [ x.replace("'","").replace('"','') for x in value.split(',') ]
mvalue = valueList
if mtype[0:3].lower() == 'int':
mvalue = [ int(x) for x in valueList if not x in ['Missing','Any'] ]
mvalue += [ x for x in valueList if x in ['Missing','Any'] ]
if mtype[0:5].lower() == 'float':
mvalue = [ float(x) for x in valueList if not x in ['Missing','Any'] ]
mvalue += [ x for x in valueList if x in ['Missing','Any'] ]
if operation == "=":
operation = 'in'
if operation == "!=":
operation = 'nin'
mvalue = {operation:mvalue}
else:
mvalue = value.replace("'","").replace('"','')
if not value in ['Missing','Any']:
if mtype[0:3].lower() == 'int':
mvalue = int(value)
if mtype[0:5].lower() == 'float':
mvalue = float(value)
if operation != '=':
mvalue = {operation:mvalue}
if name in metaDict:
if type(metaDict[name]) == DictType:
if type(mvalue) == DictType:
op,value = mvalue.items()[0]
if op in metaDict[name]:
if type(metaDict[name][op]) == ListType:
if type(value) == ListType:
metaDict[name][op] = list( set( metaDict[name][op] + value) )
else:
                  metaDict[name][op] = list( set( metaDict[name][op] + [value] ) )
else:
if type(value) == ListType:
metaDict[name][op] = list( set( [metaDict[name][op]] + value) )
else:
metaDict[name][op] = list( set( [metaDict[name][op],value]) )
else:
metaDict[name].update(mvalue)
else:
if type(mvalue) == ListType:
metaDict[name].update({'in':mvalue})
else:
metaDict[name].update({'=':mvalue})
elif type(metaDict[name]) == ListType:
if type(mvalue) == DictType:
metaDict[name] = {'in':metaDict[name]}
metaDict[name].update(mvalue)
elif type(mvalue) == ListType:
metaDict[name] = list( set( (metaDict[name] + mvalue ) ) )
else:
            metaDict[name] = list( set( metaDict[name] + [mvalue] ) )
else:
if type(mvalue) == DictType:
metaDict[name] = {'=':metaDict[name]}
metaDict[name].update(mvalue)
elif type(mvalue) == ListType:
metaDict[name] = list( set( [metaDict[name]] + mvalue ) )
else:
metaDict[name] = list( set( [metaDict[name],mvalue] ) )
else:
metaDict[name] = mvalue
self.__metaQueryDict = metaDict
return S_OK( metaDict )
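  # A minimal sketch of the resulting dictionary (field names, types and
  # values are hypothetical):
  #   mq = MetaQuery( typeDict = { 'Size': 'INTEGER', 'User': 'VARCHAR' } )
  #   mq.setMetaQuery( [ 'Size>1000', 'User=atsareg' ] )
  #   # -> S_OK( { 'Size': { '>': 1000 }, 'User': 'atsareg' } )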
def getMetaQuery( self ):
return self.__metaQueryDict
def getMetaQueryAsJson( self ):
return json.dumps( self.__metaQueryDict )
def applyQuery( self, userMetaDict ):
""" Return a list of tuples with tables and conditions to locate files for a given user Metadata
"""
def getOperands( value ):
if type( value ) == ListType:
return [ ('in', value) ]
elif type( value ) == DictType:
resultList = []
for operation, operand in value.items():
resultList.append( ( operation, operand ) )
return resultList
else:
return [ ("=", value) ]
def getTypedValue( value, mtype ):
if mtype[0:3].lower() == 'int':
return int( value )
elif mtype[0:5].lower() == 'float':
return float( value )
elif mtype[0:4].lower() == 'date':
return Time.fromString( value )
else:
return value
for meta, value in self.__metaQueryDict.items():
# Check if user dict contains all the requested meta data
userValue = userMetaDict.get( meta, None )
if userValue is None:
if str( value ).lower() == 'missing':
continue
else:
return S_OK( False )
elif str( value ).lower() == 'any':
continue
mtype = self.__metaTypeDict[meta]
try:
userValue = getTypedValue( userValue, mtype )
except ValueError:
return S_ERROR( 'Illegal type for metadata %s: %s in user data' % ( meta, str( userValue ) ) )
# Check operations
for operation, operand in getOperands( value ):
try:
if type( operand ) == ListType:
typedValue = [ getTypedValue( x, mtype ) for x in operand ]
else:
typedValue = getTypedValue( operand, mtype )
except ValueError:
return S_ERROR( 'Illegal type for metadata %s: %s in filter' % ( meta, str( operand ) ) )
# Apply query operation
if operation in ['>', '<', '>=', '<=']:
if type( typedValue ) == ListType:
return S_ERROR( 'Illegal query: list of values for comparison operation' )
elif operation == '>' and typedValue >= userValue:
return S_OK( False )
elif operation == '<' and typedValue <= userValue:
return S_OK( False )
elif operation == '>=' and typedValue > userValue:
return S_OK( False )
elif operation == '<=' and typedValue < userValue:
return S_OK( False )
elif operation == 'in' or operation == "=":
if type( typedValue ) == ListType and not userValue in typedValue:
return S_OK( False )
elif type( typedValue ) != ListType and userValue != typedValue:
return S_OK( False )
elif operation == 'nin' or operation == "!=":
if type( typedValue ) == ListType and userValue in typedValue:
return S_OK( False )
elif type( typedValue ) != ListType and userValue == typedValue:
return S_OK( False )
return S_OK( True )
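# Continuing the hypothetical sketch from setMetaQuery above, a stored query
# of Size>1000 and User=atsareg would evaluate user metadata as:
#   mq.applyQuery( { 'Size': 1500, 'User': 'atsareg' } )  # -> S_OK( True )
#   mq.applyQuery( { 'Size': 500, 'User': 'atsareg' } )   # -> S_OK( False )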
|
coberger/DIRAC
|
DataManagementSystem/Client/MetaQuery.py
|
Python
|
gpl-3.0
| 8,932
|
[
"DIRAC"
] |
1de090e2403fff0e6cf0751895ff5e216bc08492733002bcc65ff3f131ef28b8
|
#!/usr/bin/env python
########################################
# Global map for tests
# by Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
import datetime
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
obsRef=0
########################## for CMIP5 parameters
VARIABLE='clt'
PRODUCT='Amon'
AbsTemp=273.15
RefTemp=5
MODISmean=52.721 #2001-2010
TargetModel=[\
#'CCSM4',\
#'CESM1-BGC',\
#'CESM1-CAM5',\
#'CESM1-FASTCHEM',\
#'CESM1-WACCM',\
#'CNRM-CM5',\
#'CSIRO-Mk3-6-0',\
#'CanESM2',\
#'EC-EARTH',\
#'GFDL-ESM2G',\
'GFDL-ESM2M',\
#'GISS-E2-H',\
#'GISS-E2-R-CC',\
#'HadGEM2-AO',\
#'HadGEM2-CC',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
#'IPSL-CM5A-MR',\
#'MIROC-ESM-CHEM',\
#'MIROC-ESM',\
#'MIROC5',\
#'MPI-ESM-LR',\
#'MPI-ESM-MR',\
#'MPI-ESM-P',\
#'MRI-CGCM3',\
#'NorESM1-ME',\
#'bcc-csm1-1-m',\
#'bcc-csm1-1',\
#'inmcm4',\
]
COLORtar=['red','darkmagenta','navy',\
'deeppink','orange','orangered','yellow','gold','brown','chocolate',\
'green','yellowgreen','aqua','olive','teal','blue',\
'purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
COLORall=['darkred','darkblue','darkgreen','deeppink',\
'red','blue','green','pink','gold',\
'lime','lightcyan','orchid','yellow','lightsalmon',\
'brown','khaki','aquamarine','yellowgreen','blueviolet',\
'snow','skyblue','slateblue','orangered','dimgray',\
'chocolate','teal','mediumvioletred','gray','cadetblue',\
'mediumorchid','bisque','tomato','hotpink','firebrick',\
'Chartreuse','purple','goldenrod',\
'black','orangered','cyan','magenta']
linestyles=['_', '_', '_', '-', '-',\
'-', '--','--','--', '--',\
'_', '_','_','_',\
'_', '_','_','_',\
'_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':']
RCMsHist=[\
'clt_AFR-44_CCCma-CanESM2_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_CNRM-CERFACS-CNRM-CM5_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_CNRM-CERFACS-CNRM-CM5_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_CSIRO-QCCCE-CSIRO-Mk3-6-0_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_historical_r12i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_historical_r12i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_historical_r1i1p1_KNMI-RACMO22T_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_historical_r3i1p1_DMI-HIRHAM5_v2_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_IPSL-IPSL-CM5A-MR_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MIROC-MIROC5_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MOHC-HadGEM2-ES_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MOHC-HadGEM2-ES_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MPI-M-MPI-ESM-LR_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MPI-M-MPI-ESM-LR_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_NCC-NorESM1-M_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_NOAA-GFDL-GFDL-ESM2M_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
]
RCMsRCP85=[\
'clt_AFR-44_CCCma-CanESM2_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_CNRM-CERFACS-CNRM-CM5_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_CNRM-CERFACS-CNRM-CM5_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_CSIRO-QCCCE-CSIRO-Mk3-6-0_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_rcp85_r12i1p1_CLMcom-CCLM4-8-17_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_rcp85_r12i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_rcp85_r1i1p1_KNMI-RACMO22T_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_rcp85_r3i1p1_DMI-HIRHAM5_v2_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_IPSL-IPSL-CM5A-MR_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MIROC-MIROC5_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_200601-210012.ymean.fldmean.nc',\
#'clt_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_KNMI-RACMO22T_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_NCC-NorESM1-M_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_NOAA-GFDL-GFDL-ESM2M_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
]
GCMsRCP85=[\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CNRM-CM5',\
'CanESM2',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-H',\
'GISS-E2-R-CC',\
'HadGEM2-AO',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'NorESM1-ME',\
'inmcm4',\
]
#================================================ CMIP5 models
# for historical
GCMsHist=[\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CESM1-FASTCHEM',\
'CESM1-WACCM',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
'EC-EARTH',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-H',\
'GISS-E2-R-CC',\
'HadGEM2-AO',\
'HadGEM2-CC',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MPI-ESM-P',\
'MRI-CGCM3',\
'NorESM1-ME',\
'bcc-csm1-1-m',\
'bcc-csm1-1',\
'inmcm4',\
]
EnsembleHist=[\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
]
EnsembleRCP85=[\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
]
#=================================================== define the Plot:
fig1=plt.figure(figsize=(16,9))
ax = fig1.add_subplot(111)
plt.xlabel('Year',fontsize=16)
plt.ylabel('Cloud Cover Fraction Change (%)',fontsize=16)
plt.title("Cloud Cover Fraction Change (%) in AFRICA simulated by CMIP5 models",fontsize=18)
plt.ylim(-10,5)
plt.xlim(1960,2100)
plt.grid()
plt.xticks(np.arange(1960, 2100+10, 20))
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
# vertical at 2005
plt.axvline(x=2005.5,linewidth=2, color='gray')
plt.axhline(y=0,linewidth=2, color='gray')
#plt.plot(x,y,color="blue",linewidth=4)
########################## for hist:
#============================ for CORDEX
EXPERIMENT='CORDEX'
DirCordexHist='/Users/tang/climate/CORDEX/hist/AFRICA/'
YEAR=range(1960,2006)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for infile0 in RCMsHist:
infile1=DirCordexHist+infile0
K=K+1 # for average
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if obsRef==1:
RefTemp=MODISmean
else:
        # reference temp: mean of 1996-2005 (the last ten years)
        RefTemp=np.mean(TEMP[len(YEAR)-10:len(YEAR)])
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
#if Model in TargetModel:
#plt.plot(YEAR,TEMP,label=Model,\
##linestyles[TargetModel.index(Model)],\
#color=COLORtar[TargetModel.index(Model)],linewidth=2)
#print "color is",COLORtar[TargetModel.index(Model)]
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=" CORDEX mean", color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='blue',alpha=0.3)
# draw NO. of model used:
plt.text(1980,-6,'CORDEX model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
#=================================================== for CORDEX RCP85
DirCordexRcp85='/Users/tang/climate/CORDEX/rcp85/AFRICA/'
YEAR=range(2006,2101)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for infile0 in RCMsRCP85:
infile1=DirCordexRcp85+infile0
K=K+1 # for average
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if obsRef==1:
RefTemp=MODISmean
else:
# reference temp: mean of 1996-2005
# get the reftemp if the model has historical data here
print 'ArrRefTemp in HIST ensembles:',np.shape(ArrRefTemp)
print ArrRefTemp
        # NOTE: this lookup assumes RCMsHist and RCMsRCP85 list the models in
        # the same order; a model present in only one list would mis-align it.
        print 'model index in the (assumed aligned) HIST list: ',RCMsRCP85.index(infile0)
        RefTemp=ArrRefTemp[RCMsRCP85.index(infile0)]
print 'RefTemp from HIST: ',RefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
#if Model in TargetModel:
#plt.plot(YEAR,TEMP,label=Model,\
##linestyles[TargetModel.index(Model)],\
#color=COLORtar[TargetModel.index(Model)],linewidth=2)
#print "color is",COLORtar[TargetModel.index(Model)]
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=" CORDEX mean", color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='blue',alpha=0.3)
# draw NO. of model used:
plt.text(2020,-6,'CORDEX model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
#=================================================== for CMIP5 hist
DirCMIP5Hist='/Users/tang/climate/CMIP5/hist/AFRICA'
TAILhist='_196001-200512.ymean.fldmean.AFR.nc'
EXPERIMENT='historical'
YEAR=range(1960,2006)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for Model in GCMsHist:
K=K+1 # for average
infile1=DirCMIP5Hist+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+EnsembleHist[GCMsHist.index(Model)]+TAILhist
#clt_Amon_MPI-ESM-LR_historical_r1i1p1_196001-200512.fldmean.AFR.nc
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if obsRef==1:
RefTemp=MODISmean
else:
        # reference temp: mean of 1996-2005 (the last ten years)
        RefTemp=np.mean(TEMP[len(YEAR)-10:len(YEAR)])
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[TargetModel.index(Model)],linewidth=2)
print "color is",COLORtar[TargetModel.index(Model)]
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=' CMIP5 mean',color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(1980,-4,'CMIP5 model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
#=================================================== for CMIP5 rcp8.5:
DirCMIP5RCP85='/Users/tang/climate/CMIP5/rcp85/AFRICA/'
EXPERIMENT='rcp85'
TailRcp85='_200601-210012.ymean.fldmean.AFR.nc'
YEAR=range(2006,2101)
Nmonth=1140
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for Model in GCMsRCP85:
K=K+1 # for average
infile1=DirCMIP5RCP85+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+EnsembleRCP85[GCMsRCP85.index(Model)]+TailRcp85
#clt_Amon_MPI-ESM-LR_historical_r1i1p1_196001-200512.fldmean.AFR.nc
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if obsRef==1:
RefTemp=MODISmean
else:
# reference temp: mean of 1996-2005
# get the reftemp if the model has historical data here
print 'ArrRefTemp in HIST ensembles:',np.shape(ArrRefTemp)
print ArrRefTemp
if Model in GCMsHist:
print 'model index in HIST: ',GCMsHist.index(Model)
print 'K=',K
RefTemp=ArrRefTemp[GCMsHist.index(Model)]
print 'RefTemp from HIST: ',RefTemp
else:
            RefTemp=np.mean(TEMP[0:10]) # fallback: first ten years (2006-2015)
print 'RefTemp from RCP8.5: ',RefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,label=Model,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[TargetModel.index(Model)],linewidth=2)
print "color is",COLORtar[TargetModel.index(Model)]
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=' CMIP5 RCP85 mean',color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(2020,-4,'CMIP5 model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
bbox = dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
))
plt.legend(loc=2)
plt.show()
quit()
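#============================================================================
# Editor's sketch (not part of the original script): the ensemble mean and
# the 5-95% band (mean +/- 1.64 standard deviations) are computed four times
# above; the helper below factors that out. `ensemble_band` is a
# hypothetical name; `np`, `ArrTemp` and `YEAR` are the script's own.
def ensemble_band(arr_temp):
    # arr_temp: list of per-model anomaly series -> (n_models, n_years) array
    arr = np.asarray(arr_temp)
    ave = arr.mean(axis=0)              # ensemble mean per year
    std = arr.std(axis=0)               # inter-model standard deviation
    return ave, ave + 1.64 * std, ave - 1.64 * std
# usage: ave, hi, lo = ensemble_band(ArrTemp)
#        plt.fill_between(YEAR, hi, lo, color='blue', alpha=0.3)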
|
CopyChat/Plotting
|
Downscaling/climatechange.clt2.py
|
Python
|
gpl-3.0
| 24,117
|
[
"NetCDF"
] |
f4b9be073134c0c52e65160aca151371e5af5c0a18052e4b5243cab3a8a1a819
|
import json
import shlex
import os
import socket
import heapq
from collections import OrderedDict, defaultdict
from libmproxy.protocol.http import decoded
from tabulate import tabulate
from recordpeeker import Equipment, ITEMS, BATTLES, DUNGEONS, slicedict, best_equipment
from recordpeeker.dispatcher import Dispatcher
def get_display_name(enemy):
    for child in enemy["children"]:
        for param in child["params"]:
            return param.get("disp_name", "Unknown Enemy")
    return "Unknown Enemy"  # fall back when no children/params are present
def get_drops(enemy):
for child in enemy["children"]:
for drop in child["drop_item_list"]:
yield drop
def handle_get_battle_init_data(data):
battle_data = data["battle"]
battle_id = battle_data["battle_id"]
battle_name = BATTLES.get(battle_id, "battle #" + battle_id)
print "Entering {0}".format(battle_name)
all_rounds_data = battle_data['rounds']
tbl = [["rnd", "enemy", "drop"]]
for round_data in all_rounds_data:
round = round_data.get("round", "???")
for round_drop in round_data["drop_item_list"]:
item_type = int(round_drop.get("type", 0))
if item_type == 21:
itemname = "potion"
elif item_type == 22:
itemname = "hi-potion"
elif item_type == 23:
itemname = "x-potion"
elif item_type == 31:
itemname = "ether"
elif item_type == 32:
itemname = "turbo ether"
else:
itemname = "unknown"
tbl.append([round, "<round drop>", itemname])
for enemy in round_data["enemy"]:
had_drop = False
enemyname = get_display_name(enemy)
for drop in get_drops(enemy):
item_type = drop.get("type", 0)
if item_type == 11:
itemname = "{0} gil".format(drop.get("amount", 0))
elif item_type == 41 or item_type == 51:
type_name = "orb id#" if item_type == 51 else "equipment id#"
item = ITEMS.get(drop["item_id"], type_name + drop["item_id"])
itemname = "{0}* {1}".format(drop.get("rarity", 1), item)
elif item_type == 61:
itemname = "event item"
else:
itemname = "unknown"
had_drop = True
tbl.append([round, enemyname, itemname])
if not had_drop:
tbl.append([round, enemyname, "nothing"])
print tabulate(tbl, headers="firstrow")
print ""
def handle_party_list(data):
wanted = "name series_id acc atk def eva matk mdef mnd series_acc series_atk series_def series_eva series_matk series_mdef series_mnd"
topn = OrderedDict()
topn["atk"] = 5
topn["matk"] = 2
topn["mnd"] = 2
topn["def"] = 5
find_series = [101001, 102001, 103001, 104001, 105001, 106001, 107001, 108001, 110001, 113001]
equips = defaultdict(list)
for item in data["equipments"]:
kind = item.get("equipment_type", 1)
heapq.heappush(equips[kind], Equipment(slicedict(item, wanted)))
for series in find_series:
print "Best equipment for FF{0}:".format((series - 100001) / 1000)
# Need to use lists for column ordering
tbl = ["stat n weapon stat n armor stat n accessory".split()]
tbldata = [[],[],[],[]]
for itemtype in range(1, 4): ## 1, 2, 3
for stat, count in topn.iteritems():
for equip in best_equipment(series, equips[itemtype], stat, count):
name = equip["name"].replace(u"\uff0b", "+")
tbldata[itemtype].append([stat, equip[stat], name])
# Transpose data
for idx in range(0, len(tbldata[1])):
tbl.append(tbldata[1][idx] + tbldata[2][idx] + tbldata[3][idx])
print tabulate(tbl, headers="firstrow")
print ""
def handle_dungeon_list(data):
tbl = []
world_data = data["world"]
world_id = world_data["id"]
world_name = world_data["name"]
print "Dungeon List for {0} (id={1})".format(world_name, world_id)
dungeons = data["dungeons"]
for dungeon in dungeons:
name = dungeon["name"]
id = dungeon["id"]
difficulty = dungeon["challenge_level"]
type = "ELITE" if dungeon["type"] == 2 else "NORMAL"
tbl.append([name, id, difficulty, type])
tbl = sorted(tbl, key=lambda row : int(row[1]))
tbl.insert(0, ["Name", "ID", "Difficulty", "Type"])
print tabulate(tbl, headers="firstrow")
def handle_battle_list(data):
tbl = [["Name", "Id", "Rounds"]]
dungeon_data = data["dungeon_session"]
dungeon_id = dungeon_data["dungeon_id"]
dungeon_name = dungeon_data["name"]
dungeon_type = int(dungeon_data["type"])
world_id = dungeon_data["world_id"]
print "Entering dungeon {0} ({1})".format(dungeon_name, "Elite" if dungeon_type==2 else "Normal")
battles = data["battles"]
for battle in battles:
tbl.append([battle["name"], battle["id"], battle["round_num"]])
print tabulate(tbl, headers="firstrow")
def handle_survival_event(data):
# XXX: This maybe works for all survival events...
enemy = data.get("enemy", dict(name="???", memory_factor="0"))
name = enemy.get("name", "???")
factor = float(enemy.get("memory_factor", "0"))
print "Your next opponent is {0} (x{1:.1f})".format(name, factor)
def start(context, argv):
global args
from recordpeeker.command_line import parse_args
args = parse_args(argv)
ips = set([ii[4][0] for ii in socket.getaddrinfo(socket.gethostname(), None) if ii[4][0] != "127.0.0.1"])
print "Configure your phone's proxy to point to this computer, then visit mitm.it"
print "on your phone to install the interception certificate.\n"
print "Record Peeker is listening on port {0}, on these addresses:".format(args.port)
print "\n".join([" * {0}".format(ip) for ip in ips])
print ""
print "Try entering the Party screen, or starting a battle."
global dp
dp = Dispatcher('ffrk.denagames.com')
    for path, function in handlers:
        dp.register(path, function)
    for path, regex in ignored_requests:
        dp.ignore(path, regex)
handlers = [
('get_battle_init_data' , handle_get_battle_init_data),
('/dff/party/list', handle_party_list),
('/dff/world/dungeons', handle_dungeon_list),
('/dff/world/battles', handle_battle_list),
('/dff/event/coliseum/6/get_data', handle_survival_event)
]
ignored_requests = [
('/dff/', True),
('/dff/splash', False),
('/dff/?timestamp', False),
('/dff/battle/?timestamp', False),
]
def response(context, flow):
global args
global dp
dp.handle(flow, args)
|
jonchang/recordpeeker
|
recordpeeker/mitmdump_input.py
|
Python
|
mit
| 6,758
|
[
"VisIt"
] |
bd11708e3dc382fee41aa8ee568b5943d4573506680f962ce5cde35365ad90a2
|
"""
Helper functions for the course complete event that was originally included with the Badging MVP.
"""
import hashlib
import logging
import six
from django.urls import reverse
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from badges.models import BadgeAssertion, BadgeClass, CourseCompleteImageConfiguration
from badges.utils import requires_badges_enabled, site_prefix
from xmodule.modulestore.django import modulestore
LOGGER = logging.getLogger(__name__)
# NOTE: As these functions are carry-overs from the initial badging implementation, they are used in
# migrations. Please check the badge migrations when changing any of these functions.
def course_slug(course_key, mode):
"""
Legacy: Not to be used as a model for constructing badge slugs. Included for compatibility with the original badge
type, awarded on course completion.
Slug ought to be deterministic and limited in size so it's not too big for Badgr.
Badgr's max slug length is 255.
"""
# Seven digits should be enough to realistically avoid collisions. That's what git services use.
digest = hashlib.sha256(
u"{}{}".format(six.text_type(course_key), six.text_type(mode)).encode('utf-8')
).hexdigest()[:7]
base_slug = slugify(six.text_type(course_key) + u'_{}_'.format(mode))[:248]
return base_slug + digest
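# Editor's note (added): a hedged illustration of the slug shape. With a
# course key rendered as text and a mode (both values made up), the result is
# slugify(u"<course key>_<mode>_")[:248] plus a 7-hex-character sha256
# digest, e.g. roughly:
#   course_slug('course-v1:edX+DemoX+2020', 'honor')
#   -> u'course-v1edxdemox2020_honor_' + <7 hex chars>
# which keeps the slug deterministic and within Badgr's 255-character limit.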
def badge_description(course, mode):
"""
Returns a description for the earned badge.
"""
if course.end:
return _(u'Completed the course "{course_name}" ({course_mode}, {start_date} - {end_date})').format(
start_date=course.start.date(),
end_date=course.end.date(),
course_name=course.display_name,
course_mode=mode,
)
else:
return _(u'Completed the course "{course_name}" ({course_mode})').format(
course_name=course.display_name,
course_mode=mode,
)
def evidence_url(user_id, course_key):
"""
Generates a URL to the user's Certificate HTML view, along with a GET variable that will signal the evidence visit
event.
"""
course_id = six.text_type(course_key)
# avoid circular import problems
from lms.djangoapps.certificates.models import GeneratedCertificate
cert = GeneratedCertificate.eligible_certificates.get(user__id=int(user_id), course_id=course_id)
return site_prefix() + reverse(
'certificates:render_cert_by_uuid', kwargs={'certificate_uuid': cert.verify_uuid}) + '?evidence_visit=1'
def criteria(course_key):
"""
Constructs the 'criteria' URL from the course about page.
"""
about_path = reverse('about_course', kwargs={'course_id': six.text_type(course_key)})
return u'{}{}'.format(site_prefix(), about_path)
def get_completion_badge(course_id, user):
"""
Given a course key and a user, find the user's enrollment mode
and get the Course Completion badge.
"""
from student.models import CourseEnrollment
    enrollments = CourseEnrollment.objects.filter(
        user=user, course_id=course_id
    ).order_by('-is_active')
    if not enrollments:
        return None
    mode = enrollments[0].mode
course = modulestore().get_course(course_id)
if not course.issue_badges:
return None
return BadgeClass.get_badge_class(
slug=course_slug(course_id, mode),
issuing_component='',
criteria=criteria(course_id),
description=badge_description(course, mode),
course_id=course_id,
mode=mode,
display_name=course.display_name,
image_file_handle=CourseCompleteImageConfiguration.image_for_mode(mode)
)
@requires_badges_enabled
def course_badge_check(user, course_key):
"""
    Takes a user and a course key, and awards the course-completion badge if
    the course issues badges, a badge class is configured for the user's
    enrollment mode, and no badge assertion exists yet for this user/course.
"""
if not modulestore().get_course(course_key).issue_badges:
LOGGER.info("Course is not configured to issue badges.")
return
badge_class = get_completion_badge(course_key, user)
if not badge_class:
# We're not configured to make a badge for this course mode.
return
    if BadgeAssertion.objects.filter(user=user, badge_class=badge_class).exists():
LOGGER.info("Completion badge already exists for this user on this course.")
# Badge already exists. Skip.
return
evidence = evidence_url(user.id, course_key)
badge_class.award(user, evidence_url=evidence)
|
msegado/edx-platform
|
lms/djangoapps/badges/events/course_complete.py
|
Python
|
agpl-3.0
| 4,555
|
[
"VisIt"
] |
62ad6d23f54b92fef536f0ccd0d659d39145de48b28126a624ff8443a195538a
|
from __future__ import division
import abc
import warnings
import numpy as np
import six
np.seterr('warn')
from scipy.special import gamma as scipy_gamma
from scipy.special import gammaln as scipy_gammaln
from astropy.modeling.fitting import _fitter_to_model_params
from astropy.modeling import models
from stingray import Lightcurve, Powerspectrum
# TODO: Add checks and balances to code
#from stingray.modeling.parametricmodels import logmin
__all__ = ["set_logprior", "Posterior", "PSDPosterior", "LogLikelihood",
"PSDLogLikelihood", "GaussianLogLikelihood", "LaplaceLogLikelihood",
"PoissonPosterior", "GaussianPosterior", "LaplacePosterior",
"PriorUndefinedError", "LikelihoodUndefinedError"]
logmin = -10000000000000000.0
class PriorUndefinedError(Exception):
pass
class LikelihoodUndefinedError(Exception):
pass
class IncorrectParameterError(Exception):
pass
def set_logprior(lpost, priors):
"""
This function constructs the `logprior` method required to successfully
use a `Posterior` object.
    All instances of class `Posterior` and its subclasses are required to
    implement a `logprior` method. However, priors are strongly
    problem-dependent and therefore usually user-defined.
This function allows for setting the `logprior` method on any instance
of class `Posterior` efficiently by allowing the user to pass a
dictionary of priors and an instance of class `Posterior`.
Parameters
----------
lpost : Posterior object
An instance of class Posterior or any of its subclasses
priors : dictionary
A dictionary containing the prior definitions. Keys are parameter
names as defined by the model used in `lpost`. Items are functions
that take a parameter as input and return the log-prior probability
of that parameter.
Returns
-------
logprior : function
The function definition for the prior
Example
-------
Make a light curve and power spectrum
>>> photon_arrivals = np.sort(np.random.uniform(0,1000, size=10000))
>>> lc = Lightcurve.make_lightcurve(photon_arrivals, dt=1.0)
>>> ps = Powerspectrum(lc, norm="frac")
Define the model
>>> pl = models.PowerLaw1D()
>>> pl.x_0.fixed = True
Instantiate the posterior:
>>> lpost = PSDPosterior(ps.freq, ps.power, pl, m=ps.m)
Define the priors:
>>> p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.))
>>> p_amplitude = lambda amplitude: ((-10 <= np.log(amplitude)) &
... ((np.log(amplitude) <= 10.0)))
>>> priors = {"alpha":p_alpha, "amplitude":p_amplitude}
Set the logprior method in the lpost object:
>>> lpost.logprior = set_logprior(lpost, priors)
"""
# get the number of free parameters in the model
#free_params = [p for p in lpost.model.param_names if not
# getattr(lpost.model, p).fixed]
free_params = [key for key, l in lpost.model.fixed.items() if not l]
# define the logprior
def logprior(t0, neg=False):
"""
The logarithm of the prior distribution for the
model defined in self.model.
Parameters:
------------
t0: {list | numpy.ndarray}
The list with parameters for the model
Returns:
--------
logp: float
The logarithm of the prior distribution for the model and
parameters given.
"""
if len(t0) != len(free_params):
raise IncorrectParameterError("The number of parameters passed into "
"the prior does not match the number "
"of parameters in the model.")
logp = 0.0 # initialize log-prior
ii = 0 # counter for the variable parameter
# loop through all parameter names, but only compute
# prior for those that are not fixed
# Note: need to do it this way to preserve order of parameters
# correctly!
for pname in lpost.model.param_names:
if not lpost.model.fixed[pname]:
with warnings.catch_warnings(record=True) as out:
logp += np.log(priors[pname](t0[ii]))
if len(out) > 0:
if isinstance(out[0].message, RuntimeWarning):
logp = np.nan
ii += 1
if not np.isfinite(logp):
logp = logmin
if neg:
return -logp
else:
return logp
return logprior
@six.add_metaclass(abc.ABCMeta)
class LogLikelihood(object):
def __init__(self, x, y, model, **kwargs):
"""
x : iterable
x-coordinate of the data. Could be multi-dimensional.
y : iterable
y-coordinate of the data. Could be multi-dimensional.
        model : astropy.modeling.FittableModel instance
Your model
kwargs :
keyword arguments specific to the individual sub-classes. For
details, see the respective docstrings for each subclass
"""
self.x = x
self.y = y
self.model = model
@abc.abstractmethod
def evaluate(self, parameters):
"""
This is where you define your log-likelihood. Do this!
"""
pass
def __call__(self, parameters, neg=False):
return self.evaluate(parameters, neg)
class GaussianLogLikelihood(LogLikelihood):
def __init__(self, x, y, yerr, model):
"""
A Gaussian likelihood.
Parameters
----------
x : iterable
x-coordinate of the data
y : iterable
            y-coordinate of the data
yerr: iterable
the uncertainty on the data, as standard deviation
model: an Astropy Model instance
The model to use in the likelihood.
"""
self.x = x
self.y = y
self.yerr = yerr
self.model = model
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname]:
self.npar += 1
def evaluate(self, pars, neg=False):
if np.size(pars) != self.npar:
raise IncorrectParameterError("Input parameters must" +
" match model parameters!")
_fitter_to_model_params(self.model, pars)
mean_model = self.model(self.x)
loglike = np.sum(-0.5*np.log(2.*np.pi) - np.log(self.yerr) -
(self.y-mean_model)**2/(2.*self.yerr**2))
if not np.isfinite(loglike):
loglike = logmin
if neg:
return -loglike
else:
return loglike
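# Editor's sketch (added; not part of stingray): evaluating the Gaussian
# log-likelihood for a constant model, with made-up data:
#   x = np.arange(10.)
#   y = 2.0 + np.random.normal(0., 0.1, size=10)
#   loglike = GaussianLogLikelihood(x, y, 0.1 * np.ones(10), models.Const1D())
#   loglike([2.0])              # log-likelihood at amplitude = 2
#   loglike([2.0], neg=True)    # negative log-likelihood, for minimizers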
class PoissonLogLikelihood(LogLikelihood):
def __init__(self, x, y, model):
"""
        A Poisson likelihood, appropriate for count data.
Parameters
----------
x : iterable
x-coordinate of the data
y : iterable
            y-coordinate of the data
model: an Astropy Model instance
The model to use in the likelihood.
"""
self.x = x
self.y = y
self.model = model
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname]:
self.npar += 1
def evaluate(self, pars, neg=False):
if np.size(pars) != self.npar:
raise IncorrectParameterError("Input parameters must" +
" match model parameters!")
_fitter_to_model_params(self.model, pars)
mean_model = self.model(self.x)
loglike = np.sum(-mean_model + self.y*np.log(mean_model) \
- scipy_gammaln(self.y + 1.))
if not np.isfinite(loglike):
loglike = logmin
if neg:
return -loglike
else:
return loglike
class PSDLogLikelihood(LogLikelihood):
def __init__(self, freq, power, model, m=1):
"""
        A likelihood for power spectral data.
Parameters
----------
freq: iterable
Array with frequencies
power: iterable
Array with (averaged/singular) powers corresponding to the
frequencies in `freq`
model: an Astropy Model instance
The model to use in the likelihood.
m : int
1/2 of the degrees of freedom
"""
LogLikelihood.__init__(self, freq, power, model)
self.m = m
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname] and not self.model.tied[pname]:
self.npar += 1
def evaluate(self, pars, neg=False):
if np.size(pars) != self.npar:
raise IncorrectParameterError("Input parameters must" +
" match model parameters!")
_fitter_to_model_params(self.model, pars)
mean_model = self.model(self.x)
with warnings.catch_warnings(record=True) as out:
if self.m == 1:
loglike = -np.sum(np.log(mean_model)) - \
np.sum(self.y/mean_model)
else:
loglike = -2.0*self.m*(np.sum(np.log(mean_model)) +
np.sum(self.y/mean_model) +
np.sum((2.0 / (2. * self.m) - 1.0) *
np.log(self.y)))
if not np.isfinite(loglike):
loglike = logmin
if neg:
return -loglike
else:
return loglike
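# Editor's note (added): for m == 1 the branch above is the Whittle
# likelihood, log L = -sum(log S_j) - sum(P_j / S_j), with S_j the model
# power and P_j the observed power at frequency j; for averaged spectra
# (m > 1) the powers follow a scaled chi-squared distribution with 2m
# degrees of freedom, which motivates the second branch.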
class LaplaceLogLikelihood(LogLikelihood):
def __init__(self, x, y, yerr, model):
"""
        A Laplace likelihood.
Parameters
----------
x : iterable
Array with independent variable
y : iterable
Array with dependent variable
model : an Astropy Model instance
The model to use in the likelihood.
yerr : iterable
Array with the uncertainties on `y`, in standard deviation
"""
LogLikelihood.__init__(self, x, y, model)
self.yerr = yerr
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname] and not self.model.tied[pname]:
self.npar += 1
def evaluate(self, pars, neg=False):
if np.size(pars) != self.npar:
raise IncorrectParameterError("Input parameters must" +
" match model parameters!")
_fitter_to_model_params(self.model, pars)
mean_model = self.model(self.x)
with warnings.catch_warnings(record=True) as out:
loglike = np.sum(-np.log(2.*self.yerr) - \
(np.abs(self.y - mean_model)/self.yerr))
if not np.isfinite(loglike):
loglike = logmin
if neg:
return -loglike
else:
return loglike
class Posterior(object):
def __init__(self, x, y, model, **kwargs):
"""
Define a posterior object.
The posterior describes the Bayesian probability distribution of
a set of parameters $\theta$ given some observed data $D$ and
some prior assumptions $I$.
It is defined as
$p(\theta | D, I) = p(D | \theta, I) p(\theta | I)/p(D| I)
where $p(D | \theta, I)$ describes the likelihood, i.e. the
sampling distribution of the data and the (parametric) model, and
$p(\theta | I)$ describes the prior distribution, i.e. our information
about the parameters $\theta$ before we gathered the data.
The marginal likelihood $p(D| I)$ describes the probability of
observing the data given the model assumptions, integrated over the
space of all parameters.
Parameters
----------
x : iterable
The abscissa or independent variable of the data. This could
in principle be a multi-dimensional array.
y : iterable
The ordinate or dependent variable of the data.
model: astropy.modeling.models class instance
The parametric model supposed to represent the data. For details
see the astropy.modeling documentation
kwargs :
keyword arguments related to the subclases of `Posterior`. For
details, see the documentation of the individual subclasses
References
----------
* Sivia, D. S., and J. Skilling. "Data Analysis:
A Bayesian Tutorial. 2006."
* Gelman, Andrew, et al. Bayesian data analysis. Vol. 2. Boca Raton,
FL, USA: Chapman & Hall/CRC, 2014.
* von Toussaint, Udo. "Bayesian inference in physics."
Reviews of Modern Physics 83.3 (2011): 943.
* Hogg, David W. "Probability Calculus for inference".
arxiv: 1205.4446
"""
self.x = x
self.y = y
self.model = model
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname]:
self.npar += 1
def logposterior(self, t0, neg=False):
if not hasattr(self, "logprior"):
raise PriorUndefinedError("There is no prior implemented. " +
"Cannot calculate posterior!")
if not hasattr(self, "loglikelihood"):
raise LikelihoodUndefinedError("There is no likelihood implemented. " +
"Cannot calculate posterior!")
lpost = self.loglikelihood(t0) + self.logprior(t0)
if neg is True:
return -lpost
else:
return lpost
def __call__(self, t0, neg=False):
return self.logposterior(t0, neg=neg)
class PSDPosterior(Posterior):
def __init__(self, freq, power, model, priors=None, m=1):
"""
Posterior distribution for power spectra.
Uses an exponential distribution for the errors in the likelihood,
or a $\chi^2$ distribution with $2M$ degrees of freedom, where $M$ is
the number of frequency bins or power spectra averaged in each bin.
Parameters
----------
        freq : iterable
            Array with frequencies of the power spectrum
        power : iterable
            Array with (averaged/singular) powers corresponding to the
            frequencies in `freq`
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If all this object is used
for a maximum likelihood-style analysis, no prior is required.
priors : dict of form {"parameter name": function}, optional
A dictionary with the definitions for the prior probabilities.
For each parameter in `model`, there must be a prior defined with
a key of the exact same name as stored in `model.param_names`.
The item for each key is a function definition defining the prior
(e.g. a lambda function or a `scipy.stats.distribution.pdf`.
If `priors = None`, then no prior is set. This means priors need
to be added by hand using the `set_logprior` function defined in
this module. Note that it is impossible to call the posterior object
itself or the `self.logposterior` method without defining a prior.
m: int, default 1
The number of averaged periodograms or frequency bins in ps.
Useful for binned/averaged periodograms, since the value of
m will change the likelihood function!
Attributes
----------
x: numpy.ndarray
The independent variable (list of frequencies) stored in ps.freq
y: numpy.ndarray
The dependent variable (list of powers) stored in ps.power
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If all this object is used
for a maximum likelihood-style analysis, no prior is required.
"""
self.loglikelihood = PSDLogLikelihood(freq, power,
model, m=m)
self.m = m
Posterior.__init__(self, freq, power, model)
        if priors is not None:
self.logprior = set_logprior(self, priors)
class PoissonPosterior(Posterior):
def __init__(self, x, y, model, priors=None):
"""
Posterior for Poisson lightcurve data. Primary intended use is for
modelling X-ray light curves, but alternative uses are conceivable.
TODO: Include astropy.modeling models
Parameters
----------
x : numpy.ndarray
The independent variable (e.g. time stamps of a light curve)
y : numpy.ndarray
The dependent variable (e.g. counts per bin of a light curve)
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If all this object is used
for a maximum likelihood-style analysis, no prior is required.
priors : dict of form {"parameter name": function}, optional
A dictionary with the definitions for the prior probabilities.
For each parameter in `model`, there must be a prior defined with
a key of the exact same name as stored in `model.param_names`.
The item for each key is a function definition defining the prior
(e.g. a lambda function or a `scipy.stats.distribution.pdf`.
If `priors = None`, then no prior is set. This means priors need
to be added by hand using the `set_logprior` function defined in
this module. Note that it is impossible to call the posterior object
itself or the `self.logposterior` method without defining a prior.
Attributes
----------
x: numpy.ndarray
The independent variable (list of frequencies) stored in ps.freq
y: numpy.ndarray
The dependent variable (list of powers) stored in ps.power
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If all this object is used
for a maximum likelihood-style analysis, no prior is required.
"""
self.x = x
self.y = y
self.loglikelihood = PoissonLogLikelihood(self.x, self.y, model)
Posterior.__init__(self, self.x, self.y, model)
        if priors is not None:
self.logprior = set_logprior(self, priors)
class GaussianPosterior(Posterior):
def __init__(self, x, y, yerr, model, priors=None):
"""
A general class for two-dimensional data following a Gaussian
sampling distribution.
Parameters
----------
x: numpy.ndarray
independent variable
y: numpy.ndarray
dependent variable
yerr: numpy.ndarray
measurement uncertainties for y
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If all this object is used
for a maximum likelihood-style analysis, no prior is required.
"""
self.loglikelihood = GaussianLogLikelihood(x, y, yerr, model)
Posterior.__init__(self, x, y, model)
self.yerr = yerr
        if priors is not None:
self.logprior = set_logprior(self, priors)
class LaplacePosterior(Posterior):
def __init__(self, x, y, yerr, model, priors=None):
"""
        A general class for two-dimensional data following a Laplace
        (double-exponential) sampling distribution.
Parameters
----------
x: numpy.ndarray
independent variable
y: numpy.ndarray
dependent variable
yerr: numpy.ndarray
measurement uncertainties for y, in standard deviation
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If all this object is used
for a maximum likelihood-style analysis, no prior is required.
"""
self.loglikelihood = LaplaceLogLikelihood(x, y, yerr, model)
Posterior.__init__(self, x, y, model)
self.yerr = yerr
        if priors is not None:
self.logprior = set_logprior(self, priors)
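#============================================================================
# Editor's sketch (added; not part of the module): end-to-end use of
# GaussianPosterior with dictionary priors. Data, model, and priors are
# illustrative only.
#   import scipy.stats
#   x = np.linspace(0., 10., 50)
#   y = 3.0 * x + 1.0 + np.random.normal(0., 0.5, size=50)
#   lpost = GaussianPosterior(x, y, 0.5 * np.ones_like(x), models.Linear1D(),
#                             priors={"slope": scipy.stats.norm(3., 5.).pdf,
#                                     "intercept": scipy.stats.norm(0., 5.).pdf})
#   lpost([3.0, 1.0])             # log-posterior at the true parameters
#   lpost([3.0, 1.0], neg=True)   # negative log-posterior, for minimizers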
|
pabell/stingray
|
stingray/modeling/posterior.py
|
Python
|
mit
| 22,031
|
[
"Gaussian"
] |
6b0da809dfec9a59f414d897a324806eca364fa5869ed98bfb5f50f4076c8c66
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
hiddenimports = ['openlp.core.ui.media.phononplayer',
'openlp.core.ui.media.vlcplayer',
'openlp.core.ui.media.webkitplayer']
|
marmyshev/transitions
|
resources/pyinstaller/hook-openlp.core.ui.media.py
|
Python
|
gpl-2.0
| 2,265
|
[
"Brian"
] |
0d5249eb9e032a886a49d3ecd574eedc7b6a53e366a601a404bc4c2ba1691d77
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# vgridworkflows - vgrid workflows front end
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.vgridworkflows import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/vgridworkflows.py
|
Python
|
gpl-2.0
| 1,078
|
[
"Brian"
] |
781c0051755adf0c5f3e7afea8b9d95789548e5212fb6044390e03d1ce28917b
|
""" :mod: RegisterReplica
==================
.. module: RegisterReplica
:synopsis: register replica handler
RegisterReplica operation handler
"""
__RCSID__ = "$Id $"
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
########################################################################
class RegisterReplica( DMSRequestOperationsBase ):
"""
.. class:: RegisterReplica
RegisterReplica operation handler
"""
def __init__( self, operation = None, csPath = None ):
"""c'tor
:param self: self reference
:param Operation operation: Operation instance
:param str csPath: CS path for this handler
"""
DMSRequestOperationsBase.__init__( self, operation, csPath )
# # RegisterReplica specific monitor info
gMonitor.registerActivity( "RegisterReplicaAtt", "Attempted replicas registrations",
"RequestExecutingAgent", "Replicas/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterReplicaOK", "Successful replicas registrations",
"RequestExecutingAgent", "Replicas/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterReplicaFail", "Failed replicas registrations",
"RequestExecutingAgent", "Replicas/min", gMonitor.OP_SUM )
def __call__( self ):
""" call me maybe """
# # counter for failed replicas
failedReplicas = 0
# # catalog to use
catalogs = self.operation.Catalog
if catalogs:
catalogs = [ cat.strip() for cat in catalogs.split( ',' ) ]
# # get waiting files
waitingFiles = self.getWaitingFilesList()
# # loop over files
registerOperations = {}
for opFile in waitingFiles:
gMonitor.addMark( "RegisterReplicaAtt", 1 )
# # get LFN
lfn = opFile.LFN
# # and others
targetSE = self.operation.targetSEList[0]
replicaTuple = ( lfn , opFile.PFN, targetSE )
# # call ReplicaManager
registerReplica = self.dm.registerReplica( replicaTuple, catalogs )
# # check results
if not registerReplica["OK"] or lfn in registerReplica["Value"]["Failed"]:
# There have been some errors
gMonitor.addMark( "RegisterReplicaFail", 1 )
# self.dataLoggingClient().addFileRecord( lfn, "RegisterReplicaFail", ','.join( catalogs ) if catalogs else "all catalogs", "", "RegisterReplica" )
reason = registerReplica.get( "Message", registerReplica.get( "Value", {} ).get( "Failed", {} ).get( lfn, 'Unknown' ) )
errorStr = "failed to register LFN %s: %s" % ( lfn, str( reason ) )
# FIXME: this is incompatible with the change made in the DM that we ignore failures if successful in at least one catalog
if lfn in registerReplica.get( "Value", {} ).get( "Successful", {} ) and isinstance( reason, dict ):
# As we managed, let's create a new operation for just the remaining registration
errorStr += ' - adding registerReplica operations to request'
for failedCatalog in reason:
key = '%s/%s' % ( targetSE, failedCatalog )
newOperation = self.getRegisterOperation( opFile, targetSE, type = 'RegisterReplica', catalog = failedCatalog )
if key not in registerOperations:
registerOperations[key] = newOperation
else:
registerOperations[key].addFile( newOperation[0] )
opFile.Status = 'Done'
else:
opFile.Error = errorStr
catMaster = True
if isinstance( reason, dict ):
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
for failedCatalog in reason:
catMaster = catMaster and FileCatalog()._getCatalogConfigDetails( failedCatalog ).get( 'Value', {} ).get( 'Master', False )
# If one targets explicitly a catalog and it fails or if it fails on the master catalog
if ( catalogs or catMaster ) and ( 'file does not exist' in opFile.Error.lower() or 'no such file' in opFile.Error.lower() ) :
opFile.Status = 'Failed'
failedReplicas += 1
self.log.warn( errorStr )
else:
# All is OK
gMonitor.addMark( "RegisterReplicaOK", 1 )
# self.dataLoggingClient().addFileRecord( lfn, "RegisterReplicaOK", ','.join( catalogs ) if catalogs else "all catalogs", "", "RegisterReplica" )
self.log.info( "Replica %s has been registered at %s" % ( lfn, ','.join( catalogs ) if catalogs else "all catalogs" ) )
opFile.Status = "Done"
# # if we have new replications to take place, put them at the end
if registerOperations:
self.log.info( "adding %d operations to the request" % len( registerOperations ) )
for operation in registerOperations.values():
self.operation._parent.addOperation( operation )
# # final check
if failedReplicas:
self.log.info( "all replicas processed, %s replicas failed to register" % failedReplicas )
self.operation.Error = "some replicas failed to register"
return S_ERROR( self.operation.Error )
return S_OK()
|
vmendez/DIRAC
|
DataManagementSystem/Agent/RequestOperations/RegisterReplica.py
|
Python
|
gpl-3.0
| 5,244
|
[
"DIRAC"
] |
9d33f15dadc664f11f2613ffd1ce59f55d958adc8af7e44c74e6ba85fda9817c
|
import errno
import functools
import fnmatch
import json
import os
import os.path
import re
import shutil
import subprocess
import sys
import tarfile
import requests
import stat
from typing import (
Type, NoReturn, List, Optional, Dict, Any, Tuple, Callable, Union
)
from dbt.events.functions import fire_event
from dbt.events.types import (
SystemErrorRetrievingModTime, SystemCouldNotWrite, SystemExecutingCmd, SystemStdOutMsg,
SystemStdErrMsg, SystemReportReturnCode
)
import dbt.exceptions
from dbt.utils import _connection_exception_retry as connection_exception_retry
if sys.platform == 'win32':
from ctypes import WinDLL, c_bool
else:
WinDLL = None
c_bool = None
def find_matching(
root_path: str,
relative_paths_to_search: List[str],
file_pattern: str,
) -> List[Dict[str, Any]]:
"""
Given an absolute `root_path`, a list of relative paths to that
absolute root path (`relative_paths_to_search`), and a `file_pattern`
like '*.sql', returns information about the files. For example:
> find_matching('/root/path', ['models'], '*.sql')
[ { 'absolute_path': '/root/path/models/model_one.sql',
'relative_path': 'model_one.sql',
'searched_path': 'models' },
{ 'absolute_path': '/root/path/models/subdirectory/model_two.sql',
'relative_path': 'subdirectory/model_two.sql',
'searched_path': 'models' } ]
"""
matching = []
root_path = os.path.normpath(root_path)
regex = fnmatch.translate(file_pattern)
reobj = re.compile(regex, re.IGNORECASE)
for relative_path_to_search in relative_paths_to_search:
absolute_path_to_search = os.path.join(
root_path, relative_path_to_search)
walk_results = os.walk(absolute_path_to_search)
for current_path, subdirectories, local_files in walk_results:
for local_file in local_files:
absolute_path = os.path.join(current_path, local_file)
relative_path = os.path.relpath(
absolute_path, absolute_path_to_search
)
modification_time = 0.0
try:
modification_time = os.path.getmtime(absolute_path)
except OSError:
fire_event(SystemErrorRetrievingModTime(path=absolute_path))
if reobj.match(local_file):
matching.append({
'searched_path': relative_path_to_search,
'absolute_path': absolute_path,
'relative_path': relative_path,
'modification_time': modification_time,
})
return matching
def load_file_contents(path: str, strip: bool = True) -> str:
path = convert_path(path)
with open(path, 'rb') as handle:
to_return = handle.read().decode('utf-8')
if strip:
to_return = to_return.strip()
return to_return
def make_directory(path: str) -> None:
"""
Make a directory and any intermediate directories that don't already
exist. This function handles the case where two threads try to create
a directory at once.
"""
path = convert_path(path)
if not os.path.exists(path):
# concurrent writes that try to create the same dir can fail
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise e
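# Editor's note (added): on Python >= 3.2 the same race-tolerant behavior is
# available directly as os.makedirs(path, exist_ok=True).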
def make_file(path: str, contents: str = '', overwrite: bool = False) -> bool:
"""
Make a file at `path` assuming that the directory it resides in already
exists. The file is saved with contents `contents`
"""
if overwrite or not os.path.exists(path):
path = convert_path(path)
with open(path, 'w') as fh:
fh.write(contents)
return True
return False
def make_symlink(source: str, link_path: str) -> None:
"""
Create a symlink at `link_path` referring to `source`.
"""
if not supports_symlinks():
dbt.exceptions.system_error('create a symbolic link')
os.symlink(source, link_path)
def supports_symlinks() -> bool:
return getattr(os, "symlink", None) is not None
def write_file(path: str, contents: str = '') -> bool:
path = convert_path(path)
try:
make_directory(os.path.dirname(path))
with open(path, 'w', encoding='utf-8') as f:
f.write(str(contents))
except Exception as exc:
# note that you can't just catch FileNotFound, because sometimes
# windows apparently raises something else.
# It's also not sufficient to look at the path length, because
# sometimes windows fails to write paths that are less than the length
# limit. So on windows, suppress all errors that happen from writing
# to disk.
if os.name == 'nt':
# sometimes we get a winerror of 3 which means the path was
# definitely too long, but other times we don't and it means the
# path was just probably too long. This is probably based on the
# windows/python version.
if getattr(exc, 'winerror', 0) == 3:
reason = 'Path was too long'
else:
reason = 'Path was possibly too long'
# all our hard work and the path was still too long. Log and
# continue.
fire_event(SystemCouldNotWrite(path=path, reason=reason, exc=exc))
else:
raise
return True
def read_json(path: str) -> Dict[str, Any]:
return json.loads(load_file_contents(path))
def write_json(path: str, data: Dict[str, Any]) -> bool:
return write_file(path, json.dumps(data, cls=dbt.utils.JSONEncoder))
def _windows_rmdir_readonly(
func: Callable[[str], Any], path: str, exc: Tuple[Any, OSError, Any]
):
exception_val = exc[1]
if exception_val.errno == errno.EACCES:
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def resolve_path_from_base(path_to_resolve: str, base_path: str) -> str:
"""
    If path_to_resolve is a relative path, create an absolute path
with base_path as the base.
If path_to_resolve is an absolute path or a user path (~), just
resolve it to an absolute path and return.
"""
return os.path.abspath(
os.path.join(
base_path,
os.path.expanduser(path_to_resolve)))
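# Editor's note (added): illustrative behavior, with made-up paths:
#   resolve_path_from_base('models', '/home/u/proj')  -> '/home/u/proj/models'
#   resolve_path_from_base('~/proj', '/anything')     -> '<home>/proj'
#   resolve_path_from_base('/abs/path', '/anything')  -> '/abs/path'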
def rmdir(path: str) -> None:
"""
Recursively deletes a directory. Includes an error handler to retry with
different permissions on Windows. Otherwise, removing directories (eg.
cloned via git) can cause rmtree to throw a PermissionError exception
"""
path = convert_path(path)
if sys.platform == 'win32':
onerror = _windows_rmdir_readonly
else:
onerror = None
shutil.rmtree(path, onerror=onerror)
def _win_prepare_path(path: str) -> str:
"""Given a windows path, prepare it for use by making sure it is absolute
and normalized.
"""
path = os.path.normpath(path)
# if a path starts with '\', splitdrive() on it will return '' for the
# drive, but the prefix requires a drive letter. So let's add the drive
# letter back in.
# Unless it starts with '\\'. In that case, the path is a UNC mount point
# and splitdrive will be fine.
if not path.startswith('\\\\') and path.startswith('\\'):
curdrive = os.path.splitdrive(os.getcwd())[0]
path = curdrive + path
# now our path is either an absolute UNC path or relative to the current
# directory. If it's relative, we need to make it absolute or the prefix
# won't work. `ntpath.abspath` allegedly doesn't always play nice with long
# paths, so do this instead.
if not os.path.splitdrive(path)[0]:
path = os.path.join(os.getcwd(), path)
return path
def _supports_long_paths() -> bool:
if sys.platform != 'win32':
return True
# Eryk Sun says to use `WinDLL('ntdll')` instead of `windll.ntdll` because
# of pointer caching in a comment here:
# https://stackoverflow.com/a/35097999/11262881
    # I don't know exactly what he means, but I am inclined to believe him as
# he's pretty active on Python windows bugs!
try:
dll = WinDLL('ntdll')
except OSError: # I don't think this happens? you need ntdll to run python
return False
# not all windows versions have it at all
if not hasattr(dll, 'RtlAreLongPathsEnabled'):
return False
# tell windows we want to get back a single unsigned byte (a bool).
dll.RtlAreLongPathsEnabled.restype = c_bool
return dll.RtlAreLongPathsEnabled()
def convert_path(path: str) -> str:
"""Convert a path that dbt has, which might be >260 characters long, to one
that will be writable/readable on Windows.
On other platforms, this is a no-op.
"""
# some parts of python seem to append '\*.*' to strings, better safe than
# sorry.
if len(path) < 250:
return path
if _supports_long_paths():
return path
prefix = '\\\\?\\'
# Nothing to do
if path.startswith(prefix):
return path
path = _win_prepare_path(path)
# add the prefix. The check is just in case os.getcwd() does something
# unexpected - I believe this if-state should always be True though!
if not path.startswith(prefix):
path = prefix + path
return path
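# A minimal sketch of the effect (hypothetical path; assumes a win32 host
# where RtlAreLongPathsEnabled reports long paths as disabled): paths of
# roughly 250+ characters gain the extended-length prefix, while shorter
# paths and non-Windows platforms pass through unchanged.
#
#   long_path = 'C:\\data\\' + 'a' * 300
#   convert_path(long_path)    # -> '\\\\?\\C:\\data\\aaa...'
#   convert_path('short.txt')  # -> 'short.txt'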
def remove_file(path: str) -> None:
path = convert_path(path)
os.remove(path)
def path_exists(path: str) -> bool:
path = convert_path(path)
return os.path.lexists(path)
def path_is_symlink(path: str) -> bool:
path = convert_path(path)
return os.path.islink(path)
def open_dir_cmd() -> str:
# https://docs.python.org/2/library/sys.html#sys.platform
if sys.platform == 'win32':
return 'start'
elif sys.platform == 'darwin':
return 'open'
else:
return 'xdg-open'
def _handle_posix_cwd_error(
exc: OSError, cwd: str, cmd: List[str]
) -> NoReturn:
if exc.errno == errno.ENOENT:
message = 'Directory does not exist'
elif exc.errno == errno.EACCES:
message = 'Current user cannot access directory, check permissions'
elif exc.errno == errno.ENOTDIR:
message = 'Not a directory'
else:
message = 'Unknown OSError: {} - cwd'.format(str(exc))
raise dbt.exceptions.WorkingDirectoryError(cwd, cmd, message)
def _handle_posix_cmd_error(
exc: OSError, cwd: str, cmd: List[str]
) -> NoReturn:
if exc.errno == errno.ENOENT:
message = "Could not find command, ensure it is in the user's PATH"
elif exc.errno == errno.EACCES:
message = 'User does not have permissions for this command'
else:
message = 'Unknown OSError: {} - cmd'.format(str(exc))
raise dbt.exceptions.ExecutableError(cwd, cmd, message)
def _handle_posix_error(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
"""OSError handling for posix systems.
Some things that could happen to trigger an OSError:
- cwd could not exist
- exc.errno == ENOENT
- exc.filename == cwd
- cwd could have permissions that prevent the current user moving to it
- exc.errno == EACCES
- exc.filename == cwd
- cwd could exist but not be a directory
- exc.errno == ENOTDIR
- exc.filename == cwd
- cmd[0] could not exist
- exc.errno == ENOENT
- exc.filename == None(?)
- cmd[0] could exist but have permissions that prevents the current
user from executing it (executable bit not set for the user)
- exc.errno == EACCES
- exc.filename == None(?)
"""
if getattr(exc, 'filename', None) == cwd:
_handle_posix_cwd_error(exc, cwd, cmd)
else:
_handle_posix_cmd_error(exc, cwd, cmd)
def _handle_windows_error(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
cls: Type[dbt.exceptions.Exception] = dbt.exceptions.CommandError
if exc.errno == errno.ENOENT:
message = ("Could not find command, ensure it is in the user's PATH "
"and that the user has permissions to run it")
cls = dbt.exceptions.ExecutableError
elif exc.errno == errno.ENOEXEC:
message = ('Command was not executable, ensure it is valid')
cls = dbt.exceptions.ExecutableError
elif exc.errno == errno.ENOTDIR:
message = ('Unable to cd: path does not exist, user does not have'
' permissions, or not a directory')
cls = dbt.exceptions.WorkingDirectoryError
else:
message = 'Unknown error: {} (errno={}: "{}")'.format(
str(exc), exc.errno, errno.errorcode.get(exc.errno, '<Unknown!>')
)
raise cls(cwd, cmd, message)
def _interpret_oserror(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
"""Interpret an OSError exc and raise the appropriate dbt exception.
"""
if len(cmd) == 0:
raise dbt.exceptions.CommandError(cwd, cmd)
# all of these functions raise unconditionally
if os.name == 'nt':
_handle_windows_error(exc, cwd, cmd)
else:
_handle_posix_error(exc, cwd, cmd)
# this should not be reachable, raise _something_ at least!
raise dbt.exceptions.InternalException(
'Unhandled exception in _interpret_oserror: {}'.format(exc)
)
def run_cmd(
cwd: str, cmd: List[str], env: Optional[Dict[str, Any]] = None
) -> Tuple[bytes, bytes]:
fire_event(SystemExecutingCmd(cmd=cmd))
if len(cmd) == 0:
raise dbt.exceptions.CommandError(cwd, cmd)
# the env argument replaces the environment entirely, which has exciting
# consequences on Windows! Do an update instead.
full_env = env
if env is not None:
full_env = os.environ.copy()
full_env.update(env)
try:
exe_pth = shutil.which(cmd[0])
if exe_pth:
cmd = [os.path.abspath(exe_pth)] + list(cmd[1:])
proc = subprocess.Popen(
cmd,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=full_env)
out, err = proc.communicate()
except OSError as exc:
_interpret_oserror(exc, cwd, cmd)
fire_event(SystemStdOutMsg(bmsg=out))
fire_event(SystemStdErrMsg(bmsg=err))
if proc.returncode != 0:
fire_event(SystemReportReturnCode(returncode=proc.returncode))
raise dbt.exceptions.CommandResultError(cwd, cmd, proc.returncode,
out, err)
return out, err
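# A minimal usage sketch (hypothetical command): stdout and stderr come back
# as raw bytes; a non-zero exit raises CommandResultError, and any OSError is
# translated into a dbt exception by _interpret_oserror above.
#
#   out, err = run_cmd('/tmp', ['git', '--version'])
#   print(out.decode().strip())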
def download_with_retries(
url: str, path: str, timeout: Optional[Union[float, tuple]] = None
) -> None:
download_fn = functools.partial(download, url, path, timeout)
connection_exception_retry(download_fn, 5)
def download(
url: str, path: str, timeout: Optional[Union[float, tuple]] = None
) -> None:
path = convert_path(path)
connection_timeout = timeout or float(os.getenv('DBT_HTTP_TIMEOUT', 10))
response = requests.get(url, timeout=connection_timeout)
with open(path, 'wb') as handle:
for block in response.iter_content(1024 * 64):
handle.write(block)
def rename(from_path: str, to_path: str, force: bool = False) -> None:
from_path = convert_path(from_path)
to_path = convert_path(to_path)
is_symlink = path_is_symlink(to_path)
if os.path.exists(to_path) and force:
if is_symlink:
remove_file(to_path)
else:
rmdir(to_path)
shutil.move(from_path, to_path)
def untar_package(
tar_path: str, dest_dir: str, rename_to: Optional[str] = None
) -> None:
tar_path = convert_path(tar_path)
tar_dir_name = None
with tarfile.open(tar_path, 'r:gz') as tarball:
tarball.extractall(dest_dir)
tar_dir_name = os.path.commonprefix(tarball.getnames())
if rename_to:
downloaded_path = os.path.join(dest_dir, tar_dir_name)
desired_path = os.path.join(dest_dir, rename_to)
dbt.clients.system.rename(downloaded_path, desired_path, force=True)
def chmod_and_retry(func, path, exc_info):
"""Define an error handler to pass to shutil.rmtree.
On Windows, when a file is marked read-only as git likes to do, rmtree will
fail. To handle that, on errors try to make the file writable.
We want to retry most operations here, but listdir is one that we know will
be useless.
"""
if func is os.listdir or os.name != 'nt':
raise
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
    # on error, this will raise.
func(path)
def _absnorm(path):
return os.path.normcase(os.path.abspath(path))
def move(src, dst):
"""A re-implementation of shutil.move that properly removes the source
directory on windows when it has read-only files in it and the move is
between two drives.
This is almost identical to the real shutil.move, except it uses our rmtree
and skips handling non-windows OSes since the existing one works ok there.
"""
src = convert_path(src)
dst = convert_path(dst)
if os.name != 'nt':
return shutil.move(src, dst)
if os.path.isdir(dst):
if _absnorm(src) == _absnorm(dst):
os.rename(src, dst)
return
dst = os.path.join(dst, os.path.basename(src.rstrip('/\\')))
if os.path.exists(dst):
raise EnvironmentError("Path '{}' already exists".format(dst))
try:
os.rename(src, dst)
except OSError:
# probably different drives
if os.path.isdir(src):
if _absnorm(dst + '\\').startswith(_absnorm(src + '\\')):
# dst is inside src
raise EnvironmentError(
"Cannot move a directory '{}' into itself '{}'"
.format(src, dst)
)
shutil.copytree(src, dst, symlinks=True)
rmtree(src)
else:
shutil.copy2(src, dst)
os.unlink(src)
def rmtree(path):
"""Recursively remove path. On permissions errors on windows, try to remove
the read-only flag and try again.
"""
path = convert_path(path)
return shutil.rmtree(path, onerror=chmod_and_retry)
|
analyst-collective/dbt
|
core/dbt/clients/system.py
|
Python
|
apache-2.0
| 18,396
|
[
"exciting"
] |
8f918b9b7f1d39c0f3e7460c49a599876cd30c30b515365baef6611aa8b7fa5d
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtGui
from openlp.core.lib import UiStrings, build_icon, translate
from openlp.core.lib.ui import create_button_box, create_button
class Ui_CustomEditDialog(object):
def setupUi(self, customEditDialog):
customEditDialog.setObjectName(u'customEditDialog')
customEditDialog.resize(450, 350)
customEditDialog.setWindowIcon(build_icon(u':/icon/openlp-logo-16x16.png'))
self.dialogLayout = QtGui.QVBoxLayout(customEditDialog)
self.dialogLayout.setObjectName(u'dialog_layout')
self.titleLayout = QtGui.QHBoxLayout()
self.titleLayout.setObjectName(u'titleLayout')
self.titleLabel = QtGui.QLabel(customEditDialog)
self.titleLabel.setObjectName(u'titleLabel')
self.titleLayout.addWidget(self.titleLabel)
self.titleEdit = QtGui.QLineEdit(customEditDialog)
self.titleLabel.setBuddy(self.titleEdit)
self.titleEdit.setObjectName(u'titleEdit')
self.titleLayout.addWidget(self.titleEdit)
self.dialogLayout.addLayout(self.titleLayout)
self.centralLayout = QtGui.QHBoxLayout()
self.centralLayout.setObjectName(u'centralLayout')
self.slideListView = QtGui.QListWidget(customEditDialog)
self.slideListView.setAlternatingRowColors(True)
self.slideListView.setObjectName(u'slideListView')
self.centralLayout.addWidget(self.slideListView)
self.buttonLayout = QtGui.QVBoxLayout()
self.buttonLayout.setObjectName(u'buttonLayout')
self.addButton = QtGui.QPushButton(customEditDialog)
self.addButton.setObjectName(u'addButton')
self.buttonLayout.addWidget(self.addButton)
self.editButton = QtGui.QPushButton(customEditDialog)
self.editButton.setEnabled(False)
self.editButton.setObjectName(u'editButton')
self.buttonLayout.addWidget(self.editButton)
self.editAllButton = QtGui.QPushButton(customEditDialog)
self.editAllButton.setObjectName(u'editAllButton')
self.buttonLayout.addWidget(self.editAllButton)
self.deleteButton = create_button(customEditDialog, u'deleteButton', role=u'delete',
click=customEditDialog.onDeleteButtonClicked)
self.deleteButton.setEnabled(False)
self.buttonLayout.addWidget(self.deleteButton)
self.buttonLayout.addStretch()
self.upButton = create_button(customEditDialog, u'upButton', role=u'up', enabled=False,
click=customEditDialog.onUpButtonClicked)
self.downButton = create_button(customEditDialog, u'downButton', role=u'down', enabled=False,
click=customEditDialog.onDownButtonClicked)
self.buttonLayout.addWidget(self.upButton)
self.buttonLayout.addWidget(self.downButton)
self.centralLayout.addLayout(self.buttonLayout)
self.dialogLayout.addLayout(self.centralLayout)
self.bottomFormLayout = QtGui.QFormLayout()
self.bottomFormLayout.setObjectName(u'bottomFormLayout')
self.themeLabel = QtGui.QLabel(customEditDialog)
self.themeLabel.setObjectName(u'themeLabel')
self.themeComboBox = QtGui.QComboBox(customEditDialog)
self.themeComboBox.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
self.themeComboBox.setObjectName(u'themeComboBox')
self.themeLabel.setBuddy(self.themeComboBox)
self.bottomFormLayout.addRow(self.themeLabel, self.themeComboBox)
self.creditLabel = QtGui.QLabel(customEditDialog)
self.creditLabel.setObjectName(u'creditLabel')
self.creditEdit = QtGui.QLineEdit(customEditDialog)
self.creditEdit.setObjectName(u'creditEdit')
self.creditLabel.setBuddy(self.creditEdit)
self.bottomFormLayout.addRow(self.creditLabel, self.creditEdit)
self.dialogLayout.addLayout(self.bottomFormLayout)
self.previewButton = QtGui.QPushButton()
self.button_box = create_button_box(customEditDialog, u'button_box', [u'cancel', u'save'], [self.previewButton])
self.dialogLayout.addWidget(self.button_box)
self.retranslateUi(customEditDialog)
def retranslateUi(self, customEditDialog):
customEditDialog.setWindowTitle(translate('CustomPlugin.EditCustomForm', 'Edit Custom Slides'))
self.titleLabel.setText(translate('CustomPlugin.EditCustomForm', '&Title:'))
self.addButton.setText(UiStrings().Add)
self.addButton.setToolTip(translate('CustomPlugin.EditCustomForm', 'Add a new slide at bottom.'))
self.editButton.setText(UiStrings().Edit)
self.editButton.setToolTip(translate('CustomPlugin.EditCustomForm', 'Edit the selected slide.'))
self.editAllButton.setText(translate('CustomPlugin.EditCustomForm', 'Ed&it All'))
self.editAllButton.setToolTip(translate('CustomPlugin.EditCustomForm', 'Edit all the slides at once.'))
self.themeLabel.setText(translate('CustomPlugin.EditCustomForm', 'The&me:'))
self.creditLabel.setText(translate('CustomPlugin.EditCustomForm', '&Credits:'))
self.previewButton.setText(UiStrings().SaveAndPreview)
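# A minimal usage sketch (hypothetical dialog subclass, not part of this
# module): the Ui_ class is mixed into a QDialog that supplies the
# onDeleteButtonClicked/onUpButtonClicked/onDownButtonClicked slots that
# setupUi() wires to the created buttons.
#
#   class EditCustomForm(QtGui.QDialog, Ui_CustomEditDialog):
#       def __init__(self, parent=None):
#           QtGui.QDialog.__init__(self, parent)
#           self.setupUi(self)  # requires the on*ButtonClicked slots above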
|
marmyshev/transitions
|
openlp/plugins/custom/forms/editcustomdialog.py
|
Python
|
gpl-2.0
| 7,205
|
[
"Brian"
] |
03453ffca5320f7f044fae77211b452034aa8024f25c84228091b6f845b4db18
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START kms_encrypt_symmetric]
def encrypt_symmetric(project_id, location_id, key_ring_id, key_id, plaintext):
"""
Encrypt plaintext using a symmetric key.
Args:
project_id (string): Google Cloud project ID (e.g. 'my-project').
location_id (string): Cloud KMS location (e.g. 'us-east1').
key_ring_id (string): ID of the Cloud KMS key ring (e.g. 'my-key-ring').
key_id (string): ID of the key to use (e.g. 'my-key').
plaintext (string): message to encrypt
    Returns:
        EncryptResponse: Response from the API call, including the ciphertext.
"""
# Import the client library.
from google.cloud import kms
# Import base64 for printing the ciphertext.
import base64
# Convert the plaintext to bytes.
plaintext_bytes = plaintext.encode('utf-8')
# Optional, but recommended: compute plaintext's CRC32C.
# See crc32c() function defined below.
plaintext_crc32c = crc32c(plaintext_bytes)
# Create the client.
client = kms.KeyManagementServiceClient()
# Build the key name.
key_name = client.crypto_key_path(project_id, location_id, key_ring_id, key_id)
# Call the API.
encrypt_response = client.encrypt(
request={'name': key_name, 'plaintext': plaintext_bytes, 'plaintext_crc32c': plaintext_crc32c})
# Optional, but recommended: perform integrity verification on encrypt_response.
# For more details on ensuring E2E in-transit integrity to and from Cloud KMS visit:
# https://cloud.google.com/kms/docs/data-integrity-guidelines
if not encrypt_response.verified_plaintext_crc32c:
raise Exception('The request sent to the server was corrupted in-transit.')
    if encrypt_response.ciphertext_crc32c != crc32c(encrypt_response.ciphertext):
raise Exception('The response received from the server was corrupted in-transit.')
# End integrity verification
print('Ciphertext: {}'.format(base64.b64encode(encrypt_response.ciphertext)))
return encrypt_response
def crc32c(data):
"""
Calculates the CRC32C checksum of the provided data.
Args:
data: the bytes over which the checksum should be calculated.
Returns:
An int representing the CRC32C checksum of the provided bytes.
"""
import crcmod
import six
crc32c_fun = crcmod.predefined.mkPredefinedCrcFun('crc-32c')
return crc32c_fun(six.ensure_binary(data))
# [END kms_encrypt_symmetric]
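# A minimal usage sketch (hypothetical resource names; the key ring and key
# must already exist and the caller needs encrypt permission on the key):
#
#   response = encrypt_symmetric(
#       'my-project', 'us-east1', 'my-key-ring', 'my-key', 'hello world')
#   ciphertext = response.ciphertext  # bytes suitable for a decrypt call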
|
googleapis/python-kms
|
samples/snippets/encrypt_symmetric.py
|
Python
|
apache-2.0
| 2,966
|
[
"VisIt"
] |
997311c21e98ef417530f6c654a84a6f5d662cc8e7e58ee9bc09efa132c66035
|
import os
import sys
import tempfile
import yaml
import zlib
import numpy as np
import simplejson as js
import subprocess as sb
from time import time,sleep
from os import path
from scipy.stats.mstats import mquantiles
try:
from sklearn.lda import LDA
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.feature_selection import SelectKBest, f_classif
import samcnet.mh as mh
from samcnet.mixturepoisson import *
from samcnet.lori import *
from samcnet.data import *
except ImportError as e:
sys.exit("Make sure LD_LIBRARY_PATH is set correctly and that the build"+\
" directory is populated by waf.\n\n %s" % str(e))
if 'WORKHASH' in os.environ:
try:
server = os.environ['SERVER']
except:
sys.exit("ERROR in worker: Need SERVER environment variable defined.")
if 'PARAM' in os.environ:
params = yaml.load(os.environ['PARAM'])
else:
params = {}
iters = setv(params, 'iters', int(1e4), int)
num_feat = setv(params, 'num_feat', 4, int)
seed = setv(params, 'seed', np.random.randint(10**8), int)
rseed = setv(params, 'rseed', np.random.randint(10**8), int)
Ntrn = setv(params, 'Ntrn', 10, int)
Ntst = setv(params, 'Ntst', 3000, int)
f_glob = setv(params, 'f_glob', 5, int)
subclasses = setv(params, 'subclasses', 0, int)
f_het = setv(params, 'f_het', 0, int)
f_rand = setv(params, 'f_rand', 10, int)
rho = setv(params, 'rho', 0.6, float)
f_tot = setv(params, 'f_tot', f_glob+f_het*subclasses+f_rand, float)
blocksize = setv(params, 'blocksize', 5, int)
mu0 = setv(params, 'mu0', -2.0, float)
mu1 = setv(params, 'mu1', -1.0, float)
sigma0 = setv(params, 'sigma0', 0.2, float)
sigma1 = setv(params, 'sigma1', 0.6, float)
lowd = setv(params, 'lowd', 9.0, float)
highd = setv(params, 'highd', 11.0, float)
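# A minimal sketch of overriding the defaults above (assumed invocation, not
# part of this script): parameters arrive as YAML in the PARAM environment
# variable, and anything not supplied falls back to the setv() defaults.
#
#   PARAM='{iters: 20000, Ntrn: 20}' python fig_yj.py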
output = {}
output['errors'] = {}
errors = output['errors']
np.seterr(all='ignore') # Careful with this
rseed = np.random.randint(10**8)  # note: this overrides the 'rseed' parameter set above
t1 = time()
trn_data, trn_labels, tst_data, tst_labels = data_yj(params)
norm_trn_data, norm_tst_data = norm(trn_data, tst_data)
norm_trn_data0, norm_trn_data1 = split(norm_trn_data, trn_labels)
norm_tst_data0, norm_tst_data1 = split(norm_tst_data, tst_labels)
trn_data0, trn_data1 = split(trn_data, trn_labels)
tst_data0, tst_data1 = split(tst_data, tst_labels)
#################### CLASSIFICATION ################
sklda = LDA()
skknn = KNN(3, warn_on_equidistant=False)
sksvm = SVC()
sklda.fit(norm_trn_data, trn_labels)
skknn.fit(norm_trn_data, trn_labels)
sksvm.fit(norm_trn_data, trn_labels)
errors['lda'] = (1-sklda.score(norm_tst_data, tst_labels))
errors['knn'] = (1-skknn.score(norm_tst_data, tst_labels))
errors['svm'] = (1-sksvm.score(norm_tst_data, tst_labels))
print("skLDA error: %f" % errors['lda'])
print("skKNN error: %f" % errors['knn'])
print("skSVM error: %f" % errors['svm'])
kappa = 10
bayes0 = GaussianBayes(np.zeros(num_feat), 1, kappa, np.eye(num_feat)*(kappa-1-num_feat), norm_trn_data0)
bayes1 = GaussianBayes(np.zeros(num_feat), 1, kappa, np.eye(num_feat)*(kappa-1-num_feat), norm_trn_data1)
# Gaussian Analytic
gc = GaussianCls(bayes0, bayes1)
errors['gauss'] = gc.approx_error_data(norm_tst_data, tst_labels)
print("Gaussian Analytic error: %f" % errors['gauss'])
# MPM Model
dist0 = MPMDist(trn_data0,kmax=1,priorkappa=180,lammove=0.002,mumove=0.08)
dist1 = MPMDist(trn_data1,kmax=1,priorkappa=180,lammove=0.002,mumove=0.08)
mpm = MPMCls(dist0, dist1)
mhmc = mh.MHRun(mpm, burn=1000, thin=50)
mhmc.sample(iters,verbose=False)
errors['mpm'] = mpm.approx_error_data(mhmc.db, tst_data, tst_labels,numlam=50)
print("MPM Sampler error: %f" % errors['mpm'])
output['acceptance'] = float(mhmc.accept_loc)/mhmc.total_loc
mhmc.clean_db()
kappa = 200
S0 = (np.ones(4) + (np.eye(4)-1)*0.4) * (kappa - 4 - 1) *0.2
S1 = (np.ones(4) + (np.eye(4)-1)*0.4) * (kappa - 4 - 1) *0.6
priormu1 = np.ones(4)*0.6
priorsigma = np.ones(4) * 0.1
dist0 = MPMDist(trn_data0,kmax=1,priorkappa=280,lammove=0.002,mumove=0.08,S=S0,kappa=kappa,
priorsigma=priorsigma)
dist1 = MPMDist(trn_data1,kmax=1,priorkappa=280,lammove=0.002,mumove=0.08,S=S1,kappa=kappa,
priormu=priormu1, priorsigma=priorsigma)
mpm = MPMCls(dist0, dist1)
mhmc = mh.MHRun(mpm, burn=1000, thin=50)
mhmc.sample(iters,verbose=False)
errors['mpm_prior'] = mpm.approx_error_data(mhmc.db, tst_data, tst_labels,numlam=50)
print("MPM prior Sampler error: %f" % errors['mpm_prior'])
output['acceptance_prior'] = float(mhmc.accept_loc)/mhmc.total_loc
mhmc.clean_db()
output['seed'] = seed
output['time'] = time()-t1
if 'WORKHASH' in os.environ:
import zmq
ctx = zmq.Context()
socket = ctx.socket(zmq.REQ)
socket.connect('tcp://'+server+':7000')
wiredata = zlib.compress(js.dumps(output))
#wiredata = s.read_db()
socket.send(os.environ['WORKHASH'], zmq.SNDMORE)
socket.send(wiredata)
socket.recv()
socket.close()
ctx.term()
|
binarybana/samcnet
|
exps/fig_yj.py
|
Python
|
mit
| 4,928
|
[
"Gaussian"
] |
3236aed8022fa0bf404fb44eb44120444e1ef7e174ee2998512bb4ee7218b079
|
import numpy as np
from numpy.fft import fft2, ifft2, ifftshift
from templatetracker.correlationfilter.utils import pad, crop, fast2dconv
def gaussian_correlation(x, z, sigma=0.2, boundary='constant'):
r"""
Gaussian kernel correlation.
Parameters
----------
x : ``(channels, height, width)`` `ndarray`
Template image.
z : ``(channels, height, width)`` `ndarray`
Input image.
sigma: `float`, optional
Kernel std.
Returns
-------
    kxz: ``(1, height, width)`` `ndarray`
Gaussian kernel correlation between the image and the template.
"""
# norms
x_norm = x.ravel().T.dot(x.ravel())
z_norm = z.ravel().T.dot(z.ravel())
# cross correlation
xz = np.sum(fast2dconv(z, x[:, ::-1, ::-1], boundary=boundary), axis=0)
# gaussian kernel
kxz = np.exp(-(1/sigma**2) * np.maximum(0, x_norm + z_norm - 2 * xz))
return kxz[None]
def polynomial_correlation(x, z, a=5, b=1, boundary='constant'):
r"""
Polynomial kernel correlation.
Parameters
----------
x : ``(channels, height, width)`` `ndarray`
Template image.
z : ``(channels, height, width)`` `ndarray`
Input image.
a: `float`, optional
Kernel exponent.
b: `float`, optional
Kernel constant.
Returns
-------
kxz: ``(1, height, width)`` `ndarray`
Polynomial kernel correlation between the image and the template.
"""
# cross correlation
xz = np.sum(fast2dconv(z, x[:, ::-1, ::-1], boundary=boundary), axis=0)
# polynomial kernel
kxz = (xz + b) ** a
return kxz[None]
def linear_correlation(x, z, boundary='constant'):
r"""
Linear kernel correlation (dot product).
Parameters
----------
x : ``(channels, height, width)`` `ndarray`
Template image.
z : ``(channels, height, width)`` `ndarray`
Input image.
Returns
-------
xz: ``(1, height, width)`` `ndarray`
Linear kernel correlation between the image and the template.
"""
# cross correlation
xz = np.sum(fast2dconv(z, x[:, ::-1, ::-1], boundary=boundary), axis=0)
return xz[None]
def learn_kcf(x, y, kernel_correlation=gaussian_correlation, l=0.01,
boundary='constant', **kwargs):
r"""
Kernelized Correlation Filter (KCF).
Parameters
----------
x : ``(channels, height, width)`` `ndarray`
Template image.
y : ``(1, height, width)`` `ndarray`
Desired response.
kernel_correlation: `callable`, optional
Callable implementing a particular type of kernel correlation i.e.
gaussian, polynomial or linear.
l: `float`, optional
Regularization parameter.
boundary: str {`constant`, `symmetric`}, optional
Determines how the image is padded.
    Returns
    -------
    alpha: ``(1, height, width)`` `ndarray`
        Kernelized Correlation Filter (KCF) associated to the template image.
    cropped_x: ``(channels, height, width)`` `ndarray`
        Template image cropped to the shape of the desired response.
References
----------
.. [1] J. F. Henriques, R. Caseiro, P. Martins, J. Batista. "High-Speed
Tracking with Kernelized Correlation Filters". TPAMI, 2015.
"""
# extended shape
x_shape = np.asarray(x.shape[-2:])
y_shape = np.asarray(y.shape[-2:])
ext_shape = x_shape + y_shape - 1
# extend desired response
ext_x = pad(x, ext_shape, boundary=boundary)
ext_y = pad(y, ext_shape)
# ffts of extended auto kernel correlation and extended desired response
fft_ext_kxx = fft2(kernel_correlation(ext_x, ext_x, **kwargs))
fft_ext_y = fft2(ext_y)
# extended kernelized correlation filter
ext_alpha = np.real(ifftshift(ifft2(fft_ext_y / (fft_ext_kxx + l)),
axes=(-2, -1)))
return crop(ext_alpha, y_shape), crop(x, y_shape)
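# A minimal usage sketch (synthetic data; a 2D Gaussian peaked at the centre
# is a common choice for the desired response y):
#
#   x = np.random.randn(3, 64, 64)                            # template
#   gy, gx = np.mgrid[-32:32, -32:32]
#   y = np.exp(-(gx ** 2 + gy ** 2) / (2 * 5.0 ** 2))[None]   # (1, 64, 64)
#   alpha, cropped_x = learn_kcf(x, y, sigma=0.2)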
def learn_deep_kcf(x, y, n_levels=3, kernel_correlation=gaussian_correlation,
l=0.01, boundary='constant', **kwargs):
r"""
Deep Kernelized Correlation Filter (DKCF).
Parameters
----------
x : ``(channels, height, width)`` `ndarray`
Template image.
y : ``(1, height, width)`` `ndarray`
Desired response.
n_levels: `int`, optional
Number of levels.
kernel_correlation: `callable`, optional
Callable implementing a particular type of kernel correlation i.e.
gaussian, polynomial or linear.
l: `float`, optional
Regularization parameter.
boundary: str {`constant`, `symmetric`}, optional
Determines how the image is padded.
Returns
-------
    deep_alpha: ``(1, height, width)`` `ndarray`
        Deep Kernelized Correlation Filter (DKCF) associated to the template
        image.
    alphas: ``(n_levels, 1, height, width)`` `ndarray`
        The individual per-level filters.
"""
    # learn the first-level filter (learn_kcf returns the filter and the
    # cropped template; only the filter is needed here)
    alpha, _ = learn_kcf(x, y, kernel_correlation=kernel_correlation, l=l,
                         boundary=boundary, **kwargs)
    # initialize alphas
    alphas = np.empty((n_levels,) + alpha.shape)
    # for each level (the loop index must not reuse `l`, which is the
    # regularization parameter passed to learn_kcf)
    for level in range(n_levels):
        # store filter
        alphas[level] = alpha
        # compute kernel auto-correlation of the current level's input
        kxz = kernel_correlation(x, x, **kwargs)
        # compute kernel correlation response; this becomes the next input
        x = fast2dconv(kxz, alpha)
        # learn the next-level filter from the responses
        alpha, _ = learn_kcf(x, y, kernel_correlation=kernel_correlation, l=l,
                             boundary=boundary, **kwargs)
    # compute the equivalent deep filter by convolving the per-level filters
    deep_alpha = alphas[0]
    for a in alphas[1:]:
        deep_alpha = fast2dconv(deep_alpha, a, boundary=boundary)
    return deep_alpha, alphas
|
jalabort/templatetracker
|
templatetracker/correlationfilter/kernelizedfilter.py
|
Python
|
bsd-3-clause
| 5,605
|
[
"Gaussian"
] |
80151dcc5eb6870987a77535382d710caa1a3a1474c102dca78e5742f7a0bea2
|
from ase import Atoms
from ase.calculators.emt import EMT
from ase.constraints import FixBondLength
from ase.io import PickleTrajectory
from ase.optimize import BFGS
a = 3.6
b = a / 2
cu = Atoms('Cu2Ag',
positions=[(0, 0, 0),
(b, b, 0),
(a, a, b)],
calculator=EMT())
e0 = cu.get_potential_energy()
print(e0)
d0 = cu.get_distance(0, 1)
cu.set_constraint(FixBondLength(0, 1))
t = PickleTrajectory('cu2ag.traj', 'w', cu)
qn = BFGS(cu)
qn.attach(t.write)
def f(): print(cu.get_distance(0, 1))
qn.attach(f)
qn.run(fmax=0.01)
assert abs(cu.get_distance(0, 1) - d0) < 1e-14
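# A minimal follow-up sketch (assuming the optimization above ran): the saved
# trajectory can be read back frame by frame with the same class.
#
#   for image in PickleTrajectory('cu2ag.traj'):
#       print(image.get_potential_energy())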
|
grhawk/ASE
|
tools/ase/test/emt1.py
|
Python
|
gpl-2.0
| 632
|
[
"ASE"
] |
70e4d42ec86764b496acfc8cbbfbb12045ef300d9872c2f909413c1fc808f04b
|
"""
TornadoREST is the base class for your RESTful API handlers.
It directly inherits from :py:class:`tornado.web.RequestHandler`
"""
import os
import inspect
from tornado.escape import json_decode
from tornado.web import url as TornadoURL
from urllib.parse import unquote
from functools import partial
from DIRAC import gLogger
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Tornado.Server.private.BaseRequestHandler import *
sLog = gLogger.getSubLogger(__name__)
# decorator to determine the path to access the target method
location = partial(set_attribute, "location")
location.__doc__ = """
Use this decorator to determine the request path to the target method
Example:
@location('/test/myAPI')
def post_my_method(self, a, b):
''' Usage:
requests.post(url + '/test/myAPI?a=value1?b=value2', cert=cert).context
'["value1", "value2"]'
'''
return [a, b]
"""
class TornadoREST(BaseRequestHandler): # pylint: disable=abstract-method
"""Base class for all the endpoints handlers.
### Example
In order to create a handler for your service, it has to follow a certain skeleton.
Simple example:
.. code-block:: python
from DIRAC.Core.Tornado.Server.TornadoREST import *
class yourEndpointHandler(TornadoREST):
def get_hello(self, *args, **kwargs):
''' Usage:
requests.get(url + '/hello/pos_arg1', params=params).json()['args]
['pos_arg1']
'''
return {'args': args, 'kwargs': kwargs}
.. code-block:: python
from diraccfg import CFG
from DIRAC.Core.Utilities.JDL import loadJDLAsCFG, dumpCFGAsJDL
from DIRAC.Core.Tornado.Server.TornadoREST import *
from DIRAC.WorkloadManagementSystem.Client.JobManagerClient import JobManagerClient
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
class yourEndpointHandler(TornadoREST):
# Specify the default permission for the handler
DEFAULT_AUTHORIZATION = ['authenticated']
# Base URL
DEFAULT_LOCATION = "/"
@classmethod
def initializeHandler(cls, infosDict):
''' Initialization '''
cls.my_requests = 0
cls.j_manager = JobManagerClient()
cls.j_monitor = JobMonitoringClient()
def initializeRequest(self):
''' Called at the beginning of each request '''
self.my_requests += 1
# In the annotation, you can specify the expected value type of the argument
def get_job(self, jobID:int, category=None):
'''Usage:
requests.get(f'https://myserver/job/{jobID}', cert=cert)
requests.get(f'https://myserver/job/{jobID}/owner', cert=cert)
requests.get(f'https://myserver/job/{jobID}/site', cert=cert)
'''
if not category:
return self.j_monitor.getJobStatus(jobID)
if category == 'owner':
return self.j_monitor.getJobOwner(jobID)
            if category == 'site':
                return self.j_monitor.getJobSite(jobID)
else:
# TornadoResponse allows you to call tornadoes methods, thread-safe
return TornadoResponse().redirect(f'/job/{jobID}')
def get_jobs(self, owner=None, *, jobGroup=None, jobName=None):
'''Usage:
requests.get(f'https://myserver/jobs', cert=cert)
requests.get(f'https://myserver/jobs/{owner}?jobGroup=job_group?jobName=job_name', cert=cert)
'''
conditions = {"Owner": owner or self.getRemoteCredentials}
if jobGroup:
conditions["JobGroup"] = jobGroup
if jobName:
conditions["JobName"] = jobName
            return self.j_monitor.getJobs(conditions)
def post_job(self, manifest):
'''Usage:
requests.post(f'https://myserver/job', cert=cert, json=[{Executable: "/bin/ls"}])
'''
jdl = dumpCFGAsJDL(CFG.CFG().loadFromDict(manifest))
return self.j_manager.submitJob(str(jdl))
def delete_job(self, jobIDs):
'''Usage:
requests.delete(f'https://myserver/job', cert=cert, json=[123, 124])
'''
return self.j_manager.deleteJob(jobIDs)
@authentication(["VISITOR"])
@authorization(["all"])
def options_job(self):
'''Usage:
requests.options(f'https://myserver/job')
'''
return "You use OPTIONS method to access job manager API."
.. note:: This example aims to show how access interfaces can be implemented and no more
This class can read the method annotation to understand what type of argument expects to get the method,
see :py:meth:`_getMethodArgs`.
Note that because we inherit from :py:class:`tornado.web.RequestHandler`
and we are running using executors, the methods you export cannot write
back directly to the client. Please see inline comments in
:py:class:`BaseRequestHandler <DIRAC.Core.Tornado.Server.private.BaseRequestHandler.BaseRequestHandler>` for more details.
"""
# By default we enable all authorization grants, see DIRAC.Core.Tornado.Server.private.BaseRequestHandler for details
DEFAULT_AUTHENTICATION = ["SSL", "JWT", "VISITOR"]
METHOD_PREFIX = None
DEFAULT_LOCATION = "/"
@classmethod
def _pre_initialize(cls) -> list:
"""This method is run by the Tornado server to prepare the handler for launch
this method is run before the server tornado starts for each handler.
it does the following:
- searches for all possible methods for which you need to create routes
- reads their annotation if present
- adds attributes to each target method that help to significantly speed up
the processing of the values of the target method arguments for each query
- prepares mappings between URLs and handlers/method in a clear tornado format
:returns: a list of URL (not the string with "https://..." but the tornado object)
see http://www.tornadoweb.org/en/stable/web.html#tornado.web.URLSpec
"""
urls = []
# Look for methods that are exported
for mName in cls.__dict__:
mObj = cls.__dict__[mName]
if cls.METHOD_PREFIX and mName.startswith(cls.METHOD_PREFIX):
# Target methods begin with a prefix defined for all supported http methods,
# e.g.: def export_myMethod(self):
prefix = len(cls.METHOD_PREFIX)
elif _prefix := [
p for p in cls.SUPPORTED_METHODS if mName.startswith(f"{p.lower()}_") # pylint: disable=no-member
]:
# Target methods begin with the name of the http method,
# e.g.: def post_myMethod(self):
prefix = len(_prefix[-1]) + 1
else:
# The name of the target method must contain a special prefix
continue
# if the method exists we will continue
if callable(mObj) and (methodName := mName[prefix:]):
sLog.debug(f" Find {mName} method")
# Find target method URL
url = os.path.join(
cls.DEFAULT_LOCATION, getattr(mObj, "location", "" if methodName == "index" else methodName)
)
if cls.BASE_URL and cls.BASE_URL.strip("/"):
url = cls.BASE_URL.strip("/") + (f"/{url}" if (url := url.strip("/")) else "")
url = f"/{url.strip('/')}/?"
sLog.verbose(f" - Route {url} -> {cls.__name__}.{mName}")
# Discover positional arguments
mObj.var_kwargs = False # attribute indicating the presence of `**kwargs``
args = []
kwargs = {}
# Read signature of a target function to explore arguments and their types
# https://docs.python.org/3/library/inspect.html#inspect.Signature
signature = inspect.signature(mObj)
for name in list(signature.parameters)[1:]: # skip `self` argument
# Consider in detail the description of the argument of the objective function
# to correctly form the route and determine the type of argument,
# see https://docs.python.org/3/library/inspect.html#inspect.Parameter
kind = signature.parameters[name].kind # argument type
default = signature.parameters[name].default # argument default value
# Determine what type of the target function argument is expected. By Default it's None.
_type = (
# Select the type specified in the target function, if any.
signature.parameters[name].annotation
if signature.parameters[name].annotation is not inspect.Parameter.empty
# If there is no argument annotation, take the default value type, if any
else type(default)
if default is not inspect.Parameter.empty and default is not None
# If you can not determine the type then leave None
else None
)
# Consider separately the positional arguments
if kind in [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]:
# register the positional argument type
args.append(_type)
# is argument optional
is_optional = (
kind is inspect.Parameter.POSITIONAL_OR_KEYWORD or default is inspect.Parameter.empty
)
# add to tornado route url regex describing the argument according to the type (if the type is specified)
# only simple types are considered, which should be more than enough
if _type is int:
url += r"(?:/([+-]?\d+)?)?" if is_optional else r"/([+-]?\d+)"
elif _type is float:
url += r"(?:/([+-]?\d*\.?\d+)?)?" if is_optional else r"/([+-]?\d*\.?\d+)"
elif _type is bool:
url += r"(?:/([01]|[A-z]+)?)?" if is_optional else r"/([01]|[A-z]+)"
else:
url += r"(?:/([\w%]+)?)?" if is_optional else r"/([\w%]+)"
# Consider separately the keyword arguments
if kind in [inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]:
# register the keyword argument type
kwargs[name] = _type
if kind == inspect.Parameter.VAR_KEYWORD:
# if `**kwargs` is available in the target method,
# all additional query arguments will be passed there
mObj.var_kwargs = True
url += r"(?:[?&].+=.+)*"
# We will leave the results of the study here so as not to waste time on each request
mObj.keyword_kwarg_types = kwargs # an attribute that contains types of keyword arguments
mObj.positional_arg_types = args # an attribute that contains types of positional arguments
# We collect all generated tornado url for target handler methods
if url not in urls:
sLog.debug(f" * {url}")
urls.append(TornadoURL(url, cls, dict(method=methodName)))
return urls
@classmethod
def _getComponentInfoDict(cls, fullComponentName: str, fullURL: str) -> dict:
"""Fills the dictionary with information about the current component,
:param fullComponentName: full component name, see :py:meth:`_getFullComponentName`
:param fullURL: incoming request path
"""
return {}
@classmethod
def _getCSAuthorizarionSection(cls, apiName):
"""Search endpoint auth section.
:param str apiName: API name, see :py:meth:`_getFullComponentName`
:return: str
"""
return "%s/Authorization" % PathFinder.getAPISection(apiName)
def _getMethod(self):
"""Get target method function to call. By default we read the first section in the path
following the coincidence with the value of `DEFAULT_LOCATION`.
If such a method is not defined, then try to use the `index` method.
You can also restrict access to a specific method by adding a http method name as a target method prefix::
# Available from any http method specified in SUPPORTED_METHODS class variable
def export_myMethod(self, data):
if self.request.method == 'POST':
# Do your "post job" here
return data
# Available only for POST http method if it specified in SUPPORTED_METHODS class variable
def post_myMethod(self, data):
# Do your "post job" here
return data
:return: function name
"""
prefix = self.METHOD_PREFIX or f"{self.request.method.lower()}_"
# the method key is appended to the URLSpec object when handling the handler in `_pre_initialize`,
# the tornado server passes this argument to `initialize` method.
# Read more about it https://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.initialize
return getattr(self, f"{prefix}{self._init_kwargs['method']}")
def _getMethodArgs(self, args: tuple, kwargs: dict) -> tuple:
"""Search method arguments.
By default, the arguments are taken from the description of the method itself.
Then the arguments received in the request are assigned by the name of the method arguments.
Usage:
# requests.post(url + "/my_api/pos_only_value", data={'standard': standard_value, 'kwd_only': kwd_only_value}, ..
# requests.post(url + "/my_api", json=[pos_only_value, standard_value, kwd_only_value], ..
@location("/my_api")
def post_note(self, pos_only, /, standard, *, kwd_only):
..
.. warning:: this means that the target methods cannot be wrapped in the decorator,
or if so the decorator must duplicate the arguments and annotation of the target method
:param args: positional arguments that comes from request path
:return: target method args and kwargs
"""
keywordArguments = {}
positionalArguments = []
for i, a in enumerate(args):
if a:
if _type := self.methodObj.positional_arg_types[i]:
positionalArguments.append(_type(unquote(a)))
else:
positionalArguments.append(unquote(a))
if self.request.headers.get("Content-Type") == "application/json":
decoded = json_decode(body) if (body := self.request.body) else []
return (positionalArguments + decoded, {}) if isinstance(decoded, list) else (positionalArguments, decoded)
for name in self.request.arguments:
if name in self.methodObj.keyword_kwarg_types or self.methodObj.var_kwargs:
_type = self.methodObj.keyword_kwarg_types.get(name)
# Get list of the arguments or on argument according to the type
value = self.get_arguments(name) if _type in (tuple, list, set) else self.get_argument(name)
# Wrap argument with annotated type
keywordArguments[name] = _type(value) if _type else value
return (positionalArguments, keywordArguments)
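# A minimal sketch of the resulting routing (hypothetical handler, in the
# spirit of the class docstring above): with DEFAULT_LOCATION = "/", a method
# named get_hello(self, name) is mapped to a route of roughly the form
# r"/hello(?:/([\w%]+)?)?", so a GET to /hello/world invokes it with
# name='world' after unquoting.
#
#   class HelloHandler(TornadoREST):
#       def get_hello(self, name):
#           return {"hello": name}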
|
ic-hep/DIRAC
|
src/DIRAC/Core/Tornado/Server/TornadoREST.py
|
Python
|
gpl-3.0
| 16,483
|
[
"DIRAC"
] |
fde4591975677bc9ecd6b73479f497c20d02fa39eb940786a2fdc9711d1b8733
|
"""
Load and Plot from a File
~~~~~~~~~~~~~~~~~~~~~~~~~
Read a dataset from a known file type.
"""
###############################################################################
# Loading a mesh is trivial - if your data is in one of the many supported
# file formats, simply use :func:`pyvista.read` to load your spatially
# referenced dataset into a PyVista mesh object.
#
# The following code block uses a built-in example file and displays an
# airplane mesh.
# sphinx_gallery_thumbnail_number = 5
import pyvista as pv
from pyvista import examples
###############################################################################
# The following code block uses a built-in example
# file, displays an airplane mesh and returns the camera's position:
# Get a sample file
filename = examples.planefile
filename
###############################################################################
# Note the above filename, it's a ``.ply`` file - one of the many supported
# formats in PyVista.
mesh = pv.read(filename)
cpos = mesh.plot()
###############################################################################
# You can also take a screenshot without creating an interactive plot window
# using the ``Plotter``:
plotter = pv.Plotter(off_screen=True)
plotter.add_mesh(mesh)
plotter.show(screenshot="myscreenshot.png")
###############################################################################
# The points from the mesh are directly accessible as a NumPy array:
mesh.points
###############################################################################
# The faces from the mesh are also directly accessible as a NumPy array:
mesh.faces.reshape(-1, 4)[:, 1:] # triangular faces
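###############################################################################
# The ``reshape(-1, 4)`` above assumes an all-triangle mesh: VTK stores faces
# as a flat array of ``[n_points, id0, id1, ...]`` records, so each triangle
# occupies 4 integers and dropping the first column leaves the three point
# indices per face.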
###############################################################################
# Loading other file types is just as easy! Simply pass your file path to the
# :func:`pyvista.read` function and that's it!
#
# Here are a few other examples - simply replace ``examples.download_*`` in the
# examples below with ``pyvista.read('path/to/your/file.ext')``
###############################################################################
# Example STL file:
mesh = examples.download_cad_model()
cpos = [(107.0, 68.5, 204.0), (128.0, 86.5, 223.5), (0.45, 0.36, -0.8)]
mesh.plot(cpos=cpos)
###############################################################################
# Example OBJ file
mesh = examples.download_doorman()
mesh.plot(cpos="xy")
###############################################################################
# Example BYU file
mesh = examples.download_teapot()
mesh.plot(cpos=[-1, 2, -5], show_edges=True)
###############################################################################
# Example VTK file
mesh = examples.download_bunny_coarse()
cpos = [(0.2, 0.3, 0.9), (0, 0, 0), (0, 1, 0)]
mesh.plot(cpos=cpos, show_edges=True, color=True)
|
akaszynski/vtkInterface
|
examples/00-load/read-file.py
|
Python
|
mit
| 2,859
|
[
"VTK"
] |
f43249fa89c8689f48473df21aaf87edaa327fff91b26d270f4c12af5a75ac92
|
#!/usr/bin/env python3
import gensafeprime
import contextlib
import textwrap
import hashlib
import fuckpy3 #pylint:disable=unused-import
import random
import numpy as np
import ast
import os
import re
banner = r"""
___ ___ ___ _____ _
/ _ \ / _ \ / _ \ | ___| | __ _ __ _
| | | | | | | | | | | |_ | |/ _` |/ _` |
| |_| | |_| | |_| | | _| | | (_| | (_| |
\___/ \___/ \___/ |_| |_|\__,_|\__, |
|___/
____ _ _ ____ _
/ ___|| |__ __ _ _ __(_)_ __ __ _ / ___| ___ _ ____ _(_) ___ ___
\___ \| '_ \ / _` | '__| | '_ \ / _` | \___ \ / _ \ '__\ \ / / |/ __/ _ \
___) | | | | (_| | | | | | | | (_| | ___) | __/ | \ V /| | (_| __/
|____/|_| |_|\__,_|_| |_|_| |_|\__, | |____/ \___|_| \_/ |_|\___\___|
|___/
"""
#
# Matrix stuff
#
def pascal_matrix(n, k):
matrix = np.ones((n, k)).astype(int)
for r in range(1, n):
for c in range(1, k):
matrix[r,c] = matrix[r,c-1] + matrix[r-1,c]
assert np.linalg.matrix_rank(matrix) == k
m = [ list(map(int, row)) for row in matrix ]
return m
def random_matrix(n, k):
matrix = [ list(map(int, row)) for row in (np.random.rand(n,k)*1000).astype(int) ]
assert np.linalg.matrix_rank(matrix) == k
return matrix
def calc_det(A):
    """Determinant of an integer matrix via cofactor expansion along the
    first row; exact integer arithmetic, unlike np.linalg.det's float."""
    n, _ = np.shape(A)
    if n == 1:
        return A[0, 0]
    else:
        S = 0
        for i in range(n):
            L = [x for x in range(n) if x != i]
            S += (-1) ** i * A[0, i] * calc_det(A[1:, L])
        return int(S)
#
# OOO Secret Sharing Scheme
#
def split_secret(key, n, k, matrix):
assert len(matrix) == n, "misshaped matrix"
assert len(matrix[0]) == k, "misshaped matrix"
x = [ int.from_bytes(key, byteorder='little') ]
for _ in range(k-1):
x.append(random.randint(0, P))
x = np.array(x)
shares = [ (n,int(i)) for n,i in enumerate(np.dot(matrix, x)) ]
return shares[1:]
def reconstitute_secret(keys, matrix):
k = len(matrix[0])
assert k <= len(keys), "not enough keys"
assert np.linalg.matrix_rank(matrix) == k, "linearly dependent keys"
subkeys = sorted(keys[:k])
submatrix = [ matrix[e[0]] for e in subkeys ]
subshares = [ e[-1] for e in subkeys ]
det = calc_det(np.array(submatrix))
inv_float = np.linalg.inv(submatrix)
    # recover the first row of adj(A) = det * A^-1 as exact integers, scale it
    # by det^-1 mod P, then dot it with the shares to recover the secret
    adj_row = [int(round(det * inv_float[0][i])) for i in range(k)]
    inv_row = [i * pow(det, -1, P) for i in adj_row]
    key = (int(sum(i * j for i, j in zip(inv_row, subshares))) % P).to_bytes(32, byteorder='little')
return key
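# A minimal sketch of the scheme (see sanity_check() at the bottom of this
# file for the real self-test): the secret is the first coordinate of a
# random vector x over Z/P, each share is (row_index, row . x) for a row of
# the public matrix M, and any K shares whose rows are linearly independent
# recover x by inverting the corresponding K x K submatrix mod P.
#
#   shares = split_secret(b'hello', N, K, M)
#   assert reconstitute_secret(shares[:K], M).strip(b'\x00') == b'hello'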
#
# Menu library
#
def one_menu(items, done_option=True, default=None):
choices = [ None ]
for item in items:
if type(item) in (str, bytes):
print(item)
else:
print("%d <- %s" % (len(choices), item[0]))
choices.append(item[1])
if done_option:
print("0 <- Done.")
cstr = input("Choice: ")
if not cstr:
return default if default is not None else one_menu(items, done_option=done_option)
choice = int(cstr)
assert 0 <= choice < len(choices), "Invalid choice!"
assert choice or done_option, "Invalid choice!"
return choices[choice]
def menu(*items, do_while=False, loop=False, done_option=False, default=None):
if do_while: yield True
c = default
while True:
c = one_menu(items, done_option=done_option, default=c)
if callable(c): yield c()
elif c is None: break
else: yield c
if not loop: break
#
# Menu handlers
#
def share_user_flag():
secret = input("Enter secret to share: ").bytes()
secret_id = hashlib.md5(secret).hexdigest()[:6]
print("Your secret's ID is:", secret_id)
shares = split_secret(secret, N, K, M)
random.shuffle(shares)
total_shares = int(input("Number of shares to make: "))
assert total_shares >= K, "Too few shares; you won't be able to reconstitute the secret!"
with open(os.path.join(SHAREDIR, secret_id+".1"), "w") as f:
f.write(str(shares[0]))
print("Your shares are:", shares[1:total_shares])
print("Your stored share is safe with us!")
def redeem_user_flag():
secret_id = input("Enter the secret's ID: ")
assert re.match(r"^\w\w\w\w\w\w$", secret_id), "Invalid ID format!"
user_shares = ast.literal_eval(input("Enter your shares of the secret: "))
stored_share = ast.literal_eval(open(os.path.join(SHAREDIR, secret_id+".1")).read().strip())
shares = user_shares + [ stored_share ]
secret = reconstitute_secret(shares, M).strip(b"\x00")
print("Your secret is:", secret)
def share_actual_flag():
the_flag = open(os.path.join(os.path.dirname(__file__), "flag"), "rb").read().strip()
shares = split_secret(the_flag, N, K, M)
sanity_flag = reconstitute_secret(shares, M).strip(b"\x00")
assert sanity_flag == the_flag
random.shuffle(shares)
secret_id = os.urandom(3).hex()
with open(os.path.join(SHAREDIR, secret_id+".1"), "w") as f:
f.write(str(shares[0]))
with open(os.path.join(SHAREDIR, secret_id+".2"), "w") as f:
f.write(str(shares[1]))
print("Our secret's ID is:", secret_id)
print("Your shares are:", shares[2:K])
print("Our stored shares are quite safe with us!")
def redeem_actual_flag():
secret_id = input("Enter the secret's ID: ")
assert re.match(r"^\w\w\w\w\w\w$", secret_id), "Invalid ID format!"
user_shares = ast.literal_eval(input("Enter your shares of the secret: "))
stored_share1 = ast.literal_eval(open(os.path.join(SHAREDIR, secret_id+".1")).read().strip())
stored_share2 = ast.literal_eval(open(os.path.join(SHAREDIR, secret_id+".2")).read().strip())
shares = [ stored_share1, stored_share2 ] + user_shares
assert len(set(s[0] for s in shares)) == len(shares), "Duplicate shares."
secret = reconstitute_secret(shares, M).strip(b"\x00")
if secret.startswith(b"OOO{"):
print("Congrats! You have decoded our secret. We must have trusted you!")
def login():
global USER
global SHAREDIR
print("Welcome to the...")
print(banner)
print('\n'.join(textwrap.wrap("OOO has finally solved the flag sharing problem by making it quick and easy for aspiring cheaters to share flags by utilizing a secure and exciting secret sharing scheme! OOO reserves the right to withhold flag shares where deemed appropriate.", width=80)))
print()
USER = input("Username: ")
assert USER.lower() != "ooo", "No way!"
assert USER.lower() != "zardus", "That's me!"
assert USER.lower() != "malina", "Nope!"
assert re.match(r"^\w+$", USER), "Invalid username format!"
SHAREDIR = os.path.join(os.path.dirname(__file__), "shares", USER)
with contextlib.suppress(FileExistsError):
os.makedirs(SHAREDIR)
main_menu()
def main_menu():
for _ in menu(
*[ f"What do, {USER}?" ] +
[
("Share useless flag.", share_user_flag),
("Redeem useless flag.", redeem_user_flag),
("Store scoring flag.", share_actual_flag),
("Retrieve scoring flag.", redeem_actual_flag)
],
loop=True, done_option=True
):
pass
if not os.path.exists(os.path.join(os.path.dirname(__file__), "prime.ooo")):
print("[STARTUP] Generating prime...")
with open("prime.ooo", 'w') as _f:
_f.write(str(gensafeprime.generate(256)))
if not os.path.exists(os.path.join(os.path.dirname(__file__), "matrix.ooo")):
print("[STARTUP] Generating matrix...")
with open("matrix.ooo", 'w') as _f:
_f.write(str(random_matrix(100, 5)))
P = ast.literal_eval(open("prime.ooo").read().strip())
M = ast.literal_eval(open("matrix.ooo").read().strip())
N = len(M)
K = len(M[0])
def sanity_check(n=N, k=K, m=M):
def one_check(secret):
shares = split_secret(secret, n, k, m)
random.shuffle(shares)
new_secret = reconstitute_secret(shares[:k], m)
assert secret.ljust(32, b"\x00") == new_secret
for _ in range(1000):
one_check(os.urandom(random.randint(0, 31)))
one_check(open(os.path.join(os.path.dirname(__file__), "flag"), "rb").read().strip())
if __name__ == '__main__':
USER = None
SHAREDIR = None
try:
login()
except Exception as e:
print("ERROR:", e)
|
Qwaz/solved-hacking-problem
|
DEFCON/2020 Quals/ooo-flag-sharing/chal.original.py
|
Python
|
gpl-2.0
| 8,529
|
[
"exciting"
] |
5f5ea357d2504d94f620b8f16f3cea408521e7b7634372537f897ec6c416f9e9
|
##########################################################################
#
# Copyright 2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""Generic retracing code generator."""
# Adjust path
import os.path
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import specs.stdapi as stdapi
class UnsupportedType(Exception):
pass
def lookupHandle(handle, value, lval=False):
if handle.key is None:
return "_%s_map[%s]" % (handle.name, value)
else:
key_name, key_type = handle.key
if handle.name == "location" and lval == False:
return "_location_map[%s].lookupUniformLocation(%s)" % (key_name, value)
else:
return "_%s_map[%s][%s]" % (handle.name, key_name, value)
class ValueAllocator(stdapi.Visitor):
def visitLiteral(self, literal, lvalue, rvalue):
pass
def visitConst(self, const, lvalue, rvalue):
self.visit(const.type, lvalue, rvalue)
def visitAlias(self, alias, lvalue, rvalue):
self.visit(alias.type, lvalue, rvalue)
def visitEnum(self, enum, lvalue, rvalue):
pass
def visitBitmask(self, bitmask, lvalue, rvalue):
pass
def visitArray(self, array, lvalue, rvalue):
print(' %s = _allocator.allocArray<%s>(&%s);' % (lvalue, array.type, rvalue))
def visitPointer(self, pointer, lvalue, rvalue):
print(' %s = _allocator.allocArray<%s>(&%s);' % (lvalue, pointer.type, rvalue))
def visitIntPointer(self, pointer, lvalue, rvalue):
pass
def visitObjPointer(self, pointer, lvalue, rvalue):
pass
def visitLinearPointer(self, pointer, lvalue, rvalue):
pass
def visitReference(self, reference, lvalue, rvalue):
self.visit(reference.type, lvalue, rvalue);
def visitHandle(self, handle, lvalue, rvalue):
pass
def visitBlob(self, blob, lvalue, rvalue):
pass
def visitString(self, string, lvalue, rvalue):
pass
def visitStruct(self, struct, lvalue, rvalue):
pass
def visitPolymorphic(self, polymorphic, lvalue, rvalue):
assert polymorphic.defaultType is not None
self.visit(polymorphic.defaultType, lvalue, rvalue)
def visitOpaque(self, opaque, lvalue, rvalue):
pass
class ValueDeserializer(stdapi.Visitor, stdapi.ExpanderMixin):
def visitLiteral(self, literal, lvalue, rvalue):
print(' %s = (%s).to%s();' % (lvalue, rvalue, literal.kind))
def visitConst(self, const, lvalue, rvalue):
self.visit(const.type, lvalue, rvalue)
def visitAlias(self, alias, lvalue, rvalue):
self.visit(alias.type, lvalue, rvalue)
def visitEnum(self, enum, lvalue, rvalue):
print(' %s = static_cast<%s>((%s).toSInt());' % (lvalue, enum, rvalue))
def visitBitmask(self, bitmask, lvalue, rvalue):
print(' %s = static_cast<%s>((%s).toUInt());' % (lvalue, bitmask, rvalue))
def visitArray(self, array, lvalue, rvalue):
tmp = '_a_' + array.tag + '_' + str(self.seq)
self.seq += 1
print(' const trace::Array *%s = (%s).toArray();' % (tmp, rvalue))
print(' if (%s) {' % (tmp,))
length = '%s->values.size()' % (tmp,)
if self.insideStruct:
if isinstance(array.length, int):
# Member is an array
print(r' static_assert( std::is_array< std::remove_reference< decltype( %s ) >::type >::value , "lvalue must be an array" );' % lvalue)
print(r' static_assert( std::extent< std::remove_reference< decltype( %s ) >::type >::value == %s, "array size mismatch" );' % (lvalue, array.length))
print(r' assert( %s );' % (tmp,))
print(r' assert( %s->size() == %s );' % (tmp, array.length))
length = str(array.length)
else:
# Member is a pointer to an array, hence must be allocated
print(r' static_assert( std::is_pointer< std::remove_reference< decltype( %s ) >::type >::value , "lvalue must be a pointer" );' % lvalue)
print(r' %s = _allocator.allocArray<%s>(&%s);' % (lvalue, array.type, rvalue))
index = '_j' + array.tag
print(' for (size_t {i} = 0; {i} < {length}; ++{i}) {{'.format(i = index, length = length))
try:
self.visit(array.type, '%s[%s]' % (lvalue, index), '*%s->values[%s]' % (tmp, index))
finally:
print(' }')
print(' }')
def visitPointer(self, pointer, lvalue, rvalue):
tmp = '_a_' + pointer.tag + '_' + str(self.seq)
self.seq += 1
if self.insideStruct:
# Member is a pointer to an object, hence must be allocated
print(r' static_assert( std::is_pointer< std::remove_reference< decltype( %s ) >::type >::value , "lvalue must be a pointer" );' % lvalue)
print(r' %s = _allocator.allocArray<%s>(&%s);' % (lvalue, pointer.type, rvalue))
print(' if (%s) {' % (lvalue,))
print(' const trace::Array *%s = (%s).toArray();' % (tmp, rvalue))
try:
self.visit(pointer.type, '%s[0]' % (lvalue,), '*%s->values[0]' % (tmp,))
finally:
print(' }')
def visitIntPointer(self, pointer, lvalue, rvalue):
print(' %s = static_cast<%s>((%s).toPointer());' % (lvalue, pointer, rvalue))
def visitObjPointer(self, pointer, lvalue, rvalue):
print(' %s = retrace::asObjPointer<%s>(call, %s);' % (lvalue, pointer.type, rvalue))
def visitLinearPointer(self, pointer, lvalue, rvalue):
print(' %s = static_cast<%s>(retrace::toPointer(%s));' % (lvalue, pointer, rvalue))
def visitReference(self, reference, lvalue, rvalue):
        self.visit(reference.type, lvalue, rvalue)
def visitHandle(self, handle, lvalue, rvalue):
        # OpaqueValueDeserializer().visit(handle.type, lvalue, rvalue)
        self.visit(handle.type, lvalue, rvalue)
new_lvalue = lookupHandle(handle, lvalue)
shaderObject = new_lvalue.startswith('_program_map') or new_lvalue.startswith('_shader_map')
if shaderObject:
print('if (glretrace::supportsARBShaderObjects) {')
print(' if (retrace::verbosity >= 2) {')
print(' std::cout << "%s " << size_t(%s) << " <- " << size_t(_handleARB_map[%s]) << "\\n";' % (handle.name, lvalue, lvalue))
print(' }')
print(' %s = _handleARB_map[%s];' % (lvalue, lvalue))
print('} else {')
print(' if (retrace::verbosity >= 2) {')
print(' std::cout << "%s " << size_t(%s) << " <- " << size_t(%s) << "\\n";' % (handle.name, lvalue, new_lvalue))
print(' }')
print(' %s = %s;' % (lvalue, new_lvalue))
if shaderObject:
print('}')
def visitBlob(self, blob, lvalue, rvalue):
print(' %s = static_cast<%s>((%s).toPointer());' % (lvalue, blob, rvalue))
def visitString(self, string, lvalue, rvalue):
print(' %s = (%s)((%s).toString());' % (lvalue, string.expr, rvalue))
seq = 0
insideStruct = 0
def visitStruct(self, struct, lvalue, rvalue):
tmp = '_s_' + struct.tag + '_' + str(self.seq)
self.seq += 1
self.insideStruct += 1
print(' const trace::Struct *%s = (%s).toStruct();' % (tmp, rvalue))
print(' assert(%s);' % (tmp))
for i in range(len(struct.members)):
member = struct.members[i]
self.visitMember(member, lvalue, '*%s->members[%s]' % (tmp, i))
self.insideStruct -= 1
def visitPolymorphic(self, polymorphic, lvalue, rvalue):
if polymorphic.defaultType is None:
switchExpr = self.expand(polymorphic.switchExpr)
print(r' switch (%s) {' % switchExpr)
for cases, type in polymorphic.iterSwitch():
for case in cases:
print(r' %s:' % case)
caseLvalue = lvalue
if type.expr is not None:
caseLvalue = 'static_cast<%s>(%s)' % (type, caseLvalue)
print(r' {')
try:
self.visit(type, caseLvalue, rvalue)
finally:
print(r' }')
print(r' break;')
            print(r'    default:')
            print(r'        retrace::warning(call) << "unexpected polymorphic case" << %s << "\n";' % (switchExpr,))
            print(r'        break;')
print(r' }')
else:
self.visit(polymorphic.defaultType, lvalue, rvalue)
def visitOpaque(self, opaque, lvalue, rvalue):
raise UnsupportedType
class OpaqueValueDeserializer(ValueDeserializer):
'''Value extractor that also understands opaque values.
Normally opaque values can't be retraced, unless they are being extracted
in the context of handles.'''
def visitOpaque(self, opaque, lvalue, rvalue):
print(' %s = static_cast<%s>(retrace::toPointer(%s));' % (lvalue, opaque, rvalue))
class SwizzledValueRegistrator(stdapi.Visitor, stdapi.ExpanderMixin):
    '''Type visitor which registers pairs of unswizzled and swizzled values,
    so that the swizzled value can be looked up later.'''
def visitLiteral(self, literal, lvalue, rvalue):
pass
def visitAlias(self, alias, lvalue, rvalue):
self.visit(alias.type, lvalue, rvalue)
def visitEnum(self, enum, lvalue, rvalue):
pass
def visitBitmask(self, bitmask, lvalue, rvalue):
pass
def visitArray(self, array, lvalue, rvalue):
print(' const trace::Array *_a%s = (%s).toArray();' % (array.tag, rvalue))
print(' if (_a%s) {' % (array.tag))
length = '_a%s->values.size()' % array.tag
index = '_j' + array.tag
print(' for (size_t {i} = 0; {i} < {length}; ++{i}) {{'.format(i = index, length = length))
try:
self.visit(array.type, '%s[%s]' % (lvalue, index), '*_a%s->values[%s]' % (array.tag, index))
finally:
print(' }')
print(' }')
def visitPointer(self, pointer, lvalue, rvalue):
print(' const trace::Array *_a%s = (%s).toArray();' % (pointer.tag, rvalue))
print(' if (_a%s) {' % (pointer.tag))
try:
self.visit(pointer.type, '%s[0]' % (lvalue,), '*_a%s->values[0]' % (pointer.tag,))
finally:
print(' }')
def visitIntPointer(self, pointer, lvalue, rvalue):
pass
def visitObjPointer(self, pointer, lvalue, rvalue):
print(r' retrace::addObj(call, %s, %s);' % (rvalue, lvalue))
def visitLinearPointer(self, pointer, lvalue, rvalue):
        assert pointer.size is not None
        print(r'    retrace::addRegion(call, (%s).toUIntPtr(), %s, %s);' % (rvalue, lvalue, pointer.size))
def visitReference(self, reference, lvalue, rvalue):
pass
def visitHandle(self, handle, lvalue, rvalue):
print(' %s _origResult;' % handle.type)
        OpaqueValueDeserializer().visit(handle.type, '_origResult', rvalue)
if handle.range is None:
rvalue = "_origResult"
entry = lookupHandle(handle, rvalue, True)
if (entry.startswith('_program_map') or entry.startswith('_shader_map')):
print('if (glretrace::supportsARBShaderObjects) {')
print(' _handleARB_map[%s] = %s;' % (rvalue, lvalue))
print('} else {')
print(' %s = %s;' % (entry, lvalue))
print('}')
else:
print(" %s = %s;" % (entry, lvalue))
if entry.startswith('_textureHandle_map') or entry.startswith('_imageHandle_map'):
print(' if (%s != %s) {' % (rvalue, lvalue))
print(' std::cout << "Bindless handle doesn\'t match, GPU failures ahead.\\n";')
print(' }')
print(' if (retrace::verbosity >= 2) {')
print(' std::cout << "{handle.name} " << {rvalue} << " -> " << {lvalue} << "\\n";'.format(**locals()))
print(' }')
else:
i = '_h' + handle.tag
lvalue = "%s + %s" % (lvalue, i)
rvalue = "_origResult + %s" % (i,)
entry = lookupHandle(handle, rvalue)
print(' for ({handle.type} {i} = 0; {i} < {handle.range}; ++{i}) {{'.format(**locals()))
print(' {entry} = {lvalue};'.format(**locals()))
print(' if (retrace::verbosity >= 2) {')
print(' std::cout << "{handle.name} " << ({rvalue}) << " -> " << ({lvalue}) << "\\n";'.format(**locals()))
print(' }')
print(' }')
def visitBlob(self, blob, lvalue, rvalue):
pass
def visitString(self, string, lvalue, rvalue):
pass
seq = 0
def visitStruct(self, struct, lvalue, rvalue):
tmp = '_s_' + struct.tag + '_' + str(self.seq)
self.seq += 1
print(' const trace::Struct *%s = (%s).toStruct();' % (tmp, rvalue))
print(' assert(%s);' % (tmp,))
print(' (void)%s;' % (tmp,))
for i in range(len(struct.members)):
member = struct.members[i]
self.visitMember(member, lvalue, '*%s->members[%s]' % (tmp, i))
def visitPolymorphic(self, polymorphic, lvalue, rvalue):
assert polymorphic.defaultType is not None
self.visit(polymorphic.defaultType, lvalue, rvalue)
def visitOpaque(self, opaque, lvalue, rvalue):
pass
class Retracer:
def makeFunctionId(self, function):
name = function.name
if function.overloaded:
# TODO: Use a sequence number
name += '__%08x' % (hash(function) & 0xffffffff)
return name
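    # For example (hypothetical name and hash value), an overloaded "Draw"
    # could become "Draw__0002af31": the suffix is the low 32 bits of
    # Python's hash() of the function object, so it is only stable within a
    # single generator run.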
def retraceFunction(self, function):
print('static void retrace_%s(trace::Call &call) {' % self.makeFunctionId(function))
self.retraceFunctionBody(function)
print('}')
print()
def retraceInterfaceMethod(self, interface, method):
print('static void retrace_%s__%s(trace::Call &call) {' % (interface.name, self.makeFunctionId(method)))
self.retraceInterfaceMethodBody(interface, method)
print('}')
print()
def retraceFunctionBody(self, function):
assert function.sideeffects
if function.type is not stdapi.Void:
self.checkOrigResult(function)
self.deserializeArgs(function)
self.overrideArgs(function)
self.declareRet(function)
self.invokeFunction(function)
self.swizzleValues(function)
def overrideArgs(self, function):
pass
def retraceInterfaceMethodBody(self, interface, method):
assert method.sideeffects
if method.type is not stdapi.Void:
self.checkOrigResult(method)
self.deserializeThisPointer(interface)
self.deserializeArgs(method)
self.declareRet(method)
self.invokeInterfaceMethod(interface, method)
self.swizzleValues(method)
def checkOrigResult(self, function):
        '''Hook for checking the original result, to prevent succeeding now
        where the original did not, which would cause divergence and
        potentially unpredictable results.'''
assert function.type is not stdapi.Void
if str(function.type) == 'HRESULT':
print(r' if (call.ret && FAILED(call.ret->toSInt())) {')
print(r' return;')
print(r' }')
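    # Illustration, assembled from the prints above: for an HRESULT function
    # the generated retrace code is shaped like
    #   if (call.ret && FAILED(call.ret->toSInt())) {
    #       return;
    #   }
    # so a call that failed at trace time is skipped instead of diverging.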
def deserializeThisPointer(self, interface):
print(r' %s *_this;' % (interface.name,))
print(r' _this = retrace::asObjPointer<%s>(call, call.arg(0));' % (interface.name,))
print(r' if (!_this) {')
print(r' return;')
print(r' }')
def deserializeArgs(self, function):
print(' retrace::ScopedAllocator _allocator;')
print(' (void)_allocator;')
success = True
for arg in function.args:
arg_type = arg.type.mutable()
print(' %s %s;' % (arg_type, arg.name))
rvalue = 'call.arg(%u)' % (arg.index,)
lvalue = arg.name
try:
self.extractArg(function, arg, arg_type, lvalue, rvalue)
except UnsupportedType:
success = False
print(' memset(&%s, 0, sizeof %s); // FIXME' % (arg.name, arg.name))
print()
if not success:
print(' if (1) {')
self.failFunction(function)
sys.stderr.write('warning: unsupported %s call\n' % function.name)
print(' }')
def swizzleValues(self, function):
for arg in function.args:
if arg.output:
arg_type = arg.type.mutable()
rvalue = 'call.arg(%u)' % (arg.index,)
lvalue = arg.name
try:
                    self.registerSwizzledValue(arg_type, lvalue, rvalue)
except UnsupportedType:
print(' // XXX: %s' % arg.name)
if function.type is not stdapi.Void:
rvalue = '*call.ret'
lvalue = '_result'
            try:
                self.registerSwizzledValue(function.type, lvalue, rvalue)
            except UnsupportedType:
                raise
def failFunction(self, function):
print(' if (retrace::verbosity >= 0) {')
print(' retrace::unsupported(call);')
print(' }')
print(' return;')
def extractArg(self, function, arg, arg_type, lvalue, rvalue):
ValueAllocator().visit(arg_type, lvalue, rvalue)
if arg.input:
ValueDeserializer().visit(arg_type, lvalue, rvalue)
def extractOpaqueArg(self, function, arg, arg_type, lvalue, rvalue):
try:
ValueAllocator().visit(arg_type, lvalue, rvalue)
except UnsupportedType:
pass
OpaqueValueDeserializer().visit(arg_type, lvalue, rvalue)
    def registerSwizzledValue(self, type, lvalue, rvalue):
visitor = SwizzledValueRegistrator()
visitor.visit(type, lvalue, rvalue)
def declareRet(self, function):
if function.type is not stdapi.Void:
print(' %s _result;' % (function.type))
    def invokeFunction(self, function):
        self.doInvokeFunction(function)
        if function.type is not stdapi.Void:
            self.checkResult(None, function)
    def doInvokeFunction(self, function):
        # Same as invokeFunction, but without error checking
        #
        # XXX: Find a better name
        arg_names = ", ".join(function.argNames())
        if function.type is not stdapi.Void:
            print('    _result = %s(%s);' % (function.name, arg_names))
        else:
            print('    %s(%s);' % (function.name, arg_names))
def doInvokeInterfaceMethod(self, interface, method):
# Same as invokeInterfaceMethod, but without error checking
#
# XXX: Find a better name
arg_names = ", ".join(method.argNames())
if method.type is not stdapi.Void:
print(' _result = _this->%s(%s);' % (method.name, arg_names))
else:
print(' _this->%s(%s);' % (method.name, arg_names))
        # Adjust the reference count when QueryInterface fails. This is
        # particularly useful when replaying traces on older Direct3D runtimes
        # which may lack newer versions of interfaces, yet none of those
        # interfaces' methods are actually used.
#
# TODO: Generalize to other methods that return interfaces
if method.name == 'QueryInterface':
print(r' if (FAILED(_result)) {')
print(r' IUnknown *pObj = retrace::asObjPointer<IUnknown>(call, *call.arg(2).toArray()->values[0]);')
print(r' if (pObj) {')
print(r' pObj->AddRef();')
print(r' }')
print(r' }')
# Debug COM reference counting. Disabled by default as reported
# reference counts depend on internal implementation details.
if method.name in ('AddRef', 'Release'):
print(r' if (0) retrace::checkMismatch(call, "cRef", call.ret, _result);')
        # Only release our reference when we reach the Release() == 0 call in
        # the trace.
if method.name == 'Release':
print(r' ULONG _orig_result = call.ret->toUInt();')
print(r' if (_orig_result == 0 || _result == 0) {')
print(r' if (_orig_result != 0) {')
print(r' retrace::warning(call) << "unexpected object destruction\n";')
print(r' }')
print(r' // NOTE: Must not delete the object mapping here. See')
print(r' // https://github.com/apitrace/apitrace/issues/462')
print(r' }')
def invokeInterfaceMethod(self, interface, method):
self.doInvokeInterfaceMethod(interface, method)
if method.type is not stdapi.Void:
self.checkResult(interface, method)
def checkResult(self, interface, methodOrFunction):
assert methodOrFunction.type is not stdapi.Void
if str(methodOrFunction.type) == 'HRESULT':
print(r' if (FAILED(_result)) {')
print(r' retrace::failed(call, _result);')
self.handleFailure(interface, methodOrFunction)
print(r' }')
else:
print(r' (void)_result;')
def handleFailure(self, interface, methodOrFunction):
print(r' return;')
def checkPitchMismatch(self, method):
# Warn for mismatches in 2D/3D mappings.
# FIXME: We should try to swizzle them. It's a bit of work, but possible.
for outArg in method.args:
if outArg.output \
and isinstance(outArg.type, stdapi.Pointer) \
and isinstance(outArg.type.type, stdapi.Struct):
print(r' const trace::Array *_%s = call.arg(%u).toArray();' % (outArg.name, outArg.index))
print(r' if (%s) {' % outArg.name)
print(r' const trace::Struct *_struct = _%s->values[0]->toStruct();' % (outArg.name))
print(r' if (_struct) {')
struct = outArg.type.type
for memberIndex in range(len(struct.members)):
memberType, memberName = struct.members[memberIndex]
if memberName.endswith('Pitch'):
print(r' if (%s->%s) {' % (outArg.name, memberName))
print(r' retrace::checkMismatch(call, "%s", _struct->members[%u], %s->%s);' % (memberName, memberIndex, outArg.name, memberName))
print(r' }')
print(r' }')
print(r' }')
def filterFunction(self, function):
return True
table_name = 'retrace::callbacks'
def retraceApi(self, api):
print('#include "os_time.hpp"')
print('#include "trace_parser.hpp"')
print('#include "retrace.hpp"')
print('#include "retrace_swizzle.hpp"')
print()
types = api.getAllTypes()
handles = [type for type in types if isinstance(type, stdapi.Handle)]
handle_names = set()
for handle in handles:
if handle.name not in handle_names:
if handle.key is None:
print('static retrace::map<%s> _%s_map;' % (handle.type, handle.name))
else:
key_name, key_type = handle.key
print('static std::map<%s, retrace::map<%s> > _%s_map;' % (key_type, handle.type, handle.name))
handle_names.add(handle.name)
print()
functions = list(filter(self.filterFunction, api.getAllFunctions()))
for function in functions:
if function.sideeffects and not function.internal:
self.retraceFunction(function)
interfaces = api.getAllInterfaces()
for interface in interfaces:
for method in interface.methods:
if method.sideeffects and not method.internal:
self.retraceInterfaceMethod(interface, method)
print('const retrace::Entry %s[] = {' % self.table_name)
for function in functions:
if not function.internal:
sigName = function.sigName()
if function.sideeffects:
print(' {"%s", &retrace_%s},' % (sigName, self.makeFunctionId(function)))
else:
print(' {"%s", &retrace::ignore},' % (sigName,))
for interface in interfaces:
for base, method in interface.iterBaseMethods():
sigName = method.sigName()
if method.sideeffects:
print(' {"%s::%s", &retrace_%s__%s},' % (interface.name, sigName, base.name, self.makeFunctionId(method)))
else:
print(' {"%s::%s", &retrace::ignore},' % (interface.name, sigName))
print(' {NULL, NULL}')
print('};')
print()
|
apitrace/apitrace
|
retrace/retrace.py
|
Python
|
mit
| 26,739
|
[
"VisIt"
] |
c7bae1f6ab79297373cbae3eec0d97535f8bef93ee7d4d477663be0e8ca191f0
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2013-2014 CRS4.
#
# This file is part of vispa.
#
# vispa is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# vispa is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# vispa. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
def al2seq(result, seq_len):
query_start = int(result[6])
query_end = int(result[7])
return abs(query_start - query_end) / float(seq_len)
score = lambda result: float(result[-1])
def is_repeat(seq_len, blast_results, min_al2seq=.15, min_score_diff=20.):
"""
Mark a repeat according to tiget rules (FIXME: summarize).
NOTE: sorts ``blast_results`` by decreasing score.
:type seq_len: int
:param seq_len: length of the query sequence
:type blast_results: list
:param blast_results: list of tabular blast hits as lists of strings
"""
blast_results.sort(key=score, reverse=True)
try:
r1 = blast_results[0]
except IndexError:
return None # no match
try:
r2 = blast_results[1]
except IndexError:
return False
if abs(al2seq(r1, seq_len) - al2seq(r2, seq_len)) <= min_al2seq:
return True
if score(r1) - score(r2) <= min_score_diff:
return True
return False
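# Illustrative usage (hypothetical BLAST hits; tabular fields with query
# start/end at indices 6/7 and the bit score as the last field, matching the
# assumptions of al2seq() and score() above):
if __name__ == "__main__":
    _hits = [
        "q1 s1 98.0 100 2 0 1 100 1 100 1e-50 185.0".split(),
        "q1 s2 97.0 100 3 0 1 100 5 104 1e-48 180.0".split(),
    ]
    # both alignments cover 99% of a 100 bp query, so the first check
    # (difference in coverage <= min_al2seq) already flags a repeat
    print(is_repeat(100, _hits))  # True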
|
crs4/vispa
|
bl/tiget/repeats.py
|
Python
|
gpl-3.0
| 1,643
|
[
"BLAST"
] |
efdcbc696cef1f4265d02ce2efcff91accc0b1ca4a31fca80b2bf16375b1c1ca
|
from __future__ import annotations
from scitbx import matrix
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationMixin,
CrystalUnitCellMixin,
)
from dials.algorithms.refinement.parameterisation.scan_varying_model_parameters import (
GaussianSmoother,
ScanVaryingModelParameterisation,
ScanVaryingParameterSet,
)
from dials.algorithms.refinement.refinement_helpers import CrystalOrientationCompose
class ScanVaryingCrystalOrientationParameterisation(
ScanVaryingModelParameterisation, CrystalOrientationMixin
):
"""Scan-varying parameterisation for crystal orientation, with angles
expressed in mrad"""
def __init__(self, crystal, t_range, num_intervals, experiment_ids=None):
if experiment_ids is None:
experiment_ids = [0]
# The state of a scan varying crystal orientation parameterisation
# is an orientation
# matrix '[U](t)', expressed as a function of image number 't'
# in a sequential scan.
#
# The initial state is a snapshot of the crystal orientation
# at the point of initialisation '[U0]', which is independent of
# image number.
#
# Future states are composed by
# rotations around axes of the phi-axis frame by Tait-Bryan angles.
#
# [U](t) = [Phi3](t)[Phi2](t)[Phi1](t)[U0]
# Set up the smoother
smoother = GaussianSmoother(t_range, num_intervals)
nv = smoother.num_values()
# Set up the initial state
istate = matrix.sqr(crystal.get_U())
self._U_at_t = istate
# Factory function to provide to _build_p_list
def parameter_type(value, axis, ptype, name):
return ScanVaryingParameterSet(value, nv, axis, ptype, name)
# Build the parameter list
p_list = self._build_p_list(parameter_type)
# Set up the base class
ScanVaryingModelParameterisation.__init__(
self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
)
return
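    # Symbolic sketch of the composition described above (not executable
    # against the real scitbx/flex types): at image number t,
    #   U(t) = R3(phi3(t)) * R2(phi2(t)) * R1(phi1(t)) * U0
    # where each phi_i(t) is evaluated by the Gaussian smoother.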
def compose(self, t):
"""calculate state and derivatives for model at image number t"""
# Extract orientation from the initial state
U0 = self._initial_state
# extract parameter sets from the internal list
phi1_set, phi2_set, phi3_set = self._param
# extract angles and other data at time t using the smoother
phi1, phi1_weights, phi1_sumweights = self._smoother.value_weight(t, phi1_set)
phi2, phi2_weights, phi2_sumweights = self._smoother.value_weight(t, phi2_set)
phi3, phi3_weights, phi3_sumweights = self._smoother.value_weight(t, phi3_set)
# calculate derivatives of angles wrt underlying parameters.
dphi1_dp = phi1_weights * (1.0 / phi1_sumweights)
dphi2_dp = phi2_weights * (1.0 / phi2_sumweights)
dphi3_dp = phi3_weights * (1.0 / phi3_sumweights)
# calculate state and derivatives using the helper class
coc = CrystalOrientationCompose(
U0, phi1, phi1_set.axis, phi2, phi2_set.axis, phi3, phi3_set.axis
)
self._U_at_t = coc.U()
dU_dphi1 = coc.dU_dphi1()
dU_dphi2 = coc.dU_dphi2()
dU_dphi3 = coc.dU_dphi3()
# calculate derivatives of state wrt underlying parameters
dU_dp1 = [None] * dphi1_dp.size
for (i, v) in dphi1_dp:
dU_dp1[i] = dU_dphi1 * v
dU_dp2 = [None] * dphi2_dp.size
for (i, v) in dphi2_dp:
dU_dp2[i] = dU_dphi2 * v
dU_dp3 = [None] * dphi3_dp.size
for (i, v) in dphi3_dp:
dU_dp3[i] = dU_dphi3 * v
# store derivatives as list-of-lists
self._dstate_dp = [dU_dp1, dU_dp2, dU_dp3]
return
def get_state(self):
"""Return crystal orientation matrix [U] at image number t"""
# only a single crystal is parameterised here, so no multi_state_elt
# argument is allowed
return self._U_at_t
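    # Minimal numeric sketch of the weighting used in compose() above
    # (plain-Python stand-ins for the sparse flex vectors are an assumption):
    #   weights, sumweights = [0.25, 0.5, 0.25], 1.0
    #   dphi_dp = [w * (1.0 / sumweights) for w in weights]  # [0.25, 0.5, 0.25]
    # i.e. the derivative of a smoothed angle w.r.t. each underlying
    # parameter is just its normalised smoother weight.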
class ScanVaryingCrystalUnitCellParameterisation(
ScanVaryingModelParameterisation, CrystalUnitCellMixin
):
"""Scan-varying parameterisation for the crystal unit cell"""
def __init__(
self,
crystal,
t_range,
num_intervals,
experiment_ids=None,
set_state_uncertainties=False,
):
self._set_state_uncertainties = set_state_uncertainties
if experiment_ids is None:
experiment_ids = [0]
# The state of a scan-varying unit cell parameterisation is the
# reciprocal space orthogonalisation matrix '[B](t)', expressed as a
# function of image number 't' in a sequential scan.
# Other comments from CrystalUnitCellParameterisation are relevant here
# Set up the smoother
smoother = GaussianSmoother(t_range, num_intervals)
nv = smoother.num_values()
# Set up the initial state
istate = None
self._B_at_t = matrix.sqr(crystal.get_B())
# Factory function to provide to _build_p_list
def parameter_type(value, name):
return ScanVaryingParameterSet(value, nv, name=name)
# Build the parameter list
p_list = self._build_p_list(crystal, parameter_type)
# Set up the base class
ScanVaryingModelParameterisation.__init__(
self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
)
return
def compose(self, t):
"""calculate state and derivatives for model at image number t"""
# extract values and weights at time t using the smoother
vals, weights, sumweights = zip(
*(self._smoother.value_weight(t, pset) for pset in self._param)
)
# calculate derivatives of metrical matrix parameters wrt underlying
# scan-varying parameters
inv_sumw = [1.0 / sw for sw in sumweights]
dvals_dp = [e * isw for e, isw in zip(weights, inv_sumw)]
# calculate new B and derivatives
self._B_at_t, dB_dval = self._compose_core(vals)
        # calculate derivatives of state wrt underlying parameters; the
        # sparse smoother weights mean most entries stay None
        self._dstate_dp = [[None] * e.size for e in dvals_dp]
        for i, (dv, dB) in enumerate(zip(dvals_dp, dB_dval)):
            for j, e in dv:
                self._dstate_dp[i][j] = e * dB
return
def get_state(self):
"""Return crystal orthogonalisation matrix [B] at image number t"""
# only a single crystal is parameterised here, so no multi_state_elt
# argument is allowed
return self._B_at_t
def set_state_uncertainties(self, var_cov_list):
"""Send the calculated variance-covariance of the elements of the B matrix
for all scan points back to the crystal model, if required
"""
if not self._set_state_uncertainties:
return
# Convert list of 9*9 matrices to a 3d array
from scitbx.array_family import flex
B_cov = flex.double(flex.grid(len(var_cov_list), 9, 9))
for i, v in enumerate(var_cov_list):
v = v.as_flex_double_matrix()
v.reshape(flex.grid(1, 9, 9))
B_cov[i : (i + 1), :, :] = v
# Pass it back to the model
self._model.set_B_covariance_at_scan_points(B_cov)
|
dials/dials
|
algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py
|
Python
|
bsd-3-clause
| 7,512
|
[
"CRYSTAL"
] |
3de3d350ae42d1e3cc1eed7e4ad66e2087c825bae90ee4015dfc3ab8c3953779
|
"""
Tests for elliptical Gaussian fitting code in the TKP pipeline.
"""
import unittest
import numpy
from sourcefinder.extract import source_profile_and_errors
from sourcefinder.fitting import moments, fitgaussian, FIT_PARAMS
from sourcefinder.gaussian import gaussian
# The routines being tested often require information about the resolution
# element: (semimajor (pixels), semiminor (pixels), beam position angle (radians)).
# Please enter some reasonable restoring beam here.
beam = (2.5, 2., 0.5)
class SimpleGaussTest(unittest.TestCase):
"""Generic, easy-to-fit elliptical Gaussian"""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.height = 10
self.x = 250
self.y = 250
self.maj = 40
self.min = 20
self.theta = 0
self.mygauss = numpy.ma.array(
gaussian(self.height, self.x, self.y, self.maj, self.min,
self.theta)(Xin, Yin))
self.moments = moments(self.mygauss, beam, 0)
self.fit = fitgaussian(self.mygauss, self.moments)
def testHeight(self):
self.assertEqual(self.mygauss.max(), self.height)
def testFitHeight(self):
self.assertAlmostEqual(self.fit["peak"], self.height)
def testMomentPosition(self):
self.assertAlmostEqual(self.moments["xbar"], self.x)
self.assertAlmostEqual(self.moments["ybar"], self.y)
def testFitPosition(self):
self.assertAlmostEqual(self.fit["xbar"], self.x)
self.assertAlmostEqual(self.fit["ybar"], self.y)
def testMomentSize(self):
self.assertAlmostEqual(self.moments["semimajor"], self.maj, 3)
self.assertAlmostEqual(self.moments["semiminor"], self.min, 3)
def testFitSize(self):
self.assertAlmostEqual(self.fit["semimajor"], self.maj)
self.assertAlmostEqual(self.fit["semiminor"], self.min)
def testMomentAngle(self):
self.assertAlmostEqual(self.moments["theta"], self.theta)
def testFitAngle(self):
self.assertAlmostEqual(self.fit["theta"], self.theta)
class NegativeGaussTest(SimpleGaussTest):
"""Negative Gaussian"""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.height = -10
self.x = 250
self.y = 250
self.maj = 40
self.min = 20
self.theta = 0
self.mygauss = numpy.ma.array(
gaussian(self.height, self.x, self.y, self.maj, self.min,
self.theta)(Xin, Yin))
self.moments = moments(self.mygauss, beam, 0)
self.fit = fitgaussian(self.mygauss, self.moments)
def testHeight(self):
self.assertEqual(self.mygauss.min(), self.height)
class CircularGaussTest(SimpleGaussTest):
"""Circular Gaussian: it makes no sense to measure a rotation angle"""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.height = 10
self.x = 250
self.y = 250
self.maj = 40
self.min = 40
self.theta = 0
self.mygauss = numpy.ma.array(
gaussian(self.height, self.x, self.y, self.maj, self.min,
self.theta)(Xin, Yin))
self.moments = moments(self.mygauss, beam, 0)
self.fit = fitgaussian(self.mygauss, self.moments)
def testMomentAngle(self):
pass
def testFitAngle(self):
pass
class NarrowGaussTest(SimpleGaussTest):
"""Only 1 pixel wide"""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.height = 10
self.x = 250
self.y = 250
self.maj = 40
self.min = 1
self.theta = 0
self.mygauss = numpy.ma.array(gaussian(
self.height, self.x, self.y, self.maj, self.min, self.theta)(Xin,
Yin))
self.moments = moments(self.mygauss, beam, 0)
self.fit = fitgaussian(self.mygauss, self.moments)
class RotatedGaussTest(SimpleGaussTest):
"""Rotated by an angle < pi/2"""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.height = 10
self.x = 250
self.y = 250
self.maj = 40
self.min = 20
self.theta = numpy.pi / 4
self.mygauss = numpy.ma.array(gaussian(
self.height, self.x, self.y, self.maj, self.min, self.theta)(Xin,
Yin))
self.moments = moments(self.mygauss, beam, 0)
self.fit = fitgaussian(self.mygauss, self.moments)
def testMomentAngle(self):
self.assertAlmostEqual(self.moments["theta"], self.theta)
class RotatedGaussTest2(SimpleGaussTest):
"""Rotated by an angle > pi/2; theta becomes negative"""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.height = 10
self.x = 250
self.y = 250
self.maj = 40
self.min = 20
self.theta = 3 * numpy.pi / 4
self.mygauss = numpy.ma.array(gaussian(
self.height, self.x, self.y, self.maj, self.min, self.theta)(Xin,
Yin))
self.moments = moments(self.mygauss, beam, 0)
self.fit = fitgaussian(self.mygauss, self.moments)
def testMomentAngle(self):
self.assertAlmostEqual(self.moments["theta"], self.theta - numpy.pi)
def testFitAngle(self):
self.assertAlmostEqual(self.fit["theta"], self.theta - numpy.pi)
class AxesSwapGaussTest(SimpleGaussTest):
"""We declare the axes the wrong way round: the fit should reverse them &
change the angle"""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.height = 10
self.x = 250
self.y = 250
self.maj = 20
self.min = 40
self.theta = 0
self.mygauss = numpy.ma.array(gaussian(
self.height, self.x, self.y, self.maj, self.min, self.theta)(Xin,
Yin))
self.moments = moments(self.mygauss, beam, 0)
self.fit = fitgaussian(self.mygauss, self.moments)
def testMomentAngle(self):
theta = self.moments["theta"]
# Numpy 1.6 and 1.9 return -pi/2 and +pi/2, respectively.
# Presumably there's some numerical quirk causing different,
# but equivalent, convergence in the optimization.
if theta < 0:
theta = theta + numpy.pi
self.assertAlmostEqual(theta, numpy.pi / 2)
def testFitAngle(self):
theta = self.fit["theta"]
# Numpy 1.6 and 1.9 return -pi/2 and +pi/2, respectively.
# Presumably there's some numerical quirk causing different,
# but equivalent, convergence in the optimization.
if theta < 0:
theta = theta + numpy.pi
self.assertAlmostEqual(theta, numpy.pi / 2)
def testMomentSize(self):
self.assertAlmostEqual(self.moments["semiminor"], self.maj, 5)
self.assertAlmostEqual(self.moments["semimajor"], self.min, 5)
def testFitSize(self):
self.assertAlmostEqual(self.fit["semiminor"], self.maj)
self.assertAlmostEqual(self.fit["semimajor"], self.min)
class RandomGaussTest(unittest.TestCase):
"""Should not be possible to fit a Gaussian to random data. You can still
measure moments, though -- things should be fairly evenly distributed."""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.mygauss = numpy.random.random(Xin.shape)
def testMoments(self):
try:
moments(self.mygauss, beam, 0)
        except Exception:
self.fail('Moments method failed to run.')
class NoisyGaussTest(unittest.TestCase):
"""Test calculation of chi-sq and fitting in presence of (artificial) noise"""
def setUp(self):
Xin, Yin = numpy.indices((500, 500))
self.peak = 10
self.xbar = 250
self.ybar = 250
self.semimajor = 40
self.semiminor = 20
self.theta = 0
self.mygauss = numpy.ma.array(
gaussian(self.peak, self.xbar, self.ybar,
self.semimajor, self.semiminor, self.theta)(Xin, Yin))
self.moments = moments(self.mygauss, beam, 0)
def test_tiny_pixel_offset(self):
pixel_noise = 0.0001
self.mygauss[self.xbar + 30, self.ybar] += pixel_noise
self.fit = fitgaussian(self.mygauss, self.moments)
for param in FIT_PARAMS:
self.assertAlmostEqual(getattr(self, param), self.fit[param],
places=6)
def test_small_pixel_offset(self):
pixel_noise = 0.001
n_noisy_pix = 2
# Place noisy pix symmetrically about centre to avoid position offset
self.mygauss[self.xbar + 30, self.ybar] += pixel_noise
self.mygauss[self.xbar - 30, self.ybar] += pixel_noise
self.fit = fitgaussian(self.mygauss, self.moments)
for param in FIT_PARAMS:
self.assertAlmostEqual(getattr(self, param), self.fit[param],
places=5)
def test_noisy_background(self):
# Use a fixed random state seed, so unit-test is reproducible:
rstate = numpy.random.RandomState(42)
pixel_noise = 0.5
self.mygauss += rstate.normal(scale=pixel_noise,
size=len(self.mygauss.ravel())).reshape(
self.mygauss.shape)
self.fit = fitgaussian(self.mygauss, self.moments)
self.longMessage = True # Show assertion fail values + given message
# First, let's check we've converged to a reasonable fit in the
# presence of noise:
# print
for param in FIT_PARAMS:
# print param, getattr(self,param), self.fit[param]
self.assertAlmostEqual(getattr(self, param), self.fit[param],
places=1,
msg=param + " misfit (bad random noise seed?)",
)
# Now we run the full error-profiling routine and check the chi-sq
# calculations:
self.fit_w_errs, _ = source_profile_and_errors(
data=self.mygauss,
threshold=0.,
noise=pixel_noise,
beam=(self.semimajor, self.semiminor, self.theta),
)
npix = len(self.mygauss.ravel())
# print "CHISQ", npix, self.fit_w_errs.chisq
# NB: this is the calculation for reduced chisq in presence of
# independent pixels, i.e. uncorrelated noise.
# For real data, we try to take the noise-correlation into account.
# print "Reduced chisq", self.fit_w_errs.chisq / npix
self.assertTrue(0.9 < self.fit_w_errs.chisq / npix < 1.1)
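# Sketch of the reduced chi-squared checked above for uncorrelated noise
# (illustrative formula, not code from this module):
#   chisq_red = sum(((data - model) / pixel_noise) ** 2) / npix
# which should be close to 1 when pixel_noise matches the injected noise,
# hence the 0.9 < chisq / npix < 1.1 assertion.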
|
transientskp/pyse
|
test/test_gaussian.py
|
Python
|
bsd-2-clause
| 10,816
|
[
"Gaussian"
] |
4df13f39de888eeb98ea2637085a0cd85e06ffc694bc96fd150989b24941fef5
|
#!/usr/bin/env python
import argparse
from rdkit import Chem
from rdkit.Chem import rdFMCS
def _process_input():
"""Define and parse the command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-target',
'--target_file',
required=True,
help='Path to the file containing the target SMILES',
nargs='?'
)
parser.add_argument(
'-templates',
'--template_file',
required=True,
help='Path to the file containing the template SMILES',
nargs='?'
)
args = parser.parse_args()
return args
def _read_single_smiles_from_file(filepath):
"""Read single SMILES strings from file"""
with open(filepath) as input_file:
for line in input_file:
smiles, _id = line.split()
break
return smiles
def _read_multi_smiles_from_file(filepath):
"""Read multiple SMILES strings from file"""
smiles = []
with open(filepath) as input_file:
for line in input_file:
_smiles, _id = line.split()
smiles.append(_smiles)
return smiles
def calc_mcs_atoms(mol1, mol2):
"""Finds the maximum common substructure between two molecules"""
mcs = rdFMCS.FindMCS(
(mol1, mol2),
ringMatchesRingOnly=True,
completeRingsOnly=True,
bondCompare=rdFMCS.BondCompare.CompareOrder
)
return mcs.numAtoms
def calc_similarity(target, templates, mcs_areas, tversky_weight=0.8):
"""Calculates tanimoto and tversky coefficients between molecules"""
def _calc_tanimoto_coefficient(mol1_atoms, mol2_atoms, mcs_atoms):
"""Calculate the tanimoto coefficient"""
return ((mcs_atoms) / (mol1_atoms + mol2_atoms - mcs_atoms))
def _calc_tversky_coefficient(mol1_atoms, mol2_atoms, mcs_atoms, weight):
"""Calculate the tversky coefficient"""
weight1 = weight
weight2 = 1 - weight
mol1_weighted = weight1 * (mol1_atoms - mcs_atoms)
mol2_weighted = weight2 * (mol2_atoms - mcs_atoms)
return ((mcs_atoms) / (mol1_weighted + mol2_weighted + mcs_atoms))
target_atoms = target.GetNumAtoms()
tanimoto = []
tversky = []
for template, mcs_area in zip(templates, mcs_areas):
template_atoms = template.GetNumAtoms()
tanimoto.append(_calc_tanimoto_coefficient(
target_atoms,
template_atoms,
mcs_area
))
tversky.append(_calc_tversky_coefficient(
target_atoms,
template_atoms,
mcs_area,
tversky_weight
))
return tanimoto, tversky
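# Worked example of the two coefficients (hypothetical atom counts): a
# 20-atom target, a 30-atom template and a 15-atom MCS give
#   tanimoto = 15 / (20 + 30 - 15) = 15 / 35 ~= 0.429
#   tversky  = 15 / (0.8 * (20 - 15) + 0.2 * (30 - 15) + 15) = 15 / 22 ~= 0.682
# (the default tversky_weight=0.8 biases the comparison towards the target).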
def main():
"""Run everything"""
args = _process_input()
target = _read_single_smiles_from_file(args.target_file)
templates = _read_multi_smiles_from_file(args.template_file)
target_mol = Chem.MolFromSmiles(target)
template_mols = [Chem.MolFromSmiles(_) for _ in templates]
mcs_atoms = [calc_mcs_atoms(target_mol, _) for _ in template_mols]
# print(target_mol.GetNumAtoms(), template_mols[0].GetNumAtoms(), mcs_atoms[0])
tanimoto_sim, tversky_sim = calc_similarity(
target_mol,
template_mols,
mcs_atoms,
)
for tan, tve in zip(tanimoto_sim, tversky_sim):
print(f"{tan:0.3f} {tve:0.3f}")
if __name__ == "__main__":
main()
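# Example invocation (hypothetical file names; each .smi line is expected to
# be "<SMILES> <id>", as parsed by the readers above):
#   python calc_mcs.py -target target.smi -templates templates.smi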
|
amjjbonvin/haddocking.github.io
|
education/HADDOCK24/shape-small-molecule/scripts/calc_mcs.py
|
Python
|
mit
| 3,372
|
[
"RDKit"
] |
185e06419a34ecf2d9813a5954924bce54d996d4dc075065d3b9c69b74c37769
|
# Copyright 2015 SAP AG or an SAP affiliate company.
#
import operator
import re
import decimal
import itertools
import sqlanydb
from sqlalchemy.sql import compiler, expression, text, column, bindparam
from sqlalchemy.engine import default, base, reflection, url
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL, LargeBinary
RESERVED_WORDS = set([
"add", "all", "alter", "and",
"any", "array", "as", "asc", "attach", "backup",
"begin", "between", "bigint", "binary",
"bit", "bottom", "break", "by",
"call", "capability", "cascade", "case",
"cast", "char", "char_convert", "character",
"check", "checkpoint", "close", "comment",
"commit", "compressed", "conflict", "connect", "constraint", "contains",
"continue", "convert", "create", "cross",
"cube", "current", "current_timestamp", "current_user",
"cursor", "date", "datetimeoffse", "dbspace", "deallocate",
"dec", "decimal", "declare", "default",
"delete", "deleting", "desc", "detach", "distinct",
"do", "double", "drop", "dynamic",
"else", "elseif", "encrypted", "end",
"endif", "escape", "except", "exception",
"exec", "execute", "existing", "exists",
"externlogin", "fetch", "first", "float",
"for", "force", "foreign", "forward",
"from", "full", "goto", "grant",
"group", "having", "holdlock", "identified",
"if", "in", "index",
"inner", "inout", "insensitive", "insert",
"inserting", "install", "instead", "int",
"integer", "integrated", "intersect", "into",
"is", "isolation", "join", "json", "kerberos",
"key", "lateral", "left", "like", "limit",
"lock", "login", "long", "match",
"membership", "merge", "message", "mode", "modify",
"natural", "nchar", "new", "no", "noholdlock",
"not", "notify", "null", "numeric", "nvarchar",
"of", "off", "on", "open", "openstring", "openxml",
"option", "options", "or", "order",
"others", "out", "outer", "over",
"passthrough", "precision", "prepare", "primary",
"print", "privileges", "proc", "procedure",
"publication", "raiserror", "readtext", "real",
"reference", "references", "refresh", "release", "remote",
"remove", "rename", "reorganize", "resource",
"restore", "restrict", "return", "revoke",
"right", "rollback", "rollup", "row", "rowtype", "save",
"savepoint", "scroll", "select", "sensitive",
"session", "set", "setuser", "share",
"smallint", "some", "spatial", "sqlcode", "sqlstate",
"start", "stop", "subtrans", "subtransaction",
"synchronize", "table", "temporary",
"then", "time", "timestamp", "tinyint",
"to", "top", "tran", "treat", "trigger",
"truncate", "tsequal", "unbounded", "union",
"unique", "uniqueidentifier", "unknown", "unnest", "unsigned", "update",
"updating", "user", "using", "validate",
"values", "varbinary", "varbit", "varchar", "variable", "varray",
"varying", "view", "wait", "waitfor",
"when", "where", "while", "window",
"with",
"within", "work", "writetext", "xml"
])
class SQLAnyNoPrimaryKeyError(Exception):
""" exception that is raised when trying to load the primary keys for a
table that does not have any columns marked as being a primary key.
As noted in this documentation:
http://docs.sqlalchemy.org/en/latest/faq.html#how-do-i-map-a-table-that-has-no-primary-key
if a table has fully duplicate rows, and has no primary key, it cannot be mapped.
Since we can't tell if a table has rows that are 'supposed' to act like a primary key,
we just throw an exception and hopes the user adds primary keys to the table instead.
"""
def __init__(self, message, table_name):
super(SQLAnyNoPrimaryKeyError, self).__init__(message)
self.table_name = table_name
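# Illustrative use (hypothetical table name):
#   raise SQLAnyNoPrimaryKeyError("no primary key on table 'events'", "events")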
class _SQLAnyUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value)
else:
return None
return process
class UNICHAR(_SQLAnyUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNICHAR'
class UNIVARCHAR(_SQLAnyUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNIVARCHAR'
class UNITEXT(_SQLAnyUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = 'UNITEXT'
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class SQLAnyTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_):
return self.visit_BIT(type_)
def visit_unicode(self, type_):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_):
return "UNITEXT"
def visit_TINYINT(self, type_):
return "TINYINT"
def visit_IMAGE(self, type_):
return "IMAGE"
def visit_BIT(self, type_):
return "BIT"
def visit_MONEY(self, type_):
return "MONEY"
def visit_SMALLMONEY(self, type_):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_):
return "UNIQUEIDENTIFIER"
ischema_names = {
'bigint': BIGINT,
'int': INTEGER,
'integer': INTEGER,
'smallint': SMALLINT,
'tinyint': TINYINT,
'unsigned bigint': BIGINT, # TODO: unsigned flags
'unsigned int': INTEGER, # TODO: unsigned flags
'unsigned smallint': SMALLINT, # TODO: unsigned flags
'numeric': NUMERIC,
'decimal': DECIMAL,
'dec': DECIMAL,
'float': FLOAT,
'double': NUMERIC, # TODO
'double precision': NUMERIC, # TODO
'real': REAL,
'smallmoney': SMALLMONEY,
'money': MONEY,
'smalldatetime': DATETIME,
'datetime': DATETIME,
'date': DATE,
'time': TIME,
'char': CHAR,
'character': CHAR,
'varchar': VARCHAR,
'character varying': VARCHAR,
'char varying': VARCHAR,
'unichar': UNICHAR,
'unicode character': UNIVARCHAR,
'nchar': NCHAR,
'national char': NCHAR,
'national character': NCHAR,
'nvarchar': NVARCHAR,
'nchar varying': NVARCHAR,
'national char varying': NVARCHAR,
'national character varying': NVARCHAR,
'text': TEXT,
'unitext': UNITEXT,
'binary': BINARY,
'varbinary': VARBINARY,
'image': IMAGE,
'bit': BIT,
"long binary": LargeBinary,
# not in documentation for ASE 15.7
'long varchar': TEXT, # TODO
'timestamp': TIMESTAMP,
'uniqueidentifier': UNIQUEIDENTIFIER,
}
# converter function; its only argument is the value returned
# from the database that we want to convert
_decimal_converter = lambda decAsString: decimal.Decimal(decAsString) if decAsString is not None else None
# list of (database type, converter) pairs; sqlanydb calls the converter
# whenever it needs to convert a value of that type
_converter_list = [(sqlanydb.DT_DECIMAL, _decimal_converter)]
# register our converters with sqlanydb
for _dtype, _converter in _converter_list:
    sqlanydb.register_converter(_dtype, _converter)
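# With the converter registered, a DECIMAL column fetched through sqlanydb
# arrives as e.g. decimal.Decimal('12.34') instead of the string '12.34'
# (illustrative value).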
class SQLAnyInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
"""Return the table id from `table_name` and `schema`."""
return self.dialect.get_table_id(self.bind, table_name, schema,
info_cache=self.info_cache)
class SQLAnyExecutionContext(default.DefaultExecutionContext):
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
is True or False. when True, the connection should be configured
such that a DDL can take place subsequently. when False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
pass
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if self.isddl:
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The SQLAny dialect only supports "
"DDL in 'autocommit' mode at this time.")
self.set_ddl_autocommit(
self.root_connection.connection.connection,
True)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class SQLAnySQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond'
})
def get_select_precolumns(self, select, **kw ):
s = "DISTINCT " if select._distinct else ""
if select._limit:
if select._limit == 1:
s += "FIRST "
else:
s += "TOP %s " % select._limit
if select._offset:
if not select._limit:
# SQL Anywhere doesn't allow "start at" without "top n"
s += "TOP ALL "
s += "START AT %s " % (select._offset + 1,)
if s != '':
return s
return compiler.SQLCompiler.get_select_precolumns(
self, select, **kw)
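    # Illustration of the prefixes produced above (hypothetical limit/offset):
    #   limit=1             -> "FIRST "
    #   limit=5             -> "TOP 5 "
    #   limit=5, offset=10  -> "TOP 5 START AT 11 "
    #   offset=10, no limit -> "TOP ALL START AT 11 "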
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in sybase is after the select keyword
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (
field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "NOW()"
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
kw['literal_binds'] = True
order_by = self.process(select._order_by_clause, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
class SQLAnyDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(column.type)
if column.table is None:
raise exc.CompileError(
"The SQLAny dialect requires Table-bound "
"columns in order to generate DDL")
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) \
and column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element,
include_schema=False)
)
class SQLAnyIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SQLAnyDialect(default.DefaultDialect):
name = 'sqlany'
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
supports_multivalues_insert = True
    # If this flag is not set, SQLAlchemy expects a float when dealing with
    # 'Numeric' decimal types, but by default sqlanydb returns them as
    # strings, which confuses SQLAlchemy. So we return them as Decimal
    # objects instead, via a converter function registered with
    # `sqlanydb.register_converter()`.
supports_native_decimal = True
    @classmethod
    def dbapi(cls):
        return sqlanydb
@property
def driver(self):
return sqlanydb
def create_connect_args(self, url):
# get extra options
dialect_opts = dict(url.query)
opts = url.translate_connect_args(username='uid', password='pwd',
database='dbn' )
keys = list(opts.keys())
if 'host' in keys and 'port' in keys:
opts['host'] += ':%s' % opts['port']
del opts['port']
#
opts.update(dialect_opts)
return ([], opts)
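    # Illustration (hypothetical URL): "sqlany://user:secret@dbhost:2638/mydb"
    # translates to {'uid': 'user', 'pwd': 'secret', 'host': 'dbhost:2638',
    # 'dbn': 'mydb'}, which is handed to sqlanydb.connect() as keyword
    # arguments.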
#
colspecs = {}
ischema_names = ischema_names
type_compiler = SQLAnyTypeCompiler
statement_compiler = SQLAnySQLCompiler
ddl_compiler = SQLAnyDDLCompiler
preparer = SQLAnyIdentifierPreparer
inspector = SQLAnyInspector
execution_ctx_cls = SQLAnyExecutionContext
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT current user").columns(column('user_name', Unicode))
)
def initialize(self, connection):
super(SQLAnyDialect, self).initialize(connection)
self.max_identifier_length = 128
VERSION_SQL = text('select @@version')
result = connection.execute(VERSION_SQL)
vers = result.scalar()
        self.server_version_info = tuple(vers.split(' ')[0].split('.'))
def get_table_id(self, connection, table_name, schema=None, **kw):
"""Fetch the id for schema.table_name.
Several reflection methods require the table id. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text("""
SELECT t.table_id AS id
FROM sys.systab t JOIN dbo.sysusers u ON t.creator=u.uid
WHERE u.name = :schema_name
AND t.table_name = :table_name
AND t.table_type in (1, 3, 4, 21)
""")
# Py2K
if isinstance(schema, str):
schema = schema.encode("ascii")
if isinstance(table_name, str):
table_name = table_name.encode("ascii")
# end Py2K
result = connection.execute(TABLEID_SQL,
schema_name=schema,
table_name=table_name)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
COLUMN_SQL = text("""
SELECT col.column_name AS name,
t.domain_name AS type,
if col.nulls ='Y' then 1 else 0 endif AS nullable,
if col."default" = 'autoincrement' then 1 else 0 endif AS autoincrement,
col."default" AS "default",
col.width AS "precision",
col.scale AS scale,
col.width AS length
FROM sys.sysdomain t join sys.systabcol col on t.domain_id=col.domain_id
WHERE col.table_id = :table_id
ORDER BY col.column_id
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (name, type_, nullable, autoincrement, default, precision, scale,
length) in results:
col_info = self._get_column_info(name, type_, bool(nullable),
bool(autoincrement), default, precision, scale,
length)
columns.append(col_info)
return columns
def _get_column_info(self, name, type_, nullable, autoincrement, default,
precision, scale, length):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
#is this necessary
#if is_array:
# coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
coltype = sqltypes.NULLTYPE
if default:
default = re.sub("DEFAULT", "", default).strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text("""
SELECT c.column_id AS id, c.column_name AS name
FROM sys.systabcol c
WHERE c.table_id = :table_id
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text("""
SELECT fk.foreign_index_id, i.index_name AS name, pt.table_id AS reftable_id
FROM sys.sysfkey fk
join sys.systab pt on fk.primary_table_id = pt.table_id
join sys.sysidx i on i.table_id=fk.primary_table_id
WHERE fk.foreign_table_id = :table_id
and i.index_category=2
""")
referential_constraints = connection.execute(REFCONSTRAINT_SQL,
table_id=table_id)
REFTABLE_SQL = text("""
SELECT t.table_name AS name, u.name AS "schema"
FROM sys.systab t JOIN dbo.sysusers u ON t.creator = u.uid
WHERE t.table_id = :table_id
""")
for r in referential_constraints:
reftable_id = r["reftable_id"]
foreign_index_id = r["foreign_index_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (schema is not None or
reftable["schema"] != self.default_schema_name):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
REFCOLS_SQL = text("""SELECT
ic.column_id as fokey,
pic.column_id as refkey
FROM sys.sysfkey fk
join sys.sysidxcol ic on (fk.foreign_index_id=ic.index_id and fk.foreign_table_id=ic.table_id)
join sys.sysidxcol pic on (fk.primary_index_id=pic.index_id and fk.primary_table_id=pic.table_id)
WHERE fk.primary_table_id = :reftable_id
and fk.foreign_table_id = :table_id
and fk.foreign_index_id = :foreign_index_id
""")
ref_cols = connection.execute(REFCOLS_SQL,
table_id=table_id,
reftable_id=reftable_id,
foreign_index_id=foreign_index_id)
for rc in ref_cols:
constrained_columns.append(columns[rc["fokey"]])
referred_columns.append(reftable_columns[rc["refkey"]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"]
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
# index_category=3 -> not primary key, not foreign key, not text index
# unique=1 -> unique index, 2 -> unique constraint, 5->unique index with
# nulls not distinct
INDEX_SQL = text("""
SELECT i.index_id as index_id, i.index_name AS name,
if i."unique" in (1,2,5) then 1 else 0 endif AS "unique"
FROM sys.sysidx i join sys.systab t on i.table_id=t.table_id
WHERE t.table_id = :table_id and i.index_category = 3
""")
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
INDEXCOL_SQL = text("""
select tc.column_name as col
FROM sys.sysidxcol ic
join sys.systabcol tc on (ic.table_id=tc.table_id and ic.column_id=tc.column_id)
WHERE ic.index_id = :index_id and ic.table_id = :table_id
ORDER BY ic.sequence ASC
""")
idx_cols = connection.execute(INDEXCOL_SQL, index_id=r["index_id"],
table_id=table_id)
column_names = [ic["col"] for ic in idx_cols]
index_info = {"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
# index_category=1 -> primary key
PK_SQL = text("""
SELECT t.table_name AS table_name, i.index_id as index_id,
i.index_name AS name
FROM sys.sysidx i join sys.systab t on i.table_id=t.table_id
WHERE t.table_id = :table_id and i.index_category = 1
""")
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
if not pks:
return {"constrained_columns": [],
"name": None}
PKCOL_SQL = text("""
select tc.column_name as col
FROM sys.sysidxcol ic
join sys.systabcol tc on (ic.table_id=tc.table_id and ic.column_id=tc.column_id)
WHERE ic.index_id = :index_id and ic.table_id = :table_id
""")
pk_cols = connection.execute(PKCOL_SQL, index_id=pks["index_id"],
table_id=table_id )
column_names = [pkc["col"] for pkc in pk_cols]
return {"constrained_columns": column_names,
"name": pks["name"]}
@reflection.cache
def get_unique_constraints(self, connection, table_name, schema=None, **kw):
# Same as get_indexes except only for "unique"=2
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
# unique=2 -> unique constraint
INDEX_SQL = text("""
SELECT i.index_id as index_id, i.index_name AS name
FROM sys.sysidx i join sys.systab t on i.table_id=t.table_id
WHERE t.table_id = :table_id and i.index_category = 3 and i."unique"=2
""")
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
INDEXCOL_SQL = text("""
select tc.column_name as col
FROM sys.sysidxcol ic
join sys.systabcol tc on (ic.table_id=tc.table_id and ic.column_id=tc.column_id)
WHERE ic.index_id = :index_id and ic.table_id = :table_id
ORDER BY ic.sequence ASC
""")
idx_cols = connection.execute(INDEXCOL_SQL, index_id=r["index_id"],
table_id=table_id)
column_names = [ic["col"] for ic in idx_cols]
index_info = {"name": r["name"],
"column_names": column_names}
indexes.append(index_info)
return indexes
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM dbo.sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text("""
SELECT t.table_name AS name
FROM sys.systab t JOIN dbo.sysusers u ON t.creator = u.uid
WHERE u.name = :schema_name and table_type <> 21
""")
# Py2K
if isinstance(schema, str):
schema = schema.encode("ascii")
# end Py2K
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text("""
SELECT v.view_def as text
FROM sys.sysview v JOIN sys.sysobject o ON v.view_object_id = o.object_id
join sys.systab t on o.object_id=t.object_id
WHERE t.table_name = :view_name
AND t.table_type = 21
""")
# Py2K
if isinstance(view_name, str):
view_name = view_name.encode("ascii")
# end Py2K
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text("""
SELECT t.table_name AS name
FROM sys.systab t JOIN dbo.sysusers u ON t.creator = u.uid
WHERE u.name = :schema_name
AND t.table_type = 21
""")
# Py2K
if isinstance(schema, str):
schema = schema.encode("ascii")
# end Py2K
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
def is_disconnect(self, e, connection, cursor):
"""
Signal to SQLAlchemy whether *e* indicates that *connection* is
broken and the pool needs to be recycled.
"""
if isinstance(e, sqlanydb.OperationalError):
return e.args[1] in (-101, -308,)
return False
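# Editor's sketch (not in the original module): exercising the reflection
# methods above through SQLAlchemy's Inspector. The URL scheme assumes this
# package is installed and registered as the "sqlalchemy_sqlany" dialect;
# credentials, host and table name are placeholders.
if __name__ == '__main__':
    from sqlalchemy import create_engine, inspect
    engine = create_engine('sqlalchemy_sqlany://dba:sql@localhost/demo')
    insp = inspect(engine)
    print(insp.get_schema_names())             # -> get_schema_names above
    print(insp.get_table_names(schema='dba'))  # -> get_table_names above
    print(insp.get_indexes('my_table'))        # -> get_indexes above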
|
sqlanywhere/sqlalchemy-sqlany
|
sqlalchemy_sqlany/base.py
|
Python
|
apache-2.0
| 30,247
|
[
"ASE"
] |
803c5169206a63069962ff5721754fc34b12bac053e25963dd124cc29a08e0ed
|
# transform_counts_with_normalization_factor/transform_counts_with_normalization_factor.py - a self annotated version of DockerToolFactory.py generated by running DockerToolFactory.py
# to make a new Galaxy tool called transform counts with normalization factor
# User m.vandenbeek@gmail.com at 14/01/2015 21:18:10
# DockerToolFactory.py
# see https://bitbucket.org/mvdbeek/DockerToolFactory
import sys
import shutil
import subprocess
import os
import time
import tempfile
import argparse
import tarfile
import re
import shutil
import math
import fileinput
from os.path import abspath
progname = os.path.split(sys.argv[0])[1]
myversion = 'V001.1 March 2014'
verbose = False
debug = False
toolFactoryURL = 'https://bitbucket.org/fubar/galaxytoolfactory'
# if we do html we need these dependencies specified in a tool_dependencies.xml file and referred to in the generated
# tool xml
toolhtmldepskel = """<?xml version="1.0"?>
<tool_dependency>
<package name="ghostscript" version="9.10">
<repository name="package_ghostscript_9_10" owner="devteam" prior_installation_required="True" />
</package>
<package name="graphicsmagick" version="1.3.18">
<repository name="package_graphicsmagick_1_3" owner="iuc" prior_installation_required="True" />
</package>
<readme>
%s
</readme>
</tool_dependency>
"""
protorequirements = """<requirements>
<requirement type="package" version="9.10">ghostscript</requirement>
<requirement type="package" version="1.3.18">graphicsmagick</requirement>
<container type="docker">toolfactory/custombuild:%s</container>
</requirements>"""
def timenow():
"""return current time as a string
"""
return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
html_escape_table = {
    "&": "&amp;",
    ">": "&gt;",
    "<": "&lt;",
    "$": "\$"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
def cmd_exists(cmd):
return subprocess.call("type " + cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
def edit_dockerfile(dockerfile):
'''we have to change the userid of galaxy inside the container to the id with which the tool is run,
otherwise we have a mismatch in the file permissions inside the container'''
uid=os.getuid()
for line in fileinput.FileInput(dockerfile, inplace=1):
sys.stdout.write(re.sub("RUN adduser galaxy.*", "RUN adduser galaxy -u {0}\n".format(uid), line))
def build_docker(dockerfile, docker_client, image_tag='base'):
'''Given the path to a dockerfile, and a docker_client, build the image, if it does not
exist yet.'''
image_id='toolfactory/custombuild:'+image_tag
existing_images=", ".join(["".join(d['RepoTags']) for d in docker_client.images()])
if image_id in existing_images:
print 'docker container exists, skipping build'
return image_id
print "Building Docker image, using Dockerfile:{0}".format(dockerfile)
build_process=docker_client.build(fileobj=open(dockerfile, 'r'), tag=image_id)
print "succesfully dispatched docker build process, building now"
build_log=[line for line in build_process] #will block until image is built.
return image_id
def construct_bind(host_path, container_path=False, binds=None, ro=True):
    #TODO remove container_path if it's always going to be the same as host_path
'''build or extend binds dictionary with container path. binds is used
to mount all files using the docker-py client.'''
if not binds:
binds={}
if isinstance(host_path, list):
for k,v in enumerate(host_path):
if not container_path:
container_path=host_path[k]
binds[host_path[k]]={'bind':container_path, 'ro':ro}
container_path=False #could be more elegant
return binds
else:
if not container_path:
container_path=host_path
binds[host_path]={'bind':container_path, 'ro':ro}
return binds
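# Editor's sketch (not in the original): construct_bind builds the mapping
# that docker-py's start(binds=...) expects, keyed by host path. With
# placeholder paths:
#   binds = construct_bind('/data/in.tab', ro=True)
#   binds = construct_bind(binds=binds, host_path='/data/out', ro=False)
#   # -> {'/data/in.tab': {'bind': '/data/in.tab', 'ro': True},
#   #     '/data/out': {'bind': '/data/out', 'ro': False}}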
def switch_to_docker(opts):
import docker #need local import, as container does not have docker-py
docker_client=docker.Client()
toolfactory_path=abspath(sys.argv[0])
dockerfile=os.path.dirname(toolfactory_path)+'/Dockerfile'
edit_dockerfile(dockerfile)
image_id=build_docker(dockerfile, docker_client)
binds=construct_bind(host_path=opts.script_path, ro=False)
binds=construct_bind(binds=binds, host_path=abspath(opts.output_dir), ro=False)
if len(opts.input_tab)>0:
binds=construct_bind(binds=binds, host_path=opts.input_tab, ro=True)
if not opts.output_tab == 'None':
binds=construct_bind(binds=binds, host_path=opts.output_tab, ro=False)
if opts.make_HTML:
binds=construct_bind(binds=binds, host_path=opts.output_html, ro=False)
if opts.make_Tool:
binds=construct_bind(binds=binds, host_path=opts.new_tool, ro=False)
binds=construct_bind(binds=binds, host_path=opts.help_text, ro=True)
binds=construct_bind(binds=binds, host_path=toolfactory_path)
volumes=binds.keys()
sys.argv=[abspath(opts.output_dir) if sys.argv[i-1]=='--output_dir' else arg for i,arg in enumerate(sys.argv)] ##inject absolute path of working_dir
cmd=['python', '-u']+sys.argv+['--dockerized', '1']
container=docker_client.create_container(
image=image_id,
user='galaxy',
volumes=volumes,
command=cmd
)
docker_client.start(container=container[u'Id'], binds=binds)
docker_client.wait(container=container[u'Id'])
logs=docker_client.logs(container=container[u'Id'])
print "".join([log for log in logs])
class ScriptRunner:
"""class is a wrapper for an arbitrary script
"""
def __init__(self,opts=None,treatbashSpecial=True, image_tag='base'):
"""
cleanup inputs, setup some outputs
"""
self.opts = opts
self.useGM = cmd_exists('gm')
self.useIM = cmd_exists('convert')
self.useGS = cmd_exists('gs')
self.temp_warned = False # we want only one warning if $TMP not set
self.treatbashSpecial = treatbashSpecial
self.image_tag = image_tag
os.chdir(abspath(opts.output_dir))
self.thumbformat = 'png'
self.toolname_sanitized = re.sub('[^a-zA-Z0-9_]+', '_', opts.tool_name) # a sanitizer now does this but..
self.toolname = opts.tool_name
self.toolid = self.toolname
self.myname = sys.argv[0] # get our name because we write ourselves out as a tool later
self.pyfile = self.myname # crude but efficient - the cruft won't hurt much
self.xmlfile = '%s.xml' % self.toolname_sanitized
s = open(self.opts.script_path,'r').readlines()
s = [x.rstrip() for x in s] # remove pesky dos line endings if needed
self.script = '\n'.join(s)
fhandle,self.sfile = tempfile.mkstemp(prefix=self.toolname_sanitized,suffix=".%s" % (opts.interpreter))
tscript = open(self.sfile,'w') # use self.sfile as script source for Popen
tscript.write(self.script)
tscript.close()
self.indentedScript = '\n'.join([' %s' % html_escape(x) for x in s]) # for restructured text in help
self.escapedScript = '\n'.join([html_escape(x) for x in s])
self.elog = os.path.join(self.opts.output_dir,"%s_error.log" % self.toolname_sanitized)
if opts.output_dir: # may not want these complexities
self.tlog = os.path.join(self.opts.output_dir,"%s_runner.log" % self.toolname_sanitized)
art = '%s.%s' % (self.toolname_sanitized,opts.interpreter)
artpath = os.path.join(self.opts.output_dir,art) # need full path
artifact = open(artpath,'w') # use self.sfile as script source for Popen
artifact.write(self.script)
artifact.close()
self.cl = []
self.html = []
a = self.cl.append
a(opts.interpreter)
if self.treatbashSpecial and opts.interpreter in ['bash','sh']:
a(self.sfile)
else:
a('-') # stdin
for input in opts.input_tab:
a(input)
if opts.output_tab == 'None': #If tool generates only HTML, set output name to toolname
a(str(self.toolname_sanitized)+'.out')
a(opts.output_tab)
for param in opts.additional_parameters:
param, value=param.split(',')
a('--'+param)
a(value)
#print self.cl
self.outFormats = opts.output_format
self.inputFormats = [formats for formats in opts.input_formats]
self.test1Input = '%s_test1_input.xls' % self.toolname_sanitized
self.test1Output = '%s_test1_output.xls' % self.toolname_sanitized
self.test1HTML = '%s_test1_output.html' % self.toolname_sanitized
def makeXML(self):
"""
Create a Galaxy xml tool wrapper for the new script as a string to write out
fixme - use templating or something less fugly than this example of what we produce
<tool id="reverse" name="reverse" version="0.01">
<description>a tabular file</description>
<command interpreter="python">
reverse.py --script_path "$runMe" --interpreter "python"
--tool_name "reverse" --input_tab "$input1" --output_tab "$tab_file"
</command>
<inputs>
<param name="input1" type="data" format="tabular" label="Select a suitable input file from your history"/>
</inputs>
<outputs>
<data format=opts.output_format name="tab_file"/>
</outputs>
<help>
**What it Does**
Reverse the columns in a tabular file
</help>
<configfiles>
<configfile name="runMe">
# reverse order of columns in a tabular file
import sys
inp = sys.argv[1]
outp = sys.argv[2]
i = open(inp,'r')
o = open(outp,'w')
for row in i:
rs = row.rstrip().split('\t')
rs.reverse()
o.write('\t'.join(rs))
o.write('\n')
i.close()
o.close()
</configfile>
</configfiles>
</tool>
"""
newXML="""<tool id="%(toolid)s" name="%(toolname)s" version="%(tool_version)s">
%(tooldesc)s
%(requirements)s
<command interpreter="python">
%(command)s
</command>
<inputs>
%(inputs)s
</inputs>
<outputs>
%(outputs)s
</outputs>
<configfiles>
<configfile name="runMe">
%(script)s
</configfile>
</configfiles>
%(tooltests)s
<help>
%(help)s
</help>
</tool>""" # needs a dict with toolname, toolname_sanitized, toolid, interpreter, scriptname, command, inputs as a multi line string ready to write, outputs ditto, help ditto
newCommand="""
%(toolname_sanitized)s.py --script_path "$runMe" --interpreter "%(interpreter)s"
--tool_name "%(toolname)s" %(command_inputs)s %(command_outputs)s """
# may NOT be an input or htmlout - appended later
tooltestsTabOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="tabular"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="tabular"/>
</test>
</tests>
"""
tooltestsHTMLOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="tabular"/>
<param name="runMe" value="$runMe"/>
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="5"/>
</test>
</tests>
"""
tooltestsBoth = """<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="tabular"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="tabular" />
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="10"/>
</test>
</tests>
"""
xdict = {}
#xdict['requirements'] = ''
#if self.opts.make_HTML:
xdict['requirements'] = protorequirements % self.image_tag
xdict['tool_version'] = self.opts.tool_version
xdict['test1Input'] = self.test1Input
xdict['test1HTML'] = self.test1HTML
xdict['test1Output'] = self.test1Output
if self.opts.make_HTML and self.opts.output_tab <> 'None':
xdict['tooltests'] = tooltestsBoth % xdict
elif self.opts.make_HTML:
xdict['tooltests'] = tooltestsHTMLOnly % xdict
else:
xdict['tooltests'] = tooltestsTabOnly % xdict
xdict['script'] = self.escapedScript
# configfile is least painful way to embed script to avoid external dependencies
# but requires escaping of <, > and $ to avoid Mako parsing
if self.opts.help_text:
helptext = open(self.opts.help_text,'r').readlines()
helptext = [html_escape(x) for x in helptext] # must html escape here too - thanks to Marius van den Beek
xdict['help'] = ''.join([x for x in helptext])
else:
xdict['help'] = 'Please ask the tool author (%s) for help as none was supplied at tool generation\n' % (self.opts.user_email)
coda = ['**Script**','Pressing execute will run the following code over your input file and generate some outputs in your history::']
coda.append('\n')
coda.append(self.indentedScript)
coda.append('\n**Attribution**\nThis Galaxy tool was created by %s at %s\nusing the Galaxy Tool Factory.\n' % (self.opts.user_email,timenow()))
coda.append('See %s for details of that project' % (toolFactoryURL))
coda.append('Please cite: Creating re-usable tools from scripts: The Galaxy Tool Factory. Ross Lazarus; Antony Kaspi; Mark Ziemann; The Galaxy Team. ')
coda.append('Bioinformatics 2012; doi: 10.1093/bioinformatics/bts573\n')
xdict['help'] = '%s\n%s' % (xdict['help'],'\n'.join(coda))
if self.opts.tool_desc:
xdict['tooldesc'] = '<description>%s</description>' % self.opts.tool_desc
else:
xdict['tooldesc'] = ''
xdict['command_outputs'] = ''
xdict['outputs'] = ''
if self.opts.input_tab <> 'None':
xdict['command_inputs'] = '--input_tab'
xdict['inputs']=''
for i,input in enumerate(self.inputFormats):
xdict['inputs' ]+='<param name="input{0}" type="data" format="{1}" label="Select a suitable input file from your history"/> \n'.format(i+1, input)
xdict['command_inputs'] += ' $input{0}'.format(i+1)
else:
xdict['command_inputs'] = '' # assume no input - eg a random data generator
xdict['inputs'] = ''
# I find setting the job name not very logical. can be changed in workflows anyway. xdict['inputs'] += '<param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="%s"/> \n' % self.toolname
xdict['toolname'] = self.toolname
xdict['toolname_sanitized'] = self.toolname_sanitized
xdict['toolid'] = self.toolid
xdict['interpreter'] = self.opts.interpreter
xdict['scriptname'] = self.sfile
if self.opts.make_HTML:
xdict['command_outputs'] += ' --output_dir "$html_file.files_path" --output_html "$html_file" --make_HTML "yes"'
xdict['outputs'] += ' <data format="html" name="html_file"/>\n'
else:
xdict['command_outputs'] += ' --output_dir "./"'
#print self.opts.output_tab
if self.opts.output_tab!="None":
xdict['command_outputs'] += ' --output_tab "$tab_file"'
xdict['outputs'] += ' <data format="%s" name="tab_file"/>\n' % self.outFormats
xdict['command'] = newCommand % xdict
#print xdict['outputs']
xmls = newXML % xdict
xf = open(self.xmlfile,'w')
xf.write(xmls)
xf.write('\n')
xf.close()
# ready for the tarball
def makeTooltar(self):
"""
a tool is a gz tarball with eg
/toolname_sanitized/tool.xml /toolname_sanitized/tool.py /toolname_sanitized/test-data/test1_in.foo ...
"""
retval = self.run()
if retval:
print >> sys.stderr,'## Run failed. Cannot build yet. Please fix and retry'
sys.exit(1)
tdir = self.toolname_sanitized
os.mkdir(tdir)
self.makeXML()
if self.opts.make_HTML:
if self.opts.help_text:
hlp = open(self.opts.help_text,'r').read()
else:
hlp = 'Please ask the tool author for help as none was supplied at tool generation\n'
if self.opts.include_dependencies:
tooldepcontent = toolhtmldepskel % hlp
depf = open(os.path.join(tdir,'tool_dependencies.xml'),'w')
depf.write(tooldepcontent)
depf.write('\n')
depf.close()
if self.opts.input_tab <> 'None': # no reproducible test otherwise? TODO: maybe..
testdir = os.path.join(tdir,'test-data')
os.mkdir(testdir) # make tests directory
for i in self.opts.input_tab:
#print i
shutil.copyfile(i,os.path.join(testdir,self.test1Input))
            if self.opts.output_tab <> 'None': # editor's fix: the source read "if not self.opts.output_tab", which would try to copy a missing file
shutil.copyfile(self.opts.output_tab,os.path.join(testdir,self.test1Output))
if self.opts.make_HTML:
shutil.copyfile(self.opts.output_html,os.path.join(testdir,self.test1HTML))
if self.opts.output_dir:
shutil.copyfile(self.tlog,os.path.join(testdir,'test1_out.log'))
outpif = '%s.py' % self.toolname_sanitized # new name
outpiname = os.path.join(tdir,outpif) # path for the tool tarball
pyin = os.path.basename(self.pyfile) # our name - we rewrite ourselves (TM)
notes = ['# %s - a self annotated version of %s generated by running %s\n' % (outpiname,pyin,pyin),]
notes.append('# to make a new Galaxy tool called %s\n' % self.toolname)
notes.append('# User %s at %s\n' % (self.opts.user_email,timenow()))
        pi=[line.replace('if False:', 'if False:') for line in open(self.pyfile)] # a no-op in this self-generated copy; in the factory source this rewrite disables the docker switch ("if False:" below) so the generated tool does not re-enter docker
notes += pi
outpi = open(outpiname,'w')
outpi.write(''.join(notes))
outpi.write('\n')
outpi.close()
stname = os.path.join(tdir,self.sfile)
if not os.path.exists(stname):
shutil.copyfile(self.sfile, stname)
xtname = os.path.join(tdir,self.xmlfile)
if not os.path.exists(xtname):
shutil.copyfile(self.xmlfile,xtname)
tarpath = "%s.gz" % self.toolname_sanitized
tar = tarfile.open(tarpath, "w:gz")
tar.add(tdir,arcname=self.toolname_sanitized)
tar.close()
shutil.copyfile(tarpath,self.opts.new_tool)
shutil.rmtree(tdir)
## TODO: replace with optional direct upload to local toolshed?
return retval
def compressPDF(self,inpdf=None,thumbformat='png'):
"""need absolute path to pdf
note that GS gets confoozled if no $TMP or $TEMP
so we set it
"""
        assert os.path.isfile(inpdf), "## Input %s supplied to %s compressPDF not found" % (inpdf,self.myname) # editor's fix: the attribute is self.myname
hlog = os.path.join(self.opts.output_dir,"compress_%s.txt" % os.path.basename(inpdf))
sto = open(hlog,'a')
our_env = os.environ.copy()
our_tmp = our_env.get('TMP',None)
if not our_tmp:
our_tmp = our_env.get('TEMP',None)
if not (our_tmp and os.path.exists(our_tmp)):
newtmp = os.path.join(self.opts.output_dir,'tmp')
try:
os.mkdir(newtmp)
except:
sto.write('## WARNING - cannot make %s - it may exist or permissions need fixing\n' % newtmp)
our_env['TEMP'] = newtmp
if not self.temp_warned:
sto.write('## WARNING - no $TMP or $TEMP!!! Please fix - using %s temporarily\n' % newtmp)
self.temp_warned = True
outpdf = '%s_compressed' % inpdf
cl = ["gs", "-sDEVICE=pdfwrite", "-dNOPAUSE", "-dUseCIEColor", "-dBATCH","-dPDFSETTINGS=/printer", "-sOutputFile=%s" % outpdf,inpdf]
x = subprocess.Popen(cl,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
retval1 = x.wait()
sto.close()
if retval1 == 0:
os.unlink(inpdf)
shutil.move(outpdf,inpdf)
os.unlink(hlog)
hlog = os.path.join(self.opts.output_dir,"thumbnail_%s.txt" % os.path.basename(inpdf))
sto = open(hlog,'w')
outpng = '%s.%s' % (os.path.splitext(inpdf)[0],thumbformat)
if self.useGM:
cl2 = ['gm', 'convert', inpdf, outpng]
else: # assume imagemagick
cl2 = ['convert', inpdf, outpng]
x = subprocess.Popen(cl2,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
retval2 = x.wait()
sto.close()
if retval2 == 0:
os.unlink(hlog)
retval = retval1 or retval2
return retval
def getfSize(self,fpath,outpath):
"""
format a nice file size string
"""
size = ''
fp = os.path.join(outpath,fpath)
if os.path.isfile(fp):
size = '0 B'
n = float(os.path.getsize(fp))
if n > 2**20:
size = '%1.1f MB' % (n/2**20)
elif n > 2**10:
size = '%1.1f KB' % (n/2**10)
elif n > 0:
size = '%d B' % (int(n))
return size
def makeHtml(self):
""" Create an HTML file content to list all the artifacts found in the output_dir
"""
galhtmlprefix = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="toolFormBody">
"""
galhtmlattr = """<hr/><div class="infomessage">This tool (%s) was generated by the <a href="https://bitbucket.org/fubar/galaxytoolfactory/overview">Galaxy Tool Factory</a></div><br/>"""
galhtmlpostfix = """</div></body></html>\n"""
flist = os.listdir(self.opts.output_dir)
flist = [x for x in flist if x <> 'Rplots.pdf']
flist.sort()
html = []
html.append(galhtmlprefix % progname)
html.append('<div class="infomessage">Galaxy Tool "%s" run at %s</div><br/>' % (self.toolname,timenow()))
fhtml = []
if len(flist) > 0:
logfiles = [x for x in flist if x.lower().endswith('.log')] # log file names determine sections
logfiles.sort()
logfiles = [x for x in logfiles if abspath(x) <> abspath(self.tlog)]
logfiles.append(abspath(self.tlog)) # make it the last one
pdflist = []
npdf = len([x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf'])
for rownum,fname in enumerate(flist):
dname,e = os.path.splitext(fname)
sfsize = self.getfSize(fname,self.opts.output_dir)
if e.lower() == '.pdf' : # compress and make a thumbnail
thumb = '%s.%s' % (dname,self.thumbformat)
pdff = os.path.join(self.opts.output_dir,fname)
retval = self.compressPDF(inpdf=pdff,thumbformat=self.thumbformat)
if retval == 0:
pdflist.append((fname,thumb))
else:
pdflist.append((fname,fname))
if (rownum+1) % 2 == 0:
fhtml.append('<tr class="odd_row"><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
else:
fhtml.append('<tr><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
for logfname in logfiles: # expect at least tlog - if more
if abspath(logfname) == abspath(self.tlog): # handled later
sectionname = 'All tool run'
if (len(logfiles) > 1):
sectionname = 'Other'
ourpdfs = pdflist
else:
realname = os.path.basename(logfname)
sectionname = os.path.splitext(realname)[0].split('_')[0] # break in case _ added to log
ourpdfs = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] == sectionname]
pdflist = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] <> sectionname] # remove
nacross = 1
npdf = len(ourpdfs)
if npdf > 0:
nacross = math.sqrt(npdf) ## int(round(math.log(npdf,2)))
if int(nacross)**2 != npdf:
nacross += 1
nacross = int(nacross)
width = min(400,int(1200/nacross))
html.append('<div class="toolFormTitle">%s images and outputs</div>' % sectionname)
html.append('(Click on a thumbnail image to download the corresponding original PDF image)<br/>')
ntogo = nacross # counter for table row padding with empty cells
html.append('<div><table class="simple" cellpadding="2" cellspacing="2">\n<tr>')
for i,paths in enumerate(ourpdfs):
fname,thumb = paths
s= """<td><a href="%s"><img src="%s" title="Click to download a PDF of %s" hspace="5" width="%d"
alt="Image called %s"/></a></td>\n""" % (fname,thumb,fname,width,fname)
if ((i+1) % nacross == 0):
s += '</tr>\n'
ntogo = 0
if i < (npdf - 1): # more to come
s += '<tr>'
ntogo = nacross
else:
ntogo -= 1
html.append(s)
if html[-1].strip().endswith('</tr>'):
html.append('</table></div>\n')
else:
if ntogo > 0: # pad
html.append('<td> </td>'*ntogo)
html.append('</tr></table></div>\n')
logt = open(logfname,'r').readlines()
logtext = [x for x in logt if x.strip() > '']
html.append('<div class="toolFormTitle">%s log output</div>' % sectionname)
if len(logtext) > 1:
html.append('\n<pre>\n')
html += logtext
html.append('\n</pre>\n')
else:
html.append('%s is empty<br/>' % logfname)
if len(fhtml) > 0:
fhtml.insert(0,'<div><table class="colored" cellpadding="3" cellspacing="3"><tr><th>Output File Name (click to view)</th><th>Size</th></tr>\n')
fhtml.append('</table></div><br/>')
html.append('<div class="toolFormTitle">All output files available for downloading</div>\n')
html += fhtml # add all non-pdf files to the end of the display
else:
html.append('<div class="warningmessagelarge">### Error - %s returned no files - please confirm that parameters are sane</div>' % self.opts.interpreter)
html.append(galhtmlpostfix)
htmlf = file(self.opts.output_html,'w')
htmlf.write('\n'.join(html))
htmlf.write('\n')
htmlf.close()
self.html = html
def run(self):
"""
scripts must be small enough not to fill the pipe!
"""
if self.treatbashSpecial and self.opts.interpreter in ['bash','sh']:
retval = self.runBash()
else:
if self.opts.output_dir:
ste = open(self.elog,'w')
sto = open(self.tlog,'w')
sto.write('## Toolfactory generated command line = %s\n' % ' '.join(self.cl))
sto.flush()
p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=ste,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
else:
p = subprocess.Popen(self.cl,shell=False,stdin=subprocess.PIPE)
p.stdin.write(self.script)
p.stdin.close()
retval = p.wait()
if self.opts.output_dir:
sto.close()
ste.close()
err = open(self.elog,'r').readlines()
if retval <> 0 and err: # problem
print >> sys.stderr,err #same problem, need to capture docker stdin/stdout
if self.opts.make_HTML:
self.makeHtml()
return retval
def runBash(self):
"""
cannot use - for bash so use self.sfile
"""
if self.opts.output_dir:
s = '## Toolfactory generated command line = %s\n' % ' '.join(self.cl)
sto = open(self.tlog,'w')
sto.write(s)
sto.flush()
p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=sto,cwd=self.opts.output_dir)
else:
p = subprocess.Popen(self.cl,shell=False)
retval = p.wait()
if self.opts.output_dir:
sto.close()
if self.opts.make_HTML:
self.makeHtml()
return retval
def main():
u = """
This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as:
<command interpreter="python">rgBaseScriptWrapper.py --script_path "$scriptPath" --tool_name "foo" --interpreter "Rscript"
</command>
"""
op = argparse.ArgumentParser()
a = op.add_argument
a('--script_path',default=None)
a('--tool_name',default=None)
a('--interpreter',default=None)
a('--output_dir',default='./')
a('--output_html',default=None)
a('--input_tab',default='None', nargs='*')
a('--output_tab',default='None')
a('--user_email',default='Unknown')
a('--bad_user',default=None)
a('--make_Tool',default=None)
a('--make_HTML',default=None)
a('--help_text',default=None)
a('--tool_desc',default=None)
a('--new_tool',default=None)
a('--tool_version',default=None)
a('--include_dependencies',default=None)
a('--dockerized',default=0)
a('--output_format', default='tabular')
a('--input_format', dest='input_formats', action='append', default=[])
a('--additional_parameters', dest='additional_parameters', action='append', default=[])
opts = op.parse_args()
assert not opts.bad_user,'UNAUTHORISED: %s is NOT authorized to use this tool until Galaxy admin adds %s to admin_users in universe_wsgi.ini' % (opts.bad_user,opts.bad_user)
assert opts.tool_name,'## Tool Factory expects a tool name - eg --tool_name=DESeq'
assert opts.interpreter,'## Tool Factory wrapper expects an interpreter - eg --interpreter=Rscript'
assert os.path.isfile(opts.script_path),'## Tool Factory wrapper expects a script path - eg --script_path=foo.R'
if opts.output_dir:
try:
os.makedirs(opts.output_dir)
except:
pass
if False:
switch_to_docker(opts)
return
r = ScriptRunner(opts)
if opts.make_Tool:
retcode = r.makeTooltar()
else:
retcode = r.run()
os.unlink(r.sfile)
if retcode:
sys.exit(retcode) # indicate failure to job runner
if __name__ == "__main__":
main()
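# Editor's sketch (not in the original): a representative invocation of this
# wrapper, mirroring the <command> template built in ScriptRunner.makeXML.
# Every path, the tool name and the e-mail address are placeholders.
#   python transform_counts_with_normalization_factor.py \
#       --script_path runme.R --interpreter Rscript \
#       --tool_name "transform counts" --input_tab in.tab --output_tab out.tab \
#       --output_dir ./work --user_email me@example.org --make_HTML yes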
|
JuPeg/tools-artbio
|
unstable/local_tools/local_shed_backup/transform_counts_with_normalization_factor-default/transform_counts_with_normalization_factor/transform_counts_with_normalization_factor.py
|
Python
|
mit
| 32,081
|
[
"Galaxy"
] |
a164918184e8888be157ca046ea99fdbc21cd6b4eaaa4a605ec7c5e9844f6a51
|
# -*- encoding: utf-8 -*-
from scriptorium.base.test import SeleniumTestCase
from scriptorium.base.test.mixin import MailerTesterMixIn
from scriptorium.models import User
class AuthTesterMixin(object):
def login(self: SeleniumTestCase, email, password):
self.tester.visit('login')
self.tester.fill_form({
'email': email,
'password': password
})
self.tester.click('submit', '#login-form *')
def logout(self: SeleniumTestCase):
self.tester.visit('homepage')
self.tester.click('logout', '.navbar a')
def signup(self: SeleniumTestCase, data):
self.tester.visit('signup')
self.tester.fill_form(data)
self.tester.click('submit', '#signup-form *')
class AuthTestCase(MailerTesterMixIn, AuthTesterMixin, SeleniumTestCase):
fixtures = ['auth']
def setUp(self):
super().setUp()
self.tester.visit('homepage')
def test_login(self):
self.tester.see('sign in', '.navbar a')
self.tester.click('sign in', '.navbar a')
self.tester.see('Login', '.container *')
self.login('test@testing.com', 'testing')
self.tester.see('logout')
def test_logout(self):
self.login('test@testing.com', 'testing')
self.logout()
self.tester.see('sign in')
def test_signup(self):
self.signup({
'username': 'tester',
'email': 'tester@testing.com',
'password': 'testing',
'password_repeat': 'testing'
})
User.objects.get(username='tester')
|
alex20465/open-scriptorium
|
apps/frontpage/tests/test_auth.py
|
Python
|
mit
| 1,588
|
[
"VisIt"
] |
d0e55ba3170c3b1b842d00b7f475a961887b0b832f660e7646dd3edcba67736f
|
#!/usr/bin/env python
from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(150)
source.SetEdgeProbability(0.01)
source.SetUseEdgeProbability(True)
source.SetStartWithTree(True)
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(source.GetOutputPort())
view.SetVertexLabelArrayName("vertex id")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("vertex id")
view.SetColorVertices(True)
view.SetLayoutStrategyToSpanTree()
view.SetInteractionModeTo3D() # Left mouse button causes 3D rotate instead of zoom
view.SetLabelPlacementModeToNoOverlap()
theme = vtkViewTheme.CreateMellowTheme()
theme.SetCellColor(.2,.2,.6)
theme.SetLineWidth(2)
theme.SetPointSize(10)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Examples/Infovis/Python/random3d.py
|
Python
|
bsd-3-clause
| 899
|
[
"VTK"
] |
808c0d295b64de24bfce4d9929243435c76a81a95ce97f9b1f776d36abbfeebc
|
../../../../share/pyshared/orca/dectalk.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/dectalk.py
|
Python
|
gpl-3.0
| 42
|
[
"ORCA"
] |
ec58a1bde0458685ce88b6b3a95183d5cb7526d860589c30009c7affd27aced6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Count and export redundancy of sequences found in a fasta file.
Warning: may crash by using the whole memory.
Usage:
%program <input_file> <output_file> [data_column]"""
import sys
import re
from collections import defaultdict
try:
from Bio import SeqIO
except:
print("This program requires the Biopython library")
sys.exit(0)
try:
fasta_file = sys.argv[1] # Input fasta file
    out_file = sys.argv[2] # Output file
except:
print(__doc__)
sys.exit(0)
d = defaultdict(int)
fasta_sequences = SeqIO.parse(open(fasta_file),'fasta')
for seq in fasta_sequences:
d[str(seq.seq)] += 1
print("There where %s different sequences" % (len(d)))
fasta_sequences.close()
dd = [(x[1], x[0]) for x in d.items()]
dd.sort(reverse=True)
with open(out_file, "w") as f:
for x in dd:
f.write(str(x[1]) + "\t" + str(x[0]) + "\n")
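# Editor's sketch (not in the original): the docstring warns that keeping
# every distinct sequence string as a dict key may exhaust memory. A hedged
# lower-memory variant counts a fixed-size digest per sequence instead,
# giving the redundancy counts without retaining the sequences themselves:
#   import hashlib
#   counts = defaultdict(int)
#   for s in SeqIO.parse(open(fasta_file), 'fasta'):
#       counts[hashlib.md5(str(s.seq).encode('ascii')).hexdigest()] += 1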
|
enormandeau/Scripts
|
fasta_distribution.py
|
Python
|
gpl-3.0
| 917
|
[
"Biopython"
] |
1be7c634d65e606ca98967800ff373b72a61ebb2ebb03785d5c51769f2ac4890
|
'''
Unittests for pysal.model.spreg.error_sp_hom module
'''
import unittest
import pysal.lib
from pysal.model.spreg import error_sp_hom as HOM
import numpy as np
from pysal.lib.common import RTOL
import pysal.model.spreg
class BaseGM_Error_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.BaseGM_Error_Hom(self.y, self.X, self.w.sparse, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,RTOL)
betas = np.array([[ 47.9478524 ], [ 0.70633223], [ -0.55595633], [ 0.41288558]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
np.testing.assert_allclose(reg.u[0],np.array([27.466734]),RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 32.37298547]),RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
np.testing.assert_allclose(reg.predy[0],np.array([ 53.000269]),RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
sig2 = 189.94459439729718
np.testing.assert_allclose(reg.sig2,sig2)
vm = np.array([[ 1.51340717e+02, -5.29057506e+00, -1.85654540e+00, -2.39139054e-03], [ -5.29057506e+00, 2.46669610e-01, 5.14259101e-02, 3.19241302e-04], [ -1.85654540e+00, 5.14259101e-02, 3.20510550e-02, -5.95640240e-05], [ -2.39139054e-03, 3.19241302e-04, -5.95640240e-05, 3.36690159e-02]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03], [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04], [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
np.testing.assert_allclose(reg.xtx,xtx,RTOL)
class GM_Error_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.GM_Error_Hom(self.y, self.X, self.w, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,RTOL)
betas = np.array([[ 47.9478524 ], [ 0.70633223], [ -0.55595633], [ 0.41288558]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
np.testing.assert_allclose(reg.u[0],np.array([27.46673388]),RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 32.37298547]),RTOL)
np.testing.assert_allclose(reg.predy[0],np.array([ 53.00026912]),RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
vm = np.array([[ 1.51340717e+02, -5.29057506e+00, -1.85654540e+00, -2.39139054e-03], [ -5.29057506e+00, 2.46669610e-01, 5.14259101e-02, 3.19241302e-04], [ -1.85654540e+00, 5.14259101e-02, 3.20510550e-02, -5.95640240e-05], [ -2.39139054e-03, 3.19241302e-04, -5.95640240e-05, 3.36690159e-02]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
np.testing.assert_allclose(reg.iteration,1,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
pr2 = 0.34950977055969729
np.testing.assert_allclose(reg.pr2,pr2)
sig2 = 189.94459439729718
np.testing.assert_allclose(reg.sig2,sig2)
std_err = np.array([ 12.30206149, 0.49665844, 0.17902808, 0.18349119])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
z_stat = np.array([[ 3.89754616e+00, 9.71723059e-05], [ 1.42216900e+00, 1.54977196e-01], [ -3.10541409e+00, 1.90012806e-03], [ 2.25016500e+00, 2.44384731e-02]])
np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03], [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04], [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
np.testing.assert_allclose(reg.xtx,xtx,RTOL)
class BaseGM_Endog_Error_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.BaseGM_Endog_Error_Hom(self.y, self.X, self.yd, self.q, self.w.sparse, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([ 80.467003]),RTOL)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,RTOL)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.z[0],z,RTOL)
h = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_allclose(reg.h[0],h,RTOL)
yend = np.array([ 15.72598])
np.testing.assert_allclose(reg.yend[0],yend,RTOL)
q = np.array([ 5.03])
np.testing.assert_allclose(reg.q[0],q,RTOL)
betas = np.array([[ 55.36575166], [ 0.46432416], [ -0.66904404], [ 0.43205526]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
u = np.array([ 26.55390939])
np.testing.assert_allclose(reg.u[0],u,RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 31.74114306]),RTOL)
predy = np.array([ 53.91309361])
np.testing.assert_allclose(reg.predy[0],predy,RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
sig2 = 190.59435238060928
np.testing.assert_allclose(reg.sig2,sig2)
vm = np.array([[ 5.52064057e+02, -1.61264555e+01, -8.86360735e+00, 1.04251912e+00], [ -1.61264555e+01, 5.44898242e-01, 2.39518645e-01, -1.88092950e-02], [ -8.86360735e+00, 2.39518645e-01, 1.55501840e-01, -2.18638648e-02], [ 1.04251912e+00, -1.88092950e-02, -2.18638648e-02, 3.71222222e-02]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
np.testing.assert_allclose(reg.iteration,its,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
sig2 = 0
#np.testing.assert_allclose(reg.sig2,sig2)
hth = np.array([[ 49. , 704.371999 , 139.75 ], [ 704.371999 , 11686.67338121, 2246.12800625], [ 139.75 , 2246.12800625, 498.5851]])
np.testing.assert_allclose(reg.hth,hth,RTOL)
class GM_Endog_Error_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.GM_Endog_Error_Hom(self.y, self.X, self.yd, self.q, self.w, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([ 80.467003]),RTOL)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,RTOL)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.z[0],z,RTOL)
h = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_allclose(reg.h[0],h,RTOL)
yend = np.array([ 15.72598])
np.testing.assert_allclose(reg.yend[0],yend,RTOL)
q = np.array([ 5.03])
np.testing.assert_allclose(reg.q[0],q,RTOL)
betas = np.array([[ 55.36575166], [ 0.46432416], [ -0.66904404], [ 0.43205526]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
u = np.array([ 26.55390939])
np.testing.assert_allclose(reg.u[0],u,RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 31.74114306]),RTOL)
predy = np.array([ 53.91309361])
np.testing.assert_allclose(reg.predy[0],predy,RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
vm = np.array([[ 5.52064057e+02, -1.61264555e+01, -8.86360735e+00, 1.04251912e+00], [ -1.61264555e+01, 5.44898242e-01, 2.39518645e-01, -1.88092950e-02], [ -8.86360735e+00, 2.39518645e-01, 1.55501840e-01, -2.18638648e-02], [ 1.04251912e+00, -1.88092950e-02, -2.18638648e-02, 3.71222222e-02]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
np.testing.assert_allclose(reg.iteration,its,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
pr2 = 0.34647366525657419
np.testing.assert_allclose(reg.pr2,pr2)
sig2 = 190.59435238060928
np.testing.assert_allclose(reg.sig2,sig2)
#std_err
std_err = np.array([ 23.49604343, 0.73817223, 0.39433722, 0.19267128])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
z_stat = np.array([[ 2.35638617, 0.01845372], [ 0.62901874, 0.52933679], [-1.69662923, 0.08976678], [ 2.24244556, 0.02493259]])
np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
class BaseGM_Combo_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
yd2, q2 = pysal.model.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 1, True)
self.X = np.hstack((np.ones(self.y.shape),self.X))
reg = HOM.BaseGM_Combo_Hom(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,RTOL)
betas = np.array([[ 10.12541428], [ 1.56832263], [ 0.15132076], [ 0.21033397]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
np.testing.assert_allclose(reg.u[0],np.array([34.3450723]),RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 36.6149682]),RTOL)
np.testing.assert_allclose(reg.predy[0],np.array([ 46.1219307]),RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
vm = np.array([[ 2.33694742e+02, -6.66856869e-01, -5.58304254e+00, 4.85488380e+00], [ -6.66856869e-01, 1.94241504e-01, -5.42327138e-02, 5.37225570e-02], [ -5.58304254e+00, -5.42327138e-02, 1.63860721e-01, -1.44425498e-01], [ 4.85488380e+00, 5.37225570e-02, -1.44425498e-01, 1.78622255e-01]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
z = np.array([ 1. , 19.531 , 35.4585005])
np.testing.assert_allclose(reg.z[0],z,RTOL)
h = np.array([ 1. , 19.531, 18.594])
np.testing.assert_allclose(reg.h[0],h,RTOL)
yend = np.array([ 35.4585005])
np.testing.assert_allclose(reg.yend[0],yend,RTOL)
q = np.array([ 18.594])
np.testing.assert_allclose(reg.q[0],q,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
np.testing.assert_allclose(reg.iteration,its,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
sig2 = 232.22680651270042
#np.testing.assert_allclose(reg.sig2,sig2)
np.testing.assert_allclose(reg.sig2,sig2)
hth = np.array([[ 49. , 704.371999 , 724.7435916 ], [ 704.371999 , 11686.67338121, 11092.519988 ], [ 724.7435916 , 11092.519988 , 11614.62257048]])
np.testing.assert_allclose(reg.hth,hth,RTOL)
class GM_Combo_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.GM_Combo_Hom(self.y, self.X, w=self.w, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,RTOL)
betas = np.array([[ 10.12541428], [ 1.56832263], [ 0.15132076], [ 0.21033397]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
np.testing.assert_allclose(reg.u[0],np.array([34.3450723]),RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 36.6149682]),RTOL)
np.testing.assert_allclose(reg.e_pred[0],np.array([ 32.90372983]),RTOL)
np.testing.assert_allclose(reg.predy[0],np.array([ 46.1219307]),RTOL)
np.testing.assert_allclose(reg.predy_e[0],np.array([47.56327317]),RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
z = np.array([ 1. , 19.531 , 35.4585005])
np.testing.assert_allclose(reg.z[0],z,RTOL)
h = np.array([ 1. , 19.531, 18.594])
np.testing.assert_allclose(reg.h[0],h,RTOL)
yend = np.array([ 35.4585005])
np.testing.assert_allclose(reg.yend[0],yend,RTOL)
q = np.array([ 18.594])
np.testing.assert_allclose(reg.q[0],q,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
np.testing.assert_allclose(reg.iteration,1,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
pr2 = 0.28379825632694394
np.testing.assert_allclose(reg.pr2,pr2)
pr2_e = 0.25082892555141506
np.testing.assert_allclose(reg.pr2_e,pr2_e)
sig2 = 232.22680651270042
#np.testing.assert_allclose(reg.sig2, sig2)
np.testing.assert_allclose(reg.sig2, sig2)
std_err = np.array([ 15.28707761, 0.44072838, 0.40479714, 0.42263726])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
z_stat = np.array([[ 6.62351206e-01, 5.07746167e-01], [ 3.55847888e+00, 3.73008780e-04], [ 3.73818749e-01, 7.08539170e-01], [ 4.97670189e-01, 6.18716523e-01]])
np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
vm = np.array([[ 2.33694742e+02, -6.66856869e-01, -5.58304254e+00, 4.85488380e+00], [ -6.66856869e-01, 1.94241504e-01, -5.42327138e-02, 5.37225570e-02], [ -5.58304254e+00, -5.42327138e-02, 1.63860721e-01, -1.44425498e-01], [ 4.85488380e+00, 5.37225570e-02, -1.44425498e-01, 1.78622255e-01]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
suite = unittest.TestSuite()
test_classes = [BaseGM_Error_Hom_Tester, GM_Error_Hom_Tester,\
BaseGM_Endog_Error_Hom_Tester, GM_Endog_Error_Hom_Tester, \
BaseGM_Combo_Hom_Tester, GM_Combo_Hom_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
|
lixun910/pysal
|
pysal/model/spreg/tests/test_error_sp_hom.py
|
Python
|
bsd-3-clause
| 17,463
|
[
"COLUMBUS"
] |
975ff913be46336ade7716f2eed3bef8ea09306ed7cfc8954c6d33c793396cae
|
from ase import Atoms
# Setup a chain of H,O,C
# H-O Dist = 2
# O-C Dist = 3
# C-H Dist = 5 with mic=False
# C-H Dist = 4 with mic=True
a = Atoms('HOC', positions=[(1, 1, 1), (3, 1, 1), (6, 1, 1)])
a.set_cell((9, 2, 2))
a.set_pbc((True, False, False))
# Calculate individually with mic=True
assert a.get_distance(0, 1, mic=True) == 2
assert a.get_distance(1, 2, mic=True) == 3
assert a.get_distance(0, 2, mic=True) == 4
# Calculate individually with mic=False
assert a.get_distance(0, 1, mic=False) == 2
assert a.get_distance(1, 2, mic=False) == 3
assert a.get_distance(0, 2, mic=False) == 5
# Calculate in groups with mic=True
assert (a.get_distances(0, [1, 2], mic=True) == [2, 4]).all()
# Calculate in groups with mic=False
assert (a.get_distances(0, [1, 2], mic=False) == [2, 5]).all()
# Calculate all with mic=True
assert (a.get_all_distances(mic=True) == [[0, 2, 4],
[2, 0, 3],
[4, 3, 0]]).all()
# Calculate all with mic=False
assert (a.get_all_distances(mic=False) == [[0, 2, 5],
[2, 0, 3],
[5, 3, 0]]).all()
|
askhl/ase
|
ase/test/atoms_distance.py
|
Python
|
gpl-2.0
| 1,192
|
[
"ASE"
] |
fd105633f7de177283bd41e0c7a8828d23ea1ac7397ddf7e61da42a86da33f8f
|
#!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals
from collections import Counter
from molbiox.algor import interval
def find_next_contigs(samfile, contig, lendict, insertmax,
direction='next', orientation='fr'):
"""
:param samfile: pysam.calignmentfile.AlignmentFile object
:param contig: contig name (a string)
    :param insertmax: maximum allowed insert size
:param orientation: fr / rf
:param direction: prev / next
:return:
"""
length = lendict[contig]
if direction == 'prev':
headpos = 0
tailpos = min(insertmax, length)
elif direction == 'next':
headpos = max(0, length-insertmax)
tailpos = length
else:
raise ValueError('direction can only be prev or next')
reads = samfile.fetch(contig, headpos, tailpos)
reads = (r for r in reads if r.is_paired and not r.mate_is_unmapped)
# should read be reversed?
revdict = dict(prevfr=True, prevrf=False, nextfr=False, nextrf=True)
needrev = revdict[direction + orientation]
reads = (r for r in reads if bool(r.is_reverse) == needrev)
valid_pairs = []
for read in reads:
# this line, VERY SLOW!
mate = samfile.mate(read)
if read.reference_id == mate.reference_id:
continue
mcontig = samfile.references[mate.reference_id]
mlength = lendict[mcontig]
if not isinstance(mate.reference_start, int):
continue
if not isinstance(mate.reference_end, int):
continue
if mate.is_reverse and orientation == 'fr' or \
not mate.is_reverse and orientation == 'rf':
mheadpos = 0
mtailpos = min(insertmax, mlength)
else:
            mheadpos = max(0, mlength-insertmax) # editor's fix: the source used the query contig's "length" here instead of the mate contig's "mlength"
mtailpos = mlength
# overlap size
ov = interval.overlap_size(
mheadpos, mtailpos, mate.reference_start, mate.reference_end)
if ov > 0:
valid_pairs.append((read, mate))
refs = samfile.references
strand = lambda r, m: 'diff' if r.is_reverse == m.is_reverse else 'same'
nextcontigs = ((refs[m.reference_id], strand(r, m)) for r, m in valid_pairs)
return Counter(nextcontigs)
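# Editor's note (not in the original): samfile.mate() performs an index
# lookup per read, which is why the loop above is flagged as very slow. In
# recent pysam the mate's contig, start and strand are already carried on
# the read record (next_reference_id, next_reference_start, mate_is_reverse),
# so a hedged approximation avoids fetching the mate record entirely, at the
# cost of estimating the mate's end coordinate:
#   mref = samfile.references[read.next_reference_id]
#   mstart = read.next_reference_start
#   mend_approx = mstart + read.query_length  # assumes comparable read lengths
#   mrev = read.mate_is_reverse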
|
frozflame/molbiox
|
molbiox/algor/mapping.py
|
Python
|
gpl-2.0
| 2,293
|
[
"pysam"
] |
28b471ec16a7f7e2b66bf39184f11cfc90480f230ce4646415d48a36645a1c6e
|
"""Fabric deployment file to install genomic data on remote instances.
Designed to automatically download and manage biologically associated
data on cloud instances like Amazon EC2.
Fabric (http://docs.fabfile.org) manages automation of remote servers.
Usage:
fab -i key_file -H servername -f data_fabfile.py install_data
"""
import os
import sys
from fabric.main import load_settings
from fabric.api import *
from fabric.contrib.files import *
from fabric.context_managers import path
try:
import boto
except ImportError:
boto = None
# preferentially use local cloudbio directory
for to_remove in [p for p in sys.path if p.find("cloudbiolinux-") > 0]:
sys.path.remove(to_remove)
sys.path.append(os.path.dirname(__file__))
from cloudbio.utils import _setup_logging, _configure_fabric_environment
from cloudbio.biodata import genomes
# -- Host specific setup
env.remove_old_genomes = False
def setup_environment():
"""Setup environment with required data file locations.
"""
_setup_logging(env)
_add_defaults()
_configure_fabric_environment(env, ignore_distcheck=True)
def _add_defaults():
"""Defaults from fabricrc.txt file; loaded if not specified at commandline.
"""
env.config_dir = os.path.join(os.path.dirname(__file__), "config")
conf_file = "tool_data_table_conf.xml"
env.tool_data_table_conf_file = os.path.join(os.path.dirname(__file__),
"installed_files", conf_file)
if not env.has_key("distribution"):
config_file = os.path.join(env.config_dir, "fabricrc.txt")
if os.path.exists(config_file):
env.update(load_settings(config_file))
CONFIG_FILE = os.path.join(os.path.dirname(__file__), "config", "biodata.yaml")
def install_data(config_source=CONFIG_FILE):
"""Main entry point for installing useful biological data.
"""
setup_environment()
genomes.install_data(config_source)
def install_data_raw(config_source=CONFIG_FILE):
"""Installing useful biological data building from scratch. Useful for debugging.
"""
setup_environment()
genomes.install_data(config_source, approaches=["raw"])
def install_data_s3(config_source=CONFIG_FILE, do_setup_environment=True):
"""Install data using pre-existing genomes present on Amazon s3.
"""
setup_environment()
genomes.install_data_s3(config_source)
def install_data_rsync(config_source=CONFIG_FILE):
"""Install data using Galaxy rsync data servers.
"""
setup_environment()
genomes.install_data_rsync(config_source)
def install_data_ggd(recipe, organism):
"""Install data using Get Genomics Data (GGD) recipes.
"""
setup_environment()
from cloudbio.biodata import ggd, genomes
genome_dir = os.path.join(genomes._make_genome_dir(), organism)
recipe_file = os.path.join(os.path.dirname(__file__), "ggd-recipes", organism, "%s.yaml" % recipe)
ggd.install_recipe(genome_dir, env, recipe_file, organism)
def upload_s3(config_source=CONFIG_FILE):
"""Upload prepared genome files by identifier to Amazon s3 buckets.
"""
setup_environment()
genomes.upload_s3(config_source)
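# Editor's sketch (not in the original): Fabric 1.x passes task arguments
# after a colon, so the tasks above can be invoked like this (key file,
# host, recipe and organism names are placeholders):
#   fab -i key.pem -H servername -f data_fabfile.py install_data_rsync
#   fab -i key.pem -H servername -f data_fabfile.py \
#       install_data_ggd:recipe=dbsnp,organism=Hsapiens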
|
chapmanb/cloudbiolinux
|
data_fabfile.py
|
Python
|
mit
| 3,179
|
[
"Galaxy"
] |
b92d34c9a2283cc54684728b513637e2ac9ead2ca80ca616c613edf75ba93e28
|
# -*- coding: utf-8 -*-
#
# Author: Taylor Smith <taylor.smith@alkaline-ml.com>
#
# Utils for autoencoders
from __future__ import print_function, absolute_import, division
import tensorflow as tf
__all__ = [
'cross_entropy',
'kullback_leibler'
]
def cross_entropy(actual, predicted, eps=1e-10):
"""Binary cross entropy
Parameters
----------
actual : TensorFlow ``Tensor``
Actual
predicted : TensorFlow ``Tensor``
Predicted
eps : float, optional (default=1e-10)
The amount to offset difference in ``predicted`` and ``actual``
to avoid any log(0) operations.
"""
# clip to avoid nan
p_ = tf.clip_by_value(predicted, eps, 1 - eps)
return -tf.reduce_sum(actual * tf.log(p_) + (1 - actual) * tf.log(1 - p_), 1)
def kullback_leibler(mu, log_sigma):
"""Gaussian Kullback-Leibler divergence:
KL(q | p)
Parameters
----------
mu : TensorFlow ``Tensor``
The z_mean tensor.
log_sigma : TensorFlow ``Tensor``
The z_log_sigma tensor.
"""
# -0.5 * (1 + log(sigma ** 2) - mu ** 2 - sigma ** 2)
return -0.5 * tf.reduce_sum(1 + (2 * log_sigma) - (mu ** 2) - tf.exp(2 * log_sigma), 1)
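# Editor's sketch (not in the original): a tiny smoke test of both losses,
# assuming a TensorFlow 1.x runtime (tf.log and tf.Session, as used above).
if __name__ == '__main__':
    actual = tf.constant([[1., 0., 1.]])
    predicted = tf.constant([[0.9, 0.1, 0.8]])
    mu = tf.zeros((1, 3))
    log_sigma = tf.zeros((1, 3))
    with tf.Session() as sess:
        print(sess.run(cross_entropy(actual, predicted)))  # ~[0.43]
        print(sess.run(kullback_leibler(mu, log_sigma)))   # [0.] for N(0, 1)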
|
tgsmith61591/smrt
|
smrt/autoencode/_ae_utils.py
|
Python
|
bsd-3-clause
| 1,213
|
[
"Gaussian"
] |
ad8817ac6fc9f96e38809a4eeffd9762163e9081b672888a74a4560e499be456
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen/plug/menu/__init__.py
"""
The menu package for allowing plugins to specify options in a generic way.
"""
from ._menu import Menu
from ._option import Option
from ._string import StringOption
from ._color import ColorOption
from ._number import NumberOption
from ._text import TextOption
from ._boolean import BooleanOption
from ._enumeratedlist import EnumeratedListOption
from ._filter import FilterOption
from ._person import PersonOption
from ._family import FamilyOption
from ._note import NoteOption
from ._media import MediaOption
from ._personlist import PersonListOption
from ._placelist import PlaceListOption
from ._surnamecolor import SurnameColorOption
from ._destination import DestinationOption
from ._style import StyleOption
from ._booleanlist import BooleanListOption
|
SNoiraud/gramps
|
gramps/gen/plug/menu/__init__.py
|
Python
|
gpl-2.0
| 1,586
|
[
"Brian"
] |
a54af05a5c807911132fa63cb844570938c7fab127d2b63fa5b50d2997c0a024
|
"""Test the DataRecoveryAgent"""
import unittest
from collections import defaultdict
from mock import MagicMock as Mock, patch, ANY
from parameterized import parameterized, param
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.TransformationSystem.Agent.DataRecoveryAgent import DataRecoveryAgent
from DIRAC.TransformationSystem.Utilities.JobInfo import TaskInfoException
from DIRAC.tests.Utilities.utils import MatchStringWith
__RCSID__ = "$Id$"
MODULE_NAME = 'DIRAC.TransformationSystem.Agent.DataRecoveryAgent'
class TestDRA(unittest.TestCase):
"""Test the DataRecoveryAgent"""
dra = None
@patch("DIRAC.Core.Base.AgentModule.PathFinder", new=Mock())
@patch("DIRAC.ConfigurationSystem.Client.PathFinder.getSystemInstance", new=Mock())
@patch("%s.ReqClient" % MODULE_NAME, new=Mock())
def setUp(self):
self.dra = DataRecoveryAgent(agentName="ILCTransformationSystem/DataRecoveryAgent", loadName="TestDRA")
self.dra.transNoInput = ['MCGeneration']
self.dra.transWithInput = ['MCSimulation', 'MCReconstruction']
self.dra.transformationTypes = ['MCGeneration', 'MCSimulation', 'MCReconstruction']
self.dra.reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
self.dra.tClient = Mock(
name="transMock",
spec=DIRAC.TransformationSystem.Client.TransformationClient.TransformationClient)
self.dra.fcClient = Mock(name="fcMock", spec=DIRAC.Resources.Catalog.FileCatalogClient.FileCatalogClient)
self.dra.jobMon = Mock(
name="jobMonMock",
spec=DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient.JobMonitoringClient)
self.dra.printEveryNJobs = 10
self.dra.log = Mock(name="LogMock")
self.dra.addressTo = 'myself'
self.dra.addressFrom = 'me'
def tearDown(self):
pass
def getTestMock(self, nameID=0, jobID=1234567):
"""create a JobInfo object with mocks"""
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
testJob = Mock(name="jobInfoMock_%s" % nameID, spec=JobInfo)
testJob.jobID = jobID
testJob.tType = "testType"
testJob.otherTasks = []
testJob.errorCounts = []
testJob.status = "Done"
testJob.transFileStatus = ['Assigned', 'Assigned']
testJob.inputFileStatus = ['Exists', 'Exists']
testJob.outputFiles = ["/my/stupid/file.lfn", "/my/stupid/file2.lfn"]
testJob.outputFileStatus = ["Exists", "Exists"]
testJob.inputFiles = ['inputfile.lfn', 'inputfile2.lfn']
testJob.pendingRequest = False
testJob.getTaskInfo = Mock()
return testJob
@patch("DIRAC.Core.Base.AgentModule.PathFinder", new=Mock())
@patch("DIRAC.ConfigurationSystem.Client.PathFinder.getSystemInstance", new=Mock())
@patch("%s.ReqClient" % MODULE_NAME, new=Mock())
def test_init(self):
"""test for DataRecoveryAgent initialisation...................................................."""
res = DataRecoveryAgent(agentName="ILCTransformationSystem/DataRecoveryAgent", loadName="TestDRA")
self.assertIsInstance(res, DataRecoveryAgent)
def test_beginExecution(self):
"""test for DataRecoveryAgent beginExecution...................................................."""
theOps = Mock(name='OpsInstance')
theOps.getValue.side_effect = [['MCGeneration'], ['MCReconstruction', 'Merge']]
with patch('DIRAC.TransformationSystem.Agent.DataRecoveryAgent.Operations', return_value=theOps):
res = self.dra.beginExecution()
assert isinstance(self.dra.transformationTypes, list)
assert set(['MCGeneration', 'MCReconstruction', 'Merge']) == set(self.dra.transformationTypes)
assert set(['MCGeneration']) == set(self.dra.transNoInput)
assert set(['MCReconstruction', 'Merge']) == set(self.dra.transWithInput)
self.assertFalse(self.dra.enabled)
self.assertTrue(res['OK'])
def test_getEligibleTransformations_success(self):
"""test for DataRecoveryAgent getEligibleTransformations success................................"""
transInfoDict = dict(TransformationID=1234, TransformationName="TestProd12", Type="TestProd",
AuthorDN='/some/cert/owner', AuthorGroup='Test_Prod')
self.dra.tClient.getTransformations = Mock(return_value=S_OK([transInfoDict]))
res = self.dra.getEligibleTransformations(status="Active", typeList=['TestProds'])
self.assertTrue(res['OK'])
self.assertIsInstance(res['Value'], dict)
vals = res['Value']
self.assertIn("1234", vals)
self.assertIsInstance(vals['1234'], dict)
self.assertEqual(transInfoDict, vals["1234"])
def test_getEligibleTransformations_failed(self):
"""test for DataRecoveryAgent getEligibleTransformations failure................................"""
self.dra.tClient.getTransformations = Mock(return_value=S_ERROR("No can Do"))
res = self.dra.getEligibleTransformations(status="Active", typeList=['TestProds'])
self.assertFalse(res['OK'])
self.assertEqual("No can Do", res['Message'])
def test_treatTransformation1(self):
"""test for DataRecoveryAgent treatTransformation success1.........................................."""
getJobMock = Mock(name="getJobMOck")
getJobMock.getJobs.return_value = (Mock(name="jobsMOck"), 50, 50)
tinfoMock = Mock(name="infoMock", return_value=getJobMock)
self.dra.checkAllJobs = Mock()
# catch the printout to check path taken
transInfoDict = dict(TransformationID=1234, TransformationName="TestProd12", Type="TestProd",
AuthorDN='/some/cert/owner', AuthorGroup='Test_Prod')
with patch("%s.TransformationInfo" % MODULE_NAME, new=tinfoMock):
self.dra.treatTransformation(1234, transInfoDict) # returns None
# check we start with the summary right away
for _name, args, _kwargs in self.dra.log.notice.mock_calls:
self.assertNotIn('Getting Tasks:', str(args))
def test_treatTransformation2(self):
"""test for DataRecoveryAgent treatTransformation success2.........................................."""
getJobMock = Mock(name="getJobMOck")
getJobMock.getJobs.return_value = (Mock(name="jobsMock"), 50, 50)
tinfoMock = Mock(name="infoMock", return_value=getJobMock)
self.dra.checkAllJobs = Mock()
# catch the printout to check path taken
transInfoDict = dict(TransformationID=1234, TransformationName="TestProd12", Type="MCSimulation",
AuthorDN='/some/cert/owner', AuthorGroup='Test_Prod')
with patch("%s.TransformationInfo" % MODULE_NAME, new=tinfoMock):
self.dra.treatTransformation(1234, transInfoDict) # returns None
self.dra.log.notice.assert_any_call(MatchStringWith("Getting tasks..."))
def test_treatTransformation3(self):
"""test for DataRecoveryAgent treatTransformation skip.............................................."""
getJobMock = Mock(name="getJobMOck")
getJobMock.getJobs.return_value = (Mock(name="jobsMock"), 50, 50)
self.dra.checkAllJobs = Mock()
self.dra.jobCache[1234] = (50, 50)
# catch the printout to check path taken
transInfoDict = dict(TransformationID=1234, TransformationName="TestProd12", Type="TestProd",
AuthorDN='/some/cert/owner', AuthorGroup='Test_Prod')
with patch("%s.TransformationInfo" % MODULE_NAME,
autospec=True,
return_value=getJobMock):
self.dra.treatTransformation(transID=1234, transInfoDict=transInfoDict) # returns None
self.dra.log.notice.assert_called_with(MatchStringWith("Skipping transformation 1234"))
def test_checkJob(self):
"""test for DataRecoveryAgent checkJob No inputFiles............................................."""
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
# Test First option for MCGeneration
tInfoMock.reset_mock()
testJob = JobInfo(jobID=1234567, status="Failed", tID=123, tType="MCGeneration")
testJob.outputFiles = ["/my/stupid/file.lfn"]
testJob.outputFileStatus = ["Exists"]
self.dra.checkJob(testJob, tInfoMock)
self.assertIn("setJobDone", tInfoMock.method_calls[0])
self.assertEqual(self.dra.todo['NoInputFiles'][0]['Counter'], 1)
self.assertEqual(self.dra.todo['NoInputFiles'][1]['Counter'], 0)
# Test Second option for MCGeneration
tInfoMock.reset_mock()
testJob.status = "Done"
testJob.outputFileStatus = ["Missing"]
self.dra.checkJob(testJob, tInfoMock)
self.assertIn("setJobFailed", tInfoMock.method_calls[0])
self.assertEqual(self.dra.todo['NoInputFiles'][0]['Counter'], 1)
self.assertEqual(self.dra.todo['NoInputFiles'][1]['Counter'], 1)
    # Test third option for MCGeneration
tInfoMock.reset_mock()
testJob.status = "Done"
testJob.outputFileStatus = ["Exists"]
self.dra.checkJob(testJob, tInfoMock)
self.assertEqual(tInfoMock.method_calls, [])
self.assertEqual(self.dra.todo['NoInputFiles'][0]['Counter'], 1)
self.assertEqual(self.dra.todo['NoInputFiles'][1]['Counter'], 1)
@parameterized.expand([
param(0, ['setJobDone', 'setInputProcessed'], jStat='Failed', ifStat=[
'Exists'], ofStat=['Exists'], tFiStat=['Assigned'], others=True),
param(1, ['setJobFailed'], ifStat=['Exists'], ofStat=['Missing'], others=True, ifProcessed=['/my/inputfile.lfn']),
param(2, ['setJobFailed', 'cleanOutputs'], ifStat=['Exists'], others=True, ifProcessed=['/my/inputfile.lfn']),
param(3, ['cleanOutputs', 'setJobFailed', 'setInputDeleted'], ifStat=['Missing']),
param(4, ['cleanOutputs', 'setJobFailed'], tFiStat=['Deleted'], ifStat=['Missing']),
param(5, ['setJobDone', 'setInputProcessed'], jStat='Failed', ifStat=['Exists'], tFiStat=['Assigned']),
param(6, ['setJobDone'], jStat='Failed', ifStat=['Exists'], tFiStat=['Processed']),
param(7, ['setInputProcessed'], jStat='Done', ifStat=['Exists'], tFiStat=['Assigned']),
param(8, ['setInputMaxReset'], jStat='Failed', ifStat=['Exists'],
ofStat=['Missing'], tFiStat=['Assigned'], errorCount=[14]),
param(9, ['setInputUnused'], jStat='Failed', ifStat=['Exists'],
ofStat=['Missing'], tFiStat=['Assigned'], errorCount=[2]),
param(10, ['setInputUnused', 'setJobFailed'], jStat='Done',
ifStat=['Exists'], ofStat=['Missing'], tFiStat=['Assigned']),
param(11, ['cleanOutputs', 'setInputUnused'], jStat='Failed', ifStat=[
'Exists'], ofStat=['Missing', 'Exists'], tFiStat=['Assigned']),
param(12, ['cleanOutputs', 'setInputUnused', 'setJobFailed'], jStat='Done',
ifStat=['Exists'], ofStat=['Missing', 'Exists'], tFiStat=['Assigned']),
param(13, ['setJobFailed'], jStat='Done', ifStat=['Exists'], ofStat=['Missing', 'Missing'], tFiStat=['Unused']),
param(14, [], jStat='Strange', ifStat=['Exists'], ofStat=['Exists'], tFiStat=['Processed']),
param(-1, [], jStat='Failed', ifStat=['Exists'], ofStat=['Missing', 'Missing'],
outFiles=['/my/stupid/file.lfn', "/my/stupid/file2.lfn"], tFiStat=['Processed'], others=True),
])
def test_checkJob_others_(self, counter, infoCalls, jID=1234567, jStat='Done', others=False,
inFiles=['/my/inputfile.lfn'], outFiles=['/my/stupid/file.lfn'],
ifStat=[], ofStat=['Exists'], ifProcessed=[],
tFiStat=['Processed'], errorCount=[]):
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
testJob = JobInfo(jobID=jID, status=jStat, tID=123, tType="MCSimulation")
testJob.outputFiles = outFiles
testJob.outputFileStatus = ofStat
testJob.otherTasks = others
testJob.inputFiles = inFiles
testJob.inputFileStatus = ifStat
testJob.transFileStatus = tFiStat
testJob.errorCounts = errorCount
self.dra.inputFilesProcessed = set(ifProcessed)
self.dra.checkJob(testJob, tInfoMock)
gLogger.notice('Testing counter', counter)
gLogger.notice('Expecting calls', infoCalls)
gLogger.notice('Called', tInfoMock.method_calls)
assert len(infoCalls) == len(tInfoMock.method_calls)
for index, infoCall in enumerate(infoCalls):
self.assertIn(infoCall, tInfoMock.method_calls[index])
for count in range(15):
gLogger.notice('Checking Counter:', count)
if count == counter:
self.assertEqual(self.dra.todo['InputFiles'][count]['Counter'], 1)
else:
self.assertEqual(self.dra.todo['InputFiles'][count]['Counter'], 0)
if 0 <= counter <= 2:
assert set(testJob.inputFiles).issubset(self.dra.inputFilesProcessed)
@parameterized.expand([
param(['cleanOutputs', 'setJobFailed']),
param([], jID=667, jStat='Failed', ofStat=['Missing']),
param([], jID=668, jStat='Failed', ofStat=['Missing'], inFiles=['some']),
])
def test_failHard(self, infoCalls, jID=666, jStat='Done', inFiles=None, ofStat=['Exists']):
"""Test the job.failHard function."""
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
tInfoMock.reset_mock()
    testJob = JobInfo(jobID=jID, status=jStat, tID=123, tType='MCSimulation')
testJob.outputFiles = ["/my/stupid/file.lfn"]
testJob.outputFileStatus = ofStat
testJob.otherTasks = True
testJob.inputFiles = inFiles
testJob.inputFileExists = True
testJob.fileStatus = 'Processed'
self.dra.inputFilesProcessed = set()
self.dra._DataRecoveryAgent__failJobHard(testJob, tInfoMock) # pylint: disable=protected-access, no-member
gLogger.notice('Expecting calls', infoCalls)
gLogger.notice('Called', tInfoMock.method_calls)
assert len(infoCalls) == len(tInfoMock.method_calls)
for index, infoCall in enumerate(infoCalls):
self.assertIn(infoCall, tInfoMock.method_calls[index])
if jStat == 'Done':
self.assertIn('Failing job %s' % jID, self.dra.notesToSend)
else:
self.assertNotIn('Failing job %s' % jID, self.dra.notesToSend)
def test_notOnlyKeepers(self):
""" test for __notOnlyKeepers function """
funcToTest = self.dra._DataRecoveryAgent__notOnlyKeepers # pylint: disable=protected-access, no-member
self.assertTrue(funcToTest('MCGeneration'))
self.dra.todo['InputFiles'][0]['Counter'] = 3 # keepers
self.dra.todo['InputFiles'][3]['Counter'] = 0
self.assertFalse(funcToTest("MCSimulation"))
self.dra.todo['InputFiles'][0]['Counter'] = 3 # keepers
self.dra.todo['InputFiles'][3]['Counter'] = 3
self.assertTrue(funcToTest("MCSimulation"))
def test_checkAllJob(self):
"""test for DataRecoveryAgent checkAllJobs ....................................................."""
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
# test with additional task dicts
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
mockJobs = dict([(i, self.getTestMock()) for i in xrange(11)])
mockJobs[2].pendingRequest = True
mockJobs[3].getJobInformation = Mock(side_effect=(RuntimeError('ARGJob1'), None))
mockJobs[4].getTaskInfo = Mock(side_effect=(TaskInfoException('ARG1'), None))
taskDict = True
lfnTaskDict = True
self.dra.checkAllJobs(mockJobs, tInfoMock, taskDict, lfnTaskDict)
self.dra.log.error.assert_any_call(MatchStringWith('+++++ Exception'), 'ARGJob1')
self.dra.log.error.assert_any_call(MatchStringWith("Skip Task, due to TaskInfoException: ARG1"))
self.dra.log.reset_mock()
# test inputFile None
mockJobs = dict([(i, self.getTestMock(nameID=i)) for i in xrange(5)])
mockJobs[1].inputFiles = []
mockJobs[1].getTaskInfo = Mock(side_effect=(TaskInfoException("NoInputFile"), None))
mockJobs[1].tType = "MCSimulation"
tInfoMock.reset_mock()
self.dra.checkAllJobs(mockJobs, tInfoMock, taskDict, lfnTaskDict=True)
self.dra.log.notice.assert_any_call(MatchStringWith("Failing job hard"))
def test_checkAllJob_2(self):
"""Test where failJobHard fails (via cleanOutputs)."""
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
tInfoMock = Mock(name='tInfoMock', spec=TransformationInfo)
mockJobs = dict([(i, self.getTestMock()) for i in xrange(5)])
mockJobs[2].pendingRequest = True
mockJobs[3].getTaskInfo = Mock(side_effect=(TaskInfoException('ARGJob3'), None))
mockJobs[3].inputFiles = []
mockJobs[3].tType = 'MCReconstruction'
self.dra._DataRecoveryAgent__failJobHard = Mock(side_effect=(RuntimeError('ARGJob4'), None), name='FJH')
self.dra.checkAllJobs(mockJobs, tInfoMock, tasksDict=True, lfnTaskDict=True)
mockJobs[3].getTaskInfo.assert_called()
self.dra._DataRecoveryAgent__failJobHard.assert_called()
self.dra.log.error.assert_any_call(MatchStringWith('+++++ Exception'), 'ARGJob4')
self.dra.log.reset_mock()
def test_execute(self):
"""test for DataRecoveryAgent execute .........................................................."""
self.dra.treatTransformation = Mock()
self.dra.transformationsToIgnore = [123, 456, 789]
self.dra.jobCache = defaultdict(lambda: (0, 0))
self.dra.jobCache[123] = (10, 10)
self.dra.jobCache[124] = (10, 10)
self.dra.jobCache[125] = (10, 10)
# Eligible fails
self.dra.log.reset_mock()
self.dra.getEligibleTransformations = Mock(return_value=S_ERROR("outcast"))
res = self.dra.execute()
self.assertFalse(res["OK"])
self.dra.log.error.assert_any_call(ANY, MatchStringWith("outcast"))
self.assertEqual("Failure to get transformations", res['Message'])
d123 = dict(TransformationID=123, TransformationName='TestProd123', Type='MCGeneration',
AuthorDN='/some/cert/owner', AuthorGroup='Test_Prod')
d124 = dict(TransformationID=124, TransformationName='TestProd124', Type='MCGeneration',
AuthorDN='/some/cert/owner', AuthorGroup='Test_Prod')
d125 = dict(TransformationID=125, TransformationName='TestProd125', Type='MCGeneration',
AuthorDN='/some/cert/owner', AuthorGroup='Test_Prod')
# Eligible succeeds
self.dra.log.reset_mock()
self.dra.getEligibleTransformations = Mock(return_value=S_OK({123: d123, 124: d124, 125: d125}))
res = self.dra.execute()
self.assertTrue(res["OK"])
self.dra.log.notice.assert_any_call(MatchStringWith("Will ignore the following transformations: [123, 456, 789]"))
self.dra.log.notice.assert_any_call(MatchStringWith("Ignoring Transformation: 123"))
self.dra.log.notice.assert_any_call(MatchStringWith("Running over Transformation: 124"))
# Notes To Send
self.dra.log.reset_mock()
self.dra.getEligibleTransformations = Mock(return_value=S_OK({123: d123, 124: d124, 125: d125}))
self.dra.notesToSend = "Da hast du deine Karte"
sendmailMock = Mock()
sendmailMock.sendMail.return_value = S_OK("Nice Card")
notificationMock = Mock(return_value=sendmailMock)
with patch("%s.NotificationClient" % MODULE_NAME, new=notificationMock):
res = self.dra.execute()
self.assertTrue(res["OK"])
self.dra.log.notice.assert_any_call(MatchStringWith("Will ignore the following transformations: [123, 456, 789]"))
self.dra.log.notice.assert_any_call(MatchStringWith("Ignoring Transformation: 123"))
self.dra.log.notice.assert_any_call(MatchStringWith("Running over Transformation: 124"))
self.assertNotIn(124, self.dra.jobCache) # was popped
self.assertIn(125, self.dra.jobCache) # was not popped
gLogger.notice("JobCache: %s" % self.dra.jobCache)
# sending notes fails
self.dra.log.reset_mock()
self.dra.notesToSend = "Da hast du deine Karte"
sendmailMock = Mock()
sendmailMock.sendMail.return_value = S_ERROR("No stamp")
notificationMock = Mock(return_value=sendmailMock)
with patch("%s.NotificationClient" % MODULE_NAME, new=notificationMock):
res = self.dra.execute()
self.assertTrue(res["OK"])
self.assertNotIn(124, self.dra.jobCache) # was popped
self.assertIn(125, self.dra.jobCache) # was not popped
self.dra.log.error.assert_any_call(MatchStringWith("Cannot send notification mail"), ANY)
self.assertEqual("", self.dra.notesToSend)
def test_printSummary(self):
"""test DataRecoveryAgent printSummary.........................................................."""
self.dra.notesToSend = ""
self.dra.printSummary()
self.assertNotIn(" Other Tasks --> Keep : 0", self.dra.notesToSend)
self.dra.notesToSend = "Note This"
self.dra.printSummary()
def test_setPendingRequests_1(self):
"""Check the setPendingRequests function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in xrange(11))
reqMock = Mock()
reqMock.Status = "Done"
reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
reqClient.readRequestsForJobs.return_value = S_OK({"Successful": {}})
self.dra.reqClient = reqClient
self.dra.setPendingRequests(mockJobs)
for _index, mj in mockJobs.items():
self.assertFalse(mj.pendingRequest)
def test_setPendingRequests_2(self):
"""Check the setPendingRequests function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in xrange(11))
reqMock = Mock()
reqMock.RequestID = 666
reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
reqClient.readRequestsForJobs.return_value = S_OK({"Successful": {6: reqMock}})
reqClient.getRequestStatus.return_value = {'Value': 'Done'}
self.dra.reqClient = reqClient
self.dra.setPendingRequests(mockJobs)
for _index, mj in mockJobs.items():
self.assertFalse(mj.pendingRequest)
reqClient.getRequestStatus.assert_called_once_with(666)
def test_setPendingRequests_3(self):
"""Check the setPendingRequests function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in xrange(11))
reqMock = Mock()
reqMock.RequestID = 555
reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
reqClient.readRequestsForJobs.return_value = S_OK({'Successful': {5: reqMock}})
reqClient.getRequestStatus.return_value = {'Value': 'Pending'}
self.dra.reqClient = reqClient
self.dra.setPendingRequests(mockJobs)
for index, mj in mockJobs.items():
if index == 5:
self.assertTrue(mj.pendingRequest)
else:
self.assertFalse(mj.pendingRequest)
reqClient.getRequestStatus.assert_called_once_with(555)
def test_setPendingRequests_Fail(self):
"""Check the setPendingRequests function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in xrange(11))
reqMock = Mock()
reqMock.Status = "Done"
reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
reqClient.readRequestsForJobs.side_effect = (S_ERROR('Failure'), S_OK({'Successful': {}}))
self.dra.reqClient = reqClient
self.dra.setPendingRequests(mockJobs)
for _index, mj in mockJobs.items():
self.assertFalse(mj.pendingRequest)
def test_getLFNStatus(self):
"""Check the getLFNStatus function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in xrange(11))
self.dra.fcClient.exists.return_value = S_OK({'Successful':
{'/my/stupid/file.lfn': True,
'/my/stupid/file2.lfn': True}})
lfnExistence = self.dra.getLFNStatus(mockJobs)
self.assertEqual(lfnExistence, {'/my/stupid/file.lfn': True,
'/my/stupid/file2.lfn': True})
self.dra.fcClient.exists.side_effect = (S_ERROR('args'),
S_OK({'Successful':
{'/my/stupid/file.lfn': True,
'/my/stupid/file2.lfn': True}}))
lfnExistence = self.dra.getLFNStatus(mockJobs)
self.assertEqual(lfnExistence, {'/my/stupid/file.lfn': True,
'/my/stupid/file2.lfn': True})
|
fstagni/DIRAC
|
TransformationSystem/test/Test_DRA.py
|
Python
|
gpl-3.0
| 24,555
|
[
"DIRAC"
] |
1bbc3b94d4ce33d62d23ac54a21a4b5015f61b2fbe329e03b3bcbc3aa3c452ca
|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import moose
def makeModel():
# create container for model
model = moose.Neutral( 'model' )
compartment = moose.CubeMesh( '/model/compartment' )
compartment.volume = 1e-20
# the mesh is created automatically by the compartment
mesh = moose.element( '/model/compartment/mesh' )
# create molecules and reactions
a = moose.Pool( '/model/compartment/a' )
b = moose.Pool( '/model/compartment/b' )
c = moose.Pool( '/model/compartment/c' )
enz1 = moose.Enz( '/model/compartment/b/enz1' )
enz2 = moose.Enz( '/model/compartment/c/enz2' )
cplx1 = moose.Pool( '/model/compartment/b/enz1/cplx' )
cplx2 = moose.Pool( '/model/compartment/c/enz2/cplx' )
reac = moose.Reac( '/model/compartment/reac' )
# connect them up for reactions
moose.connect( enz1, 'sub', a, 'reac' )
moose.connect( enz1, 'prd', b, 'reac' )
moose.connect( enz1, 'enz', b, 'reac' )
moose.connect( enz1, 'cplx', cplx1, 'reac' )
moose.connect( enz2, 'sub', b, 'reac' )
moose.connect( enz2, 'prd', a, 'reac' )
moose.connect( enz2, 'enz', c, 'reac' )
moose.connect( enz2, 'cplx', cplx2, 'reac' )
moose.connect( reac, 'sub', a, 'reac' )
moose.connect( reac, 'prd', b, 'reac' )
# connect them up to the compartment for volumes
#for x in ( a, b, c, cplx1, cplx2 ):
# moose.connect( x, 'mesh', mesh, 'mesh' )
# Assign parameters
a.concInit = 1
b.concInit = 0
c.concInit = 0.01
enz1.kcat = 0.4
enz1.Km = 4
enz2.kcat = 0.6
enz2.Km = 0.01
reac.Kf = 0.001
reac.Kb = 0.01
# Create the output tables
graphs = moose.Neutral( '/model/graphs' )
outputA = moose.Table2( '/model/graphs/concA' )
outputB = moose.Table2( '/model/graphs/concB' )
# connect up the tables
moose.connect( outputA, 'requestOut', a, 'getConc' );
moose.connect( outputB, 'requestOut', b, 'getConc' );
'''
# Schedule the whole lot
moose.setClock( 4, 0.01 ) # for the computational objects
moose.setClock( 8, 1.0 ) # for the plots
# The wildcard uses # for single level, and ## for recursive.
moose.useClock( 4, '/model/compartment/##', 'process' )
moose.useClock( 8, '/model/graphs/#', 'process' )
'''
def displayPlots():
for x in moose.wildcardFind( '/model/graphs/conc#' ):
t = numpy.arange( 0, x.vector.size, 1 ) #sec
pylab.plot( t, x.vector, label=x.name )
def main():
"""
This example illustrates how to run a model at different volumes.
The key line is just to set the volume of the compartment::
compt.volume = vol
If everything
else is set up correctly, then this change propagates through to all
    reactions and molecules.
For a deterministic reaction one would not see any change in output
concentrations.
For a stochastic reaction illustrated here, one sees the level of
'noise'
changing, even though the concentrations are similar up to a point.
This example creates a bistable model having two enzymes and a reaction.
One of the enzymes is autocatalytic.
This model is set up within the script rather than using an external
file.
    The model is set up to run using the GSSA (Gillespie Stochastic systems
    algorithm) method in MOOSE.
To run the example, run the script
``python scaleVolumes.py``
and hit ``enter`` every cycle to see the outcome of stochastic
calculations at ever smaller volumes, keeping concentrations the same.
"""
makeModel()
moose.seed( 11111 )
gsolve = moose.Gsolve( '/model/compartment/gsolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
compt = moose.element( '/model/compartment' );
stoich.compartment = compt
stoich.ksolve = gsolve
stoich.reacSystemPath = "/model/compartment/##"
#moose.setClock( 5, 1.0 ) # clock for the solver
#moose.useClock( 5, '/model/compartment/gsolve', 'process' )
a = moose.element( '/model/compartment/a' )
for vol in ( 1e-19, 1e-20, 1e-21, 3e-22, 1e-22, 3e-23, 1e-23 ):
# Set the volume
compt.volume = vol
print(('vol = ', vol, ', a.concInit = ', a.concInit, ', a.nInit = ', a.nInit))
moose.reinit()
moose.start( 100.0 ) # Run the model for 100 seconds.
a = moose.element( '/model/compartment/a' )
b = moose.element( '/model/compartment/b' )
# move most molecules over to b
b.conc = b.conc + a.conc * 0.9
a.conc = a.conc * 0.1
moose.start( 100.0 ) # Run the model for 100 seconds.
# move most molecules back to a
a.conc = a.conc + b.conc * 0.99
b.conc = b.conc * 0.01
moose.start( 100.0 ) # Run the model for 100 seconds.
# Iterate through all plots, dump their contents to data.plot.
displayPlots()
#pylab.show( block=False )
print(('vol = ', vol, 'Close graph to go to next plot'))
pylab.show( )
#print(('vol = ', vol, 'hit 0 to go to next plot'))
#eval( input() )
#eval(str(input()))
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
BhallaLab/moose-examples
|
snippets/scaleVolumes.py
|
Python
|
gpl-2.0
| 6,386
|
[
"MOOSE"
] |
ff7bc079ed53cf37931c9112f0fa2fb7f543dff4f034d1479b7b105f3bf216b9
|
from __future__ import print_function
from __future__ import absolute_import
import sys
from copy import copy
import numpy as nm
from sfepy.base.base import (complex_types, dict_from_keys_init,
assert_, is_derived_class, ordered_iteritems,
insert_static_method, output, get_default,
get_default_attr, Struct, basestr)
from sfepy.base.ioutils import (skip_read_line, look_ahead_line, read_token,
read_array, read_list, pt, enc, dec,
read_from_hdf5, write_to_hdf5,
HDF5ContextManager, get_or_create_hdf5_group)
import os.path as op
import six
from six.moves import range
supported_formats = {
'.mesh' : 'medit',
'.vtk' : 'vtk',
'.node' : 'tetgen',
'.txt' : 'comsol',
'.h5' : 'hdf5',
# Order is important, avs_ucd does not guess -> it is the default.
'.inp' : ('abaqus', 'ansys_cdb', 'avs_ucd'),
'.dat' : 'ansys_cdb',
'.hmascii' : 'hmascii',
'.mesh3d' : 'mesh3d',
'.bdf' : 'nastran',
'.neu' : 'gambit',
'.med' : 'med',
'.cdb' : 'ansys_cdb',
'.msh' : 'msh_v2',
}
# Map mesh formats to read and write capabilities.
# 'r' ... read mesh
# 'w' ... write mesh
# 'rn' ... read nodes for boundary conditions
# 'wn' ... write nodes for boundary conditions
supported_capabilities = {
'medit' : ['r', 'w'],
'vtk' : ['r', 'w'],
'tetgen' : ['r'],
'comsol' : ['r', 'w'],
'hdf5' : ['r', 'w'],
'abaqus' : ['r'],
'avs_ucd' : ['r'],
'hmascii' : ['r'],
'mesh3d' : ['r'],
'nastran' : ['r', 'w'],
'gambit' : ['r', 'rn'],
'med' : ['r'],
'ansys_cdb' : ['r'],
'msh_v2' : ['r'],
}
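# Hedged usage sketch: combining the two tables above to check whether
# the format guessed from a file extension supports writing.
#
#   fmt = supported_formats['.vtk']                  # -> 'vtk'
#   can_write = 'w' in supported_capabilities[fmt]   # -> True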
supported_cell_types = {
'medit' : ['line2', 'tri3', 'quad4', 'tetra4', 'hexa8'],
'vtk' : ['line2', 'tri3', 'quad4', 'tetra4', 'hexa8'],
'tetgen' : ['tetra4'],
'comsol' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'hdf5' : ['user'],
'abaqus' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'avs_ucd' : ['tetra4', 'hexa8'],
'hmascii' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'mesh3d' : ['tetra4', 'hexa8'],
'nastran' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'gambit' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'med' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'ansys_cdb' : ['tetra4', 'hexa8'],
'msh_v2' : ['line2', 'tri3', 'quad4', 'tetra4', 'hexa8'],
'function' : ['user'],
}
def output_mesh_formats(mode='r'):
for key, vals in ordered_iteritems(supported_formats):
if isinstance(vals, basestr):
vals = [vals]
for val in vals:
caps = supported_capabilities[val]
if mode in caps:
output('%s (%s), cell types: %s'
% (val, key, supported_cell_types[val]))
def split_conns_mat_ids(conns_in):
"""
Split connectivities (columns except the last ones in `conns_in`) from cell
groups (the last columns of `conns_in`).
"""
conns, mat_ids = [], []
for conn in conns_in:
conn = nm.asarray(conn, dtype=nm.int32)
conns.append(conn[:, :-1])
mat_ids.append(conn[:, -1])
return conns, mat_ids
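# Hedged usage sketch for split_conns_mat_ids(): the last column of each
# connectivity array carries the material id of the cell.
#
#   conns_in = [nm.array([[0, 1, 2, 7], [1, 2, 3, 7]])]  # two tri3 cells, mat id 7
#   conns, mat_ids = split_conns_mat_ids(conns_in)
#   # conns[0] -> [[0, 1, 2], [1, 2, 3]], mat_ids[0] -> [7, 7]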
def convert_complex_output(out_in):
"""
Convert complex values in the output dictionary `out_in` to pairs of
real and imaginary parts.
"""
out = {}
for key, val in six.iteritems(out_in):
if val.data.dtype in complex_types:
rval = copy(val)
rval.data = val.data.real
out['real(%s)' % key] = rval
ival = copy(val)
ival.data = val.data.imag
out['imag(%s)' % key] = ival
else:
out[key] = val
return out
def _read_bounding_box(fd, dim, node_key,
c0=0, ndplus=1, ret_fd=False, ret_dim=False):
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == node_key:
num = int(read_token(fd))
nod = read_array(fd, num, dim + ndplus, nm.float64)
break
bbox = nm.vstack((nm.amin(nod[:,c0:(dim + c0)], 0),
nm.amax(nod[:,c0:(dim + c0)], 0)))
if ret_dim:
if ret_fd:
return bbox, dim, fd
else:
fd.close()
return bbox, dim
else:
if ret_fd:
return bbox, fd
else:
fd.close()
return bbox
class MeshIO(Struct):
"""
The abstract class for importing and exporting meshes.
Read the docstring of the Mesh() class. Basically all you need to do is to
implement the read() method::
def read(self, mesh, **kwargs):
nodes = ...
ngroups = ...
conns = ...
mat_ids = ...
descs = ...
mesh._set_io_data(nodes, ngroups, conns, mat_ids, descs)
return mesh
See the Mesh class' docstring how the nodes, ngroups, conns, mat_ids and
descs should look like. You just need to read them from your specific
format from disk.
To write a mesh to disk, just implement the write() method and use the
information from the mesh instance (e.g. nodes, conns, mat_ids and descs)
to construct your specific format.
The methods read_dimension(), read_bounding_box() should be implemented in
subclasses, as it is often possible to get that kind of information without
reading the whole mesh file.
Optionally, subclasses can implement read_data() to read also computation
results. This concerns mainly the subclasses with implemented write()
supporting the 'out' kwarg.
    The default implementation of read_last_step() just returns 0. It should be
reimplemented in subclasses capable of storing several steps.
"""
format = None
call_msg = 'called an abstract MeshIO instance!'
def __init__(self, filename, **kwargs):
Struct.__init__(self, filename=filename, **kwargs)
self.set_float_format()
def get_filename_trunk(self):
if isinstance(self.filename, basestr):
trunk = op.splitext(self.filename)[0]
else:
trunk = 'from_descriptor'
return trunk
def read_dimension(self, ret_fd=False):
raise ValueError(MeshIO.call_msg)
def read_bounding_box(self, ret_fd=False, ret_dim=False):
raise ValueError(MeshIO.call_msg)
def read_last_step(self):
"""The default implementation: just return 0 as the last step."""
return 0
def read_times(self, filename=None):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
Notes
-----
The default implementation returns empty arrays.
"""
aux = nm.array([0.0], dtype=nm.float64)
return aux.astype(nm.int32), aux, aux
def read(self, mesh, omit_facets=False, **kwargs):
raise ValueError(MeshIO.call_msg)
def write(self, filename, mesh, **kwargs):
raise ValueError(MeshIO.call_msg)
def read_data(self, step, filename=None, cache=None):
raise ValueError(MeshIO.call_msg)
def set_float_format(self, format=None):
self.float_format = get_default(format, '%e')
def get_vector_format(self, dim):
return ' '.join([self.float_format] * dim)
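# ---------------------------------------------------------------------
# Hedged sketch (not part of the original module): a minimal MeshIO
# subclass following the read() contract described in the MeshIO
# docstring above. The hard-coded single-triangle data stands in for
# actual file parsing and is purely illustrative.
class _ToyMeshIO(MeshIO):
    format = 'toy'

    def read(self, mesh, **kwargs):
        coors = nm.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # vertex coordinates
        ngroups = nm.zeros(3, dtype=nm.int32)                   # one group id per vertex
        conns = [nm.array([[0, 1, 2]], dtype=nm.int32)]         # a single tri3 cell
        mat_ids = [nm.array([0], dtype=nm.int32)]               # its material id
        descs = ['2_3']                                         # cell type descriptor
        mesh._set_io_data(coors, ngroups, conns, mat_ids, descs)
        return mesh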
class UserMeshIO(MeshIO):
"""
Special MeshIO subclass that enables reading and writing a mesh using a
user-supplied function.
"""
format = 'function'
def __init__(self, filename, **kwargs):
assert_(hasattr(filename, '__call__'))
self.function = filename
MeshIO.__init__(self, filename='function:%s' % self.function.__name__,
**kwargs)
def get_filename_trunk(self):
return self.filename
def read(self, mesh, *args, **kwargs):
aux = self.function(mesh, mode='read')
if aux is not None:
mesh = aux
self.filename = mesh.name
return mesh
def write(self, filename, mesh, *args, **kwargs):
self.function(mesh, mode='write')
class MeditMeshIO(MeshIO):
format = 'medit'
def read_dimension(self, ret_fd=False):
fd = open(self.filename, 'r')
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == 'Dimension':
if len(line) == 2:
dim = int(line[1])
else:
dim = int(fd.readline())
break
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read_bounding_box(self, ret_fd=False, ret_dim=False):
fd = open(self.filename, 'r')
dim, fd = self.read_dimension(ret_fd=True)
return _read_bounding_box(fd, dim, 'Vertices',
ret_fd=ret_fd, ret_dim=ret_dim)
def read(self, mesh, omit_facets=False, **kwargs):
dim, fd = self.read_dimension(ret_fd=True)
conns_in = []
descs = []
def _read_cells(dimension, size, has_id=True):
num = int(read_token(fd))
data = read_array(fd, num, size + 1 * has_id, nm.int32)
if omit_facets and (dimension < dim): return
data[:, :-1] -= 1
conns_in.append(data)
descs.append('%i_%i' % (dimension, size))
while 1:
line = skip_read_line(fd).split()
if not line:
break
ls = line[0]
if (ls == 'Vertices'):
num = int(read_token(fd))
nod = read_array(fd, num, dim + 1, nm.float64)
elif (ls == 'Corners'):
_read_cells(1, 1, False)
elif (ls == 'Edges'):
_read_cells(1, 2)
elif (ls == 'Tetrahedra'):
_read_cells(3, 4)
elif (ls == 'Hexahedra'):
_read_cells(3, 8)
elif (ls == 'Triangles'):
_read_cells(2, 3)
elif (ls == 'Quadrilaterals'):
_read_cells(2, 4)
elif ls == 'End':
break
elif line[0] == '#':
continue
else:
output('skipping unknown entity: %s' % line)
continue
fd.close()
# Detect wedges and pyramides -> separate groups.
if ('3_8' in descs):
ic = descs.index('3_8')
conn_in = conns_in.pop(ic)
flag = nm.zeros((conn_in.shape[0],), nm.int32)
for ii, el in enumerate(conn_in):
if (el[4] == el[5]):
if (el[5] == el[6]):
flag[ii] = 2
else:
flag[ii] = 1
conn = []
desc = []
ib = nm.where(flag == 0)[0]
if (len(ib) > 0):
conn.append(conn_in[ib])
desc.append('3_8')
iw = nm.where(flag == 1)[0]
if (len(iw) > 0):
ar = nm.array([0,1,2,3,4,6], nm.int32)
conn.append(conn_in[iw[:, None], ar])
desc.append('3_6')
ip = nm.where(flag == 2)[0]
if (len(ip) > 0):
ar = nm.array([0,1,2,3,4], nm.int32)
conn.append(conn_in[ip[:, None], ar])
desc.append('3_5')
conns_in[ic:ic] = conn
del(descs[ic])
descs[ic:ic] = desc
conns, mat_ids = split_conns_mat_ids(conns_in)
mesh._set_io_data(nod[:,:-1], nod[:,-1], conns, mat_ids, descs)
return mesh
def write(self, filename, mesh, out=None, **kwargs):
fd = open(filename, 'w')
coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()
n_nod, dim = coors.shape
fd.write("MeshVersionFormatted 1\nDimension %d\n" % dim)
fd.write("Vertices\n%d\n" % n_nod)
format = self.get_vector_format(dim) + ' %d\n'
for ii in range(n_nod):
nn = tuple(coors[ii]) + (ngroups[ii],)
fd.write(format % tuple(nn))
for ig, conn in enumerate(conns):
ids = mat_ids[ig]
if (desc[ig] == "1_1"):
fd.write("Corners\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d\n"
% nn[0])
elif (desc[ig] == "1_2"):
fd.write("Edges\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d\n"
% (nn[0], nn[1], ids[ii]))
elif (desc[ig] == "2_4"):
fd.write("Quadrilaterals\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d %d %d\n"
% (nn[0], nn[1], nn[2], nn[3], ids[ii]))
elif (desc[ig] == "2_3"):
fd.write("Triangles\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d %d\n" % (nn[0], nn[1], nn[2], ids[ii]))
elif (desc[ig] == "3_4"):
fd.write("Tetrahedra\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d %d %d\n"
% (nn[0], nn[1], nn[2], nn[3], ids[ii]))
elif (desc[ig] == "3_8"):
fd.write("Hexahedra\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d %d %d %d %d %d %d\n"
% (nn[0], nn[1], nn[2], nn[3], nn[4], nn[5],
nn[6], nn[7], ids[ii]))
else:
raise ValueError('unknown element type! (%s)' % desc[ig])
fd.close()
if out is not None:
for key, val in six.iteritems(out):
raise NotImplementedError
vtk_header = r"""x vtk DataFile Version 2.0
step %d time %e normalized time %e, generated by %s
ASCII
DATASET UNSTRUCTURED_GRID
"""
vtk_cell_types = {'1_1' : 1, '1_2' : 3, '2_2' : 3, '3_2' : 3,
'2_3' : 5, '2_4' : 9, '3_4' : 10, '3_8' : 12}
vtk_dims = {1 : 1, 3 : 1, 5 : 2, 9 : 2, 10 : 3, 12 : 3}
vtk_inverse_cell_types = {3 : '1_2', 5 : '2_3', 8 : '2_4', 9 : '2_4',
10 : '3_4', 11 : '3_8', 12 : '3_8'}
vtk_remap = {8 : nm.array([0, 1, 3, 2], dtype=nm.int32),
11 : nm.array([0, 1, 3, 2, 4, 5, 7, 6], dtype=nm.int32)}
vtk_remap_keys = list(vtk_remap.keys())
class VTKMeshIO(MeshIO):
format = 'vtk'
def read_coors(self, ret_fd=False):
fd = open(self.filename, 'r')
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == 'POINTS':
n_nod = int(line[1])
coors = read_array(fd, n_nod, 3, nm.float64)
break
if ret_fd:
return coors, fd
else:
fd.close()
return coors
def get_dimension(self, coors):
dz = nm.diff(coors[:,2])
if nm.allclose(dz, 0.0):
dim = 2
else:
dim = 3
return dim
def read_dimension(self, ret_fd=False):
coors, fd = self.read_coors(ret_fd=True)
dim = self.get_dimension(coors)
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read_bounding_box(self, ret_fd=False, ret_dim=False):
coors, fd = self.read_coors(ret_fd=True)
dim = self.get_dimension(coors)
bbox = nm.vstack((nm.amin(coors[:,:dim], 0),
nm.amax(coors[:,:dim], 0)))
if ret_dim:
if ret_fd:
return bbox, dim, fd
else:
fd.close()
return bbox, dim
else:
if ret_fd:
return bbox, fd
else:
fd.close()
return bbox
def read(self, mesh, **kwargs):
fd = open(self.filename, 'r')
mode = 'header'
mode_status = 0
coors = conns = mat_id = node_grps = None
finished = 0
while 1:
line = skip_read_line(fd)
if not line:
break
if mode == 'header':
if mode_status == 0:
if line.strip() == 'ASCII':
mode_status = 1
elif mode_status == 1:
if line.strip() == 'DATASET UNSTRUCTURED_GRID':
mode_status = 0
mode = 'points'
elif mode == 'points':
line = line.split()
if line[0] == 'POINTS':
n_nod = int(line[1])
coors = read_array(fd, n_nod, 3, nm.float64)
mode = 'cells'
elif mode == 'cells':
line = line.split()
if line[0] == 'CELLS':
n_el, n_val = map(int, line[1:3])
raw_conn = read_list(fd, n_val, int)
mode = 'cell_types'
elif mode == 'cell_types':
line = line.split()
if line[0] == 'CELL_TYPES':
assert_(int(line[1]) == n_el)
cell_types = read_array(fd, n_el, 1, nm.int32)
mode = 'cp_data'
elif mode == 'cp_data':
line = line.split()
if line[0] == 'CELL_DATA':
assert_(int(line[1]) == n_el)
mode_status = 1
mode = 'mat_id'
elif line[0] == 'POINT_DATA':
assert_(int(line[1]) == n_nod)
mode_status = 1
mode = 'node_groups'
elif mode == 'mat_id':
if mode_status == 1:
if 'SCALARS mat_id int' in line.strip():
mode_status = 2
elif mode_status == 2:
if line.strip() == 'LOOKUP_TABLE default':
mat_id = read_list(fd, n_el, int)
mode_status = 0
mode = 'cp_data'
finished += 1
elif mode == 'node_groups':
if mode_status == 1:
if 'SCALARS node_groups int' in line.strip():
mode_status = 2
elif mode_status == 2:
if line.strip() == 'LOOKUP_TABLE default':
node_grps = read_list(fd, n_nod, int)
mode_status = 0
mode = 'cp_data'
finished += 1
elif finished >= 2:
break
fd.close()
if mat_id is None:
mat_id = [[0]] * n_el
else:
if len(mat_id) < n_el:
mat_id = [[ii] for jj in mat_id for ii in jj]
if node_grps is None:
node_grps = [0] * n_nod
else:
if len(node_grps) < n_nod:
node_grps = [ii for jj in node_grps for ii in jj]
dim = self.get_dimension(coors)
if dim == 2:
coors = coors[:,:2]
coors = nm.ascontiguousarray(coors)
cell_types = cell_types.squeeze()
dconns = {}
for iel, row in enumerate(raw_conn):
vct = cell_types[iel]
if vct not in vtk_inverse_cell_types:
continue
ct = vtk_inverse_cell_types[vct]
dconns.setdefault(vct, []).append(row[1:] + mat_id[iel])
descs = []
conns = []
mat_ids = []
for ct, conn in six.iteritems(dconns):
sct = vtk_inverse_cell_types[ct]
descs.append(sct)
aux = nm.array(conn, dtype=nm.int32)
aconn = aux[:, :-1]
if ct in vtk_remap_keys: # Remap pixels and voxels.
aconn[:] = aconn[:, vtk_remap[ct]]
conns.append(aconn)
mat_ids.append(aux[:, -1])
mesh._set_io_data(coors, node_grps, conns, mat_ids, descs)
return mesh
def write(self, filename, mesh, out=None, ts=None, **kwargs):
def _reshape_tensors(data, dim, sym, nc):
if dim == 3:
if nc == sym:
aux = data[:, [0,3,4,3,1,5,4,5,2]]
elif nc == (dim * dim):
aux = data[:, [0,3,4,6,1,5,7,8,2]]
else:
aux = data.reshape((data.shape[0], dim*dim))
else:
zz = nm.zeros((data.shape[0], 1), dtype=nm.float64)
if nc == sym:
aux = nm.c_[data[:,[0,2]], zz, data[:,[2,1]],
zz, zz, zz, zz]
elif nc == (dim * dim):
aux = nm.c_[data[:,[0,2]], zz, data[:,[3,1]],
zz, zz, zz, zz]
else:
aux = nm.c_[data[:,0,[0,1]], zz, data[:,1,[0,1]],
zz, zz, zz, zz]
return aux
def _write_tensors(data):
format = self.get_vector_format(3)
format = '\n'.join([format] * 3) + '\n\n'
for row in aux:
fd.write(format % tuple(row))
if ts is None:
step, time, nt = 0, 0.0, 0.0
else:
step, time, nt = ts.step, ts.time, ts.nt
coors, ngroups, conns, mat_ids, descs = mesh._get_io_data()
fd = open(filename, 'w')
fd.write(vtk_header % (step, time, nt, op.basename(sys.argv[0])))
n_nod, dim = coors.shape
sym = (dim + 1) * dim // 2
fd.write('\nPOINTS %d float\n' % n_nod)
aux = coors
if dim < 3:
aux = nm.hstack((aux, nm.zeros((aux.shape[0], 3 - dim),
dtype=aux.dtype)))
format = self.get_vector_format(3) + '\n'
for row in aux:
fd.write(format % tuple(row))
n_el = mesh.n_el
n_els, n_e_ps = nm.array([conn.shape for conn in conns]).T
total_size = nm.dot(n_els, n_e_ps + 1)
fd.write('\nCELLS %d %d\n' % (n_el, total_size))
ct = []
for ig, conn in enumerate(conns):
nn = n_e_ps[ig] + 1
ct += [vtk_cell_types[descs[ig]]] * n_els[ig]
format = ' '.join(['%d'] * nn + ['\n'])
for row in conn:
fd.write(format % ((nn-1,) + tuple(row)))
fd.write('\nCELL_TYPES %d\n' % n_el)
fd.write(''.join(['%d\n' % ii for ii in ct]))
fd.write('\nPOINT_DATA %d\n' % n_nod)
# node groups
fd.write('\nSCALARS node_groups int 1\nLOOKUP_TABLE default\n')
fd.write(''.join(['%d\n' % ii for ii in ngroups]))
if out is not None:
point_keys = [key for key, val in six.iteritems(out)
if val.mode == 'vertex']
else:
point_keys = {}
for key in point_keys:
val = out[key]
nr, nc = val.data.shape
if nc == 1:
fd.write('\nSCALARS %s float %d\n' % (key, nc))
fd.write('LOOKUP_TABLE default\n')
format = self.float_format + '\n'
for row in val.data:
fd.write(format % row)
elif nc == dim:
fd.write('\nVECTORS %s float\n' % key)
if dim == 2:
aux = nm.hstack((val.data,
nm.zeros((nr, 1), dtype=nm.float64)))
else:
aux = val.data
format = self.get_vector_format(3) + '\n'
for row in aux:
fd.write(format % tuple(row))
elif (nc == sym) or (nc == (dim * dim)):
fd.write('\nTENSORS %s float\n' % key)
aux = _reshape_tensors(val.data, dim, sym, nc)
_write_tensors(aux)
else:
raise NotImplementedError(nc)
if out is not None:
cell_keys = [key for key, val in six.iteritems(out)
if val.mode == 'cell']
else:
cell_keys = {}
fd.write('\nCELL_DATA %d\n' % n_el)
# cells - mat_id
fd.write('SCALARS mat_id int 1\nLOOKUP_TABLE default\n')
aux = nm.hstack(mat_ids).tolist()
fd.write(''.join(['%d\n' % ii for ii in aux]))
for key in cell_keys:
val = out[key]
ne, aux, nr, nc = val.data.shape
if (nr == 1) and (nc == 1):
fd.write('\nSCALARS %s float %d\n' % (key, nc))
fd.write('LOOKUP_TABLE default\n')
format = self.float_format + '\n'
aux = val.data.squeeze()
if len(aux.shape) == 0:
fd.write(format % aux)
else:
for row in aux:
fd.write(format % row)
elif (nr == dim) and (nc == 1):
fd.write('\nVECTORS %s float\n' % key)
if dim == 2:
aux = nm.hstack((val.data.squeeze(),
nm.zeros((ne, 1), dtype=nm.float64)))
else:
aux = val.data
format = self.get_vector_format(3) + '\n'
for row in aux:
fd.write(format % tuple(row.squeeze()))
elif (((nr == sym) or (nr == (dim * dim))) and (nc == 1)) \
or ((nr == dim) and (nc == dim)):
fd.write('\nTENSORS %s float\n' % key)
data = val.data.squeeze()
aux = _reshape_tensors(data, dim, sym, nr)
_write_tensors(aux)
else:
raise NotImplementedError(nr, nc)
fd.close()
# Mark the write finished.
fd = open(filename, 'r+')
fd.write('#')
fd.close()
def read_data(self, step, filename=None, cache=None):
filename = get_default(filename, self.filename)
out = {}
dim, fd = self.read_dimension(ret_fd=True)
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == 'POINT_DATA':
break
num = int(line[1])
mode = 'vertex'
while 1:
line = skip_read_line(fd)
if not line:
break
line = line.split()
if line[0] == 'SCALARS':
name, dtype, nc = line[1:]
assert_(int(nc) == 1)
fd.readline() # skip lookup table line
data = nm.empty((num,), dtype=nm.float64)
for ii in range(num):
data[ii] = float(fd.readline())
out[name] = Struct(name=name, mode=mode, data=data,
dofs=None)
elif line[0] == 'VECTORS':
name, dtype = line[1:]
data = nm.empty((num, dim), dtype=nm.float64)
for ii in range(num):
data[ii] = [float(val)
for val in fd.readline().split()][:dim]
out[name] = Struct(name=name, mode=mode, data=data,
dofs=None)
elif line[0] == 'TENSORS':
name, dtype = line[1:]
data3 = nm.empty((3 * num, 3), dtype=nm.float64)
ii = 0
while ii < 3 * num:
aux = [float(val) for val in fd.readline().split()]
if not len(aux): continue
data3[ii] = aux
ii += 1
data = data3.reshape((-1, 1, 3, 3))[..., :dim, :dim]
out[name] = Struct(name=name, mode=mode, data=data,
dofs=None)
elif line[0] == 'CELL_DATA':
num = int(line[1])
mode = 'cell'
else:
line = fd.readline()
fd.close()
return out
class TetgenMeshIO(MeshIO):
format = "tetgen"
def read(self, mesh, **kwargs):
import os
fname = os.path.splitext(self.filename)[0]
nodes = self.getnodes(fname+".node")
etype, elements, regions = self.getele(fname+".ele")
descs = []
conns = []
mat_ids = []
elements = nm.array(elements, dtype=nm.int32) - 1
for key, value in six.iteritems(regions):
descs.append(etype)
mat_ids.append(nm.ones_like(value) * key)
conns.append(elements[nm.array(value)-1].copy())
mesh._set_io_data(nodes, None, conns, mat_ids, descs)
return mesh
@staticmethod
def getnodes(fnods):
"""
Reads t.1.nodes, returns a list of nodes.
Example:
>>> self.getnodes("t.1.node")
[(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (-4.0, 0.0, 0.0),
(0.0, 0.0, 4.0), (0.0, -4.0, 0.0), (0.0, -0.0, -4.0), (-2.0, 0.0,
-2.0), (-2.0, 2.0, 0.0), (0.0, 2.0, -2.0), (0.0, -2.0, -2.0), (2.0,
0.0, -2.0), (2.0, 2.0, 0.0), ... ]
"""
f = open(fnods)
l = [int(x) for x in f.readline().split()]
npoints, dim, nattrib, nbound = l
if dim == 2:
ndapp = [0.0]
else:
ndapp = []
nodes = []
for line in f:
if line[0] == "#": continue
l = [float(x) for x in line.split()]
l = l[:(dim + 1)]
assert_(int(l[0]) == len(nodes)+1)
l = l[1:]
nodes.append(tuple(l + ndapp))
assert_(npoints == len(nodes))
return nodes
@staticmethod
def getele(fele):
"""
Reads t.1.ele, returns a list of elements.
Example:
>>> elements, regions = self.getele("t.1.ele")
>>> elements
[(20, 154, 122, 258), (86, 186, 134, 238), (15, 309, 170, 310), (146,
229, 145, 285), (206, 207, 125, 211), (99, 193, 39, 194), (185, 197,
158, 225), (53, 76, 74, 6), (19, 138, 129, 313), (23, 60, 47, 96),
(119, 321, 1, 329), (188, 296, 122, 322), (30, 255, 177, 256), ...]
>>> regions
{100: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 7, ...],
...}
"""
f = open(fele)
l = [int(x) for x in f.readline().split()]
ntetra,nnod,nattrib = l
#we have either linear or quadratic tetrahedra:
elem = None
if nnod in [4,10]:
elem = '3_4'
linear = (nnod == 4)
if nnod in [3, 7]:
elem = '2_3'
linear = (nnod == 3)
if elem is None or not linear:
raise ValueError("Only linear triangle and tetrahedra reader"
" is implemented")
els = []
regions = {}
for line in f:
if line[0] == "#": continue
l = [int(x) for x in line.split()]
if elem == '2_3':
assert_((len(l) - 1 - nattrib) == 3)
els.append((l[1],l[2],l[3]))
if elem == '3_4':
assert_((len(l) - 1 - nattrib) == 4)
els.append((l[1],l[2],l[3],l[4]))
if nattrib == 1:
regionnum = l[-1]
else:
regionnum = 1
if regionnum == 0:
msg = "see %s, element # %d\n"%(fele,l[0])
msg += "there are elements not belonging to any physical entity"
raise ValueError(msg)
if regionnum in regions:
regions[regionnum].append(l[0])
else:
regions[regionnum]=[l[0]]
assert_(l[0] == len(els))
return elem, els, regions
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
def read_dimension(self):
# TetGen only supports 3D mesh
return 3
def read_bounding_box(self):
raise NotImplementedError
class ComsolMeshIO(MeshIO):
format = 'comsol'
def _read_commented_int(self):
return int(skip_read_line(self.fd).split('#')[0])
def _skip_comment(self):
read_token(self.fd)
self.fd.readline()
def read(self, mesh, **kwargs):
self.fd = fd = open(self.filename, 'r')
mode = 'header'
coors = conns = None
while 1:
if mode == 'header':
line = skip_read_line(fd)
n_tags = self._read_commented_int()
for ii in range(n_tags):
skip_read_line(fd)
n_types = self._read_commented_int()
for ii in range(n_types):
skip_read_line(fd)
skip_read_line(fd)
assert_(skip_read_line(fd).split()[1] == 'Mesh')
skip_read_line(fd)
dim = self._read_commented_int()
assert_((dim == 2) or (dim == 3))
n_nod = self._read_commented_int()
i0 = self._read_commented_int()
mode = 'points'
elif mode == 'points':
self._skip_comment()
coors = read_array(fd, n_nod, dim, nm.float64)
mode = 'cells'
elif mode == 'cells':
n_types = self._read_commented_int()
conns = []
descs = []
mat_ids = []
for it in range(n_types):
t_name = skip_read_line(fd).split()[1]
n_ep = self._read_commented_int()
n_el = self._read_commented_int()
self._skip_comment()
aux = read_array(fd, n_el, n_ep, nm.int32)
if t_name == 'tri':
conns.append(aux)
descs.append('2_3')
is_conn = True
elif t_name == 'quad':
# Rearrange element node order to match SfePy.
aux = aux[:,(0,1,3,2)]
conns.append(aux)
descs.append('2_4')
is_conn = True
elif t_name == 'hex':
# Rearrange element node order to match SfePy.
aux = aux[:,(0,1,3,2,4,5,7,6)]
conns.append(aux)
descs.append('3_8')
is_conn = True
elif t_name == 'tet':
conns.append(aux)
descs.append('3_4')
is_conn = True
else:
is_conn = False
# Skip parameters.
n_pv = self._read_commented_int()
n_par = self._read_commented_int()
for ii in range(n_par):
skip_read_line(fd)
n_domain = self._read_commented_int()
assert_(n_domain == n_el)
if is_conn:
self._skip_comment()
mat_id = read_array(fd, n_domain, 1, nm.int32)
mat_ids.append(mat_id)
else:
for ii in range(n_domain):
skip_read_line(fd)
# Skip up/down pairs.
n_ud = self._read_commented_int()
for ii in range(n_ud):
skip_read_line(fd)
break
fd.close()
self.fd = None
mesh._set_io_data(coors, None, conns, mat_ids, descs)
return mesh
def write(self, filename, mesh, out=None, **kwargs):
def write_elements(fd, ig, conn, mat_ids, type_name,
npe, format, norder, nm_params):
fd.write("# Type #%d\n\n" % ig)
fd.write("%s # type name\n\n\n" % type_name)
fd.write("%d # number of nodes per element\n" % npe)
fd.write("%d # number of elements\n" % conn.shape[0])
fd.write("# Elements\n")
for ii in range(conn.shape[0]):
nn = conn[ii] # Zero based
fd.write(format % tuple(nn[norder]))
fd.write("\n%d # number of parameter values per element\n"
% nm_params)
# Top level always 0?
fd.write("0 # number of parameters\n")
fd.write("# Parameters\n\n")
fd.write("%d # number of domains\n"
% sum([mi.shape[0] for mi in mat_ids]))
fd.write("# Domains\n")
for mi in mat_ids:
# Domains in comsol have to be > 0
if (mi <= 0).any():
mi += mi.min() + 1
for dom in mi:
fd.write("%d\n" % abs(dom))
fd.write("\n0 # number of up/down pairs\n")
fd.write("# Up/down\n")
fd = open(filename, 'w')
coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()
n_nod, dim = coors.shape
# Header
fd.write("# Created by SfePy\n\n\n")
fd.write("# Major & minor version\n")
fd.write("0 1\n")
fd.write("1 # number of tags\n")
fd.write("# Tags\n")
fd.write("2 m1\n")
fd.write("1 # number of types\n")
fd.write("# Types\n")
fd.write("3 obj\n\n")
# Record
fd.write("# --------- Object 0 ----------\n\n")
fd.write("0 0 1\n") # version unused serializable
fd.write("4 Mesh # class\n")
fd.write("1 # version\n")
fd.write("%d # sdim\n" % dim)
fd.write("%d # number of mesh points\n" % n_nod)
fd.write("0 # lowest mesh point index\n\n") # Always zero in SfePy
fd.write("# Mesh point coordinates\n")
format = self.get_vector_format(dim) + '\n'
for ii in range(n_nod):
nn = tuple(coors[ii])
fd.write(format % tuple(nn))
fd.write("\n%d # number of element types\n\n\n" % len(conns))
for ig, conn in enumerate(conns):
if (desc[ig] == "2_4"):
write_elements(fd, ig, conn, mat_ids,
"4 quad", 4, "%d %d %d %d\n", [0, 1, 3, 2], 8)
elif (desc[ig] == "2_3"):
# TODO: Verify number of parameters for tri element
write_elements(fd, ig, conn, mat_ids,
"3 tri", 3, "%d %d %d\n", [0, 1, 2], 4)
elif (desc[ig] == "3_4"):
# TODO: Verify number of parameters for tet element
write_elements(fd, ig, conn, mat_ids,
"3 tet", 4, "%d %d %d %d\n", [0, 1, 2, 3], 16)
elif (desc[ig] == "3_8"):
write_elements(fd, ig, conn, mat_ids,
"3 hex", 8, "%d %d %d %d %d %d %d %d\n",
[0, 1, 3, 2, 4, 5, 7, 6], 24)
else:
raise ValueError('unknown element type! (%s)' % desc[ig])
fd.close()
if out is not None:
for key, val in six.iteritems(out):
raise NotImplementedError
class HDF5MeshIO(MeshIO):
format = "hdf5"
import string
_all = ''.join(map(chr, list(range(256))))
_letters = string.ascii_letters + string.digits + '_'
_rubbish = ''.join([ch for ch in set(_all) - set(_letters)])
if sys.version_info[0] >= 3:
_tr = str.maketrans(_rubbish, '_' * len(_rubbish))
else:
_tr = string.maketrans(_rubbish, '_' * len(_rubbish))
@staticmethod
def read_mesh_from_hdf5(filename, group=None, mesh=None):
"""
        Read the mesh from an HDF5 file.
filename: str or tables.File
The HDF5 file to read the mesh from.
group: tables.group.Group or str, optional
The HDF5 file group to read the mesh from.
If None, the root group is used.
        mesh: sfepy.discrete.fem.Mesh or None
            If None, a new mesh is created and returned, otherwise the
            content of this argument is replaced by the read mesh.
Returns
-------
        sfepy.discrete.fem.Mesh
            the read mesh
"""
with HDF5ContextManager(filename, mode='r') as fd:
if group is None:
group = fd.root
elif not isinstance(group, pt.group.Group):
group = fd.get_node(group)
set_shape_info = mesh is None
if mesh is None:
from .mesh import Mesh
mesh = Mesh('mesh')
mesh.name = dec(group.name.read())
coors = group.coors.read()
ngroups = group.ngroups.read()
n_gr = group.n_gr.read()
conns = []
descs = []
mat_ids = []
for ig in range(n_gr):
gr_name = 'group%d' % ig
conn_group = group._f_get_child(gr_name)
conns.append(conn_group.conn.read())
mat_ids.append(conn_group.mat_id.read())
descs.append(dec(conn_group.desc.read()))
nodal_bcs = {}
try:
node_sets_groups = group.node_sets
except:
pass
else:
for group in node_sets_groups:
key = dec(group.key.read())
nods = group.nods.read()
nodal_bcs[key] = nods
mesh._set_io_data(coors, ngroups, conns, mat_ids, descs,
nodal_bcs=nodal_bcs)
if set_shape_info:
mesh._set_shape_info()
return mesh
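    # Usage sketch (hypothetical file name; the group may also be given as
    # a path string, resolved via fd.get_node()):
    #
    #   mesh = HDF5MeshIO.read_mesh_from_hdf5('results.h5', '/mesh')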
@staticmethod
def write_mesh_to_hdf5(filename, group, mesh):
"""
Write mesh to a hdf5 file.
filename: str or tables.File
The HDF5 file to write the mesh to.
group: tables.group.Group or None or str
The HDF5 file group to write the mesh to.
If None, the root group is used.
The group can be given as a path from root, e.g. /path/to/mesh
mesh: sfepy.dicrete.fem.Mesh
The mesh to write.
"""
with HDF5ContextManager(filename, mode='w') as fd:
if group is None:
group = fd.root
elif not isinstance(group, pt.group.Group):
group = get_or_create_hdf5_group(fd, group)
coors, ngroups, conns, mat_ids, descs = mesh._get_io_data()
fd.create_array(group, 'name', enc(mesh.name), 'name')
fd.create_array(group, 'coors', coors, 'coors')
fd.create_array(group, 'ngroups', ngroups, 'ngroups')
fd.create_array(group, 'n_gr', len(conns), 'n_gr')
for ig, conn in enumerate(conns):
conn_group = fd.create_group(group, 'group%d' % ig,
'connectivity group')
fd.create_array(conn_group, 'conn', conn, 'connectivity')
fd.create_array(conn_group, 'mat_id', mat_ids[ig],
'material id')
fd.create_array(conn_group, 'desc', enc(descs[ig]),
'element Type')
node_sets_groups = fd.create_group(group, 'node_sets',
'node sets groups')
ii = 0
for key, nods in six.iteritems(mesh.nodal_bcs):
group = fd.create_group(node_sets_groups, 'group%d' % ii,
'node sets group')
fd.create_array(group, 'key', enc(key), 'key')
fd.create_array(group, 'nods', nods, 'nods')
ii += 1
def read_dimension(self, ret_fd=False):
fd = pt.open_file(self.filename, mode="r")
dim = fd.root.mesh.coors.shape[1]
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read_bounding_box(self, ret_fd=False, ret_dim=False):
fd = pt.open_file(self.filename, mode="r")
mesh_group = fd.root.mesh
coors = mesh_group.coors.read()
bbox = nm.vstack((nm.amin(coors, 0),
nm.amax(coors, 0)))
if ret_dim:
dim = coors.shape[1]
if ret_fd:
return bbox, dim, fd
else:
fd.close()
return bbox, dim
else:
if ret_fd:
return bbox, fd
else:
fd.close()
return bbox
def read(self, mesh=None, **kwargs):
return self.read_mesh_from_hdf5(self.filename, '/mesh', mesh=mesh)
def write(self, filename, mesh, out=None, ts=None, cache=None, **kwargs):
from time import asctime
if pt is None:
raise ValueError('pytables not imported!')
step = get_default_attr(ts, 'step', 0)
if (step == 0) or not op.exists(filename):
# A new file.
with pt.open_file(filename, mode="w",
title="SfePy output file") as fd:
mesh_group = fd.create_group('/', 'mesh', 'mesh')
self.write_mesh_to_hdf5(fd, mesh_group, mesh)
if ts is not None:
ts_group = fd.create_group('/', 'ts', 'time stepper')
fd.create_array(ts_group, 't0', ts.t0, 'initial time')
fd.create_array(ts_group, 't1', ts.t1, 'final time' )
fd.create_array(ts_group, 'dt', ts.dt, 'time step')
fd.create_array(ts_group, 'n_step', ts.n_step, 'n_step')
tstat_group = fd.create_group('/', 'tstat',
'global time statistics')
fd.create_array(tstat_group, 'created', enc(asctime()),
'file creation time')
fd.create_array(tstat_group, 'finished', enc('.' * 24),
'file closing time')
fd.create_array(fd.root, 'last_step',
nm.array([step], dtype=nm.int32),
'last saved step')
if out is not None:
if ts is None:
step, time, nt = 0, 0.0, 0.0
else:
step, time, nt = ts.step, ts.time, ts.nt
# Existing file.
fd = pt.open_file(filename, mode="r+")
step_group_name = 'step%d' % step
if step_group_name in fd.root:
raise ValueError('step %d is already saved in "%s" file!'
' Possible help: remove the old file or'
' start saving from the initial time.'
% (step, filename))
step_group = fd.create_group('/', step_group_name, 'time step data')
ts_group = fd.create_group(step_group, 'ts', 'time stepper')
fd.create_array(ts_group, 'step', step, 'step')
fd.create_array(ts_group, 't', time, 'time')
fd.create_array(ts_group, 'nt', nt, 'normalized time')
name_dict = {}
for key, val in six.iteritems(out):
group_name = '__' + key.translate(self._tr)
data_group = fd.create_group(step_group, group_name,
'%s data' % key)
fd.create_array(data_group, 'dname', enc(key), 'data name')
fd.create_array(data_group, 'mode', enc(val.mode), 'mode')
name = val.get('name', 'output_data')
fd.create_array(data_group, 'name', enc(name), 'object name')
if val.mode == 'custom':
write_to_hdf5(fd, data_group, 'data', val.data,
cache=cache,
unpack_markers=getattr(val, 'unpack_markers',
False))
continue
shape = val.get('shape', val.data.shape)
dofs = val.get('dofs', None)
if dofs is None:
dofs = [''] * nm.squeeze(shape)[-1]
var_name = val.get('var_name', '')
fd.create_array(data_group, 'data', val.data, 'data')
fd.create_array(data_group, 'dofs', [enc(ic) for ic in dofs],
'dofs')
fd.create_array(data_group, 'shape', shape, 'shape')
fd.create_array(data_group, 'var_name',
enc(var_name), 'object parent name')
if val.mode == 'full':
fd.create_array(data_group, 'field_name',
enc(val.field_name), 'field name')
name_dict[key] = group_name
step_group._v_attrs.name_dict = name_dict
fd.root.last_step[0] = step
fd.remove_node(fd.root.tstat.finished)
fd.create_array(fd.root.tstat, 'finished', enc(asctime()),
'file closing time')
fd.close()
def read_last_step(self, filename=None):
filename = get_default(filename, self.filename)
fd = pt.open_file(filename, mode="r")
last_step = fd.root.last_step[0]
fd.close()
return last_step
def read_time_stepper(self, filename=None):
filename = get_default(filename, self.filename)
fd = pt.open_file(filename, mode="r")
try:
ts_group = fd.root.ts
out = (ts_group.t0.read(), ts_group.t1.read(),
ts_group.dt.read(), ts_group.n_step.read())
        except pt.exceptions.NoSuchNodeError:
raise ValueError('no time stepper found!')
finally:
fd.close()
return out
def _get_step_group_names(self, fd):
return sorted([name for name in fd.root._v_groups.keys()
if name.startswith('step')],
key=lambda name: int(name[4:]))
def read_times(self, filename=None):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
"""
filename = get_default(filename, self.filename)
fd = pt.open_file(filename, mode='r')
steps = []
times = []
nts = []
for gr_name in self._get_step_group_names(fd):
ts_group = fd.get_node(fd.root, gr_name + '/ts')
steps.append(ts_group.step.read())
times.append(ts_group.t.read())
nts.append(ts_group.nt.read())
fd.close()
steps = nm.asarray(steps, dtype=nm.int32)
times = nm.asarray(times, dtype=nm.float64)
nts = nm.asarray(nts, dtype=nm.float64)
return steps, times, nts
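    # Usage sketch (hypothetical file name): recover the time axis of a
    # results file written step by step by write() above.
    #
    #   steps, times, nts = HDF5MeshIO('results.h5').read_times()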
def _get_step_group(self, step, filename=None):
filename = get_default(filename, self.filename)
fd = pt.open_file(filename, mode="r")
if step is None:
step = int(self._get_step_group_names(fd)[0][4:])
gr_name = 'step%d' % step
try:
step_group = fd.get_node(fd.root, gr_name)
        except pt.exceptions.NoSuchNodeError:
output('step %d data not found - premature end of file?' % step)
fd.close()
return None, None
return fd, step_group
def read_data(self, step, filename=None, cache=None):
fd, step_group = self._get_step_group(step, filename=filename)
if fd is None: return None
out = {}
for data_group in step_group:
try:
key = dec(data_group.dname.read())
except pt.exceptions.NoSuchNodeError:
continue
mode = dec(data_group.mode.read())
if mode == 'custom':
out[key] = read_from_hdf5(fd, data_group.data, cache=cache)
continue
name = dec(data_group.name.read())
data = data_group.data.read()
dofs = tuple([dec(ic) for ic in data_group.dofs.read()])
try:
shape = tuple(int(ii) for ii in data_group.shape.read())
except pt.exceptions.NoSuchNodeError:
shape = data.shape
if mode == 'full':
field_name = dec(data_group.field_name.read())
else:
field_name = None
out[key] = Struct(name=name, mode=mode, data=data,
dofs=dofs, shape=shape, field_name=field_name)
if out[key].dofs == (-1,):
out[key].dofs = None
fd.close()
return out
def read_data_header(self, dname, step=None, filename=None):
fd, step_group = self._get_step_group(step, filename=filename)
if fd is None: return None
groups = step_group._v_groups
for name, data_group in six.iteritems(groups):
try:
key = dec(data_group.dname.read())
except pt.exceptions.NoSuchNodeError:
continue
if key == dname:
mode = dec(data_group.mode.read())
fd.close()
return mode, name
fd.close()
raise KeyError('non-existent data: %s' % dname)
def read_time_history(self, node_name, indx, filename=None):
filename = get_default(filename, self.filename)
fd = pt.open_file(filename, mode="r")
th = dict_from_keys_init(indx, list)
for gr_name in self._get_step_group_names(fd):
step_group = fd.get_node(fd.root, gr_name)
data = step_group._f_get_child(node_name).data
for ii in indx:
th[ii].append(nm.array(data[ii]))
fd.close()
for key, val in six.iteritems(th):
aux = nm.array(val)
if aux.ndim == 4: # cell data.
aux = aux[:,0,:,0]
th[key] = aux
return th
def read_variables_time_history(self, var_names, ts, filename=None):
filename = get_default(filename, self.filename)
fd = pt.open_file(filename, mode="r")
assert_((fd.root.last_step[0] + 1) == ts.n_step)
ths = dict_from_keys_init(var_names, list)
arr = nm.asarray
for step in range(ts.n_step):
gr_name = 'step%d' % step
step_group = fd.get_node(fd.root, gr_name)
name_dict = step_group._v_attrs.name_dict
for var_name in var_names:
data = step_group._f_get_child(name_dict[var_name]).data
ths[var_name].append(arr(data.read()))
fd.close()
return ths
class MEDMeshIO(MeshIO):
format = "med"
def read(self, mesh, **kwargs):
fd = pt.open_file(self.filename, mode="r")
mesh_root = fd.root.ENS_MAA
#TODO: Loop through multiple meshes?
mesh_group = mesh_root._f_get_child(list(mesh_root._v_groups.keys())[0])
if not ('NOE' in list(mesh_group._v_groups.keys())):
mesh_group = mesh_group._f_get_child(list(mesh_group._v_groups.keys())[0])
mesh.name = mesh_group._v_name
aux_coors = mesh_group.NOE.COO.read()
n_nodes = mesh_group.NOE.COO.get_attr('NBR')
# Unflatten the node coordinate array
dim = aux_coors.shape[0] // n_nodes
coors = nm.zeros((n_nodes,dim), dtype=nm.float64)
for ii in range(dim):
coors[:,ii] = aux_coors[n_nodes*ii:n_nodes*(ii+1)]
ngroups = mesh_group.NOE.FAM.read()
assert_((ngroups >= 0).all())
# Dict to map MED element names to SfePy descs
#NOTE: The commented lines are elements which
# produce KeyError in SfePy
med_descs = {
'TE4' : '3_4',
#'T10' : '3_10',
#'PY5' : '3_5',
#'P13' : '3_13',
'HE8' : '3_8',
#'H20' : '3_20',
#'PE6' : '3_6',
#'P15' : '3_15',
#TODO: Polyhedrons (POE) - need special handling
'TR3' : '2_3',
#'TR6' : '2_6',
'QU4' : '2_4',
#'QU8' : '2_8',
#TODO: Polygons (POG) - need special handling
#'SE2' : '1_2',
#'SE3' : '1_3',
}
conns = []
descs = []
mat_ids = []
for md, desc in six.iteritems(med_descs):
if int(desc[0]) != dim: continue
try:
group = mesh_group.MAI._f_get_child(md)
aux_conn = group.NOD.read()
n_conns = group.NOD.get_attr('NBR')
# (0 based indexing in numpy vs. 1 based in MED)
nne = aux_conn.shape[0] // n_conns
conn = nm.zeros((n_conns,nne), dtype=nm.int32)
for ii in range(nne):
conn[:,ii] = aux_conn[n_conns*ii:n_conns*(ii+1)] - 1
conns.append(conn)
mat_id = group.FAM.read()
assert_((mat_id <= 0).all())
mat_id = nm.abs(mat_id)
mat_ids.append(mat_id)
descs.append(med_descs[md])
except pt.exceptions.NoSuchNodeError:
pass
fd.close()
mesh._set_io_data(coors, ngroups, conns, mat_ids, descs)
return mesh
class Mesh3DMeshIO(MeshIO):
format = "mesh3d"
def read(self, mesh, **kwargs):
f = open(self.filename)
# read the whole file:
vertices = self._read_section(f, integer=False)
tetras = self._read_section(f)
hexes = self._read_section(f)
prisms = self._read_section(f)
tris = self._read_section(f)
quads = self._read_section(f)
        # subtract 1 from all elements, because we count from 0:
conns = []
mat_ids = []
descs = []
if len(tetras) > 0:
conns.append(tetras - 1)
mat_ids.append([0]*len(tetras))
descs.append("3_4")
if len(hexes) > 0:
conns.append(hexes - 1)
mat_ids.append([0]*len(hexes))
descs.append("3_8")
mesh._set_io_data(vertices, None, conns, mat_ids, descs)
return mesh
def read_dimension(self):
return 3
def _read_line(self, f):
"""
        Reads one non-empty line (skipping comment lines).
"""
l = f.readline().strip()
while l == "" or l[0] == "#": # comment or an empty line
l = f.readline().strip()
return l
def _read_section(self, f, integer=True):
"""
Reads one section from the mesh3d file.
integer ... if True, all numbers are passed to int(), otherwise to
float(), before returning
        Some examples of what a section can look like:
2
1 2 5 4 7 8 11 10
2 3 6 5 8 9 12 11
or
5
1 2 3 4 1
1 2 6 5 1
2 3 7 6 1
3 4 8 7 1
4 1 5 8 1
or
0
"""
if integer:
dtype=int
else:
dtype=float
l = self._read_line(f)
N = int(l)
rows = []
for i in range(N):
l = self._read_line(f)
row = nm.fromstring(l, sep=" ", dtype=dtype)
rows.append(row)
return nm.array(rows)
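    # A minimal sketch of _read_section() on the first example from its
    # docstring (the constructor argument is a hypothetical file name):
    #
    #   import io
    #   f = io.StringIO("2\n1 2 5 4 7 8 11 10\n2 3 6 5 8 9 12 11\n")
    #   Mesh3DMeshIO('unused')._read_section(f)   # -> (2, 8) integer array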
def mesh_from_groups(mesh, ids, coors, ngroups,
tris, mat_tris, quads, mat_quads,
tetras, mat_tetras, hexas, mat_hexas, remap=None):
ids = nm.asarray(ids, dtype=nm.int32)
coors = nm.asarray(coors, dtype=nm.float64)
if remap is None:
n_nod = coors.shape[0]
remap = nm.zeros((ids.max()+1,), dtype=nm.int32)
remap[ids] = nm.arange(n_nod, dtype=nm.int32)
tris = remap[nm.array(tris, dtype=nm.int32)]
quads = remap[nm.array(quads, dtype=nm.int32)]
tetras = remap[nm.array(tetras, dtype=nm.int32)]
hexas = remap[nm.array(hexas, dtype=nm.int32)]
conns = [tris, quads, tetras, hexas]
mat_ids = [nm.array(ar, dtype=nm.int32)
for ar in [mat_tris, mat_quads, mat_tetras, mat_hexas]]
descs = ['2_3', '2_4', '3_4', '3_8']
# Remove empty groups.
conns, mat_ids, descs = zip(*[(conns[ig], mat_ids[ig], descs[ig])
for ig in range(4)
if conns[ig].shape[0] > 0])
mesh._set_io_data(coors, ngroups, conns, mat_ids, descs)
return mesh
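# A minimal sketch of the remap used above: sparse node ids become
# contiguous 0-based indices, so connectivities are renumbered by fancy
# indexing (values illustrative):
#
#   ids = nm.array([10, 12, 15], dtype=nm.int32)
#   remap = nm.zeros((ids.max() + 1,), dtype=nm.int32)
#   remap[ids] = nm.arange(3, dtype=nm.int32)
#   remap[[15, 10]]   # -> array([2, 0], dtype=int32)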
class AVSUCDMeshIO(MeshIO):
format = 'avs_ucd'
@staticmethod
def guess(filename):
return True
def read(self, mesh, **kwargs):
fd = open(self.filename, 'r')
# Skip all comments.
while 1:
line = fd.readline()
if line and (line[0] != '#'):
break
header = [int(ii) for ii in line.split()]
n_nod, n_el = header[0:2]
ids = nm.zeros((n_nod,), dtype=nm.int32)
dim = 3
coors = nm.zeros((n_nod, dim), dtype=nm.float64)
for ii in range(n_nod):
line = fd.readline().split()
ids[ii] = int(line[0])
coors[ii] = [float(coor) for coor in line[1:]]
mat_tetras = []
tetras = []
mat_hexas = []
hexas = []
for ii in range(n_el):
line = fd.readline().split()
if line[2] == 'tet':
mat_tetras.append(int(line[1]))
tetras.append([int(ic) for ic in line[3:]])
elif line[2] == 'hex':
mat_hexas.append(int(line[1]))
hexas.append([int(ic) for ic in line[3:]])
fd.close()
mesh = mesh_from_groups(mesh, ids, coors, None,
[], [], [], [],
tetras, mat_tetras, hexas, mat_hexas)
return mesh
def read_dimension(self):
return 3
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class HypermeshAsciiMeshIO(MeshIO):
format = 'hmascii'
def read(self, mesh, **kwargs):
fd = open(self.filename, 'r')
ids = []
coors = []
tetras = []
mat_tetras = []
hexas = []
mat_hexas = []
quads = []
mat_quads = []
trias = []
mat_trias = []
mat_id = 0
for line in fd:
if line and (line[0] == '*'):
if line[1:10] == 'component':
line = line.strip()[11:-1].split(',')
mat_id = int(line[0])
if line[1:5] == 'node':
line = line.strip()[6:-1].split(',')
ids.append(int(line[0]))
coors.append([float(coor) for coor in line[1:4]])
elif line[1:7] == 'tetra4':
line = line.strip()[8:-1].split(',')
mat_tetras.append(mat_id)
tetras.append([int(ic) for ic in line[2:6]])
elif line[1:6] == 'hexa8':
line = line.strip()[7:-1].split(',')
mat_hexas.append(mat_id)
hexas.append([int(ic) for ic in line[2:10]])
elif line[1:6] == 'quad4':
line = line.strip()[7:-1].split(',')
mat_quads.append(mat_id)
quads.append([int(ic) for ic in line[2:6]])
elif line[1:6] == 'tria3':
line = line.strip()[7:-1].split(',')
mat_trias.append(mat_id)
trias.append([int(ic) for ic in line[2:5]])
fd.close()
mesh = mesh_from_groups(mesh, ids, coors, None,
trias, mat_trias, quads, mat_quads,
tetras, mat_tetras, hexas, mat_hexas)
return mesh
def read_dimension(self):
return 3
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class AbaqusMeshIO(MeshIO):
format = 'abaqus'
@staticmethod
def guess(filename):
ok = False
fd = open(filename, 'r')
for ii in range(100):
try:
line = fd.readline().strip().split(',')
except:
break
if line[0].lower() == '*node':
ok = True
break
fd.close()
return ok
def read(self, mesh, **kwargs):
fd = open(self.filename, 'r')
ids = []
coors = []
tetras = []
mat_tetras = []
hexas = []
mat_hexas = []
tris = []
mat_tris = []
quads = []
mat_quads = []
nsets = {}
ing = 1
dim = 0
line = fd.readline().split(',')
while 1:
if not line[0]: break
token = line[0].strip().lower()
if token == '*node':
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
if dim == 0:
dim = len(line) - 1
ids.append(int(line[0]))
if dim == 2:
coors.append([float(coor) for coor in line[1:3]])
else:
coors.append([float(coor) for coor in line[1:4]])
elif token == '*element':
if line[1].find('C3D8') >= 0:
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
mat_hexas.append(0)
hexas.append([int(ic) for ic in line[1:9]])
elif line[1].find('C3D4') >= 0:
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
mat_tetras.append(0)
tetras.append([int(ic) for ic in line[1:5]])
elif (
line[1].find('CPS') >= 0
or line[1].find('CPE') >= 0
or line[1].find('CAX') >= 0
):
if line[1].find('4') >= 0:
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
mat_quads.append(0)
quads.append([int(ic) for ic in line[1:5]])
elif line[1].find('3') >= 0:
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
mat_tris.append(0)
tris.append([int(ic) for ic in line[1:4]])
else:
raise ValueError('unknown element type! (%s)' % line[1])
else:
raise ValueError('unknown element type! (%s)' % line[1])
elif token == '*nset':
if line[-1].strip().lower() == 'generate':
line = fd.readline()
continue
while 1:
line = fd.readline().strip().split(',')
if (not line[0]) or (line[0][0] == '*'): break
if not line[-1]: line = line[:-1]
aux = [int(ic) for ic in line]
nsets.setdefault(ing, []).extend(aux)
ing += 1
else:
line = fd.readline().split(',')
fd.close()
ngroups = nm.zeros((len(coors),), dtype=nm.int32)
for ing, ii in six.iteritems(nsets):
ngroups[nm.array(ii)-1] = ing
mesh = mesh_from_groups(mesh, ids, coors, ngroups,
tris, mat_tris, quads, mat_quads,
tetras, mat_tetras, hexas, mat_hexas)
return mesh
def read_dimension(self):
fd = open(self.filename, 'r')
line = fd.readline().split(',')
while 1:
if not line[0]: break
token = line[0].strip().lower()
if token == '*node':
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
dim = len(line) - 1
fd.close()
return dim
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class BDFMeshIO(MeshIO):
format = 'nastran'
def read_dimension(self, ret_fd=False):
fd = open(self.filename, 'r')
el3d = 0
while 1:
try:
line = fd.readline()
except:
output("reading " + fd.name + " failed!")
raise
            if len(line) == 0: break
            if len(line) == 1: continue
if line[0] == '$': continue
aux = line.split()
if aux[0] == 'CHEXA':
el3d += 1
elif aux[0] == 'CTETRA':
el3d += 1
if el3d > 0:
dim = 3
else:
dim = 2
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read(self, mesh, **kwargs):
def mfloat(s):
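            # Nastran-style floats may omit the exponent marker: '1.23-15'
            # means 1.23e-15; reinsert the 'e' before converting.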
if len(s) > 3:
if s[-3] == '-':
return float(s[:-3]+'e'+s[-3:])
return float(s)
import string
fd = open(self.filename, 'r')
el = {'3_8' : [], '3_4' : [], '2_4' : [], '2_3' : []}
nod = []
cmd = ''
dim = 2
conns_in = []
descs = []
node_grp = None
while 1:
try:
line = fd.readline()
except EOFError:
break
except:
output("reading " + fd.name + " failed!")
raise
if (len(line) == 0): break
if len(line) < 4: continue
if line[0] == '$': continue
row = line.strip().split()
if row[0] == 'GRID':
cs = line.strip()[-24:]
aux = [ cs[0:8], cs[8:16], cs[16:24] ]
                nod.append([mfloat(ii) for ii in aux])
elif row[0] == 'GRID*':
                aux = row[1:4]
                cmd = 'GRIDX'
elif row[0] == 'CHEXA':
aux = [int(ii)-1 for ii in row[3:9]]
aux2 = int(row[2])
aux3 = row[9]
                cmd = 'CHEXAX'
elif row[0] == 'CTETRA':
aux = [int(ii)-1 for ii in row[3:]]
aux.append(int(row[2]))
el['3_4'].append(aux)
dim = 3
elif row[0] == 'CQUAD4':
aux = [int(ii)-1 for ii in row[3:]]
aux.append(int(row[2]))
el['2_4'].append(aux)
elif row[0] == 'CTRIA3':
aux = [int(ii)-1 for ii in row[3:]]
aux.append(int(row[2]))
el['2_3'].append(aux)
elif cmd == 'GRIDX':
cmd = ''
aux2 = row[1]
if aux2[-1] == '0':
aux2 = aux2[:-1]
aux3 = aux[1:]
aux3.append(aux2)
                nod.append([float(ii) for ii in aux3])
elif cmd == 'CHEXAX':
cmd = ''
aux4 = row[0]
aux5 = aux4.find(aux3)
aux.append(int(aux4[(aux5+len(aux3)):])-1)
aux.extend([int(ii)-1 for ii in row[1:]])
aux.append(aux2)
el['3_8'].append(aux)
dim = 3
elif row[0] == 'SPC' or row[0] == 'SPC*':
if node_grp is None:
node_grp = [0] * len(nod)
node_grp[int(row[2]) - 1] = int(row[1])
for elem in el.keys():
if len(el[elem]) > 0:
conns_in.append(el[elem])
descs.append(elem)
fd.close()
nod = nm.array(nod, nm.float64)
if dim == 2:
nod = nod[:,:2].copy()
conns, mat_ids = split_conns_mat_ids(conns_in)
mesh._set_io_data(nod, node_grp, conns, mat_ids, descs)
return mesh
@staticmethod
def format_str(str, idx, n=8):
out = ''
for ii, istr in enumerate(str):
aux = '%d' % istr
out += aux + ' ' * (n - len(aux))
if ii == 7:
out += '+%07d\n+%07d' % (idx, idx)
return out
def write(self, filename, mesh, out=None, **kwargs):
fd = open(filename, 'w')
coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()
n_nod, dim = coors.shape
fd.write("$NASTRAN Bulk Data File created by SfePy\n")
fd.write("$\nBEGIN BULK\n")
fd.write("$\n$ ELEMENT CONNECTIVITY\n$\n")
iel = 0
mats = {}
for ig, conn in enumerate(conns):
ids = mat_ids[ig]
for ii in range(conn.shape[0]):
iel += 1
nn = conn[ii] + 1
mat = ids[ii]
if mat in mats:
mats[mat] += 1
else:
mats[mat] = 0
if (desc[ig] == "2_4"):
fd.write("CQUAD4 %s\n" %\
self.format_str([ii + 1, mat,
nn[0], nn[1], nn[2], nn[3]],
iel))
elif (desc[ig] == "2_3"):
fd.write("CTRIA3 %s\n" %\
self.format_str([ii + 1, mat,
nn[0], nn[1], nn[2]], iel))
elif (desc[ig] == "3_4"):
fd.write("CTETRA %s\n" %\
self.format_str([ii + 1, mat,
nn[0], nn[1], nn[2], nn[3]],
iel))
elif (desc[ig] == "3_8"):
fd.write("CHEXA %s\n" %\
self.format_str([ii + 1, mat, nn[0], nn[1], nn[2],
nn[3], nn[4], nn[5], nn[6],
nn[7]], iel))
else:
raise ValueError('unknown element type! (%s)' % desc[ig])
fd.write("$\n$ NODAL COORDINATES\n$\n")
format = 'GRID* %s % 08E % 08E\n'
if coors.shape[1] == 3:
format += '* % 08E0 \n'
else:
format += '* % 08E0 \n' % 0.0
for ii in range(n_nod):
sii = str(ii + 1)
fd.write(format % ((sii + ' ' * (8 - len(sii)),)
+ tuple(coors[ii])))
fd.write("$\n$ GEOMETRY\n$\n1 ")
fd.write("0.000000E+00 0.000000E+00\n")
fd.write("* 0.000000E+00 0.000000E+00\n* \n")
fd.write("$\n$ MATERIALS\n$\n")
matkeys = list(mats.keys())
matkeys.sort()
for ii, imat in enumerate(matkeys):
fd.write("$ material%d : Isotropic\n" % imat)
aux = str(imat)
fd.write("MAT1* %s " % (aux + ' ' * (8 - len(aux))))
fd.write("0.000000E+00 0.000000E+00\n")
fd.write("* 0.000000E+00 0.000000E+00\n")
fd.write("$\n$ GEOMETRY\n$\n")
for ii, imat in enumerate(matkeys):
fd.write("$ material%d : solid%d\n" % (imat, imat))
fd.write("PSOLID* %s\n" % self.format_str([ii + 1, imat], 0, 16))
fd.write("* \n")
fd.write("ENDDATA\n")
fd.close()
class NEUMeshIO(MeshIO):
format = 'gambit'
def read_dimension(self, ret_fd=False):
fd = open(self.filename, 'r')
row = fd.readline().split()
while 1:
if not row: break
if len(row) == 0: continue
if (row[0] == 'NUMNP'):
row = fd.readline().split()
n_nod, n_el, dim = row[0], row[1], int(row[4])
                break

            row = fd.readline().split()
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read(self, mesh, **kwargs):
el = {'3_8' : [], '3_4' : [], '2_4' : [], '2_3' : []}
nod = []
conns_in = []
descs = []
group_ids = []
group_n_els = []
groups = []
nodal_bcs = {}
fd = open(self.filename, 'r')
row = fd.readline()
while 1:
if not row: break
row = row.split()
if len(row) == 0:
row = fd.readline()
continue
if (row[0] == 'NUMNP'):
row = fd.readline().split()
n_nod, n_el, dim = int(row[0]), int(row[1]), int(row[4])
elif (row[0] == 'NODAL'):
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
nod.append(row[1:])
row = fd.readline().split()
elif (row[0] == 'ELEMENTS/CELLS'):
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
elid = [row[0]]
gtype = int(row[1])
if gtype == 6:
el['3_4'].append(row[3:]+elid)
elif gtype == 4:
rr = row[3:]
if (len(rr) < 8):
rr.extend(fd.readline().split())
el['3_8'].append(rr+elid)
elif gtype == 3:
el['2_3'].append(row[3:]+elid)
elif gtype == 2:
el['2_4'].append(row[3:]+elid)
row = fd.readline().split()
elif (row[0] == 'GROUP:'):
group_ids.append(row[1])
g_n_el = int(row[3])
group_n_els.append(g_n_el)
name = fd.readline().strip()
els = []
row = fd.readline().split()
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
els.extend(row)
row = fd.readline().split()
if g_n_el != len(els):
                    msg = 'wrong number of group elements! (%d == %d)'\
                          % (g_n_el, len(els))
raise ValueError(msg)
groups.append(els)
elif (row[0] == 'BOUNDARY'):
row = fd.readline().split()
key = row[0]
num = int(row[2])
inod = read_array(fd, num, None, nm.int32) - 1
nodal_bcs[key] = inod.squeeze()
row = fd.readline().split()
assert_(row[0] == 'ENDOFSECTION')
row = fd.readline()
fd.close()
if int(n_el) != sum(group_n_els):
            print('wrong total number of group elements! (%d == %d)'\
                  % (int(n_el), sum(group_n_els)))
mat_ids = nm.zeros(n_el, dtype=nm.int32)
for ii, els in enumerate(groups):
els = nm.array(els, dtype=nm.int32)
mat_ids[els - 1] = group_ids[ii]
for elem in el.keys():
if len(el[elem]) > 0:
els = nm.array(el[elem], dtype=nm.int32)
els[:, :-1] -= 1
els[:, -1] = mat_ids[els[:, -1]-1]
if elem == '3_8':
els = els[:, [0, 1, 3, 2, 4, 5, 7, 6, 8]]
conns_in.append(els)
descs.append(elem)
nod = nm.array(nod, nm.float64)
conns, mat_ids = split_conns_mat_ids(conns_in)
mesh._set_io_data(nod, None, conns, mat_ids, descs, nodal_bcs=nodal_bcs)
return mesh
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class ANSYSCDBMeshIO(MeshIO):
format = 'ansys_cdb'
@staticmethod
def guess(filename):
fd = open(filename, 'r')
for ii in range(1000):
row = fd.readline()
if not row: break
if len(row) == 0: continue
row = row.split(',')
kw = row[0].lower()
if (kw == 'nblock'):
ok = True
break
else:
ok = False
fd.close()
return ok
@staticmethod
def make_format(format, nchar=1000):
        idx = []
        dtype = []
        start = 0
for iform in format:
ret = iform.partition('i')
if not ret[1]:
ret = iform.partition('e')
if not ret[1]:
raise ValueError
aux = ret[2].partition('.')
step = int(aux[0])
for j in range(int(ret[0])):
if (start + step) > nchar:
break
idx.append((start, start+step))
start += step
dtype.append(ret[1])
return idx, dtype
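    # A minimal sketch of make_format() on a fixed-width record format,
    # assuming a typical nblock header like "(3i9,2e21.13)" (illustrative):
    #
    #   idx, dtype = ANSYSCDBMeshIO.make_format(['3i9', '2e21.13'])
    #   # idx -> [(0, 9), (9, 18), (18, 27), (27, 48), (48, 69)]
    #   # i.e. the character slices of each column in a data line.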
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
def read_bounding_box(self):
raise NotImplementedError
def read_dimension(self, ret_fd=False):
return 3
def read(self, mesh, **kwargs):
ids = []
coors = []
tetras = []
hexas = []
qtetras = []
qhexas = []
nodal_bcs = {}
fd = open(self.filename, 'r')
while True:
row = fd.readline()
if not row: break
if len(row) == 0: continue
row = row.split(',')
kw = row[0].lower()
if (kw == 'nblock'):
# Solid keyword -> 3, otherwise 1 is the starting coors index.
ic = 3 if len(row) == 3 else 1
fmt = fd.readline()
fmt = fmt.strip()[1:-1].split(',')
row = look_ahead_line(fd)
nchar = len(row)
idx, dtype = self.make_format(fmt, nchar)
ii0, ii1 = idx[0]
while True:
row = fd.readline()
if ((row[0] == '!') or (row[:2] == '-1')
or len(row) != nchar):
break
line = [float(row[i0:i1]) for i0, i1 in idx[ic:]]
ids.append(int(row[ii0:ii1]))
coors.append(line)
elif (kw == 'eblock'):
if (len(row) <= 2) or row[2].strip().lower() != 'solid':
continue
fmt = fd.readline()
fmt = [fmt.strip()[1:-1]]
row = look_ahead_line(fd)
nchar = len(row)
idx, dtype = self.make_format(fmt, nchar)
imi0, imi1 = idx[0] # Material id.
inn0, inn1 = idx[8] # Number of nodes in line.
ien0, ien1 = idx[10] # Element number.
ic0 = 11
while True:
row = fd.readline()
if ((row[0] == '!') or (row[:2] == '-1')
or (len(row) != nchar)):
break
line = [int(row[imi0:imi1])]
n_nod = int(row[inn0:inn1])
line.extend(int(row[i0:i1])
for i0, i1 in idx[ic0 : ic0 + n_nod])
if n_nod == 4:
tetras.append(line)
elif n_nod == 8:
hexas.append(line)
elif n_nod == 10:
row = fd.readline()
line.extend(int(row[i0:i1])
for i0, i1 in idx[:2])
qtetras.append(line)
elif n_nod == 20:
row = fd.readline()
line.extend(int(row[i0:i1])
for i0, i1 in idx[:12])
qhexas.append(line)
else:
raise ValueError('unsupported element type! (%d nodes)'
% n_nod)
elif kw == 'cmblock':
if row[2].lower() != 'node': # Only node sets support.
continue
n_nod = int(row[3].split('!')[0])
fd.readline() # Format line not needed.
nods = read_array(fd, n_nod, 1, nm.int32)
nodal_bcs[row[1].strip()] = nods.ravel()
fd.close()
coors = nm.array(coors, dtype=nm.float64)
tetras = nm.array(tetras, dtype=nm.int32)
if len(tetras):
mat_ids_tetras = tetras[:, 0]
tetras = tetras[:, 1:]
else:
tetras.shape = (0, 4)
mat_ids_tetras = nm.array([])
hexas = nm.array(hexas, dtype=nm.int32)
if len(hexas):
mat_ids_hexas = hexas[:, 0]
hexas = hexas[:, 1:]
else:
hexas.shape = (0, 8)
mat_ids_hexas = nm.array([])
if len(qtetras):
qtetras = nm.array(qtetras, dtype=nm.int32)
tetras.shape = (max(0, tetras.shape[0]), 4)
tetras = nm.r_[tetras, qtetras[:, 1:5]]
mat_ids_tetras = nm.r_[mat_ids_tetras, qtetras[:, 0]]
if len(qhexas):
qhexas = nm.array(qhexas, dtype=nm.int32)
hexas.shape = (max(0, hexas.shape[0]), 8)
hexas = nm.r_[hexas, qhexas[:, 1:9]]
mat_ids_hexas = nm.r_[mat_ids_hexas, qhexas[:, 0]]
if len(qtetras) or len(qhexas):
ii = nm.union1d(tetras.ravel(), hexas.ravel())
n_nod = len(ii)
remap = nm.zeros((ii.max()+1,), dtype=nm.int32)
remap[ii] = nm.arange(n_nod, dtype=nm.int32)
ic = nm.searchsorted(ids, ii)
coors = coors[ic]
else:
n_nod = coors.shape[0]
remap = nm.zeros((nm.array(ids).max() + 1,), dtype=nm.int32)
remap[ids] = nm.arange(n_nod, dtype=nm.int32)
# Convert tetras as degenerate hexas to true tetras.
ii = nm.where((hexas[:, 2] == hexas[:, 3])
& (hexas[:, 4] == hexas[:, 5])
& (hexas[:, 4] == hexas[:, 6])
& (hexas[:, 4] == hexas[:, 7]))[0]
if len(ii) == len(hexas):
tetras = nm.r_[tetras, hexas[ii[:, None], [0, 1, 2, 4]]]
mat_ids_tetras = nm.r_[mat_ids_tetras, mat_ids_hexas[ii]]
hexas = nm.delete(hexas, ii, axis=0)
mat_ids_hexas = nm.delete(mat_ids_hexas, ii)
else:
output('WARNING: mesh "%s" has both tetrahedra and hexahedra!'
% mesh.name)
ngroups = nm.zeros(len(coors), dtype=nm.int32)
mesh = mesh_from_groups(mesh, ids, coors, ngroups,
[], [], [], [],
tetras, mat_ids_tetras,
hexas, mat_ids_hexas, remap=remap)
mesh.nodal_bcs = {}
for key, nods in six.iteritems(nodal_bcs):
nods = nods[nods < len(remap)]
mesh.nodal_bcs[key] = remap[nods]
return mesh
class Msh2MeshIO(MeshIO):
format = 'msh_v2'
msh_cells = {
1: (2, 2),
2: (2, 3),
3: (2, 4),
4: (3, 4),
5: (3, 8),
6: (3, 6),
}
prism2hexa = nm.asarray([0, 1, 2, 2, 3, 4, 5, 5])
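    # prism2hexa duplicates local nodes 2 and 5, so a 6-node prism
    # (n0..n5) is stored as the degenerate 8-node hexahedron
    # (n0, n1, n2, n2, n3, n4, n5, n5) via conn[:, prism2hexa].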
def read_dimension(self, ret_fd=True):
fd = open(self.filename, 'r')
while 1:
lastpos = fd.tell()
line = skip_read_line(fd).split()
if line[0] in ['$Nodes', '$Elements']:
num = int(read_token(fd))
coors = read_array(fd, num, 4, nm.float64)
fd.seek(lastpos)
if nm.sum(nm.abs(coors[:,3])) < 1e-16:
dims = 2
else:
dims = 3
break
if line[0] == '$PhysicalNames':
num = int(read_token(fd))
dims = []
for ii in range(num):
dims.append(int(skip_read_line(fd, no_eof=True).split()[0]))
break
dim = nm.max(dims)
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read_bounding_box(self, ret_fd=False, ret_dim=False):
fd = open(self.filename, 'r')
dim, fd = self.read_dimension(ret_fd=True)
return _read_bounding_box(fd, dim, '$Nodes',
c0=1, ret_fd=ret_fd, ret_dim=ret_dim)
def read(self, mesh, omit_facets=True, **kwargs):
fd = open(self.filename, 'r')
conns = []
descs = []
mat_ids = []
tags = []
dims = []
while 1:
line = skip_read_line(fd).split()
if not line:
break
ls = line[0]
if ls == '$MeshFormat':
skip_read_line(fd)
elif ls == '$PhysicalNames':
num = int(read_token(fd))
for ii in range(num):
skip_read_line(fd)
elif ls == '$Nodes':
num = int(read_token(fd))
coors = read_array(fd, num, 4, nm.float64)
elif ls == '$Elements':
num = int(read_token(fd))
for ii in range(num):
line = [int(jj) for jj in skip_read_line(fd).split()]
if line[1] > 6:
continue
dimension, nc = self.msh_cells[line[1]]
dims.append(dimension)
ntag = line[2]
mat_id = line[3]
conn = line[(3 + ntag):]
desc = '%d_%d' % (dimension, nc)
if desc in descs:
idx = descs.index(desc)
conns[idx].append(conn)
mat_ids[idx].append(mat_id)
tags[idx].append(line[3:(3 + ntag)])
else:
descs.append(desc)
conns.append([conn])
mat_ids.append([mat_id])
                        tags.append([line[3:(3 + ntag)]])
elif ls == '$Periodic':
periodic = ''
while 1:
pline = skip_read_line(fd)
if '$EndPeriodic' in pline:
break
else:
periodic += pline
elif line[0] == '#' or ls[:4] == '$End':
pass
else:
output('skipping unknown entity: %s' % line)
continue
fd.close()
dim = nm.max(dims)
if '2_2' in descs:
idx2 = descs.index('2_2')
descs.pop(idx2)
            del conns[idx2]
            del mat_ids[idx2]
if '3_6' in descs:
idx6 = descs.index('3_6')
c3_6as8 = nm.asarray(conns[idx6],
dtype=nm.int32)[:,self.prism2hexa]
if '3_8' in descs:
descs.pop(idx6)
                c3_6m = nm.asarray(mat_ids.pop(idx6), dtype=nm.int32)
                idx8 = descs.index('3_8')
                c3_8 = nm.asarray(conns[idx8], dtype=nm.int32)
                c3_8m = nm.asarray(mat_ids[idx8], dtype=nm.int32)
conns[idx8] = nm.vstack([c3_8, c3_6as8])
mat_ids[idx8] = nm.hstack([c3_8m, c3_6m])
else:
descs[idx6] = '3_8'
conns[idx6] = c3_6as8
descs0, mat_ids0, conns0 = [], [], []
for ii in range(len(descs)):
if int(descs[ii][0]) == dim:
conns0.append(nm.asarray(conns[ii], dtype=nm.int32) - 1)
mat_ids0.append(nm.asarray(mat_ids[ii], dtype=nm.int32))
descs0.append(descs[ii])
mesh._set_io_data(coors[:,1:], nm.int32(coors[:,-1] * 0),
conns0, mat_ids0, descs0)
return mesh
def guess_format(filename, ext, formats, io_table):
"""
Guess the format of filename, candidates are in formats.
"""
ok = False
for format in formats:
output('guessing %s' % format)
try:
ok = io_table[format].guess(filename)
except AttributeError:
pass
if ok: break
else:
raise NotImplementedError('cannot guess format of a *%s file!' % ext)
return format
var_dict = list(vars().items())
io_table = {}
for key, var in var_dict:
try:
if is_derived_class(var, MeshIO):
io_table[var.format] = var
except TypeError:
pass
del var_dict
def any_from_filename(filename, prefix_dir=None):
"""
Create a MeshIO instance according to the kind of `filename`.
Parameters
----------
filename : str, function or MeshIO subclass instance
The name of the mesh file. It can be also a user-supplied function
accepting two arguments: `mesh`, `mode`, where `mesh` is a Mesh
instance and `mode` is one of 'read','write', or a MeshIO subclass
instance.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the kind of `filename`.
"""
if not isinstance(filename, basestr):
if isinstance(filename, MeshIO):
return filename
else:
return UserMeshIO(filename)
ext = op.splitext(filename)[1].lower()
try:
format = supported_formats[ext]
except KeyError:
raise ValueError('unsupported mesh file suffix! (%s)' % ext)
if isinstance(format, tuple):
format = guess_format(filename, ext, format, io_table)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
return io_table[format](filename)
insert_static_method(MeshIO, any_from_filename)
del any_from_filename
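# Usage sketch (hypothetical file name; the suffix selects the reader,
# with guess() used when several formats share a suffix):
#
#   io = MeshIO.any_from_filename('block.vtk')   # -> a MeshIO subclass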
def for_format(filename, format=None, writable=False, prefix_dir=None):
"""
Create a MeshIO instance for file `filename` with forced `format`.
Parameters
----------
filename : str
The name of the mesh file.
format : str
One of supported formats. If None,
:func:`MeshIO.any_from_filename()` is called instead.
writable : bool
If True, verify that the mesh format is writable.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the `format`.
"""
ext = op.splitext(filename)[1].lower()
try:
_format = supported_formats[ext]
except KeyError:
_format = None
format = get_default(format, _format)
if format is None:
io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
else:
if not isinstance(format, basestr):
            raise ValueError('ambiguous suffix! (%s -> %s)' % (ext, format))
if format not in io_table:
raise ValueError('unknown output mesh format! (%s)' % format)
if writable and ('w' not in supported_capabilities[format]):
output('writable mesh formats:')
output_mesh_formats('w')
msg = 'write support not implemented for output mesh format "%s",' \
' see above!' % format
raise ValueError(msg)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
io = io_table[format](filename)
return io
insert_static_method(MeshIO, for_format)
del for_format
|
lokik/sfepy
|
sfepy/discrete/fem/meshio.py
|
Python
|
bsd-3-clause
| 97,709
|
[
"VTK"
] |
75f575d839a461551744cd54a962ce3a66881bc5860456fc2e356554858caf4a
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
#
# pylint: disable=no-member
"""Wrapper for netCDF readers."""
import logging
import os.path
import warnings
import numpy as np
from monty.collections import AttrDict
from monty.dev import requires
from monty.functools import lazy_property
from monty.string import marquee
from pymatgen.core.structure import Structure
from pymatgen.core.units import ArrayWithUnit
from pymatgen.core.xcfunc import XcFunc
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
__all__ = [
"as_ncreader",
"as_etsfreader",
"NetcdfReader",
"ETSF_Reader",
"NO_DEFAULT",
"structure_from_ncdata",
]
try:
import netCDF4
except ImportError as exc:
netCDF4 = None
warnings.warn(
"""\
`import netCDF4` failed with the following error:
%s
Please install netcdf4 with `conda install netcdf4`
If the conda version does not work, uninstall it with `conda uninstall hdf4 hdf5 netcdf4`
and use `pip install netcdf4`"""
% str(exc)
)
def _asreader(file, cls):
closeit = False
if not isinstance(file, cls):
file, closeit = cls(file), True
return file, closeit
def as_ncreader(file):
"""
Convert file into a NetcdfReader instance.
Returns reader, closeit where closeit is set to True
if we have to close the file before leaving the procedure.
"""
return _asreader(file, NetcdfReader)
def as_etsfreader(file):
"""Return an ETSF_Reader. Accepts filename or ETSF_Reader."""
return _asreader(file, ETSF_Reader)
class NetcdfReaderError(Exception):
"""Base error class for NetcdfReader"""
class NO_DEFAULT:
"""Signal that read_value should raise an Error"""
class NetcdfReader:
"""
Wraps and extends netCDF4.Dataset. Read only mode. Supports with statements.
Additional documentation available at:
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
"""
Error = NetcdfReaderError
@requires(netCDF4 is not None, "netCDF4 must be installed to use this class")
def __init__(self, path):
"""Open the Netcdf file specified by path (read mode)."""
self.path = os.path.abspath(path)
try:
self.rootgrp = netCDF4.Dataset(self.path, mode="r")
except Exception as exc:
raise self.Error(f"In file {self.path}: {exc}")
self.ngroups = len(list(self.walk_tree()))
# Always return non-masked numpy arrays.
        # Slicing a ncvar returns a MaskedArray and this is really annoying
# because it can lead to unexpected behaviour in e.g. calls to np.matmul!
# See also https://github.com/Unidata/netcdf4-python/issues/785
self.rootgrp.set_auto_mask(False)
def __enter__(self):
"""Activated when used in the with statement."""
return self
def __exit__(self, type, value, traceback):
"""Activated at the end of the with statement. It automatically closes the file."""
self.rootgrp.close()
def close(self):
"""Close the file."""
try:
self.rootgrp.close()
except Exception as exc:
logger.warning(f"Exception {exc} while trying to close {self.path}")
def walk_tree(self, top=None):
"""
Navigate all the groups in the file starting from top.
If top is None, the root group is used.
"""
if top is None:
top = self.rootgrp
values = top.groups.values()
yield values
for value in top.groups.values():
yield from self.walk_tree(value)
def print_tree(self):
"""Print all the groups in the file."""
for children in self.walk_tree():
for child in children:
print(child)
def read_dimvalue(self, dimname, path="/", default=NO_DEFAULT):
"""
Returns the value of a dimension.
Args:
dimname: Name of the variable
path: path to the group.
default: return `default` if `dimname` is not present and
`default` is not `NO_DEFAULT` else raise self.Error.
"""
try:
dim = self._read_dimensions(dimname, path=path)[0]
return len(dim)
except self.Error:
if default is NO_DEFAULT:
raise
return default
def read_varnames(self, path="/"):
"""List of variable names stored in the group specified by path."""
if path == "/":
return self.rootgrp.variables.keys()
group = self.path2group[path]
return group.variables.keys()
def read_value(self, varname, path="/", cmode=None, default=NO_DEFAULT):
"""
Returns the values of variable with name varname in the group specified by path.
Args:
varname: Name of the variable
path: path to the group.
cmode: if cmode=="c", a complex ndarrays is constructed and returned
(netcdf does not provide native support from complex datatype).
default: returns default if varname is not present.
self.Error is raised if default is set to NO_DEFAULT
Returns:
numpy array if varname represents an array, scalar otherwise.
"""
try:
var = self.read_variable(varname, path=path)
except self.Error:
if default is NO_DEFAULT:
raise
return default
if cmode is None:
# scalar or array
# getValue is not portable!
try:
return var.getValue()[0] if not var.shape else var[:]
except IndexError:
return var.getValue() if not var.shape else var[:]
assert var.shape[-1] == 2
if cmode == "c":
return var[..., 0] + 1j * var[..., 1]
raise ValueError(f"Wrong value for cmode {cmode}")
def read_variable(self, varname, path="/"):
"""Returns the variable with name varname in the group specified by path."""
return self._read_variables(varname, path=path)[0]
def _read_dimensions(self, *dimnames, **kwargs):
path = kwargs.get("path", "/")
try:
if path == "/":
return [self.rootgrp.dimensions[dname] for dname in dimnames]
group = self.path2group[path]
return [group.dimensions[dname] for dname in dimnames]
except KeyError:
raise self.Error(
f"In file {self.path}:\nError while reading dimensions: `{dimnames}` with kwargs: `{kwargs}`"
)
def _read_variables(self, *varnames, **kwargs):
path = kwargs.get("path", "/")
try:
if path == "/":
return [self.rootgrp.variables[vname] for vname in varnames]
group = self.path2group[path]
return [group.variables[vname] for vname in varnames]
except KeyError:
raise self.Error(
f"In file {self.path}:\nError while reading variables: `{varnames}` with kwargs `{kwargs}`."
)
def read_keys(self, keys, dict_cls=AttrDict, path="/"):
"""
Read a list of variables/dimensions from file. If a key is not present the corresponding
entry in the output dictionary is set to None.
"""
od = dict_cls()
for k in keys:
try:
# Try to read a variable.
od[k] = self.read_value(k, path=path)
except self.Error:
try:
# Try to read a dimension.
od[k] = self.read_dimvalue(k, path=path)
except self.Error:
od[k] = None
return od
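# Usage sketch (file name and key names are illustrative, not from this
# module): the reader supports the with statement, and read_keys()
# collects several variables/dimensions at once.
#
#   with NetcdfReader("run.nc") as r:
#       nat = r.read_dimvalue("number_of_atoms", default=None)
#       data = r.read_keys(["etotal", "fermie"])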
class ETSF_Reader(NetcdfReader):
"""
This object reads data from a file written according to the ETSF-IO specifications.
We assume that the netcdf file contains at least the crystallographic section.
"""
@lazy_property
def chemical_symbols(self):
"""Chemical symbols char [number of atom species][symbol length]."""
charr = self.read_value("chemical_symbols")
symbols = []
for v in charr:
s = "".join(c.decode("utf-8") for c in v)
symbols.append(s.strip())
return symbols
def typeidx_from_symbol(self, symbol):
"""Returns the type index from the chemical symbol. Note python convention."""
return self.chemical_symbols.index(symbol)
def read_structure(self, cls=Structure):
"""Returns the crystalline structure stored in the rootgrp."""
return structure_from_ncdata(self, cls=cls)
def read_abinit_xcfunc(self):
"""
Read ixc from an Abinit file. Return :class:`XcFunc` object.
"""
ixc = int(self.read_value("ixc"))
return XcFunc.from_abinit_ixc(ixc)
def read_abinit_hdr(self):
"""
Read the variables associated to the Abinit header.
Return :class:`AbinitHeader`
"""
d = {}
for hvar in _HDR_VARIABLES.values():
ncname = hvar.etsf_name if hvar.etsf_name is not None else hvar.name
if ncname in self.rootgrp.variables:
d[hvar.name] = self.read_value(ncname)
elif ncname in self.rootgrp.dimensions:
d[hvar.name] = self.read_dimvalue(ncname)
else:
raise ValueError(f"Cannot find `{ncname}` in `{self.path}`")
# Convert scalars to (well) scalars.
if hasattr(d[hvar.name], "shape") and not d[hvar.name].shape:
d[hvar.name] = np.asarray(d[hvar.name]).item()
if hvar.name in ("title", "md5_pseudos", "codvsn"):
# Convert array of numpy bytes to list of strings
if hvar.name == "codvsn":
d[hvar.name] = "".join(bs.decode("utf-8").strip() for bs in d[hvar.name])
else:
d[hvar.name] = ["".join(bs.decode("utf-8") for bs in astr).strip() for astr in d[hvar.name]]
return AbinitHeader(d)
def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):
"""
Reads and returns a pymatgen structure from a NetCDF file
containing crystallographic data in the ETSF-IO format.
Args:
ncdata: filename or NetcdfReader instance.
site_properties: Dictionary with site properties.
cls: The Structure class to instantiate.
"""
ncdata, closeit = as_ncreader(ncdata)
# TODO check whether atomic units are used
lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"), "bohr").to("ang")
red_coords = ncdata.read_value("reduced_atom_positions")
natom = len(red_coords)
znucl_type = ncdata.read_value("atomic_numbers")
    # type_atom[0:natom] --> index between 1 and number of atom species
type_atom = ncdata.read_value("atom_species")
# Fortran to C index and float --> int conversion.
species = natom * [None]
for atom in range(natom):
type_idx = type_atom[atom] - 1
species[atom] = int(znucl_type[type_idx])
d = {}
if site_properties is not None:
for prop in site_properties:
d[prop] = ncdata.read_value(prop)
structure = cls(lattice, species, red_coords, site_properties=d)
# Quick and dirty hack.
# I need an abipy structure since I need to_abivars and other methods.
try:
from abipy.core.structure import Structure as AbipyStructure
structure.__class__ = AbipyStructure
except ImportError:
pass
if closeit:
ncdata.close()
return structure
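# Usage sketch (hypothetical file name): accepts a path or an open
# NetcdfReader; the file must contain the ETSF crystallographic section.
#
#   structure = structure_from_ncdata("out_GSR.nc")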
class _H:
__slots__ = ["name", "doc", "etsf_name"]
def __init__(self, name, doc, etsf_name=None):
self.name, self.doc, self.etsf_name = name, doc, etsf_name
_HDR_VARIABLES = (
# Scalars
_H("bantot", "total number of bands (sum of nband on all kpts and spins)"),
_H("date", "starting date"),
_H("headform", "format of the header"),
_H("intxc", "input variable"),
_H("ixc", "input variable"),
_H("mband", "maxval(hdr%nband)", etsf_name="max_number_of_states"),
_H("natom", "input variable", etsf_name="number_of_atoms"),
_H("nkpt", "input variable", etsf_name="number_of_kpoints"),
_H("npsp", "input variable"),
_H("nspden", "input variable", etsf_name="number_of_components"),
_H("nspinor", "input variable", etsf_name="number_of_spinor_components"),
_H("nsppol", "input variable", etsf_name="number_of_spins"),
_H("nsym", "input variable", etsf_name="number_of_symmetry_operations"),
_H("ntypat", "input variable", etsf_name="number_of_atom_species"),
_H("occopt", "input variable"),
_H("pertcase", "the index of the perturbation, 0 if GS calculation"),
_H("usepaw", "input variable (0=norm-conserving psps, 1=paw)"),
_H("usewvl", "input variable (0=plane-waves, 1=wavelets)"),
_H("kptopt", "input variable (defines symmetries used for k-point sampling)"),
_H("pawcpxocc", "input variable"),
_H(
"nshiftk_orig",
"original number of shifts given in input (changed in inkpts, the actual value is nshiftk)",
),
_H("nshiftk", "number of shifts after inkpts."),
_H("icoulomb", "input variable."),
_H("ecut", "input variable", etsf_name="kinetic_energy_cutoff"),
_H("ecutdg", "input variable (ecut for NC psps, pawecutdg for paw)"),
_H("ecutsm", "input variable"),
_H("ecut_eff", "ecut*dilatmx**2 (dilatmx is an input variable)"),
_H("etot", "EVOLVING variable"),
_H("fermie", "EVOLVING variable", etsf_name="fermi_energy"),
_H("residm", "EVOLVING variable"),
_H("stmbias", "input variable"),
_H("tphysel", "input variable"),
_H("tsmear", "input variable"),
_H("nelect", "number of electrons (computed from pseudos and charge)"),
_H("charge", "input variable"),
# Arrays
_H("qptn", "qptn(3) the wavevector, in case of a perturbation"),
# _H("rprimd", "rprimd(3,3) EVOLVING variables", etsf_name="primitive_vectors"),
# _H(ngfft, "ngfft(3) input variable", number_of_grid_points_vector1"
# _H("nwvlarr", "nwvlarr(2) the number of wavelets for each resolution.", etsf_name="number_of_wavelets"),
_H("kptrlatt_orig", "kptrlatt_orig(3,3) Original kptrlatt"),
_H("kptrlatt", "kptrlatt(3,3) kptrlatt after inkpts."),
_H("istwfk", "input variable istwfk(nkpt)"),
_H("lmn_size", "lmn_size(npsp) from psps"),
_H("nband", "input variable nband(nkpt*nsppol)", etsf_name="number_of_states"),
_H(
"npwarr",
"npwarr(nkpt) array holding npw for each k point",
etsf_name="number_of_coefficients",
),
_H("pspcod", "pscod(npsp) from psps"),
_H("pspdat", "psdat(npsp) from psps"),
_H("pspso", "pspso(npsp) from psps"),
_H("pspxc", "pspxc(npsp) from psps"),
_H("so_psp", "input variable so_psp(npsp)"),
_H("symafm", "input variable symafm(nsym)"),
# _H(symrel="input variable symrel(3,3,nsym)", etsf_name="reduced_symmetry_matrices"),
_H("typat", "input variable typat(natom)", etsf_name="atom_species"),
_H(
"kptns",
"input variable kptns(nkpt, 3)",
etsf_name="reduced_coordinates_of_kpoints",
),
_H("occ", "EVOLVING variable occ(mband, nkpt, nsppol)", etsf_name="occupations"),
_H(
"tnons",
"input variable tnons(nsym, 3)",
etsf_name="reduced_symmetry_translations",
),
_H("wtk", "weight of kpoints wtk(nkpt)", etsf_name="kpoint_weights"),
_H("shiftk_orig", "original shifts given in input (changed in inkpts)."),
_H("shiftk", "shiftk(3,nshiftk), shiftks after inkpts"),
_H("amu", "amu(ntypat) ! EVOLVING variable"),
# _H("xred", "EVOLVING variable xred(3,natom)", etsf_name="reduced_atom_positions"),
_H("zionpsp", "zionpsp(npsp) from psps"),
_H(
"znuclpsp",
"znuclpsp(npsp) from psps. Note the difference between (znucl|znucltypat) and znuclpsp",
),
_H("znucltypat", "znucltypat(ntypat) from alchemy", etsf_name="atomic_numbers"),
_H("codvsn", "version of the code"),
_H("title", "title(npsp) from psps"),
_H(
"md5_pseudos",
"md5pseudos(npsp), md5 checksums associated to pseudos (read from file)",
),
# _H(type(pawrhoij_type), allocatable :: pawrhoij(:) ! EVOLVING variable, only for paw
)
_HDR_VARIABLES = {h.name: h for h in _HDR_VARIABLES} # type: ignore
class AbinitHeader(AttrDict):
"""Stores the values reported in the Abinit header."""
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# for k, v in self.items():
# v.__doc__ = _HDR_VARIABLES[k].doc
def __str__(self):
return self.to_string()
def to_string(self, verbose=0, title=None, **kwargs):
"""
String representation. kwargs are passed to `pprint.pformat`.
Args:
verbose: Verbosity level
title: Title string.
"""
from pprint import pformat
s = pformat(self, **kwargs)
if title is not None:
return "\n".join([marquee(title, mark="="), s])
return s
|
materialsproject/pymatgen
|
pymatgen/io/abinit/netcdf.py
|
Python
|
mit
| 17,462
|
[
"ABINIT",
"NetCDF",
"pymatgen"
] |
15c1cafd7ec55f867429d9a89a3ff2709409c7a3359abe423b0621268079148e
|
# -*- test-case-name: pyflakes -*-
# (c) 2005-2010 Divmod, Inc.
# See LICENSE file for details
try:
import __builtin__ # NOQA
except ImportError:
import builtins as __builtin__ # NOQA
import os.path
import _ast
import sys
from flake8 import messages
from flake8.util import skip_warning
__version__ = '0.5.0'
# utility function to iterate over an AST node's children, adapted
# from Python 2.6's standard ast module
try:
import ast
iter_child_nodes = ast.iter_child_nodes
except (ImportError, AttributeError):
def iter_child_nodes(node, astcls=_ast.AST):
"""
Yield all direct child nodes of *node*, that is, all fields that are
nodes and all items of fields that are lists of nodes.
"""
for name in node._fields:
field = getattr(node, name, None)
if isinstance(field, astcls):
yield field
elif isinstance(field, list):
for item in field:
yield item
class Binding(object):
"""
Represents the binding of a value to a name.
The checker uses this to keep track of which names have been bound and
which names have not. See L{Assignment} for a special type of binding that
is checked with stricter rules.
@ivar used: pair of (L{Scope}, line-number) indicating the scope and
line number that this binding was last used
"""
def __init__(self, name, source):
self.name = name
self.source = source
self.used = False
def __str__(self):
return self.name
def __repr__(self):
return '<%s object %r from line %r at 0x%x>' % (
self.__class__.__name__,
self.name,
self.source.lineno,
id(self))
class UnBinding(Binding):
'''Created by the 'del' operator.'''
class Importation(Binding):
"""
A binding created by an import statement.
@ivar fullName: The complete name given to the import statement,
possibly including multiple dotted components.
@type fullName: C{str}
"""
def __init__(self, name, source):
self.fullName = name
name = name.split('.')[0]
super(Importation, self).__init__(name, source)
class Argument(Binding):
"""
Represents binding a name as an argument.
"""
class Assignment(Binding):
"""
Represents binding a name with an explicit assignment.
The checker will raise warnings for any Assignment that isn't used. Also,
the checker does not consider assignments in tuple/list unpacking to be
Assignments, rather it treats them as simple Bindings.
"""
class FunctionDefinition(Binding):
_property_decorator = False
class ExportBinding(Binding):
"""
A binding created by an C{__all__} assignment. If the names in the list
can be determined statically, they will be treated as names for export and
additional checking applied to them.
The only C{__all__} assignment that can be recognized is one which takes
the value of a literal list containing literal strings. For example::
__all__ = ["foo", "bar"]
Names which are imported and not otherwise used but appear in the value of
C{__all__} will not have an unused import warning reported for them.
"""
def names(self):
"""
Return a list of the names referenced by this binding.
"""
names = []
if isinstance(self.source, _ast.List):
for node in self.source.elts:
if isinstance(node, _ast.Str):
names.append(node.s)
return names
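    # A minimal sketch of what names() accepts, per the class docstring
    # (only a literal list of literal strings is recognized):
    #
    #   __all__ = ["foo", "bar"]     # -> names() == ['foo', 'bar']
    #   __all__ = ["foo"] + other    # source is not a List -> names() == []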
class Scope(dict):
importStarred = False # set to True when import * is found
def __repr__(self):
return '<%s at 0x%x %s>' % (self.__class__.__name__,
id(self),
dict.__repr__(self))
def __init__(self):
super(Scope, self).__init__()
class ClassScope(Scope):
pass
class FunctionScope(Scope):
"""
I represent a name scope for a function.
@ivar globals: Names declared 'global' in this function.
"""
def __init__(self):
super(FunctionScope, self).__init__()
self.globals = {}
class ModuleScope(Scope):
pass
# Globally defined names which are not attributes of the __builtin__ module.
_MAGIC_GLOBALS = ['__file__', '__builtins__']
class Checker(object):
"""
I check the cleanliness and sanity of Python code.
@ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements
of the list are two-tuples. The first element is the callable passed
to L{deferFunction}. The second element is a copy of the scope stack
at the time L{deferFunction} was called.
@ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
callables which are deferred assignment checks.
"""
nodeDepth = 0
traceTree = False
def __init__(self, tree, filename='(none)'):
self._deferredFunctions = []
self._deferredAssignments = []
self.dead_scopes = []
self.messages = []
self.filename = filename
self.scopeStack = [ModuleScope()]
self.futuresAllowed = True
self.handleChildren(tree)
self._runDeferred(self._deferredFunctions)
# Set _deferredFunctions to None so that deferFunction will fail
# noisily if called after we've run through the deferred functions.
self._deferredFunctions = None
self._runDeferred(self._deferredAssignments)
# Set _deferredAssignments to None so that deferAssignment will fail
        # noisily if called after we've run through the deferred assignments.
self._deferredAssignments = None
del self.scopeStack[1:]
self.popScope()
self.check_dead_scopes()
def deferFunction(self, callable):
'''
Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
        because code later in the file might modify the global scope. When
        `callable` is called, the scope at the time this is called will be
        restored; however, it will contain any new bindings added to it.
'''
self._deferredFunctions.append((callable, self.scopeStack[:]))
def deferAssignment(self, callable):
"""
Schedule an assignment handler to be called just after deferred
function handlers.
"""
self._deferredAssignments.append((callable, self.scopeStack[:]))
def _runDeferred(self, deferred):
"""
Run the callables in C{deferred} using their associated scope stack.
"""
for handler, scope in deferred:
self.scopeStack = scope
handler()
def scope(self):
return self.scopeStack[-1]
scope = property(scope)
def popScope(self):
self.dead_scopes.append(self.scopeStack.pop())
def check_dead_scopes(self):
"""
Look at scopes which have been fully examined and report names in them
which were imported but unused.
"""
for scope in self.dead_scopes:
export = isinstance(scope.get('__all__'), ExportBinding)
if export:
all = scope['__all__'].names()
if os.path.split(self.filename)[1] != '__init__.py':
# Look for possible mistakes in the export list
undefined = set(all) - set(scope)
for name in undefined:
self.report(
messages.UndefinedExport,
scope['__all__'].source.lineno,
name)
else:
all = []
# Look for imported names that aren't used.
for importation in scope.values():
if isinstance(importation, Importation):
if not importation.used and importation.name not in all:
self.report(
messages.UnusedImport,
importation.source.lineno,
importation.name)
def pushFunctionScope(self):
self.scopeStack.append(FunctionScope())
def pushClassScope(self):
self.scopeStack.append(ClassScope())
def report(self, messageClass, *args, **kwargs):
self.messages.append(messageClass(self.filename, *args, **kwargs))
def handleChildren(self, tree):
for node in iter_child_nodes(tree):
self.handleNode(node, tree)
def isDocstring(self, node):
"""
Determine if the given node is a docstring, as long as it is at the
correct place in the node tree.
"""
return isinstance(node, _ast.Str) or \
(isinstance(node, _ast.Expr) and
isinstance(node.value, _ast.Str))
def handleNode(self, node, parent):
node.parent = parent
if self.traceTree:
print(' ' * self.nodeDepth + node.__class__.__name__)
self.nodeDepth += 1
if self.futuresAllowed and not \
(isinstance(node, _ast.ImportFrom) or self.isDocstring(node)):
self.futuresAllowed = False
nodeType = node.__class__.__name__.upper()
try:
handler = getattr(self, nodeType)
handler(node)
finally:
self.nodeDepth -= 1
if self.traceTree:
print(' ' * self.nodeDepth + 'end ' + node.__class__.__name__)
def ignore(self, node):
pass
# "stmt" type nodes
RETURN = DELETE = PRINT = WHILE = IF = WITH = RAISE = TRYEXCEPT = \
TRYFINALLY = ASSERT = EXEC = EXPR = handleChildren
CONTINUE = BREAK = PASS = ignore
# "expr" type nodes
BOOLOP = BINOP = UNARYOP = IFEXP = DICT = SET = YIELD = COMPARE = \
CALL = REPR = ATTRIBUTE = SUBSCRIPT = LIST = TUPLE = handleChildren
NUM = STR = ELLIPSIS = ignore
# "slice" type nodes
SLICE = EXTSLICE = INDEX = handleChildren
# expression contexts are node instances too, though being constants
LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore
# same for operators
AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \
BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \
EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = ignore
# additional node types
COMPREHENSION = KEYWORD = handleChildren
def EXCEPTHANDLER(self, node):
if node.name is not None:
if isinstance(node.name, str):
name = node.name
else:
name = node.name.id
self.addBinding(node.lineno, Assignment(name, node))
def runException():
for stmt in iter_child_nodes(node):
self.handleNode(stmt, node)
self.deferFunction(runException)
def addBinding(self, lineno, value, reportRedef=True):
'''Called when a binding is altered.
- `lineno` is the line of the statement responsible for the change
        - `value` is the new value, a Binding instance; an UnBinding
          instance removes an existing binding, if present.
- if `reportRedef` is True (default), rebinding while unused will be
reported.
'''
if (isinstance(self.scope.get(value.name), FunctionDefinition)
and isinstance(value, FunctionDefinition)):
if not value._property_decorator:
self.report(messages.RedefinedFunction,
lineno, value.name, self.scope[value.name].source.lineno)
if not isinstance(self.scope, ClassScope):
for scope in self.scopeStack[::-1]:
existing = scope.get(value.name)
if (isinstance(existing, Importation)
and not existing.used
and (not isinstance(value, Importation)
or value.fullName == existing.fullName)
and reportRedef):
self.report(messages.RedefinedWhileUnused,
lineno, value.name,
scope[value.name].source.lineno)
if isinstance(value, UnBinding):
try:
del self.scope[value.name]
except KeyError:
self.report(messages.UndefinedName, lineno, value.name)
else:
self.scope[value.name] = value
def GLOBAL(self, node):
"""
Keep track of globals declarations.
"""
if isinstance(self.scope, FunctionScope):
self.scope.globals.update(dict.fromkeys(node.names))
def LISTCOMP(self, node):
# handle generators before element
for gen in node.generators:
self.handleNode(gen, node)
self.handleNode(node.elt, node)
GENERATOREXP = SETCOMP = LISTCOMP
# dictionary comprehensions; introduced in Python 2.7
def DICTCOMP(self, node):
for gen in node.generators:
self.handleNode(gen, node)
self.handleNode(node.key, node)
self.handleNode(node.value, node)
def FOR(self, node):
"""
Process bindings for loop variables.
"""
vars = []
def collectLoopVars(n):
if isinstance(n, _ast.Name):
vars.append(n.id)
elif isinstance(n, _ast.expr_context):
return
else:
for c in iter_child_nodes(n):
collectLoopVars(c)
collectLoopVars(node.target)
for varn in vars:
if (isinstance(self.scope.get(varn), Importation)
# unused ones will get an unused import warning
and self.scope[varn].used):
self.report(messages.ImportShadowedByLoopVar,
node.lineno, varn, self.scope[varn].source.lineno)
self.handleChildren(node)
def NAME(self, node):
"""
        Handle occurrence of Name (which can be a load/store/delete access).
"""
# Locate the name in locals / function / globals scopes.
if isinstance(node.ctx, (_ast.Load, _ast.AugLoad)):
# try local scope
importStarred = self.scope.importStarred
try:
self.scope[node.id].used = (self.scope, node.lineno)
except KeyError:
pass
else:
return
# try enclosing function scopes
for scope in self.scopeStack[-2:0:-1]:
importStarred = importStarred or scope.importStarred
if not isinstance(scope, FunctionScope):
continue
try:
scope[node.id].used = (self.scope, node.lineno)
except KeyError:
pass
else:
return
# try global scope
importStarred = importStarred or self.scopeStack[0].importStarred
try:
self.scopeStack[0][node.id].used = (self.scope, node.lineno)
except KeyError:
if ((not hasattr(__builtin__, node.id))
and node.id not in _MAGIC_GLOBALS
and not importStarred):
if (os.path.basename(self.filename) == '__init__.py' and
node.id == '__path__'):
# the special name __path__ is valid only in packages
pass
else:
self.report(messages.UndefinedName,
node.lineno,
node.id)
elif isinstance(node.ctx, (_ast.Store, _ast.AugStore)):
# if the name hasn't already been defined in the current scope
if isinstance(self.scope, FunctionScope) and \
node.id not in self.scope:
# for each function or module scope above us
for scope in self.scopeStack[:-1]:
if not isinstance(scope, (FunctionScope, ModuleScope)):
continue
# if the name was defined in that scope, and the name has
# been accessed already in the current scope, and hasn't
# been declared global
if (node.id in scope
and scope[node.id].used
and scope[node.id].used[0] is self.scope
and node.id not in self.scope.globals):
# then it's probably a mistake
self.report(messages.UndefinedLocal,
scope[node.id].used[1],
node.id,
scope[node.id].source.lineno)
break
if isinstance(node.parent,
(_ast.For,
_ast.comprehension,
_ast.Tuple,
_ast.List)):
binding = Binding(node.id, node)
elif (node.id == '__all__' and
isinstance(self.scope, ModuleScope)):
binding = ExportBinding(node.id, node.parent.value)
else:
binding = Assignment(node.id, node)
if node.id in self.scope:
binding.used = self.scope[node.id].used
self.addBinding(node.lineno, binding)
elif isinstance(node.ctx, _ast.Del):
if isinstance(self.scope, FunctionScope) and \
node.id in self.scope.globals:
del self.scope.globals[node.id]
else:
self.addBinding(node.lineno, UnBinding(node.id, node))
else:
# must be a Param context -- this only happens for names
# in function arguments, but these aren't dispatched through here
raise RuntimeError(
"Got impossible expression context: %r" % (node.ctx,))
def FUNCTIONDEF(self, node):
# the decorators attribute is called decorator_list as of Python 2.6
if hasattr(node, 'decorators'):
for deco in node.decorators:
self.handleNode(deco, node)
else:
for deco in node.decorator_list:
self.handleNode(deco, node)
        # Check for property decorator (guarding for Python < 2.6, where the
        # attribute is called 'decorators' instead of 'decorator_list')
        func_def = FunctionDefinition(node.name, node)
        for decorator in getattr(node, 'decorator_list',
                                 getattr(node, 'decorators', ())):
if getattr(decorator, 'attr', None) in ('setter', 'deleter'):
func_def._property_decorator = True
self.addBinding(node.lineno, func_def)
self.LAMBDA(node)
def LAMBDA(self, node):
for default in node.args.defaults:
self.handleNode(default, node)
def runFunction():
args = []
def addArgs(arglist):
for arg in arglist:
if isinstance(arg, _ast.Tuple):
addArgs(arg.elts)
else:
try:
id_ = arg.id
except AttributeError:
id_ = arg.arg
if id_ in args:
self.report(messages.DuplicateArgument,
node.lineno, id_)
args.append(id_)
self.pushFunctionScope()
addArgs(node.args.args)
# vararg/kwarg identifiers are not Name nodes
if node.args.vararg:
args.append(node.args.vararg)
if node.args.kwarg:
args.append(node.args.kwarg)
for name in args:
self.addBinding(node.lineno, Argument(name, node),
reportRedef=False)
if isinstance(node.body, list):
# case for FunctionDefs
for stmt in node.body:
self.handleNode(stmt, node)
else:
# case for Lambdas
self.handleNode(node.body, node)
def checkUnusedAssignments():
"""
Check to see if any assignments have not been used.
"""
for name, binding in self.scope.items():
                    if (not binding.used and name not in self.scope.globals
and isinstance(binding, Assignment)):
self.report(messages.UnusedVariable,
binding.source.lineno, name)
self.deferAssignment(checkUnusedAssignments)
self.popScope()
self.deferFunction(runFunction)
def CLASSDEF(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
# decorator_list is present as of Python 2.6
for deco in getattr(node, 'decorator_list', []):
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
self.pushClassScope()
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node.lineno, Binding(node.name, node))
def ASSIGN(self, node):
self.handleNode(node.value, node)
for target in node.targets:
self.handleNode(target, node)
def AUGASSIGN(self, node):
# AugAssign is awkward: must set the context explicitly
# and visit twice, once with AugLoad context, once with
# AugStore context
node.target.ctx = _ast.AugLoad()
self.handleNode(node.target, node)
self.handleNode(node.value, node)
node.target.ctx = _ast.AugStore()
self.handleNode(node.target, node)
def IMPORT(self, node):
for alias in node.names:
name = alias.asname or alias.name
importation = Importation(name, node)
self.addBinding(node.lineno, importation)
def IMPORTFROM(self, node):
if node.module == '__future__':
if not self.futuresAllowed:
self.report(messages.LateFutureImport, node.lineno,
[n.name for n in node.names])
else:
self.futuresAllowed = False
for alias in node.names:
if alias.name == '*':
self.scope.importStarred = True
self.report(messages.ImportStarUsed, node.lineno, node.module)
continue
name = alias.asname or alias.name
importation = Importation(name, node)
if node.module == '__future__':
importation.used = (self.scope, node.lineno)
self.addBinding(node.lineno, importation)
def checkPath(filename):
"""
Check the given path, printing out any warnings detected.
@return: the number of warnings printed
"""
try:
return check(open(filename, 'U').read() + '\n', filename)
except IOError:
msg = sys.exc_info()[1]
sys.stderr.write("%s: %s\n" % (filename, msg.args[1]))
return 1
def check(codeString, filename='(code)'):
"""
Check the Python source given by C{codeString} for flakes.
@param codeString: The Python source to check.
@type codeString: C{str}
@param filename: The name of the file the source came from, used to report
errors.
@type filename: C{str}
@return: The number of warnings emitted.
@rtype: C{int}
"""
# First, compile into an AST and handle syntax errors.
try:
tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
sys.stderr.write("%s: problem decoding source\n" % (filename))
else:
line = text.splitlines()[-1]
if offset is not None:
offset = offset - (len(text) - len(line))
sys.stderr.write('%s:%d: %s\n' % (filename, lineno, msg))
sys.stderr.write(line + '\n')
if offset is not None:
sys.stderr.write(" " * offset + "^\n")
return 1
else:
# Okay, it's syntactically valid. Now check it.
w = Checker(tree, filename)
sorting = [(msg.lineno, msg) for msg in w.messages]
sorting.sort()
w.messages = [msg for index, msg in sorting]
valid_warnings = 0
for warning in w.messages:
if skip_warning(warning):
continue
print(warning)
valid_warnings += 1
return valid_warnings
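# --- Editor's sketch (not part of the original module): exercising the two
# entry points defined above. check() takes in-memory source; checkPath()
# reads a file. The file name below is a placeholder.
#     source = "import os\n\ndef f():\n    return 1\n"
#     n = check(source, filename='<example>')  # prints an UnusedImport for 'os'
#     n += checkPath('some_module.py')         # same checks, read from disk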
|
fivestars/flake8
|
flake8/pyflakes.py
|
Python
|
mit
| 25,440
|
[
"VisIt"
] |
8516b5089e560cb3e8b79d628a87938db24cb2e115223db51c16c12f2d366838
|
__author__ = 'ddeconti'
import FileHandler
import numpy
import random
import sys
from bokeh.plotting import figure, output_file, show, VBox, HBox
from rdkit import DataStructs
from rdkit.Chem import AllChem, SDMolSupplier
from sklearn.decomposition.pca import PCA
from sklearn.cross_validation import train_test_split
def pca(target, control, title, name_one, name_two):
np_fps = []
for fp in target + control:
arr = numpy.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, arr)
np_fps.append(arr)
ys_fit = [1] * len(target) + [0] * len(control)
names = ["PAINS", "Control"]
pca = PCA(n_components=3)
pca.fit(np_fps)
np_fps_r = pca.transform(np_fps)
p1 = figure(x_axis_label="PC1",
y_axis_label="PC2",
title=title)
p1.scatter(np_fps_r[:len(target), 0], np_fps_r[:len(target), 1],
color="blue", legend=name_one)
p1.scatter(np_fps_r[len(target):, 0], np_fps_r[len(target):, 1],
color="red", legend=name_two)
p2 = figure(x_axis_label="PC2",
y_axis_label="PC3",
title=title)
p2.scatter(np_fps_r[:len(target), 1], np_fps_r[:len(target), 2],
color="blue", legend=name_one)
p2.scatter(np_fps_r[len(target):, 1], np_fps_r[len(target):, 2],
color="red", legend=name_two)
return HBox(p1, p2)
def pca_no_labels(target, title="PCA clustering of PAINS", color="blue"):
np_fps = []
for fp in target:
arr = numpy.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, arr)
np_fps.append(arr)
pca = PCA(n_components=3)
pca.fit(np_fps)
np_fps_r = pca.transform(np_fps)
p3 = figure(x_axis_label="PC1",
y_axis_label="PC2",
title=title)
p3.scatter(np_fps_r[:, 0], np_fps_r[:, 1], color=color)
p4 = figure(x_axis_label="PC2",
y_axis_label="PC3",
title=title)
p4.scatter(np_fps_r[:, 1], np_fps_r[:, 2], color=color)
return HBox(p3, p4)
def randomly_pick_from_sdf(sdf_filename, max_N):
sdf_struct = SDMolSupplier(sdf_filename)
print len(sdf_struct)
sdf_struct = random.sample(sdf_struct, max_N)
    try:
        mol_list = [m for m in sdf_struct]
    except Exception:
        sys.stderr.write("Error parsing SDMolSupplier object\n" +
                         "Error in randomly_pick_from_sdf()\n")
        sys.exit(1)
fp_list = []
for m in mol_list:
try:
fp_list.append(AllChem.GetMorganFingerprintAsBitVect(m, 2))
        except Exception:
            continue
    return filter(lambda x: x is not None, fp_list)
def main(sa):
sln_filename = sa[0]
sdf_filename = sa[1]
setsix_filename = sa[2]
smiles_filename = sa[3]
sln_fp = FileHandler.SlnFile(sln_filename).get_fingerprint_list()
sdf_fp = randomly_pick_from_sdf(sdf_filename, 400)
setsix_fp = FileHandler.SdfFile(setsix_filename).get_fingerprint_list()
smile_fp = FileHandler.SmilesFile(smiles_filename).get_fingerprint_list()
print "PCA for PAINS vs. Chembl"
pvc = pca(sln_fp, sdf_fp, "PAINS vs. ChEMBL",
"PAINS", "ChEMBL")
print "PCA for PAINS vs. Set six"
pvb = pca(sln_fp, setsix_fp, "PAINS vs. ChEMBL set 5",
"PAINS", "ChEMBL.5")
print "PCA for PAINS vs. STitch"
pva = pca(sln_fp, smile_fp, "PAINS vs. Stitch",
"PAINS", "Stitch")
print "PCA within PAINS"
pvp = pca_no_labels(sln_fp)
bvb = pca_no_labels(setsix_fp, title="PCA clustering of ChEMBL set 5",
color="red")
output_file("pca_plots.html")
p = VBox(pvc, pvb, pva, pvp, bvb)
show(p)
if __name__ == "__main__":
main(sys.argv[1:])
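# Editor's sketch of the expected invocation (argument order taken from
# main() above; the four file names are placeholders):
#     python pca_plots_on_fp.py pains.sln library.sdf set_six.sdf compounds.smi
# This writes five rows of paired PCA scatter plots to pca_plots.html via
# bokeh's output_file()/show().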
|
dkdeconti/PAINS-train
|
training_methods/clustering/pca_plots_on_fp.py
|
Python
|
mit
| 3,717
|
[
"RDKit"
] |
96cd1e86e67e10a1950d60800b2da661ed379cd66bf82cc557b9424132296779
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
'''
Created on Sep 25, 2009
@author: Erik De Rijcke
'''
import datetime
from elixir.entity import Entity, EntityMeta
from sqlalchemy.types import Date, Unicode
from sqlalchemy.sql import and_
from elixir.fields import Field
from elixir.options import using_options
from elixir.relationships import ManyToOne, OneToMany
from camelot.model.authentication import end_of_times
from camelot.admin.entity_admin import EntityAdmin
from camelot.types import Code, Enumeration
#
# Global dict keeping track of which status class is used for which class
#
__status_classes__ = {}
def get_status_class(cls_name):
"""
:param cls_name: an Entity class name
:return: the status class used for this entity
"""
return __status_classes__[cls_name]
def create_type_3_status_mixin(status_attribute):
"""Create a class that can be subclassed to provide a class that
has a type 3 status with methods to manipulate and review its status
:param status_attribute: the name of the type 3 status attribute
"""
class Type3StatusMixin(object):
@property
def current_status(self):
classified_by = None
today = datetime.date.today()
for status_history in self.status:
if status_history.status_from_date<=today and status_history.status_thru_date>=today:
classified_by = status_history.classified_by
return classified_by
def change_status(self, new_status, status_from_date=None, status_thru_date=end_of_times()):
from sqlalchemy import orm
if not status_from_date:
status_from_date = datetime.date.today()
mapper = orm.class_mapper(self.__class__)
status_property = mapper.get_property('status')
status_type = status_property._get_target().class_
old_status = status_type.query.filter( and_( status_type.status_for == self,
status_type.status_from_date <= status_from_date,
status_type.status_thru_date >= status_from_date ) ).first()
            if old_status is not None:
old_status.thru_date = datetime.date.today() - datetime.timedelta( days = 1 )
old_status.status_thru_date = status_from_date - datetime.timedelta( days = 1 )
new_status = status_type( status_for = self,
classified_by = new_status,
status_from_date = status_from_date,
status_thru_date = status_thru_date,
from_date = datetime.date.today(),
thru_date = end_of_times() )
if old_status:
self.query.session.flush( [old_status] )
self.query.session.flush( [new_status] )
return Type3StatusMixin
def type_3_status( statusable_entity, metadata, collection, verbose_entity_name = None, enumeration=None ):
'''
Creates a new type 3 status related to the given entity
:statusable_entity: A string referring to an entity.
:enumeration: if this parameter is used, no status type Entity is created, but the status type is
described by the enumeration.
'''
t3_status_name = statusable_entity + '_status'
t3_status_type_name = statusable_entity + '_status_type'
if not enumeration:
class Type3StatusTypeMeta( EntityMeta ):
def __new__( cls, classname, bases, dictionary ):
return EntityMeta.__new__( cls, t3_status_type_name,
bases, dictionary )
def __init__( self, classname, bases, dictionary ):
EntityMeta.__init__( self, t3_status_type_name,
bases, dictionary )
class Type3StatusType( Entity, ):
using_options( tablename = t3_status_type_name.lower(), metadata=metadata, collection=collection )
__metaclass__ = Type3StatusTypeMeta
code = Field( Code( parts = ['>AAAA'] ), index = True,
required = True, unique = True )
description = Field( Unicode( 40 ), index = True )
def __unicode__( self ):
return 'Status type: %s : %s' % ( '.'.join( self.code ), self.description )
class Admin( EntityAdmin ):
list_display = ['code', 'description']
verbose_name = statusable_entity + ' Status Type'
if verbose_entity_name is not None:
verbose_name = verbose_entity_name + ' Status Type'
class Type3StatusMeta( EntityMeta ):
def __new__( cls, classname, bases, dictionary ):
return EntityMeta.__new__( cls, t3_status_name,
bases, dictionary )
def __init__( self, classname, bases, dictionary ):
EntityMeta.__init__( self, t3_status_name,
bases, dictionary )
class Type3Status( Entity, ):
"""
Status Pattern
.. attribute:: status_datetime For statuses that occur at a specific point in time
.. attribute:: status_from_date For statuses that require a date range
.. attribute:: from_date When a status was enacted or set
"""
using_options( tablename = t3_status_name.lower(), metadata=metadata, collection=collection )
__metaclass__ = Type3StatusMeta
status_datetime = Field( Date, required = False )
status_from_date = Field( Date, required = False )
status_thru_date = Field( Date, required = False )
from_date = Field( Date, required = True, default = datetime.date.today )
thru_date = Field( Date, required = True, default = end_of_times )
status_for = ManyToOne( statusable_entity, #required = True,
ondelete = 'cascade', onupdate = 'cascade' )
if not enumeration:
classified_by = ManyToOne( t3_status_type_name, required = True,
ondelete = 'cascade', onupdate = 'cascade' )
else:
classified_by = Field(Enumeration(enumeration), required=True, index=True)
class Admin( EntityAdmin ):
verbose_name = statusable_entity + ' Status'
verbose_name_plural = statusable_entity + ' Statuses'
list_display = ['status_from_date', 'status_thru_date', 'classified_by']
if verbose_entity_name is not None:
verbose_name = verbose_entity_name + ' Status'
def __unicode__( self ):
return u'Status'
__status_classes__[statusable_entity] = Type3Status
return t3_status_name
def entity_type( typable_entity, metadata, collection, verbose_entity_name = None ):
'''
Creates a new type related to the given entity.
    .. typable_entity:: A string referring to an entity.
'''
type_name = typable_entity + '_type'
class TypeMeta( EntityMeta ):
def __new__( cls, classname, bases, dictionary ):
return EntityMeta.__new__( cls, type_name,
bases, dictionary )
def __init__( self, classname, bases, dictionary ):
EntityMeta.__init__( self, type_name,
bases, dictionary )
class Type( Entity ):
using_options( tablename = type_name.lower(), metadata=metadata, collection=collection )
__metaclass__ = TypeMeta
type_description_for = OneToMany( typable_entity )
description = Field( Unicode( 48 ), required = True )
class Admin( EntityAdmin ):
verbose_name = typable_entity + ' Type'
list_display = ['description', ]
if verbose_entity_name is not None:
verbose_name = verbose_entity_name + ' Type'
def __unicode__( self ):
return u'Type: %s' % ( self.description )
return type_name
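# Editor's sketch (assumptions: an Elixir/Camelot project defining an entity
# named 'Invoice', with `metadata` and `collection` in scope; the names below
# are illustrative, not part of this module's API surface):
#     class Invoice( Entity, create_type_3_status_mixin('status') ):
#         status = OneToMany( type_3_status( 'Invoice', metadata, collection ) )
# type_3_status() returns the generated status entity's name, which OneToMany
# accepts; change_status() then closes any overlapping status row before
# flushing the new one, so current_status tracks today's date range.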
|
kurtraschke/camelot
|
camelot/model/type_and_status.py
|
Python
|
gpl-2.0
| 9,374
|
[
"VisIt"
] |
4de075c8c445791589cb19725e84c1109e69a8e185fa3fc92b3602436d0c287d
|
#!/usr/bin/env python
import glob
import os
try:
from setuptools import setup
have_setuptools = True
except ImportError:
from distutils.core import setup
have_setuptools = False
kwargs = {'name': 'openmc',
'version': '0.7.1',
'packages': ['openmc', 'openmc.mgxs'],
'scripts': glob.glob('scripts/openmc-*'),
# Metadata
'author': 'Will Boyd',
'author_email': 'wbinventor@gmail.com',
'description': 'OpenMC Python API',
'url': 'https://github.com/mit-crpg/openmc',
'classifiers': [
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering'
]}
if have_setuptools:
kwargs.update({
# Required dependencies
'install_requires': ['numpy', 'h5py', 'matplotlib'],
# Optional dependencies
'extras_require': {
'pandas': ['pandas'],
'vtk': ['vtk', 'silomesh'],
'validate': ['lxml']
}})
setup(**kwargs)
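# Editor's note: with setuptools available, the extras declared above can be
# requested at install time (commands illustrative):
#     pip install .             # core install; pulls numpy, h5py, matplotlib
#     pip install ".[pandas]"   # additionally pulls the pandas extra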
|
mjlong/openmc
|
setup.py
|
Python
|
mit
| 1,289
|
[
"VTK"
] |
0ffe8900cb9724bd740b4c6e428f11dae24e9cecfb8180e55c93df3f271dfbc8
|
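# Editor's note: this file is an IPython session transcript, not a plain
# script. The "%timeit" lines are IPython magics, the bracketed lists near
# the end are pasted integrator computation-step output, and the trailing
# "self.addComputePerDof" fragments are excerpts from integrator source.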
import lb_loader
import numpy as np
import pandas as pd
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
pd.set_option('display.width', 1000)
n_steps = 3000
temperature = 300. * u.kelvin
collision_rate = 1.0 / u.picoseconds
timestep = 1.0 * u.femtoseconds
steps_per_hmc = 12
system, positions = lb_loader.load_lb()
positions = lb_loader.pre_equil(system, positions, temperature)
integrator = mm.VerletIntegrator(timestep)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(2)
%timeit integrator.step(200)
integrator = mm.LangevinIntegrator(temperature, 1.0/u.picoseconds, timestep)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(2)
%timeit integrator.step(200)
integrator = hmc_integrators.VelocityVerletIntegrator(timestep)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(2)
%timeit integrator.step(200)
integrator = hmc_integrators.GHMC2(temperature, 100, timestep)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(2)
%timeit integrator.step(2)
integrator = hmc_integrators.GHMC2(temperature, 50, timestep)
integrator.getNumComputations()
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(1)
%timeit integrator.step(4)
for k in range(integrator.getNumComputations()):
print(integrator.getComputationStep(k))
groups = [(0, 1)]
integrator = hmc_integrators.GHMCRESPA(temperature, 50, timestep, collision_rate, groups)
integrator.getNumComputations()
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(1)
%timeit integrator.step(4)
for k in range(integrator.getNumComputations()):
print(integrator.getComputationStep(k))
[1, 'x1', 'x']
[3, '', '']
[1, 'v', 'v+0.5*dt*f/m+(x-x1)/dt']
[4, '', '']
[1, 'v', 'v+0.5*dt*f/m']
[1, 'x', 'x+dt*v']
[1, 'x1', 'x']
[1, 'x', 'x+(dt/1)*v']
[3, '', '']
[1, 'v', '(x-x1)/(dt/1)']
[1, 'v', 'v+0.5*(dt/1)*f0/m']
[4, '', '']
[1, 'v', 'v+0.5*(dt/1)*f0/m']
[1, 'x1', 'x']
for step in range(self.steps_per_hmc):
self.addComputePerDof("v", "v+0.5*dt*f/m")
self.addComputePerDof("x", "x+dt*v")
self.addComputePerDof("x1", "x")
self.addConstrainPositions()
self.addComputePerDof("v", "v+0.5*dt*f/m+(x-x1)/dt")
self.addConstrainVelocities()
for i in range(stepsPerParentStep):
self.addComputePerDof("v", "v+0.5*(dt/%s)*f%s/m" % (str_sub, str_group))
if len(groups) == 1:
self.addComputePerDof("x1", "x")
self.addComputePerDof("x", "x+(dt/%s)*v" % (str_sub))
self.addConstrainPositions()
self.addComputePerDof("v", "(x-x1)/(dt/%s)" % (str_sub))
else:
self._create_substeps(substeps, groups[1:])
self.addComputePerDof("v", "v+0.5*(dt/%s)*f%s/m" % (str_sub, str_group))
|
kyleabeauchamp/HMCNotes
|
code/old/test_vv.py
|
Python
|
gpl-2.0
| 3,374
|
[
"OpenMM"
] |
f5006055283f8f31816c86a9934cce9f4d8861edad4368dc7637f5e4813c7023
|
import numpy as np
from ..filters import gaussian
def binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2,
volume_fraction=0.5, seed=None):
"""
Generate synthetic binary image with several rounded blob-like objects.
Parameters
----------
length : int, optional
Linear size of output image.
blob_size_fraction : float, optional
Typical linear size of blob, as a fraction of ``length``, should be
smaller than 1.
n_dim : int, optional
Number of dimensions of output image.
    volume_fraction : float, optional (default 0.5)
Fraction of image pixels covered by the blobs (where the output is 1).
Should be in [0, 1].
seed : int, optional
Seed to initialize the random number generator.
If `None`, a random seed from the operating system is used.
Returns
-------
blobs : ndarray of bools
Output binary image
Examples
--------
>>> from skimage import data
>>> data.binary_blobs(length=5, blob_size_fraction=0.2, seed=1)
array([[ True, False, True, True, True],
[ True, True, True, False, True],
[False, True, False, True, True],
[ True, False, False, True, True],
[ True, False, False, False, True]], dtype=bool)
>>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.1)
>>> # Finer structures
>>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.05)
>>> # Blobs cover a smaller volume fraction of the image
>>> blobs = data.binary_blobs(length=256, volume_fraction=0.3)
"""
rs = np.random.RandomState(seed)
shape = tuple([length] * n_dim)
mask = np.zeros(shape)
n_pts = max(int(1. / blob_size_fraction) ** n_dim, 1)
points = (length * rs.rand(n_dim, n_pts)).astype(np.int)
mask[tuple(indices for indices in points)] = 1
mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction)
threshold = np.percentile(mask, 100 * (1 - volume_fraction))
return np.logical_not(mask < threshold)
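# Editor's sketch: the realized foreground fraction tracks volume_fraction,
# since the threshold is taken at the matching percentile of the smoothed
# mask (parameter values below are arbitrary):
#     blobs = binary_blobs(length=128, blob_size_fraction=0.1,
#                          volume_fraction=0.3, seed=0)
#     blobs.mean()  # close to 0.3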
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/skimage/data/_binary_blobs.py
|
Python
|
gpl-3.0
| 2,068
|
[
"Gaussian"
] |
56c3d95856ff5609044e6bcfeb9fcdd09632d08e0ac81cd0875360c66e37562f
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import pytest
import numpy as np
import numpy.testing as npt
import astropy.units as u
import astropy.constants as const
import os
try:
import emcee
EMCEE_INSTALLED = True
except ImportError:
EMCEE_INSTALLED = False
from ..statistics import PCA, PCA_Distance
from ..statistics.pca.width_estimate import WidthEstimate1D, WidthEstimate2D
from ._testing_data import (dataset1, dataset2, computed_data,
computed_distances)
from .generate_test_images import generate_2D_array, generate_1D_array
from .testing_utilities import assert_between
def test_PCA_method():
tester = PCA(dataset1["cube"], distance=250 * u.pc)
tester.run(mean_sub=True, eigen_cut_method='proportion',
min_eigval=0.75,
spatial_method='contour',
spectral_method='walk-down',
fit_method='odr', brunt_beamcorrect=False,
verbose=True, save_name='test.png')
os.system("rm test.png")
slice_used = slice(0, tester.n_eigs)
npt.assert_allclose(tester.eigvals[slice_used],
computed_data['pca_val'][slice_used])
npt.assert_allclose(tester.spatial_width().value,
computed_data['pca_spatial_widths'])
npt.assert_allclose(tester.spectral_width(unit=u.pix).value,
computed_data['pca_spectral_widths'])
fit_values = computed_data["pca_fit_vals"].reshape(-1)[0]
assert_between(fit_values["index"], tester.index_error_range[0],
tester.index_error_range[1])
assert_between(fit_values["gamma"], tester.gamma_error_range[0],
tester.gamma_error_range[1])
assert_between(fit_values["intercept"],
tester.intercept_error_range(unit=u.pix)[0].value,
tester.intercept_error_range(unit=u.pix)[1].value)
assert_between(fit_values["sonic_length"],
tester.sonic_length(use_gamma=True)[1][0].value,
tester.sonic_length(use_gamma=True)[1][1].value)
# Try setting fit limits to the max and min and ensure we get the same
# values out
tester.fit_plaw(xlow=np.nanmin(tester.spatial_width()),
xhigh=np.nanmax(tester.spatial_width()))
assert_between(fit_values["index"], tester.index_error_range[0],
tester.index_error_range[1])
assert_between(fit_values["gamma"], tester.gamma_error_range[0],
tester.gamma_error_range[1])
assert_between(fit_values["intercept"],
tester.intercept_error_range(unit=u.pix)[0].value,
tester.intercept_error_range(unit=u.pix)[1].value)
assert_between(fit_values["sonic_length"],
tester.sonic_length(use_gamma=True)[1][0].value,
tester.sonic_length(use_gamma=True)[1][1].value)
# Test loading and saving
tester.save_results("pca_output.pkl", keep_data=False)
saved_tester = PCA.load_results("pca_output.pkl")
# Remove the file
os.remove("pca_output.pkl")
npt.assert_allclose(saved_tester.eigvals[slice_used],
computed_data['pca_val'][slice_used])
npt.assert_allclose(saved_tester.spatial_width().value,
computed_data['pca_spatial_widths'])
npt.assert_allclose(saved_tester.spectral_width(unit=u.pix).value,
computed_data['pca_spectral_widths'])
fit_values = computed_data["pca_fit_vals"].reshape(-1)[0]
assert_between(fit_values["index"], saved_tester.index_error_range[0],
saved_tester.index_error_range[1])
assert_between(fit_values["gamma"], saved_tester.gamma_error_range[0],
saved_tester.gamma_error_range[1])
assert_between(fit_values["intercept"],
saved_tester.intercept_error_range(unit=u.pix)[0].value,
saved_tester.intercept_error_range(unit=u.pix)[1].value)
assert_between(fit_values["sonic_length"],
saved_tester.sonic_length(use_gamma=True)[1][0].value,
saved_tester.sonic_length(use_gamma=True)[1][1].value)
@pytest.mark.skipif("not EMCEE_INSTALLED")
def test_PCA_method_w_bayes():
tester = PCA(dataset1["cube"])
tester.run(mean_sub=True, eigen_cut_method='proportion',
min_eigval=0.75,
spatial_method='contour',
spectral_method='walk-down',
fit_method='bayes', brunt_beamcorrect=False,
spectral_output_unit=u.m / u.s)
slice_used = slice(0, tester.n_eigs)
npt.assert_allclose(tester.eigvals[slice_used],
computed_data['pca_val'][slice_used])
npt.assert_allclose(tester.spatial_width().value,
computed_data['pca_spatial_widths'])
npt.assert_allclose(tester.spectral_width(unit=u.pix).value,
computed_data['pca_spectral_widths'])
fit_values = computed_data["pca_fit_vals"].reshape(-1)[0]
assert_between(fit_values["index_bayes"], tester.index_error_range[0],
tester.index_error_range[1])
assert_between(fit_values["gamma_bayes"], tester.gamma_error_range[0],
tester.gamma_error_range[1])
assert_between(fit_values["intercept_bayes"],
tester.intercept_error_range(unit=u.pix)[0].value,
tester.intercept_error_range(unit=u.pix)[1].value)
assert_between(fit_values["sonic_length_bayes"],
tester.sonic_length(use_gamma=True)[1][0].value,
tester.sonic_length(use_gamma=True)[1][1].value)
@pytest.mark.parametrize("method", ['odr', 'bayes'])
@pytest.mark.skipif("not EMCEE_INSTALLED")
def test_PCA_fitting(method):
tester = PCA(dataset1["cube"])
index = 2.
intercept = 1.
err = 0.02
tester._spectral_width = (intercept * np.arange(10)**index +
err * np.random.random(10)) * u.pix
tester._spectral_width_error = np.array([err] * 10) * u.pix
tester._spatial_width = (np.arange(10) + err * np.random.random(10)) * \
u.pix
tester._spatial_width_error = np.array([err] * 10) * u.pix
tester.fit_plaw(fit_method=method)
npt.assert_allclose(tester.index, index, atol=0.05)
npt.assert_allclose(tester.intercept(unit=u.pix).value, 1, atol=0.05)
npt.assert_allclose(tester.gamma, 1.52 * index - 0.19, atol=0.05)
# Check the sonic length
T_k = 10 * u.K
mu = 1.36
c_s = np.sqrt(const.k_B.decompose() * T_k / (mu * const.m_p))
# Convert into number of spectral channel widths
c_s = c_s.to(u.m / u.s).value / np.abs(tester.header['CDELT3'])
l_s = np.power(c_s / 1., 1. / index)
npt.assert_allclose(l_s,
tester.sonic_length(use_gamma=False)[0].value,
atol=0.05)
@pytest.mark.parametrize(("method", "min_eigval"),
[("proportion", 0.99), ("value", 0.001)])
def test_PCA_auto_n_eigs(method, min_eigval):
tester = PCA(dataset1["cube"])
tester.run(mean_sub=True, n_eigs='auto', min_eigval=min_eigval,
eigen_cut_method=method, decomp_only=True)
fit_values = computed_data["pca_fit_vals"].reshape(-1)[0]
assert tester.n_eigs == fit_values["n_eigs_" + method]
def test_PCA_distance():
tester_dist = \
PCA_Distance(dataset1["cube"],
dataset2["cube"])
tester_dist.distance_metric(verbose=True, save_name='test.png')
os.system("rm test.png")
npt.assert_almost_equal(tester_dist.distance,
computed_distances['pca_distance'])
# With PCA classes as inputs
tester_dist2 = \
PCA_Distance(tester_dist.pca1,
tester_dist.pca2)
tester_dist2.distance_metric(verbose=False)
npt.assert_almost_equal(tester_dist2.distance,
computed_distances['pca_distance'])
# With fresh PCA classes
tester = PCA(dataset1["cube"])
tester2 = PCA(dataset2["cube"])
tester_dist3 = \
PCA_Distance(tester,
tester2)
tester_dist3.distance_metric(verbose=False)
npt.assert_almost_equal(tester_dist3.distance,
computed_distances['pca_distance'])
@pytest.mark.parametrize(('method'), ('fit', 'contour', 'interpolate',
'xinterpolate'))
def test_spatial_width_methods(method):
'''
Generate a 2D gaussian and test whether each method returns the expected
size.
Note that, as defined by Heyer & Brunt, the shape will be sigma / sqrt(2),
NOT just the Gaussian width equivalent!
'''
model_gauss = generate_2D_array(x_std=10, y_std=10)
model_gauss += np.random.normal(loc=0.0, scale=0.001,
size=model_gauss.shape)
model_gauss = model_gauss[np.newaxis, :]
widths, errors = WidthEstimate2D(model_gauss, method=method,
brunt_beamcorrect=False)
# NOTE: previous versions were testing against 10 / 2**0.5
# THIS WAS WRONG!
npt.assert_allclose(widths[0], 10.0 * np.sqrt(2), rtol=0.02)
# npt.assert_approx_equal(widths[0], 10.0 / np.sqrt(2), significant=3)
# I get 0.000449 for the error, but we're in a noiseless case so just
# ensure that is very small.
# assert errors[0] < 0.2
def test_spatial_with_beam():
'''
Test running the spatial width find with beam corrections enabled.
'''
conv_scale = np.sqrt(10**2 + (4 / 2.35)**2)
model_gauss = generate_2D_array(x_std=conv_scale, y_std=conv_scale)
model_gauss = model_gauss[np.newaxis, :]
widths, errors = WidthEstimate2D(model_gauss, method='contour',
brunt_beamcorrect=True,
beam_fwhm=2.0 * u.deg,
spatial_cdelt=0.5 * u.deg)
# Using value based on run with given settings.
npt.assert_approx_equal(widths[0], 13.9229, significant=4)
@pytest.mark.parametrize(('method'), ('fit', 'interpolate', 'walk-down'))
def test_spectral_width_methods(method):
'''
Generate a 1D gaussian and test whether each method returns the expected
size.
'''
model_gauss = generate_1D_array(std=10, mean=100.)
# Apply same shift as if taking FFT
shiftx = np.fft.fftshift(model_gauss)[:, np.newaxis]
widths, errors = WidthEstimate1D(shiftx, method=method)
# Expected 1/e width is sqrt(2) * sigma
# Error is at most 1/2 a spectral channel, or just 0.5 in this case
npt.assert_allclose(widths[0], 10.0 * np.sqrt(2), rtol=0.01)
@pytest.mark.xfail(raises=Warning)
def test_PCA_velocity_axis():
'''
PCA requires a velocity spectral axis.
'''
new_hdr = dataset1["cube"][1].copy()
new_hdr["CTYPE3"] = "FREQ "
new_hdr["CUNIT3"] = "Hz "
PCA([dataset1["cube"][0], new_hdr])
|
e-koch/TurbuStat
|
turbustat/tests/test_pca.py
|
Python
|
mit
| 11,070
|
[
"Gaussian"
] |
e3f2d941b914ed3f605b6940971484def1d2b975c29a2a93e6ff220969033ed7
|
# Copyright 2000-2002 by Andrew Dalke.
# Revisions copyright 2007-2008 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Alphabets used in Seq objects etc to declare sequence type and letters.
This is used by sequences which contain a finite number of similar words.
"""
class Alphabet:
size = None # no fixed size for words
letters = None # no fixed alphabet; implement as a list-like
# interface,
def __repr__(self):
return self.__class__.__name__ + "()"
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy only, and does not check the letters property.
This isn't ideal, and doesn't seem to work as intended
with the AlphabetEncoder classes."""
return isinstance(other, self.__class__)
def _case_less(self):
"""Return an case-less variant of the current alphabet (PRIVATE)."""
#TODO - remove this method by dealing with things in subclasses?
if isinstance(self, ProteinAlphabet):
return generic_protein
elif isinstance(self, DNAAlphabet):
return generic_dna
        elif isinstance(self, RNAAlphabet):
            return generic_rna
elif isinstance(self, NucleotideAlphabet):
return generic_nucleotide
elif isinstance(self, SingleLetterAlphabet):
return single_letter_alphabet
else:
return generic_alphabet
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
if not self.letters or self.letters==self.letters.upper():
#Easy case, no letters or already upper case!
return self
else:
#TODO - Raise NotImplementedError and handle via subclass?
return self._case_less()
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
if not self.letters or self.letters==self.letters.lower():
#Easy case, no letters or already lower case!
return self
else:
#TODO - Raise NotImplementedError and handle via subclass?
return self._case_less()
generic_alphabet = Alphabet()
class SingleLetterAlphabet(Alphabet):
size = 1
letters = None # string of all letters in the alphabet
single_letter_alphabet = SingleLetterAlphabet()
########### Protein
class ProteinAlphabet(SingleLetterAlphabet):
pass
generic_protein = ProteinAlphabet()
########### DNA
class NucleotideAlphabet(SingleLetterAlphabet):
pass
generic_nucleotide = NucleotideAlphabet()
class DNAAlphabet(NucleotideAlphabet):
pass
generic_dna = DNAAlphabet()
########### RNA
class RNAAlphabet(NucleotideAlphabet):
pass
generic_rna = RNAAlphabet()
########### Other per-sequence encodings
class SecondaryStructure(SingleLetterAlphabet):
letters = "HSTC"
class ThreeLetterProtein(Alphabet):
size = 3
letters = [
"Ala", "Asx", "Cys", "Asp", "Glu", "Phe", "Gly", "His", "Ile",
"Lys", "Leu", "Met", "Asn", "Pro", "Gln", "Arg", "Ser", "Thr",
"Sec", "Val", "Trp", "Xaa", "Tyr", "Glx",
]
###### Non per-sequence modifications
# (These are Decorator classes)
class AlphabetEncoder:
def __init__(self, alphabet, new_letters):
self.alphabet = alphabet
self.new_letters = new_letters
if alphabet.letters is not None:
self.letters = alphabet.letters + new_letters
else:
self.letters = None
def __getattr__(self, key):
if key[:2] == "__" and key[-2:] == "__":
raise AttributeError(key)
return getattr(self.alphabet, key)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.alphabet,
self.new_letters)
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
        This isn't implemented for the base AlphabetEncoder,
which will always return 0 (False)."""
return 0
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return AlphabetEncoder(self.alphabet._upper(), self.new_letters.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return AlphabetEncoder(self.alphabet._lower(), self.new_letters.lower())
class Gapped(AlphabetEncoder):
def __init__(self, alphabet, gap_char = "-"):
AlphabetEncoder.__init__(self, alphabet, gap_char)
self.gap_char = gap_char
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy, and attempts to check the gap character. This fails
if the other alphabet does not have a gap character!
"""
return other.gap_char == self.gap_char and \
self.alphabet.contains(other.alphabet)
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return Gapped(self.alphabet._upper(), self.gap_char.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return Gapped(self.alphabet._lower(), self.gap_char.lower())
class HasStopCodon(AlphabetEncoder):
def __init__(self, alphabet, stop_symbol = "*"):
AlphabetEncoder.__init__(self, alphabet, stop_symbol)
self.stop_symbol = stop_symbol
def __cmp__(self, other):
x = cmp(self.alphabet, other.alphabet)
if x == 0:
return cmp(self.stop_symbol, other.stop_symbol)
return x
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy, and attempts to check the stop symbol. This fails
if the other alphabet does not have a stop symbol!
"""
return other.stop_symbol == self.stop_symbol and \
self.alphabet.contains(other.alphabet)
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return HasStopCodon(self.alphabet._upper(), self.stop_symbol.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return HasStopCodon(self.alphabet._lower(), self.stop_symbol.lower())
def _get_base_alphabet(alphabet):
"""Returns the non-gapped non-stop-codon Alphabet object (PRIVATE)."""
a = alphabet
while isinstance(a, AlphabetEncoder):
a = a.alphabet
assert isinstance(a, Alphabet), \
"Invalid alphabet found, %s" % repr(a)
return a
def _ungap(alphabet):
"""Returns the alphabet without any gap encoder (PRIVATE)."""
#TODO - Handle via method of the objects?
if not hasattr(alphabet, "gap_char"):
return alphabet
elif isinstance(alphabet, Gapped):
return alphabet.alphabet
elif isinstance(alphabet, HasStopCodon):
return HasStopCodon(_ungap(alphabet.alphabet), stop_symbol=alphabet.stop_symbol)
elif isinstance(alphabet, AlphabetEncoder):
        return AlphabetEncoder(_ungap(alphabet.alphabet), alphabet.new_letters)
else:
raise NotImplementedError
def _consensus_base_alphabet(alphabets):
"""Returns a common but often generic base alphabet object (PRIVATE).
This throws away any AlphabetEncoder information, e.g. Gapped alphabets.
Note that DNA+RNA -> Nucleotide, and Nucleotide+Protein-> generic single
letter. These DO NOT raise an exception!"""
common = None
for alpha in alphabets:
a = _get_base_alphabet(alpha)
if common is None:
common = a
elif common == a:
pass
elif isinstance(a, common.__class__):
pass
elif isinstance(common, a.__class__):
common = a
elif isinstance(a, NucleotideAlphabet) \
and isinstance(common, NucleotideAlphabet):
#e.g. Give a mix of RNA and DNA alphabets
common = generic_nucleotide
elif isinstance(a, SingleLetterAlphabet) \
and isinstance(common, SingleLetterAlphabet):
#This is a pretty big mis-match!
common = single_letter_alphabet
else:
#We have a major mis-match... take the easy way out!
return generic_alphabet
if common is None:
#Given NO alphabets!
return generic_alphabet
return common
def _consensus_alphabet(alphabets):
"""Returns a common but often generic alphabet object (PRIVATE).
Note that DNA+RNA -> Nucleotide, and Nucleotide+Protein-> generic single
letter. These DO NOT raise an exception!
This is aware of Gapped and HasStopCodon and new letters added by
other AlphabetEncoders. This WILL raise an exception if more than
one gap character or stop symbol is present."""
base = _consensus_base_alphabet(alphabets)
gap = None
stop = None
new_letters = ""
for alpha in alphabets:
#Gaps...
if not hasattr(alpha, "gap_char"):
pass
elif gap is None:
gap = alpha.gap_char
elif gap == alpha.gap_char:
pass
else:
raise ValueError("More than one gap character present")
#Stops...
if not hasattr(alpha, "stop_symbol"):
pass
elif stop is None:
stop = alpha.stop_symbol
elif stop == alpha.stop_symbol:
pass
else:
raise ValueError("More than one stop symbol present")
#New letters...
if hasattr(alpha, "new_letters"):
for letter in alpha.new_letters:
if letter not in new_letters \
and letter != gap and letter != stop:
new_letters += letter
alpha = base
if new_letters:
alpha = AlphabetEncoder(alpha, new_letters)
if gap:
alpha = Gapped(alpha, gap_char=gap)
if stop:
alpha = HasStopCodon(alpha, stop_symbol=stop)
return alpha
def _check_type_compatible(alphabets):
"""Returns True except for DNA+RNA or Nucleotide+Protein (PRIVATE).
This relies on the Alphabet subclassing hierarchy. It does not
check things like gap characters or stop symbols."""
dna, rna, nucl, protein = False, False, False, False
for alpha in alphabets:
a = _get_base_alphabet(alpha)
if isinstance(a, DNAAlphabet):
dna = True
nucl = True
if rna or protein : return False
elif isinstance(a, RNAAlphabet):
rna = True
nucl = True
if dna or protein : return False
elif isinstance(a, NucleotideAlphabet):
nucl = True
if protein : return False
elif isinstance(a, ProteinAlphabet):
protein = True
if nucl : return False
return True
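# Editor's sketch: composing the encoders defined above (all names from this
# module; evaluations shown as comments):
#     alpha = HasStopCodon(Gapped(generic_dna, "-"), "*")
#     _get_base_alphabet(alpha) is generic_dna           # True
#     _consensus_alphabet([alpha, Gapped(generic_dna)])  # gapped + stop codon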
|
NirBenTalLab/proorigami-cde-package
|
cde-root/usr/lib64/python2.4/site-packages/Bio/Alphabet/__init__.py
|
Python
|
mit
| 11,346
|
[
"Biopython"
] |
ee5aeaf3c62b823f2c6a3bf64351a6f74719ca5752ff97945153cdfb646bf344
|
# Natural Language Toolkit: Feature Structures
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Edward Loper <edloper@gmail.com>,
# Rob Speer,
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
"""
Basic data classes for representing feature structures, and for
performing basic operations on those feature structures. A feature
structure is a mapping from feature identifiers to feature values,
where each feature value is either a basic value (such as a string or
an integer), or a nested feature structure. There are two types of
feature structure, implemented by two subclasses of ``FeatStruct``:
- feature dictionaries, implemented by ``FeatDict``, act like
Python dictionaries. Feature identifiers may be strings or
instances of the ``Feature`` class.
- feature lists, implemented by ``FeatList``, act like Python
lists. Feature identifiers are integers.
Feature structures are typically used to represent partial information
about objects. A feature identifier that is not mapped to a value
stands for a feature whose value is unknown (*not* a feature without
a value). Two feature structures that represent (potentially
overlapping) information about the same object can be combined by
unification. When two inconsistent feature structures are unified,
the unification fails and returns None.
Features can be specified using "feature paths", or tuples of feature
identifiers that specify a path through the nested feature structures to
a value. Feature structures may contain reentrant feature values. A
"reentrant feature value" is a single feature value that can be
accessed via multiple feature paths. Unification preserves the
reentrance relations imposed by both of the unified feature
structures. In the feature structure resulting from unification, any
modifications to a reentrant feature value will be visible using any
of its feature paths.
Feature structure variables are encoded using the ``nltk.sem.Variable``
class. The variables' values are tracked using a bindings
dictionary, which maps variables to their values. When two feature
structures are unified, a fresh bindings dictionary is created to
track their values; and before unification completes, all bound
variables are replaced by their values. Thus, the bindings
dictionaries are usually strictly internal to the unification process.
However, it is possible to track the bindings of variables if you
choose to, by supplying your own initial bindings dictionary to the
``unify()`` function.
When unbound variables are unified with one another, they become
aliased. This is encoded by binding one variable to the other.
Lightweight Feature Structures
==============================
Many of the functions defined by ``nltk.featstruct`` can be applied
directly to simple Python dictionaries and lists, rather than to
full-fledged ``FeatDict`` and ``FeatList`` objects. In other words,
Python ``dicts`` and ``lists`` can be used as "light-weight" feature
structures.
>>> from nltk.featstruct import unify
>>> unify(dict(x=1, y=dict()), dict(a='a', y=dict(b='b'))) # doctest: +SKIP
{'y': {'b': 'b'}, 'x': 1, 'a': 'a'}
However, you should keep in mind the following caveats:
- Python dictionaries & lists ignore reentrance when checking for
equality between values. But two FeatStructs with different
reentrances are considered nonequal, even if all their base
values are equal.
- FeatStructs can be easily frozen, allowing them to be used as
keys in hash tables. Python dictionaries and lists can not.
- FeatStructs display reentrance in their string representations;
Python dictionaries and lists do not.
- FeatStructs may *not* be mixed with Python dictionaries and lists
(e.g., when performing unification).
- FeatStructs provide a number of useful methods, such as ``walk()``
and ``cyclic()``, which are not available for Python dicts and lists.
In general, if your feature structures will contain any reentrances,
or if you plan to use them as dictionary keys, it is strongly
recommended that you use full-fledged ``FeatStruct`` objects.
"""
from __future__ import print_function, unicode_literals, division
import re
import copy
from nltk.internals import parse_str, raise_unorderable_types
from nltk.sem.logic import (Variable, Expression, SubstituteBindingsI,
LogicParser, ParseException)
from nltk.compat import (string_types, integer_types, total_ordering,
python_2_unicode_compatible, unicode_repr)
######################################################################
# Feature Structure
######################################################################
@total_ordering
class FeatStruct(SubstituteBindingsI):
"""
A mapping from feature identifiers to feature values, where each
feature value is either a basic value (such as a string or an
integer), or a nested feature structure. There are two types of
feature structure:
- feature dictionaries, implemented by ``FeatDict``, act like
Python dictionaries. Feature identifiers may be strings or
instances of the ``Feature`` class.
- feature lists, implemented by ``FeatList``, act like Python
lists. Feature identifiers are integers.
Feature structures may be indexed using either simple feature
identifiers or 'feature paths.' A feature path is a sequence
of feature identifiers that stand for a corresponding sequence of
indexing operations. In particular, ``fstruct[(f1,f2,...,fn)]`` is
equivalent to ``fstruct[f1][f2]...[fn]``.
Feature structures may contain reentrant feature structures. A
"reentrant feature structure" is a single feature structure
object that can be accessed via multiple feature paths. Feature
structures may also be cyclic. A feature structure is "cyclic"
if there is any feature path from the feature structure to itself.
Two feature structures are considered equal if they assign the
same values to all features, and have the same reentrancies.
By default, feature structures are mutable. They may be made
immutable with the ``freeze()`` method. Once they have been
frozen, they may be hashed, and thus used as dictionary keys.
"""
_frozen = False
""":ivar: A flag indicating whether this feature structure is
frozen or not. Once this flag is set, it should never be
un-set; and no further modification should be made to this
feature structue."""
##////////////////////////////////////////////////////////////
#{ Constructor
##////////////////////////////////////////////////////////////
def __new__(cls, features=None, **morefeatures):
"""
Construct and return a new feature structure. If this
constructor is called directly, then the returned feature
structure will be an instance of either the ``FeatDict`` class
or the ``FeatList`` class.
:param features: The initial feature values for this feature
structure:
- FeatStruct(string) -> FeatStructParser().parse(string)
- FeatStruct(mapping) -> FeatDict(mapping)
- FeatStruct(sequence) -> FeatList(sequence)
- FeatStruct() -> FeatDict()
:param morefeatures: If ``features`` is a mapping or None,
then ``morefeatures`` provides additional features for the
``FeatDict`` constructor.
"""
# If the FeatStruct constructor is called directly, then decide
# whether to create a FeatDict or a FeatList, based on the
# contents of the `features` argument.
if cls is FeatStruct:
if features is None:
return FeatDict.__new__(FeatDict, **morefeatures)
elif _is_mapping(features):
return FeatDict.__new__(FeatDict, features, **morefeatures)
elif morefeatures:
raise TypeError('Keyword arguments may only be specified '
'if features is None or is a mapping.')
if isinstance(features, string_types):
if FeatStructParser._START_FDICT_RE.match(features):
return FeatDict.__new__(FeatDict, features, **morefeatures)
else:
return FeatList.__new__(FeatList, features, **morefeatures)
elif _is_sequence(features):
return FeatList.__new__(FeatList, features)
else:
raise TypeError('Expected string or mapping or sequence')
# Otherwise, construct the object as normal.
else:
return super(FeatStruct, cls).__new__(cls, features,
**morefeatures)
##////////////////////////////////////////////////////////////
#{ Uniform Accessor Methods
##////////////////////////////////////////////////////////////
# These helper functions allow the methods defined by FeatStruct
# to treat all feature structures as mappings, even if they're
# really lists. (Lists are treated as mappings from ints to vals)
def _keys(self):
"""Return an iterable of the feature identifiers used by this
FeatStruct."""
raise NotImplementedError() # Implemented by subclasses.
def _values(self):
"""Return an iterable of the feature values directly defined
by this FeatStruct."""
raise NotImplementedError() # Implemented by subclasses.
def _items(self):
"""Return an iterable of (fid,fval) pairs, where fid is a
feature identifier and fval is the corresponding feature
value, for all features defined by this FeatStruct."""
raise NotImplementedError() # Implemented by subclasses.
##////////////////////////////////////////////////////////////
#{ Equality & Hashing
##////////////////////////////////////////////////////////////
def equal_values(self, other, check_reentrance=False):
"""
        Return True if ``self`` and ``other`` assign the same value
to every feature. In particular, return true if
``self[p]==other[p]`` for every feature path *p* such
that ``self[p]`` or ``other[p]`` is a base value (i.e.,
not a nested feature structure).
:param check_reentrance: If True, then also return False if
there is any difference between the reentrances of ``self``
and ``other``.
        :note: the ``==`` operator is equivalent to ``equal_values()`` with
``check_reentrance=True``.
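        For example, two structures with equal base values but different
        reentrances compare equal only when reentrance is ignored (an
        illustrative sketch, using this module's reentrance notation;
        doctest skipped):
        >>> fs1 = FeatStruct('[a=(1)[x=1], b->(1)]')       # doctest: +SKIP
        >>> fs2 = FeatStruct('[a=[x=1], b=[x=1]]')         # doctest: +SKIP
        >>> fs1.equal_values(fs2)                          # doctest: +SKIP
        True
        >>> fs1.equal_values(fs2, check_reentrance=True)   # doctest: +SKIP
        False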
"""
return self._equal(other, check_reentrance, set(), set(), set())
def __eq__(self, other):
"""
Return true if ``self`` and ``other`` are both feature structures,
assign the same values to all features, and contain the same
reentrances. I.e., return
``self.equal_values(other, check_reentrance=True)``.
:see: ``equal_values()``
"""
return self._equal(other, True, set(), set(), set())
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if not isinstance(other, FeatStruct):
# raise_unorderable_types("<", self, other)
# Sometimes feature values can be pure strings,
# so we need to be able to compare with non-featstructs:
return self.__class__.__name__ < other.__class__.__name__
else:
return len(self) < len(other)
def __hash__(self):
"""
If this feature structure is frozen, return its hash value;
otherwise, raise ``TypeError``.
"""
if not self._frozen:
raise TypeError('FeatStructs must be frozen before they '
'can be hashed.')
try: return self._hash
except AttributeError:
self._hash = self._calculate_hashvalue(set())
return self._hash
def _equal(self, other, check_reentrance, visited_self,
visited_other, visited_pairs):
"""
Return True iff self and other have equal values.
:param visited_self: A set containing the ids of all ``self``
feature structures we've already visited.
:param visited_other: A set containing the ids of all ``other``
feature structures we've already visited.
:param visited_pairs: A set containing ``(selfid, otherid)`` pairs
for all pairs of feature structures we've already visited.
"""
# If we're the same object, then we're equal.
if self is other: return True
# If we have different classes, we're definitely not equal.
if self.__class__ != other.__class__: return False
# If we define different features, we're definitely not equal.
# (Perform len test first because it's faster -- we should
# do profiling to see if this actually helps)
if len(self) != len(other): return False
if set(self._keys()) != set(other._keys()): return False
# If we're checking reentrance, then any time we revisit a
# structure, make sure that it was paired with the same
# feature structure that it is now. Note: if check_reentrance,
# then visited_pairs will never contain two pairs whose first
# values are equal, or two pairs whose second values are equal.
if check_reentrance:
if id(self) in visited_self or id(other) in visited_other:
return (id(self), id(other)) in visited_pairs
# If we're not checking reentrance, then we still need to deal
# with cycles. If we encounter the same (self, other) pair a
# second time, then we won't learn anything more by examining
# their children a second time, so just return true.
else:
if (id(self), id(other)) in visited_pairs:
return True
# Keep track of which nodes we've visited.
visited_self.add(id(self))
visited_other.add(id(other))
visited_pairs.add( (id(self), id(other)) )
# Now we have to check all values. If any of them don't match,
# then return false.
for (fname, self_fval) in self._items():
other_fval = other[fname]
if isinstance(self_fval, FeatStruct):
if not self_fval._equal(other_fval, check_reentrance,
visited_self, visited_other,
visited_pairs):
return False
else:
if self_fval != other_fval: return False
# Everything matched up; return true.
return True
def _calculate_hashvalue(self, visited):
"""
Return a hash value for this feature structure.
:require: ``self`` must be frozen.
:param visited: A set containing the ids of all feature
structures we've already visited while hashing.
"""
if id(self) in visited: return 1
visited.add(id(self))
hashval = 5831
for (fname, fval) in sorted(self._items()):
hashval *= 37
hashval += hash(fname)
hashval *= 37
if isinstance(fval, FeatStruct):
hashval += fval._calculate_hashvalue(visited)
else:
hashval += hash(fval)
# Convert to a 32 bit int.
hashval = int(hashval & 0x7fffffff)
return hashval
##////////////////////////////////////////////////////////////
#{ Freezing
##////////////////////////////////////////////////////////////
#: Error message used by mutating methods when called on a frozen
#: feature structure.
_FROZEN_ERROR = "Frozen FeatStructs may not be modified."
def freeze(self):
"""
Make this feature structure, and any feature structures it
contains, immutable. Note: this method does not attempt to
'freeze' any feature value that is not a ``FeatStruct``; it
is recommended that you use only immutable feature values.
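        For example, freezing makes a structure hashable and rejects
        further mutation (illustrative doctest, skipped):
        >>> fs = FeatStruct(number='singular')   # doctest: +SKIP
        >>> fs.freeze()                          # doctest: +SKIP
        >>> index = {fs: 'ok'}                   # doctest: +SKIP
        >>> fs['number'] = 'plural'              # doctest: +SKIP
        Traceback (most recent call last):
        ...
        ValueError: Frozen FeatStructs may not be modified.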
"""
if self._frozen: return
self._freeze(set())
def frozen(self):
"""
Return True if this feature structure is immutable. Feature
structures can be made immutable with the ``freeze()`` method.
Immutable feature structures may not be made mutable again,
but new mutable copies can be produced with the ``copy()`` method.
"""
return self._frozen
def _freeze(self, visited):
"""
Make this feature structure, and any feature structure it
contains, immutable.
:param visited: A set containing the ids of all feature
structures we've already visited while freezing.
"""
if id(self) in visited: return
visited.add(id(self))
self._frozen = True
for (fname, fval) in sorted(self._items()):
if isinstance(fval, FeatStruct):
fval._freeze(visited)
##////////////////////////////////////////////////////////////
#{ Copying
##////////////////////////////////////////////////////////////
def copy(self, deep=True):
"""
Return a new copy of ``self``. The new copy will not be frozen.
:param deep: If true, create a deep copy; if false, create
a shallow copy.
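        For example, a shallow copy shares nested values with the
        original, while a deep copy does not (illustrative doctest,
        skipped):
        >>> inner = FeatStruct(x=1)             # doctest: +SKIP
        >>> fs = FeatStruct(a=inner)            # doctest: +SKIP
        >>> fs.copy(deep=False)['a'] is inner   # doctest: +SKIP
        True
        >>> fs.copy(deep=True)['a'] is inner    # doctest: +SKIP
        False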
"""
if deep:
return copy.deepcopy(self)
else:
return self.__class__(self)
# Subclasses should define __deepcopy__ to ensure that the new
# copy will not be frozen.
def __deepcopy__(self, memo):
raise NotImplementedError() # Implemented by subclasses.
##////////////////////////////////////////////////////////////
#{ Structural Information
##////////////////////////////////////////////////////////////
def cyclic(self):
"""
Return True if this feature structure contains itself.
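        For example, a structure that is assigned as one of its own
        feature values is cyclic (illustrative doctest, skipped):
        >>> fs = FeatStruct()   # doctest: +SKIP
        >>> fs['x'] = fs        # doctest: +SKIP
        >>> fs.cyclic()         # doctest: +SKIP
        True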
"""
return self._find_reentrances({})[id(self)]
def walk(self):
"""
Return an iterator that generates this feature structure, and
each feature structure it contains. Each feature structure will
be generated exactly once.
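        For example, a two-level nested structure yields three feature
        structures in total (illustrative doctest, skipped):
        >>> fs = FeatStruct('[a=[b=[]]]')   # doctest: +SKIP
        >>> len(list(fs.walk()))            # doctest: +SKIP
        3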
"""
return self._walk(set())
    def _walk(self, visited):
        """
        Return an iterator that generates this feature structure, and
        each feature structure it contains.
        :param visited: A set containing the ids of all feature
            structures we've already visited while walking.
        """
        if id(self) in visited: return
        visited.add(id(self))
        yield self
        for fval in self._values():
            if isinstance(fval, FeatStruct):
                for elt in fval._walk(visited):
                    yield elt
# Walk through the feature tree. The first time we see a feature
# value, map it to False (not reentrant). If we see a feature
# value more than once, then map it to True (reentrant).
def _find_reentrances(self, reentrances):
"""
Return a dictionary that maps from the ``id`` of each feature
structure contained in ``self`` (including ``self``) to a
boolean value, indicating whether it is reentrant or not.
"""
if id(self) in reentrances:
# We've seen it more than once.
reentrances[id(self)] = True
else:
# This is the first time we've seen it.
reentrances[id(self)] = False
# Recurse to contained feature structures.
for fval in self._values():
if isinstance(fval, FeatStruct):
fval._find_reentrances(reentrances)
return reentrances
##////////////////////////////////////////////////////////////
#{ Variables & Bindings
##////////////////////////////////////////////////////////////
def substitute_bindings(self, bindings):
""":see: ``nltk.featstruct.substitute_bindings()``"""
return substitute_bindings(self, bindings)
def retract_bindings(self, bindings):
""":see: ``nltk.featstruct.retract_bindings()``"""
return retract_bindings(self, bindings)
def variables(self):
""":see: ``nltk.featstruct.find_variables()``"""
return find_variables(self)
def rename_variables(self, vars=None, used_vars=(), new_vars=None):
""":see: ``nltk.featstruct.rename_variables()``"""
return rename_variables(self, vars, used_vars, new_vars)
def remove_variables(self):
"""
Return the feature structure that is obtained by deleting
any feature whose value is a ``Variable``.
:rtype: FeatStruct
"""
return remove_variables(self)
##////////////////////////////////////////////////////////////
#{ Unification
##////////////////////////////////////////////////////////////
    def unify(self, other, bindings=None, trace=False,
              fail=None, rename_vars=True):
        """:see: ``nltk.featstruct.unify()``"""
        return unify(self, other, bindings, trace, fail, rename_vars)
def subsumes(self, other):
"""
Return True if ``self`` subsumes ``other``. I.e., return true
        if unifying ``self`` with ``other`` would result in a feature
structure equal to ``other``.
"""
return subsumes(self, other)
##////////////////////////////////////////////////////////////
#{ String Representations
##////////////////////////////////////////////////////////////
def __repr__(self):
"""
Display a single-line representation of this feature structure,
suitable for embedding in other representations.
"""
return self._repr(self._find_reentrances({}), {})
def _repr(self, reentrances, reentrance_ids):
"""
Return a string representation of this feature structure.
        :param reentrances: A dictionary that maps from the ``id`` of
            each feature value in self to a boolean indicating whether
            that value is reentrant or not.
        :param reentrance_ids: A dictionary mapping from each ``id``
            of a feature value to a unique identifier. This is modified
            by ``_repr``: the first time a reentrant feature value is
            displayed, an identifier is added to ``reentrance_ids`` for it.
"""
raise NotImplementedError()
# Mutation: disable if frozen.
_FROZEN_ERROR = "Frozen FeatStructs may not be modified."
_FROZEN_NOTICE = "\n%sIf self is frozen, raise ValueError."
def _check_frozen(method, indent=''):
"""
Given a method function, return a new method function that first
checks if ``self._frozen`` is true; and if so, raises ``ValueError``
with an appropriate message. Otherwise, call the method and return
its result.
"""
def wrapped(self, *args, **kwargs):
if self._frozen: raise ValueError(_FROZEN_ERROR)
else: return method(self, *args, **kwargs)
wrapped.__name__ = method.__name__
wrapped.__doc__ = (method.__doc__ or '') + (_FROZEN_NOTICE % indent)
return wrapped
######################################################################
# Feature Dictionary
######################################################################
@python_2_unicode_compatible
class FeatDict(FeatStruct, dict):
"""
A feature structure that acts like a Python dictionary. I.e., a
mapping from feature identifiers to feature values, where a feature
identifier can be a string or a ``Feature``; and where a feature value
can be either a basic value (such as a string or an integer), or a nested
    feature structure. A feature identifier for a ``FeatDict`` is
sometimes called a "feature name".
Two feature dicts are considered equal if they assign the same
values to all features, and have the same reentrances.
:see: ``FeatStruct`` for information about feature paths, reentrance,
cyclic feature structures, mutability, freezing, and hashing.
"""
def __init__(self, features=None, **morefeatures):
"""
Create a new feature dictionary, with the specified features.
:param features: The initial value for this feature
dictionary. If ``features`` is a ``FeatStruct``, then its
features are copied (shallow copy). If ``features`` is a
dict, then a feature is created for each item, mapping its
key to its value. If ``features`` is a string, then it is
parsed using ``FeatStructParser``. If ``features`` is a list of
tuples ``(name, val)``, then a feature is created for each tuple.
:param morefeatures: Additional features for the new feature
dictionary. If a feature is listed under both ``features`` and
``morefeatures``, then the value from ``morefeatures`` will be
used.
"""
if isinstance(features, string_types):
FeatStructParser().parse(features, self)
self.update(**morefeatures)
else:
# update() checks the types of features.
self.update(features, **morefeatures)
#////////////////////////////////////////////////////////////
#{ Dict methods
#////////////////////////////////////////////////////////////
_INDEX_ERROR = str("Expected feature name or path. Got %r.")
def __getitem__(self, name_or_path):
"""If the feature with the given name or path exists, return
its value; otherwise, raise ``KeyError``."""
if isinstance(name_or_path, (string_types, Feature)):
return dict.__getitem__(self, name_or_path)
elif isinstance(name_or_path, tuple):
try:
val = self
for fid in name_or_path:
if not isinstance(val, FeatStruct):
raise KeyError # path contains base value
val = val[fid]
return val
except (KeyError, IndexError):
raise KeyError(name_or_path)
else:
raise TypeError(self._INDEX_ERROR % name_or_path)
def get(self, name_or_path, default=None):
"""If the feature with the given name or path exists, return its
value; otherwise, return ``default``."""
try: return self[name_or_path]
except KeyError: return default
def __contains__(self, name_or_path):
"""Return true if a feature with the given name or path exists."""
try: self[name_or_path]; return True
except KeyError: return False
def has_key(self, name_or_path):
"""Return true if a feature with the given name or path exists."""
return name_or_path in self
def __delitem__(self, name_or_path):
"""If the feature with the given name or path exists, delete
its value; otherwise, raise ``KeyError``."""
if self._frozen: raise ValueError(_FROZEN_ERROR)
if isinstance(name_or_path, (string_types, Feature)):
return dict.__delitem__(self, name_or_path)
elif isinstance(name_or_path, tuple):
if len(name_or_path) == 0:
raise ValueError("The path () can not be set")
else:
parent = self[name_or_path[:-1]]
if not isinstance(parent, FeatStruct):
raise KeyError(name_or_path) # path contains base value
del parent[name_or_path[-1]]
else:
raise TypeError(self._INDEX_ERROR % name_or_path)
def __setitem__(self, name_or_path, value):
"""Set the value for the feature with the given name or path
to ``value``. If ``name_or_path`` is an invalid path, raise
``KeyError``."""
if self._frozen: raise ValueError(_FROZEN_ERROR)
if isinstance(name_or_path, (string_types, Feature)):
return dict.__setitem__(self, name_or_path, value)
elif isinstance(name_or_path, tuple):
if len(name_or_path) == 0:
raise ValueError("The path () can not be set")
else:
parent = self[name_or_path[:-1]]
if not isinstance(parent, FeatStruct):
raise KeyError(name_or_path) # path contains base value
parent[name_or_path[-1]] = value
else:
raise TypeError(self._INDEX_ERROR % name_or_path)
clear = _check_frozen(dict.clear)
pop = _check_frozen(dict.pop)
popitem = _check_frozen(dict.popitem)
setdefault = _check_frozen(dict.setdefault)
def update(self, features=None, **morefeatures):
if self._frozen: raise ValueError(_FROZEN_ERROR)
if features is None:
items = ()
elif hasattr(features, 'items') and callable(features.items):
items = features.items()
elif hasattr(features, '__iter__'):
items = features
else:
raise ValueError('Expected mapping or list of tuples')
for key, val in items:
if not isinstance(key, (string_types, Feature)):
raise TypeError('Feature names must be strings')
self[key] = val
for key, val in morefeatures.items():
if not isinstance(key, (string_types, Feature)):
raise TypeError('Feature names must be strings')
self[key] = val
##////////////////////////////////////////////////////////////
#{ Copying
##////////////////////////////////////////////////////////////
def __deepcopy__(self, memo):
memo[id(self)] = selfcopy = self.__class__()
for (key, val) in self._items():
selfcopy[copy.deepcopy(key,memo)] = copy.deepcopy(val,memo)
return selfcopy
##////////////////////////////////////////////////////////////
#{ Uniform Accessor Methods
##////////////////////////////////////////////////////////////
def _keys(self): return self.keys()
def _values(self): return self.values()
def _items(self): return self.items()
##////////////////////////////////////////////////////////////
#{ String Representations
##////////////////////////////////////////////////////////////
def __str__(self):
"""
Display a multi-line representation of this feature dictionary
as an FVM (feature value matrix).
"""
return '\n'.join(self._str(self._find_reentrances({}), {}))
def _repr(self, reentrances, reentrance_ids):
segments = []
prefix = ''
suffix = ''
# If this is the first time we've seen a reentrant structure,
# then assign it a unique identifier.
if reentrances[id(self)]:
assert id(self) not in reentrance_ids
reentrance_ids[id(self)] = repr(len(reentrance_ids)+1)
# sorting note: keys are unique strings, so we'll never fall
# through to comparing values.
for (fname, fval) in sorted(self.items()):
display = getattr(fname, 'display', None)
if id(fval) in reentrance_ids:
segments.append('%s->(%s)' %
(fname, reentrance_ids[id(fval)]))
elif (display == 'prefix' and not prefix and
isinstance(fval, (Variable, string_types))):
prefix = '%s' % fval
elif display == 'slash' and not suffix:
if isinstance(fval, Variable):
suffix = '/%s' % fval.name
else:
suffix = '/%s' % unicode_repr(fval)
elif isinstance(fval, Variable):
segments.append('%s=%s' % (fname, fval.name))
elif fval is True:
segments.append('+%s' % fname)
elif fval is False:
segments.append('-%s' % fname)
elif isinstance(fval, Expression):
segments.append('%s=<%s>' % (fname, fval))
elif not isinstance(fval, FeatStruct):
segments.append('%s=%s' % (fname, unicode_repr(fval)))
else:
fval_repr = fval._repr(reentrances, reentrance_ids)
segments.append('%s=%s' % (fname, fval_repr))
# If it's reentrant, then add on an identifier tag.
if reentrances[id(self)]:
prefix = '(%s)%s' % (reentrance_ids[id(self)], prefix)
return '%s[%s]%s' % (prefix, ', '.join(segments), suffix)
def _str(self, reentrances, reentrance_ids):
"""
:return: A list of lines composing a string representation of
this feature dictionary.
        :param reentrances: A dictionary that maps from the ``id`` of
            each feature value in self to a boolean indicating whether
            that value is reentrant or not.
        :param reentrance_ids: A dictionary mapping from each ``id``
            of a feature value to a unique identifier. This is modified
            by ``_str``: the first time a reentrant feature value is
            displayed, an identifier is added to ``reentrance_ids`` for
            it.
"""
# If this is the first time we've seen a reentrant structure,
# then tack on an id string.
if reentrances[id(self)]:
assert id(self) not in reentrance_ids
reentrance_ids[id(self)] = repr(len(reentrance_ids)+1)
# Special case: empty feature dict.
if len(self) == 0:
if reentrances[id(self)]:
return ['(%s) []' % reentrance_ids[id(self)]]
else:
return ['[]']
# What's the longest feature name? Use this to align names.
maxfnamelen = max(len("%s" % k) for k in self.keys())
lines = []
# sorting note: keys are unique strings, so we'll never fall
# through to comparing values.
for (fname, fval) in sorted(self.items()):
fname = ("%s" % fname).ljust(maxfnamelen)
if isinstance(fval, Variable):
lines.append('%s = %s' % (fname,fval.name))
elif isinstance(fval, Expression):
lines.append('%s = <%s>' % (fname, fval))
elif isinstance(fval, FeatList):
fval_repr = fval._repr(reentrances, reentrance_ids)
lines.append('%s = %s' % (fname, unicode_repr(fval_repr)))
elif not isinstance(fval, FeatDict):
# It's not a nested feature structure -- just print it.
lines.append('%s = %s' % (fname, unicode_repr(fval)))
elif id(fval) in reentrance_ids:
# It's a feature structure we've seen before -- print
# the reentrance id.
lines.append('%s -> (%s)' % (fname, reentrance_ids[id(fval)]))
else:
# It's a new feature structure. Separate it from
# other values by a blank line.
if lines and lines[-1] != '': lines.append('')
# Recursively print the feature's value (fval).
fval_lines = fval._str(reentrances, reentrance_ids)
# Indent each line to make room for fname.
fval_lines = [(' '*(maxfnamelen+3))+l for l in fval_lines]
# Pick which line we'll display fname on, & splice it in.
nameline = (len(fval_lines)-1) // 2
fval_lines[nameline] = (
fname+' ='+fval_lines[nameline][maxfnamelen+2:])
# Add the feature structure to the output.
lines += fval_lines
# Separate FeatStructs by a blank line.
lines.append('')
# Get rid of any excess blank lines.
if lines[-1] == '': lines.pop()
# Add brackets around everything.
maxlen = max(len(line) for line in lines)
lines = ['[ %s%s ]' % (line, ' '*(maxlen-len(line))) for line in lines]
# If it's reentrant, then add on an identifier tag.
if reentrances[id(self)]:
idstr = '(%s) ' % reentrance_ids[id(self)]
lines = [(' '*len(idstr))+l for l in lines]
idline = (len(lines)-1) // 2
lines[idline] = idstr + lines[idline][len(idstr):]
return lines
######################################################################
# Feature List
######################################################################
class FeatList(FeatStruct, list):
"""
A list of feature values, where each feature value is either a
basic value (such as a string or an integer), or a nested feature
structure.
Feature lists may contain reentrant feature values. A "reentrant
feature value" is a single feature value that can be accessed via
multiple feature paths. Feature lists may also be cyclic.
Two feature lists are considered equal if they assign the same
values to all features, and have the same reentrances.
:see: ``FeatStruct`` for information about feature paths, reentrance,
cyclic feature structures, mutability, freezing, and hashing.
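    For example, a feature list can be indexed by an integer or by a
    feature path (illustrative doctest, skipped):
    >>> fl = FeatList(['a', 'b'])   # doctest: +SKIP
    >>> fl[1]                       # doctest: +SKIP
    'b'
    >>> fl[(1,)]                    # doctest: +SKIP
    'b'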
"""
def __init__(self, features=()):
"""
Create a new feature list, with the specified features.
:param features: The initial list of features for this feature
            list. If ``features`` is a string, then it is parsed using
``FeatStructParser``. Otherwise, it should be a sequence
of basic values and nested feature structures.
"""
if isinstance(features, string_types):
FeatStructParser().parse(features, self)
else:
list.__init__(self, features)
#////////////////////////////////////////////////////////////
#{ List methods
#////////////////////////////////////////////////////////////
_INDEX_ERROR = "Expected int or feature path. Got %r."
def __getitem__(self, name_or_path):
if isinstance(name_or_path, integer_types):
return list.__getitem__(self, name_or_path)
elif isinstance(name_or_path, tuple):
try:
val = self
for fid in name_or_path:
if not isinstance(val, FeatStruct):
raise KeyError # path contains base value
val = val[fid]
return val
except (KeyError, IndexError):
raise KeyError(name_or_path)
else:
raise TypeError(self._INDEX_ERROR % name_or_path)
def __delitem__(self, name_or_path):
"""If the feature with the given name or path exists, delete
its value; otherwise, raise ``KeyError``."""
if self._frozen: raise ValueError(_FROZEN_ERROR)
if isinstance(name_or_path, (integer_types, slice)):
return list.__delitem__(self, name_or_path)
elif isinstance(name_or_path, tuple):
if len(name_or_path) == 0:
raise ValueError("The path () can not be set")
else:
parent = self[name_or_path[:-1]]
if not isinstance(parent, FeatStruct):
raise KeyError(name_or_path) # path contains base value
del parent[name_or_path[-1]]
else:
raise TypeError(self._INDEX_ERROR % name_or_path)
def __setitem__(self, name_or_path, value):
"""Set the value for the feature with the given name or path
to ``value``. If ``name_or_path`` is an invalid path, raise
``KeyError``."""
if self._frozen: raise ValueError(_FROZEN_ERROR)
if isinstance(name_or_path, (integer_types, slice)):
return list.__setitem__(self, name_or_path, value)
elif isinstance(name_or_path, tuple):
if len(name_or_path) == 0:
raise ValueError("The path () can not be set")
else:
parent = self[name_or_path[:-1]]
if not isinstance(parent, FeatStruct):
raise KeyError(name_or_path) # path contains base value
parent[name_or_path[-1]] = value
else:
raise TypeError(self._INDEX_ERROR % name_or_path)
# __delslice__ = _check_frozen(list.__delslice__, ' ')
# __setslice__ = _check_frozen(list.__setslice__, ' ')
__iadd__ = _check_frozen(list.__iadd__)
__imul__ = _check_frozen(list.__imul__)
append = _check_frozen(list.append)
extend = _check_frozen(list.extend)
insert = _check_frozen(list.insert)
pop = _check_frozen(list.pop)
remove = _check_frozen(list.remove)
reverse = _check_frozen(list.reverse)
sort = _check_frozen(list.sort)
##////////////////////////////////////////////////////////////
#{ Copying
##////////////////////////////////////////////////////////////
def __deepcopy__(self, memo):
memo[id(self)] = selfcopy = self.__class__()
selfcopy.extend(copy.deepcopy(fval,memo) for fval in self)
return selfcopy
##////////////////////////////////////////////////////////////
#{ Uniform Accessor Methods
##////////////////////////////////////////////////////////////
def _keys(self): return list(range(len(self)))
def _values(self): return self
def _items(self): return enumerate(self)
##////////////////////////////////////////////////////////////
#{ String Representations
##////////////////////////////////////////////////////////////
# Special handling for: reentrances, variables, expressions.
def _repr(self, reentrances, reentrance_ids):
# If this is the first time we've seen a reentrant structure,
# then assign it a unique identifier.
if reentrances[id(self)]:
assert id(self) not in reentrance_ids
reentrance_ids[id(self)] = repr(len(reentrance_ids)+1)
prefix = '(%s)' % reentrance_ids[id(self)]
else:
prefix = ''
segments = []
for fval in self:
if id(fval) in reentrance_ids:
segments.append('->(%s)' % reentrance_ids[id(fval)])
elif isinstance(fval, Variable):
segments.append(fval.name)
elif isinstance(fval, Expression):
segments.append('%s' % fval)
elif isinstance(fval, FeatStruct):
segments.append(fval._repr(reentrances, reentrance_ids))
else:
segments.append('%s' % unicode_repr(fval))
return '%s[%s]' % (prefix, ', '.join(segments))
######################################################################
# Variables & Bindings
######################################################################
def substitute_bindings(fstruct, bindings, fs_class='default'):
"""
Return the feature structure that is obtained by replacing each
variable bound by ``bindings`` with its binding. If a variable is
aliased to a bound variable, then it will be replaced by that
variable's value. If a variable is aliased to an unbound
variable, then it will be replaced by that variable.
:type bindings: dict(Variable -> any)
:param bindings: A dictionary mapping from variables to values.
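    For example (an illustrative sketch, using ``Variable`` from
    ``nltk.sem.logic``; doctest skipped):
    >>> fs = FeatStruct('[a=?x, b=[c=?x]]')               # doctest: +SKIP
    >>> substitute_bindings(fs, {Variable('?x'): 'cat'})  # doctest: +SKIP
    [a='cat', b=[c='cat']]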
"""
if fs_class == 'default': fs_class = _default_fs_class(fstruct)
fstruct = copy.deepcopy(fstruct)
_substitute_bindings(fstruct, bindings, fs_class, set())
return fstruct
def _substitute_bindings(fstruct, bindings, fs_class, visited):
# Visit each node only once:
if id(fstruct) in visited: return
visited.add(id(fstruct))
if _is_mapping(fstruct): items = fstruct.items()
elif _is_sequence(fstruct): items = enumerate(fstruct)
else: raise ValueError('Expected mapping or sequence')
for (fname, fval) in items:
while (isinstance(fval, Variable) and fval in bindings):
fval = fstruct[fname] = bindings[fval]
if isinstance(fval, fs_class):
_substitute_bindings(fval, bindings, fs_class, visited)
elif isinstance(fval, SubstituteBindingsI):
fstruct[fname] = fval.substitute_bindings(bindings)
def retract_bindings(fstruct, bindings, fs_class='default'):
"""
Return the feature structure that is obtained by replacing each
feature structure value that is bound by ``bindings`` with the
variable that binds it. A feature structure value must be
identical to a bound value (i.e., have equal id) to be replaced.
``bindings`` is modified to point to this new feature structure,
rather than the original feature structure. Feature structure
values in ``bindings`` may be modified if they are contained in
``fstruct``.
"""
if fs_class == 'default': fs_class = _default_fs_class(fstruct)
(fstruct, new_bindings) = copy.deepcopy((fstruct, bindings))
bindings.update(new_bindings)
inv_bindings = dict((id(val),var) for (var,val) in bindings.items())
_retract_bindings(fstruct, inv_bindings, fs_class, set())
return fstruct
def _retract_bindings(fstruct, inv_bindings, fs_class, visited):
# Visit each node only once:
if id(fstruct) in visited: return
visited.add(id(fstruct))
if _is_mapping(fstruct): items = fstruct.items()
elif _is_sequence(fstruct): items = enumerate(fstruct)
else: raise ValueError('Expected mapping or sequence')
for (fname, fval) in items:
if isinstance(fval, fs_class):
if id(fval) in inv_bindings:
fstruct[fname] = inv_bindings[id(fval)]
_retract_bindings(fval, inv_bindings, fs_class, visited)
def find_variables(fstruct, fs_class='default'):
"""
:return: The set of variables used by this feature structure.
:rtype: set(Variable)
"""
if fs_class == 'default': fs_class = _default_fs_class(fstruct)
return _variables(fstruct, set(), fs_class, set())
def _variables(fstruct, vars, fs_class, visited):
# Visit each node only once:
if id(fstruct) in visited: return
visited.add(id(fstruct))
if _is_mapping(fstruct): items = fstruct.items()
elif _is_sequence(fstruct): items = enumerate(fstruct)
else: raise ValueError('Expected mapping or sequence')
for (fname, fval) in items:
if isinstance(fval, Variable):
vars.add(fval)
elif isinstance(fval, fs_class):
_variables(fval, vars, fs_class, visited)
elif isinstance(fval, SubstituteBindingsI):
vars.update(fval.variables())
return vars
def rename_variables(fstruct, vars=None, used_vars=(), new_vars=None,
fs_class='default'):
"""
Return the feature structure that is obtained by replacing
any of this feature structure's variables that are in ``vars``
with new variables. The names for these new variables will be
names that are not used by any variable in ``vars``, or in
``used_vars``, or in this feature structure.
:type vars: set
:param vars: The set of variables that should be renamed.
If not specified, ``find_variables(fstruct)`` is used; i.e., all
variables will be given new names.
:type used_vars: set
:param used_vars: A set of variables whose names should not be
used by the new variables.
:type new_vars: dict(Variable -> Variable)
:param new_vars: A dictionary that is used to hold the mapping
from old variables to new variables. For each variable *v*
in this feature structure:
- If ``new_vars`` maps *v* to *v'*, then *v* will be
replaced by *v'*.
- If ``new_vars`` does not contain *v*, but ``vars``
does contain *v*, then a new entry will be added to
``new_vars``, mapping *v* to the new variable that is used
to replace it.
To consistently rename the variables in a set of feature
structures, simply apply rename_variables to each one, using
the same dictionary:
>>> from nltk.featstruct import FeatStruct
>>> fstruct1 = FeatStruct('[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]')
>>> fstruct2 = FeatStruct('[subj=[agr=[number=?z,gender=?y]], obj=[agr=[number=?z,gender=?y]]]')
>>> new_vars = {} # Maps old vars to alpha-renamed vars
>>> fstruct1.rename_variables(new_vars=new_vars)
[obj=[agr=[gender=?y2]], subj=[agr=[gender=?y2]]]
>>> fstruct2.rename_variables(new_vars=new_vars)
[obj=[agr=[gender=?y2, number=?z2]], subj=[agr=[gender=?y2, number=?z2]]]
If new_vars is not specified, then an empty dictionary is used.
"""
if fs_class == 'default': fs_class = _default_fs_class(fstruct)
# Default values:
if new_vars is None: new_vars = {}
if vars is None: vars = find_variables(fstruct, fs_class)
else: vars = set(vars)
# Add our own variables to used_vars.
used_vars = find_variables(fstruct, fs_class).union(used_vars)
# Copy ourselves, and rename variables in the copy.
return _rename_variables(copy.deepcopy(fstruct), vars, used_vars,
new_vars, fs_class, set())
def _rename_variables(fstruct, vars, used_vars, new_vars, fs_class, visited):
if id(fstruct) in visited: return
visited.add(id(fstruct))
if _is_mapping(fstruct): items = fstruct.items()
elif _is_sequence(fstruct): items = enumerate(fstruct)
else: raise ValueError('Expected mapping or sequence')
for (fname, fval) in items:
if isinstance(fval, Variable):
# If it's in new_vars, then rebind it.
if fval in new_vars:
fstruct[fname] = new_vars[fval]
# If it's in vars, pick a new name for it.
elif fval in vars:
new_vars[fval] = _rename_variable(fval, used_vars)
fstruct[fname] = new_vars[fval]
used_vars.add(new_vars[fval])
elif isinstance(fval, fs_class):
_rename_variables(fval, vars, used_vars, new_vars,
fs_class, visited)
elif isinstance(fval, SubstituteBindingsI):
# Pick new names for any variables in `vars`
for var in fval.variables():
if var in vars and var not in new_vars:
new_vars[var] = _rename_variable(var, used_vars)
used_vars.add(new_vars[var])
# Replace all variables in `new_vars`.
fstruct[fname] = fval.substitute_bindings(new_vars)
return fstruct
def _rename_variable(var, used_vars):
    name, n = re.sub(r'\d+$', '', var.name), 2
if not name: name = '?'
while Variable('%s%s' % (name, n)) in used_vars: n += 1
return Variable('%s%s' % (name, n))
def remove_variables(fstruct, fs_class='default'):
"""
:rtype: FeatStruct
:return: The feature structure that is obtained by deleting
all features whose values are ``Variables``.
"""
if fs_class == 'default': fs_class = _default_fs_class(fstruct)
return _remove_variables(copy.deepcopy(fstruct), fs_class, set())
def _remove_variables(fstruct, fs_class, visited):
if id(fstruct) in visited:
return
visited.add(id(fstruct))
if _is_mapping(fstruct):
items = list(fstruct.items())
elif _is_sequence(fstruct):
items = list(enumerate(fstruct))
else:
raise ValueError('Expected mapping or sequence')
for (fname, fval) in items:
if isinstance(fval, Variable):
del fstruct[fname]
elif isinstance(fval, fs_class):
_remove_variables(fval, fs_class, visited)
return fstruct
######################################################################
# Unification
######################################################################
@python_2_unicode_compatible
class _UnificationFailure(object):
def __repr__(self):
return 'nltk.featstruct.UnificationFailure'
UnificationFailure = _UnificationFailure()
"""A unique value used to indicate unification failure. It can be
returned by ``Feature.unify_base_values()`` or by custom ``fail()``
functions to indicate that unification should fail."""
# The basic unification algorithm:
# 1. Make copies of self and other (preserving reentrance)
# 2. Destructively unify self and other
# 3. Apply forward pointers, to preserve reentrance.
# 4. Replace bound variables with their values.
def unify(fstruct1, fstruct2, bindings=None, trace=False,
fail=None, rename_vars=True, fs_class='default'):
"""
Unify ``fstruct1`` with ``fstruct2``, and return the resulting feature
structure. This unified feature structure is the minimal
feature structure that contains all feature value assignments from both
``fstruct1`` and ``fstruct2``, and that preserves all reentrancies.
If no such feature structure exists (because ``fstruct1`` and
``fstruct2`` specify incompatible values for some feature), then
unification fails, and ``unify`` returns None.
Bound variables are replaced by their values. Aliased
variables are replaced by their representative variable
(if unbound) or the value of their representative variable
(if bound). I.e., if variable *v* is in ``bindings``,
then *v* is replaced by ``bindings[v]``. This will
be repeated until the variable is replaced by an unbound
variable or a non-variable value.
Unbound variables are bound when they are unified with
values; and aliased when they are unified with variables.
I.e., if variable *v* is not in ``bindings``, and is
unified with a variable or value *x*, then
``bindings[v]`` is set to *x*.
If ``bindings`` is unspecified, then all variables are
assumed to be unbound. I.e., ``bindings`` defaults to an
empty dict.
>>> from nltk.featstruct import FeatStruct
>>> FeatStruct('[a=?x]').unify(FeatStruct('[b=?x]'))
[a=?x, b=?x2]
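    A further illustrative sketch, showing how a user-supplied
    ``bindings`` dict is updated when a variable is unified with a
    value (doctest skipped):
    >>> bindings = {}                                              # doctest: +SKIP
    >>> FeatStruct('[a=?x]').unify(FeatStruct('[a=1]'), bindings)  # doctest: +SKIP
    [a=1]
    >>> bindings                                                   # doctest: +SKIP
    {Variable('?x'): 1}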
:type bindings: dict(Variable -> any)
:param bindings: A set of variable bindings to be used and
updated during unification.
:type trace: bool
:param trace: If true, generate trace output.
:type rename_vars: bool
:param rename_vars: If True, then rename any variables in
``fstruct2`` that are also used in ``fstruct1``, in order to
avoid collisions on variable names.
"""
# Decide which class(es) will be treated as feature structures,
# for the purposes of unification.
if fs_class == 'default':
fs_class = _default_fs_class(fstruct1)
if _default_fs_class(fstruct2) != fs_class:
raise ValueError("Mixing FeatStruct objects with Python "
"dicts and lists is not supported.")
assert isinstance(fstruct1, fs_class)
assert isinstance(fstruct2, fs_class)
# If bindings are unspecified, use an empty set of bindings.
user_bindings = (bindings is not None)
if bindings is None: bindings = {}
# Make copies of fstruct1 and fstruct2 (since the unification
# algorithm is destructive). Do it all at once, to preserve
# reentrance links between fstruct1 and fstruct2. Copy bindings
# as well, in case there are any bound vars that contain parts
# of fstruct1 or fstruct2.
(fstruct1copy, fstruct2copy, bindings_copy) = (
copy.deepcopy((fstruct1, fstruct2, bindings)))
# Copy the bindings back to the original bindings dict.
bindings.update(bindings_copy)
if rename_vars:
vars1 = find_variables(fstruct1copy, fs_class)
vars2 = find_variables(fstruct2copy, fs_class)
_rename_variables(fstruct2copy, vars1, vars2, {}, fs_class, set())
# Do the actual unification. If it fails, return None.
forward = {}
if trace: _trace_unify_start((), fstruct1copy, fstruct2copy)
try: result = _destructively_unify(fstruct1copy, fstruct2copy, bindings,
forward, trace, fail, fs_class, ())
except _UnificationFailureError: return None
# _destructively_unify might return UnificationFailure, e.g. if we
# tried to unify a mapping with a sequence.
if result is UnificationFailure:
if fail is None: return None
else: return fail(fstruct1copy, fstruct2copy, ())
# Replace any feature structure that has a forward pointer
# with the target of its forward pointer.
result = _apply_forwards(result, forward, fs_class, set())
if user_bindings: _apply_forwards_to_bindings(forward, bindings)
# Replace bound vars with values.
_resolve_aliases(bindings)
_substitute_bindings(result, bindings, fs_class, set())
# Return the result.
if trace: _trace_unify_succeed((), result)
if trace: _trace_bindings((), bindings)
return result
class _UnificationFailureError(Exception):
"""An exception that is used by ``_destructively_unify`` to abort
unification when a failure is encountered."""
def _destructively_unify(fstruct1, fstruct2, bindings, forward,
trace, fail, fs_class, path):
"""
Attempt to unify ``fstruct1`` and ``fstruct2`` by modifying them
in-place. If the unification succeeds, then ``fstruct1`` will
contain the unified value, the value of ``fstruct2`` is undefined,
and forward[id(fstruct2)] is set to fstruct1. If the unification
fails, then a _UnificationFailureError is raised, and the
values of ``fstruct1`` and ``fstruct2`` are undefined.
:param bindings: A dictionary mapping variables to values.
:param forward: A dictionary mapping feature structures ids
to replacement structures. When two feature structures
are merged, a mapping from one to the other will be added
to the forward dictionary; and changes will be made only
to the target of the forward dictionary.
``_destructively_unify`` will always 'follow' any links
in the forward dictionary for fstruct1 and fstruct2 before
actually unifying them.
:param trace: If true, generate trace output
:param path: The feature path that led us to this unification
step. Used for trace output.
"""
# If fstruct1 is already identical to fstruct2, we're done.
# Note: this, together with the forward pointers, ensures
# that unification will terminate even for cyclic structures.
if fstruct1 is fstruct2:
if trace: _trace_unify_identity(path, fstruct1)
return fstruct1
# Set fstruct2's forward pointer to point to fstruct1; this makes
# fstruct1 the canonical copy for fstruct2. Note that we need to
# do this before we recurse into any child structures, in case
# they're cyclic.
forward[id(fstruct2)] = fstruct1
# Unifying two mappings:
if _is_mapping(fstruct1) and _is_mapping(fstruct2):
for fname in fstruct1:
if getattr(fname, 'default', None) is not None:
fstruct2.setdefault(fname, fname.default)
for fname in fstruct2:
if getattr(fname, 'default', None) is not None:
fstruct1.setdefault(fname, fname.default)
# Unify any values that are defined in both fstruct1 and
# fstruct2. Copy any values that are defined in fstruct2 but
# not in fstruct1 to fstruct1. Note: sorting fstruct2's
# features isn't actually necessary; but we do it to give
# deterministic behavior, e.g. for tracing.
for fname, fval2 in sorted(fstruct2.items()):
if fname in fstruct1:
fstruct1[fname] = _unify_feature_values(
fname, fstruct1[fname], fval2, bindings,
forward, trace, fail, fs_class, path+(fname,))
else:
fstruct1[fname] = fval2
return fstruct1 # Contains the unified value.
# Unifying two sequences:
elif _is_sequence(fstruct1) and _is_sequence(fstruct2):
# If the lengths don't match, fail.
if len(fstruct1) != len(fstruct2):
return UnificationFailure
# Unify corresponding values in fstruct1 and fstruct2.
for findex in range(len(fstruct1)):
fstruct1[findex] = _unify_feature_values(
findex, fstruct1[findex], fstruct2[findex], bindings,
forward, trace, fail, fs_class, path+(findex,))
return fstruct1 # Contains the unified value.
# Unifying sequence & mapping: fail. The failure function
# doesn't get a chance to recover in this case.
elif ((_is_sequence(fstruct1) or _is_mapping(fstruct1)) and
(_is_sequence(fstruct2) or _is_mapping(fstruct2))):
return UnificationFailure
# Unifying anything else: not allowed!
raise TypeError('Expected mappings or sequences')
def _unify_feature_values(fname, fval1, fval2, bindings, forward,
trace, fail, fs_class, fpath):
"""
    Attempt to unify ``fval1`` and ``fval2``, and return the
resulting unified value. The method of unification will depend on
the types of ``fval1`` and ``fval2``:
1. If they're both feature structures, then destructively
       unify them (see ``_destructively_unify()``).
2. If they're both unbound variables, then alias one variable
to the other (by setting bindings[v2]=v1).
3. If one is an unbound variable, and the other is a value,
then bind the unbound variable to the value.
4. If one is a feature structure, and the other is a base value,
then fail.
5. If they're both base values, then unify them. By default,
this will succeed if they are equal, and fail otherwise.
"""
if trace: _trace_unify_start(fpath, fval1, fval2)
# Look up the "canonical" copy of fval1 and fval2
while id(fval1) in forward: fval1 = forward[id(fval1)]
while id(fval2) in forward: fval2 = forward[id(fval2)]
# If fval1 or fval2 is a bound variable, then
# replace it by the variable's bound value. This
# includes aliased variables, which are encoded as
# variables bound to other variables.
fvar1 = fvar2 = None
while isinstance(fval1, Variable) and fval1 in bindings:
fvar1 = fval1
fval1 = bindings[fval1]
while isinstance(fval2, Variable) and fval2 in bindings:
fvar2 = fval2
fval2 = bindings[fval2]
# Case 1: Two feature structures (recursive case)
if isinstance(fval1, fs_class) and isinstance(fval2, fs_class):
result = _destructively_unify(fval1, fval2, bindings, forward,
trace, fail, fs_class, fpath)
# Case 2: Two unbound variables (create alias)
elif (isinstance(fval1, Variable) and
isinstance(fval2, Variable)):
if fval1 != fval2: bindings[fval2] = fval1
result = fval1
# Case 3: An unbound variable and a value (bind)
elif isinstance(fval1, Variable):
bindings[fval1] = fval2
result = fval1
elif isinstance(fval2, Variable):
bindings[fval2] = fval1
result = fval2
# Case 4: A feature structure & a base value (fail)
elif isinstance(fval1, fs_class) or isinstance(fval2, fs_class):
result = UnificationFailure
# Case 5: Two base values
else:
# Case 5a: Feature defines a custom unification method for base values
if isinstance(fname, Feature):
result = fname.unify_base_values(fval1, fval2, bindings)
# Case 5b: Feature value defines custom unification method
elif isinstance(fval1, CustomFeatureValue):
result = fval1.unify(fval2)
# Sanity check: unify value should be symmetric
if (isinstance(fval2, CustomFeatureValue) and
result != fval2.unify(fval1)):
raise AssertionError(
'CustomFeatureValue objects %r and %r disagree '
'about unification value: %r vs. %r' %
(fval1, fval2, result, fval2.unify(fval1)))
elif isinstance(fval2, CustomFeatureValue):
result = fval2.unify(fval1)
# Case 5c: Simple values -- check if they're equal.
else:
if fval1 == fval2:
result = fval1
else:
result = UnificationFailure
# If either value was a bound variable, then update the
# bindings. (This is really only necessary if fname is a
# Feature or if either value is a CustomFeatureValue.)
if result is not UnificationFailure:
if fvar1 is not None:
bindings[fvar1] = result
result = fvar1
if fvar2 is not None and fvar2 != fvar1:
bindings[fvar2] = result
result = fvar2
    # If unification failed, call the failure function; it
# might decide to continue anyway.
if result is UnificationFailure:
if fail is not None: result = fail(fval1, fval2, fpath)
if trace: _trace_unify_fail(fpath[:-1], result)
if result is UnificationFailure:
raise _UnificationFailureError
# Normalize the result.
if isinstance(result, fs_class):
result = _apply_forwards(result, forward, fs_class, set())
if trace: _trace_unify_succeed(fpath, result)
if trace and isinstance(result, fs_class):
_trace_bindings(fpath, bindings)
return result
def _apply_forwards_to_bindings(forward, bindings):
"""
Replace any feature structure that has a forward pointer with
the target of its forward pointer (to preserve reentrancy).
"""
for (var, value) in bindings.items():
while id(value) in forward:
value = forward[id(value)]
bindings[var] = value
def _apply_forwards(fstruct, forward, fs_class, visited):
"""
Replace any feature structure that has a forward pointer with
the target of its forward pointer (to preserve reentrancy).
"""
# Follow our own forwards pointers (if any)
while id(fstruct) in forward: fstruct = forward[id(fstruct)]
# Visit each node only once:
if id(fstruct) in visited: return
visited.add(id(fstruct))
if _is_mapping(fstruct): items = fstruct.items()
elif _is_sequence(fstruct): items = enumerate(fstruct)
else: raise ValueError('Expected mapping or sequence')
for fname, fval in items:
if isinstance(fval, fs_class):
# Replace w/ forwarded value.
while id(fval) in forward:
fval = forward[id(fval)]
fstruct[fname] = fval
# Recurse to child.
_apply_forwards(fval, forward, fs_class, visited)
return fstruct
def _resolve_aliases(bindings):
"""
Replace any bound aliased vars with their binding; and replace
any unbound aliased vars with their representative var.
"""
for (var, value) in bindings.items():
while isinstance(value, Variable) and value in bindings:
value = bindings[var] = bindings[value]
def _trace_unify_start(path, fval1, fval2):
if path == ():
print('\nUnification trace:')
else:
fullname = '.'.join("%s" % n for n in path)
print(' '+'| '*(len(path)-1)+'|')
print(' '+'| '*(len(path)-1)+'| Unify feature: %s' % fullname)
print(' '+'| '*len(path)+' / '+_trace_valrepr(fval1))
print(' '+'| '*len(path)+'|\\ '+_trace_valrepr(fval2))
def _trace_unify_identity(path, fval1):
print(' '+'| '*len(path)+'|')
print(' '+'| '*len(path)+'| (identical objects)')
print(' '+'| '*len(path)+'|')
print(' '+'| '*len(path)+'+-->'+unicode_repr(fval1))
def _trace_unify_fail(path, result):
if result is UnificationFailure: resume = ''
else: resume = ' (nonfatal)'
print(' '+'| '*len(path)+'| |')
print(' '+'X '*len(path)+'X X <-- FAIL'+resume)
def _trace_unify_succeed(path, fval1):
# Print the result.
print(' '+'| '*len(path)+'|')
print(' '+'| '*len(path)+'+-->'+unicode_repr(fval1))
def _trace_bindings(path, bindings):
# Print the bindings (if any).
if len(bindings) > 0:
binditems = sorted(bindings.items(), key=lambda v:v[0].name)
bindstr = '{%s}' % ', '.join(
'%s: %s' % (var, _trace_valrepr(val))
for (var, val) in binditems)
print(' '+'| '*len(path)+' Bindings: '+bindstr)
def _trace_valrepr(val):
if isinstance(val, Variable):
return '%s' % val
else:
return '%s' % unicode_repr(val)
def subsumes(fstruct1, fstruct2):
"""
Return True if ``fstruct1`` subsumes ``fstruct2``. I.e., return
true if unifying ``fstruct1`` with ``fstruct2`` would result in a
feature structure equal to ``fstruct2.``
:rtype: bool
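    For example (illustrative doctest, skipped):
    >>> subsumes(FeatStruct('[a=1]'), FeatStruct('[a=1, b=2]'))   # doctest: +SKIP
    True
    >>> subsumes(FeatStruct('[a=1, b=2]'), FeatStruct('[a=1]'))   # doctest: +SKIP
    False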
"""
return fstruct2 == unify(fstruct1, fstruct2)
def conflicts(fstruct1, fstruct2, trace=0):
"""
Return a list of the feature paths of all features which are
assigned incompatible values by ``fstruct1`` and ``fstruct2``.
:rtype: list(tuple)
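    For example, two structures that disagree on the value at the path
    ``('agr', 'num')`` produce that path as a conflict (illustrative
    doctest, skipped):
    >>> fs1 = FeatStruct('[agr=[num=sg], pos=noun]')   # doctest: +SKIP
    >>> fs2 = FeatStruct('[agr=[num=pl]]')             # doctest: +SKIP
    >>> conflicts(fs1, fs2)                            # doctest: +SKIP
    [('agr', 'num')]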
"""
conflict_list = []
def add_conflict(fval1, fval2, path):
conflict_list.append(path)
return fval1
unify(fstruct1, fstruct2, fail=add_conflict, trace=trace)
return conflict_list
######################################################################
# Helper Functions
######################################################################
def _is_mapping(v):
return hasattr(v, '__contains__') and hasattr(v, 'keys')
def _is_sequence(v):
return (hasattr(v, '__iter__') and hasattr(v, '__len__') and
not isinstance(v, string_types))
def _default_fs_class(obj):
if isinstance(obj, FeatStruct): return FeatStruct
if isinstance(obj, (dict, list)): return (dict, list)
else:
raise ValueError('To unify objects of type %s, you must specify '
'fs_class explicitly.' % obj.__class__.__name__)
######################################################################
# FeatureValueSet & FeatureValueTuple
######################################################################
class SubstituteBindingsSequence(SubstituteBindingsI):
"""
    A mixin class for sequence classes that distributes variables() and
substitute_bindings() over the object's elements.
"""
def variables(self):
return ([elt for elt in self if isinstance(elt, Variable)] +
sum([list(elt.variables()) for elt in self
if isinstance(elt, SubstituteBindingsI)], []))
def substitute_bindings(self, bindings):
return self.__class__([self.subst(v, bindings) for v in self])
def subst(self, v, bindings):
if isinstance(v, SubstituteBindingsI):
return v.substitute_bindings(bindings)
else:
return bindings.get(v, v)
@python_2_unicode_compatible
class FeatureValueTuple(SubstituteBindingsSequence, tuple):
"""
A base feature value that is a tuple of other base feature values.
    FeatureValueTuple implements ``SubstituteBindingsI``, so any
    variable substitutions will be propagated to the elements
    contained by the tuple. A ``FeatureValueTuple`` is immutable.
"""
def __repr__(self): # [xx] really use %s here?
if len(self) == 0: return '()'
return '(%s)' % ', '.join('%s' % (b,) for b in self)
@python_2_unicode_compatible
class FeatureValueSet(SubstituteBindingsSequence, frozenset):
"""
A base feature value that is a set of other base feature values.
    FeatureValueSet implements ``SubstituteBindingsI``, so any
variable substitutions will be propagated to the elements
contained by the set. A ``FeatureValueSet`` is immutable.
"""
def __repr__(self): # [xx] really use %s here?
if len(self) == 0: return '{/}' # distinguish from dict.
# n.b., we sort the string reprs of our elements, to ensure
# that our own repr is deterministic.
return '{%s}' % ', '.join(sorted('%s' % (b,) for b in self))
__str__ = __repr__
@python_2_unicode_compatible
class FeatureValueUnion(SubstituteBindingsSequence, frozenset):
"""
A base feature value that represents the union of two or more
    ``FeatureValueSet`` or ``Variable`` values.
"""
def __new__(cls, values):
# If values contains FeatureValueUnions, then collapse them.
values = _flatten(values, FeatureValueUnion)
# If the resulting list contains no variables, then
# use a simple FeatureValueSet instead.
if sum(isinstance(v, Variable) for v in values) == 0:
values = _flatten(values, FeatureValueSet)
return FeatureValueSet(values)
# If we contain a single variable, return that variable.
if len(values) == 1:
return list(values)[0]
# Otherwise, build the FeatureValueUnion.
return frozenset.__new__(cls, values)
def __repr__(self):
# n.b., we sort the string reprs of our elements, to ensure
# that our own repr is deterministic. also, note that len(self)
# is guaranteed to be 2 or more.
return '{%s}' % '+'.join(sorted('%s' % (b,) for b in self))
@python_2_unicode_compatible
class FeatureValueConcat(SubstituteBindingsSequence, tuple):
"""
A base feature value that represents the concatenation of two or
    more ``FeatureValueTuple`` or ``Variable`` values.
"""
def __new__(cls, values):
# If values contains FeatureValueConcats, then collapse them.
values = _flatten(values, FeatureValueConcat)
# If the resulting list contains no variables, then
# use a simple FeatureValueTuple instead.
if sum(isinstance(v, Variable) for v in values) == 0:
values = _flatten(values, FeatureValueTuple)
return FeatureValueTuple(values)
# If we contain a single variable, return that variable.
if len(values) == 1:
return list(values)[0]
# Otherwise, build the FeatureValueConcat.
return tuple.__new__(cls, values)
def __repr__(self):
# n.b.: len(self) is guaranteed to be 2 or more.
return '(%s)' % '+'.join('%s' % (b,) for b in self)
def _flatten(lst, cls):
"""
Helper function -- return a copy of list, with all elements of
type ``cls`` spliced in rather than appended in.
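    For example, ``_flatten([1, (2, 3), (4,)], tuple)`` returns
    ``[1, 2, 3, 4]``.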
"""
result = []
for elt in lst:
if isinstance(elt, cls): result.extend(elt)
else: result.append(elt)
return result
######################################################################
# Specialized Features
######################################################################
@total_ordering
@python_2_unicode_compatible
class Feature(object):
"""
    A feature identifier that's specialized to carry additional
    information, such as constraints, default values, etc.
"""
def __init__(self, name, default=None, display=None):
assert display in (None, 'prefix', 'slash')
self._name = name # [xx] rename to .identifier?
self._default = default # [xx] not implemented yet.
self._display = display
if self._display == 'prefix':
self._sortkey = (-1, self._name)
elif self._display == 'slash':
self._sortkey = (1, self._name)
else:
self._sortkey = (0, self._name)
@property
def name(self):
"""The name of this feature."""
return self._name
@property
def default(self):
"""Default value for this feature."""
return self._default
@property
def display(self):
"""Custom display location: can be prefix, or slash."""
return self._display
def __repr__(self):
return '*%s*' % self.name
def __lt__(self, other):
if isinstance(other, string_types):
return True
if not isinstance(other, Feature):
raise_unorderable_types("<", self, other)
return self._sortkey < other._sortkey
def __eq__(self, other):
return type(self) == type(other) and self._name == other._name
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._name)
#////////////////////////////////////////////////////////////
# These can be overridden by subclasses:
#////////////////////////////////////////////////////////////
def parse_value(self, s, position, reentrances, parser):
return parser.parse_value(s, position, reentrances)
def unify_base_values(self, fval1, fval2, bindings):
"""
        If possible, return a single value. If not, return
the value ``UnificationFailure``.
"""
if fval1 == fval2: return fval1
else: return UnificationFailure
class SlashFeature(Feature):
def parse_value(self, s, position, reentrances, parser):
return parser.partial_parse(s, position, reentrances)
class RangeFeature(Feature):
    RANGE_RE = re.compile(r'(-?\d+):(-?\d+)')
def parse_value(self, s, position, reentrances, parser):
m = self.RANGE_RE.match(s, position)
if not m: raise ValueError('range', position)
return (int(m.group(1)), int(m.group(2))), m.end()
def unify_base_values(self, fval1, fval2, bindings):
if fval1 is None: return fval2
if fval2 is None: return fval1
rng = max(fval1[0], fval2[0]), min(fval1[1], fval2[1])
if rng[1] < rng[0]: return UnificationFailure
return rng
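# A hedged illustration of RangeFeature unification (example values assumed,
# not from the source): overlapping ranges unify to their intersection, while
# disjoint ranges fail.
#
#     rf = RangeFeature('span')
#     rf.unify_base_values((0, 5), (3, 8), {})   # -> (3, 5)
#     rf.unify_base_values((0, 2), (4, 6), {})   # -> UnificationFailure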
SLASH = SlashFeature('slash', default=False, display='slash')
TYPE = Feature('type', display='prefix')
######################################################################
# Specialized Feature Values
######################################################################
@total_ordering
class CustomFeatureValue(object):
"""
An abstract base class for base values that define a custom
unification method. The custom unification method of
``CustomFeatureValue`` will be used during unification if:
- The ``CustomFeatureValue`` is unified with another base value.
- The ``CustomFeatureValue`` is not the value of a customized
``Feature`` (which defines its own unification method).
If two ``CustomFeatureValue`` objects are unified with one another
during feature structure unification, then the unified base values
they return *must* be equal; otherwise, an ``AssertionError`` will
be raised.
Subclasses must define ``unify()``, ``__eq__()`` and ``__lt__()``.
Subclasses may also wish to define ``__hash__()``.
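    A minimal subclass sketch (hypothetical, for illustration only):
        >>> class ExactValue(CustomFeatureValue):
        ...     def __init__(self, value): self.value = value
        ...     def unify(self, other):
        ...         if isinstance(other, ExactValue) and other.value == self.value:
        ...             return self
        ...         return UnificationFailure
        ...     def __eq__(self, other):
        ...         return isinstance(other, ExactValue) and other.value == self.value
        ...     def __lt__(self, other):
        ...         return isinstance(other, ExactValue) and self.value < other.value
        ...     def __hash__(self): return hash(self.value)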
"""
def unify(self, other):
"""
If this base value unifies with ``other``, then return the
unified value. Otherwise, return ``UnificationFailure``.
"""
raise NotImplementedError('abstract base class')
def __eq__(self, other):
raise NotImplementedError('abstract base class')
def __ne__(self, other):
return not self == other
def __lt__(self, other):
raise NotImplementedError('abstract base class')
def __hash__(self):
        raise TypeError('%s objects are unhashable' % self.__class__.__name__)
######################################################################
# Feature Structure Parser
######################################################################
class FeatStructParser(object):
def __init__(self, features=(SLASH, TYPE), fdict_class=FeatStruct,
flist_class=FeatList, logic_parser=None):
self._features = dict((f.name,f) for f in features)
self._fdict_class = fdict_class
self._flist_class = flist_class
self._prefix_feature = None
self._slash_feature = None
for feature in features:
if feature.display == 'slash':
if self._slash_feature:
raise ValueError('Multiple features w/ display=slash')
self._slash_feature = feature
if feature.display == 'prefix':
if self._prefix_feature:
raise ValueError('Multiple features w/ display=prefix')
self._prefix_feature = feature
self._features_with_defaults = [feature for feature in features
if feature.default is not None]
if logic_parser is None:
logic_parser = LogicParser()
self._logic_parser = logic_parser
def parse(self, s, fstruct=None):
"""
Convert a string representation of a feature structure (as
displayed by repr) into a ``FeatStruct``. This parse
imposes the following restrictions on the string
representation:
- Feature names cannot contain any of the following:
whitespace, parentheses, quote marks, equals signs,
dashes, commas, and square brackets. Feature names may
not begin with plus signs or minus signs.
        - Only the following basic feature values are supported:
strings, integers, variables, None, and unquoted
alphanumeric strings.
- For reentrant values, the first mention must specify
a reentrance identifier and a value; and any subsequent
mentions must use arrows (``'->'``) to reference the
reentrance identifier.
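        A hedged usage sketch (feature names and values assumed):
            >>> parser = FeatStructParser()
            >>> fs = parser.parse('[cat=NP, agr=[num=sg, person=3]]')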
"""
s = s.strip()
value, position = self.partial_parse(s, 0, {}, fstruct)
if position != len(s):
self._error(s, 'end of string', position)
return value
_START_FSTRUCT_RE = re.compile(r'\s*(?:\((\d+)\)\s*)?(\??[\w-]+)?(\[)')
_END_FSTRUCT_RE = re.compile(r'\s*]\s*')
_SLASH_RE = re.compile(r'/')
_FEATURE_NAME_RE = re.compile(r'\s*([+-]?)([^\s\(\)<>"\'\-=\[\],]+)\s*')
_REENTRANCE_RE = re.compile(r'\s*->\s*')
_TARGET_RE = re.compile(r'\s*\((\d+)\)\s*')
_ASSIGN_RE = re.compile(r'\s*=\s*')
_COMMA_RE = re.compile(r'\s*,\s*')
_BARE_PREFIX_RE = re.compile(r'\s*(?:\((\d+)\)\s*)?(\??[\w-]+\s*)()')
# This one is used to distinguish fdicts from flists:
_START_FDICT_RE = re.compile(r'(%s)|(%s\s*(%s\s*(=|->)|[+-]%s|\]))' % (
_BARE_PREFIX_RE.pattern, _START_FSTRUCT_RE.pattern,
_FEATURE_NAME_RE.pattern, _FEATURE_NAME_RE.pattern))
def partial_parse(self, s, position=0, reentrances=None, fstruct=None):
"""
Helper function that parses a feature structure.
:param s: The string to parse.
:param position: The position in the string to start parsing.
:param reentrances: A dictionary from reentrance ids to values.
Defaults to an empty dictionary.
:return: A tuple (val, pos) of the feature structure created by
parsing and the position where the parsed feature structure ends.
        :rtype: tuple
"""
if reentrances is None: reentrances = {}
try:
return self._partial_parse(s, position, reentrances, fstruct)
except ValueError as e:
if len(e.args) != 2: raise
self._error(s, *e.args)
def _partial_parse(self, s, position, reentrances, fstruct=None):
# Create the new feature structure
if fstruct is None:
if self._START_FDICT_RE.match(s, position):
fstruct = self._fdict_class()
else:
fstruct = self._flist_class()
# Read up to the open bracket.
match = self._START_FSTRUCT_RE.match(s, position)
if not match:
match = self._BARE_PREFIX_RE.match(s, position)
if not match:
raise ValueError('open bracket or identifier', position)
position = match.end()
        # If there was an identifier, record it.
if match.group(1):
identifier = match.group(1)
if identifier in reentrances:
raise ValueError('new identifier', match.start(1))
reentrances[identifier] = fstruct
if isinstance(fstruct, FeatDict):
fstruct.clear()
return self._partial_parse_featdict(s, position, match,
reentrances, fstruct)
else:
del fstruct[:]
return self._partial_parse_featlist(s, position, match,
reentrances, fstruct)
def _partial_parse_featlist(self, s, position, match,
reentrances, fstruct):
# Prefix features are not allowed:
if match.group(2): raise ValueError('open bracket')
# Bare prefixes are not allowed:
if not match.group(3): raise ValueError('open bracket')
# Build a list of the features defined by the structure.
while position < len(s):
# Check for the close bracket.
match = self._END_FSTRUCT_RE.match(s, position)
if match is not None:
return fstruct, match.end()
            # Reentrances have the form "-> (target)"
match = self._REENTRANCE_RE.match(s, position)
if match:
position = match.end()
match = self._TARGET_RE.match(s, position)
if not match: raise ValueError('identifier', position)
target = match.group(1)
if target not in reentrances:
raise ValueError('bound identifier', position)
position = match.end()
fstruct.append(reentrances[target])
# Anything else is a value.
else:
value, position = (
self._parse_value(0, s, position, reentrances))
fstruct.append(value)
# If there's a close bracket, handle it at the top of the loop.
if self._END_FSTRUCT_RE.match(s, position):
continue
# Otherwise, there should be a comma
match = self._COMMA_RE.match(s, position)
if match is None: raise ValueError('comma', position)
position = match.end()
# We never saw a close bracket.
raise ValueError('close bracket', position)
def _partial_parse_featdict(self, s, position, match,
reentrances, fstruct):
# If there was a prefix feature, record it.
if match.group(2):
if self._prefix_feature is None:
raise ValueError('open bracket or identifier', match.start(2))
prefixval = match.group(2).strip()
if prefixval.startswith('?'):
prefixval = Variable(prefixval)
fstruct[self._prefix_feature] = prefixval
# If group 3 is empty, then we just have a bare prefix, so
# we're done.
if not match.group(3):
return self._finalize(s, match.end(), reentrances, fstruct)
# Build a list of the features defined by the structure.
# Each feature has one of the three following forms:
# name = value
# name -> (target)
# +name
# -name
while position < len(s):
# Use these variables to hold info about each feature:
name = value = None
# Check for the close bracket.
match = self._END_FSTRUCT_RE.match(s, position)
if match is not None:
return self._finalize(s, match.end(), reentrances, fstruct)
# Get the feature name's name
match = self._FEATURE_NAME_RE.match(s, position)
if match is None: raise ValueError('feature name', position)
name = match.group(2)
position = match.end()
# Check if it's a special feature.
if name[0] == '*' and name[-1] == '*':
name = self._features.get(name[1:-1])
if name is None:
raise ValueError('known special feature', match.start(2))
# Check if this feature has a value already.
if name in fstruct:
raise ValueError('new name', match.start(2))
# Boolean value ("+name" or "-name")
if match.group(1) == '+': value = True
if match.group(1) == '-': value = False
# Reentrance link ("-> (target)")
if value is None:
match = self._REENTRANCE_RE.match(s, position)
if match is not None:
position = match.end()
match = self._TARGET_RE.match(s, position)
if not match:
raise ValueError('identifier', position)
target = match.group(1)
if target not in reentrances:
raise ValueError('bound identifier', position)
position = match.end()
value = reentrances[target]
# Assignment ("= value").
if value is None:
match = self._ASSIGN_RE.match(s, position)
if match:
position = match.end()
value, position = (
self._parse_value(name, s, position, reentrances))
# None of the above: error.
else:
raise ValueError('equals sign', position)
# Store the value.
fstruct[name] = value
# If there's a close bracket, handle it at the top of the loop.
if self._END_FSTRUCT_RE.match(s, position):
continue
# Otherwise, there should be a comma
match = self._COMMA_RE.match(s, position)
if match is None: raise ValueError('comma', position)
position = match.end()
# We never saw a close bracket.
raise ValueError('close bracket', position)
def _finalize(self, s, pos, reentrances, fstruct):
"""
Called when we see the close brace -- checks for a slash feature,
and adds in default values.
"""
# Add the slash feature (if any)
match = self._SLASH_RE.match(s, pos)
if match:
name = self._slash_feature
v, pos = self._parse_value(name, s, match.end(), reentrances)
fstruct[name] = v
        ## Add any default features. -- handle in unification instead?
#for feature in self._features_with_defaults:
# fstruct.setdefault(feature, feature.default)
# Return the value.
return fstruct, pos
def _parse_value(self, name, s, position, reentrances):
if isinstance(name, Feature):
return name.parse_value(s, position, reentrances, self)
else:
return self.parse_value(s, position, reentrances)
def parse_value(self, s, position, reentrances):
for (handler, regexp) in self.VALUE_HANDLERS:
match = regexp.match(s, position)
if match:
handler_func = getattr(self, handler)
return handler_func(s, position, reentrances, match)
raise ValueError('value', position)
def _error(self, s, expected, position):
lines = s.split('\n')
while position > len(lines[0]):
position -= len(lines.pop(0))+1 # +1 for the newline.
estr = ('Error parsing feature structure\n ' +
lines[0] + '\n ' + ' '*position + '^ ' +
'Expected %s' % expected)
raise ValueError(estr)
#////////////////////////////////////////////////////////////
#{ Value Parsers
#////////////////////////////////////////////////////////////
#: A table indicating how feature values should be parsed. Each
#: entry in the table is a pair (handler, regexp). The first entry
#: with a matching regexp will have its handler called. Handlers
#: should have the following signature::
#:
#: def handler(s, position, reentrances, match): ...
#:
#: and should return a tuple (value, position), where position is
#: the string position where the value ended. (n.b.: order is
#: important here!)
VALUE_HANDLERS = [
('parse_fstruct_value', _START_FSTRUCT_RE),
('parse_var_value', re.compile(r'\?[a-zA-Z_][a-zA-Z0-9_]*')),
('parse_str_value', re.compile("[uU]?[rR]?(['\"])")),
('parse_int_value', re.compile(r'-?\d+')),
('parse_sym_value', re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')),
('parse_app_value', re.compile(r'<(app)\((\?[a-z][a-z]*)\s*,'
r'\s*(\?[a-z][a-z]*)\)>')),
# ('parse_logic_value', re.compile(r'<([^>]*)>')),
#lazily match any character after '<' until we hit a '>' not preceded by '-'
('parse_logic_value', re.compile(r'<(.*?)(?<!-)>')),
('parse_set_value', re.compile(r'{')),
('parse_tuple_value', re.compile(r'\(')),
]
def parse_fstruct_value(self, s, position, reentrances, match):
return self.partial_parse(s, position, reentrances)
def parse_str_value(self, s, position, reentrances, match):
return parse_str(s, position)
def parse_int_value(self, s, position, reentrances, match):
return int(match.group()), match.end()
# Note: the '?' is included in the variable name.
def parse_var_value(self, s, position, reentrances, match):
return Variable(match.group()), match.end()
_SYM_CONSTS = {'None':None, 'True':True, 'False':False}
def parse_sym_value(self, s, position, reentrances, match):
val, end = match.group(), match.end()
return self._SYM_CONSTS.get(val, val), end
def parse_app_value(self, s, position, reentrances, match):
"""Mainly included for backwards compat."""
return self._logic_parser.parse('%s(%s)' % match.group(2,3)), match.end()
def parse_logic_value(self, s, position, reentrances, match):
try:
try:
expr = self._logic_parser.parse(match.group(1))
except ParseException:
raise ValueError()
return expr, match.end()
except ValueError:
raise ValueError('logic expression', match.start(1))
def parse_tuple_value(self, s, position, reentrances, match):
return self._parse_seq_value(s, position, reentrances, match, ')',
FeatureValueTuple, FeatureValueConcat)
def parse_set_value(self, s, position, reentrances, match):
return self._parse_seq_value(s, position, reentrances, match, '}',
FeatureValueSet, FeatureValueUnion)
def _parse_seq_value(self, s, position, reentrances, match,
close_paren, seq_class, plus_class):
"""
Helper function used by parse_tuple_value and parse_set_value.
"""
cp = re.escape(close_paren)
position = match.end()
        # Special syntax for empty tuples and sets:
m = re.compile(r'\s*/?\s*%s' % cp).match(s, position)
if m: return seq_class(), m.end()
# Read values:
values = []
seen_plus = False
while True:
# Close paren: return value.
m = re.compile(r'\s*%s' % cp).match(s, position)
if m:
if seen_plus: return plus_class(values), m.end()
else: return seq_class(values), m.end()
# Read the next value.
val, position = self.parse_value(s, position, reentrances)
values.append(val)
# Comma or looking at close paren
            m = re.compile(r'\s*(,|\+|(?=%s))\s*' % cp).match(s, position)
            if not m: raise ValueError("',' or '+' or '%s'" % cp, position)
            if m.group(1) == '+': seen_plus = True
position = m.end()
######################################################################
#{ Demo
######################################################################
def display_unification(fs1, fs2, indent=' '):
# Print the two input feature structures, side by side.
fs1_lines = ("%s" % fs1).split('\n')
fs2_lines = ("%s" % fs2).split('\n')
if len(fs1_lines) > len(fs2_lines):
blankline = '['+' '*(len(fs2_lines[0])-2)+']'
fs2_lines += [blankline]*len(fs1_lines)
else:
blankline = '['+' '*(len(fs1_lines[0])-2)+']'
fs1_lines += [blankline]*len(fs2_lines)
for (fs1_line, fs2_line) in zip(fs1_lines, fs2_lines):
print(indent + fs1_line + ' ' + fs2_line)
print(indent+'-'*len(fs1_lines[0])+' '+'-'*len(fs2_lines[0]))
linelen = len(fs1_lines[0])*2+3
print(indent+'| |'.center(linelen))
print(indent+'+-----UNIFY-----+'.center(linelen))
print(indent+'|'.center(linelen))
print(indent+'V'.center(linelen))
bindings = {}
result = fs1.unify(fs2, bindings)
if result is None:
print(indent+'(FAILED)'.center(linelen))
else:
print('\n'.join(indent+l.center(linelen)
for l in ("%s" % result).split('\n')))
        if bindings:
print(repr(bindings).center(linelen))
return result
def interactive_demo(trace=False):
import random, sys
HELP = '''
1-%d: Select the corresponding feature structure
q: Quit
t: Turn tracing on or off
l: List all feature structures
?: Help
'''
print('''
This demo will repeatedly present you with a list of feature
structures, and ask you to choose two for unification. Whenever a
new feature structure is generated, it is added to the list of
choices that you can pick from. However, since this can be a
large number of feature structures, the demo will only print out a
random subset for you to choose between at a given time. If you
want to see the complete lists, type "l". For a list of valid
commands, type "?".
''')
print('Press "Enter" to continue...')
sys.stdin.readline()
fstruct_strings = [
'[agr=[number=sing, gender=masc]]',
'[agr=[gender=masc, person=3]]',
'[agr=[gender=fem, person=3]]',
'[subj=[agr=(1)[]], agr->(1)]',
'[obj=?x]', '[subj=?x]',
'[/=None]', '[/=NP]',
'[cat=NP]', '[cat=VP]', '[cat=PP]',
'[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]',
'[gender=masc, agr=?C]',
'[gender=?S, agr=[gender=?S,person=3]]'
]
all_fstructs = [(i, FeatStruct(fstruct_strings[i]))
for i in range(len(fstruct_strings))]
def list_fstructs(fstructs):
for i, fstruct in fstructs:
print()
lines = ("%s" % fstruct).split('\n')
print('%3d: %s' % (i+1, lines[0]))
for line in lines[1:]: print(' '+line)
print()
while True:
# Pick 5 feature structures at random from the master list.
MAX_CHOICES = 5
if len(all_fstructs) > MAX_CHOICES:
fstructs = sorted(random.sample(all_fstructs, MAX_CHOICES))
else:
fstructs = all_fstructs
print('_'*75)
print('Choose two feature structures to unify:')
list_fstructs(fstructs)
selected = [None,None]
for (nth,i) in (('First',0), ('Second',1)):
while selected[i] is None:
print(('%s feature structure (1-%d,q,t,l,?): '
% (nth, len(all_fstructs))), end=' ')
try:
input = sys.stdin.readline().strip()
if input in ('q', 'Q', 'x', 'X'): return
if input in ('t', 'T'):
trace = not trace
print(' Trace = %s' % trace)
continue
if input in ('h', 'H', '?'):
print(HELP % len(fstructs)); continue
if input in ('l', 'L'):
list_fstructs(all_fstructs); continue
num = int(input)-1
selected[i] = all_fstructs[num][1]
print()
                except (ValueError, IndexError):
                    print('Bad feature structure number')
continue
if trace:
result = selected[0].unify(selected[1], trace=1)
else:
result = display_unification(selected[0], selected[1])
if result is not None:
for i, fstruct in all_fstructs:
if repr(result) == repr(fstruct): break
else:
all_fstructs.append((len(all_fstructs), result))
print('\nType "Enter" to continue unifying; or "q" to quit.')
input = sys.stdin.readline().strip()
if input in ('q', 'Q', 'x', 'X'): return
def demo(trace=False):
"""
Just for testing
"""
#import random
# parser breaks with values like '3rd'
fstruct_strings = [
'[agr=[number=sing, gender=masc]]',
'[agr=[gender=masc, person=3]]',
'[agr=[gender=fem, person=3]]',
'[subj=[agr=(1)[]], agr->(1)]',
'[obj=?x]', '[subj=?x]',
'[/=None]', '[/=NP]',
'[cat=NP]', '[cat=VP]', '[cat=PP]',
'[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]',
'[gender=masc, agr=?C]',
'[gender=?S, agr=[gender=?S,person=3]]'
]
all_fstructs = [FeatStruct(fss) for fss in fstruct_strings]
#MAX_CHOICES = 5
#if len(all_fstructs) > MAX_CHOICES:
#fstructs = random.sample(all_fstructs, MAX_CHOICES)
#fstructs.sort()
#else:
#fstructs = all_fstructs
for fs1 in all_fstructs:
for fs2 in all_fstructs:
print("\n*******************\nfs1 is:\n%s\n\nfs2 is:\n%s\n\nresult is:\n%s" % (fs1, fs2, unify(fs1, fs2)))
if __name__ == '__main__':
demo()
__all__ = ['FeatStruct', 'FeatDict', 'FeatList', 'unify', 'subsumes', 'conflicts',
'Feature', 'SlashFeature', 'RangeFeature', 'SLASH', 'TYPE',
'FeatStructParser']
|
TeamSPoon/logicmoo_workspace
|
packs_sys/logicmoo_nlu/ext/pldata/nltk_3.0a3/nltk/featstruct.py
|
Python
|
mit
| 102,180
|
[
"VisIt"
] |
4e272441614737b0ae8d21a23ad23c1a6900845214b83b7112956fe51f7c57dd
|
"""
Newick format (:mod:`skbio.io.format.newick`)
=============================================
.. currentmodule:: skbio.io.format.newick
Newick format (``newick``) stores spanning-trees with weighted edges and node
names in a minimal file format [1]_. This is useful for representing
phylogenetic trees and taxonomies. Newick was created as an informal
specification on June 26, 1986 [2]_.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.tree.TreeNode` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
A Newick file represents a tree using the following grammar. See below for an
explanation of the format in plain English.
Formal Grammar
^^^^^^^^^^^^^^
.. code-block:: none
NEWICK ==> NODE ;
NODE ==> FORMATTING SUBTREE FORMATTING NODE_INFO FORMATTING
SUBTREE ==> ( CHILDREN ) | null
NODE_INFO ==> LABEL | LENGTH | LABEL FORMATTING LENGTH | null
FORMATTING ==> [ COMMENT_CHARS ] | whitespace | null
CHILDREN ==> NODE | CHILDREN , NODE
LABEL ==> ' ALL_CHARS ' | SAFE_CHARS
LENGTH ==> : FORMATTING NUMBER
COMMENT_CHARS ==> any
ALL_CHARS ==> any
SAFE_CHARS ==> any except: ,;:()[] and whitespace
NUMBER ==> a decimal or integer
.. note:: The ``_`` character inside of SAFE_CHARS will be converted to a
blank space in ``skbio.tree.TreeNode`` and vice versa.
``'`` is considered the escape character. To escape ``'`` use a
preceding ``'``.
The implementation of newick in scikit-bio allows nested comments. To
escape ``[`` or ``]`` from within COMMENT_CHARS, use a preceding ``'``.
Explanation
^^^^^^^^^^^
The Newick format defines a tree by creating a minimal representation of nodes
and their relationships to each other.
Basic Symbols
~~~~~~~~~~~~~
There are several symbols which define nodes, the first of which is the
semi-colon (``;``). The semi-colon creates a root node to its left. Recall that
there can only be one root in a tree.
The next symbol is the comma (``,``), which creates a node to its right.
However, these two alone are not enough. For example, imagine the following
string: ``, , , ;``. It is evident that there is a root, but the other 3 nodes,
defined by commas, have no relationship. For this reason, it is not a valid
Newick string to have more than one node at the root level.
To provide these relationships, there is another structure:
paired parentheses (``( )``). These are inserted at the location of an existing
node and give it the ability to have children. Placing ``( )`` in a node's
location will create a child inside the parentheses on the left-most
inner edge.
Application of Rules
~~~~~~~~~~~~~~~~~~~~
Adding a comma within the parentheses will create two children: ``( , )``
(also known as a bifurcating node). Notice that only one comma is needed
because the parentheses have already created a child. Adding more commas will
create more children that are siblings to each other. For example, writing
``( , , , )`` will create a multifurcating node with 4 child nodes that are
siblings to each other.
The notation for a root can be used to create a complete tree. The ``;`` will
create a root node where parentheses can be placed: ``( );``. Adding commas
will create more children: ``( , );``. These rules can be applied recursively
ad infinitum: ``(( , ), ( , ));``.
Adding Node Information
~~~~~~~~~~~~~~~~~~~~~~~
Information about a node can be added to improve the clarity and meaning of a
tree. Each node may have a label and/or a length (to the parent). Newick always
places the node information at the right-most edge of a node's position.
Starting with labels, ``(( , ), ( , ));`` would become
``((D, E)B, (F, G)C)A;``. There is a named root ``A`` and the root's children
(from left to right) are ``B`` and ``C``. ``B`` has the children ``D`` and
``E``, and ``C`` has the children ``F`` and ``G``.
Length represents the distance (or weight of the edge) that connects a node to
its parent. This must be a decimal or integer. As an example, suppose ``D`` is
rather estranged from ``B``, and ``E`` is very close. That can be written as:
``((D:10, E:0.5)B, (F, G)C)A;``. Notice that the colon (``:``) separates the
label from the length. If the length is provided but the label is omitted, a
colon must still precede the length (``(:0.25,:0.5):0.0;``). Without this, the
length would be interpreted as a label (which happens to be a number).
.. note:: Internally scikit-bio will cast a length to ``float``, which
   technically means that even exponent strings (``1e-3``) are supported.
Advanced Label and Length Rules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
More characters can be used to create more descriptive labels. When creating a
label there are some rules that must be considered due to limitations in the
Newick format. The following characters are not allowed within a standard
label: parenthesis, commas, square-brackets, colon, semi-colon, and whitespace.
These characters are also disallowed from occurring within a length, which has
a much stricter format: decimal or integer. Many of these characters are
symbols which define the structure of a Newick tree and are thus disallowed for
obvious reasons. The symbols not yet mentioned are square-brackets (``[ ]``)
and whitespace (space, tab, and newline).
What if these characters are needed within a label? In the simple case of
spaces, an underscore (``_``) will be translated as a space on read and vice
versa on write.
What if a literal underscore or any of the others mentioned are needed?
A label can be escaped (meaning that its contents are understood as regular
text) using single-quotes (``'``). When a label is surrounded by single-quotes,
any character is permissible. If a single-quote is needed inside of an escaped
label or anywhere else, it can be escaped with another single-quote.
For example, ``A_1`` is written ``'A_1'`` and ``'A'_1`` would be ``'''A''_1'``.
Inline Comments
~~~~~~~~~~~~~~~
Square-brackets define comments, which are the least commonly used part of
the Newick format. Comments are not included in the generated objects and exist
only as human readable text ignored by the parser. The implementation in
scikit-bio allows for nested comments (``[comment [nested]]``). Unpaired
square-brackets can be escaped with a single-quote preceding the bracket when
inside an existing comment. (This is identical to escaping a single-quote).
The single-quote has the highest operator precedence, so there is no need to
worry about starting a comment from within a properly escaped label.
Whitespace
~~~~~~~~~~
Whitespace is not allowed within any un-escaped label or in any length, but it
is permitted anywhere else.
Caveats
~~~~~~~
Newick cannot always provide a unique representation of a tree; in other
words, the same tree can be written multiple ways. For example: ``(A, B);`` is
isomorphic to ``(B, A);``. The implementation in scikit-bio maintains the given
sibling order in its object representations.
Newick has no representation of an unrooted tree. Some biological packages
assume that when a trifurcating root exists in an otherwise bifurcating
tree, the tree must be unrooted. In scikit-bio, ``skbio.tree.TreeNode``
will always be rooted at the ``newick`` root (``;``).
Format Parameters
-----------------
The only supported format parameter is `convert_underscores`. This is `True` by
default. When `False`, underscores found in unescaped labels will not be
converted to spaces. This is useful when reading the output of an external
program in which the underscores were not escaped. This parameter only affects
`read` operations. It does not exist for `write` operations; they will always
properly escape underscores.
Examples
--------
This is a simple Newick string.
>>> from io import StringIO
>>> from skbio import read
>>> from skbio.tree import TreeNode
>>> f = StringIO("((D, E)B, (F, G)C)A;")
>>> tree = read(f, format="newick", into=TreeNode)
>>> f.close()
>>> print(tree.ascii_art())
/-D
/B-------|
| \-E
-A-------|
| /-F
\C-------|
\-G
This is a complex Newick string.
>>> f = StringIO("[example](a:0.1, 'b_b''':0.2, (c:0.3, d_d:0.4)e:0.5)f:0.0;")
>>> tree = read(f, format="newick", into=TreeNode)
>>> f.close()
>>> print(tree.ascii_art())
/-a
|
-f-------|--b_b'
|
| /-c
\e-------|
\-d d
Notice that the node originally labeled ``d_d`` became ``d d``. Additionally
``'b_b'''`` became ``b_b'``. Note that the underscore was preserved in ``b_b'``.
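Underscore conversion can be disabled on read (a hedged sketch; the
``convert_underscores`` keyword is forwarded to this format's reader):
>>> f = StringIO("(a_a, b_b)c;")
>>> tree = read(f, format="newick", into=TreeNode, convert_underscores=False)
>>> f.close()
>>> [child.name for child in tree.children]
['a_a', 'b_b']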
References
----------
.. [1] http://evolution.genetics.washington.edu/phylip/newick_doc.html
.. [2] http://evolution.genetics.washington.edu/phylip/newicktree.html
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.io import create_format, NewickFormatError
from skbio.tree import TreeNode
newick = create_format('newick')
@newick.sniffer()
def _newick_sniffer(fh):
# Strategy:
# The following conditions preclude a file from being newick:
# * It is an empty file.
# * There is whitespace inside of a label (handled by tokenizer)
# * : is followed by anything that is an operator
# * ( is not preceded immediately by , or another (
    # * The parens are unbalanced when ; is found.
    # If 100 tokens (or fewer, if EOF occurs earlier) pass these checks, then
    # the file is probably newick, or at least we can't prove it isn't.
operators = set(",;:()")
empty = True
last_token = ','
indent = 0
try:
# 100 tokens ought to be enough for anybody.
for token, _ in zip(_tokenize_newick(fh), range(100)):
if token not in operators:
pass
elif token == ',' and last_token != ':' and indent > 0:
pass
elif token == ':' and last_token != ':':
pass
elif token == ';' and last_token != ':' and indent == 0:
pass
elif token == ')' and last_token != ':':
indent -= 1
elif token == '(' and (last_token == '(' or last_token == ','):
indent += 1
else:
raise NewickFormatError()
last_token = token
empty = False
except NewickFormatError:
return False, {}
return not empty, {}
@newick.reader(TreeNode)
def _newick_to_tree_node(fh, convert_underscores=True):
tree_stack = []
current_depth = 0
last_token = ''
next_is_distance = False
root = TreeNode()
tree_stack.append((root, current_depth))
for token in _tokenize_newick(fh, convert_underscores=convert_underscores):
# Check for a label
if last_token not in '(,):':
if not next_is_distance:
tree_stack[-1][0].name = last_token if last_token else None
else:
next_is_distance = False
# Check for a distance
if token == ':':
next_is_distance = True
elif last_token == ':':
try:
tree_stack[-1][0].length = float(token)
except ValueError:
raise NewickFormatError("Could not read length as numeric type"
": %s." % token)
elif token == '(':
current_depth += 1
tree_stack.append((TreeNode(), current_depth))
elif token == ',':
tree_stack.append((TreeNode(), current_depth))
elif token == ')':
if len(tree_stack) < 2:
raise NewickFormatError("Could not parse file as newick."
" Parenthesis are unbalanced.")
children = []
# Pop all nodes at this depth as they belong to the remaining
# node on the top of the stack as children.
while current_depth == tree_stack[-1][1]:
node, _ = tree_stack.pop()
children.insert(0, node)
parent = tree_stack[-1][0]
if parent.children:
raise NewickFormatError("Could not parse file as newick."
" Contains unnested children.")
# This is much faster than TreeNode.extend
for child in children:
child.parent = parent
parent.children = children
current_depth -= 1
elif token == ';':
if len(tree_stack) == 1:
return root
break
last_token = token
raise NewickFormatError("Could not parse file as newick."
" `(Parenthesis)`, `'single-quotes'`,"
" `[comments]` may be unbalanced, or tree may be"
" missing its root.")
@newick.writer(TreeNode)
def _tree_node_to_newick(obj, fh):
operators = set(",:_;()[]")
current_depth = 0
nodes_left = [(obj, 0)]
while len(nodes_left) > 0:
entry = nodes_left.pop()
node, node_depth = entry
if node.children and node_depth >= current_depth:
fh.write('(')
nodes_left.append(entry)
nodes_left += ((child, node_depth + 1) for child in
reversed(node.children))
current_depth = node_depth + 1
else:
if node_depth < current_depth:
fh.write(')')
current_depth -= 1
# Note we don't check for None because there is no way to represent
# an empty string as a label in Newick. Therefore, both None and ''
# are considered to be the absence of a label.
if node.name:
escaped = "%s" % node.name.replace("'", "''")
if any(t in operators for t in node.name):
fh.write("'")
fh.write(escaped)
fh.write("'")
else:
fh.write(escaped.replace(" ", "_"))
if node.length is not None:
fh.write(':')
fh.write("%s" % node.length)
if nodes_left and nodes_left[-1][1] == current_depth:
fh.write(',')
fh.write(';\n')
def _tokenize_newick(fh, convert_underscores=True):
structure_tokens = set('(),;:')
not_escaped = True
label_start = False
last_non_ws_char = ''
last_char = ''
comment_depth = 0
metadata_buffer = []
# Strategy:
# We will iterate by character.
# Comments in newick are defined as:
# [This is a comment]
# Nested comments are allowed.
#
# The following characters indicate structure:
# ( ) , ; :
#
# Whitespace is never allowed in a newick label, so an exception will be
# thrown.
#
# We use ' to indicate a literal string. It has the highest precedence of
# any operator.
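    #
    # A hedged illustration (input assumed): with the defaults, the string
    # "(a,b)c;" is tokenized as: '(', 'a', ',', 'b', ')', 'c', ';'.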
for line in fh:
for character in line:
# We will start by handling the comment case.
# This code branch will probably never execute in practice.
# Using a comment_depth we can handle nested comments.
# Additionally if we are inside an escaped literal string, then
# we don't want to consider it a comment.
if character == "[" and not_escaped:
# Sometimes we might not want to nest a comment, so we will use
# our escape character. This is not explicitly mentioned in
# any format specification, but seems like what a reasonable
# person might do.
if last_non_ws_char != "'" or comment_depth == 0:
# Once again, only advance our depth if [ has not been
# escaped inside our comment.
comment_depth += 1
if comment_depth > 0:
# Same as above, but in reverse
if character == "]" and last_non_ws_char != "'":
comment_depth -= 1
last_non_ws_char = character
continue
# We are not in a comment block if we are below here.
# If we are inside of an escaped string literal, then ( ) , ; are
# meaningless to the structure.
# Otherwise, we are ready to submit our metadata token.
if not_escaped and character in structure_tokens:
label_start = False
metadata = ''.join(metadata_buffer)
# If the following condition is True, then we must have just
# closed a literal. We know this because last_non_ws_char is
# either None or the last non-whitespace character.
# last_non_ws_char is None when we have just escaped an escape
# and at the first iteration.
if last_non_ws_char == "'" or not convert_underscores:
# Make no modifications.
yield metadata
elif metadata:
# Underscores are considered to be spaces when not in an
# escaped literal string.
yield metadata.replace('_', ' ')
# Clear our buffer for the next metadata token and yield our
# current structure token.
metadata_buffer = []
yield character
# We will now handle escaped string literals.
# They are inconvenient because any character inside of them is
# valid, especially whitespace.
# We also need to allow ' to be escaped by '. e.g. '' -> '
elif character == "'":
not_escaped = not not_escaped
label_start = True
if last_non_ws_char == "'":
# We are escaping our escape, so it should be added to our
# metadata_buffer which will represent some future token.
metadata_buffer.append(character)
# We do not want a running chain of overcounts, so we need
# to clear the last character and continue iteration from
# the top. Without this, the following would happen:
# ''' ' -> '' <open literal>
# What we want is:
# ''' ' -> '<open literal> <close literal>
last_non_ws_char = ''
last_char = ''
continue
elif not character.isspace() or not not_escaped:
if label_start and last_char.isspace() and not_escaped:
raise NewickFormatError("Newick files cannot have"
" unescaped whitespace in their"
" labels.")
metadata_buffer.append(character)
label_start = True
# This is equivalent to an `else` however it prevents coverage from
# mis-identifying the `continue` as uncalled because cpython will
# optimize it to a jump that is slightly different from the normal
# jump it would have done anyways.
elif True:
# Skip the last statement
last_char = character
continue
last_char = character
# This line is skipped in the following cases:
# * comment_depth > 0, i.e. we are in a comment.
# * We have just processed the sequence '' and we don't want
# the sequence ''' to result in ''.
# * We have encountered whitespace that is not properly escaped.
last_non_ws_char = character
|
kdmurray91/scikit-bio
|
skbio/io/format/newick.py
|
Python
|
bsd-3-clause
| 20,466
|
[
"scikit-bio"
] |
cc72453c28a9bea6a6a80cab53a8889756abb762b4243f8caa0414e33fec53dd
|
"""
canny.py - Canny Edge detector
Reference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import numpy as np
import scipy.ndimage as ndi
from scipy.ndimage import (gaussian_filter,
generate_binary_structure, binary_erosion, label)
from skimage import dtype_limits
def smooth_with_function_and_mask(image, function, mask):
"""Smooth an image with a linear function, ignoring masked pixels
Parameters
----------
image : array
Image you want to smooth.
function : callable
A function that does image smoothing.
mask : array
Mask with 1's for significant pixels, 0's for masked pixels.
Notes
    -----
This function calculates the fractional contribution of masked pixels
by applying the function to the mask (which gets you the fraction of
the pixel data that's due to significant points). We then mask the image
and apply the function. The resulting values will be lower by the
bleed-over fraction, so you can recalibrate by dividing by the function
on the mask to recover the effect of smoothing from just the significant
pixels.
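    Examples
    --------
    A hedged 1-D sketch (the box smoother below is illustrative only, not
    part of the real API):
    >>> img = np.array([2., 2., 2., 2.])
    >>> msk = np.array([True, True, False, True])
    >>> box = lambda x: ndi.uniform_filter1d(x, 3)
    >>> smoothed = smooth_with_function_and_mask(img, box, msk)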
"""
bleed_over = function(mask.astype(float))
masked_image = np.zeros(image.shape, image.dtype)
masked_image[mask] = image[mask]
smoothed_image = function(masked_image)
output_image = smoothed_image / (bleed_over + np.finfo(float).eps)
return output_image
def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None):
"""Edge filter an image using the Canny algorithm.
Parameters
    ----------
image : 2D array
Greyscale input image to detect edges on; can be of any dtype.
sigma : float
Standard deviation of the Gaussian filter.
low_threshold : float
Lower bound for hysteresis thresholding (linking edges).
If None, low_threshold is set to 10% of dtype's max.
high_threshold : float
Upper bound for hysteresis thresholding (linking edges).
If None, high_threshold is set to 20% of dtype's max.
mask : array, dtype=bool, optional
Mask to limit the application of Canny to a certain area.
Returns
-------
output : 2D array (image)
The binary edge map.
See also
--------
skimage.sobel
Notes
-----
The steps of the algorithm are as follows:
* Smooth the image using a Gaussian with ``sigma`` width.
* Apply the horizontal and vertical Sobel operators to get the gradients
within the image. The edge strength is the norm of the gradient.
* Thin potential edges to 1-pixel wide curves. First, find the normal
to the edge at each point. This is done by looking at the
signs and the relative magnitude of the X-Sobel and Y-Sobel
to sort the points into 4 categories: horizontal, vertical,
diagonal and antidiagonal. Then look in the normal and reverse
directions to see if the values in either of those directions are
greater than the point in question. Use interpolation to get a mix of
points instead of picking the one that's the closest to the normal.
* Perform a hysteresis thresholding: first label all points above the
high threshold as edges. Then recursively label any point above the
low threshold that is 8-connected to a labeled point as an edge.
References
    ----------
Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
William Green's Canny tutorial
http://dasl.mem.drexel.edu/alumni/bGreen/www.pages.drexel.edu/_weg22/can_tut.html
Examples
--------
>>> from skimage import filter
>>> # Generate noisy image of a square
>>> im = np.zeros((256, 256))
>>> im[64:-64, 64:-64] = 1
>>> im += 0.2 * np.random.random(im.shape)
>>> # First trial with the Canny filter, with the default smoothing
>>> edges1 = filter.canny(im)
>>> # Increase the smoothing for better results
>>> edges2 = filter.canny(im, sigma=3)
"""
#
# The steps involved:
#
# * Smooth using the Gaussian with sigma above.
#
# * Apply the horizontal and vertical Sobel operators to get the gradients
    #   within the image. The edge strength is the norm of the gradient
    #   (computed with np.hypot).
#
# * Find the normal to the edge at each point using the arctangent of the
# ratio of the Y sobel over the X sobel - pragmatically, we can
# look at the signs of X and Y and the relative magnitude of X vs Y
# to sort the points into 4 categories: horizontal, vertical,
# diagonal and antidiagonal.
#
# * Look in the normal and reverse directions to see if the values
# in either of those directions are greater than the point in question.
# Use interpolation to get a mix of points instead of picking the one
# that's the closest to the normal.
#
# * Label all points above the high threshold as edges.
# * Recursively label any point above the low threshold that is 8-connected
# to a labeled point as an edge.
#
# Regarding masks, any point touching a masked point will have a gradient
# that is "infected" by the masked point, so it's enough to erode the
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
#
if image.ndim != 2:
raise TypeError("The input 'image' must be a two-dimensional array.")
if low_threshold is None:
low_threshold = 0.1 * dtype_limits(image)[1]
if high_threshold is None:
high_threshold = 0.2 * dtype_limits(image)[1]
if mask is None:
mask = np.ones(image.shape, dtype=bool)
fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')
smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
jsobel = ndi.sobel(smoothed, axis=1)
isobel = ndi.sobel(smoothed, axis=0)
abs_isobel = np.abs(isobel)
abs_jsobel = np.abs(jsobel)
magnitude = np.hypot(isobel, jsobel)
#
# Make the eroded mask. Setting the border value to zero will wipe
# out the image edges for us.
#
s = generate_binary_structure(2, 2)
eroded_mask = binary_erosion(mask, s, border_value=0)
eroded_mask = eroded_mask & (magnitude > 0)
#
#--------- Find local maxima --------------
#
# Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
# 90-135 degrees and 135-180 degrees.
#
local_maxima = np.zeros(image.shape, bool)
#----- 0 to 45 degrees ------
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
# Get the magnitudes shifted left to make a matrix of the points to the
# right of pts. Similarly, shift left and down to get the points to the
# top right of pts.
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 45 to 90 degrees ------
# Mix diagonal and vertical
#
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:, 1:][pts[:, :-1]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 90 to 135 degrees ------
# Mix anti-diagonal and vertical
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1a = magnitude[:, 1:][pts[:, :-1]]
c2a = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2a * w + c1a * (1.0 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1.0 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 135 to 180 degrees ------
# Mix anti-diagonal and anti-horizontal
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#
#---- Create two masks at the two thresholds.
#
high_mask = local_maxima & (magnitude >= high_threshold)
low_mask = local_maxima & (magnitude >= low_threshold)
#
# Segment the low-mask, then only keep low-segments that have
# some high_mask component in them
#
strel = np.ones((3, 3), bool)
labels, count = label(low_mask, strel)
if count == 0:
return low_mask
sums = (np.array(ndi.sum(high_mask, labels,
np.arange(count, dtype=np.int32) + 1),
copy=False, ndmin=1))
good_label = np.zeros((count + 1,), bool)
good_label[1:] = sums > 0
output_mask = good_label[labels]
return output_mask
|
chintak/scikit-image
|
skimage/filter/_canny.py
|
Python
|
bsd-3-clause
| 10,380
|
[
"Gaussian"
] |
de5148e32a853d49f8e62f2c5f710911e3811bb06b52b0f1bf316acab60f2925
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Handle the SCOP HIErarchy files, which describe the SCOP hierarchy in
terms of SCOP unique identifiers (sunid).
The file format is described in the scop
"release notes.":http://scop.berkeley.edu/release-notes-1.55.html
The latest HIE file can be found
"elsewhere at SCOP.":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
"Release 1.55":http://scop.berkeley.edu/parse/dir.hie.scop.txt_1.55 (July 2001)
"""
class Record(object):
"""Holds information for one node in the SCOP hierarchy.
Attributes:
    - sunid - SCOP unique identifier of this node
    - parent - Parent's sunid
    - children - Sequence of the children's sunids
"""
def __init__(self, line=None):
self.sunid = ''
self.parent = ''
self.children = []
if line:
self._process(line)
def _process(self, line):
"""Parses HIE records.
        Records consist of 3 tab-delimited fields: the node's sunid,
        the parent's sunid, and a list of the children's sunids.
"""
# For example ::
#
# 0 - 46456,48724,51349,53931,56572,56835,56992,57942
# 21953 49268 -
# 49267 49266 49268,49269
line = line.rstrip() # no trailing whitespace
        columns = line.split('\t')  # separate the tab-delimited columns
if len(columns) != 3:
raise ValueError("I don't understand the format of %s" % line)
sunid, parent, children = columns
if sunid == '-':
self.sunid = ''
else:
self.sunid = int(sunid)
if parent == '-':
self.parent = ''
else:
self.parent = int(parent)
if children == '-':
self.children = ()
else:
children = children.split(',')
self.children = [int(x) for x in children]
def __str__(self):
s = []
s.append(str(self.sunid))
if self.parent:
s.append(str(self.parent))
else:
if self.sunid != 0:
s.append('0')
else:
s.append('-')
if self.children:
s.append(",".join(str(x) for x in self.children))
else:
s.append('-')
return "\t".join(s) + "\n"
def parse(handle):
"""Iterates over a HIE file as Hie records for each line.
Arguments:
- handle - file-like object.
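    A hedged usage sketch (``dir.hie.scop.txt`` is an assumed local copy of
    a SCOP HIE file):
        >>> with open("dir.hie.scop.txt") as handle:
        ...     for record in parse(handle):
        ...         print(record.sunid, record.parent)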
"""
for line in handle:
if line.startswith('#'):
continue
yield Record(line)
|
zjuchenyuan/BioWeb
|
Lib/Bio/SCOP/Hie.py
|
Python
|
mit
| 2,745
|
[
"Biopython"
] |
d8b0fdea4cd71707c51581b4ff59bc8debedd2eafb50e403e394116675dccc57
|
# -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array, check_is_fitted
from .exceptions import DataDimensionalityWarning
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
"""Find a 'safe' number of components to randomly project to
The distortion introduced by a random projection `p` only changes the
    distance between two points by a factor (1 +- eps) in a Euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix with shape [n_components, n_features] (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
    the larger the dataset, the higher the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or numpy array of int greater than 0,
Number of samples. If an array is given, it will compute
a safe number of components array-wise.
eps : float or numpy array of float in ]0,1[, optional (default=0.1)
Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
If an array is given, it will compute a safe number of components
array-wise.
Returns
-------
n_components : int or numpy array of int,
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
    return (4 * np.log(n_samples) / denominator).astype(int)
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
if n_features <= 0:
raise ValueError("n_features must be strictly positive, got %d" %
n_components)
def gaussian_random_matrix(n_components, n_features, random_state=None):
"""Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None, optional (default=None)
Control the pseudo random number generator used to generate the matrix
at fit time. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
sparse_random_matrix
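    Examples
    --------
    A hedged usage sketch (shapes and seed chosen for illustration):
    >>> components = gaussian_random_matrix(10, 100, random_state=0)
    >>> components.shape
    (10, 100)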
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : int, RandomState instance or None, optional (default=None)
Control the pseudo random number generator used to generate the matrix
at fit time. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Returns
-------
components : array or CSR matrix with shape [n_components, n_features]
        The generated sparse random matrix.
See Also
--------
SparseRandomProjection
gaussian_random_matrix
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for i in xrange(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
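# Editor's sketch, not part of the original module: with the default density of
# 1 / sqrt(n_features), the CSR matrix built above stores roughly
# n_components * n_features * density non-zero entries. Assumes numpy/scipy
# only.
def _demo_sparse_random_matrix_density():
    n_components, n_features = 100, 10000
    components = sparse_random_matrix(n_components, n_features, random_state=0)
    expected_nnz = n_components * n_features / np.sqrt(n_features)
    assert 0.9 * expected_nnz < components.nnz < 1.1 * expected_nnz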
class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
@abstractmethod
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y
Ignored
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s"
% self.n_components)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
assert_equal(
self.components_.shape,
(self.n_components_, n_features),
            err_msg=('An error has occurred: the self.components_ matrix '
                     'does not have the proper shape.'))
return self
def transform(self, X):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
check_is_fitted(self, 'components_')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
                'Impossible to perform projection: '
'X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        Note that the Johnson-Lindenstrauss lemma can yield very
        conservative estimates of the required number of components, as it
        makes no assumptions about the structure of the dataset.
eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : int, RandomState instance or None, optional (default=None)
Control the pseudo random number generator used to generate the matrix
at fit time. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : numpy array of shape [n_components, n_features]
Random matrix used for the projection.
See Also
--------
SparseRandomProjection
"""
def __init__(self, n_components='auto', eps=0.1, random_state=None):
super(GaussianRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
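# Editor's sketch, not part of the original module: typical use of the
# estimator above, letting n_components='auto' pick the dimension from eps and
# n_samples via the Johnson-Lindenstrauss bound. Assumes numpy only.
def _demo_gaussian_random_projection():
    rng = np.random.RandomState(42)
    X = rng.rand(100, 10000)
    transformer = GaussianRandomProjection(random_state=rng)
    X_new = transformer.fit_transform(X)
    assert X_new.shape[0] == 100
    assert X_new.shape[1] == transformer.n_components_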
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection
Sparse random matrix is an alternative to dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
    If we note `s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        Note that the Johnson-Lindenstrauss lemma can yield very
        conservative estimates of the required number of components, as it
        makes no assumptions about the structure of the dataset.
density : float in range ]0, 1], optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : strictly positive float, optional, (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : boolean, optional (default=False)
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : int, RandomState instance or None, optional (default=None)
Control the pseudo random number generator used to generate the matrix
at fit time. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : CSR matrix with shape [n_components, n_features]
Random matrix used for the projection.
density_ : float in range 0.0 - 1.0
        Concrete density computed when density = "auto".
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
https://users.soe.ucsc.edu/~optas/papers/jl.pdf
"""
def __init__(self, n_components='auto', density='auto', eps=0.1,
dense_output=False, random_state=None):
super(SparseRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
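# Editor's sketch, not part of the original module: SparseRandomProjection
# keeps the projection matrix sparse and records the density actually used in
# density_. A minimal usage example, assuming numpy only.
def _demo_sparse_random_projection():
    rng = np.random.RandomState(42)
    X = rng.rand(100, 10000)
    transformer = SparseRandomProjection(random_state=rng)
    X_new = transformer.fit_transform(X)
    assert X_new.shape[0] == 100
    assert X_new.shape[1] == transformer.n_components_
    assert transformer.density_ == 1 / np.sqrt(10000)  # 'auto' density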
|
BiaDarkia/scikit-learn
|
sklearn/random_projection.py
|
Python
|
bsd-3-clause
| 22,853
|
[
"Gaussian"
] |
b87a5392e855aa46a9591d823b522174e140f8d5bbb4c9aa90ec068dd7647440
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Packet sending and receiving with libdnet and libpcap/WinPcap.
"""
import time, struct, sys, platform
import socket
if not sys.platform.startswith("win"):
from fcntl import ioctl
from scapy.data import *
from scapy.config import conf
from scapy.utils import mac2str
from scapy.supersocket import SuperSocket
from scapy.error import Scapy_Exception, log_loading, warning
import scapy.arch
import scapy.consts
if conf.use_winpcapy:
#mostly code from https://github.com/phaethon/scapy translated to python2.X
try:
from scapy.modules.winpcapy import *
def winpcapy_get_if_list():
err = create_string_buffer(PCAP_ERRBUF_SIZE)
devs = POINTER(pcap_if_t)()
ret = []
if pcap_findalldevs(byref(devs), err) < 0:
return ret
try:
p = devs
while p:
ret.append(p.contents.name.decode('ascii'))
p = p.contents.next
return ret
except:
raise
finally:
pcap_freealldevs(devs)
# Detect Pcap version
version = pcap_lib_version()
if b"winpcap" in version.lower():
if os.path.exists(os.environ["WINDIR"] + "\\System32\\Npcap\\wpcap.dll"):
warning("Winpcap is installed over Npcap. Will use Winpcap (see 'Winpcap/Npcap conflicts' in scapy's docs)", True)
elif platform.release() != "XP":
warning("WinPcap is now deprecated (not maintened). Please use Npcap instead", True)
elif b"npcap" in version.lower():
conf.use_npcap = True
LOOPBACK_NAME = scapy.consts.LOOPBACK_NAME = "Npcap Loopback Adapter"
except OSError as e:
def winpcapy_get_if_list():
return []
conf.use_winpcapy = False
if conf.interactive:
log_loading.warning("wpcap.dll is not installed. You won't be able to send/recieve packets. Visit the scapy's doc to install it")
# From BSD net/bpf.h
#BIOCIMMEDIATE=0x80044270
BIOCIMMEDIATE=-2147204496
class PcapTimeoutElapsed(Scapy_Exception):
pass
def get_if_raw_hwaddr(iff):
err = create_string_buffer(PCAP_ERRBUF_SIZE)
devs = POINTER(pcap_if_t)()
ret = b"\0\0\0\0\0\0"
if pcap_findalldevs(byref(devs), err) < 0:
return ret
try:
p = devs
while p:
if p.contents.name.endswith(iff):
a = p.contents.addresses
while a:
if hasattr(socket, 'AF_LINK') and a.contents.addr.contents.sa_family == socket.AF_LINK:
ap = a.contents.addr
val = cast(ap, POINTER(sockaddr_dl))
ret = str(val.contents.sdl_data[ val.contents.sdl_nlen : val.contents.sdl_nlen + val.contents.sdl_alen ])
a = a.contents.next
break
p = p.contents.next
return ret
finally:
pcap_freealldevs(devs)
def get_if_raw_addr(iff):
err = create_string_buffer(PCAP_ERRBUF_SIZE)
devs = POINTER(pcap_if_t)()
ret = b"\0\0\0\0"
if pcap_findalldevs(byref(devs), err) < 0:
return ret
try:
p = devs
while p:
if p.contents.name.endswith(iff.guid):
a = p.contents.addresses
while a:
if a.contents.addr.contents.sa_family == socket.AF_INET:
ap = a.contents.addr
val = cast(ap, POINTER(sockaddr_in))
ret = "".join(chr(x) for x in val.contents.sin_addr[:4])
a = a.contents.next
break
p = p.contents.next
return ret
finally:
pcap_freealldevs(devs)
if conf.use_winpcapy:
get_if_list = winpcapy_get_if_list
def in6_getifaddr():
err = create_string_buffer(PCAP_ERRBUF_SIZE)
devs = POINTER(pcap_if_t)()
ret = []
if pcap_findalldevs(byref(devs), err) < 0:
return ret
try:
p = devs
ret = []
while p:
a = p.contents.addresses
while a:
if a.contents.addr.contents.sa_family == socket.AF_INET6:
ap = a.contents.addr
val = cast(ap, POINTER(sockaddr_in6))
addr = socket.inet_ntop(socket.AF_INET6, str(val.contents.sin6_addr[:]))
scope = scapy.utils6.in6_getscope(addr)
ret.append((addr, scope, p.contents.name.decode('ascii')))
a = a.contents.next
p = p.contents.next
return ret
finally:
pcap_freealldevs(devs)
from ctypes import POINTER, byref, create_string_buffer
class _PcapWrapper_pypcap:
def __init__(self, device, snaplen, promisc, to_ms):
self.errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
self.iface = create_string_buffer(device)
self.pcap = pcap_open_live(self.iface, snaplen, promisc, to_ms, self.errbuf)
self.header = POINTER(pcap_pkthdr)()
self.pkt_data = POINTER(c_ubyte)()
self.bpf_program = bpf_program()
def next(self):
c = pcap_next_ex(self.pcap, byref(self.header), byref(self.pkt_data))
if not c > 0:
return
ts = self.header.contents.ts.tv_sec + float(self.header.contents.ts.tv_usec) / 1000000
pkt = "".join(chr(i) for i in self.pkt_data[:self.header.contents.len])
return ts, pkt
__next__ = next
def datalink(self):
return pcap_datalink(self.pcap)
def fileno(self):
if sys.platform.startswith("win"):
log_loading.error("Cannot get selectable PCAP fd on Windows")
return 0
return pcap_get_selectable_fd(self.pcap)
def setfilter(self, f):
filter_exp = create_string_buffer(f)
if pcap_compile(self.pcap, byref(self.bpf_program), filter_exp, 0, -1) == -1:
log_loading.error("Could not compile filter expression %s" % f)
return False
else:
if pcap_setfilter(self.pcap, byref(self.bpf_program)) == -1:
log_loading.error("Could not install filter %s" % f)
return False
return True
def setnonblock(self, i):
pcap_setnonblock(self.pcap, i, self.errbuf)
def send(self, x):
pcap_sendpacket(self.pcap, x, len(x))
def close(self):
pcap_close(self.pcap)
open_pcap = lambda *args,**kargs: _PcapWrapper_pypcap(*args,**kargs)
class PcapTimeoutElapsed(Scapy_Exception):
pass
class L2pcapListenSocket(SuperSocket):
desc = "read packets at layer 2 using libpcap"
def __init__(self, iface = None, type = ETH_P_ALL, promisc=None, filter=None):
self.type = type
self.outs = None
self.iface = iface
if iface is None:
iface = conf.iface
if promisc is None:
promisc = conf.sniff_promisc
self.promisc = promisc
self.ins = open_pcap(iface, 1600, self.promisc, 100)
try:
ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
except:
pass
if type == ETH_P_ALL: # Do not apply any filter if Ethernet type is given
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if filter:
self.ins.setfilter(filter)
def close(self):
self.ins.close()
def recv(self, x=MTU):
ll = self.ins.datalink()
if ll in conf.l2types:
cls = conf.l2types[ll]
else:
cls = conf.default_l2
warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
pkt = None
while pkt is None:
pkt = self.ins.next()
if pkt is not None:
ts,pkt = pkt
if scapy.arch.WINDOWS and pkt is None:
raise PcapTimeoutElapsed
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
pkt.time = ts
return pkt
def send(self, x):
raise Scapy_Exception("Can't send anything with L2pcapListenSocket")
conf.L2listen = L2pcapListenSocket
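# Editor's sketch, not part of the original module: reading one frame through
# the listen socket defined above. Hedged example; requires a working libpcap
# setup and a capture-capable conf.iface, and will block until a frame arrives.
def _demo_l2pcap_listen():
    s = L2pcapListenSocket(iface=conf.iface, filter="arp")
    try:
        pkt = s.recv()
        return pkt.summary()
    finally:
        s.close()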
class L2pcapSocket(SuperSocket):
desc = "read/write packets at layer 2 using only libpcap"
def __init__(self, iface = None, type = ETH_P_ALL, promisc=None, filter=None, nofilter=0):
if iface is None:
iface = conf.iface
self.iface = iface
if promisc is None:
promisc = 0
self.promisc = promisc
self.ins = open_pcap(iface, 1600, self.promisc, 100)
# We need to have a different interface open because of an
# access violation in Npcap that occurs in multi-threading
# (see https://github.com/nmap/nmap/issues/982)
self.outs = open_pcap(iface, 1600, self.promisc, 100)
try:
ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
except:
pass
if nofilter:
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
filter = "ether proto %i" % type
else:
filter = None
else:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
if filter:
filter = "(ether proto %i) and (%s)" % (type,filter)
else:
filter = "ether proto %i" % type
if filter:
self.ins.setfilter(filter)
def send(self, x):
sx = str(x)
if hasattr(x, "sent_time"):
x.sent_time = time.time()
return self.outs.send(sx)
def recv(self,x=MTU):
ll = self.ins.datalink()
if ll in conf.l2types:
cls = conf.l2types[ll]
else:
cls = conf.default_l2
warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
pkt = self.ins.next()
if pkt is not None:
ts,pkt = pkt
if pkt is None:
return
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
pkt.time = ts
return pkt
def nonblock_recv(self):
self.ins.setnonblock(1)
p = self.recv(MTU)
self.ins.setnonblock(0)
return p
def close(self):
if not self.closed:
if hasattr(self, "ins"):
self.ins.close()
if hasattr(self, "outs"):
self.outs.close()
self.closed = True
class L3pcapSocket(L2pcapSocket):
desc = "read/write packets at layer 3 using only libpcap"
#def __init__(self, iface = None, type = ETH_P_ALL, filter=None, nofilter=0):
# L2pcapSocket.__init__(self, iface, type, filter, nofilter)
def recv(self, x = MTU):
r = L2pcapSocket.recv(self, x)
if r:
return r.payload
else:
return
def send(self, x):
cls = conf.l2types[1]
sx = str(cls()/x)
if hasattr(x, "sent_time"):
x.sent_time = time.time()
return self.ins.send(sx)
conf.L2socket=L2pcapSocket
conf.L3socket=L3pcapSocket
if conf.use_pcap:
try:
import pcap
except ImportError as e:
try:
import pcapy as pcap
except ImportError as e2:
if conf.interactive:
log_loading.error("Unable to import pcap module: %s/%s" % (e,e2))
conf.use_pcap = False
else:
raise
if conf.use_pcap:
# From BSD net/bpf.h
#BIOCIMMEDIATE=0x80044270
BIOCIMMEDIATE=-2147204496
if hasattr(pcap,"pcap"): # python-pypcap
class _PcapWrapper_pypcap:
def __init__(self, device, snaplen, promisc, to_ms):
try:
self.pcap = pcap.pcap(device, snaplen, promisc, immediate=1, timeout_ms=to_ms)
except TypeError:
# Older pypcap versions do not support the timeout_ms argument
self.pcap = pcap.pcap(device, snaplen, promisc, immediate=1)
def __getattr__(self, attr):
return getattr(self.pcap, attr)
def __del__(self):
warning("__del__: don't know how to close the file descriptor. Bugs ahead ! Please report this bug.")
def next(self):
c = self.pcap.next()
if c is None:
return
ts, pkt = c
return ts, str(pkt)
__next__ = next
open_pcap = lambda *args,**kargs: _PcapWrapper_pypcap(*args,**kargs)
elif hasattr(pcap,"pcapObject"): # python-libpcap
class _PcapWrapper_libpcap:
def __init__(self, *args, **kargs):
self.pcap = pcap.pcapObject()
self.pcap.open_live(*args, **kargs)
def setfilter(self, filter):
self.pcap.setfilter(filter, 0, 0)
def next(self):
c = self.pcap.next()
if c is None:
return
l,pkt,ts = c
return ts,pkt
__next__ = next
def __getattr__(self, attr):
return getattr(self.pcap, attr)
def __del__(self):
fd = self.pcap.fileno()
os.close(fd)
open_pcap = lambda *args,**kargs: _PcapWrapper_libpcap(*args,**kargs)
elif hasattr(pcap,"open_live"): # python-pcapy
class _PcapWrapper_pcapy:
def __init__(self, *args, **kargs):
self.pcap = pcap.open_live(*args, **kargs)
def next(self):
try:
c = self.pcap.next()
except pcap.PcapError:
return None
else:
h,p = c
if h is None:
return
s,us = h.getts()
return (s+0.000001*us), p
__next__ = next
def fileno(self):
raise RuntimeError("%s has no fileno. Please report this bug." %
self.__class__.__name__)
def __getattr__(self, attr):
return getattr(self.pcap, attr)
def __del__(self):
warning("__del__: don't know how to close the file descriptor. Bugs ahead ! Please report this bug.")
open_pcap = lambda *args,**kargs: _PcapWrapper_pcapy(*args,**kargs)
class PcapTimeoutElapsed(Scapy_Exception):
pass
class L2pcapListenSocket(SuperSocket):
desc = "read packets at layer 2 using libpcap"
def __init__(self, iface = None, type = ETH_P_ALL, promisc=None, filter=None):
self.type = type
self.outs = None
self.iface = iface
if iface is None:
iface = conf.iface
if promisc is None:
promisc = conf.sniff_promisc
self.promisc = promisc
self.ins = open_pcap(iface, 1600, self.promisc, 100)
try:
ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
except:
pass
if type == ETH_P_ALL: # Do not apply any filter if Ethernet type is given
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if filter:
self.ins.setfilter(filter)
def close(self):
del(self.ins)
def recv(self, x=MTU):
ll = self.ins.datalink()
if ll in conf.l2types:
cls = conf.l2types[ll]
else:
cls = conf.default_l2
warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
pkt = self.ins.next()
if scapy.arch.WINDOWS and pkt is None:
raise PcapTimeoutElapsed
        if pkt is None:
            return
        ts,pkt = pkt
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
pkt.time = ts
return pkt
def send(self, x):
raise Scapy_Exception("Can't send anything with L2pcapListenSocket")
conf.L2listen = L2pcapListenSocket
if conf.use_dnet:
try:
try:
# First try to import dnet
import dnet
except ImportError:
# Then, try to import dumbnet as dnet
import dumbnet as dnet
except ImportError as e:
if conf.interactive:
log_loading.error("Unable to import dnet module: %s" % e)
conf.use_dnet = False
def get_if_raw_hwaddr(iff):
"dummy"
return (0,b"\0\0\0\0\0\0")
def get_if_raw_addr(iff):
"dummy"
return b"\0\0\0\0"
def get_if_list():
"dummy"
return []
else:
raise
else:
def get_if_raw_hwaddr(iff):
"""Return a tuple containing the link type and the raw hardware
address corresponding to the interface 'iff'"""
if iff == scapy.arch.LOOPBACK_NAME:
return (ARPHDR_LOOPBACK, b'\x00'*6)
# Retrieve interface information
try:
l = dnet.intf().get(iff)
link_addr = l["link_addr"]
except:
raise Scapy_Exception("Error in attempting to get hw address"
" for interface [%s]" % iff)
if hasattr(link_addr, "type"):
# Legacy dnet module
return link_addr.type, link_addr.data
else:
# dumbnet module
mac = mac2str(str(link_addr))
# Adjust the link type
if l["type"] == 6: # INTF_TYPE_ETH from dnet
return (ARPHDR_ETHER, mac)
return (l["type"], mac)
def get_if_raw_addr(ifname):
i = dnet.intf()
try:
return i.get(ifname)["addr"].data
except OSError:
warning("No MAC address found on %s !" % ifname)
return b"\0\0\0\0"
def get_if_list():
return [i.get("name", None) for i in dnet.intf()]
if conf.use_pcap and conf.use_dnet:
class L3dnetSocket(SuperSocket):
desc = "read/write packets at layer 3 using libdnet and libpcap"
def __init__(self, type = ETH_P_ALL, promisc=None, filter=None, iface=None, nofilter=0):
self.iflist = {}
self.intf = dnet.intf()
if iface is None:
iface = conf.iface
self.iface = iface
if promisc is None:
promisc = 0
self.promisc = promisc
self.ins = open_pcap(iface, 1600, self.promisc, 100)
try:
ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
except:
pass
if nofilter:
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
filter = "ether proto %i" % type
else:
filter = None
else:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
if filter:
filter = "(ether proto %i) and (%s)" % (type,filter)
else:
filter = "ether proto %i" % type
if filter:
self.ins.setfilter(filter)
def send(self, x):
iff,a,gw = x.route()
if iff is None:
iff = conf.iface
ifs,cls = self.iflist.get(iff,(None,None))
if ifs is None:
iftype = self.intf.get(iff)["type"]
if iftype == dnet.INTF_TYPE_ETH:
try:
cls = conf.l2types[1]
except KeyError:
warning("Unable to find Ethernet class. Using nothing")
ifs = dnet.eth(iff)
else:
ifs = dnet.ip()
self.iflist[iff] = ifs,cls
if cls is None:
sx = str(x)
else:
sx = str(cls()/x)
x.sent_time = time.time()
ifs.send(sx)
def recv(self,x=MTU):
ll = self.ins.datalink()
if ll in conf.l2types:
cls = conf.l2types[ll]
else:
cls = conf.default_l2
warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
pkt = self.ins.next()
if pkt is not None:
ts,pkt = pkt
if pkt is None:
return
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
pkt.time = ts
return pkt.payload
def nonblock_recv(self):
self.ins.setnonblock(1)
p = self.recv()
self.ins.setnonblock(0)
return p
def close(self):
if not self.closed:
if hasattr(self, "ins"):
del(self.ins)
if hasattr(self, "outs"):
del(self.outs)
self.closed = True
class L2dnetSocket(SuperSocket):
desc = "read/write packets at layer 2 using libdnet and libpcap"
def __init__(self, iface = None, type = ETH_P_ALL, promisc=None, filter=None, nofilter=0):
if iface is None:
iface = conf.iface
self.iface = iface
if promisc is None:
promisc = 0
self.promisc = promisc
self.ins = open_pcap(iface, 1600, self.promisc, 100)
try:
ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
except:
pass
if nofilter:
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
filter = "ether proto %i" % type
else:
filter = None
else:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
if filter:
filter = "(ether proto %i) and (%s)" % (type,filter)
else:
filter = "ether proto %i" % type
if filter:
self.ins.setfilter(filter)
self.outs = dnet.eth(iface)
def recv(self,x=MTU):
ll = self.ins.datalink()
if ll in conf.l2types:
cls = conf.l2types[ll]
else:
cls = conf.default_l2
warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
pkt = self.ins.next()
if pkt is not None:
ts,pkt = pkt
if pkt is None:
return
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
pkt.time = ts
return pkt
def nonblock_recv(self):
self.ins.setnonblock(1)
p = self.recv(MTU)
self.ins.setnonblock(0)
return p
def close(self):
if not self.closed:
if hasattr(self, "ins"):
del(self.ins)
if hasattr(self, "outs"):
del(self.outs)
self.closed = True
conf.L3socket=L3dnetSocket
conf.L2socket=L2dnetSocket
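# Editor's sketch, not part of the original module: with conf.L3socket bound to
# the dnet-backed class above, a layer-3 packet can be pushed out directly.
# Hedged example; assumes libdnet/libpcap are installed and routing is set up.
def _demo_l3dnet_send():
    from scapy.layers.inet import IP, ICMP
    s = conf.L3socket()
    try:
        s.send(IP(dst="127.0.0.1") / ICMP())
    finally:
        s.close()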
|
CodeNameGhost/shiva
|
thirdparty/scapy/arch/pcapdnet.py
|
Python
|
mit
| 26,496
|
[
"VisIt"
] |
9e73ee8fc86f647f30dcdf3174d1168acdb769ce97073e9beabf1ed663387478
|
import datetime
import json, yaml
import io
import csv
from bson import json_util, ObjectId
from collections import OrderedDict
from dateutil.parser import parse
from django.conf import settings
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
try:
from django_mongoengine import Document
except ImportError:
from mongoengine import Document
from mongoengine import EmbeddedDocument, DynamicEmbeddedDocument
from mongoengine import StringField, ListField, EmbeddedDocumentField
from mongoengine import IntField, DateTimeField, ObjectIdField, BooleanField
from mongoengine.base import BaseDocument
try:
from mongoengine import ValidationError
except ImportError:
from mongoengine.base import ValidationError
# Determine if we should be caching queries or not.
if settings.QUERY_CACHING:
try:
from django_mongoengine import QuerySet as QS
except ImportError:
from mongoengine import QuerySet as QS
else:
try:
from django_mongoengine import QuerySetNoCache as QS
except ImportError:
from mongoengine import QuerySetNoCache as QS
from pprint import pformat
from crits.core.user_tools import user_sources
from crits.core.fields import CritsDateTimeField
from crits.core.class_mapper import class_from_id, class_from_type
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.objects import ObjectTypes
# Hack to fix an issue with non-cached querysets and django-tastypie-mongoengine
# The issue is in django-tastypie-mongoengine in resources.py from what I can
# tell.
try:
from mongoengine.queryset import tranform as mongoengine_tranform
except ImportError:
mongoengine_tranform = None
QUERY_TERMS_ALL = getattr(mongoengine_tranform, 'MATCH_OPERATORS', (
'ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod', 'all', 'size', 'exists',
'not', 'within_distance', 'within_spherical_distance', 'within_box',
'within_polygon', 'near', 'near_sphere', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith', 'exact', 'iexact',
'match'
))
class Query(object):
"""
Query class to hold available query terms.
"""
query_terms = dict([(query_term, None) for query_term in QUERY_TERMS_ALL])
class CritsQuerySet(QS):
"""
CRITs default QuerySet. Used to override methods like .only() and to extend
it with other methods we want to perform on a QuerySet object.
"""
_len = None
query = Query()
def __len__(self):
"""
Modified version of the default __len__() which allows
us to get the length with or without caching enabled.
"""
if self._len is not None:
return self._len
if settings.QUERY_CACHING:
if self._has_more:
# populate the cache
list(self._iter_results())
self._len = len(self._result_cache)
else:
self._len = self.count()
return self._len
def only(self, *fields):
"""
Modified version of the default only() which allows
us to add default fields we always want to include.
"""
# We don't need to modify the fields when None are passed
if not fields:
return super(CritsQuerySet, self).only(*fields)
#Always include schema_version so we can migrate if needed.
if 'schema_version' not in fields:
fields = fields + ('schema_version',)
return super(CritsQuerySet, self).only(*fields)
def from_json(self, json_data):
"""
Converts JSON data to unsaved objects.
Takes either a Python list of individual JSON objects or the
result of calling json.dumps on a Python list of Python
dictionaries.
:param json_data: List or result of json.dumps.
:type json_data: list or str
:returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
"""
if not isinstance(json_data, list):
son_data = json_util.loads(json_data)
return [self._document._from_son(data) for data in son_data]
else:
#Python list of JSON objects
return [self._document.from_json(data) for data in json_data]
def to_dict(self, excludes=[], projection=[]):
"""
Converts CritsQuerySet to a list of dictionaries.
:param excludes: List fields to exclude in each document.
:type excludes: list
:param projection: List fields to limit results on.
        :type projection: list
:returns: list of dictionaries
"""
return [obj.to_dict(excludes,projection) for obj in self]
def to_csv(self, fields):
"""
Converts CritsQuerySet to CSV formatted string.
:param fields: List fields to return for each document.
:type fields: list
:returns: str
"""
filter_keys = [
'id',
'password',
'password_reset',
'schema_version',
]
if not fields:
fields = self[0]._data.keys()
# Create a local copy
fields = fields[:]
for key in filter_keys:
if key in fields:
fields.remove(key)
csvout = str(",".join(fields) + "\n")
csvout += "".join(obj.to_csv(fields) for obj in self)
return csvout
def to_json(self, exclude=[]):
"""
Converts a CritsQuerySet to JSON.
:param exclude: Fields to exclude from each document.
:type exclude: list
:returns: json
"""
return json.dumps([obj.to_dict(exclude) for obj in self],
default=json_handler)
def from_yaml(self, yaml_data):
"""
Converts YAML data to a list of unsaved objects.
:param yaml_data: The YAML to convert.
:type yaml_data: list
:returns: list
"""
return [self._document.from_yaml(doc) for doc in yaml_data]
def to_yaml(self, exclude=[]):
"""
Converts a CritsQuerySet to a list of YAML docs.
:param exclude: Fields to exclude from each document.
:type exclude: list
:returns: list
"""
return [doc.to_yaml(exclude) for doc in self]
def sanitize_sources(self, username=None):
"""
Sanitize each document in a CritsQuerySet for source information and
return the results as a list.
:param username: The user which requested the data.
:type username: str
:returns: list
"""
if not username:
return self
sources = user_sources(username)
final_list = []
for doc in self:
doc.sanitize_sources(username, sources)
final_list.append(doc)
return final_list
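# Editor's sketch, not part of the original module: any Document whose meta
# sets 'queryset_class': CritsQuerySet (as CritsDocument below does) gets the
# converters above on its .objects manager. The queryset argument here is
# illustrative.
def _demo_queryset_export(qs):
    # qs would come from e.g. SomeTLO.objects(status="New"), SomeTLO being a
    # hypothetical CRITs top-level object class.
    return qs.to_csv(["status", "created"]), qs.to_json(exclude=["schema_version"])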
class CritsDocumentFormatter(object):
"""
Class to inherit from to gain the ability to convert a top-level object
class to another format.
"""
def to_json(self):
"""
Return the object in JSON format.
"""
return self.to_mongo()
def to_dict(self):
"""
Return the object as a dict.
"""
return self.to_mongo().to_dict()
def __str__(self):
"""
Allow us to use `print`.
"""
return self.to_json()
def get(self, attr=None, ret=None):
"""
Simulate a `.get()` from a dict.
"""
if attr is not None:
return getattr(self, attr)
return ret
def merge(self, arg_dict=None, overwrite=False, **kwargs):
"""
Merge a dictionary into a top-level object class.
:param arg_dict: The dictionary to get data from.
:type arg_dict: dict
:param overwrite: Whether or not to overwrite data in the object.
:type overwrite: boolean
"""
merge(self, arg_dict=arg_dict, overwrite=overwrite)
class CritsStatusDocument(BaseDocument):
"""
Inherit to add status to a top-level object.
"""
status = StringField(default="New")
def set_status(self, status):
"""
Set the status of a top-level object.
:param status: The status to set:
('New', 'In Progress', 'Analyzed', 'Informational', 'Deprecated')
"""
if status in ('New', 'In Progress', 'Analyzed', 'Informational', 'Deprecated'):
self.status = status
if status == 'Deprecated' and 'actions' in self:
for action in self.actions:
action.active = "off"
class CritsBaseDocument(BaseDocument):
"""
Inherit to add a created and modified date to a top-level object.
"""
created = CritsDateTimeField(default=datetime.datetime.now)
# modified will be overwritten on save
modified = CritsDateTimeField()
class CritsSchemaDocument(BaseDocument):
"""
Inherit to add a schema_version to a top-level object.
Default schema_version is 0 so that later, on .save(), we can tell if a
document coming from the DB never had a schema_version assigned and
raise an error.
"""
schema_version = IntField(default=0)
class UnsupportedAttrs(DynamicEmbeddedDocument, CritsDocumentFormatter):
"""
Inherit to allow a top-level object to store unsupported attributes.
"""
meta = {"auto_create_index": False,}
class CritsDocument(BaseDocument):
"""
Mixin for adding CRITs specific functionality to the MongoEngine module.
All CRITs MongoEngine-based classes should inherit from this class
in addition to MongoEngine's Document.
NOTE: this class uses some undocumented methods and attributes from MongoEngine's
BaseDocument and may need to be revisited if/when the code is updated.
"""
meta = {
'duplicate_attrs':[],
'migrated': False,
'migrating': False,
'needs_migration': False,
'queryset_class': CritsQuerySet,
"auto_create_index": False,
}
unsupported_attrs = EmbeddedDocumentField(UnsupportedAttrs)
def __init__(self, **values):
"""
Override .save() and .delete() with our own custom versions.
"""
if hasattr(self, 'save'):
#.save() is normally defined on a Document, not BaseDocument, so
# we'll have to monkey patch to call our save.
self.save = self._custom_save
if hasattr(self, 'delete'):
#.delete() is normally defined on a Document, not BaseDocument, so
# we'll have to monkey patch to call our delete.
self.delete = self._custom_delete
self._meta['strict'] = False
super(CritsDocument, self).__init__(**values)
def _custom_save(self, force_insert=False, validate=True, clean=False,
write_concern=1, cascade=None, cascade_kwargs=None,
_refs=None, username=None, **kwargs):
"""
Custom save function. Extended to check for valid schema versions,
automatically update modified times, and audit the changes made.
"""
from crits.core.handlers import audit_entry
if hasattr(self, 'schema_version') and not self.schema_version:
#Check that documents retrieved from the DB have a recognized
# schema_version
if not self._created:
raise UnrecognizedSchemaError(self)
#If it's a new document, set the appropriate schema version
elif hasattr(self, '_meta') and 'latest_schema_version' in self._meta:
self.schema_version = self._meta['latest_schema_version']
#TODO: convert this to using UTC
if hasattr(self, 'modified'):
self.modified = datetime.datetime.now()
do_audit = False
if self.id:
audit_entry(self, username, "save")
else:
do_audit = True
# MongoEngine evidently tries to add partial functions as attributes:
# https://github.com/MongoEngine/mongoengine/blob/master/mongoengine/base/document.py#L967
# A bit of a hack but removing it manually until we can figure out why it is
# here and how to stop it from happening.
try:
self.unsupported_attrs.__delattr__('get_tlp_display')
except:
pass
        # Pass a write concern only when this version of save() accepts the
        # ``w`` argument; inspect the signature instead of calling save()
        # twice (the original hasattr check here triggered an extra save).
        import inspect
        try:
            save_accepts_w = 'w' in inspect.getargspec(
                super(self.__class__, self).save).args
        except TypeError:
            save_accepts_w = False
        if save_accepts_w:
super(self.__class__, self).save(force_insert=force_insert,
validate=validate,
clean=clean,
w=write_concern,
cascade=cascade,
cascade_kwargs=cascade_kwargs,
_refs=_refs)
else:
super(self.__class__, self).save(force_insert=force_insert,
validate=validate,
clean=clean,
cascade=cascade,
cascade_kwargs=cascade_kwargs,
_refs=_refs)
if do_audit:
audit_entry(self, username, "save", new_doc=True)
return
def _custom_delete(self, username=None, **kwargs):
"""
Custom delete function. Overridden to allow us to extend to other parts
of CRITs and clean up dangling relationships, comments, objects, GridFS
files, bucket_list counts, and favorites.
"""
from crits.core.handlers import audit_entry, alter_bucket_list
audit_entry(self, username, "delete")
if self._has_method("delete_all_relationships"):
self.delete_all_relationships(username=username)
if self._has_method("delete_all_comments"):
self.delete_all_comments()
if self._has_method("delete_all_analysis_results"):
self.delete_all_analysis_results()
if self._has_method("delete_all_objects"):
self.delete_all_objects()
if self._has_method("delete_all_favorites"):
self.delete_all_favorites()
if hasattr(self, 'filedata'):
self.filedata.delete()
if hasattr(self, 'bucket_list'):
alter_bucket_list(self, self.bucket_list, -1)
super(self.__class__, self).delete()
return
def __setattr__(self, name, value):
"""
        Overridden to handle unsupported attributes.
"""
#Make sure name is a valid field for MongoDB. Also, name cannot begin with
# underscore because that indicates a private MongoEngine attribute.
if (not self._dynamic and hasattr(self, 'unsupported_attrs')
and not name in self._fields and not name.startswith('_')
and not name.startswith('$') and not '.' in name
and name not in ('save', 'delete')):
if not self.unsupported_attrs:
self.unsupported_attrs = UnsupportedAttrs()
self.unsupported_attrs.__setattr__(name, value)
else:
super(CritsDocument, self).__setattr__(name, value)
def _has_method(self, method):
"""
Convenience method for determining if a method exists for this class.
:param method: The method to check for.
:type method: str
:returns: True, False
"""
if hasattr(self, method) and callable(getattr(self, method)):
return True
else:
return False
@classmethod
def _from_son(cls, son, _auto_dereference=True, only_fields=None, created=False):
"""
Override the default _from_son(). Allows us to move attributes in the
database to unsupported_attrs if needed, validate the schema_version,
and automatically migrate to newer schema versions.
"""
doc = super(CritsDocument, cls)._from_son(son, _auto_dereference)
#Make sure any fields that are unsupported but exist in the database
# get added to the document's unsupported_attributes field.
#Get database names for all fields that *should* exist on the object.
db_fields = [val.db_field for key,val in cls._fields.iteritems()]
#custom __setattr__ does logic of moving fields to unsupported_fields
        for key, val in son.iteritems():
            if key not in db_fields:
                setattr(doc, key, val)
#After a document is retrieved from the database, and any unsupported
# fields have been moved to unsupported_attrs, make sure the original
# fields will get removed from the document when it's saved.
if hasattr(doc, 'unsupported_attrs'):
if doc.unsupported_attrs is not None:
for attr in doc.unsupported_attrs:
#mark for deletion
if not hasattr(doc, '_changed_fields'):
doc._changed_fields = []
doc._changed_fields.append(attr)
# Check for a schema_version. Raise exception so we don't
# infinitely loop through attempting to migrate.
if hasattr(doc, 'schema_version'):
if doc.schema_version == 0:
raise UnrecognizedSchemaError(doc)
# perform migration, if needed
if hasattr(doc, '_meta'):
if ('schema_version' in doc and
'latest_schema_version' in doc._meta and
doc.schema_version < doc._meta['latest_schema_version']):
# mark for migration
doc._meta['needs_migration'] = True
# reload doc to get full document from database
if (doc._meta.get('needs_migration', False) and
not doc._meta.get('migrating', False)):
doc._meta['migrating'] = True
doc.reload()
try:
doc.migrate()
doc._meta['migrated'] = True
doc._meta['needs_migration'] = False
doc._meta['migrating'] = False
except Exception as e:
e.tlo = doc.id
raise e
return doc
def migrate(self):
"""
Should be overridden by classes which inherit this class.
"""
pass
def merge(self, arg_dict=None, overwrite=False, **kwargs):
"""
Merge a dictionary into a top-level object class.
:param arg_dict: The dictionary to get data from.
:type arg_dict: dict
:param overwrite: Whether or not to overwrite data in the object.
:type overwrite: boolean
"""
merge(self, arg_dict=arg_dict, overwrite=overwrite)
    def to_csv(self, fields=[], headers=False):
"""
Convert a class into a CSV.
:param fields: Fields to include in the CSV.
:type fields: list
:param headers: Whether or not to write out column headers.
:type headers: boolean
:returns: str
"""
if not fields:
fields = self._data.keys()
csv_string = io.BytesIO()
csv_wr = csv.writer(csv_string)
if headers:
csv_wr.writerow([f.encode('utf-8') for f in fields])
# Build the CSV Row
row = []
for field in fields:
if field in self._data:
data = ""
if field == "actions" and self._has_method("get_action_types"):
data = ";".join(self.get_action_types())
elif field == "aliases" and self._has_method("get_aliases"):
data = ";".join(self.get_aliases())
elif field == "campaign" and self._has_method("get_campaign_names"):
data = ';'.join(self.get_campaign_names())
elif field == "source" and self._has_method("get_source_names"):
data = ';'.join(self.get_source_names())
elif field == "tickets":
data = ';'.join(self.get_tickets())
else:
data = self._data[field]
if not hasattr(data, 'encode'):
try: # convert list of strings
data = ";".join(data)
except: # Convert non-string data types
data = unicode(data)
row.append(data.encode('utf-8'))
else:
row.append('')
csv_wr.writerow(row)
return csv_string.getvalue()
def to_dict(self, exclude=[], include=[]):
"""
Return the object's _data as a python dictionary.
All fields will be converted to base python types so that
no MongoEngine fields remain.
:param exclude: list of fields to exclude in the result.
:type exclude: list
:param include: list of fields to include in the result.
:type include: list
:returns: dict
"""
#MongoEngine's to_mongo() returns an object in a MongoDB friendly
# dictionary format. If we have no extra processing to do, just
# return that.
data = self.to_mongo()
#
# Include, Exclude, return
# Check projection in db_field_map
# After the to_mongo, the fields have changed
newproj = []
for p in include:
if p in self._db_field_map:
p = self._db_field_map[p]
elif p == "id": # _id is not in the db_field_map
p = "_id"
newproj.append(p)
if include:
result = {}
for k, v in data.items():
if k in newproj and k not in exclude:
if k == "_id":
k = "id"
result[k] = v
return result
elif exclude:
result = {}
for k, v in data.items():
if k in exclude:
continue
if k == "_id":
k = "id"
result[k] = v
return result
return data
def _json_yaml_convert(self, exclude=[]):
"""
Helper to convert to a dict before converting to JSON.
:param exclude: list of fields to exclude.
:type exclude: list
:returns: json
"""
d = self.to_dict(exclude)
return json.dumps(d, default=json_handler)
@classmethod
def from_json(cls, json_data):
"""
Converts JSON data to an unsaved document instance.
NOTE: this method already exists in mongoengine 0.8, so it can
be removed from here when the codebase is updated.
:returns: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
"""
return cls._from_son(json_util.loads(json_data))
def to_json(self, exclude=[]):
"""
Convert to JSON.
:param exclude: list of fields to exclude.
:type exclude: list
:returns: json
"""
return self._json_yaml_convert(exclude)
@classmethod
def from_yaml(cls, yaml_data):
"""
Converts YAML data to an unsaved document instance.
:returns: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
"""
return cls._from_son(yaml.load(yaml_data))
def to_yaml(self, exclude=[]):
"""
        Convert to YAML.
        :param exclude: list of fields to exclude.
        :type exclude: list
        :returns: yaml
"""
return yaml.dump(yaml.load(self._json_yaml_convert(exclude)),
default_flow_style=False)
def __str__(self):
"""
Allow us to print the class in a readable fashion.
:returns: str
"""
return pformat(self.to_dict())
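# Editor's sketch, not part of the original module: a hypothetical top-level
# class assembled from the mixins above, mirroring the pattern used by Action
# below; every name here is illustrative and assumes a configured
# CRITs/MongoEngine environment.
class _ExampleNote(CritsDocument, CritsSchemaDocument, Document):
    meta = {
        "collection": "example_notes",  # hypothetical collection name
        "auto_create_index": False,
        "crits_type": 'ExampleNote',
        "latest_schema_version": 1,
        "schema_doc": {'title': 'The title of this note'},
    }
    title = StringField()
# Usage would look like:
#   note = _ExampleNote()
#   note.title = "hello"
#   note.to_json()   # CritsDocument supplies to_json/to_dict/to_csv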
class EmbeddedPreferredAction(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Preferred Action
"""
object_type = StringField()
object_field = StringField()
object_value = StringField()
class Action(CritsDocument, CritsSchemaDocument, Document):
"""
Action type class.
"""
meta = {
"collection": settings.COL_IDB_ACTIONS,
"auto_create_index": False,
"crits_type": 'Action',
"latest_schema_version": 1,
"schema_doc": {
'name': 'The name of this Action',
'active': 'Enabled in the UI (on/off)',
'object_types': 'List of TLOs this is for',
'preferred': 'List of dictionaries defining where this is preferred'
},
}
name = StringField()
active = StringField(default="on")
object_types = ListField(StringField())
preferred = ListField(EmbeddedDocumentField(EmbeddedPreferredAction))
class EmbeddedAction(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded action class.
"""
action_type = StringField()
active = StringField()
analyst = StringField()
begin_date = CritsDateTimeField(default=datetime.datetime.now)
date = CritsDateTimeField(default=datetime.datetime.now)
end_date = CritsDateTimeField()
performed_date = CritsDateTimeField(default=datetime.datetime.now)
reason = StringField()
class CritsActionsDocument(BaseDocument):
"""
Inherit if you want to track actions information on a top-level object.
"""
actions = ListField(EmbeddedDocumentField(EmbeddedAction))
def add_action(self, type_, active, analyst, begin_date,
end_date, performed_date, reason, date=None):
"""
Add an action to an Indicator.
:param type_: The type of action.
:type type_: str
:param active: Whether this action is active or not.
:param active: str ("on", "off")
:param analyst: The user adding this action.
:type analyst: str
:param begin_date: The date this action begins.
:type begin_date: datetime.datetime
:param end_date: The date this action ends.
:type end_date: datetime.datetime
:param performed_date: The date this action was performed.
:type performed_date: datetime.datetime
:param reason: The reason for this action.
:type reason: str
:param date: The date this action was added to CRITs.
:type date: datetime.datetime
"""
ea = EmbeddedAction()
ea.action_type = type_
ea.active = active
ea.analyst = analyst
ea.begin_date = begin_date
ea.end_date = end_date
ea.performed_date = performed_date
ea.reason = reason
if date:
ea.date = date
self.actions.append(ea)
def delete_action(self, date=None, action=None):
"""
Delete an action.
:param date: The date of the action to delete.
:type date: datetime.datetime
:param action: The action to delete.
:type action: str
"""
if not date or not action:
return
for t in self.actions:
if t.date == date and t.action_type == action:
self.actions.remove(t)
break
def edit_action(self, type_, active, analyst, begin_date,
end_date, performed_date, reason, date=None):
"""
Edit an action for an Indicator.
:param type_: The type of action.
:type type_: str
:param active: Whether this action is active or not.
:param active: str ("on", "off")
:param analyst: The user editing this action.
:type analyst: str
:param begin_date: The date this action begins.
:type begin_date: datetime.datetime
:param end_date: The date this action ends.
:type end_date: datetime.datetime
:param performed_date: The date this action was performed.
:type performed_date: datetime.datetime
:param reason: The reason for this action.
:type reason: str
:param date: The date this action was added to CRITs.
:type date: datetime.datetime
"""
if not date:
return
for t in self.actions:
if t.date == date and t.action_type == type_:
self.actions.remove(t)
ea = EmbeddedAction()
ea.action_type = type_
ea.active = active
ea.analyst = analyst
ea.begin_date = begin_date
ea.end_date = end_date
ea.performed_date = performed_date
ea.reason = reason
ea.date = date
self.actions.append(ea)
break
def get_action_types(self, active_only=False):
"""
Return a list of action types applied to the object.
:param active_only: If True, only return active Actions.
If False, return all Actions.
:type active_only: boolean
        :returns: list
"""
if active_only:
return [a['action_type'] for a in self._data['actions']
if a['active'] == 'on']
else:
return [a['action_type'] for a in self._data['actions']]
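# Editor's sketch, not part of the original module: recording and reading back
# an action on any object inheriting CritsActionsDocument; all argument values
# are illustrative.
def _demo_add_action(tlo):
    now = datetime.datetime.now()
    tlo.add_action("Blocked", "on", "analyst1", now, now, now, "example reason")
    return tlo.get_action_types(active_only=True)  # e.g. ["Blocked"]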
# Embedded Documents common to most classes
class EmbeddedSource(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Source.
"""
class SourceInstance(EmbeddedDocument, CritsDocumentFormatter):
"""
Information on the instance of this source.
"""
analyst = StringField()
date = CritsDateTimeField(default=datetime.datetime.now)
method = StringField()
reference = StringField()
tlp = StringField(default='red', choices=('white', 'green', 'amber', 'red'))
def __eq__(self, other):
"""
Two source instances are equal if their data attributes are equal
"""
if isinstance(other, type(self)):
if (self.analyst == other.analyst and
self.date == other.date and
self.method == other.method and
self.reference == other.reference):
# all data attributes are equal, so sourceinstances are equal
return True
return False
instances = ListField(EmbeddedDocumentField(SourceInstance))
name = StringField()
class CritsSourceDocument(BaseDocument):
"""
Inherit if you want to track source information on a top-level object.
"""
source = ListField(EmbeddedDocumentField(EmbeddedSource), required=True)
def add_source(self, source_item=None, source=None, method='',
reference='', date=None, analyst=None, tlp=None):
"""
Add a source instance to this top-level object.
:param source_item: An entire source instance.
:type source_item: :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param source: Name of the source.
:type source: str
:param method: Method of acquisition.
:type method: str
:param reference: Reference to the data from the source.
:type reference: str
:param date: The date of acquisition.
:type date: datetime.datetime
:param analyst: The user adding the source instance.
:type analyst: str
:param tlp: The TLP level this data was shared under.
:type tlp: str
"""
sc = len(self.source)
s = None
if source and analyst and tlp:
if tlp not in ('white', 'green', 'amber', 'red'):
tlp = 'red'
if not date:
date = datetime.datetime.now()
s = EmbeddedSource()
s.name = source
i = EmbeddedSource.SourceInstance()
i.date = date
i.reference = reference
i.method = method
i.analyst = analyst
i.tlp = tlp
s.instances = [i]
if not isinstance(source_item, EmbeddedSource):
source_item = s
if isinstance(source_item, EmbeddedSource):
match = None
if method or reference or tlp: # use method, reference, and tlp
for instance in source_item.instances:
instance.method = method or instance.method
instance.reference = reference or instance.reference
instance.tlp = tlp or instance.tlp
for c, s in enumerate(self.source):
if s.name == source_item.name: # find index of matching source
match = c
break
if match is not None: # if source exists, add instances to it
# Don't add exact duplicates
for new_inst in source_item.instances:
for exist_inst in self.source[match].instances:
if new_inst == exist_inst:
break
else:
self.source[match].instances.append(new_inst)
else: # else, add as new source
self.source.append(source_item)
if not sc:
self.tlp = source_item.instances[0].tlp
def edit_source(self, source=None, date=None, method='',
reference='', analyst=None, tlp=None):
"""
Edit a source instance from this top-level object.
:param source: Name of the source.
:type source: str
:param date: The date of acquisition to match on.
:type date: datetime.datetime
:param method: Method of acquisition.
:type method: str
:param reference: Reference to the data from the source.
:type reference: str
:param analyst: The user editing the source instance.
:type analyst: str
:param tlp: The TLP this data was shared under.
:type tlp: str
"""
if tlp not in ('white', 'green', 'amber', 'red'):
tlp = 'red'
if source and date:
for c, s in enumerate(self.source):
if s.name == source:
for i, si in enumerate(s.instances):
if si.date == date:
self.source[c].instances[i].method = method
self.source[c].instances[i].reference = reference
self.source[c].instances[i].analyst = analyst
self.source[c].instances[i].tlp = tlp
def remove_source(self, source=None, date=None, remove_all=False):
"""
Remove a source or source instance from a top-level object.
:param source: Name of the source.
:type source: str
:param date: Date to match on.
:type date: datetime.datetime
:param remove_all: Remove all instances of this source.
:type remove_all: boolean
:returns: dict with keys "success" (boolean) and "message" (str)
"""
keepone = {'success': False,
'message': "Must leave at least one source for access controls. "
"If you wish to change the source, please assign a new source and then remove the old."}
if not source:
return {'success': False,
'message': 'No source to locate'}
if not remove_all and not date:
return {'success': False,
'message': 'Not removing all and no date to find.'}
for s in self.source:
if s.name == source:
if remove_all:
if len(self.source) > 1:
self.source.remove(s)
message = "Deleted source %s" % source
return {'success': True,
'message': message}
else:
return keepone
else:
for si in s.instances:
if si.date == date:
if len(s.instances) > 1:
s.instances.remove(si)
message = "Deleted instance of %s" % source
return {'success': True,
'message': message}
else:
if len(self.source) > 1:
self.source.remove(s)
message = "Deleted source %s" % source
return {'success': True,
'message': message}
else:
return keepone
def sanitize_sources(self, username=None, sources=None):
"""
Sanitize the source list down to only those a user has access to see.
:param username: The user requesting this data.
:type username: str
:param sources: A list of sources the user has access to.
:type sources: list
"""
if username and hasattr(self, 'source'):
length = len(self.source)
if not sources:
sources = user_sources(username)
            # use slice assignment to modify the list in place, so any code
            # already holding a reference to self.source sees the changes
self.source[:] = [s for s in self.source if s.name in sources]
            # A bit of a hack: append a deliberately malformed "Other"
            # source whose instances list length equals the number of
            # sources sanitized out of the user's view. A side effect
            # (untested) is that trying to save() this document should
            # raise a ValidationError.
new_length = len(self.source)
if length > new_length:
i_length = length - new_length
s = EmbeddedSource()
s.name = "Other"
s.instances = [0] * i_length
self.source.append(s)
def get_source_names(self):
"""
Return a list of source names that have provided this data.
"""
return [obj['name'] for obj in self._data['source']]
class EmbeddedTicket(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Ticket Class.
"""
analyst = StringField()
date = CritsDateTimeField(default=datetime.datetime.now)
ticket_number = StringField()
class EmbeddedTickets(BaseDocument):
"""
Embedded Tickets List.
"""
tickets = ListField(EmbeddedDocumentField(EmbeddedTicket))
def is_ticket_exist(self, ticket_number):
"""
Does this ticket already exist?
:param ticket_number: The ticket to look for.
:type ticket_number: str
        :returns: True or False
"""
for ticket in self.tickets:
if ticket_number == ticket.ticket_number:
                return True
        return False
def add_ticket(self, tickets, analyst=None, date=None):
"""
Add a ticket to this top-level object.
:param tickets: The ticket(s) to add.
:type tickets: str, list, or
:class:`crits.core.crits_mongoengine.EmbeddedTicket`
:param analyst: The user adding this ticket.
:type analyst: str
:param date: The date for the ticket.
:type date: datetime.datetime.
"""
if isinstance(tickets, basestring):
tickets = tickets.split(',')
elif not isinstance(tickets, list):
tickets = [tickets]
for ticket in tickets:
if isinstance(ticket, EmbeddedTicket):
if not self.is_ticket_exist(ticket.ticket_number): # stop dups
self.tickets.append(ticket)
elif isinstance(ticket, basestring):
if ticket and not self.is_ticket_exist(ticket): # stop dups
et = EmbeddedTicket()
et.analyst = analyst
et.ticket_number = ticket
if date:
et.date = date
self.tickets.append(et)
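    # Illustrative usage (not from this module; `doc` is an assumed TLO
    # inheriting EmbeddedTickets):
    #
    #   doc.add_ticket("INC-100,INC-101", analyst="jdoe")
    #   doc.add_ticket(["INC-102"], analyst="jdoe",
    #                  date=datetime.datetime(2016, 1, 1))
    #
    # Duplicate ticket numbers are skipped via is_ticket_exist() above.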
def edit_ticket(self, analyst, ticket_number, date=None):
"""
Edit a ticket this top-level object.
:param analyst: The user editing this ticket.
:type analyst: str
:param ticket_number: The new ticket value.
:type ticket_number: str
:param date: The date for the ticket.
:type date: datetime.datetime.
"""
if not date:
return
for t in self.tickets:
if t.date == date:
self.tickets.remove(t)
et = EmbeddedTicket()
et.analyst = analyst
et.ticket_number = ticket_number
et.date = date
self.tickets.append(et)
break
def delete_ticket(self, date=None):
"""
Delete a ticket from this top-level object.
:param date: The date the ticket was added.
:type date: datetime.datetime
"""
if not date:
return
for t in self.tickets:
if t.date == date:
self.tickets.remove(t)
break
def get_tickets(self):
"""
Get the tickets for this top-level object.
:returns: list
"""
return [obj['ticket_number'] for obj in self._data['tickets']]
class EmbeddedCampaign(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Campaign Class.
"""
analyst = StringField()
confidence = StringField(default='low', choices=('low', 'medium', 'high'))
date = CritsDateTimeField(default=datetime.datetime.now)
description = StringField()
name = StringField(required=True)
class EmbeddedLocation(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Location object
"""
location_type = StringField(required=True)
location = StringField(required=True)
description = StringField(required=False)
latitude = StringField(required=False)
longitude = StringField(required=False)
analyst = StringField(required=True)
date = DateTimeField(default=datetime.datetime.now)
class Releasability(EmbeddedDocument, CritsDocumentFormatter):
"""
Releasability Class.
"""
class ReleaseInstance(EmbeddedDocument, CritsDocumentFormatter):
"""
Releasability Instance Class.
"""
analyst = StringField()
date = DateTimeField()
note = StringField()
name = StringField()
analyst = StringField()
instances = ListField(EmbeddedDocumentField(ReleaseInstance))
class UnrecognizedSchemaError(ValidationError):
"""
Error if the schema for a document is not found or unrecognized.
"""
def __init__(self, doc, **kwargs):
message = "Document schema is unrecognized: %s" % doc.schema_version
self.schema = doc._meta['schema_doc']
self.doc = doc.to_dict()
super(UnrecognizedSchemaError, self).__init__(message=message,
field_name='schema_version', **kwargs)
class EmbeddedObject(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Object Class.
"""
analyst = StringField()
date = CritsDateTimeField(default=datetime.datetime.now)
source = ListField(EmbeddedDocumentField(EmbeddedSource), required=True)
object_type = StringField(required=True, db_field="type")
value = StringField(required=True)
class EmbeddedRelationship(EmbeddedDocument, CritsDocumentFormatter):
"""
Embedded Relationship Class.
"""
relationship = StringField(required=True)
relationship_date = CritsDateTimeField()
object_id = ObjectIdField(required=True, db_field="value")
date = CritsDateTimeField(default=datetime.datetime.now)
rel_type = StringField(db_field="type", required=True)
analyst = StringField()
rel_reason = StringField()
rel_confidence = StringField(default='unknown', required=True)
class CritsBaseAttributes(CritsDocument, CritsBaseDocument,
CritsSchemaDocument, CritsStatusDocument, EmbeddedTickets):
"""
CRITs Base Attributes Class. The main class that should be inherited if you
are making a new top-level object. Adds all of the standard top-level object
features.
"""
analyst = StringField()
bucket_list = ListField(StringField())
campaign = ListField(EmbeddedDocumentField(EmbeddedCampaign))
locations = ListField(EmbeddedDocumentField(EmbeddedLocation))
description = StringField()
obj = ListField(EmbeddedDocumentField(EmbeddedObject), db_field="objects")
relationships = ListField(EmbeddedDocumentField(EmbeddedRelationship))
releasability = ListField(EmbeddedDocumentField(Releasability))
screenshots = ListField(StringField())
sectors = ListField(StringField())
tlp = StringField(default='red', choices=('white', 'green', 'amber', 'red'))
def set_tlp(self, tlp):
"""
Set the TLP of this TLO.
:param tlp: The TLP to set.
"""
if tlp not in ('white', 'green', 'amber', 'red'):
tlp = 'red'
if tlp in self.get_acceptable_tlp_levels():
self.tlp = tlp
def get_acceptable_tlp_levels(self):
"""
Based on what TLP levels sources have shared, limit the list of TLP
levels you can share this with accordingly.
:returns: list
"""
d = {'white': ['white', 'green', 'amber', 'red'],
'green': ['green', 'amber', 'red'],
'amber': ['amber', 'red'],
'red': ['red']}
my_tlps = []
for s in self.source:
for i in s.instances:
my_tlps.append(i.tlp)
my_tlps = OrderedDict.fromkeys(my_tlps).keys()
if 'white' in my_tlps:
return d['white']
elif 'green' in my_tlps:
return d['green']
elif 'amber' in my_tlps:
return d['amber']
else:
return d['red']
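    # Example of the narrowing rule above: the most permissive TLP among
    # all source instances sets the acceptable range (illustrative values):
    #   instance TLPs ['amber', 'red'] -> ['amber', 'red']
    #   instance TLPs ['green', 'red'] -> ['green', 'amber', 'red']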
def add_campaign(self, campaign_item=None, update=True):
"""
Add a campaign to this top-level object.
:param campaign_item: The campaign to add.
:type campaign_item: :class:`crits.core.crits_mongoengine.EmbeddedCampaign`
        :param update: If True, merge with a pre-existing campaign of the
                       same name; if False, leave pre-existing campaigns
                       unchanged.
:type update: boolean
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if isinstance(campaign_item, EmbeddedCampaign):
if campaign_item.name != None and campaign_item.name.strip() != '':
campaign_item.confidence = campaign_item.confidence.strip().lower()
if campaign_item.confidence == '':
campaign_item.confidence = 'low'
for c, campaign in enumerate(self.campaign):
if campaign.name == campaign_item.name:
if not update:
return {'success': False, 'message': 'This Campaign is already assigned.'}
con = {'low': 1, 'medium': 2, 'high': 3}
                        if con.get(campaign.confidence, 0) < con.get(campaign_item.confidence, 0):
self.campaign[c].confidence = campaign_item.confidence
self.campaign[c].analyst = campaign_item.analyst
break
else:
self.campaign.append(campaign_item)
return {'success': True, 'message': 'Campaign assigned successfully!'}
return {'success': False, 'message': 'Campaign is invalid'}
def remove_campaign(self, campaign_name=None, campaign_date=None):
"""
Remove a campaign from this top-level object.
:param campaign_name: The campaign to remove.
:type campaign_name: str
:param campaign_date: The date the campaign was added.
:type campaign_date: datetime.datetime.
"""
for campaign in self.campaign:
if campaign.name == campaign_name or campaign.date == campaign_date:
self.campaign.remove(campaign)
break
def edit_campaign(self, campaign_name=None, campaign_item=None):
"""
Edit an existing Campaign. This just removes the old entry and adds a
new one.
:param campaign_name: The campaign to remove.
:type campaign_name: str
:param campaign_item: The campaign to add.
:type campaign_item: :class:`crits.core.crits_mongoengine.EmbeddedCampaign`
"""
if isinstance(campaign_item, EmbeddedCampaign):
self.remove_campaign(campaign_name=campaign_item.name)
self.add_campaign(campaign_item=campaign_item)
def add_location(self, location_item=None):
"""
Add a location to this top-level object.
:param location_item: The location to add.
:type location_item: :class:`crits.core.crits_mongoengine.EmbeddedLocation`
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if isinstance(location_item, EmbeddedLocation):
if (location_item.location != None and
location_item.location.strip() != ''):
for l, location in enumerate(self.locations):
if (location.location == location_item.location and
location.location_type == location_item.location_type and
location.date == location_item.date):
return {'success': False,
'message': 'This location is already assigned.'}
else:
self.locations.append(location_item)
return {'success': True,
'message': 'Location assigned successfully!'}
return {'success': False,
'message': 'Location is invalid'}
def edit_location(self, location_name=None, location_type=None, date=None,
description=None, latitude=None, longitude=None):
"""
Edit a location.
:param location_name: The location_name to edit.
:type location_name: str
:param location_type: The location_type to edit.
:type location_type: str
:param date: The location date to edit.
:type date: str
:param description: The new description.
:type description: str
:param latitude: The new latitude.
:type latitude: str
:param longitude: The new longitude.
:type longitude: str
"""
if isinstance(date, basestring):
date = parse(date, fuzzy=True)
for location in self.locations:
if (location.location == location_name and
location.location_type == location_type and
location.date == date):
if description:
location.description = description
if latitude:
location.latitude = latitude
if longitude:
location.longitude = longitude
break
def remove_location(self, location_name=None, location_type=None, date=None):
"""
Remove a location from this top-level object.
:param location_name: The location to remove.
:type location_name: str
:param location_type: The location type.
:type location_type: str
:param date: The location date.
:type date: str
"""
if isinstance(date, basestring):
date = parse(date, fuzzy=True)
for location in self.locations:
if (location.location == location_name and
location.location_type == location_type and
location.date == date):
self.locations.remove(location)
break
def add_bucket_list(self, tags, analyst, append=True):
"""
Add buckets to this top-level object.
:param tags: The buckets to be added.
:type tags: list, str
:param analyst: The analyst adding these buckets.
:type analyst: str
:param append: Whether or not to replace or append these buckets.
:type append: boolean
"""
from crits.core.handlers import alter_bucket_list
# Track the addition or subtraction of tags.
# Get the bucket_list for the object, find out if this is an addition
# or subtraction of a bucket_list.
if isinstance(tags, list) and len(tags) == 1 and tags[0] == '':
parsed_tags = []
elif isinstance(tags, (str, unicode)):
parsed_tags = tags.split(',')
else:
parsed_tags = tags
parsed_tags = [t.strip() for t in parsed_tags]
names = None
if len(self.bucket_list) >= len(parsed_tags):
names = [x for x in self.bucket_list if x not in parsed_tags and x != '']
val = -1
else:
names = [x for x in parsed_tags if x not in self.bucket_list and x != '']
val = 1
if names:
alter_bucket_list(self, names, val)
if append:
for t in parsed_tags:
if t and t not in self.bucket_list:
self.bucket_list.append(t)
else:
self.bucket_list = parsed_tags
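    # Illustrative usage (not from this module; `doc` is an assumed TLO):
    #
    #   doc.add_bucket_list("apt, phishing", analyst="jdoe")
    #   doc.get_bucket_list_string()   # -> 'apt,phishing' (see below)
    #
    # Pass append=False to replace the existing buckets instead.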
def get_bucket_list_string(self):
"""
Collapse the list of buckets into a single comma-separated string.
:returns: str
"""
return ','.join(str(x) for x in self.bucket_list)
def add_sector_list(self, sectors, analyst, append=True):
"""
Add sectors to this top-level object.
:param sectors: The sectors to be added.
        :type sectors: list, str
:param analyst: The analyst adding these sectors.
:type analyst: str
:param append: Whether or not to replace or append these sectors.
:type append: boolean
"""
from crits.core.handlers import alter_sector_list
# Track the addition or subtraction of tags.
# Get the sectors for the object, find out if this is an addition
# or subtraction of a sector.
if isinstance(sectors, list) and len(sectors) == 1 and sectors[0] == '':
parsed_sectors = []
elif isinstance(sectors, (str, unicode)):
parsed_sectors = sectors.split(',')
else:
parsed_sectors = sectors
parsed_sectors = [s.strip() for s in parsed_sectors]
names = None
if len(self.sectors) >= len(parsed_sectors):
names = [x for x in self.sectors if x not in parsed_sectors and x != '']
val = -1
else:
names = [x for x in parsed_sectors if x not in self.sectors and x != '']
val = 1
if names:
alter_sector_list(self, names, val)
if append:
for t in parsed_sectors:
                if t and t not in self.sectors:
self.sectors.append(t)
else:
self.sectors = parsed_sectors
def get_sectors_list_string(self):
"""
Collapse the list of sectors into a single comma-separated string.
:returns: str
"""
return ','.join(str(x) for x in self.sectors)
def get_comments(self):
"""
Get the comments for this top-level object.
:returns: list
"""
from crits.comments.handlers import get_comments
comments = get_comments(self.id, self._meta['crits_type'])
return comments
def delete_all_comments(self):
"""
Delete all comments for this top-level object.
"""
from crits.comments.comment import Comment
Comment.objects(obj_id=self.id,
obj_type=self._meta['crits_type']).delete()
def get_screenshots(self, analyst):
"""
Get the screenshots for this top-level object.
        :param analyst: The user requesting the screenshots.
        :type analyst: str
        :returns: list
"""
from crits.screenshots.handlers import get_screenshots_for_id
screenshots = get_screenshots_for_id(self._meta['crits_type'],
self.id,
analyst,
True)
if 'screenshots' in screenshots:
return screenshots['screenshots']
else:
return []
def add_object(self, object_type, value, source, method, reference,
analyst, object_item=None):
"""
Add an object to this top-level object.
:param object_type: The Object Type being added.
:type object_type: str
:param value: The value of the object being added.
:type value: str
:param source: The name of the source adding this object.
:type source: str
:param method: The method in which the object was added or gathered.
:type method: str
:param reference: A reference to the original object.
:type reference: str
:param analyst: The user adding this object.
:type analyst: str
:param object_item: An entire object ready to be added.
:type object_item: :class:`crits.core.crits_mongoengine.EmbeddedObject`
:returns: dict with keys:
"success" (boolean)
"message" (str)
"object" (EmbeddedObject)
"""
if not isinstance(object_item, EmbeddedObject):
object_item = EmbeddedObject()
object_item.analyst = analyst
src = create_embedded_source(source,
method=method,
reference=reference,
needs_tlp=False,
analyst=analyst)
if not src:
return {'success': False, 'message': 'Invalid Source'}
object_item.source = [src]
object_item.object_type = object_type
object_item.value = value
for o in self.obj:
if (o.object_type == object_item.object_type
and o.value == object_item.value):
return {'success': False, 'object': o,
'message': 'Object already exists'}
self.obj.append(object_item)
return {'success': True, 'object': object_item}
def remove_object(self, object_type, value):
"""
Remove an object from this top-level object.
:param object_type: The type of the object being removed.
:type object_type: str
:param value: The value of the object being removed.
:type value: str
"""
for o in self.obj:
if (o.object_type == object_type and
o.value == value):
from crits.objects.handlers import delete_object_file
self.obj.remove(o)
delete_object_file(value)
break
def delete_all_analysis_results(self):
"""
Delete all analysis results for this top-level object.
"""
from crits.services.analysis_result import AnalysisResult
results = AnalysisResult.objects(object_id=str(self.id))
for result in results:
result.delete()
def delete_all_objects(self):
"""
Delete all objects for this top-level object.
"""
from crits.objects.handlers import delete_object_file
for o in self.obj:
if o.object_type == ObjectTypes.FILE_UPLOAD:
delete_object_file(o.value)
self.obj = []
def delete_all_favorites(self):
"""
Delete all favorites for this top-level object.
"""
from crits.core.user import CRITsUser
users = CRITsUser.objects()
for user in users:
type_ = self._meta['crits_type']
if type_ in user.favorites and str(self.id) in user.favorites[type_]:
user.favorites[type_].remove(str(self.id))
user.save()
def update_object_value(self, object_type, value, new_value):
"""
Update the value for an object on this top-level object.
:param object_type: The type of the object being updated.
:type object_type: str
:param value: The value of the object being updated.
:type value: str
:param new_value: The new value of the object being updated.
:type new_value: str
"""
for c, o in enumerate(self.obj):
if (o.object_type == object_type and
o.value == value):
self.obj[c].value = new_value
break
def update_object_source(self, object_type, value,
new_source=None, new_method='',
new_reference='', analyst=None):
"""
Update the source for an object on this top-level object.
:param object_type: The type of the object being updated.
:type object_type: str
:param value: The value of the object being updated.
:type value: str
:param new_source: The name of the new source.
:type new_source: str
:param new_method: The method of the new source.
:type new_method: str
:param new_reference: The reference of the new source.
:type new_reference: str
:param analyst: The user updating the source.
:type analyst: str
"""
for c, o in enumerate(self.obj):
if (o.object_type == object_type and
o.value == value):
if not analyst:
                    analyst = self.obj[c].source[0].instances[0].analyst
source = [create_embedded_source(new_source,
method=new_method,
reference=new_reference,
needs_tlp=False,
analyst=analyst)]
self.obj[c].source = source
break
def format_campaign(self, campaign, analyst):
"""
Render a campaign to HTML to prepare for inclusion in a template.
        :param campaign: The campaign to templatize.
:type campaign: :class:`crits.core.crits_mongoengine.EmbeddedCampaign`
:param analyst: The user requesting the Campaign.
:type analyst: str
:returns: str
"""
html = render_to_string('campaigns_display_row_widget.html',
{'campaign': campaign,
'hit': self,
'obj': None,
'relationship': {'type': self._meta['crits_type']}})
return html
def format_location(self, location, analyst):
"""
Render a location to HTML to prepare for inclusion in a template.
        :param location: The location to templatize.
        :type location: :class:`crits.core.crits_mongoengine.EmbeddedLocation`
        :param analyst: The user requesting the location.
:type analyst: str
:returns: str
"""
html = render_to_string('locations_display_row_widget.html',
{'location': location,
'hit': self,
'obj': None,
'relationship': {'type': self._meta['crits_type']}})
return html
def sort_objects(self):
"""
Sort the objects for this top-level object.
:returns: dict
"""
o_dict = dict((o.object_type,[]) for o in self.obj)
o_dict['Other'] = 0
o_dict['Count'] = len(self.obj)
for o in self.obj:
o_dict[o.object_type].append(o.to_dict())
return o_dict
def add_relationship(self, rel_item, rel_type, rel_date=None,
analyst=None, rel_confidence='unknown',
rel_reason='', get_rels=False):
"""
Add a relationship to this top-level object. The rel_item will be
saved. It is up to the caller to save "self".
:param rel_item: The top-level object to relate to.
:type rel_item: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param analyst: The user forging this relationship.
:type analyst: str
:param rel_confidence: The confidence of the relationship.
:type rel_confidence: str
:param rel_reason: The reason for the relationship.
:type rel_reason: str
:param get_rels: If True, return all relationships after forging.
If False, return the new EmbeddedRelationship object
:type get_rels: boolean
:returns: dict with keys:
"success" (boolean)
"message" (str if failed, else dict or EmbeddedRelationship)
"""
# Prevent class from having a relationship to itself
if self == rel_item:
return {'success': False,
'message': 'Cannot forge relationship to oneself'}
# get reverse relationship
rev_type = RelationshipTypes.inverse(rel_type)
if rev_type is None:
return {'success': False,
'message': 'Could not find relationship type'}
date = datetime.datetime.now()
# setup the relationship for me
my_rel = EmbeddedRelationship()
my_rel.relationship = rel_type
my_rel.rel_type = rel_item._meta['crits_type']
my_rel.analyst = analyst
my_rel.date = date
my_rel.relationship_date = rel_date
my_rel.object_id = rel_item.id
my_rel.rel_confidence = rel_confidence
my_rel.rel_reason = rel_reason
# setup the relationship for them
their_rel = EmbeddedRelationship()
their_rel.relationship = rev_type
their_rel.rel_type = self._meta['crits_type']
their_rel.analyst = analyst
their_rel.date = date
their_rel.relationship_date = rel_date
their_rel.object_id = self.id
their_rel.rel_confidence = rel_confidence
their_rel.rel_reason = rel_reason
# variables for detecting if an existing relationship exists
my_existing_rel = None
their_existing_rel = None
# check for existing relationship before blindly adding
for r in self.relationships:
if (r.object_id == my_rel.object_id
and r.relationship == my_rel.relationship
and (not rel_date or r.relationship_date == rel_date)
and r.rel_type == my_rel.rel_type):
my_existing_rel = r
break # If relationship already exists then exit loop
for r in rel_item.relationships:
if (r.object_id == their_rel.object_id
and r.relationship == their_rel.relationship
and (not rel_date or r.relationship_date == rel_date)
and r.rel_type == their_rel.rel_type):
their_existing_rel = r
break # If relationship already exists then exit loop
# If the relationship already exists on both sides then do nothing
if my_existing_rel and their_existing_rel:
return {'success': False,
'message': 'Relationship already exists'}
# Repair unreciprocated relationships
if not my_existing_rel: # If my rel does not exist then add it
if their_existing_rel: # If their rel exists then use its data
my_rel.analyst = their_existing_rel.analyst
my_rel.date = their_existing_rel.date
my_rel.relationship_date = their_existing_rel.relationship_date
my_rel.rel_confidence = their_existing_rel.rel_confidence
my_rel.rel_reason = their_existing_rel.rel_reason
self.relationships.append(my_rel) # add my new relationship
if not their_existing_rel: # If their rel does not exist then add it
if my_existing_rel: # If my rel exists then use its data
their_rel.analyst = my_existing_rel.analyst
their_rel.date = my_existing_rel.date
their_rel.relationship_date = my_existing_rel.relationship_date
their_rel.rel_confidence = my_existing_rel.rel_confidence
their_rel.rel_reason = my_existing_rel.rel_reason
rel_item.relationships.append(their_rel) # add to passed rel_item
# updating DB this way can be much faster than saving entire TLO
rel_item.update(add_to_set__relationships=their_rel)
if get_rels:
results = {'success': True,
'message': self.sort_relationships(analyst, meta=True)}
else:
results = {'success': True,
'message': my_rel}
# In case of relating to a versioned backdoor we also want to relate to
# the family backdoor.
self_type = self._meta['crits_type']
rel_item_type = rel_item._meta['crits_type']
# If both are not backdoors, just return
if self_type != 'Backdoor' and rel_item_type != 'Backdoor':
return results
# If either object is a family backdoor, don't go further.
if ((self_type == 'Backdoor' and self.version == '') or
(rel_item_type == 'Backdoor' and rel_item.version == '')):
return results
# If one is a versioned backdoor and the other is a family backdoor,
# don't go further.
if ((self_type == 'Backdoor' and self.version != '' and
rel_item_type == 'Backdoor' and rel_item.version == '') or
(rel_item_type == 'Backdoor' and rel_item.version != '' and
self_type == 'Backdoor' and self.version == '')):
return results
# Figure out which is the backdoor object.
if self_type == 'Backdoor':
bd = self
other = rel_item
else:
bd = rel_item
other = self
# Find corresponding family backdoor object.
klass = class_from_type('Backdoor')
family = klass.objects(name=bd.name, version='').first()
if family:
other.add_relationship(family,
rel_type,
rel_date=rel_date,
analyst=analyst,
rel_confidence=rel_confidence,
rel_reason=rel_reason,
get_rels=get_rels)
other.save(user=analyst)
return results
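    # Illustrative sketch (the variable names are assumptions, not from
    # this module):
    #
    #   result = indicator.add_relationship(sample, "Related To",
    #                                       analyst="jdoe",
    #                                       rel_confidence="medium")
    #   if result['success']:
    #       indicator.save(user="jdoe")  # rel_item is persisted via update()
    #
    # The relationship type must be a valid RelationshipTypes value so its
    # inverse can be resolved (for example, "Related To" is typically its
    # own inverse).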
def _modify_relationship(self, rel_item=None, rel_id=None, type_=None,
rel_type=None, rel_date=None, new_type=None,
new_date=None, new_confidence='unknown',
new_reason="N/A", modification=None, analyst=None):
"""
Modify a relationship to this top-level object. If rel_item is provided it
will be used, otherwise rel_id and type_ must be provided.
:param rel_item: The top-level object to relate to.
:type rel_item: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param rel_id: The ObjectId of the top-level object to relate to.
:type rel_id: str
:param type_: The type of top-level object to relate to.
:type type_: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param new_type: The new relationship type.
:type new_type: str
:param new_date: The new relationship date.
:type new_date: datetime.datetime
:param new_confidence: The new confidence.
:type new_confidence: str
:param new_reason: The new reason.
:type new_reason: str
:param modification: What type of modification this is ("type",
"delete", "date", "confidence").
:type modification: str
:param analyst: The user forging this relationship.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
got_rel = True
if not rel_item:
got_rel = False
if isinstance(rel_id, basestring) and isinstance(type_, basestring):
rel_item = class_from_id(type_, rel_id)
else:
return {'success': False,
'message': 'Could not find object'}
if isinstance(new_date, basestring):
new_date = parse(new_date, fuzzy=True)
if rel_item and rel_type and modification:
# get reverse relationship
rev_type = RelationshipTypes.inverse(rel_type)
if rev_type is None:
return {'success': False,
'message': 'Could not find relationship type'}
if modification == "type":
# get new reverse relationship
new_rev_type = RelationshipTypes.inverse(new_type)
if new_rev_type is None:
return {'success': False,
'message': 'Could not find reverse relationship type'}
for c, r in enumerate(self.relationships):
if rel_date:
if (r.object_id == rel_item.id
and r.relationship == rel_type
and r.relationship_date == rel_date
and r.rel_type == rel_item._meta['crits_type']):
if modification == "type":
self.relationships[c].relationship = new_type
elif modification == "date":
self.relationships[c].relationship_date = new_date
elif modification == "confidence":
self.relationships[c].rel_confidence = new_confidence
elif modification == "reason":
self.relationships[c].rel_reason = new_reason
elif modification == "delete":
self.relationships.remove(r)
break
else:
if (r.object_id == rel_item.id
and r.relationship == rel_type
and r.rel_type == rel_item._meta['crits_type']):
if modification == "type":
self.relationships[c].relationship = new_type
elif modification == "date":
self.relationships[c].relationship_date = new_date
elif modification == "confidence":
self.relationships[c].rel_confidence = new_confidence
elif modification == "reason":
self.relationships[c].rel_reason = new_reason
elif modification == "delete":
self.relationships.remove(r)
break
for c, r in enumerate(rel_item.relationships):
if rel_date:
if (r.object_id == self.id
and r.relationship == rev_type
and r.relationship_date == rel_date
and r.rel_type == self._meta['crits_type']):
if modification == "type":
rel_item.relationships[c].relationship = new_rev_type
elif modification == "date":
rel_item.relationships[c].relationship_date = new_date
elif modification == "confidence":
rel_item.relationships[c].rel_confidence = new_confidence
elif modification == "reason":
rel_item.relationships[c].rel_reason = new_reason
elif modification == "delete":
rel_item.relationships.remove(r)
break
else:
if (r.object_id == self.id
and r.relationship == rev_type
and r.rel_type == self._meta['crits_type']):
if modification == "type":
rel_item.relationships[c].relationship = new_rev_type
elif modification == "date":
rel_item.relationships[c].relationship_date = new_date
elif modification == "confidence":
rel_item.relationships[c].rel_confidence = new_confidence
elif modification == "reason":
rel_item.relationships[c].rel_reason = new_reason
elif modification == "delete":
rel_item.relationships.remove(r)
break
if not got_rel:
rel_item.save(username=analyst)
if modification == "delete":
return {'success': True,
'message': 'Relationship deleted'}
else:
return {'success': True,
'message': 'Relationship modified'}
else:
return {'success': False,
'message': 'Need valid object and relationship type'}
def edit_relationship_date(self, rel_item=None, rel_id=None, type_=None, rel_type=None,
rel_date=None, new_date=None, analyst=None):
"""
Modify a relationship date for a relationship to this top-level object.
If rel_item is provided it will be used, otherwise rel_id and type_ must
be provided.
:param rel_item: The top-level object to relate to.
:type rel_item: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param rel_id: The ObjectId of the top-level object to relate to.
:type rel_id: str
:param type_: The type of top-level object to relate to.
:type type_: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param new_date: The new relationship date.
:type new_date: datetime.datetime
:param analyst: The user editing this relationship.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
return self._modify_relationship(rel_item=rel_item, rel_id=rel_id,
type_=type_, rel_type=rel_type,
rel_date=rel_date, new_date=new_date,
modification="date", analyst=analyst)
def edit_relationship_type(self, rel_item=None, rel_id=None, type_=None, rel_type=None,
rel_date=None, new_type=None, analyst=None):
"""
Modify a relationship type for a relationship to this top-level object.
If rel_item is provided it will be used, otherwise rel_id and type_ must
be provided.
:param rel_item: The top-level object to relate to.
:type rel_item: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param rel_id: The ObjectId of the top-level object to relate to.
:type rel_id: str
:param type_: The type of top-level object to relate to.
:type type_: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param new_type: The new relationship type.
:type new_type: str
:param analyst: The user editing this relationship.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
return self._modify_relationship(rel_item=rel_item, rel_id=rel_id,
type_=type_, rel_type=rel_type,
rel_date=rel_date, new_type=new_type,
modification="type", analyst=analyst)
def edit_relationship_confidence(self, rel_item=None, rel_id=None,
type_=None, rel_type=None, rel_date=None,
new_confidence='unknown', analyst=None):
"""
Modify a relationship type for a relationship to this top-level object.
If rel_item is provided it will be used, otherwise rel_id and type_ must
be provided.
:param rel_item: The top-level object to relate to.
:type rel_item: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param rel_id: The ObjectId of the top-level object to relate to.
:type rel_id: str
:param type_: The type of top-level object to relate to.
:type type_: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param new_confidence: The new confidence of the relationship.
:type new_confidence: str
:param analyst: The user editing this relationship.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
return self._modify_relationship(rel_item=rel_item, rel_id=rel_id,
type_=type_, rel_type=rel_type,
rel_date=rel_date, new_confidence=new_confidence,
modification="confidence", analyst=analyst)
def edit_relationship_reason(self, rel_item=None, rel_id=None, type_=None,
rel_type=None, rel_date=None, new_reason="N/A",
analyst=None):
"""
Modify a relationship type for a relationship to this top-level object.
If rel_item is provided it will be used, otherwise rel_id and type_ must
be provided.
:param rel_item: The top-level object to relate to.
:type rel_item: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param rel_id: The ObjectId of the top-level object to relate to.
:type rel_id: str
:param type_: The type of top-level object to relate to.
:type type_: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
        :param new_reason: The new reason for the relationship.
        :type new_reason: str
:param analyst: The user editing this relationship.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
return self._modify_relationship(rel_item=rel_item, rel_id=rel_id,
type_=type_, rel_type=rel_type,
rel_date=rel_date, new_reason=new_reason,
modification="reason", analyst=analyst)
def delete_relationship(self, rel_item=None, rel_id=None, type_=None, rel_type=None,
rel_date=None, analyst=None, *args, **kwargs):
"""
Delete a relationship from a relationship to this top-level object.
If rel_item is provided it will be used, otherwise rel_id and type_ must
be provided.
:param rel_item: The top-level object to remove relationship to.
:type rel_item: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param rel_id: The ObjectId of the top-level object to relate to.
:type rel_id: str
:param type_: The type of top-level object to relate to.
:type type_: str
:param rel_type: The type of relationship.
:type rel_type: str
:param rel_date: The date this relationship applies.
:type rel_date: datetime.datetime
:param analyst: The user removing this relationship.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
return self._modify_relationship(rel_item=rel_item, rel_id=rel_id,
type_=type_, rel_type=rel_type,
rel_date=rel_date, analyst=analyst,
modification="delete")
def delete_all_relationships(self, username=None):
"""
Delete all relationships from this top-level object.
:param username: The user deleting all of the relationships.
:type username: str
"""
for r in self.relationships[:]:
if r.relationship_date:
self.delete_relationship(rel_id=str(r.object_id),
type_=r.rel_type,
rel_type=r.relationship,
rel_date=r.relationship_date,
analyst=username)
else:
self.delete_relationship(rel_id=str(r.object_id),
type_=r.rel_type,
rel_type=r.relationship,
analyst=username)
def sort_relationships(self, username=None, meta=False):
"""
Sort the relationships for inclusion in a template.
:param username: The user requesting the relationships.
:type username: str
:param meta: Limit the results to only a subset of metadata.
:type meta: boolean
:returns: dict
"""
if len(self.relationships) < 1:
return {}
rel_dict = dict((r.rel_type,[]) for r in self.relationships)
query_dict = {
'Actor': ('id', 'name', 'campaign'),
'Backdoor': ('id', 'name', 'version', 'campaign'),
'Campaign': ('id', 'name'),
'Certificate': ('id', 'md5', 'filename', 'description', 'campaign'),
'Domain': ('id', 'domain'),
'Email': ('id', 'from_address', 'sender', 'subject', 'campaign'),
'Event': ('id', 'title', 'event_type', 'description', 'campaign'),
'Exploit': ('id', 'name', 'cve', 'campaign'),
'Indicator': ('id', 'ind_type', 'value', 'campaign', 'actions'),
'IP': ('id', 'ip', 'campaign'),
'PCAP': ('id', 'md5', 'filename', 'description', 'campaign'),
'RawData': ('id', 'title', 'data_type', 'tool', 'description',
'version', 'campaign'),
'Sample': ('id',
'md5',
'filename',
'mimetype',
'size',
'campaign'),
'Signature': ('id', 'title', 'data_type', 'description',
'version', 'campaign'),
'Target': ('id', 'firstname', 'lastname', 'email_address', 'email_count'),
}
rel_dict['Other'] = 0
rel_dict['Count'] = len(self.relationships)
if not meta:
for r in self.relationships:
rd = r.to_dict()
rel_dict[rd['type']].append(rd)
return rel_dict
elif username:
user_source_access = user_sources(username)
for r in self.relationships:
rd = r.to_dict()
obj_class = class_from_type(rd['type'])
# TODO: these should be limited to the fields above, or at
# least exclude larger fields that we don't need.
fields = query_dict.get(rd['type'])
if r.rel_type not in ["Campaign", "Target"]:
obj = obj_class.objects(id=rd['value'],
source__name__in=user_source_access).only(*fields).first()
else:
obj = obj_class.objects(id=rd['value']).only(*fields).first()
if obj:
# we can't add and remove attributes on the class
# so convert it to a dict that we can manipulate.
result = obj.to_dict()
if "_id" in result:
result["id"] = result["_id"]
if "type" in result:
result["ind_type"] = result["type"]
del result["type"]
if "value" in result:
result["ind_value"] = result["value"]
del result["value"]
# turn this relationship into a dict so we can update
# it with the object information
rd.update(result)
rel_dict[rd['type']].append(rd)
else:
rel_dict['Other'] += 1
return rel_dict
else:
return {}
def get_relationship_objects(self, username=None, sources=None):
"""
Return the top-level objects this top-level object is related to.
:param username: The user requesting these top-level objects.
:type username: str
:param sources: The user's source access list to limit by.
:type sources: list
:returns: list
"""
results = []
if not username:
return results
if not hasattr(self, 'relationships'):
return results
if not sources:
sources = user_sources(username)
for ty in set(rel.to_dict()['type'] for rel in self.relationships):
obj_class = class_from_type(ty)
            objids = [rel.to_dict()['value'] for rel in self.relationships
                      if rel.to_dict()['type'] == ty]
            if ty not in ['Campaign', 'Target']:
obj = obj_class.objects(id__in=objids, source__name__in=sources)
else:
obj = obj_class.objects(id__in=objids)
if obj:
results.extend(obj)
return results
def add_releasability(self, source_item=None, analyst=None, *args, **kwargs):
"""
Add a source as releasable for this top-level object.
:param source_item: The source to allow releasability for.
:type source_item: dict or
:class:`crits.core.crits_mongoengine.Releasability`
:param analyst: The user marking this as releasable.
:type analyst: str
"""
if isinstance(source_item, Releasability):
rels = self.releasability
for r in rels:
if r.name == source_item.name:
break
else:
if analyst:
source_item.analyst = analyst
self.releasability.append(source_item)
elif isinstance(source_item, dict):
rels = self.releasability
for r in rels:
if r.name == source_item['name']:
break
else:
if analyst:
source_item['analyst'] = analyst
self.releasability.append(Releasability(**source_item))
else:
rel = Releasability(**kwargs)
if analyst:
rel.analyst = analyst
rels = self.releasability
for r in rels:
if r.name == rel.name:
break
else:
self.releasability.append(rel)
def add_releasability_instance(self, name=None, instance=None, *args, **kwargs):
"""
Add an instance of releasing this top-level object to a source.
:param name: The name of the source that received the data.
:type name: str
:param instance: The instance of releasability.
:type instance:
:class:`crits.core.crits_mongoengine.Releasability.ReleaseInstance`
"""
if isinstance(instance, Releasability.ReleaseInstance):
for r in self.releasability:
if r.name == name:
r.instances.append(instance)
def remove_releasability(self, name=None, *args, **kwargs):
"""
Remove a source as releasable for this top-level object.
:param name: The name of the source to remove from releasability.
:type name: str
"""
if isinstance(name, basestring):
for r in self.releasability:
if r.name == name and len(r.instances) == 0:
self.releasability.remove(r)
break
def remove_releasability_instance(self, name=None, date=None, *args, **kwargs):
"""
Remove an instance of releasing this top-level object to a source.
:param name: The name of the source.
:type name: str
:param date: The date of the instance to remove.
:type date: datetime.datetime
"""
if not isinstance(date, datetime.datetime):
date = parse(date, fuzzy=True)
for r in self.releasability:
if r.name == name:
for ri in r.instances:
if ri.date == date:
r.instances.remove(ri)
def sanitize_relationships(self, username=None, sources=None):
"""
Sanitize the relationships list down to only what the user can see based
on source access.
:param username: The user to sanitize for.
:type username: str
        :param sources: The user's source list.
        :type sources: list
"""
if username:
if not sources:
sources = user_sources(username)
final_rels = []
for r in self.relationships:
rd = r.to_dict()
obj_class = class_from_type(rd['type'])
if r.rel_type not in ["Campaign", "Target"]:
obj = obj_class.objects(id=rd['value'],
source__name__in=sources).only('id').first()
else:
obj = obj_class.objects(id=rd['value']).only('id').first()
if obj:
final_rels.append(r)
self.relationships = final_rels
def sanitize_releasability(self, username=None, sources=None):
"""
Sanitize releasability list down to only what the user can see based
on source access.
:param username: The user to sanitize for.
:type username: str
        :param sources: The user's source list.
        :type sources: list
"""
if username:
if not sources:
sources = user_sources(username)
            # use slice assignment to modify the list in place, so any code
            # already holding a reference to self.releasability sees the changes
self.releasability[:] = [r for r in self.releasability if r.name in sources]
def sanitize(self, username=None, sources=None, rels=True):
"""
Sanitize this top-level object down to only what the user can see based
on source access.
:param username: The user to sanitize for.
:type username: str
        :param sources: The user's source list.
        :type sources: list
:param rels: Whether or not to sanitize relationships.
:type rels: boolean
"""
if username:
if not sources:
sources = user_sources(username)
if hasattr(self, 'source'):
self.sanitize_sources(username, sources)
if hasattr(self, 'releasability'):
self.sanitize_releasability(username, sources)
if rels:
if hasattr(self, 'relationships'):
self.sanitize_relationships(username, sources)
def get_campaign_names(self):
"""
Get the campaigns associated with this top-level object as a list of
names.
:returns: list
"""
return [obj['name'] for obj in self._data['campaign']]
def get_analysis_results(self):
"""
Get analysis results for this TLO.
:returns: list
"""
from crits.services.analysis_result import AnalysisResult
return AnalysisResult.objects(object_id=str(self.id))
def get_details_url(self):
"""
Generic function that generates a details url for a
:class:`crits.core.crits_mongoengine.CritsBaseAttributes` object.
"""
mapper = self._meta.get('jtable_opts')
if mapper is not None:
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
try:
return reverse(details_url, args=(unicode(self[details_url_key]),))
except Exception:
return None
else:
return None
class CommonAccess(BaseDocument):
"""
ACL for common TLO content.
"""
# Basics
read = BooleanField(default=False)
write = BooleanField(default=False)
delete = BooleanField(default=False)
download = BooleanField(default=False)
description_read = BooleanField(default=False)
description_edit = BooleanField(default=False)
#Actions List
actions_read = BooleanField(default=False)
actions_add = BooleanField(default=False)
actions_edit = BooleanField(default=False)
actions_delete = BooleanField(default=False)
# Bucket List
bucketlist_read = BooleanField(default=False)
bucketlist_edit = BooleanField(default=False)
# Campaigns
campaigns_read = BooleanField(default=False)
campaigns_add = BooleanField(default=False)
campaigns_edit = BooleanField(default=False)
campaigns_delete = BooleanField(default=False)
# Comments
comments_read = BooleanField(default=False)
comments_add = BooleanField(default=False)
comments_edit = BooleanField(default=False)
comments_delete = BooleanField(default=False)
# Locations
locations_read = BooleanField(default=False)
locations_add = BooleanField(default=False)
locations_edit = BooleanField(default=False)
locations_delete = BooleanField(default=False)
# Objects
objects_read = BooleanField(default=False)
objects_add = BooleanField(default=False)
objects_edit = BooleanField(default=False)
objects_delete = BooleanField(default=False)
# Relationships
relationships_read = BooleanField(default=False)
relationships_add = BooleanField(default=False)
relationships_edit = BooleanField(default=False)
relationships_delete = BooleanField(default=False)
# Releasability
releasability_read = BooleanField(default=False)
releasability_add = BooleanField(default=False)
releasability_delete = BooleanField(default=False)
# Screenshots
screenshots_read = BooleanField(default=False)
screenshots_add = BooleanField(default=False)
screenshots_delete = BooleanField(default=False)
# Sectors
sectors_read = BooleanField(default=False)
sectors_edit = BooleanField(default=False)
# Services
services_read = BooleanField(default=False)
services_execute = BooleanField(default=False)
# Sources
sources_read = BooleanField(default=False)
sources_add = BooleanField(default=False)
sources_edit = BooleanField(default=False)
sources_delete = BooleanField(default=False)
# Status
status_read = BooleanField(default=False)
status_edit = BooleanField(default=False)
# Tickets
tickets_read = BooleanField(default=False)
tickets_add = BooleanField(default=False)
tickets_edit = BooleanField(default=False)
tickets_delete = BooleanField(default=False)
def merge(self, arg_dict=None, overwrite=False, **kwargs):
"""
Merge attributes into self.
If arg_dict is supplied, it should be either a dictionary or
another object that can be iterated over like a dictionary's
iteritems (e.g., a list of two-tuples).
If arg_dict is not supplied, attributes can also be defined with
named keyword arguments; attributes supplied as keyword arguments
will be ignored if arg_dict is not None.
If overwrite is True, any attributes passed to merge will be
assigned to the object, regardless of whether those attributes
already exist. If overwrite is False, pre-existing attributes
will be preserved.
"""
if not arg_dict:
arg_dict = kwargs
if isinstance(arg_dict, dict):
iterator = arg_dict.iteritems()
else:
iterator = arg_dict
if overwrite:
for k,v in iterator:
if k != '_id' and k != 'schema_version':
self.__setattr__(k, v)
else:
for k,v in iterator:
check = getattr(self, k, None)
if not check:
self.__setattr__(k, v)
elif hasattr(self, '_meta') and 'duplicate_attrs' in self._meta:
self._meta['duplicate_attrs'].append((k,v))
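    # Illustrative behavior of merge() (assumed values):
    #
    #   acl = CommonAccess()
    #   acl.merge({'read': True, 'write': True})      # sets both flags
    #   acl.merge({'read': False})                    # 'read' stays True
    #   acl.merge({'read': False}, overwrite=True)    # 'read' becomes False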
# this is a duplicate of the function in data_tools to prevent
# circular imports. long term the one in data_tools might go
# away as most json conversion will happen using .to_json()
# on the object.
def json_handler(obj):
"""
Handles converting datetimes and Mongo ObjectIds to string.
Usage: json.dumps(..., default=json_handler)
"""
if isinstance(obj, datetime.datetime):
return datetime.datetime.strftime(obj, settings.PY_DATETIME_FORMAT)
elif isinstance(obj, ObjectId):
return str(obj)
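# Example usage, per the docstring above (illustrative values):
#
#   import json
#   json.dumps({'date': datetime.datetime.now(), 'id': ObjectId()},
#              default=json_handler)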
def create_embedded_source(name, source_instance=None, date=None,
reference='', method='', tlp=None,
needs_tlp=True, analyst=None):
"""
Create an EmbeddedSource object. If source_instance is provided it will be
used, otherwise date, reference, and method will be used.
:param name: The name of the source.
:type name: str
:param source_instance: An instance of this source.
:type source_instance:
:class:`crits.core.crits_mongoengine.EmbeddedSource.SourceInstance`
:param date: The date for the source instance.
:type date: datetime.datetime
:param method: The method for this source instance.
:type method: str
:param reference: The reference for this source instance.
:type reference: str
:param tlp: The TLP for this source instance.
:type tlp: str
:param needs_tlp: If this source needs a TLP (object sources don't yet).
:type needs_tlp: bool
:param analyst: The user creating this embedded source.
:type analyst: str
:returns: None, :class:`crits.core.crits_mongoengine.EmbeddedSource`
"""
if tlp not in ('white', 'green', 'amber', 'red', None):
return None
if isinstance(name, basestring):
s = EmbeddedSource()
s.name = name
if isinstance(source_instance, EmbeddedSource.SourceInstance):
s.instances = [source_instance]
else:
if not date:
date = datetime.datetime.now()
i = EmbeddedSource.SourceInstance()
i.date = date
i.reference = reference
i.method = method
if needs_tlp:
if not tlp:
return None
i.tlp = tlp
i.analyst = analyst
s.instances = [i]
return s
else:
return None
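# Illustrative usage (not from this module; names are examples only):
#
#   src = create_embedded_source("Example Source", method="analysis",
#                                reference="case-1234", tlp="green",
#                                analyst="jdoe")
#   # Object sources do not require a TLP yet:
#   obj_src = create_embedded_source("Example Source", needs_tlp=False,
#                                    analyst="jdoe")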
| Magicked/crits | crits/core/crits_mongoengine.py | Python | mit | 105,301 | ["Amber"] | 00c9f59919acdfaf5237c3258cf18795c4370e8c8c876f10d1f2205a7ae47254 |
#!/usr/bin/env python
# Copyright (C) 2011-2012, 2014, 2016 The PISM Authors
# script to generate figure: results from SeaRISE experiments
# usage: if UAFX_G_D3_C?_??.nc are result NetCDF files then do
# $ slr_show.py -m UAFX
import sys

# try different netCDF modules
try:
    from netCDF4 import Dataset as CDF
except ImportError:
    print "netCDF4 is not installed!"
    sys.exit(1)
from numpy import zeros
import pylab as plt
from optparse import OptionParser
parser = OptionParser()
parser.usage = "usage: %prog [options]"
parser.description = "A script for PISM output files to show time series plots using pylab."
parser.add_option("-a", dest="t_a", type="int",
help="start year, in years since 2004, default = 0", default=0)
parser.add_option("-e", dest="t_e", type="int",
help="end year, in years since 2004, default = 500", default=500)
parser.add_option("-m", "--model", dest="model",
help="choose experiment, default UAF1", default="UAF1")
(options, args) = parser.parse_args()
model = options.model
t_a = options.t_a
t_e = options.t_e
# first name in this list is CONTROL
NCNAMES = [model + "_G_D3_C1_E0.nc",
           model + "_G_D3_C2_E0.nc",
           model + "_G_D3_C3_E0.nc",
           model + "_G_D3_C4_E0.nc",
           model + "_G_D3_C1_S1.nc",
           model + "_G_D3_C1_S2.nc",
           model + "_G_D3_C1_S3.nc",
           model + "_G_D3_C1_M1.nc",
           model + "_G_D3_C1_M2.nc",
           model + "_G_D3_C1_M3.nc",
           model + "_G_D3_C1_T1.nc"]
# labels
labels = ["AR4 A1B", "AR4 A1B 1.5x", "AR4 A1B 2x", "2x basal sliding", "2.5x basal sliding", "3x basal sliding", "2 m/a bmr", "20 m/a bmr", "200 m/a bmr", "AR4 A1B + 2x sliding"]
# line colors
colors = ['#984EA3', # violet
'#984EA3', # violet
'#984EA3', # violet
'#FF7F00', # orange
'#FF7F00', # orange
'#FF7F00', # orange
'#377EB8', # light blue
'#377EB8', # light blue
'#377EB8', # light blue
'#4DAF4A'] # green
dashes = ['-', '--', '-.', '-', '--', '-.', '-', '--', '-.', '-']
print "control run name is " + NCNAMES[0]
n = len(NCNAMES)
nc0 = CDF(NCNAMES[0], 'r')
try:
t_units = nc0.variables['tseries'].units
t = nc0.variables['tseries'][t_a:t_e]
except (KeyError, AttributeError):
t_units = nc0.variables['time'].units
t = nc0.variables['time'][t_a:t_e]
nc0.close()
# convert to years if t is in seconds
if t_units.split()[0] in ('seconds', 's'):
t /= 3.15569259747e7
volume_glacierized = zeros((len(t), n))
ivolshift = zeros((len(t), n - 1))
for j in range(n):
nc = CDF(NCNAMES[j], 'r')
volume_glacierized[:, j] = nc.variables['volume_glacierized'][t_a:t_e]
nc.close()
for j in range(n - 1):
ivolshift[:, j] = volume_glacierized[:, j + 1] - volume_glacierized[:, 0]
# "2,850,000 km3 of ice were to melt, global sea levels would rise 7.2 m"
scale = 7.2 / 2.850e6
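# Note: volume_glacierized is assumed to be in m^3 (its usual PISM units),
# so dividing by 1.0e9 below converts it to km^3 before applying the
# m-of-SLR-per-km^3 scale factor.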
# screen plot with high contrast
fig = plt.figure()
ax = fig.add_subplot(111, axisbg='0.15')
for j in range(n - 1):
ax.plot(t, -(ivolshift[:, j] / 1.0e9) * scale, dashes[j], color=colors[j], linewidth=3)
ax.set_xlabel('years from 2004')
ax.set_ylabel('sea level rise relative to control (m)')
ax.legend(labels, loc='upper left')
ax.grid(True, color='w')
plt.show()
# line colors
colors = ['#984EA3', # violet
'#984EA3', # violet
'#984EA3', # violet
'#FF7F00', # orange
'#FF7F00', # orange
'#FF7F00', # orange
'#084594', # dark blue
'#084594', # dark blue
'#084594', # dark blue
'#4DAF4A'] # green
# print plot with white background
fig = plt.figure()
ax = fig.add_subplot(111)
for j in range(n - 1):
ax.plot(t, -(ivolshift[:, j] / 1.0e9) * scale, dashes[j], color=colors[j], linewidth=2)
ax.set_xlabel('years from 2004')
ax.set_ylabel('sea level rise relative to control (m)')
ax.legend(labels, loc='upper left')
ax.grid(True)
plt.savefig(model + '_slr.pdf')
| citibeth/twoway | pism/std-greenland/slr_show.py | Python | gpl-3.0 | 3,893 | ["NetCDF"] | b0247e0d85c54625d57db3c0ac5ea9936e712e5817225181617436469fd143cf |
|
from topo.pattern import Gabor, SineGrating, Gaussian
import __main__
import numpy
import pylab
import matplotlib
from numpy import array, size, mat, shape, ones, arange
from topo import numbergen
#from topo.base.functionfamily import IdentityTF
from topo.transferfn.misc import PatternCombine
from topo.transferfn.misc import AttributeTrackingTF
from topo.transferfn.misc import HalfRectify
from topo.transferfn import PiecewiseLinear, DivisiveNormalizeL1, IdentityTF, ActivityAveragingTF, Sigmoid
from topo.base.cf import CFSheet
from topo.projection import CFProjection, SharedWeightCFProjection
import topo
from topo.sheet import GeneratorSheet
from topo.base.boundingregion import BoundingBox
from topo.pattern.image import FileImage
import contrib.jacommands
import contrib.dd
from matplotlib.ticker import MaxNLocator
from contrib.JanA.noiseEstimation import signal_power_test
from helper import *
from contrib.JanA.dataimport import sortOutLoading
#dd = contrib.dd.DB()
#dd.load_db("modelfitDB.dat")
save_fig=__main__.__dict__.get('SaveFig',False)
save_fig_directory='./'
def release_fig(filename=None):
import pylab
if save_fig:
pylab.savefig(save_fig_directory+filename)
class ModelFit():
weigths = []
retina_diameter=1.2
density=24
epochs = 500
learning_rate = 0.00001
DC = 0
reliable_indecies=[]
momentum=0.0
num_of_units=0
def init(self):
self.reliable_indecies = ones(self.num_of_units)
def calculateModelOutput(self,inputs,index):
return self.weigths*inputs[index].T+self.DC.T
def trainModel(self,inputs,activities,validation_inputs,validation_activities,stop=None):
        if stop is None:
            stop = numpy.ones(numpy.shape(activities[0])).copy() * 1e21  # effectively never stop
delta=[]
self.DC=numpy.array(activities[0]).copy()*0.0
#self.weigths=numpy.mat(numpy.zeros((size(activities,1),size(inputs[0],1))))
self.weigths=numpy.mat(numpy.identity(size(inputs[0],1)))
best_weights = self.weigths.copy()
mean_error=numpy.mat(numpy.zeros(shape(activities[0].T)))
first_val_error=0
val_err=0
min_err=1000000000
min_val_err=1000000000000
min_val_err_array = ones(self.num_of_units)*10000000000000000000
first_val_err_array = []
err_hist = []
val_err_hist = []
val_pve = []
validation_error=numpy.mat(numpy.zeros(shape(activities[0].T)))
variance=numpy.mat(numpy.zeros(shape(activities[0].T)))
for i in xrange(0,len(validation_inputs)):
error = ((validation_activities[i].T - self.weigths*validation_inputs[i].T - self.DC.T))
validation_error=validation_error+numpy.power(error,2)
variance = variance + numpy.power((validation_activities[i] - numpy.mean(validation_activities,axis=0)),2).T
print "INITIAL VALIDATION ERROR", numpy.sum(validation_error)/len(validation_inputs)/len(validation_error)
print "INITIAL FEV on validation set", numpy.mean(1.0-numpy.divide(validation_error,variance))
for k in xrange(0,self.epochs):
stop_learning = (stop>k)*1.0
sl = numpy.mat(stop_learning).T
for i in xrange(1, size(inputs[0],1)):
sl = numpy.concatenate((sl,numpy.mat(stop_learning).T),axis=1)
mean_error=numpy.mat(numpy.zeros(shape(activities[0].T)))
validation_error=numpy.mat(numpy.zeros((len(activities[0].T),1)))
tmp_weigths=numpy.mat(numpy.zeros((size(activities,1),size(inputs[0],1))))
for i in xrange(0,len(inputs)):
error = ((activities[i].T - self.weigths*inputs[i].T - self.DC.T))
tmp_weigths = tmp_weigths + (error * inputs[i])
mean_error=mean_error+numpy.power(error,2)
err_hist.append(mean_error)
if k == 0:
delta = tmp_weigths/numpy.sqrt(numpy.sum(numpy.power(tmp_weigths,2)))
else:
delta = self.momentum*delta + (1.0-self.momentum)*tmp_weigths/numpy.sqrt(numpy.sum(numpy.power(tmp_weigths,2)))
delta = numpy.multiply(delta/numpy.sqrt(numpy.sum(numpy.power(delta,2))),sl)
self.weigths = self.weigths + self.learning_rate*delta
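            # update rule: L2-normalise the batch gradient, blend it with the previous
            # step via momentum, renormalise, mask out units whose stop criterion has
            # been reached (sl), and take a fixed-size step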
err = numpy.sum(mean_error)/len(inputs)/len(mean_error)
for i in xrange(0,len(validation_inputs)):
error = ((validation_activities[i].T - self.weigths*validation_inputs[i].T - self.DC.T))
validation_error=validation_error+numpy.power(error,2)
val_err_hist.append(validation_error)
val_err = numpy.sum(validation_error)/len(validation_inputs)/len(validation_error)
if val_err < min_val_err:
min_val_err = val_err
if err < min_err:
min_err = err
for i in xrange(0,len(min_val_err_array)):
if min_val_err_array[i] > validation_error[i,0]:
min_val_err_array[i] = validation_error[i,0]
#!!!!!!!!!!!!!
best_weights[i,:] = self.weigths[i,:]
if k == 0:
first_val_error=val_err
first_val_err_array = min_val_err_array.copy()
print (k,err,val_err)
print "First val error:" + str(first_val_error) + "\n Minimum val error:" + str(min_val_err) + "\n Last val error:" + str(val_err) + "\nImprovement:" + str((first_val_error - min_val_err)/first_val_error * 100) + "%" #+ "\nBest cell by cell error:" + str(numpy.sum(min_val_err_array)/len(min_val_err_array)/len(validation_inputs)) + "\nBest cell by cell error improvement:" + str((first_val_err_array - min_val_err_array)/len(validation_inputs)/first_val_err_array)
# plot error evolution
a = err_hist[0].T/self.epochs
b = val_err_hist[0].T/self.epochs
for i in xrange(1,len(err_hist)):
a = numpy.concatenate((a,err_hist[i].T/self.epochs))
b = numpy.concatenate((b,val_err_hist[i].T/self.epochs))
a = numpy.mat(a).T
b = numpy.mat(b).T
pylab.figure()
for i in xrange(0,size(activities,1)):
pylab.hold(True)
pylab.plot(numpy.array(a[i])[0])
pylab.figure()
for i in xrange(0,size(activities,1)):
pylab.hold(True)
pylab.plot(numpy.array(b[i])[0])
# recalculate a new model DC based on what we have learned
self.DC*=0.0
# set weights to the minimum ones
self.weigths = best_weights
#for i in xrange(0,len(validation_inputs)):
# self.DC+=(validation_activities[i].T - self.weigths*validation_inputs[i].T).T
#self.DC = self.DC/len(validation_inputs)
#!!!!!!!!!
validation_error=numpy.mat(numpy.zeros(shape(activities[0].T)))
variance=numpy.mat(numpy.zeros(shape(activities[0].T)))
for i in xrange(0,len(validation_inputs)):
error = ((validation_activities[i].T - self.weigths*validation_inputs[i].T - self.DC.T))
validation_error=validation_error+numpy.power(error,2)
variance = variance + numpy.power((validation_activities[i].T - numpy.mean(validation_activities,axis=0).T),2)
print "FINAL VALIDATION ERROR", numpy.sum(validation_error)/len(validation_inputs)/len(validation_error)
print "FINAL FEV on validation set", numpy.mean(1-numpy.divide(validation_error,variance))
return (min_val_err,numpy.argmin(b.T,axis=0),min_val_err_array/len(validation_inputs))
def returnPredictedActivities(self,inputs):
for i in xrange(0,len(inputs)):
if i == 0:
modelActivities = self.calculateModelOutput(inputs,i)
else:
a = self.calculateModelOutput(inputs,i)
modelActivities = numpy.concatenate((modelActivities,a),axis=1)
return numpy.mat(modelActivities).T
def calculateReliabilities(self,inputs,activities,top_percentage):
err=numpy.zeros(self.num_of_units)
modelResponses=[]
modelActivities=[]
for i in xrange(0,len(inputs)):
if i == 0:
modelActivities = self.calculateModelOutput(inputs,i)
else:
a = self.calculateModelOutput(inputs,i)
modelActivities = numpy.concatenate((modelActivities,a),axis=1)
modelActivities = numpy.mat(modelActivities)
#for i in xrange(0,self.num_of_units):
# corr_coef[i] = numpy.corrcoef(modelActivities[i], activities.T[i])[0][1]
#print numpy.shape(modelActivities)
#print numpy.shape(activities)
for i in xrange(0,self.num_of_units):
err[i] = numpy.sum(numpy.power(modelActivities[i]- activities.T[i],2))
t = []
import operator
for i in xrange(0,self.num_of_units):
t.append((i,err[i]))
        t = sorted(t, key=operator.itemgetter(1))  # ascending error: most reliable units first
self.reliable_indecies*=0
for i in xrange(0,self.num_of_units*top_percentage/100):
self.reliable_indecies[t[i][0]] = 1
#print t[self.num_of_units-1-i][0]
#pylab.figure()
#pylab.show._needmain=False
#pylab.subplot(3,1,1)
#pylab.plot(numpy.array(activities.T[t[self.num_of_units-1-i][0]][0].T))
#pylab.plot(numpy.array(modelActivities[t[self.num_of_units-1-i][0]][0].T))
#pylab.show()
def testModel(self,inputs,activities,target_inputs=None):
modelActivities=[]
modelResponses=[]
error = 0
        if target_inputs is None:
            target_inputs = range(len(inputs))
for index in range(len(inputs)):
modelActivities.append(self.calculateModelOutput(inputs,index))
tmp = []
correct = 0
for i in target_inputs:
tmp = []
for j in target_inputs:
tmp.append(numpy.sum(numpy.power(numpy.multiply(activities[i].T-modelActivities[j],numpy.mat(self.reliable_indecies).T),2)))
#tmp.append(numpy.sum(numpy.abs( numpy.multiply(activities[i].T - modelActivities[j],numpy.mat(self.reliable_indecies).T)) ))
#tmp.append(numpy.corrcoef(modelActivities[j].T, activities[i])[0][1])
x = numpy.argmin(array(tmp))
#x = numpy.argmax(array(tmp))
x = target_inputs[x]
#print (x,i)
            if (i % 1) == 1:  # always False, so these per-stimulus diagnostic plots are disabled
pylab.show._needmain=False
pylab.figure()
pylab.subplot(3,1,1)
                pylab.plot(numpy.array(activities[i])[0], 'o', label='target')
pylab.plot(numpy.array(modelActivities[x].T)[0], 'o',label='predicted model')
pylab.plot(numpy.array(modelActivities[i].T)[0], 'o',label='correct model')
pylab.legend()
#pylab.show()
if x == i:
correct+=1.0
print correct, " correct out of ", len(target_inputs)
print "Percentage of correct answers:" ,correct/len(target_inputs)*100, "%"
def testModelBiased(self,inputs,activities,t):
modelActivities=[]
modelResponses=[]
error = 0
(num_inputs,act_len)= numpy.shape(activities)
print (num_inputs,act_len)
for index in range(num_inputs):
modelActivities.append(self.calculateModelOutput(inputs,index))
m = numpy.array(numpy.mean(activities,0))[0]
tmp = []
correct = 0
for i in xrange(0,num_inputs):
tmp = []
significant_neurons=numpy.zeros(numpy.shape(activities[0]))
for z in xrange(0,act_len):
if activities[i,z] >= m[z]*t: significant_neurons[0,z]=1.0
for j in xrange(0,num_inputs):
tmp.append(numpy.sum(numpy.power(numpy.multiply(numpy.multiply(activities[i].T-modelActivities[j],numpy.mat(self.reliable_indecies)),numpy.mat(significant_neurons).T),2))/ numpy.sum(significant_neurons))
x = numpy.argmin(array(tmp))
if x == i: correct+=1.0
print correct, " correct out of ", num_inputs
print "Percentage of correct answers:" ,correct/num_inputs*100, "%"
class MotionModelFit(ModelFit):
real_time=True
def init(self):
        self.reliable_indecies = ones(self.num_of_units)
        self.filters = []  # (g1, g2) quadrature filter pairs, filled below
for freq in [1.0,2.0,4.0,8.0]:
for xpos in xrange(0,int(freq)):
for ypos in xrange(0,int(freq)):
x=xpos*(self.retina_diameter/freq)-self.retina_diameter/2 + self.retina_diameter/freq/2
y=ypos*(self.retina_diameter/freq)-self.retina_diameter/2 + self.retina_diameter/freq/2
for orient in xrange(0,8):
g1 = []
g2 = []
t = 2
sigma = 1.0
for speed in [3,6,30]:
for p in xrange(0,speed):
#temporal_gauss = numpy.exp(-(p-(t+1))*(p-(t+1)) / 2*sigma)
temporal_gauss=1.0
g1.append(temporal_gauss*Gabor(bounds=BoundingBox(radius=self.retina_diameter/2),frequency=freq,x=x,y=y,xdensity=self.density,ydensity=self.density,size=1/freq,orientation=2*numpy.pi/8*orient,phase=p*(numpy.pi/(speed)))())
g2.append(temporal_gauss*Gabor(bounds=BoundingBox(radius=self.retina_diameter/2),frequency=freq,x=x,y=y,xdensity=self.density,ydensity=self.density,size=1/freq,orientation=2*numpy.pi/8*orient,phase=p*(numpy.pi/(speed))+numpy.pi/2)())
self.filters.append((g1,g2))
def calculateModelResponse(self,inputs,index):
if self.real_time:
res = []
for (gabor1,gabor2) in self.filters:
r1 = 0
r2 = 0
r=0
l = len(gabor1)
for i in xrange(0,numpy.min([index+1,l])):
r1 += numpy.sum(numpy.multiply(gabor1[l-1-i],inputs[index-i]))
r2 += numpy.sum(numpy.multiply(gabor2[l-1-i],inputs[index-i]))
res.append(numpy.sqrt(r1*r1+r2*r2))
#res.append(r)
#numpy.max([res.append(r1),res.append(r2)])
else:
res = []
for (gabor1,gabor2) in self.filters:
r1 = 0
r2 = 0
r=0
li = len(inputs[index])
l = len(gabor1)
for i in xrange(0,li):
r1 += numpy.sum(numpy.multiply(gabor1[l-1-numpy.mod(i,l)],inputs[index][li-1-i]))
r2 += numpy.sum(numpy.multiply(gabor2[l-1-numpy.mod(i,l)],inputs[index][li-1-i]))
#r += numpy.sqrt(r1*r1+r2*r2)
res.append(numpy.sqrt(r1*r1+r2*r2))
#res.append(r)
return numpy.mat(res)
class BasicBPModelFit(ModelFit):
def init(self):
self.reliable_indecies = ones(self.num_of_units)
import libfann
self.ann = libfann.neural_net()
def calculateModelOutput(self,inputs,index):
import libfann
return numpy.mat(self.ann.run(numpy.array(inputs[index].T))).T
def trainModel(self,inputs,activities,validation_inputs,validation_activities):
import libfann
delta=[]
connection_rate = 1.0
num_input = len(inputs[0])
num_neurons_hidden = numpy.size(activities,1)
num_output = numpy.size(activities,1)
print (num_input,num_neurons_hidden,num_output)
desired_error = 0.000001
max_iterations = 1000
iterations_between_reports = 1
self.ann.create_sparse_array(connection_rate, (num_input, num_neurons_hidden, num_output))
self.ann.set_learning_rate(self.learning_rate)
self.ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC)
train_data = libfann.training_data()
test_data = libfann.training_data()
print shape(inputs)
print shape(activities)
train_data.set_train_dataset(numpy.array(inputs),numpy.array(activities))
test_data.set_train_dataset(numpy.array(validation_inputs),numpy.array(validation_activities))
self.ann.reset_MSE()
self.ann.test_data(test_data)
print "MSE error on test data: %f" % self.ann.get_MSE()
self.ann.reset_MSE()
self.ann.test_data(train_data)
print "MSE error on train data: %f" % self.ann.get_MSE()
self.ann.reset_MSE()
for i in range(0,self.epochs):
e = self.ann.train_epoch(train_data)
self.ann.reset_MSE()
self.ann.test_data(test_data)
print "%d > MSE error on train/test data: %f / %f" % (i,e,self.ann.get_MSE())
self.ann.reset_MSE()
self.ann.test_data(test_data)
print "MSE error on test data: %f" % self.ann.get_MSE()
self.ann.reset_MSE()
def showMotionEnergyPatterns():
topo.sim['Retina']=GeneratorSheet(nominal_density=24.0,
input_generator=SineGrating(),
period=1.0, phase=0.01,
nominal_bounds=BoundingBox(radius=0.5))
mf = MotionModelFit()
mf.retina_diameter = 1.0
mf.density = topo.sim["Retina"].nominal_density
mf.init()
for i in xrange(0,8):
g1,g2 = mf.filters[i]
pylab.figure()
for g in g1:
pylab.imshow(g)
pylab.show._needmain=False
pylab.show()
def calculateReceptiveField(RFs,weights):
RF = numpy.zeros(shape(RFs[0][0]))
i = 0
for (rf1,rf2) in RFs:
RF += weights.T[i,0]*rf1
#RF += weights.T[i,0]*rf2
i+=1
return RF
def generate_pyramid_model(num_or,freqs,num_phase,size):
filters=[]
for freq in freqs:
for orient in xrange(0,num_or):
            # quadrature pair: identical Gabors 90 degrees apart in phase
            g1 = Gabor(bounds=BoundingBox(radius=0.5), frequency=1.0, x=0.0, y=0.0, xdensity=size/freq, ydensity=size/freq, size=0.3, orientation=numpy.pi/num_or*orient, phase=numpy.pi/2)
            g2 = Gabor(bounds=BoundingBox(radius=0.5), frequency=1.0, x=0.0, y=0.0, xdensity=size/freq, ydensity=size/freq, size=0.3, orientation=numpy.pi/num_or*orient, phase=0)
            filters.append((g1(), g2()))
#for p in xrange(0,num_phase):
# g = Gabor(bounds=BoundingBox(radius=0.5),frequency=1.0,x=0.0,y=0.0,xdensity=size/freq,ydensity=size/freq,size=0.3,orientation=numpy.pi/8*orient,phase=p*2*numpy.pi/num_phase)
# filters.append(g())
return filters
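# The (g1, g2) tuples above are quadrature pairs: identical Gabors 90 degrees apart
# in phase. apply_filters below combines each pair as sqrt(a^2 + b^2), the standard
# phase-invariant "energy" response used in complex-cell models.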
def apply_filters(inputs, filters, spacing):
(sizex,sizey) = numpy.shape(inputs[0])
out = []
for i in inputs:
o = []
for f in filters:
(f1,f2) = f
            (s, _) = numpy.shape(f1)
            step = int(s * spacing)
            x = 0
            while (x * step + s) < sizex:
                y = 0  # reset the column index for every row
                while (y * step + s) < sizey:
                    a = numpy.sum(numpy.sum(numpy.multiply(f1, i[x * step:x * step + s, y * step:y * step + s])))
                    b = numpy.sum(numpy.sum(numpy.multiply(f2, i[x * step:x * step + s, y * step:y * step + s])))
                    o.append(numpy.sqrt(a * a + b * b))  # quadrature energy of the pair
                    y += 1
                x += 1
print len(o)
out.append(numpy.array(o))
return numpy.array(out)
def clump_low_responses(dataset,threshold):
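    # zeroes every frame whose response is below (1 + threshold) times the grand mean
    # response across all cells/stimuli/repetitions, keeping only clearly
    # supra-average activity (a reading of the loops below)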
(index,data) = dataset
avg=0
count=0
for cell in data:
for stimuli in cell:
for rep in stimuli:
for frame in rep:
avg+=frame
count+=1
avg = avg/count
for index1, cell in enumerate(data):
for index2, stimulus in enumerate(cell):
for index3, repetition in enumerate(stimulus):
for index4, frame in enumerate(repetition):
if frame>=avg*(1.0+threshold):
repetition[index4]=frame
else:
repetition[index4]=0
return (index,data)
def runModelFit():
density=__main__.__dict__.get('density', 20)
#dataset = loadRandomizedDataSet("Flogl/JAN1/20090707__retinotopy_region1_stationary_testing01_1rep_125stim_ALL",6,15,125,60)
dataset = loadSimpleDataSet("Flogl/DataNov2009/(20090925_14_36_01)-_retinotopy_region2_sequence_50cells_2700images_on_&_off_response",2700,50)
#this dataset has images numbered from 1
(index,data) = dataset
index+=1
dataset=(index,data)
#print shape(dataset[1])
#dataset = clump_low_responses(dataset,__main__.__dict__.get('ClumpMag',0.0))
print shape(dataset[1])
dataset = averageRangeFrames(dataset,0,1)
print shape(dataset[1])
dataset = averageRepetitions(dataset)
print shape(dataset[1])
(testing_data_set,dataset) = splitDataset(dataset,0.015)
(validation_data_set,dataset) = splitDataset(dataset,0.1)
training_set = generateTrainingSet(dataset)
training_inputs=generateInputs(dataset,"/home/antolikjan/topographica/topographica/Flogl/DataOct2009","/20090925_image_list_used/image_%04d.tif",density,1.8,offset=1000)
validation_set = generateTrainingSet(validation_data_set)
validation_inputs=generateInputs(validation_data_set,"/home/antolikjan/topographica/topographica/Flogl/DataOct2009","/20090925_image_list_used/image_%04d.tif",density,1.8,offset=1000)
testing_set = generateTrainingSet(testing_data_set)
testing_inputs=generateInputs(testing_data_set,"/home/antolikjan/topographica/topographica/Flogl/DataOct2009","/20090925_image_list_used/image_%04d.tif",density,1.8,offset=1000)
#print numpy.shape(training_inputs[0])
#compute_spike_triggered_average_rf(training_inputs,training_set,density)
#pylab.figure()
#pylab.imshow(training_inputs[0])
#pylab.show()
#return
if __main__.__dict__.get('NormalizeInputs',True):
avgRF = compute_average_input(training_inputs)
training_inputs = normalize_image_inputs(training_inputs,avgRF)
validation_inputs = normalize_image_inputs(validation_inputs,avgRF)
testing_inputs = normalize_image_inputs(testing_inputs,avgRF)
(x,y)= numpy.shape(training_inputs[0])
training_inputs = cut_out_images_set(training_inputs,int(y*0.4),(int(x*0.1),int(y*0.4)))
validation_inputs = cut_out_images_set(validation_inputs,int(y*0.4),(int(x*0.1),int(y*0.4)))
testing_inputs = cut_out_images_set(testing_inputs,int(y*0.4),(int(x*0.1),int(y*0.4)))
#training_inputs = cut_out_images_set(training_inputs,int(density*0.33),(0,int(density*0.33)))
#validation_inputs = cut_out_images_set(validation_inputs,int(density*0.33),(0,int(density*0.33)))
#testing_inputs = cut_out_images_set(testing_inputs,int(density*0.33),(0,int(density*0.33)))
sizex,sizey=numpy.shape(training_inputs[0])
print sizex,sizey
if __main__.__dict__.get('Gabor',True):
fil = generate_pyramid_model(__main__.__dict__.get('num_or',8),__main__.__dict__.get('freq',[1,2,4]),__main__.__dict__.get('num_phase',8),numpy.min(numpy.shape(training_inputs[0])))
print len(fil)
training_inputs = apply_filters(training_inputs, fil, __main__.__dict__.get('spacing',0.1))
testing_inputs = apply_filters(testing_inputs, fil, __main__.__dict__.get('spacing',0.1))
validation_inputs = apply_filters(validation_inputs, fil, __main__.__dict__.get('spacing',0.1))
else:
training_inputs = generate_raw_training_set(training_inputs)
testing_inputs = generate_raw_training_set(testing_inputs)
validation_inputs = generate_raw_training_set(validation_inputs)
if __main__.__dict__.get('NormalizeActivities',True):
(a,v) = compute_average_min_max(numpy.concatenate((training_set,validation_set),axis=0))
training_set = normalize_data_set(training_set,a,v)
validation_set = normalize_data_set(validation_set,a,v)
testing_set = normalize_data_set(testing_set,a,v)
print shape(training_set)
print shape(training_inputs)
#mf = BasicBPModelFit()
#mf.retina_diameter = 1.2
mf = ModelFit()
mf.density = density
mf.learning_rate = __main__.__dict__.get('lr',0.1)
mf.epochs=__main__.__dict__.get('epochs',1000)
mf.num_of_units = 50
mf.init()
pylab.hist(training_set.flatten())
(err,stop,min_errors) = mf.trainModel(mat(training_inputs),numpy.mat(training_set),mat(validation_inputs),numpy.mat(validation_set))
print "\nStop criterions", stop
print "\nNon-zero stop criterions",numpy.nonzero(stop)
print "\nMinimal errors per cell",numpy.nonzero(min_errors)
print "Model test with all neurons"
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
mf.testModelBiased(mat(testing_inputs),numpy.mat(testing_set),0.1)
mf.testModelBiased(mat(testing_inputs),numpy.mat(testing_set),0.3)
mf.testModelBiased(mat(testing_inputs),numpy.mat(testing_set),0.6)
mf.testModelBiased(mat(testing_inputs),numpy.mat(testing_set),1.0)
mf.testModelBiased(mat(testing_inputs),numpy.mat(testing_set),2.0)
mf.testModelBiased(mat(testing_inputs),numpy.mat(testing_set),3.0)
mf.testModelBiased(mat(testing_inputs),numpy.mat(testing_set),4.0)
print "Model test with double weights"
mf.weigths*=2.0
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
mf.weigths/=2.0
print "Model test on validation inputs"
mf.testModel(mat(validation_inputs),numpy.mat(validation_set))
#print "Model test on training inputs"
#mf.testModel(mat(training_inputs),mat(training_set))
mf.calculateReliabilities(mat(testing_inputs),numpy.mat(testing_set),95)
print "95: " , mf.reliable_indecies
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
mf.calculateReliabilities(mat(testing_inputs),numpy.mat(testing_set),90)
print "90: " , mf.reliable_indecies
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
mf.calculateReliabilities(mat(testing_inputs),numpy.mat(testing_set),50)
print "50: " , mf.reliable_indecies
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
mf.calculateReliabilities(mat(testing_inputs),numpy.mat(testing_set),40)
print "40: " , mf.reliable_indecies
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
mf.calculateReliabilities(mat(testing_inputs),numpy.mat(testing_set),30)
print "30: " , mf.reliable_indecies
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
mf.calculateReliabilities(mat(testing_inputs),numpy.mat(testing_set),20)
print "20: " , mf.reliable_indecies
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
mf.reliable_indecies=(stop>=100.0)*1.0
print mf.reliable_indecies
mf.testModel(mat(testing_inputs),numpy.mat(testing_set))
lookForCorrelations(mf, numpy.mat(training_set),numpy.mat(training_inputs))
lookForCorrelations(mf, numpy.mat(validation_set),numpy.mat(validation_inputs))
pylab.show()
return (mf,mat(testing_inputs),mat(testing_set))
def showRF(mf,indexes,x,y):
pylab.figure()
pylab.show._needmain=False
#pylab.subplot(9,7,1)
print numpy.min(numpy.min(mf.weigths))
print numpy.max(numpy.max(mf.weigths))
for i in indexes:
pylab.subplot(9,7,i+1)
w = mf.weigths[i].reshape(x,y)
pylab.imshow(w,vmin=numpy.min(mf.weigths[i]),vmax=numpy.max(mf.weigths[i]),cmap=pylab.cm.RdBu)
pylab.show()
def analyseDataSet(data_set):
    # plot the responses of the first ten cells across all stimuli
    (index, a) = data_set  # assumes the (index, data) tuple used throughout this file
    num_im = numpy.shape(a)[1]
    for z in xrange(0, 10):
        pylab.figure()
        pylab.plot(numpy.arange(0, num_im, 1), a[z], 'bo')
    pylab.show()
def set_fann_dataset(td,inputs,outputs):
import os
f = open("./tmp.txt",'w')
f.write(str(len(inputs))+" "+str(size(inputs[0],1))+" "+ str(size(outputs,1)) + "\n")
    for i in range(len(inputs)):
        for j in range(size(inputs[0], 1)):
            f.write(str(inputs[i][0][j]) + ' ')  # FANN training files are whitespace-separated
        f.write('\n')
        for j in range(size(outputs, 1)):
            f.write(str(outputs[i, j]) + ' ')
        f.write('\n')
f.close()
td.read_train_from_file("./tmp.txt")
def regulerized_inverse_rf(inputs,activities,sizex,sizey,alpha,validation_inputs,validation_activities,dd,display=False):
p = len(inputs[0])
np = len(activities[0])
inputs = numpy.mat(inputs)
activities = numpy.mat(activities)
validation_inputs = numpy.mat(validation_inputs)
validation_activities = numpy.mat(validation_activities)
S = numpy.mat(inputs).copy()
for x in xrange(0,sizex):
for y in xrange(0,sizey):
norm = numpy.mat(numpy.zeros((sizex,sizey)))
norm[x,y]=4
if x > 0:
norm[x-1,y]=-1
if x < sizex-1:
norm[x+1,y]=-1
if y > 0:
norm[x,y-1]=-1
if y < sizey-1:
norm[x,y+1]=-1
S = numpy.concatenate((S,alpha*norm.flatten()),axis=0)
activities_padded = numpy.concatenate((activities,numpy.mat(numpy.zeros((sizey*sizex,np)))),axis=0)
Z = numpy.linalg.pinv(S)*activities_padded
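    # This is ridge regression with a smoothness prior: each alpha*norm row appended
    # above is one row of a discrete Laplacian L, so the pseudoinverse of the
    # augmented system solves min_w ||inputs*w - activities||^2 + alpha^2*||L*w||^2,
    # penalising non-smooth receptive fields.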
Z=Z.T
ma = numpy.max(numpy.max(Z))
mi = numpy.min(numpy.min(Z))
m = max([abs(ma),abs(mi)])
RFs=[]
of = run_nonlinearity_detection(activities,inputs*Z.T,10,display)
predicted_activities = inputs*Z.T
validation_predicted_activities = validation_inputs*Z.T
tf_predicted_activities = apply_output_function(predicted_activities,of)
tf_validation_predicted_activities = apply_output_function(validation_predicted_activities,of)
errors = numpy.sum(numpy.power(validation_activities - validation_predicted_activities,2),axis=0)
tf_errors = numpy.sum(numpy.power(validation_activities - tf_validation_predicted_activities,2),axis=0)
mean_mat = numpy.array(numpy.mean(validation_activities,axis=1).T)[0]
corr_coef=[]
corr_coef_tf=[]
for i in xrange(0,np):
corr_coef.append(numpy.corrcoef(validation_activities[:,i].T, validation_predicted_activities[:,i].T)[0][1])
corr_coef_tf.append(numpy.corrcoef(validation_activities[:,i].T, tf_validation_predicted_activities[:,i].T)[0][1])
for i in xrange(0,np):
RFs.append(numpy.array(Z[i]).reshape(sizex,sizey))
av=[]
for i in xrange(0,np):
av.append(numpy.sqrt(numpy.sum(numpy.power(Z[i],2))))
if display:
pylab.figure()
pylab.title(str(alpha), fontsize=16)
for i in xrange(0,np):
pylab.subplot(10,11,i+1)
w = numpy.array(Z[i]).reshape(sizex,sizey)
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
pylab.figure()
pylab.title("relationship", fontsize=16)
for i in xrange(0,np):
pylab.subplot(10,11,i+1)
pylab.plot(validation_predicted_activities.T[i],validation_activities.T[i],'ro')
pylab.figure()
pylab.title("relationship_tf", fontsize=16)
for i in xrange(0,np):
pylab.subplot(10,11,i+1)
pylab.plot(numpy.mat(tf_validation_predicted_activities).T[i],validation_activities.T[i],'ro')
pylab.figure()
pylab.title(str(alpha), fontsize=16)
for i in xrange(0,np):
pylab.subplot(10,11,i+1)
w = numpy.array(Z[i]).reshape(sizex,sizey)
pylab.show._needmain=False
m = numpy.max([abs(numpy.min(numpy.min(w))),abs(numpy.max(numpy.max(w)))])
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
# prediction
act_var = numpy.sum(numpy.power(validation_activities-array([mean_mat,]*np).T,2),axis=0)
normalized_errors = 1-numpy.array(errors / act_var)[0]
tf_normalized_errors = 1-numpy.array(tf_errors / act_var)[0]
error = numpy.mean(errors)
normalized_error = numpy.mean(normalized_errors)
(rank,correct,tr) = performIdentification(validation_activities,validation_predicted_activities)
(tf_rank,tf_correct,tf_tr) = performIdentification(validation_activities,tf_validation_predicted_activities)
if display:
pylab.figure()
pylab.hist(av)
pylab.xlabel("rf_magnitued")
pylab.figure()
print shape(av)
print shape(normalized_errors)
pylab.plot(av,normalized_errors,'ro')
pylab.xlabel("rf_magnitued")
pylab.ylabel("normalized error")
pylab.figure()
pylab.plot(av,tf_normalized_errors,'ro')
pylab.xlabel("rf_magnitued")
pylab.ylabel("tf_normalized error")
pylab.figure()
pylab.hist(normalized_errors)
pylab.xlabel("normalized_errors")
pylab.figure()
pylab.hist(tf_normalized_errors)
pylab.xlabel("tf_normalized_errors")
pylab.figure()
pylab.hist(corr_coef)
pylab.xlabel("Correlation coefficient")
pylab.figure()
pylab.hist(corr_coef_tf)
pylab.xlabel("Correlation coefficient with transfer function")
#saving section
dd.add_data("ReversCorrelationRFs",RFs,force=True)
dd.add_data("ReversCorrelationCorrectPercentage",correct*1.0 / len(validation_inputs)* 100,force=True)
dd.add_data("ReversCorrelationTFCorrectPercentage",tf_correct*1.0 / len(validation_inputs) *100,force=True)
dd.add_data("ReversCorrelationPredictedActivities",predicted_activities,force=True)
dd.add_data("ReversCorrelationPredictedActivities+TF",tf_predicted_activities,force=True)
dd.add_data("ReversCorrelationPredictedValidationActivities",validation_predicted_activities,force=True)
dd.add_data("ReversCorrelationPredictedValidationActivities+TF",tf_validation_predicted_activities,force=True)
dd.add_data("ReversCorrelationNormalizedErrors",normalized_errors,force=True)
dd.add_data("ReversCorrelationNormalizedErrors+TF",tf_normalized_errors,force=True)
dd.add_data("ReversCorrelationCorrCoefs",corr_coef,force=True)
dd.add_data("ReversCorrelationCorrCoefs+TF",corr_coef_tf,force=True)
dd.add_data("ReversCorrelationTransferFunction",of,force=True)
dd.add_data("ReversCorrelationRFMagnitude",av,force=True)
print "Correct:", correct ," out of ", len(validation_inputs), " percentage:", correct*1.0 / len(validation_inputs)* 100 ,"%"
print "TFCorrect:", tf_correct, " out of ", len(validation_inputs), " percentage:", tf_correct*1.0 / len(validation_inputs) *100 ,"%"
print "Normalized_error:", normalized_error
return (normalized_errors,tf_normalized_errors,correct,tf_correct,RFs,predicted_activities,validation_predicted_activities,corr_coef,corr_coef_tf)
def run_nonlinearity_detection(activities,predicted_activities,num_bins=20,display=False):
(num_act,num_neurons) = numpy.shape(activities)
os = []
if display:
pylab.figure()
for i in xrange(0,num_neurons):
min_pact = numpy.min(predicted_activities[:,i])
max_pact = numpy.max(predicted_activities[:,i])
bins = numpy.arange(0,num_bins+1,1)/(num_bins*1.0)*(max_pact-min_pact) + min_pact
bins[-1]+=0.000001
ps = numpy.zeros(num_bins)
pss = numpy.zeros(num_bins)
for j in xrange(0,num_act):
            bin = max(numpy.nonzero(bins >= predicted_activities[j, i])[0][0] - 1, 0)  # clamp so the minimum sample lands in the first bin
ps[bin]+=1
pss[bin]+=activities[j,i]
idx = numpy.nonzero(ps==0)
ps[idx]=1.0
tf = pss/ps
tf[idx]=0.0
if display:
pylab.subplot(13,13,i+1)
#pylab.plot(bins[0:-1],ps)
#pylab.plot(bins[0:-1],pss)
pylab.plot(bins[0:-1],tf)
os.append((bins,tf))
return os
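# Minimal usage sketch (variable names are illustrative): estimate each unit's
# static output nonlinearity from binned predicted-vs-measured activity, then
# linearise the predictions with it:
#   ofs = run_nonlinearity_detection(numpy.mat(acts), numpy.mat(pred), 10)
#   pred_tf = apply_output_function(numpy.mat(pred), ofs)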
def apply_output_function(activities,of):
(x,y) = numpy.shape(activities)
acts = numpy.zeros(numpy.shape(activities))
for i in xrange(0,x):
for j in xrange(0,y):
(bins,tf) = of[j]
if activities[i,j] >= numpy.max(bins):
acts[i,j] = tf[-1]
elif activities[i,j] <= numpy.min(bins):
acts[i,j] = tf[0]
else:
bin = numpy.nonzero(bins>=activities[i,j])[0][0]-1
# do linear interpolation
a = bins[bin]
b = bins[bin+1]
alpha = (activities[i,j]-a)/(b-a)
if bin!=0:
c = (tf[bin]+tf[bin-1])/2
else:
c = tf[bin]
if bin!=len(tf)-1:
d = (tf[bin]+tf[bin+1])/2
else:
d = tf[bin]
acts[i,j] = c + (d-c)* alpha
return acts
def fit_sigmoids_to_of(activities,predicted_activities,offset=True,display=True):
(num_in,num_ne) = numpy.shape(activities)
from scipy import optimize
rand =numbergen.UniformRandom(seed=513)
if display:
pylab.figure()
fitfunc = lambda p, x: (offset*p[2])+p[3] / (1 + numpy.exp(-p[0]*(x-p[1]))) # Target function
errfunc = lambda p,x, y: numpy.mean(numpy.power(fitfunc(p, x) - y,2)) # Distance to the target function
params=[]
for i in xrange(0,num_ne):
min_err = 10e10
best_p = 0
for j in xrange(0,100):
p0 = [20*rand(),10*(rand()-0.5),20*(rand()-0.5),10*rand()]
(p,success,c)=optimize.fmin_tnc(errfunc,p0[:],bounds=[(0,20),(-5,5),(-10,10),(0,10)],args=(numpy.array(predicted_activities[:,i].T)[0],numpy.array(activities[:,i].T)[0]),approx_grad=True,messages=0)
err = errfunc(p,numpy.array(predicted_activities[:,i].T)[0],numpy.array(activities[:,i].T)[0])
            if err < min_err:
                min_err = err
                best_p = p
        params.append(best_p)
if display:
pylab.subplot(13,13,i+1)
pylab.plot(numpy.array(predicted_activities[:,i].T)[0],numpy.array(activities[:,i].T)[0],'go')
pylab.plot(numpy.array(predicted_activities[:,i].T)[0],fitfunc(best_p,numpy.array(predicted_activities[:,i].T)[0]),'bo')
return params
def fit_exponential_to_of(activities,predicted_activities,offset=True,display=True):
(num_in,num_ne) = numpy.shape(activities)
from scipy import optimize
pylab.figure()
fitfunc = lambda p, x: offset*p[0] + p[1] * numpy.exp(p[2]*(x-p[3])) # Target function
errfunc = lambda p,x, y: numpy.mean(numpy.power(fitfunc(p, x) - y,2)) # Distance to the target function
params=[]
for i in xrange(0,num_ne):
p0 = [0.0,1.0,0.1,0.0] # Initial guess for the parameters
(p,success,c)=optimize.fmin_tnc(errfunc,p0[:],bounds=[(-20,20),(-10,10),(0,10),(-5,5)],args=(numpy.array(predicted_activities[:,i].T)[0],numpy.array(activities[:,i].T)[0]),approx_grad=True,messages=0)
params.append(p)
if display:
pylab.subplot(13,13,i+1)
pylab.plot(numpy.array(predicted_activities[:,i].T)[0],numpy.array(activities[:,i].T)[0],'go')
pylab.plot(numpy.array(predicted_activities[:,i].T)[0],fitfunc(p,numpy.array(predicted_activities[:,i].T)[0]),'bo')
return params
def fit_power_to_of(activities,predicted_activities,display=True):
(num_in,num_ne) = numpy.shape(activities)
from scipy import optimize
pylab.figure()
fitfunc = lambda p, x: p[0] + p[1] * numpy.power(x,p[2]) # Target function
errfunc = lambda p,x, y: numpy.mean(numpy.power(fitfunc(p, x) - y,2)) # Distance to the target function
params=[]
for i in xrange(0,num_ne):
p0 = [0.0,1.0,-0.5] # Initial guess for the parameters
(p,success,c)=optimize.fmin_tnc(errfunc,p0[:],bounds=[(-20,20),(-1,1),(-1,2)],args=(numpy.array(predicted_activities[:,i].T)[0],numpy.array(activities[:,i].T)[0]),approx_grad=True,messages=0)
params.append(p)
if display:
pylab.subplot(13,13,i+1)
pylab.plot(numpy.array(predicted_activities[:,i].T)[0],numpy.array(activities[:,i].T)[0],'go')
pylab.plot(numpy.array(predicted_activities[:,i].T)[0],fitfunc(p,numpy.array(predicted_activities[:,i].T)[0]),'bo')
return params
def apply_sigmoid_output_function(activities,of,offset=True):
sig = lambda p, x: (offset*p[2]) + p[3] * 1 / (1 + numpy.exp(-p[0]*(x-p[1])))
(x,y) = numpy.shape(activities)
new_acts = numpy.zeros((x,y))
for i in xrange(0,y):
new_acts[:,i] = sig(of[i],numpy.array(activities[:,i].T)[0]).T
return new_acts
def apply_exponential_output_function(activities,of,offset=True):
sig = lambda p, x: offset*p[0] + p[1] * numpy.exp(p[2]*(x-p[3]))
(x,y) = numpy.shape(activities)
new_acts = numpy.zeros((x,y))
for i in xrange(0,y):
new_acts[:,i] = sig(of[i],numpy.array(activities[:,i].T)[0]).T
return new_acts
def apply_power_output_function(activities,of):
sig = lambda p, x: p[0] + p[1] * numpy.power(x,p[2])
(x,y) = numpy.shape(activities)
new_acts = numpy.zeros((x,y))
for i in xrange(0,y):
new_acts[:,i] = sig(of[i],numpy.array(activities[:,i].T)[0]).T
return new_acts
def later_interaction_prediction(activities,predicted_activities,validation_activities,validation_predicted_activities,raw_validation_set,node,display=True):
(num_pres,num_neurons) = numpy.shape(activities)
cor_orig = numpy.zeros((num_neurons,num_neurons))
cor = numpy.zeros((num_neurons,num_neurons))
residues = activities - predicted_activities
for i in xrange(0,num_neurons):
for j in xrange(0,num_neurons):
cor[i,j] = numpy.corrcoef(numpy.array(residues[:,i].T),numpy.array(residues[:,j].T))[0][1]
pylab.figure()
pylab.imshow(cor,vmin=-0.1,vmax=0.5,interpolation='nearest')
pylab.colorbar()
for i in xrange(0,num_neurons):
for j in xrange(0,num_neurons):
cor_orig[i,j] = numpy.corrcoef(numpy.array(activities[:,i].T),numpy.array(activities[:,j].T))[0][1]
pylab.figure()
pylab.imshow(cor_orig,vmin=-0.1,vmax=0.5,interpolation='nearest')
pylab.colorbar()
mf = ModelFit()
mf.learning_rate = __main__.__dict__.get('lr',0.005)
mf.epochs=__main__.__dict__.get('epochs',4000)
mf.num_of_units = num_neurons
mf.init()
#pylab.figure()
#print "Weight shape",numpy.shape(mf.weigths)
#pylab.imshow(numpy.array(mf.weigths),vmin=-1.0,vmax=1.0,interpolation='nearest')
#pylab.colorbar()
    n_train = int(num_pres * 0.9)  # slice indices must be integers
    (err, stop, min_errors) = mf.trainModel(numpy.mat(activities[0:n_train]), mat(predicted_activities[0:n_train]), numpy.mat(activities[n_train:-1]), mat(predicted_activities[n_train:-1]))
    print "\nStop criteria", stop
new_activities = mf.returnPredictedActivities(mat(activities))
new_validation_activities = mf.returnPredictedActivities(mat(validation_activities))
new_raw_validation_set = []
for r in raw_validation_set:
new_raw_validation_set.append(mf.returnPredictedActivities(mat(r)))
ofs = fit_sigmoids_to_of(numpy.mat(new_activities),numpy.mat(predicted_activities))
predicted_activities_t = apply_sigmoid_output_function(numpy.mat(predicted_activities),ofs)
validation_predicted_activities_t = apply_sigmoid_output_function(numpy.mat(validation_predicted_activities),ofs)
if display:
pylab.figure()
print "Weight shape",numpy.shape(mf.weigths)
pylab.imshow(numpy.array(mf.weigths),vmin=-numpy.max(numpy.abs(mf.weigths)),vmax=numpy.max(numpy.abs(mf.weigths)),interpolation='nearest')
pylab.colorbar()
#print numpy.sum(mf.weigths,axis=0)
print numpy.sum(mf.weigths,axis=1)
pylab.figure()
pylab.title("model_relationship", fontsize=16)
for i in xrange(0,num_neurons):
pylab.subplot(13,13,i+1)
pylab.plot(numpy.mat(validation_activities).T[i],numpy.mat(validation_predicted_activities).T[i],'ro')
pylab.figure()
pylab.title("model_relationship", fontsize=16)
for i in xrange(0,num_neurons):
pylab.subplot(13,13,i+1)
pylab.plot(new_validation_activities.T[i],numpy.mat(validation_predicted_activities).T[i],'ro')
(ranks,correct,pred) = performIdentification(validation_activities,validation_predicted_activities)
print "ORIGINAL> Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_activities - validation_predicted_activities,2))
print '\n\nWithout TF'
(ranks,correct,pred) = performIdentification(new_validation_activities,validation_predicted_activities)
print "LATER> Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(new_validation_activities - validation_predicted_activities,2))
print '\n\nWith TF'
(ranks,correct,pred) = performIdentification(new_validation_activities,validation_predicted_activities_t)
print "LATER> Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(new_validation_activities - validation_predicted_activities_t,2))
raw_validation_data_set=numpy.rollaxis(numpy.array(new_raw_validation_set),2)
signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power = signal_power_test(raw_validation_data_set, numpy.array(new_activities), numpy.array(new_validation_activities), predicted_activities, validation_predicted_activities)
signal_power,noise_power,normalized_noise_power,training_prediction_power_t,validation_prediction_power_t = signal_power_test(raw_validation_data_set, numpy.array(new_activities), numpy.array(new_validation_activities), predicted_activities_t, validation_predicted_activities_t)
print "Prediction power on training set / validation set: ", numpy.mean(training_prediction_power*(training_prediction_power>0)) , " / " , numpy.mean(validation_prediction_power*(validation_prediction_power>0))
print "Prediction power after TF on training set / validation set: ", numpy.mean(training_prediction_power_t*(training_prediction_power_t>0)) , " / " , numpy.mean(validation_prediction_power_t*(validation_prediction_power_t>0))
node.add_data("LaterReversCorrelationPredictedActivities+TF",predicted_activities_t,force=True)
node.add_data("LaterReversCorrelationPredictedValidationActivities+TF",validation_predicted_activities_t,force=True)
node.add_data("LaterTrainingSet",new_activities,force=True)
node.add_data("LaterValidationSet",new_validation_activities,force=True)
node.add_data("LaterModel",mf,force=True)
return (new_activities,new_validation_activities)
def runRFinference():
d = contrib.dd.loadResults("results.dat")
(sizex,sizey,training_inputs,training_set,validation_inputs,validation_set,ff,db_node) = sortOutLoading(d)
e = []
c = []
b = []
if False:
x = 0.0
for i in xrange(1,10):
print i
x = 0.003*i
params={}
params["alpha"] = __main__.__dict__.get('Alpha',x)
db_node = db_node.get_child(params)
(e1,te1,c1,tc1,RFs,pa,pva,corr_coef,corr_coef_tf) = regulerized_inverse_rf(training_inputs,training_set,sizex,sizey,x,validation_inputs,validation_set,db_node,True)
e.append(e1)
c.append(c1)
b.append(x)
#x = x*2
pylab.figure()
#pylab.semilogx(b,e)
pylab.plot(b,numpy.mat(e))
pylab.figure()
#pylab.semilogx(b,c)
pylab.plot(b,c)
#f = open("results.dat",'wb')
#pickle.dump(d,f,-2)
#f.close()
return (e,c,b)
params={}
params["alpha"] = __main__.__dict__.get('Alpha',0.02)
db_node1 = db_node
db_node = db_node.get_child(params)
if False:
alphas=[120, 290, 50, 240, 290, 260, 120, 100, 290, 130, 290, 290, 230,170, 120, 190, 290, 100, 140, 290, 290, 60, 290, 290, 80, 210,50, 250, 170, 290, 290, 290, 60, 290, 290, 60, 260, 290, 290,290, 60, 90, 290, 120, 290, 80, 270, 120, 290, 290]
RFs = []
e = []
c = []
te = []
tc = []
        pa = []
        pva = []
for i in xrange(0,len(alphas)):
print numpy.shape(training_set)
print numpy.shape(training_set[:,i:i+1])
(e1,te1,c1,tc1,RF,pa1,pva1,corr_coef,corr_coef_tf) = regulerized_inverse_rf(training_inputs,training_set[:,i:i+1],sizex,sizey,alphas[i],validation_inputs,validation_set[:,i:i+1],False)
print numpy.shape(RF)
RFs.append(RF[0])
e.append(e1)
c.append(c1)
te.append(te1)
tc.append(tc1)
pa.append(pa1)
pva.append(pva1)
pylab.figure()
pylab.hist(e)
pylab.figure()
pylab.hist(c)
pylab.figure()
pylab.hist(te)
pylab.figure()
pylab.hist(tc)
return (e,te,c,tc,RFs,pa,pva)
(e,te,c,tc,RFs,pa,pva,corr_coef,corr_coef_tf) = regulerized_inverse_rf(training_inputs,training_set,sizex,sizey,params["alpha"],validation_inputs,validation_set,db_node,True)
pylab.figure()
pylab.xlabel("fano factor")
pylab.ylabel("normalized error")
pylab.plot(ff,e,'ro')
pylab.figure()
pylab.xlabel("fano factor")
pylab.ylabel("tf normalized error")
pylab.plot(ff,te,'ro')
pylab.figure()
pylab.xlabel("fano factor")
pylab.ylabel("correlation coef")
pylab.plot(ff,corr_coef,'ro')
pylab.figure()
pylab.xlabel("fano factor")
pylab.ylabel("correlation coef after transfer function")
pylab.plot(ff,corr_coef_tf,'ro')
contrib.dd.saveResults(d,"results.dat")
pylab.show()
return (training_set,pa,validation_set,pva)
def runRFFftInference():
f = open("results.dat",'rb')
import pickle
d = pickle.load(f)
f.close()
(sizex,sizey,training_inputs,training_set,validation_inputs,validation_set,ff,db_node) = sortOutLoading(d)
params={}
params["FFTalpha"] = __main__.__dict__.get('Alpha',50)
db_node1 = db_node
db_node = db_node.get_child(params)
# turn inputs into fft domain
sx,sy = numpy.shape(training_set)
new_training_inputs = numpy.zeros(numpy.shape(training_inputs))
new_validation_inputs = numpy.zeros(numpy.shape(validation_inputs))
fft_norm=numpy.zeros((sizex,sizey))
#for i in xrange(0,sx):
# fft_norm += numpy.abs(numpy.fft.fft2(numpy.reshape(training_inputs[i,:],(sizex,sizey))).flatten())
#fft_norm/=sx
for i in xrange(0,sizex):
        for j in xrange(0, sizey):
            if i - 1 == sizex / 2 and j - 1 == sizey / 2:
                fft_norm[i, j] = 1
            else:
                fft_norm[i, j] = 1.0 / numpy.power((i - 1 - sizex / 2) ** 2 + (j - 1 - sizey / 2) ** 2, 2)
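    # fft_norm falls off as 1/f^4 with radial distance from the (shifted) DC
    # component, so dividing the amplitude spectra by it re-amplifies high spatial
    # frequencies; the exponent 2 on the squared radius is this script's choice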
print fft_norm
for i in xrange(0,sx):
new_training_inputs[i,:] = numpy.divide(numpy.abs(numpy.fft.fft2(numpy.reshape(training_inputs[i,:],(sizex,sizey)))), fft_norm).flatten()
#new_training_inputs[i,:] = numpy.fft.fft2(numpy.reshape(training_inputs[i,:],(sizex,sizey))).flatten()
for i in xrange(0,50):
new_validation_inputs[i,:] = numpy.divide(numpy.abs(numpy.fft.fft2(numpy.reshape(validation_inputs[i,:],(sizex,sizey)))), fft_norm).flatten()
#new_validation_inputs[i,:] = numpy.fft.fft2(numpy.reshape(validation_inputs[i,:],(sizex,sizey))).flatten()
print params["FFTalpha"]
(e,te,c,tc,RFs,pa,pva,corr_coef,corr_coef_tf) = regulerized_inverse_rf(new_training_inputs,training_set,sizex,sizey,params["FFTalpha"],new_validation_inputs,validation_set,db_node,True)
ofs = run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(pa),10,display=True)
pa_t = apply_output_function(numpy.mat(pa),ofs)
pva_t = apply_output_function(numpy.mat(pva),ofs)
(ranks,correct,pred) = performIdentification(validation_set,pva)
print "Direct Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pva,2))
(ranks,correct,pred) = performIdentification(validation_set,pva_t)
print "Direct Correct+TF:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pva_t,2))
(ranks,correct,pred) = performIdentification(training_set,pa_t)
print "Direct Correct+TF:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(training_set - pa_t,2))
f = open("results.dat",'wb')
pickle.dump(d,f,-2)
f.close()
for i in xrange(0,sx):
for j in xrange(0,sy):
z = numpy.multiply(numpy.reshape(new_training_inputs[i,:],(sizex,sizey)),RFs[j])
pa[i,j] = numpy.mean(numpy.power(numpy.fft.ifft2(z),2))
for i in xrange(0,50):
for j in xrange(0,sy):
z = numpy.multiply(numpy.reshape(new_validation_inputs[i,:],(sizex,sizey)),RFs[j])
pva[i,j] = numpy.mean(numpy.power(numpy.fft.ifft2(z),2))
ofs = run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(pa),10,display=True)
pa_t = apply_output_function(numpy.mat(pa),ofs)
pva_t = apply_output_function(numpy.mat(pva),ofs)
(ranks,correct,pred) = performIdentification(validation_set,pva)
print "Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pva,2))
(ranks,correct,pred) = performIdentification(validation_set,pva_t)
print "Correct+TF:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pva_t,2))
return (training_set,pa,validation_set,pva)
def compute_spike_triggered_average_rf(inputs,activities,density):
(num_inputs,num_activities) = shape(activities)
RFs = [numpy.zeros(shape(inputs[0])) for j in xrange(0,num_activities)]
avgRF = numpy.zeros(shape(inputs[0]))
for i in xrange(0,num_inputs):
for j in xrange(0,num_activities):
RFs[j] += activities[i][j]*inputs[i]
for i in inputs:
avgRF += i
avgRF = avgRF/(num_inputs*1.0)
activity_avg = numpy.zeros((num_activities,1))
for z in xrange(0,num_activities):
activity_avg[z] = numpy.sum(activities.T[z])
activity_avg = activity_avg.T[0]
pylab.figure()
for j in xrange(0,10):
fig = pylab.figure()
pylab.show._needmain=False
pylab.subplot(1,5,1)
RFs[j]/= activity_avg[j]
pylab.imshow(RFs[j],vmin=numpy.min(RFs[j]),vmax=numpy.max(RFs[j]))
pylab.colorbar()
pylab.subplot(1,5,2)
pylab.imshow(RFs[j] - avgRF,vmin=numpy.min(RFs[j]- avgRF),vmax=numpy.max(RFs[j]- avgRF))
pylab.colorbar()
pylab.subplot(1,5,3)
pylab.imshow(RFs[j]/avgRF,vmin=numpy.min(RFs[j]/avgRF),vmax=numpy.max(RFs[j]/avgRF))
pylab.colorbar()
pylab.subplot(1,5,4)
pylab.imshow(avgRF,vmin=numpy.min(avgRF),vmax=numpy.max(avgRF))
pylab.colorbar()
pylab.show()
#w = mf.weigths[j].reshape(density*1.2,density*1.2)
#pylab.subplot(1,5,5)
#pylab.imshow(w,vmin=numpy.min(w),vmax=numpy.max(w))
#pylab.colorbar()
#
def analyze_rf_possition(w,level):
import matplotlib
from matplotlib.patches import Circle
a= pylab.figure().gca()
(sx,sy) = numpy.shape(w[0])
X = numpy.tile(numpy.arange(0,sx,1),(sy,1))
Y = numpy.tile(numpy.arange(0,sy,1),(sx,1)).T
cgs = []
RFs=[]
for i in xrange(0,len(w)):
pylab.subplot(15,15,i+1)
mi=numpy.min(numpy.min(w[i]))
ma=numpy.max(numpy.max(w[i]))
#z = ((w[i]<=(mi-mi*level))*1.0) * w[i] + ((w[i]>=(ma-ma*level))*1.0) * w[i]
z = w[i] * (numpy.abs(w[i])>= numpy.max(numpy.abs(w[i]))*(1-level))
RFs.append(z)
cgx = numpy.sum(numpy.multiply(X,numpy.power((abs(z)>0.0)*1.0,2)))/numpy.sum(numpy.power((abs(z)>0.0)*1.0,2))
cgy = numpy.sum(numpy.multiply(Y,numpy.power((abs(z)>0.0)*1.0,2)))/numpy.sum(numpy.power((abs(z)>0.0)*1.0,2))
cgs.append((cgx,cgy))
r = numpy.max([numpy.abs(numpy.min(numpy.min(z))),numpy.abs(numpy.max(numpy.max(z)))])
cir = Circle( (cgx,cgy), radius=1)
pylab.gca().add_patch(cir)
pylab.show._needmain=False
pylab.imshow(z,vmin=-r,vmax=r,cmap=pylab.cm.RdBu)
pylab.show()
return (cgs,RFs)
def fitGabor(weights):
from matplotlib.patches import Circle
from scipy.optimize import leastsq,fmin,fmin_tnc,anneal
from topo.base.arrayutil import array_argmax
#(x,y) = numpy.shape(weights[0])
#weights = cut_out_images_set(weights,int(y*0.49),(int(x*0.1),int(y*0.4)))
(denx,deny) = numpy.shape(weights[0])
centers,RFs = analyze_rf_possition(weights,0.5)
RFs=weights
# determine frequency
freqor = []
for w in weights:
ff = pylab.fftshift(pylab.fft2(w))
(x,y) = array_argmax(numpy.abs(ff))
        (n, _) = shape(ff)
freq = numpy.sqrt((x - n/2)*(x - n/2) + (y - n/2)*(y - n/2))
#phase = numpy.arctan(ff[x,y].imag/ff[x,y].real)
phase = numpy.angle(ff[x,y])
if (x - n/2) != 0:
orr = numpy.arctan((y - n/2.0)/(x - n/2.0))
else:
orr = numpy.pi/2
if orr < 0:
orr = orr+numpy.pi
#if phase < 0:
# phase = phase+2*numpy.pi
freqor.append((freq,orr,phase))
parameters=[]
errors = []
variances = []
for j in xrange(0,len(RFs)):
        print 'neuron', j
minf = 0
x = centers[j][0]
y = centers[j][1]
#pylab.figure()
#gab([x,y,0.2,freqor[j][1],freqor[j][0],freqor[j][2],1.0,0.001],weights[j]/numpy.sum(numpy.abs(weights[j])),display=True)
rand =numbergen.UniformRandom(seed=513)
min_x = []
min_err = 100000000000000
for r in xrange(0,30):
print 'rep',r
x0 = [x,y,4,freqor[j][1],freqor[j][0]/denx,freqor[j][2],1.0,0.0002]
#pylab.figure()
#pylab.imshow(RFs[0])
#gab(x0,RFs[0],display=True)
#return
x1 = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
x1[0] = x0[0] + 2.0*(rand()-0.5)*0.15*denx
x1[1] = x0[1] + 2.0*(rand()-0.5)*0.15*deny
x1[2] = rand()*0.2*denx
x1[3] = rand()*numpy.pi#x0[3]+2.0*(rand()-0.5)*(numpy.pi/4)
x1[4] = x0[4]*(rand()*2)
x1[5] = rand()*numpy.pi
x1[6] = 0.3 + rand()*3.7
x1[7] = rand()*x0[7]
(z,b,c) = fmin_tnc(gab,x1,bounds=[(x-denx*0.3,x+denx*0.3) ,(y-deny*0.3,y+deny*0.3),(1.0,denx*0.5),(0.0,numpy.pi),(minf,freqor[j][0]/denx*3),(0,numpy.pi*2),(0.3,4.0),(0.0,0.1)],args=[weights[j]], xtol=0.0000000001,scale=[0.5,0.5,0.5,2.0,0.5,2.0,2.0,2.0],maxCGit=1000, ftol=0.0000000000001,approx_grad=True,maxfun=10000,eta=0.01,messages=0)
e = gab(z,weights[j],display=False)
if(e < min_err):
min_err = e
min_x = z
#pylab.figure()
#gab(min_x,weights[j],display=True)
errors.append(min_err/(denx*deny))
        variances.append(numpy.var(weights[j]))  # one variance per neuron
parameters.append(min_x)
pylab.figure()
pylab.hist(numpy.array(errors)/numpy.array(variances))
pylab.xlabel('Fraction of unexplained variance')
pylab.ylabel('# Cells')
pylab.figure()
(x,y,sigma,angle,f,p,ar,alpha) = tuple(parameters[0])
pylab.imshow(gabor(frequency=f,x=x,y=y,xdensity=denx,ydensity=deny,size=sigma,orientation=angle,phase=p,ar=ar) * alpha)
pylab.colorbar()
pylab.figure()
pylab.imshow(weights[0])
pylab.colorbar()
pylab.figure()
for i in xrange(0,len(parameters)):
pylab.subplot(15,15,i+1)
(x,y,sigma,angle,f,p,ar,alpha) = tuple(parameters[i])
g = gabor(frequency=f,x=x,y=y,xdensity=denx,ydensity=deny,size=sigma,orientation=angle,phase=p,ar=ar) * alpha
m = numpy.max([-numpy.min(g),numpy.max(g)])
pylab.show._needmain=False
pylab.imshow(g,vmin=-m,vmax=m,cmap=pylab.cm.RdBu)
pylab.show()
return parameters
def gab(z,w,display=False):
from matplotlib.patches import Circle
(x,y,sigma,angle,f,p,ar,alpha) = tuple(z)
a = numpy.zeros(numpy.shape(w))
(dx,dy) = numpy.shape(w)
g = gabor(frequency=f,x=x,y=y,xdensity=dx,ydensity=dy,size=sigma,orientation=angle,phase=p,ar=ar) * alpha
if display:
pylab.subplot(2,1,1)
m = numpy.max([-numpy.min(g[0:dx,0:dy]),numpy.max(g[0:dx,0:dy])])
cir = Circle( (y*dy,x*dx), radius=1)
pylab.gca().add_patch(cir)
pylab.imshow(g[0:dx,0:dy],vmin=-m,vmax=m,cmap=pylab.cm.RdBu)
pylab.colorbar()
pylab.subplot(2,1,2)
m = numpy.max([-numpy.min(w),numpy.max(w)])
cir = Circle( (y*dy,x*dx), radius=1)
pylab.gca().add_patch(cir)
pylab.imshow(w,vmin=-m,vmax=m,cmap=pylab.cm.RdBu)
pylab.show._needmain=False
pylab.colorbar()
pylab.show()
#print numpy.sum(numpy.power(g[0:dx,0:dy] - w,2))
return numpy.sum(numpy.power(g[0:dx,0:dy] - w,2))
def gabor(frequency=1.0,x=0.0,y=0.0,xdensity=1.0,ydensity=1.0,size=1.0,orientation=1.0,phase=1.0,ar=1.0):
X = numpy.tile(numpy.arange(0,xdensity,1),(ydensity,1))
Y = numpy.tile(numpy.arange(0,ydensity,1),(xdensity,1)).T
X1 = (X-x)*numpy.cos(orientation) + (Y-y)*numpy.sin(orientation)
Y1 = -(X-x)*numpy.sin(orientation) + (Y-y)*numpy.cos(orientation)
ker = - ((X1/numpy.sqrt(2)/(size*ar))**2 + (Y1/numpy.sqrt(2)/size)**2)
g = numpy.exp(ker)*numpy.cos(2*numpy.pi*X1*frequency+phase)
return g
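# Minimal usage sketch (parameter values are illustrative only): render a single
# oriented filter on a 20x20 pixel grid centred at (10, 10):
#   g = gabor(frequency=0.1, x=10, y=10, xdensity=20, ydensity=20,
#             size=3.0, orientation=numpy.pi / 4, phase=0.0, ar=1.0)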
def runSTC():
f = open("modelfitDB2.dat",'rb')
import pickle
dd = pickle.load(f)
f.close()
(sizex,sizey,training_inputs,training_set,validation_inputs,validation_set,ff,db_node) = sortOutLoading(dd)
#params={}
#params["alpha"] = __main__.__dict__.get('Alpha',50)
#db_node = db_node.get_child(params)
rfs = db_node.children[0].data["ReversCorrelationRFs"]
a = STC(training_inputs-0.5,training_set[:,0:103],validation_inputs,validation_set,rfs)
db_node.add_data("STCrfs",a,True)
#return a
pylab.figure()
pylab.subplot(16,14,1)
j=0
m = []
for (ei,vv,vva,em,ep) in a:
ind = numpy.argsort(numpy.abs(vv))
w = numpy.array(ei[ind[len(ind)-1],:].real)
m.append(numpy.max([-numpy.min(w),numpy.max(w)]))
m = numpy.max(m)
s = numpy.sqrt(sizey)
i=0
acts=[]
ofs=[]
for (ei,vv,avv,em,ep) in a:
ind = numpy.argsort(vv)
pylab.figure()
#if len(avv) == 0:
# acts.append([])
# continue
        j = 1  # subplot indices are 1-based
act=[]
act_val=[]
of = []
pylab.subplot(10,1,9)
pylab.plot(numpy.sort(vv)[len(vv)-30:],'ro')
pylab.plot(em[len(vv)-30:])
pylab.plot(ep[len(vv)-30:])
pylab.subplot(10,1,10)
pylab.plot(numpy.sort(vv)[0:20],'ro')
pylab.plot(em[0:20])
pylab.plot(ep[0:20])
for v in avv:
w = numpy.array(ei[v,:].real).reshape(sizex,sizey)
m = numpy.max([-numpy.min(w),numpy.max(w)])
pylab.subplot(10,1,j)
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
o = run_nonlinearity_detection((training_inputs*ei[v,:].T),numpy.mat(training_set[:,i]).T,10,display=False)
of.append(o)
(bins,tf) = o[0]
act.append(apply_output_function(training_inputs*ei[v,:].T,o))
act_val.append(apply_output_function(validation_inputs*ei[v,:].T,o))
pylab.subplot(10,1,j+1)
pylab.plot(bins[0:-1],tf)
j = j+2
acts.append((act,act_val,of))
#print "corr_coef =", numpy.corrcoef(act.T, training_set.T[i])[0][1]
#print "PVE =", 1-numpy.sum(numpy.power(act.T- training_set.T[i],2)) / numpy.sum(numpy.power(numpy.mean(training_set.T[i])- training_set.T[i],2))
#pylab.plot(numpy.array((training_inputs*ei[ind[len(ind)-1],:].real.T)),numpy.array(numpy.mat(training_set[:,i]).T),'ro')
i = i+1
db_node.add_data("STCact",acts,True)
f = open("modelfitDB2.dat",'wb')
import pickle
dd = pickle.dump(dd,f)
f.close()
#pylab.show()
return a
def STC(inputs,activities,validation_inputs,validation_activities,STA,cutoff=85,display=False):
from scipy import linalg
print "input size:",numpy.shape(inputs)
t,s = numpy.shape(inputs)
    s = int(numpy.sqrt(s))  # image side length, as an int for reshape
(num_in,input_len) = numpy.shape(inputs)
(num_in,act_len) = numpy.shape(activities)
print numpy.mean(activities)
tt = numpy.mat(numpy.zeros(numpy.shape(inputs[0])))
SWa = []
laa = []
Ninva = []
C = []
eis = []
for a in xrange(0,act_len):
CC = numpy.mat(numpy.zeros((input_len,input_len)))
U = numpy.mat(numpy.zeros((input_len,input_len)))
N = numpy.mat(numpy.zeros((input_len,input_len)))
Ninv = numpy.mat(numpy.zeros((input_len,input_len)))
for i in xrange(0,num_in):
CC = CC + (numpy.mat(inputs[i,:]) - STA[a].flatten()/num_in).T * (numpy.mat(inputs[i,:]- STA[a].flatten()/num_in))
CC = CC / num_in
v,la = linalg.eigh(CC)
la = numpy.mat(la)
ind = numpy.argsort(v)
for j in xrange(0,int(input_len*(cutoff/100.0))):
v[ind[j]]=0.0
for i in xrange(0,input_len):
if v[i] != 0:
N[i,i] = 1/numpy.sqrt(v[i])
Ninv[i,i] = numpy.sqrt(v[i])
else:
N[i,i]=0.0
Ninv[i,i] = 0.0
U = la * numpy.mat(N)
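        # U whitens the stimulus ensemble: la rotates into the covariance eigenbasis
        # and N = diag(1/sqrt(eigenvalue)) equalises variance, with the smallest
        # cutoff% of eigenvalues zeroed so that noise directions are not amplified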
SW = numpy.matrix(inputs) * U
SWa.append(SW)
laa.append(la)
Ninva.append(Ninv)
if a == 0:
SW1 = SW*linalg.inv(la)
F = numpy.mat(numpy.zeros((s,s)))
for i in xrange(0,num_in):
F += abs(pylab.fftshift(pylab.fft2(inputs[i,:].reshape(s,s)-STA[0])))
pylab.figure()
pylab.imshow(F.A,interpolation='nearest',cmap=pylab.cm.gray)
F = numpy.mat(numpy.zeros((s,s)))
for i in xrange(0,num_in):
F += abs(pylab.fftshift(pylab.fft2(SW1[i,:].reshape(s,s))))
pylab.figure()
pylab.imshow(F.A,interpolation='nearest',cmap=pylab.cm.gray)
#do significance testing
vv=[]
for r in xrange(0,50):
from numpy.random import shuffle
act = numpy.array(activities[:,a].T).copy()
shuffle(act)
C = numpy.zeros((input_len,input_len))
for i in xrange(0,num_in):
C += (numpy.mat(SWa[a][i,:]).T * numpy.mat(SWa[a][i,:])) * act[i]
C = C / num_in
v,ei = linalg.eigh(C)
vv.append(numpy.sort(v))
vv = numpy.mat(vv)
mean_diff = []
for i in xrange(0,50):
for j in xrange(0,input_len-1):
mean_diff.append(numpy.abs(vv[i,j]-vv[i,j+1]))
diff_min = numpy.mean(mean_diff) - 15*numpy.std(mean_diff)
diff_max = numpy.mean(mean_diff) + 15*numpy.std(mean_diff)
error_minus = numpy.array(numpy.mean(vv,axis=0)-3.0*numpy.std(vv,axis=0))[0]
error_plus = numpy.array(numpy.mean(vv,axis=0)+3.0*numpy.std(vv,axis=0))[0]
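        # significance test: 50 response-shuffled surrogates give a null distribution
        # of STC eigenvalue spectra; real eigenvalues falling outside +/-3 std of the
        # shuffled mean (and separated by an unusually large gap) are kept below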
C = numpy.zeros((input_len,input_len))
for i in xrange(0,num_in):
C += (numpy.mat(SWa[a][i,:]).T * numpy.mat(SWa[a][i,:])) * activities[i,a]
C = C / num_in
if a == 0:
pylab.figure()
pylab.imshow(C)
vv,ei = linalg.eigh(C)
ind = numpy.argsort(vv)
accepted=[]
for i in xrange(len(vv)-30,len(vv)):
if (vv[ind[i]] >= error_plus[i]) or (vv[ind[i]] <= error_minus[i]):
accepted.append(i)
accepted_vv=[]
flag=False
for i in accepted:
if i != 0:
if (vv[ind[i]]-vv[ind[i-1]] >= diff_max):
flag=True
if flag:
accepted_vv.append(ind[i])
print len(accepted_vv)
ei=numpy.mat(ei).T
ei = ei*(Ninva[a]*linalg.inv(laa[a]))
eis.append((ei,vv,accepted_vv,error_minus,error_plus))
return eis
def fitting():
f = open("results.dat",'rb')
import pickle
dd = pickle.load(f)
f.close()
which = [0,1,4]
for i in which:
node = dd.children[i].children[0]
rfs = node.data["ReversCorrelationRFs"]
params = fitGabor(rfs)
node.add_data("FittedParams",params,force=True)
#m = numpy.max([numpy.abs(numpy.min(rfs)),numpy.abs(numpy.max(rfs))])
#pylab.figure()
#for i in xrange(0,len(rfs)):
# pylab.subplot(15,15,i+1)
# w = numpy.array(rfs[i])
# pylab.show._needmain=False
# pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
# pylab.axis('off')
#pylab.figure()
f = open("results.dat",'wb')
pickle.dump(dd,f,-2)
f.close()
return (params)
def tiling():
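# Tiling analysis of the fitted Gabor RFs across three recorded regions: plots
# raw vs. fitted RFs, parameter histograms and retinotopic coverage, and
# compares the averaged fitted-RF coverage against randomized-parameter
# controls.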
contrib.modelfit.save_fig_directory='/home/antolikjan/Doc/reports/Sparsness/'
f = open("results.dat",'rb')
import pickle
from matplotlib.patches import Circle
dd = pickle.load(f)
f.close()
rfs = [dd.children[0].children[0].data["ReversCorrelationRFs"],
dd.children[1].children[0].data["ReversCorrelationRFs"],
dd.children[4].children[0].data["ReversCorrelationRFs"]]
m=0
for r in rfs:
m = numpy.max([numpy.max([numpy.abs(numpy.min(r)),numpy.abs(numpy.max(r))]),m])
loc = []
f = file("./Mice/2009_11_04/region3_cell_locations", "r")
loc.append([line.split() for line in f])
f.close()
f = file("./Mice/2009_11_04/region5_cell_locations", "r")
loc.append([line.split() for line in f])
f.close()
f = file("./Mice/20090925_14_36_01/(20090925_14_36_01)-_retinotopy_region2_sequence_50cells_cell_locations.txt", "r")
loc.append([line.split() for line in f])
f.close()
param=[]
param.append(dd.children[0].children[0].data["FittedParams"])
param.append(dd.children[1].children[0].data["FittedParams"])
param.append(dd.children[4].children[0].data["FittedParams"])
denx,deny=numpy.shape(rfs[0][0])
view_angle = monitor_view_angle(59,20)
degrees_per_pixel = view_angle / (2*denx)
for locations in loc:
(a,b) = numpy.shape(locations)
for i in xrange(0,a):
for j in xrange(0,b):
locations[i][j] = float(locations[i][j])
loc[0] = numpy.array(loc[0])/256.0*261.0
loc[1] = numpy.array(loc[1])/256.0*261.0
loc[2] = numpy.array(loc[2])/256.0*230.0
fitted_corr=[]
fitted_rfs=[]
fev = []
for (j,rf) in enumerate(rfs):
q=[]
g=[]
v=[]
rand_g=[]
numpy.random.seed(1111)
for i in xrange(0,len(rf)):
(x,y,sigma,angle,f,p,ar,alpha) = tuple(param[j][i])
(dx,dy) = numpy.shape(rfs[j][0])
g.append(gabor(frequency=f,x=x,y=y,xdensity=dx,ydensity=dy,size=sigma,orientation=angle,phase=p,ar=ar) * alpha)
#q.append(numpy.sum(numpy.power(rf[i].flatten()- numpy.mean(rf[i].flatten()),2)))
q.append(numpy.mean(numpy.power(rf[i],2)))
v.append(numpy.var(rf[i]- gabor(frequency=f,x=x,y=y,xdensity=dx,ydensity=dy,size=sigma,orientation=angle,phase=p,ar=ar) * alpha) / numpy.var(rf[i]))
fitted_corr.append(q)
fitted_rfs.append(g)
fev.append(v)
#dd.children[0].children[0].add_data("FittedRFs",fitted_rfs[0],force=True)
#dd.children[1].children[0].add_data("FittedRFs",fitted_rfs[1],force=True)
#dd.children[4].children[0].add_data("FittedRFs",fitted_rfs[2],force=True)
#f = open("results.dat",'wb')
#pickle.dump(dd,f,-2)
#f.close()
pylab.figure()
pylab.hist(fev[0] + fev[1] + fev[2])
pylab.xlabel('Fraction of unexplained variance')
pylab.figure()
ii=0
for k in xrange(0,len(rfs)):
m = numpy.max([numpy.abs(numpy.min(rfs[k])),numpy.abs(numpy.max(rfs[k]))])
for i in xrange(0,len(rfs[k])):
pylab.subplot(15,15,ii+1)
w = numpy.array(rfs[k][i])
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
cir = Circle( (param[k][i][0],param[k][i][1]), radius=1,color='r')
pylab.gca().add_patch(cir)
xx,yy = centre_of_gravity(rfs[k][i])
cir = Circle( (xx,yy), radius=1,color='b')
pylab.gca().add_patch(cir)
pylab.axis('off')
ii+=1
pylab.figure()
ii=0
for k in xrange(0,len(rfs)):
m = numpy.max([numpy.abs(numpy.min(fitted_rfs[k])),numpy.abs(numpy.max(fitted_rfs[k]))])
for i in xrange(0,len(fitted_rfs[k])):
pylab.subplot(15,15,ii+1)
w = numpy.array(fitted_rfs[k][i])
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
ii+=1
for i in xrange(0,len(rfs)):
#to_delete = numpy.nonzero((numpy.array(fitted_corr[i]) < 0.3*(10**-9))*1.0)[0]
to_delete = numpy.nonzero((numpy.array(fev[i]) > 0.3)*1.0)[0]
rfs[i] = numpy.delete(rfs[i],to_delete,axis=0)
fitted_rfs[i] = numpy.delete(fitted_rfs[i],to_delete,axis=0)
loc[i] = numpy.delete(numpy.array(loc[i]),to_delete,axis=0)
param[i] = numpy.delete(numpy.array(param[i]),to_delete,axis=0)
bb = [[],[],[]]
rand_fitted_rfs=[]
for a in xrange(0,3):
for j in xrange(0,10):
perm1 = numpy.random.permutation(len(param[a]))
perm2 = numpy.random.permutation(len(param[a]))
perm3 = numpy.random.permutation(len(param[a]))
perm4 = numpy.random.permutation(len(param[a]))
perm5 = numpy.random.permutation(len(param[a]))
perm6 = numpy.random.permutation(len(param[a]))
perm7 = numpy.random.permutation(len(param[a]))
perm8 = numpy.random.permutation(len(param[a]))
mmin = numpy.min(param[a],axis=0)
mmax = numpy.max(param[a],axis=0)
z = numpy.zeros(numpy.shape(rfs[a][0]))
for i in xrange(0,len(rfs[a])):
(x,y,sigma,angle,f,p,ar,alpha) = tuple(param[a][i])
x = param[a][perm1[i]][0]
y = param[a][perm2[i]][1]
sigma = param[a][perm3[i]][2]
angle = param[a][perm4[i]][3]
f = param[a][perm5[i]][4]
p = param[a][perm6[i]][5]
ar = param[a][perm7[i]][6]
alpha = param[a][perm8[i]][7]
#x = (mmin+numpy.random.rand(8)*(mmax-mmin))[0]
#y = (mmin+numpy.random.rand(8)*(mmax-mmin))[1]
#sigma = (mmin+numpy.random.rand(8)*(mmax-mmin))[2]
#angle = (mmin+numpy.random.rand(8)*(mmax-mmin))[3]
#f = (mmin+numpy.random.rand(8)*(mmax-mmin))[4]
#p = (mmin+numpy.random.rand(8)*(mmax-mmin))[5]
#ar = (mmin+numpy.random.rand(8)*(mmax-mmin))[6]
#alpha = (mmin+numpy.random.rand(8)*(mmax-mmin))[7]
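# dx,dy still hold the RF dimensions leaked from the fitting loop above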
z = z+ gabor(frequency=f,x=x,y=y,xdensity=dx,ydensity=dy,size=sigma,orientation=angle,phase=p,ar=ar) * alpha
z = z / len(rfs[a])
if j == 0:
rand_fitted_rfs.append(z)
bb[a].append(numpy.var(z))
fitted_rfs_merged=numpy.concatenate(fitted_rfs)
rfs_merged=numpy.concatenate(rfs)
params_merged=numpy.concatenate(param)
order = numpy.argsort(params_merged[:,4])
pylab.figure(dpi=100,facecolor='w',figsize=(15,11))
m = numpy.max([numpy.abs(numpy.min(rfs_merged)),numpy.abs(numpy.max(rfs_merged))])
for i in xrange(0,len(rfs_merged)):
pylab.subplot(15,15,i+1)
w = numpy.array(rfs_merged[i])
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
release_fig('RawRFs.pdf')
pylab.figure(dpi=100,facecolor='w',figsize=(15,11))
m = numpy.max([numpy.abs(numpy.min(fitted_rfs_merged)),numpy.abs(numpy.max(fitted_rfs_merged))])
for i in xrange(0,len(fitted_rfs_merged)):
pylab.subplot(15,15,i+1)
w = numpy.array(fitted_rfs_merged[i])
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
release_fig('FittedRFs.pdf')
pylab.figure(dpi=100,facecolor='w',figsize=(15,11))
m = numpy.max([numpy.abs(numpy.min(rfs_merged)),numpy.abs(numpy.max(rfs_merged))])
for i in xrange(0,len(order)):
pylab.subplot(15,15,i+1)
w = numpy.array(rfs_merged[order[i]])
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
release_fig('OrderedRFs.pdf')
pylab.figure(dpi=100,facecolor='w',figsize=(15,11))
m = numpy.max([numpy.abs(numpy.min(fitted_rfs_merged)),numpy.abs(numpy.max(fitted_rfs_merged))])
for i in xrange(0,len(order)):
pylab.subplot(15,15,i+1)
w = numpy.array(fitted_rfs_merged[order[i]])
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
release_fig('OrderedFittedRFs.pdf')
# show RF coverage
rc=[]
for rf in rfs:
r=[]
for idx in xrange(0,len(rf)):
r.append(numpy.array(centre_of_gravity(rf[idx])))
rc.append(numpy.array(r))
nx=[]
ny=[]
for i in xrange(len(param)):
for j in xrange(len(param[i])):
nx.append(param[i][j][4] * param[i][j][2]* param[i][j][6])
ny.append(param[i][j][4] * param[i][j][2])
# PLOT DIFFERENT FITTED PARAMETERS HISTOGRAMS
pylab.figure(dpi=300,facecolor='w',figsize=(6,4))
pylab.scatter(nx,ny,s=10,facecolor='none', edgecolor='b',marker='o')
pylab.axis([0,1.5,0.0,1.5])
pylab.gca().set_aspect('equal')
pylab.xlabel('nx')
pylab.ylabel('ny')
pylab.gca().xaxis.set_major_locator(MaxNLocator(3))
pylab.gca().yaxis.set_major_locator(MaxNLocator(3))
release_fig('NxNy.png')
pylab.figure(dpi=100,facecolor='w',figsize=(17,5))
pylab.subplot(1,5,1)
pylab.hist(numpy.array(param[0][:,4].flatten().tolist() + param[1][:,4].flatten().tolist() + param[2][:,4].flatten().tolist())*degrees_per_pixel)
pylab.xlabel('Frequency (deg. / visual angle)')
pylab.gca().xaxis.set_major_locator(MaxNLocator(5))
pylab.subplot(1,5,2)
pylab.hist(numpy.array(param[0][:,2].flatten().tolist() + param[1][:,2].flatten().tolist() + param[2][:,2].flatten().tolist())*degrees_per_pixel)
pylab.xlabel('Sigma (deg. / visual angle)')
pylab.gca().xaxis.set_major_locator(MaxNLocator(5))
pylab.subplot(1,5,3)
pylab.hist(param[0][:,6].flatten().tolist() + param[1][:,6].flatten().tolist() + param[2][:,6].flatten().tolist())
pylab.xlabel('Aspect ratio')
pylab.gca().xaxis.set_major_locator(MaxNLocator(5))
pylab.subplot(1,5,4)
c=[]
for j in xrange(0,len(param)):
for i in xrange(0,len(param[j][:,5])):
c.append(numpy.complex(numpy.abs(numpy.cos(param[j][i,5])),numpy.abs(numpy.sin(param[j][i,5]))))
pylab.hist(numpy.angle(c))
pylab.gca().xaxis.set_major_locator(MaxNLocator(5))
pylab.xlabel('Phase')
pylab.subplot(1,5,5)
pylab.hist(param[0][:,3].flatten().tolist() + param[1][:,3].flatten().tolist() + param[2][:,3].flatten().tolist())
pylab.xlabel('Orientation')
pylab.gca().xaxis.set_major_locator(MaxNLocator(5))
release_fig('FittedParametersDistribution.pdf')
#PLOT THE RETINOTOPIC COVERAGE
pylab.figure(dpi=100,facecolor='w',figsize=(15,11))
for i in xrange(0,len(param)):
pylab.subplot(1,3,i+1)
pylab.title('Retinotopic coverage')
pylab.plot(param[i][:,0],param[i][:,1],'bo',label='Fitted')
pylab.plot(rc[i][:,0],rc[i][:,1],'ro',label='Center of gravity')
pylab.axis([0,numpy.shape(rfs[0][0])[0],0.0,numpy.shape(rfs[0][0])[1]])
pylab.gca().set_aspect('equal')
pylab.xlabel('X coordinate')
pylab.ylabel('Y coordinate')
pylab.legend()
release_fig('RetinotopicCoverage.pdf')
#PLOT THE ORIENTATION PREFERENCE AGAINST RETINOTOPY
pylab.figure(dpi=100,facecolor='w',figsize=(15,11))
for i in xrange(0,len(param)):
pylab.subplot(1,3,i+1)
pylab.title('Orientation preference vs. retinotopy')
pylab.scatter(param[i][:,0],param[i][:,1],c=param[i][:,3]/numpy.pi,s=50,cmap=pylab.cm.hsv)
#pylab.axis([0,numpy.shape(rfs[0][0])[0],0.0,numpy.shape(rfs[0][0])[1]])
pylab.gca().set_aspect('equal')
pylab.xlabel('X coordinate')
pylab.ylabel('Y coordinate')
pylab.colorbar(shrink=0.3)
release_fig('ORandRetinotopy.pdf')
aaa = []
d = []
for i in xrange(0,len(fitted_rfs[0])):
for j in xrange(i+1,len(fitted_rfs[0])):
d.append(distance(loc[0],i,j))
aaa.append(numpy.corrcoef(fitted_rfs[0][i].flatten(),fitted_rfs[0][j].flatten())[0][1])
pylab.figure(facecolor='w')
pylab.title('Correlation between distance and fitted RFs correlations')
ax = pylab.axes()
ax.plot(d,aaa,'ro')
ax.plot(d,contrib.jacommands.weighted_local_average(d,aaa,30),'go')
ax.plot(d,numpy.array(contrib.jacommands.weighted_local_average(d,aaa,30))+numpy.array(contrib.jacommands.weighted_local_std(d,aaa,30)),'bo')
ax.plot(d,numpy.array(contrib.jacommands.weighted_local_average(d,aaa,30))-numpy.array(contrib.jacommands.weighted_local_std(d,aaa,30)),'bo')
ax.axhline(0,linewidth=4)
#PLOT RF COVERAGE
#first raw
z = numpy.zeros(numpy.shape(rfs[0][0]))
for f in rfs[0]:
z = z+f
z = z/len(rfs[0])
pylab.figure(dpi=100,facecolor='w',figsize=(15,11))
pylab.subplot(1,5,1)
pylab.title('Raw')
m = numpy.max([numpy.abs(numpy.min(z)),numpy.abs(numpy.max(z))])
pylab.imshow(z,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.colorbar(shrink=0.3)
print 'Variance of the raw averaged RFs',numpy.var(z)
#then fitted
z = numpy.zeros(numpy.shape(fitted_rfs[0][0]))
for f in fitted_rfs[0]:
z = z+f
z = z/len(fitted_rfs[0])
pylab.subplot(1,5,2)
pylab.title('Fitted')
m = numpy.max([numpy.abs(numpy.min(z)),numpy.abs(numpy.max(z))])
pylab.imshow(z,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.colorbar(shrink=0.3)
print 'Variance of the fitted averaged RFs',numpy.var(z)
#then randomized fitted
pylab.subplot(1,5,3)
pylab.title('Randomized fitted')
m = numpy.max([numpy.abs(numpy.min(rand_fitted_rfs[0])),numpy.abs(numpy.max(rand_fitted_rfs[0]))])
pylab.imshow(numpy.array(rand_fitted_rfs[0]),vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.colorbar(shrink=0.3)
pylab.subplot(1,5,4)
pylab.title('Example fitted')
m = numpy.max([numpy.abs(numpy.min(fitted_rfs[0][0])),numpy.abs(numpy.max(fitted_rfs[0][0]))])
pylab.imshow(fitted_rfs[0][0],vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.colorbar(shrink=0.3)
pylab.subplot(1,5,5)
pylab.title('Example raw')
m = numpy.max([numpy.abs(numpy.min(rfs[0][0])),numpy.abs(numpy.max(rfs[0][0]))])
pylab.imshow(rfs[0][0],vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.colorbar(shrink=0.3)
release_fig('ONOFFcoverage.pdf')
print 'Average variance and +/- 2*std of the randomized fitted averaged RF',numpy.mean(bb[0]), numpy.mean(bb[0]) + 2*numpy.sqrt(numpy.var(bb[0])), numpy.mean(bb[0]) - 2*numpy.sqrt(numpy.var(bb[0]))
pylab.figure()
pylab.subplot(1,3,1)
pylab.hist(bb[0],bins=30)
pylab.axvline(numpy.var(numpy.mean(fitted_rfs[0],axis=0)))
print numpy.var(numpy.mean(fitted_rfs[0],axis=0))
pylab.subplot(1,3,2)
pylab.hist(bb[1],bins=30)
pylab.axvline(numpy.var(numpy.mean(fitted_rfs[1],axis=0)))
print numpy.var(numpy.mean(fitted_rfs[1],axis=0))
pylab.subplot(1,3,3)
pylab.hist(bb[2],bins=30)
pylab.axvline(numpy.var(numpy.mean(fitted_rfs[2],axis=0)))
print numpy.var(numpy.mean(fitted_rfs[2],axis=0))
return
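# NOTE: everything below this return is unreachable exploratory analysis
# (orientation-group membership and Monte Carlo statistics) kept for reference.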
membership=[]
membership1=[]
locs = []
for i in xrange(0,len(rfs)):
m = []
m1 = []
l = []
for j in xrange(0,len(rfs[i])):
if (circular_distance(param[i][j][3],0) <= numpy.pi/8):
m.append(0)
m1.append(0)
l.append(loc[i][j])
elif (circular_distance(param[i][j][3],numpy.pi/4) <= numpy.pi/8):
m1.append(1)
elif (circular_distance(param[i][j][3],numpy.pi/2) <= numpy.pi/8):
m.append(2)
m1.append(2)
l.append(loc[i][j])
elif (circular_distance(param[i][j][3],3*numpy.pi/4) <= numpy.pi/8):
m1.append(3)
membership.append(m)
membership1.append(m1)
locs.append(l)
ors=[]
orrf=[]
orrphase=[]
for (p,rf) in zip(param,rfs):
ors.append(numpy.array(p[:,3].T))
orrf.append(zip(numpy.array(p[:,3].T),rf))
orrphase.append(zip(numpy.array(p[:,3].T),numpy.array(p[:,5].T)))
#monte_carlo(loc,orrphase,histogram_of_phase_dist_correl_of_cooriented_neurons,30)
#monte_carlo(loc,orrf,histogram_of_RF_correl_of_cooriented_neurons,30)
#monte_carlo(loc,ors,average_or_histogram_of_proximite,30)
#monte_carlo(loc,ors,average_or_diff,30)
#monte_carlo(loc,ors,average_or_diff,30)
monte_carlo(loc,orrf,average_cooriented_RF_corr,30)
#monte_carlo(loc,orrf,average_RF_corr,30)
return
#return
#monte_carlo(loc,orrphase,histogram_of_phase_dist_correl_of_cooriented_neurons,30)
#monte_carlo(loc,orrf,histogram_of_RF_correl_of_cooriented_neurons,30)
#monte_carlo(loc,ors,average_or_histogram_of_proximite,30)
#return
#monte_carlo(locs,membership,number_of_same_neighbours,100)
#monte_carlo(loc,membership1,number_of_same_neighbours,100)
#return
colors=[]
xx=[]
yy=[]
d1=[]
d2=[]
d3=[]
for r,l in zip(rfs,loc):
for i in xrange(0,len(r)):
for j in xrange(i+1,len(r)):
d3.append(distance(l,i,j))
for i in xrange(0,len(rfs)):
colors=[]
xx=[]
yy=[]
for idx in xrange(0,len(rfs[i])):
if (circular_distance(param[i][idx][3],0) <= numpy.pi/12):
xx.append(loc[i][idx][0])
yy.append(loc[i][idx][1])
colors.append(0.9)
for j in xrange(idx+1,len(rfs[i])):
if (circular_distance(param[i][j][3],0) <= numpy.pi/12):
d1.append(distance(loc[i],idx,j))
d2.append(distance(loc[i],idx,j))
if (circular_distance(param[i][j][3],numpy.pi/2) <= numpy.pi/12):
d2.append(distance(loc[i],idx,j))
if (circular_distance(param[i][idx][3],numpy.pi/2) <= numpy.pi/12):
xx.append(loc[i][idx][0])
yy.append(loc[i][idx][1])
colors.append(0.1)
for j in xrange(idx+1,len(rfs[i])):
if (circular_distance(param[i][j][3],numpy.pi/2) <= numpy.pi/12):
d1.append(distance(loc[i],idx,j))
d2.append(distance(loc[i],idx,j))
if (circular_distance(param[i][j][3],0) <= numpy.pi/12):
d2.append(distance(loc[i],idx,j))
pylab.figure(figsize=(5,5))
pylab.scatter(xx,yy,c=colors,s=200,cmap=pylab.cm.RdBu)
pylab.colorbar()
print "Average distance of colinear", numpy.mean(numpy.power(d1,2))
print "Average distance of horizontal and vertical", numpy.mean(numpy.power(d2,2))
print "Average distance of whole population", numpy.mean(numpy.power(d3,2))
def monte_carlo(locations,properties,property_measure,reps):
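# Permutation test: for each recording, compare property_measure on the real
# spatial assignment against `reps` random shufflings, and plot the measured
# curve together with the shuffled median and the 5%/95% percentile envelopes.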
from numpy.random import shuffle
for (l,m) in zip(locations,properties):
a = property_measure(l,m)
curves = numpy.zeros((reps,len(a)))
for x in xrange(0,reps):
mm = list(m)
shuffle(mm)
curves[x,:] = property_measure(l,mm)
f = numpy.median(curves,axis=0)
f_m = a
#std = numpy.std(curves,axis=0,ddof=1)
err_bar_upper = numpy.sort(curves,axis=0)[int(reps*0.95),:]
err_bar_lower = numpy.sort(curves,axis=0)[int(reps*0.05),:]
pylab.figure()
pylab.plot(f,'b')
pylab.plot(f_m,'g')
pylab.plot(err_bar_lower,'r')
pylab.plot(err_bar_upper,'r')
def number_of_same_neighbours(locations,membership):
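# For each distance bin, count how many other cells within that distance share
# a cell's orientation-group membership (normalised per cell).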
curve = [0 for i in xrange(0,30)]
for dist in xrange(0,30):
for i in xrange(0,len(locations)):
for j in xrange(0,len(locations)):
if i!=j:
if distance(locations,i,j) < (dist+1)*10:
if membership[i] == membership[j]:
curve[dist] += 1
curve[dist] /= float(len(locations)) # avoid integer division in Python 2
return curve
def average_or_diff(locations,ors):
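# Mean circular orientation difference between all cell pairs closer than each
# distance bin.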
curve = [0 for i in xrange(0,30)]
for dist in xrange(0,30):
n = 0
for i in xrange(0,len(locations)):
for j in xrange(0,len(locations)):
if i!=j:
if distance(locations,i,j) < (dist+1)*10:
curve[dist]+=circular_distance(ors[i],ors[j])
n+=1
if n!=0:
curve[dist]/=n
return curve
def average_cooriented_RF_corr(locations,data):
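# Mean RF correlation between co-oriented (orientation difference < pi/12)
# cell pairs closer than each distance bin.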
(ors,rfs) = zip(*data)
curve = [0 for i in xrange(0,30)]
for dist in xrange(0,30):
n = 0
for i in xrange(0,len(locations)):
for j in xrange(0,len(locations)):
if i!=j:
if distance(locations,i,j) < (dist+1)*10:
if circular_distance(ors[i],ors[j]) < (numpy.pi/12.0):
curve[dist]+=numpy.corrcoef(rfs[i].flatten(),rfs[j].flatten())[0][1]
n+=1
if n!=0:
curve[dist]/=n
return curve
def average_RF_corr(locations,data):
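# Mean RF correlation between all cell pairs closer than each distance bin.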
(ors,rfs) = zip(*data)
curve = [0 for i in xrange(0,30)]
for dist in xrange(0,30):
n = 0
for i in xrange(0,len(locations)):
for j in xrange(0,len(locations)):
if i!=j:
if distance(locations,i,j) < (dist+1)*10:
curve[dist]+=numpy.corrcoef(rfs[i].flatten(),rfs[j].flatten())[0][1]
n+=1
if n!=0:
curve[dist]/=n
return curve
def average_or_histogram_of_proximite(locations,ors):
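# Normalised histogram of orientation differences between cell pairs closer
# than 50 units.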
curve = []
for i in xrange(0,len(locations)):
for j in xrange(0,len(locations)):
if i!=j:
if distance(locations,i,j) < 50:
curve.append(circular_distance(ors[i],ors[j]))
if len(curve) != 0:
return numpy.histogram(curve,range=(0.0,numpy.pi/2))[0]/(len(curve)*1.0)
else:
return [0 for i in xrange(0,10)]
def histogram_of_RF_correl_of_cooriented_neurons(locations,data):
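# Normalised histogram of RF correlations between nearby (<50) co-oriented
# (orientation difference < pi/8) cell pairs.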
(ors,rfs) = zip(*data)
difs=[]
for i in xrange(0,len(locations)):
for j in xrange(0,len(locations)):
if i!=j:
if circular_distance(ors[i],ors[j]) < (numpy.pi/8.0):
if distance(locations,i,j) < 50:
difs.append(numpy.corrcoef(rfs[i].flatten(),rfs[j].flatten())[0][1])
if len(difs) != 0:
return numpy.histogram(difs,range=(-1.0,1.0),bins=10)[0]/(len(difs)*1.0)
else:
return [0 for i in xrange(0,10)]
def histogram_of_phase_dist_correl_of_cooriented_neurons(locations,data):
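# Normalised histogram of absolute phase differences between nearby (<40)
# co-oriented (orientation difference < pi/12) cell pairs.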
(ors,phase) = zip(*data)
difs=[]
for i in xrange(0,len(locations)):
for j in xrange(0,len(locations)):
if i!=j:
if circular_distance(ors[i],ors[j]) < (numpy.pi/12.0):
if distance(locations,i,j) < 40:
dif = numpy.abs(phase[i] - phase[j])
if dif > numpy.pi:
dif = 2*numpy.pi - dif
difs.append(dif)
if len(difs) != 0:
return numpy.histogram(difs,range=(0,numpy.pi),bins=10)[0]/(len(difs)*1.0)
else:
return [0 for i in xrange(0,10)]
def RF_correlations():
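# Pairwise analysis of the reverse-correlation RFs across the three recorded
# regions: spatial layout plots, orientation/phase difference statistics, and
# distance-versus-RF-correlation relationships.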
f = open("modelfitDB2.dat",'rb')
import pickle
dd = pickle.load(f)
f.close()
rfs = [dd.children[0].children[0].data["ReversCorrelationRFs"],
dd.children[1].children[0].data["ReversCorrelationRFs"],
dd.children[3].children[0].data["ReversCorrelationRFs"]]
m=0
for r in rfs:
m = numpy.max([numpy.max([numpy.abs(numpy.min(r)),numpy.abs(numpy.max(r))]),m])
loc = []
f = file("./Mice/2009_11_04/region3_cell_locations", "r")
loc.append([line.split() for line in f])
f.close()
f = file("./Mice/2009_11_04/region5_cell_locations", "r")
loc.append([line.split() for line in f])
f.close()
f = file("./Mice/20090925_14_36_01/(20090925_14_36_01)-_retinotopy_region2_sequence_50cells_cell_locations.txt", "r")
loc.append([line.split() for line in f])
f.close()
param=[]
f = open("./Mice/2009_11_04/region=3_fitting_rep=100","rb")
import pickle
param.append(pickle.load(f))
f.close()
f = open("./Mice/2009_11_04/region=5_fitting_rep=100","rb")
param.append(pickle.load(f))
f.close()
f = open("./Mice/20090925_14_36_01/region=2_fitting_rep=100","rb")
param.append(pickle.load(f))
f.close()
for locations in loc:
(a,b) = numpy.shape(locations)
for i in xrange(0,a):
for j in xrange(0,b):
locations[i][j] = float(locations[i][j])
loc[0] = numpy.array(loc[0])/256.0*261.0
loc[1] = numpy.array(loc[1])/256.0*261.0
loc[2] = numpy.array(loc[2])/256.0*230.0
fitted_corr=[]
for rf in rfs:
f=[]
for i in xrange(0,len(rf)):
#(x,y,sigma,angle,f,p,ar,alpha) = tuple(params[i])
#(dx,dy) = numpy.shape(rfs[0])
#g = Gabor(bounds=BoundingBox(radius=0.5),frequency=f,x=y-0.5,y=0.5-x,xdensity=dx,ydensity=dy,size=sigma,orientation=angle,phase=p,aspect_ratio=ar)() * alpha
f.append(numpy.sum(numpy.power(rf[i].flatten()- numpy.mean(rf[i].flatten()),2)))
fitted_corr.append(f)
pylab.title("The histogram of the variability of RFs")
pylab.hist(flatten(fitted_corr))
pylab.xlabel('RF variability')
for i in xrange(0,len(fitted_corr)):
pylab.figure()
z = numpy.argsort(fitted_corr[i])
b=0
for j in z:
pylab.subplot(15,15,b+1)
pylab.show._needmain=False
pylab.imshow(rfs[i][j],vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
b+=1
for i in xrange(0,len(rfs)):
to_delete = numpy.nonzero((numpy.array(fitted_corr[i]) < 0.00000004)*1.0)[0]
rfs[i] = numpy.delete(rfs[i],to_delete,axis=0)
loc[i] = numpy.delete(numpy.array(loc[i]),to_delete,axis=0)
param[i] = numpy.delete(numpy.array(param[i]),to_delete,axis=0)
rc=[]
for rf in rfs:
r=[]
for idx in xrange(0,len(rf)):
r.append(numpy.array(centre_of_gravity(numpy.power(rf[idx],2)))*1000)
rc.append(r)
for i in xrange(0,len(rfs)):
pylab.figure()
b=0
for j in rfs[i]:
pylab.subplot(15,15,b+1)
pylab.show._needmain=False
pylab.imshow(j,vmin=-m*0.5,vmax=m*0.5,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
b+=1
pylab.savefig('RFsGrid'+str(i)+'.png')
rf_cross=[]
for r in rfs:
print "number of RFs in region:",len(r)
rf_cros = numpy.zeros((len(r),len(r)))
for i in xrange(0,len(r)):
for j in xrange(0,len(r)):
rf_cros[i,j] = numpy.corrcoef(r[i].flatten(),r[j].flatten())[0][1]
rf_cross.append(rf_cros)
i=0
for (r,locations) in zip(rfs,loc):
pylab.figure(figsize=(5,5))
pylab.axes([0.0,0.0,1.0,1.0])
for idx in xrange(0,len(r)):
x = locations[idx][0]/300
y = locations[idx][1]/300
pylab.axes([x-0.02,y-0.02,0.04,0.04])
pylab.imshow(r[idx],vmin=-m*0.5,vmax=m*0.5,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
pylab.savefig('RFsLocalized'+str(i)+'.png')
i+=1
from matplotlib.lines import Line2D
from matplotlib.patches import Circle
i=0
for (r,locations,p) in zip(rfs,loc,param):
pylab.figure(figsize=(5,5))
pylab.axes([0.0,0.0,1.0,1.0])
for idx in xrange(0,len(r)):
x = locations[idx][0]/300
y = locations[idx][1]/300
pylab.axes([x-0.02,y-0.02,0.04,0.04])
pylab.imshow(r[idx],vmin=-m*0.5,vmax=m*0.5,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
ax = pylab.axes([0.0,0.0,1.0,1.0])
cir = Circle( (x,y), radius=0.01)
pylab.gca().add_patch(cir)
l = Line2D([x-numpy.cos(p[idx][3])*0.03,x+numpy.cos(p[idx][3])*0.03],[y-numpy.sin(p[idx][3])*0.03,y+numpy.sin(p[idx][3])*0.03],transform=ax.transAxes,linewidth=5.1, color='g')
pylab.gca().add_line(l)
pylab.savefig('RFsLocalizedOR'+str(i)+'.png')
i+=1
for (r,locations,p) in zip(rfs,loc,param):
pylab.figure(figsize=(5,5))
pylab.axes([0.0,0.0,1,1])
for idx in xrange(0,len(r)):
if circular_distance(p[idx][3],0)<= numpy.pi/8:
x = locations[idx][0]/300
y = locations[idx][1]/300
pylab.axes([x-0.02,y-0.02,0.04,0.04])
pylab.imshow(r[idx],vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
ax = pylab.axes([0.0,0.0,1,1])
cir = Circle( (x,y), radius=0.01)
pylab.gca().add_patch(cir)
l = Line2D([x-numpy.cos(p[idx][3])*0.03,x+numpy.cos(p[idx][3])*0.03],[y-numpy.sin(p[idx][3])*0.03,y+numpy.sin(p[idx][3])*0.03],transform=ax.transAxes,linewidth=5.1, color='g')
pylab.gca().add_line(l)
pylab.figure(figsize=(5,5))
pylab.axes([0.0,0.0,1,1])
for idx in xrange(0,len(r)):
if circular_distance(p[idx][3],numpy.pi/4)<= numpy.pi/8:
x = locations[idx][0]/300
y = locations[idx][1]/300
pylab.axes([x-0.02,y-0.02,0.04,0.04])
pylab.imshow(r[idx],vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
ax = pylab.axes([0.0,0.0,1,1])
cir = Circle( (x,y), radius=0.01)
pylab.gca().add_patch(cir)
l = Line2D([x-numpy.cos(p[idx][3])*0.03,x+numpy.cos(p[idx][3])*0.03],[y-numpy.sin(p[idx][3])*0.03,y+numpy.sin(p[idx][3])*0.03],transform=ax.transAxes,linewidth=5.1, color='g')
pylab.gca().add_line(l)
pylab.figure(figsize=(5,5))
pylab.axes([0.0,0.0,1,1])
for idx in xrange(0,len(r)):
if circular_distance(p[idx][3],numpy.pi/2)<= numpy.pi/8:
x = locations[idx][0]/300
y = locations[idx][1]/300
pylab.axes([x-0.02,y-0.02,0.04,0.04])
pylab.imshow(r[idx],vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
ax = pylab.axes([0.0,0.0,1,1])
cir = Circle( (x,y), radius=0.01)
pylab.gca().add_patch(cir)
l = Line2D([x-numpy.cos(p[idx][3])*0.03,x+numpy.cos(p[idx][3])*0.03],[y-numpy.sin(p[idx][3])*0.03,y+numpy.sin(p[idx][3])*0.03],transform=ax.transAxes,linewidth=5.1, color='g')
pylab.gca().add_line(l)
pylab.figure(figsize=(5,5))
pylab.axes([0.0,0.0,1,1])
for idx in xrange(0,len(r)):
if circular_distance(p[idx][3],3*numpy.pi/4)<= numpy.pi/8:
x = locations[idx][0]/300
y = locations[idx][1]/300
pylab.axes([x-0.02,y-0.02,0.04,0.04])
pylab.imshow(r[idx],vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
ax = pylab.axes([0.0,0.0,1,1])
cir = Circle( (x,y), radius=0.01)
pylab.gca().add_patch(cir)
l = Line2D([x-numpy.cos(p[idx][3])*0.03,x+numpy.cos(p[idx][3])*0.03],[y-numpy.sin(p[idx][3])*0.03,y+numpy.sin(p[idx][3])*0.03],transform=ax.transAxes,linewidth=5.1, color='g')
pylab.gca().add_line(l)
for i in xrange(0,len(rfs)):
colors=[]
xx=[]
yy=[]
for idx in xrange(0,len(rfs[i])):
xx.append(loc[i][idx][0])
yy.append(loc[i][idx][1])
colors.append(param[i][idx][3]/numpy.pi)
pylab.figure(figsize=(5,5))
pylab.scatter(xx,yy,c=colors,s=200,cmap=pylab.cm.hsv)
pylab.colorbar()
for i in xrange(0,len(rfs)):
colors=[]
xx=[]
yy=[]
for idx in xrange(0,len(rfs[i])):
xx.append(rc[i][idx][0])
yy.append(rc[i][idx][1])
colors.append(param[i][idx][3]/numpy.pi)
pylab.figure(figsize=(5,5))
pylab.scatter(xx,yy,c=colors,s=200,cmap=pylab.cm.hsv)
pylab.colorbar()
c = []
rf_dist = []
c_cut = []
d = []
orr_diff = []
phase_diff_of_colinear20 = []
phase_diff_of_colinear30 = []
phase_diff_of_colinear50 = []
phase_diff_of_colinear100 = []
phase_diff_of_colinear1000 = []
for (r,locations,params) in zip(rfs,loc,param):
for i in xrange(0,len(r)):
for j in xrange(i+1,len(r)):
corr1= numpy.corrcoef(r[i].flatten(),r[j].flatten())[0][1]
c.append(corr1)
rf_dist.append(numpy.mean(numpy.power(r[i].flatten()-r[j].flatten(),2)))
c_cut.append(RF_corr_centered(r[i],r[j],0.3,display=False))
dist = distance(locations,i,j)
d.append(dist)
a = circular_distance(params[i][3],params[j][3])
if a < numpy.pi/16:
pd = numpy.abs(params[i][5] - params[j][5])
if pd > numpy.pi:
pd = 2*numpy.pi - pd
if dist <= 20:
phase_diff_of_colinear20.append(pd)
if dist <= 40:
phase_diff_of_colinear30.append(pd)
if dist <= 60:
phase_diff_of_colinear50.append(pd)
if dist <= 100:
phase_diff_of_colinear100.append(pd)
if dist <= 300:
phase_diff_of_colinear1000.append(pd)
orr_diff.append(numpy.abs(circular_distance(params[i][3],params[j][3])))
print "number of cell pairs (distances):",len(d)
print "number of cell pairs (RF correlations):",len(c)
nn_orr_diff=[]
nn_corr=[]
nn_rf_dist=[]
all_orr_diff=[]
all_corr=[]
all_rf_dist=[]
for (r,locations,params) in zip(rfs,loc,param):
for i in xrange(0,len(r)):
dst = []
for j in xrange(0,len(r)):
dst.append(distance(locations,i,j))
if i != j:
all_orr_diff.append(circular_distance(params[i][3],params[j][3]))
all_corr.append(numpy.corrcoef(r[i].flatten(),r[j].flatten())[0][1])
all_rf_dist.append(numpy.mean(numpy.power(r[i].flatten()-r[j].flatten(),2)))
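# index 0 of the sorted distances is the cell itself, so the nearest
# neighbour is the second entry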
idx = (numpy.argsort(dst))[1]
nn_orr_diff.append(circular_distance(params[i][3],params[idx][3]))
nn_corr.append(numpy.corrcoef(r[i].flatten(),r[idx].flatten())[0][1])
nn_rf_dist.append(numpy.mean(numpy.power(r[i].flatten()-r[idx].flatten(),2)))
pylab.figure()
pylab.title('Nearest neighbour orientation difference')
pylab.hist([nn_orr_diff,all_orr_diff],normed=True)
pylab.figure()
pylab.title('Nearest neighbour RFs correlation')
pylab.hist([nn_corr,all_corr],normed=True)
pylab.figure()
pylab.title('Nearest neighbour RF distance')
pylab.hist([nn_rf_dist,all_rf_dist],normed=True)
#angle_dif50 = []
#angle50 = []
#corr50 = []
#for i in xrange(0,len(new_rfs)):
#for j in xrange(i+1,len(new_rfs)):
#dist = distance(locations,new_rfs_idx[i],new_rfs_idx[j])
#if (dist < 50) and (circular_distance(params[i][3],params[j][3])<numpy.pi/6):
#a = numpy.arccos((locations[new_rfs_idx[j]][0]-locations[new_rfs_idx[i]][0])/dist)
#a = a * numpy.sign(locations[new_rfs_idx[j]][1]-locations[new_rfs_idx[i]][1])
#if a < 0:
#a = a + numpy.pi
#angle_dif50.append(a)
#angle50.append(params[i][3])
#corr50.append(numpy.corrcoef(new_rfs[i].flatten(),new_rfs[j].flatten())[0][1])
#angle_dif100 = []
#angle100 = []
#corr100= []
#for i in xrange(0,len(new_rfs)):
#for j in xrange(i+1,len(new_rfs)):
#dist = distance(locations,new_rfs_idx[i],new_rfs_idx[j])
#if (dist < 100) and (circular_distance(params[i][3],params[j][3])<numpy.pi/6):
#a = numpy.arccos((locations[new_rfs_idx[j]][0]-locations[new_rfs_idx[i]][0])/dist)
#a = a * numpy.sign(locations[new_rfs_idx[j]][1]-locations[new_rfs_idx[i]][1])
#if a < 0:
#a = a + numpy.pi
#angle_dif100.append(a)
#angle100.append(params[i][3])
#corr100.append(numpy.corrcoef(new_rfs[i].flatten(),new_rfs[j].flatten())[0][1])
#data=[]
#dataset = loadSimpleDataSet("Mice/2009_11_04/region3_stationary_180_15fr_103cells_on_response_spikes",1800,103)
#(index,data) = dataset
#index+=1
#dataset = (index,data)
#dataset = averageRangeFrames(dataset,0,1)
#dataset = averageRepetitions(dataset)
#dataset = generateTrainingSet(dataset)
#(a,v) = compute_average_min_max(dataset)
#dataset = normalize_data_set(dataset,a,v)
#data.append(dataset)
#cor_orig = []
#for i in xrange(0,len(new_rfs)):
#for i in xrange(0,len(new_rfs)):
#for j in xrange(i+1,len(new_rfs)):
#cor_orig.append(numpy.corrcoef(dataset[:,new_rfs_idx[i]].T,dataset[:,new_rfs_idx[j]].T)[0][1])
print "co-oriented pairs within distance 20:",len(phase_diff_of_colinear20)
pylab.figure()
pylab.title('Histogram of phase difference of co-oriented neurons within distance 20')
pylab.hist(phase_diff_of_colinear20)
pylab.figure()
pylab.title('Histogram of phase difference of co-oriented neurons within distance 40')
pylab.hist(phase_diff_of_colinear30)
pylab.figure()
pylab.title('Histogram of phase difference of co-oriented neurons within distance 60')
pylab.hist(phase_diff_of_colinear50)
pylab.figure()
pylab.title('Histogram of phase difference of co-oriented neurons within distance 100')
pylab.hist(phase_diff_of_colinear100)
pylab.figure()
pylab.title('Histogram of phase difference of co-oriented neurons within distance 40, normalized by the histogram over all pairs')
(h,b) = numpy.histogram(phase_diff_of_colinear30)
(h2,b) = numpy.histogram(phase_diff_of_colinear1000)
pylab.plot((h*1.0/numpy.sum(h))/(h2*1.0/numpy.sum(h2)))
pylab.figure()
pylab.title('Histogram of phase difference of co-oriented neurons within distance 40, minus the histogram over all pairs')
(h,b) = numpy.histogram(phase_diff_of_colinear30)
(h2,b) = numpy.histogram(phase_diff_of_colinear1000)
pylab.plot((h*1.0/numpy.sum(h))-(h2*1.0/numpy.sum(h2)))
pylab.figure()
pylab.title('Histogram of phases')
a = numpy.concatenate([numpy.mat(param[0])[:,5].flatten(),numpy.mat(param[1])[:,5].flatten(),numpy.mat(param[2])[:,5].flatten()],axis=1).flatten()
pylab.hist(a.T)
import contrib.jacommands
pylab.figure()
pylab.title('Correlation between distance and raw RFs average distance')
pylab.plot(d,rf_dist,'ro')
pylab.plot(d,contrib.jacommands.weighted_local_average(d,rf_dist,30),'go')
pylab.figure(facecolor='w')
pylab.title('Correlation between distance and raw RFs correlations')
ax = pylab.axes()
ax.plot(d,c,'ro')
ax.plot(d,contrib.jacommands.weighted_local_average(d,c,30),'go')
ax.axhline(0,linewidth=4)
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(18)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(18)
pylab.xlabel("distance",fontsize=18)
pylab.ylabel("correlation coefficient",fontsize=18)
pylab.savefig('RFsCorrelationsVsDistance.png')
#pylab.plot(d,contrib.jacommands.weighted_local_average(d,numpy.abs(c),30),'bo')
pylab.figure()
pylab.title('Correlation between distance and centered raw RFs correlations')
pylab.plot(d,c_cut,'ro')
pylab.plot(d,contrib.jacommands.weighted_local_average(d,c_cut,30),'go')
pylab.plot(d,contrib.jacommands.weighted_local_average(d,numpy.abs(c_cut),30),'bo')
pylab.figure()
pylab.title('Correlation between distance and orientation preference difference')
pylab.plot(d,orr_diff,'ro')
pylab.axhline(numpy.pi/4)
pylab.plot(d,contrib.jacommands.weighted_local_average(d,orr_diff,30),'go')
#pylab.figure()
#pylab.title('Correlation between firing rate correlations and raw RFs correlations')
#pylab.plot(c,cor_orig,'ro')
#pylab.figure()
#pylab.title('Correlation between firing rate correlations and distance')
#pylab.plot(d,cor_orig,'ro')
#pylab.figure()
#pylab.title('Angular difference against orientation of proximit (<50) co-oriented pairs of cells')
#pylab.scatter(angle50,angle_dif50,s=numpy.abs(corr50)*100,marker='o',c='r',cmap=pylab.cm.RdBu)
#pylab.xlabel("average orienation of pair")
#pylab.ylabel("angular difference")
#pylab.figure()
#pylab.title('Angular difference against orientation of proximit (<100) co-oriented pairs of cells')
#pylab.scatter(angle100,angle_dif100,s=numpy.abs(corr100)*100,marker='o',cmap=pylab.cm.RdBu)
#pylab.xlabel("average orienation of pair")
#pylab.ylabel("angular difference")
pylab.figure()
pylab.title('Histogram of orientations')
pylab.hist(numpy.matrix(params)[:,3])
def distance(locations,x,y):
return numpy.sqrt(numpy.power(locations[x][0] - locations[y][0],2)+numpy.power(locations[x][1] - locations[y][1],2))
def circular_distance(angle_a,angle_b):
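# Distance between two orientations treated as circular variables with period
# pi, so the result always lies in [0, pi/2].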
c= abs(angle_a - angle_b)
if c > numpy.pi/2:
c = numpy.pi-c
return c
def RF_corr_centered(RF1,RF2,fraction,display=True):
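# Correlate two RFs after cropping a window of +/- fraction*size around each
# RF's power-weighted centre of gravity, so the comparison is centre-aligned.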
sx,sy = numpy.shape(RF1)
X = numpy.zeros((sx,sy))
Y = numpy.zeros((sx,sy))
for x in xrange(0,sx):
for y in xrange(0,sy):
X[x][y] = x
Y[x][y] = y
cg1x = numpy.round(numpy.sum(numpy.sum(numpy.multiply(X,numpy.power(RF1,2))))/numpy.sum(numpy.sum(numpy.power(RF1,2))))
cg1y = numpy.round(numpy.sum(numpy.sum(numpy.multiply(Y,numpy.power(RF1,2))))/numpy.sum(numpy.sum(numpy.power(RF1,2))))
cg2x = numpy.round(numpy.sum(numpy.sum(numpy.multiply(X,numpy.power(RF2,2))))/numpy.sum(numpy.sum(numpy.power(RF2,2))))
cg2y = numpy.round(numpy.sum(numpy.sum(numpy.multiply(Y,numpy.power(RF2,2))))/numpy.sum(numpy.sum(numpy.power(RF2,2))))
RF1c = RF1[int(cg1x-sx*fraction):int(cg1x+sx*fraction),int(cg1y-sy*fraction):int(cg1y+sy*fraction)]
RF2c = RF2[int(cg2x-sx*fraction):int(cg2x+sx*fraction),int(cg2y-sy*fraction):int(cg2y+sy*fraction)]
if display:
pylab.figure()
pylab.subplot(2,1,1)
pylab.imshow(RF1c,cmap=pylab.cm.RdBu)
pylab.subplot(2,1,2)
pylab.imshow(RF2c,cmap=pylab.cm.RdBu)
return numpy.corrcoef(RF1c.flatten(),RF2c.flatten())[0][1]
def centre_of_gravity(matrix):
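# Power-weighted centre of gravity of an RF, computed over pixels whose
# magnitude exceeds half the peak magnitude; assumes a square matrix.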
sx,sy = numpy.shape(matrix)
m = matrix*(numpy.abs(matrix)>(0.5*numpy.max(numpy.abs(matrix))))
X = numpy.tile(numpy.arange(0,sx,1),(sy,1))
Y = numpy.tile(numpy.arange(0,sy,1),(sx,1)).T
x = numpy.sum(numpy.multiply(X,numpy.power(m,2)))/numpy.sum(numpy.power(m,2))
y = numpy.sum(numpy.multiply(Y,numpy.power(m,2)))/numpy.sum(numpy.power(m,2))
return (x,y)
def low_power(image,t):
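# RMS contrast of the image after an ideal circular low-pass filter of radius
# t applied in the (shifted) Fourier domain.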
z = numpy.fft.fft2(image)
z = numpy.fft.fftshift(z)
y = numpy.zeros(numpy.shape(z))
(x,trash) = numpy.shape(y)
c = x/2
for i in xrange(0,x):
for j in xrange(0,x):
if numpy.sqrt((c-i)*(c-i) + (c-j)*(c-j)) <= t:
y[i,j]=1.0
z = numpy.multiply(z,y)
z = numpy.fft.ifftshift(z)
#pylab.figure()
#pylab.imshow(y)
#pylab.figure()
#pylab.imshow(image)
#pylab.colorbar()
#pylab.figure()
#pylab.imshow(numpy.fft.ifft2(z).real)
#pylab.colorbar()
return contrast(numpy.fft.ifft2(z).real)
def band_power(image,t,t2):
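# RMS contrast of the image after an annular band-pass filter keeping
# frequencies with radius between t2 and t.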
z = numpy.fft.fft2(image)
z = numpy.fft.fftshift(z)
y = numpy.zeros(numpy.shape(z))
(x,trash) = numpy.shape(y)
c = x/2
for i in xrange(0,x):
for j in xrange(0,x):
if numpy.sqrt((c-i)*(c-i) + (c-j)*(c-j)) <= t:
if numpy.sqrt((c-i)*(c-i) + (c-j)*(c-j)) >= t2:
y[i,j]=1.0
z = numpy.multiply(z,y)
z = numpy.fft.ifftshift(z)
return contrast(numpy.fft.ifft2(z).real)
def contrast(image):
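# Root-mean-square contrast of a zero-meaned image.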
im = image - numpy.mean(image)
return numpy.sqrt(numpy.mean(numpy.power(im,2)))
def run_LIP():
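# Evaluate the reverse-correlation RF model: apply fitted sigmoid output
# nonlinearities, compute prediction power and sparsity statistics, and test
# stimulus identification from the predicted population responses.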
import scipy
from scipy import linalg
f = open("results.dat",'rb')
import pickle
dd = pickle.load(f)
f.close()
node = dd.children[0]
rfs = node.children[0].data["ReversCorrelationRFs"]
pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"])
pred_val_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"])
training_set = numpy.array(node.children[0].data["LaterTrainingSet"])
validation_set = numpy.array(node.children[0].data["LaterValidationSet"])
m = node.children[0].data["LaterModel"]
#training_set = node.data["training_set"]
#validation_set = node.data["validation_set"]
training_inputs = numpy.array(node.data["training_inputs"])
validation_inputs = numpy.array(node.data["validation_inputs"])
raw_validation_set = node.data["raw_validation_set"]
for i in xrange(0,len(raw_validation_set)):
raw_validation_set[i] = numpy.array(m.returnPredictedActivities(numpy.mat(raw_validation_set[i])))
#discard low image mean images
image_mean=[]
for i in xrange(0,len(validation_inputs)):
image_mean.append(numpy.mean(validation_inputs[i]))
idx = numpy.argsort(image_mean)[0:14]
#idx=[]
print idx
print "Deleting trials with low mean of images"
validation_inputs = numpy.delete(validation_inputs, idx, axis = 0)
validation_set = numpy.delete(validation_set, idx, axis = 0)
pred_val_act = numpy.delete(pred_val_act, idx, axis = 0)
for i in xrange(0,len(raw_validation_set)):
raw_validation_set[i] = numpy.delete(raw_validation_set[i], idx, axis = 0)
#compute neurons mean before normalization
neuron_mean = numpy.mean(training_set,axis=0)
neuron_mean_val = numpy.mean(validation_set,axis=0)
#training_set = training_set - numpy.min(training_set)
#validation_set = validation_set - numpy.min(training_set)
#pred_act_t_a = pred_act_t - numpy.min(pred_act_t)
#print numpy.sum(((training_set_a >= 0)*1.0))
#print numpy.sum(((pred_act_t_a >= 0)*1.0))
#training_set_a = numpy.multiply(training_set_a,((training_set_a > 0)*1.0))
#pred_act_t_a = numpy.multiply(pred_act_t_a,((pred_act_t_a > 0)*1.0))
#print numpy.sum(((training_set_a >= 0)*1.0))
#print numpy.sum(((pred_act_t_a >= 0)*1.0))
#of = run_nonlinearity_detection(numpy.mat(training_set),pred_act,10,False)
ofs = fit_sigmoids_to_of(numpy.mat(training_set),numpy.mat(pred_act))
pred_act_t = apply_sigmoid_output_function(numpy.mat(pred_act),ofs)
pred_val_act_t= apply_sigmoid_output_function(numpy.mat(pred_val_act),ofs)
pylab.figure()
pylab.hist(training_set.flatten())
(num_pres,num_neurons) = numpy.shape(training_set)
raw_validation_data_set=numpy.rollaxis(numpy.array(raw_validation_set),2)
signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, validation_set, pred_act, pred_val_act)
signal_power,noise_power,normalized_noise_power,training_prediction_power_t,validation_prediction_power_t = signal_power_test(raw_validation_data_set, training_set, validation_set, pred_act_t, pred_val_act_t)
print "Mean Reg. pseudoinverse prediction power on training set / validation set: ", numpy.mean(training_prediction_power) , " / " , numpy.mean(validation_prediction_power)
print "Mean Reg. pseudoinverse prediction power after TF on training set / validation set: ", numpy.mean(training_prediction_power_t) , " / " , numpy.mean(validation_prediction_power_t)
corr_coef=[]
for i in xrange(0,len(rfs)):
corr_coef.append(numpy.corrcoef(pred_act_t.T[i], training_set.T[i])[0][1])
print "The mean correltation coefficient : ", numpy.mean(corr_coef)
print "Mean variance of training set:", numpy.mean(numpy.power(numpy.std(training_set,axis=0),2))
print "Mean variance of validation set:", numpy.mean(numpy.power(numpy.std(validation_set,axis=0),2))
val_corr_coef = []
measured_neuron_sparsity = []
predicted_neuron_sparsity = []
for i in xrange(0,num_neurons):
measured_neuron_sparsity.append(numpy.power(numpy.mean(training_set.T[i]),2) / numpy.mean(numpy.power(training_set.T[i],2)))
predicted_neuron_sparsity.append(numpy.power(numpy.mean(pred_act_t.T[i]),2) / numpy.mean(numpy.power(pred_act_t.T[i],2)))
val_corr_coef.append(numpy.corrcoef(pred_val_act_t.T[i], validation_set.T[i])[0][1])
measured_pop_sparsity = []
predicted_pop_sparsity = []
for i in xrange(0,num_pres):
measured_pop_sparsity.append(numpy.power(numpy.mean(training_set[i]),2) / numpy.mean(numpy.power(training_set[i],2)))
predicted_pop_sparsity.append(numpy.power(numpy.mean(pred_act_t[i]),2) / numpy.mean(numpy.power(pred_act_t[i],2)))
pylab.figure()
pylab.title('The sparsity of measured and predicted activity per neurons')
pylab.hist(numpy.vstack([numpy.array(measured_neuron_sparsity),numpy.array(predicted_neuron_sparsity)]).T,bins=numpy.arange(0,1.01,0.1),label=['measured','predicted'])
pylab.axvline(numpy.mean(measured_neuron_sparsity),color='b')
pylab.axvline(numpy.mean(predicted_neuron_sparsity),color='g')
pylab.legend()
pylab.figure()
pylab.title('The sparsity of measured and predicted activity per population')
pylab.hist(numpy.vstack([numpy.array(measured_pop_sparsity),numpy.array(predicted_pop_sparsity)]).T,bins=numpy.arange(0,1.01,0.1),label=['measured','predicted'])
pylab.axvline(numpy.mean(measured_pop_sparsity),color='b')
pylab.axvline(numpy.mean(predicted_pop_sparsity),color='g')
pylab.legend()
print "The mean correlation coeficient on validation set: ", numpy.mean(val_corr_coef)
pylab.figure()
pylab.title("Histogram of neural response means")
pylab.hist(neuron_mean)
pylab.figure()
pylab.title("Histogram of training prediction powers")
pylab.hist(training_prediction_power_t)
pylab.figure()
pylab.title("Histogram of validation prediction powers")
pylab.hist(validation_prediction_power_t)
#discard low correlation neurons
r=[]
corr=[]
tresh=[]
print training_prediction_power_t
for i in xrange(0,30):
f = numpy.nonzero((numpy.array(training_prediction_power_t) < ((i-10)/30.0))*1.0)[0]
tresh.append(((i-10)/30.0)) # record the same threshold that was applied above
training_set_good = numpy.delete(training_set, f, axis = 1)
pred_act_good = numpy.delete(pred_act_t, f, axis = 1)
validation_set_good = numpy.delete(validation_set, f, axis = 1)
pred_val_act_good = numpy.delete(pred_val_act_t, f, axis = 1)
(rank,correct,tr) = performIdentification(validation_set_good,pred_val_act_good)
r.append(numpy.mean(rank))
corr.append(correct)
f = numpy.nonzero((numpy.array(training_prediction_power_t) < 0.1)*1.0)[0]
f = []
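# NOTE: the threshold-based filter above is disabled by resetting f to an
# empty list, so no neurons are discarded below.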
print f
training_set = numpy.delete(training_set, f, axis = 1)
pred_act = numpy.delete(pred_act, f, axis = 1)
pred_act_t = numpy.delete(pred_act_t, f, axis = 1)
validation_set = numpy.delete(validation_set, f, axis = 1)
pred_val_act_t = numpy.delete(pred_val_act_t, f, axis = 1)
pred_val_act = numpy.delete(pred_val_act, f, axis = 1)
(num_pres,num_neurons) = numpy.shape(training_set)
(ranks,correct,tr) = performIdentification(validation_set,pred_val_act)
print "Correct", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act,2))
(tf_ranks,tf_correct,pred) = performIdentification(validation_set,pred_val_act_t)
print "Correct", tf_correct , "Mean rank:", numpy.mean(tf_ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act_t,2))
pylab.figure()
pylab.title("Ranks histogram")
pylab.xlabel("ranks")
pylab.hist(tf_ranks,bins=numpy.arange(0,len(tf_ranks),1))
pylab.figure()
pylab.xlabel("tresh")
pylab.ylabel("correct")
pylab.plot(tresh,corr)
pylab.figure()
pylab.xlabel("tresh")
pylab.ylabel("rank")
pylab.plot(tresh,r)
errors=[]
bp=[]
lp5=[]
lp6=[]
lp7=[]
lp8=[]
lp9=[]
lp10=[]
image_contrast=[]
response_mean=[]
image_mean=[]
corr_of_pop_resp=[]
sx,sy = numpy.shape(rfs[0])
if False: # toggle to True to compute the image statistics (errors, bp, lp*, image_contrast, ...) used by the plots below
for i in xrange(0,len(pred_val_act)):
errors.append(numpy.sum(numpy.power(pred_val_act_t[i] - validation_set[i],2)) /
numpy.sum(numpy.power(validation_set[i] - numpy.mean(validation_set[i]),2)))
corr_of_pop_resp.append(numpy.corrcoef(pred_val_act_t[i],validation_set[i])[0][1])
bp.append(band_power(numpy.reshape(validation_inputs[i],(sx,sy)),7,3))
lp5.append(low_power(numpy.reshape(validation_inputs[i],(sx,sy)),5))
lp6.append(low_power(numpy.reshape(validation_inputs[i],(sx,sy)),6))
lp7.append(low_power(numpy.reshape(validation_inputs[i],(sx,sy)),7))
lp8.append(low_power(numpy.reshape(validation_inputs[i],(sx,sy)),8))
lp9.append(low_power(numpy.reshape(validation_inputs[i],(sx,sy)),9))
lp10.append(low_power(numpy.reshape(validation_inputs[i],(sx,sy)),10))
image_contrast.append(contrast(numpy.reshape(validation_inputs[i],(sx,sy))))
response_mean.append(numpy.mean(training_set[i]))
image_mean.append(numpy.mean(numpy.reshape(validation_inputs[i],(sx,sy))))
pylab.figure()
pylab.title("Correlation between prediction error and contrast of band-passed images")
pylab.plot(errors,bp,'ro')
pylab.xlabel("prediction error")
pylab.ylabel("band pass contrast")
pylab.figure()
pylab.title("Correlation between prediction error and contrast of low-passed images")
pylab.plot(errors,lp7,'ro')
pylab.xlabel("prediction error")
pylab.ylabel("low pass contrast")
pylab.figure()
pylab.title("Correlation between prediction error and basic contrast of images")
pylab.plot(errors,image_contrast,'ro')
pylab.xlabel("prediction error")
pylab.ylabel("basic contrast")
pylab.figure()
pylab.title("Correlation between correlation of pop resp and contrast of band-passed images")
pylab.plot(corr_of_pop_resp,bp,'ro')
pylab.xlabel("correlation of pop resp")
pylab.ylabel("band pass contrast")
pylab.figure()
pylab.title("Correlation between correlation of pop resp and contrast of low-passed images")
pylab.plot(corr_of_pop_resp,lp7,'ro')
pylab.xlabel("correlation of pop resp")
pylab.ylabel("low pass contrast")
pylab.figure()
pylab.title("Correlation between correlation of pop resp and basic contrast of images")
pylab.plot(corr_of_pop_resp,image_contrast,'ro')
pylab.xlabel("correlation of pop resp")
pylab.ylabel("basic contrast")
pylab.figure()
pylab.xlabel("neuronal response mean")
pylab.ylabel("correlation coef")
pylab.plot(neuron_mean,corr_coef,'ro')
pylab.figure()
pylab.hist(tf_ranks,bins=numpy.arange(0,len(tf_ranks),1))
pylab.title("Histogram of ranks after application of transfer function")
pylab.figure()
pylab.title("Correlation between correlation of pop resp and rank")
pylab.plot(corr_of_pop_resp,tf_ranks,'ro')
pylab.xlabel("correlation of pop resp")
pylab.ylabel("rank")
pylab.figure()
pylab.title("Correlation between rand and prediction error")
pylab.plot(tf_ranks,errors,'ro')
pylab.ylabel("prediction error")
pylab.xlabel("rank")
pylab.figure()
pylab.title("Correlation between rank error and contrast of band-passed images")
pylab.plot(tf_ranks,bp,'ro')
pylab.xlabel("rank error")
pylab.ylabel("band pass contrast")
pylab.figure()
pylab.title("Correlation between rank error and contrast of low-passed images, tresh=5")
pylab.plot(tf_ranks,lp5,'ro')
pylab.xlabel("rank error")
pylab.ylabel("low pass contrast")
pylab.figure()
pylab.title("Correlation between rank error and contrast of low-passed images, tresh=6")
pylab.plot(tf_ranks,lp6,'ro')
pylab.xlabel("rank error")
pylab.ylabel("low pass contrast")
pylab.figure()
pylab.title("Correlation between rank error and contrast of low-passed images, tresh=7")
pylab.plot(tf_ranks,lp7,'ro')
pylab.xlabel("rank error")
pylab.ylabel("low pass contrast")
pylab.figure()
pylab.title("Correlation between rank error and contrast of low-passed images, tresh=8")
pylab.plot(tf_ranks,lp8,'ro')
pylab.xlabel("rank error")
pylab.ylabel("low pass contrast")
pylab.figure()
pylab.title("Correlation between rank error and contrast of low-passed images, tresh=9")
pylab.plot(tf_ranks,lp9,'ro')
pylab.xlabel("rank error")
pylab.ylabel("low pass contrast")
pylab.figure()
pylab.title("Correlation between rank error and contrast of low-passed images, tresh=10")
pylab.plot(tf_ranks,lp10,'ro')
pylab.xlabel("rank error")
pylab.ylabel("low pass contrast")
pylab.figure()
pylab.title("Correlation between rank error and basic contrast of images")
pylab.plot(tf_ranks,image_contrast,'ro')
pylab.xlabel("rank error")
pylab.ylabel("basic contrast")
print "Bad images"
print image_mean[11]
pylab.figure()
pylab.title("Correlation between rank error and mean of images")
pylab.plot(tf_ranks,image_mean,'ro')
pylab.xlabel("rank error")
pylab.ylabel("image mean")
pylab.figure()
pylab.title("Correlation between rank error and measured response mean")
pylab.plot(tf_ranks,response_mean,'ro')
pylab.xlabel("rank error")
pylab.ylabel("measured response mean")
pylab.figure()
pylab.title("Correlation between correlation of pop resp and response mean")
pylab.plot(corr_of_pop_resp,response_mean,'ro')
pylab.xlabel("correlation of pop resp")
pylab.ylabel("response mean")
pylab.figure()
pylab.title("Correlation between correlation of pop resp and image mean")
pylab.plot(corr_of_pop_resp,image_mean,'ro')
pylab.xlabel("correlation of pop resp")
pylab.ylabel("image mean")
pylab.figure()
pylab.title("Correlation between measured response mean and image mean")
pylab.plot(response_mean,image_mean,'ro')
pylab.xlabel("response mean")
pylab.ylabel("image mean")
pylab.figure()
pylab.title("Correlation between measured response mean and image basic contrast")
pylab.plot(response_mean,image_contrast,'ro')
pylab.xlabel("response mean")
pylab.ylabel("image basic contrast")
pylab.figure()
pylab.title("Correlation between measured response mean and contrast of low passed image t=7")
pylab.plot(response_mean,lp7,'ro')
pylab.xlabel("response mean")
pylab.ylabel("low passed image contrast")
fig = pylab.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = Axes3D(fig)
ax.scatter(lp7,image_mean,tf_ranks)
ax.set_xlabel("contrast")
ax.set_ylabel("image mean")
ax.set_zlabel("rank")
fig = pylab.figure()
ax = Axes3D(fig)
ax.scatter(lp7,corr_of_pop_resp,tf_ranks)
ax.set_xlabel("contrast")
ax.set_ylabel("corr_of_pop_resp")
ax.set_zlabel("rank")
if False: # toggle to True to run the lateral-interaction prediction analysis
(later_pred_act,later_pred_val_act) = later_interaction_prediction(training_set,pred_act_t,validation_set,pred_val_act_t,contrib.dd.DB(None))
#of = run_nonlinearity_detection(numpy.mat(training_set),later_pred_act,10,False)
training_set += 2.0
validation_set += 2.0
ofs = fit_exponential_to_of(numpy.mat(training_set),numpy.mat(later_pred_act)+2.0)
later_pred_act_t = apply_exponential_output_function(later_pred_act+2.0,ofs)
later_pred_val_act_t= apply_exponential_output_function(later_pred_val_act+2.0,ofs)
#later_pred_act_t = later_pred_act
#later_pred_val_act_t = later_pred_val_act
(ranks,correct,pred) = performIdentification(validation_set,later_pred_val_act+2.0)
print "After lateral identification> Correct:", correct , "Mean rank:", numpy.mean(ranks)
(tf_ranks,tf_correct,pred) = performIdentification(validation_set,later_pred_val_act_t)
print "After lateral identification> TFCorrect:", tf_correct , "Mean tf_rank:", numpy.mean(tf_ranks)
#signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, validation_set, later_pred_act, later_pred_val_act)
signal_power,noise_power,normalized_noise_power,training_prediction_power_t,validation_prediction_power_t = signal_power_test(raw_validation_data_set, training_set, validation_set, later_pred_act_t, later_pred_val_act_t)
#return
#for ii in xrange(0,5):
# pylab.figure()
# pylab.hist(later_pred_act_t[:,ii].flatten())
# pylab.figure()
# pylab.hist(training_set[:,ii].flatten())
g = 1
for (x,i) in pred:
#if x==i: continue
pylab.figure()
pylab.subplot(3,1,1)
pylab.imshow(numpy.reshape(validation_inputs[i],(sx,sy)),vmin=-128,vmax=128,interpolation='nearest',cmap=pylab.cm.gray)
pylab.title('Correct')
pylab.axis('off')
pylab.subplot(3,1,2)
pylab.imshow(numpy.reshape(validation_inputs[x],(sx,sy)),vmin=-128,vmax=128,interpolation='nearest',cmap=pylab.cm.gray)
pylab.title('Picked')
pylab.axis('off')
pylab.subplot(3,1,3)
pylab.plot(numpy.array(pred_val_act_t)[i],'ro',label='Predicted activity')
pylab.plot(validation_set[i],'bo',label='Measured activity')
pylab.axhline(y=numpy.mean(validation_set[i]),linewidth=1, color='b')
pylab.axhline(y=numpy.mean(pred_val_act_t[i]),linewidth=1, color='r')
if x != i:
pylab.plot(numpy.array(pred_val_act_t)[x],'go',label='Most similar')
pylab.axhline(y=numpy.mean(numpy.array(pred_val_act_t)[x]),linewidth=1, color='g')
pylab.legend()
for j in xrange(0,len(validation_set[0])):
if(abs(numpy.array(pred_val_act_t)[i][j] - validation_set[i][j]) < abs(numpy.array(pred_val_act_t)[x][j] - validation_set[i][j])):
pylab.axvline(j)
g+=1
def performIdentification(responses,model_responses):
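# Identification task: match each measured population response to the model
# response with the smallest RMSE; returns the per-trial rank of the true
# stimulus, the number of exact matches, and the (picked, true) index pairs.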
correct=0
ranks=[]
pred=[]
for i in xrange(0,len(responses)):
tmp = []
for j in xrange(0,len(responses)):
tmp.append(numpy.sqrt(numpy.mean(numpy.power(numpy.mat(responses)[i]-model_responses[j],2))))
x = numpy.argmin(tmp)
z = tmp[i]
ranks.append(numpy.nonzero((numpy.sort(tmp)==z)*1.0)[0][0])
if (x == i): correct+=1
pred.append((x,i))
return (ranks,correct,pred)
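# A usage sketch (hypothetical names): with validation_set and pred_val_act
# as (n_trials x n_neurons) arrays,
#   ranks, correct, pred = performIdentification(validation_set, pred_val_act)
# a perfect model yields ranks of all zeros and correct == n_trials.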
#from pygene.organism import Organism
#from pygene.gene import FloatGene
#class ComplexCellOrganism(Organism):
#training_set = []
#training_inputs = []
#def fitness(self):
#z,t = numpy.shape(self.training_inputs)
#x = self[str(0)]
#y = self[str(1)]
#sigma = self[str(2)]*0.1
#angle = self[str(3)]*numpy.pi
#p = self[str(4)]*numpy.pi*2
#f = self[str(5)]*10
#ar = self[str(6)]*2.5
#alpha = self[str(7)]
#dx = numpy.sqrt(t)
#dy = dx
#g = numpy.mat(Gabor(bounds=BoundingBox(radius=0.5),frequency=f,x=x-0.5,y=y-0.5,xdensity=dx,ydensity=dy,size=sigma,orientation=angle,phase=p,aspect_ratio=ar)() * alpha)
#r1 = self.training_inputs * g.flatten().T
#return numpy.mean(numpy.power(r1-self.training_set,2))
#rand =numbergen.UniformRandom(seed=513)
#class CCGene(FloatGene):
#randMin=0.0
#randMax=1.0
##def mutate(self):
## self.value = self.value + self.value*2.0*(0.5-rand())
#def GeneticAlgorithms():
#from pygene.gamete import Gamete
#from pygene.population import Population
#f = open("modelfitDB2.dat",'rb')
#import pickle
#dd = pickle.load(f)
#training_set = dd.children[0].data["training_set"][0:1800,:]
#training_inputs = dd.children[0].data["training_inputs"][0:1800,:]
##dd = contrib.dd.DB(None)
##(sizex,sizey,training_inputs,training_set,validation_inputs,validation_set,ff,db_node) = sortOutLoading(dd)
#genome = {}
#for i in range(8):
#genome[str(i)] = CCGene
#ComplexCellOrganism.genome = genome
#ComplexCellOrganism.training_set = numpy.mat(training_set)[:,0]
#ComplexCellOrganism.training_inputs = numpy.mat(training_inputs)
#class CPopulation(Population):
#species = ComplexCellOrganism
#initPopulation = 200
#childCull = 100
#childCount = 500
#incest=10
#i = 0
#pop = CPopulation()
#pylab.ion()
#pylab.hold(False)
#pylab.figure()
#pylab.show._needmain=False
#pylab.show()
#pylab.figure()
#while True:
#pop.gen()
#best = pop.best()
#print "fitness:" , (best.fitness())
#z,t = numpy.shape(training_inputs)
#x = best[str(0)]
#y = best[str(1)]
#sigma = best[str(2)]*0.1
#angle = best[str(3)]*numpy.pi
#p = best[str(4)]*numpy.pi*2
#f = best[str(5)]*10
#ar = best[str(6)]*2.5
#alpha = best[str(7)]
#dx = numpy.sqrt(t)
#dy = dx
#g = numpy.mat(Gabor(bounds=BoundingBox(radius=0.5),frequency=f,x=x-0.5,y=y-0.5,xdensity=dx,ydensity=dy,size=sigma,orientation=angle,phase=p,aspect_ratio=ar)() * alpha)
#m=numpy.max([numpy.abs(numpy.min(g)),numpy.abs(numpy.max(g))])
#pylab.subplot(2,1,1)
#pylab.imshow(g,vmin=-m,vmax=m,cmap=pylab.cm.RdBu,interpolation='nearest')
#pylab.show._needmain=False
#pylab.show()
def runSurrondStructureDetection():
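# Fit a multiplicative surround correction: divide measured activity by the
# sigmoid-transformed RF prediction, regress the resulting ratio on the raw
# images with a regularized inverse, and test whether the correction improves
# stimulus identification.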
f = open("results.dat",'rb')
import pickle
dd = pickle.load(f)
f.close()
node = dd.children[0]
act = node.data["training_set"]
val_act = node.data["validation_set"]
node = node.children[0]
pred_act = numpy.array(node.data["ReversCorrelationPredictedActivities"])
pred_val_act = numpy.array(node.data["ReversCorrelationPredictedValidationActivities"])
dataset = loadSimpleDataSet("Mice/2009_11_04/region3_stationary_180_15fr_103cells_on_response_spikes",1800,103)
(index,data) = dataset
index+=1
dataset = (index,data)
valdataset = loadSimpleDataSet("Mice/2009_11_04/region3_50stim_10reps_15fr_103cells_on_response_spikes",50,103,10)
#(valdataset,trash) = splitDataset(valdataset,40)
training_inputs=generateInputs(dataset,"/home/antolikjan/topographica/topographica/Flogl/DataOct2009","/20090925_image_list_used/image_%04d.tif",__main__.__dict__.get('density', 20),1.8,offset=1000)
validation_inputs=generateInputs(valdataset,"/home/antolikjan/topographica/topographica/Mice/2009_11_04/","/20091104_50stimsequence/50stim%04d.tif",__main__.__dict__.get('density', 20),1.8,offset=0)
#validation_inputs = validation_inputs[0:40]
(sizex,sizey) = numpy.shape(training_inputs[0])
#mask = numpy.zeros(numpy.shape(training_inputs[0]))
#mask[sizex*0.1:sizex*0.9,sizey*0.6:sizey*0.9]=1.0
#for i in xrange(0,1800):
# training_inputs[i] = training_inputs[i][:,sizey/2:sizey]
#for i in xrange(0,50):
# validation_inputs[i] = validation_inputs[i][:,sizey/2:sizey]
(sizex,sizey) = numpy.shape(training_inputs[0])
ofs = fit_sigmoids_to_of(numpy.mat(act),numpy.mat(pred_act))
pred_act = apply_sigmoid_output_function(numpy.mat(pred_act),ofs)
pred_val_act= apply_sigmoid_output_function(numpy.mat(pred_val_act),ofs)
print sizex,sizey
cc = 0.7
#print pred_act
new_target_act = numpy.divide(act+cc,pred_act+cc)
new_val_target_act = numpy.divide(val_act+cc,pred_val_act+cc)
training_inputs = generate_raw_training_set(training_inputs)
validation_inputs = generate_raw_training_set(validation_inputs)
print "Mins"
print numpy.min(pred_act)
print numpy.min(pred_val_act)
print numpy.min(act)
print numpy.min(val_act)
(e,te,c,tc,RFs,pa,pva,corr_coef,corr_coef_tf) = regulerized_inverse_rf(training_inputs,new_target_act,sizex,sizey,__main__.__dict__.get('Alpha',50),numpy.mat(validation_inputs),numpy.mat(new_val_target_act),contrib.dd.DB(None),True)
ofs = run_nonlinearity_detection(numpy.mat(act+cc),numpy.mat(numpy.multiply(pred_act+cc,pa)))
pa_t = apply_output_function(numpy.multiply(pred_act+cc,pa),ofs)
pva_t = apply_output_function(numpy.multiply(pred_val_act+cc,pva),ofs)
print numpy.min(pva)
print numpy.min(pva_t)
(ranks,correct,pred) = performIdentification(val_act+cc,pred_val_act+cc)
print "Without surround", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(val_act+cc - pred_val_act-cc,2))
(ranks,correct,pred) = performIdentification(val_act+cc,numpy.multiply(pred_val_act+cc,pva))
print "With surround", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(val_act+cc - numpy.multiply(pred_val_act+cc,pva),2))
(ranks,correct,pred) = performIdentification(val_act+cc,numpy.multiply(pred_val_act+cc,pva_t))
print "With surround+ TF", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(val_act+cc - numpy.multiply(pred_val_act+cc,pva_t),2))
params={}
params["SurrAnalysis"] = True
node = node.get_child(params)
node.add_data("TrainingInputs",training_inputs,force=True)
node.add_data("ValidationInputs",validation_inputs,force=True)
params={}
params["Alpha"] = __main__.__dict__.get('Alpha',50)
params["Density"] = __main__.__dict__.get('density', 20)
node = node.get_child(params)
node.add_data("SurrRFs",RFs,force=True)
f = open("results.dat",'wb')
pickle.dump(dd,f,-2)
f.close()
def runSTCandSTAtest():
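    """Augments reverse-correlation predictions with the responses of the
    significant STC eigenvectors (each passed through a fitted output
    nonlinearity), trains a single-unit ModelFit per neuron on the combined
    features, and compares identification performance of the simple,
    simple+complex, and simple+complex+TF models."""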
f = open("modelfitDB2.dat",'rb')
import pickle
dd = pickle.load(f)
STCact = dd.children[6].data["STCact"]
STCrfs = dd.children[6].data["STCrfs"]
predicted_activities = dd.children[0].children[0].data["ReversCorrelationPredictedActivities"]
tf_predicted_activities = dd.children[0].children[0].data["ReversCorrelationPredictedActivities+TF"]
predicted_validation_activities = dd.children[0].children[0].data["ReversCorrelationPredictedValidationActivities"]
tf_validation_predicted_activities = dd.children[0].children[0].data["ReversCorrelationPredictedValidationActivities+TF"]
target_act = dd.children[6].data["training_set"]
target_val_act = dd.children[6].data["validation_set"]
training_inputs = dd.children[6].data["training_inputs"]
validation_inputs = dd.children[6].data["validation_inputs"]
model_predicted_activities = numpy.mat(numpy.zeros(numpy.shape(predicted_activities)))
model_validation_predicted_activities = numpy.mat(numpy.zeros(numpy.shape(predicted_validation_activities)))
for (rfs,i) in zip(STCrfs,xrange(0,len(STCrfs))):
(ei,vv,avv,em,ep) = rfs
a = predicted_activities[:,i]
a_v = predicted_validation_activities[:,i]
for j in avv:
r = ei[j,:].real
o = run_nonlinearity_detection((training_inputs*r.T),numpy.mat(target_act)[:,i],10,display=False)
act = apply_output_function(training_inputs*r.T,o)
val_act = apply_output_function(validation_inputs*r.T,o)
a = numpy.concatenate((a,act),axis=1)
a_v = numpy.concatenate((a_v,val_act),axis=1)
mf = ModelFit()
mf.learning_rate = __main__.__dict__.get('lr',0.01)
mf.epochs=__main__.__dict__.get('epochs',100)
mf.num_of_units = 1
mf.init()
(err,stop,min_errors) = mf.trainModel(a,numpy.mat(target_act)[:,i],a_v,numpy.mat(target_val_act)[:,i])
print numpy.shape(numpy.mat(model_predicted_activities)[:,i])
        print numpy.shape(mf.returnPredictedActivities(numpy.mat(a))[:,0])
        model_predicted_activities[:,i] = mf.returnPredictedActivities(numpy.mat(a))[:,0]
        model_validation_predicted_activities[:,i] = mf.returnPredictedActivities(numpy.mat(a_v))[:,0]
#(ranks,correct,cc) = performIdentification(target_act,model_predicted_activities)
#print "After lateral identification> TFCorrect:", tf_correct , "Mean tf_rank:", numpy.mean(tf_ranks)
(ranks,correct,cc) = performIdentification(target_val_act,predicted_validation_activities)
print "Simple Correct:", correct , "Mean tf_rank:", numpy.mean(ranks), "Percentage:" ,correct/(len(ranks)*1.0)*100 ,"%"
(ranks,correct,cc) = performIdentification(target_val_act,model_validation_predicted_activities)
print "Simple + Complex Correct:", correct , "Mean tf_rank:", numpy.mean(ranks), "Percentage:" ,correct/(len(ranks)*1.0)*100 ,"%"
ofs = run_nonlinearity_detection(numpy.mat(target_act),model_predicted_activities,10,display=True)
pred_act_t = apply_output_function(model_predicted_activities,ofs)
pred_val_act_t= apply_output_function(model_validation_predicted_activities,ofs)
(ranks,correct,cc) = performIdentification(target_val_act,pred_val_act_t)
print "Simple + Complex + TF Correct:", correct , "Mean tf_rank:", numpy.mean(ranks), "Percentage:" ,correct/(len(ranks)*1.0)*100 ,"%"
def analyseInhFiring():
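    """Compares firing statistics of putative inhibitory vs. excitatory cells:
    raw and trial-averaged traces, baseline (presumably grey-screen) segments,
    and Fourier power/phase, with excitatory cells plotted in red and
    inhibitory cells in green."""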
dataset = loadSimpleDataSet("Mice/2009716_17_03_10/(20090716_17_03_10)-_orientation_classic_region9_15hz_8oris_4grey_2mov_DFOF",6138,27,transpose=True)
#dataset = loadSimpleDataSet("Mice/2009_11_04/region3_stationary_180_15fr_103cells_on_response_spikes",1800,103,transpose=False)
ts = generateTrainingSet(dataset)
(x,y) = numpy.shape(ts)
#ts = ts[0:6000,:]
inh = numpy.array([2, 20, 22,23,26])
    inh = inh - 1 # convert 1-based cell indices to 0-based
exc = numpy.delete(ts,inh,axis=1)
inh = numpy.delete(ts,numpy.delete(numpy.arange(0,y,1),inh),axis=1)
    exc_base = numpy.concatenate((exc[0:93,:] , exc[-93:,:]),axis=0)
    inh_base = numpy.concatenate((inh[0:93,:] , inh[-93:,:]),axis=0)
exc = exc[93:-93,:]
inh = inh[93:-93,:]
print numpy.shape(exc)
exc_average_trace = [numpy.mean(e.reshape(93,64),axis=0) for e in exc.T]
inh_average_trace = [numpy.mean(i.reshape(93,64),axis=0) for i in inh.T]
print numpy.shape(exc_average_trace)
pylab.figure()
pylab.title("Inhibitory neurons")
for e in exc.T:
pylab.plot(e)
pylab.ylim((-0.07,0.2))
pylab.figure()
pylab.title("Excitatory neurons")
for i in inh.T:
pylab.plot(i)
pylab.ylim((-0.07,0.2))
pylab.figure()
pylab.title("Trace excitatory")
for e in exc_average_trace:
pylab.plot(e)
pylab.ylim((-0.0,0.05))
pylab.figure()
pylab.title("Trace inhibitory")
for i in inh_average_trace:
pylab.plot(i)
pylab.ylim((-0.0,0.05))
pylab.figure()
pylab.title("baseline: Excitatory neurons")
for e in exc_base.T:
pylab.plot(e)
pylab.ylim((-0.05,0.2))
pylab.figure()
pylab.title("baseline: Inhibitory neurons")
for i in inh_base.T:
pylab.plot(i)
pylab.ylim((-0.05,0.2))
pylab.figure()
pylab.title('mean vs max of neurons')
pylab.plot(numpy.mean(exc.T,axis=1),numpy.max(exc.T,axis=1),'ro')
pylab.plot(numpy.mean(inh.T,axis=1),numpy.max(inh.T,axis=1),'go')
pylab.figure()
pylab.title('mean vs variance of neurons')
pylab.plot(numpy.mean(exc.T,axis=1),numpy.var(exc.T,axis=1),'ro')
pylab.plot(numpy.mean(inh.T,axis=1),numpy.var(inh.T,axis=1),'go')
pylab.figure()
pylab.title('mean triggered vs mean at base')
pylab.plot(numpy.mean(exc.T,axis=1),numpy.mean(exc_base.T,axis=1),'ro')
pylab.plot(numpy.mean(inh.T,axis=1),numpy.mean(inh_base.T,axis=1),'go')
exc_fft = [numpy.fft.fft(e) for e in exc.T]
inh_fft = [numpy.fft.fft(i) for i in inh.T]
exc_fft_power = [numpy.abs(e) for e in exc_fft]
inh_fft_power = [numpy.abs(e) for e in inh_fft]
exc_fft_phase = [numpy.angle(e) for e in exc_fft]
inh_fft_phase = [numpy.angle(e) for e in inh_fft]
exc_fft_base = [numpy.fft.fft(e) for e in exc_base.T]
inh_fft_base = [numpy.fft.fft(i) for i in inh_base.T]
exc_fft_power_base = [numpy.abs(e) for e in exc_fft_base]
inh_fft_power_base = [numpy.abs(e) for e in inh_fft_base]
exc_fft_phase_base = [numpy.angle(e) for e in exc_fft_base]
inh_fft_phase_base = [numpy.angle(e) for e in inh_fft_base]
pylab.figure()
pylab.plot(numpy.mean(exc_fft_power,axis=0))
pylab.figure()
pylab.plot(numpy.mean(inh_fft_power,axis=0))
pylab.figure()
pylab.plot(numpy.mean(exc_fft_phase,axis=0))
pylab.figure()
pylab.plot(numpy.mean(inh_fft_phase,axis=0))
pylab.figure()
pylab.title('Power spectrum of baseline of excitatory neurons')
pylab.plot(numpy.mean(exc_fft_power_base,axis=0))
pylab.figure()
pylab.title('Power spectrum of baseline of inhibitory neurons')
pylab.plot(numpy.mean(inh_fft_power_base,axis=0))
pylab.figure()
pylab.title('high feq power of baseline vs mean of triggered')
pylab.plot(numpy.mean(numpy.mat(exc_fft_power_base)[:,7:25],axis=1).T,numpy.mat(exc_fft_power).T[0],'ro')
pylab.plot(numpy.mean(numpy.mat(inh_fft_power_base)[:,7:25],axis=1).T,numpy.mat(inh_fft_power).T[0],'go')
pylab.figure()
pylab.title('high feq power of triggered vs mean of triggered')
pylab.plot(numpy.mean(numpy.mat(exc_fft_power)[:,7:25],axis=1).T,numpy.mat(exc_fft_power).T[0],'ro')
pylab.plot(numpy.mean(numpy.mat(inh_fft_power)[:,7:25],axis=1).T,numpy.mat(inh_fft_power).T[0],'go')
pylab.figure()
pylab.title('high feq power of triggered vs mean of triggered')
pylab.plot(numpy.mean(numpy.mat(exc_fft_power)[:,7:50],axis=1).T,numpy.mat(exc_fft_power).T[0],'ro')
pylab.plot(numpy.mean(numpy.mat(inh_fft_power)[:,7:50],axis=1).T,numpy.mat(inh_fft_power).T[0],'go')
pylab.figure()
pylab.title('high feq power of triggered vs mean of triggered')
pylab.plot(numpy.mean(numpy.mat(exc_fft_power)[:,7:70],axis=1).T,numpy.mat(exc_fft_power).T[0],'ro')
pylab.plot(numpy.mean(numpy.mat(inh_fft_power)[:,7:70],axis=1).T,numpy.mat(inh_fft_power).T[0],'go')
pylab.figure()
pylab.title('fanofactor vs variance of trace')
pylab.plot(numpy.var(exc_average_trace,axis=1)/numpy.mean(exc_average_trace,axis=1),numpy.var(exc_average_trace,axis=1),'ro')
pylab.plot(numpy.var(inh_average_trace,axis=1)/numpy.mean(inh_average_trace,axis=1),numpy.var(inh_average_trace,axis=1),'go')
pylab.figure()
pylab.title('fanofactor vs mean of trace')
pylab.plot(numpy.var(exc_average_trace,axis=1)/numpy.mean(exc_average_trace,axis=1),numpy.mean(exc_average_trace,axis=1),'ro')
pylab.plot(numpy.var(inh_average_trace,axis=1)/numpy.mean(inh_average_trace,axis=1),numpy.mean(inh_average_trace,axis=1),'go')
pylab.figure()
pylab.title('variance vs mean of trace')
pylab.plot(numpy.var(exc_average_trace,axis=1),numpy.mean(exc_average_trace,axis=1),'ro')
pylab.plot(numpy.var(inh_average_trace,axis=1),numpy.mean(inh_average_trace,axis=1),'go')
pylab.figure()
pylab.title('fanofactor vs variance of base')
pylab.plot(numpy.var(exc_base,axis=0)/numpy.mean(exc_base,axis=0),numpy.var(exc_base,axis=0),'ro')
pylab.plot(numpy.var(inh_base,axis=0)/numpy.mean(inh_base,axis=0),numpy.var(inh_base,axis=0),'go')
pylab.figure()
pylab.title('fanofactor vs mean of base')
pylab.plot(numpy.var(exc_base,axis=0)/numpy.mean(exc_base,axis=0),numpy.mean(exc_base,axis=0),'ro')
pylab.plot(numpy.var(inh_base,axis=0)/numpy.mean(inh_base,axis=0),numpy.mean(inh_base,axis=0),'go')
pylab.figure()
pylab.title('variance vs mean of base')
pylab.plot(numpy.var(exc_base,axis=0),numpy.mean(exc_base,axis=0),'ro')
pylab.plot(numpy.var(inh_base,axis=0),numpy.mean(inh_base,axis=0),'go')
pylab.figure()
pylab.title('mean vs 1st harmonic of neurons')
pylab.plot(numpy.mat(exc_fft_power).T[0],numpy.mat(exc_fft_power).T[64],'ro')
pylab.plot(numpy.mat(inh_fft_power).T[0],numpy.mat(inh_fft_power).T[64],'go')
pylab.figure()
pylab.title('1st vs 2nd harmonic of neurons')
pylab.plot(numpy.mat(exc_fft_power).T[64],numpy.mat(exc_fft_power).T[128],'ro')
pylab.plot(numpy.mat(inh_fft_power).T[64],numpy.mat(inh_fft_power).T[128],'go')
pylab.figure()
pylab.title('mean/1st harmonic vs 1st/2nd harmonic of neurons')
pylab.plot(numpy.mat(exc_fft_power).T[0] / numpy.mat(exc_fft_power).T[64], numpy.mat(exc_fft_power).T[64] / numpy.mat(exc_fft_power).T[128] ,'ro')
pylab.plot(numpy.mat(inh_fft_power).T[0] / numpy.mat(inh_fft_power).T[64], numpy.mat(inh_fft_power).T[64] / numpy.mat(inh_fft_power).T[128] ,'go')
pylab.figure()
pylab.title('mean vs power at 1st harmonic of neurons')
pylab.plot(numpy.mean(exc.T,axis=1),numpy.array(numpy.mat(exc_fft_power).T[64])[0],'ro')
pylab.plot(numpy.mean(inh.T,axis=1),numpy.array(numpy.mat(inh_fft_power).T[64])[0],'go')
pylab.figure()
pylab.title('power at harmonic vs phase at harmonic of neurons')
pylab.plot(numpy.mat(exc_fft_power).T[64],numpy.mat(exc_fft_phase).T[64],'ro')
pylab.plot(numpy.mat(inh_fft_power).T[64],numpy.mat(inh_fft_phase).T[64],'go')
print zip(numpy.mean(exc_fft_power,axis=0),numpy.arange(0,x,1))[0:200]
    return numpy.mean(inh_fft_power,axis=0)
def activationPatterns():
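    """Eigen-decomposes the second-moment matrix of population activity to
    extract dominant activation patterns, relates the leading pattern to
    per-neuron prediction quality, and stores it as "ActivityPattern"."""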
from scipy import linalg
f = open("modelfitDB2.dat",'rb')
import pickle
dd = pickle.load(f)
node = dd.children[0]
activities = node.data["training_set"]
validation_activities = node.data["validation_set"]
num_act,len_act = numpy.shape(activities)
CC = numpy.zeros((len_act,len_act))
for a in activities:
CC = CC + numpy.mat(a).T * numpy.mat(a)
CC = CC / num_act
v,la = linalg.eigh(CC)
pylab.figure()
pylab.plot(numpy.sort(numpy.abs(v.real[-30:-1])),'ro')
ind = numpy.argsort(numpy.abs(v.real))
pylab.figure()
pylab.plot(la[ind[-1],:],'ro')
pylab.figure()
pylab.plot(la[ind[-2],:],'ro')
pylab.figure()
pylab.plot(la[ind[-3],:],'ro')
pylab.figure()
pylab.plot(numpy.mat(activities)*numpy.mat(la[ind[-1],:]).T)
pylab.figure()
pylab.hist(numpy.mat(activities)*numpy.mat(la[ind[-1],:]).T)
pylab.figure()
pylab.plot(numpy.mat(activities)*numpy.mat(la[ind[-1],:]).T,numpy.mat(activities)*numpy.mat(la[ind[-2],:]).T,'ro')
node.add_data("ActivityPattern",la[ind[-1],:],force=True)
f = open("modelfitDB2.dat",'wb')
pickle.dump(dd,f,-2)
f.close()
pred_act=node.children[0].data["ReversCorrelationPredictedActivities"]
pred_val_act=node.children[0].data["ReversCorrelationPredictedValidationActivities"]
ofs = fit_sigmoids_to_of(numpy.mat(activities),numpy.mat(pred_act))
pred_act = apply_sigmoid_output_function(numpy.mat(pred_act),ofs)
pred_val_act= apply_sigmoid_output_function(numpy.mat(pred_val_act),ofs)
pylab.figure()
print numpy.shape(1-numpy.divide(numpy.sum(numpy.power(activities-pred_act,2),axis=1),numpy.var(activities,axis=1)))
print numpy.shape(numpy.sum(numpy.power(activities-pred_act,2),axis=1))
print numpy.shape(numpy.var(activities,axis=1)*len_act)
pylab.plot(1-numpy.divide(numpy.sum(numpy.power(activities-pred_act,2),axis=1),numpy.mat(numpy.var(activities,axis=1)*len_act).T).T,numpy.mat(activities)*numpy.mat(la[ind[-1],:]).T,'ro')
(ranks,correct,pred) = performIdentification(validation_activities,pred_val_act)
print correct
pylab.figure()
pylab.plot(ranks,numpy.mat(validation_activities)*numpy.mat(la[ind[-1],:]).T,'ro')
def AdaptationAnalysis():
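    """Tests whether an exponentially weighted history of each neuron's own
    activity (a simple adaptation signal) explains part of the prediction
    error, fitting history_estim per neuron with least squares and comparing
    identification performance before and after."""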
import scipy
from scipy import linalg
f = open("modelfitDatabase.dat",'rb')
import pickle
dd = pickle.load(f)
rfs_area1 = dd.children[0].children[0].data["ReversCorrelationRFs"]
rfs_area2 = dd.children[1].children[0].data["ReversCorrelationRFs"]
pred_act_area1 = dd.children[0].children[0].data["ReversCorrelationPredictedActivities"][0:1260,:]
pred_act_area2 = dd.children[1].children[0].data["ReversCorrelationPredictedActivities"]
training_set_area1 = dd.children[0].data["training_set"][0:1260,:]
training_set_area2 = dd.children[1].data["training_set"]
rfs = numpy.concatenate((rfs_area1,rfs_area2),axis=0)
pred_act = numpy.mat(numpy.concatenate((pred_act_area1,pred_act_area2),axis=1))
training_set = numpy.mat(numpy.concatenate((training_set_area1,training_set_area2),axis=1))
#weights = [0.0,1.0,0.5,0.3,0.1,0.05]
weights = numpy.exp(-numpy.arange(0,100,1.0)/500.0)
weights = numpy.insert(weights,0,0)
print weights
kl = len(weights)-1
hist=[]
for i in xrange(0,158):
hist.append(scipy.convolve(numpy.array(training_set[:,i])[:,0],weights,mode='valid'))
print numpy.shape(numpy.mat(training_set[kl:,:]))
print numpy.shape(numpy.mat(numpy.array(hist)).T)
ofs = run_nonlinearity_detection(numpy.mat(training_set[kl:,:]),numpy.mat(numpy.array(hist)).T,display=True,num_bins=8)
pylab.figure()
pylab.hist(numpy.array(hist).flatten())
pylab.figure()
pylab.hist(numpy.array(training_set).flatten())
pylab.figure()
for i in xrange(0,103):
pylab.subplot(13,13,i+1)
errors = numpy.array((training_set[kl:,i] - pred_act[kl:,i]))[:,0]
pylab.plot(hist[i],errors,'ro')
pylab.figure()
for i in xrange(0,103):
pylab.subplot(13,13,i+1)
t = numpy.array(training_set[kl:,i])[:,0]
pylab.plot(hist[i],t,'ro')
pylab.figure()
for i in xrange(0,103):
pylab.subplot(13,13,i+1)
t = numpy.array(pred_act[kl:,i])[:,0]
pylab.plot(hist[i],t,'ro')
fig = pylab.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = Axes3D(fig)
    ax.scatter(pred_act[kl:,89],hist[89],training_set[kl:,89])
ax.set_xlabel("predicted activity")
ax.set_ylabel("history")
ax.set_zlabel("training_set")
fig = pylab.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = Axes3D(fig)
    ax.scatter(pred_act[kl:,88],hist[88],training_set[kl:,88])
ax.set_xlabel("predicted activity")
ax.set_ylabel("history")
ax.set_zlabel("training_set")
#lets do the gradient
from scipy.optimize import leastsq
xs = []
err = []
for i in xrange(0,158):
        rand = numbergen.UniformRandom(seed=513)
x0 = [0.7,-1.0,1.6,-1.0]
xopt = leastsq(history_error, x0[:], args=(numpy.array(hist)[i],numpy.array(pred_act)[kl:,i],numpy.array(training_set)[kl:,i]),ftol=0.0000000000000000001,xtol=0.0000000000000001,warning=False)
xs.append(xopt[0])
new_error = numpy.sum(history_error(xopt[0],numpy.array(hist)[i],numpy.array(pred_act)[kl:,i],numpy.array(training_set)[kl:,i])**2)
old_error = numpy.sum((numpy.array(pred_act)[kl:,i] - numpy.array(training_set)[kl:,i])**2)
err.append( (old_error - new_error)/old_error * 100)
print "Error decreased by:", numpy.mean(err) , '%'
new_act = []
for i in xrange(0,158):
new_act.append(history_estim(xs[i],numpy.array(hist)[i],numpy.array(pred_act)[kl:,i]))
new_act = numpy.mat(new_act).T
print numpy.shape(new_act[0:40,:])
print numpy.shape(training_set[kl:40+kl,:])
(ranks,correct,tr) = performIdentification(training_set[kl:40+kl,:],pred_act[kl:40+kl,:])
print "Correct:", correct , "Mean rank:", numpy.mean(ranks)
(ranks,correct,tr) = performIdentification(training_set[kl:40+kl,:],new_act[0:40,:])
print "Correct:", correct , "Mean rank:", numpy.mean(ranks)
ofs = run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(pred_act))
pred_act_t = apply_output_function(numpy.mat(pred_act),ofs)
ofs = run_nonlinearity_detection(numpy.mat(training_set[kl:,:]),numpy.mat(new_act))
new_act_t = apply_output_function(numpy.mat(new_act),ofs)
(ranks,correct,tr) = performIdentification(training_set[kl:40+kl,:],pred_act_t[kl:40+kl,:])
print "TFCorrect:", correct , "Mean tf_rank:", numpy.mean(ranks)
(ranks,correct,tr) = performIdentification(training_set[kl:40+kl,:],new_act_t[0:40,:])
print "TFCorrect:", correct , "Mean tf_rank:", numpy.mean(ranks)
pylab.figure()
pylab.hist(ranks)
def history_estim(x,hist,pred_act):
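    """Multiplicative interaction between (offset) predicted activity and the
    adaptation history; x = (a,b,c,d) are the fitted parameters."""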
(a,b,c,d) = list(x)
return numpy.multiply(a*(pred_act+3.0)+b,c*(hist+3.0)+d)
def history_error(x,hist,pred_act,training_set):
return training_set - history_estim(x,hist,pred_act)
def history_der(x,hist,pred_act,training_set):
    # Jacobian of history_estim with respect to (a,b,c,d); the Jacobian of
    # history_error is the negative of this. (Not passed to leastsq above.)
    (a,b,c,d) = list(x)
    ad = numpy.multiply(pred_act+3.0, c*(hist+3.0)+d)
    bd = c*(hist+3.0)+d
    cd = numpy.multiply(a*(pred_act+3.0)+b, hist+3.0)
    dd = a*(pred_act+3.0)+b
    return numpy.vstack((ad,bd,cd,dd)).T
def CompareRegressions():
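    """Compares regularized pseudoinverse RFs and predictions against ASD/ASDRD
    estimates (presumably automatic smoothness/relevance determination):
    plots both RF sets and runs signal-power and identification tests on
    averaged and single-trial validation data, before and after the fitted
    transfer function."""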
import scipy
from scipy import linalg
f = open("results.dat",'rb')
import pickle
dd = pickle.load(f)
node = dd.children[0]
rfs = node.children[0].data["ReversCorrelationRFs"]
pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"])
pred_val_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"])
pred_act_t = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities+TF"])
pred_val_act_t = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities+TF"])
training_set = node.data["training_set"]
validation_set = node.data["validation_set"]
#training_set = numpy.array(node.children[0].data["LaterTrainingSet"])
#validation_set = numpy.array(node.children[0].data["LaterValidationSet"])
#m = node.children[0].data["LaterModel"]
training_inputs = node.data["training_inputs"]
validation_inputs = node.data["validation_inputs"]
raw_validation_set = node.data["raw_validation_set"]
#for i in xrange(0,len(raw_validation_set)):
# raw_validation_set[i] = numpy.array(m.returnPredictedActivities(numpy.mat(raw_validation_set[i])))
asd_rfs = node.children[2].data["RFs"]
# REGINV
pylab.figure()
m = numpy.max(numpy.abs(rfs))
for i in xrange(0,numpy.shape(training_set)[1]):
pylab.subplot(10,11,i+1)
w = rfs[i]
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
# ASD
pylab.figure()
    size = int(numpy.sqrt(numpy.shape(asd_rfs)[1]))
for i in xrange(0,numpy.shape(training_set)[1]):
m = numpy.max(numpy.abs(asd_rfs[i]))
pylab.subplot(10,11,i+1)
w = numpy.array(asd_rfs[i]).reshape(size,size)
pylab.show._needmain=False
pylab.imshow(w,vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
pylab.axis('off')
# create predicted activities
asd_pred_act = numpy.array(numpy.mat(training_inputs) * numpy.mat(asd_rfs).T)
asd_pred_val_act = numpy.array(numpy.mat(validation_inputs) * numpy.mat(asd_rfs).T)
ofs = fit_sigmoids_to_of(numpy.mat(training_set),numpy.mat(asd_pred_act))
asd_pred_act_t = apply_sigmoid_output_function(numpy.mat(asd_pred_act),ofs)
asd_pred_val_act_t= apply_sigmoid_output_function(numpy.mat(asd_pred_val_act),ofs)
raw_validation_data_set=numpy.rollaxis(numpy.array(raw_validation_set),2)
signal_power,noise_power,normalized_noise_power,reg_training_prediction_power,reg_validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, validation_set, pred_act, pred_val_act)
pylab.suptitle('Signal power estimation for Pseudo inverse. Averaged trials validation set.')
print "Mean Reg. pseudoinverse POSITIVE prediction power on training set / validation set(averaged) :", numpy.mean(reg_training_prediction_power * (reg_training_prediction_power > 0)) , " / " , numpy.mean(reg_validation_prediction_power * (reg_validation_prediction_power > 0))
signal_power,noise_power,normalized_noise_power,asd_training_prediction_power,asd_validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, validation_set, asd_pred_act, asd_pred_val_act)
pylab.suptitle('Signal power estimation for ASDRD Averaged trials validation set.')
print "Mean ASD POSITIVE prediction power on training set / validation set(averaged) :", numpy.mean(asd_training_prediction_power * (asd_training_prediction_power > 0)) , " / " , numpy.mean(asd_validation_prediction_power * (asd_validation_prediction_power > 0))
pylab.figure()
pylab.title('Before TF averaged trials')
pylab.plot(reg_validation_prediction_power,asd_validation_prediction_power,'ro')
pylab.plot([-2.0,2.0],[-2.0,2.0])
pylab.axis([-2.0,2.0,-2.0,2.0])
    pylab.xlabel('Regularized inverse prediction power')
pylab.ylabel('ASDRD prediction power')
signal_power,noise_power,normalized_noise_power,reg_training_prediction_power,reg_validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, raw_validation_set[0], pred_act, pred_val_act)
pylab.suptitle('Signal power estimation for Pseudo inverse. Single trial validation set.')
print "Mean Reg. pseudoinverse POSITIVE prediction power on training set / validation set : ", numpy.mean(reg_training_prediction_power * (reg_training_prediction_power > 0)) , " / " , numpy.mean(reg_validation_prediction_power * (reg_validation_prediction_power > 0))
signal_power,noise_power,normalized_noise_power,asd_training_prediction_power,asd_validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, raw_validation_set[0], asd_pred_act, asd_pred_val_act)
pylab.suptitle('Signal power estimation for ASDRD. Single trial validation set.')
print "Mean ASD POSITIVE prediction power on training set / validation set : ", numpy.mean(asd_training_prediction_power * (asd_training_prediction_power > 0)) , " / " , numpy.mean(asd_validation_prediction_power * (asd_validation_prediction_power > 0))
pylab.figure()
pylab.title('Before TF single trial')
pylab.plot(reg_validation_prediction_power,asd_validation_prediction_power,'ro')
pylab.plot([-2.0,2.0],[-2.0,2.0])
pylab.axis([-2.0,2.0,-2.0,2.0])
    pylab.xlabel('Regularized inverse prediction power')
pylab.ylabel('ASDRD prediction power')
(ranks,correct,pred) = performIdentification(raw_validation_set[0],pred_val_act)
print "Reg. pseudoinverse identification single trial> Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(raw_validation_set[0] - pred_val_act,2))
(ranks,correct,pred) = performIdentification(raw_validation_set[0],asd_pred_val_act)
print "ASD identification single trial> Correct:", correct , "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(raw_validation_set[0] - asd_pred_val_act,2))
(ranks,correct,pred) = performIdentification(validation_set,pred_val_act)
print "Reg. pseudoinverse identification trial average> Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act,2))
(ranks,correct,pred) = performIdentification(validation_set,asd_pred_val_act)
print "ASD identification trial average> Correct:", correct , "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(validation_set - asd_pred_val_act,2))
# with transfer function analysis
print '\n\n\nAFTER TRANSFER FUNCTION APPLICATION \n '
signal_power,noise_power,normalized_noise_power,reg_training_prediction_power,reg_validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, validation_set, pred_act_t, pred_val_act_t)
pylab.suptitle('Signal power estimation for Pseudo inverse. Averaged trials validation set with applied transfer function.')
print "Mean Reg. pseudoinverse POSITIVE prediction power on training set / validation set(averaged): ", numpy.mean(reg_training_prediction_power * (reg_training_prediction_power > 0)) , " / " , numpy.mean(reg_validation_prediction_power * (reg_validation_prediction_power > 0))
signal_power,noise_power,normalized_noise_power,asd_training_prediction_power,asd_validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, validation_set, asd_pred_act_t, asd_pred_val_act_t)
pylab.suptitle('Signal power estimation for ASDRD. Averaged trials validation set with applied transfer function.')
print "Mean ASD POSITIVE prediction power on training set / validation set(averaged): ", numpy.mean(asd_training_prediction_power * (asd_training_prediction_power > 0)) , " / " , numpy.mean(asd_validation_prediction_power * (asd_validation_prediction_power > 0))
pylab.figure()
pylab.title('After TF averaged trials')
pylab.plot(reg_validation_prediction_power,asd_validation_prediction_power,'ro')
pylab.plot([-2.0,2.0],[-2.0,2.0])
pylab.axis([-2.0,2.0,-2.0,2.0])
    pylab.xlabel('Regularized inverse prediction power')
pylab.ylabel('ASDRD prediction power')
signal_power,noise_power,normalized_noise_power,reg_training_prediction_power,reg_validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, raw_validation_set[0], pred_act_t, pred_val_act_t)
pylab.suptitle('Signal power estimation for Pseudo inverse. Single trial validation set with applied transfer function.')
print "Mean Reg. pseudoinverse POSITIVE prediction power on training set / validation set: ", numpy.mean(reg_training_prediction_power * (reg_training_prediction_power > 0)) , " / " , numpy.mean(reg_validation_prediction_power * (reg_validation_prediction_power > 0))
signal_power,noise_power,normalized_noise_power,asd_training_prediction_power,asd_validation_prediction_power = signal_power_test(raw_validation_data_set, training_set, raw_validation_set[0], asd_pred_act_t, asd_pred_val_act_t)
pylab.suptitle('Signal power estimation for ASDRD. Single trial validation set with applied transfer function.')
print "Mean ASD POSITIVE prediction power on training set / validation set: ", numpy.mean(asd_training_prediction_power * (asd_training_prediction_power > 0)) , " / " , numpy.mean(asd_validation_prediction_power * (asd_validation_prediction_power > 0))
pylab.figure()
pylab.title('After TF single trial')
pylab.plot(reg_validation_prediction_power,asd_validation_prediction_power,'ro')
pylab.plot([-2.0,2.0],[-2.0,2.0])
pylab.axis([-2.0,2.0,-2.0,2.0])
    pylab.xlabel('Regularized inverse prediction power')
pylab.ylabel('ASDRD prediction power')
(ranks,correct,pred) = performIdentification(raw_validation_set[0],pred_val_act_t)
print "Reg. pseudoinverse identification single trial> Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(raw_validation_set[0] - pred_val_act_t,2))
(ranks,correct,pred) = performIdentification(raw_validation_set[0],asd_pred_val_act_t)
print "ASD identification single trial> Correct:", correct , "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(raw_validation_set[0] - asd_pred_val_act_t,2))
(ranks,correct,pred) = performIdentification(validation_set,pred_val_act_t)
print "Reg. pseudoinverse identification trial average> Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act_t,2))
(ranks,correct,pred) = performIdentification(validation_set,asd_pred_val_act_t)
print "ASD identification trial average> Correct:", correct , "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(validation_set - asd_pred_val_act_t,2))
pylab.show()
def DeepLook():
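    """Per-neuron inspection: for a hand-picked set of neurons, shows the
    regularized-inverse, ASD and ASDRD RFs alongside the validation stimuli
    that produced the largest post-TF prediction errors."""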
import scipy
from scipy import linalg
f = open("results.dat",'rb')
import pickle
dd = pickle.load(f)
node = dd.children[0]
rfs = node.children[0].data["ReversCorrelationRFs"]
sx,sy = numpy.shape(rfs[0])
asd_rfs = node.children[1].data["RFs"]
asdrd_rfs = node.children[2].data["RFs"]
pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"])
pred_val_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"])
training_set_old = node.data["training_set"]
validation_set_old = node.data["validation_set"]
training_set = numpy.array(node.children[0].data["LaterTrainingSet"])
validation_set = numpy.array(node.children[0].data["LaterValidationSet"])
training_inputs = node.data["training_inputs"]
validation_inputs = node.data["validation_inputs"]
print "Mean of old one:",numpy.mean(training_set_old) , " Variance of old one:", numpy.mean(numpy.var(training_set_old,axis=0))
print "Mean of modified:",numpy.mean(training_set) , " Variance of modified:", numpy.mean(numpy.var(training_set,axis=0))
print "Mean of predicted:",numpy.mean(pred_act) , " Variance of predicted:", numpy.mean(numpy.var(pred_act,axis=0))
#(e,te,c,tc,RFs,pred_act,pred_val_act,corr_coef,corr_coef_tf) = regulerized_inverse_rf(training_inputs,training_set,sx,sy,__main__.__dict__.get('Alpha',50),numpy.mat(validation_inputs),validation_set,contrib.dd.DB2(None),True)
valdataset = loadSimpleDataSet("Mice/2009_11_04/region3_50stim_10reps_15fr_103cells_on_response_spikes",50,103,10)
validation_inputs_big=generateInputs(valdataset,"/home/antolikjan/topographica/topographica/Mice/2009_11_04/","/20091104_50stimsequence/50stim%04d.tif",__main__.__dict__.get('density', 0.4),1.8,offset=0)
ofs = fit_sigmoids_to_of(numpy.mat(training_set),numpy.mat(pred_act),display=False)
pred_act_t = apply_sigmoid_output_function(numpy.mat(pred_act),ofs)
pred_val_act_t= apply_sigmoid_output_function(numpy.mat(pred_val_act),ofs)
val_errors = numpy.array(numpy.power(pred_val_act - validation_set,2))
val_errors_t = numpy.array(numpy.power(pred_val_act_t - validation_set,2))
neurons = [0,1,8,15,17,19,24,25,27,33,37,38]#,40,42,44,45,46,47,48,53,55,56,58,61,65,65,75,85,93,96]
ssy,ssx = numpy.shape(validation_inputs_big[0])
(ranks,correct,pred) = performIdentification(validation_set,pred_val_act)
print "Without TF. Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act,2))
(ranks,correct,pred) = performIdentification(validation_set,pred_val_act_t)
print "With TF. Correct:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act_t,2))
for n in neurons:
s = numpy.argsort(val_errors_t[:,n])[::-1]
f = pylab.figure()
tn = 8
ax = f.add_axes([0.01,0.75,1.0/(tn+1)-0.02,0.24])
m = numpy.max([-numpy.min(rfs[n]),numpy.max(rfs[n])])
pylab.imshow(rfs[n],vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
ax = f.add_axes([0.01,0.5,1.0/(tn+1)-0.02,0.24])
m = numpy.max([-numpy.min(asd_rfs[n]),numpy.max(asd_rfs[n])])
pylab.imshow(numpy.reshape(asd_rfs[n],(sx,sy)),vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
ax = f.add_axes([0.01,0.25,1.0/(tn+1)-0.02,0.24])
m = numpy.max([-numpy.min(asdrd_rfs[n]),numpy.max(asdrd_rfs[n])])
pylab.imshow(numpy.reshape(asdrd_rfs[n],(sx,sy)),vmin=-m,vmax=m,interpolation='nearest',cmap=pylab.cm.RdBu)
m = numpy.max([-numpy.min(rfs[n]),numpy.max(rfs[n])])
for i in xrange(0,tn):
ax = f.add_axes([(i+1.0)/(tn+1.0),0.75,1.0/(tn+1)-0.02,0.24])
ax.imshow(validation_inputs_big[s[i]],vmin=0,vmax=256,interpolation='nearest',cmap=pylab.cm.gray)
ax.axis('off')
ax.add_line(matplotlib.lines.Line2D([ssx*0.3,ssx*0.3],[ssy*0.0,ssy*1.0]))
ax.add_line(matplotlib.lines.Line2D([ssx*0.8,ssx*0.8],[ssy*0.0,ssy*1.0]))
ax = f.add_axes([(i+1.0)/(tn+1.0),0.5,1.0/(tn+1)-0.02,0.24])
ax.imshow(numpy.multiply(numpy.reshape(validation_inputs[s[i]],(sx,sy)),numpy.abs(rfs[n])/m),vmin=-135,vmax=135,interpolation='nearest',cmap=pylab.cm.gray)
ax.axis('off')
ax = f.add_axes([(i+1.0)/(tn+1.0),0.25,1.0/(tn+1)-0.02,0.15])
ax.plot(validation_set[:,n],'ro')
ax.plot(pred_val_act_t[:,n],'bo')
ax.axvline(s[i])
ax = f.add_axes([(i+1.0)/(tn+1.0),0.05,1.0/(tn+1)-0.02,0.15])
ax.plot(numpy.mean(validation_set,axis=1),'ro')
ax.axvline(s[i])
def SuperModel():
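    """Extends the linear-RF prediction with an extra per-neuron signal (here
    the TF-corrected predictions of another model node; rotated-RF variants
    are kept behind an 'if False' switch), fits supermodel_estim per neuron
    from several random restarts, and compares identification and
    signal-power results before and after."""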
import scipy
from scipy import linalg
f = open("results.dat",'rb')
import pickle
dd = pickle.load(f)
node = dd.children[0]
rfs = node.children[0].data["ReversCorrelationRFs"][0:103]
#fitted_rfs = node.children[0].data["FittedRFs"][0:103]
#SurrRFs = node.children[0].children[0].children[4].data["SurrRFs"]
#SurrTI = node.children[0].children[0].data["TrainingInputs"]
#SurrVI = node.children[0].children[0].data["ValidationInputs"]
pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"][:,0:103])
pred_val_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"][:,0:103])
training_set = node.data["training_set"][:,0:103]
validation_set = node.data["validation_set"][:,0:103]
#training_set = numpy.array(node.children[0].data["LaterTrainingSet"])
#validation_set = numpy.array(node.children[0].data["LaterValidationSet"])
training_inputs = node.data["training_inputs"]
validation_inputs = node.data["validation_inputs"]
raw_validation_set = node.data["raw_validation_set"]
rf_mag = [numpy.sum(numpy.power(r,2)) for r in rfs]
#discard ugly RFs
pylab.figure()
pylab.hist(rf_mag)
#pylab.show()
to_delete = numpy.nonzero((numpy.array(rf_mag) < 0.000000)*1.0)[0]
print to_delete
rfs = numpy.delete(rfs,to_delete,axis=0)
pred_act = numpy.delete(pred_act,to_delete,axis=1)
pred_val_act = numpy.delete(pred_val_act,to_delete,axis=1)
training_set = numpy.delete(training_set,to_delete,axis=1)
validation_set = numpy.delete(validation_set,to_delete,axis=1)
#for i in xrange(0,len(raw_validation_set)):
# raw_validation_set[i] = numpy.delete(raw_validation_set[i],to_delete,axis=1)
(sx,sy) = numpy.shape(rfs[0])
#pred_act = numpy.array(numpy.mat(training_inputs)*numpy.mat(numpy.reshape(fitted_rfs,(103,sx*sy)).T))
#pred_val_act = numpy.array(numpy.mat(validation_inputs)*numpy.mat(numpy.reshape(fitted_rfs,(103,sx*sy)).T))
ofs = fit_sigmoids_to_of(numpy.mat(training_set),numpy.mat(pred_act))
pred_act_t = apply_sigmoid_output_function(numpy.mat(pred_act),ofs)
pred_val_act_t= apply_sigmoid_output_function(numpy.mat(pred_val_act),ofs)
c=[]
for i in xrange(0,103-len(to_delete)):
c.append(numpy.corrcoef(validation_set[:,i],pred_val_act_t[:,i])[0][1])
print c
print numpy.mean(c)
#return
#z = numpy.argsort(numpy.mean(training_set,axis=0))
#pylab.figure()
#pylab.plot(numpy.mean(training_set[:,z[0:10]],axis=1),numpy.mean(training_set[:,z[80:103]],axis=1),'ro')
#pylab.figure()
#pylab.plot(numpy.mean(pred_act[:,z[0:10]],axis=1),numpy.mean(pred_act[:,z[80:103]],axis=1),'ro')
#pylab.figure()
#pylab.plot(numpy.mean(pred_act_t[:,z[0:10]],axis=1),numpy.mean(pred_act_t[:,z[80:103]],axis=1),'ro')
#temp = numpy.reshape(rfs,(1800,sx*sy))
#var = numpy.mat(training_inputs) * numpy.mat(numpy.abs(temp))
#val_var = numpy.mat(validation_inputs) * numpy.mat(temp)
#var = numpy.var(training_inputs,axis=1)
#val_var = numpy.var(validation_inputs,axis=1)
#dataset= loadSimpleDataSet('/home/antolikjan/topographica/topographica/Mice/20090925_14_36_01/spont_filtered.dat',2852,50,num_rep=1,num_frames=1,offset=0,transpose=False)
#spont = generateTrainingSet(dataset)
spont_corr,p = pearcorr(training_set)
print numpy.shape(training_set)
print numpy.shape(spont_corr)
print numpy.shape(numpy.eye(len(rfs)))
spont_corr = numpy.multiply(numpy.multiply(spont_corr,abs(numpy.eye(len(rfs))-1.0)),(p<0.000001)*1.0)
pylab.figure()
pylab.imshow(spont_corr)
pylab.colorbar()
#var1=var2=var3 = numpy.array(numpy.mat(training_set)*numpy.mat(spont_corr))
var1=var2=var3 = numpy.array(node.children[3].data["ReversCorrelationPredictedActivities+TF"][:,0:103])
#training_set = training_set-numpy.array(numpy.mat(training_set)*numpy.mat(spont_corr))
#validation_set = validation_set-numpy.array(numpy.mat(validation_set)*numpy.mat(spont_corr))
#val_var = numpy.zeros(numpy.shape(validation_set))
#val_var1 = val_var2 = val_var3= numpy.array(numpy.mat(validation_set)*numpy.mat(spont_corr))
val_var1 = val_var2 = val_var3 = numpy.array(node.children[3].data["ReversCorrelationPredictedValidationActivities+TF"][:,0:103])
#var = numpy.mat(SurrTI)*numpy.mat(numpy.reshape(SurrRFs,(103,sx*sy)).T)
#val_var = numpy.mat(SurrVI)*numpy.mat(numpy.reshape(SurrRFs,(103,sx*sy)).T)
#try what effect rotated RFs could have
if False:
rfs_90 = []
rfs_180 = []
rfs_270 = []
for rf in rfs:
r90 = rot90_around_center_of_gravity(rf)
#r180 = rot90_around_center_of_gravity(r90)
#r270 = rot90_around_center_of_gravity(r180)
r180 = rf*-1.0
r270 = r90*-1.0
rfs_90.append(r90.flatten())
rfs_180.append(r180.flatten())
rfs_270.append(r270.flatten())
var1 = numpy.mat(training_inputs)*numpy.mat(rfs_90).T
val_var1 = numpy.mat(validation_inputs)*numpy.mat(rfs_90).T
var2 = numpy.mat(training_inputs)*numpy.mat(rfs_180).T
val_var2 = numpy.mat(validation_inputs)*numpy.mat(rfs_180).T
var3 = numpy.mat(training_inputs)*numpy.mat(rfs_270).T
val_var3 = numpy.mat(validation_inputs)*numpy.mat(rfs_270).T
from scipy.optimize import leastsq
xs = []
err = []
for i in xrange(0,len(rfs)):
#print i
min_err = 100000000000000000
xo=True
for r in xrange(0,10):
            rand = numbergen.UniformRandom(seed=513)
r0 = (numpy.array([rand(),rand(),rand(),rand(),rand()])-0.5)*2.0
x0 = [0.0,0.0,0.0,0.0,1.0]
rand_scale = [3.0,3.0,3.0,3.0,3.0]
x0 = x0 + numpy.multiply(r0,rand_scale)
xopt = leastsq(supermodel_error, x0[:], args=(numpy.array(var1)[:,i],numpy.array(var2)[:,i],numpy.array(var3)[:,i],numpy.array(pred_act)[:,i],numpy.array(training_set)[:,i]),ftol=0.0000000000000000001,xtol=0.0000000000000001,warning=False)
er = numpy.sum(supermodel_error(xopt[0],numpy.array(var1)[:,i],numpy.array(var2)[:,i],numpy.array(var3)[:,i],numpy.array(pred_act)[:,i],numpy.array(training_set)[:,i])**2)
if min_err > er:
min_err = er
xo=xopt[0]
xs.append(xo)
new_error = numpy.sum(supermodel_error(xo,numpy.array(var1)[:,i],numpy.array(var2)[:,i],numpy.array(var3)[:,i],numpy.array(pred_act)[:,i],numpy.array(training_set)[:,i])**2)
old_error = numpy.sum((numpy.array(pred_act)[:,i] - numpy.array(training_set)[:,i])**2)
err.append( (old_error - new_error)/old_error * 100)
print numpy.mat(xs)
print "Training error decreased by:", numpy.mean(err) , '%'
new_val_act = apply_supermodel_estim(xs,val_var1,val_var2,val_var3,pred_val_act)
new_act = apply_supermodel_estim(xs,var1,var2,var3,pred_act)
#new_act = pred_act
#new_val_act = pred_val_act
ofs = fit_sigmoids_to_of(numpy.mat(training_set),numpy.mat(new_act))
new_val_act_t= numpy.array(apply_sigmoid_output_function(numpy.mat(new_val_act),ofs))
new_act_t= numpy.array(apply_sigmoid_output_function(numpy.mat(new_act),ofs))
pylab.figure()
for i in xrange(0,103):
pylab.subplot(11,11,i+1)
pylab.plot(pred_val_act[:,i],validation_set[:,i],'o')
(ranks,correct,pred) = performIdentification(validation_set,pred_val_act)
print "Original:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act,2))
(ranks,correct,pred) = performIdentification(validation_set,pred_val_act_t)
print "TF+Original:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act_t,2))
#validation_set = validation_set-numpy.array(numpy.mat(validation_set)*numpy.mat(spont_corr))*numpy.array(numpy.tile(numpy.mat(xs)[:,4].T,(len(validation_set),1)))
#(e,te,c,tc,RFs,pred_act,pred_val_act,corr_coef,corr_coef_tf) = regulerized_inverse_rf(numpy.array(training_inputs),numpy.array(training_set),sx,sy,__main__.__dict__.get('Alpha',50),numpy.mat(validation_inputs),numpy.mat(validation_set),contrib.dd.DB(None),True)
#examine_correlated_noise(validation_set,raw_validation_set,xs,pred_val_act,spont_corr)
(ranks,correct,pred) = performIdentification(validation_set,new_val_act)
print "After contrast normalization:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - new_val_act,2))
(ranks,correct,pred) = performIdentification(validation_set,new_val_act_t)
print "After contrast normalization:+TF", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - new_val_act_t,2))
raw_validation_data_set=numpy.rollaxis(numpy.array(raw_validation_set),2)
print 'ORIGINAL:'
signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power = signal_power_test(raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set), pred_act, pred_val_act)
signal_power,noise_power,normalized_noise_power,training_prediction_power_t,validation_prediction_power_t = signal_power_test(raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set), pred_act_t, pred_val_act_t)
print "Prediction power on training set / validation set: ", numpy.mean(training_prediction_power*(training_prediction_power>0)) , " / " , numpy.mean(validation_prediction_power)
print "Prediction power after TF on training set / validation set: ", numpy.mean(training_prediction_power_t) , " / " , numpy.mean(validation_prediction_power_t)
print 'NEW:'
signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power = signal_power_test(raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set), new_act, new_val_act)
signal_power,noise_power,normalized_noise_power,training_prediction_power_t,validation_prediction_power_t = signal_power_test(raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set), new_act_t, new_val_act_t)
print "Prediction power on training set / validation set: ", numpy.mean(training_prediction_power*(training_prediction_power>0)) , " / " , numpy.mean(validation_prediction_power)
print "Prediction power after TF on training set / validation set: ", numpy.mean(training_prediction_power_t) , " / " , numpy.mean(validation_prediction_power_t)
def supermodel_estim(x,var1,var2,var3,pred_act):
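    """Currently a linear combination a*pred_act + e*var1; the commented-out
    lines are alternative interaction terms that were tried."""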
(a,b,c,d,e) = list(x)
#return numpy.divide(a*pred_act+b,c*var+d)
#zz= (a*numpy.max([numpy.zeros(numpy.shape(pred_act)),pred_act+b],axis=0) + c*numpy.max([numpy.zeros(numpy.shape(pred_act)),-pred_act+d],axis=0))+e*var
#zz= numpy.divide(a*pred_act,1.0 + numpy.max([numpy.zeros(numpy.shape(pred_act)),e*numpy.abs(var1)+b]))
#zz = numpy.divide(a*pred_act,numpy.max([numpy.ones(numpy.shape(pred_act)),e + b*var1 + c*var2 + d*var3],axis=0))
#zz = numpy.multiply(a*pred_act,1.0+ d*numpy.abs(var1)+b)
zz = a*pred_act + e*var1
#zz = 1.0*var1
#zz = a*(numpy.shape(pred_act)+b)*var
#return numpy.divide(zz+e,f*var+g)
return zz
def supermodel_error(x,var1,var2,var3,pred_act,training_set):
return training_set - supermodel_estim(x,var1,var2,var3,pred_act)
def apply_supermodel_estim(params,var1,var2,var3,pred_act):
new_act = []
for i in xrange(0,len(params)):
new_act.append(supermodel_estim(params[i],numpy.array(var1)[:,i],numpy.array(var2)[:,i],numpy.array(var3)[:,i],numpy.array(pred_act)[:,i]))
return numpy.array(numpy.mat(new_act).T)
def examine_correlated_noise(validation_set,raw_validation_set,params,pred_val_act,spont_corr):
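    """Checks whether the correlated-noise correction helps more when derived
    from the same trial than from other trials or from the trial average,
    using MSE against raw single-trial validation responses."""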
raw_later_act=[]
for raw in raw_validation_set:
var=numpy.mat(raw)*numpy.mat(spont_corr)
raw_later_act.append(var)
# MSE with predictions from the same trials
m=[]
m_orig=[]
for (rawlat,rawvs) in zip(raw_later_act,raw_validation_set):
m.append(MSE(apply_supermodel_estim(params,rawlat,rawlat,rawlat,pred_val_act),rawvs))
m_orig.append(MSE(pred_val_act,rawvs))
print "MSE from matching trials, ORIGINAL:",numpy.mean(m_orig)
print "MSE from averaged trials, ORIGINAL:",MSE(pred_val_act,numpy.mean(numpy.array(raw_validation_set),0))
print "MSE from matching trials:",numpy.mean(m)
var=numpy.mat(numpy.mean(numpy.array(raw_validation_set),0))*numpy.mat(spont_corr)
print "MSE from averaged trials:",MSE(apply_supermodel_estim(params,var,var,var,pred_val_act),numpy.mean(numpy.array(raw_validation_set),0))
# MSE with predictions from the same trials
m=[]
m_orig=[]
for j in xrange(0,len(raw_later_act)):
for k in xrange(0,len(raw_later_act)):
if j != k:
m.append(MSE(apply_supermodel_estim(params,raw_later_act[k],raw_later_act[k],raw_later_act[k],pred_val_act),raw_validation_set[j]))
print "MSE from different trials",numpy.mean(m)
def MSE(predictions,targets):
return numpy.mean(numpy.power(predictions-targets,2))
def spontaneousActivity():
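    """Relates pairwise correlations of spontaneous activity to RF
    correlations, stimulus-triggered correlations and prediction-residual
    correlations, on training and validation sets, before and after
    discarding weak (low-magnitude) RFs."""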
import scipy
import scipy.stats
from scipy import linalg
f = open("results.dat",'rb')
import pickle
dd = pickle.load(f)
node = dd.children[9]
rfs = node.children[0].data["ReversCorrelationRFs"]
pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"])
val_pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"])
training_set = node.data["training_set"]
validation_set = node.data["validation_set"]
#flat_validation_set = node.data["flat_validation_set"]
#flat_validation_inputs = node.data["flat_validation_inputs"]
(z,sizex,sizey) = numpy.shape(rfs)
#flat_val_pred_act = numpy.mat(flat_validation_inputs) *numpy.mat(numpy.reshape(rfs,(z,size*sizey))).T
#dataset = loadSimpleDataSet('/home/antolikjan/topographica/topographica/Mice/20091110_19_16_53/spont_filtered.dat',5952,68,num_rep=1,num_frames=1,offset=0,transpose=False)
dataset= loadSimpleDataSet('/home/antolikjan/topographica/topographica/Mice/20090925_14_36_01/spont_filtered.dat',2852,50,num_rep=1,num_frames=1,offset=0,transpose=False)
spont = generateTrainingSet(dataset)
f = file("./Mice/20090925_14_36_01/(20090925_14_36_01)-_retinotopy_region2_sequence_50cells_cell_locations.txt", "r")
loc= [line.split() for line in f]
(a,b) = numpy.shape(loc)
for i in xrange(0,a):
for j in xrange(0,b):
loc[i][j] = float(loc[i][j])
ofs = fit_sigmoids_to_of(numpy.mat(training_set),numpy.mat(pred_act))
pred_act_t = apply_sigmoid_output_function(numpy.mat(pred_act),ofs)
val_pred_act_t = apply_sigmoid_output_function(numpy.mat(val_pred_act),ofs)
diff_act = pred_act - training_set
diff_act_t = pred_act_t - training_set
val_diff_act = val_pred_act - validation_set
val_diff_act_t = val_pred_act_t - validation_set
rfs_corr=[]
spont_corr=[]
diff_corr=[]
diff_t_corr=[]
act_corr=[]
val_act_corr=[]
val_diff_corr=[]
val_diff_t_corr=[]
pred_val_act_t_corr=[]
for i in xrange(0,len(rfs)):
for j in xrange(i+1,len(rfs)):
rfs_corr.append(scipy.stats.pearsonr(rfs[i].flatten(), rfs[j].flatten())[0])
spont_corr.append(scipy.stats.pearsonr(spont[:,i], spont[:,j])[0])
diff_corr.append(scipy.stats.pearsonr(diff_act[:,i], diff_act[:,j])[0])
            diff_t_corr.append(scipy.stats.pearsonr(diff_act_t[:,i], diff_act_t[:,j])[0])
act_corr.append(scipy.stats.pearsonr(training_set[:,i], training_set[:,j])[0])
val_act_corr.append(scipy.stats.pearsonr(validation_set[:,i], validation_set[:,j])[0])
val_diff_corr.append(scipy.stats.pearsonr(val_diff_act[:,i], val_diff_act[:,j])[0])
            val_diff_t_corr.append(scipy.stats.pearsonr(val_diff_act_t[:,i], val_diff_act_t[:,j])[0])
pred_val_act_t_corr.append(scipy.stats.pearsonr(val_pred_act_t[:,i], val_pred_act_t[:,j])[0])
pylab.figure()
pylab.title('Correlation between spontaneous activity correlations and RFs correlations')
pylab.plot(rfs_corr,spont_corr,'bo')
pylab.xlabel('RFs correlations')
pylab.ylabel('Spontaneous activity correlations')
pylab.figure()
pylab.title('Correlation between triggered activity correlations and RFs correlations')
pylab.plot(act_corr,spont_corr,'bo')
pylab.xlabel('Triggered activity')
pylab.ylabel('Spontaneous activity correlations')
pylab.figure()
pylab.title('Correlation between spontaneous activity correlations and prediction residuals correlations after removal of nonspecific RFs')
pylab.plot(diff_corr,spont_corr,'bo')
pylab.xlabel('Residuals correlations')
pylab.ylabel('Spontaneous activity correlations')
pylab.figure()
pylab.title('Correlation between spontaneous activity correlations and prediction residuals +TF correlations after removal of nonspecific RFs')
pylab.plot(diff_t_corr,spont_corr,'bo')
pylab.xlabel('Residuals correlations+TF')
pylab.ylabel('Spontaneous activity correlations')
pylab.figure()
pylab.title('Correlation between triggered activity correlations and prediction residuals correlations after removal of nonspecific RFs')
pylab.plot(act_corr,diff_corr,'bo')
pylab.xlabel('Triggered activity')
pylab.ylabel('Residuals correlations')
print 'Correlation between RFs corr. and Spont act corr.:', scipy.stats.pearsonr(rfs_corr,spont_corr)
print 'On training set:'
print 'Correlation between Triggered activity corr. and Spont act corr.:', scipy.stats.pearsonr(act_corr,spont_corr)
print 'Correlation between Residuals corr. and Spont act corr.:', scipy.stats.pearsonr(diff_corr,spont_corr)
print 'Correlation between Residuals+TF corr. and Spont act corr.:', scipy.stats.pearsonr(diff_t_corr,spont_corr)
print 'Correlation between Residuals corr. and Triggered act corr.:', scipy.stats.pearsonr(diff_corr,act_corr)
print 'On validation set:'
print 'Correlation between Triggered validation activity corr. and Spont act corr.:', scipy.stats.pearsonr(val_act_corr,spont_corr)
print 'Correlation between Residuals corr. and Spont act corr.:', scipy.stats.pearsonr(val_diff_corr,spont_corr)
print 'Correlation between Residuals+TF corr. and Spont act corr.:', scipy.stats.pearsonr(val_diff_t_corr,spont_corr)
print 'Correlation between Residuals corr. and Triggered act corr.:', scipy.stats.pearsonr(val_diff_corr,val_act_corr)
print 'Correlation between predicted validation activities corr. and Spont act corr.:', scipy.stats.pearsonr(pred_val_act_t_corr,spont_corr)
print 'Correlation between difference of predicted validation activities and measured validation activities corr. and Spont act corr.:', scipy.stats.pearsonr(numpy.array(val_act_corr)-numpy.array(pred_val_act_t_corr),spont_corr)
#remove ugly neurons
print 'Removing weak RFs'
rfs_mag=numpy.sum(numpy.reshape(numpy.abs(numpy.array(rfs)),(len(rfs),numpy.size(rfs[0]))),axis=1)
to_delete = numpy.nonzero((rfs_mag < 0.03) * 1.0)
pylab.figure()
pylab.hist(rfs_mag)
rfs = numpy.delete(numpy.array(rfs),to_delete[0],axis=0)
spont = numpy.array(numpy.delete(numpy.mat(spont),to_delete[0],axis=1))
diff_act = numpy.array(numpy.delete(numpy.mat(diff_act),to_delete[0],axis=1))
diff_act_t = numpy.array(numpy.delete(numpy.mat(diff_act_t),to_delete[0],axis=1))
training_set = numpy.array(numpy.delete(numpy.mat(training_set),to_delete[0],axis=1))
validation_set = numpy.array(numpy.delete(numpy.mat(validation_set),to_delete[0],axis=1))
val_diff_act = numpy.array(numpy.delete(numpy.mat(val_diff_act),to_delete[0],axis=1))
val_diff_act_t = numpy.array(numpy.delete(numpy.mat(val_diff_act_t),to_delete[0],axis=1))
val_pred_act_t = numpy.array(numpy.delete(numpy.mat(val_pred_act_t),to_delete[0],axis=1))
spont_corr=[]
diff_corr=[]
diff_t_corr=[]
act_corr=[]
val_act_corr=[]
val_diff_corr=[]
val_diff_t_corr=[]
pred_val_act_t_corr=[]
pos_rfs_corr=[]
pos_spont_corr=[]
pos_dist=[]
neg_rfs_corr=[]
neg_spont_corr=[]
neg_dist=[]
for i in xrange(0,len(rfs)):
for j in xrange(i+1,len(rfs)):
cor = numpy.corrcoef(rfs[i].flatten(), rfs[j].flatten())[0][1]
if cor > 0:
pos_rfs_corr.append(cor)
                pos_spont_corr.append(numpy.corrcoef(spont[:,i], spont[:,j])[0][1])
pos_dist.append(distance(loc,i,j))
else:
neg_rfs_corr.append(cor)
                neg_spont_corr.append(numpy.corrcoef(spont[:,i], spont[:,j])[0][1])
neg_dist.append(distance(loc,i,j))
spont_corr.append(scipy.stats.pearsonr(spont[:,i], spont[:,j])[0])
diff_corr.append(scipy.stats.pearsonr(diff_act[:,i], diff_act[:,j])[0])
            diff_t_corr.append(scipy.stats.pearsonr(diff_act_t[:,i], diff_act_t[:,j])[0])
act_corr.append(scipy.stats.pearsonr(training_set[:,i], training_set[:,j])[0])
val_act_corr.append(scipy.stats.pearsonr(validation_set[:,i], validation_set[:,j])[0])
val_diff_corr.append(scipy.stats.pearsonr(val_diff_act[:,i], val_diff_act[:,j])[0])
            val_diff_t_corr.append(scipy.stats.pearsonr(val_diff_act_t[:,i], val_diff_act_t[:,j])[0])
pred_val_act_t_corr.append(scipy.stats.pearsonr(val_pred_act_t[:,i], val_pred_act_t[:,j])[0])
pylab.figure()
    pylab.title('Correlation between spontaneous activity correlations and RFs positive correlations after removal of nonspecific RFs')
pylab.plot(pos_rfs_corr,pos_spont_corr,'bo')
pylab.xlabel('RFs correlations')
pylab.ylabel('Spontaneous activity correlations')
#fig = pylab.figure()
#from mpl_toolkits.mplot3d import Axes3D
#ax = Axes3D(fig)
    #pylab.title('Correlation between spontaneous activity correlations and RFs positive correlations after removal of nonspecific RFs')
#ax.scatter(pos_rfs_corr,pos_spont_corr,pos_dist)
#ax.set_xlabel('RFs correlations')
#ax.set_ylabel('Spontaneous activity correlations')
#ax.set_zlabel('distance')
pylab.figure()
    pylab.title('Correlation between spontaneous activity correlations and RFs negative correlations after removal of nonspecific RFs')
pylab.plot(neg_rfs_corr,neg_spont_corr,'bo')
pylab.xlabel('RFs correlations')
pylab.ylabel('Spontaneous activity correlations')
#fig = pylab.figure()
#from mpl_toolkits.mplot3d import Axes3D
#ax = Axes3D(fig)
    #pylab.title('Correlation between spontaneous activity correlations and RFs negative correlations after removal of nonspecific RFs')
#ax.scatter(neg_rfs_corr,neg_spont_corr,neg_dist)
#ax.set_xlabel('RFs correlations')
#ax.set_ylabel('Spontaneous activity correlations')
#ax.set_zlabel('distance')
    print 'Correlation between positive RFs corr. and Spont act corr.:', numpy.corrcoef(pos_rfs_corr,pos_spont_corr)[0][1]
print 'Correlation between negative RFs corr. and Spont act corr.:', numpy.corrcoef(neg_rfs_corr,neg_spont_corr)[0][1]
print 'On training set:'
    print 'Correlation between Triggered activity corr. and Spont act corr.:', numpy.corrcoef(act_corr,spont_corr)[0][1]
    print 'Correlation between Residuals corr. and Spont act corr.:', numpy.corrcoef(diff_corr,spont_corr)[0][1]
    print 'Correlation between Residuals+TF corr. and Spont act corr.:', numpy.corrcoef(diff_t_corr,spont_corr)[0][1]
    print 'Correlation between Residuals corr. and Triggered act corr.:', numpy.corrcoef(diff_corr,act_corr)[0][1]
print 'On validation set:'
    print 'Correlation between Triggered validation activity corr. and Spont act corr.:', numpy.corrcoef(val_act_corr,spont_corr)[0][1]
    print 'Correlation between Residuals corr. and Spont act corr.:', numpy.corrcoef(val_diff_corr,spont_corr)[0][1]
    print 'Correlation between Residuals+TF corr. and Spont act corr.:', numpy.corrcoef(val_diff_t_corr,spont_corr)[0][1]
    print 'Correlation between Residuals corr. and Triggered act corr.:', numpy.corrcoef(val_diff_corr,val_act_corr)[0][1]
    print 'Correlation between predicted validation activities corr. and Spont act corr.:', numpy.corrcoef(pred_val_act_t_corr,spont_corr)[0][1]
    print 'Correlation between difference of predicted validation activities and measured validation activities corr. and Spont act corr.:', numpy.corrcoef(numpy.array(val_act_corr)-numpy.array(pred_val_act_t_corr),spont_corr)[0][1]
def RFestimationFromOtherCells():
import scipy
from scipy import linalg
f = open("results.dat",'rb')
import pickle
dd = pickle.load(f)
node = dd.children[0]
rfs = node.children[0].data["ReversCorrelationRFs"]
pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"])
val_pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"])
training_set = node.data["training_set"]
validation_set = node.data["validation_set"]
training_inputs = node.data["training_inputs"]
validation_inputs = node.data["validation_inputs"]
raw_validation_set = node.data["raw_validation_set"]
ofs = fit_sigmoids_to_of(numpy.mat(training_set),numpy.mat(pred_act))
pred_act_t = apply_sigmoid_output_function(numpy.mat(pred_act),ofs)
val_pred_act_t = apply_sigmoid_output_function(numpy.mat(val_pred_act),ofs)
(later_pred_act,later_pred_val_act) = later_interaction_prediction(training_set,pred_act_t,validation_set,val_pred_act_t,raw_validation_set,node.children[0])
#f = open("results.dat",'wb')
#pickle.dump(dd,f,-2)
#f.close()
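    # NOTE: the early return below makes the rest of this function
    # unreachable; the remainder is kept as exploratory code.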
return
(z,sizex,sizey) = numpy.shape(rfs)
dataset= loadSimpleDataSet('/home/antolikjan/topographica/topographica/Mice/20090925_14_36_01/spont_filtered.dat',2852,50,num_rep=1,num_frames=1,offset=0,transpose=False)
spont = generateTrainingSet(dataset)
trig_corr,p = pearcorr(training_set)
trig_corr = numpy.multiply(numpy.multiply(trig_corr,abs(numpy.eye(z)-1.0)),(p<0.01)*1.0)
spont_corr,p = pearcorr(spont)
spont_corr = numpy.multiply(numpy.multiply(spont_corr,abs(numpy.eye(z)-1.0)),(p<0.01)*1.0)
trig_pred_train_act = numpy.array(numpy.mat(training_set) * numpy.mat(trig_corr))
trig_pred_validation_act = numpy.array(numpy.mat(validation_set) * numpy.mat(trig_corr))
spont_pred_train_act = numpy.array(numpy.mat(training_set) * numpy.mat(spont_corr))
spont_pred_validation_act = numpy.array(numpy.mat(validation_set) * numpy.mat(spont_corr))
avg_pred_train_act = numpy.array(numpy.mat(training_set) * numpy.mat(numpy.ones((z,1))))
avg_pred_validation_act = numpy.array(numpy.mat(validation_set) * numpy.mat(numpy.ones((z,1))))
pylab.figure()
pylab.imshow(trig_corr,interpolation='nearest')
pylab.colorbar()
pylab.figure()
pylab.imshow(spont_corr,interpolation='nearest')
pylab.colorbar()
print numpy.shape(trig_pred_train_act)
print numpy.shape(trig_pred_validation_act)
print sizex
print sizey
print numpy.shape(training_set)
print numpy.shape(training_inputs)
(e,te,c,tc,RFs,pa,pva,corr_coef,corr_coef_tf) = regulerized_inverse_rf(training_inputs,numpy.divide(training_set,trig_pred_train_act),sizex,sizey,__main__.__dict__.get('Alpha',50),numpy.mat(validation_inputs),numpy.divide(validation_set,numpy.mat(spont_pred_validation_act)),contrib.dd.DB(None),True)
pylab.show()
def pearcorr(X):
X = numpy.array(X)
import scipy.stats
x,y = numpy.shape(X)
c = numpy.zeros((y,y))
p = numpy.zeros((y,y))
for i in xrange(0,y):
for j in xrange(0,y):
a,b = scipy.stats.pearsonr(X[:,i],X[:,j])
c[i][j]=a
p[i][j]=b
return (c,p)
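# A minimal usage sketch for pearcorr (hypothetical data, not from the
# experiments above): it returns the full pairwise Pearson correlation
# matrix together with the matching p-value matrix, so non-significant
# entries can be masked in one step, as done in RFestimationFromOtherCells.
def _pearcorr_example():
    X = numpy.random.rand(100, 5)  # 100 time points, 5 hypothetical cells
    c, p = pearcorr(X)
    # keep only correlations significant at the 1% level
    return numpy.multiply(c, (p < 0.01) * 1.0)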
def rot90_around_center_of_gravity(rf):
"""
Assumes the RF is in lower right quadrant!!!!!!!!!!!!!!!!
"""
sx,sy = numpy.shape(rf)
cy,cx = centre_of_gravity(rf)
cx = round(cx)
cy = round(cy)
z=numpy.min([cx,cy,sx-cx,sy-cy])
res = numpy.zeros((sx,sy))
res[cx-z:cx+z,cy-z:cy+z] = numpy.rot90(rf[cx-z:cx+z,cy-z:cy+z])
return res
def performance_analysis(training_set,validation_set,pred_act,pred_val_act,raw_validation_set):
raw_validation_data_set=numpy.rollaxis(numpy.array(raw_validation_set),2)
signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power,signal_power_variance = signal_power_test(raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set), numpy.array(pred_act), numpy.array(pred_val_act))
print 'Using all neurons:'
(ranks,correct,pred) = performIdentification(validation_set,pred_val_act)
print "Prediction power on training set / validation set: ", numpy.mean(training_prediction_power) , " / " , numpy.mean(validation_prediction_power)
print "Correctly prediced:", correct ,'(', (correct*1.0)/numpy.shape(validation_set)[0]*100 ,'%)' , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - pred_val_act,2))
significant = numpy.mat(numpy.nonzero(((numpy.array(signal_power) - 0.5*numpy.array(signal_power_variance)) > 0.0)*1.0)).getA1()
print 'Using significant neurons:','(', len(significant) ,')'
(ranks,correct,pred) = performIdentification(validation_set[:,significant],pred_val_act[:,significant])
print "Prediction power on training set / validation set: ", numpy.mean(training_prediction_power[significant]) , " / " , numpy.mean(validation_prediction_power[significant])
print "Correctly prediced:", correct ,'(', (correct*1.0)/numpy.shape(validation_set)[0]*100 ,'%)' , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set[:,significant] - pred_val_act[:,significant],2))
return (signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power,signal_power_variance)
|
ioam/svn-history
|
contrib/modelfit.py
|
Python
|
bsd-3-clause
| 198,364
|
[
"Gaussian"
] |
505eda46203f60dbdd3d996479e196fc14b710183cfc5fc0e2fb3e41bc6bf36d
|
"""Factor Analysis.
A latent linear variable model, similar to ProbabilisticPCA.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
# Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
from math import sqrt, log
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..utils import array2d, check_arrays
from ..utils.extmath import fast_logdet, fast_dot
class FactorAnalysis(BaseEstimator, TransformerMixin):
"""Factor Analysis (FA)
A simple linear generative model with Gaussian latent variables.
The observations are assumed to be caused by a linear transformation of
lower dimensional latent factors and added Gaussian noise.
Without loss of generality the factors are distributed according to a
Gaussian with zero mean and unit covariance. The noise is also zero mean
and has an arbitrary diagonal covariance matrix.
If we restricted the model further, by assuming that the Gaussian
noise is additionally isotropic (all diagonal entries are the same), we
would obtain :class:`PPCA`.
FactorAnalysis performs a maximum likelihood estimate of the so-called
`loading` matrix, the transformation of the latent variables to the
observed ones, using expectation-maximization (EM).
Parameters
----------
n_components : int | None
Dimensionality of latent space, the number of components
of ``X`` that are obtained after ``transform``.
If None, n_components is set to the number of features.
tol : float
Stopping tolerance for EM algorithm.
copy : bool
Whether to make a copy of X. If ``False``, the input X gets overwritten
during fitting.
max_iter : int
Maximum number of iterations.
verbose : int | bool
Print verbose output.
noise_variance_init : None | array, shape=(n_features,)
The initial guess of the noise variance for each feature.
If None, it defaults to np.ones(n_features)
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`loglike_` : list, [n_iterations]
The log likelihood at each iteration.
`noise_variance_` : array, shape=(n_features,)
The estimated noise variance for each feature.
References
----------
.. David Barber, Bayesian Reasoning and Machine Learning,
Algorithm 21.1
.. Christopher M. Bishop: Pattern Recognition and Machine Learning,
Chapter 12.2.4
See also
--------
PCA: Principal component analysis, a similar non-probabilistic
model that can be computed in closed form.
ProbabilisticPCA: probabilistic PCA.
FastICA: Independent component analysis, a latent variable model with
non-Gaussian latent variables.
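    Examples
    --------
    A minimal illustrative fit on random data (hypothetical values, shown
    only to sketch the API):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 10)
    >>> fa = FactorAnalysis(n_components=3).fit(X)
    >>> fa.transform(X).shape
    (100, 3)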
"""
def __init__(self, n_components=None, tol=1e-2, copy=True, max_iter=1000,
verbose=0, noise_variance_init=None):
self.n_components = n_components
self.copy = copy
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.noise_variance_init = noise_variance_init
def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using EM
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self
"""
X = array2d(check_arrays(X, copy=self.copy, sparse_format='dense',
dtype=np.float)[0])
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
# some constant terms
nsqrt = sqrt(n_samples)
llconst = n_features * log(2. * np.pi) + n_components
var = np.var(X, axis=0)
if self.noise_variance_init is None:
psi = np.ones(n_features, dtype=X.dtype)
else:
if len(self.noise_variance_init) != n_features:
raise ValueError("noise_variance_init dimension does not "
"with number of features : %d != %d" %
(len(self.noise_variance_init), n_features))
psi = np.array(self.noise_variance_init)
loglike = []
old_ll = -np.inf
SMALL = 1e-12
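        # EM via SVD (Barber, Algorithm 21.1): each iteration rescales X by
        # the current noise estimate psi, takes the leading singular vectors
        # of the rescaled data as the loading directions, and re-estimates
        # psi from the residual per-feature variance.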
for i in xrange(self.max_iter):
# SMALL helps numerics
sqrt_psi = np.sqrt(psi) + SMALL
Xtilde = X / (sqrt_psi * nsqrt)
_, s, V = linalg.svd(Xtilde, full_matrices=False)
V = V[:n_components]
s **= 2
# Use 'maximum' here to avoid sqrt problems.
W = np.sqrt(np.maximum(s[:n_components] - 1., 0.))[:, np.newaxis] * V
W *= sqrt_psi
# loglikelihood
ll = llconst + np.sum(np.log(s[:n_components]))
ll += np.sum(s[n_components:]) + np.sum(np.log(psi))
ll *= -n_samples / 2.
loglike.append(ll)
if (ll - old_ll) < self.tol:
break
old_ll = ll
psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
else:
if self.verbose:
print("Did not converge")
self.components_ = W
self.noise_variance_ = psi
self.loglike_ = loglike
return self
def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
The latent variables of X.
"""
X = array2d(X)
Ih = np.eye(len(self.components_))
X_transformed = X - self.mean_
Wpsi = self.components_ / self.noise_variance_
cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
tmp = fast_dot(X_transformed, Wpsi.T)
X_transformed = fast_dot(tmp, cov_z)
return X_transformed
def get_covariance(self):
"""Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
cov = np.dot(self.components_.T, self.components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def score(self, X, y=None):
"""Compute score of X under FactorAnalysis model.
Parameters
----------
X: array of shape(n_samples, n_features)
The data to test
Returns
-------
ll: array of shape (n_samples),
log-likelihood of each row of X under the current model
"""
Xr = X - self.mean_
cov = self.get_covariance()
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
self.precision_ = linalg.inv(cov)
log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1)
log_like -= .5 * (fast_logdet(cov) + n_features * log(2. * np.pi))
return log_like
|
depet/scikit-learn
|
sklearn/decomposition/factor_analysis.py
|
Python
|
bsd-3-clause
| 7,690
|
[
"Gaussian"
] |
e6911d046dbad612dd432fcc2aad020bb7b40a4c66ba6c7b8d756f07bd790f4e
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2010 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Slaves for books """
import datetime
from dateutil.relativedelta import relativedelta
import gtk
from kiwi.datatypes import ValidationError
from stoqlib.api import api
from stoqlib.domain.taxes import (InvoiceItemIcms, ProductIcmsTemplate,
InvoiceItemIpi, ProductIpiTemplate,
ProductTaxTemplate)
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.gui.editors.baseeditor import BaseEditorSlave
_ = stoqlib_gettext
class BaseTaxSlave(BaseEditorSlave):
combo_widgets = ()
percentage_widgets = ()
value_widgets = ()
hide_widgets = ()
tooltips = {}
field_options = {}
def __init__(self, store, *args, **kargs):
self.is_updating = False
self.proxy = None
BaseEditorSlave.__init__(self, store, *args, **kargs)
def _setup_widgets(self):
for name, options in self.field_options.items():
widget = getattr(self, name)
widget.prefill(options)
widget.set_size_request(220, -1)
for name in self.percentage_widgets:
widget = getattr(self, name)
widget.set_digits(2)
widget.set_adjustment(
gtk.Adjustment(lower=0, upper=100, step_incr=1))
for w in self.hide_widgets:
getattr(self, w).hide()
getattr(self, w + '_label').hide()
for name, tooltip in self.tooltips.items():
widget = getattr(self, name)
if isinstance(widget, gtk.Entry):
widget.set_property('primary-icon-stock', gtk.STOCK_INFO)
widget.set_property('primary-icon-tooltip-text', tooltip)
widget.set_property('primary-icon-sensitive', True)
widget.set_property('primary-icon-activatable', False)
self.setup_callbacks()
def setup_callbacks(self):
"""Implement this in a child when necessary
"""
pass
def set_valid_widgets(self, valid_widgets):
if self.visual_mode:
return
for widget in self.all_widgets:
if widget in valid_widgets:
getattr(self, widget).set_sensitive(True)
lbl = getattr(self, widget + '_label', None)
if lbl:
lbl.set_sensitive(True)
else:
getattr(self, widget).set_sensitive(False)
lbl = getattr(self, widget + '_label', None)
if lbl:
lbl.set_sensitive(False)
class InvoiceItemMixin(object):
def fill_combo(self, combo, type):
types = [(None, None)]
types.extend([(t.name, t.get_tax_model()) for t in
self.store.find(ProductTaxTemplate, tax_type=type)])
combo.prefill(types)
def on_template__changed(self, widget):
template = widget.read()
if not template:
return
self.model.set_item_tax(self.invoice_item, template)
self.update_values(self.proxy_widgets)
#
# ICMS
#
class BaseICMSSlave(BaseTaxSlave):
gladefile = 'TaxICMSSlave'
combo_widgets = ['cst', 'csosn', 'orig', 'mod_bc', 'mod_bc_st']
percentage_widgets = ['p_icms', 'p_mva_st', 'p_red_bc_st', 'p_icms_st',
'p_red_bc', 'p_cred_sn']
value_widgets = ['v_bc', 'v_icms', 'v_bc_st', 'v_icms_st',
'v_cred_icms_sn', 'v_bc_st_ret', 'v_icms_st_ret']
bool_widgets = ['bc_include_ipi', 'bc_st_include_ipi']
date_widgets = ['p_cred_sn_valid_until']
all_widgets = (combo_widgets + percentage_widgets + value_widgets +
bool_widgets + date_widgets)
simples_widgets = ['orig', 'csosn', 'mod_bc_st', 'p_mva_st', 'p_red_bc_st',
                   'p_icms_st', 'v_bc_st', 'v_icms_st', 'p_cred_sn',
                   'p_cred_sn_valid_until', 'v_cred_icms_sn', 'v_bc_st_ret',
                   'v_icms_st_ret']
normal_widgets = ['orig', 'cst', 'mod_bc_st', 'p_mva_st', 'p_red_bc_st',
                  'p_icms_st', 'v_bc_st', 'v_icms_st',
                  'mod_bc', 'p_icms', 'v_bc', 'v_icms', 'p_red_bc',
                  'bc_include_ipi', 'bc_st_include_ipi']
tooltips = {
'p_icms': u'Aliquota do imposto',
'p_mva_st': u'Percentual da margem de valor adicionado do ICMS ST',
'p_red_bc_st': u'Percentual da Redução de Base de Cálculo do ICMS ST'
}
field_options = {
'cst': (
(None, None),
(u'00 - Tributada Integralmente', 0),
(u'10 - Tributada e com cobrança de ICMS por subst. trib.', 10),
(u'20 - Com redução de BC', 20),
(u'30 - Isenta ou não trib. e com cobrança de ICMS por subst. trib.', 30),
(u'40 - Isenta', 40),
(u'41 - Não tributada', 41),
(u'50 - Suspensão', 50),
(u'51 - Deferimento', 51),
(u'60 - ICMS cobrado anteriormente por subst. trib.', 60),
(u'70 - Com redução da BC cobrança do ICMS por subst. trib.', 70),
(u'90 - Outros', 90),
),
'csosn': (
(None, None),
(u'101 - Tributada com permissão de crédito', 101),
(u'102 - Tributada sem permissão de crédito', 102),
(u'103 - Isenção do ICMS para faixa de receita bruta', 103),
(u'201 - Tributada com permissão de crédito e com cobrança do ICMS por ST', 201),
(u'202 - Tributada sem permissão de crédito e com cobrança do ICMS por ST', 202),
(u'203 - Isenção do ICMS para faixa de receita bruta e com cobrança do ICMS por ST',
203),
(u'300 - Imune', 300),
(u'400 - Não tributada', 400),
(u'500 - ICMS cobrado anteriormente por ST ou por antecipação', 500),
(u'900 - Outros', 900),
),
# http://www.fazenda.gov.br/confaz/confaz/ajustes/2012/AJ_020_12.htm
# http://www.fazenda.gov.br/confaz/confaz/ajustes/2013/AJ_015_13.htm
# http://www.fazenda.gov.br/confaz/confaz/Convenios/ICMS/2013/CV038_13.htm
'orig': (
(None, None),
(u'0 - Nacional, '
u'exceto as indicadas nos códigos 3, 4, 5 e 8', 0),
(u'1 - Estrangeira - Importação direta, '
u'exceto a indicada no código 6', 1),
(u'2 - Estrangeira - Adquirida no mercado interno, '
u'exceto a indicada no código 7', 2),
(u'3 - Nacional, mercadoria ou bem com Conteúdo de '
u'Importação superior a 40% e inferior ou igual a 70%', 3),
(u'4 - Nacional, cuja produção tenha sido feita em '
u'conformidade com os processos produtivos básicos', 4),
(u'5 - Nacional, mercadoria ou bem com Conteúdo de '
u'Importação inferior ou igual a 40%', 5),
(u'6 - Estrangeira - Importação direta, '
u'sem similar nacional, constante na CAMEX', 6),
(u'7 - Estrangeira - Adquirida no mercado interno, '
u'sem similar nacional, constante na CAMEX', 7),
(u'8 - Nacional, mercadoria ou bem com Conteúdo de Importação '
u'superior a 70%', 8),
),
'mod_bc': (
(None, None),
(u'0 - Margem do valor agregado (%)', 0),
(u'1 - Pauta (Valor)', 1),
(u'2 - Preço tabelado máximo (valor)', 2),
(u'3 - Valor da operação', 3),
),
'mod_bc_st': (
(None, None),
(u'0 - Preço tabelado ou máximo sugerido', 0),
(u'1 - Lista negativa (valor)', 1),
(u'2 - Lista positiva (valor)', 2),
(u'3 - Lista neutra (valor)', 3),
(u'4 - Margem Valor Agregado (%)', 4),
(u'5 - Pauta (valor)', 5),
),
}
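    # Maps each CST/CSOSN code to the widgets that apply to it; when the
    # combo changes, set_valid_widgets() desensitizes everything else.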
MAP_VALID_WIDGETS = {
0: ['orig', 'cst', 'mod_bc', 'p_icms', 'v_bc', 'v_icms',
'bc_include_ipi'],
10: ['orig', 'cst', 'mod_bc', 'p_icms', 'mod_bc_st', 'p_mva_st',
'p_red_bc_st', 'p_icms_st', 'v_bc', 'v_icms', 'v_bc_st',
'v_icms_st', 'bc_include_ipi', 'bc_st_include_ipi'],
20: ['orig', 'cst', 'mod_bc', 'p_icms', 'p_red_bc', 'v_bc',
'v_icms', 'bc_include_ipi'],
30: ['orig', 'cst', 'mod_bc_st', 'p_mva_st', 'p_red_bc_st',
'p_icms_st', 'v_bc_st', 'v_icms_st', 'bc_st_include_ipi'],
40: ['orig', 'cst'],
41: ['orig', 'cst'], # Same tag
50: ['orig', 'cst'],
51: ['orig', 'cst', 'mod_bc', 'p_red_bc', 'p_icms', 'v_bc',
     'v_icms', 'bc_include_ipi'],
60: ['orig', 'cst', 'v_bc_st', 'v_icms_st'],
70: normal_widgets,
90: normal_widgets,
# Simples Nacional
101: ['orig', 'csosn', 'p_cred_sn', 'p_cred_sn_valid_until',
'v_cred_icms_sn'],
102: ['orig', 'csosn'],
103: ['orig', 'csosn'],
201: ['orig', 'csosn', 'mod_bc_st', 'p_mva_st', 'p_red_bc_st',
'p_icms_st', 'v_bc_st', 'v_icms_st', 'p_cred_sn',
'p_cred_sn_valid_until', 'v_cred_icms_sn'],
202: ['orig', 'csosn', 'mod_bc_st', 'p_mva_st', 'p_red_bc_st',
'p_icms_st', 'v_bc_st', 'v_icms_st'],
203: ['orig', 'csosn', 'mod_bc_st', 'p_mva_st', 'p_red_bc_st',
'p_icms_st', 'v_bc_st', 'v_icms_st'],
300: ['orig', 'csosn'],
400: ['orig', 'csosn'],
500: ['orig', 'csosn', 'v_bc_st_ret', 'v_icms_st_ret'],
900: ['orig', 'csosn', 'mod_bc', 'v_bc', 'p_red_bc', 'p_icms', 'v_icms',
'mod_bc_st', 'p_mva_st', 'p_red_bc_st', 'v_bc_st', 'p_icms_st',
'v_icms_st', 'p_cred_sn', 'v_cred_icms_sn']
}
def setup_proxies(self):
self._setup_widgets()
self.branch = api.get_current_branch(self.model.store)
self.proxy = self.add_proxy(self.model, self.proxy_widgets)
# Simple Nacional
if self.branch.crt in [1, 2]:
self._update_selected_csosn()
else:
self._update_selected_cst()
def _update_widgets(self):
has_p_cred_sn = (self.p_cred_sn.get_sensitive()
and bool(self.p_cred_sn.get_value()))
self.p_cred_sn_valid_until.set_sensitive(has_p_cred_sn)
def _update_p_cred_sn_valid_until(self):
if (self.p_cred_sn.get_value()
and not self.p_cred_sn_valid_until.get_date()):
# Set the default expire date to the last day of current month.
default_expire_date = (localtoday().date() +
relativedelta(day=1, months=+1, days=-1))
self.p_cred_sn_valid_until.set_date(default_expire_date)
def _update_selected_cst(self):
cst = self.cst.get_selected_data()
valid_widgets = self.MAP_VALID_WIDGETS.get(cst, ('cst', ))
self.set_valid_widgets(valid_widgets)
def _update_selected_csosn(self):
csosn = self.csosn.get_selected_data()
valid_widgets = self.MAP_VALID_WIDGETS.get(csosn, ('csosn', ))
self.set_valid_widgets(valid_widgets)
def on_cst__changed(self, widget):
self._update_selected_cst()
def on_csosn__changed(self, widget):
self._update_selected_csosn()
self._update_widgets()
def after_p_cred_sn__changed(self, widget):
self._update_p_cred_sn_valid_until()
self.p_cred_sn_valid_until.validate(force=True)
self._update_widgets()
def on_p_cred_sn_valid_until__validate(self, widget, value):
if not self.p_cred_sn.get_value():
return
if value <= datetime.date.today():
return ValidationError(_(u"This date must be set in the future."))
class ICMSTemplateSlave(BaseICMSSlave):
model_type = ProductIcmsTemplate
proxy_widgets = (BaseICMSSlave.combo_widgets +
BaseICMSSlave.percentage_widgets +
BaseICMSSlave.bool_widgets +
BaseICMSSlave.date_widgets)
hide_widgets = BaseICMSSlave.value_widgets + ['template']
class InvoiceItemIcmsSlave(BaseICMSSlave, InvoiceItemMixin):
model_type = InvoiceItemIcms
proxy_widgets = (BaseICMSSlave.combo_widgets +
BaseICMSSlave.percentage_widgets +
BaseICMSSlave.bool_widgets +
BaseICMSSlave.value_widgets)
hide_widgets = BaseICMSSlave.date_widgets
def __init__(self, store, model, invoice_item):
self.invoice_item = invoice_item
BaseICMSSlave.__init__(self, store, model)
def setup_callbacks(self):
self.fill_combo(self.template, ProductTaxTemplate.TYPE_ICMS)
for name in self.percentage_widgets:
widget = getattr(self, name)
widget.connect_after('changed', self._after_field_changed)
self.bc_include_ipi.connect_after('toggled', self._after_field_changed)
self.bc_st_include_ipi.connect_after('toggled',
self._after_field_changed)
self.cst.connect_after('changed', self._after_field_changed)
self.csosn.connect_after('changed', self._after_field_changed)
def update_values(self, widgets=None):
self.is_updating = True
self.model.update_values(self.invoice_item)
widgets = widgets or self.value_widgets
for name in widgets:
if name in ('csosn', 'cst'):
continue
self.proxy.update(name)
# We need to update cst and csosn last: Since when one of those is
# changed we change some widgets sensitivity, when changing the widget
# sensitivity, kiwi will reset the model value incorrectly.
self.proxy.update_many(['csosn', 'cst'])
self.is_updating = False
def _after_field_changed(self, widget):
if not self.proxy or self.is_updating:
return
self.update_values()
class BaseIPISlave(BaseTaxSlave):
gladefile = 'TaxIPISlave'
combo_widgets = ['cst', 'calculo']
percentage_widgets = ['p_ipi']
text_widgets = ['cl_enq', 'cnpj_prod', 'c_selo', 'c_enq']
value_widgets = ['v_ipi', 'v_bc', 'v_unid', 'q_selo', 'q_unid']
all_widgets = (combo_widgets + percentage_widgets + value_widgets +
text_widgets)
tooltips = {
'cl_enq': u'Preenchimento conforme Atos Normativos editados pela '
u'Receita Federal (Observação 4)',
'cnpj_prod': u'Informar os zeros não significativos',
'c_selo': u'Preenchimento conforme Atos Normativos editados pela '
u'Receita Federal (Observação 3)',
'c_enq': u'Tabela a ser criada pela RFB, informar 999 enquanto a '
u'tabela não for criada',
}
field_options = {
'cst': (
(None, None),
(u'00 - Entrada com recuperação de crédito', 0),
(u'01 - Entrada tributada com alíquota zero', 1),
(u'02 - Entrada isenta', 2),
(u'03 - Entrada não-tributada', 3),
(u'04 - Entrada imune', 4),
(u'05 - Entrada com suspensão', 5),
(u'49 - Outras entradas', 49),
(u'50 - Saída tributada', 50),
(u'51 - Saída tributada com alíquota zero', 51),
(u'52 - Saída isenta', 52),
(u'53 - Saída não-tributada', 53),
(u'54 - Saída imune', 54),
(u'55 - Saída com suspensão', 55),
(u'99 - Outras saídas', 99),
),
'calculo': (
(u'Por alíquota', 0),
(u'Valor por unidade', 1),
)
}
# These widgets should be enabled when the corresponding option is selected.
MAP_VALID_WIDGETS = {
0: all_widgets,
1: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
2: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
3: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
4: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
5: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
49: all_widgets,
50: all_widgets,
51: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
52: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
53: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
54: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
55: ['cst', 'cl_enq', 'cnpj_prod', 'c_selo', 'q_selo', 'c_enq'],
99: all_widgets,
}
def setup_proxies(self):
self._setup_widgets()
self.proxy = self.add_proxy(self.model, self.proxy_widgets)
self._update_selected_cst()
self._update_selected_calculo()
def _update_selected_cst(self):
cst = self.cst.get_selected_data()
valid_widgets = self.MAP_VALID_WIDGETS.get(cst, ('cst', ))
self.set_valid_widgets(valid_widgets)
def _update_selected_calculo(self):
# IPI is only calculated if cst is one of the following
if self.model.cst not in (0, 49, 50, 99):
return
calculo = self.calculo.get_selected_data()
if calculo == InvoiceItemIpi.CALC_ALIQUOTA:
self.p_ipi.set_sensitive(True)
self.v_bc.set_sensitive(True)
self.v_unid.set_sensitive(False)
self.q_unid.set_sensitive(False)
elif calculo == InvoiceItemIpi.CALC_UNIDADE:
self.p_ipi.set_sensitive(False)
self.v_bc.set_sensitive(False)
self.v_unid.set_sensitive(True)
self.q_unid.set_sensitive(True)
def on_cst__changed(self, widget):
self._update_selected_cst()
def on_calculo__changed(self, widget):
self._update_selected_calculo()
class IPITemplateSlave(BaseIPISlave):
model_type = ProductIpiTemplate
proxy_widgets = (BaseIPISlave.combo_widgets +
BaseIPISlave.percentage_widgets +
BaseIPISlave.text_widgets)
hide_widgets = BaseIPISlave.value_widgets + ['template']
class InvoiceItemIpiSlave(BaseIPISlave, InvoiceItemMixin):
model_type = InvoiceItemIpi
proxy_widgets = BaseIPISlave.all_widgets
def __init__(self, store, model, invoice_item):
self.invoice_item = invoice_item
BaseIPISlave.__init__(self, store, model)
def setup_callbacks(self):
self.fill_combo(self.template, ProductTaxTemplate.TYPE_IPI)
self.p_ipi.connect_after('changed', self._after_field_changed)
self.q_unid.connect_after('changed', self._after_field_changed)
self.v_unid.connect_after('changed', self._after_field_changed)
self.cst.connect_after('changed', self._after_field_changed)
def update_values(self, widgets=None):
self.model.update_values(self.invoice_item)
widgets = widgets or ['v_bc', 'v_ipi']
for name in widgets:
if name == 'cst':
continue
self.proxy.update(name)
# We need to update cst and csosn last: Since when one of those is
# changed we change some widgets sensitivity, when changing the widget
# sensitivity, kiwi will reset the model value incorrectly.
self.proxy.update('cst')
def _after_field_changed(self, widget):
if not self.proxy or self.is_updating:
return
self.update_values()
|
tiagocardosos/stoq
|
stoqlib/gui/slaves/taxslave.py
|
Python
|
gpl-2.0
| 20,452
|
[
"VisIt"
] |
28bec556226977559affdda6121bc975d6659d3daf4437dc1a99bde367e76b13
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2019 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Interface to MTG-FCI L1c NetCDF files.
This module defines the :class:`FCIL1cNCFileHandler` file handler, to
be used for reading Meteosat Third Generation (MTG) Flexible Combined
Imager (FCI) Level-1c data. FCI will fly
on the MTG Imager (MTG-I) series of satellites, scheduled to be launched
in 2022 at the earliest. For more information about FCI, see `EUMETSAT`_.
For simulated test data to be used with this reader, see `test data release`_.
For the Product User Guide (PUG) of the FCI L1c data, see `PUG`_.
.. note::
This reader currently supports Full Disk High Spectral Resolution Imagery
(FDHSI) files. Support for High Spatial Resolution Fast Imagery (HRFI) files
will be implemented when corresponding test datasets become available.
Geolocation is based on information from the data files. It uses:
* From the shape of the data variable ``data/<channel>/measured/effective_radiance``,
the number of lines and columns of the current swath.
* From the data variable ``data/<channel>/measured/x``, the x-coordinates
for the grid, in radians (azimuth angle positive towards West).
* From the data variable ``data/<channel>/measured/y``, the y-coordinates
for the grid, in radians (elevation angle positive towards North).
* From the attribute ``semi_major_axis`` on the data variable
``data/mtg_geos_projection``, the Earth equatorial radius
* From the attribute ``inverse_flattening`` on the same data variable, the
(inverse) flattening of the ellipsoid
* From the attribute ``perspective_point_height`` on the same data
variable, the geostationary altitude in the normalised geostationary
projection
* From the attribute ``longitude_of_projection_origin`` on the same
data variable, the longitude of the projection origin
* From the attribute ``sweep_angle_axis`` on the same, the sweep angle
axis, see https://proj.org/operations/projections/geos.html
From the pixel centre angles in radians and the geostationary altitude, the
extremities of the lower left and upper right corners are calculated in units
of arc length in m. This extent along with the number of columns and rows, the
sweep angle axis, and a dictionary with equatorial radius, polar radius,
geostationary altitude, and longitude of projection origin, are passed on to
``pyresample.geometry.AreaDefinition``, which then uses proj4 for the actual
geolocation calculations.
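Schematically, for one axis (a simplified sketch, with ``x`` the pixel
centre angles in radians, ``sf`` their scale factor and ``h`` the
geostationary altitude)::

    first = (x[0] - sf / 2) * h    # arc length in m at one image edge
    last = (x[-1] + sf / 2) * h    # arc length in m at the opposite edge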
The reading routine supports channel data in counts, radiances, and (depending
on channel) brightness temperatures or reflectances. The brightness temperature and reflectance calculation is based on the formulas indicated in
`PUG`_.
Radiance datasets are returned in units of radiance per unit wavenumber (mW m-2 sr-1 (cm-1)-1). Radiances can be
converted to units of radiance per unit wavelength (W m-2 um-1 sr-1) by multiplying with the
`radiance_unit_conversion_coefficient` dataset attribute.
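For example, given a radiance dataset ``rad`` loaded with this reader
(illustrative only)::

    rad_wl = rad * rad.attrs['radiance_unit_conversion_coefficient']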
For each channel, it also supports a number of auxiliary datasets, such as the pixel quality,
the index map and the related geometric and acquisition parameters: time,
subsatellite latitude, subsatellite longitude, platform altitude, subsolar latitude, subsolar longitude,
earth-sun distance, sun-satellite distance, swath number, and swath direction.
All auxiliary data can be obtained by prepending the channel name such as
``"vis_04_pixel_quality"``.
.. warning::
The API for the direct reading of pixel quality is temporary and likely to
change. Currently, for each channel, the pixel quality is available by
``<chan>_pixel_quality``. In the future, they will likely all be called
``pixel_quality`` and disambiguated by a to-be-decided property in the
`DataID`.
.. _PUG: https://www-cdn.eumetsat.int/files/2020-07/pdf_mtg_fci_l1_pug.pdf
.. _EUMETSAT: https://www.eumetsat.int/mtg-flexible-combined-imager # noqa: E501
.. _test data release: https://www.eumetsat.int/simulated-mtg-fci-l1c-enhanced-non-nominal-datasets
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import xarray as xr
from netCDF4 import default_fillvals
from pyresample import geometry
from satpy.readers._geos_area import get_geos_area_naming
from satpy.readers.eum_base import get_service_mode
from .netcdf_utils import NetCDF4FileHandler
logger = logging.getLogger(__name__)
# dict containing all available auxiliary data parameters to be read using the index map. Keys are the
# parameter name and values are the paths to the variable inside the netcdf
AUX_DATA = {
'subsatellite_latitude': 'state/platform/subsatellite_latitude',
'subsatellite_longitude': 'state/platform/subsatellite_longitude',
'platform_altitude': 'state/platform/platform_altitude',
'subsolar_latitude': 'state/celestial/subsolar_latitude',
'subsolar_longitude': 'state/celestial/subsolar_longitude',
'earth_sun_distance': 'state/celestial/earth_sun_distance',
'sun_satellite_distance': 'state/celestial/sun_satellite_distance',
'time': 'time',
'swath_number': 'data/swath_number',
'swath_direction': 'data/swath_direction',
}
def _get_aux_data_name_from_dsname(dsname):
aux_data_name = [key for key in AUX_DATA.keys() if key in dsname]
if len(aux_data_name) > 0:
return aux_data_name[0]
return None
def _get_channel_name_from_dsname(dsname):
# FIXME: replace by .removesuffix after we drop support for Python < 3.9
if dsname.endswith("_pixel_quality"):
channel_name = dsname[:-len("_pixel_quality")]
elif dsname.endswith("_index_map"):
channel_name = dsname[:-len("_index_map")]
elif _get_aux_data_name_from_dsname(dsname) is not None:
channel_name = dsname[:-len(_get_aux_data_name_from_dsname(dsname)) - 1]
else:
channel_name = dsname
return channel_name
class FCIL1cNCFileHandler(NetCDF4FileHandler):
"""Class implementing the MTG FCI L1c Filehandler.
This class implements the Meteosat Third Generation (MTG) Flexible
Combined Imager (FCI) Level-1c NetCDF reader.
It is designed to be used through the :class:`~satpy.Scene`
class using the :mod:`~satpy.Scene.load` method with the reader
``"fci_l1c_nc"``.
"""
# Platform names according to the MTG FCI L1 Product User Guide,
# EUM/MTG/USR/13/719113 from 2019-06-27, pages 32 and 124, are MTI1, MTI2,
# MTI3, and MTI4, but we want to use names such as described in WMO OSCAR
# MTG-I1, MTG-I2, MTG-I3, and MTG-I4.
#
# After launch: translate to METEOSAT-xx instead? Not sure how the
# numbering will be considering MTG-S1 and MTG-S2 will be launched
# in-between.
_platform_name_translate = {
"MTI1": "MTG-I1",
"MTI2": "MTG-I2",
"MTI3": "MTG-I3",
"MTI4": "MTG-I4"}
def __init__(self, filename, filename_info, filetype_info):
"""Initialize file handler."""
super().__init__(filename, filename_info,
filetype_info,
cache_var_size=10000,
cache_handle=True)
logger.debug('Reading: {}'.format(self.filename))
logger.debug('Start: {}'.format(self.start_time))
logger.debug('End: {}'.format(self.end_time))
self._cache = {}
@property
def start_time(self):
"""Get start time."""
return self.filename_info['start_time']
@property
def end_time(self):
"""Get end time."""
return self.filename_info['end_time']
def get_dataset(self, key, info=None):
"""Load a dataset."""
logger.debug('Reading {} from {}'.format(key['name'], self.filename))
if "pixel_quality" in key['name']:
return self._get_dataset_quality(key['name'])
elif "index_map" in key['name']:
return self._get_dataset_index_map(key['name'])
elif _get_aux_data_name_from_dsname(key['name']) is not None:
return self._get_dataset_aux_data(key['name'])
elif any(lb in key['name'] for lb in {"vis_", "ir_", "nir_", "wv_"}):
return self._get_dataset_measurand(key, info=info)
else:
raise ValueError("Unknown dataset key, not a channel, quality or auxiliary data: "
f"{key['name']:s}")
def _get_dataset_measurand(self, key, info=None):
"""Load dataset corresponding to channel measurement.
Load a dataset when the key refers to a measurand, whether uncalibrated
(counts) or calibrated in terms of brightness temperature, radiance, or
reflectance.
"""
# Get the dataset
# Get metadata for given dataset
measured = self.get_channel_measured_group_path(key['name'])
data = self[measured + "/effective_radiance"]
attrs = data.attrs.copy()
info = info.copy()
fv = attrs.pop(
"FillValue",
default_fillvals.get(data.dtype.str[1:], np.nan))
vr = attrs.get("valid_range", [-np.inf, np.inf])
if key['calibration'] == "counts":
attrs["_FillValue"] = fv
nfv = fv
else:
nfv = np.nan
data = data.where(data >= vr[0], nfv)
data = data.where(data <= vr[1], nfv)
res = self.calibrate(data, key)
# pre-calibration units no longer apply
attrs.pop("units")
# For each channel, the effective_radiance contains in the
# "ancillary_variables" attribute the value "pixel_quality". In
# FileYAMLReader._load_ancillary_variables, satpy will try to load
# "pixel_quality" but is lacking the context from what group to load
# it: in the FCI format, each channel group (data/<channel>/measured) has
# its own data variable 'pixel_quality'.
# Until we can have multiple pixel_quality variables defined (for
# example, with https://github.com/pytroll/satpy/pull/1088), rewrite
# the ancillary variable to include the channel. See also
# https://github.com/pytroll/satpy/issues/1171.
if "pixel_quality" in attrs["ancillary_variables"]:
attrs["ancillary_variables"] = attrs["ancillary_variables"].replace(
"pixel_quality", key['name'] + "_pixel_quality")
else:
raise ValueError(
"Unexpected value for attribute ancillary_variables, "
"which the FCI file handler intends to rewrite (see "
"https://github.com/pytroll/satpy/issues/1171 for why). "
f"Expected 'pixel_quality', got {attrs['ancillary_variables']:s}")
res.attrs.update(key.to_dict())
res.attrs.update(info)
res.attrs.update(attrs)
res.attrs["platform_name"] = self._platform_name_translate.get(
self["/attr/platform"], self["/attr/platform"])
# remove unpacking parameters for calibrated data
if key['calibration'] in ['brightness_temperature', 'reflectance']:
res.attrs.pop("add_offset")
res.attrs.pop("warm_add_offset")
res.attrs.pop("scale_factor")
res.attrs.pop("warm_scale_factor")
# remove attributes from original file which don't apply anymore
res.attrs.pop('long_name')
return res
def _get_dataset_quality(self, dsname):
"""Load a quality field for an FCI channel."""
grp_path = self.get_channel_measured_group_path(_get_channel_name_from_dsname(dsname))
dv_path = grp_path + "/pixel_quality"
data = self[dv_path]
return data
def _get_dataset_index_map(self, dsname):
"""Load the index map for an FCI channel."""
grp_path = self.get_channel_measured_group_path(_get_channel_name_from_dsname(dsname))
dv_path = grp_path + "/index_map"
data = self[dv_path]
data = data.where(data != data.attrs.get('_FillValue', 65535))
return data
def _get_aux_data_lut_vector(self, aux_data_name):
"""Load the lut vector of an auxiliary variable."""
lut = self[AUX_DATA[aux_data_name]]
fv = default_fillvals.get(lut.dtype.str[1:], np.nan)
lut = lut.where(lut != fv)
return lut
@staticmethod
def _getitem(block, lut):
return lut[block.astype('uint16')]
def _get_dataset_aux_data(self, dsname):
"""Get the auxiliary data arrays using the index map."""
# get index map
index_map = self._get_dataset_index_map(_get_channel_name_from_dsname(dsname))
# index map indexing starts from 1
index_map -= 1
# get lut values from 1-d vector
lut = self._get_aux_data_lut_vector(_get_aux_data_name_from_dsname(dsname))
# assign lut values based on index map indices
aux = index_map.data.map_blocks(self._getitem, lut.data, dtype=lut.data.dtype)
aux = xr.DataArray(aux, dims=index_map.dims, attrs=index_map.attrs, coords=index_map.coords)
# filter out out-of-disk values
aux = aux.where(index_map >= 0)
return aux
@staticmethod
def get_channel_measured_group_path(channel):
"""Get the channel's measured group path."""
measured_group_path = 'data/{}/measured'.format(channel)
return measured_group_path
def calc_area_extent(self, key):
"""Calculate area extent for a dataset."""
# if a user requests a pixel quality or index map before the channel data, the
# yaml-reader will ask the area extent of the pixel quality/index map field,
# which will ultimately end up here
channel_name = _get_channel_name_from_dsname(key['name'])
# Get metadata for given dataset
measured = self.get_channel_measured_group_path(channel_name)
# Get start/end line and column of loaded swath.
nlines, ncols = self[measured + "/effective_radiance/shape"]
logger.debug('Channel {} resolution: {}'.format(channel_name, ncols))
logger.debug('Row/Cols: {} / {}'.format(nlines, ncols))
# Calculate full globe line extent
h = float(self["data/mtg_geos_projection/attr/perspective_point_height"])
extents = {}
for coord in "xy":
coord_radian = self["data/{:s}/measured/{:s}".format(channel_name, coord)]
coord_radian_num = coord_radian[:] * coord_radian.scale_factor + coord_radian.add_offset
# FCI defines pixels by centroids (see PUG), while pyresample
# defines corners as lower left corner of lower left pixel, upper right corner of upper right pixel
# (see https://pyresample.readthedocs.io/en/latest/geo_def.html).
# Therefore, half a pixel (i.e. half scale factor) needs to be added in each direction.
# The grid origin is in the South-West corner.
# Note that the azimuth angle (x) is defined as positive towards West (see PUG - Level 1c Reference Grid)
# The elevation angle (y) is defined as positive towards North as per usual convention. Therefore:
# The values of x go from positive (West) to negative (East) and the scale factor of x is negative.
# The values of y go from negative (South) to positive (North) and the scale factor of y is positive.
# South-West corner (x positive, y negative)
first_coord_radian = coord_radian_num[0] - coord_radian.scale_factor / 2
# North-East corner (x negative, y positive)
last_coord_radian = coord_radian_num[-1] + coord_radian.scale_factor / 2
# convert to arc length in m
first_coord = first_coord_radian * h # arc length in m
last_coord = last_coord_radian * h
# the .item() call is needed with the h5netcdf backend, see
# https://github.com/pytroll/satpy/issues/972#issuecomment-558191583
# but we need to compute it first if this is dask
try:
first_coord = first_coord.compute()
last_coord = last_coord.compute()
except AttributeError: # not a dask.array
pass
extents[coord] = (first_coord.item(), last_coord.item())
# For the final extents, take into account that the image is upside down (lower line is North), and that
# East is defined as positive azimuth in Proj, so we need to multiply by -1 the azimuth extents.
# lower left x: west-ward extent: first coord of x, multiplied by -1 to account for azimuth orientation
# lower left y: north-ward extent: last coord of y
# upper right x: east-ward extent: last coord of x, multiplied by -1 to account for azimuth orientation
# upper right y: south-ward extent: first coord of y
area_extent = (-extents["x"][0], extents["y"][1], -extents["x"][1], extents["y"][0])
return area_extent, nlines, ncols
def get_area_def(self, key):
"""Calculate on-fly area definition for a dataset in geos-projection."""
# assumption: channels with same resolution should have same area
# cache results to improve performance
if key['resolution'] in self._cache:
return self._cache[key['resolution']]
a = float(self["data/mtg_geos_projection/attr/semi_major_axis"])
h = float(self["data/mtg_geos_projection/attr/perspective_point_height"])
rf = float(self["data/mtg_geos_projection/attr/inverse_flattening"])
lon_0 = float(self["data/mtg_geos_projection/attr/longitude_of_projection_origin"])
sweep = str(self["data/mtg_geos_projection"].sweep_angle_axis)
area_extent, nlines, ncols = self.calc_area_extent(key)
logger.debug('Calculated area extent: {}'.format(area_extent))
# use a (semi-major axis) and rf (reverse flattening) to define ellipsoid as recommended by EUM (see PUG)
proj_dict = {'a': a,
'lon_0': lon_0,
'h': h,
"rf": rf,
'proj': 'geos',
'units': 'm',
"sweep": sweep}
area_naming_input_dict = {'platform_name': 'mtg',
'instrument_name': 'fci',
'resolution': int(key['resolution'])
}
area_naming = get_geos_area_naming({**area_naming_input_dict,
**get_service_mode('fci', lon_0)})
area = geometry.AreaDefinition(
area_naming['area_id'],
area_naming['description'],
"",
proj_dict,
ncols,
nlines,
area_extent)
self._cache[key['resolution']] = area
return area
def calibrate(self, data, key):
"""Calibrate data."""
if key['calibration'] in ['brightness_temperature', 'reflectance', 'radiance']:
data = self.calibrate_counts_to_physical_quantity(data, key)
elif key['calibration'] != "counts":
logger.error(
"Received unknown calibration key. Expected "
"'brightness_temperature', 'reflectance', 'radiance' or 'counts', got "
+ key['calibration'] + ".")
return data
def calibrate_counts_to_physical_quantity(self, data, key):
"""Calibrate counts to radiances, brightness temperatures, or reflectances."""
# counts to radiance scaling
data = self.calibrate_counts_to_rad(data, key)
if key['calibration'] == 'brightness_temperature':
data = self.calibrate_rad_to_bt(data, key)
elif key['calibration'] == 'reflectance':
data = self.calibrate_rad_to_refl(data, key)
return data
def calibrate_counts_to_rad(self, data, key):
"""Calibrate counts to radiances."""
if key['name'] == 'ir_38':
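            # the 3.8 um channel has two gain regimes: counts in the upper
            # half of the 13-bit range use the "warm" scale/offset pair,
            # everything else uses the nominal pair (cf. PUG)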
data = xr.where(((2 ** 12 - 1 < data) & (data <= 2 ** 13 - 1)),
(data * data.attrs.get("warm_scale_factor", 1) +
data.attrs.get("warm_add_offset", 0)),
(data * data.attrs.get("scale_factor", 1) +
data.attrs.get("add_offset", 0))
)
else:
data = (data * data.attrs.get("scale_factor", 1) +
data.attrs.get("add_offset", 0))
measured = self.get_channel_measured_group_path(key['name'])
data.attrs.update({'radiance_unit_conversion_coefficient': self[measured +
'/radiance_unit_conversion_coefficient']})
return data
def calibrate_rad_to_bt(self, radiance, key):
"""IR channel calibration."""
# using the method from PUG section Converting from Effective Radiance to Brightness Temperature for IR Channels
measured = self.get_channel_measured_group_path(key['name'])
vc = self[measured + "/radiance_to_bt_conversion_coefficient_wavenumber"]
a = self[measured + "/radiance_to_bt_conversion_coefficient_a"]
b = self[measured + "/radiance_to_bt_conversion_coefficient_b"]
c1 = self[measured + "/radiance_to_bt_conversion_constant_c1"]
c2 = self[measured + "/radiance_to_bt_conversion_constant_c2"]
for v in (vc, a, b, c1, c2):
if v == v.attrs.get("FillValue",
default_fillvals.get(v.dtype.str[1:])):
logger.error(
"{:s} set to fill value, cannot produce "
"brightness temperatures for {:s}.".format(
v.attrs.get("long_name",
"at least one necessary coefficient"),
measured))
return radiance * np.nan
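        # inverse of the effective-radiance Planck relation from the PUG:
        #   T = c2 * vc / (a * ln(1 + c1 * vc**3 / L)) - b / a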
nom = c2 * vc
denom = a * np.log(1 + (c1 * vc ** 3) / radiance)
res = nom / denom - b / a
return res
def calibrate_rad_to_refl(self, radiance, key):
"""VIS channel calibration."""
measured = self.get_channel_measured_group_path(key['name'])
cesi = self[measured + "/channel_effective_solar_irradiance"]
if cesi == cesi.attrs.get(
"FillValue", default_fillvals.get(cesi.dtype.str[1:])):
logger.error(
"channel effective solar irradiance set to fill value, "
"cannot produce reflectance for {:s}.".format(measured))
return radiance * np.nan
sun_earth_distance = np.mean(self["state/celestial/earth_sun_distance"]) / 149597870.7 # [AU]
res = 100 * radiance * np.pi * sun_earth_distance ** 2 / cesi
return res
|
pytroll/satpy
|
satpy/readers/fci_l1c_nc.py
|
Python
|
gpl-3.0
| 23,583
|
[
"NetCDF"
] |
ceab32c442de1819937c807cc40fabc9171d7ec39cee248ce6f66817d031ddf2
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
ConversationHandler)
from config import Telegram_BOTID
import logging
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
GENDER, PHOTO, LOCATION, BIO = range(4)
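# Conversation states: each handler returns one of these to tell the
# ConversationHandler which group of handlers should process the next update.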
def start(bot, update):
reply_keyboard = [['Boy', 'Girl', 'Other']]
update.message.reply_text(
'Hi! My name is Professor Bot. I will hold a conversation with you. '
'Send /cancel to stop talking to me.\n\n'
'Are you a boy or a girl?',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return GENDER
def gender(bot, update):
user = update.message.from_user
logger.info("Gender of %s: %s" % (user.first_name, update.message.text))
update.message.reply_text('I see! Please send me a photo of yourself, '
'so I know what you look like, or send /skip if you don\'t want to.',
reply_markup=ReplyKeyboardRemove())
return PHOTO
def photo(bot, update):
user = update.message.from_user
photo_file = bot.get_file(update.message.photo[-1].file_id)
photo_file.download('user_photo.jpg')
logger.info("Photo of %s: %s" % (user.first_name, 'user_photo.jpg'))
update.message.reply_text('Gorgeous! Now, send me your location please, '
'or send /skip if you don\'t want to.')
return LOCATION
def skip_photo(bot, update):
user = update.message.from_user
logger.info("User %s did not send a photo." % user.first_name)
update.message.reply_text('I bet you look great! Now, send me your location please, '
'or send /skip.')
return LOCATION
def location(bot, update):
user = update.message.from_user
user_location = update.message.location
logger.info("Location of %s: %f / %f"
% (user.first_name, user_location.latitude, user_location.longitude))
update.message.reply_text('Maybe I can visit you sometime! '
'At last, tell me something about yourself.')
return BIO
def skip_location(bot, update):
user = update.message.from_user
logger.info("User %s did not send a location." % user.first_name)
update.message.reply_text('You seem a bit paranoid! '
'At last, tell me something about yourself.')
return BIO
def bio(bot, update):
user = update.message.from_user
logger.info("Bio of %s: %s" % (user.first_name, update.message.text))
update.message.reply_text('Thank you! I hope we can talk again some day.')
return ConversationHandler.END
def cancel(bot, update):
user = update.message.from_user
logger.info("User %s canceled the conversation." % user.first_name)
update.message.reply_text('Bye! I hope we can talk again some day.',
reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def error(bot, update, error):
logger.warning('Update "%s" caused error "%s"' % (update, error))
def main():
# Create the EventHandler and pass it your bot's token.
updater = Updater(Telegram_BOTID)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
GENDER: [RegexHandler('^(Boy|Girl|Other)$', gender)],
PHOTO: [MessageHandler(Filters.photo, photo),
CommandHandler('skip', skip_photo)],
LOCATION: [MessageHandler(Filters.location, location),
CommandHandler('skip', skip_location)],
BIO: [MessageHandler(Filters.text, bio)]
},
fallbacks=[CommandHandler('cancel', cancel)]
)
dp.add_handler(conv_handler)
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
    main()
|
giuva90/TreeBot
|
TEST/botConversation.py
|
Python
|
gpl-3.0
| 4,255
|
[
"VisIt"
] |
618c5b2a42304d1a98b9c71ef1b87a34d0673211ea53ce460f32862d12470973
|
# -*- coding: utf-8 -*-
"""
Main module for mito network normalization
@author: sweel_lim
"""
import os
import os.path as op
import cPickle as pickle
import re
from collections import defaultdict
from post_mitograph import mkdir_exist
from pipeline import pipefuncs as pf
import Tkinter
import TkClas
# pylint: disable=C0103
# data folder should contain subfolders of cell conditions, each condition
# having a skeleton, channel 1 and channel2 resampled VTK file for each cell
# quick check is the number of files in each subfolder is divisible by 3
datafolder = op.join('.', 'mutants', 'pre_normalized')
# output will be saved here
#savefolder = op.join('.', 'mutants', 'normalized_vtk')
# modify the 'RFP' and 'GFP' keys to suit
SEARCHDICT = defaultdict(dict,
{'resampled': {'RFP': 'ch2',
'GFP': 'ch1'},
'skeleton': {'RFP': 'skel'}})
# master dictionary of file paths stored here, with 'skel', 'ch1' and 'ch2' as
# the top level keys
vtks = defaultdict(dict)
def readfolder(folder):
"""
Helper function to return a defaultdict of VTK file paths and labels
"""
for subfolder in os.listdir(folder):
if op.isdir(op.join(folder, subfolder)):
for files in os.listdir(op.join(folder, subfolder)):
vtk_type = re.search(r'(skeleton|resampled)', files)
channel_type = re.search(r'([GR]FP)\w+\d+', files)
if vtk_type and channel_type:
cell_id = '_'.join([subfolder,
channel_type.
string[:channel_type.end()]])
# this is just used to determine which type of VTK file
# (ie skeleton or voxel/channel file to use)
prefix = (SEARCHDICT.
get(vtk_type.group(1)).
get(channel_type.group(1)))
# if no prefix found, means skeleton.vtk is based on
# ch1 which is not what we want
if prefix:
vtks[prefix][cell_id] = op.join(folder,
subfolder, files)
return vtks
def main():
"""
Pipeline to normalize 'raw' VTK files and make the mito network graph
"""
root = Tkinter.Tk()
root.withdraw()
gui = TkClas.SelectDirClient(root,
initialdir='./mutants/pre_normalized')
basedir = gui.askdirectory()
savefolder = op.join(basedir, 'Normalized')
print "files will be saved in {}!".format(savefolder)
mkdir_exist(savefolder)
try:
with open(op.join(basedir, 'background_all.pkl'), 'rb') as inpt:
bck = pickle.load(inpt)
except IOError:
print ("File not found: Make sure you have file 'background_all.pkl' "
"in selected directory")
paths = readfolder(basedir)
cells = paths['skel']
keys = sorted(cells.keys())
# use a for loop here instead of while.. because we know we will iterate
# fully over the list every time (i.e. no STOP flag)
for key in keys:
savename = op.join(savefolder,
'Normalized_{}_mitoskel.vtk'.format(key))
data, v1, v2 = pf.point_cloud_scalars(
paths['skel'][key],
paths['ch1'][key.replace('RFP', 'GFP')],
paths['ch2'][key])
dict_output = pf.normalize_skel(data, v1, v2,
backgroundfile=bck[key[:-4]])
pf.write_vtk(data, savename, **dict_output)
print "{} normalized!".format(key)
if __name__ == '__main__':
main()
|
moosekaka/sweepython
|
pipeline/write_raw_vtk.py
|
Python
|
mit
| 3,736
|
[
"VTK"
] |
b0ec7d36ad1ad597a8a0cf0034ca1cf250ba248d5fe9ba4ab5329d7eebba4ba5
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RClusterprofiler(RPackage):
"""This package implements methods to analyze and visualize functional
profiles (GO and KEGG) of gene and gene clusters."""
homepage = "https://www.bioconductor.org/packages/clusterProfiler/"
git = "https://git.bioconductor.org/packages/clusterProfiler.git"
version('3.4.4', commit='b86b00e8405fe130e439362651a5567736e2d9d7')
depends_on('r@3.4.0:3.4.9', when='@3.4.4')
depends_on('r-tidyr', type=('build', 'run'))
depends_on('r-rvcheck', type=('build', 'run'))
depends_on('r-qvalue', type=('build', 'run'))
depends_on('r-plyr', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-gosemsim', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-dose', type=('build', 'run'))
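# Typical usage of this package definition (assuming a working Spack
# installation; illustrative only):
#
#   spack install r-clusterprofiler@3.4.4
#
# Spack resolves the depends_on() declarations above into a full build/run
# dependency DAG before installing.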
|
mfherbst/spack
|
var/spack/repos/builtin/packages/r-clusterprofiler/package.py
|
Python
|
lgpl-2.1
| 2,199
|
[
"Bioconductor"
] |
e3fe004ece779e729c991b2b72a0b5ecb5f84c53d7bbf467175ab1f9454c952b
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
from pyscf.nao.m_dipole_ni import dipole_ni
#
#
#
def dipole_coo(sv, ao_log=None, funct=dipole_ni, **kvargs):
"""
Computes the dipole matrix and returns it in coo format (simplest sparse format to construct)
Args:
sv : (System Variables), this must have arrays of coordinates and species, etc
Returns:
    three coo_matrix objects: the x, y and z components of the dipole matrix for the whole system
"""
from pyscf.nao.m_ao_matelem import ao_matelem_c
from scipy.sparse import coo_matrix
from numpy import array, int64, zeros
aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp)
me = aome.init_one_set(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log)
atom2s = zeros((sv.natm+1), dtype=int64)
for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]
sp2rcut = array([max(mu2rcut) for mu2rcut in me.ao1.sp_mu2rcut])
nnz = 0
for sp1,rv1 in zip(sv.atom2sp,sv.atom2coord):
n1,rc1 = me.ao1.sp2norbs[sp1],sp2rcut[sp1]
for sp2,rv2 in zip(sv.atom2sp,sv.atom2coord):
if (rc1+sp2rcut[sp2])**2>((rv1-rv2)**2).sum() : nnz = nnz + n1*me.ao1.sp2norbs[sp2]
irow,icol,data = zeros(nnz, dtype=int64),zeros(nnz, dtype=int64),zeros((3,nnz)) # Start to construct three coo matrices
inz=-1
for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):
for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):
if (sp2rcut[sp1]+sp2rcut[sp2])**2<=sum((rv1-rv2)**2) : continue
dd = funct(me,sp1,rv1,sp2,rv2,**kvargs)
for o1 in range(s1,f1):
for o2 in range(s2,f2):
inz = inz+1
irow[inz],icol[inz],data[:,inz] = o1,o2,dd[:,o1-s1,o2-s2]
norbs = atom2s[-1]
sh = (norbs,norbs)
rc = (irow,icol)
return coo_matrix((data[0], rc), shape=sh),coo_matrix((data[1], rc), shape=sh),coo_matrix((data[2],rc), shape=sh)
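# A minimal standalone sketch of the COO construction pattern used above
# (plain SciPy, independent of PySCF): collect (row, col, value) triplets,
# then build the sparse matrix in one shot.
#
#   >>> from scipy.sparse import coo_matrix
#   >>> import numpy as np
#   >>> irow, icol = np.array([0, 1]), np.array([1, 0])
#   >>> vals = np.array([2.0, 3.0])
#   >>> coo_matrix((vals, (irow, icol)), shape=(2, 2)).toarray()
#   array([[0., 2.],
#          [3., 0.]])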
|
gkc1000/pyscf
|
pyscf/nao/m_dipole_coo.py
|
Python
|
apache-2.0
| 2,518
|
[
"PySCF"
] |
bda25d01aef86ea2da05ae76a2f516b9c4acaec2d9da54dbbe926481fe1d9788
|
################################################################
#
# kim_compare_lammps
#
################################################################
#
# Copyright 2018 the potfit development team
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the “Software”), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall
# be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# https://www.potfit.net/
#
#################################################################
import math
import os
import random
import sys
from subprocess import run
class lammps_run(object):
def __init__(self, binary, model, config, directory):
self.binary = binary
self.config = config
self.directory = directory
self.model = model
self.energy = None
self.forces = []
self.__write_config()
self.__write_input()
def __write_config(self):
filename = os.path.join(self.directory, 'config')
with open(filename, 'w') as f:
f.write('LAMMPS atomic data generated by kim_compare_lammps python script\n')
f.write('{} atoms\n'.format(self.config.num_atoms()))
f.write('{} atom types\n'.format(self.config.num_atom_types))
self.__write_box(f, self.config.box, self.config.scale)
f.write('\n Masses\n\n')
for i in range(self.config.num_atom_types):
f.write('{} {}\n'.format(i + 1, round(random.uniform(1, 200), 2)))
f.write('\n Atoms\n\n')
for i in range(len(self.config.atoms)):
f.write('{:5} {:5}\t{:2.8f}\t{:2.8f}\t{:2.8f}\n'.format(i + 1, self.config.atom_types[i], self.config.atoms[i][0], self.config.atoms[i][1], self.config.atoms[i][2]))
def __write_box(self, f, box, scale):
A = [x * scale[0] for x in box[0]]
B = [x * scale[1] for x in box[1]]
C = [x * scale[2] for x in box[2]]
ax = lenA = math.sqrt(sum(i*i for i in A))
lenB = math.sqrt(sum(i*i for i in B))
lenC = math.sqrt(sum(i*i for i in C))
normA = [x / lenA for x in A]
bx = sum(x * y for x, y in zip(B, normA))
by = math.sqrt(lenB * lenB - bx * bx)
cx = sum(x * y for x, y in zip(C, normA))
cy = (sum(x * y for x, y in zip(B, C)) - bx * cx) / by
cz = math.sqrt(lenC * lenC - cx * cx - cy * cy)
f.write('0 {} xlo xhi\n'.format(ax))
f.write('0 {} ylo yhi\n'.format(by))
f.write('0 {} zlo zhi\n'.format(cz))
f.write('{} {} {} xy xz yz\n'.format(bx, cx, cy))
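    # A quick standalone check of the triclinic reduction above (illustrative
    # box vectors, not from a real KIM run): for A=(2,0,0), B=(1,2,0),
    # C=(0,1,2) with unit scale this gives ax=2, bx=1, by=2, cx=0, cy=1, cz=2,
    # matching LAMMPS's restricted-triclinic (xlo/xhi, ylo/yhi, zlo/zhi,
    # xy xz yz) convention.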
def __write_input(self):
        filename = os.path.join(self.directory, 'input')
with open(filename, 'w') as f:
f.write('units\t\tmetal\n')
f.write('atom_style\tatomic\n')
f.write('newton\t\ton\n')
f.write('dimension\t3\n')
f.write('boundary p p p\n')
f.write('read_data config\n')
f.write('replicate 1 1 1\n')
f.write('neigh_modify one 10000\n')
f.write('pair_style kim {}\n'.format(self.model['NAME']))
f.write('pair_coeff * * {}\n'.format(' '.join(self.model['SPECIES']) if self.config.num_atom_types > 1 else self.model['SPECIES']))
f.write('fix 1 all nve\n')
f.write('dump myDump all custom 100 forces id type x y z fx fy fz\n')
f.write('run 0')
def run(self):
my_env = os.environ.copy()
my_env['ASAN_OPTIONS'] = 'detect_leaks=0'
res = run([self.binary, '-in', 'input'], capture_output=True, cwd=self.directory, env=my_env)
if res.returncode:
print(self.directory)
print(res.args)
print(res.stdout.decode())
print(res.stderr.decode())
            raise Exception('Error running LAMMPS')
filename = os.path.join(self.directory, 'log.lammps')
with open(filename, 'r') as f:
capture = False
for line in f:
if capture:
self.energy = float(line.split()[2])
break
if 'Step Temp E_pair E_mol TotEng Press' in line:
capture = True
filename = os.path.join(self.directory, 'forces')
with open(filename, 'r') as f:
capture = False
for line in f:
if capture:
items = line.split()
self.forces.append([float(x) for x in items[5:]])
continue
if 'ITEM: ATOMS' in line:
capture = True
return self.energy, self.forces
def cleanup(self):
pass
if __name__ == '__main__':
print('Please do not run this script directly, use kim_compare_lammps.py instead!')
sys.exit(-1)
|
potfit/potfit
|
util/kim/kim_compare_lammps/lammps.py
|
Python
|
gpl-2.0
| 5,179
|
[
"LAMMPS"
] |
36207409d0e75898a112a0697b26b921d2b3be7df3c51359abb095ea05c5deaa
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
import fontscale
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".drawdoc")
#------------------------------------------------------------------------
#
# DrawDoc
#
#------------------------------------------------------------------------
class DrawDoc(object):
"""
Abstract Interface for graphical document generators. Output formats
for graphical reports must implement this interface to be used by the
report system.
"""
def start_page(self):
raise NotImplementedError
def end_page(self):
raise NotImplementedError
def get_usable_width(self):
"""
Return the width of the text area in centimeters. The value is
the page width less the margins.
"""
width = self.paper.get_size().get_width()
right = self.paper.get_right_margin()
left = self.paper.get_left_margin()
return width - (right + left)
def get_usable_height(self):
"""
Return the height of the text area in centimeters. The value is
the page height less the margins.
"""
height = self.paper.get_size().get_height()
top = self.paper.get_top_margin()
bottom = self.paper.get_bottom_margin()
return height - (top + bottom)
def string_width(self, fontstyle, text):
"Determine the width need for text in given font"
return fontscale.string_width(fontstyle, text)
def string_multiline_width(self, fontstyle, text):
"Determine the width need for multiline text in given font"
return fontscale.string_multiline_width(fontstyle, text)
def draw_path(self, style, path):
raise NotImplementedError
def draw_box(self, style, text, x, y, w, h, mark=None):
""" @param mark: IndexMark to use for indexing (if supported) """
raise NotImplementedError
def draw_text(self, style, text, x1, y1, mark=None):
""" @param mark: IndexMark to use for indexing (if supported) """
raise NotImplementedError
def center_text(self, style, text, x1, y1, mark=None):
""" @param mark: IndexMark to use for indexing (if supported) """
raise NotImplementedError
def rotate_text(self, style, text, x, y, angle, mark=None):
""" @param mark: IndexMark to use for indexing (if supported) """
raise NotImplementedError
def draw_line(self, style, x1, y1, x2, y2):
raise NotImplementedError
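#------------------------------------------------------------------------
#
# Example backend (illustrative sketch, not part of Gramps)
#
#------------------------------------------------------------------------
class NullDrawDoc(DrawDoc):
    """Minimal no-op backend shown only to illustrate the DrawDoc
    interface above (hypothetical class, for illustration only); a real
    backend would emit actual drawing commands and honour self.paper and
    the style objects passed in."""
    def start_page(self):
        pass
    def end_page(self):
        pass
    def draw_path(self, style, path):
        pass
    def draw_box(self, style, text, x, y, w, h, mark=None):
        pass
    def draw_text(self, style, text, x1, y1, mark=None):
        pass
    def center_text(self, style, text, x1, y1, mark=None):
        pass
    def rotate_text(self, style, text, x, y, angle, mark=None):
        pass
    def draw_line(self, style, x1, y1, x2, y2):
        pass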
|
arunkgupta/gramps
|
gramps/gen/plug/docgen/drawdoc.py
|
Python
|
gpl-2.0
| 3,990
|
[
"Brian"
] |
5c14dd1cfb8d8667d619700b6b326bb27fc61ee1d56fa61a4d7f84769f778501
|
# cell.py ---
#
# Filename: cell.py
# Description:
# Author: subhasis ray
# Maintainer:
# Created: Fri Jul 24 10:04:47 2009 (+0530)
# Version:
# Last-Updated: Fri Oct 21 17:17:50 2011 (+0530)
# By: Subhasis Ray
# Update #: 225
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
# This is an extension of Cell class - to add some utility
# functions for debugging. All cell types should derive from this.
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import sys
from collections import defaultdict
import numpy as np
# from enthought.mayavi import mlab
import moose
import config
import pymoose
from nachans import *
from kchans import *
from cachans import *
from archan import *
from capool import *
from compartment import MyCompartment
def init_channel_lib():
"""Initialize the prototype channels in library"""
if not config.channel_lib:
config.LOGGER.debug('* Generating channel prototypes in /library')
for channel_name in config.channel_name_list:
channel_class = eval(channel_name)
channel = channel_class(channel_name, config.lib)
config.channel_lib[channel_name] = channel
config.LOGGER.debug( '* Created %s' % (channel.path))
config.channel_lib['SpikeGen'] = moose.SpikeGen('spike', config.lib)
return config.channel_lib
def nameindex(comp):
"""Utility function to sort by index in the compartment name"""
if comp is None:
return -1
pos = comp.name.rfind('_')
if pos >= 0:
index = int(comp.name[pos+1:])
return index
else:
return -1
def get_comp(cell, index):
"""Return a wrapper over compartment specified by index. None if
no such compartment exists."""
if index <= 0: return None
path = cell.path + '/comp_' + str(index)
# print 'get_comp', path
if config.context.exists(path):
return MyCompartment(path)
else:
raise Exception('Cell: %s , index: %d - no such compartment.' % (cell.path, index))
class TraubCell(moose.Cell):
channel_lib = init_channel_lib()
def __init__(self, *args):
# print 'TraubCell.__init__:', args
moose.Cell.__init__(self, *args)
self.method = config.solver # To override hsolve and use ee
# print 'Cell.__init__ done'
# Dynamic access to a compartment by index. It mimics a python
# list 'comp' via underlying function call to get_comp(cell,
# index)
comp = moose.listproperty(get_comp)
@property
def soma(self):
return get_comp(self, 1)
def pfile_name(self):
"""Each cell type subclass should implement this"""
raise NotImplementedError, "function pfile_name not implemented"
@classmethod
def read_proto(cls, filename, cellname, level_dict=None, depth_dict=None, params=None):
"""Read a prototype cell from .p file into library.
        Each cell type class should initialize its prototype with a
        call to this function, with something like this within the
        class declaration:
        prototype = TraubCell.read_proto("MyCellType.p", "MyClassName")
        filename -- path (relative/absolute) of the cell prototype file.
        cellname -- path of the cell to be created
        params -- if specified, channels in /library are adjusted with
        the parameters specified in this (via a call to
        adjust_chanlib).
"""
config.LOGGER.debug('Reading proto:%s' % (filename))
if params is not None:
TraubCell.adjust_chanlib(params)
ret = None
cellpath = config.lib.path + '/' + cellname
if not config.context.exists(cellpath):
config.LOGGER.debug(__name__ + ' reading cell: ' + cellpath)
for handler in config.LOGGER.handlers:
handler.flush()
config.context.readCell(filename, cellpath)
else:
config.LOGGER.debug(__name__ + ' cell exists: ' + cellpath)
ret = moose.Cell(cellpath)
# TraubCell.generate_morphology(ret)
if (depth_dict is not None) and (level_dict is not None):
for level, comp_nos in level_dict.items():
try:
depth = depth_dict[level]
for comp_no in comp_nos:
comp = get_comp(ret, comp_no)
comp.z = depth
except KeyError:
print 'No depth info for level %s' % (level)
config.LOGGER.debug('Returning cell %s' % (ret.path))
for handler in config.LOGGER.handlers:
handler.flush()
return ret
@classmethod
def adjust_chanlib(cls, chan_params):
"""Set the properties of prototype channels in /library to fit
the channel properties of this cell type.
chan_params -- dict containing the channel parameters. The
following string keys should be there with float values:
        ENa -- Na channel reversal potential
EK -- K+ channel reversal potential
EAR -- AR channel reversal potential
ECa -- Ca+2 channel reversal potential
TauCa -- CaPool decay time constant
X_AR -- AR channel's initial X value.
"""
config.LOGGER.debug('Adjusting channel properties.')
for key, channel in init_channel_lib().items():
if isinstance(channel, KChannel):
channel.Ek = chan_params['EK']
elif isinstance(channel, NaChannel):
channel.Ek = chan_params['ENa']
elif isinstance(channel, CaChannel):
channel.Ek = chan_params['ECa']
elif isinstance(channel, AR):
channel.Ek = chan_params['EAR']
try:
channel.X = chan_params['X_AR']
except KeyError:
channel.X = 0.25
elif isinstance(channel, CaPool):
channel.tau = chan_params['TauCa']
@classmethod
def readlevels(cls, filename):
"""Read the mapping between levels and compartment numbers and
return a defaultdict with level no. as key and set of
compartments in it as value.
The file filename should have two columns:
comp_no level_no
"""
ret = defaultdict(set)
        with open(filename, 'r') as level_file:
for line in level_file:
tokens = line.split()
if not tokens:
continue
if len(tokens) != 2:
print filename, ' - Tokens: ', tokens, len(tokens)
sys.exit(0)
ret[int(tokens[1])].add(int(tokens[0]))
return ret
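    # Example (hypothetical level file, for illustration): a file containing
    #   1 0
    #   2 1
    #   3 1
    # yields defaultdict(set, {0: set([1]), 1: set([2, 3])}) -- level number
    # mapped to the set of compartment numbers at that level.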
def _ca_tau(self):
raise NotImplementedError("You must set tau for [Ca2+] decay in the method _ca_tau() in subclass.")
def _setup_passive(self):
raise NotImplementedError("You must define _setup_passive to set the passive membrane properties and other post-readcell tweakings.")
def _setup_channels(self):
raise NotImplementedError("You must define setup_channels to set the channel reversal potential and other post-readcell tweakings.")
def _topology(self):
raise NotImplementedError("You must define cell topology in the method _topology() in subclass.")
def has_cycle(self, comp=None):
if comp is None:
comp = self.soma
comp._visited = True
ret = False
for item in comp.raxial_list:
if hasattr(item, '_visited') and item._visited:
config.LOGGER.warning('Cycle between: %s and %s.' % (comp.path, item.path))
return True
            ret = ret or self.has_cycle(item)
return ret
@classmethod
def generate_morphology(cls, cell, iterations=50):
"""Automatically generate morphology information for spatial
layout.
An implementation of Fruchterman Reingold algorithm in 3D."""
nodes = defaultdict(dict)
for comp in cell.childList:
if moose.Neutral(comp).className == 'Compartment':
config.LOGGER.debug('Appending %s' % (comp))
nodes[comp]['pos'] = 0.0
nodes[comp]['disp'] = 0.0
# populate the edge set
edges = set()
for comp in nodes.keys():
nid_list = moose.Neutral(comp).neighbours('raxial')
for neighbour in nid_list:
config.LOGGER.debug('Adding (%s, %s)' % (comp, neighbour))
edges.add((comp, neighbour))
# Generate random initial positions for all the compartments
init_pos = np.ones((len(nodes), 3)) * 0.5 - np.random.rand(len(nodes), 3)
width = 1.0
depth = 1.0
height = 1.0
ii = 0
for key, value in nodes.items():
            value['pos'] = init_pos[ii]
ii += 1
volume = width * height * depth
k = np.power(volume / len(nodes), 1.0/3)
t = 0.1
dt = t / iterations
for ii in range(iterations):
print 'Iteration', ii
# calculate repulsive forces
for comp, data in nodes.items():
data['disp'] = np.zeros(3)
for other, o_data in nodes.items():
if comp != other:
delta = data['pos'] - o_data['pos']
distance = np.linalg.norm(delta)
if distance < 1e-2:
distance = 1e-2
data['disp'] += delta * k * k / distance ** 2
print comp, other, delta, data['disp']
for edge in edges: # calculate attractive forces
delta = nodes[edge[0]]['pos'] - nodes[edge[1]]['pos']
distance = np.linalg.norm(delta)
if distance < 1e-2:
distance = 1e-2
nodes[edge[0]]['disp'] -= delta * distance / k
nodes[edge[1]]['disp'] += delta * distance / k
print edge[0], edge[1], delta, nodes[edge[0]]['disp'], nodes[edge[1]]['disp']
for key, data in nodes.items():
data['pos'] += data['disp']/np.linalg.norm(data['disp']) * min(np.linalg.norm(data['disp']), t)
data['pos'][0] = min(width/2, max(-width/2, data['pos'][0]))
data['pos'][1] = min(height/2, max(-height/2, data['pos'][1]))
data['pos'][2] = min(depth/2, max(-depth/2, data['pos'][2]))
t -= dt
pos = []
for key, data in nodes.items():
print key, data['pos']
pos.append(data['pos'])
pos = np.array(pos)
        from enthought.mayavi import mlab  # the module-level import is commented out above
        points = mlab.points3d(pos[:,0], pos[:, 1], pos[:, 2])
mlab.show()
raise Exception('Stop here for testing')
#
# cell.py ends here
|
BhallaLab/moose-thalamocortical
|
DEMOS/pymoose/traub2005/py/cell.py
|
Python
|
lgpl-2.1
| 11,594
|
[
"MOOSE",
"Mayavi"
] |
1f409d50d31a1d797c400974b5fc4fbcc7441e991e1f4175b6ea020a6fa9c20d
|
import io
from useful import reverse, remove_dashes, complement
file = [line.rstrip('\n') for line in open("results_RCM.txt", 'r')] #'''input("What's the file you want to read?")'''
left_distances = []
right_distances = []
circs = ["no circ at this position"]
for index, line in enumerate(file):
if ("Left intron" in line):
circs.append([left_distances, right_distances, file[index-2]])
left_distances = []
right_distances = []
continue
elif ("Subject reverse" in line):
#print(remove_dashes(line).replace('Subject reverse ', ''))
pos_ref=index
while (not "Right intron" in file[pos_ref]):
pos_ref+=1
pos_ref+=2
right_distances.append(file[pos_ref].replace("5' ","").replace(" 3'","")[::-1].find(line.replace('Subject reverse ', '')[::-1]))
elif ("blast query" in line):
#print(remove_dashes(line).replace(' blast query', ''))
pos_ref=index
while (not "Left intron" in file[pos_ref]):
pos_ref+=1
pos_ref+=2
left_distances.append(file[pos_ref].replace("5' ","").replace(" 3'","")[::-1].find(remove_dashes(line).replace(' blast query', '')[::-1]))
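# The reversed find above measures distance from the 3' end: e.g. (toy
# sequences, for illustration only) "AAACGTAA"[::-1].find("CGT"[::-1]) == 2,
# i.e. the match ends 2 characters before the end of the intron sequence.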
output = open('distance.csv', 'w')
output.truncate()
for index, circ in enumerate(circs):
if (index == 0):
continue
output.write(circ[2])
output.write("\t")
output.write(str((sum(circ[0], 0.0)+sum(circ[1], 0.0)) / (len(circ[0])+len(circ[1]))))
output.write("\n")
#print(sum(circ[index], 0.0) / len(circ[index]))
output.close()
#print (sum(circs[1][0], 0.0) / len(circs[1][0]))
#print(len(circs))
|
alexandruioanvoda/autoBLAST
|
distance_analysis.py
|
Python
|
gpl-3.0
| 1,641
|
[
"BLAST"
] |
1ffb5f82c4dba7091b135714a1539f60fb2a0eb2bb55cf33c934b92f81e85154
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import dask.array as da
import numpy as np
from packaging.version import Version
import sympy
from hyperspy.component import _get_scaling_factor
from hyperspy._components.expression import Expression
from hyperspy.misc.utils import is_binned # remove in v2.0
sqrt2pi = np.sqrt(2 * np.pi)
def _estimate_skewnormal_parameters(signal, x1, x2, only_current):
axis = signal.axes_manager.signal_axes[0]
i1, i2 = axis.value_range_to_indices(x1, x2)
X = axis.axis[i1:i2]
if only_current is True:
data = signal()[i1:i2]
X_shape = (len(X),)
i = 0
x0_shape = (1,)
else:
i = axis.index_in_array
data_gi = [slice(None), ] * len(signal.data.shape)
data_gi[axis.index_in_array] = slice(i1, i2)
data = signal.data[tuple(data_gi)]
X_shape = [1, ] * len(signal.data.shape)
X_shape[axis.index_in_array] = data.shape[i]
x0_shape = list(data.shape)
x0_shape[i] = 1
a1 = np.sqrt(2 / np.pi)
b1 = (4 / np.pi - 1) * a1
m1 = np.sum(X.reshape(X_shape) * data, i) / np.sum(data, i)
m2 = np.abs(np.sum((X.reshape(X_shape) - m1.reshape(x0_shape)) ** 2 * data, i)
/ np.sum(data, i))
m3 = np.abs(np.sum((X.reshape(X_shape) - m1.reshape(x0_shape)) ** 3 * data, i)
/ np.sum(data, i))
x0 = m1 - a1 * (m3 / b1) ** (1 / 3)
scale = np.sqrt(m2 + a1 ** 2 * (m3 / b1) ** (2 / 3))
delta = np.sqrt(1 / (a1**2 + m2 * (b1 / m3) ** (2 / 3)))
shape = delta / np.sqrt(1 - delta**2)
iheight = np.argmin(np.abs(X.reshape(X_shape) - x0.reshape(x0_shape)), i)
    # height is the value of the function at x0, which has to be computed
    # differently for a dask array (lazy) and depending on the dimension
if isinstance(data, da.Array):
x0, iheight, scale, shape = da.compute(x0, iheight, scale, shape)
if only_current is True or signal.axes_manager.navigation_dimension == 0:
height = data.vindex[iheight].compute()
elif signal.axes_manager.navigation_dimension == 1:
height = data.vindex[np.arange(signal.axes_manager.navigation_size),
iheight].compute()
else:
height = data.vindex[(*np.indices(signal.axes_manager.navigation_shape),
iheight)].compute()
else:
if only_current is True or signal.axes_manager.navigation_dimension == 0:
height = data[iheight]
elif signal.axes_manager.navigation_dimension == 1:
height = data[np.arange(signal.axes_manager.navigation_size),
iheight]
else:
height = data[(*np.indices(signal.axes_manager.navigation_shape),
iheight)]
return x0, height, scale, shape
class SkewNormal(Expression):
r"""Skew normal distribution component.
| Asymmetric peak shape based on a normal distribution.
| For definition see
https://en.wikipedia.org/wiki/Skew_normal_distribution
| See also http://azzalini.stat.unipd.it/SN/
|
.. math::
f(x) &= 2 A \phi(x) \Phi(x) \\
\phi(x) &= \frac{1}{\sqrt{2\pi}}\mathrm{exp}{\left[
-\frac{t(x)^2}{2}\right]} \\
\Phi(x) &= \frac{1}{2}\left[1 + \mathrm{erf}\left(\frac{
\alpha~t(x)}{\sqrt{2}}\right)\right] \\
t(x) &= \frac{x-x_0}{\omega}
============== =============
Variable Parameter
============== =============
:math:`x_0` x0
:math:`A` A
:math:`\omega` scale
:math:`\alpha` shape
============== =============
    Parameters
    ----------
x0 : float
Location of the peak position (not maximum, which is given by
the `mode` property).
A : float
Height parameter of the peak.
scale : float
Width (sigma) parameter.
    shape : float
Skewness (asymmetry) parameter. For shape=0, the normal
distribution (Gaussian) is obtained. The distribution is
right skewed (longer tail to the right) if shape>0 and is
left skewed if shape<0.
The properties `mean` (position), `variance`, `skewness` and `mode`
(=position of maximum) are defined for convenience.
"""
def __init__(self, x0=0., A=1., scale=1., shape=0.,
module=['numpy', 'scipy'], **kwargs):
if Version(sympy.__version__) < Version("1.3"):
raise ImportError("The `SkewNormal` component requires "
"SymPy >= 1.3")
# We use `_shape` internally because `shape` is already taken in sympy
# https://github.com/sympy/sympy/pull/20791
super().__init__(
expression="2 * A * normpdf * normcdf;\
normpdf = exp(- t ** 2 / 2) / sqrt(2 * pi);\
normcdf = (1 + erf(_shape * t / sqrt(2))) / 2;\
t = (x - x0) / scale",
name="SkewNormal",
x0=x0,
A=A,
scale=scale,
shape=shape,
module=module,
autodoc=False,
rename_pars={"_shape": "shape"},
**kwargs,
)
# Boundaries
self.A.bmin = 0.
self.scale.bmin = 0
self.isbackground = False
self.convolved = True
def estimate_parameters(self, signal, x1, x2, only_current=False):
"""Estimate the skew normal distribution by calculating the momenta.
Parameters
----------
signal : Signal1D instance
x1 : float
Defines the left limit of the spectral range to use for the
estimation.
x2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
bool
Notes
-----
Adapted from Lin, Lee and Yen, Statistica Sinica 17, 909-927 (2007)
https://www.jstor.org/stable/24307705
Examples
--------
>>> g = hs.model.components1D.SkewNormal()
>>> x = np.arange(-10, 10, 0.01)
>>> data = np.zeros((32, 32, 2000))
>>> data[:] = g.function(x).reshape((1, 1, 2000))
>>> s = hs.signals.Signal1D(data)
>>> s.axes_manager._axes[-1].offset = -10
>>> s.axes_manager._axes[-1].scale = 0.01
>>> g.estimate_parameters(s, -10, 10, False)
"""
super()._estimate_parameters(signal)
axis = signal.axes_manager.signal_axes[0]
x0, height, scale, shape = _estimate_skewnormal_parameters(
signal, x1, x2, only_current
)
scaling_factor = _get_scaling_factor(signal, axis, x0)
if only_current is True:
self.x0.value = x0
self.A.value = height * sqrt2pi
self.scale.value = scale
self.shape.value = shape
if is_binned(signal):
# in v2 replace by
#if axis.is_binned:
self.A.value /= scaling_factor
return True
else:
if self.A.map is None:
self._create_arrays()
self.A.map['values'][:] = height * sqrt2pi
if is_binned(signal):
# in v2 replace by
#if axis.is_binned:
self.A.map['values'] /= scaling_factor
self.A.map['is_set'][:] = True
self.x0.map['values'][:] = x0
self.x0.map['is_set'][:] = True
self.scale.map['values'][:] = scale
self.scale.map['is_set'][:] = True
self.shape.map['values'][:] = shape
self.shape.map['is_set'][:] = True
self.fetch_stored_values()
return True
@property
def mean(self):
delta = self.shape.value / np.sqrt(1 + self.shape.value**2)
return self.x0.value + self.scale.value * delta * np.sqrt(2 / np.pi)
@property
def variance(self):
delta = self.shape.value / np.sqrt(1 + self.shape.value**2)
return self.scale.value**2 * (1 - 2 * delta**2 / np.pi)
@property
def skewness(self):
delta = self.shape.value / np.sqrt(1 + self.shape.value**2)
return (4 - np.pi)/2 * (delta * np.sqrt(2/np.pi))**3 / (1 -
2 * delta**2 / np.pi)**(3/2)
@property
def mode(self):
delta = self.shape.value / np.sqrt(1 + self.shape.value**2)
muz = np.sqrt(2 / np.pi) * delta
sigmaz = np.sqrt(1 - muz**2)
if self.shape.value == 0:
return self.x0.value
else:
m0 = muz - self.skewness * sigmaz / 2 - np.sign(self.shape.value) \
/ 2 * np.exp(- 2 * np.pi / np.abs(self.shape.value))
return self.x0.value + self.scale.value * m0
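# A quick sanity check of the convenience properties above (illustrative
# values; with shape=0 the component reduces to a Gaussian):
#
#   >>> sn = SkewNormal(x0=0., A=1., scale=1., shape=0.)
#   >>> sn.mean, sn.variance, sn.mode
#   (0.0, 1.0, 0.0)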
|
erh3cq/hyperspy
|
hyperspy/_components/skew_normal.py
|
Python
|
gpl-3.0
| 9,625
|
[
"Gaussian"
] |
331a4d5f76f2358074cf4a455c48d00f230b6d97ae9bf866fad5ff2a0b7d28b5
|
import copy
import json
import multiprocessing
import os
import random
import shutil
import string
import tempfile
from contextlib import contextmanager
from os import chdir, getcwd, mkdir
from os.path import exists
import pkgpanda.build.constants
import pkgpanda.build.src_fetchers
from pkgpanda import expand_require as expand_require_exceptions
from pkgpanda import Install, PackageId, Repository
from pkgpanda.actions import add_package_file
from pkgpanda.constants import install_root, PKG_DIR, RESERVED_UNIT_NAMES
from pkgpanda.exceptions import FetchError, PackageError, ValidationError
from pkgpanda.subprocess import CalledProcessError, check_call, check_output
from pkgpanda.util import (check_forbidden_services, download_atomic,
hash_checkout, is_windows, load_json, load_string, logger,
make_directory, make_file, make_tar, remove_directory, rewrite_symlinks, write_json,
write_string)
class BuildError(Exception):
"""An error while building something."""
def __init__(self, msg: str):
self.msg = msg
def __str__(self):
return self.msg
class DockerCmd:
def __init__(self):
self.volumes = dict()
self.environment = dict()
self.container = str()
def run(self, name, cmd):
container_name = "{}-{}".format(
name, ''.join(
random.choice(string.ascii_lowercase) for _ in range(10)
)
)
docker = ["docker", "run", "--name={}".format(container_name)]
if is_windows:
# Default number of processes on Windows is 1, so bumping up to use all of them.
# The default memory allowed on Windows is 1GB. Some packages (mesos is an example)
# needs about 3.5gb to compile a single file. Therefore we need about 4gb per CPU.
numprocs = os.environ.get('NUMBER_OF_PROCESSORS')
docker += ["-m", "{0}gb".format(int(numprocs) * 4), "--cpu-count", numprocs]
for host_path, container_path in self.volumes.items():
docker += ["-v", "{0}:{1}".format(host_path, container_path)]
for k, v in self.environment.items():
docker += ["-e", "{0}={1}".format(k, v)]
docker.append(self.container)
docker += cmd
check_call(docker)
DockerCmd.clean(container_name)
@staticmethod
def clean(name):
"""Cleans up the specified container"""
check_call(["docker", "rm", "-v", name])
def get_variants_from_filesystem(directory, extension):
results = set()
for filename in os.listdir(directory):
# Skip things that don't end in the extension
if not filename.endswith(extension):
continue
variant = filename[:-len(extension)]
# Empty name variant shouldn't have a `.` following it
if variant == '.':
raise BuildError("Invalid filename {}. The \"default\" variant file should be just {}".format(
filename, extension))
# Empty / default variant is represented as 'None'.
if variant == '':
variant = None
else:
# Should be foo. since we've moved the extension.
if variant[-1] != '.':
raise BuildError("Invalid variant filename {}. Expected a '.' separating the "
"variant name and extension '{}'.".format(filename, extension))
variant = variant[:-1]
results.add(variant)
return results
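# Example (hypothetical directory listing): called with extension
# 'treeinfo.json', a file named 'treeinfo.json' contributes the default
# variant None, while 'windows.treeinfo.json' contributes the variant
# 'windows'.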
def get_src_fetcher(src_info, cache_dir, working_directory):
try:
kind = src_info['kind']
if kind not in pkgpanda.build.src_fetchers.all_fetchers:
raise ValidationError("No known way to catch src with kind '{}'. Known kinds: {}".format(
kind,
pkgpanda.src_fetchers.all_fetchers.keys()))
args = {
'src_info': src_info,
'cache_dir': cache_dir
}
if src_info['kind'] in ['git_local', 'url', 'url_extract']:
args['working_directory'] = working_directory
return pkgpanda.build.src_fetchers.all_fetchers[kind](**args)
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
class TreeInfo:
ALLOWED_TREEINFO_KEYS = {'exclude', 'variants', 'core_package_list', 'bootstrap_package_list'}
def __init__(self, treeinfo_dict):
if treeinfo_dict.keys() > self.ALLOWED_TREEINFO_KEYS:
raise BuildError(
"treeinfo can only include the keys {}. Found {}".format(
self.ALLOWED_TREEINFO_KEYS, treeinfo_dict.keys()))
self.excludes = set(self._get_package_list(treeinfo_dict, 'exclude'))
self.core_package_list = set(self._get_package_list(treeinfo_dict, 'core_package_list', self.excludes))
self.bootstrap_package_list = set(self._get_package_list(
treeinfo_dict,
'bootstrap_package_list',
self.excludes))
# List of mandatory package variants to include in the buildinfo.
self.variants = treeinfo_dict.get('variants', dict())
if not isinstance(self.variants, dict):
raise BuildError("treeinfo variants must be a dictionary of package name to variant name")
@staticmethod
def _get_package_list(treeinfo_dict, key, excludes=None):
"""Return a list of package name strings from treeinfo_dict by key.
If key isn't present in treeinfo_dict, an empty list is returned.
"""
excludes = excludes or list()
package_list = treeinfo_dict.get(key, list())
# Validate package list.
if not isinstance(package_list, list):
raise BuildError("{} must be either null (meaning don't use) or a list of package names.".format(key))
for package_name in package_list:
if not isinstance(package_name, str):
raise BuildError("{} must be a list of strings. Found a {} with the value: {}".format(
key, type(package_name), package_name))
try:
PackageId.validate_name(package_name)
except ValidationError as ex:
raise BuildError("Invalid package name in {}: {}".format(key, package_name)) from ex
if package_name in excludes:
raise BuildError("Package found in both exclude and {}: {}".format(key, package_name))
return package_list
class PackageSet:
def __init__(self, variant, treeinfo, package_store):
self.variant = variant
self.all_packages = self.package_tuples_with_dependencies(
# If core_package_list is empty, default to all non-excluded packages.
treeinfo.core_package_list or (package_store.packages_by_name.keys() - treeinfo.excludes),
treeinfo,
package_store
)
self.validate_package_tuples(self.all_packages, treeinfo, package_store)
if treeinfo.bootstrap_package_list:
self.bootstrap_packages = self.package_tuples_with_dependencies(
treeinfo.bootstrap_package_list,
treeinfo,
package_store
)
self.validate_package_tuples(self.bootstrap_packages, treeinfo, package_store)
else:
self.bootstrap_packages = self.all_packages
# Validate bootstrap packages are a subset of all packages.
for package_name, variant in self.bootstrap_packages:
if (package_name, variant) not in self.all_packages:
raise BuildError("Bootstrap package {} (variant {}) not found in set of all packages".format(
package_name, pkgpanda.util.variant_name(variant)))
@staticmethod
def package_tuples_with_dependencies(package_names, treeinfo, package_store):
package_tuples = set((name, treeinfo.variants.get(name)) for name in set(package_names))
to_visit = list(package_tuples)
while to_visit:
package_tuple = to_visit.pop()
for require in package_store.get_buildinfo(*package_tuple)['requires']:
require_tuple = expand_require(require)
if require_tuple not in package_tuples:
to_visit.append(require_tuple)
package_tuples.add(require_tuple)
return package_tuples
@staticmethod
def validate_package_tuples(package_tuples, treeinfo, package_store):
# Validate that all packages have the variant specified in treeinfo.
print('package_tuples = %r' % package_tuples)
print('treeinfo = %r' % treeinfo.variants)
for package_name, variant in package_tuples:
treeinfo_variant = treeinfo.variants.get(package_name)
if variant != treeinfo_variant:
raise BuildError(
"package {} is supposed to have variant {} included in the tree according to the treeinfo, "
"but variant {} was found.".format(
package_name,
pkgpanda.util.variant_name(treeinfo_variant),
pkgpanda.util.variant_name(variant),
)
)
# Validate that all needed packages are built and not excluded by treeinfo.
for package_name, variant in package_tuples:
if (package_name, variant) not in package_store.packages:
raise BuildError(
"package {} variant {} is needed (explicitly requested or as a requires) "
"but is not in the set of built packages.".format(
package_name,
pkgpanda.util.variant_name(variant),
)
)
if package_name in treeinfo.excludes:
raise BuildError("package {} is needed (explicitly requested or as a requires) "
"but is excluded according to the treeinfo.json.".format(package_name))
class PackageStore:
def __init__(self, packages_dir, repository_url):
self._builders = {}
self._repository_url = repository_url.rstrip('/') if repository_url is not None else None
self._packages_dir = packages_dir.rstrip('/')
# Load all possible packages, making a dictionary from (name, variant) -> buildinfo
self._packages = dict()
self._packages_by_name = dict()
self._package_folders = dict()
# Load an upstream if one exists
# TODO(cmaloney): Allow upstreams to have upstreams
self._package_cache_dir = self._packages_dir + "/cache/packages"
self._upstream_dir = self._packages_dir + "/cache/upstream/checkout"
self._upstream = None
self._upstream_package_dir = self._upstream_dir + "/packages"
# TODO(cmaloney): Make it so the upstream directory can be kept around
remove_directory(self._upstream_dir)
upstream_config = self._packages_dir + '/upstream.json'
if os.path.exists(upstream_config):
try:
self._upstream = get_src_fetcher(
load_optional_json(upstream_config),
self._packages_dir + '/cache/upstream',
packages_dir)
self._upstream.checkout_to(self._upstream_dir)
if os.path.exists(self._upstream_package_dir + "/upstream.json"):
raise Exception("Support for upstreams which have upstreams is not currently implemented")
except Exception as ex:
raise BuildError("Error fetching upstream: {}".format(ex))
# Iterate through the packages directory finding all packages. Note this package dir comes
# first, then we ignore duplicate definitions of the same package
package_dirs = [self._packages_dir]
if self._upstream:
package_dirs.append(self._upstream_package_dir)
for directory in package_dirs:
for name in os.listdir(directory):
package_folder = directory + '/' + name
# Ignore files / non-directories
if not os.path.isdir(package_folder):
continue
# If we've already found this package, it means 1+ versions have been defined. Use
# those and ignore everything in the upstreams.
if name in self._packages_by_name:
continue
if is_windows:
builder_folder = os.path.join(directory, name, 'docker.windows')
else:
builder_folder = os.path.join(directory, name, 'docker')
if os.path.exists(builder_folder):
self._builders[name] = builder_folder
# Search the directory for buildinfo.json files, record the variants
for variant in get_variants_from_filesystem(package_folder, 'buildinfo.json'):
# Only adding the default dictionary once we know we have a package.
self._packages_by_name.setdefault(name, dict())
buildinfo = load_buildinfo(package_folder, variant)
self._packages[(name, variant)] = buildinfo
self._packages_by_name[name][variant] = buildinfo
if name in self._package_folders:
assert self._package_folders[name] == package_folder
else:
self._package_folders[name] = package_folder
def get_package_folder(self, name):
return self._package_folders[name]
def get_bootstrap_cache_dir(self):
return self._packages_dir + "/cache/bootstrap"
def get_complete_cache_dir(self):
return self._packages_dir + "/cache/complete"
def get_buildinfo(self, name, variant):
return self._packages[(name, variant)]
def get_last_complete_set(self, variants):
def get_last_complete(variant):
complete_latest = (
self.get_complete_cache_dir() + '/' + pkgpanda.util.variant_prefix(variant) + 'complete.latest.json')
if not os.path.exists(complete_latest):
raise BuildError("No last complete found for variant {}. Expected to find {} to match "
"{}".format(pkgpanda.util.variant_name(variant), complete_latest,
pkgpanda.util.variant_prefix(variant) + 'treeinfo.json'))
return load_json(complete_latest)
result = {}
if variants is None:
# Get all defined variants.
requested_variants = self.list_trees()
else:
requested_variants = variants
for variant in requested_variants:
result[variant] = get_last_complete(variant)
return result
def get_last_build_filename(self, name, variant):
return self.get_package_cache_folder(name) + '/{}latest'.format(pkgpanda.util.variant_prefix(variant))
def get_package_path(self, pkg_id):
return self.get_package_cache_folder(pkg_id.name) + '/{}.tar.xz'.format(pkg_id)
def get_package_cache_folder(self, name):
directory = self._package_cache_dir + '/' + name
make_directory(directory)
return directory
def list_trees(self):
return get_variants_from_filesystem(self._packages_dir, 'treeinfo.json')
def get_package_set(self, variant):
return PackageSet(variant, TreeInfo(load_config_variant(self._packages_dir, variant, 'treeinfo.json')), self)
def get_all_package_sets(self):
return [self.get_package_set(variant) for variant in sorted(self.list_trees(), key=pkgpanda.util.variant_str)]
@property
def packages(self):
return self._packages
@property
def builders(self):
return self._builders.copy()
@property
def packages_by_name(self):
return self._packages_by_name
@property
def packages_dir(self):
return self._packages_dir
def try_fetch_by_id(self, pkg_id: PackageId):
if self._repository_url is None:
return False
# TODO(cmaloney): Use storage providers to download instead of open coding.
pkg_path = "{}.tar.xz".format(pkg_id)
url = self._repository_url + '/packages/{0}/{1}'.format(pkg_id.name, pkg_path)
try:
directory = self.get_package_cache_folder(pkg_id.name)
# TODO(cmaloney): Move to some sort of logging mechanism?
print("Attempting to download", pkg_id, "from", url, "to", directory)
download_atomic(directory + '/' + pkg_path, url, directory)
assert os.path.exists(directory + '/' + pkg_path)
return directory + '/' + pkg_path
except FetchError:
return False
def try_fetch_bootstrap_and_active(self, bootstrap_id):
if self._repository_url is None:
return False
try:
bootstrap_name = '{}.bootstrap.tar.xz'.format(bootstrap_id)
active_name = '{}.active.json'.format(bootstrap_id)
# TODO(cmaloney): Use storage providers to download instead of open coding.
bootstrap_url = self._repository_url + '/bootstrap/' + bootstrap_name
active_url = self._repository_url + '/bootstrap/' + active_name
print("Attempting to download", bootstrap_name, "from", bootstrap_url)
dest_dir = self.get_bootstrap_cache_dir()
# Normalize to no trailing slash for repository_url
download_atomic(dest_dir + '/' + bootstrap_name, bootstrap_url, self._packages_dir)
print("Attempting to download", active_name, "from", active_url)
download_atomic(dest_dir + '/' + active_name, active_url, self._packages_dir)
return True
except FetchError:
return False
def expand_require(require):
try:
return expand_require_exceptions(require)
except ValidationError as ex:
raise BuildError(str(ex)) from ex
def get_docker_id(docker_name):
return check_output(["docker", "inspect", "-f", "{{ .Id }}", docker_name]).decode('utf-8').strip()
def hash_files_in_folder(directory):
"""Given a relative path, hashes all files inside that folder and subfolders
Returns a dictionary from filename to the hash of that file. If that whole
dictionary is hashed, you get a hash of all the contents of the folder.
This is split out from calculating the whole folder hash so that the
behavior in different walking corner cases can be more easily tested.
"""
assert not directory.startswith('/'), \
"For the hash to be reproducible on other machines relative paths must always be used. " \
"Got path: {}".format(directory)
directory = directory.rstrip('/')
file_hash_dict = {}
# TODO(cmaloney): Disallow symlinks as they're hard to hash, people can symlink / copy in their
# build steps if needed.
for root, dirs, filenames in os.walk(directory):
assert not root.startswith('/')
for name in filenames:
path = root + '/' + name
base = path[len(directory) + 1:]
file_hash_dict[base] = pkgpanda.util.sha1(path)
    # If the directory has files inside of it, it'll be picked up implicitly by
    # the files or folders inside of it. If it contains nothing it wouldn't be
    # picked up, but its existence is important, so add it with a hash value
    # that sha1 can never produce (the empty string).
if len(filenames) == 0 and len(dirs) == 0:
path = root[len(directory) + 1:]
# Empty path means it is the root directory, in which case we want no entries, not a
# single entry "": ""
if path:
file_hash_dict[root[len(directory) + 1:]] = ""
return file_hash_dict
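# Example (hypothetical layout): for a directory containing 'a/b.txt' and an
# otherwise-empty subdirectory 'c', the result maps 'a/b.txt' to its sha1
# hash and 'c' to the empty string.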
@contextmanager
def as_cwd(path):
start_dir = getcwd()
chdir(path)
yield
chdir(start_dir)
def hash_folder_abs(directory, work_dir):
assert directory.startswith(work_dir), "directory must be inside work_dir: {} {}".format(directory, work_dir)
assert not work_dir[-1] == '/', "This code assumes no trailing slash on the work_dir"
with as_cwd(work_dir):
return hash_folder(directory[len(work_dir) + 1:])
def hash_folder(directory):
return hash_checkout(hash_files_in_folder(directory))
# Try to read json from the given file. If it is an empty file, then return an
# empty json dictionary.
def load_optional_json(filename):
try:
with open(filename) as f:
text = f.read().strip()
if text:
return json.loads(text)
return {}
except OSError as ex:
raise BuildError("Failed to open JSON file {}: {}".format(filename, ex))
except ValueError as ex:
raise BuildError("Unable to parse json in {}: {}".format(filename, ex))
def load_config_variant(directory, variant, extension):
assert directory[-1] != '/'
return load_optional_json(directory + '/' + pkgpanda.util.variant_prefix(variant) + extension)
def load_buildinfo(path, variant):
buildinfo = load_config_variant(path, variant, 'buildinfo.json')
# Fill in default / guaranteed members so code everywhere doesn't have to guard around it.
default_build_script = 'build'
if is_windows:
default_build_script = 'build.ps1'
buildinfo.setdefault('build_script', pkgpanda.util.variant_prefix(variant) + default_build_script)
buildinfo.setdefault('docker', 'dcos/dcos-builder:dcos-builder_dockerdir-latest')
buildinfo.setdefault('environment', dict())
buildinfo.setdefault('requires', list())
buildinfo.setdefault('state_directory', False)
return buildinfo
def make_bootstrap_tarball(package_store, packages, variant):
# Convert filenames to package ids
pkg_ids = list()
for pkg_path in packages:
# Get the package id from the given package path
filename = os.path.basename(pkg_path)
if not filename.endswith(".tar.xz"):
raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
pkg_id = filename[:-len(".tar.xz")]
pkg_ids.append(pkg_id)
bootstrap_cache_dir = package_store.get_bootstrap_cache_dir()
# Filename is output_name.<sha-1>.{active.json|.bootstrap.tar.xz}
bootstrap_id = hash_checkout(pkg_ids)
latest_name = "{}/{}bootstrap.latest".format(bootstrap_cache_dir, pkgpanda.util.variant_prefix(variant))
output_name = bootstrap_cache_dir + '/' + bootstrap_id + '.'
# bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
active_name = "{}active.json".format(output_name)
def mark_latest():
# Ensure latest is always written
write_string(latest_name, bootstrap_id)
print("bootstrap: {}".format(bootstrap_name))
print("active: {}".format(active_name))
print("latest: {}".format(latest_name))
return bootstrap_id
    if os.path.exists(bootstrap_name):
print("Bootstrap already up to date, not recreating")
return mark_latest()
make_directory(bootstrap_cache_dir)
# Try downloading.
if package_store.try_fetch_bootstrap_and_active(bootstrap_id):
print("Bootstrap already up to date, Not recreating. Downloaded from repository-url.")
return mark_latest()
print("Unable to download from cache. Building.")
print("Creating bootstrap tarball for variant {}".format(variant))
work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')
def make_abs(path):
return os.path.join(work_dir, path)
pkgpanda_root = make_abs("opt/mesosphere")
repository = Repository(os.path.join(pkgpanda_root, "packages"))
# Fetch all the packages to the root
for pkg_path in packages:
filename = os.path.basename(pkg_path)
pkg_id = filename[:-len(".tar.xz")]
def local_fetcher(id, target):
shutil.unpack_archive(pkg_path, target, "gztar")
repository.add(local_fetcher, pkg_id, False)
# Activate the packages inside the repository.
# Do generate dcos.target.wants inside the root so that we don't
# try messing with /etc/systemd/system.
install = Install(
root=pkgpanda_root,
config_dir=None,
rooted_systemd=True,
manage_systemd=False,
block_systemd=True,
fake_path=True,
skip_systemd_dirs=True,
manage_users=False,
manage_state_dir=False)
install.activate(repository.load_packages(pkg_ids))
# Mark the tarball as a bootstrap tarball/filesystem so that
# dcos-setup.service will fire.
make_file(make_abs("opt/mesosphere/bootstrap"))
# Write out an active.json for the bootstrap tarball
write_json(active_name, pkg_ids)
# Rewrite all the symlinks to point to /opt/mesosphere
rewrite_symlinks(work_dir, work_dir, "/")
make_tar(bootstrap_name, pkgpanda_root)
remove_directory(work_dir)
# Update latest last so that we don't ever use partially-built things.
write_string(latest_name, bootstrap_id)
print("Built bootstrap")
return mark_latest()
def build_tree_variants(package_store, mkbootstrap):
""" Builds all possible tree variants in a given package store
"""
result = dict()
tree_variants = get_variants_from_filesystem(package_store.packages_dir, 'treeinfo.json')
if len(tree_variants) == 0:
raise Exception('No treeinfo.json can be found in {}'.format(package_store.packages_dir))
for variant in tree_variants:
result[variant] = pkgpanda.build.build_tree(package_store, mkbootstrap, variant)
return result
def build_tree(package_store, mkbootstrap, tree_variants):
"""Build packages and bootstrap tarballs for one or all tree variants.
Returns a dict mapping tree variants to bootstrap IDs.
If tree_variant is None, builds all available tree variants.
"""
    # TODO(cmaloney): Add support for circular dependencies. They are doable as
    # long as there is a pre-built version of enough of the packages.
# TODO(cmaloney): Make it so when we're building a treeinfo which has a
# explicit package list we don't build all the other packages.
build_order = list()
visited = set()
built = set()
def visit(pkg_tuple: tuple):
"""Add a package and its requires to the build order.
Raises AssertionError if pkg_tuple is in the set of visited packages.
If the package has any requires, they're recursively visited and added
to the build order depth-first. Then the package itself is added.
"""
# Visit the node for the first (and only) time.
assert pkg_tuple not in visited
visited.add(pkg_tuple)
# Ensure all dependencies are built. Sorted for stability.
# Requirements may be either strings or dicts, so we convert them all to (name, variant) tuples before sorting.
for require_tuple in sorted(expand_require(r) for r in package_store.packages[pkg_tuple]['requires']):
# If the dependency has already been built, we can move on.
if require_tuple in built:
continue
# If the dependency has not been built but has been visited, then
# there's a cycle in the dependency graph.
if require_tuple in visited:
raise BuildError("Circular dependency. Circular link {0} -> {1}".format(pkg_tuple, require_tuple))
if PackageId.is_id(require_tuple[0]):
raise BuildError("Depending on a specific package id is not supported. Package {} "
"depends on {}".format(pkg_tuple, require_tuple))
if require_tuple not in package_store.packages:
raise BuildError("Package {0} require {1} not buildable from tree.".format(pkg_tuple, require_tuple))
# Add the dependency (after its dependencies, if any) to the build
# order.
visit(require_tuple)
build_order.append(pkg_tuple)
built.add(pkg_tuple)
    # None can't be compared with str, so sort by (name, whether the variant
    # is the default (None), and the variant name or "" when it is None).
def key_func(elem):
return elem[0], elem[1] is None, elem[1] or ""
def visit_packages(package_tuples):
for pkg_tuple in sorted(package_tuples, key=key_func):
if pkg_tuple in visited:
continue
visit(pkg_tuple)
if tree_variants:
package_sets = [package_store.get_package_set(v) for v in tree_variants]
else:
package_sets = package_store.get_all_package_sets()
with logger.scope("resolve package graph"):
# Build all required packages for all tree variants.
for package_set in package_sets:
visit_packages(package_set.all_packages)
built_packages = dict()
for (name, variant) in build_order:
built_packages.setdefault(name, dict())
# Run the build, store the built package path for later use.
# TODO(cmaloney): Only build the requested variants, rather than all variants.
built_packages[name][variant] = build(
package_store,
name,
variant,
True)
# Build bootstrap tarballs for all tree variants.
def make_bootstrap(package_set):
with logger.scope("Making bootstrap variant: {}".format(pkgpanda.util.variant_name(package_set.variant))):
package_paths = list()
for name, pkg_variant in package_set.bootstrap_packages:
package_paths.append(built_packages[name][pkg_variant])
if mkbootstrap:
return make_bootstrap_tarball(
package_store,
list(sorted(package_paths)),
package_set.variant)
    # Build bootstraps and package lists for all variants.
# TODO(cmaloney): Allow distinguishing between "build all" and "build the default one".
complete_cache_dir = package_store.get_complete_cache_dir()
make_directory(complete_cache_dir)
results = {}
for package_set in package_sets:
info = {
'bootstrap': make_bootstrap(package_set),
'packages': sorted(
load_string(package_store.get_last_build_filename(*pkg_tuple))
for pkg_tuple in package_set.all_packages)}
write_json(
complete_cache_dir + '/' + pkgpanda.util.variant_prefix(package_set.variant) + 'complete.latest.json',
info)
results[package_set.variant] = info
return results
def assert_no_duplicate_keys(lhs, rhs):
if len(lhs.keys() & rhs.keys()) != 0:
print("ASSERTION FAILED: Duplicate keys between {} and {}".format(lhs, rhs))
assert len(lhs.keys() & rhs.keys()) == 0
# Find all build variants and build them
def build_package_variants(package_store, name, clean_after_build=True, recursive=False):
# Build every variant of the named package found in the package store.
results = dict()
for variant in package_store.packages_by_name[name].keys():
results[variant] = build(
package_store,
name,
variant,
clean_after_build=clean_after_build,
recursive=recursive)
return results
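# The result maps variant -> built package path, e.g. (hypothetical values):
#   {None: '.../mypkg--<id>.tar.xz', 'downstream': '.../mypkg--<id2>.tar.xz'}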
class IdBuilder():
def __init__(self, buildinfo):
self._start_keys = set(buildinfo.keys())
self._buildinfo = copy.deepcopy(buildinfo)
self._taken = set()
def _check_no_key(self, field):
if field in self._buildinfo:
raise BuildError("Key {} shouldn't be in buildinfo, but was".format(field))
def add(self, field, value):
self._check_no_key(field)
self._buildinfo[field] = value
def has(self, field):
return field in self._buildinfo
def take(self, field):
self._taken.add(field)
return self._buildinfo[field]
def replace(self, taken_field, new_field, new_value):
assert taken_field in self._buildinfo
self._check_no_key(new_field)
del self._buildinfo[taken_field]
self._buildinfo[new_field] = new_value
self._taken.add(new_field)
def update(self, field, new_value):
assert field in self._buildinfo
self._buildinfo[field] = new_value
def get_build_ids(self):
# If any of the original buildinfo keys were never consumed, raise an error naming them.
remaining_keys = self._start_keys - self._taken
if remaining_keys:
raise BuildError("ERROR: Unknown keys {} in buildinfo.json".format(remaining_keys))
return self._buildinfo
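# A minimal usage sketch (hypothetical buildinfo; not part of this module):
#
#     builder = IdBuilder({'docker': 'ubuntu:16.04', 'requires': []})
#     builder.add('name', 'mypkg')            # key must not already exist
#     docker_name = builder.take('docker')    # read and mark as consumed
#     builder.update('docker', 'sha256:...')  # overwrite an existing key
#     builder.take('requires')
#     ids = builder.get_build_ids()           # raises BuildError on unused keys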
def build(package_store: PackageStore, name: str, variant, clean_after_build, recursive=False):
msg = "Building package {} variant {}".format(name, pkgpanda.util.variant_name(variant))
with logger.scope(msg):
return _build(package_store, name, variant, clean_after_build, recursive)
def _build(package_store, name, variant, clean_after_build, recursive):
assert isinstance(package_store, PackageStore)
tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
repository = Repository(tmpdir.name)
package_dir = package_store.get_package_folder(name)
def src_abs(name):
return package_dir + '/' + name
def cache_abs(filename):
return package_store.get_package_cache_folder(name) + '/' + filename
# Build pkginfo over time, translating fields from buildinfo.
pkginfo = {}
# Build up the docker command arguments over time, translating fields as needed.
cmd = DockerCmd()
assert (name, variant) in package_store.packages, \
"Programming error: name, variant should have been validated to be valid before calling build()."
builder = IdBuilder(package_store.get_buildinfo(name, variant))
final_buildinfo = dict()
builder.add('name', name)
builder.add('variant', pkgpanda.util.variant_str(variant))
# Convert single_source -> sources
if builder.has('sources'):
if builder.has('single_source'):
raise BuildError('sources and single_source cannot both be specified at the same time')
sources = builder.take('sources')
elif builder.has('single_source'):
sources = {name: builder.take('single_source')}
builder.replace('single_source', 'sources', sources)
else:
builder.add('sources', {})
sources = dict()
print("NOTICE: No sources specified")
final_buildinfo['sources'] = sources
# Construct the source fetchers, gather the checkout ids from them
checkout_ids = dict()
fetchers = dict()
try:
for src_name, src_info in sorted(sources.items()):
# TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
make_directory(cache_dir)
fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
fetchers[src_name] = fetcher
checkout_ids[src_name] = fetcher.get_id()
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
for src_name, checkout_id in checkout_ids.items():
# NOTE: single_source buildinfo was expanded above, so src_name is
# always correct here.
# Make sure we never accidentally overwrite something which might be
# important. Fields should match if specified in both places (and that
# should be tested at some point); for now, disallowing duplicates
# entirely saves hassle.
assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
final_buildinfo['sources'][src_name].update(checkout_id)
# Add the sha1 of the buildinfo.json + build file to the build ids
builder.update('sources', checkout_ids)
build_script_file = builder.take('build_script')
# TODO(cmaloney): Change dest name to build_script_sha1
builder.replace('build_script', 'build', pkgpanda.util.sha1(src_abs(build_script_file)))
builder.add('pkgpanda_version', pkgpanda.build.constants.version)
extra_dir = src_abs("extra")
# Add the "extra" folder inside the package as an additional source if it
# exists
if os.path.exists(extra_dir):
extra_id = hash_folder_abs(extra_dir, package_dir)
builder.add('extra_source', extra_id)
final_buildinfo['extra_source'] = extra_id
# Figure out the docker name.
docker_name = builder.take('docker')
cmd.container = docker_name
# Add the id of the docker build environment to the build_ids.
try:
docker_id = get_docker_id(docker_name)
except CalledProcessError:
# docker pull the container and try again
check_call(['docker', 'pull', docker_name])
docker_id = get_docker_id(docker_name)
builder.update('docker', docker_id)
# TODO(cmaloney): The environment variables should be generated during build
# not live in buildinfo.json.
pkginfo['environment'] = builder.take('environment')
# Whether pkgpanda should ensure a state directory under /var/lib is available on the host.
pkginfo['state_directory'] = builder.take('state_directory')
if pkginfo['state_directory'] not in [True, False]:
raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")
username = None
if builder.has('username'):
username = builder.take('username')
if not isinstance(username, str):
raise BuildError("username in buildinfo.json must be either not set (no user for this"
" package), or a user name string")
try:
pkgpanda.UserManagement.validate_username(username)
except ValidationError as ex:
raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
pkginfo['username'] = username
group = None
if builder.has('group'):
group = builder.take('group')
if not isinstance(group, str):
raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
", or group must be a string")
try:
pkgpanda.UserManagement.validate_group_name(group)
except ValidationError as ex:
raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
pkginfo['group'] = group
# Packages need directories inside the fake install root (otherwise docker
# will try making the directories on a readonly filesystem), so build the
# install root now, and make the package directories in it as we go.
install_dir = tempfile.mkdtemp(prefix="pkgpanda-")
active_packages = list()
active_package_ids = set()
active_package_variants = dict()
auto_deps = set()
# Final package has the same requires as the build.
requires = builder.take('requires')
pkginfo['requires'] = requires
if builder.has("sysctl"):
pkginfo["sysctl"] = builder.take("sysctl")
# TODO(cmaloney): Pull generating the full set of requires out into a function.
to_check = copy.deepcopy(requires)
if type(to_check) != list:
raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
while to_check:
requires_info = to_check.pop(0)
requires_name, requires_variant = expand_require(requires_info)
if requires_name in active_package_variants:
# TODO(cmaloney): If one package depends on the <default>
# variant of a package and 1+ others depends on a non-<default>
# variant then update the dependency to the non-default variant
# rather than erroring.
if requires_variant != active_package_variants[requires_name]:
# TODO(cmaloney): Make this contain the chains of
# dependencies which contain the conflicting packages.
# a -> b -> c -> d {foo}
# e {bar} -> d {baz}
raise BuildError(
"Dependncy on multiple variants of the same package {}. variants: {} {}".format(
requires_name,
requires_variant,
active_package_variants[requires_name]))
# The package {requires_name, variant} is already a dependency;
# don't process it again, move on to the next one.
continue
active_package_variants[requires_name] = requires_variant
# Figure out the last build of the dependency, add that as the
# fully expanded dependency.
requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
if not os.path.exists(requires_last_build):
if recursive:
# Build the dependency
build(package_store, requires_name, requires_variant, clean_after_build, recursive)
else:
raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
"the dependency".format(requires_name, requires_variant))
try:
pkg_id_str = load_string(requires_last_build)
auto_deps.add(pkg_id_str)
pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
pkg_requires = pkg_buildinfo['requires']
pkg_path = repository.package_path(pkg_id_str)
pkg_tar = pkg_id_str + '.tar.xz'
if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
raise BuildError(
"The build tarball {} refered to by the last_build file of the dependency {} "
"variant {} doesn't exist. Rebuild the dependency.".format(
pkg_tar,
requires_name,
requires_variant))
active_package_ids.add(pkg_id_str)
# Mount the package into the docker container.
cmd.volumes[pkg_path] = install_root + "/packages/{}:ro".format(pkg_id_str)
os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))
# Add the dependencies of the package to the set which will be
# activated.
# TODO(cmaloney): All these 'transitive' dependencies shouldn't
# be available to the package being built, only what depends on
# them directly.
to_check += pkg_requires
except ValidationError as ex:
raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
except PackageError as ex:
raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
# Add requires to the package id, calculate the final package id.
# NOTE: active_packages isn't fully constructed here since we lazily load
# packages not already in the repository.
builder.update('requires', list(active_package_ids))
version_extra = None
if builder.has('version_extra'):
version_extra = builder.take('version_extra')
build_ids = builder.get_build_ids()
version_base = hash_checkout(build_ids)
version = None
if builder.has('version_extra'):
version = "{0}-{1}".format(version_extra, version_base)
else:
version = version_base
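# e.g. (hypothetical values): version_extra "1.9.0" with a build-ids hash of
# "4f0c9a..." yields "1.9.0-4f0c9a...", while a package without version_extra
# gets the bare hash as its version.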
pkg_id = PackageId.from_parts(name, version)
# Everything must have been extracted by now. If it wasn't, we would have
# hit a hard error above: the key was set but never used, and so was not
# included in the calculation of the PackageId.
builder = None
# Save the build_ids. Useful for verifying exactly what went into the
# package build hash.
final_buildinfo['build_ids'] = build_ids
final_buildinfo['package_version'] = version
# Save the package name and variant. The variant is used when installing
# packages to validate dependencies.
final_buildinfo['name'] = name
final_buildinfo['variant'] = variant
# If the package is already built, don't do anything.
pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)
# Done if it exists locally
if exists(pkg_path):
print("Package up to date. Not re-building.")
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
return pkg_path
# Try downloading.
dl_path = package_store.try_fetch_by_id(pkg_id)
if dl_path:
print("Package up to date. Not re-building. Downloaded from repository-url.")
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
print(dl_path, pkg_path)
assert dl_path == pkg_path
return pkg_path
# Fall out and do the build since it couldn't be downloaded
print("Unable to download from cache. Proceeding to build")
print("Building package {} with buildinfo: {}".format(
pkg_id,
json.dumps(final_buildinfo, indent=2, sort_keys=True)))
# Clean out src, result so later steps can use them freely for building.
def clean():
# Run a docker container to remove src/ and result/
cmd = DockerCmd()
cmd.volumes = {
package_store.get_package_cache_folder(name): PKG_DIR + "/:rw",
}
if is_windows:
cmd.container = "microsoft/windowsservercore:1709"
filename = PKG_DIR + "\\src"
cmd.run("package-cleaner",
["cmd.exe", "/c", "if", "exist", filename, "rmdir", "/s", "/q", filename])
filename = PKG_DIR + "\\result"
cmd.run("package-cleaner",
["cmd.exe", "/c", "if", "exist", filename, "rmdir", "/s", "/q", filename])
else:
cmd.container = "ubuntu:14.04.4"
cmd.run("package-cleaner", ["rm", "-rf", PKG_DIR + "/src", PKG_DIR + "/result"])
clean()
# Only fresh builds which don't overlap existing artifacts are allowed.
result_dir = cache_abs("result")
if exists(result_dir):
raise BuildError("result folder must not exist. It will be made when the package is "
"built. {}".format(result_dir))
# 'mkpanda add' all implicit dependencies since we actually need to build.
for dep in auto_deps:
print("Auto-adding dependency: {}".format(dep))
# NOTE: Not using the name pkg_id because that overrides the outer one.
id_obj = PackageId(dep)
add_package_file(repository, package_store.get_package_path(id_obj))
package = repository.load(dep)
active_packages.append(package)
# Check out all the sources into their respective 'src/' folders.
try:
src_dir = cache_abs('src')
if os.path.exists(src_dir):
raise ValidationError(
"'src' directory already exists, did you have a previous build? " +
"Currently all builds must be from scratch. Support should be " +
"added for re-using a src directory when possible. src={}".format(src_dir))
os.mkdir(src_dir)
for src_name, fetcher in sorted(fetchers.items()):
root = cache_abs('src/' + src_name)
os.mkdir(root)
fetcher.checkout_to(root)
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
# Activate the packages so that we have a proper path, environment
# variables.
# TODO(cmaloney): RAII-type wrapper for the temporary directory so that if
# we don't get all the way through, things will still be cleaned up?
install = Install(
root=install_dir,
config_dir=None,
rooted_systemd=True,
manage_systemd=False,
block_systemd=True,
fake_path=True,
manage_users=False,
manage_state_dir=False)
install.activate(active_packages)
# Rewrite all the symlinks inside the active path because we will
# be mounting the folder into a docker container, and the absolute
# paths to the packages will change.
# TODO(cmaloney): This isn't very clean, it would be much nicer to
# just run pkgpanda inside the package.
rewrite_symlinks(install_dir, repository.path, install_root + "/packages/")
print("Building package in docker")
# TODO(cmaloney): Run as a specific non-root user, make it possible
# for non-root to cleanup afterwards.
# Run the build, prepping the environment as necessary.
mkdir(cache_abs("result"))
# Copy the build info to the resulting tarball
write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)
write_json(cache_abs("result/pkginfo.json"), pkginfo)
# Make the folder for the package we are building. If docker does it, it
# gets auto-created with root permissions and we can't actually delete it.
os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))
# TODO(cmaloney): Disallow writing to well known files and directories?
# Source we checked out
cmd.volumes.update({
# TODO(cmaloney): src should be read only...
# Source directory
cache_abs("src"): PKG_DIR + "/src:rw",
# Getting the result out
cache_abs("result"): install_root + "/packages/{}:rw".format(pkg_id),
# The build script directory
package_dir: PKG_DIR + "/build:ro"
})
if is_windows:
cmd.volumes.update({
# TODO: This is a temporary workaround until Windows RS4 comes out with a
# fix that allows overlapping mount directories. We should not make this
# also happen on Linux, as it would probably break a bunch of things
# unnecessarily that would only need to be undone in the future.
install_dir: install_root + "/install_dir:ro"
})
else:
cmd.volumes.update({
install_dir: install_root + ":ro"
})
if os.path.exists(extra_dir):
cmd.volumes[extra_dir] = PKG_DIR + "/extra:ro"
cmd.environment = {
"PKG_VERSION": version,
"PKG_NAME": name,
"PKG_ID": pkg_id,
"PKG_PATH": install_root + "/packages/{}".format(pkg_id),
"PKG_VARIANT": variant if variant is not None else "<default>",
"NUM_CORES": multiprocessing.cpu_count()
}
try:
# TODO(cmaloney): Run a wrapper which sources
# /opt/mesosphere/environment then runs a build. Also should fix
# ownership of /opt/mesosphere/packages/{pkg_id} post build.
command = [PKG_DIR + "/build/" + build_script_file]
cmd.run("package-builder", command)
except CalledProcessError as ex:
raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))
# Clean up the temporary install dir used for dependencies.
# TODO(cmaloney): Move to an RAII wrapper.
remove_directory(install_dir)
with logger.scope("Build package tarball"):
# Check for forbidden services before packaging the tarball:
try:
check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
except ValidationError as ex:
raise BuildError("Package validation failed: {}".format(ex))
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
# Bundle the artifacts into the pkgpanda package
tmp_name = pkg_path + "-tmp.tar.xz"
make_tar(tmp_name, cache_abs("result"))
os.replace(tmp_name, pkg_path)
print("Package built.")
if clean_after_build:
clean()
return pkg_path
|
dcos/dcos
|
pkgpanda/build/__init__.py
|
Python
|
apache-2.0
| 52,595
|
[
"VisIt"
] |
55517cb7f0bf5a8dcd118e5c916ca734d221eff00dcbd88fbca9556378106536
|
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Preserve the alpha and lambda values that were used to
# calculate the final coefficients
self.alpha_ = alpha_
self.lambda_ = lambda_
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
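# These are the standard evidence-maximization updates (MacKay, 1992):
#   gamma  = sum_i alpha * s_i / (lambda + alpha * s_i)   (effective dof)
#   lambda = (gamma + 2 * lambda_1) / (||coef||^2 + 2 * lambda_2)
#   alpha  = (n_samples - gamma + 2 * alpha_1) / (rmse + 2 * alpha_2)
# where s_i are the squared singular values of X (eigen_vals_ above).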
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, its standard
deviation can also be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
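# A minimal usage sketch of the predictive uncertainty (hypothetical data):
#
#     >>> clf = BayesianRidge().fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
#     >>> y_mean, y_std = clf.predict([[1.5, 1.5]], return_std=True)
#     >>> y_std.shape
#     (1,)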
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be Gaussian distributed.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_ard.py for an example.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
ARD is a little different from the slides: only dimensions/features for
which self.lambda_ < self.threshold_lambda are kept and the rest are
discarded.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (real numbers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
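# The statements above assemble, via the Woodbury identity,
#   sigma = (diag(lambda) + alpha * X^T X)^{-1}
#         = D - D X^T (I / alpha + X D X^T)^{-1} X D,   D = diag(1 / lambda),
# restricted to the kept features, so only an (n_samples x n_samples)
# matrix is inverted instead of an (n_features x n_features) one.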
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, its standard
deviation can also be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
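# A minimal sketch of the pruning effect (hypothetical data): after fit(),
# features whose precision lambda_ exceeded threshold_lambda have their
# coefficient pinned to zero, so (clf.coef_ != 0).sum() counts the
# features the model actually kept.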
|
RomainBrault/scikit-learn
|
sklearn/linear_model/bayes.py
|
Python
|
bsd-3-clause
| 19,494
|
[
"Gaussian"
] |
0d95a4bd1d3790d69e220b50bba48e4b2cd84e4568f9f5b18618b1610689bfdc
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This module defines the following classes:
- QShellDialog
- QShell
QShell is based on ideas and code from PyCute developed by Gerard Vermeulen.
Used with the author's permission.
For more information on PyCute, visit:
http://gerard.vermeulen.free.fr/html/pycute-intro.html
"""
from PyQt4 import QtGui, QtCore
from code import InteractiveInterpreter
import copy
import sys
import time
import os.path
import api
from core.configuration import get_vistrails_configuration
from core.interpreter.default import get_default_interpreter
import core.modules.module_registry
import core.system
from core.vistrail.port_spec import PortSpec
from gui.vistrails_palette import QVistrailsPaletteInterface
from core.utils import all
################################################################################
class QShellDialog(QtGui.QWidget, QVistrailsPaletteInterface):
"""This class incorporates the QShell into a dockable widget for use in the
VisTrails environment"""
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent=parent)
#locals() returns the original dictionary, not a copy as
#the docs say
self.firstLocals = copy.copy(locals())
self.shell = QShell(self.firstLocals,None)
layout = QtGui.QVBoxLayout()
layout.setMargin(0)
layout.setSpacing(0)
layout.addWidget(self.shell)
self.setLayout(layout)
# self.setWidget(self.shell)
self.setWindowTitle(self.shell.windowTitle())
# self.setTitleBarWidget(QtGui.QLabel(self.shell.windowTitle()))
# self.monitorWindowTitle(self.shell)
self.vistrails_interpreter = get_default_interpreter()
def createMenu(self):
"""createMenu() -> None
Creates a menu bar and adds it to the main layout.
"""
self.newSessionAct = QtGui.QAction(self.tr("&Restart"),self)
self.newSessionAct.setShortcut(self.tr("Ctrl+R"))
self.connect(self.newSessionAct, QtCore.SIGNAL("triggered()"),
self.newSession)
self.saveSessionAct = QtGui.QAction(self.tr("&Save"), self)
self.saveSessionAct.setShortcut(self.tr("Ctrl+S"))
self.connect(self.saveSessionAct, QtCore.SIGNAL("triggered()"),
self.saveSession)
self.closeSessionAct = QtGui.QAction(self.tr("Close"), self)
self.closeSessionAct.setShortcut(self.tr("Ctrl+W"))
self.connect(self.closeSessionAct,QtCore.SIGNAL("triggered()"),
self.closeSession)
self.menuBar = QtGui.QMenuBar(self)
menu = self.menuBar.addMenu(self.tr("&Session"))
menu.addAction(self.newSessionAct)
menu.addAction(self.saveSessionAct)
menu.addAction(self.closeSessionAct)
self.layout().setMenuBar(self.menuBar)
def closeEvent(self, e):
"""closeEvent(e) -> None
Event handler called when the dialog is about to close."""
self.closeSession()
self.emit(QtCore.SIGNAL("shellHidden()"))
def showEvent(self, e):
"""showEvent(e) -> None
Event handler called when the dialog acquires focus
"""
self.shell.show()
def closeSession(self):
"""closeSession() -> None.
Hides the dialog instead of closing it, so the session stays open.
"""
self.hide()
def newSession(self):
"""newSession() -> None
Tells the shell to start a new session passing a copy of the original
locals dictionary.
"""
self.shell.restart(copy.copy(self.firstLocals))
def saveSession(self):
"""saveSession() -> None
Opens a File Save dialog and passes the filename to shell's saveSession.
"""
default = 'visTrails' + '-' + time.strftime("%Y%m%d-%H%M.log")
default = os.path.join(core.system.vistrails_file_directory(),default)
fileName = QtGui.QFileDialog.getSaveFileName(self,
"Save Session As..",
default,
"Log files (*.log)")
if not fileName:
return
self.shell.saveSession(str(fileName))
def visibility_changed(self, visible):
QVistrailsPaletteInterface.visibility_changed(self, visible)
if visible:
self.shell.show()
else:
self.shell.hide()
##############################################################################
# QShell
class vistrails_port(object):
def __init__(self, vistrails_module, port_spec):
# print 'calling vistrails_port.__init__'
self._vistrails_module = vistrails_module
self._port_spec = port_spec
def __call__(self, *args, **kwargs):
if len(args) + len(kwargs) > 0:
self._vistrails_module._update_func(self._port_spec,
*args, **kwargs)
return None
return self
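# A minimal sketch (hypothetical module): given m = SomeModule(), the
# expression m.value returns a vistrails_port for that port; m.value()
# returns the port unchanged, while m.value(3) forwards to _update_func
# and sets the input port instead, returning None.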
class vistrails_module(object):
def __init__(self, *args, **kwargs):
if not hasattr(self, '_module'):
self._module = \
api.add_module_from_descriptor(self._module_desc)
# FIXME if constant, we can use args
module_desc = self._module_desc
for attr_name, value in kwargs.iteritems():
self._process_attr_value(attr_name, value)
def _process_attr_value(self, attr_name, value):
if self._module.has_port_spec(attr_name, 'input'):
port_spec = self._module.get_port_spec(attr_name, 'input')
args = None
# FIXME want this to be any iterable
if type(value) == tuple:
args = value
else:
args = (value,)
self._update_func(port_spec, *args)
else:
raise AttributeError("type object '%s' has no "
"attribute '%s'" % \
(self.__class__.__name__,
attr_name))
def __getattr__(self, attr_name):
def create_port(port_spec):
return vistrails_port(self, port_spec)
try:
return self.__dict__[attr_name]
except KeyError:
if self._module.has_port_spec(attr_name, 'output'):
port_spec = \
self._module.get_port_spec(attr_name, 'output')
return create_port(port_spec)
elif self._module.has_port_spec(attr_name, 'input'):
port_spec = \
self._module.get_port_spec(attr_name, 'input')
return create_port(port_spec)
else:
raise AttributeError("type object '%s' has no "
"attribute '%s'" % \
(self.__class__.__name__,
attr_name))
def __setattr__(self, attr_name, value):
if attr_name.startswith('_'):
self.__dict__[attr_name] = value
else:
self._process_attr_value(attr_name, value)
def _update_func(self, port_spec, *args, **kwargs):
# print 'running _update_func', port_spec.name
# print args
if port_spec.type != 'input':
if self._module.has_port_spec(port_spec.name, 'input'):
port_spec = \
self._module.get_port_spec(port_spec.name, 'input')
else:
raise Exception("cannot update an output port spec")
# FIXME deal with kwargs
num_ports = 0
num_params = 0
for value in args:
# print 'processing', type(value), value
if isinstance(value, vistrails_port):
# make connection to specified output port
# print 'updating port'
num_ports += 1
elif isinstance(value, vistrails_module):
# make connection to 'self' output port of value
# print 'updating module'
num_ports += 1
else:
# print 'update literal', type(value), value
num_params += 1
if num_ports > 1 or (num_ports == 1 and num_params > 0):
reg = core.modules.module_registry.get_module_registry()
tuple_desc = \
reg.get_descriptor_by_name('edu.utah.sci.vistrails.basic',
'Tuple', '')
d = {'_module_desc': tuple_desc,
'_package': self._package,}
tuple = type('module', (vistrails_module,), d)()
output_port_spec = PortSpec(id=-1,
name='value',
type='output',
sigstring=port_spec.sigstring)
api.add_port_spec(tuple._module.id, output_port_spec)
self._update_func(port_spec, *[tuple.value()])
assert len(port_spec.descriptors()) == len(args)
for i, descriptor in enumerate(port_spec.descriptors()):
arg_name = 'arg%d' % i
sigstring = "(" + descriptor.sigstring + ")"
tuple_port_spec = PortSpec(id=-1,
name=arg_name,
type='input',
sigstring=sigstring)
api.add_port_spec(tuple._module.id, tuple_port_spec)
tuple._process_attr_value(arg_name, args[i])
# create tuple object
pass
elif num_ports == 1:
other = args[0]
if isinstance(other, vistrails_port):
if other._port_spec.type != 'output':
other_module = other._vistrails_module._module
if other_module.has_port_spec(port_spec.name,
'output'):
other_port_spec = \
other_module.get_port_spec(port_spec.name,
'output')
else:
raise Exception("cannot update an input "
"port spec")
else:
other_port_spec = other._port_spec
api.add_connection(other._vistrails_module._module.id,
other_port_spec,
self._module.id,
port_spec)
elif isinstance(other, vistrails_module):
other_port_spec = \
other._module.get_port_spec('self', 'output')
api.add_connection(other._module.id,
other_port_spec,
self._module.id,
port_spec)
else:
api.change_parameter(self._module.id,
port_spec.name,
[str(x) for x in args])
class QShell(QtGui.QTextEdit):
"""This class embeds a python interperter in a QTextEdit Widget
It is based on PyCute developed by Gerard Vermeulen.
"""
def __init__(self, locals=None, parent=None):
"""Constructor.
The optional 'locals' argument specifies the dictionary in which code
will be executed; it defaults to a newly created dictionary with key
"__name__" set to "__console__" and key "__doc__" set to None.
The optional 'log' argument specifies the file in which the interpreter
session is to be logged.
The optional 'parent' argument specifies the parent widget. If no parent
widget has been specified, it is possible to exit the interpreter
by Ctrl-D.
"""
QtGui.QTextEdit.__init__(self, parent)
self.setReadOnly(False)
self.setWindowTitle("Console")
# to exit the main interpreter by a Ctrl-D if QShell has no parent
if parent is None:
self.eofKey = QtCore.Qt.Key_D
else:
self.eofKey = None
# flag for knowing when selecting text
self.selectMode = False
self.interpreter = None
self.controller = None
# storing current state
#this is not working on mac
#self.prev_stdout = sys.stdout
#self.prev_stdin = sys.stdin
#self.prev_stderr = sys.stderr
# capture all interactive input/output
#sys.stdout = self
#sys.stderr = self
#sys.stdin = self
# user interface setup
self.setAcceptRichText(False)
self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
conf = get_vistrails_configuration()
shell_conf = conf.shell
# font
font = QtGui.QFont(shell_conf.font_face, shell_conf.font_size)
font.setFixedPitch(1)
self.setFont(font)
self.reset(locals)
def load_package(self, pkg_name):
reg = core.modules.module_registry.get_module_registry()
package = reg.get_package_by_name(pkg_name)
def create_dict(modules, ns, m, mdesc):
md = {}
if len(ns) == 0:
d = {'_module_desc': mdesc,
'_package': pkg,}
modules[m] = type('module', (vistrails_module,), d)
else:
if ns[0] in modules:
md = create_dict(modules[ns[0]], ns[1:], m, mdesc)
else:
md = create_dict(md, ns[1:], m, mdesc)
modules[ns[0]] = md
return modules
def create_namespace_path(root, modules):
for k,v in modules.iteritems():
if type(v) == type({}):
d = create_namespace_path(k,v)
modules[k] = d
if root is not None:
modules['_package'] = pkg
return type(root, (object,), modules)()
else:
return modules
def get_module_init(module_desc):
def init(self, *args, **kwargs):
self.__dict__['module'] = \
api.add_module_from_descriptor(module_desc)
return init
def get_module(package):
def getter(self, attr_name):
desc_tuple = (attr_name, '')
if desc_tuple in package.descriptors:
module_desc = package.descriptors[desc_tuple]
d = {'_module_desc': module_desc,
'_package': self,}
return type('module', (vistrails_module,), d)
else:
raise AttributeError("type object '%s' has no attribute "
"'%s'" % (self.__class__.__name__,
attr_name))
return getter
d = {'__getattr__': get_module(package),}
pkg = type(package.name, (object,), d)()
modules = {}
for (m,ns) in package.descriptors:
module_desc = package.descriptors[(m,ns)]
modules = create_dict(modules, ns.split('|'), m, module_desc)
modules = create_namespace_path(None, modules)
for (k,v) in modules.iteritems():
setattr(pkg, k, v)
return pkg
def selected_modules(self):
shell_modules = []
modules = api.get_selected_modules()
for module in modules:
d = {'_module': module}
shell_modules.append(type('module', (vistrails_module,), d)())
return shell_modules
def reset(self, locals):
"""reset(locals) -> None
Reset shell preparing it for a new session.
"""
locals['load_package'] = self.load_package
locals['selected_modules'] = self.selected_modules
if self.interpreter:
del self.interpreter
self.interpreter = InteractiveInterpreter(locals)
# last line + last incomplete lines
self.line = QtCore.QString()
self.lines = []
# the cursor position in the last line
self.point = 0
# flag: the interpreter needs more input to run the last lines.
self.more = 0
# flag: readline() is being used for e.g. raw_input() and input()
self.reading = 0
# history
self.history = []
self.pointer = 0
self.last = 0
# interpreter prompt.
if hasattr(sys, "ps1"):
sys.ps1
else:
sys.ps1 = ">>> "
if hasattr(sys, "ps2"):
sys.ps2
else:
sys.ps2 = "... "
# interpreter banner
self.write('VisTrails shell running Python %s on %s.\n' %
(sys.version, sys.platform))
self.write('Type "copyright", "credits" or "license"'
' for more information on Python.\n')
self.write(sys.ps1)
def flush(self):
"""flush() -> None.
Simulate stdin, stdout, and stderr.
"""
pass
def isatty(self):
"""isatty() -> int
Simulate stdin, stdout, and stderr.
"""
return 1
def readline(self):
"""readline() -> str
Simulate stdin, stdout, and stderr.
"""
self.reading = 1
self.__clearLine()
cursor = self.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
self.setTextCursor(cursor)
while self.reading:
QtCore.QCoreApplication.processEvents()  # keep the UI responsive while blocked in readline()
if self.line.length() == 0:
return '\n'
else:
return str(self.line)
def write(self, text):
"""write(text: str) -> None
Simulate stdin, stdout, and stderr.
"""
cursor = self.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
cursor.clearSelection()
self.setTextCursor(cursor)
self.insertPlainText(text)
cursor = self.textCursor()
self.last = cursor.position()
def insertFromMimeData(self, source):
if source.hasText():
cursor = self.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
cursor.clearSelection()
self.setTextCursor(cursor)
self.__insertText(source.text())
def scroll_bar_at_bottom(self):
"""Returns true if vertical bar exists and is at bottom, or if
vertical bar does not exist."""
bar = self.verticalScrollBar()
if not bar:
return True
return bar.value() == bar.maximum()
def __run(self):
"""__run() -> None
Append the last line to the history list, let the interpreter execute
the last line(s), and clean up accounting for the interpreter results:
(1) the interpreter succeeds
(2) the interpreter fails, finds no errors and wants more line(s)
(3) the interpreter fails, finds errors and writes them to sys.stderr
"""
cursor = self.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
self.setTextCursor(cursor)
# self.set_controller()
should_scroll = self.scroll_bar_at_bottom()
self.pointer = 0
self.history.append(QtCore.QString(self.line))
self.lines.append(str(self.line))
source = '\n'.join(self.lines)
self.write('\n')
self.more = self.interpreter.runsource(source)
if self.more:
self.write(sys.ps2)
else:
self.write(sys.ps1)
self.lines = []
self.__clearLine()
if should_scroll:
bar = self.verticalScrollBar()
if bar:
bar.setValue(bar.maximum())
def __clearLine(self):
"""__clearLine() -> None
Clear input line buffer.
"""
self.line.truncate(0)
self.point = 0
def __insertText(self, text):
"""__insertText(text) -> None
Insert text at the current cursor position.
"""
self.insertPlainText(text)
self.line.insert(self.point, text)
self.point += text.length()
# def add_pipeline(self, p):
# """
# add_pipeline(p) -> None
# Set the active pipeline in the command shell. This replaces the modules
# variable with the list of current active modules of the selected pipeline.
# """
# if self.controller:
# self.interpreter.active_pipeline = self.controller.current_pipeline
# else:
# self.interpreter.active_pipeline = p
# cmd = 'active_pipeline = self.shell.interpreter.active_pipeline'
# self.interpreter.runcode(cmd)
# cmd = 'modules = self.vistrails_interpreter.find_persistent_entities(active_pipeline)[0]'
# self.interpreter.runcode(cmd)
def set_controller(self, controller=None):
"""set_controller(controller: VistrailController) -> None
Set the current VistrailController on the shell.
"""
self.controller = controller
if controller:
self.interpreter.active_pipeline = self.controller.current_pipeline
cmd = 'active_pipeline = self.shell.interpreter.active_pipeline'
self.interpreter.runcode(cmd)
cmd = 'modules = self.vistrails_interpreter.' \
'find_persistent_entities(active_pipeline)[0]'
self.interpreter.runcode(cmd)
# def set_pipeline(self):
# """set_active_pipeline() -> None
# Makes sure that the pipeline being displayed is present in the shell for
# direct inspection and manipulation
# """
# self.add_pipeline(None)
def keyPressEvent(self, e):
"""keyPressEvent(e) -> None
Handle user input a key at a time.
Notice that text might come more than one keypress at a time
if user is a fast enough typist!
"""
text = e.text()
key = e.key()
# NB: Sometimes len(str(text)) > 1!
if text.length() and all(ord(x) >= 32 and
ord(x) < 127
for x in str(text)):
# exit select mode and jump to end of text
cursor = self.textCursor()
if self.selectMode or cursor.hasSelection():
self.selectMode = False
cursor.movePosition(QtGui.QTextCursor.End)
cursor.clearSelection()
self.setTextCursor(cursor)
self.__insertText(text)
return
if e.modifiers() & QtCore.Qt.MetaModifier and key == self.eofKey:
self.parent().closeSession()
if e.modifiers() & QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_C or key == QtCore.Qt.Key_Insert:
self.copy()
elif key == QtCore.Qt.Key_V:
cursor = self.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
cursor.clearSelection()
self.setTextCursor(cursor)
self.paste()
elif key == QtCore.Qt.Key_A:
self.selectAll()
self.selectMode = True
else:
e.ignore()
return
if e.modifiers() & QtCore.Qt.ShiftModifier:
if key == QtCore.Qt.Key_Insert:
cursor = self.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
cursor.clearSelection()
self.setTextCursor(cursor)
self.paste()
else:
e.ignore()
return
# exit select mode and jump to end of text
cursor = self.textCursor()
if self.selectMode or cursor.hasSelection():
self.selectMode = False
cursor.movePosition(QtGui.QTextCursor.End)
cursor.clearSelection()
self.setTextCursor(cursor)
if key == QtCore.Qt.Key_Backspace:
if self.point:
QtGui.QTextEdit.keyPressEvent(self, e)
self.point -= 1
self.line.remove(self.point, 1)
elif key == QtCore.Qt.Key_Delete:
QtGui.QTextEdit.keyPressEvent(self, e)
self.line.remove(self.point, 1)
elif key == QtCore.Qt.Key_Return or key == QtCore.Qt.Key_Enter:
if self.reading:
self.reading = 0
else:
self.__run()
elif key == QtCore.Qt.Key_Tab:
self.__insertText(text)
elif key == QtCore.Qt.Key_Left:
if self.point:
QtGui.QTextEdit.keyPressEvent(self, e)
self.point -= 1
elif key == QtCore.Qt.Key_Right:
if self.point < self.line.length():
QtGui.QTextEdit.keyPressEvent(self, e)
self.point += 1
elif key == QtCore.Qt.Key_Home:
cursor = self.textCursor()
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
cursor.setPosition(cursor.position() + 4)
self.setTextCursor(cursor)
self.point = 0
elif key == QtCore.Qt.Key_End:
QtGui.QTextEdit.keyPressEvent(self, e)
self.point = self.line.length()
elif key == QtCore.Qt.Key_Up:
if len(self.history):
if self.pointer == 0:
self.pointer = len(self.history)
self.pointer -= 1
self.__recall()
elif key == QtCore.Qt.Key_Down:
if len(self.history):
self.pointer += 1
if self.pointer == len(self.history):
self.pointer = 0
self.__recall()
else:
e.ignore()
def __recall(self):
"""__recall() -> None
Display the current item from the command history.
"""
cursor = self.textCursor()
cursor.setPosition(self.last)
cursor.select(QtGui.QTextCursor.LineUnderCursor)
cursor.removeSelectedText()
self.setTextCursor(cursor)
self.insertPlainText(sys.ps1)
self.__clearLine()
self.__insertText(self.history[self.pointer])
def focusNextPrevChild(self, next):
"""focusNextPrevChild(next) -> None
Suppress tabbing to the next window in multi-line commands.
"""
if next and self.more:
return 0
return QtGui.QTextEdit.focusNextPrevChild(self, next)
def mousePressEvent(self, e):
"""mousePressEvent(e) -> None
Keep the cursor after the last prompt.
"""
if e.button() == QtCore.Qt.LeftButton:
self.selectMode = True
QtGui.QTextEdit.mousePressEvent(self, e)
# cursor = self.textCursor()
# cursor.movePosition(QtGui.QTextCursor.End)
# self.setTextCursor(cursor)
return
def hide(self):
"""suspend() -> None
Called when hiding the parent window in order to recover the previous
state.
"""
#recovering the state
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.stdin = sys.__stdin__
def show(self):
"""show() -> None
Store previous state and starts capturing all interactive input and
output.
"""
# capture all interactive input/output
sys.stdout = self
sys.stderr = self
sys.stdin = self
self.setFocus()
def saveSession(self, fileName):
"""saveSession(fileName: str) -> None
Write its contents to a file """
output = open(str(fileName), 'w')
output.write(self.toPlainText())
output.close()
def restart(self, locals=None):
"""restart(locals=None) -> None
Restart a new session
"""
self.clear()
self.reset(locals)
def contentsContextMenuEvent(self,ev):
"""
contentsContextMenuEvent(ev) -> None
Suppress the right button context menu.
"""
return
|
CMUSV-VisTrails/WorkflowRecommendation
|
vistrails/gui/shell.py
|
Python
|
bsd-3-clause
| 30,589
|
[
"VisIt"
] |
4a6583dcabf60d1b92ab555685d7ba729abe2a3d36862e35a752a72320a97207
|
#!/usr/bin/env python3
#
# DNSChef is a highly configurable DNS Proxy for Penetration Testers
# and Malware Analysts. Please visit http://thesprawl.org/projects/dnschef/
# for the latest version and documentation. Please forward all issues and
# concerns to iphelix [at] thesprawl.org.
DNSCHEF_VERSION = "0.4"
# Copyright (C) 2019 Peter Kacherginsky, Marcello Salvati
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from argparse import ArgumentParser
from configparser import ConfigParser
from dnslib import *
from ipaddress import ip_address
import logging
import time
import threading
import random
import operator
import socketserver
import socket
import sys
import os
import binascii
import string
import base64
class DNSChefFormatter(logging.Formatter):
FORMATS = {
logging.ERROR: "(%(asctime)s) [!] %(msg)s",
logging.INFO: "(%(asctime)s) [*] %(msg)s",
logging.WARNING: "WARNING: %(msg)s",
logging.DEBUG: "DBG: %(module)s: %(lineno)d: %(msg)s",
"DEFAULT": "%(asctime)s - %(msg)s"
}
def format(self, record):
format_orig = self._style._fmt
self._style._fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT'])
result = logging.Formatter.format(self, record)
self._style._fmt = format_orig
return result
log = logging.getLogger("dnschef")
log.setLevel(logging.DEBUG)
log_ch = logging.StreamHandler()
log_ch.setLevel(logging.INFO)
log_ch.setFormatter(DNSChefFormatter(datefmt="%H:%M:%S"))
log.addHandler(log_ch)
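# Formatter sanity sketch (informal; outputs follow the FORMATS table above):
#   log.info("up")    -> "(12:00:00) [*] up"
#   log.error("down") -> "(12:00:00) [!] down"
#   log.warning("hm") -> "WARNING: hm"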
# DNSHandler Mixin. The class contains generic functions to parse DNS requests and
# calculate an appropriate response based on user parameters.
class DNSHandler():
def parse(self, data):
response = ""
try:
# Parse data as DNS
d = DNSRecord.parse(data)
except Exception:
log.error(f"{self.client_address[0]}: ERROR: invalid DNS request")
else:
# Only Process DNS Queries
if QR[d.header.qr] == "QUERY":
# Gather query parameters
# NOTE: Do not lowercase qname here, because we want to see
# any case request weirdness in the logs.
qname = str(d.q.qname)
# Chop off the last period
if qname[-1] == '.': qname = qname[:-1]
qtype = QTYPE[d.q.qtype]
# Find all matching fake DNS records for the query name or get False
fake_records = dict()
for record in self.server.nametodns:
fake_records[record] = self.findnametodns(qname, self.server.nametodns[record])
# Check if there is a fake record for the current request qtype
if qtype in fake_records and fake_records[qtype]:
fake_record = fake_records[qtype]
# Create a custom response to the query
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)
log.info(f"{self.client_address[0]}: cooking the response of type '{qtype}' for {qname} to {fake_record}")
# IPv6 needs additional work before inclusion:
if qtype == "AAAA":
ipv6_hex_tuple = list(map(int, ip_address(fake_record).packed))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](ipv6_hex_tuple)))
elif qtype == "SOA":
mname,rname,t1,t2,t3,t4,t5 = fake_record.split(" ")
times = tuple([int(t) for t in [t1,t2,t3,t4,t5]])
# dnslib doesn't like trailing dots
if mname[-1] == ".": mname = mname[:-1]
if rname[-1] == ".": rname = rname[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](mname,rname,times)))
elif qtype == "NAPTR":
order,preference,flags,service,regexp,replacement = list(map(lambda x: x.encode(), fake_record.split(" ")))
order = int(order)
preference = int(preference)
# dnslib doesn't like trailing dots
if replacement[-1] == ".": replacement = replacement[:-1]
response.add_answer( RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](order,preference,flags,service,regexp,DNSLabel(replacement))) )
elif qtype == "SRV":
priority, weight, port, target = fake_record.split(" ")
priority = int(priority)
weight = int(weight)
port = int(port)
if target[-1] == ".": target = target[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](priority, weight, port, target) ))
elif qtype == "DNSKEY":
flags, protocol, algorithm, key = fake_record.split(" ")
flags = int(flags)
protocol = int(protocol)
algorithm = int(algorithm)
key = base64.b64decode(("".join(key)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](flags, protocol, algorithm, key) ))
elif qtype == "RRSIG":
covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(" ")
covered = getattr(QTYPE,covered) # NOTE: Covered QTYPE
algorithm = int(algorithm)
labels = int(labels)
orig_ttl = int(orig_ttl)
sig_exp = int(time.mktime(time.strptime(sig_exp +'GMT',"%Y%m%d%H%M%S%Z")))
sig_inc = int(time.mktime(time.strptime(sig_inc +'GMT',"%Y%m%d%H%M%S%Z")))
key_tag = int(key_tag)
if name[-1] == '.': name = name[:-1]
sig = base64.b64decode(("".join(sig)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](covered, algorithm, labels,orig_ttl, sig_exp, sig_inc, key_tag, name, sig) ))
else:
# dnslib doesn't like trailing dots
if fake_record[-1] == ".": fake_record = fake_record[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](fake_record)))
response = response.pack()
elif qtype == "*" and not None in list(fake_records.values()):
log.info(f"{self.client_address[0]}: cooking the response of type 'ANY' for {qname} with all known fake records")
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap,qr=1, aa=1, ra=1), q=d.q)
for qtype,fake_record in list(fake_records.items()):
if fake_record:
# NOTE: RDMAP is a dictionary map of qtype strings to handling classes
# IPv6 needs additional work before inclusion:
if qtype == "AAAA":
fake_record = list(map(int, ip_address(fake_record).packed))
elif qtype == "SOA":
mname,rname,t1,t2,t3,t4,t5 = fake_record.split(" ")
times = tuple([int(t) for t in [t1,t2,t3,t4,t5]])
# dnslib doesn't like trailing dots
if mname[-1] == ".": mname = mname[:-1]
if rname[-1] == ".": rname = rname[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](mname,rname,times)))
elif qtype == "NAPTR":
order,preference,flags,service,regexp,replacement = fake_record.split(" ")
order = int(order)
preference = int(preference)
# dnslib doesn't like trailing dots
if replacement and replacement[-1] == ".": replacement = replacement[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](order,preference,flags,service,regexp,replacement)))
elif qtype == "SRV":
priority, weight, port, target = fake_record.split(" ")
priority = int(priority)
weight = int(weight)
port = int(port)
if target[-1] == ".": target = target[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](priority, weight, port, target) ))
elif qtype == "DNSKEY":
flags, protocol, algorithm, key = fake_record.split(" ")
flags = int(flags)
protocol = int(protocol)
algorithm = int(algorithm)
key = base64.b64decode(("".join(key)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](flags, protocol, algorithm, key) ))
elif qtype == "RRSIG":
covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(" ")
covered = getattr(QTYPE,covered) # NOTE: Covered QTYPE
algorithm = int(algorithm)
labels = int(labels)
orig_ttl = int(orig_ttl)
sig_exp = int(time.mktime(time.strptime(sig_exp +'GMT',"%Y%m%d%H%M%S%Z")))
sig_inc = int(time.mktime(time.strptime(sig_inc +'GMT',"%Y%m%d%H%M%S%Z")))
key_tag = int(key_tag)
if name[-1] == '.': name = name[:-1]
sig = base64.b64decode(("".join(sig)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](covered, algorithm, labels,orig_ttl, sig_exp, sig_inc, key_tag, name, sig) ))
else:
# dnslib doesn't like trailing dots
if fake_record[-1] == ".": fake_record = fake_record[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](fake_record)))
response = response.pack()
# Proxy the request
else:
log.info(f"{self.client_address[0]}: proxying the response of type '{qtype}' for {qname}")
nameserver_tuple = random.choice(self.server.nameservers).split('#')
response = self.proxyrequest(data, *nameserver_tuple)
return response
# Find the appropriate record value to use for a queried name. The function
# supports '*' wildcards in domain labels and returns False when no entry matches.
def findnametodns(self,qname,nametodns):
# Make qname case insensitive
qname = qname.lower()
# Split and reverse qname into components for matching.
qnamelist = qname.split('.')
qnamelist.reverse()
# HACK: It is important to sort the nametodns dictionary before iterating it so that
# the global wildcard entry ['*.*.*.*.*.*.*.*.*.*'] is matched last.
for domain,host in sorted(iter(nametodns.items()), key=operator.itemgetter(1)):
# NOTE: It is assumed that the domain name was already lowercased
# when it was loaded through --file, --fakedomains or --truedomains,
# so we don't waste time lowercasing domains on every request.
# Split and reverse domain into components for matching
domain = domain.split('.')
domain.reverse()
# Compare domains in reverse.
for a, b in zip(qnamelist, domain):
if a != b and b != "*":
break
else:
# Could be a real IP or False if we are doing reverse matching with 'truedomains'
return host
else:
return False
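# Matching illustration (informal): a query 'www.google.com' and an entry
# '*.google.com' are both split on '.' and reversed, giving
# ['com', 'google', 'www'] and ['com', 'google', '*']; labels are compared
# pairwise and '*' matches any label, so the entry's host is returned.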
# Obtain a response from a real DNS server.
def proxyrequest(self, request, host, port="53", protocol="udp"):
reply = None
try:
if self.server.ipv6:
if protocol == "udp":
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
elif protocol == "tcp":
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
if protocol == "udp":
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
elif protocol == "tcp":
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3.0)
# Send the proxy request to a randomly chosen DNS server
if protocol == "udp":
sock.sendto(request, (host, int(port)))
reply = sock.recv(1024)
sock.close()
elif protocol == "tcp":
sock.connect((host, int(port)))
# Add length for the TCP request
length = binascii.unhexlify("%04x" % len(request))
sock.sendall(length+request)
# Strip length from the response
reply = sock.recv(1024)
reply = reply[2:]
sock.close()
except Exception as e:
log.error(f"[!] Could not proxy request: {e}")
else:
return reply
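# Framing note: TCP DNS prepends a two-byte big-endian length to every
# message (RFC 1035, section 4.2.2). That is what the
# binascii.unhexlify("%04x" % len(...)) expressions above and below build;
# an equivalent sketch is struct.pack(">H", len(request)).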
# UDP DNS Handler for incoming requests
class UDPHandler(DNSHandler, socketserver.BaseRequestHandler):
def handle(self):
(data, sock) = self.request  # 'sock' avoids shadowing the socket module
response = self.parse(data)
if response:
sock.sendto(response, self.client_address)
# TCP DNS Handler for incoming requests
class TCPHandler(DNSHandler, socketserver.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024)
# Remove the additional "length" parameter used in the
# TCP DNS protocol
data = data[2:]
response = self.parse(data)
if response:
# Calculate and add the additional "length" parameter
# used in TCP DNS protocol
length = binascii.unhexlify("%04x" % len(response))
self.request.sendall(length + response)
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
# Override SocketServer.UDPServer to add extra parameters
def __init__(self, server_address, RequestHandlerClass, nametodns, nameservers, ipv6, log):
self.nametodns = nametodns
self.nameservers = nameservers
self.ipv6 = ipv6
self.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
self.log = log
socketserver.UDPServer.__init__(self, server_address, RequestHandlerClass)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
# Override default value
allow_reuse_address = True
# Override SocketServer.TCPServer to add extra parameters
def __init__(self, server_address, RequestHandlerClass, nametodns, nameservers, ipv6, log):
self.nametodns = nametodns
self.nameservers = nameservers
self.ipv6 = ipv6
self.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
self.log = log
socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass)
# Initialize and start the DNS Server
def start_cooking(interface, nametodns, nameservers, tcp=False, ipv6=False, port="53", logfile=None):
try:
if logfile:
fh = logging.FileHandler(logfile, encoding='UTF-8')
fh.setLevel(logging.INFO)
fh.setFormatter(DNSChefFormatter(datefmt="%d/%b/%Y:%H:%M:%S %z"))
log.addHandler(fh)
log.info("DNSChef is active.")
if tcp:
log.info("DNSChef is running in TCP mode")
server = ThreadedTCPServer((interface, int(port)), TCPHandler, nametodns, nameservers, ipv6, log)
else:
server = ThreadedUDPServer((interface, int(port)), UDPHandler, nametodns, nameservers, ipv6, log)
# Start a thread with the server -- that thread will then start
# more threads for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
# Loop in the main thread
while True: time.sleep(100)
except (KeyboardInterrupt, SystemExit):
server.shutdown()
log.info("DNSChef is shutting down.")
sys.exit()
except Exception as e:
log.error(f"Failed to start the server: {e}")
if __name__ == "__main__":
header = " _ _ __ \n"
header += " | | version %s | | / _| \n" % DNSCHEF_VERSION
header += " __| |_ __ ___ ___| |__ ___| |_ \n"
header += " / _` | '_ \/ __|/ __| '_ \ / _ \ _|\n"
header += " | (_| | | | \__ \ (__| | | | __/ | \n"
header += " \__,_|_| |_|___/\___|_| |_|\___|_| \n"
header += " iphelix@thesprawl.org \n"
# Parse command line arguments
parser = ArgumentParser(usage = "dnschef.py [options]:\n" + header, description="DNSChef is a highly configurable DNS Proxy for Penetration Testers and Malware Analysts. It is capable of fine configuration of which DNS replies to modify or to simply proxy with real responses. In order to take advantage of the tool you must either manually configure or poison DNS server entry to point to DNSChef. The tool requires root privileges to run on privileged ports." )
fakegroup = parser.add_argument_group("Fake DNS records:")
fakegroup.add_argument('--fakeip', metavar="192.0.2.1", help='IP address to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'A\' queries will be spoofed. Consider using --file argument if you need to define more than one IP address.')
fakegroup.add_argument('--fakeipv6', metavar="2001:db8::1", help='IPv6 address to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'AAAA\' queries will be spoofed. Consider using --file argument if you need to define more than one IPv6 address.')
fakegroup.add_argument('--fakemail', metavar="mail.fake.com", help='MX name to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'MX\' queries will be spoofed. Consider using --file argument if you need to define more than one MX record.')
fakegroup.add_argument('--fakealias', metavar="www.fake.com", help='CNAME name to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'CNAME\' queries will be spoofed. Consider using --file argument if you need to define more than one CNAME record.')
fakegroup.add_argument('--fakens', metavar="ns.fake.com", help='NS name to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'NS\' queries will be spoofed. Consider using --file argument if you need to define more than one NS record.')
fakegroup.add_argument('--file', help="Specify a file containing a list of DOMAIN=IP pairs (one pair per line) used for DNS responses. For example: google.com=1.1.1.1 will force all queries to 'google.com' to be resolved to '1.1.1.1'. IPv6 addresses will be automatically detected. You can be even more specific by combining --file with other arguments. However, data obtained from the file will take precedence over others.")
mexclusivegroup = parser.add_mutually_exclusive_group()
mexclusivegroup.add_argument('--fakedomains', metavar="thesprawl.org,google.com", help='A comma separated list of domain names which will be resolved to FAKE values specified in the above parameters. All other domain names will be resolved to their true values.')
mexclusivegroup.add_argument('--truedomains', metavar="thesprawl.org,google.com", help='A comma separated list of domain names which will be resolved to their TRUE values. All other domain names will be resolved to fake values specified in the above parameters.')
rungroup = parser.add_argument_group("Optional runtime parameters.")
rungroup.add_argument("--logfile", metavar="FILE", help="Specify a log file to record all activity")
rungroup.add_argument("--nameservers", metavar="8.8.8.8#53 or 4.2.2.1#53#tcp or 2001:4860:4860::8888", default='8.8.8.8', help='A comma separated list of alternative DNS servers to use with proxied requests. Nameservers can have either IP or IP#PORT format. A randomly selected server from the list will be used for proxy requests when provided with multiple servers. By default, the tool uses Google\'s public DNS server 8.8.8.8 when running in IPv4 mode and 2001:4860:4860::8888 when running in IPv6 mode.')
rungroup.add_argument("-i","--interface", metavar="127.0.0.1 or ::1", default="127.0.0.1", help='Define an interface to use for the DNS listener. By default, the tool uses 127.0.0.1 for IPv4 mode and ::1 for IPv6 mode.')
rungroup.add_argument("-t","--tcp", action="store_true", default=False, help="Use TCP DNS proxy instead of the default UDP.")
rungroup.add_argument("-6","--ipv6", action="store_true", default=False, help="Run in IPv6 mode.")
rungroup.add_argument("-p","--port", metavar="53", default="53", help='Port number to listen for DNS requests.')
rungroup.add_argument("-q", "--quiet", action="store_false", dest="verbose", default=True, help="Don't show headers.")
options = parser.parse_args()
# Print program header
if options.verbose:
print(header)
# Main storage of domain filters
# NOTE: RDMAP is a dictionary map of qtype strings to handling classes
nametodns = dict()
for qtype in list(RDMAP.keys()):
nametodns[qtype] = dict()
if not (options.fakeip or options.fakeipv6) and (options.fakedomains or options.truedomains):
log.error("You have forgotten to specify which IP to use for fake responses")
sys.exit(1)
# Notify user about alternative listening port
if options.port != "53":
log.info(f"Listening on an alternative port {options.port}")
# Adjust defaults for IPv6
if options.ipv6:
log.info("Using IPv6 mode.")
if options.interface == "127.0.0.1":
options.interface = "::1"
if options.nameservers == "8.8.8.8":
options.nameservers = "2001:4860:4860::8888"
log.info(f"DNSChef started on interface: {options.interface}")
# Use alternative DNS servers
if options.nameservers:
nameservers = options.nameservers.split(',')
log.info(f"Using the following nameservers: {', '.join(nameservers)}")
# External file definitions
if options.file:
config = ConfigParser()
config.read(options.file)
for section in config.sections():
if section in nametodns:
for domain, record in config.items(section):
# Make domain case insensitive
domain = domain.lower()
nametodns[section][domain] = record
log.info(f"Cooking {section} replies for domain {domain} with '{record}'")
else:
log.warning(f"DNS Record '{section}' is not supported. Ignoring section contents.")
# DNS Record and Domain Name definitions
# NOTE: '*.*.*.*.*.*.*.*.*.*' domain is used to match all possible queries.
if options.fakeip or options.fakeipv6 or options.fakemail or options.fakealias or options.fakens:
fakeip = options.fakeip
fakeipv6 = options.fakeipv6
fakemail = options.fakemail
fakealias = options.fakealias
fakens = options.fakens
if options.fakedomains:
for domain in options.fakedomains.split(','):
# Make domain case insensitive
domain = domain.lower()
domain = domain.strip()
if fakeip:
nametodns["A"][domain] = fakeip
log.info(f"Cooking A replies to point to {options.fakeip} matching: {domain}")
if fakeipv6:
nametodns["AAAA"][domain] = fakeipv6
log.info(f"Cooking AAAA replies to point to {options.fakeipv6} matching: {domain}")
if fakemail:
nametodns["MX"][domain] = fakemail
log.info(f"Cooking MX replies to point to {options.fakemail} matching: {domain}")
if fakealias:
nametodns["CNAME"][domain] = fakealias
log.info(f"Cooking CNAME replies to point to {options.fakealias} matching: {domain}")
if fakens:
nametodns["NS"][domain] = fakens
log.info(f"Cooking NS replies to point to {options.fakens} matching: {domain}")
elif options.truedomains:
for domain in options.truedomains.split(','):
# Make domain case insensitive
domain = domain.lower()
domain = domain.strip()
if fakeip:
nametodns["A"][domain] = False
log.info(f"Cooking A replies to point to {options.fakeip} not matching: {domain}")
nametodns["A"]['*.*.*.*.*.*.*.*.*.*'] = fakeip
if fakeipv6:
nametodns["AAAA"][domain] = False
log.info(f"Cooking AAAA replies to point to {options.fakeipv6} not matching: {domain}")
nametodns["AAAA"]['*.*.*.*.*.*.*.*.*.*'] = fakeipv6
if fakemail:
nametodns["MX"][domain] = False
log.info(f"Cooking MX replies to point to {options.fakemail} not matching: {domain}")
nametodns["MX"]['*.*.*.*.*.*.*.*.*.*'] = fakemail
if fakealias:
nametodns["CNAME"][domain] = False
log.info(f"Cooking CNAME replies to point to {options.fakealias} not matching: {domain}")
nametodns["CNAME"]['*.*.*.*.*.*.*.*.*.*'] = fakealias
if fakens:
nametodns["NS"][domain] = False
log.info(f"Cooking NS replies to point to {options.fakens} not matching: {domain}")
nametodns["NS"]['*.*.*.*.*.*.*.*.*.*'] = fakealias
else:
# NOTE: '*.*.*.*.*.*.*.*.*.*' domain is a special ANY domain
# which is compatible with the wildflag algorithm above.
if fakeip:
nametodns["A"]['*.*.*.*.*.*.*.*.*.*'] = fakeip
log.info(f"Cooking all A replies to point to {fakeip}")
if fakeipv6:
nametodns["AAAA"]['*.*.*.*.*.*.*.*.*.*'] = fakeipv6
log.info(f"Cooking all AAAA replies to point to {fakeipv6}")
if fakemail:
nametodns["MX"]['*.*.*.*.*.*.*.*.*.*'] = fakemail
log.info(f"Cooking all MX replies to point to {fakemail}")
if fakealias:
nametodns["CNAME"]['*.*.*.*.*.*.*.*.*.*'] = fakealias
log.info(f"Cooking all CNAME replies to point to {fakealias}")
if fakens:
nametodns["NS"]['*.*.*.*.*.*.*.*.*.*'] = fakens
log.info(f"Cooking all NS replies to point to {fakens}")
# Proxy all DNS requests
if not options.fakeip and not options.fakeipv6 and not options.fakemail and not options.fakealias and not options.fakens and not options.file:
log.info("No parameters were specified. Running in full proxy mode")
# Launch DNSChef
start_cooking(interface=options.interface, nametodns=nametodns, nameservers=nameservers, tcp=options.tcp, ipv6=options.ipv6, port=options.port, logfile=options.logfile)
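# CLI sketch (hypothetical invocations; port 53 requires root privileges):
#   python3 dnschef.py --fakeip 192.0.2.1 --fakedomains thesprawl.org
#   python3 dnschef.py --file dnschef.ini --nameservers 8.8.8.8#53 --tcp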
|
iphelix/dnschef
|
dnschef.py
|
Python
|
bsd-3-clause
| 30,352
|
[
"VisIt"
] |
126bc70a88c8a38d723ed78c1c31046d924e63597edf59ce0228681c4986affa
|
# Generate one PBS job script per Gaussian .gjf input file in this directory.
import os, sys
path=os.path.split(os.path.realpath(sys.argv[0]))[0]
filelst=os.listdir(path)
filelst_gjf = []
for filename in filelst:
if filename[-4:] == '.gjf':
filelst_gjf.append(filename)
filenumber = int(input())  # number of .pbs job scripts to generate
for i in range(filenumber):
filename = str(i) + '.pbs'
fp = open(filename,'w')
content0 = '#PBS -S /bin/bash\n'
content1 = '#PBS -N gaussian201603' + str(i) + '\n'
content2 = '''#PBS -l nodes=1:ppn=24
#PBS -q snode
#G09
export g09root=$HOME
. $g09root/g09/bsd/g09.profile
export PATH=$PATH:$g09root/g09:$g09root/g09/bsd
export GAUSS_SCRDIR=$g09root/scratch/gaussian
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$g09root/g09
cd $PBS_O_WORKDIR\n\n'''
content3 = '$HOME/g09/g09 ' + filelst_gjf[i] + '\n'
allcontent = [content0,content1,content2,content3]
fp.writelines(allcontent)
fp.close()
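# Usage sketch (informal; run from a directory containing .gjf inputs and
# type the number of job scripts to create):
#   $ python test3.py
#   3
# writes 0.pbs, 1.pbs and 2.pbs, each invoking g09 on one .gjf file.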
|
Yuxin-H/test
|
test3.py
|
Python
|
gpl-3.0
| 851
|
[
"Gaussian"
] |
29aeff5cbf53f995958b09819f208b70edefc23409d226c9e4704d31340203bd
|
import ast
import collections
import math
import numpy as np
import random
from copy import copy
import cgen as c
from parcels.field import Field
from parcels.field import NestedField
from parcels.field import SummedField
from parcels.field import VectorField
from parcels.grid import Grid
from parcels.particle import JITParticle
from parcels.tools.loggers import logger
class IntrinsicNode(ast.AST):
def __init__(self, obj, ccode):
self.obj = obj
self.ccode = ccode
class FieldSetNode(IntrinsicNode):
def __getattr__(self, attr):
if isinstance(getattr(self.obj, attr), Field):
return FieldNode(getattr(self.obj, attr),
ccode="%s->%s" % (self.ccode, attr))
elif isinstance(getattr(self.obj, attr), NestedField):
if isinstance(getattr(self.obj, attr)[0], VectorField):
return NestedVectorFieldNode(getattr(self.obj, attr),
ccode="%s->%s" % (self.ccode, attr))
else:
return NestedFieldNode(getattr(self.obj, attr),
ccode="%s->%s" % (self.ccode, attr))
elif isinstance(getattr(self.obj, attr), SummedField) or isinstance(getattr(self.obj, attr), list):
if isinstance(getattr(self.obj, attr)[0], VectorField):
return SummedVectorFieldNode(getattr(self.obj, attr),
ccode="%s->%s" % (self.ccode, attr))
else:
return SummedFieldNode(getattr(self.obj, attr),
ccode="%s->%s" % (self.ccode, attr))
elif isinstance(getattr(self.obj, attr), VectorField):
return VectorFieldNode(getattr(self.obj, attr),
ccode="%s->%s" % (self.ccode, attr))
else:
return ConstNode(getattr(self.obj, attr),
ccode="%s" % (attr))
class FieldNode(IntrinsicNode):
def __getattr__(self, attr):
if isinstance(getattr(self.obj, attr), Grid):
return GridNode(getattr(self.obj, attr),
ccode="%s->%s" % (self.ccode, attr))
elif attr == "eval":
return FieldEvalCallNode(self)
else:
raise NotImplementedError('Access to Field attributes is not (yet) implemented in JIT mode')
class FieldEvalCallNode(IntrinsicNode):
def __init__(self, field):
self.field = field
self.obj = field.obj
self.ccode = ""
class FieldEvalNode(IntrinsicNode):
def __init__(self, field, args, var, convert=True):
self.field = field
self.args = args
self.var = var # the variable in which the interpolated field is written
self.convert = convert # whether to convert the result (like field.applyConversion)
class VectorFieldNode(IntrinsicNode):
def __getitem__(self, attr):
return VectorFieldEvalNode(self.obj, attr)
class VectorFieldEvalNode(IntrinsicNode):
def __init__(self, field, args, var, var2, var3):
self.field = field
self.args = args
self.var = var # the variable in which the interpolated field is written
self.var2 = var2 # second variable for UV interpolation
self.var3 = var3 # third variable for UVW interpolation
class SummedFieldNode(IntrinsicNode):
def __getitem__(self, attr):
return SummedFieldEvalNode(self.obj, attr)
class SummedFieldEvalNode(IntrinsicNode):
def __init__(self, fields, args, var):
self.fields = fields
self.args = args
self.var = var # the variable in which the interpolated field is written
class SummedVectorFieldNode(IntrinsicNode):
def __getitem__(self, attr):
return SummedVectorFieldEvalNode(self.obj, attr)
class SummedVectorFieldEvalNode(IntrinsicNode):
def __init__(self, fields, args, var, var2, var3):
self.fields = fields
self.args = args
self.var = var # the variable in which the interpolated field is written
self.var2 = var2 # second variable for UV interpolation
self.var3 = var3 # third variable for UVW interpolation
class NestedFieldNode(IntrinsicNode):
def __getitem__(self, attr):
return NestedFieldEvalNode(self.obj, attr)
class NestedFieldEvalNode(IntrinsicNode):
def __init__(self, fields, args, var):
self.fields = fields
self.args = args
self.var = var # the variable in which the interpolated field is written
class NestedVectorFieldNode(IntrinsicNode):
def __getitem__(self, attr):
return NestedVectorFieldEvalNode(self.obj, attr)
class NestedVectorFieldEvalNode(IntrinsicNode):
def __init__(self, fields, args, var, var2, var3):
self.fields = fields
self.args = args
self.var = var # the variable in which the interpolated field is written
self.var2 = var2 # second variable for UV interpolation
self.var3 = var3 # third variable for UVW interpolation
class GridNode(IntrinsicNode):
def __getattr__(self, attr):
raise NotImplementedError('Access to Grids is not (yet) implemented in JIT mode')
class ConstNode(IntrinsicNode):
def __getitem__(self, attr):
return attr
class MathNode(IntrinsicNode):
symbol_map = {'pi': 'M_PI', 'e': 'M_E', 'nan': 'NAN'}
def __getattr__(self, attr):
if hasattr(math, attr):
if attr in self.symbol_map:
attr = self.symbol_map[attr]
return IntrinsicNode(None, ccode=attr)
else:
raise AttributeError("""Unknown math function encountered: %s"""
% attr)
class RandomNode(IntrinsicNode):
symbol_map = {'random': 'parcels_random',
'uniform': 'parcels_uniform',
'randint': 'parcels_randint',
'normalvariate': 'parcels_normalvariate',
'expovariate': 'parcels_expovariate',
'vonmisesvariate': 'parcels_vonmisesvariate',
'seed': 'parcels_seed'}
def __getattr__(self, attr):
if hasattr(random, attr):
if attr in self.symbol_map:
attr = self.symbol_map[attr]
return IntrinsicNode(None, ccode=attr)
else:
raise AttributeError("""Unknown random function encountered: %s"""
% attr)
class StatusCodeNode(IntrinsicNode):
symbol_map = {'Success': 'SUCCESS', 'Evaluate': 'EVALUATE', # StateCodes
'Repeat': 'REPEAT', 'Delete': 'DELETE', 'StopExecution': 'STOP_EXECUTION', # OperationCodes
'Error': 'ERROR', 'ErrorInterpolation': 'ERROR_INTERPOLATION', # ErrorCodes
'ErrorOutOfBounds': 'ERROR_OUT_OF_BOUNDS', 'ErrorThroughSurface': 'ERROR_THROUGH_SURFACE',
'ErrorTimeExtrapolation': 'ERROR_TIME_EXTRAPOLATION'}
def __getattr__(self, attr):
if attr in self.symbol_map:
attr = self.symbol_map[attr]
return IntrinsicNode(None, ccode=attr)
else:
raise AttributeError("""Unknown status code encountered: %s"""
% attr)
class PrintNode(IntrinsicNode):
def __init__(self):
self.obj = 'print'
class ParticleAttributeNode(IntrinsicNode):
def __init__(self, obj, attr):
self.obj = obj
self.attr = attr
self.ccode = "%s->%s[pnum]" % (obj.ccode, attr)
class ParticleNode(IntrinsicNode):
def __getattr__(self, attr):
if attr in [v.name for v in self.obj.variables]:
return ParticleAttributeNode(self, attr)
elif attr in ['delete']:
return ParticleAttributeNode(self, 'state')
else:
raise AttributeError("""Particle type %s does not define attribute "%s".
Please add '%s' to %s.users_vars or define an appropriate sub-class."""
% (self.obj, attr, attr, self.obj))
class IntrinsicTransformer(ast.NodeTransformer):
"""AST transformer that catches any mention of intrinsic variable
names, such as 'particle' or 'fieldset', inserts placeholder objects
and propagates attribute access."""
def __init__(self, fieldset=None, ptype=JITParticle):
self.fieldset = fieldset
self.ptype = ptype
# Counter and variable names for temporaries
self._tmp_counter = 0
self.tmp_vars = []
# A stack of additional statements to be inserted
self.stmt_stack = []
def get_tmp(self):
"""Create a new temporary veriable name"""
tmp = "parcels_tmpvar%d" % self._tmp_counter
self._tmp_counter += 1
self.tmp_vars += [tmp]
return tmp
def visit_Name(self, node):
"""Inject IntrinsicNode objects into the tree according to keyword"""
if node.id == 'fieldset' and self.fieldset is not None:
node = FieldSetNode(self.fieldset, ccode='fset')
elif node.id == 'particle':
node = ParticleNode(self.ptype, ccode='particles')
elif node.id in ['StateCode', 'OperationCode', 'ErrorCode', 'Error']:
node = StatusCodeNode(math, ccode='')
elif node.id == 'math':
node = MathNode(math, ccode='')
elif node.id == 'ParcelsRandom':
node = RandomNode(math, ccode='')
elif node.id == 'print':
node = PrintNode()
elif (node.id == 'pnum') or ('parcels_tmpvar' in node.id):
raise NotImplementedError("Custom Kernels cannot contain string %s; please change your kernel" % node.id)
return node
def visit_Attribute(self, node):
node.value = self.visit(node.value)
if isinstance(node.value, IntrinsicNode):
if node.attr == 'update_next_dt':
return 'update_next_dt'
return getattr(node.value, node.attr)
else:
if node.value.id in ['np', 'numpy']:
raise NotImplementedError("Cannot convert numpy functions in kernels to C-code.\n"
"Either use functions from the math library or run Parcels in Scipy mode.\n"
"For more information, see http://oceanparcels.org/faq.html#kernelwriting")
else:
raise NotImplementedError("Cannot convert '%s' used in kernel to C-code" % node.value.id)
def visit_Subscript(self, node):
node.value = self.visit(node.value)
node.slice = self.visit(node.slice)
# If we encounter field evaluation we replace it with a
# temporary variable and put the evaluation call on the stack.
if isinstance(node.value, SummedFieldNode):
tmp = [self.get_tmp() for _ in node.value.obj]
# Insert placeholder node for field eval ...
self.stmt_stack += [SummedFieldEvalNode(node.value, node.slice, tmp)]
# .. and return the name of the temporary that will be populated
return ast.Name(id='+'.join(tmp))
elif isinstance(node.value, SummedVectorFieldNode):
tmp = [self.get_tmp() for _ in range(len(node.value.obj))]
tmp2 = [self.get_tmp() for _ in range(len(node.value.obj))]
tmp3 = [self.get_tmp() if list.__getitem__(node.value.obj, 0).vector_type == '3D' else None for _ in range(len(node.value.obj))]
# Insert placeholder node for field eval ...
self.stmt_stack += [SummedVectorFieldEvalNode(node.value, node.slice, tmp, tmp2, tmp3)]
# .. and return the name of the temporary that will be populated
if all(tmp3):
return ast.Tuple([ast.Name(id='+'.join(tmp)), ast.Name(id='+'.join(tmp2)), ast.Name(id='+'.join(tmp3))], ast.Load())
else:
return ast.Tuple([ast.Name(id='+'.join(tmp)), ast.Name(id='+'.join(tmp2))], ast.Load())
elif isinstance(node.value, FieldNode):
tmp = self.get_tmp()
# Insert placeholder node for field eval ...
self.stmt_stack += [FieldEvalNode(node.value, node.slice, tmp)]
# .. and return the name of the temporary that will be populated
return ast.Name(id=tmp)
elif isinstance(node.value, VectorFieldNode):
tmp = self.get_tmp()
tmp2 = self.get_tmp()
tmp3 = self.get_tmp() if node.value.obj.vector_type == '3D' else None
# Insert placeholder node for field eval ...
self.stmt_stack += [VectorFieldEvalNode(node.value, node.slice, tmp, tmp2, tmp3)]
# .. and return the name of the temporary that will be populated
if tmp3:
return ast.Tuple([ast.Name(id=tmp), ast.Name(id=tmp2), ast.Name(id=tmp3)], ast.Load())
else:
return ast.Tuple([ast.Name(id=tmp), ast.Name(id=tmp2)], ast.Load())
elif isinstance(node.value, NestedFieldNode):
tmp = self.get_tmp()
self.stmt_stack += [NestedFieldEvalNode(node.value, node.slice, tmp)]
return ast.Name(id=tmp)
elif isinstance(node.value, NestedVectorFieldNode):
tmp = self.get_tmp()
tmp2 = self.get_tmp()
tmp3 = self.get_tmp() if list.__getitem__(node.value.obj, 0).vector_type == '3D' else None
self.stmt_stack += [NestedVectorFieldEvalNode(node.value, node.slice, tmp, tmp2, tmp3)]
if tmp3:
return ast.Tuple([ast.Name(id=tmp), ast.Name(id=tmp2), ast.Name(id=tmp3)], ast.Load())
else:
return ast.Tuple([ast.Name(id=tmp), ast.Name(id=tmp2)], ast.Load())
else:
return node
def visit_AugAssign(self, node):
node.target = self.visit(node.target)
node.op = self.visit(node.op)
node.value = self.visit(node.value)
stmts = [node]
# Inject statements from the stack
if len(self.stmt_stack) > 0:
stmts = self.stmt_stack + stmts
self.stmt_stack = []
return stmts
def visit_Assign(self, node):
node.targets = [self.visit(t) for t in node.targets]
node.value = self.visit(node.value)
stmts = [node]
# Inject statements from the stack
if len(self.stmt_stack) > 0:
stmts = self.stmt_stack + stmts
self.stmt_stack = []
return stmts
def visit_Call(self, node):
node.func = self.visit(node.func)
node.args = [self.visit(a) for a in node.args]
node.keywords = {kw.arg: self.visit(kw.value) for kw in node.keywords}
if isinstance(node.func, ParticleAttributeNode) \
and node.func.attr == 'state':
node = IntrinsicNode(node, "return DELETE")
elif isinstance(node.func, FieldEvalCallNode):
# get a temporary value to assign result to
tmp = self.get_tmp()
# whether to convert
convert = True
if "applyConversion" in node.keywords:
k = node.keywords["applyConversion"]
if isinstance(k, ast.NameConstant):
convert = k.value
# convert args to Index(Tuple(*args))
args = ast.Index(value=ast.Tuple(node.args, ast.Load()))
self.stmt_stack += [FieldEvalNode(node.func.field, args, tmp, convert)]
return ast.Name(id=tmp)
return node
class TupleSplitter(ast.NodeTransformer):
"""AST transformer that detects and splits Pythonic tuple
assignments into multiple statements for conversion to C."""
def visit_Assign(self, node):
if isinstance(node.targets[0], ast.Tuple) \
and isinstance(node.value, ast.Tuple):
t_elts = node.targets[0].elts
v_elts = node.value.elts
if len(t_elts) != len(v_elts):
raise AttributeError("Tuple lenghts in assignment do not agree")
node = [ast.Assign() for _ in t_elts]
for n, t, v in zip(node, t_elts, v_elts):
n.targets = [t]
n.value = v
return node
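# Splitting illustration (informal): a tuple-to-tuple assignment such as
#   a, b = 1., 2.
# is rewritten by TupleSplitter into the two statements 'a = 1.' and
# 'b = 2.', so each can later be emitted as a single C assignment.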
class KernelGenerator(ast.NodeVisitor):
"""Code generator class that translates simple Python kernel
functions into C functions by populating and accessing the `ccode`
attribute on nodes in the Python AST."""
# Intrinsic variables that appear as function arguments
kernel_vars = ['particle', 'fieldset', 'time', 'output_time', 'tol']
array_vars = []
def __init__(self, fieldset=None, ptype=JITParticle):
self.fieldset = fieldset
self.ptype = ptype
self.field_args = collections.OrderedDict()
self.vector_field_args = collections.OrderedDict()
self.const_args = collections.OrderedDict()
def generate(self, py_ast, funcvars):
# Replace occurrences of intrinsic objects in Python AST
transformer = IntrinsicTransformer(self.fieldset, self.ptype)
py_ast = transformer.visit(py_ast)
# Untangle Pythonic tuple-assignment statements
py_ast = TupleSplitter().visit(py_ast)
# Generate C-code for all nodes in the Python AST
self.visit(py_ast)
self.ccode = py_ast.ccode
# Insert variable declarations for non-intrinsics
# Make sure that repeated variables are not declared more than
# once. If variables occur in multiple Kernels, give a warning
used_vars = []
funcvars_copy = copy(funcvars) # editing a list while looping over it is dangerous
for kvar in funcvars:
if kvar in used_vars:
if kvar not in ['particle', 'fieldset', 'time']:
logger.warning(kvar+" declared in multiple Kernels")
funcvars_copy.remove(kvar)
else:
used_vars.append(kvar)
funcvars = funcvars_copy
for kvar in self.kernel_vars + self.array_vars:
if kvar in funcvars:
funcvars.remove(kvar)
self.ccode.body.insert(0, c.Value('StatusCode', 'err'))
if len(funcvars) > 0:
self.ccode.body.insert(0, c.Value("type_coord", ", ".join(funcvars)))
if len(transformer.tmp_vars) > 0:
self.ccode.body.insert(0, c.Value("float", ", ".join(transformer.tmp_vars)))
return self.ccode
@staticmethod
def _check_FieldSamplingArguments(ccode):
if ccode == 'particles':
args = ('time', 'particles->depth[pnum]', 'particles->lat[pnum]', 'particles->lon[pnum]')
elif ccode[-1] == 'particles':
args = ccode[:-1]
else:
args = ccode
return args
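# e.g. a kernel expression like fieldset.U[particle] arrives here with ccode
# 'particles' and is expanded to the sampling position
# (time, particles->depth[pnum], particles->lat[pnum], particles->lon[pnum]).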
def visit_FunctionDef(self, node):
# Generate "ccode" attribute by traversing the Python AST
for stmt in node.body:
if not (hasattr(stmt, 'value') and type(stmt.value) is ast.Str): # ignore docstrings
self.visit(stmt)
# Create function declaration and argument list
decl = c.Static(c.DeclSpecifier(c.Value("StatusCode", node.name), spec='inline'))
args = [c.Pointer(c.Value(self.ptype.name + 'p', "particles")),
c.Value("int", "pnum"),
c.Value("double", "time")]
for field in self.field_args.values():
args += [c.Pointer(c.Value("CField", "%s" % field.ccode_name))]
for field in self.vector_field_args.values():
for fcomponent in ['U', 'V', 'W']:
try:
f = getattr(field, fcomponent)
if f.ccode_name not in self.field_args:
args += [c.Pointer(c.Value("CField", "%s" % f.ccode_name))]
self.field_args[f.ccode_name] = f
except AttributeError:
pass  # field.W does not always exist
for const, _ in self.const_args.items():
args += [c.Value("float", const)]
# Create function body as C-code object
body = [stmt.ccode for stmt in node.body if not (hasattr(stmt, 'value') and type(stmt.value) is ast.Str)]
body += [c.Statement("return SUCCESS")]
node.ccode = c.FunctionBody(c.FunctionDeclaration(decl, args), c.Block(body))
def visit_Call(self, node):
"""Generate C code for simple C-style function calls. Please
note that starred and keyword arguments are currently not
supported."""
pointer_args = False
parcels_customed_Cfunc = False
if isinstance(node.func, PrintNode):
# Write our own Print parser because Python3-AST does not seem to have one
if isinstance(node.args[0], ast.Str):
node.ccode = str(c.Statement('printf("%s\\n")' % (node.args[0].s)))
elif isinstance(node.args[0], ast.Name):
node.ccode = str(c.Statement('printf("%%f\\n", %s)' % (node.args[0].id)))
elif isinstance(node.args[0], ast.BinOp):
if hasattr(node.args[0].right, 'ccode'):
args = node.args[0].right.ccode
elif hasattr(node.args[0].right, 'id'):
args = node.args[0].right.id
elif hasattr(node.args[0].right, 'elts'):
args = []
for a in node.args[0].right.elts:
if hasattr(a, 'ccode'):
args.append(a.ccode)
elif hasattr(a, 'id'):
args.append(a.id)
else:
args = []
s = 'printf("%s\\n"' % node.args[0].left.s
if isinstance(args, str):
s = s + (", %s)" % args)
else:
for arg in args:
s = s + (", %s" % arg)
s = s + ")"
node.ccode = str(c.Statement(s))
else:
raise RuntimeError("This print statement is not supported in Python3 version of Parcels")
else:
for a in node.args:
self.visit(a)
if a.ccode == 'parcels_customed_Cfunc_pointer_args':
pointer_args = True
parcels_customed_Cfunc = True
elif a.ccode == 'parcels_customed_Cfunc':
parcels_customed_Cfunc = True
elif isinstance(a, FieldNode) or isinstance(a, VectorFieldNode):
a.ccode = a.obj.ccode_name
elif isinstance(a, ParticleNode):
continue
elif pointer_args:
a.ccode = "&%s" % a.ccode
ccode_args = ", ".join([a.ccode for a in node.args[pointer_args:]])
try:
if isinstance(node.func, str):
node.ccode = node.func + '(' + ccode_args + ')'
else:
self.visit(node.func)
rhs = "%s(%s)" % (node.func.ccode, ccode_args)
if parcels_customed_Cfunc:
node.ccode = str(c.Block([c.Assign("err", rhs),
c.Statement("CHECKSTATUS(err)")]))
else:
node.ccode = rhs
except Exception:
raise RuntimeError("Error in converting Kernel to C. See http://oceanparcels.org/#writing-parcels-kernels for hints and tips")
def visit_Name(self, node):
"""Catches any mention of intrinsic variable names, such as
'particle' or 'fieldset' and inserts our placeholder objects"""
if node.id == 'True':
node.id = "1"
if node.id == 'False':
node.id = "0"
node.ccode = node.id
def visit_NameConstant(self, node):
if node.value is True:
node.ccode = "1"
if node.value is False:
node.ccode = "0"
def visit_Expr(self, node):
self.visit(node.value)
node.ccode = c.Statement(node.value.ccode)
def visit_Assign(self, node):
self.visit(node.targets[0])
self.visit(node.value)
if isinstance(node.value, ast.List):
# Detect in-place initialisation of multi-dimensional arrays
tmp_node = node.value
decl = c.Value('float', node.targets[0].id)
while isinstance(tmp_node, ast.List):
decl = c.ArrayOf(decl, len(tmp_node.elts))
if isinstance(tmp_node.elts[0], ast.List):
# Check type and dimension are the same
if not all(isinstance(e, ast.List) for e in tmp_node.elts):
raise TypeError("Non-list element discovered in array declaration")
if not all(len(e.elts) == len(tmp_node.elts[0].elts) for e in tmp_node.elts):
raise TypeError("Irregular array length not allowed in array declaration")
tmp_node = tmp_node.elts[0]
node.ccode = c.Initializer(decl, node.value.ccode)
self.array_vars += [node.targets[0].id]
else:
node.ccode = c.Assign(node.targets[0].ccode, node.value.ccode)
def visit_AugAssign(self, node):
self.visit(node.target)
self.visit(node.op)
self.visit(node.value)
node.ccode = c.Statement("%s %s= %s" % (node.target.ccode,
node.op.ccode,
node.value.ccode))
def visit_If(self, node):
self.visit(node.test)
for b in node.body:
self.visit(b)
for b in node.orelse:
self.visit(b)
# Field evals in the if-test are replaced by a tmp variable, and the eval
# statements end up at the start of node.body instead of before the test;
# we hoist them back out manually here.
fieldInTestCount = node.test.ccode.count('parcels_tmpvar')
body0 = c.Block([b.ccode for b in node.body[:fieldInTestCount]])
body = c.Block([b.ccode for b in node.body[fieldInTestCount:]])
orelse = c.Block([b.ccode for b in node.orelse]) if len(node.orelse) > 0 else None
ifcode = c.If(node.test.ccode, body, orelse)
node.ccode = c.Block([body0, ifcode])
def visit_Compare(self, node):
self.visit(node.left)
assert(len(node.ops) == 1)
self.visit(node.ops[0])
assert(len(node.comparators) == 1)
self.visit(node.comparators[0])
node.ccode = "%s %s %s" % (node.left.ccode, node.ops[0].ccode,
node.comparators[0].ccode)
def visit_Index(self, node):
self.visit(node.value)
node.ccode = node.value.ccode
def visit_Tuple(self, node):
for e in node.elts:
self.visit(e)
node.ccode = tuple([e.ccode for e in node.elts])
def visit_List(self, node):
for e in node.elts:
self.visit(e)
node.ccode = "{" + ", ".join([e.ccode for e in node.elts]) + "}"
def visit_Subscript(self, node):
self.visit(node.value)
self.visit(node.slice)
if isinstance(node.value, FieldNode) or isinstance(node.value, VectorFieldNode):
node.ccode = node.value.__getitem__(node.slice.ccode).ccode
elif isinstance(node.value, IntrinsicNode):
raise NotImplementedError("Subscript not implemented for object type %s"
% type(node.value).__name__)
else:
node.ccode = "%s[%s]" % (node.value.ccode, node.slice.ccode)
def visit_UnaryOp(self, node):
self.visit(node.op)
self.visit(node.operand)
node.ccode = "%s(%s)" % (node.op.ccode, node.operand.ccode)
def visit_BinOp(self, node):
self.visit(node.left)
self.visit(node.op)
self.visit(node.right)
if isinstance(node.op, ast.BitXor):
raise RuntimeError("JIT kernels do not support the '^' operator.\n"
"Did you intend to use the exponential/power operator? In that case, please use '**'")
elif node.op.ccode == 'pow': # catching '**' pow statements
node.ccode = "pow(%s, %s)" % (node.left.ccode, node.right.ccode)
else:
node.ccode = "(%s %s %s)" % (node.left.ccode, node.op.ccode, node.right.ccode)
node.s_print = True
def visit_Add(self, node):
node.ccode = "+"
def visit_UAdd(self, node):
node.ccode = "+"
def visit_Sub(self, node):
node.ccode = "-"
def visit_USub(self, node):
node.ccode = "-"
def visit_Mult(self, node):
node.ccode = "*"
def visit_Div(self, node):
node.ccode = "/"
def visit_Mod(self, node):
node.ccode = "%"
def visit_Pow(self, node):
node.ccode = "pow"
def visit_Num(self, node):
node.ccode = str(node.n)
def visit_BoolOp(self, node):
self.visit(node.op)
for v in node.values:
self.visit(v)
op_str = " %s " % node.op.ccode
node.ccode = op_str.join([v.ccode for v in node.values])
def visit_Eq(self, node):
node.ccode = "=="
def visit_NotEq(self, node):
node.ccode = "!="
def visit_Lt(self, node):
node.ccode = "<"
def visit_LtE(self, node):
node.ccode = "<="
def visit_Gt(self, node):
node.ccode = ">"
def visit_GtE(self, node):
node.ccode = ">="
def visit_And(self, node):
node.ccode = "&&"
def visit_Or(self, node):
node.ccode = "||"
def visit_Not(self, node):
node.ccode = "!"
def visit_While(self, node):
self.visit(node.test)
for b in node.body:
self.visit(b)
if len(node.orelse) > 0:
raise RuntimeError("Else clause in while clauses cannot be translated to C")
body = c.Block([b.ccode for b in node.body])
node.ccode = c.DoWhile(node.test.ccode, body)
def visit_For(self, node):
raise RuntimeError("For loops cannot be translated to C")
def visit_Break(self, node):
node.ccode = c.Statement("break")
def visit_Pass(self, node):
node.ccode = c.Statement("")
def visit_FieldNode(self, node):
"""Record intrinsic fields used in kernel"""
self.field_args[node.obj.ccode_name] = node.obj
def visit_SummedFieldNode(self, node):
"""Record intrinsic fields used in kernel"""
for fld in node.obj:
self.field_args[fld.ccode_name] = fld
def visit_NestedFieldNode(self, node):
"""Record intrinsic fields used in kernel"""
for fld in node.obj:
self.field_args[fld.ccode_name] = fld
def visit_VectorFieldNode(self, node):
"""Record intrinsic fields used in kernel"""
self.vector_field_args[node.obj.ccode_name] = node.obj
def visit_SummedVectorFieldNode(self, node):
"""Record intrinsic fields used in kernel"""
for fld in node.obj:
self.vector_field_args[fld.ccode_name] = fld
def visit_NestedVectorFieldNode(self, node):
"""Record intrinsic fields used in kernel"""
for fld in node.obj:
self.vector_field_args[fld.ccode_name] = fld
def visit_ConstNode(self, node):
self.const_args[node.ccode] = node.obj
def visit_FieldEvalNode(self, node):
self.visit(node.field)
self.visit(node.args)
args = self._check_FieldSamplingArguments(node.args.ccode)
ccode_eval = node.field.obj.ccode_eval(node.var, *args)
stmts = [c.Assign("err", ccode_eval)]
if node.convert:
ccode_conv = node.field.obj.ccode_convert(*args)
conv_stat = c.Statement("%s *= %s" % (node.var, ccode_conv))
stmts += [conv_stat]
node.ccode = c.Block(stmts + [c.Statement("CHECKSTATUS(err)")])
def visit_VectorFieldEvalNode(self, node):
self.visit(node.field)
self.visit(node.args)
args = self._check_FieldSamplingArguments(node.args.ccode)
ccode_eval = node.field.obj.ccode_eval(node.var, node.var2, node.var3,
node.field.obj.U, node.field.obj.V, node.field.obj.W,
*args)
if node.field.obj.U.interp_method != 'cgrid_velocity':
ccode_conv1 = node.field.obj.U.ccode_convert(*args)
ccode_conv2 = node.field.obj.V.ccode_convert(*args)
statements = [c.Statement("%s *= %s" % (node.var, ccode_conv1)),
c.Statement("%s *= %s" % (node.var2, ccode_conv2))]
else:
statements = []
if node.field.obj.vector_type == '3D':
ccode_conv3 = node.field.obj.W.ccode_convert(*args)
statements.append(c.Statement("%s *= %s" % (node.var3, ccode_conv3)))
conv_stat = c.Block(statements)
node.ccode = c.Block([c.Assign("err", ccode_eval),
conv_stat, c.Statement("CHECKSTATUS(err)")])
def visit_SummedFieldEvalNode(self, node):
self.visit(node.fields)
self.visit(node.args)
cstat = []
args = self._check_FieldSamplingArguments(node.args.ccode)
for fld, var in zip(node.fields.obj, node.var):
ccode_eval = fld.ccode_eval(var, *args)
ccode_conv = fld.ccode_convert(*args)
conv_stat = c.Statement("%s *= %s" % (var, ccode_conv))
cstat += [c.Assign("err", ccode_eval), conv_stat, c.Statement("CHECKSTATUS(err)")]
node.ccode = c.Block(cstat)
def visit_SummedVectorFieldEvalNode(self, node):
self.visit(node.fields)
self.visit(node.args)
cstat = []
args = self._check_FieldSamplingArguments(node.args.ccode)
for fld, var, var2, var3 in zip(node.fields.obj, node.var, node.var2, node.var3):
ccode_eval = fld.ccode_eval(var, var2, var3,
fld.U, fld.V, fld.W,
*args)
if fld.U.interp_method != 'cgrid_velocity':
ccode_conv1 = fld.U.ccode_convert(*args)
ccode_conv2 = fld.V.ccode_convert(*args)
statements = [c.Statement("%s *= %s" % (var, ccode_conv1)),
c.Statement("%s *= %s" % (var2, ccode_conv2))]
else:
statements = []
if fld.vector_type == '3D':
ccode_conv3 = fld.W.ccode_convert(*args)
statements.append(c.Statement("%s *= %s" % (var3, ccode_conv3)))
cstat += [c.Assign("err", ccode_eval), c.Block(statements)]
cstat += [c.Statement("CHECKSTATUS(err)")]
node.ccode = c.Block(cstat)
def visit_NestedFieldEvalNode(self, node):
self.visit(node.fields)
self.visit(node.args)
cstat = []
args = self._check_FieldSamplingArguments(node.args.ccode)
for fld in node.fields.obj:
ccode_eval = fld.ccode_eval(node.var, *args)
ccode_conv = fld.ccode_convert(*args)
conv_stat = c.Statement("%s *= %s" % (node.var, ccode_conv))
cstat += [c.Assign("err", ccode_eval),
conv_stat,
c.If("err != ERROR_OUT_OF_BOUNDS ", c.Block([c.Statement("CHECKSTATUS(err)"), c.Statement("break")]))]
cstat += [c.Statement("CHECKSTATUS(err)"), c.Statement("break")]
node.ccode = c.While("1==1", c.Block(cstat))
def visit_NestedVectorFieldEvalNode(self, node):
self.visit(node.fields)
self.visit(node.args)
cstat = []
args = self._check_FieldSamplingArguments(node.args.ccode)
for fld in node.fields.obj:
ccode_eval = fld.ccode_eval(node.var, node.var2, node.var3,
fld.U, fld.V, fld.W,
*args)
if fld.U.interp_method != 'cgrid_velocity':
ccode_conv1 = fld.U.ccode_convert(*args)
ccode_conv2 = fld.V.ccode_convert(*args)
statements = [c.Statement("%s *= %s" % (node.var, ccode_conv1)),
c.Statement("%s *= %s" % (node.var2, ccode_conv2))]
else:
statements = []
if fld.vector_type == '3D':
ccode_conv3 = fld.W.ccode_convert(*args)
statements.append(c.Statement("%s *= %s" % (node.var3, ccode_conv3)))
cstat += [c.Assign("err", ccode_eval),
c.Block(statements),
c.If("err != ERROR_OUT_OF_BOUNDS ", c.Block([c.Statement("CHECKSTATUS(err)"), c.Statement("break")]))]
cstat += [c.Statement("CHECKSTATUS(err)"), c.Statement("break")]
node.ccode = c.While("1==1", c.Block(cstat))
def visit_Return(self, node):
self.visit(node.value)
node.ccode = c.Statement('return %s' % node.value.ccode)
def visit_Print(self, node):
for n in node.values:
self.visit(n)
if hasattr(node.values[0], 's'):
node.ccode = c.Statement('printf("%s\\n")' % (n.ccode))
return
if hasattr(node.values[0], 's_print'):
args = node.values[0].right.ccode
s = ('printf("%s\\n"' % node.values[0].left.ccode)
if isinstance(args, str):
s = s + (", %s)" % args)
else:
for arg in args:
s = s + (", %s" % arg)
s = s + ")"
node.ccode = c.Statement(s)
return
vars = ', '.join([n.ccode for n in node.values])
int_vars = ['particle->id', 'particle->xi', 'particle->yi', 'particle->zi']
stat = ', '.join(["%d" if n.ccode in int_vars else "%f" for n in node.values])
node.ccode = c.Statement('printf("%s\\n", %s)' % (stat, vars))
def visit_Str(self, node):
if node.s == 'parcels_customed_Cfunc_pointer_args':
node.ccode = node.s
else:
node.ccode = ''
class LoopGenerator(object):
"""Code generator class that adds type definitions and the outer
loop around kernel functions to generate compilable C code."""
def __init__(self, fieldset, ptype=None):
self.fieldset = fieldset
self.ptype = ptype
def generate(self, funcname, field_args, const_args, kernel_ast, c_include):
ccode = []
pname = self.ptype.name + 'p'
# ==== Add include for Parcels and math header ==== #
ccode += [str(c.Include("parcels.h", system=False))]
ccode += [str(c.Include("math.h", system=False))]
ccode += [str(c.Assign('double _next_dt', '0'))]
ccode += [str(c.Assign('size_t _next_dt_set', '0'))]
ccode += [str(c.Assign('const int ngrid', str(self.fieldset.gridset.size if self.fieldset is not None else 1)))]
# ==== Generate type definition for particle type ==== #
vdeclp = [c.Pointer(c.POD(v.dtype, v.name)) for v in self.ptype.variables]
ccode += [str(c.Typedef(c.GenerableStruct("", vdeclp, declname=pname)))]
# Generate type definition for single particle type
vdecl = [c.POD(v.dtype, v.name) for v in self.ptype.variables if v.dtype != np.uint64]
ccode += [str(c.Typedef(c.GenerableStruct("", vdecl, declname=self.ptype.name)))]
args = [c.Pointer(c.Value(self.ptype.name, "particle_backup")),
c.Pointer(c.Value(pname, "particles")),
c.Value("int", "pnum")]
p_back_set_decl = c.FunctionDeclaration(c.Static(c.DeclSpecifier(c.Value("void", "set_particle_backup"),
spec='inline')), args)
body = []
for v in self.ptype.variables:
if v.dtype != np.uint64 and v.name not in ['dt', 'state']:
body += [c.Assign(("particle_backup->%s" % v.name), ("particles->%s[pnum]" % v.name))]
p_back_set_body = c.Block(body)
p_back_set = str(c.FunctionBody(p_back_set_decl, p_back_set_body))
ccode += [p_back_set]
args = [c.Pointer(c.Value(self.ptype.name, "particle_backup")),
c.Pointer(c.Value(pname, "particles")),
c.Value("int", "pnum")]
p_back_get_decl = c.FunctionDeclaration(c.Static(c.DeclSpecifier(c.Value("void", "get_particle_backup"),
spec='inline')), args)
body = []
for v in self.ptype.variables:
if v.dtype != np.uint64 and v.name not in ['dt', 'state']:
body += [c.Assign(("particles->%s[pnum]" % v.name), ("particle_backup->%s" % v.name))]
p_back_get_body = c.Block(body)
p_back_get = str(c.FunctionBody(p_back_get_decl, p_back_get_body))
ccode += [p_back_get]
update_next_dt_decl = c.FunctionDeclaration(c.Static(c.DeclSpecifier(c.Value("void", "update_next_dt"),
spec='inline')), [c.Value('double', 'dt')])
if 'update_next_dt' in str(kernel_ast):
body = []
body += [c.Assign("_next_dt", "dt")]
body += [c.Assign("_next_dt_set", "1")]
update_next_dt_body = c.Block(body)
update_next_dt = str(c.FunctionBody(update_next_dt_decl, update_next_dt_body))
ccode += [update_next_dt]
if c_include:
ccode += [c_include]
# ==== Insert kernel code ==== #
ccode += [str(kernel_ast)]
# Generate outer loop for repeated kernel invocation
args = [c.Value("int", "num_particles"),
c.Pointer(c.Value(pname, "particles")),
c.Value("double", "endtime"), c.Value("double", "dt")]
for field, _ in field_args.items():
args += [c.Pointer(c.Value("CField", "%s" % field))]
for const, _ in const_args.items():
args += [c.Value("double", const)]
fargs_str = ", ".join(['particles->time[pnum]'] + list(field_args.keys())
+ list(const_args.keys()))
        # ==== statement clusters used to compose the 'body', 'time_loop' and 'part_loop' variables ==== #
sign_dt = c.Assign("sign_dt", "dt > 0 ? 1 : -1")
particle_backup = c.Statement("%s particle_backup" % self.ptype.name)
sign_end_part = c.Assign("sign_end_part", "(endtime - particles->time[pnum]) > 0 ? 1 : -1")
reset_res_state = c.Assign("res", "particles->state[pnum]")
update_state = c.Assign("particles->state[pnum]", "res")
update_pdt = c.If("_next_dt_set == 1",
c.Block([c.Assign("_next_dt_set", "0"), c.Assign("particles->dt[pnum]", "_next_dt")]))
dt_pos = c.Assign("__dt", "fmin(fabs(particles->dt[pnum]), fabs(endtime - particles->time[pnum]))") # original
pdt_eq_dt_pos = c.Assign("__pdt_prekernels", "__dt * sign_dt")
partdt = c.Assign("particles->dt[pnum]", "__pdt_prekernels")
check_pdt = c.If("(res == SUCCESS) & !is_equal_dbl(__pdt_prekernels, particles->dt[pnum])", c.Assign("res", "REPEAT"))
dt_0_break = c.If("is_zero_dbl(particles->dt[pnum])", c.Statement("break"))
notstarted_continue = c.If("(( sign_end_part != sign_dt) || is_close_dbl(__dt, 0) ) && !is_zero_dbl(particles->dt[pnum])",
c.Block([
c.If("fabs(particles->time[pnum]) >= fabs(endtime)",
c.Assign("particles->state[pnum]", "SUCCESS")),
c.Statement("continue")
]))
# ==== main computation body ==== #
body = [c.Statement("set_particle_backup(&particle_backup, particles, pnum)")]
body += [pdt_eq_dt_pos]
body += [partdt]
body += [c.Value("StatusCode", "state_prev"), c.Assign("state_prev", "particles->state[pnum]")]
body += [c.Assign("res", "%s(particles, pnum, %s)" % (funcname, fargs_str))]
body += [c.If("(res==SUCCESS) && (particles->state[pnum] != state_prev)", c.Assign("res", "particles->state[pnum]"))]
body += [check_pdt]
body += [c.If("res == SUCCESS || res == DELETE", c.Block([c.Statement("particles->time[pnum] += particles->dt[pnum]"),
update_pdt,
dt_pos,
sign_end_part,
c.If("(res != DELETE) && !is_close_dbl(__dt, 0) && (sign_dt == sign_end_part)",
c.Assign("res", "EVALUATE")),
c.If("sign_dt != sign_end_part", c.Assign("__dt", "0")),
update_state,
dt_0_break
]),
c.Block([c.Statement("get_particle_backup(&particle_backup, particles, pnum)"),
dt_pos,
sign_end_part,
c.If("sign_dt != sign_end_part", c.Assign("__dt", "0")),
update_state,
c.Statement("break")])
)]
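        # Re-evaluate the kernel while the particle state is EVALUATE or
        # REPEAT; a zero dt also enters the loop and exits via dt_0_break.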
time_loop = c.While("(particles->state[pnum] == EVALUATE || particles->state[pnum] == REPEAT) || is_zero_dbl(particles->dt[pnum])", c.Block(body))
part_loop = c.For("pnum = 0", "pnum < num_particles", "++pnum",
c.Block([sign_end_part, reset_res_state, dt_pos, notstarted_continue, time_loop]))
fbody = c.Block([c.Value("int", "pnum, sign_dt, sign_end_part"),
c.Value("StatusCode", "res"),
c.Value("double", "__pdt_prekernels"),
c.Value("double", "__dt"), # 1e-8 = built-in tolerance for np.isclose()
sign_dt, particle_backup, part_loop])
fdecl = c.FunctionDeclaration(c.Value("void", "particle_loop"), args)
ccode += [str(c.FunctionBody(fdecl, fbody))]
return "\n\n".join(ccode)
|
OceanPARCELS/parcels
|
parcels/codegenerator.py
|
Python
|
mit
| 46,535
|
[
"VisIt"
] |
975dcf2f8eecd8db52b629f6662d8499d59b91e630aec6c9210ea71be7f9dd15
|
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Hexa : Creation of hexahedra
#### import os
import hexablock
geompy = hexablock.geompy
#---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
use_paraview = False
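# When True, intermediate models are dumped to .vtk files (replace0/1/2.vtk)
# so each step of the replacement can be inspected in ParaView.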
# ======================================================= test_piquage
def test_piquage () :
name = "piquage"
doc = hexablock.addDocument (name)
orig = doc.addVertex ( 8, 0, 0)
vx = doc.addVector ( 1 ,0, 0)
vy = doc.addVector ( 0, 1, 0)
vz = doc.addVector ( 0, 0, 1)
size_x = 5
size_y = 5
size_z = 3
size_cyl = 12
rint = 2
rext = 3
angle = 360
haut = 1
nr = 1
nh = 1
grid = doc.makeCartesianTop (size_x, size_y, size_z)
pipe = doc.makePipeUni (orig, vx, vz, rint, rext, angle,
haut, nr, size_cyl, nh)
c1 = grid.getVertexIJK (2, 1, size_z)
c2 = grid.getVertexIJK (3, 1, size_z)
p1 = pipe.getVertexIJK (1, 7, 1)
p2 = pipe.getVertexIJK (1, 8, 1)
qpattern = []
qtarget = []
for na in range (size_cyl) :
quad = pipe.getQuadIJ (0, na, 1)
quad.setColor (2)
qpattern.append (quad)
for ni in range (1, size_x-1) :
for nj in range (1, size_y-1) :
quad = grid.getQuadIJ (ni, nj, size_z)
quad.setColor (2)
qtarget.append (quad)
c1.setColor (6)
c2.setColor (4)
p1.setColor (6)
p2.setColor (4)
if use_paraview :
doc.saveVtk ("replace0.vtk")
doc.replace (qpattern, qtarget, p1,c1, p2,c2)
if use_paraview :
doc.saveVtk ("replace1.vtk")
pipe.remove()
if use_paraview :
doc.saveVtk ("replace2.vtk")
return doc
# ================================================================= Begin
doc = test_piquage ()
doc.addLaws (0.9, True)
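# Mesh the document; "maillage:hexas" is the name of the generated mesh.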
mesh_hexas = hexablock.mesh (doc, "maillage:hexas")
|
FedoraScientific/salome-hexablock
|
src/TEST_PY/test_unit/test_piquage.py
|
Python
|
lgpl-2.1
| 2,758
|
[
"VTK"
] |
7d95a18ecfc4ee656855d964be6073cf9a3b2bc32b0362cddfa00dfbcf824132
|
"""Leetcode 142. Linked List Cycle II
Medium
URL: https://leetcode.com/problems/linked-list-cycle-ii/
Given a linked list, return the node where the cycle begins.
If there is no cycle, return null.
To represent a cycle in the given linked list, we use an integer pos which
represents the position (0-indexed) in the linked list where tail connects to.
If pos is -1, then there is no cycle in the linked list.
Note: Do not modify the linked list.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: tail connects to node index 1
Explanation: There is a cycle in the linked list, where tail connects to the
second node.
Example 2:
Input: head = [1,2], pos = 0
Output: tail connects to node index 0
Explanation: There is a cycle in the linked list,
where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: no cycle
Explanation: There is no cycle in the linked list.
Follow-up:
Can you solve it without using extra space?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val):
self.val = val
self.next = None
class SolutionSet(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
Time complexity: O(n).
Space complexity: O(n).
"""
# Use set to collect visited nodes.
visited_nodes = set()
        # Visit each node and check whether it was already seen.
current = head
while current:
if current in visited_nodes:
return current
visited_nodes.add(current)
current = current.next
return None
class SolutionSlowFast(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
        If there is a cycle, the node where fast and slow meet has the
        same distance to the cycle's starting node as head does.
Specifically, suppose
- distance from head to loop start: x1
- distance from loop start to their meet: x2
- distance from their meet to loop start: x3
Then
- distance of fast moves when meets slow: x1 + x2 + x3 + x2.
- distance of slow moves when meets fast: x1 + x2.
- their relationship: x1 + x2 + x3 + x2 = 2 * (x1 + x2) => x1 = x3.
Time complexity: O(n).
Space complexity: O(1).
"""
        # Two pointers: slow moves 1 step per iteration; fast moves 2.
        slow, fast = head, head
        has_cycle = False
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                # Meeting point inside the cycle found; stop advancing.
                has_cycle = True
                break
        if not has_cycle:
            return None
        # Advance a pointer from head together with slow; since x1 = x3
        # (plus whole laps), they meet at the cycle's starting node.
        current = head
        while current != slow:
            current = current.next
            slow = slow.next
        return current
def main():
# Input: head = [3,2,0,-4], pos = 1 (val: 2)
# Output: 2
head = ListNode(3)
head.next = ListNode(2)
head.next.next = ListNode(0)
    head.next.next.next = ListNode(-4)
    head.next.next.next.next = head.next
    print(SolutionSet().detectCycle(head).val)
    print(SolutionSlowFast().detectCycle(head).val)
# head = [1,2], pos = 0 (val: 1)
# Output: 1
head = ListNode(1)
head.next = ListNode(2)
head.next.next = head
    print(SolutionSet().detectCycle(head).val)
    print(SolutionSlowFast().detectCycle(head).val)
# head = [1, 2], pos = -1
# Output: None
head = ListNode(1)
head.next = ListNode(2)
    print(SolutionSet().detectCycle(head))
    print(SolutionSlowFast().detectCycle(head))
# head = [-1,-7,7,-4,19,6,-9,-5,-2,-5], pos = 6 (val: -9)
head = ListNode(-1)
head.next = ListNode(-7)
head.next.next = ListNode(7)
head.next.next.next = ListNode(-4)
head.next.next.next.next = ListNode(19)
head.next.next.next.next.next = ListNode(6)
head.next.next.next.next.next.next = ListNode(-9)
head.next.next.next.next.next.next.next = ListNode(-5)
head.next.next.next.next.next.next.next.next = ListNode(-2)
head.next.next.next.next.next.next.next.next.next = ListNode(-5)
head.next.next.next.next.next.next.next.next.next.next = (
head.next.next.next.next.next.next)
    print(SolutionSet().detectCycle(head).val)
    print(SolutionSlowFast().detectCycle(head).val)
if __name__ == '__main__':
main()
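# Expected output, one value per line: 2, 2, 1, 1, None, None, -9, -9.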
|
bowen0701/algorithms_data_structures
|
lc0142_linked_list_cycle_ii.py
|
Python
|
bsd-2-clause
| 4,455
|
[
"VisIt"
] |
5816497dc11911cf6d973f9e269991dfd49499f0b4984d6c61cb672cb8a00423
|