| text stringlengths 12-1.05M | repo_name stringlengths 5-86 | path stringlengths 4-191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12-1.05M | keyword listlengths 1-23 | text_hash stringlengths 64-64 |
|---|---|---|---|---|---|---|---|
from degas.gridding import gridGalaxy
datadir = '/mnt/bigdata/erosolow/surveys/DEGAS/'
ppo = False
gridGalaxy(galaxy='IC0342', setup='12CO',
release='QA0', datadir=datadir,
PostprocOnly=ppo)
gallist = ['IC0342',
'NGC0337',
'NGC2146',
'NGC2903',
'NGC3147',
'NGC3521',
'NGC3631',
'NGC4030',
'NGC4038',
'NGC4258',
'NGC4321',
'NGC4414',
'NGC4501',
'NGC4535',
'NGC4569',
'NGC5055',
'NGC6946']
# gallist = gallist[-2:]
# gallist = ['IC0342']
HCNgals = gallist
# HCNgals = ['NGC2903', 'NGC2146', 'IC0342']
# HCNgals = ['IC0342']
for gal in HCNgals:
gridGalaxy(galaxy=gal, setup='HCN_HCO+',
release='QA0', datadir=datadir, PostprocOnly=ppo)
COgals = gallist
# COgals = ['NGC2903', 'NGC2146', 'IC0342']
# COgals = ['NGC2146']
# COgals = ['IC0342']
for gal in COgals:
gridGalaxy(galaxy=gal, setup='13CO_C18O',
release='QA0', datadir=datadir, PostprocOnly=ppo)
HCNgals = [
'NGC4038',
'NGC2146',
'NGC6946',
'NGC7331',
'NGC5248',
'NGC2903',
'NGC4321',
'NGC5055',
'NGC4501',
'NGC3147',
'NGC3521',
'NGC4414',
'NGC0337',
'NGC3631',
'NGC4030',
'NGC4258',
'NGC4535',
'NGC4569',
]
HCNgals=['IC0342']
COgals = [
'NGC4038',
'NGC2146',
'NGC7331',
'NGC2903',
'NGC4321',
'NGC5055',
'NGC4501',
'NGC3147',
'NGC0337',
'NGC4569',
'NGC3521',
'NGC3631',
'NGC4030',
'NGC4258',
'NGC4414',
'NGC4535',
'IC0342',
]
# gridGalaxy(galaxy='NGC5055', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC5055', setup='HCN_HCO+', release='QA0', datadir=datadir)
#gridGalaxy(galaxy='NGC7331', setup='HCN_HCO+',
# release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC6946', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4569', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4569', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4501', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4501', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4414', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4414', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4321', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4321', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='NGC4038', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='NGC4038', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC3521', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2903', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2903', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2146', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2146', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='IC0342', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='IC0342', setup='HCN_HCO+', release='QA0', datadir=datadir)
|
low-sky/degas
|
degas/examples/gridgals.py
|
Python
|
gpl-3.0
| 3,461
|
[
"Galaxy"
] |
82217dfb0a9f466161fba0552f2114fbd1c727fe84c7af203ef493493074d575
|
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
import logging
import json
from rmock.errors import RmockError
logger = logging.getLogger("rmock.jsonrpc")
#TODO: better error handling
#TODO: inherit from generic RPCError?
class JsonRPCError(RmockError):
def __init__(self, message, code=-32700):
RmockError.__init__(self, message, code)
self.message = message
self.code = code
class JsonRPCProtocol(object):
def __init__(self):
self.request_id = None
def loads(self, method, url, body):
if method != 'post':
raise JsonRPCError("unsupported http method: %s" % method)
parsed_params = json.loads(body)
logger.debug("parsed params: %s", parsed_params)
if isinstance(parsed_params, list):
parsed_params = parsed_params[0]
self.request_id = parsed_params['id']
call_params = parsed_params.get('params', {})
if isinstance(call_params, list):
call_params = call_params[0]
return (parsed_params['method'], (), call_params)
def dumps(self, result):
result_dict = {
"jsonrpc": "2.0",
"id": self.request_id,
}
if isinstance(result, JsonRPCError):
result_dict.update({
"error": {"code": result.code,
"message": result.message}
})
else:
result_dict.update({
"result": result,
})
return json.dumps(result_dict)
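# A minimal usage sketch (not part of the original module): round-trip a
# hypothetical JSON-RPC request through loads()/dumps(). The method name and
# params below are illustrative only.
if __name__ == "__main__":
    protocol = JsonRPCProtocol()
    body = json.dumps({"jsonrpc": "2.0", "id": 7,
                       "method": "echo", "params": {"value": 42}})
    method, args, kwargs = protocol.loads("post", "/rpc", body)
    # method == "echo", args == (), kwargs == {"value": 42}
    print(protocol.dumps({"echoed": kwargs["value"]}))
    # -> {"jsonrpc": "2.0", "id": 7, "result": {"echoed": 42}}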
| tikan/rmock | src/rmock/runners/http/protocols/jsonrpc.py | Python | lgpl-3.0 | 2,257 | ["VisIt"] | c5d477836ba13dc8df99b5138f4e92679efc41020f0fa0c1f25f41132c3e046e |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
A helper folder for solvent models
"""
_have_pe = False
try:
from . import pol_embed
_have_pe = True
except ImportError:
pass
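# Illustrative note (added comment, not part of Psi4): downstream code can
# gate optional polarizable-embedding features on the flag set above, e.g.
#
#     if not _have_pe:
#         raise ImportError("pol_embed could not be imported; "
#                           "polarizable embedding is unavailable")
#
# This guard is only a sketch of the intended use of _have_pe.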
| psi4/psi4 | psi4/driver/procrouting/solvent/__init__.py | Python | lgpl-3.0 | 1,057 | ["Psi4"] | e145264a1ccbc0e51214e9b334f413cda8c33feb0a619aae68d1399f4ed4cd51 |
import sys
import gobject
import gtk
import goocanvas
import math
import random
import re, db_conf
from phamerator_manage_db import *
class PhamCircle:
"""a class for drawing phamily circles on a pygoocanvas"""
def __init__(self, phamName, c,**kargs):
"""initializes circle center and radius"""
print 'creating PhamCircle...'
try:
self.radius = kargs["radius"]
except:
self.radius = 750
self.h, self.k = self.radius + 900,self.radius + 500
#The radii above change where the circle is positioned on the page
self.c = c
self.phamName = phamName
if 'verbose' in kargs.keys():
self.verbose = True
else:
self.verbose = False
#self.verbose = True
def set_threshold(self,thresh):
self.threshold = thresh
def getName(self,geneID):
exp = re.compile('\d+[.]*\d*$')
try:
name = get_phage_name_from_GeneID(self.c, geneID) + " (gp" + str(int((exp.search(get_gene_name_from_GeneID(self.c, geneID))).group().strip())) + ")"
except:
try:
name = get_phage_name_from_GeneID(self.c, geneID) + ' (gp' + str(float(get_gene_name_from_GeneID(self.c, geneID))) + ')'
except:
name = get_phage_name_from_GeneID(self.c, geneID) + ' (gp' + get_gene_name_from_GeneID(self.c, geneID) + ')'
return name
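# Added illustrative note (hypothetical values): if the database maps a geneID
# to phage name "Corndog" and gene name "Corndog_17", the regex above extracts
# the trailing number and getName() returns "Corndog (gp17)"; when no trailing
# number can be parsed, the raw gene name is appended instead.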
def create_canvas_model(self, nonMemberPhages, geneList, inputList,adjustment,threshold,**kargs):
"""Creates the canvas model to be drawn to goocanvas. This includes the polygon itself, and also the arcs connecting related sides"""
#########################################
"""Colors are grabbed and color boolean set"""
if 'allColor' in kargs.keys():
self.allColor = kargs['allColor']
self.singleColor = True
else:
self.singleColor = False
for key in kargs.keys():
#bothColor = kargs[bothColor]
self.clustalwColor = kargs['clustalwColor']
self.blastColor = kargs['blastColor']
#########################################
"""Labels containing gene/genome data are created and sorted alphabetically according to their comparator string """
self.adjustment = adjustment
self.threshold = threshold
self.nonMemberPhages = nonMemberPhages
self.geneList = geneList
self.inputList = InputData(inputList)
labels = []
rawLabels = nonMemberPhages + geneList
self.root = goocanvas.GroupModel()
self.polygon_n_labels = goocanvas.GroupModel()
for nMP in nonMemberPhages:
comparator = nMP
labels.append(Label(nMP, 'phage',comparator))
for gL in geneList:
comparator = self.getName(gL)
labels.append(Label(gL, 'gene',comparator))
clusters = {}
for clust in get_clusters(self.c, include_unclustered=False): # WAS FALSE
clusters[clust] = Cluster(clust)
clusters[''] = Cluster('')
#print 'clusters: %s' % clusters
for label in labels:
cluster = get_cluster_from_PhageID(self.c, get_PhageID_from_name(self.c,label.comparator.split(' ')[0]))
if cluster in clusters:
clusters[cluster].add(label)
else:
clusters[''].add(label)
self.clusters = clusters
#for clust in get_clusters(self.c, include_unclustered=False):
# print "################################################"
# print clust
# print "################################################"
# for lbl in clusters[clust].getLabels():
# print lbl.comparator
#print "##############################################"
#print "NON MEMBERS"
#print "##############################################"
#for lbl in clusters[''].getLabels():
# print lbl.comparator
#########################################
"""Set up all constants and initialize some variables to be used for drawing polygon/arcs"""
radius, h, k = self.radius, self.h, self.k
self.centers = {}
numItems = len(geneList) + len(nonMemberPhages)
self.numItems = numItems
rectWidth = (2*math.pi*radius)/numItems
rectHeight = rectWidth/4
self.rectHeight = rectHeight
font = str(int(rectWidth/2.0))
print 'using font size %s' % font
#font = '6'
bigFont = str(int((rectWidth/2)+10))
#bigFont = '50'
xPos = h-(rectWidth/2)
yPos = k-radius
rotationIncrement = 360.0/numItems
current = 0
currentRotation = 270
r = g = b = 0
fill = 'gray'
labelPad = radius * 0.33
print 'displaying pham %s' % self.phamName
item =goocanvas.TextModel(text='Pham %s' % str(self.phamName),
x=self.h, y=self.k/15.0, #(self.k - self.radius),
anchor=gtk.ANCHOR_CENTER,
font="Arial " + bigFont)
self.polygon_n_labels.add_child(item, -1)
#########################################
"""Iterate through all labels containing genome/gene names and draw them to n-sided polygon."""
lastY = yPos - 10
cluster_list = clusters.keys()
cluster_list.sort()
color_counter = 0
self.clusterCenters = {}
color_list = ['#ea55ff','#ffaaea','#ff5f00','#d7b9a8','#e47532','#ffdcc7','#6aff55','#ceffc7','#e0ff00','#1cffff','#9c38ff','#bb7ef8','#b8ff71','#ff5555','#ffaaaa','#1c55ff']
for cluster in cluster_list:
if color_counter < len(color_list):
current_color = color_list[color_counter]
color_counter = color_counter + 1
else:
current_color = fill
labels = clusters[cluster].getLabels()
first = 0
last = len(labels)-1
for label in labels:
if currentRotation >= 360:
currentRotation = currentRotation - 360
if currentRotation == 0:
signX = 1
signY = -1
elif currentRotation > 0 and currentRotation < 90:
signX = 1
signY = 1
elif currentRotation<180 and currentRotation >=90:
signX = -1
signY = 1
elif currentRotation<270 and currentRotation >=180:
signX = -1
signY = -1
elif currentRotation >= 270 and currentRotation < 360:
signX = 1
signY = -1
else:
print 'something is messed up'
sys.exit()
rgb = '#%02x%02x%02x' % (r,g,b)
centerX = h + (math.cos(math.radians(currentRotation))*radius)
centerY = k + (math.sin(math.radians(currentRotation))*radius)
self.centers[label.text] = [centerX,centerY]
if labels[first] == label:
self.clusterCenters[cluster] = [[centerX,centerY],]
if labels[last] == label:
self.clusterCenters[cluster].append([centerX,centerY])
item = goocanvas.RectModel(x=centerX-(rectWidth/2.0), y=centerY+(-1*rectHeight), width=rectWidth, height=rectHeight,
line_width=1.5,
radius_x=1.0,
radius_y=1.0,
stroke_color=rgb,
fill_color=current_color)
item.rotate(currentRotation+90, centerX,centerY)
self.polygon_n_labels.add_child(item, -1)
if signX == 1:
textX = h + (math.cos(math.radians(currentRotation))*(radius+rectHeight+6))
if signY == 1:
textY = k + (math.sin(math.radians(currentRotation))*(radius+rectHeight+6))
anchor = gtk.ANCHOR_NORTH_WEST
if signY == -1:
textY = k + (math.sin(math.radians(currentRotation))*(radius+rectHeight+6))
anchor = gtk.ANCHOR_SOUTH_WEST
else:
textX = h + (math.cos(math.radians(currentRotation))*(radius+rectHeight+6))
if signY == 1:
textY = k + (math.sin(math.radians(currentRotation))*(radius+rectHeight+6))
anchor = gtk.ANCHOR_NORTH_EAST
if signY == -1:
textY = k + (math.sin(math.radians(currentRotation))*(radius+rectHeight+6))
anchor = gtk.ANCHOR_SOUTH_EAST
if label.type == 'phage': text = label.text
else:
text = label.comparator
# if (90-rotationIncrement)<currentRotation<(90+rotationIncrement) or (270-rotationIncrement)<currentRotation<(270+rotationIncrement):
# textY = textY + (signY*int(font)*3)
# if (90-(rotationIncrement*2))<currentRotation<(90-rotationIncrement) or (90+rotationIncrement)<currentRotation<(90+rotationIncrement*2) or (270-(rotationIncrement*2))<currentRotation<(270-rotationIncrement) or (270+rotationIncrement)<currentRotation<(270+(rotationIncrement*2)):
# textY = textY + (signY*int(font)*2)
# if (90-(rotationIncrement*3))<currentRotation<(90-(rotationIncrement*2)) or (90+rotationIncrement*2)<currentRotation<(90+rotationIncrement*3) or (270-(rotationIncrement*3))<currentRotation<(270-(rotationIncrement*2)) or (270+(rotationIncrement*2))<currentRotation<(270+(rotationIncrement*3)):
# textY = textY + (signY*int(font))
textModel =goocanvas.TextModel(text=text,
x=textX, y=textY,
anchor=anchor,
#font="Arial " + str(rectHeight))
font="Arial " + font)
lastY = textY
self.polygon_n_labels.add_child(textModel, -1)
if currentRotation <= 90 or currentRotation >= 270:
textModel.rotate(currentRotation, textX, textY)
else:
textModel.rotate(180+currentRotation, textX, textY)
if self.verbose == True:
print str(text) + " at rotation of " + str(currentRotation) + " degrees, with a Y-sign value of " + str(signY) + "\n"
currentRotation = currentRotation + rotationIncrement
self.root.add_child(self.polygon_n_labels,-1)
self.first_time = True
root = self.update_arc_groupModel(adjustment)
return root
def showClusters(self):
self.clusterGroup = goocanvas.GroupModel()
cluster_list = self.clusters.keys()
cluster_list.sort()
for cluster in cluster_list:
startx,starty = self.clusterCenters[cluster][0]
endx,endy = self.clusterCenters[cluster][1]
startAngle = math.atan2(starty - self.k,startx - self.h)
endAngle = math.atan2(endy - self.k,endx - self.h)
startx,starty = self.h + (math.cos(startAngle)*(6 + self.radius)),self.k + (math.sin(startAngle)*(6 + self.radius))
endx,endy = self.h + (math.cos(endAngle)*(6 + self.radius)),self.k + (math.sin(endAngle)*(6 + self.radius))
"""<path d="M275,175 v-150 a150,150 0 0,0 -150,150 z" fill="yellow" stroke="blue" stroke-width="5" />""" #example path for pie shape
""" item = goocanvas.PathModel(data=svgPathString,stroke_color_rgba=self.colorData,line_width=lineWidth)""" #example pygoocanvas call
d = "M%s,%s A%s,%s 0 0,1 %s,%s" %(startx,starty,(self.radius+5),(self.radius+5),endx,endy)
item = goocanvas.PathModel(data=d,stroke_color="#969696",line_width=4)
self.clusterGroup.add_child(item,-1)
# add a label for the cluster on the circle, change number + self.radius to move cluster labels outward
if startAngle <= endAngle:
x = self.h + (math.cos((startAngle+endAngle)/2.0)*(200 + self.radius))
else:
x = self.h + (math.cos((startAngle+endAngle)/2.0)*-(200 + self.radius))
y = self.k + (math.sin((startAngle+endAngle)/2.0)*(200 + self.radius))
item = goocanvas.TextModel(text=cluster,
x=x, y=y,
anchor=gtk.ANCHOR_WEST,
font="Arial %s" % str(3500/self.numItems),
fill_color="#0000ff")
self.clusterGroup.add_child(item,-1)
self.root.add_child(self.clusterGroup,2)
def hideClusters(self):
self.root.remove_child(2)
def update_arc_groupModel(self,adjustment):
"""Iterate through all nodes containing arc data, and draw them to the n-sided polygon """
if adjustment != None:
self.adjustment = adjustment
if self.first_time == False:
self.hideClusters()
current = self.root.get_child(1)
self.root.remove_child(1)
self.first_time = False
self.arcGroup = goocanvas.GroupModel()
for node in self.inputList.nodeList:
if node.relation >= adjustment:
alpha = hex(255)
else:
scaler = ((node.relation/adjustment) - self.threshold)/.725
if scaler < 0:
scaler = 0
if scaler > 1:
scaler = 1
scaler = 255 * scaler
alpha = hex(int(scaler))
if alpha > 0x0:
if self.verbose == True:
print 'drawing arc from', self.getName(node.fromGene), 'to', self.getName(node.toGene),"\n",'using',node.clust_blast,"\n",'with scaled score ', node.relation,"\n",'and unscaled score',node.unScaledRelation,"\n","with an adjustment of",adjustment,"\n","and a score threshold of",str(self.threshold),"\n"
if self.singleColor == True:
self.arc = Arc(self.centers[node.fromGene],self.centers[node.toGene],node.relation, self.h, self.k, self.radius,self.allColor,alpha)
else:
clust_blast_info = node.clust_blast
try:
if clust_blast_info == "clustalw":
self.arc = Arc(self.centers[node.fromGene],self.centers[node.toGene],node.relation, self.h, self.k, self.radius,self.clustalwColor,alpha)
elif clust_blast_info == "blast":
self.arc = Arc(self.centers[node.fromGene],self.centers[node.toGene],node.relation, self.h, self.k, self.radius,self.blastColor,alpha)
else:
self.arc = Arc(self.centers[node.fromGene],self.centers[node.toGene],node.relation, self.h, self.k, self.radius,self.bothColor,alpha)
except KeyError:
pass
#if hasattr(self, 'arc'):
self.arcGroup.add_child(self.arc.draw_arc(self.radius), -1)
self.root.add_child(self.arcGroup,-1)
self.showClusters()
return self.root
#########################################
"""This is our handler for the "item-view-created" signal of the GooCanvasView. We connect to the "button-press-event" signal of new rect views."""
def on_item_view_created (self, view, item_view, item):
if isinstance(item, goocanvas.Rect):
item_view.connect("button_press_event", on_rect_button_press)
"""This handles button presses in item views. We simply output a message to the console."""
def on_rect_button_press (self, view, target, event):
print "rect item received button press event" + str(target)
return True
"""This is our handler for the "delete-event" signal of the window, which is emitted when the 'x' close button is clicked. We just exit here."""
def on_delete_event(self, window, event):
raise SystemExit
#########################################
"""End create_canvas_model"""
#########################################
class Arc:
"""a class that holds information about where to draw arcs"""
def __init__(self, p0, p1, relation, h, k, radius,colorData,alpha):
colorData = colorData.strip("#")
alpha = str(alpha)
alpha = alpha.replace('0x', '')
if len(alpha) == 1:
alpha = "0" + alpha
self.colorData = int(str(colorData+alpha),16)
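# Added explanatory note (hypothetical values): the lines above pack the RGB
# colour and the alpha channel into the single RGBA integer pygoocanvas
# expects, e.g. colorData "#ff0000" with alpha "ff" (fully opaque) yields
# int("ff0000ff", 16) = 0xFF0000FF.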
self.points = [p0, p1]
self.h, self.k, self.radius = h, k, radius
self.relation = relation
def draw_arc(self, radius):
"""Draw the arc connecting two related genes/genomes"""
#########################################
p0, p1 = self.points
toX, toY = p0[0], p0[1]
fromX, fromY = p1[0], p1[1]
lineWidth = self.relation*3
if toX < fromX:
toX, fromX, toY, fromY = self.swap_to_and_from(toX, fromX, toY, fromY)
if toX == fromX:
if fromY > toY:
toX, fromX, toY, fromY = self.swap_to_and_from(toX, fromX, toY, fromY)
dx = toX-fromX
dy = toY-fromY
distance = math.sqrt(dx**2+dy**2)
if ((2*radius)-distance)<1.5:
linedata = "M" + str(fromX) + "," + str(fromY) + "L" + str(toX) + "," + str(toY)
item = goocanvas.PathModel(data=linedata,stroke_color_rgba=self.colorData, line_width=lineWidth)
else:
svgPathString = self.make_svg_string(fromX, fromY, toX, toY)
item = goocanvas.PathModel(data=svgPathString,stroke_color_rgba=self.colorData,line_width=lineWidth)
if dx == 0:
rotation = 90
else:
rotation = math.degrees(math.atan(float(dy)/dx))
item.rotate(rotation,fromX,fromY)
return item
#########################################
def swap_to_and_from(self, toX, fromX, toY, fromY):
"""Swap the x and y coordinates to ensure the arc will be drawn from left to right """
return fromX, toX, fromY, toY
def make_svg_string(self, fromX, fromY, toX, toY):
"""Create the SVG string that describes the arc being drawn """
#########################################
dx = toX-fromX
dy = toY-fromY
distance = math.sqrt(dx**2+dy**2)
if dx==0:
angleFromPointToPoint=270
else:
angleFromPointToPoint = math.degrees(math.atan(float(dy)/dx))
dxCenter = self.h-fromX
dyCenter = self.k-fromY
if dxCenter==0:
if dyCenter > 0: angleFromPointToCenter=270 # remove if
else: angleFromPointToCenter=90 # delete me
else:
angleFromPointToCenter = math.degrees(math.atan(float(dyCenter)/dxCenter))
if fromX == self.h:
if angleFromPointToCenter == 90: sign = 1
else:
sign = -1
if fromX < self.h:
if angleFromPointToPoint > angleFromPointToCenter:
sign = 1
if angleFromPointToPoint < angleFromPointToCenter:
sign = -1
if fromX > self.h:
if angleFromPointToPoint > angleFromPointToCenter:
sign = -1
if angleFromPointToPoint < angleFromPointToCenter:
sign = 1
toX,toY = fromX+distance, fromY
con1y = fromY+(sign*(math.sin(60)) * distance *.3)
con2y = con1y
con1x = fromX + (0.25*distance)
con2x = fromX + (0.75*distance)
p = "M%s,%s " \
"C%s,%s " \
"%s,%s " \
"%s,%s " % (
fromX,fromY, #absolute start point
con1x,con1y,
con2x,con2y,
toX,toY
)
return p
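# Added explanatory note: the string returned above is a cubic Bezier of the
# form "M x0,y0 C c1x,c1y c2x,c2y x1,y1". The curve is laid out along a
# horizontal baseline of length `distance` (the end point is rewritten as
# fromX+distance, fromY), with both control points at the same vertical offset
# placed at 25% and 75% of the chord; draw_arc() later rotates the finished
# path back onto the real chord between the two genes.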
#########################################
"""End Arc"""
#########################################
class Label:
"""a class that holds information about phage and gene labels for pham circles"""
#########################################
def __init__(self, text, type, comparator):
self.text = text
self.type = type
self.comparator = comparator
def __cmp__(self, other):
this = self.comparator
other = other.comparator
if this.rfind(" (gp")!= -1 and other.rfind(" (gp")!= -1:
thisTuple = this.split(" (gp")
otherTuple = other.split(" (gp")
thisFirst = thisTuple[0]
otherFirst = otherTuple[0]
try: thisLast = float(thisTuple[1].replace(")",""))
except: thisLast = thisTuple[1].replace(")","")
try: otherLast = float(otherTuple[1].replace(")",""))
except: otherLast = otherTuple[1].replace(")","")
if thisFirst<otherFirst:
return -1
elif thisFirst>otherFirst:
return 1
elif thisLast==otherLast:
return 0
elif thisLast<otherLast:
return -1
elif thisLast>otherLast:
return 1
else:
if this>other:
return 1
if this<other:
return -1
if this == other:
return 0
#########################################
"""End Label"""
#########################################
#########################################
class Node:
"""An inner class containing the actual data """
def __init__(self,fromGene,toGene,clust_blast,rel,rel2):
self.fromGene = fromGene
self.toGene = toGene
self.clust_blast = clust_blast
self.relation = rel
self.unScaledRelation = rel2
self.ACONSTANT = 3
def __cmp__(self, other):
if self.relation > other.relation:
return -1
elif self.relation < other.relation:
return 1
else:
return 0
class InputData:
"""A class that holds input data and enables it to be sorted by relation score"""
def __init__(self,inputlist):
self.inputlist = inputlist
self.nodeList = []
def convert_score(score):
'''convert the score from a very small number to one that is probably between 0 and 200'''
score = '%e' % score
if score.find('e-') != -1: score = int(score.split('e-')[1])
elif score == 0.0 and score.find('e+') != -1: score = int(score.split('e+')[1])
if score > 3 and score < 21: score = 0.325
elif score >= 21 and score < 39: score = 0.325 + ((1-.325)/9)
elif score >= 39 and score < 57: score = 0.325 + ((1-.325)/9)*2
elif score >= 57 and score < 75: score = 0.325 + ((1-.325)/9)*3
elif score >= 75 and score < 93: score = 0.325 + ((1-.325)/9)*4
elif score >= 93 and score < 111: score = 0.325 + ((1-.325)/9)*5
elif score >=111 and score < 129: score = 0.325 + ((1-.325)/9)*6
elif score >=129 and score < 147: score = 0.325 + ((1-.325)/9)*7
elif score >=147 and score < 165: score = 0.325 + ((1-.325)/9)*8
elif score >=165 and score < 183: score = 0.325 + ((1-.325)/9)*9
elif score >=183 or score == 0.0: score = 1
return score
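# Added worked example (comment only): convert_score buckets the magnitude of
# a BLAST e-value's exponent into a relation score between 0.325 and 1. For
# instance, an e-value of 1e-75 has exponent 75, which falls in the 75-92
# bucket and gives 0.325 + 4*(1-0.325)/9 = 0.625; exponents of 183 or more
# (and exact zero e-values) saturate at 1.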
for currentFromGene,currentToGene,clust_blast_string,unScaledRelation in inputlist:
if clust_blast_string == 'blast': scaledRelation = convert_score(unScaledRelation)
else:
scaledRelation = unScaledRelation
unScaledRelation = "none"
self.nodeList.append(Node(currentFromGene,currentToGene,clust_blast_string,scaledRelation,unScaledRelation))
self.nodeList.sort()
#########################################
"""End InputData """
#########################################
class Cluster:
def __init__(self,name):
self.name = name
self.labels = []
def add(self,label):
self.labels.append(label)
def getLabels(self):
self.labels.sort()
return self.labels
def __cmp__(self, other):
if self.name > other.name:
return 1
elif self.name < other.name:
return -1
else:
return 0
def main(argv):
phamC = PhamCircle("test","")
print "hello main"
window = gtk.Window()
window.set_default_size(800, 800)
window.show()
#window.connect("delete_event", on_delete_event)
scrolled_win = gtk.ScrolledWindow()
scrolled_win.set_shadow_type(gtk.SHADOW_IN)
scrolled_win.show()
window.add(scrolled_win)
phamName = "11"
c = db_conf.db_conf(username='anonymous',password='anonymous',server='localhost',db='SEA').get_cursor()
GeneIDs = get_members_of_pham(c, phamName)
if (True):
memberPhages, nonMemberPhages = [], []
for GeneID in GeneIDs:
PhageID = get_PhageID_from_GeneID(c, GeneID)
if PhageID not in memberPhages: memberPhages.append(PhageID)
totalPhages = get_PhageIDs(c)
for p in totalPhages:
if p not in memberPhages: nonMemberPhages.append(get_phage_name_from_PhageID(c, p))
l = []
genes = []
genes = GeneIDs
for a in GeneIDs:
clustalwScores, blastScores = get_pham_scores(c, a)
for cs in clustalwScores:
if cs[2] >= 0.325: l.append((cs[0], cs[1], 'clustalw',cs[2]))
for bs in blastScores:
if bs[2] <= 1e-50: l.append((bs[0], bs[1], 'blast',bs[2]))
phamCircle = PhamCircle(phamName, c)
adjustment = 0.325
phamCircleCanvas = goocanvas.Canvas()
scrolled_win.add(phamCircleCanvas)
phamCircleCanvas.set_root_item_model(phamCircle.create_canvas_model(nonMemberPhages, genes, l,adjustment,'27.0',blastColor='#ff0000', clustalwColor='#0000ff'))
x, y = (800, 800)
phamCircleCanvas.set_size_request(x, y)
defaultPhamCircleCanvasSize = (x, y)
phamCircleCanvas.show()
window.window.set_cursor(None)
gtk.main()
if __name__ == "__main__":
main(sys.argv)
| byuphamerator/phamerator-dev | phamerator/PhamDisplay.py | Python | gpl-2.0 | 24,040 | ["BLAST"] | 8590e617295d60c2919eb6286b388e96471674f399f335d3a9c9f9f535314de2 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
from pymatgen.core.structure import Structure
"""
This module implements symmetry-related structure forms.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 9, 2012"
class SymmetrizedStructure(Structure):
"""
This class represents a symmetrized structure, i.e. a structure
where the spacegroup and symmetry operations are defined. It is
typically not constructed directly, but obtained by calling
pymatgen.symmetry.analyzer.SpacegroupAnalyzer.get_symmetrized_structure.
Args:
structure (Structure): Original structure
spacegroup (SpacegroupOperations): An input SpacegroupOperations from
SpacegroupAnalyzer.
equivalent_positions: Equivalent positions from SpacegroupAnalyzer.
.. attribute: equivalent_indices
indices of structure grouped by equivalency
"""
def __init__(self, structure, spacegroup, equivalent_positions,
wyckoff_letters):
self.spacegroup = spacegroup
u, inv = np.unique(equivalent_positions, return_inverse=True)
self.site_labels = equivalent_positions
# site_properties = structure.site_properties
# site_properties["wyckoff"] = [
# "%d%s" % (list(self.site_labels).count(self.site_labels[i]),
# wyckoff_letters[i]) for i in range(len(structure))]
super().__init__(
structure.lattice, [site.species for site in structure],
structure.frac_coords, site_properties=structure.site_properties)
self.equivalent_indices = [[] for i in range(len(u))]
self.equivalent_sites = [[] for i in range(len(u))]
wyckoff_symbols = [[] for i in range(len(u))]
for i, inv in enumerate(inv):
self.equivalent_indices[inv].append(i)
self.equivalent_sites[inv].append(self.sites[i])
wyckoff_symbols[inv].append(wyckoff_letters[i])
self.wyckoff_symbols = ["%d%s" % (len(w), w[0])
for w in wyckoff_symbols]
def find_equivalent_sites(self, site):
"""
Finds all symmetrically equivalent sites for a particular site
Args:
site (PeriodicSite): A site in the structure
Returns:
([PeriodicSite]): List of all symmetrically equivalent sites.
"""
for sites in self.equivalent_sites:
if site in sites:
return sites
raise ValueError("Site not in structure")
| dongsenfo/pymatgen | pymatgen/symmetry/structure.py | Python | mit | 2,764 | ["pymatgen"] | 020f8a0765c94c7379f5c2e54b5d390dbbd4de46eeb7bd3631b86aafa904b759 |
"""
Generate a hints database file from RNAseq/IsoSeq alignments for AugustusTMR/AugustusCGP.
"""
import collections
import itertools
import os
import logging
import pyfasta
import pysam
try:
from toil.fileStores import FileID
except ImportError:
from toil.fileStore import FileID
from toil.common import Toil
from toil.job import Job
import tools.dataOps
import tools.fileOps
import tools.mathOps
import tools.misc
import tools.procOps
import tools.toilInterface
import tools.transcripts
import tools.bio
from .exceptions import UserException
logger = logging.getLogger('cat')
def hints_db(hints_args, toil_options):
"""
Entry point for hints database Toil pipeline.
"""
def validate_import_bam(t, bam_path, fasta_sequences, genome):
validate_bam_fasta_pairs(bam_path, fasta_sequences, genome)
return [FileID.forPath(t.importFile('file://' + bam_path), bam_path),
FileID.forPath(t.importFile('file://' + bam_path + '.bai'), bam_path + '.bai')]
fasta = pyfasta.Fasta(hints_args.fasta)
fasta_sequences = {(x.split()[0], len(fasta[x])) for x in fasta.keys()}
with Toil(toil_options) as t:
if not t.options.restart:
# load the RNA-seq data, if we have any
bam_file_ids = {'BAM': {}, 'INTRONBAM': {}}
for dtype in ['BAM', 'INTRONBAM']:
if hints_args.genome not in hints_args.cfg[dtype]:
continue
for bam_path in hints_args.cfg[dtype][hints_args.genome]:
bam_file_ids[dtype][os.path.basename(bam_path)] = validate_import_bam(t, bam_path,
fasta_sequences,
hints_args.genome)
# load the IsoSeq data, if we have any
iso_seq_file_ids = []
if hints_args.genome in hints_args.cfg['ISO_SEQ_BAM']:
for bam_path in hints_args.cfg['ISO_SEQ_BAM'][hints_args.genome]:
validate_bam_fasta_pairs(bam_path, fasta_sequences, hints_args.genome)
iso_seq_file_ids.append(validate_import_bam(t, bam_path, fasta_sequences, hints_args.genome))
if hints_args.annotation_gp is None:
annotation_file_id = None
else:
annotation_file_id = FileID.forPath(t.importFile('file://' + hints_args.annotation_gp),
hints_args.annotation_gp)
if hints_args.protein_fasta is None:
protein_fasta_file_id = genome_fasta_file_id = None
else:
protein_fasta_file_id = FileID.forPath(t.importFile('file://' + hints_args.protein_fasta),
hints_args.protein_fasta)
genome_fasta_file_id = FileID.forPath(t.importFile('file://' + hints_args.fasta), hints_args.fasta)
input_file_ids = {'bams': bam_file_ids,
'iso_seq_bams': iso_seq_file_ids,
'annotation': annotation_file_id,
'protein_fasta': protein_fasta_file_id,
'genome_fasta': genome_fasta_file_id}
if len(input_file_ids['bams']) + len(input_file_ids['iso_seq_bams']) > 0:
logger.info('All BAMs validated for {}. Beginning Toil hints pipeline'.format(hints_args.genome))
disk_usage = tools.toilInterface.find_total_disk_usage(input_file_ids)
job = Job.wrapJobFn(setup_hints, input_file_ids, disk=disk_usage)
combined_hints = t.start(job)
else:
logger.info('Restarting Toil hints pipeline for {}.'.format(hints_args.genome))
combined_hints = t.restart()
tools.fileOps.ensure_file_dir(hints_args.hints_path)
t.exportFile(combined_hints, 'file://' + hints_args.hints_path)
def setup_hints(job, input_file_ids):
"""
Generates hints for a given genome with a list of BAMs. Will add annotation if it exists.
"""
# RNA-seq hints
filtered_bam_file_ids = {'BAM': collections.defaultdict(list), 'INTRONBAM': collections.defaultdict(list)}
for dtype, bam_dict in input_file_ids['bams'].items():
if len(bam_dict) == 0:
continue
# Since BAMs are valid, we can assume that they all share the same header
bam_file_id, bai_file_id = list(bam_dict.values())[0]
bam_path = job.fileStore.readGlobalFile(bam_file_id)
sam_handle = pysam.Samfile(bam_path)
# triple disk usage to deal with name sorted bam
disk_usage = tools.toilInterface.find_total_disk_usage([bam_file_id, bai_file_id]) * 3 + 2
# generate reference grouping that will be used downstream until final cat step
grouped_references = [tuple(x) for x in group_references(sam_handle)]
for original_path, (bam_file_id, bai_file_id) in bam_dict.items():
for reference_subset in grouped_references:
j = job.addChildJobFn(namesort_bam, bam_file_id, bai_file_id, reference_subset, disk_usage,
disk=disk_usage, cores=4, memory='16G')
filtered_bam_file_ids[dtype][reference_subset].append(j.rv())
# IsoSeq hints
iso_seq_hints_file_ids = []
iso_seq_file_ids = input_file_ids['iso_seq_bams']
if len(iso_seq_file_ids) > 0:
for bam_file_id, bai_file_id in iso_seq_file_ids:
disk_usage = tools.toilInterface.find_total_disk_usage([bam_file_id, bai_file_id])
j = job.addChildJobFn(generate_iso_seq_hints, bam_file_id, bai_file_id, disk=disk_usage)
iso_seq_hints_file_ids.append(j.rv())
# protein hints
if input_file_ids['protein_fasta'] is not None:
disk_usage = tools.toilInterface.find_total_disk_usage(input_file_ids['protein_fasta'])
j = job.addChildJobFn(generate_protein_hints, input_file_ids['protein_fasta'], input_file_ids['genome_fasta'],
disk=disk_usage)
protein_hints_file_id = j.rv()
else:
protein_hints_file_id = None
# annotation hints
if input_file_ids['annotation'] is not None:
disk_usage = tools.toilInterface.find_total_disk_usage(input_file_ids['annotation'])
j = job.addChildJobFn(generate_annotation_hints, input_file_ids['annotation'], disk=disk_usage)
annotation_hints_file_id = j.rv()
else:
annotation_hints_file_id = None
return job.addFollowOnJobFn(merge_bams, filtered_bam_file_ids, annotation_hints_file_id,
iso_seq_hints_file_ids, protein_hints_file_id).rv()
def namesort_bam(job, bam_file_id, bai_file_id, reference_subset, disk_usage, num_reads=50 ** 6):
"""
Slices out the reference subset from a BAM, name sorts that subset, then chunks the resulting reads up for
processing by filterBam.
"""
def write_bam(r, ns_handle):
"""Write to the path, returns file ID"""
outf = tools.fileOps.get_tmp_toil_file()
outf_h = pysam.Samfile(outf, 'wb', template=ns_handle)
for rec in r:
outf_h.write(rec)
outf_h.close()
return job.fileStore.writeGlobalFile(outf)
bam_path = job.fileStore.readGlobalFile(bam_file_id)
is_paired = bam_is_paired(bam_path)
job.fileStore.readGlobalFile(bai_file_id, bam_path + '.bai')
name_sorted = tools.fileOps.get_tmp_toil_file(suffix='name_sorted.bam')
cmd = [['samtools', 'view', '-b', bam_path] + list(reference_subset),
['sambamba', 'sort', '--tmpdir={}'.format(job.fileStore.getLocalTempDir()),
'-t', '4', '-m', '15G', '-o', '/dev/stdout', '-n', '/dev/stdin']]
tools.procOps.run_proc(cmd, stdout=name_sorted)
ns_handle = pysam.Samfile(name_sorted)
# this group may come up empty -- check to see if we have at least one mapped read
try:
_ = next(ns_handle)
except StopIteration:
return None
# reset file handle to start
ns_handle = pysam.Samfile(name_sorted)
filtered_file_ids = []
r = []
for qname, reads in itertools.groupby(ns_handle, lambda x: x.qname):
r.extend(list(reads))
if len(r) >= num_reads:
file_id = write_bam(r, ns_handle)
j = job.addChildJobFn(filter_bam, file_id, is_paired, disk='4G', memory='2G')
filtered_file_ids.append(j.rv())
r = []
# do the last bin, if it's non-empty
if len(r) > 0:
file_id = write_bam(r, ns_handle)
j = job.addChildJobFn(filter_bam, file_id, is_paired, disk='4G', memory='2G')
filtered_file_ids.append(j.rv())
return job.addFollowOnJobFn(merge_filtered_bams, filtered_file_ids, disk=disk_usage, memory='16G').rv()
def filter_bam(job, file_id, is_paired):
"""
Filters a name-sorted bam, returns a bam re-sorted by position
"""
bam_path = job.fileStore.readGlobalFile(file_id)
assert os.path.getsize(bam_path) > 0
tmp_filtered = tools.fileOps.get_tmp_toil_file()
filter_cmd = ['filterBam', '--uniq', '--in', bam_path, '--out', tmp_filtered]
if is_paired is True:
filter_cmd.extend(['--paired', '--pairwiseAlignments'])
tools.procOps.run_proc(filter_cmd)
if os.path.getsize(tmp_filtered) == 0:
raise RuntimeError('After filtering one BAM subset became empty. This could be bad.')
out_filter = tools.fileOps.get_tmp_toil_file()
sort_cmd = ['sambamba', 'sort', tmp_filtered, '-o', out_filter, '-t', '1']
tools.procOps.run_proc(sort_cmd)
return job.fileStore.writeGlobalFile(out_filter)
def merge_filtered_bams(job, filtered_file_ids):
"""
Merges filtered BAMs
"""
local_paths = [job.fileStore.readGlobalFile(x) for x in filtered_file_ids]
fofn = tools.fileOps.get_tmp_toil_file()
with open(fofn, 'w') as outf:
for l in local_paths:
if os.environ.get('CAT_BINARY_MODE') == 'singularity':
l = tools.procOps.singularify_arg(l)
outf.write(l + '\n')
out_bam = tools.fileOps.get_tmp_toil_file()
cmd = ['samtools', 'merge', '-b', fofn, out_bam]
tools.procOps.run_proc(cmd)
return job.fileStore.writeGlobalFile(out_bam)
def merge_bams(job, filtered_bam_file_ids, annotation_hints_file_id, iso_seq_hints_file_ids,
protein_hints_file_id):
"""
Takes a dictionary mapping reference chunks to filtered BAMs. For each reference chunk, these BAMs will be
first concatenated then sorted, then passed off to hint building. Passes through the annotation/protein hints file
IDs for inclusion.
"""
merged_bam_file_ids = {'BAM': {}, 'INTRONBAM': {}}
for dtype in filtered_bam_file_ids:
for ref_group, file_ids in filtered_bam_file_ids[dtype].items():
file_ids = [x for x in file_ids if x is not None] # some groups will end up empty
if len(file_ids) > 0:
disk_usage = tools.toilInterface.find_total_disk_usage(file_ids)
merged_bam_file_ids[dtype][ref_group] = job.addChildJobFn(cat_sort_bams, file_ids, disk=disk_usage,
memory='16G', cores=4).rv()
return job.addFollowOnJobFn(build_hints, merged_bam_file_ids, annotation_hints_file_id, iso_seq_hints_file_ids,
protein_hints_file_id).rv()
def cat_sort_bams(job, bam_file_ids):
"""
Takes a list of bam file IDs and combines/sorts them.
TODO: the 4096 file hack below is hacky. Should only be a problem for very fragmented references.
"""
bamfiles = [job.fileStore.readGlobalFile(x) for x in bam_file_ids]
# cat only 4095 bams at a time to avoid bash command length problems
catfile = tools.fileOps.get_tmp_toil_file()
sam_iter = tools.dataOps.grouper(bamfiles, 4095)
# do the first one
cmd = ['samtools', 'cat', '-o', catfile]
cmd.extend(next(sam_iter))
tools.procOps.run_proc(cmd)
# do any subsequent ones left, creating a new file each time
for more in sam_iter:
old_catfile = catfile
catfile = tools.fileOps.get_tmp_toil_file()
cmd = ['samtools', 'cat', '-o', catfile, old_catfile]
cmd.extend(more)
tools.procOps.run_proc(cmd)
# combine and merge
merged = tools.fileOps.get_tmp_toil_file()
cmd = ['sambamba', 'sort', catfile, '-o', merged, '-t', '4', '-m', '15G']
tools.procOps.run_proc(cmd)
return job.fileStore.writeGlobalFile(merged)
def generate_protein_hints(job, protein_fasta_file_id, genome_fasta_file_id):
"""
Entry point to a BLAT-based protein alignment pipeline
"""
disk_usage = tools.toilInterface.find_total_disk_usage(genome_fasta_file_id)
protein_fasta = job.fileStore.readGlobalFile(protein_fasta_file_id)
cmd = ['pyfasta', 'flatten', protein_fasta]
tools.procOps.run_proc(cmd)
protein_handle = tools.bio.get_sequence_dict(protein_fasta)
# group up proteins for sub-jobs
results = []
for chunk in tools.dataOps.grouper(protein_handle.items(), 100):
j = job.addChildJobFn(run_protein_aln, chunk, genome_fasta_file_id, disk=disk_usage, memory='8G')
results.append(j.rv())
# return merged results
return job.addFollowOnJobFn(convert_protein_aln_results_to_hints, results, memory='8G').rv()
def run_protein_aln(job, protein_subset, genome_fasta_file_id):
"""
Runs BLAT on a small chunk of proteins
"""
genome_fasta = job.fileStore.readGlobalFile(genome_fasta_file_id)
# write proteins to fasta
protein_fasta = tools.fileOps.get_tmp_toil_file()
with open(protein_fasta, 'w') as outf:
for name, seq in protein_subset:
tools.bio.write_fasta(outf, name, str(seq))
# perform alignment
tmp_exonerate = tools.fileOps.get_tmp_toil_file()
cmd = ['exonerate', '--model', 'protein2genome', '--showvulgar', 'no', '--showalignment', 'no',
'--showquerygff', 'yes', protein_fasta, genome_fasta]
tools.procOps.run_proc(cmd, stdout=tmp_exonerate)
return job.fileStore.writeGlobalFile(tmp_exonerate)
def convert_protein_aln_results_to_hints(job, results):
"""
Concatenates exonerate protein2genome, converts to hints
"""
merged_exonerate = tools.fileOps.get_tmp_toil_file()
with open(merged_exonerate, 'w') as outf:
for r in results:
f = job.fileStore.readGlobalFile(r)
outf.write(open(f).read())
# sort psl and generate hints
tmp_sorted = tools.fileOps.get_tmp_toil_file()
tools.misc.sort_gff(merged_exonerate, tmp_sorted)
out_hints = tools.fileOps.get_tmp_toil_file()
cmd = ['exonerate2hints.pl', '--in={}'.format(tmp_sorted), '--CDSpart_cutoff=5', '--out={}'.format(out_hints)]
tools.procOps.run_proc(cmd)
return job.fileStore.writeGlobalFile(out_hints)
def build_hints(job, merged_bam_file_ids, annotation_hints_file_id, iso_seq_hints_file_ids, protein_hints_file_id):
"""
Takes the merged BAM for a genome and produces both intron and exon hints.
"""
intron_hints_file_ids = []
exon_hints_file_ids = []
for dtype in merged_bam_file_ids:
for ref_group, file_ids in merged_bam_file_ids[dtype].items():
intron_hints_file_ids.append(job.addChildJobFn(build_intron_hints, file_ids).rv())
if dtype == 'BAM':
exon_hints_file_ids.append(job.addChildJobFn(build_exon_hints, file_ids).rv())
disk_usage = tools.toilInterface.find_total_disk_usage(itertools.chain.from_iterable([intron_hints_file_ids,
exon_hints_file_ids,
iso_seq_hints_file_ids,
[annotation_hints_file_id,
protein_hints_file_id]]))
return job.addFollowOnJobFn(cat_hints, intron_hints_file_ids, exon_hints_file_ids, annotation_hints_file_id,
iso_seq_hints_file_ids, protein_hints_file_id, disk=disk_usage).rv()
def build_intron_hints(job, merged_bam_file_id):
"""Builds intronhints from a BAM. Returns a fileID to the hints."""
bam_file = job.fileStore.readGlobalFile(merged_bam_file_id)
intron_gff_path = tools.fileOps.get_tmp_toil_file()
cmd = ['bam2hints', '--intronsonly', '--in', bam_file, '--out', intron_gff_path]
tools.procOps.run_proc(cmd)
return job.fileStore.writeGlobalFile(intron_gff_path)
def build_exon_hints(job, merged_bam_file_id):
"""Builds exonhints from a BAM Returns a fileID to the hints."""
bam_file = job.fileStore.readGlobalFile(merged_bam_file_id)
cmd = [['bam2wig', bam_file],
['wig2hints.pl', '--width=10', '--margin=10', '--minthresh=2', '--minscore=4', '--prune=0.1', '--src=W',
'--type=ep', '--UCSC=/dev/null', '--radius=4.5', '--pri=4', '--strand=.']]
exon_gff_path = tools.fileOps.get_tmp_toil_file()
tools.procOps.run_proc(cmd, stdout=exon_gff_path)
return job.fileStore.writeGlobalFile(exon_gff_path)
def generate_iso_seq_hints(job, bam_file_id, bai_file_id):
"""
Generates hints from an IsoSeq BAM. Due to the usual depth of IsoSeq data, there is no real need to split it up by
chunks of reference sequence.
Adapted from http://bioinf.uni-greifswald.de/bioinf/wiki/pmwiki.php?n=Augustus.PacBioGMAP
"""
bam_path = job.fileStore.readGlobalFile(bam_file_id)
job.fileStore.readGlobalFile(bai_file_id, bam_path + '.bai')
pacbio_gff_path = tools.fileOps.get_tmp_toil_file()
cmd = [['samtools', 'view', '-b', '-F', '4', bam_path], # unmapped reads causes bamToPsl to crash
['bamToPsl', '-nohead', '/dev/stdin', '/dev/stdout'],
['sort', '-n', '-k', '16,16'],
['sort', '-s', '-k', '14,14'],
['perl', '-ne', '@f=split; print if ($f[0]>=100)'],
['blat2hints.pl', '--source=PB', '--nomult', '--ep_cutoff=20', '--in=/dev/stdin',
'--out={}'.format(pacbio_gff_path)]]
tools.procOps.run_proc(cmd)
return job.fileStore.writeGlobalFile(pacbio_gff_path)
def generate_annotation_hints(job, annotation_hints_file_id):
"""
Converts the annotation file into hints.
Hints are derived from both CDS exonic intervals and intron intervals
"""
annotation_gp = job.fileStore.readGlobalFile(annotation_hints_file_id)
tx_dict = tools.transcripts.get_gene_pred_dict(annotation_gp)
hints = []
for tx_id, tx in tx_dict.items():
if tx.cds_size == 0:
continue
# rather than try to re-do the arithmetic, we will use the get_bed() function to convert this transcript
cds_tx = tools.transcripts.Transcript(tx.get_bed(new_start=tx.thick_start, new_stop=tx.thick_stop))
for intron in cds_tx.intron_intervals:
r = [intron.chromosome, 'a2h', 'intron', intron.start + 1, intron.stop, 0, intron.strand, '.',
'grp={};src=M;pri=2'.format(tx_id)]
hints.append(r)
for exon in cds_tx.exon_intervals:
r = [exon.chromosome, 'a2h', 'CDS', exon.start + 1, exon.stop, 0, exon.strand, '.',
'grp={};src=M;pri=2'.format(tx_id)]
hints.append(r)
annotation_hints_gff = tools.fileOps.get_tmp_toil_file()
tools.fileOps.print_rows(annotation_hints_gff, hints)
return job.fileStore.writeGlobalFile(annotation_hints_gff)
def cat_hints(job, intron_hints_file_ids, exon_hints_file_ids, annotation_hints_file_id, iso_seq_hints_file_ids,
protein_hints_file_id):
"""Returns file ID to combined, sorted hints"""
cat_hints = tools.fileOps.get_tmp_toil_file()
with open(cat_hints, 'w') as outf:
for file_id in itertools.chain(intron_hints_file_ids, exon_hints_file_ids):
f = job.fileStore.readGlobalFile(file_id)
for line in open(f):
outf.write(line)
for file_id in [annotation_hints_file_id, protein_hints_file_id]:
if file_id is not None:
f = job.fileStore.readGlobalFile(file_id)
for line in open(f):
outf.write(line)
# sorted so that hints that should be summarized are below each other
cmd = [['sort', '-n', '-k4,4', cat_hints],
['sort', '-s', '-n', '-k5,5'],
['sort', '-s', '-k3,3'],
['sort', '-s', '-k1,1'],
['join_mult_hints.pl']]
combined_hints = tools.fileOps.get_tmp_toil_file()
tools.procOps.run_proc(cmd, stdout=combined_hints)
# don't add the IsoSeq until after join_mult_hints because we don't want them to be joined
with open(combined_hints, 'a') as outf:
for file_id in iso_seq_hints_file_ids:
f = job.fileStore.readGlobalFile(file_id)
for line in open(f):
outf.write(line)
# sort the combined hints, now sorting by chrom and start
sorted_combined_hints = tools.fileOps.get_tmp_toil_file()
tools.misc.sort_gff(combined_hints, sorted_combined_hints)
return job.fileStore.writeGlobalFile(sorted_combined_hints)
###
# Functions
###
def validate_bam_fasta_pairs(bam_path, fasta_sequences, genome):
"""
Make sure that this BAM is actually aligned to this fasta. Every sequence should be the same length. Sequences
can exist in the reference that do not exist in the BAM, but not the other way around.
"""
handle = pysam.Samfile(bam_path, 'rb')
bam_sequences = {(n, s) for n, s in zip(*[handle.references, handle.lengths])}
difference = bam_sequences - fasta_sequences
if len(difference) > 0:
base_err = 'Error: BAM {} has the following sequence/length pairs not found in the {} fasta: {}.'
err = base_err.format(bam_path, genome, ','.join(['-'.join(map(str, x)) for x in difference]))
raise UserException(err)
missing_seqs = fasta_sequences - bam_sequences
if len(missing_seqs) > 0:
base_msg = 'BAM {} does not have the following sequence/length pairs in its header: {}.'
msg = base_msg.format(bam_path, ','.join(['-'.join(map(str, x)) for x in missing_seqs]))
logger.warning(msg)
def bam_is_paired(bam_path, num_reads=20000, paired_cutoff=0.75):
"""
Infers the paired-ness of a bam file.
"""
sam = pysam.Samfile(bam_path)
count = 0
for rec in itertools.islice(sam, num_reads):
if rec.is_paired:
count += 1
if tools.mathOps.format_ratio(count, num_reads) > 0.75:
return True
elif tools.mathOps.format_ratio(count, num_reads) < 1 - paired_cutoff:
return False
else:
raise UserException("Unable to infer pairing from bamfile {}".format(bam_path))
def group_references(sam_handle, num_bases=10 ** 7, max_seqs=1000):
"""
Group up references by num_bases, unless that exceeds max_seqs. A greedy implementation of the bin packing problem.
"""
name_iter = zip(*[sam_handle.references, sam_handle.lengths])
name, size = next(name_iter)
this_bin = [name]
bin_base_count = size
num_seqs = 1
for name, size in name_iter:
bin_base_count += size
num_seqs += 1
if bin_base_count >= num_bases or num_seqs > max_seqs:
yield this_bin
this_bin = [name]
bin_base_count = size
num_seqs = 1
else:
this_bin.append(name)
yield this_bin
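# Added illustrative note: group_references() packs reference sequences into
# bins of roughly num_bases total length (default 10 Mb), with at most
# max_seqs names per bin. For a hypothetical BAM header with chr1 = 8 Mb,
# chr2 = 3 Mb, chr3 = 1 Mb and chr4 = 6 Mb it yields [chr1], then
# [chr2, chr3], then [chr4]: a bin is closed as soon as adding the next
# sequence would reach the limit, and that sequence starts the next bin.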
| ComparativeGenomicsToolkit/Comparative-Annotation-Toolkit | cat/hints_db.py | Python | apache-2.0 | 23,705 | ["pysam"] | ffd7e247e263807e723da1952632c8a2a42d79a93a7674b1d4aba6ece6c7051c |
__author__ = 'Matteo'
__doc__ = ''''''
N = "\n"
T = "\t"
# N="<br/>"
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
import csv
import math
import re
from difflib import Differ
import itertools
def get_genome(fp):
return list(SeqIO.parse(open(fp, "rU"), "genbank"))
def genelooper(genome):
return [gene for chr in genome for gene in chr.features if gene.type =="CDS"]
def check4():
fieldnames="id id_raw element_raw element length query_coverage plus query_start query_end sbj_start sbj_end sense al_sbj al_query mutationsT2D mutations".split()
w=csv.DictWriter(open('TMorf_onContigs.csv','w'),fieldnames=fieldnames)
w.writeheader()
for rec in NCBIXML.parse(open("align4.xml")):
id=rec.query
if rec.alignments:
align=rec.alignments[0]
hit=align.hsps[0]
if hit.sbjct_start<hit.sbjct_end:
strand="same"
else:
strand="opposite"
#print(rec.query + " is antisense")
ps=Seq(hit.sbjct.replace('-','')).translate()
pq=Seq(hit.query.replace('-','')).translate()
mutations=[]
for i in range(len(ps)):
if len(pq)-1<i:
mutations.append(str(ps[i])+str(i+1)+'?')
elif ps[i] != pq[i]:
mutations.append(str(ps[i])+str(i+1)+str(pq[i]))
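# Added explanatory note (hypothetical values): the mutation strings collected
# above use subject-position-query notation, e.g. "A15T" means the subject has
# alanine at aligned position 15 where the query has threonine; a trailing "?"
# (e.g. "L203?") marks positions where the translated query is shorter than
# the subject.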
best={"id":id,
"id_raw": rec.query,
"element_raw":align.hit_def,
"element":'NA',
"length":rec.query_length,
"query_coverage":(hit.query_end-hit.query_start+1)/rec.query_length,
"plus":hit.positives,
"query_start":hit.query_start,
"query_end":hit.query_end,
"sbj_start":hit.sbjct_start,
"sbj_end":hit.sbjct_end,
"sense":strand,
"al_sbj":hit.sbjct,
"al_query":hit.query,
"mutationsT2D":"+".join(mutations),
"mutations":len(mutations)}
else:
best={"id":id,
"id_raw": rec.query,
"element_raw":"",
"element":"",
"length":rec.query_length,
"query_coverage":0,
"plus":0,
"query_start":0,
"query_end":0,
"sbj_start":0,
"sbj_end":0,
"sense":0,
"al_sbj":0,
"al_query":0,
"mutationsT2D":"NA",
"mutations":"NA"}
w.writerow(best)
if __name__ == "__main__":
check4()
| matteoferla/Geobacillus | geo_prot_eq2.py | Python | gpl-2.0 | 2,897 | ["BLAST"] | 45f17d9f87d8d51627c31f8ac840fc6cc3b627eee71b553e0eebff00f32082d1 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
| dgasmith/psi4 | psi4/driver/util/__init__.py | Python | lgpl-3.0 | 913 | ["Psi4"] | 869f86b115eca439325f14c3aa410f42b787f358bd8b0e8ec63063094ad22d4c |
# loadKineticModel.py ---
#
# Filename: loadKineticModel.py
# Description:
# Author: Upi Bhalla
# Maintainer:
# Created: Sat Oct 04 12:14:15 2014 (+0530)
# Version:
# Last-Updated: Tue Apr 18 17:40:00 2017(+0530)
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import moose
import pylab
import numpy
import sys
import os
def main():
"""
This example illustrates loading, running, and saving a kinetic
model defined in kkit format. It uses a default kkit model but
you can specify another using the command line
``python loadKineticModel.py filepath runtime solver``.
We use default solver as gsl.
The model already defines a couple of plots and sets the runtime 20 secs.
"""
defaultsolver = "gsl" # Pick any of gsl, gssa, ee..
defaultfile = '../genesis/kkit_objects_example.g'
defaultruntime = 20.0
try:
sys.argv[1]
except IndexError:
filepath = defaultfile
else:
filepath = sys.argv[1]
if not os.path.exists(filepath):
print ("Filename or path does not exist \"%s\" loading default file \"%s\" " %(filepath ,defaultfile))
filepath = defaultfile
try:
sys.argv[2]
except :
runtime = defaultruntime
else:
runtime = float(sys.argv[2])
try:
sys.argv[3]
except :
solver = defaultsolver
else:
solver = sys.argv[3]
modelId = moose.loadModel( filepath, 'model', solver )
# Increase volume so that the stochastic solver gssa
# gives an interesting output
#compt = moose.element( '/model/kinetics' )
#compt.volume = 1e-19
moose.reinit()
moose.start( runtime )
# Report parameters
'''
for x in moose.wildcardFind( '/model/kinetics/##[ISA=PoolBase]' ):
print x.name, x.nInit, x.concInit
for x in moose.wildcardFind( '/model/kinetics/##[ISA=ReacBase]' ):
print x.name, 'num: (', x.numKf, ', ', x.numKb, '), conc: (', x.Kf, ', ', x.Kb, ')'
for x in moose.wildcardFind('/model/kinetics/##[ISA=EnzBase]'):
print x.name, '(', x.Km, ', ', x.numKm, ', ', x.kcat, ')'
'''
# Display all plots.
for x in moose.wildcardFind( '/model/#graphs/conc#/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * x.dt
pylab.plot( t, x.vector, label=x.name )
pylab.legend()
pylab.show()
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| BhallaLab/moose-examples | snippets/loadKineticModel.py | Python | gpl-2.0 | 3,284 | ["MOOSE"] | 7de109b8ba6cff8d2440907c6e73af47d139a3a14fb639299fc76be6f3b467a6 |
"""
:class:`ASE_OTF` is the on-the-fly training module for ASE; it does not include its own molecular dynamics engine
and needs to be used together with an ASE MD engine.
"""
import os
import json
import sys
import inspect
import pickle
from time import time
from copy import deepcopy
import logging
import numpy as np
from flare.ase.npt import NPT_mod
from ase.md.nvtberendsen import NVTBerendsen
from ase.md.nptberendsen import NPTBerendsen
from ase.md.verlet import VelocityVerlet
from ase.md.langevin import Langevin
from flare.ase.nosehoover import NoseHoover
from ase import units
from ase.io import read, write
import flare
from flare.otf import OTF
from flare.ase.atoms import FLARE_Atoms
from flare.ase.calculator import FLARE_Calculator
import flare.ase.dft as dft_source
class ASE_OTF(OTF):
"""
On-the-fly training module using ASE MD engine, a subclass of OTF.
Args:
atoms (ASE Atoms): the ASE Atoms object for the on-the-fly MD run.
calculator: ASE calculator. Must have "get_uncertainties" method
implemented.
timestep: the timestep in MD. Please use ASE units, e.g. if the
timestep is 1 fs, then set `timestep = 1 * units.fs`
number_of_steps (int): the total number of steps for MD.
dft_calc (ASE Calculator): any ASE calculator is supported,
e.g. Espresso, VASP etc.
        md_engine (str): the name of the MD integrator/thermostat; only
            `VelocityVerlet`, `NVTBerendsen`, `NPTBerendsen`, `NPT`,
            `Langevin` and `NoseHoover` are supported.
md_kwargs (dict): Specify the args for MD as a dictionary, the args are
as required by the ASE MD modules consistent with the `md_engine`.
        trajectory (ASE Trajectory): default `None`; not recommended,
            currently experimental.
The following arguments are for on-the-fly training, the user can also
refer to :class:`flare.otf.OTF`
Args:
prev_pos_init ([type], optional): Previous positions. Defaults
to None.
rescale_steps (List[int], optional): List of frames for which the
velocities of the atoms are rescaled. Defaults to [].
rescale_temps (List[int], optional): List of rescaled temperatures.
Defaults to [].
calculate_energy (bool, optional): If True, the energy of each
frame is calculated with the GP. Defaults to False.
write_model (int, optional): If 0, write never. If 1, write at
end of run. If 2, write after each training and end of run.
If 3, write after each time atoms are added and end of run.
If 4, write after each training and end of run, and back up
after each write.
std_tolerance_factor (float, optional): Threshold that determines
when DFT is called. Specifies a multiple of the current noise
hyperparameter. If the epistemic uncertainty on a force
component exceeds this value, DFT is called. Defaults to 1.
skip (int, optional): Number of frames that are skipped when
dumping to the output file. Defaults to 0.
init_atoms (List[int], optional): List of atoms from the input
structure whose local environments and force components are
used to train the initial GP model. If None is specified, all
atoms are used to train the initial GP. Defaults to None.
output_name (str, optional): Name of the output file. Defaults to
'otf_run'.
max_atoms_added (int, optional): Number of atoms added each time
DFT is called. Defaults to 1.
freeze_hyps (int, optional): Specifies the number of times the
hyperparameters of the GP are optimized. After this many
updates to the GP, the hyperparameters are frozen.
Defaults to 10.
n_cpus (int, optional): Number of cpus used during training.
Defaults to 1.
"""
def __init__(
self,
atoms,
timestep,
number_of_steps,
dft_calc,
md_engine,
md_kwargs,
calculator=None,
trajectory=None,
**otf_kwargs,
):
self.structure = FLARE_Atoms.from_ase_atoms(atoms)
if calculator is not None:
self.structure.calc = calculator
self.timestep = timestep
self.md_engine = md_engine
self.md_kwargs = md_kwargs
self._kernels = None
if md_engine == "VelocityVerlet":
MD = VelocityVerlet
elif md_engine == "NVTBerendsen":
MD = NVTBerendsen
elif md_engine == "NPTBerendsen":
MD = NPTBerendsen
elif md_engine == "NPT":
MD = NPT_mod
elif md_engine == "Langevin":
MD = Langevin
elif md_engine == "NoseHoover":
MD = NoseHoover
else:
raise NotImplementedError(md_engine + " is not implemented in ASE")
self.md = MD(
atoms=self.structure,
timestep=timestep,
trajectory=trajectory,
**md_kwargs,
)
force_source = dft_source
self.flare_calc = self.structure.calc
# Convert ASE timestep to ps for the output file.
flare_dt = timestep / (units.fs * 1e3)
super().__init__(
dt=flare_dt,
number_of_steps=number_of_steps,
gp=self.flare_calc.gp_model,
force_source=force_source,
dft_loc=dft_calc,
dft_input=self.structure,
**otf_kwargs,
)
self.flare_name = self.output_name + "_flare.json"
self.dft_name = self.output_name + "_dft.pickle"
self.structure_name = self.output_name + "_atoms.json"
self.checkpt_files = [
self.checkpt_name,
self.flare_name,
self.dft_name,
self.structure_name,
self.dft_xyz,
]
def get_structure_from_input(self, prev_pos_init):
if prev_pos_init is None:
self.structure.prev_positions = np.copy(self.structure.positions)
else:
assert len(self.structure.positions) == len(
self.structure.prev_positions
), "Previous positions and positions are not same length"
self.structure.prev_positions = prev_pos_init
def initialize_train(self):
super().initialize_train()
# TODO: Turn this into a "reset" method.
if not isinstance(self.structure.calc, FLARE_Calculator):
self.flare_calc.reset()
self.structure.calc = self.flare_calc
if self.md_engine == "NPT":
if not self.md.initialized:
self.md.initialize()
else:
if self.md.have_the_atoms_been_changed():
raise NotImplementedError(
"You have modified the atoms since the last timestep."
)
def compute_properties(self):
"""
Compute energies, forces, stresses, and their uncertainties with
        the FLARE ASE calculator, and write the results to the
OTF structure object.
"""
# Change to FLARE calculator if necessary.
if not isinstance(self.structure.calc, FLARE_Calculator):
self.flare_calc.reset()
self.structure.calc = self.flare_calc
if not self.flare_calc.results:
self.structure.calc.calculate(self.structure)
def md_step(self):
"""
Get new position in molecular dynamics based on the forces predicted by
FLARE_Calculator or DFT calculator
"""
# Update previous positions.
self.structure.prev_positions = np.copy(self.structure.positions)
# Reset FLARE calculator.
if self.dft_step:
self.flare_calc.reset()
self.structure.calc = self.flare_calc
# Take MD step.
# Inside the step() function, get_forces() is called
self.md.step()
self.curr_step += 1
def write_gp(self):
self.flare_calc.write_model(self.flare_name)
def rescale_temperature(self, new_pos):
# call OTF method
super().rescale_temperature(new_pos)
# update ASE atoms
if self.curr_step in self.rescale_steps:
rescale_ind = self.rescale_steps.index(self.curr_step)
new_temp = self.rescale_temps[rescale_ind]
temp_fac = new_temp / self.temperature
vel_fac = np.sqrt(temp_fac)
curr_velocities = self.structure.get_velocities()
self.structure.set_velocities(curr_velocities * vel_fac)
# Reset thermostat parameters.
if self.md_engine in ["NVTBerendsen", "NPTBerendsen", "NPT", "Langevin"]:
self.md.set_temperature(temperature_K=new_temp)
self.md_kwargs["temperature"] = new_temp * units.kB
def update_temperature(self):
self.KE = self.structure.get_kinetic_energy()
self.temperature = self.structure.get_temperature()
# Convert velocities to Angstrom / ps.
self.velocities = self.structure.get_velocities() * units.fs * 1e3
def update_gp(self, train_atoms, dft_frcs, dft_energy=None, dft_stress=None):
stds = self.flare_calc.results.get("stds", np.zeros_like(dft_frcs))
self.output.add_atom_info(train_atoms, stds)
# Convert ASE stress (xx, yy, zz, yz, xz, xy) to FLARE stress
# (xx, xy, xz, yy, yz, zz).
flare_stress = None
if dft_stress is not None:
flare_stress = -np.array(
[
dft_stress[0],
dft_stress[5],
dft_stress[4],
dft_stress[1],
dft_stress[3],
dft_stress[2],
]
)
if self.force_only:
dft_energy = None
flare_stress = None
# The structure will be added to self.gp.training_structures (struc.Structure).
# Create a new structure by deepcopy to avoid the forces of the saved
# structure get modified.
try:
struc_to_add = deepcopy(self.structure)
except TypeError:
# The structure might be attached with a non-picklable calculator,
# e.g., when we use LAMMPS empirical potential for training.
# When deepcopy fails, create a SinglePointCalculator to store results
from ase.calculators.singlepoint import SinglePointCalculator
properties = ["forces", "energy", "stress"]
results = {
"forces": self.structure.forces,
"energy": self.structure.potential_energy,
"stress": self.structure.stress,
}
calc = self.structure.calc
self.structure.calc = None
struc_to_add = deepcopy(self.structure)
struc_to_add.calc = SinglePointCalculator(struc_to_add, **results)
self.structure.calc = calc
# update gp model
self.gp.update_db(
struc_to_add,
dft_frcs,
custom_range=train_atoms,
energy=dft_energy,
stress=flare_stress,
)
self.gp.set_L_alpha()
# train model
if (self.dft_count - 1) < self.freeze_hyps:
self.train_gp()
# update mgp model
if self.flare_calc.use_mapping:
self.flare_calc.build_map()
# write model
if (self.dft_count - 1) < self.freeze_hyps:
if self.write_model == 2:
self.write_gp()
if self.write_model == 3:
self.write_gp()
def record_dft_data(self, structure, target_atoms):
structure.info["target_atoms"] = np.array(target_atoms)
write(self.dft_xyz, structure, append=True)
def as_dict(self):
# DFT module and Trajectory are not picklable
self.dft_module = self.dft_module.__name__
md = self.md
self.md = None
_kernels = self._kernels
self._kernels = None
dft_loc = self.dft_loc
self.dft_loc = None
calc = self.dft_input.calc
self.dft_input.calc = None
gp = self.gp
self.gp = None
# Deepcopy OTF object.
dct = deepcopy(dict(vars(self)))
# Reset attributes.
self.dft_module = eval(self.dft_module)
self.md = md
self._kernels = _kernels
self.dft_loc = dft_loc
self.dft_input.calc = calc
self.gp = gp
# write atoms and flare calculator to separate files
write(self.structure_name, self.structure)
dct["atoms"] = self.structure_name
self.flare_calc.write_model(self.flare_name)
dct["flare_calc"] = self.flare_name
# dump dft calculator as pickle
with open(self.dft_name, "wb") as f:
pickle.dump(self.dft_loc, f) # dft_loc is the dft calculator
dct["dft_loc"] = self.dft_name
dct["gp"] = self.gp_name
for key in ["output", "pred_func", "structure", "dft_input", "md"]:
dct.pop(key)
dct["md"] = self.md.todict()
return dct
@staticmethod
def from_dict(dct):
flare_calc_dict = json.load(open(dct["flare_calc"]))
# Build FLARE_Calculator from dict
if flare_calc_dict["class"] == "FLARE_Calculator":
flare_calc = FLARE_Calculator.from_file(dct["flare_calc"])
_kernels = None
# Build SGP_Calculator from dict
        # TODO: the C++ kernel objects still need to stay alive in the current
        # scope, otherwise there is a segfault. That is why _kernels is kept
        # around.
elif flare_calc_dict["class"] == "SGP_Calculator":
from flare_pp.sparse_gp_calculator import SGP_Calculator
flare_calc, _kernels = SGP_Calculator.from_file(dct["flare_calc"])
else:
raise TypeError(
f"The calculator from {dct['flare_calc']} is not recognized."
)
flare_calc.reset()
dct["atoms"] = read(dct["atoms"])
dct["calculator"] = flare_calc
dct.pop("gp")
with open(dct["dft_loc"], "rb") as f:
dct["dft_calc"] = pickle.load(f)
for key in ["dt", "dft_loc"]:
dct.pop(key)
new_otf = ASE_OTF(**dct)
new_otf._kernels = _kernels
new_otf.dft_count = dct["dft_count"]
new_otf.curr_step = dct["curr_step"]
new_otf.std_tolerance = dct["std_tolerance"]
if new_otf.md_engine == "NPT":
if not new_otf.md.initialized:
new_otf.md.initialize()
return new_otf
|
mir-group/flare
|
flare/ase/otf.py
|
Python
|
mit
| 14,797
|
[
"ASE",
"ESPResSo",
"LAMMPS",
"VASP"
] |
f81d24d0ee65efba37b975a438f4ae9f5634d857607fb769287b60262ef60091
|
"""
Manipulate HTML or XHTML documents.
Version 1.1.0. This source code has been placed in the
public domain by Connelly Barnes.
Features:
- Translate HTML back and forth to data structures.
This allows you to read and write HTML documents
programmably, with much flexibility.
- Extract and modify URLs in an HTML document.
- Compatible with Python 2.0 - 2.4.
See the L{examples} for a quick start.
"""
__version__ = '1.1.0'
__all__ = ['examples', 'tagextract', 'tagjoin', 'urlextract',
'urljoin', 'URLMatch']
# Define True and False for Python < 2.2.
import sys
if sys.version_info[:3] < (2, 2, 0):
exec "True = 1; False = 0"
# -------------------------------------------------------------------
# Globals
# -------------------------------------------------------------------
import re
import shlex
import string
import urllib
import urlparse
import types
# Translate text between these strings as plain text (not HTML).
_IGNORE_TAGS = [('script', '/script'),
('style', '/style')]
# Special tags where we have to look for _END_X as part of the
# HTML/XHTML parsing rules.
_BEGIN_COMMENT = '<!--'
_END_COMMENT = '-->'
_BEGIN_CDATA = '<![CDATA['
_END_CDATA = ']]>'
# Mime types that can be parsed as HTML or HTML-like.
_HTML_MIMETYPES = ['text/html', 'application/xhtml',
'application/xhtml+xml', 'text/xml',
'application/xml']
# Mime types that can be parsed as CSS.
_CSS_MIMETYPES = ['text/css']
# -------------------------------------------------------------------
# HTML <-> Data structure
# -------------------------------------------------------------------
def tagextract(doc):
"""
Convert HTML to data structure.
Returns a list. HTML tags become C{(name, keyword_dict)} tuples
within the list, while plain text becomes strings within the
list. All tag names are lowercased and stripped of whitespace.
Tags which end with forward slashes have a single forward slash
placed at the end of their name, to indicate that they are XML
unclosed tags.
Example:
>>> tagextract('<img src=hi.gif alt="hi">foo<br><br/></body>')
[('img', {'src': 'hi.gif', 'alt': 'hi'}), 'foo',
('br', {}), ('br/', {}), ('/body', {})]
  Text between C{'<script>'} and C{'</script>'} (and likewise between
  C{'<style>'} and C{'</style>'}) is rendered directly to plain text.
  This prevents rogue C{'<'} or C{'>'} characters from interfering with
  parsing.
>>> tagextract('<script type="a"><blah>var x; </script>')
[('script', {'type': 'a'}), '<blah>var x; ', ('/script', {})]
Comment strings and XML directives are rendered as a single long
tag with no attributes. The case of the tag "name" is not changed:
>>> tagextract('<!-- blah -->')
[('!-- blah --', {})]
>>> tagextract('<?xml version="1.0" encoding="utf-8" ?>')
[('?xml version="1.0" encoding="utf-8" ?', {})]
>>> tagextract('<!DOCTYPE html PUBLIC etc...>')
[('!DOCTYPE html PUBLIC etc...', {})]
  Greater-than and less-than characters occurring inside comments or
CDATA blocks are correctly kept as part of the block:
>>> tagextract('<!-- <><><><>>..> -->')
[('!-- <><><><>>..> --', {})]
>>> tagextract('<!CDATA[[><>><>]<> ]]>')
[('!CDATA[[><>><>]<> ]]', {})]
Note that if one modifies these tags, it is important to retain the
C{"--"} (for comments) or C{"]]"} (for C{CDATA}) at the end of the
tag name, so that output from L{tagjoin} will be correct HTML/XHTML.
"""
L = _full_tag_extract(doc)
for i in range(len(L)):
if isinstance(L[i], _TextTag):
# _TextTag object.
L[i] = L[i].text
else:
# _HTMLTag object.
L[i] = (L[i].name, L[i].attrs)
return L
def _is_str(s):
"""
True iff s is a string (checks via duck typing).
"""
return hasattr(s, 'capitalize')
def tagjoin(L):
"""
Convert data structure back to HTML.
This reverses the L{tagextract} function.
More precisely, if an HTML string is turned into a data structure,
then back into HTML, the resulting string will be functionally
equivalent to the original HTML.
>>> tagjoin(tagextract(s))
(string that is functionally equivalent to s)
Three changes are made to the HTML by L{tagjoin}: tags are
lowercased, C{key=value} pairs are sorted, and values are placed in
double-quotes.
"""
if _is_str(L):
raise ValueError('got string arg, expected non-string iterable')
ans = []
for item in L:
# Check for string using duck typing.
if _is_str(item):
# Handle plain text.
ans.append(item)
elif item[0] == '--':
# Handle closing comment.
ans.append('-->')
elif item[0] == '!--':
# Handle opening comment.
ans.append('<!--')
else:
# Handle regular HTML tag.
(name, d) = item
if name[-1:] == '/':
rslash = ' /'
name = name[:-1]
else:
rslash = ''
tag_items = []
items = d.items()
items.sort()
for (key, value) in items:
if value != None:
if '"' in value and "'" in value:
raise ValueError('attribute value contains both single' +
' and double quotes')
elif '"' in value:
tag_items.append(key + "='" + value + "'")
else:
tag_items.append(key + '="' + value + '"')
else:
tag_items.append(key)
tag_items = ' '.join(tag_items)
if tag_items != '':
tag_items = ' ' + tag_items
ans.append('<' + name + tag_items + rslash + '>')
return ''.join(ans)
def _enumerate(L):
"""
Like C{enumerate}, provided for compatibility with Python < 2.3.
Returns a list instead of an iterator.
"""
return zip(range(len(L)),L)
def _ignore_tag_index(s, i):
"""
Helper routine: Find index within C{_IGNORE_TAGS}, or C{-1}.
If C{s[i:]} begins with an opening tag from C{_IGNORE_TAGS}, return
the index. Otherwise, return C{-1}.
"""
for (j, (a, b)) in _enumerate(_IGNORE_TAGS):
if s[i:i+len(a)+1].lower() == '<' + a:
return j
return -1
def _html_split(s):
"""
Helper routine: Split string into a list of tags and non-tags.
  >>> _html_split(' blah <tag text> more </tag stuff> ')
[' blah ', '<tag text>', ' more ', '</tag stuff>', ' ']
Tags begin with C{'<'} and end with C{'>'}.
The identity C{''.join(L) == s} is always satisfied.
Exceptions to the normal parsing of HTML tags:
C{'<script>'}, C{'<style>'}, and HTML comment tags ignore all HTML
until the closing pair, and are added as three elements:
  >>> _html_split(' blah<style><<<><></style><!-- hi -->' +
... ' <script language="Javascript"></>a</script>end')
[' blah', '<style>', '<<<><>', '</style>', '<!--', ' hi ', '-->',
' ', '<script language="Javascript">', '</>a', '</script>', 'end']
"""
s_lower = s.lower()
L = []
i = 0 # Index of char being processed
while i < len(s):
c = s[i]
if c == '<':
# Left bracket, handle various cases.
if s[i:i+len(_BEGIN_COMMENT)].startswith(_BEGIN_COMMENT):
# HTML begin comment tag, '<!--'. Scan for '-->'.
i2 = s.find(_END_COMMENT, i)
if i2 < 0:
# No '-->'. Append the remaining malformed content and stop.
L.append(s[i:])
break
else:
# Append the comment.
L.append(s[i:i2+len(_END_COMMENT)])
i = i2 + len(_END_COMMENT)
elif s[i:i+len(_BEGIN_CDATA)].startswith(_BEGIN_CDATA):
# XHTML begin CDATA tag. Scan for ']]>'.
i2 = s.find(_END_CDATA, i)
if i2 < 0:
# No ']]>'. Append the remaining malformed content and stop.
L.append(s[i:])
break
else:
# Append the CDATA.
L.append(s[i:i2+len(_END_CDATA)])
i = i2 + len(_END_CDATA)
else:
# Regular HTML tag. Scan for '>'.
orig_i = i
found = False
in_quot1 = False
in_quot2 = False
for i2 in xrange(i+1, len(s)):
c2 = s[i2]
if c2 == '"' and not in_quot1:
in_quot2 = not in_quot2
# Only turn on double quote if it's in a realistic place.
if in_quot2 and not in_quot1:
if i2 > 0 and s[i2-1] not in [' ', '\t', '=']:
in_quot2 = False
elif c2 == "'" and not in_quot2:
in_quot1 = not in_quot1
# Only turn on single quote if it's in a realistic place.
if in_quot1 and not in_quot2:
if i2 > 0 and s[i2-1] not in [' ', '\t', '=']:
in_quot1 = False
elif c2 == '>' and (not in_quot2 and not in_quot1):
found = True
break
if not found:
# No end '>'. Append the rest as text.
L.append(s[i:])
break
else:
# Append the tag.
L.append(s[i:i2+1])
i = i2 + 1
# Check whether we found a special ignore tag, eg '<script>'
tagi = _ignore_tag_index(s, orig_i)
if tagi >= 0:
# It's an ignore tag. Scan for the end tag.
i2 = s_lower.find('<' + _IGNORE_TAGS[tagi][1], i)
if i2 < 0:
# No end tag. Append the rest as text.
            L.append(s[i:])
break
else:
# Append the text sandwiched between the tags.
L.append(s[i:i2])
# Catch the closing tag with the next loop iteration.
i = i2
else:
# Not a left bracket, append text up to next left bracket.
i2 = s.find('<', i)
if i2 < 0:
# No left brackets, append the rest as text.
L.append(s[i:])
break
else:
L.append(s[i:i2])
i = i2
return L
def _shlex_split(s):
"""
Like C{shlex.split}, but reversible, and for HTML.
Splits a string into a list C{L} of strings. List elements
contain either an HTML tag C{name=value} pair, an HTML name
singleton (eg C{"checked"}), or whitespace.
The identity C{''.join(L) == s} is always satisfied.
>>> _shlex_split('a=5 b="15" name="Georgette A"')
['a=5', ' ', 'b="15"', ' ', 'name="Georgette A"']
>>> _shlex_split('a = a5 b=#b19 name="foo bar" q="hi"')
['a = a5', ' ', 'b=#b19', ' ', 'name="foo bar"', ' ', 'q="hi"']
>>> _shlex_split('a="9"b="15"')
['a="9"', 'b="15"']
"""
ans = []
i = 0
while i < len(s):
c = s[i]
if c in string.whitespace:
# Whitespace. Add whitespace while found.
for i2 in range(i, len(s)):
if s[i2] not in string.whitespace:
break
# Include the entire string if the last char is whitespace.
if s[i2] in string.whitespace:
i2 += 1
ans.append(s[i:i2])
i = i2
else:
# Match 'name = "value"'
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*"[^"]*"')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match "name = 'value'"
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*\'[^\']*\'')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match 'name = value'
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*[^ \t\n\r\f\v"\']*')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match 'name'
c = re.compile(r'[^ \t\n\r\f\v"\']+')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Couldn't match anything so far, so it's likely that the page
# has malformed quotes inside a tag. Add leading quotes
# and spaces to the previous field until we see something.
subadd = []
while i < len(s) and s[i] in ['"', "'", ' ', '\t']:
subadd.append(s[i])
i += 1
# Add whatever we could salvage from the situation and move on.
if len(subadd) > 0:
ans.append(''.join(subadd))
else:
# We totally failed at matching this character, so add it
# as a separate item and move on.
ans.append(s[i])
return ans
def _test_shlex_split():
"""
Unit test for L{_shlex_split}.
"""
assert _shlex_split('') == []
assert _shlex_split(' ') == [' ']
assert _shlex_split('a=5 b="15" name="Georgette A"') == \
['a=5', ' ', 'b="15"', ' ', 'name="Georgette A"']
assert _shlex_split('a=cvn b=32vsd c= 234jk\te d \t="hi"') == \
['a=cvn', ' ', 'b=32vsd', ' ', 'c= 234jk', '\t', 'e', ' ',
'd \t="hi"']
assert _shlex_split(' a b c d=e f g h i="jk" l mno = p ' + \
'qr = "st"') == \
[' ', 'a', ' ', 'b', ' ', 'c', ' ', 'd=e', ' ', 'f', ' ', \
'g', ' ', 'h', ' ', 'i="jk"', ' ', 'l', ' ', 'mno = p', \
' ', 'qr = "st"']
assert _shlex_split('a=5 b="9"c="15 dfkdfkj "d="25"') == \
['a=5', ' ', 'b="9"', 'c="15 dfkdfkj "', 'd="25"']
assert _shlex_split('a=5 b="9"c="15 dfkdfkj "d="25" e=4') == \
['a=5', ' ', 'b="9"', 'c="15 dfkdfkj "', 'd="25"', ' ', \
'e=4']
assert _shlex_split('a=5 b=\'9\'c=\'15 dfkdfkj \'d=\'25\' e=4') == \
['a=5', ' ', 'b=\'9\'', 'c=\'15 dfkdfkj \'', 'd=\'25\'', \
' ', 'e=4']
def _tag_dict(s):
"""
Helper routine: Extracts a dict from an HTML tag string.
>>> _tag_dict('bgcolor=#ffffff text="#000000" blink')
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0,7), 'text':(16,20), 'blink':(31,36)},
{'bgcolor':(8,15), 'text':(22,29), 'blink':(36,36)})
Returns a 3-tuple. First element is a dict of
C{(key, value)} pairs from the HTML tag. Second element
is a dict mapping keys to C{(start, end)} indices of the
key in the text. Third element maps keys to C{(start, end)}
indices of the value in the text.
Names are lowercased.
Raises C{ValueError} for unmatched quotes and other errors.
"""
d = _shlex_split(s)
attrs = {}
key_pos = {}
value_pos = {}
start = 0
for item in d:
end = start + len(item)
equals = item.find('=')
if equals >= 0:
# Contains an equals sign.
(k1, k2) = (start, start + equals)
(v1, v2) = (start + equals + 1, start + len(item))
# Strip spaces.
while k1 < k2 and s[k1] in string.whitespace: k1 += 1
while k1 < k2 and s[k2-1] in string.whitespace: k2 -= 1
while v1 < v2 and s[v1] in string.whitespace: v1 += 1
while v1 < v2 and s[v2-1] in string.whitespace: v2 -= 1
# Strip one pair of double quotes around value.
if v1 < v2 - 1 and s[v1] == '"' and s[v2-1] == '"':
v1 += 1
v2 -= 1
# Strip one pair of single quotes around value.
if v1 < v2 - 1 and s[v1] == "'" and s[v2-1] == "'":
v1 += 1
v2 -= 1
(key, value) = (s[k1:k2].lower(), s[v1:v2])
# Drop bad keys and values.
if '"' in key or "'" in key:
continue
if '"' in value and "'" in value:
continue
attrs[key] = value
key_pos[key] = (k1, k2)
value_pos[key] = (v1, v2)
elif item.split() == []:
# Whitespace. Ignore it.
pass
else:
# A single token, like 'blink'.
key = item.lower()
# Drop bad keys.
if '"' in key or "'" in key:
continue
attrs[key] = None
key_pos[key] = (start, end)
value_pos[key] = (end, end)
start = end
return (attrs, key_pos, value_pos)
def _test_tag_dict():
"""
Unit test for L{_tag_dict}.
"""
assert _tag_dict('') == ({}, {}, {})
assert _tag_dict(' \t\r \n\n \r\n ') == ({}, {}, {})
assert _tag_dict('bgcolor=#ffffff text="#000000" blink') == \
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0,7), 'text':(16,20), 'blink':(31,36)},
{'bgcolor':(8,15), 'text':(22,29), 'blink':(36,36)})
assert _tag_dict("bgcolor='#ffffff'text='#000000' blink") == \
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0,7), 'text':(17,21), 'blink':(32,37)},
{'bgcolor':(9,16), 'text':(23,30), 'blink':(37,37)})
s = ' \r\nbg = val text \t= "hi you" name\t e="5"\t\t\t\n'
(a, b, c) = _tag_dict(s)
assert a == {'text': 'hi you', 'bg': 'val', 'e': '5', 'name': None}
for key in a.keys():
assert s[b[key][0]:b[key][1]] == key
if a[key] != None:
assert s[c[key][0]:c[key][1]] == a[key]
def _full_tag_extract(s):
"""
Like L{tagextract}, but different return format.
Returns a list of L{_HTMLTag} and L{_TextTag} instances.
The return format is very inconvenient for manipulating HTML, and
only will be useful if you want to find the exact locations where
tags occur in the original HTML document.
"""
L = _html_split(s)
# Starting position of each L[i] in s.
Lstart = [0] * len(L)
for i in range(1, len(L)):
Lstart[i] = Lstart[i-1] + len(L[i-1])
class NotTagError(Exception): pass
for (i, text) in _enumerate(L):
try:
# Is it an HTML tag?
is_tag = False
if len(text) >= 2 and text[0] == '<' and text[-1] == '>':
# Turn HTML tag text into (name, keyword_dict) tuple.
is_tag = True
is_special = False
if len(text) >= 2 and (text[1] == '!' or text[1] == '?'):
is_special = True
if is_special:
# A special tag such as XML directive or <!-- comment -->
pos = (Lstart[i], Lstart[i] + len(L[i]))
# Wrap inside an _HTMLTag object.
L[i] = _HTMLTag(pos, text[1:-1].strip(), {}, {}, {})
elif is_tag:
# If an HTML tag, strip brackets and handle what's left.
# Strip off '<>' and update offset.
orig_offset = 0
if len(text) >= 1 and text[0] == '<':
text = text[1:]
orig_offset = 1
if len(text) >= 1 and text[-1] == '>':
text = text[:-1]
if len(text) > 0 and text[-1] == '/':
rslash = True
text = text[:-1]
else:
rslash = False
first_space = text.find(' ')
if first_space < 0:
(name, dtext) = (text, '')
else:
name = text[:first_space]
dtext = text[first_space+1:len(text)]
# Position of dtext relative to original text.
dtext_offset = len(name) + 1 + orig_offset # +1 for space.
# Lowercase everything except XML directives and comments.
if not name.startswith('!') and not name.startswith('?'):
name = name.strip().lower()
if rslash:
name += '/'
# Strip off spaces, and update dtext_offset as appropriate.
orig_dtext = dtext
dtext = dtext.strip()
dtext_offset += orig_dtext.index(dtext)
(attrs, key_pos, value_pos) = _tag_dict(dtext)
# Correct offsets in key_pos and value_pos.
for key in attrs.keys():
key_pos[key] = (key_pos[key][0]+Lstart[i]+dtext_offset,
key_pos[key][1]+Lstart[i]+dtext_offset)
value_pos[key] = (value_pos[key][0]+Lstart[i]+dtext_offset,
value_pos[key][1]+Lstart[i]+dtext_offset)
pos = (Lstart[i], Lstart[i] + len(L[i]))
# Wrap inside an _HTMLTag object.
L[i] = _HTMLTag(pos, name, attrs, key_pos, value_pos)
else:
# Not an HTML tag.
raise NotTagError
except NotTagError:
# Wrap non-HTML strings inside a _TextTag object.
pos = (Lstart[i], Lstart[i] + len(L[i]))
L[i] = _TextTag(pos, L[i])
return L
class _HTMLTag:
"""
HTML tag extracted by L{_full_tag_extract}.
@ivar pos: C{(start, end)} indices of the entire tag in the
HTML document.
@ivar name: Name of tag. For example, C{'img'}.
@ivar attrs: Dictionary mapping tag attributes to corresponding
tag values.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.attrs
{'href': 'd.com'}
Surrounding quotes are stripped from the values.
@ivar key_pos: Key position dict.
Maps the name of a tag attribute to C{(start, end)}
indices for the key string in the C{"key=value"}
HTML pair. Indices are absolute, where 0 is the
start of the HTML document.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.key_pos['href']
(3, 7)
>>> '<a href="d.com">'[3:7]
'href'
@ivar value_pos: Value position dict.
Maps the name of a tag attribute to C{(start, end)}
indices for the value in the HTML document string.
Surrounding quotes are excluded from this range.
Indices are absolute, where 0 is the start of the
HTML document.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.value_pos['href']
(9, 14)
>>> '<a href="d.com">'[9:14]
'd.com'
"""
def __init__(self, pos, name, attrs, key_pos, value_pos):
"""
Create an _HTMLTag object.
"""
self.pos = pos
self.name = name
self.attrs = attrs
self.key_pos = key_pos
self.value_pos = value_pos
class _TextTag:
"""
Text extracted from an HTML document by L{_full_tag_extract}.
@ivar text: Extracted text.
@ivar pos: C{(start, end)} indices of the text.
"""
def __init__(self, pos, text):
"""
Create a _TextTag object.
"""
self.pos = pos
self.text = text
# -------------------------------------------------------------------
# URL Editing
# -------------------------------------------------------------------
# Tags within which URLs may be found.
_URL_TAGS = ['a href', 'applet archive', 'applet code',
'applet codebase', 'area href', 'base href',
'blockquote cite', 'body background', 'del cite',
'form action', 'frame longdesc', 'frame src',
'head profile', 'iframe src', 'iframe longdesc',
'img src', 'img ismap', 'img longdesc', 'img usemap',
'input src', 'ins cite', 'link href', 'object archive',
'object codebase', 'object data', 'object usemap',
'script src', 'table background', 'tbody background',
'td background', 'tfoot background', 'th background',
'thead background', 'tr background']
_URL_TAGS = map(lambda s: tuple(s.split()), _URL_TAGS)
def _finditer(pattern, string):
"""
Like C{re.finditer}, provided for compatibility with Python < 2.3.
Returns a list instead of an iterator. Otherwise the return format
is identical to C{re.finditer} (except possibly in the details of
empty matches).
"""
compiled = re.compile(pattern)
ans = []
start = 0
while True:
m = compiled.search(string, start)
if m:
ans.append(m)
else:
return ans
m_start = m.start(m.lastindex)
m_end = m.end(m.lastindex)
if m_end > m_start:
start = m_end
else:
start += 1
def _remove_comments(doc):
"""
Replaces commented out characters with spaces in a CSS document.
"""
ans = []
i = 0
while True:
i2 = doc.find('/*', i)
if i2 < 0:
ans += [doc[i:]]
break
ans += [doc[i:i2]]
i3 = doc.find('*/', i2+1)
if i3 < 0:
i3 = len(doc) - 2
ans += [' ' * (i3 - i2 + 2)]
i = i3 + 2
return ''.join(ans)
def _test_remove_comments():
"""
Unit test for L{_remove_comments}.
"""
s = '/*d s kjlsdf */*//*/*//**/**/*//**/a' * 50
assert len(_remove_comments(s)) == len(s)
s = '/**/' * 50 + '/*5845*/*/*//*/**/dfd'+'/*//**//'
assert len(_remove_comments(s)) == len(s)
s = 'a/**/' * 50 + '/**//**/////***/****/*//**//*/' * 5
assert len(_remove_comments(s)) == len(s)
s = 'hi /* foo */ hello /* bar!!!!! \n\n */ there!'
assert _remove_comments(s) == \
'hi hello there!'
def urlextract(doc, siteurl=None, mimetype='text/html'):
"""
Extract URLs from HTML or stylesheet.
Extracts only URLs that are linked to or embedded in the document.
Ignores plain text URLs that occur in the non-HTML part of the
document.
Returns a list of L{URLMatch} objects.
>>> L = urlextract('<img src="a.gif"><a href="www.google.com">')
>>> L[0].url
'a.gif'
>>> L[1].url
'www.google.com'
If C{siteurl} is specified, all URLs are made into absolute URLs
by assuming that C{doc} is located at the URL C{siteurl}.
>>> doc = '<img src="a.gif"><a href="/b.html">'
>>> L = urlextract(doc, 'http://www.python.org/~guido/')
>>> L[0].url
'http://www.python.org/~guido/a.gif'
>>> L[1].url
'http://www.python.org/b.html'
If C{mimetype} is C{"text/css"}, the document will be parsed
as a stylesheet.
If a stylesheet is embedded inside an HTML document, then
C{urlextract} will extract the URLs from both the HTML and the
stylesheet.
"""
mimetype = mimetype.lower()
if mimetype.split()[0] in _CSS_MIMETYPES:
doc = _remove_comments(doc)
# Match URLs within CSS stylesheet.
# Match url(blah) or url('blah') or url("blah").
L = _finditer(
r'''url\s*\(([^\r\n\("']*?)\)|''' +
r'''url\s*\(\s*"([^\r\n]*?)"\s*\)|''' +
r'''url\s*\(\s*'([^\r\n]*?)'\s*\)|''' +
r'''@import\s+([^ \t\r\n"';@\(\)]+)[^\r\n;@\(\)]*[\r\n;]|''' +
r'''@import\s+'([^ \t\r\n"';@\(\)]+)'[^\r\n;@\(\)]*[\r\n;]|''' +
r'''@import\s+"([^ \t\r\n"';\(\)']+)"[^\r\n;@\(\)]*[\r\n;]''',
doc + ';\n')
L = [(x.start(x.lastindex), x.end(x.lastindex)) for x in L]
ans = []
for (s, e) in L:
e = min(e, len(doc))
if e > s:
ans.append(URLMatch(doc, s, e, siteurl, False, True))
elif mimetype.split()[0] in _HTML_MIMETYPES:
# Match URLs within HTML document.
ans = []
L = _full_tag_extract(doc)
item = None
for i in range(len(L)):
prev_item = item
item = L[i]
# Handle string item (text) or tuple item (tag).
if isinstance(item, _TextTag):
# Current item is text.
if isinstance(prev_item, _HTMLTag) and prev_item.name == \
'style':
# And previous item is <style>. Process a stylesheet.
temp = urlextract(item.text, siteurl, 'text/css')
# Offset indices and add to ans.
for j in range(len(temp)):
temp[j].start += item.pos[0]
temp[j].end += item.pos[0]
ans += temp
else:
# Regular text. Ignore.
pass
else:
# Current item is a tag.
if item.attrs.has_key('style'):
# Process a stylesheet embedded in the 'style' attribute.
temp = urlextract(item.attrs['style'], siteurl, 'text/css')
# Offset indices and add to ans.
for j in range(len(temp)):
temp[j].start += item.value_pos['style'][0]
temp[j].end += item.value_pos['style'][0]
ans += temp
for (a, b) in _URL_TAGS:
if item.name.startswith(a) and b in item.attrs.keys():
# Got one URL.
url = item.attrs[b]
# FIXME: Some HTML tag wants a URL list, look up which
# tag and make it a special case.
(start, end) = item.value_pos[b]
tag_name = a
tag_attr = b
tag_attrs = item.attrs
tag_index = i
tag = URLMatch(doc, start, end, siteurl, True, False, \
tag_attr, tag_attrs, tag_index, tag_name)
ans.append(tag)
# End of 'text/html' mimetype case.
else:
raise ValueError('unknown MIME type: ' + repr(mimetype))
# Filter the answer, removing duplicate matches.
start_end_map = {}
filtered_ans = []
for item in ans:
if not start_end_map.has_key((item.start, item.end)):
start_end_map[(item.start, item.end)] = None
filtered_ans.append(item)
return filtered_ans
def _tuple_replace(s, Lindices, Lreplace):
"""
Replace slices of a string with new substrings.
Given a list of slice tuples in C{Lindices}, replace each slice
in C{s} with the corresponding replacement substring from
C{Lreplace}.
Example:
>>> _tuple_replace('0123456789',[(4,5),(6,9)],['abc', 'def'])
'0123abc5def9'
"""
ans = []
Lindices = Lindices[:]
Lindices.sort()
if len(Lindices) != len(Lreplace):
raise ValueError('lists differ in length')
for i in range(len(Lindices)-1):
if Lindices[i][1] > Lindices[i+1][0]:
raise ValueError('tuples overlap')
if Lindices[i][1] < Lindices[i][0]:
raise ValueError('invalid tuple')
if min(Lindices[i][0], Lindices[i][1]) < 0 or \
max(Lindices[i][0], Lindices[i][1]) >= len(s):
raise ValueError('bad index')
j = 0
offset = 0
for i in range(len(Lindices)):
len1 = Lindices[i][1] - Lindices[i][0]
len2 = len(Lreplace[i])
ans.append(s[j:Lindices[i][0]+offset])
ans.append(Lreplace[i])
j = Lindices[i][1]
ans.append(s[j:])
return ''.join(ans)
def _test_tuple_replace():
"""
Unit test for L{_tuple_replace}.
"""
assert _tuple_replace('',[],[]) == ''
assert _tuple_replace('0123456789',[],[]) == '0123456789'
assert _tuple_replace('0123456789',[(4,5),(6,9)],['abc', 'def'])== \
'0123abc5def9'
assert _tuple_replace('01234567890123456789', \
[(1,9),(13,14),(16,18)],['abcd','efg','hijk']) == \
'0abcd9012efg45hijk89'
def urljoin(s, L):
"""
Write back document with modified URLs (reverses L{urlextract}).
Given a list C{L} of L{URLMatch} objects obtained from
L{urlextract}, substitutes changed URLs into the original
document C{s}, and returns the modified document.
One should only modify the C{.url} attribute of the L{URLMatch}
objects. The ordering of the URLs in the list is not important.
>>> doc = '<img src="a.png"><a href="b.png">'
>>> L = urlextract(doc)
>>> L[0].url = 'foo'
>>> L[1].url = 'bar'
>>> urljoin(doc, L)
'<img src="foo"><a href="bar">'
"""
return _tuple_replace(s, [(x.start, x.end) for x in L], \
[x.url for x in L])
def examples():
"""
Examples of the C{htmldata} module.
Example 1:
Print all absolutized URLs from Google.
Here we use L{urlextract} to obtain all URLs in the document.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> for u in htmldata.urlextract(contents, url):
... print u.url
...
http://www.google.com/images/logo.gif
http://www.google.com/search
(More output)
Note that the second argument to L{urlextract} causes the
URLs to be made absolute with respect to that base URL.
Example 2:
Print all image URLs from Google in relative form.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> for u in htmldata.urlextract(contents):
... if u.tag_name == 'img':
... print u.url
...
/images/logo.gif
Equivalently, one can use L{tagextract}, and look for occurrences
of C{<img>} tags. The L{urlextract} function is mostly a convenience
function for when one wants to extract and/or modify all URLs in a
document.
Example 3:
Replace all C{<a href>} links on Google with the Microsoft web page.
Here we use L{tagextract} to turn the HTML into a data structure,
and then loop over the in-order list of tags (items which are not
tuples are plain text, which is ignored).
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> L = htmldata.tagextract(contents)
>>> for item in L:
... if isinstance(item, tuple) and item[0] == 'a':
... # It's an HTML <a> tag! Give it an href=.
... item[1]['href'] = 'http://www.microsoft.com/'
...
>>> htmldata.tagjoin(L)
(Microsoftized version of Google)
Example 4:
Make all URLs on an HTML document be absolute.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> htmldata.urljoin(htmldata.urlextract(contents, url))
(Google HTML page with absolute URLs)
Example 5:
Properly quote all HTML tag values for pedants.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> htmldata.tagjoin(htmldata.tagextract(contents))
(Properly quoted version of the original HTML)
Example 6:
Modify all URLs in a document so that they are appended
to our proxy CGI script C{http://mysite.com/proxy.cgi}.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> proxy_url = 'http://mysite.com/proxy.cgi?url='
>>> L = htmldata.urlextract(contents)
>>> for u in L:
... u.url = proxy_url + u.url
...
>>> htmldata.urljoin(L)
(Document with all URLs wrapped in our proxy script)
Example 7:
Download all images from a website.
>>> import urllib, htmldata, time
>>> url = 'http://www.google.com/'
>>> contents = urllib.urlopen(url).read()
>>> for u in htmldata.urlextract(contents, url):
... if u.tag_name == 'img':
... filename = urllib.quote_plus(u.url)
... urllib.urlretrieve(u.url, filename)
... time.sleep(0.5)
...
(Images are downloaded to the current directory)
Many sites will protect against bandwidth-draining robots by
checking the HTTP C{Referer} [sic] and C{User-Agent} fields.
To circumvent this, one can create a C{urllib2.Request} object
with a legitimate C{Referer} and a C{User-Agent} such as
C{"Mozilla/4.0 (compatible; MSIE 5.5)"}. Then use
C{urllib2.urlopen} to download the content. Be warned that some
website operators will respond to rapid robot requests by banning
the offending IP address.
"""
print examples.__doc__
class URLMatch:
"""
A matched URL inside an HTML document or stylesheet.
A list of C{URLMatch} objects is returned by L{urlextract}.
@ivar url: URL extracted.
@ivar start: Starting character index.
@ivar end: End character index.
@ivar in_html: C{True} if URL occurs within an HTML tag.
@ivar in_css: C{True} if URL occurs within a stylesheet.
@ivar tag_attr: Specific tag attribute in which URL occurs.
Example: C{'href'}.
C{None} if the URL does not occur within an HTML
tag.
@ivar tag_attrs: Dictionary of all tag attributes and values.
Example: C{{'src':'http://X','alt':'Img'}}.
C{None} if the URL does not occur within an HTML
tag.
@ivar tag_index: Index of the tag in the list that would be
generated by a call to L{tagextract}.
@ivar tag_name: HTML tag name in which URL occurs.
Example: C{'img'}.
C{None} if the URL does not occur within an HTML
tag.
"""
def __init__(self, doc, start, end, siteurl, in_html, in_css,
tag_attr=None, tag_attrs=None, tag_index=None,
tag_name=None):
"""
Create a URLMatch object.
"""
self.doc = doc
self.start = start
self.end = end
self.url = doc[start:end]
self.in_html = in_html
self.in_css = in_css
if siteurl != None:
self.url = urlparse.urljoin(siteurl, self.url)
self.tag_attr = tag_attr
self.tag_attrs = tag_attrs
self.tag_index = tag_index
self.tag_name = tag_name
def _cast_to_str(arg, str_class):
"""
Casts string components of several data structures to str_class.
Casts string, list of strings, or list of tuples (as returned by
L{tagextract}) such that all strings are made to type str_class.
"""
if _is_str(arg):
return str_class(arg)
elif isinstance(arg, types.ListType):
ans = []
for item in arg:
if _is_str(item):
ans.append(str_class(item))
elif isinstance(item, types.TupleType) and len(item) == 2:
(a, b) = item
b_prime = {}
for (b_key, b_value) in b.items():
if b_value is None:
b_prime[str_class(b_key)] = None
else:
b_prime[str_class(b_key)] = str_class(b_value)
ans.append((str_class(a), b_prime))
else:
raise ValueError('unknown argument type')
return ans
else:
raise ValueError('unknown argument type')
# -------------------------------------------------------------------
# Unit Tests: HTML <-> Data structure
# -------------------------------------------------------------------
def _test_tagextract(str_class=str):
"""
Unit tests for L{tagextract} and L{tagjoin}.
Strings are cast to the string class argument str_class.
"""
# Work around lack of nested scopes in Python <= 2.1.
def f(obj, str_class2=str_class):
return _cast_to_str(obj, str_class2)
# Simple HTML document to test.
doc1 = f('\n\n<Html><BODY bgcolor=#ffffff>Hi<h1>Ho</h1><br>' +
'<br /><img SRc="text%5f.gif"><TAG NOshow>' +
'<img test="5%ff" /></body></html>\nBye!\n')
doc2 = f('\r<HTML><!-- Comment<a href="blah"> --><hiYa><foo>' +
'<test tag="5" content=6><is broken=False><yay>' +
'<style><><>><</style><foo bar=5>end<!-- <!-- nested --> '+
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
doc3 = f('\r\t< html >< tag> <!--comment--> <tag a = 5> ' +
'<foo \r\nbg = val text \t= "hi you" name\t e="5"\t\t\t\n>')
doc4 = f('<?xml ??><foo><!-- <img> --><!DOCTYPE blah""/>' +
'<![CDATA[ more and weirder<bar> ] ][]]><![C[DATA[[>' +
'<abc key=value><![CDATA[to eof')
doc5 = f('<a href="foobar/ \t="base="10" x="15"><a x="9"t="20">')
# -----------------------------------------------------------------
# Test _html_split()
# -----------------------------------------------------------------
s = doc1
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['\n\n', '<Html>', '<BODY bgcolor=#ffffff>', 'Hi', '<h1>', 'Ho',
'</h1>', '<br>', '<br />', '<img SRc="text%5f.gif">',
'<TAG NOshow>', '<img test="5%ff" />', '</body>', '</html>',
'\nBye!\n'])
s = doc2
assert s == f('').join(_html_split(s))
# Test single quotes
s = doc2.replace(f('"'), f("'"))
assert s == f('').join(_html_split(s))
s = f('<!-- test weird comment <body> <html> --> <h1>Header' +
'</h1 value=10 a=11>')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- test weird comment <body> <html> -->', ' ',
'<h1>', 'Header', '</h1 value=10 a=11>'])
s = f('<!-- <!-- nested messed up --> blah ok <now> what<style>hi' +
'<><>></style><script language="Java"><aL><>><>></script>a')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- <!-- nested messed up -->', ' blah ok ', '<now>',
' what', '<style>', 'hi<><>>', '</style>',
'<script language="Java">', '<aL><>><>>', '</script>', 'a'])
s = f('<!-- ><# -->!<!-!._-><!-- aa--> <style><tag//</style> <tag '+
'<tag <! <! -> <!-- </who< <who> tag> <huh-->-</style>' +
'</style<style>')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- ><# -->', '!', '<!-!._->', '<!-- aa-->',
' ', '<style>', '<tag//', '</style>', ' ', '<tag <tag <! <! ->',
' ', '<!-- </who< <who> tag> <huh-->', '-', '</style>',
'</style<style>'])
s = doc4
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<?xml ??>', '<foo>', '<!-- <img> -->', '<!DOCTYPE blah""/>',
'<![CDATA[ more and weirder<bar> ] ][]]>', '<![C[DATA[[>',
'<abc key=value>', '<![CDATA[to eof'])
# -----------------------------------------------------------------
# Test tagextract() and tagjoin()
# -----------------------------------------------------------------
s = doc1
s2 = doc1.replace(f('"'), f("'")) # Test single quotes, too.
assert tagextract(f('')) == []
assert tagextract(s) == tagextract(s2) == \
f(['\n\n', ('html', {}), ('body', {'bgcolor': '#ffffff'}),
'Hi', ('h1', {}), 'Ho', ('/h1', {}), ('br', {}),
('br/', {}), ('img', {'src': 'text%5f.gif'}),
('tag', {'noshow': None}), ('img/', {'test': '5%ff'}),
('/body', {}), ('/html', {}), '\nBye!\n'])
s2 = f('\n\n<html><body bgcolor="#ffffff">Hi<h1>Ho</h1><br>' +
'<br /><img src="text%5f.gif"><tag noshow>' +
'<img test="5%ff" /></body></html>\nBye!\n')
assert tagjoin(tagextract(s)) == s2
doc2old = doc2
doc2 = f('\r<HTML><!-- Comment<a href="blah"> --><hiYa><foo>' +
'<test tag="5" content=6><is broken=False><yay>' +
'<style><><>><</style><foo bar=5>end<!-- <!-- nested --> '+
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
assert doc2old == doc2
s = doc2
assert tagextract(s) == f(
['\r', ('html', {}), ('!-- Comment<a href="blah"> --', {}),
('hiya', {}), ('foo', {}),
('test', {'content': '6', 'tag': '5'}),
('is', {'broken': 'False'}), ('yay', {}), ('style', {}), '<><>><',
('/style', {}), ('foo', {'bar': '5'}), 'end',
('!-- <!-- nested --', {}), ' ',
('script', {'language': 'JavaScript'}), ('>!><!_!_!-->!_-', {}),
('/script', {})])
assert tagjoin(tagextract(s)) == f(
'\r<html><!-- Comment<a href="blah"> --><hiya><foo><test ' +
'content="6" tag="5"><is broken="False"><yay><style><><>><' +
'</style><foo bar="5">end<!-- <!-- nested --> ' +
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
s = doc5
assert tagextract(s) == f(
[('a', {'href':'foobar/ \t=', 'base':'10', 'x':'15'}),
('a', {'x':'9', 't':'20'})])
assert tagjoin(tagextract(s)) == f(
'<a base="10" href="foobar/ \t=" x="15"><a t="20" x="9">')
# -----------------------------------------------------------------
# Test _full_tag_extract()
# -----------------------------------------------------------------
for s in [doc1, doc2, doc3,
doc1.replace(f('"'), f("'")), doc2.replace(f('"'), f("'")),
doc3.replace(f('"'), f("'"))]:
L = _full_tag_extract(s)
for (i, item) in _enumerate(L):
if isinstance(item, _HTMLTag):
for key in item.attrs.keys():
assert s[item.key_pos[key][0]:item.key_pos[key][1]].lower()\
== key
if item.attrs[key] != None:
assert s[item.value_pos[key][0]:item.value_pos[key][1]] \
== item.attrs[key]
n = 1000
doc4 = f('<tag name = "5" value ="6afdjherknc4 cdk j" a="7" b=8/>')
doc4 *= n
L = tagextract(doc4)
assert len(L) == n
for i in range(n):
assert L[i] == f([('tag/',{'name':'5','value':'6afdjherknc4 cdk j',
'a':'7', 'b':'8'})])[0]
# -----------------------------------------------------------------
# Test tagextract() and tagjoin() with XML directives.
# -----------------------------------------------------------------
doc1 = f(
'a<?xml version="1.0"?>' +
'b<!DOCTYPE html' +
'PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"' +
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd" >c' +
'<html a=b><!-- Comment <><> hi! -->' +
'z<![CDATA[ some content ]]>rx' +
'<![C[DATA[ more and weirder ] ][]]>tt')
doc1join = f(
'a<?xml version="1.0"?>b<!DOCTYPE htmlPUBLIC "-//W3C//DTD ' +
'XHTML 1.0 Transitional//EN""http://www.w3.org/TR/xhtml1/DTD/' +
'xhtml1-transitional.dtd">c<html a="b"><!-- Comment <><> hi! ' +
'-->z<![CDATA[ some content ]]>rx<![C[DATA[ more and weirder ]' +
' ][]]>tt')
ans1 = f(
['a', ('?xml version="1.0"?', {}), 'b',
('!DOCTYPE html' +
'PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"' +
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"', {}),
'c', ('html', {'a':'b'}), ('!-- Comment <><> hi! --', {}), 'z',
('![CDATA[ some content ]]', {}), 'rx',
('![C[DATA[ more and weirder ] ][]]', {}), 'tt'])
assert (tagextract(f('<?xml version="1.0" encoding="utf-8" ?>')) ==
f([('?xml version="1.0" encoding="utf-8" ?', {})]))
assert (tagextract(f('<!DOCTYPE html PUBLIC etc...>')) ==
f([('!DOCTYPE html PUBLIC etc...', {})]))
assert tagextract(doc1) == ans1
assert tagjoin(tagextract(doc1)) == doc1join
# -------------------------------------------------------------------
# Unit Tests: URL Parsing
# -------------------------------------------------------------------
def _test_urlextract(str_class=str):
"""
Unit tests for L{urlextract} and L{urljoin}.
Strings are cast to the string class argument str_class.
"""
# Work around lack of nested scopes in Python <= 2.1.
def f(obj, str_class2=str_class):
return _cast_to_str(obj, str_class2)
doc1 = f('urlblah, url ( blah2, url( blah3) url(blah4) ' +
'url("blah5") hum("blah6") url)"blah7"( url ( " blah8 " );;')
doc2 = f('<html><img src="a.gif" alt="b"><a href = b.html name=' +
'"c"><td background = ./c.png width=100%><a value=/f.jpg>' +
'<img src="http://www.abc.edu/d.tga">http://www.ignore.us/' +
'\nhttp://www.nowhere.com <style>url(h.gif) ' +
'url(http://www.testdomain.com/) http://ignore.com/a' +
'</style><img alt="c" src = "a.gif"><img src=/i.png>')
doc3 = f('@import foo;\n@import bar\n@import url(\'foo2\');' +
'@import url(\'http://bar2\')\n@import\turl("foo!");' +
'@import \'foo3\'\n@import "bar3";\n@importfails;' +
'@import;@import\n;url(\'howdy!\')\n@import foo5 ;' +
'@import \'foo6\' \n@import "foo7";')
doc4 = f('@import foo handheld;\n@import \'bar\' handheld\n' +
'@import url(\'foo2\') handheld; @import url(bar2) ha\n' +
'@import url("foo3") handheld\n')
doc5 = f('<html><img src="a.gif" alt="b" style="url(\'foo\')">' +
'<a href = b.html name="c" style="@import \'bar.css\'">')
doc6 = doc2.replace(f('"'), f("'")) # Test single quotes, too.
# Test CSS.
s = doc1
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f([' blah3', 'blah4', 'blah5', ' blah8 '])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test CSS more.
s = doc3
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f(['foo', 'bar', 'foo2', 'http://bar2', 'foo!',
'foo3', 'bar3', 'howdy!', 'foo5', 'foo6', 'foo7'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test CSS even more.
s = doc4
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f(['foo', 'bar', 'foo2', 'bar2', 'foo3'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test HTML.
s = doc2
L = urlextract(s)
L2 = [x.url for x in L]
L3 = [x.url for x in urlextract(doc6)]
ans = f(['a.gif', 'b.html', './c.png',
'http://www.abc.edu/d.tga', 'h.gif',
'http://www.testdomain.com/', 'a.gif', '/i.png'])
assert L2 == L3 == ans
for i in range(len(L)):
assert s[L[i].start:L[i].end] == L[i].url
# Test HTML more.
n = 100
s2 = s * n
L3 = urlextract(s2)
L4 = [x.url for x in L3]
assert L4 == L2 * n
for i in range(len(L3)):
assert s2[L3[i].start:L3[i].end] == L3[i].url
# Test HTML w/ siteurl.
base = f('http://www.python.org/~guido/')
L = urlextract(s, base)
L2 = [x.url for x in L]
assert L2 == [urlparse.urljoin(base, x) for x in ans]
# Test urljoin().
assert urljoin(doc1, urlextract(doc1, mimetype='text/css')) == doc1
assert urljoin(doc2, urlextract(doc2)) == doc2
s = doc2
L = urlextract(s)
L[3].url = f('FOO')
L[5].url = f('BAR')
L[7].url = f('F00!')
assert urljoin(s, L) == f(
'<html><img src="a.gif" alt="b"><a href = b.html name="c">' +
'<td background = ./c.png width=100%><a value=/f.jpg>' +
'<img src="FOO">http://www.ignore.us/\nhttp://www.nowhere.com ' +
'<style>url(h.gif) url(BAR) http://ignore.com/a</style>' +
'<img alt="c" src = "a.gif"><img src=F00!>')
# Test HTML yet more.
s = doc5
L = urlextract(s)
L2 = [x.url for x in L]
assert L2 == f(['foo', 'a.gif', 'bar.css', 'b.html'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
def _python_has_unicode():
"""
True iff Python was compiled with unicode().
"""
try:
unicode
return True
except:
return False
# -------------------------------------------------------------------
# Unit Test Main Routine
# -------------------------------------------------------------------
def _test():
"""
Unit test main routine.
"""
print 'Unit tests:'
_test_remove_comments()
print ' _remove_comments: OK'
_test_shlex_split()
print ' _shlex_split: OK'
_test_tag_dict()
print ' _tag_dict: OK'
_test_tuple_replace()
print ' _tuple_replace: OK'
_test_tagextract()
print ' tagextract*: OK'
if _python_has_unicode():
_test_tagextract(unicode)
print ' tagextract (unicode)*: OK'
_test_urlextract()
print ' urlextract*: OK'
if _python_has_unicode():
_test_urlextract(unicode)
print ' urlextract (unicode)*: OK'
print
print '* The corresponding join method has been tested as well.'
if __name__ == '__main__':
_test()
|
mjhea0/feedzilla
|
feedzilla/util/htmldata.py
|
Python
|
bsd-3-clause
| 49,864
|
[
"CDK"
] |
18012ccd4d037c5e1e0665a74d2afb80f32843c29ca1c68950ef3d9ac1d16563
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
class LangevinThermostat(ut.TestCase):
"""Test Langevin Dynamics"""
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.cell_system.set_regular_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0
system.periodicity = [0, 0, 0]
def setUp(self):
np.random.seed(42)
self.system.time_step = 1e-12
self.system.cell_system.skin = 0.0
self.system.integrator.set_vv()
def tearDown(self):
self.system.part.clear()
self.system.thermostat.turn_off()
def check_rng(self, per_particle_gamma=False):
"""Test for RNG consistency."""
kT = 1.1
gamma = 3.5
def reset_particle():
self.system.part.clear()
p = system.part.add(pos=[0, 0, 0])
if espressomd.has_features("ROTATION"):
p.rotation = [1, 1, 1]
if per_particle_gamma:
assert espressomd.has_features("THERMOSTAT_PER_PARTICLE")
if espressomd.has_features("PARTICLE_ANISOTROPY"):
p.gamma = 3 * [gamma / 2]
else:
p.gamma = gamma / 2
if espressomd.has_features("ROTATION"):
p.gamma_rot = p.gamma * 1.5
return p
system = self.system
system.time_step = 0.01
system.thermostat.set_langevin(kT=kT, gamma=gamma, seed=41)
system.integrator.set_vv()
# run(0) does not increase the philox counter and should give the same
# force
p = reset_particle()
system.integrator.run(0, recalc_forces=True)
force0 = np.copy(p.f)
if espressomd.has_features("ROTATION"):
torque0 = np.copy(p.torque_lab)
system.integrator.run(0, recalc_forces=True)
force1 = np.copy(p.f)
np.testing.assert_almost_equal(force0, force1)
if espressomd.has_features("ROTATION"):
torque1 = np.copy(p.torque_lab)
np.testing.assert_almost_equal(torque0, torque1)
# run(1) should give a different force
p = reset_particle()
system.integrator.run(1)
force2 = np.copy(p.f)
self.assertTrue(np.all(np.not_equal(force1, force2)))
if espressomd.has_features("ROTATION"):
torque2 = np.copy(p.torque_lab)
self.assertTrue(np.all(np.not_equal(torque1, torque2)))
# Different seed should give a different force with same counter state
# force2: langevin.rng_counter() = 1, langevin.rng_seed() = 41
# force3: langevin.rng_counter() = 1, langevin.rng_seed() = 42
p = reset_particle()
system.integrator.run(0, recalc_forces=True)
force2 = np.copy(p.f)
if espressomd.has_features("ROTATION"):
torque2 = np.copy(p.torque_lab)
system.thermostat.set_langevin(kT=kT, gamma=gamma, seed=42)
system.integrator.run(0, recalc_forces=True)
force3 = np.copy(p.f)
self.assertTrue(np.all(np.not_equal(force2, force3)))
if espressomd.has_features("ROTATION"):
torque3 = np.copy(p.torque_lab)
self.assertTrue(np.all(np.not_equal(torque2, torque3)))
# Same seed should not give the same force with different counter state
p = reset_particle()
system.thermostat.set_langevin(kT=kT, gamma=gamma, seed=42)
system.integrator.run(1)
force4 = np.copy(p.f)
self.assertTrue(np.all(np.not_equal(force3, force4)))
if espressomd.has_features("ROTATION"):
torque4 = np.copy(p.torque_lab)
self.assertTrue(np.all(np.not_equal(torque3, torque4)))
# Seed offset should not give the same force with a lag
# force4: langevin.rng_counter() = 2, langevin.rng_seed() = 42
# force5: langevin.rng_counter() = 3, langevin.rng_seed() = 41
reset_particle()
system.thermostat.set_langevin(kT=kT, gamma=gamma, seed=41)
system.integrator.run(1)
p = reset_particle()
system.integrator.run(0, recalc_forces=True)
force5 = np.copy(p.f)
self.assertTrue(np.all(np.not_equal(force4, force5)))
if espressomd.has_features("ROTATION"):
torque5 = np.copy(p.torque_lab)
self.assertTrue(np.all(np.not_equal(torque4, torque5)))
def test_01__rng(self):
"""Test for RNG consistency."""
# No seed should throw exception
with self.assertRaises(ValueError):
self.system.thermostat.set_langevin(kT=1, gamma=2)
self.check_rng()
@utx.skipIfMissingFeatures("THERMOSTAT_PER_PARTICLE")
def test_01__rng_per_particle(self):
"""Test for RNG consistency."""
self.check_rng(True)
def test_02__friction_trans(self):
"""Tests the translational friction-only part of the thermostat."""
system = self.system
# Translation
gamma_t_i = 2
gamma_t_a = np.array((0.5, 2, 1.5))
v0 = np.array((5., 5., 5.))
system.time_step = 0.0005
p = system.part.add(pos=(0, 0, 0), v=v0)
if espressomd.has_features("MASS"):
p.mass = 3
if espressomd.has_features("PARTICLE_ANISOTROPY"):
system.thermostat.set_langevin(kT=0, gamma=gamma_t_a, seed=41)
else:
system.thermostat.set_langevin(kT=0, gamma=gamma_t_i, seed=41)
system.time = 0
for _ in range(100):
system.integrator.run(10)
if espressomd.has_features("PARTICLE_ANISOTROPY"):
np.testing.assert_allclose(
np.copy(p.v),
v0 * np.exp(-gamma_t_a / p.mass * system.time),
atol=4E-4)
else:
np.testing.assert_allclose(
np.copy(p.v),
v0 * np.exp(-gamma_t_i / p.mass * system.time),
atol=45E-4)
@utx.skipIfMissingFeatures("ROTATION")
def test_03__friction_rot(self):
"""Tests the rotational friction-only part of the thermostat."""
system = self.system
        # Rotation
if espressomd.has_features("PARTICLE_ANISOTROPY"):
gamma_t = [0.5, 2, 1.5]
gamma_r = np.array((1.5, 0.7, 1.2))
else:
gamma_t = 2
gamma_r = 3
o0 = np.array((5., 5., 5.))
system.time = 0
system.time_step = 0.0001
p = system.part.add(pos=(0, 0, 0), omega_body=o0, rotation=(1, 1, 1))
if espressomd.has_features("ROTATIONAL_INERTIA"):
rinertia = np.array((2, 2, 2))
p.rinertia = rinertia
else:
rinertia = np.array((1, 1, 1))
system.thermostat.set_langevin(
kT=0, gamma=gamma_t, gamma_rotation=gamma_r, seed=41)
system.time = 0
for _ in range(100):
system.integrator.run(10)
ref_omega_body = o0 * np.exp(-gamma_r / rinertia * system.time)
np.testing.assert_allclose(
np.copy(p.omega_body), ref_omega_body, atol=5E-4)
@utx.skipIfMissingFeatures("VIRTUAL_SITES")
def test_07__virtual(self):
system = self.system
system.time_step = 0.01
virtual = system.part.add(pos=[0, 0, 0], virtual=True, v=[1, 0, 0])
physical = system.part.add(pos=[0, 0, 0], virtual=False, v=[1, 0, 0])
system.thermostat.set_langevin(
kT=0, gamma=1, gamma_rotation=1., act_on_virtual=False, seed=41)
system.integrator.run(0)
np.testing.assert_almost_equal(np.copy(virtual.f), [0, 0, 0])
np.testing.assert_almost_equal(np.copy(physical.f), [-1, 0, 0])
system.thermostat.set_langevin(
kT=0, gamma=1, gamma_rotation=1., act_on_virtual=True, seed=41)
system.integrator.run(0)
np.testing.assert_almost_equal(np.copy(virtual.f), [-1, 0, 0])
np.testing.assert_almost_equal(np.copy(physical.f), [-1, 0, 0])
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/python/langevin_thermostat.py
|
Python
|
gpl-3.0
| 8,806
|
[
"ESPResSo"
] |
fcdf1cc6edb1d6d3a0e3aa7c19f23b323517c4b568537779258f4e90acf9ed32
|
#!/usr/bin/env python
# filename: alignment.py
#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
import logging
import os
import uuid
from StringIO import StringIO
import subprocess as sp
import tempfile
import traceback
from skbio.alignment import StripedSmithWaterman
import nwalign as nw
from Bio import AlignIO, pairwise2
from Bio.Align import AlignInfo
from Bio.SeqRecord import SeqRecord
from abtools import log
from abtools.pipeline import list_files
from abtools.sequence import Sequence
# -------------------------------------
#
# MULTIPLE SEQUENCE ALIGNMENT
#
# -------------------------------------
def mafft(sequences=None, alignment_file=None, fasta=None, fmt='fasta', threads=-1, as_file=False,
print_stdout=False, print_stderr=False):
'''
Performs multiple sequence alignment with MAFFT.
MAFFT is a required dependency.
Args:
sequences (list): Sequences to be aligned. ``sequences`` can be one of four things:
1. a FASTA-formatted string
2. a list of BioPython ``SeqRecord`` objects
3. a list of AbTools ``Sequence`` objects
4. a list of lists/tuples, of the format ``[sequence_id, sequence]``
alignment_file (str): Path for the output alignment file. If not supplied,
a name will be generated using ``tempfile.NamedTemporaryFile()``.
fasta (str): Path to a FASTA-formatted file of sequences. Used as an
            alternative to ``sequences`` when supplying a FASTA file.
fmt (str): Format of the alignment. Options are 'fasta' and 'clustal'. Default
is 'fasta'.
threads (int): Number of threads for MAFFT to use. Default is ``-1``, which
results in MAFFT using ``multiprocessing.cpu_count()`` threads.
as_file (bool): If ``True``, returns a path to the alignment file. If ``False``,
returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling
``Bio.AlignIO.read()`` on the alignment file).
Returns:
Returns a BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``,
in which case the path to the alignment file is returned.
'''
if sequences:
fasta_string = _get_fasta_string(sequences)
fasta_file = tempfile.NamedTemporaryFile(delete=False)
fasta_file.write(fasta_string)
ffile = fasta_file.name
fasta_file.close()
elif fasta:
ffile = fasta
if alignment_file is None:
alignment_file = tempfile.NamedTemporaryFile(delete=False).name
aln_format = ''
if fmt == 'clustal':
aln_format = '--clustalout '
mafft_cline = 'mafft --thread {} {}{} > {}'.format(threads, aln_format, ffile, alignment_file)
mafft = sp.Popen(str(mafft_cline),
stdout=sp.PIPE,
stderr=sp.PIPE,
universal_newlines=True,
shell=True)
stdout, stderr = mafft.communicate()
if print_stdout:
print(mafft_cline)
print(stdout)
if print_stderr:
print(stderr)
os.unlink(ffile)
if as_file:
return alignment_file
if os.stat(alignment_file).st_size == 0:
return None
aln = AlignIO.read(open(alignment_file), fmt)
os.unlink(alignment_file)
return aln
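# Minimal usage sketch for mafft() (illustrative; assumes the `mafft`
# executable is installed and on PATH, and that `seqs` is one of the input
# types described in the docstring above, e.g. a list of AbTools Sequence
# objects or [id, sequence] pairs).
def _example_mafft_usage(seqs):
    # Align in-memory sequences and get a Biopython MultipleSeqAlignment back.
    aln = mafft(sequences=seqs, fmt='fasta', threads=-1)
    # Alternatively, keep the result on disk and work with the file path.
    aln_file = mafft(sequences=seqs, as_file=True)
    return aln, aln_file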
def muscle(sequences=None, alignment_file=None, fasta=None,
fmt='fasta', as_file=False, maxiters=None, diags=False,
gap_open=None, gap_extend=None):
'''
Performs multiple sequence alignment with MUSCLE.
MUSCLE is a required dependency.
Args:
sequences (list): Sequences to be aligned. ``sequences`` can be one of four things:
1. a FASTA-formatted string
2. a list of BioPython ``SeqRecord`` objects
3. a list of AbTools ``Sequence`` objects
4. a list of lists/tuples, of the format ``[sequence_id, sequence]``
alignment_file (str): Path for the output alignment file. If not supplied,
a name will be generated using ``tempfile.NamedTemporaryFile()``.
fasta (str): Path to a FASTA-formatted file of sequences. Used as an
            alternative to ``sequences`` when supplying a FASTA file.
fmt (str): Format of the alignment. Options are 'fasta' and 'clustal'. Default
is 'fasta'.
threads (int): Number of threads for MAFFT to use. Default is ``-1``, which
results in MAFFT using ``multiprocessing.cpu_count()`` threads.
as_file (bool): If ``True``, returns a path to the alignment file. If ``False``,
returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling
``Bio.AlignIO.read()`` on the alignment file).
maxiters (int): Passed directly to MUSCLE using the ``-maxiters`` flag.
diags (int): Passed directly to MUSCLE using the ``-diags`` flag.
gap_open (float): Passed directly to MUSCLE using the ``-gapopen`` flag. Ignored
if ``gap_extend`` is not also provided.
gap_extend (float): Passed directly to MUSCLE using the ``-gapextend`` flag. Ignored
if ``gap_open`` is not also provided.
Returns:
Returns a BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``,
in which case the path to the alignment file is returned.
'''
if sequences:
fasta_string = _get_fasta_string(sequences)
elif fasta:
fasta_string = open(fasta, 'r').read()
aln_format = ''
if fmt == 'clustal':
aln_format = ' -clwstrict'
muscle_cline = 'muscle{} '.format(aln_format)
if maxiters is not None:
muscle_cline += ' -maxiters {}'.format(maxiters)
if diags:
muscle_cline += ' -diags'
if all([gap_open is not None, gap_extend is not None]):
muscle_cline += ' -gapopen {} -gapextend {}'.format(gap_open, gap_extend)
muscle = sp.Popen(str(muscle_cline),
stdin=sp.PIPE,
stdout=sp.PIPE,
stderr=sp.PIPE,
universal_newlines=True,
shell=True)
alignment = muscle.communicate(input=fasta_string)[0]
aln = AlignIO.read(StringIO(alignment), fmt)
if as_file:
if not alignment_file:
alignment_file = tempfile.NamedTemporaryFile().name
AlignIO.write(aln, alignment_file, fmt)
return alignment_file
return aln
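# Minimal usage sketch for muscle() (illustrative; assumes the `muscle`
# executable is on PATH and `seqs` is one of the input types described in
# the docstring above).
def _example_muscle_usage(seqs):
    # Quicker, rougher alignment: limit iterations and use diagonal optimization.
    aln = muscle(sequences=seqs, maxiters=2, diags=True)
    # Custom gap penalties are only applied when both values are supplied.
    aln_gaps = muscle(sequences=seqs, gap_open=-10.0, gap_extend=-0.5)
    return aln, aln_gaps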
def consensus(aln, name=None, threshold=0.51, ambiguous='N'):
summary_align = AlignInfo.SummaryInfo(aln)
consensus = summary_align.gap_consensus(threshold=threshold, ambiguous=ambiguous)
if name is None:
name = uuid.uuid4()
consensus_string = str(consensus).replace('-', '')
return (name, consensus_string.upper())
def _get_fasta_string(sequences):
if type(sequences) == str:
return sequences
elif all([type(s) == Sequence for s in sequences]):
return '\n'.join([s.fasta for s in sequences])
else:
return '\n'.join([Sequence(s).fasta for s in sequences])
# elif type(sequences[0]) == SeqRecord:
# return '\n'.join(['>{}\n{}'.format(seq.id, str(seq.seq).upper()) for seq in sequences])
# # elif type(sequences[0]) == Sequence:
# # return '\n'.join(['>{}\n{}'.format(seq.id, seq.seq) for seq in sequences])
# elif type(sequences[0]) in [list, tuple]:
# return '\n'.join(['>{}\n{}'.format(seq[0], seq[1]) for seq in sequences])
# ----------------------------
#
# PAIRWISE ALIGNMENT
#
# ----------------------------
def local_alignment(query, target=None, targets=None, match=3, mismatch=-2,
gap_open=-5, gap_extend=-2, matrix=None, aa=False, gap_open_penalty=None, gap_extend_penalty=None):
'''
Striped Smith-Waterman local pairwise alignment.
Args:
query: Query sequence. ``query`` can be one of four things:
1. a nucleotide or amino acid sequence, as a string
2. a Biopython ``SeqRecord`` object
3. an AbTools ``Sequence`` object
4. a list/tuple of the format ``[seq_id, sequence]``
target: A single target sequence. ``target`` can be anything that
``query`` accepts.
        targets (list): A list of target sequences, to be processed iteratively.
Each element in the ``targets`` list can be anything accepted by
``query``.
match (int): Match score. Should be a positive integer. Default is 3.
mismatch (int): Mismatch score. Should be a negative integer. Default is -2.
gap_open (int): Penalty for opening gaps. Should be a negative integer.
Default is -5.
gap_extend (int): Penalty for extending gaps. Should be a negative
integer. Default is -2.
matrix (str, dict): Alignment scoring matrix. Two options for passing the
alignment matrix:
- The name of a built-in matrix. Current options are ``blosum62`` and ``pam250``.
- A nested dictionary, giving an alignment score for each residue pair. Should be formatted
such that retrieving the alignment score for A and G is accomplished by::
matrix['A']['G']
aa (bool): Must be set to ``True`` if aligning amino acid sequences. Default
is ``False``.
Returns:
If a single target sequence is provided (via ``target``), a single ``SSWAlignment``
object will be returned. If multiple target sequences are supplied (via ``targets``),
a list of ``SSWAlignment`` objects will be returned.
'''
if aa and not matrix:
err = 'ERROR: You must supply a scoring matrix for amino acid alignments'
raise RuntimeError(err)
if not target and not targets:
err = 'ERROR: You must supply a target sequence (or sequences).'
raise RuntimeError(err)
if target:
targets = [target, ]
# to maintain backward compatibility with earlier AbTools API
if gap_open_penalty is not None:
gap_open = -1 * gap_open_penalty
if gap_extend_penalty is not None:
gap_extend = -1 * gap_extend_penalty
alignments = []
for t in targets:
try:
alignment = SSWAlignment(query=query,
target=t,
match=match,
mismatch=mismatch,
matrix=matrix,
gap_open=-1 * gap_open,
gap_extend=-1 * gap_extend,
aa=aa)
alignments.append(alignment)
except IndexError:
continue
if len(alignments) == 1:
return alignments[0]
return alignments
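# Minimal usage sketch for local_alignment() (illustrative; the sequences are
# made up). A single target returns one SSWAlignment, a list of targets
# returns a list, and alignments sort by their score attribute.
def _example_local_alignment_usage():
    query = ('query1', 'ATGCAGTCAGTCAGT')
    targets = [('t1', 'ATGCAGTCAGTCAGT'), ('t2', 'ATGCAATCGGTCAGT')]
    alignments = local_alignment(query, targets=targets)
    best = sorted(alignments, reverse=True)[0]
    return best.score, best.aligned_query, best.aligned_target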
def local_alignment_biopython(query, target=None, targets=None, match=3, mismatch=-2, matrix=None,
gap_open=-5, gap_extend=-2, aa=False):
if not target and not targets:
err = 'ERROR: You must supply a target sequence (or sequences).'
raise RuntimeError(err)
if target:
targets = [target, ]
alignments = []
for t in targets:
try:
            alignment = BiopythonAlignment(query=query,
target=t,
match=match,
mismatch=mismatch,
matrix=matrix,
gap_open=gap_open,
gap_extend=gap_extend,
aa=aa)
alignments.append(alignment)
except IndexError:
continue
if len(alignments) == 1:
return alignments[0]
return alignments
def global_alignment(query, target=None, targets=None, match=3, mismatch=-2, gap_open=-5, gap_extend=-2,
score_match=None, score_mismatch=None, score_gap_open=None,
score_gap_extend=None, matrix=None, aa=False):
'''
Needleman-Wunch global pairwise alignment.
With ``global_alignment``, you can score an alignment using different
    parameters than were used to compute the alignment. This allows you to
compute pure identity scores (match=1, mismatch=0) on pairs of sequences
for which those alignment parameters would be unsuitable. For example::
seq1 = 'ATGCAGC'
seq2 = 'ATCAAGC'
using identity scoring params (match=1, all penalties are 0) for both alignment
and scoring produces the following alignment::
ATGCA-GC
|| || ||
AT-CAAGC
with an alignment score of 6 and an alignment length of 8 (identity = 75%). But
what if we want to calculate the identity of a gapless alignment? Using::
global_alignment(seq1, seq2,
gap_open=20,
score_match=1,
score_mismatch=0,
score_gap_open=10,
score_gap_extend=1)
we get the following alignment::
ATGCAGC
|| |||
ATCAAGC
    which has a score of 5 and an alignment length of 7 (identity = 71%). Obviously,
this is an overly simple example (it would be much easier to force gapless alignment
by just iterating over each sequence and counting the matches), but there are several
    real-life cases in which different alignment and scoring parameters are desirable.
Args:
query: Query sequence. ``query`` can be one of four things:
1. a nucleotide or amino acid sequence, as a string
2. a Biopython ``SeqRecord`` object
3. an AbTools ``Sequence`` object
4. a list/tuple of the format ``[seq_id, sequence]``
target: A single target sequence. ``target`` can be anything that
``query`` accepts.
        targets (list): A list of target sequences, to be processed iteratively.
Each element in the ``targets`` list can be anything accepted by
``query``.
match (int): Match score for alignment. Should be a positive integer. Default is 3.
mismatch (int): Mismatch score for alignment. Should be a negative integer. Default is -2.
gap_open (int): Penalty for opening gaps in alignment. Should be a negative integer.
Default is -5.
gap_extend (int): Penalty for extending gaps in alignment. Should be a negative
integer. Default is -2.
score_match (int): Match score for scoring the alignment. Should be a positive integer.
Default is to use the score from ``match`` or ``matrix``, whichever is provided.
score_mismatch (int): Mismatch score for scoring the alignment. Should be a negative
integer. Default is to use the score from ``mismatch`` or ``matrix``, whichever
is provided.
score_gap_open (int): Gap open penalty for scoring the alignment. Should be a negative
integer. Default is to use ``gap_open``.
score_gap_extend (int): Gap extend penalty for scoring the alignment. Should be a negative
integer. Default is to use ``gap_extend``.
matrix (str, dict): Alignment scoring matrix. Two options for passing the alignment matrix:
- The name of a built-in matrix. Current options are ``blosum62`` and ``pam250``.
- A nested dictionary, giving an alignment score for each residue pair. Should be
formatted such that retrieving the alignment score for A and G is accomplished by::
matrix['A']['G']
aa (bool): Must be set to ``True`` if aligning amino acid sequences. Default
is ``False``.
Returns:
If a single target sequence is provided (via ``target``), a single ``NWAlignment``
object will be returned. If multiple target sequences are supplied (via ``targets``),
a list of ``NWAlignment`` objects will be returned.
'''
if not target and not targets:
err = 'ERROR: You must supply a target sequence (or sequences).'
raise RuntimeError(err)
if target:
targets = [target, ]
if type(targets) not in (list, tuple):
err = 'ERROR: ::targets:: requires an iterable (list or tuple).'
err += 'For a single sequence, use ::target::'
raise RuntimeError(err)
alignments = []
for t in targets:
alignment = NWAlignment(query=query,
target=t,
match=match,
mismatch=mismatch,
gap_open=gap_open,
gap_extend=gap_extend,
score_match=score_match,
score_mismatch=score_mismatch,
score_gap_open=score_gap_open,
score_gap_extend=score_gap_extend,
matrix=matrix,
aa=aa)
alignments.append(alignment)
if target is not None:
return alignments[0]
return alignments
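# Worked example of the identity-scoring behaviour described in the docstring
# above (illustrative): align with one set of parameters, then score the
# alignment with identity-style parameters.
def _example_global_identity():
    seq1 = 'ATGCAGC'
    seq2 = 'ATCAAGC'
    aln = global_alignment(seq1, target=seq2,
                           gap_open=20,
                           score_match=1,
                           score_mismatch=0,
                           score_gap_open=10,
                           score_gap_extend=1)
    # 5 matching positions over an alignment length of 7 -> ~71% identity.
    return float(aln.score) / len(aln)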
class BaseAlignment(object):
"""
Base class for local and global pairwise alignments.
.. note::
All comparisons between ``BaseAlignments``
are done on the ``score`` attribute (which must be implemented
by any classes that subclass ``BaseAlignment``). This was done
so that sorting alignments like so::
alignments = [list of alignments]
alignments.sort(reverse=True)
results in a sorted list of alignments from the highest alignment
score to the lowest.
Attributes:
query (Sequence): The input query sequence, as an AbTools
``Sequence`` object.
target (Sequence): The input target sequence, as an AbTools
``Sequence`` object.
target_id (str): ID of the target sequence.
raw_query: The raw query, before conversion to a ``Sequence``.
raw_target: The raw target, before conversion to a ``Sequence``.
"""
def __init__(self, query, target, matrix,
match, mismatch, gap_open, gap_extend, aa):
super(BaseAlignment, self).__init__()
self.query = self._process_sequence(query, aa=aa)
self.target = self._process_sequence(target, aa=aa)
self.raw_query = query
self.raw_target = target
self._matrix = matrix
self._match = int(match)
self._mismatch = int(mismatch)
self._gap_open = int(gap_open)
self._gap_extend = int(gap_extend)
self._aa = bool(aa)
def __repr__(self):
if len(self.aligned_query) > 20:
qstring = '{}...{}'.format(self.aligned_query[:10], self.aligned_query[-10:])
mstring = '{}...{}'.format(self.alignment_midline[:10], self.alignment_midline[-10:])
tstring = '{}...{}'.format(self.aligned_target[:10], self.aligned_target[-10:])
else:
qstring = self.aligned_query
mstring = self.alignment_midline
tstring = self.aligned_target
return_string = '\n\n'
return_string += 'Pairwise Alignment\n'
return_string += '------------------\n\n'
return_string += 'query: {}\n'.format(qstring)
return_string += ' {}\n'.format(mstring)
return_string += 'target: {}\n\n'.format(tstring)
return_string += 'score: {}\n'.format(str(self.score))
return_string += 'type: {}\n'.format(self.alignment_type)
return_string += 'length: {}'.format(str(len(self.aligned_query)))
print(return_string)
return ''
def __str__(self):
return_string = ''
return_string += '{}\n'.format(self.aligned_query)
return_string += '{}\n'.format(self.alignment_midline)
return_string += '{}\n'.format(self.aligned_target)
return return_string
def __len__(self):
return len(self.aligned_query)
def __eq__(self, other):
if not hasattr(other, 'score'):
if type(other) in [int, float]:
return self.score == other
return False
return self.score == other.score
def __lt__(self, other):
if not hasattr(other, 'score'):
if type(other) in [int, float]:
                return self.score < other
return False
return self.score < other.score
def __le__(self, other):
if not hasattr(other, 'score'):
if type(other) in [int, float]:
                return self.score <= other
return False
return self.score <= other.score
def __gt__(self, other):
if not hasattr(other, 'score'):
if type(other) in [int, float]:
                return self.score > other
return False
return self.score > other.score
def __ge__(self, other):
if not hasattr(other, 'score'):
if type(other) in [int, float]:
                return self.score >= other
return False
return self.score >= other.score
@property
def target_id(self):
return self._target_id
@target_id.setter
def target_id(self, target_id):
self._target_id = target_id
@staticmethod
def _process_sequence(sequence, aa):
if type(sequence) == Sequence:
return sequence
return Sequence(sequence)
def _alignment_midline(self):
midline = ''
for q, t in zip(self.aligned_query, self.aligned_target):
if q == t:
midline += '|'
else:
midline += ' '
return midline
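# Small sketch of the score-based comparisons documented above (illustrative):
# local and global alignments can be compared and sorted together because all
# comparison operators go through the score attribute.
def _example_alignment_sorting(query, target):
    local_aln = local_alignment(query, target=target)
    global_aln = global_alignment(query, target=target)
    ranked = sorted([local_aln, global_aln], reverse=True)
    return ranked[0]  # the highest-scoring alignment, regardless of type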
class SSWAlignment(BaseAlignment):
"""
Structure for performing and analyzing a Smith-Waterman local alignment.
.. note:
Exposed attributes and methods are the same as ``NWAlignment``, so
        local and global alignments can be handled in the same way. In fact,
since comparisons are made based on score, local and global alignments
can be directly compared with constructions like::
local_aln == global_aln
local_aln > global_aln
alignments = sorted([global_aln, local_aln])
Attributes:
alignment_type (str): Is 'local' for all ``SSWAlignment`` objects.
aligned_query (str): The aligned query sequence (including gaps).
aligned_target (str): The aligned target sequence (including gaps).
alignment_midline (str): Midline for the aligned sequences, with ``|`` indicating
matches and a gap indicating mismatches::
print(aln.aligned_query)
print(aln.alignment_midline)
print(aln.aligned_target)
# ATGC
# || |
# ATCC
score (int): Alignment score.
query_begin (int): Position in the raw query sequence at which
the optimal alignment begins.
query_end (int): Position in the raw query sequence at which the
optimal alignment ends.
target_begin (int): Position in the raw target sequence at which
the optimal alignment begins.
target_end (int): Position in the raw target sequence at which the
optimal alignment ends.
"""
def __init__(self, query, target, match=3, mismatch=-2, matrix=None,
gap_open=5, gap_extend=2, aa=False):
super(SSWAlignment, self).__init__(query, target, matrix,
match, mismatch, gap_open, gap_extend, aa)
self.alignment_type = 'local'
self._alignment = self._align()
self.aligned_query = self._alignment.aligned_query_sequence
self.aligned_target = self._alignment.aligned_target_sequence
self.alignment_midline = self._alignment_midline()
self.score = self._alignment.optimal_alignment_score
self.query_begin = self._alignment.query_begin
self.query_end = self._alignment.query_end
self.target_begin = self._alignment.target_begin
self.target_end = self._alignment.target_end_optimal
def _align(self):
aligner = StripedSmithWaterman(self.query.sequence,
match_score=self._match,
mismatch_score=self._mismatch,
gap_open_penalty=self._gap_open,
gap_extend_penalty=self._gap_extend,
substitution_matrix=self._matrix,
protein=self._aa)
return aligner(self.target.sequence)
class BiopythonAlignment(BaseAlignment):
def __init__(self, query, target, match=3, mismatch=-2, matrix=None,
gap_open=5, gap_extend=2, aa=False):
super(BiopythonAlignment, self).__init__(query, target, matrix,
match, mismatch, gap_open, gap_extend, aa)
self.alignment_type = 'local'
self._aln = self._align()
aln_query, aln_target, score, begin, end = self._aln
self.aligned_query = aln_query[begin:end]
self.aligned_target = aln_target[begin:end]
self.alignment_midline = self._alignment_midline()
self.score = score
self.query_begin = self._get_begin_pos(aln_query, begin)
self.query_end = self._get_end_pos(aln_query, end)
self.target_begin = self._get_begin_pos(aln_target, begin)
self.target_end = self._get_end_pos(aln_target, end)
def _align(self):
aln = pairwise2.align.localms(self.query.sequence,
self.target.sequence,
self._match,
self._mismatch,
self._gap_open,
self._gap_extend)
return aln[0]
def _get_begin_pos(self, seq, begin):
dashes = seq.count('-', 0, begin)
return begin - dashes
def _get_end_pos(self, seq, end):
return len(seq[:end].replace('-', ''))
class NWAlignment(BaseAlignment):
"""
Structure for performing and analyzing a Needleman-Wunch global alignment.
.. note:
Exposed attributes and methods are the same as ``SSWAlignment``, so
        local and global alignments can be handled in the same way. In fact,
since comparisons are made based on score, local and global alignments
can be directly compared with constructions like::
local_aln == global_aln
local_aln > global_aln
alignments = sorted([global_aln, local_aln])
Attributes:
alignment_type (str): Is 'global' for all ``NWAlignment`` objects.
aligned_query (str): The aligned query sequence (including gaps).
aligned_target (str): The aligned target sequence (including gaps).
alignment_midline (str): Midline for the aligned sequences, with
``|`` indicating matches and a gap indicating mismatches::
print(aln.aligned_query)
print(aln.alignment_midline)
print(aln.aligned_target)
# ATGC
# || |
# ATCC
score (int): Alignment score.
query_begin (int): Position in the raw query sequence at which
the optimal alignment begins.
query_end (int): Position in the raw query sequence at which the
optimal alignment ends.
target_begin (int): Position in the raw target sequence at which
the optimal alignment begins.
target_end (int): Position in the raw target sequence at which the
optimal alignment ends.
"""
def __init__(self, query, target, match=3, mismatch=-2,
gap_open=-5, gap_extend=-2,
score_match=None, score_mismatch=None,
score_gap_open=None, score_gap_extend=None,
matrix=None, aa=False):
super(NWAlignment, self).__init__(query, target, matrix,
match, mismatch, gap_open, gap_extend, aa)
self.alignment_type = 'global'
self._score_match = int(score_match) if score_match is not None else None
self._score_mismatch = int(score_mismatch) if score_mismatch is not None else None
self._score_gap_open = int(score_gap_open) if score_gap_open is not None else None
self._score_gap_extend = int(score_gap_extend) if score_gap_extend is not None else None
self._matrix = matrix
self._alignment = self._align()
self.aligned_query = self._alignment[0]
self.aligned_target = self._alignment[1]
self.alignment_midline = self._alignment_midline()
self.score = self._score_alignment()
def _get_matrix_file(self, match=None, mismatch=None, matrix=None):
matrix_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'utils/matrices')
builtins = ['blosum62', 'match3mismatch2', 'match1mismatch0']
if self._matrix is not None:
matrix_name = self._matrix
else:
matrix_name = 'match{}mismatch{}'.format(abs(match), abs(mismatch))
if matrix_name.lower() in builtins:
return os.path.join(matrix_dir, matrix_name)
builtin_names = [os.path.basename(f) for f in list_files(matrix_dir)]
if self._matrix is not None:
if self._matrix.lower() in builtin_names:
return os.path.join(matrix_dir, self._matrix.lower())
else:
err = 'The supplied matrix name ({}) does not exist. '.format(matrix)
err += 'Built-in matrices are: {}'.format(', '.join(builtins))
raise RuntimeError(err)
else:
self._build_matrix_from_params(match, mismatch, os.path.join(matrix_dir, matrix_name))
return os.path.join(matrix_dir, matrix_name)
def _align(self):
matrix = self._get_matrix_file(match=self._match,
mismatch=self._mismatch,
matrix=self._matrix)
aln = nw.global_align(self.query.sequence,
self.target.sequence,
gap_open=self._gap_open,
gap_extend=self._gap_extend,
matrix=matrix)
return aln
def _score_alignment(self):
if all([self._score_match is not None, self._score_mismatch is not None]):
matrix = self._get_matrix_file(match=self._score_match,
mismatch=self._score_mismatch)
elif self._matrix is not None:
matrix = self._get_matrix_file(matrix=self._matrix)
else:
matrix = self._get_matrix_file(match=self._match,
mismatch=self._mismatch)
gap_open = self._score_gap_open if self._score_gap_open is not None else self._gap_open
gap_extend = self._score_gap_extend if self._score_gap_extend is not None else self._gap_extend
aln = nw.score_alignment(self.aligned_query,
self.aligned_target,
gap_open=gap_open,
gap_extend=gap_extend,
matrix=matrix)
return aln
@staticmethod
def _build_matrix_from_params(match, mismatch, matrix_file):
mstring = ' {}'.format(match) if len(str(match)) == 1 else str(match)
mmstring = ' {}'.format(mismatch) if len(str(mismatch)) == 1 else str(mismatch)
residues = ['A', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R',
'S', 'T', 'V', 'W', 'Y', '*']
header = ' ' + ' '.join(residues)
matlist = [header, ]
for r1 in residues:
resline = [r1, ]
for r2 in residues:
resline.append(mstring if r1 == r2 else mmstring)
matlist.append(' '.join(resline))
open(matrix_file, 'w').write('\n'.join(matlist))
return matrix_file
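    # For reference (illustrative, truncated): _build_matrix_from_params(1, 0, path)
    # writes an identity-style matrix in the simple text format read by nwalign:
    #
    #        A  C  D  E ...
    #     A  1  0  0  0 ...
    #     C  0  1  0  0 ...
    #
    # i.e. `match` on the diagonal and `mismatch` everywhere else, over the 20
    # amino acid one-letter codes plus '*'.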
@staticmethod
def _get_builtin_matrix(matrix_name):
matrix_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'matrices')
matrices = [os.path.basename(f) for f in list_files(matrix_dir)]
if matrix_name.lower() not in matrices:
            err = 'The matrix name you provided ({}) is not built-in. '.format(matrix_name)
            err += 'Built-in matrices are: {}'.format(', '.join(matrices))
            raise RuntimeError(err)
return os.path.join(matrix_dir, matrix_name.lower())
|
briney/abtools
|
abtools/alignment.py
|
Python
|
mit
| 33,992
|
[
"Biopython"
] |
2d9df1ac615d7a322571bb0ccdec4f22fe26a5ffe421b65dcb8f80043a278730
|
#-------------------------------------------------------------------------------
# Copyright (c) 2011, Kafti team
#
# Released under the MIT license. See the LICENSE file for details.
#-------------------------------------------------------------------------------
"""
Visitor for collecting nasl scripts family statistic.
Module can categorize nasl script into directories based on its family
"""
import os
import shutil
import logging
from collections import defaultdict
from pynasl.naslAST import BaseNodeVisitor
from pynasl.visitors.statistic.statistic import write_func_dict_to_csv
logger = logging.getLogger("scripts_family")
logger.setLevel(logging.INFO)
class FamilyGetter(BaseNodeVisitor):
def __init__(self):
self.variables = {}
self.family_name = None
def visit_FuncCall(self, node):
self.generic_visit(node)
if node.name == "script_family":
self.family_name = node.args_list.args[0].value.value
if self.family_name in self.variables:
self.family_name = self.variables[self.family_name]
def visit_Affectation(self, node):
self.generic_visit(node)
str_lvalue = node.lvalue.value
str_expr = node.expr.value
if node.operation == "=":
self.variables[str_lvalue] = str_expr
def _log_family(plugins_dir, categorize_path=None):
"""logger script_family
@param plugins_dir: string with path to directory with nasl scripts.
@param categorize_path: string with path to directory
to which nasl scripts will be categorized.
Default value - None, that means not categorize nasl scripts.
"""
from pynasl.naslparse import naslparser
scripts_family = defaultdict(list)
strange_family = []
if categorize_path:
if not os.path.exists(categorize_path):
os.makedirs(categorize_path)
else:
shutil.rmtree(categorize_path)
logger.info('Files processing started')
total_files = 0
for root, dirs, files in os.walk(plugins_dir):
for name in files:
if not name.endswith('.nasl'):
continue
family = FamilyGetter()
full_path = os.path.join(root, name)
family.visit(naslparser(full_path, True))
if not family.family_name:
strange_family.append(name)
else:
family_name = family.family_name[1:-1].replace(':','')
scripts_family[family_name].append(name)
if categorize_path:
dst = os.path.join(categorize_path, family_name)
try:
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copy(full_path, dst)
except OSError, why:
logger.error(str(why))
total_files += 1
if total_files % 1000 == 0:
logger.info("Processed %s files" % total_files)
logger.info('Files processing finished')
write_func_dict_to_csv(scripts_family, "scripts_family.csv")
logger.info("*.nasl files contain %s different script_family" % len(scripts_family))
logger.info("Detailed statistic is in scripts_family.txt")
logger.info("%s scripts has strange family" % len(strange_family))
logger.info('%s' % strange_family)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(name)-20s %(message)s',
datefmt='%H:%M:%S')
_log_family(os.environ['KAFTI_NASLSCRIPTS_PATH'], r'd:\temp')
|
kafti/pynasl
|
pynasl/visitors/statistic/scripts_family.py
|
Python
|
mit
| 3,741
|
[
"VisIt"
] |
aef8d56a43ac473870aa2bef358fbb4df24e7eedf46f40d510492cdfba47b177
|
import numpy as np
import ase
from pyemto.utilities import distort, rotation_matrix
from pyemto.examples.emto_input_generator import *
from pymatgen import Lattice
from ase.visualize import view
from ase.build import cut, make_supercell
import sys
find_primitive = False
make_supercell = None
coords_are_cartesian = True
nkx = 13
nky = 13
nkz = 13
# Primitive fcc
prims0 = np.array([
[0.5,0.5,0],
[0.5,0,0.5],
[0,0.5,0.5]])
basis0 = np.array([
[0.0,0.0,0.0]
])
species = [['Co','Co','Cr','Cr','Fe','Fe','Mn','Mn','Ni','Ni']]
splts = [[1,-1]*5]
ncpu = 1
folder = os.getcwd()
emtopath = folder
latpath = emtopath
slurm_options=['#SBATCH -A snic2018-3-205',
'#SBATCH -n {0}'.format(ncpu),
#'#SBATCH --mem-per-cpu=4000',
#'module load emto/5.8.1/build16-QNA'
]
deltas = np.linspace(0,0.05,6)
# We need to use a non-zero value for the first delta to break the symmetry of the structure.
deltas[0] = 0.001
# Only two distortions for cubic (third one is bulk modulus EOS fit)
distortions = ['Cprime', 'C44']
# Calculate equilibrium volume
for i, distortion in enumerate(distortions):
if i > 0:
pass
"""
print('#'*100)
print('distortion = ',distortion)
print('#'*100)
for delta in deltas:
print('#'*100)
print('delta = ',delta)
print('#'*100)
if distortion == 'Cprime':
dist_matrix = np.array([
[1+delta,0,0],
[0,1-delta,0],
[0,0,1/(1-delta**2)]
])
elif distortion == 'C44':
dist_matrix = np.array([
[1, delta, 0],
[delta ,1, 0],
[0, 0, 1/(1-delta**2)]
])
# Calculate new lattice vectors and atomic positions
prims = distort(dist_matrix, prims0)
basis = distort(dist_matrix, basis0)
# Each different distortion might need different set of nkx, nky, nkz
if distortion == 'Cprime':
nkx = 21; nky = 21; nkz = 21
elif distortion == 'C44':
nkx = 20; nky = 20; nkz = 25
"""
input_creator = EMTO(folder=emtopath, EMTOdir='/home/x_henle/emto/KED_ELF/emto-dev/build')
input_creator.prepare_input_files(latpath=latpath,
jobname='CoCrFeMnNi',
species=species,
splts=splts,
#concs=concs,
prims=prims0,
basis=basis0,
find_primitive=find_primitive,
coords_are_cartesian=coords_are_cartesian,
latname='fcc',
#nz1=32,
ncpa=15,
sofc='Y',
nkx=nkx,
nky=nky,
nkz=nkz,
ncpu=ncpu,
parallel=False,
alpcpa=0.6,
runtime='24:00:00',
KGRN_file_type='scf',
KFCD_file_type='fcd',
amix=0.01,
#efgs=-1.0,
#depth=2.0,
tole=1e-6,
tolef=1e-6,
iex=4,
niter=200,
kgrn_nfi=91,
#strt='B',
make_supercell=make_supercell,
slurm_options=slurm_options)
sws_range = np.linspace(2.5, 2.75, 15)
input_creator.write_bmdl_kstr_shape_input()
input_creator.write_kgrn_kfcd_swsrange(sws=sws_range)
|
hpleva/pyemto
|
example_projects/fcc_elastic_constants/input_scripts/equilibrium_volume/input.py
|
Python
|
mit
| 4,206
|
[
"ASE",
"pymatgen"
] |
bde7b44bdc45f7f0a16a9883e2693856a2c04df216f9fd38bbb198350d0842c2
|
from typing import Optional
from urllib.parse import urlparse
import click
from click.exceptions import Exit
from valohai_cli import __version__
from valohai_cli.api import APISession
from valohai_cli.consts import default_app_host, yes_option
from valohai_cli.exceptions import APIError
from valohai_cli.messages import banner, error, info, success, warn
from valohai_cli.settings import settings
TOKEN_LOGIN_HELP = '''
Oops!
The error code "{code}" indicates username + password authentication is not possible.
Use a login token instead:
1. Log in on {host}
2. Visit {host}auth/tokens/ to generate an authentication token
3. Once you have an authentication token, log in with:
{command}
'''.strip()
@click.command()
@click.option('--username', '-u', envvar='VALOHAI_USERNAME', help='Your Valohai username')
@click.option('--password', '-p', envvar='VALOHAI_PASSWORD', help='Your Valohai password')
@click.option('--token', '-t', envvar='VALOHAI_TOKEN', help='A Valohai API token (instead of username and password)')
@click.option('--host', '-h', help='Valohai host to login on (for private installations)')
@click.option('--verify-ssl/--no-verify-ssl', default=True, help='Whether to verify SSL connections (this setting is persisted)')
@yes_option
def login(
username: str,
password: str,
token: Optional[str],
host: Optional[str],
yes: bool,
verify_ssl: bool,
) -> None:
"""Log in into Valohai."""
if settings.user and settings.token:
current_username = settings.user['username']
current_host = settings.host
if not yes:
click.confirm((
f'You are already logged in as {current_username} on {current_host}.\n'
'Are you sure you wish to acquire a new token?'
), abort=True)
else:
info(f'--yes set: ignoring pre-existing login for {current_username} on {current_host}')
if not (token or username or password or host):
# Don't show the banner if this seems like a non-interactive login.
click.secho(f'Welcome to Valohai CLI {__version__}!', bold=True)
host = validate_host(host)
if token:
if username or password:
error('Token is mutually exclusive with username/password')
raise Exit(1)
click.echo(f'Using token {token[:5]}... to log in.')
else:
token = do_user_pass_login(
host=host,
username=username,
password=password,
verify_ssl=verify_ssl,
)
click.echo(f'Verifying API token on {host}...')
with APISession(host, token, verify_ssl=verify_ssl) as sess:
user_data = sess.get('/api/v0/users/me/').json()
settings.persistence.update(
host=host,
user=user_data,
token=token,
verify_ssl=verify_ssl,
)
settings.persistence.save()
success(f"Logged in. Hey {user_data.get('username', 'there')}!")
if not verify_ssl:
warn("SSL verification is off. This may leave you vulnerable to man-in-the-middle attacks.")
def do_user_pass_login(
*,
host: str,
username: Optional[str] = None,
password: Optional[str] = None,
verify_ssl: bool = True,
) -> str:
click.echo(f'\nIf you don\'t yet have an account, please create one at {host} first.\n')
if not username:
username = click.prompt(f'{host} - Username').strip()
else:
click.echo(f'Username: {username}')
if not password:
password = click.prompt(f'{username} on {host} - Password', hide_input=True)
click.echo(f'Retrieving API token from {host}...')
with APISession(host) as sess:
try:
token_data = sess.post('/api/v0/get-token/', data={
'username': username,
'password': password,
}, verify=verify_ssl).json()
return str(token_data['token'])
except APIError as ae:
code = ae.code
if code in ('has_external_identity', 'has_2fa'):
command = 'vh login --token TOKEN_HERE '
if host != default_app_host:
command += f'--host {host}'
banner(TOKEN_LOGIN_HELP.format(code=code, host=host, command=command))
raise
def validate_host(host: Optional[str]) -> str:
default_host = (
settings.overrides.get('host') # from the top-level CLI (or envvar) ...
or default_app_host # ... or the global default
)
while True:
if not host:
host = click.prompt(
                f'Login hostname? (You can also just accept the default {default_host} by leaving this empty.) ',
default=default_host,
prompt_suffix=' ',
show_default=False,
)
parsed_host = urlparse(host)
if parsed_host.scheme not in ('http', 'https'):
            error(f'The hostname {host} is not properly formed: missing http:// or https://')
host = None
continue
assert isinstance(host, str)
return host
|
valohai/valohai-cli
|
valohai_cli/commands/login.py
|
Python
|
mit
| 5,072
|
[
"VisIt"
] |
e21124139497ac68c947e8721c9567d7745d5875beb5ae3127d302a03eb52e29
|
#!/usr/bin/env python
# to run the script with the correct version of uvcdat:
# source /usr/local/uvcdat/1.4.0/bin/setup_runtime.sh
import cdms2
from cdms2 import MV2
import numpy
import glob
import sys
import os
from os import path
import shutil
import re
import string
import random
import gc
import logging
import logging.handlers
# ____________________________
def usage():
    textUsage='SYNOPSIS:\n\tmake_ensemble_Mean_tzyx.py -v VARIABLE -path PATHIN -outdir PATHOUT [-tmpdir TMPPATH] [-keepTmp]\n\t-minVar MINVAL -maxVar MAXVAL -modelList MODELLIST -startYear STARTYEAR -endYear ENDYEAR [-monthList MONTHLIST]\n\t[-regridFirst REGRIDBOOL] [-deleteRegrid DELETEBOOL] -rcp RCP\n'
textUsage=textUsage+'\tVARIABLE: a netcdf CMIP5 variable name, such as tos, zos, so, thetao;\n'
textUsage=textUsage+'\tPATHIN: input data directory (does not support sub-directories);\n'
textUsage=textUsage+'\tPATHOUT: output directory, created if does not exist;\n'
textUsage=textUsage+'\tTMPPATH: temporary path. Default: a random pathname is defined at runtime, as a leaf of PATHOUT;\n'
textUsage=textUsage+'\tkeepTmp: do not remove temporary directories;\n'
textUsage=textUsage+'\tMINVAL: any value below minVar is considered as nodata;\n'
textUsage=textUsage+'\tMAXVAL: any value above maxVar is considered as nodata;\n'
textUsage=textUsage+'\tMODELLIST: a text file with a model name per name, the model name is used to select the files to process;\n'
textUsage=textUsage+'\tSTARTYEAR: first year in the series of dates to process;\n'
textUsage=textUsage+'\tENDYEAR: last year in the series of date to process;\n'
textUsage=textUsage+'\tMONTHLIST: a comma separated list of month, such as "1,2,3" or "1,6,12". Values range is [1, 12].\n'
    textUsage=textUsage+'First, the programme averages outputs per model (if a model has several rXiYpZ ensemble members, they are averaged); then the per-model averages are averaged to produce the ensemble mean.\n'
textUsage=textUsage+'\tREGRIDBOOL\n'
textUsage=textUsage+'\tDELETEBOOL\n'
textUsage=textUsage+'\tRCP a string corresponding to the RCP string to match in filenames.\n'
textUsage=textUsage+'Averages are computed for each month of the year.\n'
return textUsage
# ____________________________
def exitMessage(msg, exitCode='1'):
thisLogger.critical(msg)
print msg
print
print usage()
sys.exit(exitCode)
# ___________________________
def boolConvert(code):
if code=='0':
return False
if code.lower()=='false':
return False
if code.lower()=='no':
return False
if code=='1':
return True
if code.lower()=='true':
return True
if code.lower()=='yes':
return True
# ____________________________
def decodeMonthList(parameter):
listMonth = [int(x) for x in parameter.strip().split(',')]
for ii in listMonth:
if ii<1 or ii>12:
exitMessage('month defined in the month list must be in [1, 12]. Exit(100).',100)
return listMonth
# ____________________________
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
#_____________________________
def flatten(foo):
for x in foo:
if hasattr(x, '__iter__'):
for y in flatten(x):
yield y
else:
yield x
# ____________________________
# dict{date:[filename]}
def agregateDict(refDict, newDict):
if refDict is None and newDict is None:
return None
# get list of all keys
if refDict is None:
return newDict
if len(refDict)==0:
return newDict
if newDict is None:
return refDict
if len(newDict)==0:
return refDict
keyList = sorted(set(refDict.keys() + newDict.keys()))
result={}
for ikey in keyList:
val = []
if ikey in refDict.keys(): val.append( refDict[ikey] )
if ikey in newDict.keys(): val.append( newDict[ikey] )
result[ikey] = [ x for x in flatten(val) ]
del val
gc.collect()
return result
# ____________________________
def make_levels():
values = [3.3, 10, 20, 30, 50, 75, 100, 125, 150, 200, 250, 300, 400, 500]
levelAxis = cdms2.createAxis( values )
bounds = [0]
for ii in xrange(len(values)-1):
bounds.append( 0.5*(values[ii] + values[ii+1]) )
bounds.append( values[-1] + 0.5 * (values[-1] + values[-2]) )
levelAxis.setBounds(numpy.array(bounds))
levelAxis.id='levels'
levelAxis.designateLevel(True)
levelAxis.units='meters'
return levelAxis
# ____________________________
def makeGrid(thisStep=0.5):
xstart=0
xend=360
xstep=thisStep
ystart=-85
yend=85
ystep=thisStep
lon_bnds=[]
lon=[]
for ii in numpy.arange(xstart, xend, xstep):
lon_bnds.append( [ii, ii + xstep] )
lon.append(ii+0.5*xstep)
lon_bnds=numpy.array(lon_bnds)
lon=numpy.array(lon)
lat_bnds=[]
lat=[]
for ii in numpy.arange(ystart, yend, ystep):
lat_bnds.append([ii, ii + ystep])
lat.append(ii+0.5*ystep)
lat_bnds=numpy.array(lat_bnds)
lat=numpy.array(lat)
latAxis = cdms2.createAxis(lat, lat_bnds)
latAxis.designateLatitude(True)
latAxis.units='degrees_north'
latAxis.id='latitude'
latAxis.long_name='Latitude'
lonAxis = cdms2.createAxis(lon, lon_bnds)
lonAxis.designateLongitude(True, xend)
lonAxis.designateCircular(xend)
lonAxis.units='degrees_east'
lonAxis.id='longitude'
lonAxis.long_name='Longitude'
return((cdms2.createGenericGrid(latAxis, lonAxis, lat_bnds, lon_bnds), latAxis, lonAxis, lat_bnds, lon_bnds))
# ____________________________
def do_cleanNodataLines(var, nodata):
oneSlice = numpy.squeeze(var[:,:,0])
refShape=oneSlice.shape
# where are the nodata vertical lines?
# 1./ transform the slice: 0=data, 1=nodata
test = numpy.zeros(oneSlice.shape)
wto1 = oneSlice >= nodata
if wto1.any():
test[wto1] = 1
else:
thisLogger.info('do_cleanNodataLines: no-data is missing from this dataset. Return.')
return var
# 2./ multiplications: if there are only nodata, results is 1
    line = numpy.array(test[0, :]) # copy first line of the 0/1 mask
    for il in range(test.shape[0]):
        line = line * test[il, :]
# 3./ do we have a 1 somewhere? It means that there was only nodata along the line
wone = line == 1
if wone.any():
thisLogger.info('do_cleanNodataLines: found {0} lines to correct.'.format(len(wone)))
else:
thisLogger.info('do_cleanNodataLines: found no line to correct.')
return var
# ____________________________
# auto mask based on the principle that the mask does not change in-between dates
def autoMask(var, nodata):
refshape = var.shape
if len(refshape)==3:
tmp = numpy.reshape(var, (refshape[0], refshape[1] * refshape[2]) )
elif len(refshape)==4:
tmp = numpy.reshape(var, (refshape[0], refshape[1] * refshape[2] * refshape[3]) )
wtnodata = (tmp.max(axis=0) - tmp.min(axis=0)) < 0.001
if wtnodata.any():
for ii in range(refshape[0]):
tmp[ii, wtnodata] = nodata
var[:] = numpy.reshape(tmp, refshape)
del tmp, wtnodata
gc.collect()
return var
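# ____________________________
# Tiny worked example of the autoMask principle above (illustrative, with
# made-up numbers): grid cells whose value never varies along the time axis
# are treated as mask/no-data and forced to nodata at every time step.
def _example_automask():
    demo = numpy.ones((3, 2, 2)) # 3 dates on a 2x2 grid
    demo[:, 0, 0] = [1.0, 2.0, 3.0] # only this cell varies, so it is kept
    masked = autoMask(demo, 1.e20)
    return masked[:, 1, 1] # a constant cell: returned as all nodata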
# ____________________________
def updateCounters(accum, N, mini, maxi, data, minVar, maxVar, nodata=1.e20):
if data is None:
return [accum, N, mini, maxi]
dim = numpy.squeeze(data[:]).shape
if accum is None:
accum = numpy.zeros(dim) + nodata
N = numpy.zeros(dim) + nodata
mini = data.copy()
maxi = data.copy()
wtadd = (data >= minVar ) * (data < maxVar) * (accum < nodata) # add where not nodata
wtreplace = (data >= minVar) * (data < maxVar) * (accum >= nodata) # replace if no data
wmax = (data >= maxi) * (data < nodata) * (data >= minVar) * (data < maxVar)
wmaxReplace = (mini >= nodata) * (data < nodata) * (data >= minVar)
wmin = (data <= mini) * (data >= minVar) * ( data < maxVar) * ( maxi < nodata )
wminReplace = (mini >= nodata) * (data < nodata) * (data >= minVar)
if wtadd.any():
accum[wtadd] = accum[wtadd] + data[wtadd]
N[wtadd] = N[wtadd] + 1 #numpy.ones(dim)
if wtreplace.any():
accum[wtreplace] = data[wtreplace]
N[wtreplace] = 1 #numpy.ones(dim)
if wmax.any():
maxi[wmax] = data[wmax]
if wmin.any():
mini[wmin] = data[wmin]
if wmaxReplace.any():
maxi[wmaxReplace] = data[wmaxReplace]
if wminReplace.any():
mini[wminReplace] = data[wminReplace]
del wtadd, wtreplace, wmax, wmaxReplace, wmin, wminReplace
gc.collect()
return [accum, N, mini, maxi]
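# ___________________________
# Sketch of how updateCounters() is meant to be driven (illustrative): call it
# once per ensemble member for a given date, then divide the accumulator by
# the counter to get the ensemble mean, as done in do_stats() below.
def _example_update_counters(members, minVar, maxVar, nodata=1.e20):
    accum = N = mini = maxi = None
    for data in members: # each member: a flat numpy array for one date
        [accum, N, mini, maxi] = updateCounters(accum, N, mini, maxi,
                                                data, minVar, maxVar, nodata)
    if accum is not None:
        valid = (N < nodata) * (N > 0)
        if valid.any():
            accum[valid] = accum[valid] / N[valid]
    return [accum, N, mini, maxi]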
# ___________________________
def do_regrid(variable, lstInFile, outdir, stringBefore, yearStart, yearEnd, topLevel=0, bottomLevel=1000):
createdFiles=[]
nodata=1.e20
if lstInFile is None:
thisLogger.info( 'No file to process. Return' )
return None
if len(lstInFile)==0:
thisLogger.info('Found no file to process, consider revising search pattern. Return.')
return None
(newGrid, latAxis, lonAxis, lat_bnds, lon_bnds) = makeGrid()
for fileName in lstInFile:
thisLogger.info('Regriding file: {0}'.format(fileName))
thisFile = cdms2.open(fileName)
# to reduce output file size and memory use, collect start/end times according to internal file encoding
startTimeraw = [t for t in thisFile[variable].getTime().asComponentTime()]
endTimeraw = [t for t in thisFile[variable].getTime().asComponentTime()]
thisLogger.info('start time raw = {0}-{1:02}'.format(startTimeraw[0].year, startTimeraw[0].month) )
thisLogger.info('end time raw = {0}-{1:02}'.format(endTimeraw[-1].year, endTimeraw[-1].month))
startTime = [t for t in thisFile[variable].getTime().asComponentTime() if (t.year==startYear)]
endTime = [t for t in thisFile[variable].getTime().asComponentTime() if (t.year==endYear)]
if len(startTime)==0 and len(endTime)==0: # this file does not contain useful data, next iteration
thisLogger.info('Data not useful')
continue
if len(startTime)==0: # the first date is not in this file, process from the start
startTime = thisFile[variable].getTime().asComponentTime()
if len(endTime)==0: # the last date is not in this file, process up to the end
endTime = thisFile[variable].getTime().asComponentTime()
thisLogger.info('start time = {0}-{1:02}'.format(startTime[0].year, startTime[0].month) )
thisLogger.info('end time = {0}-{1:02}'.format(endTime[-1].year, endTime[-1].month))
if thisFile[variable].getLevel() is None:
# some files do not have nodata set to 1.e20 (EC-EARTH), some have masked values set to something else (0 and 1.e20, for MRI):
# let's process our mask by identifying unchanged values
tmp = cdms2.createVariable(thisFile[variable].subRegion( time=(startTime[0], endTime[-1], 'cc'), level=(topLevel, bottomLevel,'cc') ))
data = autoMask(tmp, nodata)
del tmp
gc.collect()
else:
verticalGrid = make_levels()
# print dir(verticalGrid)
# print verticalGrid.getBounds()
print verticalGrid.getBounds().min() , verticalGrid.getBounds().max()
topLevel = verticalGrid.getBounds().min()
bottomLevel = verticalGrid.getBounds().max()
if thisFile[variable].getMissing() is None:
tmp = cdms2.createVariable(thisFile[variable].subRegion( time=(startTime[0], endTime[-1], 'cc'), level=(topLevel, bottomLevel,'cc') ))
data = autoMask(tmp, nodata)
del tmp
gc.collect()
else:
data = cdms2.createVariable(thisFile[variable].subRegion( time=(startTime[0], endTime[-1], 'cc'), level=(topLevel, bottomLevel,'cc') ))
mask = numpy.array(data) < nodata
if thisFile[variable].getLevel() is None:
regrided = data.regrid(newGrid, missing=nodata, order=thisFile[variable].getOrder(), mask=mask)
else:
tmp = data.regrid(newGrid, missing=nodata, order=thisFile[variable].getOrder(), mask=mask)
regrided = tmp.pressureRegrid( verticalGrid, method='linear')
regrided.id=variable
outfilename = '{0}/{1}{2}'.format(outdir, stringBefore, os.path.basename(fileName))
createdFiles.append(outfilename )
if os.path.exists(outfilename): os.remove(outfilename)
outfile = cdms2.open(outfilename, 'w')
outfile.write(regrided)
outfile.close()
thisFile.close()
del mask, regrided
gc.collect()
del newGrid, latAxis, lonAxis, lat_bnds, lon_bnds
gc.collect()
return createdFiles
# ___________________________
# for a list of files: open all files, go from date 1 to date 2, compute avg for thisdate, save thisdate
# if a new grid is passed: regrid
def do_stats(variable, validYearList, monthList, lstInFile, outdir, stringBefore, outnameBase, minVar=-1.e20, maxVar=1.e20, doSTD=False):
if validYearList is None:
exitMessage('List of years to process is undefined, edit code. Exit 5.',5)
createdFiles={}
nodata=1.e20
if lstInFile is None:
thisLogger.info('No file to process. Return.')
return
if len(lstInFile)==0:
thisLogger.info('Found no file to process, consider revising search pattern.')
return
# open all files
listFID=[]
if type(lstInFile)==type([]):
if len(lstInFile[0]) == 1:
ifile = ''.join(lstInFile)
thisLogger.debug('Case 2, lstInFile={0}'.format(ifile))
if not os.path.isfile(ifile):
exitMessage('File {0} not found. Exit 202'.format(lstInFile), 202)
listFID.append(cdms2.open(ifile, 'r'))
else:
for ifile in lstInFile:
thisLogger.debug('Case 1, ifile={0}'.format(ifile))
if not os.path.isfile(ifile):
exitMessage('File {0} not found. Exit 201.'.format(ifile), 201)
listFID.append(cdms2.open(ifile, 'r'))
# elif type(lstInFile)==type(''):
# thisLogger.debug('Case 2, lstInFile={0}'.format(lstInFile))
# if not os.path.isfile(lstInFile):
# exitMessage('File {0} not found. Exit 202'.format(lstInFile), 202)
# listFID.append(cdms2.open(lstInFile, 'r'))
else:
exitMessage('Unknown type for object lstInFile. Exit(200)',200)
# go through the list of dates, compute ensemble average
for iyear in validYearList:
thisLogger.info('Processing year {0}'.format(iyear))
for imonth in monthList:
accumVar=None
accumN=None
mini=None
maxi=None
refGrid=None
dims=None
units=None
for ifile in listFID:
if ifile[variable].getTime() is None: # no time reference
if refGrid is None:
refGrid = ifile[variable].getGrid()
# axis=ifile[variable].getAxisList(omit='time')
dims=numpy.squeeze(ifile[variable]).shape
[accumVar, accumN, mini, maxi] = updateCounters( accumVar, accumN, mini, maxi,
numpy.array(ifile[variable]).ravel(),
minVar, maxVar, nodata)
else: # we can do some time slice
thisTime = [ii for ii in ifile[variable].getTime().asComponentTime() if (ii.year==iyear and ii.month==imonth)]
if len(thisTime)==1:
if refGrid is None:
refGrid = ifile[variable].getGrid()
dims = numpy.squeeze(ifile[variable].subRegion(time=thisTime[0])).shape
units= ifile[variable].units
[accumVar, accumN, mini, maxi]= updateCounters(accumVar, accumN, mini, maxi,
numpy.array( ifile[variable].subRegion(time=thisTime[0])).ravel(),
minVar, maxVar, nodata )
units= ifile[variable].units
# compute average
# it can happen that there is no data to process: if the input files for the current model has an ending date before the current date
# in this case, accumN is None: do not save stats, and do not add a file name in createdFiles
# compute average
if accumN is not None:
wtdivide = (accumN < nodata) * (accumN > 0)
if wtdivide.any():
accumVar[wtdivide] = accumVar[wtdivide] / accumN[wtdivide]
# compute std
if doSTD:
thisLogger.info('Computing std: to be implemented')
# create and save variables
meanVar = cdms2.createVariable( accumVar.reshape(dims), typecode='f', id='mean_{0}'.format(variable), fill_value=nodata, attributes=dict(long_name='mean', units=units) )
meanVar.setGrid(refGrid)
counter = cdms2.createVariable(accumN.reshape(dims), typecode='i', id='count', fill_value=nodata, attributes=dict(long_name='count', units='None') )
counter.setGrid(refGrid)
miniVar = cdms2.createVariable(mini.reshape(dims), typecode='f', id='minimum', fill_value=nodata, attributes=dict(long_name='minimum', units=units) )
miniVar.setGrid(refGrid)
maxiVar = cdms2.createVariable(maxi.reshape(dims), typecode='f', id='maximum', fill_value=nodata, attributes=dict(long_name='maximum', units=units) )
maxiVar.setGrid(refGrid)
outfilename = '{0}/{1}_{2}_{3}{4:02}.nc'.format(outdir, stringBefore, outnameBase, iyear, imonth )
if os.path.exists(outfilename): os.remove(outfilename)
thisLogger.debug('Saving stats to file {0}'.format(outfilename))
outfile = cdms2.open(outfilename, 'w')
outfile.write(meanVar)
outfile.write(counter)
outfile.write(miniVar)
outfile.write(maxiVar)
outfile.close()
createdFiles['{0}{1:02}'.format(iyear,imonth)] = outfilename
del wtdivide
gc.collect()
del accumVar, mini, maxi, accumN
gc.collect()
# close input files
for ii in listFID: ii.close()
return(createdFiles)
#___________________________
if __name__=="__main__":
variable = None
indir = None
tmpdir = None
outdir = None
modelListFile=None
startYear=None
endYear=None
monthList=range(1,13)
regridFirst = True
deleteRegrid = False
modelStat = True
rcp=None
logFile='{0}.log'.format(__file__)
minVar=-1.e20
maxVar=1.e20
topLevel=0
bottomLevel=300
deleteTmp=True
ii = 1
while ii < len(sys.argv):
arg = sys.argv[ii].lower()
if arg == '-path':
ii = ii + 1
indir = sys.argv[ii]
elif arg == '-outdir':
ii = ii + 1
outdir = sys.argv[ii]
elif arg == '-tmpdir':
ii = ii + 1
tmpdir = sys.argv[ii]
elif arg == '-keeptmp':
deleteTmp=False
elif arg == '-v':
ii = ii + 1
variable = sys.argv[ii]
elif arg=='-minvar':
ii = ii + 1
minVar = float(sys.argv[ii])
elif arg == '-maxvar':
ii = ii + 1
maxVar = float(sys.argv[ii])
elif arg =='-modellist':
ii = ii + 1
modelListFile = sys.argv[ii]
elif arg=='-startyear':
ii = ii + 1
startYear = int(sys.argv[ii])
elif arg=='-endyear':
ii = ii + 1
endYear = int(sys.argv[ii]) + 1
elif arg=='-monthlist':
ii = ii + 1
monthList=decodeMonthList(sys.argv[ii])
elif arg=='-regridfirst':
ii=ii+1
regridFirst=boolConvert(sys.argv[ii])
elif arg=='-deleteregrid':
ii = ii + 1
deleteRegrid = boolConvert(sys.argv[ii])
elif arg=='-rcp':
ii=ii+1
rcp=sys.argv[ii]
elif arg=='-log':
ii = ii + 1
logFile = sys.argv[ii]
ii = ii + 1
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
thisLogger = logging.getLogger('MyLogger')
thisLogger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(logFile, maxBytes=1024*500, backupCount=5)
thisLogger.addHandler(handler)
if variable is None:
exitMessage('Missing variable name, use option -v. Exit(1).', 1)
if indir is None:
exitMessage('Missing input directory, use option -path. Exit(2).',2)
if outdir is None:
exitMessage('Missing output directory, use option -outdir. Exit(3).', 3)
if modelListFile is None:
exitMessage('Missing a model list file, use option -modellist. Exit(12).',12)
if startYear is None:
exitMessage('Please define a starting year, use option -startyear. Exit(13).',13)
if endYear is None:
exitMessage('Please define an ending year, use option -endyear. Exit(14).',14)
if rcp is None:
exitMessage('Please define an rcp, use option -rcp. Exit(15).',15)
if tmpdir is None:
tmpdir = '{0}/tmp_{1}'.format(outdir, id_generator() )
if not os.path.exists(outdir): os.makedirs(outdir)
if not os.path.exists(tmpdir): os.makedirs(tmpdir)
# for netcdf3: set flag to 0
cdms2.setNetcdfShuffleFlag(1)
cdms2.setNetcdfDeflateFlag(1)
cdms2.setNetcdfDeflateLevelFlag(3)
# models list
modelList=[]
try:
with open(modelListFile,"r") as f:
for textLine in f:
thisStr = textLine.replace(" ","").replace('\n','')
thisLogger.info('Reading model list entry {0}'.format(thisStr))
if not (thisStr==""):
modelList.append( thisStr )
except IOError as e:
exitMessage('I/O Error {1} while processing text file {0}:{2}. Exit(10).'.format(modelListFile, e.errno, e.strerror), 10)
except:
exitMessage('Unexpected error while processing text file {0}. Exit(11).'.format(modelListFile), 11)
validYearList=range(startYear, endYear)
if len(validYearList)==0:
exitMessage('No date to process, startYear={0}, endYear={1}. Exit(20).'.format(startYear, endYear),20)
processedFiles=None
for thisModel in modelList:
thisLogger.info('Model {0}'.format(thisModel))
pattern=re.compile('{0}_{1}_{2}_{3}_{4}_{5}.nc'.format(variable, 'Omon', thisModel, rcp, 'r.*i.*p.*', '.*') )
lstInFile=[f for f in glob.glob('{0}/*.nc'.format(indir)) if (os.stat(f).st_size and pattern.match(os.path.basename(f) ) ) ]
thisLogger.info('TESTING {0} {1} {2} {3} {4} {5} {6}'.format(variable, tmpdir, startYear, endYear, topLevel, bottomLevel, len(lstInFile)))
if regridFirst:
regridedFiles = do_regrid(variable, lstInFile, tmpdir, 'regrid_', startYear, endYear, topLevel, bottomLevel)
thisLogger.info('Regridding done')
else:
thisLogger.info('Skipping regridding, using input files directly')
regridedFiles = lstInFile
thisModelFiles = do_stats(variable, validYearList, monthList, regridedFiles, tmpdir, 'stats', '{0}_{1}_{2}'.format(variable,thisModel, rcp), minVar, maxVar )
if deleteRegrid:
for ii in regridedFiles: os.remove(ii)
processedFiles = agregateDict(processedFiles, thisModelFiles)
gc.collect()
if len(modelList)==1:
thisLogger.info('>>> 1 model in input: job finished after first averaging round.')
elif len(processedFiles)==0:
thisLogger.info('>>>> no data to process')
else:
thisLogger.info('>> Averaging model averages, for each date')
for idate in processedFiles: # iteration over keys
thisYear = int(idate[0:4])
thisMonth= int(idate[4:6])
thisLogger.info('>> Averaging date {0}'.format(idate))
listFiles = [x for x in flatten(processedFiles[idate])]
thisLogger.info('>> averaging files {0}'.format(listFiles))
returnedList = do_stats('mean_{0}'.format(variable), [thisYear], [thisMonth], listFiles, outdir, 'ensemble', '{0}_{1}'.format(variable, rcp) , minVar, maxVar)
gc.collect()
# delete tmpdir
if deleteTmp:
shutil.rmtree(tmpdir)
# end of file
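# --- Illustrative invocation (editor's sketch; all paths, the model list file and the
# month-list format are hypothetical and only show how the options parsed above fit together) ---
# python make_ensembleMean_tzyx.py -v thetao -path /data/cmip5/in -outdir /data/cmip5/out \
#     -modellist models.txt -startyear 2006 -endyear 2010 -rcp rcp85 \
#     -monthlist 1,2,3 -regridfirst 1 -deleteregrid 1 -log ensemble_mean.log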
|
IOC-CODE/esgf_ensemble_mean
|
ensemble_stats/bin/make_ensembleMean_tzyx.py
|
Python
|
gpl-2.0
| 25,104
|
[
"NetCDF"
] |
0524452cdddf53c88057262494e78515625b5393afb752d731dacfd7611d2dd5
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from ansible import constants as C
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import builtins
from ansible.plugins.loader import filter_loader, test_loader
def safe_eval(expr, locals=None, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained).
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
locals = {} if locals is None else locals
# define certain JSON types
# eg. JSON booleans are unknown to python eval()
OUR_GLOBALS = {
'__builtins__': {}, # avoid global builtins as per eval docs
'false': False,
'null': None,
'true': True,
# also add back some builtins we do need
'True': True,
'False': False,
}
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
# ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.USub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if sys.version_info[:2] >= (2, 7):
SAFE_NODES.update(
set(
(ast.Set,)
)
)
# And in Python 3.4 too
if sys.version_info[:2] >= (3, 4):
SAFE_NODES.update(
set(
(ast.NameConstant,)
)
)
# And in Python 3.6 too, although not encountered until Python 3.8, see https://bugs.python.org/issue32892
if sys.version_info[:2] >= (3, 6):
SAFE_NODES.update(
set(
(ast.Constant,)
)
)
filter_list = []
for filter_ in filter_loader.all():
filter_list.extend(filter_.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
# Disallow calls to builtin functions that we have not vetted
# as safe. Other functions are excluded by setting locals in
# the call to eval() later on
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
# Note: passing our own globals and locals here constrains what
# callables (and other identifiers) are recognized. this is in
# addition to the filtering of builtins done in CleansingNodeVisitor
result = eval(compiled, OUR_GLOBALS, dict(locals))
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is to support late evaluation
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
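# --- Illustrative usage (editor's sketch, not part of the upstream module) ---
# A minimal, hedged demo of the behaviour described above; exact results assume the
# default SAFE_NODES whitelist and may differ between Ansible versions.
if __name__ == '__main__':  # hypothetical demo guard, not present in the original file
    print(safe_eval("[1, 2, 3]"))                 # -> [1, 2, 3]
    print(safe_eval("{'a': true, 'b': null}"))    # -> {'a': True, 'b': None}
    # ast.Call is not whitelisted, so the expression is returned unevaluated,
    # together with the raised exception when include_exceptions=True
    print(safe_eval("__import__('os').system('id')", include_exceptions=True))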
|
t794104/ansible
|
lib/ansible/template/safe_eval.py
|
Python
|
gpl-3.0
| 5,339
|
[
"VisIt"
] |
617a7f9cb7df209f30b1d5ff35b25bd02afec02f8470f683abff8372b64ef9a8
|
# encoding: utf-8
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask import logging
import pybreaker
import requests as requests
from jormungandr import cache, app
from jormungandr.realtime_schedule.realtime_proxy import RealtimeProxy, RealtimeProxyError
from jormungandr.schedule import RealTimePassage
import xml.etree.ElementTree as et
import aniso8601
from datetime import datetime
class Siri(RealtimeProxy):
"""
Class managing calls to siri external service providing real-time next passages
"""
def __init__(self, id, service_url, requestor_ref,
object_id_tag=None, destination_id_tag=None, instance=None, timeout=10, **kwargs):
self.service_url = service_url
self.requestor_ref = requestor_ref # login for siri
self.timeout = timeout #timeout in seconds
self.rt_system_id = id
self.object_id_tag = object_id_tag if object_id_tag else id
self.destination_id_tag = destination_id_tag
self.instance = instance
self.breaker = pybreaker.CircuitBreaker(fail_max=app.config.get('CIRCUIT_BREAKER_MAX_SIRI_FAIL', 5),
reset_timeout=app.config.get('CIRCUIT_BREAKER_SIRI_TIMEOUT_S', 60))
def __repr__(self):
"""
used as the cache key. we use the rt_system_id to share the cache between servers in production
"""
try:
return self.rt_system_id.encode('utf-8', 'backslashreplace')
except:
return self.rt_system_id
def _get_next_passage_for_route_point(self, route_point, count, from_dt, current_dt, duration=None):
stop = route_point.fetch_stop_id(self.object_id_tag)
request = self._make_request(monitoring_ref=stop, dt=from_dt, count=count)
if not request:
return None
siri_response = self._call_siri(request)
if not siri_response or siri_response.status_code != 200:
raise RealtimeProxyError('invalid response')
logging.getLogger(__name__).debug('siri for {}: {}'.format(stop, siri_response.text))
return self._get_passages(siri_response.content, route_point)
def status(self):
return {
'id': unicode(self.rt_system_id),
'timeout': self.timeout,
'circuit_breaker': {
'current_state': self.breaker.current_state,
'fail_counter': self.breaker.fail_counter,
'reset_timeout': self.breaker.reset_timeout
},
}
def _get_passages(self, xml, route_point):
ns = {'siri': 'http://www.siri.org.uk/siri'}
try:
root = et.fromstring(xml)
except et.ParseError as e:
logging.getLogger(__name__).exception("invalid xml")
raise RealtimeProxyError('invalid xml')
stop = route_point.fetch_stop_id(self.object_id_tag)
line = route_point.fetch_line_id(self.object_id_tag)
route = route_point.fetch_route_id(self.object_id_tag)
next_passages = []
for visit in root.findall('.//siri:MonitoredStopVisit', ns):
cur_stop = visit.find('.//siri:StopPointRef', ns).text
if stop != cur_stop:
continue
cur_line = visit.find('.//siri:LineRef', ns).text
if line != cur_line:
continue
cur_route = visit.find('.//siri:DirectionName', ns).text
if route != cur_route:
continue
cur_destination = visit.find('.//siri:DestinationName', ns).text
cur_dt = visit.find('.//siri:ExpectedDepartureTime', ns).text
cur_dt = aniso8601.parse_datetime(cur_dt)
next_passages.append(RealTimePassage(cur_dt, cur_destination))
return next_passages
@cache.memoize(app.config['CACHE_CONFIGURATION'].get('TIMEOUT_SIRI', 60))
def _call_siri(self, request):
encoded_request = request.encode('utf-8', 'backslashreplace')
headers = {
"Content-Type": "text/xml; charset=UTF-8",
"Content-Length": len(encoded_request)
}
logging.getLogger(__name__).debug('siri RT service, post at {}: {}'.format(self.service_url, request))
try:
return self.breaker.call(requests.post,
url=self.service_url,
headers=headers,
data=encoded_request,
verify=False,
timeout=self.timeout)
except pybreaker.CircuitBreakerError as e:
logging.getLogger(__name__).error('siri RT service dead, using base '
'schedule (error: {})'.format(e))
raise RealtimeProxyError('circuit breaker open')
except requests.Timeout as t:
logging.getLogger(__name__).error('siri RT service timeout, using base '
'schedule (error: {})'.format(t))
raise RealtimeProxyError('timeout')
except Exception as e:
logging.getLogger(__name__).exception('siri RT error, using base schedule')
raise RealtimeProxyError(str(e))
def _make_request(self, dt, count, monitoring_ref):
# we don't want to ask SIRI for 1000 next departures :)
count = min(count or 5, 5)  # if no value is defined we ask for 5 passages
message_identifier='IDontCare'
request = """<?xml version="1.0" encoding="UTF-8"?>
<x:Envelope xmlns:x="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:wsd="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<x:Header/>
<x:Body>
<GetStopMonitoring xmlns="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<ServiceRequestInfo xmlns="">
<siri:RequestTimestamp>{dt}</siri:RequestTimestamp>
<siri:RequestorRef>{RequestorRef}</siri:RequestorRef>
<siri:MessageIdentifier>{MessageIdentifier}</siri:MessageIdentifier>
</ServiceRequestInfo>
<Request version="1.3" xmlns="">
<siri:RequestTimestamp>{dt}</siri:RequestTimestamp>
<siri:MessageIdentifier>{MessageIdentifier}</siri:MessageIdentifier>
<siri:MonitoringRef>{MonitoringRef}</siri:MonitoringRef>
<siri:MaximumStopVisits>{count}</siri:MaximumStopVisits>
</Request>
<RequestExtension xmlns=""/>
</GetStopMonitoring>
</x:Body>
</x:Envelope>
""".format(dt=datetime.utcfromtimestamp(dt).isoformat(),
count=count,
RequestorRef=self.requestor_ref,
MessageIdentifier=message_identifier,
MonitoringRef=monitoring_ref)
return request
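# Editor's note (illustrative): _get_passages above expects each siri:MonitoredStopVisit in the
# response to carry siri:StopPointRef, siri:LineRef, siri:DirectionName, siri:DestinationName
# and an ISO 8601 siri:ExpectedDepartureTime element, matching the StopMonitoring request
# built by _make_request.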
|
Tisseo/navitia
|
source/jormungandr/jormungandr/realtime_schedule/siri.py
|
Python
|
agpl-3.0
| 8,180
|
[
"VisIt"
] |
c10660650b57caa8930d1b40960fc2e83cdab0f8aef4995cee8b3cbd1bb14041
|
''' ``traverse`` statement parsing and evaluation. '''
import syntax
import exp_parser
from collections import deque
from spidy.document import *
from spidy.common import *
from tokenizer import *
from nodes import Node
from nodes import NumberNode
from skip_node import SkipNode
# traverse ident in @path_string [mode depth]:
class TraverseNode(Node):
'''
Traverses document tree from specified element or from document root -
sub-branch is specified by path operator.
.. note:: Internally, setting the sub-branch to a certain element is done by
the ``skip`` command, so when it fails you may see *SkipNode*
messages in the log.
Sets the loop variable to the element's absolute (document-wide) path string, which
can be used in the path operator ``&``, in ``skip`` or in another ``traverse`` statement.
Optionally, the traverse mode can be specified:
- breadthfirst (default)
- depthfirst
If a mode is specified, the traversal depth can be set as well. Default is 1.
Example, default form::
traverse div in &'//div[@data_container]':
if &div == '':
break
Or, visit every element in the document to find pictures::
images = []
traverse element in & depthfirst 1000000:
if 'img' in element:
images << &(element + '@src')
'''
_ident = None
_path = None
_body = None
_mode = syntax.TraverseMode.BREADTH_FIRST
_depth = None
def __init__(self, context):
super(TraverseNode, self).__init__(context)
self._depth = NumberNode(context)
self._depth.set_value('1')
def get_ident(self):
return self._ident
def set_ident(self, ident):
self._ident = ident
def get_path(self):
return self._path
def set_path(self, path):
self._path = path
def get_body(self):
return self._body
def set_body(self, body):
self._body = body
def get_mode(self):
return self._mode
def set_mode(self, mode):
self._mode = mode
def get_depth(self):
return self._depth
def set_depth(self, depth):
self._depth = depth
def evaluate(self):
log.debug(self._id, 'TraverseNode: evaluating')
doc_type = self._context.get_doc_type()
doc = self._context.get_doc()
validate_eval(self._id, self._sline, doc != None and doc_type != DocType.UNKNOWN,
'TraverseNode: document should be loaded using {0} command'.format(syntax.OP_GET))
validate_eval(self._id, self._sline, doc_type != DocType.TXT,
'TraverseNode: document should be of structured format')
validate_eval(self._id, self._sline, not self._context.is_bound(self._ident),
'TraverseNode: loop variable is already defined')
depth = self._depth.evaluate()
validate_eval(self._id, self._sline, depth >= 0, 'TraverseNode: traverse depth should be equal to or greater than zero')
# skip to path
cur_cursor = self._context.get_doc_cursor()
skip = SkipNode(self._context)
skip.set_script_line(self._sline)
skip.set_path(self._path)
skip.evaluate()
# initialize
self._context.bind_var(self._ident)
roots = None
doc_cursor = self._context.get_doc_cursor()
if doc_cursor != None:
roots = doc_cursor.get_children()
else:
roots = doc
cur = None
cur_depth = 0
if roots != None and len(roots) > 0:
cur_depth = roots[0].get_depth()
box = deque()
if self._mode == syntax.TraverseMode.BREADTH_FIRST:
box.extend(roots)
else: # self._mode == syntax.TraverseMode.DEPTH_FIRST:
box.extend(reversed(roots))
while len(box) > 0:
if self._mode == syntax.TraverseMode.BREADTH_FIRST:
cur = box.popleft()
if cur.get_depth() - cur_depth >= depth:
continue
box.extend(cur.get_children())
else: # self._mode == syntax.TraverseMode.DEPTH_FIRST:
cur = box.pop()
if cur.get_depth() - cur_depth >= depth:
continue
box.extend(reversed(cur.get_children()))
cur_path = cur.make_path(self._context.get_doc_cursor())
self._context.set_var(self._ident, cur_path)
self._body.evaluate()
# check flags
flags = self._context.get_flags() & ~ExecutionFlags.CONTINUE
self._context.set_flags(flags)
if flags & ExecutionFlags.BREAK:
self._context.set_flags(flags & ~ExecutionFlags.BREAK)
break
self._context.unbind_var(self._ident)
self._context.set_doc_cursor(cur_cursor)
def parse(self, line_num):
log.debug(self._id, 'TraverseNode: parsing')
lines = self._context.get_script()
self._sline = lines[line_num]
line = self._sline.string
# check if we have indented 'traverse...in' block
validate(self._id, self._sline, line_num + 1 < len(lines),
'TraverseNode: missing script block after ' + syntax.OP_FOR)
validate(self._id, self._sline, syntax.is_indented_block(lines[line_num:line_num + 2]),
'TraverseNode: expected an indented block after ' + syntax.OP_TRAVERSE)
validate(self._id, self._sline, line.rstrip().endswith(syntax.COLON),
'TraverseNode: expected ' + syntax.COLON + ' after ' + syntax.OP_TRAVERSE + ' path string')
# parse 'traverse...in' line
idx = line.index(syntax.OP_TRAVERSE) + len(syntax.OP_TRAVERSE)
l = line[idx:]
idx = skip_space(l)
l = l[idx:]
ident_idx = skip_token(l)
# set loop identity first
ident = l[:ident_idx]
validate(self._id, self._sline, syntax.is_var_name(ident), 'TraverseNode: invalid loop variable name')
self._ident = ident
# check 'in' operator
l = l[ident_idx:]
idx = skip_space(l)
l = l[idx:]
in_idx = skip_token(l)
validate(self._id, self._sline, l[:in_idx] == syntax.OP_IN, 'TraverseNode: invalid syntax')
# now parse path
l = l[in_idx:]
idx = skip_space(l)
path = l[idx:].replace(syntax.COLON, '').strip()
ep = exp_parser.ExpressionParser(self._context, line_num)
self._path = ep.parse(path)
# try to parse traverse mode and/or depth
mode = ep.get_stop_word()
if mode != '':
validate(self._id, self._sline, mode == syntax.TraverseMode.DEPTH_FIRST
or mode == syntax.TraverseMode.BREADTH_FIRST,
'TraverseNode: invalid traverse mode')
self._mode = mode
depth_shift = ep.get_stop_idx() + len(ep.get_stop_word())
depth = path[depth_shift:]
if depth != '':
ep.reset()
self._depth = ep.parse(depth)
def __str__(self):
string = (syntax.OP_TRAVERSE + syntax.WHITESPACE + self._ident + syntax.WHITESPACE +
syntax.OP_IN + syntax.WHITESPACE + str(self._path) + syntax.WHITESPACE +
self._mode + syntax.WHITESPACE + str(self._depth) + syntax.COLON + syntax.LINEFEED)
body_lines = str(self._body).strip().split(syntax.LINEFEED)
for i in range(len(body_lines)):
body_lines[i] = syntax.TAB + body_lines[i]
string += syntax.LINEFEED.join(body_lines)
return string
|
AlexPereverzyev/spidy
|
spidy/language/traverse_node.py
|
Python
|
bsd-3-clause
| 7,985
|
[
"VisIt"
] |
f86f50af50f77c72f1fd6cde222bd85bb5de55f72dd18890c505a5bd7e3be0b0
|
from ase import Atoms
from gpaw import GPAW
from gpaw.test import equal
# Self-consistent calculation:
a = 2.5
slab = Atoms('Li', cell=(a, a, 2 * a), pbc=1)
slab.calc = GPAW(kpts=(3,3,1), txt='li.txt')
slab.get_potential_energy()
slab.calc.write('Li.gpw')
# Gamma point:
e1 = slab.calc.get_eigenvalues(kpt=0)[0]
# Fix density and continue:
kpts = [(0,0,0)]
slab.calc.set(fixdensity=True,
nbands=5,
kpts=kpts,
usesymm=None,
eigensolver='cg')
slab.get_potential_energy()
e2 = slab.calc.get_eigenvalues(kpt=0)[0]
# Start from gpw-file:
calc = GPAW('Li.gpw',
fixdensity=True,
nbands=5,
kpts=kpts,
usesymm=None,
eigensolver='cg')
calc.scf.reset()
calc.get_potential_energy()
e3 = calc.get_eigenvalues(kpt=0)[0]
equal(e1, e2, 1e-5)
equal(e1, e3, 1e-5)
|
ajylee/gpaw-rtxs
|
gpaw/test/fixdensity.py
|
Python
|
gpl-3.0
| 869
|
[
"ASE",
"GPAW"
] |
8e27953ac1f1cce87326e8ecf9b06ce75352b94e7cfa79cc6966e6b9a1d6fd35
|
from gui.views.EMITView import EMITView
from sprint import *
from utilities import gui
import wx
from gui import events
from coordinator.engineManager import Engine
import coordinator.engineAccessors as engine
from emitLogging import elog
import threading
from gui.controller.NetcdfCtrl import NetcdfCtrl
from gui.controller.UserCtrl import UserCtrl
from ..controller.NetcdfDetailsCtrl import NetcdfDetailsCtrl
from gui.controller.ModelInputPromptCtrl import ModelInputPromptCtrl
from gui.controller.SettingsCtrl import SettingsCtrl
class EMITCtrl(EMITView):
def __init__(self, parent):
EMITView.__init__(self, parent)
self.FloatCanvas = self.Canvas.FloatCanvas
connections_txt = os.environ['APP_CONNECTIONS_PATH']
self.local_db_path = os.environ['APP_LOCAL_DB_PATH']
self.loading_path = None
if "APP_DEFAULT_SAVE_PATH" in os.environ:
self.defaultLoadDirectory = os.environ["APP_DEFAULT_SAVE_PATH"]
else:
self.defaultLoadDirectory = os.getcwd() + "/models/MyConfigurations/"
environment.setEnvironmentVar("APP", "default_save_path", self.defaultLoadDirectory)
# load databases threaded
t = threading.Thread(target=self.connect_to_databases, name='Connect_To_Databases', args=(connections_txt,))
t.setDaemon(True)
t.start()
self.check_users_json()
# File Option Bindings
self.Bind(wx.EVT_MENU, self.on_load_configuration, self._load)
self.Bind(wx.EVT_MENU, self.on_add_user, self._add_user_menu)
self.Bind(wx.EVT_MENU, self.on_save_configuration, self._save_menu)
self.Bind(wx.EVT_MENU, self.on_save_configuration_as, self.save_as_menu)
self.Bind(wx.EVT_MENU, self.on_settings, self._settings_menu)
self.Bind(wx.EVT_MENU, self.on_close, self._exit)
# View Option Bindings
self.Bind(wx.EVT_MENU, self.on_toggle_console, self._toggle_console_menu)
self.Bind(wx.EVT_MENU, self.on_toggle_toolbar, self._toggle_toolbar_menu)
self.Bind(wx.EVT_MENU, self.on_default_view, self._default_view_menu)
# Data Menu Bindings
self.Bind(wx.EVT_MENU, self.on_add_csv_file, self._add_csv_file_menu)
self.Bind(wx.EVT_MENU, self.on_add_net_cdf_file, self._add_netcdf)
self.Bind(wx.EVT_MENU, self.on_open_dap_viewer, self._open_dap_viewer_menu)
# All other bindings
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.on_switch_lower_panel_tab)
self.Bind(wx.EVT_CLOSE, self.on_close)
events.onSaveFromCanvas += self.on_save_configuration_as
def on_toggle_toolbar(self, event):
pane = self.m_mgr.GetPane(self.Toolbox)
if event.Selection == 0:
pane.Show(show=True)
if event.Selection == 1:
pane.Hide()
self.m_mgr.Update()
def model_input_prompt(self, path):
ModelInputPromptCtrl(self, path)
def check_users_json(self):
UserCtrl.create_user_json()
if UserCtrl.is_user_json_empty():
controller = UserCtrl(self)
controller.CenterOnScreen()
controller.Show()
def connect_to_databases(self, connections_txt):
# connect to databases defined in the connections file
dbs = gui.read_database_connection_from_file(connections_txt)
for db in dbs:
usr, pwd = self.decrypt_db_username_password(db['username'], db['password'])
if usr is not None:
engine.connectToDb(db['name'],db['description'],db['engine'],db['address'],db['database'],usr,pwd)
else:
msg = 'Could not resolve database username for %s/%s. Make sure secret.py is created correctly.' % (db['address'], db['database'])
sPrint(msg, MessageType.ERROR)
# load the local database into the engine
engine.connectToDb(title='ODM2 SQLite (local)', desc='Local SQLite database',
engine='sqlite', address=self.local_db_path,
dbname=None, user=None,
pwd=None, default=True)
def decrypt_db_username_password(self, uhash, phash):
"""
decrypts database username and password that is stored in connections.txt using secret key (secret.py) and AES encryption
Args:
uhash: encrypted username hash
phash: encrypted password hash
Returns: decrypted username (or None), decrypted password
"""
import secret
import encrypt
cipher = encrypt.AESCipher(secret.key)
usr = cipher.decrypt(uhash) or None
pwd = cipher.decrypt(phash)
return usr, pwd
##################################
# EVENTS
##################################
def on_add_csv_file(self, event):
file_dialog = wx.FileDialog(self.Parent,
message="Add *.csv file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=" CSV File (*.csv)|*.csv", style=wx.FD_OPEN)
if file_dialog.ShowModal() == wx.ID_OK:
path = file_dialog.GetPath()
# TODO: handle the selected CSV file (currently unused)
def on_add_net_cdf_file(self, event):
file_dialog = wx.FileDialog(self.Parent,
message="Add *.nc file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard="NetCDF File(*.nc)|*.nc", style=wx.FD_OPEN)
# if a file is selected
if file_dialog.ShowModal() == wx.ID_OK:
path = file_dialog.GetPath()
filename = file_dialog.GetFilename()
NetcdfDetailsCtrl(self.Parent, path, filename)
def on_add_user(self, event):
controller = UserCtrl(self)
controller.CenterOnScreen()
controller.Show()
def on_close(self, event):
dial = wx.MessageDialog(None, 'Are you sure you want to quit?', 'Question',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
dial.SetYesNoLabels(yes="Quit", no="Cancel")
if event == None or dial.ShowModal() == wx.ID_YES:
# kill multiprocessing
e = Engine()
msg = e.close()
elog.debug('Closing Engine Processes: %s' % msg)
# kill all threads
threads = {t.name:t for t in threading.enumerate()}
mainthread = threads.pop('MainThread')
elog.debug('Closing EMIT Threads: %s' % msg)
non_daemon = []
for t in threads.itervalues():
# check if the thread is a daemon, if so, it should not cause any problems
if t.isDaemon():
elog.debug('%s daemon=%s' %(t.name, t.isDaemon()))
else:
# add this thread to the non-daemon list
non_daemon.append(t)
for t in non_daemon:
elog.warning('%s is not a daemon thread and may cause problems while shutting down' % t.name)
t.join(1)
# determine if there are any non-daemon threads that are still alive
non_daemon_and_alive = []
for t in threads.itervalues():
if not t.isDaemon() and t.isAlive():
non_daemon_and_alive.append(t)
# attempt to stop non-daemon threads
try:
for t in non_daemon_and_alive:
t._Thread__stop()
except Exception, e:
elog.error('Error encountered closing thread %s: %s' % (t.name, e))
# close the main thread
self.Destroy()
wx.App.ExitMainLoop
wx.WakeUpMainThread
def set_model_details_by_model(self, model):
self.model_details.model_object = model
self.model_details.grid.reset_grid()
self.model_details.populate_grid_by_model_object()
self.toggle_model_details(1)
def toggle_model_details(self, selection):
pane = self.m_mgr.GetPane(self.model_details)
if selection:
pane.Show(show=True)
pane.CaptionVisible(True)
pane.CloseButton(True)
pane.PinButton(False)
else:
pane.Hide()
self.m_mgr.Update()
def on_default_view(self, event):
"""
Restore previously saved perspective
"""
self.m_mgr.LoadPerspective(self._default_perspective)
def on_open_dap_viewer(self, event):
NetcdfCtrl(self.Canvas.GetTopLevelParent())
def on_toggle_console(self, event):
ConsolePane = self.m_mgr.GetPane(self.bnb)
if event.Selection == 0:
ConsolePane.Show(show=True)
if event.Selection == 1:
ConsolePane.Hide()
self.m_mgr.Update()
def on_load_configuration(self, event):
file_dialog = wx.FileDialog(self, message="Load New File",
defaultDir=self.defaultLoadDirectory,
defaultFile="",
wildcard="Simulation Files (*.sim)|*.sim|MDL Files (*.mdl)|*.mdl",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if file_dialog.ShowModal() == wx.ID_OK:
self.defaultLoadDirectory = os.path.dirname(file_dialog.GetPath())
self.model_input_prompt(file_dialog.GetPath())
file_dialog.Destroy()
def on_save_configuration_as(self, event):
print os.environ.get("APP_DEFAULT_SAVE_PATH")
# Executes from File ->Save As
save = wx.FileDialog(self.Canvas.GetTopLevelParent(), message="Save Configuration",
defaultDir=self.defaultLoadDirectory, defaultFile="",
wildcard="Simulation Files (*.sim)|*.sim", style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if save.ShowModal() == wx.ID_OK:
self.save_path = save.GetPath()
if self.save_path[-4] != '.': # check if extension was added
self.save_path += '.sim'
self.loading_path = self.save_path
self.defaultLoadDirectory = os.path.dirname(self.loading_path)
self.Canvas.save_simulation(self.save_path)
txt = save.Filename.split('.sim')[0]
e = dict(cat=self.Toolbox.cat, txt=txt, fullpath=save.Path)
self.Toolbox.loadSIMFile(e)
self.Toolbox.refresh_toolbox()
save.Destroy()
def on_save_configuration(self, event):
if not self.loading_path:
self.on_save_configuration_as(event)
return
self.Canvas.save_simulation(self.save_path)
self.Toolbox.refresh_toolbox()
def on_switch_lower_panel_tab(self, event):
try:
# update databases in a generic way
selected_page = self.bnb.GetPage(event.GetSelection())
if len(selected_page.connection_combobox.GetItems()) == 0:
selected_page.refreshConnectionsListBox()
except: pass
def on_settings(self, event):
SettingsCtrl(self)
|
Castronova/EMIT
|
gui/controller/EMITCtrl.py
|
Python
|
gpl-2.0
| 11,268
|
[
"NetCDF"
] |
10f48d131c8d48c5da2e4185b49392c4145543525fe4016afe23ca567312a847
|
#!/usr/bin/env python3
# pylint: disable=line-too-long
"""
Various functions for annotating genomes.
"""
# System imports
import pickle
import shutil
import os
import tempfile
import logging
import subprocess
import multiprocessing as mp
from os import path, close, mkdir, listdir
from enrichm.genome import Genome, AnnotationParser
from enrichm.databases import Databases
from enrichm.sequence_io import SequenceIO
from enrichm.writer import Writer, MatrixGenerator
from enrichm.toolbox import list_splitter, run_command
def parse_genomes(params):
'''
Parses an input genome file into a Genome object. This is outside
of a class to enable parallelisation
'''
genome = Genome(*params)
return genome
class Annotate:
'''
Annotates proteins, and MAGs
'''
GENOME_BIN = 'genome_bin'
GENOME_PROTEINS = 'genome_proteins'
GENOME_GENES = 'genome_genes'
GENOME_KO = 'annotations_ko'
GENOME_KO_HMM = 'annotations_ko_hmm'
GENOME_EC = 'annotations_ec'
GENOME_PFAM = 'annotations_pfam'
GENOME_TIGRFAM = 'annotations_tigrfam'
GENOME_HYPOTHETICAL = 'annotations_hypothetical'
GENOME_CAZY = 'annotations_cazy'
GENOME_GFF = 'annotations_gff'
GENOME_OBJ = 'annotations_genomes'
OUTPUT_KO = 'ko_frequency_table.tsv'
OUTPUT_KO_HMM = 'ko_hmm_frequency_table.tsv'
OUTPUT_EC = 'ec_frequency_table.tsv'
OUTPUT_PFAM = 'pfam_frequency_table.tsv'
OUTPUT_TIGRFAM = 'tigrfam_frequency_table.tsv'
OUTPUT_CAZY = 'cazy_frequency_table.tsv'
OUTPUT_CLUSTER = 'cluster_frequency_table.tsv'
OUTPUT_ORTHOLOG = 'ortholog_frequency_table.tsv'
OUTPUT_HYPOTHETICAL_ANNOTATIONS = 'hypothetical_annotations.tsv'
OUTPUT_DIAMOND = "DIAMOND_search"
GFF_SUFFIX = '.gff'
PROTEINS_SUFFIX = '.faa'
ANNOTATION_SUFFIX = '.tsv'
PICKLE_SUFFIX = '.pickle'
def __init__(self, output_directory, annotate_ko, annotate_ko_hmm, annotate_pfam,
annotate_tigrfam, annoatate_cluster, annotate_ortholog, annotate_cazy, annotate_ec,
annotate_orthogroup, evalue, bit, percent_id_cutoff, aln_query, aln_reference,
fraction_aligned, cut_ga_pfam, cut_nc_pfam, cut_tc_pfam, cut_ga_tigrfam, cut_nc_tigrfam,
cut_tc_tigrfam, cut_hmm, inflation, chunk_number, chunk_max,
count_domains, threads, parallel, suffix, light):
# Define inputs and outputs
self.output_directory = output_directory
# Define type of annotation to be carried out
self.annotate_ko = annotate_ko
self.annotate_ko_hmm = annotate_ko_hmm
self.annotate_pfam = annotate_pfam
self.annotate_tigrfam = annotate_tigrfam
self.annotate_cluster = annoatate_cluster
self.annotate_ortholog = annotate_ortholog
self.annotate_orthogroup = annotate_orthogroup
self.annotate_cazy = annotate_cazy
self.annotate_ec = annotate_ec
# Cutoffs
self.evalue = evalue
self.bit = bit
self.percent_id_cutoff = percent_id_cutoff
self.aln_query = aln_query
self.aln_reference = aln_reference
self.fraction_aligned = fraction_aligned
self.cut_ga_pfam = cut_ga_pfam
self.cut_nc_pfam = cut_nc_pfam
self.cut_tc_pfam = cut_tc_pfam
self.cut_ga_tigrfam = cut_ga_tigrfam
self.cut_nc_tigrfam = cut_nc_tigrfam
self.cut_tc_tigrfam = cut_tc_tigrfam
self.cut_hmm = cut_hmm
self.inflation = inflation
self.chunk_number = chunk_number
self.chunk_max = chunk_max
self.count_domains = count_domains
# Parameters
self.threads = threads
self.parallel = parallel
self.suffix = suffix
self.light = light
# Set up multiprocesses pool
self.pool = mp.Pool(processes=int(self.parallel))
# Load databases
self.databases = Databases()
def prep_genome(self, genome_file_list, genome_directory):
'''
Do any preparation specific to the genome annotation pipeline.
Inputs
------
genome_file_list - List. list of strings, each a path to a file
containing a genome
Outputs
-------
returns the directory with all genome ids sym-linked into it.
'''
# link all the genomes into one file
logging.info('Preparing genomes for annotation')
if genome_file_list:
mkdir(genome_directory)
genome_paths = list()
for genome_path in genome_file_list:
if genome_path.endswith(self.suffix):
genome_paths.append(f"{genome_path}")
cmd = f"xargs --arg-file=/dev/stdin cp --target-directory={genome_directory}"
logging.debug(cmd)
process = subprocess.Popen(["bash", "-c", cmd],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
process.communicate(input=str('\n'.join(genome_paths)))
return genome_directory
def call_proteins(self, genome_directory):
'''
Use prodigal to call proteins within the genomes
Parameters
----------
genome_directory - string. Directory containing .fna files for each
input genome
Outputs
-------
returns the directory containing an .faa file for each input genomes
'''
protein_directory_path = path.join(self.output_directory, self.GENOME_PROTEINS)
gene_directory_path = path.join(self.output_directory, self.GENOME_GENES)
mkdir(protein_directory_path)
mkdir(gene_directory_path)
genome_list = list()
genome_paths = list()
for genome in listdir(genome_directory):
if genome.endswith(self.suffix):
genome_paths.append(path.splitext(genome)[0])
logging.info(" - Calling proteins for %i genomes", len(genome_paths))
cmd = "ls %s/*%s | \
sed 's/%s//g' | \
grep -o '[^/]*$' | \
parallel -j %s \
prodigal \
-q \
-p meta \
-o /dev/null \
-d %s/{}%s \
-a %s/{}%s \
-i %s/{}%s \
> /dev/null 2>&1" \
% (genome_directory, self.suffix, self.suffix, self.parallel, gene_directory_path,
self.suffix, protein_directory_path, self.PROTEINS_SUFFIX, genome_directory,
self.suffix)
run_command(cmd)
protein_directory_files = listdir(protein_directory_path)
genome_directory_files = listdir(genome_directory)
for genome_protein, genome_nucl in zip(protein_directory_files, genome_directory_files):
genome_protein_base = genome_protein.replace(self.PROTEINS_SUFFIX, self.suffix)
output_genome_protein_path = path.join(protein_directory_path, genome_protein)
output_genome_nucl_path = path.join(genome_directory, genome_nucl)
output_genome_gene_path = path.join(gene_directory_path, genome_protein_base)
genome = (self.light, output_genome_protein_path, output_genome_nucl_path,
output_genome_gene_path)
genome_list.append(genome)
return genome_list
def annotate_diamond(self, genomes_list, database, parser_type, ids_type, output_subdirectory):
'''
Annotate the proteins encoded by each genome with KO ids using either BLAST or using HMM
searches (no implemented yet).
Parameters
----------
genome_faa_directory - string. Directory containing .faa files for
each input genome
Outputs
-------
returns a directory containing the search results for each of the input population genomes,
and a frequency matrix containing the KOs as rows and the genomes as columns.
'''
output_directory_path = path.join(self.output_directory,
output_subdirectory)
genome_dict = {genome.name:genome for genome in genomes_list}
mkdir(output_directory_path)
specific_cutoffs = None
with tempfile.NamedTemporaryFile() as temp:
to_write = str()
for genome in genomes_list:
to_write += f"sed \"s/>/>{genome.name}~/g\" {genome.path}\n"
temp.write(str.encode(to_write))
temp.flush()
output_annotation_path = path.join(output_directory_path, self.OUTPUT_DIAMOND) + \
self.ANNOTATION_SUFFIX
logging.info(' - BLASTing genomes')
self.diamond_search(temp.name, output_annotation_path, database)
for genome_name, batch in self.get_batches(output_annotation_path):
if batch:
genome = genome_dict[genome_name]
genome.add(batch, self.evalue, self.bit, self.aln_query, self.aln_reference,
specific_cutoffs, parser_type, ids_type)
def get_batches(self, input_file):
'''
Separate DIAMOND blast results into batches, where a batch is all the hits for a genome.
Parameters
----------
input_file - string. Path to the file containing the DIAMOND blast results.
'''
last = None
input_file_io = open(input_file)
for line in input_file_io:
split_line = line.strip().split('\t')
genome_id = split_line[0].split('~')[0]
if last is None:
last = genome_id
batch = [split_line]
else:
if last == genome_id:
batch.append(split_line)
else:
yield last, batch
batch = [split_line]
last = genome_id
if last is None:
yield None, None
else:
yield last, batch
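# Editor's note (illustrative, hypothetical data): with a DIAMOND tabular file such as
#   genomeA~gene_1   ref_hit_1   ...
#   genomeA~gene_2   ref_hit_2   ...
#   genomeB~gene_1   ref_hit_3   ...
# get_batches yields ('genomeA', [rows for genomeA]) and then ('genomeB', [rows for genomeB]);
# it assumes hits for the same genome are contiguous, which holds because the sequences were
# renamed with the '<genome>~' prefix before the search.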
def diamond_search(self, tmp_name, output_path, database):
'''
Carry out a diamond blastp search.
Parameters
----------
tmp_name - string. Path to a temporary shell script that streams the renamed protein sequences to stdout
output_path - string. Path to file to output results into
database - string. Path to the DIAMOND database to search against
'''
cmd = f'bash {tmp_name} | diamond blastp \
--quiet \
--outfmt 6 \
--max-target-seqs 1 \
--query /dev/stdin \
--out {output_path} \
--db {database} \
--threads {self.threads} '
if self.evalue:
cmd += f'--evalue {self.evalue} '
if self.bit:
cmd += f'--min-score {self.bit} '
if self.percent_id_cutoff:
cmd += f'--id {self.percent_id_cutoff*100} '
if self.aln_query:
cmd += f"--query-cover {self.aln_query*100} "
if self.aln_reference:
cmd += f"--subject-cover {self.aln_reference*100} "
run_command(cmd)
def hmmsearch_annotation(self, genomes_list, output_directory_path, database, ids_type, parser):
'''
Annotate the proteins encoded by each genome with pfam ids using HMM searches.
Parameters
----------
genomes_list - list. list of Genome objects
'''
mkdir(output_directory_path)
genome_dict = {genome.name: genome for genome in genomes_list}
hmmcutoff = (ids_type in (AnnotationParser.TIGRFAM, AnnotationParser.PFAM))
if ids_type == AnnotationParser.KO_HMM:
specific_cutoffs = self.databases.parse_ko_cutoffs()
else:
specific_cutoffs = None
self.hmm_search(output_directory_path, database, hmmcutoff)
if ids_type == AnnotationParser.PFAM:
pfam2clan = self.databases.pfam2clan()
else:
pfam2clan = None
for genome_annotation in listdir(output_directory_path):
genome_id = path.splitext(genome_annotation)[0]
genome = genome_dict[genome_id]
output_annotation_path = path.join(output_directory_path, genome_annotation)
genome.add(output_annotation_path, self.evalue, self.bit, self.aln_query,
self.aln_reference, specific_cutoffs, parser, ids_type,
pfam2clan=pfam2clan)
def annotate_hypothetical(self, genomes_list):
'''
Sort proteins coded by each genome into homologous clusters.
Inputs
------
genomes_list - list. list of Genome objects
'''
output_directory_path = path.join(self.output_directory, self.GENOME_HYPOTHETICAL)
mkdir(output_directory_path)
renamed_genomes = list()
for genome in genomes_list:
renamed_genome = next(tempfile._get_candidate_names())
cmd = f"sed 's/>/>{genome.name}~/g' {genome.path} > {renamed_genome}"
run_command(cmd)
renamed_genomes.append(renamed_genome)
tmp_dir = tempfile.mkdtemp()
db_path = path.join(output_directory_path, "db")
clu_path = path.join(output_directory_path, "clu")
align_path = path.join(output_directory_path, "alignDb")
blast_output_path = path.join(output_directory_path, "alignDb.m8")
formatted_blast_output_path = path.join(output_directory_path, "alignDb.formatted.m8")
clu_tsv_path = path.join(output_directory_path, "hypothetical_clusters.tsv")
logging.info(' - Generating MMSeqs2 database')
cmd = f"mmseqs createdb {' '.join(renamed_genomes)} {db_path}"
run_command(cmd)
for renamed_genome in renamed_genomes:
os.remove(renamed_genome)
logging.info(' - Clustering genome proteins')
cmd = f"mmseqs cluster \
{db_path} \
{clu_path} \
{tmp_dir} \
--threads {self.threads} \
--min-seq-id {self.percent_id_cutoff} \
-c {self.fraction_aligned} \
-v 0"
run_command(cmd)
logging.info(' - Extracting clusters')
cmd = f'mmseqs createtsv \
{db_path} \
{db_path} \
{clu_path} \
{clu_tsv_path} \
--threads {self.threads} \
-v 0'
run_command(cmd)
if self.annotate_ortholog:
logging.info(' - Computing Smith-Waterman alignments for clustering results')
cmd = f"mmseqs alignall \
{db_path} \
{clu_path} \
{align_path} \
--alignment-mode 3 \
--threads {self.threads} \
-v 0"
run_command(cmd)
logging.info(' - Converting to BLAST-like output')
cmd = f"mmseqs createtsv \
{db_path} \
{db_path} \
{align_path} \
{blast_output_path} \
--threads {self.threads} \
-v 0"
# --format-output query,target,bits
run_command(cmd)
logging.info(' - Reformatting BLAST output')
cmd = "OFS=\"\t\" awk 'FNR==NR{a[$1]=$2;next}{$3=a[$3]; \
$1=\"\"; for(i=2;i<NF;i++){printf(\"%s\t\",$i)} \
printf(\"\\n\")}' %s %s | cut -f1,2,5 > %s" \
% ("%s", db_path + '.lookup', blast_output_path, formatted_blast_output_path)
run_command(cmd)
ortholog_dict = self.run_mcl(formatted_blast_output_path,
output_directory_path)
ortholog_ids = ortholog_dict.keys()
else:
ortholog_dict = dict()
ortholog_ids = list()
cluster_ids = self.parse_cluster_results(clu_tsv_path,
genomes_list,
ortholog_dict,
output_directory_path)
return cluster_ids, ortholog_ids
def run_mcl(self, blast_abc, output_directory_path):
'''
Parse the protein clusters produced from Mmseqs2 using mcl
Parameters
----------
blast_abc - string. an abc file for mcl to run on. More information on the format of abc
files can be found at https://micans.org/mcl/man/clmprotocols.html
output_directory_path - string. Path to write the results of mcl parsing to.
'''
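# Editor's note (illustrative): each line of the .abc file is simply
#   <query_protein>\t<target_protein>\t<score>
# i.e. the three columns cut from the reformatted MMseqs2/BLAST output produced in
# annotate_hypothetical above.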
dict_path = path.join(output_directory_path, "alignDb.dict")
mci_path = path.join(output_directory_path, "alignDb.mci")
cluster_path = path.join(output_directory_path, "mcl_clusters.tsv")
output_path = path.join(output_directory_path, "mcl_clusters.convert.tsv")
logging.info(' - Preparing network')
ortholog_dict = dict()
cmd = f"mcxload \
-abc {blast_abc} \
-write-tab {dict_path} \
-o {mci_path} \
--stream-mirror \
--stream-neg-log10"
run_command(cmd)
logging.info(' - Finding orthologs')
ortholog_dict = dict()
cmd = f'mcl \
{mci_path} \
-te {self.threads} \
-I {self.inflation} \
-o {cluster_path}'
run_command(cmd)
logging.info(' - Reformatting output')
ortholog_dict = dict()
cmd = f'mcxdump \
-icl {cluster_path} \
-o {output_path} \
-tabr {dict_path}'
run_command(cmd)
ortholog = 1
for line in open(output_path):
ortholog_idx = "ortholog_%i" % ortholog
ortholog_dict[ortholog_idx] = set()
for protein in line.strip().split('\t'):
ortholog_dict[ortholog_idx].add(protein)
ortholog += 1
return ortholog_dict
def parse_cluster_results(self, cluster_output_path, genomes_list, ortholog_dict,
output_directory_path):
'''
Parse cluster output in tab format.
Inputs
------
cluster_output_path - String. Path to mmseqs2 clustering output file
Returns
-------
The set of cluster ids assigned to the sequences of the input genomes.
'''
logging.info(' - Parsing input cluster file: %s', cluster_output_path)
cluster_ids = set()
previous_cluster_name = None
counter = 0
genome_dictionary = {genome.name:genome for genome in genomes_list}
output_hypothetical_annotations = path.join(output_directory_path,
self.OUTPUT_HYPOTHETICAL_ANNOTATIONS)
with open(output_hypothetical_annotations, 'w') as out_io:
for line in open(cluster_output_path):
cluster_id, member = line.strip().split('\t')
genome_id, sequence_id = member.split('~')
if cluster_id == previous_cluster_name:
genome_dictionary[genome_id].add_cluster(sequence_id, "cluster_%i" % counter)
else:
counter += 1
previous_cluster_name = cluster_id
cluster_ids.add("cluster_%i" % counter)
genome_dictionary[genome_id].add_cluster(sequence_id, "cluster_%i" % counter)
out_io.write('\t'.join([genome_id, sequence_id, "cluster_%i" % counter]) + '\n')
for ortholog, group in ortholog_dict.items():
for member in group:
genome, protein = member.split('~')
genome_dictionary[genome].add_ortholog(protein, ortholog)
return cluster_ids
def _default_hmmsearch_options(self):
cmd = str()
if self.bit:
cmd += '-T %s ' % (str(self.bit))
else:
cmd += '-E %s ' % (str(self.evalue))
return cmd
def hmm_search(self, output_path, database, hmmcutoff):
'''
Carry out a hmmsearch.
Parameters
----------
output_path - string. Directory to write the per-genome domtblout results into
database - string. Path to HMM to use for searching
hmmcutoff - bool. If True, apply the model-specific GA/NC/TC cutoffs where requested
'''
input_genome_path = path.join(self.output_directory, self.GENOME_PROTEINS)
cmd = "ls %s | sed 's/%s//g' | parallel -j %s\
hmmsearch \
--cpu %s \
-o /dev/null \
--noali \
--domtblout %s/{}%s " \
% (input_genome_path, self.PROTEINS_SUFFIX, self.parallel,
self.threads, output_path, self.ANNOTATION_SUFFIX)
if hmmcutoff:
if (self.cut_ga_pfam or self.cut_nc_pfam or self.cut_tc_pfam) and 'pfam' in database:
if self.cut_ga_pfam:
cmd += " --cut_ga "
if self.cut_nc_pfam:
cmd += " --cut_nc "
if self.cut_tc_pfam:
cmd += " --cut_tc "
elif (self.cut_ga_tigrfam or self.cut_nc_tigrfam or self.cut_tc_tigrfam) and 'tigrfam' in database:
if self.cut_ga_tigrfam:
cmd += " --cut_ga "
if self.cut_nc_tigrfam:
cmd += " --cut_nc "
if self.cut_tc_tigrfam:
cmd += " --cut_tc "
else:
cmd += self._default_hmmsearch_options()
else:
cmd += self._default_hmmsearch_options()
cmd += "%s %s/{}.faa 2> /dev/null" % (database, input_genome_path)
run_command(cmd)
def generate_gff_files(self, genomes_list):
'''
Write GFF files for each of the genome objects in genomes_list
Parameters
----------
genomes_list - List. List of Genome objects
'''
output_directory_path = path.join(self.output_directory,
self.GENOME_GFF)
mkdir(output_directory_path)
for genome in genomes_list:
logging.info(' - Generating .gff file for %s', genome.name)
gff_output = path.join(output_directory_path, genome.name + self.GFF_SUFFIX)
Writer.write_gff(genome, gff_output)
def rename_fasta(self, genomes_list):
'''
Rename the called proteins with annotation ids.
Parameters
----------
genomes_list - List. List of Genome objects
'''
seqio = SequenceIO()
for genome in genomes_list:
file_object, fname = tempfile.mkstemp(suffix='.faa', text=True)
if genome.gene:
fd_gene, fname_gene = tempfile.mkstemp(suffix='.fna', text=True)
with open(fname_gene, 'w') as out_gene_io:
for description, sequence in seqio.each(open(genome.gene)):
name = description.partition(' ')[0]
annotations = ' '.join(genome.sequences[name].all_annotations())
out_gene_io.write(">%s %s\n" % (name, annotations))
out_gene_io.write(sequence + '\n')
close(fd_gene)
logging.debug('Moving %s to %s', fname_gene, genome.gene)
shutil.move(fname_gene, genome.gene)
with open(fname, 'w') as out_io:
for description, sequence in seqio.each(open(genome.path)):
name = description.partition(' ')[0]
annotations = ' '.join(genome.sequences[name].all_annotations())
out_io.write(">%s %s\n" % (name, annotations))
out_io.write(str(sequence) + '\n')
close(file_object)
logging.debug('Moving %s to %s', fname, genome.path)
shutil.move(fname, genome.path)
def pickle_objects(self, genomes_list):
'''
Store annotated genome objects as pickles.
Parameters
----------
genomes_list - List. List of Genome objects
'''
output_directory_path = path.join(self.output_directory,
self.GENOME_OBJ)
mkdir(output_directory_path)
for genome in genomes_list:
genome_pickle_path = path.join(output_directory_path, genome.name + self.PICKLE_SUFFIX)
with open(genome_pickle_path, 'wb') as output:
pickle.dump(genome, output)
def parse_genome_inputs(self, genome_directory, protein_directory, genome_files, protein_files):
'''
Inputs
------
genome_directory - String. Path to a directory of genome (.fna) files, or None
protein_directory - String. Path to a directory of protein (.faa) files, or None
genome_files - List. Paths to individual genome (.fna) files, or None
protein_files - List. Paths to individual protein (.faa) files, or None
Outputs
-------
A list of Genome objects, one per input genome
'''
prep_genomes_list = list()
genomes_list = list()
if protein_directory:
logging.info("Using provided proteins")
protein_genome_list = list()
for protein_file in listdir(protein_directory):
protein_genome_list.append(path.join(protein_directory, protein_file))
directory = self.prep_genome(protein_genome_list,
path.join(self.output_directory,
self.GENOME_PROTEINS))
for genome_proteins_file in listdir(directory):
if genome_proteins_file.endswith(self.suffix):
genome = (self.light, path.join(directory, genome_proteins_file), None, None)
prep_genomes_list.append(genome)
elif protein_files:
logging.info("Using provided proteins")
genome_proteins_path = path.join(self.output_directory, self.GENOME_PROTEINS)
directory = self.prep_genome(protein_files, genome_proteins_path)
for protein_file in listdir(directory):
protein_file_path = path.join(directory, path.basename(protein_file))
prep_genomes_list.append((self.light, protein_file_path, None, None))
elif genome_directory:
logging.info("Calling proteins for annotation")
prep_genomes_list = self.call_proteins(genome_directory)
directory = genome_directory
elif genome_files:
logging.info("Calling proteins for annotation")
directory = self.prep_genome(genome_files,
path.join(self.output_directory, self.GENOME_BIN))
prep_genomes_list = self.call_proteins(directory)
for chunk in list_splitter(prep_genomes_list, self.chunk_number, self.chunk_max):
genomes_list += self.pool.map(parse_genomes, chunk)
return genomes_list
def annotate_pipeline(self, genome_directory, protein_directory, genome_files, protein_files):
'''
Run Annotate pipeline for enrichM
Parameters
----------
genome_directory - String. Path to directory containing genomes
protein_directory - String. Path to directory containing proteins (.faa files) for genomes
genome_files - List. List of strings, each to a .fna genome file.
protein_files - List. List of strings, each to a .faa proteins file.
'''
logging.info("Running pipeline: annotate")
logging.info("Setting up for genome annotation")
genomes_list = self.parse_genome_inputs(genome_directory, protein_directory,
genome_files, protein_files)
if genomes_list:
logging.info("Starting annotation:")
if (self.annotate_cluster or self.annotate_ortholog):
logging.info(' - Annotating genomes with hypothetical clusters')
cluster_ids, ortholog_ids = self.annotate_hypothetical(genomes_list)
logging.info(' - Generating hypotheticals frequency table')
matrix_generator = MatrixGenerator(MatrixGenerator.HYPOTHETICAL, cluster_ids)
freq_table = path.join(self.output_directory, self.OUTPUT_CLUSTER)
matrix_generator.write_matrix(genomes_list, self.count_domains, freq_table)
if self.annotate_ortholog:
matrix_generator = MatrixGenerator(MatrixGenerator.ORTHOLOG, ortholog_ids)
freq_table = path.join(self.output_directory, self.OUTPUT_ORTHOLOG)
matrix_generator.write_matrix(genomes_list, self.count_domains, freq_table)
if self.annotate_orthogroup:
logging.warning(f"Not yet implemented")
#self.annotate_orthogroup(genomes_list)
if self.annotate_ko:
annotation_type = AnnotationParser.BLASTPARSER
logging.info(' - Annotating genomes with ko ids using DIAMOND')
self.annotate_diamond(genomes_list, self.databases.KO_DB,
annotation_type, AnnotationParser.KO,
self.GENOME_KO)
logging.info(' - Generating ko frequency table')
matrix_generator = MatrixGenerator(MatrixGenerator.KO)
freq_table = path.join(self.output_directory, self.OUTPUT_KO)
matrix_generator.write_matrix(genomes_list, self.count_domains, freq_table)
if self.annotate_ko_hmm:
annotation_type = AnnotationParser.HMMPARSER
logging.info(' - Annotating genomes with ko ids using HMMs')
self.hmmsearch_annotation(genomes_list,
path.join(
self.output_directory, self.GENOME_KO_HMM),
self.databases.KO_HMM_DB,
AnnotationParser.KO,
annotation_type)
logging.info(' - Generating ko frequency table')
matrix_generator = MatrixGenerator(MatrixGenerator.KO)
freq_table = path.join(
self.output_directory, self.OUTPUT_KO_HMM)
matrix_generator.write_matrix(genomes_list, self.count_domains, freq_table)
if self.annotate_ec:
annotation_type = AnnotationParser.BLASTPARSER
logging.info(' - Annotating genomes with ec ids')
self.annotate_diamond(genomes_list, self.databases.EC_DB, annotation_type,
AnnotationParser.EC, self.GENOME_EC)
logging.info(' - Generating ec frequency table')
matrix_generator = MatrixGenerator(MatrixGenerator.EC)
freq_table = path.join(self.output_directory, self.OUTPUT_EC)
matrix_generator.write_matrix(genomes_list, self.count_domains, freq_table)
if self.annotate_pfam:
annotation_type = AnnotationParser.HMMPARSER
logging.info(' - Annotating genomes with pfam ids')
self.hmmsearch_annotation(genomes_list,
path.join(self.output_directory, self.GENOME_PFAM),
self.databases.PFAM_DB,
AnnotationParser.PFAM,
annotation_type)
logging.info(' - Generating pfam frequency table')
matrix_generator = MatrixGenerator(MatrixGenerator.PFAM)
freq_table = path.join(self.output_directory, self.OUTPUT_PFAM)
matrix_generator.write_matrix(genomes_list, self.count_domains, freq_table)
if self.annotate_tigrfam:
annotation_type = AnnotationParser.HMMPARSER
logging.info(' - Annotating genomes with tigrfam ids')
self.hmmsearch_annotation(genomes_list,
path.join(self.output_directory, self.GENOME_TIGRFAM),
self.databases.TIGRFAM_DB,
AnnotationParser.TIGRFAM,
annotation_type)
logging.info(' - Generating tigrfam frequency table')
matrix_generator = MatrixGenerator(MatrixGenerator.TIGRFAM)
freq_table = path.join(self.output_directory, self.OUTPUT_TIGRFAM)
matrix_generator.write_matrix(genomes_list, self.count_domains, freq_table)
if self.annotate_cazy:
annotation_type = AnnotationParser.HMMPARSER
logging.info(' - Annotating genomes with CAZY ids')
self.hmmsearch_annotation(genomes_list,
path.join(self.output_directory, self.GENOME_CAZY),
self.databases.CAZY_DB,
AnnotationParser.CAZY,
annotation_type)
logging.info(' - Generating CAZY frequency table')
matrix_generator = MatrixGenerator(MatrixGenerator.CAZY)
freq_table = path.join(self.output_directory, self.OUTPUT_CAZY)
matrix_generator.write_matrix(genomes_list, self.count_domains, freq_table)
if hasattr(list(genomes_list[0].sequences.values())[0], "prod_id"):
logging.info('Generating .gff files:')
self.generate_gff_files(genomes_list)
logging.info('Renaming protein headers')
self.rename_fasta(genomes_list)
if not self.light:
logging.info('Storing genome objects')
self.pickle_objects(genomes_list)
logging.info('Finished annotation')
else:
logging.error('No files found with %s suffix in input directory', self.suffix)
raise Exception("No input files found")
|
geronimp/enrichM
|
enrichm/annotate.py
|
Python
|
gpl-3.0
| 35,214
|
[
"BLAST"
] |
55df7056270ef20e7bd4af9a1a065259cc8006b15f68b787086b0235d8477fef
|
"""
Django module container for classes and operations related to the "Course Module" content type
"""
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from lazy import lazy
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.graders import grader_from_conf
from xmodule.tabs import CourseTabList
import json
from xblock.fields import Scope, List, String, Dict, Boolean, Integer, Float
from .fields import Date
from django.utils.timezone import UTC
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
DEFAULT_START_DATE = datetime(2030, 1, 1, tzinfo=UTC())
CATALOG_VISIBILITY_CATALOG_AND_ABOUT = "both"
CATALOG_VISIBILITY_ABOUT = "about"
CATALOG_VISIBILITY_NONE = "none"
class StringOrDate(Date):
def from_json(self, value):
"""
Parse an optional metadata key containing a time or a string:
if present, assume it's a string if it doesn't parse.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
except:
return value
if result is None:
return value
else:
return result
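# Illustrative sketch (not part of the original module): StringOrDate simply
# falls back to the raw string whenever the value does not parse as a date.
# The values below are hypothetical and only demonstrate the fallback:
#
#     field = StringOrDate()
#     field.from_json("2014-09-18")       # roughly: a parsed, timezone-aware datetime
#     field.from_json("Week 3 of class")  # not a date -> returned unchanged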
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
@lazy
def start_page(self):
return int(self.table_of_contents[0].attrib['page'])
@lazy
def end_page(self):
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
return int(last_el.attrib['page'])
@lazy
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
age = datetime.now(UTC) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
except Exception as err:
pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s" % toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
def __eq__(self, other):
return (self.title == other.title and
self.book_url == other.book_url)
def __ne__(self, other):
return not self == other
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
except:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
class CourseFields(object):
lti_passports = List(
display_name=_("LTI Passports"),
help=_('Enter the passports for course LTI tools in the following format: "id:client_key:client_secret".'),
scope=Scope.settings
)
textbooks = TextbookList(help="List of pairs of (title, url) for textbooks used in this course",
default=[], scope=Scope.content)
wiki_slug = String(help="Slug that points to the wiki for this course", scope=Scope.content)
enrollment_start = Date(help="Date that enrollment for this class is opened", scope=Scope.settings)
enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings)
start = Date(help="Start time when this module is visible",
default=DEFAULT_START_DATE,
scope=Scope.settings)
end = Date(help="Date that this class ends", scope=Scope.settings)
cosmetic_display_price = Integer(
display_name=_("Cosmetic Course Display Price"),
help=_(
"The cost displayed to students for enrolling in the course. If a paid course registration price is "
"set by an administrator in the database, that price will be displayed instead of this one."
),
default=0,
scope=Scope.settings,
)
advertised_start = String(
display_name=_("Course Advertised Start Date"),
help=_(
"Enter the date you want to advertise as the course start date, if this date is different from the set "
"start date. To advertise the set start date, enter null."
),
scope=Scope.settings
)
pre_requisite_courses = List(
display_name=_("Pre-Requisite Courses"),
help=_("Pre-Requisite Course key if this course has a pre-requisite course"),
scope=Scope.settings
)
grading_policy = Dict(
help="Grading policy definition for this class",
default={
"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15,
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15,
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3,
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4,
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5,
},
},
scope=Scope.content
)
show_calculator = Boolean(
display_name=_("Show Calculator"),
help=_("Enter true or false. When true, students can see the calculator in the course."),
default=False,
scope=Scope.settings
)
display_name = String(
help=_("Enter the name of the course as it should appear in the edX.org course list."),
default="Empty",
display_name=_("Course Display Name"),
scope=Scope.settings
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_('Enter the method by which this course is edited ("XML" or "Studio").'),
default="Studio",
scope=Scope.settings,
deprecated=True # Deprecated because someone would not edit this value within Studio.
)
show_chat = Boolean(
display_name=_("Show Chat Widget"),
help=_("Enter true or false. When true, students can see the chat widget in the course."),
default=False,
scope=Scope.settings
)
tabs = CourseTabList(help="List of tabs to enable in this course", scope=Scope.settings, default=[])
end_of_course_survey_url = String(
display_name=_("Course Survey URL"),
help=_("Enter the URL for the end-of-course survey. If your course does not have a survey, enter null."),
scope=Scope.settings
)
discussion_blackouts = List(
display_name=_("Discussion Blackout Dates"),
help=_(
'Enter pairs of dates between which students cannot post to discussion forums. Each pair should be '
'formatted as ["YYYY-MM-DD", "YYYY-MM-DD"]. To specify times as well as dates, format each pair '
'as ["YYYY-MM-DDTHH:MM", "YYYY-MM-DDTHH:MM"] (be sure to include the "T" between the date and '
'time). An entry defining more than one blackout period might look like this: '
'[["2014-09-15", "2014-09-21"], ["2014-10-01", "2014-10-08"]]'
),
scope=Scope.settings
)
discussion_topics = Dict(
display_name=_("Discussion Topic Mapping"),
help=_(
'Enter discussion categories in the following format: "CategoryName": '
'{"id": "i4x-InstitutionName-CourseNumber-course-CourseRun"}. For example, one discussion '
'category may be "Lydian Mode": {"id": "i4x-UniversityX-MUS101-course-2014_T1"}. The "id" '
'value for each category must be unique.'
),
scope=Scope.settings
)
discussion_sort_alpha = Boolean(
display_name=_("Discussion Sorting Alphabetical"),
scope=Scope.settings, default=False,
help=_(
"Enter true or false. If true, discussion categories and subcategories are sorted alphabetically. "
"If false, they are sorted chronologically."
)
)
announcement = Date(
display_name=_("Course Announcement Date"),
help=_("Enter the date to announce your course."),
scope=Scope.settings
)
cohort_config = Dict(
display_name=_("Cohort Configuration"),
help=_(
"Enter policy keys and values to enable the cohort feature, define automated student assignment to "
"groups, or identify any course-wide discussion topics as private to cohort members."
),
scope=Scope.settings
)
is_new = Boolean(
display_name=_("Course Is New"),
help=_(
"Enter true or false. If true, the course appears in the list of new courses on edx.org, and a New! "
"badge temporarily appears next to the course image."
),
scope=Scope.settings
)
mobile_available = Boolean(
display_name=_("Mobile Course Available"),
help=_("Enter true or false. If true, the course will be available to mobile devices."),
default=False,
scope=Scope.settings
)
video_upload_pipeline = Dict(
display_name=_("Video Upload Credentials"),
help=_("Enter the unique identifier for your course's video files provided by edX."),
scope=Scope.settings
)
no_grade = Boolean(
display_name=_("Course Not Graded"),
help=_("Enter true or false. If true, the course will not be graded."),
default=False,
scope=Scope.settings
)
disable_progress_graph = Boolean(
display_name=_("Disable Progress Graph"),
help=_("Enter true or false. If true, students cannot view the progress graph."),
default=False,
scope=Scope.settings
)
pdf_textbooks = List(
display_name=_("PDF Textbooks"),
help=_("List of dictionaries containing pdf_textbook configuration"), scope=Scope.settings
)
html_textbooks = List(
display_name=_("HTML Textbooks"),
help=_(
"For HTML textbooks that appear as separate tabs in the courseware, enter the name of the tab (usually "
"the name of the book) as well as the URLs and titles of all the chapters in the book."
),
scope=Scope.settings
)
remote_gradebook = Dict(
display_name=_("Remote Gradebook"),
help=_(
"Enter the remote gradebook mapping. Only use this setting when "
"REMOTE_GRADEBOOK_URL has been specified."
),
scope=Scope.settings
)
allow_anonymous = Boolean(
display_name=_("Allow Anonymous Discussion Posts"),
help=_("Enter true or false. If true, students can create discussion posts that are anonymous to all users."),
scope=Scope.settings, default=True
)
allow_anonymous_to_peers = Boolean(
display_name=_("Allow Anonymous Discussion Posts to Peers"),
help=_(
"Enter true or false. If true, students can create discussion posts that are anonymous to other "
"students. This setting does not make posts anonymous to course staff."
),
scope=Scope.settings, default=False
)
advanced_modules = List(
display_name=_("Advanced Module List"),
help=_("Enter the names of the advanced components to use in your course."),
scope=Scope.settings
)
has_children = True
checklists = List(
scope=Scope.settings,
default=[
{
"short_description": _("Getting Started With Studio"),
"items": [
{
"short_description": _("Add Course Team Members"),
"long_description": _(
"Grant your collaborators permission to edit your course so you can work together."
),
"is_checked": False,
"action_url": "ManageUsers",
"action_text": _("Edit Course Team"),
"action_external": False,
},
{
"short_description": _("Set Important Dates for Your Course"),
"long_description": _(
"Establish your course's student enrollment and launch dates on the Schedule and Details "
"page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Details & Schedule"),
"action_external": False,
},
{
"short_description": _("Draft Your Course's Grading Policy"),
"long_description": _(
"Set up your assignment types and grading policy even if you haven't created all your "
"assignments."
),
"is_checked": False,
"action_url": "SettingsGrading",
"action_text": _("Edit Grading Settings"),
"action_external": False,
},
{
"short_description": _("Explore the Other Studio Checklists"),
"long_description": _(
"Discover other available course authoring tools, and find help when you need it."
),
"is_checked": False,
"action_url": "",
"action_text": "",
"action_external": False,
},
],
},
{
"short_description": _("Draft a Rough Course Outline"),
"items": [
{
"short_description": _("Create Your First Section and Subsection"),
"long_description": _("Use your course outline to build your first Section and Subsection."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Set Section Release Dates"),
"long_description": _(
"Specify the release dates for each Section in your course. Sections become visible to "
"students on their release dates."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Designate a Subsection as Graded"),
"long_description": _(
"Set a Subsection to be graded as a specific assignment type. Assignments within graded "
"Subsections count toward a student's final grade."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Reordering Course Content"),
"long_description": _("Use drag and drop to reorder the content in your course."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Renaming Sections"),
"long_description": _("Rename Sections by clicking the Section name from the Course Outline."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Deleting Course Content"),
"long_description": _(
"Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is "
"no Undo function."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Add an Instructor-Only Section to Your Outline"),
"long_description": _(
"Some course authors find using a section for unsorted, in-progress work useful. To do "
"this, create a section and set the release date to the distant future."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
],
},
{
"short_description": _("Explore edX's Support Tools"),
"items": [
{
"short_description": _("Explore the Studio Help Forum"),
"long_description": _(
"Access the Studio Help forum from the menu that appears when you click your user name "
"in the top right corner of Studio."
),
"is_checked": False,
"action_url": "http://help.edge.edx.org/",
"action_text": _("Visit Studio Help"),
"action_external": True,
},
{
"short_description": _("Enroll in edX 101"),
"long_description": _("Register for edX 101, edX's primer for course creation."),
"is_checked": False,
"action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
"action_text": _("Register for edX 101"),
"action_external": True,
},
{
"short_description": _("Download the Studio Documentation"),
"long_description": _("Download the searchable Studio reference documentation in PDF form."),
"is_checked": False,
"action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
"action_text": _("Download Documentation"),
"action_external": True,
},
],
},
{
"short_description": _("Draft Your Course About Page"),
"items": [
{
"short_description": _("Draft a Course Description"),
"long_description": _(
"Courses on edX have an About page that includes a course video, description, and more. "
"Draft the text students will read before deciding to enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Staff Bios"),
"long_description": _(
"Showing prospective students who their instructor will be is helpful. "
"Include staff bios on the course About page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course FAQs"),
"long_description": _("Include a short list of frequently asked questions about your course."),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course Prerequisites"),
"long_description": _(
"Let students know what knowledge and/or skills they should have before "
"they enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
],
},
],
)
info_sidebar_name = String(
display_name=_("Course Info Sidebar Name"),
help=_(
"Enter the heading that you want students to see above your course handouts on the Course Info page. "
"Your course handouts appear in the right panel of the page."
),
scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(
help=_(
"True if timezones should be shown on dates in the courseware. "
"Deprecated in favor of due_date_display_format."
),
scope=Scope.settings, default=True
)
due_date_display_format = String(
display_name=_("Due Date Display Format"),
help=_(
"Enter the format due dates are displayed in. Due dates must be in MM-DD-YYYY, DD-MM-YYYY, YYYY-MM-DD, "
"or YYYY-DD-MM format."
),
scope=Scope.settings, default=None
)
enrollment_domain = String(
display_name=_("External Login Domain"),
help=_("Enter the external login method students can use for the course."),
scope=Scope.settings
)
certificates_show_before_end = Boolean(
display_name=_("Certificates Downloadable Before End"),
help=_(
"Enter true or false. If true, students can download certificates before the course ends, if they've "
"met certificate requirements."
),
scope=Scope.settings,
default=False,
deprecated=True
)
certificates_display_behavior = String(
display_name=_("Certificates Display Behavior"),
help=_(
"Has three possible states: 'end', 'early_with_info', 'early_no_info'. 'end' is the default behavior, "
"where certificates will only appear after a course has ended. 'early_with_info' will display all "
"certificate information before a course has ended. 'early_no_info' will hide all certificate "
"information unless a student has earned a certificate."
),
scope=Scope.settings,
default="end"
)
course_image = String(
display_name=_("Course About Page Image"),
help=_(
"Edit the name of the course image file. You must upload this file on the Files & Uploads page. "
"You can also set the course image on the Settings & Details page."
),
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
## Course level Certificate Name overrides.
cert_name_short = String(
help=_(
"Between quotation marks, enter the short name of the course to use on the certificate that "
"students receive when they complete the course."
),
display_name=_("Certificate Name (Short)"),
scope=Scope.settings,
default=""
)
cert_name_long = String(
help=_(
"Between quotation marks, enter the long name of the course to use on the certificate that students "
"receive when they complete the course."
),
display_name=_("Certificate Name (Long)"),
scope=Scope.settings,
default=""
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(
display_name=_("CSS Class for Course Reruns"),
help=_("Allows courses to share the same css class across runs even if they have different numbers."),
scope=Scope.settings, default="",
deprecated=True
)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(
display_name=_("Discussion Forum External Link"),
help=_("Allows specification of an external link to replace discussion forums."),
scope=Scope.settings,
deprecated=True
)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(
display_name=_("Hide Progress Tab"),
help=_("Allows hiding of the progress tab."),
scope=Scope.settings,
deprecated=True
)
display_organization = String(
display_name=_("Course Organization Display String"),
help=_(
"Enter the course organization that you want to appear in the courseware. This setting overrides the "
"organization that you entered when you created the course. To use the organization that you entered "
"when you created the course, enter null."
),
scope=Scope.settings
)
display_coursenumber = String(
display_name=_("Course Number Display String"),
help=_(
"Enter the course number that you want to appear in the courseware. This setting overrides the course "
"number that you entered when you created the course. To use the course number that you entered when "
"you created the course, enter null."
),
scope=Scope.settings
)
max_student_enrollments_allowed = Integer(
display_name=_("Course Maximum Student Enrollment"),
help=_(
"Enter the maximum number of students that can enroll in the course. To allow an unlimited number of "
"students, enter null."
),
scope=Scope.settings
)
allow_public_wiki_access = Boolean(
display_name=_("Allow Public Wiki Access"),
help=_(
"Enter true or false. If true, edX users can view the course wiki even "
"if they're not enrolled in the course."
),
default=False,
scope=Scope.settings
)
invitation_only = Boolean(
display_name=_("Invitation Only"),
help=_("Whether to restrict enrollment to invitation by the course staff."),
default=False,
scope=Scope.settings
)
course_survey_name = String(
display_name=_("Pre-Course Survey Name"),
help=_("Name of SurveyForm to display as a pre-course survey to the user."),
default=None,
scope=Scope.settings,
deprecated=True
)
course_survey_required = Boolean(
display_name=_("Pre-Course Survey Required"),
help=_(
"Specify whether students must complete a survey before they can view your course content. If you "
"set this value to true, you must add a name for the survey to the Course Survey Name setting above."
),
default=False,
scope=Scope.settings,
deprecated=True
)
catalog_visibility = String(
display_name=_("Course Visibility In Catalog"),
help=_(
"Defines the access permissions for showing the course in the course catalog. This can be set to one "
"of three values: 'both' (show in catalog and allow access to about page), 'about' (only allow access "
"to about page), 'none' (do not show in catalog and do not allow access to an about page)."
),
default=CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
scope=Scope.settings,
values=[
{"display_name": _("Both"), "value": CATALOG_VISIBILITY_CATALOG_AND_ABOUT},
{"display_name": _("About"), "value": CATALOG_VISIBILITY_ABOUT},
{"display_name": _("None"), "value": CATALOG_VISIBILITY_NONE}]
)
entrance_exam_enabled = Boolean(
display_name=_("Entrance Exam Enabled"),
help=_(
"Specify whether students must complete an entrance exam before they can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
entrance_exam_minimum_score_pct = Float(
display_name=_("Entrance Exam Minimum Score (%)"),
help=_(
"Specify a minimum percentage score for an entrance exam before students can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=65,
scope=Scope.settings,
)
entrance_exam_id = String(
display_name=_("Entrance Exam ID"),
help=_("Content module identifier (location) of entrance exam."),
default=None,
scope=Scope.settings,
)
class CourseDescriptor(CourseFields, SequenceDescriptor):
module_class = SequenceModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
_ = self.runtime.service(self, "i18n").ugettext
if self.wiki_slug is None:
self.wiki_slug = self.location.course
if self.due_date_display_format is None and self.show_timezone is False:
# For existing courses with show_timezone set to False (and no due_date_display_format specified),
# set the due_date_display_format to what would have been shown previously (with no timezone).
# Then remove show_timezone so that if the user clears out the due_date_display_format,
# they get the default date display.
self.due_date_display_format = "DATE_TIME"
delattr(self, 'show_timezone')
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
if not getattr(self, "tabs", []):
CourseTabList.initialize_default(self)
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
# BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
# defective grading policy values (but not None)
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
# Use setters so that side effecting to .definitions works
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except (IOError):
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, id_generator):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
# bleh, have to parse the XML here to just pull out the url_name attribute
# I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
if len(self.textbooks) > 0:
            # Create one <textbook> element per textbook; reusing a single
            # element here would serialize only the last textbook.
            for textbook in self.textbooks:
                textbook_xml_object = etree.Element('textbook')
                textbook_xml_object.set('title', textbook.title)
                textbook_xml_object.set('book_url', textbook.book_url)
                xml_object.append(textbook_xml_object)
if self.wiki_slug is not None:
wiki_xml_object = etree.Element('wiki')
wiki_xml_object.set('slug', self.wiki_slug)
xml_object.append(wiki_xml_object)
return xml_object
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
if self.end is None:
return False
return datetime.now(UTC()) > self.end
def may_certify(self):
"""
Return True if it is acceptable to show the student a certificate download link
"""
show_early = self.certificates_display_behavior in ('early_with_info', 'early_no_info') or self.certificates_show_before_end
return show_early or self.has_ended()
def has_started(self):
return datetime.now(UTC()) > self.start
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
# force the caching of the xblock value so that it can detect the change
# pylint: disable=pointless-statement
self.grading_policy['GRADER']
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def always_cohort_inline_discussions(self):
"""
        This allows changing the default behavior of inline discussion cohorting. By
setting this to False, all inline discussions are non-cohorted unless their
ids are specified in cohorted_discussions.
"""
config = self.cohort_config
if config is None:
return True
return bool(config.get("always_cohort_inline_discussions", True))
@property
def is_newish(self):
"""
        Returns whether the course has been flagged as new. If
there is no flag, return a heuristic value considering the
announcement and the start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = self._sorting_dates()
if announcement and (now - announcement).days < 30:
                # The course has been announced for less than a month
return True
elif (now - start).days < 1:
# The course has not started yet
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
        Returns a number that can be used to sort the courses according
        to how "new" they are. The "newness" score is computed using a
        heuristic that takes into account the announcement and
        (advertised) start dates of the course if available.
        The lower the number the "newer" the course.
        """
        # Make courses that have an announcement date have a lower
        # score than courses that don't; older courses should have a
        # higher score.
announcement, start, now = self._sorting_dates()
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
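    # Illustrative sketch (not part of the original class): with scale = 300,
    # a course announced 30 days ago scores roughly -exp(-30/300) ~= -0.90,
    # while an unannounced course that started 300 days ago scores exp(1) ~= 2.72,
    # so recently announced courses sort ahead of old, unannounced ones.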
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
announcement = self.announcement
if announcement is not None:
announcement = announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
@lazy
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
            affect grading a student. This is used to efficiently fetch
all the xmodule state for a FieldDataCache without walking
the descriptor tree again.
"""
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children():
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
for c in self.get_children():
for s in c.get_children():
if s.graded:
xmoduledescriptors = list(yield_descriptor_descendents(s))
xmoduledescriptors.append(s)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {
'section_descriptor': s,
'xmoduledescriptors': filter(lambda child: child.has_score, xmoduledescriptors)
}
section_format = s.format if s.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(s)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location.course_key
def start_datetime_text(self, format_string="SHORT_DATE"):
"""
        Returns the desired text corresponding to the course's start date and time in UTC. Prefers .advertised_start,
then falls back to .start
"""
i18n = self.runtime.service(self, "i18n")
_ = i18n.ugettext
strftime = i18n.strftime
def try_parse_iso_8601(text):
try:
result = Date().from_json(text)
if result is None:
result = text.title()
else:
result = strftime(result, format_string)
if format_string == "DATE_TIME":
result = self._add_timezone_string(result)
except ValueError:
result = text.title()
return result
if isinstance(self.advertised_start, basestring):
return try_parse_iso_8601(self.advertised_start)
elif self.start_date_is_still_default:
# Translators: TBD stands for 'To Be Determined' and is used when a course
# does not yet have an announced start date.
return _('TBD')
else:
when = self.advertised_start or self.start
if format_string == "DATE_TIME":
return self._add_timezone_string(strftime(when, format_string))
return strftime(when, format_string)
@property
def start_date_is_still_default(self):
"""
Checks if the start date set for the course is still default, i.e. .start has not been modified,
and .advertised_start has not been set.
"""
return self.advertised_start is None and self.start == CourseFields.start.default
def end_datetime_text(self, format_string="SHORT_DATE"):
"""
Returns the end date or date_time for the course formatted as a string.
If the course does not have an end date set (course.end is None), an empty string will be returned.
"""
if self.end is None:
return ''
else:
strftime = self.runtime.service(self, "i18n").strftime
date_time = strftime(self.end, format_string)
return date_time if format_string == "SHORT_DATE" else self._add_timezone_string(date_time)
def _add_timezone_string(self, date_time):
"""
Adds 'UTC' string to the end of start/end date and time texts.
"""
return date_time + u" UTC"
@property
def forum_posts_allowed(self):
date_proxy = Date()
try:
blackout_periods = [(date_proxy.from_json(start),
date_proxy.from_json(end))
for start, end
in filter(None, self.discussion_blackouts)]
now = datetime.now(UTC())
for start, end in blackout_periods:
if start <= now <= end:
return False
except:
log.exception("Error parsing discussion_blackouts %s for course %s", self.discussion_blackouts, self.id)
return True
@property
def number(self):
return self.location.course
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.number
@property
def org(self):
return self.location.org
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that is in the location
"""
if self.display_organization:
return self.display_organization
return self.org
@property
def video_pipeline_configured(self):
"""
Returns whether the video pipeline advanced setting is configured for this course.
"""
return (
self.video_upload_pipeline is not None and
'course_video_upload_token' in self.video_upload_pipeline
)
|
mtlchun/edx
|
common/lib/xmodule/xmodule/course_module.py
|
Python
|
agpl-3.0
| 53,923
|
[
"VisIt"
] |
c7831ccd67298cd8fe97826a50d3e58609bdacec7b6eed3d6d6c9f58af0d99ce
|
# -*- coding: utf-8 -*-
# test_gc.py ---
#
# Filename: test_gc.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Mon May 19 10:25:13 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""Test script for memory allocation and garbage collection issues."""
from __future__ import print_function
try:
from future_builtins import zip, range
except ImportError:
pass
import random
from collections import defaultdict
from itertools import cycle
import moose
classes = [
'Reac',
'IzhikevichNrn',
'PIDController',
'HHChannel',
'PulseGen',
'Pool',
'RC',
'BufPool',
'DiffAmp',
'IntFire',
'MgBlock',]
def allocate_large_vecs(m, n):
"""Allocate m vecs with n elements each"""
test = moose.Neutral('/test')
ret = []
for jj, mclass in zip(range(m), cycle(classes)):
eval_str = 'moose.vec(path="%s/%s_%d", n=%d, dtype="%s")' % (test.path,
mclass,
jj,
n,
mclass)
mobj = eval(eval_str)
print('Created', mobj.path)
ret.append(mobj)
return ret
def create_finfos():
fields = defaultdict(list)
for el in moose.wildcardFind('/test/#'):
print(el)
mobj = moose.element(el)
for f in moose.getFieldNames(mobj.className, 'lookup'):
fields[mobj].append(getattr(mobj, f))
for f in moose.getFieldNames(mobj.className, 'element'):
fields[mobj].append(getattr(mobj, f))
return fields
def check_vector_field(m, n):
test = moose.Neutral('/test')
tabs = []
for ii in range(m):
comp = moose.Compartment('/test/comp_%d' % (ii), n=n)
for jj in range(n):
tab = moose.Table('/test/data_%d_%d' % (ii, jj))
moose.connect(tab, 'requestOut', comp.vec[jj], 'getVm')
tabs.append(tab)
moose.setClock(0, 1e-3)
moose.setClock(1, 1e-3)
moose.setClock(2, 1e-3)
moose.useClock(0, '/##[ISA=Compartment]', 'init')
moose.useClock(1, '/##[ISA=Compartment]', 'process')
moose.useClock(2, '/##[ISA=Table]', 'process')
moose.reinit()
moose.start(0.01)
return tabs
import numpy as np
if __name__ == '__main__':
np_arrays = []
for ii in range(3):
print('Creating elements')
allocate_large_vecs(100, 100)
print('Created elements. Creating field dict now')
create_finfos()
moose.delete(moose.element('/test'))
tabs = check_vector_field(100, 100)
for t in tabs:
np_arrays.append(np.array(t.vec))
moose.delete('/test')
print('Finished')
#
# test_gc.py ends here
|
dharmasam9/moose-core
|
tests/python/test_gc.py
|
Python
|
gpl-3.0
| 3,698
|
[
"MOOSE"
] |
37dbf4d0f807e5d40bb097e7ec7ac4e8ba0ccfad6173d704018703fdeaa8e2f8
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""WebsiteTest testing class."""
import logging
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import environment
SCRIPT_DEBUG = 9 # TODO(vabr) -- make this consistent with run_tests.py.
class WebsiteTest:
"""WebsiteTest testing class.
Represents one website, defines some generic operations on that site.
To customise for a particular website, this class needs to be inherited
and the Login() method overridden.
"""
# Possible values of self.autofill_expectation.
AUTOFILLED = 1 # Expect password and username to be autofilled.
NOT_AUTOFILLED = 2 # Expect password and username not to be autofilled.
# The maximal accumulated time to spend in waiting for website UI
# interaction.
MAX_WAIT_TIME_IN_SECONDS = 200
def __init__(self, name, username_not_auto=False):
"""Creates a new WebsiteTest.
Args:
name: The website name, identifying it in the test results.
username_not_auto: Expect that the tested website fills username field
on load, and Chrome cannot autofill in that case.
"""
self.name = name
self.username = None
self.password = None
self.username_not_auto = username_not_auto
    # Specifies whether it is expected that credentials get autofilled.
self.autofill_expectation = WebsiteTest.NOT_AUTOFILLED
self.remaining_seconds_to_wait = WebsiteTest.MAX_WAIT_TIME_IN_SECONDS
# The testing Environment, if added to any.
self.environment = None
# The webdriver from the environment.
self.driver = None
# Mouse/Keyboard actions.
def Click(self, selector):
"""Clicks on the element described by |selector|.
Args:
selector: The clicked element's CSS selector.
"""
logging.log(SCRIPT_DEBUG, "action: Click %s" % selector)
element = self.WaitUntilDisplayed(selector)
element.click()
def ClickIfClickable(self, selector):
"""Clicks on the element described by |selector| if it is clickable.
The driver's find_element_by_css_selector method defines what is clickable
-- anything for which it does not throw, is clickable. To be clickable,
the element must:
* exist in the DOM,
* be not covered by another element
* be inside the visible area.
Note that transparency does not influence clickability.
Args:
selector: The clicked element's CSS selector.
Returns:
True if the element is clickable (and was clicked on).
False otherwise.
"""
logging.log(SCRIPT_DEBUG, "action: ClickIfVisible %s" % selector)
element = self.WaitUntilDisplayed(selector)
try:
element.click()
return True
except Exception:
return False
def GoTo(self, url):
"""Navigates the main frame to |url|.
Args:
url: The URL of where to go to.
"""
logging.log(SCRIPT_DEBUG, "action: GoTo %s" % self.name)
self.driver.get(url)
def HoverOver(self, selector):
"""Hovers over the element described by |selector|.
Args:
selector: The CSS selector of the element to hover over.
"""
logging.log(SCRIPT_DEBUG, "action: Hover %s" % selector)
element = self.WaitUntilDisplayed(selector)
hover = ActionChains(self.driver).move_to_element(element)
hover.perform()
# Waiting/Displaying actions.
def _ReturnElementIfDisplayed(self, selector):
"""Returns the element described by |selector|, if displayed.
Note: This takes neither overlapping among elements nor position with
regards to the visible area into account.
Args:
selector: The CSS selector of the checked element.
Returns:
The element if displayed, None otherwise.
"""
try:
element = self.driver.find_element_by_css_selector(selector)
return element if element.is_displayed() else None
except Exception:
return None
def IsDisplayed(self, selector):
"""Check if the element described by |selector| is displayed.
Note: This takes neither overlapping among elements nor position with
regards to the visible area into account.
Args:
selector: The CSS selector of the checked element.
Returns:
True if the element is in the DOM and less than 100% transparent.
False otherwise.
"""
logging.log(SCRIPT_DEBUG, "action: IsDisplayed %s" % selector)
return self._ReturnElementIfDisplayed(selector) is not None
def Wait(self, duration):
"""Wait for |duration| in seconds.
    To avoid deadlocks, the accumulated waiting time for the whole object does
not exceed MAX_WAIT_TIME_IN_SECONDS.
Args:
duration: The time to wait in seconds.
Raises:
      Exception: In case the accumulated waiting limit is exceeded.
"""
logging.log(SCRIPT_DEBUG, "action: Wait %s" % duration)
self.remaining_seconds_to_wait -= duration
if self.remaining_seconds_to_wait < 0:
raise Exception("Waiting limit exceeded for website: %s" % self.name)
time.sleep(duration)
# TODO(vabr): Pull this out into some website-utils and use in Environment
# also?
def WaitUntilDisplayed(self, selector):
"""Waits until the element described by |selector| is displayed.
Args:
selector: The CSS selector of the element to wait for.
Returns:
The displayed element.
"""
element = self._ReturnElementIfDisplayed(selector)
while not element:
self.Wait(1)
element = self._ReturnElementIfDisplayed(selector)
return element
# Form actions.
def FillPasswordInto(self, selector):
"""Ensures that the selected element's value is the saved password.
Depending on self.autofill_expectation, this either checks that the
element already has the password autofilled, or checks that the value
is empty and replaces it with the password.
Args:
selector: The CSS selector for the filled element.
Raises:
Exception: An exception is raised if the element's value is different
from the expectation.
"""
logging.log(SCRIPT_DEBUG, "action: FillPasswordInto %s" % selector)
password_element = self.WaitUntilDisplayed(selector)
# Chrome protects the password inputs and doesn't fill them until
    # the user interacts with the page. To be sure that such an interaction
    # has happened, we perform a |Keys.CONTROL| keypress.
action_chains = ActionChains(self.driver)
action_chains.key_down(Keys.CONTROL).key_up(Keys.CONTROL).perform()
self.Wait(2) # TODO(vabr): Detect when autofill finished.
if self.autofill_expectation == WebsiteTest.AUTOFILLED:
if password_element.get_attribute("value") != self.password:
raise Exception("Error: autofilled password is different from the saved"
" one on website: %s" % self.name)
elif self.autofill_expectation == WebsiteTest.NOT_AUTOFILLED:
if password_element.get_attribute("value"):
raise Exception("Error: password value unexpectedly not empty on"
"website: %s" % self.name)
password_element.send_keys(self.password)
def FillUsernameInto(self, selector):
"""Ensures that the selected element's value is the saved username.
Depending on self.autofill_expectation, this either checks that the
element already has the username autofilled, or checks that the value
is empty and replaces it with the password. If self.username_not_auto
is true, it skips the checks and just overwrites the value with the
username.
Args:
selector: The CSS selector for the filled element.
Raises:
Exception: An exception is raised if the element's value is different
from the expectation.
"""
logging.log(SCRIPT_DEBUG, "action: FillUsernameInto %s" % selector)
username_element = self.WaitUntilDisplayed(selector)
self.Wait(2) # TODO(vabr): Detect when autofill finished.
if not self.username_not_auto:
if self.autofill_expectation == WebsiteTest.AUTOFILLED:
if username_element.get_attribute("value") != self.username:
raise Exception("Error: filled username different from the saved"
" one on website: %s" % self.name)
return
if self.autofill_expectation == WebsiteTest.NOT_AUTOFILLED:
if username_element.get_attribute("value"):
raise Exception("Error: username value unexpectedly not empty on"
"website: %s" % self.name)
username_element.clear()
username_element.send_keys(self.username)
def Submit(self, selector):
"""Finds an element using CSS |selector| and calls its submit() handler.
Args:
selector: The CSS selector for the element to call submit() on.
"""
logging.log(SCRIPT_DEBUG, "action: Submit %s" % selector)
element = self.WaitUntilDisplayed(selector)
element.submit()
# Login/Logout methods
def Login(self):
"""Login Method. Has to be overridden by the WebsiteTest test."""
raise NotImplementedError("Login is not implemented.")
def LoginWhenAutofilled(self):
"""Logs in and checks that the password is autofilled."""
self.autofill_expectation = WebsiteTest.AUTOFILLED
self.Login()
def LoginWhenNotAutofilled(self):
"""Logs in and checks that the password is not autofilled."""
self.autofill_expectation = WebsiteTest.NOT_AUTOFILLED
self.Login()
def Logout(self):
self.environment.DeleteCookies()
# Test scenarios
def PromptFailTest(self):
"""Checks that prompt is not shown on a failed login attempt.
Tries to login with a wrong password and checks that the password
is not offered for saving.
Raises:
Exception: An exception is raised if the test fails.
"""
logging.log(SCRIPT_DEBUG, "PromptFailTest for %s" % self.name)
correct_password = self.password
# Hardcoded random wrong password. Chosen by fair `pwgen` call.
# For details, see: http://xkcd.com/221/.
self.password = "ChieF2ae"
self.LoginWhenNotAutofilled()
self.password = correct_password
self.environment.CheckForNewString(
[environment.MESSAGE_ASK, environment.MESSAGE_SAVE],
False,
"Error: did not detect wrong login on website: %s" % self.name)
def PromptSuccessTest(self):
"""Checks that prompt is shown on a successful login attempt.
Tries to login with a correct password and checks that the password
is offered for saving. Chrome cannot have the auto-save option on
when running this test.
Raises:
Exception: An exception is raised if the test fails.
"""
logging.log(SCRIPT_DEBUG, "PromptSuccessTest for %s" % self.name)
if not self.environment.show_prompt:
raise Exception("Switch off auto-save during PromptSuccessTest.")
self.LoginWhenNotAutofilled()
self.environment.CheckForNewString(
[environment.MESSAGE_ASK],
True,
"Error: did not detect login success on website: %s" % self.name)
def SaveAndAutofillTest(self):
"""Checks that a correct password is saved and autofilled.
Tries to login with a correct password and checks that the password
is saved and autofilled on next visit. Chrome must have the auto-save
option on when running this test.
Raises:
Exception: An exception is raised if the test fails.
"""
logging.log(SCRIPT_DEBUG, "SaveAndAutofillTest for %s" % self.name)
if self.environment.show_prompt:
raise Exception("Switch off auto-save during PromptSuccessTest.")
self.LoginWhenNotAutofilled()
self.environment.CheckForNewString(
[environment.MESSAGE_SAVE],
True,
"Error: did not detect login success on website: %s" % self.name)
self.Logout()
self.LoginWhenAutofilled()
self.environment.CheckForNewString(
[environment.MESSAGE_SAVE],
True,
"Error: failed autofilled login on website: %s" % self.name)
|
guorendong/iridium-browser-ubuntu
|
components/test/data/password_manager/automated_tests/websitetest.py
|
Python
|
bsd-3-clause
| 12,124
|
[
"VisIt"
] |
27ea80fc18af8bfde3b91d208e944ed0a9463634cd2849f37f1dcef4572dec62
|
# -*- coding: utf-8 -*-
#
# hl_api_simulation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for simulation control
"""
from contextlib import contextmanager
import warnings
from ..ll_api import *
from .hl_api_helper import *
from .hl_api_parallel_computing import Rank
__all__ = [
'Cleanup',
'DisableStructuralPlasticity',
'EnableStructuralPlasticity',
'GetKernelStatus',
'Install',
'Prepare',
'ResetKernel',
'Run',
'RunManager',
'SetKernelStatus',
'Simulate',
]
@check_stack
def Simulate(t):
"""Simulate the network for `t` milliseconds.
Parameters
----------
t : float
Time to simulate in ms
See Also
--------
RunManager
"""
sps(float(t))
sr('ms Simulate')
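# A minimal usage sketch (it assumes a network has already been built
# elsewhere with nest.Create/nest.Connect):
#
#   nest.Simulate(1000.0)   # advance the network state by 1000 ms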
@check_stack
def Run(t):
"""Simulate the network for `t` milliseconds.
Parameters
----------
t : float
Time to simulate in ms
Notes
------
Call between `Prepare` and `Cleanup` calls, or within a
``with RunManager`` clause.
Simulate(t) is equivalent to splitting t into m intervals t' = t/m and calling: Prepare(); for _ in range(m): Run(t'); Cleanup()
`Prepare` must be called before `Run` to calibrate the system, and
`Cleanup` must be called after `Run` to close files, cleanup handles, and
so on. After `Cleanup`, `Prepare` can and must be called before more `Run`
calls.
Be careful about modifying the network or neurons between `Prepare` and `Cleanup`
calls. In particular, do not call `Create`, `Connect`, or `SetKernelStatus`.
Calling `SetStatus` to change membrane potential `V_m` of neurons or synaptic
weights (but not delays!) will in most cases work as expected, while changing
membrane or synaptic times constants will not work correctly. If in doubt, assume
that changes may cause undefined behavior and check these thoroughly.
See Also
--------
Prepare, Cleanup, RunManager, Simulate
"""
sps(float(t))
sr('ms Run')
@check_stack
def Prepare():
"""Calibrate the system before a `Run` call. Not needed for `Simulate`.
Call before the first `Run` call, or before calling `Run` after changing
the system, calling `SetStatus` or `Cleanup`.
See Also
--------
Run, Cleanup
"""
sr('Prepare')
@check_stack
def Cleanup():
"""Cleans up resources after a `Run` call. Not needed for `Simulate`.
Closes state for a series of runs, such as flushing and closing files.
A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
See Also
--------
Run, Prepare
"""
sr('Cleanup')
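# An explicit Prepare/Run/Cleanup sketch, roughly equivalent to
# Simulate(1000.0) but allowing recorders to be read out between runs
# (the interval length is chosen for illustration only):
#
#   Prepare()
#   for _ in range(10):
#       Run(100.0)
#       # ... extract intermediate results here ...
#   Cleanup()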
@contextmanager
def RunManager():
"""ContextManager for `Run`
Calls `Prepare` before a series of `Run` calls, and calls `Cleanup` at end.
E.g.:
::
with RunManager():
for _ in range(10):
Run(100)
# extract results
Notes
-----
Be careful about modifying the network or neurons inside the `RunManager` context.
In particular, do not call `Create`, `Connect`, or `SetKernelStatus`. Calling `SetStatus`
to change membrane potential `V_m` of neurons or synaptic weights (but not delays!)
will in most cases work as expected, while changing membrane or synaptic time
constants will not work correctly. If in doubt, assume that changes may cause
undefined behavior and check these thoroughly.
See Also
--------
Prepare, Run, Cleanup, Simulate
"""
Prepare()
try:
yield
finally:
Cleanup()
@check_stack
def ResetKernel():
"""Reset the simulation kernel.
This will destroy the network as well as all custom models created with
:py:func:`.CopyModel`. Calling this function is equivalent to restarting NEST.
In particular,
* all network nodes
* all connections
* all user-defined neuron and synapse models
are deleted, and
* time
* random generators
are reset. The only exception is that dynamically loaded modules are not
unloaded. This may change in a future version of NEST.
"""
sr('ResetKernel')
@check_stack
def SetKernelStatus(params):
"""Set parameters for the simulation kernel.
See the documentation of :ref:`sec:kernel_attributes` for a valid
list of params.
Parameters
----------
params : dict
Dictionary of parameters to set.
See Also
--------
GetKernelStatus
"""
# We need the nest module to be fully initialized in order to access the
# _kernel_attr_names and _readonly_kernel_attrs. As hl_api_simulation is
# imported via hl_api during initialization, we can't put the import on
# the module level, but have to have it on the function level.
import nest # noqa
raise_errors = params.get('dict_miss_is_error', nest.dict_miss_is_error)
valids = nest._kernel_attr_names
readonly = nest._readonly_kernel_attrs
keys = list(params.keys())
for key in keys:
msg = None
if key not in valids:
msg = f'`{key}` is not a valid kernel parameter, ' + \
'valid parameters are: ' + \
', '.join(f"'{p}'" for p in sorted(valids))
elif key in readonly:
msg = f'`{key}` is a readonly kernel parameter'
if msg is not None:
if raise_errors:
raise ValueError(msg)
else:
warnings.warn(msg + f' \n`{key}` has been ignored')
del params[key]
sps(params)
sr('SetKernelStatus')
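# A minimal usage sketch (parameter names assumed from the NEST kernel
# documentation; 'resolution' is the simulation step in ms):
#
#   nest.SetKernelStatus({'resolution': 0.1, 'print_time': True})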
@check_stack
def GetKernelStatus(keys=None):
"""Obtain parameters of the simulation kernel.
Parameters
----------
keys : str or list, optional
Single parameter name or `list` of parameter names
Returns
-------
dict:
Parameter dictionary, if called without argument
type:
Single parameter value, if called with single parameter name
list:
List of parameter values, if called with list of parameter names
Raises
------
TypeError
If `keys` are of the wrong type.
Notes
-----
See SetKernelStatus for documentation on each parameter key.
See Also
--------
SetKernelStatus
"""
sr('GetKernelStatus')
status_root = spp()
if keys is None:
return status_root
elif is_literal(keys):
return status_root[keys]
elif is_iterable(keys):
return tuple(status_root[k] for k in keys)
else:
raise TypeError("keys should be either a string or an iterable")
@check_stack
def Install(module_name):
"""Load a dynamically linked NEST module.
Parameters
----------
module_name : str
Name of the dynamically linked module
Returns
-------
handle
NEST module identifier, required for unloading
Notes
-----
Dynamically linked modules are searched in the NEST library
directory (``<prefix>/lib/nest``) and in ``LD_LIBRARY_PATH`` (on
Linux) or ``DYLD_LIBRARY_PATH`` (on OSX).
**Example**
::
nest.Install("mymodule")
"""
return sr("(%s) Install" % module_name)
@check_stack
def EnableStructuralPlasticity():
"""Enable structural plasticity for the network simulation
See Also
--------
DisableStructuralPlasticity
"""
sr('EnableStructuralPlasticity')
@check_stack
def DisableStructuralPlasticity():
"""Disable structural plasticity for the network simulation
See Also
--------
EnableStructuralPlasticity
"""
sr('DisableStructuralPlasticity')
|
jougs/nest-simulator
|
pynest/nest/lib/hl_api_simulation.py
|
Python
|
gpl-2.0
| 8,229
|
[
"NEURON"
] |
f5dc283597b35a3353f2d603c6be8d40777f6eaa35bbf6b3a9f5ca94b60e1ca4
|
"""
======================================
Plotting sensor layouts of MEG systems
======================================
In this example, sensor layouts of different MEG systems
are shown.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from mayavi import mlab
import mne
from mne.io import read_raw_fif, read_raw_ctf, read_raw_bti, read_raw_kit
from mne.io import read_raw_artemis123
from mne.datasets import sample, spm_face, testing
from mne.viz import plot_trans
print(__doc__)
bti_path = op.abspath(op.dirname(mne.__file__)) + '/io/bti/tests/data/'
kit_path = op.abspath(op.dirname(mne.__file__)) + '/io/kit/tests/data/'
raws = dict(
Neuromag=read_raw_fif(sample.data_path() +
'/MEG/sample/sample_audvis_raw.fif'),
CTF_275=read_raw_ctf(spm_face.data_path() +
'/MEG/spm/SPM_CTF_MEG_example_faces1_3D.ds'),
Magnes_3600wh=read_raw_bti(op.join(bti_path, 'test_pdf_linux'),
op.join(bti_path, 'test_config_linux'),
op.join(bti_path, 'test_hs_linux')),
KIT=read_raw_kit(op.join(kit_path, 'test.sqd')),
Artemis123=read_raw_artemis123(op.join(testing.data_path(), 'ARTEMIS123',
'Artemis_Data_2016-11-03-15h-58m_test.bin'))
)
for system, raw in raws.items():
# We don't have coil definitions for KIT refs, so exclude them
ref_meg = False if system == 'KIT' else True
fig = plot_trans(raw.info, trans=None, dig=False, eeg_sensors=False,
meg_sensors=True, coord_frame='meg', ref_meg=ref_meg)
text = mlab.title(system)
text.x_position = 0.5
text.y_position = 0.95
text.property.vertical_justification = 'top'
text.property.justification = 'center'
text.actor.text_scale_mode = 'none'
text.property.bold = True
mlab.draw(fig)
|
nicproulx/mne-python
|
examples/visualization/plot_meg_sensors.py
|
Python
|
bsd-3-clause
| 1,910
|
[
"Mayavi"
] |
808599606bed6f79c0320c5f7bcfa50796de28b97b2c01a7131ceb50160481cd
|
import sys
from redux.assignmentdeclare import AssignmentScopeAnalyzer
from redux.ast import BitfieldDefinition
from redux.callinliner import CallInliner
from redux.enuminliner import EnumInliner
from redux.intrinsics import get_intrinsic_functions
from redux.parser import parse
from redux.stringinliner import StringInliner
from redux.typeannotate import TypeAnnotator
from redux.types import str_, float_, int_, object_, is_numeric
from redux.visitor import ASTVisitor
from redux.requireinliner import RequireInliner
class CodeGenerator(ASTVisitor):
"""Generates code from AST."""
def __init__(self):
super(CodeGenerator, self).__init__()
self.intrinsics = dict(get_intrinsic_functions())
self.code = ""
def emit(self, new_code):
self.code += new_code
def push_scope(self):
self.emit("{\n")
def pop_scope(self):
self.emit("}\n")
def type_name(self, type_):
if isinstance(type_, BitfieldDefinition):
type_ = int_
return {int_: "int", float_: "float", object_: "object"}[type_]
def visit_Constant(self, constant):
if is_numeric(constant.type):
self.emit(repr(constant.value))
elif constant.type is str_:
self.emit('"%s"' % constant.value.encode("unicode_escape").decode("utf8").replace('"', '\"'))
else:
assert False, "constant of unknown type %r" % constant.type
def visit_FunctionCall(self, func_call):
if isinstance(func_call.type, BitfieldDefinition):
self.visit(func_call.arguments[0])
else:
self.intrinsics[func_call.function].codegen(self, func_call.arguments)
def visit_VarRef(self, var_ref):
self.emit(var_ref.name)
def emit_binary_op(self, binop, op):
self.emit("(")
self.visit(binop.lhs)
self.emit(op)
self.visit(binop.rhs)
self.emit(")")
def emit_unary_op(self, unop, op):
self.emit("(")
self.emit(op)
self.visit(unop.expression)
self.emit(")")
def visit_AddOp(self, binop):
self.emit_binary_op(binop, "+")
def visit_SubOp(self, binop):
self.emit_binary_op(binop, "-")
def visit_MulOp(self, binop):
self.emit_binary_op(binop, "*")
def visit_DivOp(self, binop):
self.emit_binary_op(binop, "/")
def visit_LessThanOp(self, binop):
self.emit_binary_op(binop, "<")
def visit_GreaterThanOp(self, binop):
self.emit_binary_op(binop, ">")
def visit_LessThanOrEqualToOp(self, binop):
self.emit_binary_op(binop, "<=")
def visit_GreaterThanOrEqualToOp(self, binop):
self.emit_binary_op(binop, ">=")
def visit_EqualToOp(self, binop):
self.emit_binary_op(binop, "==")
def visit_NotEqualToOp(self, binop):
self.emit_binary_op(binop, "!=")
def visit_LogicalAndOp(self, binop):
self.emit_binary_op(binop, "&&")
def visit_LogicalOrOp(self, binop):
self.emit_binary_op(binop, "||")
def visit_LogicalNotOp(self, a):
self.emit_unary_op(a, "!")
def visit_BitwiseOrOp(self, binop):
self.emit_binary_op(binop, "|")
def visit_BitwiseXorOp(self, binop):
self.emit_binary_op(binop, "^")
def visit_BitwiseAndOp(self, binop):
self.emit_binary_op(binop, "&")
def visit_BitwiseLeftShiftOp(self, binop):
self.emit_binary_op(binop, "<<")
def visit_BitwiseRightShiftOp(self, binop):
self.emit_binary_op(binop, ">>")
def visit_PowerOp(self, binop):
self.emit_binary_op(binop, "**")
def visit_ModuloOp(self, binop):
self.emit_binary_op(binop, "%")
def visit_NegateOp(self, unop):
self.emit_unary_op(unop, "-")
def visit_BitwiseNotOp(self, unop):
self.emit_unary_op(unop, "~")
def visit_BitfieldAssignment(self, assignment):
self.visit(assignment.variable)
self.emit(" = ")
self.visit(assignment.expression)
def visit_Assignment(self, assignment):
if assignment.declare is True:
self.emit("%s " % self.type_name(assignment.expression.type))
self.visit(assignment.variable)
self.emit(" = ")
self.visit(assignment.expression)
def visit_DottedAccess(self, dotted_access):
if dotted_access.expression.type == object_:
self.emit("(")
self.visit(dotted_access.expression)
self.emit(".%s)" % dotted_access.member)
else:
self.visit(dotted_access.expression)
self.emit("[%d, %d]" % dotted_access.expression.type.get_member_limits(dotted_access.member))
def visit_Block(self, block):
self.push_scope()
for statement in block.statements:
old_length = len(self.code)
self.visit(statement)
if old_length < len(self.code) and self.code[-1] != ";" and self.code[-2:] != "}\n":
self.emit(";\n")
self.pop_scope()
def visit_IfStmt(self, if_stmt):
self.emit("if(")
self.visit(if_stmt.condition)
self.emit(")")
self.visit(if_stmt.then_block)
if if_stmt.else_part is not None:
self.emit("else ")
self.visit(if_stmt.else_part)
def visit_WhileStmt(self, while_stmt):
self.emit("while(")
self.visit(while_stmt.condition)
self.emit(")")
self.visit(while_stmt.block)
def visit_ForStmt(self, for_stmt):
self.emit("for(")
self.visit(for_stmt.assignment)
self.emit("; ")
if for_stmt.condition is not None:
self.visit(for_stmt.condition)
self.emit("; ")
if for_stmt.step_expr is not None:
self.visit(for_stmt.step_expr)
self.emit(")")
self.visit(for_stmt.block)
def visit_CodeLiteral(self, code_literal):
self.emit(code_literal.code)
def visit_BreakStmt(self, break_stmt):
self.emit("break")
def visit_ChronalAccess(self, chronal_access):
self.emit("(")
self.visit(chronal_access.object)
self.emit("->%s)" % chronal_access.member)
def visit_ClassAccess(self, class_access):
self.emit("(")
self.visit(class_access.class_)
self.emit("::%s)" % class_access.member)
def visit_Query(self, query):
self.emit("(")
self.emit("QUERY %s [" % query.query_type)
self.visit(query.unit)
self.emit("] %s [" % query.op)
self.visit(query.op_expr)
self.emit("] WHERE [")
self.visit(query.where_cond)
self.emit("])")
def compile_script(filename, code):
code_generator = CodeGenerator()
ast_, errors = parse(code)
for lineno, message in errors:
sys.stderr.write("%s:%d: %s\n" % (filename, lineno, message))
ast_ = RequireInliner().visit(ast_)
ast_ = AssignmentScopeAnalyzer().visit(ast_)
ast_ = TypeAnnotator().visit(ast_)
ast_ = CallInliner().visit(ast_)
ast_ = EnumInliner().visit(ast_)
ast_ = StringInliner().visit(ast_)
code_generator.visit(ast_)
return code_generator.code
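# A minimal command-line sketch for driving compile_script (hypothetical;
# this module does not define an entry point of its own):
#
#   if __name__ == "__main__":
#       with open(sys.argv[1]) as source_file:
#           sys.stdout.write(compile_script(sys.argv[1], source_file.read()))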
|
Muon/redux
|
redux/codegenerator.py
|
Python
|
mit
| 7,154
|
[
"VisIt"
] |
77c016d4b93074e3029a9b0416942b64abb03ea04d02e556119b9a93aab0bd1a
|
"""
Acceptance tests for Studio related to the split_test module.
"""
import json
import math
from unittest import skip
from nose.plugins.attrib import attr
from selenium.webdriver.support.ui import Select
from xmodule.partitions.partitions import Group
from bok_choy.promise import Promise, EmptyPromise
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.component_editor import ComponentEditorView
from ...pages.studio.overview import CourseOutlinePage, CourseOutlineUnit
from ...pages.studio.container import ContainerPage
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
from ...pages.studio.utils import add_advanced_component
from ...pages.xblock.utils import wait_for_xblock_initialization
from ...pages.lms.courseware import CoursewarePage
from ..helpers import create_user_partition_json
from base_studio_test import StudioCourseTest
from test_studio_container import ContainerBase
class SplitTestMixin(object):
"""
Mixin that contains useful methods for split_test module testing.
"""
def verify_groups(self, container, active_groups, inactive_groups, verify_missing_groups_not_present=True):
"""
Check that the groups appear and are correctly categorized as to active and inactive.
Also checks that the "add missing groups" button/link is not present unless a value of False is passed
for verify_missing_groups_not_present.
"""
def wait_for_xblocks_to_render():
# First xblock is the container for the page, subtract 1.
return (len(active_groups) + len(inactive_groups) == len(container.xblocks) - 1, len(active_groups))
Promise(wait_for_xblocks_to_render, "Number of xblocks on the page are incorrect").fulfill()
def check_xblock_names(expected_groups, actual_blocks):
self.assertEqual(len(expected_groups), len(actual_blocks))
for idx, expected in enumerate(expected_groups):
self.assertEqual(expected, actual_blocks[idx].name)
check_xblock_names(active_groups, container.active_xblocks)
check_xblock_names(inactive_groups, container.inactive_xblocks)
# Verify inactive xblocks appear after active xblocks
check_xblock_names(active_groups + inactive_groups, container.xblocks[1:])
if verify_missing_groups_not_present:
self.verify_add_missing_groups_button_not_present(container)
def verify_add_missing_groups_button_not_present(self, container):
"""
Checks that the "add missing groups" button/link is not present.
"""
def missing_groups_button_not_present():
button_present = container.missing_groups_button_present()
return (not button_present, not button_present)
Promise(missing_groups_button_not_present, "Add missing groups button should not be showing.").fulfill()
@attr('shard_1')
class SplitTest(ContainerBase, SplitTestMixin):
"""
Tests for creating and editing split test instances in Studio.
"""
__test__ = True
def setUp(self):
super(SplitTest, self).setUp()
# This line should be called once courseFixture is installed
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,beta',
'first',
[Group("0", 'alpha'), Group("1", 'beta')]
),
create_user_partition_json(
1,
'Configuration 0,1,2',
'second',
[Group("0", 'Group 0'), Group("1", 'Group 1'), Group("2", 'Group 2')]
),
],
},
})
def populate_course_fixture(self, course_fixture):
course_fixture.add_advanced_settings(
{u"advanced_modules": {"value": ["split_test"]}}
)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def verify_add_missing_groups_button_not_present(self, container):
"""
Checks that the "add missing groups" button/link is not present.
"""
def missing_groups_button_not_present():
button_present = container.missing_groups_button_present()
return (not button_present, not button_present)
Promise(missing_groups_button_not_present, "Add missing groups button should not be showing.").fulfill()
def create_poorly_configured_split_instance(self):
"""
Creates a split test instance with a missing group and an inactive group.
Returns the container page.
"""
unit = self.go_to_unit_page()
add_advanced_component(unit, 0, 'split_test')
container = self.go_to_nested_container_page()
container.edit()
component_editor = ComponentEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration alpha,beta')
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,beta',
'first',
[Group("0", 'alpha'), Group("2", 'gamma')]
)
],
},
})
return self.go_to_nested_container_page()
def test_create_and_select_group_configuration(self):
"""
Tests creating a split test instance on the unit page, and then
assigning the group configuration.
"""
unit = self.go_to_unit_page()
add_advanced_component(unit, 0, 'split_test')
container = self.go_to_nested_container_page()
container.edit()
component_editor = ComponentEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration alpha,beta')
self.verify_groups(container, ['alpha', 'beta'], [])
# Switch to the other group configuration. Must navigate again to the container page so
# that there is only a single "editor" on the page.
container = self.go_to_nested_container_page()
container.edit()
component_editor = ComponentEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration 0,1,2')
self.verify_groups(container, ['Group 0', 'Group 1', 'Group 2'], ['Group ID 0', 'Group ID 1'])
# Reload the page to make sure the groups were persisted.
container = self.go_to_nested_container_page()
self.verify_groups(container, ['Group 0', 'Group 1', 'Group 2'], ['Group ID 0', 'Group ID 1'])
@skip("This fails periodically where it fails to trigger the add missing groups action.Dis")
def test_missing_group(self):
"""
The case of a split test with invalid configuration (missing group).
"""
container = self.create_poorly_configured_split_instance()
# Wait for the xblock to be fully initialized so that the add button is rendered
wait_for_xblock_initialization(self, '.xblock[data-block-type="split_test"]')
# Click the add button and verify that the groups were added on the page
container.add_missing_groups()
self.verify_groups(container, ['alpha', 'gamma'], ['beta'])
# Reload the page to make sure the groups were persisted.
container = self.go_to_nested_container_page()
self.verify_groups(container, ['alpha', 'gamma'], ['beta'])
def test_delete_inactive_group(self):
"""
Test deleting an inactive group.
"""
container = self.create_poorly_configured_split_instance()
# The inactive group is the 2nd group, but it is the first one
# with a visible delete button, so use index 0
container.delete(0)
self.verify_groups(container, ['alpha'], [], verify_missing_groups_not_present=False)
@attr('shard_1')
class GroupConfigurationsNoSplitTest(StudioCourseTest):
"""
Tests how the Group Configuration page should look when the split_test module is not enabled.
"""
def setUp(self):
super(GroupConfigurationsNoSplitTest, self).setUp()
self.group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def test_no_content_experiment_sections(self):
"""
Scenario: if split_test module is not present in Advanced Settings, content experiment
parts of the Group Configurations page are not shown.
Given I have a course with split_test module not enabled
Then when I go to the Group Configurations page there are no content experiment sections
"""
self.group_configurations_page.visit()
self.assertFalse(self.group_configurations_page.experiment_group_sections_present)
@attr('shard_1')
class GroupConfigurationsTest(ContainerBase, SplitTestMixin):
"""
Tests that Group Configurations page works correctly with previously
added configurations in Studio
"""
__test__ = True
def setUp(self):
super(GroupConfigurationsTest, self).setUp()
self.page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def _assert_fields(self, config, cid=None, name='', description='', groups=None):
self.assertEqual(config.mode, 'details')
if name:
self.assertIn(name, config.name)
if cid:
self.assertEqual(cid, config.id)
else:
# To make sure that the id is present on the page and is not empty.
# We do not check the value of the id, because it's generated randomly and we cannot
# predict this value
self.assertTrue(config.id)
# Expand the configuration
config.toggle()
if description:
self.assertIn(description, config.description)
if groups:
allocation = int(math.floor(100 / len(groups)))
self.assertEqual(groups, [group.name for group in config.groups])
for group in config.groups:
self.assertEqual(str(allocation) + "%", group.allocation)
# Collapse the configuration
config.toggle()
def _add_split_test_to_vertical(self, number, group_configuration_metadata=None):
"""
Add split test to vertical #`number`.
If `group_configuration_metadata` is not None, use it to assign group configuration to split test.
"""
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[number]
if group_configuration_metadata:
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata=group_configuration_metadata)
else:
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment')
self.course_fixture.create_xblock(vertical.locator, split_test)
return split_test
def populate_course_fixture(self, course_fixture):
course_fixture.add_advanced_settings({
u"advanced_modules": {"value": ["split_test"]},
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def create_group_configuration_experiment(self, groups, associate_experiment):
"""
Creates a Group Configuration containing a list of groups.
Optionally creates a Content Experiment and associates it with previous Group Configuration.
Returns group configuration or (group configuration, experiment xblock)
"""
# Create a new group configurations
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(0, "Name", "Description.", groups),
],
},
})
if associate_experiment:
# Assign newly created group configuration to experiment
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
self.course_fixture.create_xblock(vertical.locator, split_test)
# Go to the Group Configuration Page
self.page.visit()
config = self.page.experiment_group_configurations[0]
if associate_experiment:
return config, split_test
return config
def publish_unit_in_LMS_and_view(self, courseware_page):
"""
Given course outline page, publish first unit and view it in LMS
"""
self.outline_page.visit()
self.outline_page.expand_all_subsections()
section = self.outline_page.section_at(0)
unit = section.subsection_at(0).unit_at(0).go_to()
# I publish and view in LMS and it is rendered correctly
unit.publish_action.click()
unit.view_published_version()
self.assertEqual(len(self.browser.window_handles), 2)
courseware_page.wait_for_page()
def get_select_options(self, page, selector):
"""
Get list of options of dropdown that is specified by selector on a given page.
"""
select_element = page.q(css=selector)
self.assertTrue(select_element.is_present())
return [option.text for option in Select(select_element[0]).options]
def test_no_group_configurations_added(self):
"""
Scenario: Ensure that the message telling me to create a new group configuration is
shown when group configurations were not added.
Given I have a course without group configurations
When I go to the Group Configuration page in Studio
Then I see "You have not created any group configurations yet." message
"""
self.page.visit()
self.assertTrue(self.page.experiment_group_sections_present)
self.assertTrue(self.page.no_experiment_groups_message_is_present)
self.assertIn(
"You have not created any group configurations yet.",
self.page.no_experiment_groups_message_text
)
def test_group_configurations_have_correct_data(self):
"""
Scenario: Ensure that the group configuration is rendered correctly in expanded/collapsed mode.
Given I have a course with 2 group configurations
And I go to the Group Configuration page in Studio
And I work with the first group configuration
And I see `name`, `id` are visible and have correct values
When I expand the first group configuration
Then I see `description` and `groups` appear and also have correct values
And I do the same checks for the second group configuration
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Name of the Group Configuration',
'Description of the group configuration.',
[Group("0", 'Group 0'), Group("1", 'Group 1')]
),
create_user_partition_json(
1,
'Name of second Group Configuration',
'Second group configuration.',
[Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
),
],
},
})
self.page.visit()
config = self.page.experiment_group_configurations[0]
# no groups when the configuration is collapsed
self.assertEqual(len(config.groups), 0)
self._assert_fields(
config,
cid="0", name="Name of the Group Configuration",
description="Description of the group configuration.",
groups=["Group 0", "Group 1"]
)
config = self.page.experiment_group_configurations[1]
self._assert_fields(
config,
name="Name of second Group Configuration",
description="Second group configuration.",
groups=["Alpha", "Beta", "Gamma"]
)
def test_can_create_and_edit_group_configuration(self):
"""
Scenario: Ensure that the group configuration can be created and edited correctly.
Given I have a course without group configurations
When I click button 'Create new Group Configuration'
And I set new name and description, change name for the 2nd default group, add one new group
And I click button 'Create'
Then I see the new group configuration is added and has correct data
When I edit the group configuration
And I change the name and description, add new group, remove old one and change name for the Group A
And I click button 'Save'
Then I see the group configuration is saved successfully and has the new data
"""
self.page.visit()
self.assertEqual(len(self.page.experiment_group_configurations), 0)
# Create new group configuration
self.page.create_experiment_group_configuration()
config = self.page.experiment_group_configurations[0]
config.name = "New Group Configuration Name"
config.description = "New Description of the group configuration."
config.groups[1].name = "New Group Name"
# Add new group
config.add_group() # Group C
# Save the configuration
self.assertEqual(config.get_text('.action-primary'), "Create")
self.assertFalse(config.delete_button_is_present)
config.save()
self._assert_fields(
config,
name="New Group Configuration Name",
description="New Description of the group configuration.",
groups=["Group A", "New Group Name", "Group C"]
)
# Edit the group configuration
config.edit()
# Update fields
self.assertTrue(config.id)
config.name = "Second Group Configuration Name"
config.description = "Second Description of the group configuration."
self.assertEqual(config.get_text('.action-primary'), "Save")
# Add new group
config.add_group() # Group D
# Remove group with name "New Group Name"
config.groups[1].remove()
# Rename Group A
config.groups[0].name = "First Group"
# Save the configuration
config.save()
self._assert_fields(
config,
name="Second Group Configuration Name",
description="Second Description of the group configuration.",
groups=["First Group", "Group C", "Group D"]
)
def test_use_group_configuration(self):
"""
Scenario: Ensure that the group configuration can be used by split_module correctly
Given I have a course without group configurations
When I create new group configuration
And I set new name and add a new group, save the group configuration
And I go to the unit page in Studio
And I add new advanced module "Content Experiment"
When I assign created group configuration to the module
Then I see the module has correct groups
"""
self.page.visit()
# Create new group configuration
self.page.create_experiment_group_configuration()
config = self.page.experiment_group_configurations[0]
config.name = "New Group Configuration Name"
# Add new group
config.add_group()
config.groups[2].name = "New group"
# Save the configuration
config.save()
split_test = self._add_split_test_to_vertical(number=0)
container = ContainerPage(self.browser, split_test.locator)
container.visit()
container.edit()
component_editor = ComponentEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'New Group Configuration Name')
self.verify_groups(container, ['Group A', 'Group B', 'New group'], [])
def test_container_page_active_verticals_names_are_synced(self):
"""
Scenario: Ensure that the Content Experiment displays synced vertical names and correct groups.
Given I have a course with group configuration
And I go to the Group Configuration page in Studio
And I edit the name of the group configuration, add new group and remove old one
And I change the name for the group "New group" to "Second Group"
And I go to the Container page in Studio
And I edit the Content Experiment
Then I see the group configuration name is changed in `Group Configuration` dropdown
And the group configuration name is changed on container page
And I see the module has 2 active groups and one inactive
And I see "Add missing groups" link exists
When I click on "Add missing groups" link
Then I see the module has 3 active groups and one inactive
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Name of the Group Configuration',
'Description of the group configuration.',
[Group("0", 'Group A'), Group("1", 'Group B'), Group("2", 'Group C')]
),
],
},
})
# Add split test to vertical and assign newly created group configuration to it
split_test = self._add_split_test_to_vertical(number=0, group_configuration_metadata={'user_partition_id': 0})
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.edit()
config.name = "Second Group Configuration Name"
# `Group C` -> `Second Group`
config.groups[2].name = "Second Group"
# Add new group
config.add_group() # Group D
# Remove Group A
config.groups[0].remove()
# Save the configuration
config.save()
container = ContainerPage(self.browser, split_test.locator)
container.visit()
container.edit()
component_editor = ComponentEditorView(self.browser, container.locator)
self.assertEqual(
"Second Group Configuration Name",
component_editor.get_selected_option_text('Group Configuration')
)
component_editor.cancel()
self.assertIn(
"Second Group Configuration Name",
container.get_xblock_information_message()
)
self.verify_groups(
container, ['Group B', 'Second Group'], ['Group ID 0'],
verify_missing_groups_not_present=False
)
# Click the add button and verify that the groups were added on the page
container.add_missing_groups()
self.verify_groups(container, ['Group B', 'Second Group', 'Group D'], ['Group ID 0'])
def test_can_cancel_creation_of_group_configuration(self):
"""
Scenario: Ensure that creation of the group configuration can be canceled correctly.
Given I have a course without group configurations
When I click button 'Create new Group Configuration'
And I set new name and description, add 1 additional group
And I click button 'Cancel'
Then I see that there is no new group configurations in the course
"""
self.page.visit()
self.assertEqual(len(self.page.experiment_group_configurations), 0)
# Create new group configuration
self.page.create_experiment_group_configuration()
config = self.page.experiment_group_configurations[0]
config.name = "Name of the Group Configuration"
config.description = "Description of the group configuration."
# Add new group
config.add_group() # Group C
# Cancel the configuration
config.cancel()
self.assertEqual(len(self.page.experiment_group_configurations), 0)
def test_can_cancel_editing_of_group_configuration(self):
"""
Scenario: Ensure that editing of the group configuration can be canceled correctly.
Given I have a course with group configuration
When I go to the edit mode of the group configuration
And I set new name and description, add 2 additional groups
And I click button 'Cancel'
Then I see that new changes were discarded
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Name of the Group Configuration',
'Description of the group configuration.',
[Group("0", 'Group 0'), Group("1", 'Group 1')]
),
create_user_partition_json(
1,
'Name of second Group Configuration',
'Second group configuration.',
[Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
),
],
},
})
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.name = "New Group Configuration Name"
config.description = "New Description of the group configuration."
# Add 2 new groups
config.add_group() # Group C
config.add_group() # Group D
# Cancel the configuration
config.cancel()
self._assert_fields(
config,
name="Name of the Group Configuration",
description="Description of the group configuration.",
groups=["Group 0", "Group 1"]
)
def test_group_configuration_validation(self):
"""
Scenario: Ensure that validation of the group configuration works correctly.
Given I have a course without group configurations
And I create new group configuration with 2 default groups
When I set only description and try to save
Then I see error message "Group Configuration name is required."
When I set a name
And I delete the name of one of the groups and try to save
Then I see error message "All groups must have a name"
When I delete all the groups and try to save
Then I see error message "There must be at least one group."
When I add a group and try to save
Then I see the group configuration is saved successfully
"""
def try_to_save_and_verify_error_message(message):
# Try to save
config.save()
# Verify that configuration is still in editing mode
self.assertEqual(config.mode, 'edit')
# Verify error message
self.assertEqual(message, config.validation_message)
self.page.visit()
# Create new group configuration
self.page.create_experiment_group_configuration()
# Leave empty required field
config = self.page.experiment_group_configurations[0]
config.description = "Description of the group configuration."
try_to_save_and_verify_error_message("Group Configuration name is required.")
# Set required field
config.name = "Name of the Group Configuration"
config.groups[1].name = ''
try_to_save_and_verify_error_message("All groups must have a name.")
config.groups[0].remove()
config.groups[0].remove()
try_to_save_and_verify_error_message("There must be at least one group.")
config.add_group()
# Save the configuration
config.save()
self._assert_fields(
config,
name="Name of the Group Configuration",
description="Description of the group configuration.",
groups=["Group A"]
)
def test_group_configuration_empty_usage(self):
"""
Scenario: When a group configuration is not used, ensure that the link to the outline page works correctly.
Given I have a course without group configurations
And I create new group configuration with 2 default groups
Then I see a link to the outline page
When I click on the outline link
Then I see the outline page
"""
# Create a new group configurations
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
],
},
})
# Go to the Group Configuration Page and click on outline anchor
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.toggle()
config.click_outline_anchor()
# Waiting for the page load and verify that we've landed on course outline page
EmptyPromise(
lambda: self.outline_page.is_browser_on_page(), "loaded page {!r}".format(self.outline_page),
timeout=30
).fulfill()
def test_group_configuration_non_empty_usage(self):
"""
Scenario: When a group configuration is used, ensure that the links to units using that group configuration work correctly.
Given I have a course without group configurations
And I create new group configuration with 2 default groups
And I create a unit and assign the newly created group configuration
And open the Group Configuration page
Then I see a link to the newly created unit
When I click on the unit link
Then I see correct unit page
"""
# Create a new group configurations
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
],
},
})
# Assign newly created group configuration to unit
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
)
unit = CourseOutlineUnit(self.browser, vertical.locator)
# Go to the Group Configuration Page and click unit anchor
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.toggle()
usage = config.usages[0]
config.click_unit_anchor()
unit = ContainerPage(self.browser, vertical.locator)
# Waiting for the page load and verify that we've landed on the unit page
EmptyPromise(
lambda: unit.is_browser_on_page(), "loaded page {!r}".format(unit),
timeout=30
).fulfill()
self.assertIn(unit.name, usage)
def test_can_delete_unused_group_configuration(self):
"""
Scenario: Ensure that the user can delete unused group configuration.
Given I have a course with 2 group configurations
And I go to the Group Configuration page
When I delete the Group Configuration with name "Configuration 1"
Then I see that there is one Group Configuration
When I edit the Group Configuration with name "Configuration 2"
And I delete the Group Configuration with name "Configuration 2"
Then I see that there are no Group Configurations
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration 1',
'Description of the group configuration.',
[Group("0", 'Group 0'), Group("1", 'Group 1')]
),
create_user_partition_json(
1,
'Configuration 2',
'Second group configuration.',
[Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
)
],
},
})
self.page.visit()
self.assertEqual(len(self.page.experiment_group_configurations), 2)
config = self.page.experiment_group_configurations[1]
# Delete first group configuration via detail view
config.delete()
self.assertEqual(len(self.page.experiment_group_configurations), 1)
config = self.page.experiment_group_configurations[0]
config.edit()
self.assertFalse(config.delete_button_is_disabled)
# Delete first group configuration via edit view
config.delete()
self.assertEqual(len(self.page.experiment_group_configurations), 0)
def test_cannot_delete_used_group_configuration(self):
"""
Scenario: Ensure that the user cannot delete a group configuration that is in use.
Given I have a course with group configuration that is used in the Content Experiment
When I go to the Group Configuration page
Then I do not see delete button and I see a note about that
When I edit the Group Configuration
Then I do not see delete button and I see the note about that
"""
# Create a new group configurations
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
)
],
},
})
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
)
# Go to the Group Configuration Page and click unit anchor
self.page.visit()
config = self.page.experiment_group_configurations[0]
self.assertTrue(config.delete_button_is_disabled)
self.assertIn('Cannot delete when in use by an experiment', config.delete_note)
config.edit()
self.assertTrue(config.delete_button_is_disabled)
self.assertIn('Cannot delete when in use by an experiment', config.delete_note)
def test_easy_access_from_experiment(self):
"""
Scenario: When a Content Experiment uses a Group Configuration,
ensure that the link to that Group Configuration works correctly.
Given I have a course with two Group Configurations
And Content Experiment is assigned to one Group Configuration
Then I see a link to Group Configuration
When I click on the Group Configuration link
Then I see the Group Configurations page
And I see that appropriate Group Configuration is expanded.
"""
# Create a new group configurations
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
create_user_partition_json(
1,
'Name of second Group Configuration',
'Second group configuration.',
[Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
),
],
},
})
# Assign newly created group configuration to unit
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 1})
)
unit = ContainerPage(self.browser, vertical.locator)
unit.visit()
experiment = unit.xblocks[0]
group_configuration_link_name = experiment.group_configuration_link_name
experiment.go_to_group_configuration_page()
self.page.wait_for_page()
# Appropriate Group Configuration is expanded.
self.assertFalse(self.page.experiment_group_configurations[0].is_expanded)
self.assertTrue(self.page.experiment_group_configurations[1].is_expanded)
self.assertEqual(
group_configuration_link_name,
self.page.experiment_group_configurations[1].name
)
def test_details_error_validation_message(self):
"""
Scenario: When a Content Experiment uses a Group Configuration, ensure
that an error validation message appears if necessary.
Given I have a course with a Group Configuration containing two Groups
And a Content Experiment is assigned to that Group Configuration
When I go to the Group Configuration Page
Then I do not see a error icon and message in the Group Configuration details view.
When I add a Group
Then I see an error icon and message in the Group Configuration details view
"""
# Create group configuration and associated experiment
config, _ = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B")], True)
# Display details view
config.toggle()
# Check that error icon and message are not present
self.assertFalse(config.details_error_icon_is_present)
self.assertFalse(config.details_message_is_present)
# Add a group
config.toggle()
config.edit()
config.add_group()
config.save()
# Display details view
config.toggle()
# Check that error icon and message are present
self.assertTrue(config.details_error_icon_is_present)
self.assertTrue(config.details_message_is_present)
self.assertIn(
"This content experiment has issues that affect content visibility.",
config.details_message_text
)
def test_details_warning_validation_message(self):
"""
Scenario: When a Content Experiment uses a Group Configuration, ensure
that a warning validation message appears if necessary.
Given I have a course with a Group Configuration containing three Groups
And a Content Experiment is assigned to that Group Configuration
When I go to the Group Configuration Page
Then I do not see a warning icon and message in the Group Configuration details view.
When I remove a Group
Then I see a warning icon and message in the Group Configuration details view
"""
# Create group configuration and associated experiment
config, _ = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B"), Group("2", "Group C")], True)
# Display details view
config.toggle()
# Check that warning icon and message are not present
self.assertFalse(config.details_warning_icon_is_present)
self.assertFalse(config.details_message_is_present)
# Remove a group
config.toggle()
config.edit()
config.groups[2].remove()
config.save()
# Display details view
config.toggle()
# Check that warning icon and message are present
self.assertTrue(config.details_warning_icon_is_present)
self.assertTrue(config.details_message_is_present)
self.assertIn(
"This content experiment has issues that affect content visibility.",
config.details_message_text
)
def test_edit_warning_message_empty_usage(self):
"""
Scenario: When a Group Configuration is not used, ensure that there is no warning icon or message.
Given I have a course with a Group Configuration containing two Groups
When I edit the Group Configuration
Then I do not see a warning icon and message
"""
# Create a group configuration with no associated experiment and display edit view
config = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B")], False)
config.edit()
# Check that warning icon and message are not present
self.assertFalse(config.edit_warning_icon_is_present)
self.assertFalse(config.edit_warning_message_is_present)
def test_edit_warning_message_non_empty_usage(self):
"""
Scenario: When a Group Configuration is used, ensure that there is a warning icon and a message.
Given I have a course with a Group Configuration containing two Groups
When I edit the Group Configuration
Then I see a warning icon and message
"""
# Create a group configuration with an associated experiment and display edit view
config, _ = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B")], True)
config.edit()
# Check that warning icon and message are present
self.assertTrue(config.edit_warning_icon_is_present)
self.assertTrue(config.edit_warning_message_is_present)
self.assertIn(
"This configuration is currently used in content experiments. If you make changes to the groups, you may need to edit those experiments.",
config.edit_warning_message_text
)
def publish_unit_and_verify_groups_in_LMS(self, courseware_page, group_names):
"""
Publish first unit in LMS and verify that Courseware page has given Groups
"""
self.publish_unit_in_LMS_and_view(courseware_page)
self.assertEqual(u'split_test', courseware_page.xblock_component_type())
self.assertTrue(courseware_page.q(css=".split-test-select").is_present())
rendered_group_names = self.get_select_options(page=courseware_page, selector=".split-test-select")
self.assertListEqual(group_names, rendered_group_names)
@skip # TODO fix this, see TNL-2035
def test_split_test_LMS_staff_view(self):
"""
Scenario: Ensure that a split test is rendered correctly in LMS staff mode, both as configured
and after an inactive group is removed.
Given I have a course with group configurations and split test that assigned to first group configuration
Then I publish split test and view it in LMS in staff view
And it is rendered correctly
Then I go to group configuration and delete group
Then I publish split test and view it in LMS in staff view
And it is rendered correctly
Then I go to split test and delete inactive vertical
Then I publish unit and view unit in LMS in staff view
And it is rendered correctly
"""
config, split_test = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B"), Group("2", "Group C")], True)
container = ContainerPage(self.browser, split_test.locator)
# render in LMS correctly
courseware_page = CoursewarePage(self.browser, self.course_id)
self.publish_unit_and_verify_groups_in_LMS(courseware_page, [u'Group A', u'Group B', u'Group C'])
# I go to group configuration and delete group
self.page.visit()
self.page.q(css='.group-toggle').first.click()
config.edit()
config.groups[2].remove()
config.save()
self.page.q(css='.group-toggle').first.click()
self._assert_fields(config, name="Name", description="Description", groups=["Group A", "Group B"])
self.browser.close()
self.browser.switch_to_window(self.browser.window_handles[0])
# render in LMS to see how inactive vertical is rendered
self.publish_unit_and_verify_groups_in_LMS(courseware_page, [u'Group A', u'Group B', u'Group ID 2 (inactive)'])
self.browser.close()
self.browser.switch_to_window(self.browser.window_handles[0])
# I go to split test and delete inactive vertical
container.visit()
container.delete(0)
# render in LMS again
self.publish_unit_and_verify_groups_in_LMS(courseware_page, [u'Group A', u'Group B'])
|
dkarakats/edx-platform
|
common/test/acceptance/tests/studio/test_studio_split_test.py
|
Python
|
agpl-3.0
| 46,739
|
[
"VisIt"
] |
111c101c4d17365eb727fe88952ded36923e39b5037794ca27034a3331dec539
|
from argparse import *
from sys import *
from struct import *
from os.path import *
from paraview.simple import *
from paraview.servermanager import *
parser = ArgumentParser(description = 'Process the arguments')
parser.add_argument('vtkFilePrefix', help = 'Path to the VTK file before the frame index')
parser.add_argument('vtkFileSufix', help = 'Path to the VTK file after the frame index')
parser.add_argument('outputDir', help = 'Path to the output directory (must be writable)')
parser.add_argument('startFrame', help = 'Start frame number of the sequence')
parser.add_argument('endFrame', help = 'End frame number of the sequence')
args = parser.parse_args()
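# Example invocation with ParaView's Python interpreter (hypothetical paths;
# the frame files would then be named run_00000.vtk ... run_00099.vtk):
#
#   pvpython VTKVoxel2BVOX_prolog.py /data/run_ .vtk /data/out 0 99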
start = -1
end = -1
try :
start = int(args.startFrame)
except ValueError :
print >> stderr, 'ERROR: startFrame is not a number. Aborting'
stderr.flush()
exit(-1)
# endtry
try :
end = int(args.endFrame)
except ValueError :
print >> stderr, 'ERROR: endFrame is not a number. Aborting'
stderr.flush()
exit(-2)
# endtry
frames = []
numFrames = end - start + 1
for i in range(numFrames) :
index = str(start + i).zfill(5)
frames.append(args.vtkFilePrefix + index + args.vtkFileSufix)
# endfor
if frames != [] :
vtkReader = OpenDataFile(frames)
vktInstance = Fetch(vtkReader)
frameBounds = vktInstance.GetBounds()
frameExtent = vktInstance.GetExtent()
res = []
res.append(frameExtent[1] - frameExtent[0] + 1)
res.append(frameExtent[3] - frameExtent[2] + 1)
res.append(frameExtent[5] - frameExtent[4] + 1)
# Header: XRES, YRES, ZRES, NUMFRAMES
outputFilePrefix = basename(args.vtkFilePrefix)
f = open(args.outputDir + '/' + outputFilePrefix + 'header.bin', 'wb')
f.write(pack('i'*len(res), *res) )
f.write(pack('i', numFrames) )
f.flush()
f.close()
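# The binary header can be read back with a matching unpack (a sketch,
# assuming the same file written just above):
#
#   with open(args.outputDir + '/' + outputFilePrefix + 'header.bin', 'rb') as hf:
#       xres, yres, zres, num_frames = unpack('4i', hf.read(16))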
f = open(args.outputDir + '/' + outputFilePrefix + 'header.txt', 'w')
f.write('XSIZE: ' + str(frameBounds[1] - frameBounds[0]) + '\n')
f.write('YSIZE: ' + str(frameBounds[3] - frameBounds[2]) + '\n')
f.write('ZSIZE: ' + str(frameBounds[5] - frameBounds[4]) + '\n')
f.write('XRES: ' + str(res[0]) + '\n')
f.write('YRES: ' + str(res[1]) + '\n')
f.write('ZRES: ' + str(res[2]) + '\n')
f.write('NUMFRAMES: ' + str(numFrames) + '\n')
f.flush()
f.close()
programmableFilter1 = ProgrammableFilter(Input=vtkReader)
programmableFilter1.OutputDataSetType = 'vtkImageData'
programmableFilter1.Script = '\
import math\n\
executive = self.GetExecutive()\n\
inputImageData = self.GetInput()\n\
inputPointData = inputImageData.GetPointData().GetArray(0)\n\
outInfo = executive.GetOutputInformation(0)\n\
updateExtent = [executive.UPDATE_EXTENT().Get(outInfo, i) for i in range(6)]\n\
imageData = self.GetOutput()\n\
imageData.SetExtent(updateExtent)\n\
imageData.AllocateScalars(vtk.VTK_FLOAT, 1)\n\
pointData = imageData.GetPointData().GetScalars()\n\
pointData.SetName("value")\n\
goodValue = 0.0\n\
dimensions = imageData.GetDimensions()\n\
for i in range(dimensions[0]) :\n\
for j in range(dimensions[1]) :\n\
for k in range(dimensions[2]) :\n\
pointId = vtk.vtkStructuredData.ComputePointId(dimensions, (i, j, k) )\n\
goodValue = inputPointData.GetValue(pointId)\n\
if not math.isnan(goodValue) :\n\
break\n\
if not math.isnan(goodValue) :\n\
break\n\
if not math.isnan(goodValue) :\n\
break\n\
for i in range(dimensions[0]) :\n\
for j in range(dimensions[1]) :\n\
for k in range(dimensions[2]) :\n\
pointId = vtk.vtkStructuredData.ComputePointId(dimensions, (i, j, k) )\n\
value = inputPointData.GetValue(pointId)\n\
if math.isnan(value) :\n\
pointData.SetValue(pointId, goodValue)\n\
else :\n\
pointData.SetValue(pointId, value)'
programmableFilter1.RequestUpdateExtentScript = ''
programmableFilter1.CopyArrays = 0
programmableFilter1.PythonPath = ''
if 'TimestepValues' in dir(vtkReader) and vtkReader.TimestepValues.__str__() != 'None' and vtkReader.TimestepValues.__len__() != 0:
limits = [float('+inf'), float('-inf')]
for timeStep in vtkReader.TimestepValues :
programmableFilter1.UpdatePipeline(timeStep)
frameRange = Fetch(programmableFilter1).GetPointData().GetArray(0).GetRange()
if frameRange[0] < limits[0] :
limits[0] = frameRange[0]
# endif
if frameRange[1] > limits[1] :
limits[1] = frameRange[1]
# endif
limits.append(frameRange[0])
limits.append(frameRange[1])
# endfor
f = open(args.outputDir + '/' + outputFilePrefix + 'limits.bin', 'wb')
f.write(pack('f'*len(limits), *limits) )
f.flush()
f.close()
f = open(args.outputDir + '/' + outputFilePrefix + 'limits.txt', 'w')
f.write('GLOBAL MIN: ' + str(limits[0]) + '\n')
f.write('GLOBAL MAX: ' + str(limits[1]) + '\n')
for i in range(numFrames) :
f.write('FRAME ' + str(i) + ' MIN: ' + str(limits[2 * i + 2]) + '\n')
f.write('FRAME ' + str(i) + ' MAX: ' + str(limits[2 * i + 3]) + '\n')
# endfor
f.flush()
f.close()
else :
programmableFilter1.UpdatePipeline()
frameRange = Fetch(programmableFilter1).GetPointData().GetArray(0).GetRange()
f = open(args.outputDir + '/' + outputFilePrefix + 'limits.bin', 'wb')
f.write(pack('f'*len(frameRange), *frameRange) )
f.flush()
f.close()
f = open(args.outputDir + '/' + outputFilePrefix + 'limits.txt', 'w')
f.write("MIN: " + str(frameRange[0]) + "\n")
f.write("MAX: " + str(frameRange[1]) + "\n")
f.flush()
f.close()
# endif
# endif
|
fercook/SciViz
|
Voxels/NETCDF_2_VTK_2_Blender/job/bvox/paraview/VTKVoxel2BVOX_prolog.py
|
Python
|
gpl-2.0
| 5,497
|
[
"ParaView",
"VTK"
] |
2ad14b79f559c04d737931cfd555f8fd40511ea735c19e1f8d3be9deae3acae0
|
# -*- coding: utf-8 -*-
#
# test_refractory.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import nest
"""
Assert that all neuronal models that have a refractory period implement it
correctly (except for Hodgkin-Huxley models which cannot be tested).
Details
-------
Submit the neuron to a constant excitatory current so that it spikes in the
[0, 50] ms interval.
A ``spike_detector`` is used to detect the time at which the neuron spikes and
a ``voltmeter`` is then used to make sure the voltage is clamped to ``V_reset``
during exactly ``t_ref``.
For neurons that do not clamp the potential, use a very large current to
trigger immediate spiking.
Untested models
---------------
* ``aeif_cond_alpha_RK5``
* ``ginzburg_neuron``
* ``hh_cond_exp_traub``
* ``hh_psc_alpha``
* ``hh_psc_alpha_gap``
* ``ht_neuron``
* ``iaf_chs_2007``
* ``iaf_chxk_2008``
* ``iaf_tum_2000``
* ``izhikevich``
* ``mcculloch_pitts_neuron``
* ``parrot_neuron``
* ``parrot_neuron_ps``
* ``pp_pop_psc_delta``
* ``pp_psc_delta``
* ``sli_neuron``
"""
# --------------------------------------------------------------------------- #
# Models, specific parameters
# -------------------------
#
# list of all neuronal models that can be tested by looking at clamped V
neurons_V_clamped = [
'aeif_cond_alpha',
'aeif_cond_alpha_multisynapse',
'aeif_cond_beta_multisynapse',
'aeif_cond_exp',
'aeif_psc_alpha',
'aeif_psc_exp',
'gif_cond_exp',
'gif_cond_exp_multisynapse',
'gif_psc_exp',
'gif_psc_exp_multisynapse',
'iaf_cond_alpha',
'iaf_cond_alpha_mc',
'iaf_cond_exp',
'iaf_cond_exp_sfa_rr',
'iaf_psc_alpha',
'iaf_psc_alpha_multisynapse',
'iaf_psc_delta',
'iaf_psc_exp',
'iaf_psc_exp_multisynapse',
]
# neurons that must be tested through a high current to spike immediately
# (t_ref = interspike)
neurons_interspike = [
"amat2_psc_exp",
"mat2_psc_exp",
"ht_neuron",
]
neurons_interspike_ps = [
"iaf_psc_alpha_canon",
"iaf_psc_alpha_presc",
"iaf_psc_delta_canon",
"iaf_psc_exp_ps",
]
# models that cannot be tested
ignore_model = [
"aeif_cond_alpha_RK5", # this one is faulty and will be removed
"ginzburg_neuron",
"hh_cond_exp_traub",
"hh_psc_alpha",
"hh_psc_alpha_gap",
"iaf_chs_2007",
"iaf_chxk_2008",
"iaf_tum_2000",
"izhikevich",
"mcculloch_pitts_neuron",
"parrot_neuron",
"parrot_neuron_ps",
"pp_pop_psc_delta",
"pp_psc_delta",
"sli_neuron",
]
tested_models = [m for m in nest.Models("nodes") if (nest.GetDefaults(
m, "element_type") == "neuron" and m not in ignore_model)]
# additional parameters for the connector
add_connect_param = {
"iaf_cond_alpha_mc": {"receptor_type": 7},
}
# --------------------------------------------------------------------------- #
# Simulation time and refractory time limits
# -------------------------
#
simtime = 100
resolution = 0.1
min_steps = 1 # minimal number of refractory steps (t_ref = resolution)
max_steps = 200 # maximal number of steps (t_ref = 200 * resolution)
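# Note: t_ref is drawn further below as resolution * np.random.randint(min_steps,
# max_steps), i.e. between 0.1 ms and 19.9 ms here (numpy's randint excludes the
# upper bound), which always fits inside the 100 ms simulation.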
# --------------------------------------------------------------------------- #
# Test class
# -------------------------
#
def foreach_neuron(func):
'''
Decorator that automatically does the test for all neurons.
'''
def wrapper(*args, **kwargs):
self = args[0]
msd = 123456
N_vp = nest.GetKernelStatus(['total_num_virtual_procs'])[0]
pyrngs = [np.random.RandomState(s) for s in range(msd, msd + N_vp)]
for name in tested_models:
nest.ResetKernel()
nest.SetKernelStatus({
'resolution': resolution, 'grng_seed': msd + N_vp,
'rng_seeds': range(msd + N_vp + 1, msd + 2 * N_vp + 1)})
func(self, name, **kwargs)
return wrapper
class RefractoryTestCase(unittest.TestCase):
"""
Check the correct implementation of refractory time in all neuronal models.
"""
def compute_reftime(self, model, sd, vm, neuron):
'''
Compute the refractory time of the neuron.
Parameters
----------
model : str
Name of the neuronal model.
sd : tuple
GID of the spike detector.
vm : tuple
GID of the voltmeter.
neuron : tuple
GID of the recorded neuron.
Returns
-------
t_ref_sim : double
Value of the simulated refractory period.
'''
spike_times = nest.GetStatus(sd, "events")[0]["times"]
if model in neurons_interspike:
            # spike emitted at next timestep so subtract resolution
return spike_times[1]-spike_times[0]-resolution
elif model in neurons_interspike_ps:
return spike_times[1]-spike_times[0]
else:
Vr = nest.GetStatus(neuron, "V_reset")[0]
times = nest.GetStatus(vm, "events")[0]["times"]
# index of the 2nd spike
idx_max = np.argwhere(times == spike_times[1])[0][0]
name_Vm = "V_m.s" if model == "iaf_cond_alpha_mc" else "V_m"
Vs = nest.GetStatus(vm, "events")[0][name_Vm]
            # get the index at which the spike occurred
idx_spike = np.argwhere(times == spike_times[0])[0][0]
# find end of refractory period between 1st and 2nd spike
idx_end = np.where(
np.isclose(Vs[idx_spike:idx_max], Vr, 1e-6))[0][-1]
t_ref_sim = idx_end * resolution
return t_ref_sim
@foreach_neuron
def test_refractory_time(self, model):
'''
Check that refractory time implementation is correct.
'''
# randomly set a refractory period
t_ref = resolution * np.random.randint(min_steps, max_steps)
# create the neuron and devices
nparams = {"t_ref": t_ref}
neuron = nest.Create(model, params=nparams)
name_Vm = "V_m.s" if model == "iaf_cond_alpha_mc" else "V_m"
vm_params = {"interval": resolution, "record_from": [name_Vm]}
vm = nest.Create("voltmeter", params=vm_params)
sd = nest.Create("spike_detector", params={'precise_times': True})
cg = nest.Create("dc_generator", params={"amplitude": 900.})
# for models that do not clamp V_m, use very large current to trigger
# almost immediate spiking => t_ref almost equals interspike
if model in neurons_interspike_ps:
nest.SetStatus(cg, "amplitude", 10000000.)
elif model in neurons_interspike:
nest.SetStatus(cg, "amplitude", 2000.)
# connect them and simulate
nest.Connect(vm, neuron)
nest.Connect(cg, neuron, syn_spec=add_connect_param.get(model, {}))
nest.Connect(neuron, sd)
nest.Simulate(simtime)
# get and compare t_ref
t_ref_sim = self.compute_reftime(model, sd, vm, neuron)
# approximate result for precise spikes (interpolation error)
if model in neurons_interspike_ps:
self.assertAlmostEqual(t_ref, t_ref_sim, places=3,
msg='''Error in model {}:
{} != {}'''.format(model, t_ref, t_ref_sim))
else:
self.assertAlmostEqual(t_ref, t_ref_sim, msg='''Error in model {}:
{} != {}'''.format(model, t_ref, t_ref_sim))
# --------------------------------------------------------------------------- #
# Run the comparisons
# ------------------------
#
def suite():
return unittest.makeSuite(RefractoryTestCase, "test")
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == '__main__':
run()
|
mschmidt87/nest-simulator
|
pynest/nest/tests/test_refractory.py
|
Python
|
gpl-2.0
| 8,419
|
[
"NEURON"
] |
b523d1efddd1cff25b1f29882dc90c9d94c08f01473bc5b5260f79cddaf599e3
|
# A simple HTTP server implemented using h11 and Trio:
# http://trio.readthedocs.io/en/latest/index.html
# (so requires python 3.5+).
#
# All requests get echoed back a JSON document containing information about
# the request.
#
# This is a rather involved example, since it attempts to both be
# fully-HTTP-compliant and also demonstrate error handling.
#
# The main difference between an HTTP client and an HTTP server is that in a
# client, if something goes wrong, you can just throw away that connection and
# make a new one. In a server, you're expected to handle all kinds of garbage
# input and internal errors and recover with grace and dignity. And that's
# what this code does.
#
# I recommend pushing on it to see how it works -- e.g. watch what happens if
# you visit http://localhost:8080 in a web browser that supports keep-alive,
# hit reload a few times, and then wait for the keep-alive to time out on the
# server.
#
# Or try using curl to start a chunked upload and then hit control-C in the
# middle of the upload:
#
# (for CHUNK in $(seq 10); do echo $CHUNK; sleep 1; done) \
# | curl -T - http://localhost:8080/foo
#
# (Note that curl will send Expect: 100-Continue, too.)
#
# Or, heck, try letting curl complete successfully ;-).
# Some potential improvements, if you wanted to try and extend this to a real
# general-purpose HTTP server (and to give you some hints about the many
# considerations that go into making a robust HTTP server):
#
# - The timeout handling is rather crude -- we impose a flat 10 second timeout
# on each request (starting from the end of the previous
# response). Something finer-grained would be better. Also, if a timeout is
# triggered we unconditionally send a 500 Internal Server Error; it would be
# better to keep track of whether the timeout is the client's fault, and if
# so send a 408 Request Timeout.
#
# - The error handling policy here is somewhat crude as well. It handles a lot
# of cases perfectly, but there are corner cases where the ideal behavior is
#   more debatable. For example, if a client starts uploading a large
# request, uses 100-Continue, and we send an error response, then we'll shut
# down the connection immediately (for well-behaved clients) or after
# spending TIMEOUT seconds reading and discarding their upload (for
# ill-behaved ones that go on and try to upload their request anyway). And
# for clients that do this without 100-Continue, we'll send the error
# response and then shut them down after TIMEOUT seconds. This might or
# might not be your preferred policy, though -- maybe you want to shut such
# clients down immediately (even if this risks their not seeing the
# response), or maybe you're happy to let them continue sending all the data
# and wasting your bandwidth if this is what it takes to guarantee that they
# see your error response. Up to you, really.
#
# - Another example of a debatable choice: if a response handler errors out
# without having done *anything* -- hasn't started responding, hasn't read
#   the request body -- then this connection actually is salvageable, if the
# server sends an error response + reads and discards the request body. This
# code sends the error response, but it doesn't try to salvage the
# connection by reading the request body, it just closes the
# connection. This is quite possibly the best option, but again this is a
# policy decision.
#
# - Our error pages always include the exception text. In real life you might
# want to log the exception but not send that information to the client.
#
# - Our error responses perhaps should include Connection: close when we know
# we're going to close this connection.
#
# - We don't support the HEAD method, but ought to.
#
# - We should probably do something cleverer with buffering responses and
# TCP_CORK and suchlike.
import json
from itertools import count
from wsgiref.handlers import format_date_time
import trio
import h11
MAX_RECV = 2 ** 16
TIMEOUT = 10
################################################################
# I/O adapter: h11 <-> trio
################################################################
# The core of this could be factored out to be usable for trio-based clients
# too, as well as servers. But as a simplified pedagogical example we don't
# attempt this here.
class TrioHTTPWrapper:
_next_id = count()
def __init__(self, stream):
self.stream = stream
self.conn = h11.Connection(h11.SERVER)
# Our Server: header
self.ident = " ".join(
["h11-example-trio-server/{}".format(h11.__version__), h11.PRODUCT_ID]
).encode("ascii")
# A unique id for this connection, to include in debugging output
# (useful for understanding what's going on if there are multiple
# simultaneous clients).
self._obj_id = next(TrioHTTPWrapper._next_id)
async def send(self, event):
# The code below doesn't send ConnectionClosed, so we don't bother
# handling it here either -- it would require that we do something
# appropriate when 'data' is None.
assert type(event) is not h11.ConnectionClosed
data = self.conn.send(event)
await self.stream.send_all(data)
async def _read_from_peer(self):
if self.conn.they_are_waiting_for_100_continue:
self.info("Sending 100 Continue")
go_ahead = h11.InformationalResponse(
status_code=100, headers=self.basic_headers()
)
await self.send(go_ahead)
try:
data = await self.stream.receive_some(MAX_RECV)
except ConnectionError:
# They've stopped listening. Not much we can do about it here.
data = b""
self.conn.receive_data(data)
async def next_event(self):
while True:
event = self.conn.next_event()
if event is h11.NEED_DATA:
await self._read_from_peer()
continue
return event
async def shutdown_and_clean_up(self):
# When this method is called, it's because we definitely want to kill
# this connection, either as a clean shutdown or because of some kind
# of error or loss-of-sync bug, and we no longer care if that violates
# the protocol or not. So we ignore the state of self.conn, and just
# go ahead and do the shutdown on the socket directly. (If you're
# implementing a client you might prefer to send ConnectionClosed()
# and let it raise an exception if that violates the protocol.)
#
try:
await self.stream.send_eof()
except trio.BrokenResourceError:
# They're already gone, nothing to do
return
# Wait and read for a bit to give them a chance to see that we closed
# things, but eventually give up and just close the socket.
# XX FIXME: possibly we should set SO_LINGER to 0 here, so
# that in the case where the client has ignored our shutdown and
# declined to initiate the close themselves, we do a violent shutdown
# (RST) and avoid the TIME_WAIT?
# it looks like nginx never does this for keepalive timeouts, and only
# does it for regular timeouts (slow clients I guess?) if explicitly
# enabled ("Default: reset_timedout_connection off")
with trio.move_on_after(TIMEOUT):
try:
while True:
# Attempt to read until EOF
got = await self.stream.receive_some(MAX_RECV)
if not got:
break
except trio.BrokenResourceError:
pass
finally:
await self.stream.aclose()
def basic_headers(self):
# HTTP requires these headers in all responses (client would do
# something different here)
return [
("Date", format_date_time(None).encode("ascii")),
("Server", self.ident),
]
def info(self, *args):
# Little debugging method
print("{}:".format(self._obj_id), *args)
################################################################
# Server main loop
################################################################
# General theory:
#
# If everything goes well:
# - we'll get a Request
# - our response handler will read the request body and send a full response
# - that will either leave us in MUST_CLOSE (if the client doesn't
# support keepalive) or DONE/DONE (if the client does).
#
# But then there are many, many different ways that things can go wrong
# here. For example:
# - we don't actually get a Request, but rather a ConnectionClosed
# - exception is raised from somewhere (naughty client, broken
# response handler, whatever)
# - depending on what went wrong and where, we might or might not be
# able to send an error response, and the connection might or
#     might not be salvageable after that
# - response handler doesn't fully read the request or doesn't send a
# full response
#
# But these all have one thing in common: they involve us leaving the
# nice easy path up above. So we can just proceed on the assumption
# that the nice easy thing is what's happening, and whenever something
# goes wrong do our best to get back onto that path, and h11 will keep
# track of how successful we were and raise new errors if things don't work
# out.
async def http_serve(stream):
wrapper = TrioHTTPWrapper(stream)
wrapper.info("Got new connection")
while True:
assert wrapper.conn.states == {h11.CLIENT: h11.IDLE, h11.SERVER: h11.IDLE}
try:
with trio.fail_after(TIMEOUT):
wrapper.info("Server main loop waiting for request")
event = await wrapper.next_event()
wrapper.info("Server main loop got event:", event)
if type(event) is h11.Request:
await send_echo_response(wrapper, event)
except Exception as exc:
wrapper.info("Error during response handler: {!r}".format(exc))
await maybe_send_error_response(wrapper, exc)
if wrapper.conn.our_state is h11.MUST_CLOSE:
wrapper.info("connection is not reusable, so shutting down")
await wrapper.shutdown_and_clean_up()
return
else:
try:
wrapper.info("trying to re-use connection")
wrapper.conn.start_next_cycle()
except h11.ProtocolError:
states = wrapper.conn.states
wrapper.info("unexpected state", states, "-- bailing out")
await maybe_send_error_response(
wrapper, RuntimeError("unexpected state {}".format(states))
)
await wrapper.shutdown_and_clean_up()
return
################################################################
# Actual response handlers
################################################################
# Helper function
async def send_simple_response(wrapper, status_code, content_type, body):
wrapper.info("Sending", status_code, "response with", len(body), "bytes")
headers = wrapper.basic_headers()
headers.append(("Content-Type", content_type))
headers.append(("Content-Length", str(len(body))))
res = h11.Response(status_code=status_code, headers=headers)
await wrapper.send(res)
await wrapper.send(h11.Data(data=body))
await wrapper.send(h11.EndOfMessage())
async def maybe_send_error_response(wrapper, exc):
# If we can't send an error, oh well, nothing to be done
wrapper.info("trying to send error response...")
if wrapper.conn.our_state not in {h11.IDLE, h11.SEND_RESPONSE}:
wrapper.info("...but I can't, because our state is", wrapper.conn.our_state)
return
try:
if isinstance(exc, h11.RemoteProtocolError):
status_code = exc.error_status_hint
elif isinstance(exc, trio.TooSlowError):
status_code = 408 # Request Timeout
else:
status_code = 500
body = str(exc).encode("utf-8")
await send_simple_response(
wrapper, status_code, "text/plain; charset=utf-8", body
)
except Exception as exc:
wrapper.info("error while sending error response:", exc)
async def send_echo_response(wrapper, request):
wrapper.info("Preparing echo response")
if request.method not in {b"GET", b"POST"}:
# Laziness: we should send a proper 405 Method Not Allowed with the
# appropriate Accept: header, but we don't.
raise RuntimeError("unsupported method")
response_json = {
"method": request.method.decode("ascii"),
"target": request.target.decode("ascii"),
"headers": [
(name.decode("ascii"), value.decode("ascii"))
for (name, value) in request.headers
],
"body": "",
}
while True:
event = await wrapper.next_event()
if type(event) is h11.EndOfMessage:
break
assert type(event) is h11.Data
response_json["body"] += event.data.decode("ascii")
response_body_unicode = json.dumps(
response_json, sort_keys=True, indent=4, separators=(",", ": ")
)
response_body_bytes = response_body_unicode.encode("utf-8")
await send_simple_response(
wrapper, 200, "application/json; charset=utf-8", response_body_bytes
)
async def serve(port):
print("listening on http://localhost:{}".format(port))
try:
await trio.serve_tcp(http_serve, port)
except KeyboardInterrupt:
print("KeyboardInterrupt - shutting down")
################################################################
# Run the server
################################################################
if __name__ == "__main__":
trio.run(serve, 8080)
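# Example (assuming the server is running locally on the port above):
#
#   curl http://localhost:8080/hello
#
# should print a JSON document echoing the request's method, target, headers
# and (empty) body.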
|
python-hyper/h11
|
examples/trio-server.py
|
Python
|
mit
| 13,993
|
[
"VisIt"
] |
c37a2a8c22a4afcf5f19ee5a9c182fb87f86575b58456e4690d3b073b2727b64
|
#!/usr/bin/env python
###############################################################################
#
# pytableaucreate - Python implementation of protein Tableau creator
#
# File: pytableaucreate.py
# Author: Alex Stivala
# Created: February 2008
#
# $Id: pytableaucreate.py 4291 2012-08-09 23:43:39Z astivala $
#
#
# Create a protein tableau and write it to stdout.
# The implementation is actually in pttableau.py which is used by ptgraph2.py
# (Pro-Origami), this is basically just a wrapper for testing / standalone
# tableau creation (see pttableau.py).
#
# Also used to create SSE midpoint distance matrix.
#
# Tableaux are described by Kamat and Lesk 2007
# 'Contact Patterns Between Helices and Strands of Sheet Define Protein
# Folding Patterns' Proteins 66:869-876
# and Lesk 2003 'From Electrons to Proteins and Back Again'
# Int. J. Quant. Chem. 95:678-682
# and Lesk 1995 'Systematic representation of folding patterns'
# J. Mol. Graph. 13:159-164.
#
# The implementation is based on Arun Konagurthu's TableauCreator program, see
# Konagurthu, Stuckey and Lesk 2008 'Structural search and retrieval using
# a tableau representation of protein folding patterns' Bioinformatics
# (advance access, to be published Jan 5 2008).
#
# Example usage:
#
# pytableaucreate.py 1QLP.pdb
#
# Filenames may be either in the format above or the pdb1qlp.pdb format.
# Compressed pdb files are supported (gzip) (e.g. pdb1qlp.ent.gz).
#
# It is written in Python and depends on some Python libraries:
#
# . BioPython (including Bio.PDB)
# http://www.biopython.org
#
# Reference for Bio.PDB is:
# Hamelryck and Manderick 2003 "PDB parser and structure class implemented
# in Python" Bioinformatics 19:2308-2310
#
# which in turn depends on Numeric
# http://sourceforge.net/projects/numpy
#
#
# Developed on Linux 2.6.9 (x86_64) with Python 2.5.1
# and BioPython 1.43 with Numeric 24.2
#
###############################################################################
import warnings # so we can suppress the annoying tempnam 'security' warning
import sys,os
import getopt
import re
import pickle
import random
import copy
from math import degrees
import numpy.oldnumeric as Numeric
from Bio.PDB import *
import ptsecstruct
from ptnode import ptnode_set_verbose
from ptdomain import *
from ptutils import cleanup_tmpdir,isNaN
import getdomains
from tableaubuild import TableauBuild,make_tableaux
from pttableau import PTTableauPacked
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def write_tableau(n, tableau, permutation, use_numeric,
fortran_format, build_distance_matrix):
"""
Write tableau or distance matrix to stdout.
n - order of tableau or distance matrix (n by n)
tableau - PTTableau object for tableau or Numeric matrix for
Omega matrix or Numeric matrix for distance matrix
    permutation - permuted list of integers in interval [0, n-1] to
    permute the rows+cols of the tableau/matrix by
    (so [0,1,2,...n-1] for no permutation).
use_numeric - boolean. If true, tableau is a Numeric Omega matrix
not a tableau.
fortran_format - boolean. If True, put in lower triangle format
for FORTRAN programs tsrchd etc.
    build_distance_matrix - boolean. If True, this is a distance matrix, not a
    tableau or Omega matrix.
"""
if build_distance_matrix:
distmatrix = tableau
if fortran_format:
for k in range(n):
for l in range(k+1):
kprime = permutation[k]
lprime = permutation[l]
if isNaN(distmatrix[kprime,lprime]):
dist = 0.0
else:
dist = distmatrix[kprime,lprime]
if dist > 99.9:
sys.stderr.write('WARNING: distance %f at (%d,%d) truncated to 99.9 for fortran format\n' % (dist,kprime,lprime))
dist = 99.9
sys.stdout.write("%6.3f " % dist)
sys.stdout.write("\n")
else:
for k in range(n):
for l in range(n):
kprime = permutation[k]
lprime = permutation[l]
sys.stdout.write("% 6.2f " % distmatrix[kprime,lprime])
sys.stdout.write("\n")
elif use_numeric:
Omega = tableau
if fortran_format:
for k in range(n):
for l in range(k+1):
kprime = permutation[k]
lprime = permutation[l]
if isNaN(Omega[kprime,lprime]):
angle = 0.0
else:
angle = Omega[kprime,lprime]
sys.stdout.write("%6.3f " % angle)
sys.stdout.write("\n")
else:
for k in range(n):
for l in range(n):
kprime = permutation[k]
lprime = permutation[l]
sys.stdout.write("% 4.3f " % Omega[kprime,lprime])
sys.stdout.write("\n")
else:
if fortran_format:
for k in range(n):
for l in range(k+1):
kprime = permutation[k]
lprime = permutation[l]
sys.stdout.write(tableau[(kprime,lprime)] + " ")
sys.stdout.write("\n")
else:
# can't just sys.stdout.write(str(tableau)) if shuffled...
for k in range(n):
for l in range(n):
kprime = permutation[k]
lprime = permutation[l]
sys.stdout.write(tableau[(kprime,lprime)] + " ")
sys.stdout.write('\n')
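# For example, with n = 3 and permutation = [2, 0, 1], the entry written at
# row k = 1, column l = 0 above is tableau[(0, 2)]: rows and columns are
# permuted symmetrically, so a shuffled matrix stays internally consistent.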
def write_tableau_old_format(n, Omega, ssestr):
"""
Write tableau to stdout in the original
(Arun) TableauCreator format, with angles in degrees,
full matrix, number of SSEs on first line and SSE sequence
(DSSP codes E,H) on second line.
n - order of tableau matrix (n by n)
Omega - Numeric matrix for Omega matrix
    ssestr - SSE string corresponding to the Omega matrix
"""
sys.stdout.write(str(len(Omega)) + '\n')
sys.stdout.write(ssestr + '\n')
for k in range(n):
for l in range(n):
angle = degrees(Omega[k,l])
if isNaN(angle) or k == l:
angle = -999.0
sys.stdout.write("% 7.1f " % angle)
sys.stdout.write("\n")
def write_distmatrix_old_format(n, dmat, ssestr):
"""
Write distance matrix to stdout in format for TableauComparer
(Arun)
full matrix, number of SSEs on first line and SSE sequence
(DSSP codes E,H) on second line.
n - order of distance matrix (n by n)
dmat - Numeric matrix for distance matrix
    ssestr - SSE string corresponding to the distance matrix
"""
sys.stdout.write(str(len(dmat)) + '\n')
sys.stdout.write(ssestr + '\n')
for k in range(n):
for l in range(n):
d = dmat[k,l]
if isNaN(d) or k == l:
d = -999.0
sys.stdout.write("% 7.1f " % d)
sys.stdout.write("\n")
#-----------------------------------------------------------------------------
#
# Main
#
#-----------------------------------------------------------------------------
def usage(progname):
"""
Print usage message and exit
"""
sys.stderr.write("Usage: " + progname +
" [-35knuvfe] [-d|-b] [-t struct_prog] "
"[-p domain_prog] [-a domainid] [-s sse_num_list] [-c chainid] "
"[-m min_sse_len] [-o <savefile>] [-i identifier] "
"<PDBfile>\n")
sys.stderr.write(" -3 include 3_10 helices\n")
sys.stderr.write(" -5 include pi helices\n")
sys.stderr.write(" -k use HH and KK codes for anti/parallel strands in same sheet\n")
sys.stderr.write(" -n output numeric matrix rather than tableau\n")
sys.stderr.write(" -e output numeric tableau angles in degrees, in original TableauCreator .angles file format\n")
sys.stderr.write(" -f output in FORTRAN style format for TSRCHN\n")
sys.stderr.write(" -d build SSE axis midpoint distance matrix not tableau\n")
sys.stderr.write(" -b build both tableau and distance matrix\n")
sys.stderr.write(" -p domain decomposition method/db\n"
" valid values are none (default), "
"ddomain, cath:cdffile, pdomains:pdomainsfile\n")
sys.stderr.write(" -a domainid : only output for specified domain\n")
    sys.stderr.write(" -t struct_prog : use struct_prog to define " \
"secondary structure\n")
sys.stderr.write(" supported is 'pdb' (default) or 'stride' or 'dssp' or 'pmml'\n")
sys.stderr.write(" -s sse_num_list : specifies comma-separated list of "
"SSE sequential numbers to include in the tableau\n")
sys.stderr.write(" -m min_sse_len : minimum number of residues in SSE to "
"be included in tableau\n")
    sys.stderr.write(" -c chainid : specify chain identifier; only build "
"tableau for that chain\n")
sys.stderr.write(" -i identifier : when using -f, specify identifier "
" to use rather than deriving from filename\n")
sys.stderr.write(" -o savefile : save tableau in packed format for use "
"in other programs such as tabsearchqpml.py\n"
" WARNING: savefile is overwritten if it exists.\n")
sys.stderr.write(" -u randomly permute the tableau/distance matrix\n")
sys.stderr.write(" -v print verbose debugging messages to stderr\n")
sys.exit(1)
def main():
"""
main for pytableaucreate.py
Usage: pytableaucreate [-35nefuv] [-d|-b] [-t structprog] [-p domainprog]
[-a domainid]
[-s sse_num_list] [-c chainid] [-m min_sse_len]
[-o savefile] <PDBfile>
-3 specifies to include 3_10 helices in the diagram. Default is only
alpha helices.
    -5 specifies to include pi helices in the diagram. Default is only
alpha helices.
-k use the HH and KK codes for respectively antiparallel and parallel
strands in the same sheet, rather than the O, P etc. codes.
-n output a numeric omega matrix instead of tableau.
-e output numeric tableau angles in degrees, in the original
TableauCreator .angles file format, with number of entries on
first line, SSE sequence description on second line (E/H), then
(full) matrix with angles in degrees (rather than radians).
For distance matrix, same format with distances between SSEs
in Angstroms.
-f output the matrix in 'FORTRAN style' lower triangle with
header line suitable for input to TMATN.
-d build SSE axis midpoint distance matrix rather than tableau.
-b build both the tableau and distance matrix and output together,
for use with tsrchd etc. for example. If -u is used to permute
the matrices, they are permuted the same way so they are still
consistent.
-p specify the domain decomposition method.
Valid values are 'none' (default), 'ddomain', 'cath:cdf_filename'.
-a domainid : only output specified domain
-t specifies the secondary structure assignment program to use.
    Currently supported are 'pdb', 'dssp', 'stride' and 'pmml'.
Default 'pdb'.
-s sse_num_list specifies a comman-separated
list of SSE sequential ids to build the
tableau for. SSE sequential id's start at 1 and go from N to C
    terminus. E.g. -s1,5,8 includes only the 1st, 5th and 8th SSEs.
Numbers do not restart at chains (but do restart in each domain).
    These numbers are those assigned by the 'ptgraph2 -b sequential' option.
TODO: this currently does not make sense when multiple domains
    are being processed; this option applies to each domain.
-c chainid : specify chain identifier; only build tableau for that chain
    -m min_sse_len : minimum number of residues in SSE for it to be included
-i identifier : when using fortran format (-f), specify the identifier
to use in the output rather than deriving it from the filename
-o savefile : save tableau in packed format for use in other
programs, such as tabsearchqpml.py
WARNING: savefile is overwritten if it exists
TODO: this currently does not make sense when multiple domains
    are being processed; this option only saves the first domain.
    -u randomly permute the rows+cols (symmetric) of the tableau/distance matrix.
writes the permutation vector in form
permutation = i,j,..,m
e.g.
permutation = 3,1,2,4
as first line of output before identifier information and tableau
-v specifies verbose mode: debugging output is written to stderr.
"""
global verbose
try:
opts, args = getopt.getopt(sys.argv[1:], "35bdfknep:a:t:s:c:m:i:o:uv?")
except getopt.GetoptError:
usage(os.path.basename(sys.argv[0]))
valid_secstruct_programs = ["dssp", "stride", "pdb", "pmml"]
valid_domain_programs = getdomains.valid_domain_programs + [r"none"]
valid_domain_programs_re = [ re.compile(re_str) for re_str in
valid_domain_programs ]
verbose = False # global (python globals are only 'global' to module though)
secstruct_program = "pdb"
include_310_helices = False
include_pi_helices = False
domain_program = "none"
sse_id_list = None
use_numeric = False
use_hk = False
savefilename = None
min_sse_len = None
fortran_format = False
build_distance_matrix = False
chainid = None
fident = None
do_shuffle = False
build_both = False # both tableau and dist matrix
use_old_format = False # size + SSE chain + degrees omega matrix
domainid = None
for opt,arg in opts:
if opt == "-3": # include 3_10 helices
include_310_helices = True
elif opt == "-5": # include pi helices
include_pi_helices = True
elif opt == "-d": # build SSE midpoint distance matrix not tableau
build_distance_matrix = True
elif opt == "-b": # build both tableau and distance matrix
build_both = True
elif opt == "-k": # use HH and KK codes
use_hk = True
elif opt == "-n": # output numeric matrix not tableau
use_numeric = True
elif opt == "-e": # use TableauCreator .angles file format
use_old_format = True
elif opt == "-f": # FORTRAN style format for TMATN
fortran_format = True
elif opt == "-p": # domain parsing program
domain_program = None
for valid_domarg_re in valid_domain_programs_re:
if valid_domarg_re.match(arg):
domain_program = arg
break
if domain_program == None:
sys.stderr.write("valid values for -p are: " +
str(valid_domain_programs) + "\n")
usage(sys.argv[0])
elif opt == "-a": # only output tableau for specified domain id
domainid = arg
elif opt == "-t":
if arg not in valid_secstruct_programs:
sys.stderr.write("valid values for -t are: " +
str(valid_secstruct_programs) + "\n")
usage(sys.argv[0])
secstruct_program = arg
elif opt == "-s":
sse_id_list_str = arg.split(',')
sse_id_list = []
sse_id_uniq_dict = {} # { id : True } just for checking all unique
for sse_id_str in sse_id_list_str:
if sse_id_str.isdigit():
if sse_id_uniq_dict.has_key(int(sse_id_str)):
sys.stderr.write("duplicate SSE sequential number " +
sse_id_str + "\n")
usage(sys.argv[0])
sse_id_uniq_dict[int(sse_id_str)] = True
sse_id_list.append(int(sse_id_str))
else:
sys.stderr.write("not a valid SSE sequential number '" +
sse_id_str + "'\n")
usage(sys.argv[0])
sse_id_list.sort() # ensure SSEs are in order
elif opt == "-c": # chain identifier
if len(arg) != 1:
sys.stderr.write("invalid chain identifier for -c option\n")
usage(sys.argv[0])
chainid = arg.upper()
elif opt == "-m": # min sse len
min_sse_len = int(arg)
elif opt == "-i": # identifier to use for fortran format
fident = arg
elif opt == "-o": # save tableau in packed format
savefilename = arg
elif opt == "-u": # randomly permute the tableau/matrix
do_shuffle = True
elif opt == "-v": # verbose
verbose = True # this module only
ptnode_set_verbose(True) # ptnode module
ptsecstruct.ptsecstruct_set_verbose(True) # ptsecstruct module
ptdomain_set_verbose(True) # ptdomain module
else:
usage(sys.argv[0])
if use_numeric and use_hk:
sys.stderr.write("-n (numeric) and -k (use HH and KK codes) are "
"mutually exlusive\n")
usage(sys.argv[0])
if build_distance_matrix and build_both:
sys.stderr.write("WARNING: both -d (build dist matrix) and -b "
"(build both) specified, ignoring -d\n")
build_distance_matrix = False
if savefilename and do_shuffle:
sys.stderr.write('WARNING: saved tableau will not be shuffled\n')
if build_distance_matrix:
if use_numeric:
use_numeric = False
sys.stderr.write("WARNING: -n (numeric) ignored for -d (distance matrix)\n")
if use_hk:
sys.stderr.write("-k (use HH and KK) invalid for -d (distance matrix)\n");
usage(sys.argv[0])
if (secstruct_program == "pmml" and
(min_sse_len == None or min_sse_len < 3)):
        sys.stderr.write("WARNING: PMML can give SSEs of length 1 or 2 causing axis fitting to fail, setting minimum length to 3 as if -m3 were specified\n")
min_sse_len = 3
if fident:
if not fortran_format:
sys.stderr.write("-i is only valid with -f\n")
usage(sys.argv[0])
elif len(fident) > 8:
sys.stderr.write("identifier must be 8 chars or less\n")
usage(sys.argv[0])
if use_old_format and (build_both or
use_hk or use_numeric or fortran_format or
do_shuffle or savefilename):
sys.stderr.write("-e (use old .angles format) is not compatible "
"with -b -k or -n or -f or -u or -o\n")
usage(os.path.basename(sys.argv[0]))
if len(args) != 1:
usage(os.path.basename(sys.argv[0]))
pdb_filename = args[0]
# check for compressed files. We only support gzip (.gz)
# Note we are not using the zlib or GzipFile python modules
    # since we are calling external programs which require the
    # file uncompressed themselves anyway, so we'll just run gzip
# to uncompress the file to a temporary directory.
pdb_file_basename = os.path.basename(pdb_filename)
(name,extension) = os.path.splitext(pdb_file_basename)
if extension == '.gz':
TMPDIR = os.tempnam(None, "ptgz")
os.mkdir(TMPDIR)
tmp_pdbfilename = os.path.join(TMPDIR, name)
os.system("gzip " + pdb_filename + " -d -c > " + tmp_pdbfilename)
our_pdb_filename = tmp_pdbfilename
used_tmp_file = True
else:
our_pdb_filename = pdb_filename
used_tmp_file = False
try:
if fortran_format and fident:
pdbid = fident
else:
pdbid = name.upper()
if len(pdbid) >= 6 and pdbid[:3] == "PDB":
pdbid = pdbid[3:7]
if chainid:
pdbid += '_' + chainid
# parse PDB file
pdb_parser = PDBParser()
pdb_struct = pdb_parser.get_structure(pdbid, our_pdb_filename)
# create the Tableaux and output them
(tableaux_list, ssestr_list) = make_tableaux(our_pdb_filename,
pdb_struct,
secstruct_program,
domain_program,
include_310_helices,
include_pi_helices,
(use_numeric or use_old_format),
sse_id_list,
use_hk,
min_sse_len,
build_distance_matrix,
chainid,
domainid)
if build_both:
(distmatrix_list, ssestr_list) = make_tableaux(our_pdb_filename,
pdb_struct,
secstruct_program,
domain_program,
include_310_helices,
include_pi_helices,
use_numeric,
sse_id_list,
use_hk,
min_sse_len,
True, # build_distance_matrix
chainid,
domainid)
i = 1
for tableau in tableaux_list:
n = len(tableau)
permutation = range(n) # used to permute rows/cols: null permutation
if do_shuffle:
random.shuffle(permutation) # actually permute for shuffle mode
if verbose:
sys.stderr.write('permutation is: ' + str(permutation)+'\n')
sys.stdout.write('permutation = ' + ','.join([str(x+1) for x in permutation]) + '\n')
if i > 1:
sys.stdout.write('\ndomain ' + str(i) + ':\n')
if fortran_format:
sys.stdout.write("%7.7s %4d\n" % (pdbid.upper(), n))
if use_old_format:
if build_distance_matrix:
write_distmatrix_old_format(n, tableau, ssestr_list[i-1])
else:
write_tableau_old_format(n, tableau, ssestr_list[i-1])
else:
write_tableau(n, tableau, permutation, use_numeric,
fortran_format, build_distance_matrix)
if build_both:
write_tableau(n, distmatrix_list[i-1],
permutation, use_numeric,
fortran_format, True)
i += 1
finally:
if used_tmp_file:
cleanup_tmpdir(TMPDIR)
if savefilename:
if verbose:
sys.stderr.write('writing tableau to ' + savefilename +'\n')
fh = open(savefilename, "w")
if len(tableaux_list) > 1:
sys.stderr.write('WARNING: only saving first tableau in list\n')
        if build_distance_matrix:
            # the first entry of tableaux_list is the distance matrix here
            pickle.dump(tableaux_list[0], fh)
        elif use_numeric:
            # Numeric/numpy seems to have no 'packed' format for symmetric
            # matrices, so we just have to dump the whole thing.
            pickle.dump(tableaux_list[0], fh)
else:
pickle.dump(PTTableauPacked(tableaux_list[0]), fh)
fh.close()
if __name__ == "__main__":
warnings.filterwarnings('ignore', 'tempnam', RuntimeWarning)
main()
|
NirBenTalLab/proorigami-ptgraph
|
pytableaucreate.py
|
Python
|
mit
| 24,326
|
[
"Biopython"
] |
5fd27c760569449ed099a294f2f65bdef3998007b11a5ed24bdd54f4794c4e4d
|
import enum
import struct
class SpecialSectionNumber(enum.IntEnum):
UNDEFINED = 0
ABSOLUTE = -1
DEBUG = -2
class StorageClass(enum.IntEnum):
END_OF_FUNCTION = -1
NULL = 0
AUTOMATIC = 1
EXTERNAL = 2
STATIC = 3
REGISTER = 4
EXTERNAL_DEF = 5
LABEL = 6
UNDEFINED_LABEL = 7
MEMBER_OF_STRUCT = 8
ARGUMENT = 9
STRUCT_TAG = 10
MEMBER_OF_UNION = 11
UNION_TAG = 12
TYPE_DEFINITION = 13
UNDEFINED_STATIC = 14
ENUM_TAG = 15
MEMBER_OF_ENUM = 16
REGISTER_PARAM = 17
BIT_FIELD = 18
BLOCK = 100
FUNCTION = 101
END_OF_STRUCT = 102
FILE = 103
    SECTION = 104
WEAK_EXTERNAL = 105
CLR_TOKEN = 107
class BaseType(enum.IntEnum):
NULL = 0
VOID = 1
CHAR = 2
SHORT = 3
INT = 4
LONG = 5
FLOAT = 6
DOUBLE = 7
STRUCT = 8
UNION = 9
ENUM = 10
MOE = 11
BYTE = 12
WORD = 13
UINT = 14
DWORD = 15
class ComplexType(enum.IntEnum):
NULL = 0
POINTER = 1
FUNCTION = 2
ARRAY = 3
def mktype(base, comp):
return (comp << 8) + base
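# For example, mktype(BaseType.INT, ComplexType.FUNCTION) == 0x0204: the
# complex type occupies the high byte and the base type the low byte.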
class SymbolRecord:
record_struct = struct.Struct('<8sLhHBB')
def __init__(self, name, typ=None, section_number=SpecialSectionNumber.UNDEFINED, storage_class=StorageClass.NULL):
self.name = name
self.value = None
self.section_number = section_number
self.type = typ or 0
self.storage_class = storage_class
self.aux_records = []
def pack(self):
packed_aux_records = b''.join(self.aux_records)
if len(packed_aux_records) % 18 != 0:
raise ValueError('auxiliary records length must be a multiple of 18')
return self.record_struct.pack(
self.name,
self.value,
self.section_number,
self.type,
self.storage_class,
len(self.aux_records)
) + packed_aux_records
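# Minimal usage sketch (illustrative, not part of the original module): build
# an external function symbol and pack it into an 18-byte COFF symbol record.
# Note that `value` starts as None and must be set before calling pack().
#
#   sym = SymbolRecord(b'my_func',
#                      typ=mktype(BaseType.INT, ComplexType.FUNCTION),
#                      storage_class=StorageClass.EXTERNAL)
#   sym.section_number = 1
#   sym.value = 0
#   record = sym.pack()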
|
d3dave/cough
|
cough/symbol.py
|
Python
|
mit
| 1,935
|
[
"MOE"
] |
37c23a489ea0d683e78cdfd858f5be16bca3ece3678165479e911c5b9171971d
|
#!/usr/bin/env python
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# this script is used to produce the dictionary which will contains the data
# about the restriction enzymes from the Emboss/Rebase data files
# namely
# emboss_e.### (description of the sites),
# emboss_r.### (origin, methylation, references)
# emboss_s.### (suppliers)
# where ### is a number of three digits : 1 for the year two for the month
#
# very dirty implementation but it does the job, so...
# Not very quick either but you are not supposed to use it frequently.
#
# The results are stored in
# path/to/site-packages/Bio/Restriction/Restriction_Dictionary.py
# the file contains two dictionary:
# 'rest_dict' which contains the data for the enzymes
# and
# 'suppliers' which map the name of the suppliers to their abbreviation.
#
"""Convert a series of Rebase files into a Restriction_Dictionary.py module.
The Rebase files are in the emboss format:
emboss_e.### -> contains information about the restriction sites.
emboss_r.### -> contains general information about the enzymes.
emboss_s.### -> contains information about the suppliers.
Here ### is the 3 digit REBASE release number (e.g. 312). The first
digit is the last digit of the year (e.g. 3 for 2013) and the two last the
month (e.g. 12 for December).
These files are available by FTP from ftp://ftp.neb.com/pub/rebase/ which
should allow automated fetching (see the update code and RanaConfig.py).
In addition there are links on this HTML page which require manual download
and renaming of the files: http://rebase.neb.com/rebase/rebase.f37.html
This Python file is intended to be used via the scripts Scripts/Restriction/*.py
only.
"""
from __future__ import print_function
from Bio._py3k import input as _input
import os
import itertools
import time
import sys
import shutil
import optparse
from functools import reduce
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import Bio.Restriction.Restriction
from Bio.Restriction.Restriction import AbstractCut, RestrictionType, NoCut, OneCut
from Bio.Restriction.Restriction import TwoCuts, Meth_Dep, Meth_Undep, Palindromic
from Bio.Restriction.Restriction import NonPalindromic, Unknown, Blunt, Ov5, Ov3
from Bio.Restriction.Restriction import NotDefined, Defined, Ambiguous
from Bio.Restriction.Restriction import Commercially_available, Not_available
import Bio.Restriction.RanaConfig as config
from Bio.Restriction._Update.Update import RebaseUpdate
from Bio.Restriction.Restriction import *
dna_alphabet = {'A':'A', 'C':'C', 'G':'G', 'T':'T',
'R':'AG', 'Y':'CT', 'W':'AT', 'S':'CG', 'M':'AC', 'K':'GT',
'H':'ACT', 'B':'CGT', 'V':'ACG', 'D':'AGT',
'N':'ACGT',
'a': 'a', 'c': 'c', 'g': 'g', 't': 't',
'r':'ag', 'y':'ct', 'w':'at', 's':'cg', 'm':'ac', 'k':'gt',
'h':'act', 'b':'cgt', 'v':'acg', 'd':'agt',
'n':'acgt'}
complement_alphabet = {'A':'T', 'T':'A', 'C':'G', 'G':'C','R':'Y', 'Y':'R',
'W':'W', 'S':'S', 'M':'K', 'K':'M', 'H':'D', 'D':'H',
'B':'V', 'V':'B', 'N':'N','a':'t', 'c':'g', 'g':'c',
't':'a', 'r':'y', 'y':'r', 'w':'w', 's':'s','m':'k',
'k':'m', 'h':'d', 'd':'h', 'b':'v', 'v':'b', 'n':'n'}
enzymedict = {}
suppliersdict = {}
classdict = {}
typedict = {}
class OverhangError(ValueError):
"""Exception for dealing with overhang."""
pass
def BaseExpand(base):
"""BaseExpand(base) -> string.
    Given a degenerate base, returns its meaning in the IUPAC alphabet.
i.e:
b= 'A' -> 'A'
b= 'N' -> 'ACGT'
etc..."""
base = base.upper()
return dna_alphabet[base]
def regex(site):
"""regex(site) -> string.
Construct a regular expression from a DNA sequence.
i.e.:
site = 'ABCGN' -> 'A[CGT]CG.'"""
reg_ex = str(site)
for base in reg_ex:
if base in ('A', 'T', 'C', 'G', 'a', 'c', 'g', 't'):
pass
if base in ('N', 'n'):
reg_ex = '.'.join(reg_ex.split('N'))
reg_ex = '.'.join(reg_ex.split('n'))
if base in ('R', 'Y', 'W', 'M', 'S', 'K', 'H', 'D', 'B', 'V'):
expand = '['+ str(BaseExpand(base))+']'
reg_ex = expand.join(reg_ex.split(base))
return reg_ex
def is_palindrom(sequence):
"""is_palindrom(sequence) -> bool.
    True if the sequence is a palindrome.
sequence is a Seq object."""
return str(sequence) == str(sequence.reverse_complement())
def LocalTime():
"""LocalTime() -> string.
    LocalTime calculates the extension for the emboss files for the current year and
month."""
t = time.gmtime()
year = str(t.tm_year)[-1]
month = str(t.tm_mon)
if len(month) == 1:
month = '0' + month
return year+month
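# For example, a run in December 2013 (tm_year == 2013, tm_mon == 12) gives
# LocalTime() == '312', matching file names such as emboss_e.312.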
class newenzyme(object):
"""construct the attributes of the enzyme corresponding to 'name'."""
def __init__(cls, name):
cls.opt_temp = 37
cls.inact_temp = 65
cls.substrat = 'DNA'
target = enzymedict[name]
cls.site = target[0]
cls.size = target[1]
cls.suppl = tuple(target[9])
cls.freq = target[11]
cls.ovhg = target[13]
cls.ovhgseq = target[14]
cls.bases = ()
#
# Is the site palindromic?
        # Important for the way the DNA is searched for the site.
        # Palindromic sites need to be looked for on only one strand.
        # Non-palindromic sites need to be searched for on the reverse
        # complement as well.
#
if target[10]:
cls.bases += ('Palindromic',)
else:
cls.bases += ('NonPalindromic',)
#
# Number of cut the enzyme produce.
# 0 => unknown, the enzyme has not been fully characterised.
        # 2 => 1 cut (because one cut is realised by cutting 2 strands),
# 4 => 2 cuts, same logic.
# A little bit confusing but it is the way EMBOSS/Rebase works.
#
if not target[2]:
#
# => undefined enzymes, nothing to be done.
#
cls.bases += ('NoCut', 'Unknown', 'NotDefined')
cls.fst5 = None
cls.fst3 = None
cls.scd5 = None
cls.scd3 = None
cls.ovhg = None
cls.ovhgseq = None
else:
#
# we will need to calculate the overhang.
#
if target[2] == 2:
cls.bases += ('OneCut',)
cls.fst5 = target[4]
cls.fst3 = target[5]
cls.scd5 = None
cls.scd3 = None
else:
cls.bases += ('TwoCuts',)
cls.fst5 = target[4]
cls.fst3 = target[5]
cls.scd5 = target[6]
cls.scd3 = target[7]
#
# Now, prepare the overhangs which will be added to the DNA
# after the cut.
# Undefined enzymes will not be allowed to catalyse,
# they are not available commercially anyway.
            # I assumed that if an enzyme cuts twice the overhang will be of
            # the same kind. The only exception is HaeIV. I do not deal
            # with that at the moment (i.e. I don't include it; this still
            # needs to be fixed).
# They generally cut outside their recognition site and
# therefore the overhang is undetermined and dependent of
# the DNA sequence upon which the enzyme act.
#
if target[3]:
#
# rebase field for blunt: blunt == 1, other == 0.
# The enzyme is blunt. No overhang.
#
cls.bases += ('Blunt', 'Defined')
cls.ovhg = 0
elif isinstance(cls.ovhg, int):
#
# => overhang is sequence dependent
#
if cls.ovhg > 0:
#
# 3' overhang, ambiguous site (outside recognition site
# or site containing ambiguous bases (N, W, R,...)
#
cls.bases += ('Ov3', 'Ambiguous')
elif cls.ovhg < 0:
#
# 5' overhang, ambiguous site (outside recognition site
# or site containing ambiguous bases (N, W, R,...)
#
cls.bases += ('Ov5', 'Ambiguous')
else:
#
# cls.ovhg is a string => overhang is constant
#
if cls.fst5 - (cls.fst3 + cls.size) < 0:
cls.bases += ('Ov5', 'Defined')
cls.ovhg = - len(cls.ovhg)
else:
cls.bases += ('Ov3', 'Defined')
cls.ovhg = + len(cls.ovhg)
#
        # Next class: sensitivity to methylation.
        # Set by EmbossMixer from the emboss_r.txt file.
        # Not really methylation dependent at the moment, stands rather for
        # 'is the site methylable?'.
        # Proper methylation sensitivity has yet to be implemented.
# But the class is there for further development.
#
if target[8]:
cls.bases += ('Meth_Dep', )
cls.compsite = target[12]
else:
cls.bases += ('Meth_Undep',)
cls.compsite = target[12]
#
        # The next class will allow selecting enzymes according to their
        # suppliers. Not essential but can be useful.
#
if cls.suppl:
cls.bases += ('Commercially_available', )
else:
cls.bases += ('Not_available', )
cls.bases += ('AbstractCut', 'RestrictionType')
cls.__name__ = name
cls.results = None
cls.dna = None
cls.__bases__ = cls.bases
cls.charac = (cls.fst5, cls.fst3, cls.scd5, cls.scd3, cls.site)
if not target[2] and cls.suppl:
supp = ', '.join(suppliersdict[s][0] for s in cls.suppl)
print('WARNING : It seems that %s is both commercially available\
\n\tand its characteristics are unknown. \
\n\tThis seems counter-intuitive.\
\n\tThere is certainly an error either in ranacompiler or\
\n\tin this REBASE release.\
\n\tThe supplier is : %s.' % (name, supp))
return
class TypeCompiler(object):
"""Build the different types possible for Restriction Enzymes"""
def __init__(self):
"""TypeCompiler() -> new TypeCompiler instance."""
pass
def buildtype(self):
"""TC.buildtype() -> generator.
build the new types that will be needed for constructing the
restriction enzymes."""
baT = (AbstractCut, RestrictionType)
cuT = (NoCut, OneCut, TwoCuts)
meT = (Meth_Dep, Meth_Undep)
paT = (Palindromic, NonPalindromic)
ovT = (Unknown, Blunt, Ov5, Ov3)
deT = (NotDefined, Defined, Ambiguous)
coT = (Commercially_available, Not_available)
All = (baT, cuT, meT, paT, ovT, deT, coT)
#
# Now build the types. Only the most obvious are left out.
        # Update: even the most obvious turn out not to be so obvious.
# emboss_*.403 AspCNI is unknown and commercially available.
# So now do not remove the most obvious.
#
types = [(p, c, o, d, m, co, baT[0], baT[1])
for p in paT for c in cuT for o in ovT
for d in deT for m in meT for co in coT]
        n = 1
for ty in types:
dct = {}
for t in ty:
dct.update(t.__dict__)
#
# here we need to customize the dictionary.
# i.e. types deriving from OneCut have always scd5 and scd3
# equal to None. No need therefore to store that in a specific
# enzyme of this type. but it then need to be in the type.
#
dct['results'] = []
dct['substrat'] = 'DNA'
dct['dna'] = None
if t == NoCut:
dct.update({'fst5': None, 'fst3': None,
'scd5': None, 'scd3': None,
'ovhg': None, 'ovhgseq': None})
elif t == OneCut:
dct.update({'scd5': None, 'scd3': None})
class klass(type):
def __new__(cls):
return type.__new__(cls, 'type%i'%n, ty, dct)
def __init__(cls):
super(klass, cls).__init__('type%i'%n, ty, dct)
yield klass()
n+=1
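# For reference, the cross product above (2 palindromicity classes x 3 cut
# counts x 4 overhang kinds x 3 definition states x 2 methylation classes x
# 2 availability classes) yields 2*3*4*3*2*2 = 288 candidate types.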
start = '\n\
#!/usr/bin/env python\n\
#\n\
# Restriction Analysis Libraries.\n\
# Copyright (C) 2004. Frederic Sohm.\n\
#\n\
# This code is part of the Biopython distribution and governed by its\n\
# license. Please see the LICENSE file that should have been included\n\
# as part of this package.\n\
#\n\
# This file is automatically generated - do not edit it by hand! Instead,\n\
# use the tool Scripts/Restriction/ranacompiler.py which in turn uses\n\
# Bio/Restriction/_Update/RestrictionCompiler.py\n\
#\n\
# The following dictionaries used to be defined in one go, but that does\n\
# not work on Jython due to JVM limitations. Therefore we break this up\n\
# into steps, using temporary functions to avoid the JVM limits.\n\
\n\n'
class DictionaryBuilder(object):
def __init__(self, e_mail='', ftp_proxy=''):
"""DictionaryBuilder([e_mail[, ftp_proxy]) -> DictionaryBuilder instance.
If the emboss files used for the construction need to be updated this
        class will download them if the ftp connection is correctly set,
        either in RanaConfig.py or given at run time.
e_mail is the e-mail address used as password for the anonymous
ftp connection.
proxy is the ftp_proxy to use if any."""
self.rebase_pass = e_mail or config.Rebase_password
self.proxy = ftp_proxy or config.ftp_proxy
def build_dict(self):
"""DB.build_dict() -> None.
Construct the dictionary and build the files containing the new
dictionaries."""
#
# first parse the emboss files.
#
emboss_e, emboss_r, emboss_s = self.lastrebasefile()
#
# the results will be stored into enzymedict.
#
self.information_mixer(emboss_r, emboss_e, emboss_s)
emboss_r.close()
emboss_e.close()
emboss_s.close()
#
# we build all the possible type
#
tdct = {}
for klass in TypeCompiler().buildtype():
exec(klass.__name__ +'= klass')
exec("tdct['"+klass.__name__+"'] = klass")
#
# Now we build the enzymes from enzymedict
# and store them in a dictionary.
# The type we will need will also be stored.
#
for name in enzymedict:
#
# the class attributes first:
#
cls = newenzyme(name)
#
# Now select the right type for the enzyme.
#
bases = cls.bases
clsbases = tuple([eval(x) for x in bases])
typestuff = ''
for n, t in tdct.items():
#
# if the bases are the same. it is the right type.
# create the enzyme and remember the type
#
if t.__bases__ == clsbases:
typestuff = t
typename = t.__name__
continue
#
# now we build the dictionaries.
#
dct = dict(cls.__dict__)
del dct['bases']
del dct['__bases__']
del dct['__name__']# no need to keep that, it's already in the type.
classdict[name] = dct
commonattr = ['fst5', 'fst3', 'scd5', 'scd3', 'substrat',
'ovhg', 'ovhgseq', 'results', 'dna']
if typename in typedict:
typedict[typename][1].append(name)
else:
enzlst= []
tydct = dict(typestuff.__dict__)
tydct = dict([(k, v) for k, v in tydct.items() if k in commonattr])
enzlst.append(name)
typedict[typename] = (bases, enzlst)
for letter in cls.__dict__['suppl']:
supplier = suppliersdict[letter]
suppliersdict[letter][1].append(name)
if not classdict or not suppliersdict or not typedict:
print('One of the new dictionaries is empty.')
print('Check the integrity of the emboss file before continuing.')
print('Update aborted.')
sys.exit()
#
# How many enzymes this time?
#
print('\nThe new database contains %i enzymes.\n' % len(classdict))
#
# the dictionaries are done. Build the file
#
# update = config.updatefolder
update = os.getcwd()
with open(os.path.join(update, 'Restriction_Dictionary.py'), 'w') as results:
print('Writing the dictionary containing the new Restriction classes...')
results.write(start)
results.write('rest_dict = {}\n')
for name in sorted(classdict):
results.write("def _temp():\n")
results.write(" return {\n")
for key, value in classdict[name].items():
results.write(" %s: %s,\n" % (repr(key), repr(value)))
results.write(" }\n")
results.write("rest_dict[%s] = _temp()\n" % repr(name))
results.write("\n")
print('OK.\n')
print('Writing the dictionary containing the suppliers data...')
results.write('suppliers = {}\n')
for name in sorted(suppliersdict):
results.write("def _temp():\n")
results.write(" return (\n")
for value in suppliersdict[name]:
results.write(" %s,\n" % repr(value))
results.write(" )\n")
results.write("suppliers[%s] = _temp()\n" % repr(name))
results.write("\n")
print('OK.\n')
print('Writing the dictionary containing the Restriction types...')
results.write('typedict = {}\n')
for name in sorted(typedict):
results.write("def _temp():\n")
results.write(" return (\n")
for value in typedict[name]:
results.write(" %s,\n" % repr(value))
results.write(" )\n")
results.write("typedict[%s] = _temp()\n" % repr(name))
results.write("\n")
# I had wanted to do "del _temp" at each stage (just for clarity), but
# that pushed the code size just over the Jython JVM limit. We include
# one the final "del _temp" to clean up the namespace.
results.write("del _temp\n")
results.write("\n")
print('OK.\n')
return
def install_dict(self):
"""DB.install_dict() -> None.
Install the newly created dictionary in the site-packages folder.
May need super user privilege on some architectures."""
print('\n ' +'*'*78 + ' \n')
print('\n\t\tInstalling Restriction_Dictionary.py')
try:
import Bio.Restriction.Restriction_Dictionary as rd
except ImportError:
print('\
\n Unable to locate the previous Restriction_Dictionary.py module\
\n Aborting installation.')
sys.exit()
#
# first save the old file in Updates
#
old = os.path.join(os.path.split(rd.__file__)[0],
'Restriction_Dictionary.py')
# update_folder = config.updatefolder
update_folder = os.getcwd()
shutil.copyfile(old, os.path.join(update_folder,
'Restriction_Dictionary.old'))
#
# Now test and install.
#
new = os.path.join(update_folder, 'Restriction_Dictionary.py')
try:
exec(compile(open(new).read(), new, 'exec'))
print('\
\n\tThe new file seems ok. Proceeding with the installation.')
except SyntaxError:
print('\
\n The new dictionary file is corrupted. Aborting the installation.')
return
try:
shutil.copyfile(new, old)
print('\n\t Everything ok. If you need it, a version of the old\
\n\t dictionary has been saved in the Updates folder under\
\n\t the name Restriction_Dictionary.old.')
print('\n ' +'*'*78 + ' \n')
except IOError:
print('\n ' +'*'*78 + ' \n')
print('\
\n\t WARNING : Impossible to install the new dictionary.\
\n\t Are you sure you have write permission to the folder :\n\
\n\t %s ?\n\n' % os.path.split(old)[0])
return self.no_install()
return
def no_install(self):
"""BD.no_install() -> None.
Build the new dictionary but do not install it."""
print('\n ' +'*'*78 + '\n')
# update = config.updatefolder
try:
import Bio.Restriction.Restriction_Dictionary as rd
except ImportError:
print('\
\n Unable to locate the previous Restriction_Dictionary.py module\
\n Aborting installation.')
sys.exit()
#
# first save the old file in Updates
#
old = os.path.join(os.path.split(rd.__file__)[0],
'Restriction_Dictionary.py')
update = os.getcwd()
shutil.copyfile(old, os.path.join(update, 'Restriction_Dictionary.old'))
places = update, os.path.split(Bio.Restriction.Restriction.__file__)[0]
print("\t\tCompilation of the new dictionary : OK.\
\n\t\tInstallation : No.\n\
\n You will find the newly created 'Restriction_Dictionary.py' file\
\n in the folder : \n\
\n\t%s\n\
\n Make a copy of 'Restriction_Dictionary.py' and place it with \
\n the other Restriction libraries.\n\
\n note : \
\n This folder should be :\n\
\n\t%s\n" % places)
print('\n ' +'*'*78 + '\n')
return
def lastrebasefile(self):
"""BD.lastrebasefile() -> None.
Check the emboss files are up to date and download them if they are not.
"""
embossnames = ('emboss_e', 'emboss_r', 'emboss_s')
#
# first check if we have the last update:
#
emboss_now = ['.'.join((x, LocalTime())) for x in embossnames]
update_needed = False
# dircontent = os.listdir(config.Rebase) # local database content
dircontent = os.listdir(os.getcwd())
base = os.getcwd() # added for biopython current directory
for name in emboss_now:
if name in dircontent:
pass
else:
update_needed = True
if not update_needed:
#
# nothing to be done
#
print('\n Using the files : %s'% ', '.join(emboss_now))
return tuple(open(os.path.join(base, n)) for n in emboss_now)
else:
#
# may be download the files.
#
print('\n The rebase files are more than one month old.\
\n Would you like to update them before proceeding? (y/n)')
r = _input(' update [n] >>> ')
if r in ['y', 'yes', 'Y', 'Yes']:
updt = RebaseUpdate(self.rebase_pass, self.proxy)
updt.openRebase()
updt.getfiles()
updt.close()
print('\n Update complete. Creating the dictionaries.\n')
print('\n Using the files : %s'% ', '.join(emboss_now))
return tuple(open(os.path.join(base, n)) for n in emboss_now)
else:
#
# we will use the last files found without updating.
# But first we check we have some file to use.
#
class NotFoundError(Exception):
pass
for name in embossnames:
try:
for file in dircontent:
if file.startswith(name):
break
else:
pass
raise NotFoundError
except NotFoundError:
print("\nNo %s file found. Upgrade is impossible.\n"%name)
sys.exit()
continue
pass
#
# now find the last file.
#
last = [0]
for file in dircontent:
fs = file.split('.')
try:
if fs[0] in embossnames and int(fs[1]) > int(last[-1]):
if last[0]:
last.append(fs[1])
else:
last[0] = fs[1]
else:
continue
except ValueError:
continue
last.sort()
last = last[::-1]
if int(last[-1]) < 100:
last[0], last[-1] = last[-1], last[0]
for number in last:
files = [(name, name+'.%s'%number) for name in embossnames]
strmess = '\nLast EMBOSS files found are :\n'
try:
for name, file in files:
if os.path.isfile(os.path.join(base, file)):
strmess += '\t%s.\n'%file
else:
raise ValueError
print(strmess)
emboss_e = open(os.path.join(base, 'emboss_e.%s'%number), 'r')
emboss_r = open(os.path.join(base, 'emboss_r.%s'%number), 'r')
emboss_s = open(os.path.join(base, 'emboss_s.%s'%number), 'r')
return emboss_e, emboss_r, emboss_s
except ValueError:
continue
def parseline(self, line):
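"""Parse one emboss_e record into the attribute list used to build the enzyme class.
The returned list extends the raw record with (in order) the palindromy flag,
the site frequency, the compiled regular-expression string, the overhang
length and the overhang sequence.
"""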
line = [line[0]]+[line[1].upper()]+[int(i) for i in line[2:9]]+line[9:]
name = line[0].replace("-", "_").replace(".", "_")
site = line[1] # sequence of the recognition site
dna = Seq(site, generic_dna)
size = line[2] # size of the recognition site
#
# Calculate the overhang.
#
fst5 = line[5] # first site sense strand
fst3 = line[6] # first site antisense strand
scd5 = line[7] # second site sense strand
scd3 = line[8] # second site antisense strand
#
# the overhang is the difference between the two cut
#
ovhg1 = fst5 - fst3
ovhg2 = scd5 - scd3
#
# 0 means 'do not cut' in rebase, so the negative numbers are off by one;
# we therefore add 1 to negative sites for now.
# We will deal with the record later.
#
if fst5 < 0:
fst5 += 1
if fst3 < 0:
fst3 += 1
if scd5 < 0:
scd5 += 1
if scd3 < 0:
scd3 += 1
if ovhg2 != 0 and ovhg1 != ovhg2:
#
# different length of the overhang of the first and second cut
# it's a pain to deal with and at the moment it concerns only
# one enzyme which is not commercially available (HaeIV).
# So we don't deal with it but we check the progression
# of the affair.
# Should HaeIV become commercially available or other similar
# new enzymes be added, this might be modified.
#
print('\
\nWARNING : %s cut twice with different overhang length each time.\
\n\tUnable to deal with this behaviour. \
\n\tThis enzyme will not be included in the database. Sorry.' %name)
print('\tChecking...')
raise OverhangError
if 0 <= fst5 <= size and 0 <= fst3 <= size:
#
# cut inside recognition site
#
if fst5 < fst3:
#
# 5' overhang
#
ovhg1 = ovhgseq = site[fst5:fst3]
elif fst5 > fst3:
#
# 3' overhang
#
ovhg1 = ovhgseq = site[fst3:fst5]
else:
#
# blunt
#
ovhg1 = ovhgseq = ''
for base in 'NRYWMSKHDBV':
if base in ovhg1:
#
# site and overhang degenerated
#
ovhgseq = ovhg1
if fst5 < fst3:
ovhg1 = - len(ovhg1)
else:
ovhg1 = len(ovhg1)
break
else:
continue
elif 0 <= fst5 <= size:
#
# 5' cut inside the site 3' outside
#
if fst5 < fst3:
#
# 3' cut after the site
#
ovhgseq = site[fst5:] + (fst3 - size) * 'N'
elif fst5 > fst3:
#
# 3' cut before the site
#
ovhgseq = abs(fst3) * 'N' + site[:fst5]
else:
#
# blunt outside
#
ovhg1 = ovhgseq = ''
elif 0 <= fst3 <= size:
#
# 3' cut inside the site, 5' outside
#
if fst5 < fst3:
#
# 5' cut before the site
#
ovhgseq = abs(fst5) * 'N' + site[:fst3]
elif fst5 > fst3:
#
# 5' cut after the site
#
ovhgseq = site[fst3:] + (fst5 - size) * 'N'
else:
#
# should not happen
#
raise ValueError('Error in #1')
elif fst3 < 0 and size < fst5:
#
# 3' overhang. site is included.
#
ovhgseq = abs(fst3)*'N' + site + (fst5-size)*'N'
elif fst5 < 0 and size < fst3:
#
# 5' overhang. site is included.
#
ovhgseq = abs(fst5)*'N' + site + (fst3-size)*'N'
else:
#
# 5' and 3' outside of the site
#
ovhgseq = 'N' * abs(ovhg1)
#
# Now line[5] to [8] are the location of the cut but we have to
# deal with the weird mathematics of biologists.
#
# EMBOSS sequence numbering give:
# DNA = 'a c g t A C G T'
# -1 1 2 3 4
#
# Biologists do not know about 0. Too much use of latin certainly.
#
# To compensate, we add 1 to the positions if they are negative.
# No need to modify 0 as it means no cut and will not be used.
# Positive numbers should be ok since our sequence starts at 1.
#
# Moreover line[6] and line[8] represent cut on the reverse strand.
# They will be used for non palindromic sites and sre.finditer
# will detect the site in inverse orientation so we need to add the
# length of the site to compensate (+1 if they are negative).
#
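# Hypothetical illustration (numbers made up, not taken from any real
# enzyme): for a 6 bp site, a reverse-strand cut recorded at +5 becomes
# 5 - 6 = -1 below, a reverse-strand cut at -2 becomes -2 - 6 + 1 = -7,
# and a sense-strand cut at -3 simply becomes -2.
#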
for x in (5, 7):
if line[x] < 0:
line[x] += 1
for x in (6, 8):
if line[x] > 0:
line[x] -= size
elif line[x] < 0:
line[x] = line[x] - size + 1
#
# now is the site palindromic?
# produce the regular expression which correspond to the site.
# tag of the regex will be the name of the enzyme for palindromic
# enzymesband two tags for the other, the name for the sense sequence
# and the name with '_as' at the end for the antisense sequence.
#
rg = ''
if is_palindrom(dna):
line.append(True)
rg = ''.join(['(?P<', name, '>', regex(site.upper()), ')'])
else:
line.append(False)
sense = ''.join(['(?P<', name, '>', regex(site.upper()), ')'])
antisense = ''.join(['(?P<', name, '_as>',
regex(dna.reverse_complement()), ')'])
rg = sense + '|' + antisense
#
# exact frequency of the site. (ie freq(N) == 1, ...)
#
f = [4/len(dna_alphabet[l]) for l in site.upper()]
freq = reduce(lambda x, y: x*y, f)
line.append(freq)
#
# append regex and ovhg1; they were not appended earlier so as not to
# break the factory class. Simply too lazy to make the changes there.
#
line.append(rg)
line.append(ovhg1)
line.append(ovhgseq)
return line
def removestart(self, file):
#
# remove the heading of the file.
#
return [l for l in itertools.dropwhile(lambda l:l.startswith('#'), file)]
def getblock(self, file, index):
#
# emboss_r.txt, separation between blocks is //
#
take = itertools.takewhile
block = [l for l in take(lambda l: not l.startswith('//'), file[index:])]
index += len(block)+1
return block, index
def get(self, block):
#
# take what we want from the block.
# Each block correspond to one enzyme.
# block[0] => enzyme name
# block[3] => methylation (position and type)
# block[5] => suppliers (as a string of single letter)
#
bl3 = block[3].strip()
if not bl3:
bl3 = False # site is not methylable
return (block[0].strip(), bl3, block[5].strip())
def information_mixer(self, file1, file2, file3):
#
# Mix all the information from the 3 files and produce a coherent
# restriction record.
#
methfile = self.removestart(file1)
sitefile = self.removestart(file2)
supplier = self.removestart(file3)
i1, i2 = 0, 0
try:
while True:
block, i1 = self.getblock(methfile, i1)
bl = self.get(block)
line = (sitefile[i2].strip()).split()
name = line[0]
if name == bl[0]:
line.append(bl[1]) # -> methylation
line.append(bl[2]) # -> suppliers
else:
bl = self.get(oldblock)
if line[0] == bl[0]:
line.append(bl[1])
line.append(bl[2])
i2 += 1
else:
raise TypeError
oldblock = block
i2 += 1
try:
line = self.parseline(line)
except OverhangError: # overhang error
n = name # do not include the enzyme
if not bl[2]:
print('Anyway, %s is not commercially available.\n' %n)
else:
print('Unfortunately, %s is commercially available.\n'%n)
continue
# Hyphens and dots can't be used as a Python name, nor as a
# group name in a regular expression. e.g. 'CviKI-1', 'R2.BceSIV'
name = name.replace("-", "_").replace(".", "_")
if name in enzymedict:
#
# deal with TaqII and its two sites.
#
print('\nWARNING : %s has two different sites.\n' % name)
other = line[0].replace("-", "_").replace(".", "_")
dna = Seq(line[1], generic_dna)
sense1 = regex(dna)
antisense1 = regex(str(dna.reverse_complement()))
dna = Seq(enzymedict[other][0], generic_dna)
sense2 = regex(dna)
antisense2 = regex(dna.reverse_complement())
sense = '(?P<'+other+'>'+sense1+'|'+sense2+')'
antisense = '(?P<'+other+'_as>'+antisense1+'|'+antisense2 + ')'
reg = sense + '|' + antisense
line[1] = line[1] + '|' + enzymedict[other][0]
line[-1] = reg
#
# the data to produce the enzyme class are then stored in
# enzymedict.
#
enzymedict[name] = line[1:] # element zero was the name
except IndexError:
pass
for i in supplier:
#
# construction of the list of suppliers.
#
t = i.strip().split(' ', 1)
suppliersdict[t[0]] = (t[1], [])
return
def standalone():
parser = optparse.OptionParser()
add = parser.add_option
add('-i', '--install',
action="store_true",
dest='i',
default=False,
help="compile and install the newly created file. "
"default behaviour (without switch): "
"Compile the enzymes and store them in the Updates folder")
add('-m', '--e-mail',
action="store",
dest='rebase_password',
default='',
help="set the e-mail address to be used as password for the"
"anonymous ftp connection to Rebase.")
add('-p', '--proxy',
action="store",
dest='ftp_proxy',
default='',
help="set the proxy to be used by the ftp connection.")
options, args = parser.parse_args()
return options, args
if __name__ == '__main__':
options, args = standalone()
Builder = DictionaryBuilder(options.rebase_password, options.ftp_proxy)
Builder.build_dict()
if options.i:
Builder.install_dict()
else:
Builder.no_install()
sys.exit()
|
updownlife/multipleK
|
dependencies/biopython-1.65/Scripts/Restriction/ranacompiler.py
|
Python
|
gpl-2.0
| 39,012
|
[
"Biopython"
] |
afbd3f0a644e4358579ae8053660e00a35a90bde0a4cf92d0ae20d816bcf9814
|
"""Performance comparison between the Tensor / Numba implementations."""
import json
with open("../../paper.json", 'r') as f: cfg = json.load(f) # noqa
import numpy as np
import pandas as pd
from time import time as tst
from tensorpac.signals import pac_signals_wavelet
from tensorpac import Pac
from tensorpac.spectral import hilbertm
from tensorpac.methods import get_pac_fcn
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
sns.set_style("white")
plt.rc('font', family=cfg["font"])
###############################################################################
n_repetitions = 10
f_pha, f_amp = 10, 100
n_epochs, n_times = 30, 2000
sf = 512.
###############################################################################
# -----------------------------------------------------------------------------
# get tensor / numba implemented PAC methods
# -----------------------------------------------------------------------------
# first run could be slow due to numba caching
METH_TENSOR = get_pac_fcn(None, 18, .05, 'tensor', full=True)
METH_NUMBA = get_pac_fcn(None, 18, .05, 'numba', full=True)
# remove gaussian copula since there's no Numba implementation
METH_TENSOR.pop(6)
METH_NUMBA.pop(6)
n_meth = len(METH_TENSOR) + len(METH_NUMBA)
# -----------------------------------------------------------------------------
# DATA PREPARATION
# -----------------------------------------------------------------------------
# generate random data
data, time = pac_signals_wavelet(sf=sf, f_pha=f_pha, f_amp=f_amp, noise=.8,
n_epochs=n_epochs, n_times=n_times)
# extract phase / amplitude
p_obj = Pac(f_pha='lres', f_amp='lres')
pha = p_obj.filter(sf, data, ftype='phase', n_jobs=1)
amp = p_obj.filter(sf, data, ftype='amplitude', n_jobs=1)
# -----------------------------------------------------------------------------
# compute pac
# -----------------------------------------------------------------------------
meth_types, meth_names, computing_time = [], [], []
for meth in range(len(METH_TENSOR)):
# get both methods
meth_tensor = METH_TENSOR[meth + 1]
meth_numba = METH_NUMBA[meth + 1]
# get both names
meth_tensor_name = meth_tensor.func.__name__
meth_numba_name = meth_numba.func.__name__
assert meth_numba_name == f"{meth_tensor_name}_nb"
# compute several repetitions
for rep in range(n_repetitions):
# compute tensor based
start_ten = tst()
meth_tensor(pha, amp)
end_ten = tst()
computing_time += [end_ten - start_ten]
meth_names += [meth_tensor_name]
meth_types += ['Tensor']
# compute numba based
start_nb = tst()
meth_numba(pha, amp)
end_nb = tst()
computing_time += [end_nb - start_nb]
meth_names += [meth_tensor_name]
meth_types += ['Numba']
# -----------------------------------------------------------------------------
# build DataFrame and barplot the results
# -----------------------------------------------------------------------------
df = pd.DataFrame({'Computing time': computing_time, 'Method': meth_names,
'Implementation': meth_types})
plt.figure(figsize=(10, 9))
sns.barplot(x="Method", y="Computing time", hue="Implementation", data=df)
plt.title("Computing time comparison between the tensor and numba "
f"implementations\n(n_pha={len(p_obj.xvec)}, "
f"n_amp={len(p_obj.yvec)}, n_trials={n_epochs}, n_times={n_times})")
plt.xticks(rotation=10)
plt.tight_layout()
plt.savefig(f"../figures/r2_tensor_vs_numba.png", dpi=300, bbox_inches='tight')
plt.show()
|
EtienneCmb/tensorpac
|
paper/reviews/code/r2_numba_performance.py
|
Python
|
bsd-3-clause
| 3,659
|
[
"Gaussian"
] |
c03a8052094b54019e135266d8316f2535b8887a2f41e2e13432faea51319134
|
from __future__ import annotations
import http.server as server_base
import json
import logging
import multiprocessing
import sys
import time
import urllib.parse
import libtbx.phil
from cctbx import uctbx
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx.introspection import number_of_processors
from dials.algorithms.indexing import indexer
from dials.algorithms.integration.integrator import create_integrator
from dials.algorithms.profile_model.factory import ProfileModelFactory
from dials.algorithms.spot_finding import per_image_analysis
from dials.array_family import flex
from dials.command_line.find_spots import phil_scope as find_spots_phil_scope
from dials.command_line.index import phil_scope as index_phil_scope
from dials.command_line.integrate import phil_scope as integrate_phil_scope
from dials.util import Sorry, show_mail_handle_errors
from dials.util.options import ArgumentParser
logger = logging.getLogger("dials.command_line.find_spots_server")
help_message = """\
A client/server version of dials.find_spots with additional analysis including
estimation of resolution limits. Intended for quick feedback of image quality
during grid scans and data collections.
On the server machine::
dials.find_spots_server [nproc=8] [port=1234]
On the client machine::
dials.find_spots_client [host=hostname] [port=1234] [nproc=8] /path/to/image.cbf
The client will return a short xml string indicating the number of spots found
and several estimates of the resolution limit.
e.g.::
<response>
<image>/path/to/image_0001.cbf</image>
<spot_count>352</spot_count>
<spot_count_no_ice>263</spot_count_no_ice>
<d_min>1.46</d_min>
<d_min_method_1>1.92</d_min_method_1>
<d_min_method_2>1.68</d_min_method_2>
<total_intensity>56215</total_intensity>
</response>
* ``spot_count`` is the total number of spots found in the given image
* ``spot_count_no_ice`` is the number of spots found excluding those at resolutions
where ice rings may be found
* ``d_min_method_1`` is equivalent to distl's resolution estimate method 1
* ``d_min_method_2`` is equivalent to distl's resolution estimate method 2
* ``total_intensity`` is the total intensity of all strong spots excluding those
at resolutions where ice rings may be found
Any valid ``dials.find_spots`` parameter may be passed to
``dials.find_spots_client``, e.g.::
dials.find_spots_client /path/to/image.cbf min_spot_size=2 d_min=2
To stop the server::
dials.find_spots_client stop [host=hostname] [port=1234]
"""
stop = False
def _filter_by_resolution(experiments, reflections, d_min=None, d_max=None):
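"""Map the spot centroids to reciprocal space and keep only reflections within the given resolution limits."""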
reflections.centroid_px_to_mm(experiments)
reflections.map_centroids_to_reciprocal_space(experiments)
d_star_sq = flex.pow2(reflections["rlp"].norms())
reflections["d"] = uctbx.d_star_sq_as_d(d_star_sq)
# Filter based on resolution
if d_min is not None:
selection = reflections["d"] >= d_min
reflections = reflections.select(selection)
logger.debug(f"Selected {len(reflections)} reflections with d >= {d_min:f}")
# Filter based on resolution
if d_max is not None:
selection = reflections["d"] <= d_max
reflections = reflections.select(selection)
logger.debug(f"Selected {len(reflections)} reflections with d <= {d_max:f}")
return reflections
def work(filename, cl=None):
if cl is None:
cl = []
phil_scope = libtbx.phil.parse(
"""\
ice_rings {
filter = True
.type = bool
width = 0.004
.type = float(value_min=0.0)
}
index = False
.type = bool
integrate = False
.type = bool
indexing_min_spots = 10
.type = int(value_min=1)
"""
)
interp = phil_scope.command_line_argument_interpreter()
params, unhandled = interp.process_and_fetch(
cl, custom_processor="collect_remaining"
)
filter_ice = params.extract().ice_rings.filter
ice_rings_width = params.extract().ice_rings.width
index = params.extract().index
integrate = params.extract().integrate
indexing_min_spots = params.extract().indexing_min_spots
interp = find_spots_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.info("The following spotfinding parameters have been modified:")
logger.info(find_spots_phil_scope.fetch_diff(source=phil_scope).as_str())
params = phil_scope.extract()
# no need to write the hot mask in the server/client
params.spotfinder.write_hot_mask = False
experiments = ExperimentListFactory.from_filenames([filename])
if params.spotfinder.scan_range and len(experiments) > 1:
# This means we've imported a sequence of still images: select
# only the experiment, i.e. image, we're interested in
((start, end),) = params.spotfinder.scan_range
experiments = experiments[start - 1 : end]
# Avoid overhead of calculating per-pixel resolution masks in spotfinding
# and instead perform post-filtering of spot centroids by resolution
d_min = params.spotfinder.filter.d_min
d_max = params.spotfinder.filter.d_max
params.spotfinder.filter.d_min = None
params.spotfinder.filter.d_max = None
t0 = time.perf_counter()
reflections = flex.reflection_table.from_observations(experiments, params)
if d_min or d_max:
reflections = _filter_by_resolution(
experiments, reflections, d_min=d_min, d_max=d_max
)
t1 = time.perf_counter()
logger.info("Spotfinding took %.2f seconds", t1 - t0)
imageset = experiments.imagesets()[0]
reflections.centroid_px_to_mm(experiments)
reflections.map_centroids_to_reciprocal_space(experiments)
stats = per_image_analysis.stats_for_reflection_table(
reflections, filter_ice=filter_ice, ice_rings_width=ice_rings_width
)._asdict()
t2 = time.perf_counter()
logger.info("Resolution analysis took %.2f seconds", t2 - t1)
if index and stats["n_spots_no_ice"] > indexing_min_spots:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
interp = index_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.info("The following indexing parameters have been modified:")
index_phil_scope.fetch_diff(source=phil_scope).show()
params = phil_scope.extract()
if (
imageset.get_goniometer() is not None
and imageset.get_scan() is not None
and imageset.get_scan().is_still()
):
imageset.set_goniometer(None)
imageset.set_scan(None)
try:
idxr = indexer.Indexer.from_parameters(
reflections, experiments, params=params
)
indexing_results = []
idxr.index()
indexed_sel = idxr.refined_reflections.get_flags(
idxr.refined_reflections.flags.indexed
)
indexed_sel &= ~(
idxr.refined_reflections.get_flags(
idxr.refined_reflections.flags.centroid_outlier
)
)
for i_expt, expt in enumerate(idxr.refined_experiments):
sel = idxr.refined_reflections["id"] == i_expt
sel &= indexed_sel
indexing_results.append(
{
"crystal": expt.crystal.to_dict(),
"n_indexed": sel.count(True),
"fraction_indexed": sel.count(True) / sel.size(),
}
)
stats["lattices"] = indexing_results
stats["n_indexed"] = indexed_sel.count(True)
stats["fraction_indexed"] = indexed_sel.count(True) / len(reflections)
except Exception as e:
logger.error(e)
stats["error"] = str(e)
finally:
t3 = time.perf_counter()
logger.info("Indexing took %.2f seconds", t3 - t2)
if integrate and "lattices" in stats:
interp = integrate_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.error("The following integration parameters have been modified:")
integrate_phil_scope.fetch_diff(source=phil_scope).show()
params = phil_scope.extract()
try:
params.profile.gaussian_rs.min_spots = 0
experiments = idxr.refined_experiments
reference = idxr.refined_reflections
predicted = flex.reflection_table.from_predictions_multi(
experiments,
dmin=params.prediction.d_min,
dmax=params.prediction.d_max,
margin=params.prediction.margin,
force_static=params.prediction.force_static,
)
matched, reference, unmatched = predicted.match_with_reference(
reference
)
assert len(matched) == len(predicted)
assert matched.count(True) <= len(reference)
if matched.count(True) == 0:
raise Sorry(
"""
Invalid input for reference reflections.
Zero reference spots were matched to predictions
"""
)
elif matched.count(True) != len(reference):
logger.info("")
logger.info("*" * 80)
logger.info(
"Warning: %d reference spots were not matched to predictions",
len(reference) - matched.count(True),
)
logger.info("*" * 80)
logger.info("")
# Compute the profile model
experiments = ProfileModelFactory.create(params, experiments, reference)
# Compute the bounding box
predicted.compute_bbox(experiments)
# Create the integrator
integrator = create_integrator(params, experiments, predicted)
# Integrate the reflections
reflections = integrator.integrate()
# print len(reflections)
stats["integrated_intensity"] = flex.sum(
reflections["intensity.sum.value"]
)
except Exception as e:
logger.error(e)
stats["error"] = str(e)
finally:
t4 = time.perf_counter()
logger.info("Integration took %.2f seconds", t4 - t3)
return stats
class handler(server_base.BaseHTTPRequestHandler):
def do_GET(self):
"""Respond to a GET request."""
if self.path == "/Ctrl-C":
self.send_response(200)
self.end_headers()
global stop
stop = True
return
filename = self.path.split(";")[0]
params = self.path.split(";")[1:]
# If we're passing a url through, then unquote and ignore leading /
if "%3A//" in filename:
filename = urllib.parse.unquote(filename[1:])
d = {"image": filename}
try:
stats = work(filename, params)
d.update(stats)
response = 200
except Exception as e:
d["error"] = str(e)
response = 500
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps(d).encode()
self.wfile.write(response)
def serve(httpd):
try:
while not stop:
httpd.handle_request()
except KeyboardInterrupt:
pass
phil_scope = libtbx.phil.parse(
"""\
nproc = Auto
.type = int(value_min=1)
port = 1701
.type = int(value_min=1)
"""
)
def main(nproc, port):
server_class = server_base.HTTPServer
httpd = server_class(("", port), handler)
print(time.asctime(), "Serving %d processes on port %d" % (nproc, port))
for j in range(nproc - 1):
proc = multiprocessing.Process(target=serve, args=(httpd,))
proc.daemon = True
proc.start()
serve(httpd)
httpd.server_close()
print(time.asctime(), "done")
@show_mail_handle_errors()
def run(args=None):
usage = "dials.find_spots_server [options]"
# Python 3.8 on macOS... needs fork
if sys.hexversion >= 0x3080000 and sys.platform == "darwin":
multiprocessing.set_start_method("fork")
parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=help_message)
params, options = parser.parse_args(args, show_diff_phil=True)
if params.nproc is libtbx.Auto:
params.nproc = number_of_processors(return_value_if_unknown=-1)
main(params.nproc, params.port)
if __name__ == "__main__":
run()
|
dials/dials
|
command_line/find_spots_server.py
|
Python
|
bsd-3-clause
| 13,148
|
[
"CRYSTAL"
] |
b7a046c98ac38c4c98e8541c6abb1041443a7f3f7de9bb1d46b6889dab488d64
|
import matplotlib
import matplotlib.gridspec as gridspec
import pylab as plt
from scipy.stats import gaussian_kde, norm
from scipy import integrate  # used by circular_moments below
from Pyheana.load.fit_data import *
from Pyheana.load.load_data import *
def plot_particles(x, y, z):
plt.figure(figsize=(12,10))
# pdf2, bins2, patches = plt.hist(x[:,0], 100, histtype='stepfilled')
# pdf3, bins3, patches = plt.hist(y[:,0], 100, orientation='horizontal', histtype='stepfilled')
# plt.clf()
# plt.ion()
gs = gridspec.GridSpec(2, 2, width_ratios=[3,1], height_ratios=[1, 3])
ax1 = plt.subplot(gs[2])
ax2 = plt.subplot(gs[0], sharex=ax1)
ax3 = plt.subplot(gs[3], sharey=ax1)
# ax3 = plt.subplot(gs[3], sharex=ax1)
[n_particles, n_turns] = x.shape
xlimits = plt.amax(plt.absolute(x))
ylimits = plt.amax(plt.absolute(y))
zlimits = plt.amax(plt.absolute(z))
# for i in range(n_turns):
# plt.clf()
# ax = plt.gca()
# ax.set_xlim(-xlimits, xlimits)
# ax.set_ylim(-ylimits, ylimits)
# plt.scatter(x[:,i], y[:,i], c=z, marker='o', lw=0)
# plt.draw()
# Scatterplot
z = plt.ones((n_particles, n_turns)).T * z; z = z.T
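# tile the per-particle colour values across every turn so that scatter()
# below receives one colour value per plotted point (assumed intent)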
ax1.set_xlim(-8e-1, 8e-1)
ax1.set_ylim(-4e-3, 4e-3)
# ax1.set_xlim(-4e-1, 4e-1)
# ax1.set_ylim(-5e-4, 5e-4)
ax1.set_xlabel('$\Delta$ z [m]', fontsize=22)
ax1.set_ylabel('$\delta$', fontsize=26)
ax1.scatter(x, y, c=z, marker='o', lw=0)
# Histograms
smoothed_histogram(ax2, x[:,:].flatten(), xaxis='x')
smoothed_histogram(ax3, y[:,:].flatten(), xaxis='y')
for l in ax2.get_xticklabels() + ax3.get_yticklabels():
l.set_visible(False)
for l in ax1.get_xticklabels() + ax3.get_xticklabels():
l.set_rotation(45)
plt.tight_layout()
# # from mayavi.modules.grid_plane import GridPlane
# # from mayavi.modules.outline import Outline
# # from mayavi.modules.volume import Volume
# # from mayavi.scripts import mayavi2
# from mayavi import mlab
# mlab.options.backend = 'envisage'
# # # graphics card driver problem
# # # workaround by casting int in line 246 of enthought/mayavi/tools/figure.py
# # #mlab.options.offscreen = True
# # #enthought.mayavi.engine.current_scene.scene.off_screen_rendering = True
# mlab.figure(bgcolor=(0.9,0.9,0.9), fgcolor=(0.2,0.2,0.2))
# aspect = (0, 10, 0, 16, -6, 6)
# s = mlab.points3d(x[:,0], y[:,0], z[:,0], r, colormap='jet', resolution=6,
# scale_factor=1e-3, scale_mode='none')
# mlab.outline(line_width=1)
# for i in range(n_turns):
# print i
# s.mlab_source.set(x=x[:,i], y=y[:,i], z=z[:,i])
return ax1
def smoothed_histogram(ax, x, xaxis='x'):
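"""Plot a KDE-smoothed density of x along the requested axis and overlay a Gaussian fit annotated with its mu and sigma."""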
if xaxis == 'x':
t = plt.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 1000)
f = kde_scipy(x, t, 0.2)
f = normalise(f)
ax.plot(t, f, c='purple', lw=2)
ax.fill_between(t, 0, f, facecolor='purple', alpha=0.1)
sigma, mu, xi, yia, yib = fitgauss(t, f)
ax.plot(xi, yia, c='firebrick', lw=2, ls='--')
ax.text(0.99, 0.99, '$\mu$: {:1.2e}\n$\sigma$: {:1.2e}'.format(mu, sigma), fontsize=20, color='firebrick', horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
elif xaxis == 'y':
t = plt.linspace(ax.get_ylim()[0], ax.get_ylim()[1], 1000)
f = kde_scipy(x, t, 0.2)
f = normalise(f)
ax.plot(f, t, c='purple', lw=2)
ax.fill_betweenx(t, 0, f, facecolor='purple', alpha=0.1)
sigma, mu, xi, yia, yib = fitgauss(t, f)
ax.plot(yia, xi, c='firebrick', lw=2, ls='--')
ax.text(0.99, 0.99, '$\mu$: {:1.2e}\n$\sigma$: {:1.2e}'.format(mu, sigma), fontsize=20, color='firebrick', horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
else:
raise ValueError
def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scipy"""
# Note that scipy weights its bandwidth by the covariance of the
# input data. To make the results comparable to the other methods,
# we divide the bandwidth by the sample standard deviation here.
# kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
kde = gaussian_kde(x, bw_method=bandwidth, **kwargs)
return kde.evaluate(x_grid)
def fit_gauss(bins, pdf):
mean, sigma = norm.fit(pdf)
print mean, sigma
x = plt.linspace(bins[0], bins[-1], 1000)
y = norm.pdf(x, loc=mean, scale=sigma)
return x, y
def smoothed_histogram_window(ax, pdf, bins):
wsize = 20
bins = 0.5 * (bins[:-1] + bins[1:])
# for wsize in range(5, 30, 5):
# extending the data at beginning and at the end
# to apply the window at the borders
ps = plt.r_[pdf[wsize-1:0:-1], pdf, pdf[-1:-wsize:-1]]
w = plt.hanning(wsize)
pc = plt.convolve(w/w.sum(), ps, mode='valid')
pc = pc[wsize/2:len(ps)-wsize/2]
pc = pc[0:len(bins)]
# plt.plot(bins, pc)
# plt.fill_between(bins, 0, pc, alpha=0.1)
return pc, bins
def circular_moments(pdf):
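# Compute the first few circular (Fourier) moments of the distribution by
# integrating pdf against cos and sin weights. Note: 'low' and 'upp' are
# assumed to be integration bounds defined elsewhere.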
c_sin = []
c_cos = []
for k in xrange(5):
c_sin.append(integrate.quad(pdf, low, upp, weight='cos', wvar=k))
c_cos.append(integrate.quad(pdf, low, upp, weight='sin', wvar=k))
print c_sin
print c_cos
|
like2000/Pyheana
|
display/plot_particles.py
|
Python
|
gpl-2.0
| 5,332
|
[
"Mayavi"
] |
e4510fe861deae8ae12bad9af177657160ed2cd04d671922b59dca5f3e7403a3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Adds KO annotation to each gene ID that passes the specified thresholds
(max. e-value, min. bitscore, min. identity) in the BLAST results file.
Usage: annotate.py <file> <max log e-value> <min bitscore> <min identity>
"""
from config import *
import sys
import itertools
from collections import defaultdict
import urllib
import os
import errno
from buildKeggDB import buildOrgCodes
if len(sys.argv) == 1 or sys.argv[1] == '-h':
sys.exit(__doc__)
DATAFILE = sys.argv[1]
MAX_E = sys.argv[2]
MIN_BITSCORE = sys.argv[3]
MIN_IDENTITY = sys.argv[4]
# SAMPLE = re.search('(?<=/)\w+(?=\.)', DATAFILE).group()
SAMPLE = os.path.split(DATAFILE)[1] + '.'
def buildDir(dirname):
"""
Prepare directory. Create directory if it does not exist.
"""
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(dirname):
raise
def retrieveGeneKO(organism):
"""
Build one gene ID to KO dictionary per organism.
If the mapping file does not exist it will be downloaded via KEGG REST API.
Input: organism code as string
Output: Dictionary with gene ID as key and KO as value
"""
try:
f = open(KEGGDIRNAME + '/' + organism + '.txt', 'r')
except:
urllib.urlretrieve('http://rest.kegg.jp/link/ko/' + organism,
KEGGDIRNAME + '/' + organism + '.txt')
f = open(KEGGDIRNAME + '/' + organism + '.txt', 'r')
# Build the mapping whether the file was already cached or freshly downloaded.
GENE_TO_KO = defaultdict(list)
for line in f.readlines():
if len(line.split('\t')) == 2 and len(line.split('\t')[1].split(':')) == 2:
gene = line.split('\t')[0].strip()
ko = line.split('\t')[1].split(':')[1].strip()
GENE_TO_KO[gene].append(ko)
else:
continue
f.close()
return GENE_TO_KO
def blastparser(filename, maxe, minbit, minid):
"""
Parse BLAST output only for lines with e-value and bitscore above given
thresholds. Build a dictionary with organism code as key and gene IDs with
organism codes as values ('orgcode: geneid')
"""
GENES = defaultdict(list)
with open(filename, 'r') as f:
for line in itertools.islice(f, 5, None):
line = line.strip()
if float(line.split('\t')[10]) < float(maxe) and \
float(line.split('\t')[11]) > float(minbit) and \
float(line.split('\t')[2]) > float(minid):
gene = line.split('\t')[1]
organism = gene.split(':')[0]
query = line.split('\t')[0]
GENES[organism].append((gene, query))
return GENES
def annotateKO():
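"""Parse the BLAST results, map each hit to KO identifiers via the
per-organism lookup tables, and write the unique (query, KO) pairs
to the mappings file."""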
try:
os.remove(MAPPINGSDIR + '/' + SAMPLE + 'KO.txt')
except OSError:
pass
finally:
with open(MAPPINGSDIR + '/' + SAMPLE + 'KO.txt', 'a') as fmap:
GENES = blastparser(DATAFILE, MAX_E, MIN_BITSCORE, MIN_IDENTITY)
ORG_CODES = buildOrgCodes(ORGANISMGROUP)
DONE = defaultdict(list)
for organism in GENES:
if organism in ORG_CODES:
geneToKO = retrieveGeneKO(organism)
else:
continue
if geneToKO is not None:
orgGenes = GENES[organism]
for entry in orgGenes:
gene = entry[0]
query = entry[1]
ko = geneToKO[gene]
if len(ko) == 0:
continue
elif len(ko) == 1:
if ko[0] not in DONE[query]:
DONE[query].append(ko[0])
fmap.write(query + '\t' + ko[0] + '\n')
else:
continue
else:
for koitem in ko:
if koitem not in DONE[query]:
DONE[query].append(koitem)
fmap.write(query + '\t' + koitem + '\n')
else:
continue
else:
continue
print 'Annotation for', SAMPLE, 'done.'
if __name__ == '__main__':
buildDir(MAPPINGSDIR)
annotateKO()
|
pseudonymcp/keggmapping
|
annotate.py
|
Python
|
gpl-3.0
| 4,449
|
[
"BLAST"
] |
6d6ab9749dd3bdde643bca4f236ddf937535134868e92a536b8a578976215890
|
"""
Job control via a command line interface (e.g. qsub/qstat), possibly over a remote connection (e.g. ssh).
"""
import os
import logging
from galaxy import model
from galaxy.jobs import JobDestination
from galaxy.jobs.runners import AsynchronousJobState, AsynchronousJobRunner
from .util.cli import CliInterface, split_params
log = logging.getLogger( __name__ )
__all__ = [ 'ShellJobRunner' ]
class ShellJobRunner( AsynchronousJobRunner ):
"""
Job runner backed by a finite pool of worker threads. FIFO scheduling
"""
runner_name = "ShellRunner"
def __init__( self, app, nworkers ):
"""Start the job runner """
super( ShellJobRunner, self ).__init__( app, nworkers )
self.cli_interface = CliInterface()
self._init_monitor_thread()
self._init_worker_threads()
def get_cli_plugins( self, shell_params, job_params ):
return self.cli_interface.get_plugins( shell_params, job_params )
def url_to_destination( self, url ):
params = {}
shell_params, job_params = url.split( '/' )[ 2:4 ]
# split 'foo=bar&baz=quux' into { 'foo' : 'bar', 'baz' : 'quux' }
shell_params = dict( [ ( 'shell_' + k, v ) for k, v in [ kv.split( '=', 1 ) for kv in shell_params.split( '&' ) ] ] )
job_params = dict( [ ( 'job_' + k, v ) for k, v in [ kv.split( '=', 1 ) for kv in job_params.split( '&' ) ] ] )
params.update( shell_params )
params.update( job_params )
log.debug( "Converted URL '%s' to destination runner=cli, params=%s" % ( url, params ) )
# Create a dynamic JobDestination
return JobDestination( runner='cli', params=params )
def parse_destination_params( self, params ):
return split_params( params )
def queue_job( self, job_wrapper ):
"""Create job script and submit it to the DRM"""
# prepare the job
if not self.prepare_job( job_wrapper, include_metadata=True ):
return
# command line has been added to the wrapper by prepare_job()
command_line = job_wrapper.runner_command_line
# Get shell and job execution interface
job_destination = job_wrapper.job_destination
shell_params, job_params = self.parse_destination_params(job_destination.params)
shell, job_interface = self.get_cli_plugins(shell_params, job_params)
# wrapper.get_id_tag() instead of job_id for compatibility with TaskWrappers.
galaxy_id_tag = job_wrapper.get_id_tag()
# define job attributes
ajs = AsynchronousJobState( files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper )
job_file_kwargs = job_interface.job_script_kwargs(ajs.output_file, ajs.error_file, ajs.job_name)
script = self.get_job_file(
job_wrapper,
exit_code_path=ajs.exit_code_file,
**job_file_kwargs
)
try:
fh = file(ajs.job_file, "w")
fh.write(script)
fh.close()
except:
log.exception("(%s) failure writing job script" % galaxy_id_tag )
job_wrapper.fail("failure preparing job script", exception=True)
return
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
log.info("(%s) Job deleted by user before it entered the queue" % galaxy_id_tag )
if self.app.config.cleanup_job in ("always", "onsuccess"):
job_wrapper.cleanup()
return
log.debug( "(%s) submitting file: %s" % ( galaxy_id_tag, ajs.job_file ) )
cmd_out = shell.execute(job_interface.submit(ajs.job_file))
if cmd_out.returncode != 0:
log.error('(%s) submission failed (stdout): %s' % (galaxy_id_tag, cmd_out.stdout))
log.error('(%s) submission failed (stderr): %s' % (galaxy_id_tag, cmd_out.stderr))
job_wrapper.fail("failure submitting job")
return
# Some job runners return something like 'Submitted batch job XXXX'
# Strip and split to get job ID.
external_job_id = cmd_out.stdout.strip().split()[-1]
if not external_job_id:
log.error('(%s) submission did not return a job identifier, failing job' % galaxy_id_tag)
job_wrapper.fail("failure submitting job")
return
log.info("(%s) queued with identifier: %s" % ( galaxy_id_tag, external_job_id ) )
# store runner information for tracking if Galaxy restarts
job_wrapper.set_job_destination( job_destination, external_job_id )
# Store state information for job
ajs.job_id = external_job_id
ajs.old_state = 'new'
ajs.job_destination = job_destination
# Add to our 'queue' of jobs to monitor
self.monitor_queue.put( ajs )
def check_watched_items( self ):
"""
Called by the monitor thread to look at each watched job and deal
with state changes.
"""
new_watched = []
job_states = self.__get_job_states()
for ajs in self.watched:
external_job_id = ajs.job_id
id_tag = ajs.job_wrapper.get_id_tag()
old_state = ajs.old_state
state = job_states.get(external_job_id, None)
if state is None:
if ajs.job_wrapper.get_state() == model.Job.states.DELETED:
continue
log.debug("(%s/%s) job not found in batch state check" % ( id_tag, external_job_id ) )
shell_params, job_params = self.parse_destination_params(ajs.job_destination.params)
shell, job_interface = self.get_cli_plugins(shell_params, job_params)
cmd_out = shell.execute(job_interface.get_single_status(external_job_id))
state = job_interface.parse_single_status(cmd_out.stdout, external_job_id)
if state == model.Job.states.OK:
log.debug('(%s/%s) job execution finished, running job wrapper finish method' % ( id_tag, external_job_id ) )
self.work_queue.put( ( self.finish_job, ajs ) )
continue
else:
log.warning('(%s/%s) job not found in batch state check, but found in individual state check' % ( id_tag, external_job_id ) )
if state != old_state:
ajs.job_wrapper.change_state( state )
else:
if state != old_state:
log.debug("(%s/%s) state change: %s" % ( id_tag, external_job_id, state ) )
ajs.job_wrapper.change_state( state )
if state == model.Job.states.RUNNING and not ajs.running:
ajs.running = True
ajs.job_wrapper.change_state( model.Job.states.RUNNING )
ajs.old_state = state
new_watched.append( ajs )
# Replace the watch list with the updated version
self.watched = new_watched
def __get_job_states(self):
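# Group the watched jobs by destination so that each destination's CLI
# plugin is queried once for all of its job ids, rather than once per job.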
job_destinations = {}
job_states = {}
# unique the list of destinations
for ajs in self.watched:
if ajs.job_destination.id not in job_destinations:
job_destinations[ajs.job_destination.id] = dict( job_destination=ajs.job_destination, job_ids=[ ajs.job_id ] )
else:
job_destinations[ajs.job_destination.id]['job_ids'].append( ajs.job_id )
# check each destination for the listed job ids
for job_destination_id, v in job_destinations.items():
job_destination = v['job_destination']
job_ids = v['job_ids']
shell_params, job_params = self.parse_destination_params(job_destination.params)
shell, job_interface = self.get_cli_plugins(shell_params, job_params)
cmd_out = shell.execute(job_interface.get_status(job_ids))
assert cmd_out.returncode == 0, cmd_out.stderr
job_states.update(job_interface.parse_status(cmd_out.stdout, job_ids))
return job_states
def finish_job( self, job_state ):
"""For recovery of jobs started prior to standardizing the naming of
files in the AsychronousJobState object
"""
old_ofile = "%s.gjout" % os.path.join(job_state.job_wrapper.working_directory, job_state.job_wrapper.get_id_tag())
if os.path.exists( old_ofile ):
job_state.output_file = old_ofile
job_state.error_file = "%s.gjerr" % os.path.join(job_state.job_wrapper.working_directory, job_state.job_wrapper.get_id_tag())
job_state.exit_code_file = "%s.gjec" % os.path.join(job_state.job_wrapper.working_directory, job_state.job_wrapper.get_id_tag())
job_state.job_file = "%s/galaxy_%s.sh" % (self.app.config.cluster_files_directory, job_state.job_wrapper.get_id_tag())
super( ShellJobRunner, self ).finish_job( job_state )
def stop_job( self, job ):
"""Attempts to delete a dispatched job"""
try:
shell_params, job_params = self.parse_destination_params(job.destination_params)
shell, job_interface = self.get_cli_plugins(shell_params, job_params)
cmd_out = shell.execute(job_interface.delete( job.job_runner_external_id ))
assert cmd_out.returncode == 0, cmd_out.stderr
log.debug( "(%s/%s) Terminated at user's request" % ( job.id, job.job_runner_external_id ) )
except Exception, e:
log.debug( "(%s/%s) User killed running job, but error encountered during termination: %s" % ( job.id, job.job_runner_external_id, e ) )
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
job_id = job.get_job_runner_external_id()
if job_id is None:
self.put( job_wrapper )
return
ajs = AsynchronousJobState( files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper )
ajs.job_id = str( job_id )
ajs.command_line = job.command_line
ajs.job_wrapper = job_wrapper
ajs.job_destination = job_wrapper.job_destination
if job.state == model.Job.states.RUNNING:
log.debug( "(%s/%s) is still in running state, adding to the runner monitor queue" % ( job.id, job.job_runner_external_id ) )
ajs.old_state = model.Job.states.RUNNING
ajs.running = True
self.monitor_queue.put( ajs )
elif job.state == model.Job.states.QUEUED:
log.debug( "(%s/%s) is still in queued state, adding to the runner monitor queue" % ( job.id, job.job_runner_external_id ) )
ajs.old_state = model.Job.states.QUEUED
ajs.running = False
self.monitor_queue.put( ajs )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/jobs/runners/cli.py
|
Python
|
gpl-3.0
| 10,834
|
[
"Galaxy"
] |
17cf2376721eabb91e1ca26291b9cb7b566037029ab39fbce120daba4131da26
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import itertools
import logging
import os
import shutil
import sys
import tempfile
import numpy as np
from pathlib import Path
from shutil import which
from collections import defaultdict
from superfocus_app.do_alignment import align_reads, parse_alignments
from superfocus_app import version
LOGGER_FORMAT = '[%(asctime)s - %(levelname)s] %(message)s'
def is_wanted_file(queries):
"""Remove files from query files that not have extension .fasta/.fastq/.fna
Args:
queries (list): List with query names.
Returns:
list: Sorted list with only .fasta/.fastq/.fna files.
"""
queries = [query for query in queries if query.name.split(".")[-1].lower() in ["fna", "fasta", "fastq"]]
queries.sort()
return queries
def get_denominators(results):
"""Get denominators to normalise abundances.
Args:
results (dict): Results.
Returns:
numpy.ndarray: Sum of columns in the metrics aka denominators for normalisation.
"""
return np.sum([results[element] for element in results], axis=0)
def add_relative_abundance(level_results, normalizer):
"""Add relative abundance to results.
Args:
level_results (dict): Results to be updated.
normalizer (numpy.ndarray): Normalizer denominators.
Returns:
dict: Results with relative abundance.
"""
# add relative abundance next to raw count for each of the file(s) in the analysis
for level in level_results:
relative_abundance = np.divide(list(level_results[level]), normalizer, where=normalizer != 0)
relative_abundance *= 100
level_results[level] = list(level_results[level]) + list(relative_abundance)
return level_results
def aggregate_level(results, position, normalizer):
"""Aggregate abundance of subsystem level and add relative abundance.
Args:
results (dict): Results.
position (int): Position of level in the results.
normalizer (numpy.ndarray): Normalizer denominators.
Returns:
dict: Aggregated result targeting chosen subsystem level.
"""
level_results = defaultdict(list)
for all_levels in results:
level = all_levels.split("\t")[position]
abundance = results[all_levels]
level_results[level].append(abundance)
level_results = {temp_level: np.sum(level_results[temp_level], axis=0) for temp_level in level_results}
return add_relative_abundance(level_results, normalizer)
def get_subsystems(translation_file):
"""Create lookup table from primary key to subsystems levels 1, 2, and 3.
Args:
translation_file (PosixPath): Path to file with subsystems information.
Returns:
dict: Lookup table primary key to subsystem levels.
"""
subsystems_translation = {}
with open(translation_file) as database_file:
database_reader = csv.reader(database_file, delimiter='\t')
next(database_reader, None)
for row in database_reader:
subsystems_translation[row[0]] = "\t".join(row[1:])
return subsystems_translation
def write_results(results, header, output_name, query_path, database, aligner):
"""Write results in tabular format.
Args:
results (collections.defaultdict): Dict with results to be written.
header (list): Header to be written.
output_name (str): Path to output.
query_path (str): Path to query.
database (str): Database used.
aligner (str): Aligner name.
"""
with open(output_name, 'w') as outfile:
writer = csv.writer(outfile, delimiter='\t', lineterminator='\n')
# run info
writer.writerow(["Query: {}".format(query_path)])
writer.writerow(["Database used: {}".format(database)])
writer.writerow(["Aligner used: {}".format(aligner)])
writer.writerow([""])
# subsystem and files header
writer.writerow(header)
for row in sorted(results):
if sum(results[row]) > 0:
writer.writerow(row.split("\t") + list(map(str, results[row])))
def write_binning(binning_result, output_name, query_path, database, aligner):
"""Write binning results in tabular format.
Args:
binning_result (collections.defaultdict): Dict with results to be written.
output_name (str): Path to output.
query_path (str): Path to query.
database (str): Database used.
aligner (str): Aligner name.
"""
with open(output_name, 'w') as outfile:
writer = csv.writer(outfile, delimiter='\t', lineterminator='\n')
# run info
writer.writerow(["Query: {}".format(query_path)])
writer.writerow(["Database used: {}".format(database)])
writer.writerow(["Aligner used: {}".format(aligner)])
writer.writerow([""])
writer.writerow(["Sample name", "Read Name",
"Subsystem Level 1", "Subsystem Level 2", "Subsystem Level 3", "Function",
"Identity %", "Alignment Length", "E-value"])
for query_name in binning_result:
for read_name in binning_result[query_name]:
# remove consecutive duplicate rows from the list
temp_row = binning_result[query_name][read_name]
temp_row = list(temp_row for temp_row, _ in itertools.groupby(temp_row))
for row_temp in temp_row:
row = [query_name, read_name] + row_temp[-1].split("\t") + row_temp[:-1]
writer.writerow(row)
def is_valid_number(value):
""" Check if input if a valid >= 0 int or float.
Args:
value (str): Value to be checked.
Returns:
bool: True if valid >= 0 number else False.
"""
try:
if float(value) >= 0:
return True
else:
return False
except:
return False
def parse_args():
"""Parse args entered by the user.
Returns:
argparse.Namespace: Parsed arguments.
"""
parser = argparse.ArgumentParser(description="SUPER-FOCUS: A tool for agile functional analysis of shotgun "
"metagenomic data.",
epilog="superfocus -q input_folder -dir output_dir")
parser.add_argument('-v', '--version', action='version', version='SUPER-FOCUS {}'.format(version))
# basic parameters
parser.add_argument("-q", "--query", help="Path to FAST(A/Q) file or directory with these files.", required=True,
action='append')
parser.add_argument("-dir", "--output_directory", help="Path to output files", required=True)
parser.add_argument("-o", "--output_prefix", help="Output prefix (Default: output).", default="output_")
parser.add_argument("-tmp", "--temp_directory", help="specify an alternate temporary directory to use")
# aligner related
parser.add_argument("-a", "--aligner", help="aligner choice (rapsearch, diamond, blast, or mmseqs2; default rapsearch).",
default="rapsearch")
parser.add_argument("-mi", "--minimum_identity", help="minimum identity (default 60 perc).", default="60")
parser.add_argument("-ml", "--minimum_alignment", help="minimum alignment (amino acids) (default: 15).",
default="15")
parser.add_argument("-t", "--threads", help="Number Threads used in the k-mer counting (Default: 4).",
default="4")
parser.add_argument("-e", "--evalue", help="e-value (default 0.00001).", default="0.00001")
parser.add_argument("-db", "--database", help="database (DB_90, DB_95, DB_98, or DB_100; default DB_90)",
default="DB_90")
parser.add_argument("-p", "--amino_acid", help="amino acid input; 0 nucleotides; 1 amino acids (default 0).",
default="0")
parser.add_argument("-f", "--fast", help="runs RAPSearch2 or DIAMOND on fast mode - 0 (False) / 1 (True) "
"(default: 1).", default="1")
# extra
parser.add_argument("-n", "--normalise_output", help="normalises each query counts based on number of hits; "
"0 doesn't normalize; 1 normalizes (default: 1).", default="1")
parser.add_argument("-m", "--focus", help="runs FOCUS; 1 does run; 0 does not run: default 0.", default="0")
parser.add_argument("-b", "--alternate_directory", help="Alternate directory for your databases.", default="")
parser.add_argument('-d', '--delete_alignments', help='Delete alignments', action='store_true', required=False)
parser.add_argument('-w', '--latency_wait', help='Add a delay (in seconds) between writing the file and reading it',
type=int, default=0)
parser.add_argument('-l', '--log', help='Path to log file (Default: STDOUT).', required=False)
return parser.parse_args()
def main():
args = parse_args()
# basic parameters
prefix = args.output_prefix
output_directory = Path(args.output_directory)
# alignment related
aligner = args.aligner.lower()
minimum_identity = float(args.minimum_identity)
minimum_alignment = int(args.minimum_alignment)
threads = args.threads
evalue = args.evalue
database = args.database.split("_")[-1]
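# keep only the identity level, e.g. '90' from 'DB_90'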
amino_acid = args.amino_acid
fast_mode = args.fast
del_alignments = args.delete_alignments
# rename mmseqs2 to mmseqs. This is the name of the command, but the program is mmseqs!
if aligner == 'mmseqs2':
aligner = 'mmseqs'
# other metrics
normalise_output = int(args.normalise_output)
run_focus = args.focus
if args.alternate_directory:
WORK_DIRECTORY = Path(args.alternate_directory)
elif 'SUPERFOCUS_DB' in os.environ:
WORK_DIRECTORY = Path(os.environ['SUPERFOCUS_DB'])
else:
WORK_DIRECTORY = Path(__file__).parents[0]
if args.log:
logging.basicConfig(format=LOGGER_FORMAT, level=logging.INFO, filename=args.log)
else:
logging.basicConfig(format=LOGGER_FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info("SUPER-FOCUS: A tool for agile functional analysis of shotgun metagenomic data")
# check if output_directory is exists - if not, creates it
if not output_directory.exists():
Path(output_directory).mkdir(parents=True, mode=511)
logger.info("OUTPUT: {} does not exist - just created it :)".format(output_directory))
# parse directory and/or query files
query_files = []
for f in args.query:
p = Path(f)
if p.is_dir():
query_files += [Path(p, x) for x in os.listdir(p)]
elif p.is_file():
query_files.append(p)
query_files = is_wanted_file(query_files)
if query_files == []:
logger.critical("QUERY: {} does not have any fasta/fna/fastq files".format(args.query))
sys.exit(1)
# find a temp directory location
tmp = "/tmp"
if args.temp_directory:
tmp = args.temp_directory
if not os.path.exists(tmp):
logger.info(f"Creating temporary path: {tmp}")
os.makedirs(tmp, exist_ok=True)
elif 'TMPDIR' in os.environ:
tmp = os.environ['TMPDIR']
if not os.path.exists(tmp):
logger.info(f"Creating temporary path: {tmp}")
os.makedirs(tmp, exist_ok=True)
else:
sys.stderr.write(f"WARNING: Using {tmp} as the base temporary directory")
tmpdir = tempfile.mkdtemp(dir=tmp)
os.makedirs(tmpdir, exist_ok=True)
logger.info(f"Using {tmpdir} as the temporary directory")
# check if we can run focus
if run_focus != '0':
logger.critical("FOCUS: Running FOCUS is not avaliable on this version. "
"Please see https://github.com/metageni/FOCUS on how to run it")
# check if amino_acid is valid
elif aligner == 'blast' and amino_acid not in ['0', '1']:
logger.critical("AMINO ACID OPTION: {} is not valid for --amino_acid. Only 0 or 1".format(amino_acid))
# check if at database choice is valid
elif database not in ["90", "95", "98", "100"]:
logger.critical("DATABASE: DB_{} not valid. Choose DB_90/95/98/or 100".format(database))
# check if aligner is valid
elif aligner not in ["diamond", "rapsearch", "blast", "mmseqs"]:
logger.critical("ALIGNER: {} is not a valid aligner. Please choose among (diamond, blast, rapsearch, or mmseqs2)".
format(aligner))
# check if aligner exists
elif not which(aligner) and aligner.lower() != "blast":
logger.critical("ALIGNER: {} is not in the path of your system".format(aligner))
# check if work directory exists
elif not WORK_DIRECTORY.exists():
logger.critical("WORK_DIRECTORY: {} does not exist".format(WORK_DIRECTORY))
# check if number of threads are valid
elif threads != "all" and not is_valid_number(threads):
logger.critical("THREADS: {} is not a valid number of threads".format(threads))
# check if evalue is valid
elif not is_valid_number(evalue):
logger.critical("E-VALUE: {} is not a valid evalue".format(evalue))
else:
results = defaultdict(list)
binning_reads = defaultdict(lambda: defaultdict(list))
subsystems_translation = get_subsystems(Path(WORK_DIRECTORY, "db/database_PKs.txt"))
for counter, temp_query in enumerate(query_files):
logger.info("1.{}) Working on: {}".format(counter + 1, temp_query))
logger.info(" Aligning sequences in {} to {} using {}".format(temp_query, database, aligner))
alignment_name = align_reads(temp_query,
output_dir=output_directory, aligner=aligner,
database=database, evalue=evalue,
threads=threads, fast_mode=fast_mode,
WORK_DIRECTORY=WORK_DIRECTORY,
amino_acid=amino_acid, temp_folder=tmpdir,
latency_delay=args.latency_wait)
logger.info(" Parsing Alignments")
sample_position = query_files.index(temp_query)
results, binning_reads = parse_alignments(alignment_name, results, normalise_output, len(query_files),
sample_position, minimum_identity, minimum_alignment,
subsystems_translation, aligner, binning_reads, temp_query,
del_alignments)
# write results
normalizer = get_denominators(results)
header_files = query_files + ["{} %".format(x) for x in query_files]
logger.info('Writing results at {}'.format(output_directory))
# write binning
output_file = "{}/{}binning.xls".format(output_directory, prefix)
write_binning(binning_reads, output_file, args.query, database, aligner)
logger.info(' Working on writing binning')
# write results for each of the levels
for level in [1, 2, 3]:
logger.info(' Working on subsystem level {}'.format(level))
temp_header = ["Subsystem {}".format(level)] + header_files
temp_results = aggregate_level(results, level - 1, normalizer)
output_file = "{}/{}subsystem_level_{}.xls".format(output_directory, prefix, level)
write_results(temp_results, temp_header, output_file, args.query, database, aligner)
# write result for all the levels in one file
logger.info(' Working on Combined output')
temp_header = ["Subsystem Level 1", "Subsystem Level 2", "Subsystem Level 3", "Function"] + header_files
output_file = "{}/{}all_levels_and_function.xls".format(output_directory, prefix)
temp_results = add_relative_abundance(results, normalizer)
write_results(temp_results, temp_header, output_file, args.query, database, aligner)
# clean up our mess
shutil.rmtree(tmpdir)
logger.info('Done')
if __name__ == "__main__":
main()
|
metageni/SUPER-FOCUS
|
superfocus_app/superfocus.py
|
Python
|
gpl-3.0
| 16,374
|
[
"BLAST"
] |
77c0dd1175117e441310a9db2020e05e2eb60aadfa0b97c0b33cd6f100d056f4
|
import ast
import datetime
import re
import secrets
import time
from collections import defaultdict
from datetime import timedelta
from typing import (
AbstractSet,
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
import django.contrib.auth
from bitfield import BitField
from bitfield.types import BitHandler
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator, RegexValidator, URLValidator, validate_email
from django.db import models, transaction
from django.db.models import CASCADE, Manager, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_delete, post_save
from django.utils.functional import Promise
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from confirmation import settings as confirmation_settings
from zerver.lib import cache
from zerver.lib.cache import (
active_non_guest_user_ids_cache_key,
active_user_ids_cache_key,
bot_dict_fields,
bot_dicts_in_realm_cache_key,
bot_profile_cache_key,
bulk_cached_fetch,
cache_delete,
cache_set,
cache_with_key,
flush_message,
flush_realm,
flush_stream,
flush_submessage,
flush_used_upload_space_cache,
flush_user_profile,
get_realm_used_upload_space_cache_key,
get_stream_cache_key,
realm_alert_words_automaton_cache_key,
realm_alert_words_cache_key,
realm_user_dict_fields,
realm_user_dicts_cache_key,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
user_profile_by_id_cache_key,
user_profile_cache_key,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.types import (
DisplayRecipientT,
ExtendedFieldElement,
ExtendedValidator,
FieldElement,
ProfileData,
ProfileDataElementBase,
RealmUserValidator,
UserFieldElement,
Validator,
)
from zerver.lib.utils import make_safe_digest
from zerver.lib.validator import (
check_date,
check_int,
check_list,
check_long_string,
check_short_string,
check_url,
validate_choice_field,
)
MAX_TOPIC_NAME_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH: int = 50
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[str], AbstractSet[str])
def query_for_ids(query: QuerySet, user_ids: List[int], field: str) -> QuerySet:
'''
This function optimizes searches of the form
`user_profile_id in (1, 2, 3, 4)` by quickly
building the where clauses. Profiling shows significant
speedups over the normal Django-based approach.
Use this very carefully! Also, the caller should
guard against empty lists of user_ids.
'''
assert(user_ids)
clause = f'{field} IN %s'
query = query.extra(
where=[clause], params=(tuple(user_ids),),
)
return query
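# Illustrative (hypothetical) usage of query_for_ids; the queryset and ids below are
# made up for the example:
#
#     rows = SomeModel.objects.filter(...)   # any queryset over a table with the column
#     rows = query_for_ids(rows, [1, 2, 3, 4], 'user_profile_id')
#     # roughly appends "WHERE user_profile_id IN (1, 2, 3, 4)" to the generated SQL,
#     # which profiles faster than the ORM's usual __in lookup for large id lists.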
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache cache.
#
# This local cache has a lifetime of just a single request; it is
# cleared inside `flush_per_request_caches` in our middleware. It
# could be replaced with smarter bulk-fetching logic that deduplicates
# queries for the same recipient; this is just a convenient way to
# write that code.
per_request_display_recipient_cache: Dict[int, DisplayRecipientT] = {}
def get_display_recipient_by_id(recipient_id: int, recipient_type: int,
recipient_type_id: Optional[int]) -> DisplayRecipientT:
"""
returns: an object describing the recipient (using a cache).
If the type is a stream, the type_id must be an int; a string is returned.
Otherwise, type_id may be None; an array of recipient dicts is returned.
"""
# Have to import here, to avoid circular dependency.
from zerver.lib.display_recipient import get_display_recipient_remote_cache
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
def get_display_recipient(recipient: 'Recipient') -> DisplayRecipientT:
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id,
)
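# Illustrative return shapes (values made up): for a STREAM recipient the helpers
# above return the stream's name as a plain string, e.g. "general"; for PERSONAL and
# HUDDLE recipients they return a list of user dicts, e.g.
#     [{"id": 13, "email": "user13@example.com", "full_name": "..."}, ...]
# Because results land in per_request_display_recipient_cache, repeated lookups for
# the same recipient within one request hit neither the remote cache nor the database.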
def get_realm_emoji_cache_key(realm: 'Realm') -> str:
return f'realm_emoji:{realm.id}'
def get_active_realm_emoji_cache_key(realm: 'Realm') -> str:
return f'active_realm_emoji:{realm.id}'
# This simple call-once caching saves ~500us in auth_enabled_helper,
# which is a significant optimization for common_context. Note that
# these values cannot change in a running production system, but do
# regularly change within unit tests; we address the latter by calling
# clear_supported_auth_backends_cache in our standard tearDown code.
supported_backends: Optional[Set[type]] = None
def supported_auth_backends() -> Set[type]:
global supported_backends
# Caching temporarily disabled for debugging
supported_backends = django.contrib.auth.get_backends()
assert supported_backends is not None
return supported_backends
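# The comment above describes call-once caching, while the function currently
# recomputes the backend list on every call. A minimal sketch of the cached variant
# (assumption: it is safe to re-enable) would be:
#
#     if supported_backends is None:
#         supported_backends = django.contrib.auth.get_backends()
#     return supported_backends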
def clear_supported_auth_backends_cache() -> None:
global supported_backends
supported_backends = None
class Realm(models.Model):
MAX_REALM_NAME_LENGTH = 40
MAX_REALM_SUBDOMAIN_LENGTH = 40
INVITES_STANDARD_REALM_DAILY_MAX = 3000
MESSAGE_VISIBILITY_LIMITED = 10000
AUTHENTICATION_FLAGS = ['Google', 'Email', 'GitHub', 'LDAP', 'Dev',
'RemoteUser', 'AzureAD', 'SAML', 'GitLab', 'Apple']
SUBDOMAIN_FOR_ROOT_DOMAIN = ''
WILDCARD_MENTION_THRESHOLD = 15
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# User-visible display name and description used on e.g. the organization homepage
name: Optional[str] = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True)
description: str = models.TextField(default="")
# A short, identifier-like name for the organization. Used in subdomains;
# e.g. on a server at example.com, an org with string_id `foo` is reached
# at `foo.example.com`.
string_id: str = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True)
date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
deactivated: bool = models.BooleanField(default=False)
# See RealmDomain for the domains that apply for a given organization.
emails_restricted_to_domains: bool = models.BooleanField(default=False)
invite_required: bool = models.BooleanField(default=True)
invite_by_admins_only: bool = models.BooleanField(default=False)
_max_invites: Optional[int] = models.IntegerField(null=True, db_column='max_invites')
disallow_disposable_email_addresses: bool = models.BooleanField(default=True)
authentication_methods: BitHandler = BitField(
flags=AUTHENTICATION_FLAGS, default=2**31 - 1,
)
# Whether the organization has enabled inline image and URL previews.
inline_image_preview: bool = models.BooleanField(default=True)
inline_url_embed_preview: bool = models.BooleanField(default=False)
# Whether digest emails are enabled for the organization.
digest_emails_enabled: bool = models.BooleanField(default=False)
# Day of the week on which the digest is sent (default: Tuesday).
digest_weekday: int = models.SmallIntegerField(default=1)
send_welcome_emails: bool = models.BooleanField(default=True)
message_content_allowed_in_email_notifications: bool = models.BooleanField(default=True)
mandatory_topics: bool = models.BooleanField(default=False)
add_emoji_by_admins_only: bool = models.BooleanField(default=False)
name_changes_disabled: bool = models.BooleanField(default=False)
email_changes_disabled: bool = models.BooleanField(default=False)
avatar_changes_disabled: bool = models.BooleanField(default=False)
POLICY_MEMBERS_ONLY = 1
POLICY_ADMINS_ONLY = 2
POLICY_FULL_MEMBERS_ONLY = 3
COMMON_POLICY_TYPES = [
POLICY_MEMBERS_ONLY,
POLICY_ADMINS_ONLY,
POLICY_FULL_MEMBERS_ONLY,
]
# Who in the organization is allowed to create streams.
create_stream_policy: int = models.PositiveSmallIntegerField(
default=POLICY_MEMBERS_ONLY)
# Who in the organization is allowed to invite other users to streams.
invite_to_stream_policy: int = models.PositiveSmallIntegerField(
default=POLICY_MEMBERS_ONLY)
USER_GROUP_EDIT_POLICY_MEMBERS = 1
USER_GROUP_EDIT_POLICY_ADMINS = 2
user_group_edit_policy: int = models.PositiveSmallIntegerField(
default=USER_GROUP_EDIT_POLICY_MEMBERS)
USER_GROUP_EDIT_POLICY_TYPES = [
USER_GROUP_EDIT_POLICY_MEMBERS,
USER_GROUP_EDIT_POLICY_ADMINS,
]
PRIVATE_MESSAGE_POLICY_UNLIMITED = 1
PRIVATE_MESSAGE_POLICY_DISABLED = 2
private_message_policy: int = models.PositiveSmallIntegerField(
default=PRIVATE_MESSAGE_POLICY_UNLIMITED)
PRIVATE_MESSAGE_POLICY_TYPES = [
PRIVATE_MESSAGE_POLICY_UNLIMITED,
PRIVATE_MESSAGE_POLICY_DISABLED,
]
# Global policy for who is allowed to use wildcard mentions in
# streams with a large number of subscribers. Anyone can use
# wildcard mentions in small streams regardless of this setting.
WILDCARD_MENTION_POLICY_EVERYONE = 1
WILDCARD_MENTION_POLICY_MEMBERS = 2
WILDCARD_MENTION_POLICY_FULL_MEMBERS = 3
WILDCARD_MENTION_POLICY_STREAM_ADMINS = 4
WILDCARD_MENTION_POLICY_ADMINS = 5
WILDCARD_MENTION_POLICY_NOBODY = 6
wildcard_mention_policy: int = models.PositiveSmallIntegerField(
default=WILDCARD_MENTION_POLICY_STREAM_ADMINS,
)
WILDCARD_MENTION_POLICY_TYPES = [
WILDCARD_MENTION_POLICY_EVERYONE,
WILDCARD_MENTION_POLICY_MEMBERS,
WILDCARD_MENTION_POLICY_FULL_MEMBERS,
WILDCARD_MENTION_POLICY_STREAM_ADMINS,
WILDCARD_MENTION_POLICY_ADMINS,
WILDCARD_MENTION_POLICY_NOBODY,
]
# Who in the organization has access to users' actual email
# addresses. Controls whether the UserProfile.email field is the
# same as UserProfile.delivery_email, or is instead garbage.
EMAIL_ADDRESS_VISIBILITY_EVERYONE = 1
EMAIL_ADDRESS_VISIBILITY_MEMBERS = 2
EMAIL_ADDRESS_VISIBILITY_ADMINS = 3
EMAIL_ADDRESS_VISIBILITY_NOBODY = 4
email_address_visibility: int = models.PositiveSmallIntegerField(
default=EMAIL_ADDRESS_VISIBILITY_EVERYONE,
)
EMAIL_ADDRESS_VISIBILITY_TYPES = [
EMAIL_ADDRESS_VISIBILITY_EVERYONE,
# The MEMBERS level is not yet implemented on the backend.
## EMAIL_ADDRESS_VISIBILITY_MEMBERS,
EMAIL_ADDRESS_VISIBILITY_ADMINS,
EMAIL_ADDRESS_VISIBILITY_NOBODY,
]
# Threshold in days for new users to create streams, and potentially take
# some other actions.
waiting_period_threshold: int = models.PositiveIntegerField(default=0)
allow_message_deleting: bool = models.BooleanField(default=False)
DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
message_content_delete_limit_seconds: int = models.IntegerField(
default=DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS,
)
allow_message_editing: bool = models.BooleanField(default=True)
DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
message_content_edit_limit_seconds: int = models.IntegerField(
default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS,
)
# Whether users have access to message edit history
allow_edit_history: bool = models.BooleanField(default=True)
DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS = 86400
allow_community_topic_editing: bool = models.BooleanField(default=True)
# Defaults for new users
default_twenty_four_hour_time: bool = models.BooleanField(default=False)
default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
DEFAULT_NOTIFICATION_STREAM_NAME = 'general'
INITIAL_PRIVATE_STREAM_NAME = 'core team'
STREAM_EVENTS_NOTIFICATION_TOPIC = ugettext_lazy('stream events')
notifications_stream: Optional["Stream"] = models.ForeignKey(
"Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
)
signup_notifications_stream: Optional["Stream"] = models.ForeignKey(
"Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
)
MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
'forever': -1,
}
# For old messages being automatically deleted
message_retention_days: int = models.IntegerField(null=False, default=-1)
# When non-null, all but the latest this many messages in the organization
# are inaccessible to users (but not deleted).
message_visibility_limit: Optional[int] = models.IntegerField(null=True)
# Messages older than this message ID in the organization are inaccessible.
first_visible_message_id: int = models.IntegerField(default=0)
# Valid org_types are {CORPORATE, COMMUNITY}
CORPORATE = 1
COMMUNITY = 2
org_type: int = models.PositiveSmallIntegerField(default=CORPORATE)
UPGRADE_TEXT_STANDARD = ugettext_lazy("Available on Zulip Standard. Upgrade to access.")
# plan_type controls various features around resource/feature
# limitations for a Zulip organization on multi-tenant installations
# like Zulip Cloud.
SELF_HOSTED = 1
LIMITED = 2
STANDARD = 3
STANDARD_FREE = 4
plan_type: int = models.PositiveSmallIntegerField(default=SELF_HOSTED)
# This value is also being used in static/js/settings_bots.bot_creation_policy_values.
# On updating it here, update it there as well.
BOT_CREATION_EVERYONE = 1
BOT_CREATION_LIMIT_GENERIC_BOTS = 2
BOT_CREATION_ADMINS_ONLY = 3
bot_creation_policy: int = models.PositiveSmallIntegerField(default=BOT_CREATION_EVERYONE)
BOT_CREATION_POLICY_TYPES = [
BOT_CREATION_EVERYONE,
BOT_CREATION_LIMIT_GENERIC_BOTS,
BOT_CREATION_ADMINS_ONLY,
]
# See upload_quota_bytes; don't interpret upload_quota_gb directly.
UPLOAD_QUOTA_LIMITED = 5
UPLOAD_QUOTA_STANDARD = 50
upload_quota_gb: Optional[int] = models.IntegerField(null=True)
VIDEO_CHAT_PROVIDERS = {
'disabled': {
'name': "None",
'id': 0,
},
'jitsi_meet': {
'name': "Jitsi Meet",
'id': 1,
},
# ID 2 was used for the now-deleted Google Hangouts.
# ID 3 reserved for optional Zoom, see below.
# ID 4 reserved for optional Big Blue Button, see below.
}
if settings.VIDEO_ZOOM_CLIENT_ID is not None and settings.VIDEO_ZOOM_CLIENT_SECRET is not None:
VIDEO_CHAT_PROVIDERS['zoom'] = {
'name': "Zoom",
'id': 3,
}
if settings.BIG_BLUE_BUTTON_SECRET is not None and settings.BIG_BLUE_BUTTON_URL is not None:
VIDEO_CHAT_PROVIDERS['big_blue_button'] = {
'name': "Big Blue Button",
'id': 4
}
video_chat_provider: int = models.PositiveSmallIntegerField(default=VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
default_code_block_language: Optional[str] = models.TextField(null=True, default=None)
# Define the types of the various automatically managed properties
property_types: Dict[str, Union[type, Tuple[type, ...]]] = dict(
add_emoji_by_admins_only=bool,
allow_edit_history=bool,
allow_message_deleting=bool,
bot_creation_policy=int,
create_stream_policy=int,
invite_to_stream_policy=int,
default_language=str,
        default_twenty_four_hour_time=bool,
description=str,
digest_emails_enabled=bool,
disallow_disposable_email_addresses=bool,
email_address_visibility=int,
email_changes_disabled=bool,
invite_required=bool,
invite_by_admins_only=bool,
inline_image_preview=bool,
inline_url_embed_preview=bool,
mandatory_topics=bool,
message_retention_days=(int, type(None)),
name=str,
name_changes_disabled=bool,
avatar_changes_disabled=bool,
emails_restricted_to_domains=bool,
send_welcome_emails=bool,
message_content_allowed_in_email_notifications=bool,
video_chat_provider=int,
waiting_period_threshold=int,
digest_weekday=int,
private_message_policy=int,
user_group_edit_policy=int,
default_code_block_language=(str, type(None)),
message_content_delete_limit_seconds=int,
wildcard_mention_policy=int,
)
DIGEST_WEEKDAY_VALUES = [0, 1, 2, 3, 4, 5, 6]
# Icon is the square mobile icon.
ICON_FROM_GRAVATAR = 'G'
ICON_UPLOADED = 'U'
ICON_SOURCES = (
(ICON_FROM_GRAVATAR, 'Hosted by Gravatar'),
(ICON_UPLOADED, 'Uploaded by administrator'),
)
icon_source: str = models.CharField(
default=ICON_FROM_GRAVATAR, choices=ICON_SOURCES, max_length=1,
)
icon_version: int = models.PositiveSmallIntegerField(default=1)
# Logo is the horizontal logo we show in top-left of webapp navbar UI.
LOGO_DEFAULT = 'D'
LOGO_UPLOADED = 'U'
LOGO_SOURCES = (
(LOGO_DEFAULT, 'Default to Zulip'),
(LOGO_UPLOADED, 'Uploaded by administrator'),
)
logo_source: str = models.CharField(
default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
)
logo_version: int = models.PositiveSmallIntegerField(default=1)
night_logo_source: str = models.CharField(
default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
)
night_logo_version: int = models.PositiveSmallIntegerField(default=1)
def authentication_methods_dict(self) -> Dict[str, bool]:
"""Returns the a mapping from authentication flags to their status,
showing only those authentication flags that are supported on
the current server (i.e. if EmailAuthBackend is not configured
on the server, this will not return an entry for "Email")."""
# This mapping needs to be imported from here due to the cyclic
# dependency.
from zproject.backends import AUTH_BACKEND_NAME_MAP
ret: Dict[str, bool] = {}
supported_backends = [backend.__class__ for backend in supported_auth_backends()]
# `authentication_methods` is a bitfield.types.BitHandler, not
# a true dict; since it is still python2- and python3-compat,
# `iteritems` is its method to iterate over its contents.
for k, v in self.authentication_methods.iteritems():
backend = AUTH_BACKEND_NAME_MAP[k]
if backend in supported_backends:
ret[k] = v
return ret
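    # Illustrative return value (flags made up): on a server where only the email and
    # GitHub backends are configured, this might return {'Email': True, 'GitHub': False},
    # i.e. only server-supported backends appear as keys, and the boolean reflects
    # whether the realm has that method enabled.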
def __str__(self) -> str:
return f"<Realm: {self.string_id} {self.id}>"
@cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
def get_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
return get_realm_emoji_uncached(self)
@cache_with_key(get_active_realm_emoji_cache_key, timeout=3600*24*7)
def get_active_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
return get_active_realm_emoji_uncached(self)
def get_admin_users_and_bots(self) -> Sequence['UserProfile']:
"""Use this in contexts where we want administrative users as well as
bots with administrator privileges, like send_event calls for
notifications to all administrator users.
"""
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True,
role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
UserProfile.ROLE_REALM_OWNER])
def get_human_admin_users(self) -> QuerySet:
"""Use this in contexts where we want only human users with
administrative privileges, like sending an email to all of a
realm's administrators (bots don't have real email addresses).
"""
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_bot=False, is_active=True,
role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
UserProfile.ROLE_REALM_OWNER])
def get_human_billing_admin_users(self) -> Sequence['UserProfile']:
return UserProfile.objects.filter(Q(role=UserProfile.ROLE_REALM_OWNER) | Q(is_billing_admin=True),
realm=self, is_bot=False, is_active=True)
def get_active_users(self) -> Sequence['UserProfile']:
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
def get_human_owner_users(self) -> QuerySet:
return UserProfile.objects.filter(realm=self, is_bot=False,
role=UserProfile.ROLE_REALM_OWNER,
is_active=True)
def get_bot_domain(self) -> str:
return get_fake_email_domain()
def get_notifications_stream(self) -> Optional['Stream']:
if self.notifications_stream is not None and not self.notifications_stream.deactivated:
return self.notifications_stream
return None
def get_signup_notifications_stream(self) -> Optional['Stream']:
if self.signup_notifications_stream is not None and not self.signup_notifications_stream.deactivated:
return self.signup_notifications_stream
return None
@property
def max_invites(self) -> int:
if self._max_invites is None:
return settings.INVITES_DEFAULT_REALM_DAILY_MAX
return self._max_invites
@max_invites.setter
def max_invites(self, value: Optional[int]) -> None:
self._max_invites = value
def upload_quota_bytes(self) -> Optional[int]:
if self.upload_quota_gb is None:
return None
# We describe the quota to users in "GB" or "gigabytes", but actually apply
# it as gibibytes (GiB) to be a bit more generous in case of confusion.
return self.upload_quota_gb << 30
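    # Illustrative: with upload_quota_gb == 5 this returns 5 << 30 == 5,368,709,120
    # bytes (5 GiB), slightly more than the 5 * 10**9 bytes of a strict decimal "5 GB".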
@cache_with_key(get_realm_used_upload_space_cache_key, timeout=3600*24*7)
def currently_used_upload_space_bytes(self) -> int:
used_space = Attachment.objects.filter(realm=self).aggregate(Sum('size'))['size__sum']
if used_space is None:
return 0
return used_space
def ensure_not_on_limited_plan(self) -> None:
if self.plan_type == Realm.LIMITED:
raise JsonableError(self.UPGRADE_TEXT_STANDARD)
@property
def subdomain(self) -> str:
return self.string_id
@property
def display_subdomain(self) -> str:
"""Likely to be temporary function to avoid signup messages being sent
to an empty topic"""
if self.string_id == "":
return "."
return self.string_id
@property
def uri(self) -> str:
return settings.EXTERNAL_URI_SCHEME + self.host
@property
def host(self) -> str:
# Use mark sanitized to prevent false positives from Pysa thinking that
# the host is user controlled.
return mark_sanitized(self.host_for_subdomain(self.subdomain))
@staticmethod
def host_for_subdomain(subdomain: str) -> str:
if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
return settings.EXTERNAL_HOST
default_host = f"{subdomain}.{settings.EXTERNAL_HOST}"
return settings.REALM_HOSTS.get(subdomain, default_host)
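    # Illustrative (hypothetical settings): with EXTERNAL_HOST = "zulip.example.com"
    # and no REALM_HOSTS override, host_for_subdomain("lear") returns
    # "lear.zulip.example.com", while host_for_subdomain("") (the root domain)
    # returns "zulip.example.com".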
@property
def is_zephyr_mirror_realm(self) -> bool:
return self.string_id == "zephyr"
@property
def webathena_enabled(self) -> bool:
return self.is_zephyr_mirror_realm
@property
def presence_disabled(self) -> bool:
return self.is_zephyr_mirror_realm
class Meta:
permissions = (
('administer', "Administer a realm"),
('api_super_user', "Can send messages as other users for mirroring"),
)
def realm_post_delete_handler(sender: Any, **kwargs: Any) -> None:
# This would be better as a functools.partial, but for some reason
# Django doesn't call it even when it's registered as a post_delete handler.
flush_realm(sender, from_deletion=True, **kwargs)
post_save.connect(flush_realm, sender=Realm)
post_delete.connect(realm_post_delete_handler, sender=Realm)
def get_realm(string_id: str) -> Realm:
return Realm.objects.get(string_id=string_id)
def name_changes_disabled(realm: Optional[Realm]) -> bool:
if realm is None:
return settings.NAME_CHANGES_DISABLED
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
def avatar_changes_disabled(realm: Realm) -> bool:
return settings.AVATAR_CHANGES_DISABLED or realm.avatar_changes_disabled
class RealmDomain(models.Model):
"""For an organization with emails_restricted_to_domains enabled, the list of
allowed domains"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
# should always be stored lowercase
domain: str = models.CharField(max_length=80, db_index=True)
allow_subdomains: bool = models.BooleanField(default=False)
class Meta:
unique_together = ("realm", "domain")
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email: str) -> str:
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email: str) -> str:
return email.split("@")[-1].lower()
class DomainNotAllowedForRealmError(Exception):
pass
class DisposableEmailError(Exception):
pass
class EmailContainsPlusError(Exception):
pass
def get_realm_domains(realm: Realm) -> List[Dict[str, str]]:
return list(realm.realmdomain_set.values('domain', 'allow_subdomains'))
class RealmEmoji(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
author: Optional["UserProfile"] = models.ForeignKey(
"UserProfile", blank=True, null=True, on_delete=CASCADE,
)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
name: str = models.TextField(validators=[
MinLengthValidator(1),
# The second part of the regex (negative lookbehind) disallows names
# ending with one of the punctuation characters.
RegexValidator(regex=r'^[0-9a-z.\-_]+(?<![.\-_])$',
message=ugettext_lazy("Invalid characters in emoji name"))])
# The basename of the custom emoji's filename; see PATH_ID_TEMPLATE for the full path.
file_name: Optional[str] = models.TextField(db_index=True, null=True, blank=True)
deactivated: bool = models.BooleanField(default=False)
PATH_ID_TEMPLATE = "{realm_id}/emoji/images/{emoji_file_name}"
def __str__(self) -> str:
return f"<RealmEmoji({self.realm.string_id}): {self.id} {self.name} {self.deactivated} {self.file_name}>"
def get_realm_emoji_dicts(realm: Realm,
only_active_emojis: bool=False) -> Dict[str, Dict[str, Any]]:
query = RealmEmoji.objects.filter(realm=realm).select_related('author')
if only_active_emojis:
query = query.filter(deactivated=False)
d = {}
from zerver.lib.emoji import get_emoji_url
for realm_emoji in query.all():
author_id = None
if realm_emoji.author:
author_id = realm_emoji.author_id
emoji_url = get_emoji_url(realm_emoji.file_name, realm_emoji.realm_id)
d[str(realm_emoji.id)] = dict(id=str(realm_emoji.id),
name=realm_emoji.name,
source_url=emoji_url,
deactivated=realm_emoji.deactivated,
author_id=author_id)
return d
def get_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
return get_realm_emoji_dicts(realm)
def get_active_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
realm_emojis = get_realm_emoji_dicts(realm, only_active_emojis=True)
d = {}
for emoji_id, emoji_dict in realm_emojis.items():
d[emoji_dict['name']] = emoji_dict
return d
def flush_realm_emoji(sender: Any, **kwargs: Any) -> None:
realm = kwargs['instance'].realm
cache_set(get_realm_emoji_cache_key(realm),
get_realm_emoji_uncached(realm),
timeout=3600*24*7)
cache_set(get_active_realm_emoji_cache_key(realm),
get_active_realm_emoji_uncached(realm),
timeout=3600*24*7)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value: str) -> None:
regex = re.compile(r'^(?:(?:[\w\-#_= /:]*|[+]|[!])(\(\?P<\w+>.+\)))+$')
error_msg = _('Invalid filter pattern. Valid characters are {}.').format(
'[ a-zA-Z_#=/:+!-]',)
if not regex.match(str(value)):
raise ValidationError(error_msg)
try:
re.compile(value)
except re.error:
# Regex is invalid
raise ValidationError(error_msg)
def filter_format_validator(value: str) -> None:
regex = re.compile(r'^([\.\/:a-zA-Z0-9#_?=&;~-]+%\(([a-zA-Z0-9_-]+)\)s)+[/a-zA-Z0-9#_?=&;~-]*$')
if not regex.match(value):
raise ValidationError(_('Invalid URL format string.'))
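# Illustrative (hypothetical) linkifier that passes both validators above:
#     pattern           = r'#(?P<id>[0-9]+)'
#     url_format_string = 'https://github.com/zulip/zulip/issues/%(id)s'
# so a message containing "#1234" would be rendered as a link to that issue.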
class RealmFilter(models.Model):
"""Realm-specific regular expressions to automatically linkify certain
strings inside the Markdown processor. See "Custom filters" in the settings UI.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
pattern: str = models.TextField(validators=[filter_pattern_validator])
url_format_string: str = models.TextField(validators=[URLValidator(), filter_format_validator])
class Meta:
unique_together = ("realm", "pattern")
def __str__(self) -> str:
return f"<RealmFilter({self.realm.string_id}): {self.pattern} {self.url_format_string}>"
def get_realm_filters_cache_key(realm_id: int) -> str:
return f'{cache.KEY_PREFIX}:all_realm_filters:{realm_id}'
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_realm_filters_cache: Dict[int, List[Tuple[str, str, int]]] = {}
def realm_in_local_realm_filters_cache(realm_id: int) -> bool:
return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id: int) -> List[Tuple[str, str, int]]:
if not realm_in_local_realm_filters_cache(realm_id):
per_request_realm_filters_cache[realm_id] = realm_filters_for_realm_remote_cache(realm_id)
return per_request_realm_filters_cache[realm_id]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id: int) -> List[Tuple[str, str, int]]:
filters = []
for realm_filter in RealmFilter.objects.filter(realm_id=realm_id):
filters.append((realm_filter.pattern, realm_filter.url_format_string, realm_filter.id))
return filters
def all_realm_filters() -> Dict[int, List[Tuple[str, str, int]]]:
filters: DefaultDict[int, List[Tuple[str, str, int]]] = defaultdict(list)
for realm_filter in RealmFilter.objects.all():
filters[realm_filter.realm_id].append((realm_filter.pattern,
realm_filter.url_format_string,
realm_filter.id))
return filters
def flush_realm_filter(sender: Any, **kwargs: Any) -> None:
realm_id = kwargs['instance'].realm_id
cache_delete(get_realm_filters_cache_key(realm_id))
try:
per_request_realm_filters_cache.pop(realm_id)
except KeyError:
pass
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
def flush_per_request_caches() -> None:
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_realm_filters_cache
per_request_realm_filters_cache = {}
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
type_id: int = models.IntegerField(db_index=True)
type: int = models.PositiveSmallIntegerField(db_index=True)
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta:
unique_together = ("type", "type_id")
    # N.B. If we used Django's choices=... we would get this for free (kinda)
_type_names = {
PERSONAL: 'personal',
STREAM: 'stream',
HUDDLE: 'huddle'}
def type_name(self) -> str:
# Raises KeyError if invalid
return self._type_names[self.type]
def __str__(self) -> str:
display_recipient = get_display_recipient(self)
return f"<Recipient: {display_recipient} ({self.type_id}, {self.type})>"
class UserProfile(AbstractBaseUser, PermissionsMixin):
USERNAME_FIELD = 'email'
MAX_NAME_LENGTH = 100
MIN_NAME_LENGTH = 2
API_KEY_LENGTH = 32
NAME_INVALID_CHARS = ['*', '`', "\\", '>', '"', '@']
DEFAULT_BOT = 1
"""
Incoming webhook bots are limited to only sending messages via webhooks.
Thus, it is less of a security risk to expose their API keys to third-party services,
since they can't be used to read messages.
"""
INCOMING_WEBHOOK_BOT = 2
# This value is also being used in static/js/settings_bots.js.
# On updating it here, update it there as well.
OUTGOING_WEBHOOK_BOT = 3
"""
Embedded bots run within the Zulip server itself; events are added to the
embedded_bots queue and then handled by a QueueProcessingWorker.
"""
EMBEDDED_BOT = 4
BOT_TYPES = {
DEFAULT_BOT: 'Generic bot',
INCOMING_WEBHOOK_BOT: 'Incoming webhook',
OUTGOING_WEBHOOK_BOT: 'Outgoing webhook',
EMBEDDED_BOT: 'Embedded bot',
}
SERVICE_BOT_TYPES = [
OUTGOING_WEBHOOK_BOT,
EMBEDDED_BOT,
]
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# For historical reasons, Zulip has two email fields. The
# `delivery_email` field is the user's email address, where all
# email notifications will be sent, and is used for all
# authentication use cases.
#
# The `email` field is the same as delivery_email in organizations
# with EMAIL_ADDRESS_VISIBILITY_EVERYONE. For other
# organizations, it will be a unique value of the form
# user1234@example.com. This field exists for backwards
# compatibility in Zulip APIs where users are referred to by their
# email address, not their ID; it should be used in all API use cases.
#
# Both fields are unique within a realm (in a case-insensitive fashion).
delivery_email: str = models.EmailField(blank=False, db_index=True)
email: str = models.EmailField(blank=False, db_index=True)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
# Foreign key to the Recipient object for PERSONAL type messages to this user.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
# The user's name. We prefer the model of a full_name
# over first+last because cultures vary on how many
# names one has, whether the family name is first or last, etc.
# It also allows organizations to encode a bit of non-name data in
# the "name" attribute if desired, like gender pronouns,
# graduation year, etc.
full_name: str = models.CharField(max_length=MAX_NAME_LENGTH)
date_joined: datetime.datetime = models.DateTimeField(default=timezone_now)
tos_version: Optional[str] = models.CharField(null=True, max_length=10)
api_key: str = models.CharField(max_length=API_KEY_LENGTH)
# Whether the user has access to server-level administrator pages, like /activity
is_staff: bool = models.BooleanField(default=False)
# For a normal user, this is True unless the user or an admin has
# deactivated their account. The name comes from Django; this field
# isn't related to presence or to whether the user has recently used Zulip.
#
# See also `long_term_idle`.
is_active: bool = models.BooleanField(default=True, db_index=True)
is_billing_admin: bool = models.BooleanField(default=False, db_index=True)
is_bot: bool = models.BooleanField(default=False, db_index=True)
bot_type: Optional[int] = models.PositiveSmallIntegerField(null=True, db_index=True)
bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
# Each role has a superset of the permissions of the next higher
# numbered role. When adding new roles, leave enough space for
# future roles to be inserted between currently adjacent
# roles. These constants appear in RealmAuditLog.extra_data, so
# changes to them will require a migration of RealmAuditLog.
ROLE_REALM_OWNER = 100
ROLE_REALM_ADMINISTRATOR = 200
# ROLE_MODERATOR = 300
ROLE_MEMBER = 400
ROLE_GUEST = 600
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
ROLE_TYPES = [
ROLE_REALM_OWNER,
ROLE_REALM_ADMINISTRATOR,
ROLE_MEMBER,
ROLE_GUEST,
]
# Whether the user has been "soft-deactivated" due to weeks of inactivity.
# For these users we avoid doing UserMessage table work, as an optimization
# for large Zulip organizations with lots of single-visit users.
long_term_idle: bool = models.BooleanField(default=False, db_index=True)
# When we last added basic UserMessage rows for a long_term_idle user.
last_active_message_id: Optional[int] = models.IntegerField(null=True)
# Mirror dummies are fake (!is_active) users used to provide
# message senders in our cross-protocol Zephyr<->Zulip content
# mirroring integration, so that we can display mirrored content
# like native Zulip messages (with a name + avatar, etc.).
is_mirror_dummy: bool = models.BooleanField(default=False)
# API super users are allowed to forge messages as sent by another
# user and to send to private streams; also used for Zephyr/Jabber mirroring.
is_api_super_user: bool = models.BooleanField(default=False, db_index=True)
### Notifications settings. ###
# Stream notifications.
enable_stream_desktop_notifications: bool = models.BooleanField(default=False)
enable_stream_email_notifications: bool = models.BooleanField(default=False)
enable_stream_push_notifications: bool = models.BooleanField(default=False)
enable_stream_audible_notifications: bool = models.BooleanField(default=False)
notification_sound: str = models.CharField(max_length=20, default='zulip')
wildcard_mentions_notify: bool = models.BooleanField(default=True)
# PM + @-mention notifications.
enable_desktop_notifications: bool = models.BooleanField(default=True)
pm_content_in_desktop_notifications: bool = models.BooleanField(default=True)
enable_sounds: bool = models.BooleanField(default=True)
enable_offline_email_notifications: bool = models.BooleanField(default=True)
message_content_in_email_notifications: bool = models.BooleanField(default=True)
enable_offline_push_notifications: bool = models.BooleanField(default=True)
enable_online_push_notifications: bool = models.BooleanField(default=True)
DESKTOP_ICON_COUNT_DISPLAY_MESSAGES = 1
DESKTOP_ICON_COUNT_DISPLAY_NOTIFIABLE = 2
DESKTOP_ICON_COUNT_DISPLAY_NONE = 3
desktop_icon_count_display: int = models.PositiveSmallIntegerField(
default=DESKTOP_ICON_COUNT_DISPLAY_MESSAGES)
enable_digest_emails: bool = models.BooleanField(default=True)
enable_login_emails: bool = models.BooleanField(default=True)
realm_name_in_notifications: bool = models.BooleanField(default=False)
presence_enabled: bool = models.BooleanField(default=True)
# Used for rate-limiting certain automated messages generated by bots
last_reminder: Optional[datetime.datetime] = models.DateTimeField(default=None, null=True)
# Minutes to wait before warning a bot owner that their bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
# API rate limits, formatted as a comma-separated list of range:max pairs
rate_limits: str = models.CharField(default="", max_length=100)
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Default streams for some deprecated/legacy classes of bot users.
default_sending_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
)
default_events_register_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
)
default_all_public_streams: bool = models.BooleanField(default=False)
# UI vars
enter_sends: Optional[bool] = models.BooleanField(null=True, default=False)
left_side_userlist: bool = models.BooleanField(default=False)
# display settings
default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
dense_mode: bool = models.BooleanField(default=True)
fluid_layout_width: bool = models.BooleanField(default=False)
high_contrast_mode: bool = models.BooleanField(default=False)
translate_emoticons: bool = models.BooleanField(default=False)
twenty_four_hour_time: bool = models.BooleanField(default=False)
starred_message_counts: bool = models.BooleanField(default=False)
COLOR_SCHEME_AUTOMATIC = 1
COLOR_SCHEME_NIGHT = 2
COLOR_SCHEME_LIGHT = 3
COLOR_SCHEME_CHOICES = [
COLOR_SCHEME_AUTOMATIC,
COLOR_SCHEME_NIGHT,
COLOR_SCHEME_LIGHT
]
color_scheme: int = models.PositiveSmallIntegerField(default=COLOR_SCHEME_AUTOMATIC)
# UI setting controlling Zulip's behavior of demoting in the sort
# order and graying out streams with no recent traffic. The
# default behavior, automatic, enables this behavior once a user
# is subscribed to 30+ streams in the webapp.
DEMOTE_STREAMS_AUTOMATIC = 1
DEMOTE_STREAMS_ALWAYS = 2
DEMOTE_STREAMS_NEVER = 3
DEMOTE_STREAMS_CHOICES = [
DEMOTE_STREAMS_AUTOMATIC,
DEMOTE_STREAMS_ALWAYS,
DEMOTE_STREAMS_NEVER,
]
demote_inactive_streams: int = models.PositiveSmallIntegerField(default=DEMOTE_STREAMS_AUTOMATIC)
# A timezone name from the `tzdata` database, as found in pytz.all_timezones.
#
# The longest existing name is 32 characters long, so max_length=40 seems
# like a safe choice.
#
# In Django, the convention is to use an empty string instead of NULL/None
# for text-based fields. For more information, see
# https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
timezone: str = models.CharField(max_length=40, default='')
# Emojisets
GOOGLE_EMOJISET = 'google'
GOOGLE_BLOB_EMOJISET = 'google-blob'
TEXT_EMOJISET = 'text'
TWITTER_EMOJISET = 'twitter'
EMOJISET_CHOICES = ((GOOGLE_EMOJISET, "Google modern"),
(GOOGLE_BLOB_EMOJISET, "Google classic"),
(TWITTER_EMOJISET, "Twitter"),
(TEXT_EMOJISET, "Plain text"))
emojiset: str = models.CharField(default=GOOGLE_BLOB_EMOJISET, choices=EMOJISET_CHOICES, max_length=20)
AVATAR_FROM_GRAVATAR = 'G'
AVATAR_FROM_USER = 'U'
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
(AVATAR_FROM_USER, 'Uploaded by user'),
)
avatar_source: str = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1)
avatar_version: int = models.PositiveSmallIntegerField(default=1)
avatar_hash: Optional[str] = models.CharField(null=True, max_length=64)
TUTORIAL_WAITING = 'W'
TUTORIAL_STARTED = 'S'
TUTORIAL_FINISHED = 'F'
TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"))
tutorial_status: str = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1)
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is if the step has been
# completed.
onboarding_steps: str = models.TextField(default='[]')
zoom_token: Optional[object] = JSONField(default=None, null=True)
objects: UserManager = UserManager()
# Define the types of the various automatically managed properties
property_types = dict(
color_scheme=int,
default_language=str,
demote_inactive_streams=int,
dense_mode=bool,
emojiset=str,
fluid_layout_width=bool,
high_contrast_mode=bool,
left_side_userlist=bool,
starred_message_counts=bool,
timezone=str,
translate_emoticons=bool,
twenty_four_hour_time=bool,
)
notification_setting_types = dict(
enable_desktop_notifications=bool,
enable_digest_emails=bool,
enable_login_emails=bool,
enable_offline_email_notifications=bool,
enable_offline_push_notifications=bool,
enable_online_push_notifications=bool,
enable_sounds=bool,
enable_stream_desktop_notifications=bool,
enable_stream_email_notifications=bool,
enable_stream_push_notifications=bool,
enable_stream_audible_notifications=bool,
wildcard_mentions_notify=bool,
message_content_in_email_notifications=bool,
notification_sound=str,
pm_content_in_desktop_notifications=bool,
desktop_icon_count_display=int,
realm_name_in_notifications=bool,
presence_enabled=bool,
)
ROLE_ID_TO_NAME_MAP = {
ROLE_REALM_OWNER: ugettext_lazy("Organization owner"),
ROLE_REALM_ADMINISTRATOR: ugettext_lazy("Organization administrator"),
ROLE_MEMBER: ugettext_lazy("Member"),
ROLE_GUEST: ugettext_lazy("Guest"),
}
def get_role_name(self) -> str:
return self.ROLE_ID_TO_NAME_MAP[self.role]
@property
def profile_data(self) -> ProfileData:
values = CustomProfileFieldValue.objects.filter(user_profile=self)
user_data = {v.field_id: {"value": v.value, "rendered_value": v.rendered_value} for v in values}
data: ProfileData = []
for field in custom_profile_fields_for_realm(self.realm_id):
field_values = user_data.get(field.id, None)
if field_values:
value, rendered_value = field_values.get("value"), field_values.get("rendered_value")
else:
value, rendered_value = None, None
field_type = field.field_type
if value is not None:
converter = field.FIELD_CONVERTERS[field_type]
value = converter(value)
field_data = field.as_dict()
data.append({
'id': field_data['id'],
'name': field_data['name'],
'type': field_data['type'],
'hint': field_data['hint'],
'field_data': field_data['field_data'],
'order': field_data['order'],
'value': value,
'rendered_value': rendered_value,
})
return data
def can_admin_user(self, target_user: 'UserProfile') -> bool:
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def __str__(self) -> str:
return f"<UserProfile: {self.email} {self.realm}>"
@property
def is_new_member(self) -> bool:
diff = (timezone_now() - self.date_joined).days
if diff < self.realm.waiting_period_threshold:
return True
return False
@property
def is_realm_admin(self) -> bool:
return self.role == UserProfile.ROLE_REALM_ADMINISTRATOR or \
self.role == UserProfile.ROLE_REALM_OWNER
@is_realm_admin.setter
def is_realm_admin(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_REALM_ADMINISTRATOR
elif self.role == UserProfile.ROLE_REALM_ADMINISTRATOR:
# We need to be careful to not accidentally change
# ROLE_GUEST to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def has_billing_access(self) -> bool:
return self.is_realm_owner or self.is_billing_admin
@property
def is_realm_owner(self) -> bool:
return self.role == UserProfile.ROLE_REALM_OWNER
@property
def is_guest(self) -> bool:
return self.role == UserProfile.ROLE_GUEST
@is_guest.setter
def is_guest(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_GUEST
elif self.role == UserProfile.ROLE_GUEST:
# We need to be careful to not accidentally change
# ROLE_REALM_ADMINISTRATOR to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def is_incoming_webhook(self) -> bool:
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@property
def allowed_bot_types(self) -> List[int]:
allowed_bot_types = []
if self.is_realm_admin or \
not self.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS:
allowed_bot_types.append(UserProfile.DEFAULT_BOT)
allowed_bot_types += [
UserProfile.INCOMING_WEBHOOK_BOT,
UserProfile.OUTGOING_WEBHOOK_BOT,
]
if settings.EMBEDDED_BOTS_ENABLED:
allowed_bot_types.append(UserProfile.EMBEDDED_BOT)
return allowed_bot_types
@staticmethod
def emojiset_choices() -> List[Dict[str, str]]:
return [dict(key=emojiset[0], text=emojiset[1]) for emojiset in UserProfile.EMOJISET_CHOICES]
@staticmethod
def emails_from_ids(user_ids: Sequence[int]) -> Dict[int, str]:
rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
return {row['id']: row['email'] for row in rows}
def email_address_is_realm_public(self) -> bool:
if self.realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
return True
if self.is_bot:
return True
return False
def has_permission(self, policy_name: str) -> bool:
if policy_name not in ['create_stream_policy', 'invite_to_stream_policy']:
raise AssertionError("Invalid policy")
if self.is_realm_admin:
return True
policy_value = getattr(self.realm, policy_name)
if policy_value == Realm.POLICY_ADMINS_ONLY:
return False
if self.is_guest:
return False
if policy_value == Realm.POLICY_MEMBERS_ONLY:
return True
return not self.is_new_member
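    # Illustrative outcomes (hypothetical users): a realm administrator always passes;
    # with create_stream_policy == Realm.POLICY_FULL_MEMBERS_ONLY a guest, or a member
    # still within realm.waiting_period_threshold days of joining, is refused, while a
    # full member is allowed.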
def can_create_streams(self) -> bool:
return self.has_permission('create_stream_policy')
def can_subscribe_other_users(self) -> bool:
return self.has_permission('invite_to_stream_policy')
def can_access_public_streams(self) -> bool:
return not (self.is_guest or self.realm.is_zephyr_mirror_realm)
def can_access_all_realm_members(self) -> bool:
return not (self.realm.is_zephyr_mirror_realm or self.is_guest)
def major_tos_version(self) -> int:
if self.tos_version is not None:
return int(self.tos_version.split('.')[0])
else:
return -1
def format_requestor_for_logs(self) -> str:
return "{}@{}".format(self.id, self.realm.string_id or 'root')
def set_password(self, password: Optional[str]) -> None:
if password is None:
self.set_unusable_password()
return
from zproject.backends import check_password_strength
if not check_password_strength(password):
raise PasswordTooWeakError
super().set_password(password)
class PasswordTooWeakError(Exception):
pass
class UserGroup(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=100)
members: Manager = models.ManyToManyField(UserProfile, through='UserGroupMembership')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
description: str = models.TextField(default='')
class Meta:
unique_together = (('realm', 'name'),)
class UserGroupMembership(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_group: UserGroup = models.ForeignKey(UserGroup, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
class Meta:
unique_together = (('user_group', 'user_profile'),)
def receives_offline_push_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_offline_push_notifications and
not user_profile.is_bot)
def receives_offline_email_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_offline_email_notifications and
not user_profile.is_bot)
def receives_online_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_online_push_notifications and
not user_profile.is_bot)
def receives_stream_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_stream_push_notifications and
not user_profile.is_bot)
def remote_user_to_email(remote_user: str) -> str:
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
# Data on a partially created user, before the completion of
# registration. This is used in at least three major code paths:
# * Realm creation, in which case realm is None.
#
# * Invitations, in which case referred_by will always be set.
#
# * Social authentication signup, where it's used to store data
# from the authentication step and pass it to the registration
# form.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
email: str = models.EmailField()
# If the pre-registration process provides a suggested full name for this user,
# store it here to use it to prepopulate the Full Name field in the registration form:
full_name: Optional[str] = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH, null=True)
full_name_validated: bool = models.BooleanField(default=False)
referred_by: Optional[UserProfile] = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE)
streams: Manager = models.ManyToManyField('Stream')
invited_at: datetime.datetime = models.DateTimeField(auto_now=True)
realm_creation: bool = models.BooleanField(default=False)
# Indicates whether the user needs a password. Users who were
# created via SSO style auth (e.g. GitHub/Google) generally do not.
password_required: bool = models.BooleanField(default=True)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status: int = models.IntegerField(default=0)
# The realm should only ever be None for PreregistrationUser
# objects created as part of realm creation.
realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
# Changes to INVITED_AS should also be reflected in
# settings_invites.invited_as_values in
# static/js/settings_invites.js
INVITE_AS = dict(
REALM_OWNER = 100,
REALM_ADMIN = 200,
MEMBER = 400,
GUEST_USER = 600,
)
invited_as: int = models.PositiveSmallIntegerField(default=INVITE_AS['MEMBER'])
def filter_to_valid_prereg_users(query: QuerySet) -> QuerySet:
days_to_activate = settings.INVITATION_LINK_VALIDITY_DAYS
active_value = confirmation_settings.STATUS_ACTIVE
revoked_value = confirmation_settings.STATUS_REVOKED
lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
return query.exclude(status__in=[active_value, revoked_value]).filter(
invited_at__gte=lowest_datetime)
class MultiuseInvite(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
referred_by: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
streams: Manager = models.ManyToManyField('Stream')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
invited_as: int = models.PositiveSmallIntegerField(default=PreregistrationUser.INVITE_AS['MEMBER'])
class EmailChangeStatus(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
new_email: str = models.EmailField()
old_email: str = models.EmailField()
updated_at: datetime.datetime = models.DateTimeField(auto_now=True)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status: int = models.IntegerField(default=0)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class AbstractPushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, 'apns'),
(GCM, 'gcm'),
)
kind: int = models.PositiveSmallIntegerField(choices=KINDS)
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token: str = models.CharField(max_length=4096, db_index=True)
# TODO: last_updated should be renamed date_created, since it is
# no longer maintained as a last_updated value.
last_updated: datetime.datetime = models.DateTimeField(auto_now=True)
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id: Optional[str] = models.TextField(null=True)
class Meta:
abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# The user whose device this is
user: UserProfile = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE)
class Meta:
unique_together = ("user", "kind", "token")
def generate_email_token_for_stream() -> str:
return secrets.token_hex(16)
class Stream(models.Model):
MAX_NAME_LENGTH = 60
MAX_DESCRIPTION_LENGTH = 1024
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
deactivated: bool = models.BooleanField(default=False)
description: str = models.CharField(max_length=MAX_DESCRIPTION_LENGTH, default='')
rendered_description: str = models.TextField(default='')
# Foreign key to the Recipient object for STREAM type messages to this stream.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
invite_only: Optional[bool] = models.BooleanField(null=True, default=False)
history_public_to_subscribers: bool = models.BooleanField(default=False)
# Whether this stream's content should be published by the web-public archive features
is_web_public: bool = models.BooleanField(default=False)
STREAM_POST_POLICY_EVERYONE = 1
STREAM_POST_POLICY_ADMINS = 2
STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS = 3
# TODO: Implement policy to restrict posting to a user group or admins.
# Who in the organization has permission to send messages to this stream.
stream_post_policy: int = models.PositiveSmallIntegerField(default=STREAM_POST_POLICY_EVERYONE)
STREAM_POST_POLICY_TYPES = [
STREAM_POST_POLICY_EVERYONE,
STREAM_POST_POLICY_ADMINS,
STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS,
]
# The unique thing about Zephyr public streams is that we never list their
# users. We may try to generalize this concept later, but for now
# we just use a concrete field. (Zephyr public streams aren't exactly like
# invite-only streams--while both are private in terms of listing users,
# for Zephyr we don't even list users to stream members, yet membership
    # is more public in the sense that you don't need a Zulip invite to join.)
    # This field is populated directly from UserProfile.is_zephyr_mirror_realm,
    # and the reason for denormalizing this field is performance.
is_in_zephyr_realm: bool = models.BooleanField(default=False)
# Used by the e-mail forwarder. The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream name length is 60, so we
# have plenty of room for the token.
email_token: str = models.CharField(
max_length=32, default=generate_email_token_for_stream, unique=True,
)
# For old messages being automatically deleted.
# Value NULL means "use retention policy of the realm".
# Value -1 means "disable retention policy for this stream unconditionally".
# Non-negative values have the natural meaning of "archive messages older than <value> days".
MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
'forever': -1,
'realm_default': None,
}
message_retention_days: Optional[int] = models.IntegerField(null=True, default=None)
# The very first message ID in the stream. Used to help clients
# determine whether they might need to display "more topics" for a
# stream based on what messages they have cached.
first_message_id: Optional[int] = models.IntegerField(null=True, db_index=True)
def __str__(self) -> str:
return f"<Stream: {self.name}>"
def is_public(self) -> bool:
# All streams are private in Zephyr mirroring realms.
return not self.invite_only and not self.is_in_zephyr_realm
def is_history_realm_public(self) -> bool:
return self.is_public()
def is_history_public_to_subscribers(self) -> bool:
return self.history_public_to_subscribers
# Stream fields included whenever a Stream object is provided to
# Zulip clients via the API. A few details worth noting:
# * "id" is represented as "stream_id" in most API interfaces.
# * "email_token" is not realm-public and thus is not included here.
# * is_in_zephyr_realm is a backend-only optimization.
# * "deactivated" streams are filtered from the API entirely.
# * "realm" and "recipient" are not exposed to clients via the API.
API_FIELDS = [
"name",
"id",
"description",
"rendered_description",
"invite_only",
"is_web_public",
"stream_post_policy",
"history_public_to_subscribers",
"first_message_id",
"message_retention_days",
"date_created",
]
@staticmethod
def get_client_data(query: QuerySet) -> List[Dict[str, Any]]:
query = query.only(*Stream.API_FIELDS)
return [row.to_dict() for row in query]
def to_dict(self) -> Dict[str, Any]:
result = {}
for field_name in self.API_FIELDS:
if field_name == "id":
result['stream_id'] = self.id
continue
elif field_name == "date_created":
result['date_created'] = datetime_to_timestamp(self.date_created)
continue
result[field_name] = getattr(self, field_name)
result['is_announcement_only'] = self.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS
return result
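# Illustrative sketch (added editorial comment, not part of the original code;
# field values below are invented): Stream.to_dict() renames "id" to
# "stream_id" and converts date_created to a UNIX timestamp, e.g. roughly
#     {"name": "general", "stream_id": 5, "description": "", ...,
#      "date_created": 1588300800, "is_announcement_only": False}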
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
class MutedTopic(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
topic_name: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
    # The default value for date_muted is a few weeks before we began
    # tracking when topics were muted.  It's designed
# to be obviously incorrect so that users can tell it's backfilled data.
date_muted: datetime.datetime = models.DateTimeField(default=datetime.datetime(2020, 1, 1, 0, 0, tzinfo=datetime.timezone.utc))
class Meta:
unique_together = ('user_profile', 'stream', 'topic_name')
def __str__(self) -> str:
return (f"<MutedTopic: ({self.user_profile.email}, {self.stream.name}, {self.topic_name}, {self.date_muted})>")
class Client(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=30, db_index=True, unique=True)
def __str__(self) -> str:
return f"<Client: {self.name}>"
get_client_cache: Dict[str, Client] = {}
def get_client(name: str) -> Client:
# Accessing KEY_PREFIX through the module is necessary
# because we need the updated value of the variable.
cache_name = cache.KEY_PREFIX + name
if cache_name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[cache_name] = result
return get_client_cache[cache_name]
def get_client_cache_key(name: str) -> str:
return f'get_client:{make_safe_digest(name)}'
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name: str) -> Client:
(client, _) = Client.objects.get_or_create(name=name)
return client
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_realm_stream(stream_name: str, realm_id: int) -> Stream:
return Stream.objects.select_related().get(
name__iexact=stream_name.strip(), realm_id=realm_id)
def stream_name_in_use(stream_name: str, realm_id: int) -> bool:
return Stream.objects.filter(
name__iexact=stream_name.strip(),
realm_id=realm_id,
).exists()
def get_active_streams(realm: Optional[Realm]) -> QuerySet:
# TODO: Change return type to QuerySet[Stream]
# NOTE: Return value is used as a QuerySet, so cannot currently be Sequence[QuerySet]
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name: str, realm: Realm) -> Stream:
'''
Callers that don't have a Realm object already available should use
get_realm_stream directly, to avoid unnecessarily fetching the
Realm object.
'''
return get_realm_stream(stream_name, realm.id)
def get_stream_by_id_in_realm(stream_id: int, realm: Realm) -> Stream:
return Stream.objects.select_related().get(id=stream_id, realm=realm)
def bulk_get_streams(realm: Realm, stream_names: STREAM_NAMES) -> Dict[str, Any]:
def fetch_streams_by_name(stream_names: List[str]) -> Sequence[Stream]:
#
# This should be just
#
# Stream.objects.select_related().filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
where_clause = "upper(zerver_stream.name::text) IN (SELECT upper(name) FROM unnest(%s) AS name)"
return get_active_streams(realm).select_related().extra(
where=[where_clause],
params=(list(stream_names),))
def stream_name_to_cache_key(stream_name: str) -> str:
return get_stream_cache_key(stream_name, realm.id)
def stream_to_lower_name(stream: Stream) -> str:
return stream.name.lower()
return bulk_cached_fetch(
stream_name_to_cache_key,
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=stream_to_lower_name,
)
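# Hedged usage sketch (added editorial comment; the stream names are
# invented): bulk_get_streams returns a dict keyed by the lower-cased
# stream name, e.g.
#     bulk_get_streams(realm, {"Denmark", "design"})
#     -> {"denmark": <Stream: Denmark>, "design": <Stream: design>}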
def get_huddle_recipient(user_profile_ids: Set[int]) -> Recipient:
# The caller should ensure that user_profile_ids includes
# the sender. Note that get_huddle hits the cache, and then
# we hit another cache to get the recipient. We may want to
# unify our caching strategy here.
huddle = get_huddle(list(user_profile_ids))
return huddle.recipient
def get_huddle_user_ids(recipient: Recipient) -> List[int]:
assert(recipient.type == Recipient.HUDDLE)
return Subscription.objects.filter(
recipient=recipient,
).order_by('user_profile_id').values_list('user_profile_id', flat=True)
def bulk_get_huddle_user_ids(recipients: List[Recipient]) -> Dict[int, List[int]]:
"""
Takes a list of huddle-type recipients, returns a dict
mapping recipient id to list of user ids in the huddle.
"""
assert all(recipient.type == Recipient.HUDDLE for recipient in recipients)
if not recipients:
return {}
subscriptions = Subscription.objects.filter(
recipient__in=recipients,
).order_by('user_profile_id')
result_dict: Dict[int, List[int]] = {}
for recipient in recipients:
result_dict[recipient.id] = [subscription.user_profile_id
for subscription in subscriptions
if subscription.recipient_id == recipient.id]
return result_dict
class AbstractMessage(models.Model):
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# The message's topic.
#
# Early versions of Zulip called this concept a "subject", as in an email
# "subject line", before changing to "topic" in 2013 (commit dac5a46fa).
# UI and user documentation now consistently say "topic". New APIs and
# new code should generally also say "topic".
#
# See also the `topic_name` method on `Message`.
subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
content: str = models.TextField()
rendered_content: Optional[str] = models.TextField(null=True)
rendered_content_version: Optional[int] = models.IntegerField(null=True)
date_sent: datetime.datetime = models.DateTimeField('date sent', db_index=True)
sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
last_edit_time: Optional[datetime.datetime] = models.DateTimeField(null=True)
# A JSON-encoded list of objects describing any past edits to this
# message, oldest first.
edit_history: Optional[str] = models.TextField(null=True)
has_attachment: bool = models.BooleanField(default=False, db_index=True)
has_image: bool = models.BooleanField(default=False, db_index=True)
has_link: bool = models.BooleanField(default=False, db_index=True)
class Meta:
abstract = True
def __str__(self) -> str:
display_recipient = get_display_recipient(self.recipient)
return f"<{self.__class__.__name__}: {display_recipient} / {self.subject} / {self.sender}>"
class ArchiveTransaction(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
timestamp: datetime.datetime = models.DateTimeField(default=timezone_now, db_index=True)
# Marks if the data archived in this transaction has been restored:
restored: bool = models.BooleanField(default=False, db_index=True)
type: int = models.PositiveSmallIntegerField(db_index=True)
# Valid types:
RETENTION_POLICY_BASED = 1 # Archiving was executed due to automated retention policies
MANUAL = 2 # Archiving was run manually, via move_messages_to_archive function
# ForeignKey to the realm with which objects archived in this transaction are associated.
# If type is set to MANUAL, this should be null.
realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
def __str__(self) -> str:
return "ArchiveTransaction id: {id}, type: {type}, realm: {realm}, timestamp: {timestamp}".format(
id=self.id,
type="MANUAL" if self.type == self.MANUAL else "RETENTION_POLICY_BASED",
realm=self.realm.string_id if self.realm else None,
timestamp=self.timestamp,
)
class ArchivedMessage(AbstractMessage):
"""Used as a temporary holding place for deleted messages before they
are permanently deleted. This is an important part of a robust
'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
archive_transaction: ArchiveTransaction = models.ForeignKey(ArchiveTransaction, on_delete=CASCADE)
class Message(AbstractMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
def topic_name(self) -> str:
"""
Please start using this helper to facilitate an
eventual switch over to a separate topic table.
"""
return self.subject
def set_topic_name(self, topic_name: str) -> None:
self.subject = topic_name
def is_stream_message(self) -> bool:
'''
Find out whether a message is a stream message by
looking up its recipient.type. TODO: Make this
an easier operation by denormalizing the message
type onto Message, either explicitly (message.type)
or implicitly (message.stream_id is not None).
'''
return self.recipient.type == Recipient.STREAM
def get_realm(self) -> Realm:
return self.sender.realm
def save_rendered_content(self) -> None:
self.save(update_fields=["rendered_content", "rendered_content_version"])
@staticmethod
def need_to_render_content(rendered_content: Optional[str],
rendered_content_version: Optional[int],
markdown_version: int) -> bool:
return (rendered_content is None or
rendered_content_version is None or
rendered_content_version < markdown_version)
def sent_by_human(self) -> bool:
"""Used to determine whether a message was sent by a full Zulip UI
style client (and thus whether the message should be treated
as sent by a human and automatically marked as read for the
sender). The purpose of this distinction is to ensure that
        messages sent to the user by e.g. a Google Calendar integration
using the user's own API key don't get marked as read
automatically.
"""
sending_client = self.sending_client.name.lower()
return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
'zulipmobile', 'zulipelectron', 'zulipterminal', 'snipe',
'website', 'ios', 'android')) or (
'desktop app' in sending_client)
@staticmethod
def is_status_message(content: str, rendered_content: str) -> bool:
"""
"status messages" start with /me and have special rendering:
/me loves chocolate -> Full Name loves chocolate
"""
if content.startswith('/me '):
return True
return False
def get_context_for_message(message: Message) -> Sequence[Message]:
# TODO: Change return type to QuerySet[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
date_sent__gt=message.date_sent - timedelta(minutes=15),
).order_by('-id')[:10]
post_save.connect(flush_message, sender=Message)
class AbstractSubMessage(models.Model):
# We can send little text messages that are associated with a regular
# Zulip message. These can be used for experimental widgets like embedded
# games, surveys, mini threads, etc. These are designed to be pretty
# generic in purpose.
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
msg_type: str = models.TextField()
content: str = models.TextField()
class Meta:
abstract = True
class SubMessage(AbstractSubMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
fields = ['id', 'message_id', 'sender_id', 'msg_type', 'content']
query = SubMessage.objects.filter(message_id__in=needed_ids).values(*fields)
query = query.order_by('message_id', 'id')
return list(query)
class ArchivedSubMessage(AbstractSubMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
post_save.connect(flush_submessage, sender=SubMessage)
class Draft(models.Model):
""" Server-side storage model for storing drafts so that drafts can be synced across
multiple clients/devices.
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
recipient: Optional[Recipient] = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
topic: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
content: str = models.TextField() # Length should not exceed MAX_MESSAGE_LENGTH
last_edit_time: datetime.datetime = models.DateTimeField(db_index=True)
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.user_profile.email} / {self.id} / {self.last_edit_time}>"
def to_dict(self) -> Dict[str, Any]:
if self.recipient is None:
_type = ""
to = []
elif self.recipient.type == Recipient.STREAM:
_type = "stream"
to = [self.recipient.type_id]
else:
_type = "private"
if self.recipient.type == Recipient.PERSONAL:
to = [self.recipient.type_id]
else:
to = []
for r in get_display_recipient(self.recipient):
assert(not isinstance(r, str)) # It will only be a string for streams
if not r["id"] == self.user_profile_id:
to.append(r["id"])
return {
"id": self.id,
"type": _type,
"to": to,
"topic": self.topic,
"content": self.content,
"timestamp": int(self.last_edit_time.timestamp()),
}
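# Example output shape for Draft.to_dict() on a stream draft (added editorial
# comment; the values are invented for illustration):
#     {"id": 7, "type": "stream", "to": [42], "topic": "lunch",
#      "content": "anyone?", "timestamp": 1593000000}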
class AbstractReaction(models.Model):
"""For emoji reactions to messages (and potentially future reaction types).
Emoji are surprisingly complicated to implement correctly. For details
on how this subsystem works, see:
https://zulip.readthedocs.io/en/latest/subsystems/emoji.html
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# The user-facing name for an emoji reaction. With emoji aliases,
# there may be multiple accepted names for a given emoji; this
# field encodes which one the user selected.
emoji_name: str = models.TextField()
UNICODE_EMOJI = 'unicode_emoji'
REALM_EMOJI = 'realm_emoji'
ZULIP_EXTRA_EMOJI = 'zulip_extra_emoji'
REACTION_TYPES = ((UNICODE_EMOJI, ugettext_lazy("Unicode emoji")),
(REALM_EMOJI, ugettext_lazy("Custom emoji")),
(ZULIP_EXTRA_EMOJI, ugettext_lazy("Zulip extra emoji")))
reaction_type: str = models.CharField(default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30)
# A string that uniquely identifies a particular emoji. The format varies
# by type:
#
# * For Unicode emoji, a dash-separated hex encoding of the sequence of
# Unicode codepoints that define this emoji in the Unicode
# specification. For examples, see "non_qualified" or "unified" in the
# following data, with "non_qualified" taking precedence when both present:
# https://raw.githubusercontent.com/iamcal/emoji-data/master/emoji_pretty.json
#
# * For realm emoji (aka user uploaded custom emoji), the ID
# (in ASCII decimal) of the RealmEmoji object.
#
# * For "Zulip extra emoji" (like :zulip:), the filename of the emoji.
emoji_code: str = models.TextField()
class Meta:
abstract = True
unique_together = (("user_profile", "message", "emoji_name"),
("user_profile", "message", "reaction_type", "emoji_code"))
class Reaction(AbstractReaction):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
fields = ['message_id', 'emoji_name', 'emoji_code', 'reaction_type',
'user_profile__email', 'user_profile__id', 'user_profile__full_name']
return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
def __str__(self) -> str:
return f"{self.user_profile.email} / {self.message.id} / {self.emoji_name}"
class ArchivedReaction(AbstractReaction):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table indicating that that user received that message. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred or collapsed the message, was
# mentioned in the message, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class AbstractUserMessage(models.Model):
id: int = models.BigAutoField(primary_key=True)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# The order here is important! It's the order of fields in the bitfield.
ALL_FLAGS = [
'read',
'starred',
'collapsed',
'mentioned',
'wildcard_mentioned',
# These next 4 flags are from features that have since been removed.
'summarize_in_home',
'summarize_in_stream',
'force_expand',
'force_collapse',
# Whether the message contains any of the user's alert words.
'has_alert_word',
# The historical flag is used to mark messages which the user
# did not receive when they were sent, but later added to
        # their history via e.g. starring the message.  This matters for
        # the accounting behind the "Subscribed to stream" dividers.
'historical',
# Whether the message is a private message; this flag is a
# denormalization of message.recipient.type to support an
# efficient index on UserMessage for a user's private messages.
'is_private',
# Whether we've sent a push notification to the user's mobile
# devices for this message that has not been revoked.
'active_mobile_push_notification',
]
# Certain flags are used only for internal accounting within the
# Zulip backend, and don't make sense to expose to the API.
NON_API_FLAGS = {"is_private", "active_mobile_push_notification"}
# Certain additional flags are just set once when the UserMessage
# row is created.
NON_EDITABLE_FLAGS = {
# These flags are bookkeeping and don't make sense to edit.
"has_alert_word",
"mentioned",
"wildcard_mentioned",
"historical",
# Unused flags can't be edited.
"force_expand",
"force_collapse",
"summarize_in_home",
"summarize_in_stream",
}
flags: BitHandler = BitField(flags=ALL_FLAGS, default=0)
class Meta:
abstract = True
unique_together = ("user_profile", "message")
@staticmethod
def where_unread() -> str:
        # Use this for Django ORM queries to access unread messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
return 'flags & 1 = 0'
@staticmethod
def where_starred() -> str:
# Use this for Django ORM queries to access starred messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
#
# The key detail is that e.g.
# UserMessage.objects.filter(user_profile=user_profile, flags=UserMessage.flags.starred)
# will generate a query involving `flags & 2 = 2`, which doesn't match our index.
return 'flags & 2 <> 0'
@staticmethod
def where_active_push_notification() -> str:
# See where_starred for documentation.
return 'flags & 4096 <> 0'
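    # Editorial note (added; derived from the ALL_FLAGS ordering above): each
    # flag's bit value is 2 ** its index in ALL_FLAGS, so read -> 1,
    # starred -> 2, collapsed -> 4, ..., active_mobile_push_notification ->
    # 4096, which is why the raw SQL helpers above test `flags & 1`,
    # `flags & 2` and `flags & 4096`.  A hedged usage sketch:
    #     UserMessage.objects.filter(user_profile=user_profile).extra(
    #         where=[UserMessage.where_unread()])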
def flags_list(self) -> List[str]:
flags = int(self.flags)
return self.flags_list_for_flags(flags)
@staticmethod
def flags_list_for_flags(val: int) -> List[str]:
'''
This function is highly optimized, because it actually slows down
sending messages in a naive implementation.
'''
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if (val & mask) and flag not in AbstractUserMessage.NON_API_FLAGS:
flags.append(flag)
mask <<= 1
return flags
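    # Hedged worked example (added editorial comment): with ALL_FLAGS as
    # defined above,
    #     flags_list_for_flags(3)    -> ['read', 'starred']
    #     flags_list_for_flags(2048) -> []   # is_private is a NON_API_FLAG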
def __str__(self) -> str:
display_recipient = get_display_recipient(self.message.recipient)
return f"<{self.__class__.__name__}: {display_recipient} / {self.user_profile.email} ({self.flags_list()})>"
class UserMessage(AbstractUserMessage):
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
def get_usermessage_by_message_id(user_profile: UserProfile, message_id: int) -> Optional[UserMessage]:
try:
return UserMessage.objects.select_related().get(user_profile=user_profile,
message__id=message_id)
except UserMessage.DoesNotExist:
return None
class ArchivedUserMessage(AbstractUserMessage):
"""Used as a temporary holding place for deleted UserMessages objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
class AbstractAttachment(models.Model):
file_name: str = models.TextField(db_index=True)
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id: str = models.TextField(db_index=True, unique=True)
owner: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Optional[Realm] = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)
create_time: datetime.datetime = models.DateTimeField(
default=timezone_now, db_index=True,
)
# Size of the uploaded file, in bytes
size: int = models.IntegerField()
    # The two fields below let us avoid looking up the corresponding
# messages/streams to check permissions before serving these files.
# Whether this attachment has been posted to a public stream, and
# thus should be available to all non-guest users in the
# organization (even if they weren't a recipient of a message
# linking to it).
is_realm_public: bool = models.BooleanField(default=False)
# Whether this attachment has been posted to a web-public stream,
# and thus should be available to everyone on the internet, even
# if the person isn't logged in.
is_web_public: bool = models.BooleanField(default=False)
class Meta:
abstract = True
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.file_name}>"
class ArchivedAttachment(AbstractAttachment):
"""Used as a temporary holding place for deleted Attachment objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
messages: Manager = models.ManyToManyField(ArchivedMessage)
class Attachment(AbstractAttachment):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
messages: Manager = models.ManyToManyField(Message)
def is_claimed(self) -> bool:
return self.messages.count() > 0
def to_dict(self) -> Dict[str, Any]:
return {
'id': self.id,
'name': self.file_name,
'path_id': self.path_id,
'size': self.size,
# convert to JavaScript-style UNIX timestamp so we can take
# advantage of client timezones.
'create_time': int(time.mktime(self.create_time.timetuple()) * 1000),
'messages': [{
'id': m.id,
'date_sent': int(time.mktime(m.date_sent.timetuple()) * 1000),
} for m in self.messages.all()],
}
post_save.connect(flush_used_upload_space_cache, sender=Attachment)
post_delete.connect(flush_used_upload_space_cache, sender=Attachment)
def validate_attachment_request(user_profile: UserProfile, path_id: str) -> Optional[bool]:
try:
attachment = Attachment.objects.get(path_id=path_id)
except Attachment.DoesNotExist:
return None
if user_profile == attachment.owner:
# If you own the file, you can access it.
return True
if (attachment.is_realm_public and attachment.realm == user_profile.realm and
user_profile.can_access_public_streams()):
# Any user in the realm can access realm-public files
return True
messages = attachment.messages.all()
if UserMessage.objects.filter(user_profile=user_profile, message__in=messages).exists():
# If it was sent in a private message or private stream
# message, then anyone who received that message can access it.
return True
# The user didn't receive any of the messages that included this
# attachment. But they might still have access to it, if it was
# sent to a stream they are on where history is public to
# subscribers.
# These are subscriptions to a stream one of the messages was sent to
relevant_stream_ids = Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__in=[m.recipient_id for m in messages]).values_list("recipient__type_id", flat=True)
if len(relevant_stream_ids) == 0:
return False
return Stream.objects.filter(id__in=relevant_stream_ids,
history_public_to_subscribers=True).exists()
def get_old_unclaimed_attachments(weeks_ago: int) -> Sequence[Attachment]:
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone_now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# Whether the user has since unsubscribed. We mark Subscription
# objects as inactive, rather than deleting them, when a user
# unsubscribes, so we can preserve user customizations like
# notification settings, stream color, etc., if the user later
# resubscribes.
active: bool = models.BooleanField(default=True)
ROLE_STREAM_ADMINISTRATOR = 20
ROLE_MEMBER = 50
ROLE_TYPES = [
ROLE_STREAM_ADMINISTRATOR,
ROLE_MEMBER,
]
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
# Whether this user had muted this stream.
is_muted: Optional[bool] = models.BooleanField(null=True, default=False)
DEFAULT_STREAM_COLOR = "#c2c2c2"
color: str = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
pin_to_top: bool = models.BooleanField(default=False)
# These fields are stream-level overrides for the user's default
# configuration for notification, configured in UserProfile. The
# default, None, means we just inherit the user-level default.
desktop_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
audible_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
push_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
email_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
wildcard_mentions_notify: Optional[bool] = models.BooleanField(null=True, default=None)
class Meta:
unique_together = ("user_profile", "recipient")
def __str__(self) -> str:
return f"<Subscription: {self.user_profile} -> {self.recipient}>"
@property
def is_stream_admin(self) -> bool:
return self.role == Subscription.ROLE_STREAM_ADMINISTRATOR
# Subscription fields included whenever a Subscription object is provided to
# Zulip clients via the API. A few details worth noting:
# * These fields will generally be merged with Stream.API_FIELDS
# data about the stream.
# * "user_profile" is usually implied as full API access to Subscription
# is primarily done for the current user; API access to other users'
# subscriptions is generally limited to boolean yes/no.
# * "id" and "recipient_id" are not included as they are not used
# in the Zulip API; it's an internal implementation detail.
# Subscription objects are always looked up in the API via
# (user_profile, stream) pairs.
# * "active" is often excluded in API use cases where it is implied.
# * "is_muted" often needs to be copied to not "in_home_view" for
# backwards-compatibility.
API_FIELDS = [
"color",
"is_muted",
"pin_to_top",
"audible_notifications",
"desktop_notifications",
"email_notifications",
"push_notifications",
"wildcard_mentions_notify",
"role",
]
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid: int) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email: str) -> UserProfile:
"""This function is intended to be used by our unit tests and for
manual manage.py shell work; robust code must use get_user or
get_user_by_delivery_email instead, because Zulip supports
multiple users with a given (delivery) email address existing on a
single server (in different realms).
"""
return UserProfile.objects.select_related().get(delivery_email__iexact=email.strip())
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600*24*7)
def maybe_get_user_profile_by_api_key(api_key: str) -> Optional[UserProfile]:
try:
return UserProfile.objects.select_related().get(api_key=api_key)
except UserProfile.DoesNotExist:
# We will cache failed lookups with None. The
# use case here is that broken API clients may
# continually ask for the same wrong API key, and
# we want to handle that as quickly as possible.
return None
def get_user_profile_by_api_key(api_key: str) -> UserProfile:
user_profile = maybe_get_user_profile_by_api_key(api_key)
if user_profile is None:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_by_delivery_email(email: str, realm: Realm) -> UserProfile:
"""Fetches a user given their delivery email. For use in
authentication/registration contexts. Do not use for user-facing
views (e.g. Zulip API endpoints) as doing so would violate the
EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
those code paths.
"""
return UserProfile.objects.select_related().get(
delivery_email__iexact=email.strip(), realm=realm)
def get_users_by_delivery_email(emails: Set[str], realm: Realm) -> QuerySet:
"""This is similar to get_user_by_delivery_email, and
it has the same security caveats. It gets multiple
users and returns a QuerySet, since most callers
will only need two or three fields.
If you are using this to get large UserProfile objects, you are
probably making a mistake, but if you must,
then use `select_related`.
"""
    # Django doesn't support delivery_email__iexact__in, so
    # we simply OR all the filters that we'd do for the
    # one-email case.
email_filter = Q()
for email in emails:
email_filter |= Q(delivery_email__iexact=email.strip())
return UserProfile.objects.filter(realm=realm).filter(email_filter)
@cache_with_key(user_profile_cache_key, timeout=3600*24*7)
def get_user(email: str, realm: Realm) -> UserProfile:
"""Fetches the user by its visible-to-other users username (in the
`email` field). For use in API contexts; do not use in
authentication/registration contexts as doing so will break
authentication in organizations using
EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
get_user_by_delivery_email.
"""
return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
def get_active_user(email: str, realm: Realm) -> UserProfile:
"""Variant of get_user_by_email that excludes deactivated users.
See get_user docstring for important usage notes."""
user_profile = get_user(email, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid, realm=realm)
def get_active_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
user_profile = get_user_profile_by_id_in_realm(uid, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_including_cross_realm(email: str, realm: Optional[Realm]=None) -> UserProfile:
if is_cross_realm_bot_email(email):
return get_system_bot(email)
assert realm is not None
return get_user(email, realm)
@cache_with_key(bot_profile_cache_key, timeout=3600*24*7)
def get_system_bot(email: str) -> UserProfile:
return UserProfile.objects.select_related().get(email__iexact=email.strip())
def get_user_by_id_in_realm_including_cross_realm(
uid: int,
realm: Optional[Realm],
) -> UserProfile:
user_profile = get_user_profile_by_id(uid)
if user_profile.realm == realm:
return user_profile
# Note: This doesn't validate whether the `realm` passed in is
# None/invalid for the CROSS_REALM_BOT_EMAILS case.
if user_profile.delivery_email in settings.CROSS_REALM_BOT_EMAILS:
return user_profile
raise UserProfile.DoesNotExist()
@cache_with_key(realm_user_dicts_cache_key, timeout=3600*24*7)
def get_realm_user_dicts(realm_id: int) -> List[Dict[str, Any]]:
return UserProfile.objects.filter(
realm_id=realm_id,
).values(*realm_user_dict_fields)
@cache_with_key(active_user_ids_cache_key, timeout=3600*24*7)
def active_user_ids(realm_id: int) -> List[int]:
query = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).values_list('id', flat=True)
return list(query)
@cache_with_key(active_non_guest_user_ids_cache_key, timeout=3600*24*7)
def active_non_guest_user_ids(realm_id: int) -> List[int]:
query = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).exclude(
role=UserProfile.ROLE_GUEST,
).values_list('id', flat=True)
return list(query)
def get_source_profile(email: str, string_id: str) -> Optional[UserProfile]:
try:
return get_user_by_delivery_email(email, get_realm(string_id))
except (Realm.DoesNotExist, UserProfile.DoesNotExist):
return None
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_bot_dicts_in_realm(realm: Realm) -> List[Dict[str, Any]]:
return UserProfile.objects.filter(realm=realm, is_bot=True).values(*bot_dict_fields)
def is_cross_realm_bot_email(email: str) -> bool:
return email.lower() in settings.CROSS_REALM_BOT_EMAILS
# The Huddle class represents a group of individuals who have had a
# group private message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash: str = models.CharField(max_length=40, db_index=True, unique=True)
# Foreign key to the Recipient object for this Huddle.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
def get_huddle_hash(id_list: List[int]) -> str:
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
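# Hedged worked example (added editorial comment): the huddle hash is
# order-insensitive and deduplicated, so e.g.
#     get_huddle_hash([3, 1, 2, 2]) == make_safe_digest("1,2,3")
# i.e. any given set of user ids always maps to the same Huddle row.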
def huddle_hash_cache_key(huddle_hash: str) -> str:
return f"huddle_by_hash:{huddle_hash}"
def get_huddle(id_list: List[int]) -> Huddle:
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash: str, id_list: List[int]) -> Huddle:
with transaction.atomic():
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
recipient = Recipient.objects.create(type_id=huddle.id,
type=Recipient.HUDDLE)
huddle.recipient = recipient
huddle.save(update_fields=["recipient"])
subs_to_create = [Subscription(recipient=recipient,
user_profile_id=user_profile_id)
for user_profile_id in id_list]
Subscription.objects.bulk_create(subs_to_create)
return huddle
class UserActivity(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
query: str = models.CharField(max_length=50, db_index=True)
count: int = models.IntegerField()
last_visit: datetime.datetime = models.DateTimeField('last visit')
class Meta:
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
start: datetime.datetime = models.DateTimeField('start time', db_index=True)
end: datetime.datetime = models.DateTimeField('end time', db_index=True)
class UserPresence(models.Model):
"""A record from the last time we heard from a given user on a given client.
This is a tricky subsystem, because it is highly optimized. See the docs:
https://zulip.readthedocs.io/en/latest/subsystems/presence.html
"""
class Meta:
unique_together = ("user_profile", "client")
index_together = [
("realm", "timestamp"),
]
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
# The time we heard this update from the client.
timestamp: datetime.datetime = models.DateTimeField('presence changed')
# The user was actively using this Zulip client as of `timestamp` (i.e.,
# they had interacted with the client recently). When the timestamp is
# itself recent, this is the green "active" status in the webapp.
ACTIVE = 1
# There had been no user activity (keyboard/mouse/etc.) on this client
# recently. So the client was online at the specified time, but it
# could be the user's desktop which they were away from. Displayed as
# orange/idle if the timestamp is current.
IDLE = 2
# Information from the client about the user's recent interaction with
# that client, as of `timestamp`. Possible values above.
#
# There is no "inactive" status, because that is encoded by the
# timestamp being old.
status: int = models.PositiveSmallIntegerField(default=ACTIVE)
@staticmethod
def status_to_string(status: int) -> str:
if status == UserPresence.ACTIVE:
return 'active'
elif status == UserPresence.IDLE:
return 'idle'
else: # nocoverage # TODO: Add a presence test to cover this.
raise ValueError(f'Unknown status: {status}')
@staticmethod
def to_presence_dict(client_name: str, status: int, dt: datetime.datetime, push_enabled: bool=False,
has_push_devices: bool=False) -> Dict[str, Any]:
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
def to_dict(self) -> Dict[str, Any]:
return UserPresence.to_presence_dict(
self.client.name,
self.status,
self.timestamp,
)
@staticmethod
def status_from_string(status: str) -> Optional[int]:
if status == 'active':
# See https://github.com/python/mypy/issues/2611
status_val: Optional[int] = UserPresence.ACTIVE
elif status == 'idle':
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
class UserStatus(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.OneToOneField(UserProfile, on_delete=CASCADE)
timestamp: datetime.datetime = models.DateTimeField()
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
NORMAL = 0
AWAY = 1
status: int = models.PositiveSmallIntegerField(default=NORMAL)
status_text: str = models.CharField(max_length=255, default='')
class DefaultStream(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
class Meta:
unique_together = ("realm", "stream")
class DefaultStreamGroup(models.Model):
MAX_NAME_LENGTH = 60
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
streams: Manager = models.ManyToManyField('Stream')
description: str = models.CharField(max_length=1024, default='')
class Meta:
unique_together = ("realm", "name")
def to_dict(self) -> Dict[str, Any]:
return dict(name=self.name,
id=self.id,
description=self.description,
streams=[stream.to_dict() for stream in self.streams.all()])
def get_default_stream_groups(realm: Realm) -> List[DefaultStreamGroup]:
return DefaultStreamGroup.objects.filter(realm=realm)
class AbstractScheduledJob(models.Model):
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
# JSON representation of arguments to consumer
data: str = models.TextField()
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class Meta:
abstract = True
class ScheduledEmail(AbstractScheduledJob):
# Exactly one of users or address should be set. These are
# duplicate values, used to efficiently filter the set of
# ScheduledEmails for use in clear_scheduled_emails; the
# recipients used for actually sending messages are stored in the
# data field of AbstractScheduledJob.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
users: Manager = models.ManyToManyField(UserProfile)
# Just the address part of a full "name <address>" email address
address: Optional[str] = models.EmailField(null=True, db_index=True)
# Valid types are below
WELCOME = 1
DIGEST = 2
INVITATION_REMINDER = 3
type: int = models.PositiveSmallIntegerField()
def __str__(self) -> str:
return f"<ScheduledEmail: {self.type} {self.address or list(self.users.all())} {self.scheduled_timestamp}>"
class MissedMessageEmailAddress(models.Model):
EXPIRY_SECONDS = 60 * 60 * 24 * 5
ALLOWED_USES = 1
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
email_token: str = models.CharField(max_length=34, unique=True, db_index=True)
    # Timestamp of when the missed message address was generated.
# The address is valid until timestamp + EXPIRY_SECONDS.
timestamp: datetime.datetime = models.DateTimeField(db_index=True, default=timezone_now)
times_used: int = models.PositiveIntegerField(default=0, db_index=True)
def __str__(self) -> str:
return settings.EMAIL_GATEWAY_PATTERN % (self.email_token,)
def is_usable(self) -> bool:
not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
has_uses_left = self.times_used < self.ALLOWED_USES
return has_uses_left and not_expired
def increment_times_used(self) -> None:
self.times_used += 1
self.save(update_fields=["times_used"])
class ScheduledMessage(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
content: str = models.TextField()
sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
delivered: bool = models.BooleanField(default=False)
SEND_LATER = 1
REMIND = 2
DELIVERY_TYPES = (
(SEND_LATER, 'send_later'),
(REMIND, 'remind'),
)
delivery_type: int = models.PositiveSmallIntegerField(
choices=DELIVERY_TYPES, default=SEND_LATER,
)
def topic_name(self) -> str:
return self.subject
def set_topic_name(self, topic_name: str) -> None:
self.subject = topic_name
def __str__(self) -> str:
display_recipient = get_display_recipient(self.recipient)
return f"<ScheduledMessage: {display_recipient} {self.subject} {self.sender} {self.scheduled_timestamp}>"
EMAIL_TYPES = {
'followup_day1': ScheduledEmail.WELCOME,
'followup_day2': ScheduledEmail.WELCOME,
'digest': ScheduledEmail.DIGEST,
'invitation_reminder': ScheduledEmail.INVITATION_REMINDER,
}
class AbstractRealmAuditLog(models.Model):
"""Defines fields common to RealmAuditLog and RemoteRealmAuditLog."""
event_time: datetime.datetime = models.DateTimeField(db_index=True)
# If True, event_time is an overestimate of the true time. Can be used
# by migrations when introducing a new event_type.
backfilled: bool = models.BooleanField(default=False)
# Keys within extra_data, when extra_data is a json dict. Keys are strings because
# json keys must always be strings.
OLD_VALUE = '1'
NEW_VALUE = '2'
ROLE_COUNT = '10'
ROLE_COUNT_HUMANS = '11'
ROLE_COUNT_BOTS = '12'
extra_data: Optional[str] = models.TextField(null=True)
# Event types
USER_CREATED = 101
USER_ACTIVATED = 102
USER_DEACTIVATED = 103
USER_REACTIVATED = 104
USER_ROLE_CHANGED = 105
USER_SOFT_ACTIVATED = 120
USER_SOFT_DEACTIVATED = 121
USER_PASSWORD_CHANGED = 122
USER_AVATAR_SOURCE_CHANGED = 123
USER_FULL_NAME_CHANGED = 124
USER_EMAIL_CHANGED = 125
USER_TOS_VERSION_CHANGED = 126
USER_API_KEY_CHANGED = 127
USER_BOT_OWNER_CHANGED = 128
USER_DEFAULT_SENDING_STREAM_CHANGED = 129
USER_DEFAULT_REGISTER_STREAM_CHANGED = 130
USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED = 131
USER_NOTIFICATION_SETTINGS_CHANGED = 132
USER_DIGEST_EMAIL_CREATED = 133
REALM_DEACTIVATED = 201
REALM_REACTIVATED = 202
REALM_SCRUBBED = 203
REALM_PLAN_TYPE_CHANGED = 204
REALM_LOGO_CHANGED = 205
REALM_EXPORTED = 206
REALM_PROPERTY_CHANGED = 207
REALM_ICON_SOURCE_CHANGED = 208
SUBSCRIPTION_CREATED = 301
SUBSCRIPTION_ACTIVATED = 302
SUBSCRIPTION_DEACTIVATED = 303
SUBSCRIPTION_PROPERTY_CHANGED = 304
STRIPE_CUSTOMER_CREATED = 401
STRIPE_CARD_CHANGED = 402
STRIPE_PLAN_CHANGED = 403
STRIPE_PLAN_QUANTITY_RESET = 404
CUSTOMER_CREATED = 501
CUSTOMER_PLAN_CREATED = 502
CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN = 503
STREAM_CREATED = 601
STREAM_DEACTIVATED = 602
STREAM_NAME_CHANGED = 603
event_type: int = models.PositiveSmallIntegerField()
# event_types synced from on-prem installations to Zulip Cloud when
# billing for mobile push notifications is enabled. Every billing
# event_type should have ROLE_COUNT populated in extra_data.
SYNCED_BILLING_EVENTS = [
USER_CREATED, USER_ACTIVATED, USER_DEACTIVATED, USER_REACTIVATED, USER_ROLE_CHANGED,
REALM_DEACTIVATED, REALM_REACTIVATED]
class Meta:
abstract = True
class RealmAuditLog(AbstractRealmAuditLog):
"""
RealmAuditLog tracks important changes to users, streams, and
realms in Zulip. It is intended to support both
    debugging/introspection (e.g. determining when a user left a
    given stream) as well as to help with some database migrations where
we might be able to do a better data backfill with it. Here are a
few key details about how this works:
* acting_user is the user who initiated the state change
* modified_user (if present) is the user being modified
* modified_stream (if present) is the stream being modified
For example:
* When a user subscribes another user to a stream, modified_user,
acting_user, and modified_stream will all be present and different.
* When an administrator changes an organization's realm icon,
acting_user is that administrator and both modified_user and
modified_stream will be None.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
acting_user: Optional[UserProfile] = models.ForeignKey(
UserProfile, null=True, related_name="+", on_delete=CASCADE,
)
modified_user: Optional[UserProfile] = models.ForeignKey(
UserProfile, null=True, related_name="+", on_delete=CASCADE,
)
modified_stream: Optional[Stream] = models.ForeignKey(
Stream, null=True, on_delete=CASCADE,
)
event_last_message_id: Optional[int] = models.IntegerField(null=True)
def __str__(self) -> str:
if self.modified_user is not None:
return f"<RealmAuditLog: {self.modified_user} {self.event_type} {self.event_time} {self.id}>"
if self.modified_stream is not None:
return f"<RealmAuditLog: {self.modified_stream} {self.event_type} {self.event_time} {self.id}>"
return f"<RealmAuditLog: {self.realm} {self.event_type} {self.event_time} {self.id}>"
class UserHotspot(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
hotspot: str = models.CharField(max_length=30)
timestamp: datetime.datetime = models.DateTimeField(default=timezone_now)
class Meta:
unique_together = ("user", "hotspot")
def check_valid_user_ids(realm_id: int, val: object,
allow_deactivated: bool=False) -> List[int]:
user_ids = check_list(check_int)("User IDs", val)
realm = Realm.objects.get(id=realm_id)
for user_id in user_ids:
# TODO: Structurally, we should be doing a bulk fetch query to
# get the users here, not doing these in a loop. But because
# this is a rarely used feature and likely to never have more
# than a handful of users, it's probably mostly OK.
try:
user_profile = get_user_profile_by_id_in_realm(user_id, realm)
except UserProfile.DoesNotExist:
raise ValidationError(_('Invalid user ID: {}').format(user_id))
if not allow_deactivated:
if not user_profile.is_active:
raise ValidationError(_('User with ID {} is deactivated').format(user_id))
if (user_profile.is_bot):
raise ValidationError(_('User with ID {} is a bot').format(user_id))
return user_ids
class CustomProfileField(models.Model):
"""Defines a form field for the per-realm custom profile fields feature.
See CustomProfileFieldValue for an individual user's values for one of
these fields.
"""
HINT_MAX_LENGTH = 80
NAME_MAX_LENGTH = 40
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
name: str = models.CharField(max_length=NAME_MAX_LENGTH)
hint: Optional[str] = models.CharField(max_length=HINT_MAX_LENGTH, default='', null=True)
order: int = models.IntegerField(default=0)
SHORT_TEXT = 1
LONG_TEXT = 2
CHOICE = 3
DATE = 4
URL = 5
USER = 6
EXTERNAL_ACCOUNT = 7
    # These are the fields whose validators require more than the var_name
    # and value arguments, i.e. CHOICE requires field_data and USER requires
    # realm as an argument.
CHOICE_FIELD_TYPE_DATA: List[ExtendedFieldElement] = [
(CHOICE, ugettext_lazy('List of options'), validate_choice_field, str, "CHOICE"),
]
USER_FIELD_TYPE_DATA: List[UserFieldElement] = [
(USER, ugettext_lazy('Person picker'), check_valid_user_ids, ast.literal_eval, "USER"),
]
CHOICE_FIELD_VALIDATORS: Dict[int, ExtendedValidator] = {
item[0]: item[2] for item in CHOICE_FIELD_TYPE_DATA
}
USER_FIELD_VALIDATORS: Dict[int, RealmUserValidator] = {
item[0]: item[2] for item in USER_FIELD_TYPE_DATA
}
FIELD_TYPE_DATA: List[FieldElement] = [
# Type, Display Name, Validator, Converter, Keyword
(SHORT_TEXT, ugettext_lazy('Short text'), check_short_string, str, "SHORT_TEXT"),
(LONG_TEXT, ugettext_lazy('Long text'), check_long_string, str, "LONG_TEXT"),
(DATE, ugettext_lazy('Date picker'), check_date, str, "DATE"),
(URL, ugettext_lazy('Link'), check_url, str, "URL"),
(EXTERNAL_ACCOUNT, ugettext_lazy('External account'), check_short_string, str, "EXTERNAL_ACCOUNT"),
]
ALL_FIELD_TYPES = [*FIELD_TYPE_DATA, *CHOICE_FIELD_TYPE_DATA, *USER_FIELD_TYPE_DATA]
FIELD_VALIDATORS: Dict[int, Validator[Union[int, str, List[int]]]] = {item[0]: item[2] for item in FIELD_TYPE_DATA}
FIELD_CONVERTERS: Dict[int, Callable[[Any], Any]] = {item[0]: item[3] for item in ALL_FIELD_TYPES}
FIELD_TYPE_CHOICES: List[Tuple[int, Promise]] = [(item[0], item[1]) for item in ALL_FIELD_TYPES]
field_type: int = models.PositiveSmallIntegerField(
choices=FIELD_TYPE_CHOICES, default=SHORT_TEXT,
)
# A JSON blob of any additional data needed to define the field beyond
# type/name/hint.
#
# The format depends on the type. Field types SHORT_TEXT, LONG_TEXT,
# DATE, URL, and USER leave this null. Fields of type CHOICE store the
# choices' descriptions.
#
# Note: There is no performance overhead of using TextField in PostgreSQL.
# See https://www.postgresql.org/docs/9.0/static/datatype-character.html
field_data: Optional[str] = models.TextField(default='', null=True)
class Meta:
unique_together = ('realm', 'name')
def as_dict(self) -> ProfileDataElementBase:
return {
'id': self.id,
'name': self.name,
'type': self.field_type,
'hint': self.hint,
'field_data': self.field_data,
'order': self.order,
}
def is_renderable(self) -> bool:
if self.field_type in [CustomProfileField.SHORT_TEXT, CustomProfileField.LONG_TEXT]:
return True
return False
def __str__(self) -> str:
return f"<CustomProfileField: {self.realm} {self.name} {self.field_type} {self.order}>"
def custom_profile_fields_for_realm(realm_id: int) -> List[CustomProfileField]:
return CustomProfileField.objects.filter(realm=realm_id).order_by('order')
class CustomProfileFieldValue(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
field: CustomProfileField = models.ForeignKey(CustomProfileField, on_delete=CASCADE)
value: str = models.TextField()
rendered_value: Optional[str] = models.TextField(null=True, default=None)
class Meta:
unique_together = ('user_profile', 'field')
def __str__(self) -> str:
return f"<CustomProfileFieldValue: {self.user_profile} {self.field} {self.value}>"
# Interfaces for services
# They provide additional functionality like parsing message to obtain query URL, data to be sent to URL,
# and parsing the response.
GENERIC_INTERFACE = 'GenericService'
SLACK_INTERFACE = 'SlackOutgoingWebhookService'
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
# of the xkcd bot); multiple embedded bots can have the same name, but all
# embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
# Bot user corresponding to the Service. The bot_type of this user
    # determines the type of service. If non-bot services are added later,
# user_profile can also represent the owner of the Service.
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
base_url: str = models.TextField()
token: str = models.TextField()
# Interface / API version of the service.
interface: int = models.PositiveSmallIntegerField(default=1)
# Valid interfaces are {generic, zulip_bot_service, slack}
GENERIC = 1
SLACK = 2
ALLOWED_INTERFACE_TYPES = [
GENERIC,
SLACK,
]
    # N.B. If we used Django's choices=... we would get this for free (kinda)
_interfaces: Dict[int, str] = {
GENERIC: GENERIC_INTERFACE,
SLACK: SLACK_INTERFACE,
}
def interface_name(self) -> str:
# Raises KeyError if invalid
return self._interfaces[self.interface]
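    # Illustrative usage of the mapping above:
    #   Service(interface=Service.SLACK).interface_name() -> 'SlackOutgoingWebhookService'
    #   an unrecognized interface value raises KeyError.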
def get_bot_services(user_profile_id: int) -> List[Service]:
return list(Service.objects.filter(user_profile__id=user_profile_id))
def get_service_profile(user_profile_id: int, service_name: str) -> Service:
return Service.objects.get(user_profile__id=user_profile_id, name=service_name)
class BotStorageData(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
key: str = models.TextField(db_index=True)
value: str = models.TextField()
class Meta:
unique_together = ("bot_profile", "key")
class BotConfigData(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
key: str = models.TextField(db_index=True)
value: str = models.TextField()
class Meta:
unique_together = ("bot_profile", "key")
class InvalidFakeEmailDomain(Exception):
pass
def get_fake_email_domain() -> str:
try:
# Check that the fake email domain can be used to form valid email addresses.
validate_email("bot@" + settings.FAKE_EMAIL_DOMAIN)
except ValidationError:
raise InvalidFakeEmailDomain(settings.FAKE_EMAIL_DOMAIN + ' is not a valid domain. '
'Consider setting the FAKE_EMAIL_DOMAIN setting.')
return settings.FAKE_EMAIL_DOMAIN
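# Illustrative behaviour (hypothetical setting values): with FAKE_EMAIL_DOMAIN
# set to e.g. 'fake.example.com' the function returns that value; with a value
# that cannot form a valid email address it raises InvalidFakeEmailDomain.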
class AlertWord(models.Model):
# Realm isn't necessary, but it's a nice denormalization. Users
# never move to another realm, so it's static, and having Realm
# here optimizes the main query on this table, which is fetching
# all the alert words in a realm.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# Case-insensitive name for the alert word.
word: str = models.TextField()
class Meta:
unique_together = ("user_profile", "word")
def flush_realm_alert_words(realm: Realm) -> None:
cache_delete(realm_alert_words_cache_key(realm))
cache_delete(realm_alert_words_automaton_cache_key(realm))
def flush_alert_word(sender: Any, **kwargs: Any) -> None:
realm = kwargs['instance'].realm
flush_realm_alert_words(realm)
post_save.connect(flush_alert_word, sender=AlertWord)
post_delete.connect(flush_alert_word, sender=AlertWord)
|
showell/zulip
|
zerver/models.py
|
Python
|
apache-2.0
| 130,376
|
[
"VisIt"
] |
c8969092bee37da0d84957dcdfa49a907039a911e0ba4edf3d8e519cb8ce22b1
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import include, url
from karaage.plugins.kgapplications.views import common, project
from .views.project import register as register_project
register_project()
urlpatterns = [
url(r'^$',
common.application_list, name='kg_application_list'),
url(r'^applicants/(?P<applicant_id>\d+)/$',
common.applicant_edit, name='kg_applicant_edit'),
url(r'^(?P<application_id>\d+)/logs/$',
common.application_logs, name='kg_application_logs'),
url(r'^(?P<application_id>\d+)/add_comment/$',
common.add_comment, name='kg_application_add_comment'),
url(r'^(?P<application_id>\d+)/$',
common.application_detail, name='kg_application_detail'),
url(r'^(?P<application_id>\d+)/(?P<state>[-.\w]+)/$',
common.application_detail, name='kg_application_detail'),
url(r'^(?P<application_id>\d+)/(?P<state>[-.\w]+)/(?P<label>[-.\w]+)/$',
common.application_detail, name='kg_application_detail'),
url(r'^project/new/$',
project.new_application, name='kg_application_new'),
url(r'^project/invite/$',
project.send_invitation, name='kg_application_invite'),
url(r'^project/invite/(?P<project_id>\d+)/$',
project.send_invitation, name='kg_application_invite'),
# this must come last
url(r'^(?P<token>[-.\w]+)/$',
common.application_unauthenticated,
name='kg_application_unauthenticated'),
url(r'^(?P<token>[-.\w]+)/(?P<state>[-.\w]+)/$',
common.application_unauthenticated,
name='kg_application_unauthenticated'),
url(r'^(?P<token>[-.\w]+)/(?P<state>[-.\w]+)/(?P<label>[-.\w]+)/$',
common.application_unauthenticated,
name='kg_application_unauthenticated'),
]
urlpatterns = [
url(r'^applications/', include(urlpatterns)),
]
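# Illustrative resolution, assuming this urlconf is mounted at the site root
# (the id and state values below are hypothetical):
#   /applications/            -> common.application_list
#   /applications/42/approve/ -> common.application_detail(application_id='42', state='approve')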
profile_urlpatterns = [
url(r'^applications/$',
common.profile_application_list,
name='kg_profile_applications'),
]
|
brianmay/karaage
|
karaage/plugins/kgapplications/urls.py
|
Python
|
gpl-3.0
| 2,686
|
[
"Brian"
] |
7ea31a83ee69f81c340db3ea60464c5e094c5baa491cdc67b9c08a1225fabc49
|
import logging
from cgi import escape
import galaxy.util
from galaxy import model
from galaxy import web
from galaxy import managers
from galaxy.datatypes.data import nice_size
from galaxy.model.item_attrs import UsesAnnotations, UsesItemRatings
from galaxy.model.orm import and_, eagerload_all, func
from galaxy import util
from galaxy.util import Params
from galaxy.util.odict import odict
from galaxy.util.sanitize_html import sanitize_html
from galaxy.web import error, url_for
from galaxy.web.base.controller import BaseUIController, SharableMixin, UsesHistoryDatasetAssociationMixin, UsesHistoryMixin
from galaxy.web.base.controller import ExportsHistoryMixin
from galaxy.web.base.controller import ImportsHistoryMixin
from galaxy.web.base.controller import ERROR, INFO, SUCCESS, WARNING
from galaxy.web.framework.helpers import grids, iff, time_ago
log = logging.getLogger( __name__ )
class NameColumn( grids.TextColumn ):
def get_value( self, trans, grid, history ):
return history.get_display_name()
class HistoryListGrid( grids.Grid ):
# Custom column types
class DatasetsByStateColumn( grids.GridColumn, UsesHistoryMixin ):
def get_value( self, trans, grid, history ):
state_count_dict = self.get_hda_state_counts( trans, history )
rval = ''
for state in ( 'ok', 'running', 'queued', 'error' ):
count = state_count_dict.get( state, 0 )
if count:
rval += '<div class="count-box state-color-%s">%s</div> ' % (state, count)
return rval
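        # Illustrative rendered value for a history with 3 'ok' and 1 'error'
        # dataset (each non-zero state gets its own count box):
        #   '<div class="count-box state-color-ok">3</div> <div class="count-box state-color-error">1</div> '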
class HistoryListNameColumn( NameColumn ):
def get_link( self, trans, grid, history ):
link = None
if not history.deleted:
link = dict( operation="Switch", id=history.id, use_panels=grid.use_panels, async_compatible=True )
return link
class DeletedColumn( grids.DeletedColumn ):
def get_value( self, trans, grid, history ):
if history == trans.history:
return "<strong>current history</strong>"
if history.purged:
return "deleted permanently"
elif history.deleted:
return "deleted"
return ""
def sort( self, trans, query, ascending, column_name=None ):
if ascending:
query = query.order_by( self.model_class.table.c.purged.asc(), self.model_class.table.c.update_time.desc() )
else:
query = query.order_by( self.model_class.table.c.purged.desc(), self.model_class.table.c.update_time.desc() )
return query
# Grid definition
title = "Saved Histories"
model_class = model.History
template='/history/grid.mako'
default_sort_key = "-update_time"
columns = [
HistoryListNameColumn( "Name", key="name", attach_popup=True, filterable="advanced" ),
DatasetsByStateColumn( "Datasets", key="datasets_by_state", sortable=False, nowrap=True),
grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryTagAssociation, \
filterable="advanced", grid_name="HistoryListGrid" ),
grids.SharingStatusColumn( "Sharing", key="sharing", filterable="advanced", sortable=False ),
grids.GridColumn( "Size on Disk", key="get_disk_size_bytes", format=nice_size, sortable=False ),
grids.GridColumn( "Created", key="create_time", format=time_ago ),
grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
DeletedColumn( "Status", key="deleted", filterable="advanced" )
]
columns.append(
grids.MulticolFilterColumn(
"search history names and tags",
cols_to_filter=[ columns[0], columns[2] ],
key="free-text-search", visible=False, filterable="standard" )
)
operations = [
grids.GridOperation( "Switch", allow_multiple=False, condition=( lambda item: not item.deleted ), async_compatible=True ),
grids.GridOperation( "View", allow_multiple=False ),
grids.GridOperation( "Share or Publish", allow_multiple=False, condition=( lambda item: not item.deleted ), async_compatible=False ),
grids.GridOperation( "Copy", allow_multiple=False, condition=( lambda item: not item.deleted ), async_compatible=False ),
grids.GridOperation( "Rename", condition=( lambda item: not item.deleted ), async_compatible=False, inbound=True ),
grids.GridOperation( "Delete", condition=( lambda item: not item.deleted ), async_compatible=True ),
grids.GridOperation( "Delete Permanently", condition=( lambda item: not item.purged ), confirm="History contents will be removed from disk, this cannot be undone. Continue?", async_compatible=True ),
grids.GridOperation( "Undelete", condition=( lambda item: item.deleted and not item.purged ), async_compatible=True ),
]
standard_filters = [
grids.GridColumnFilter( "Active", args=dict( deleted=False ) ),
grids.GridColumnFilter( "Deleted", args=dict( deleted=True ) ),
grids.GridColumnFilter( "All", args=dict( deleted='All' ) ),
]
default_filter = dict( name="All", deleted="False", tags="All", sharing="All" )
num_rows_per_page = 50
preserve_state = False
use_async = True
use_paging = True
info_text = "Histories that have been deleted for more than a time period specified by the Galaxy administrator(s) may be permanently deleted."
def get_current_item( self, trans, **kwargs ):
return trans.get_history()
def apply_query_filter( self, trans, query, **kwargs ):
return query.filter_by( user=trans.user, importing=False )
class SharedHistoryListGrid( grids.Grid ):
# Custom column types
class DatasetsByStateColumn( grids.GridColumn ):
def get_value( self, trans, grid, history ):
rval = ''
for state in ( 'ok', 'running', 'queued', 'error' ):
total = sum( 1 for d in history.active_datasets if d.state == state )
if total:
rval += '<div class="count-box state-color-%s">%s</div>' % ( state, total )
return rval
class SharedByColumn( grids.GridColumn ):
def get_value( self, trans, grid, history ):
return history.user.email
# Grid definition
title = "Histories shared with you by others"
model_class = model.History
default_sort_key = "-update_time"
default_filter = {}
columns = [
grids.GridColumn( "Name", key="name", attach_popup=True ), # link=( lambda item: dict( operation="View", id=item.id ) ), attach_popup=True ),
DatasetsByStateColumn( "Datasets", sortable=False ),
grids.GridColumn( "Created", key="create_time", format=time_ago ),
grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
SharedByColumn( "Shared by", key="user_id" )
]
operations = [
grids.GridOperation( "View", allow_multiple=False, target="_top" ),
grids.GridOperation( "Copy" ),
grids.GridOperation( "Unshare" )
]
standard_filters = []
def build_initial_query( self, trans, **kwargs ):
return trans.sa_session.query( self.model_class ).join( 'users_shared_with' )
def apply_query_filter( self, trans, query, **kwargs ):
return query.filter( model.HistoryUserShareAssociation.user == trans.user )
class HistoryAllPublishedGrid( grids.Grid ):
class NameURLColumn( grids.PublicURLColumn, NameColumn ):
pass
title = "Published Histories"
model_class = model.History
default_sort_key = "update_time"
default_filter = dict( public_url="All", username="All", tags="All" )
use_paging = True
num_rows_per_page = 50
use_async = True
columns = [
NameURLColumn( "Name", key="name", filterable="advanced" ),
grids.OwnerAnnotationColumn( "Annotation", key="annotation", model_annotation_association_class=model.HistoryAnnotationAssociation, filterable="advanced" ),
grids.OwnerColumn( "Owner", key="username", model_class=model.User, filterable="advanced" ),
grids.CommunityRatingColumn( "Community Rating", key="rating" ),
grids.CommunityTagsColumn( "Community Tags", key="tags", model_tag_association_class=model.HistoryTagAssociation, filterable="advanced", grid_name="PublicHistoryListGrid" ),
grids.ReverseSortColumn( "Last Updated", key="update_time", format=time_ago )
]
columns.append(
grids.MulticolFilterColumn(
"Search name, annotation, owner, and tags",
cols_to_filter=[ columns[0], columns[1], columns[2], columns[4] ],
key="free-text-search", visible=False, filterable="standard" )
)
operations = []
def build_initial_query( self, trans, **kwargs ):
# Join so that searching history.user makes sense.
return trans.sa_session.query( self.model_class ).join( model.User.table )
def apply_query_filter( self, trans, query, **kwargs ):
# A public history is published, has a slug, and is not deleted.
return query.filter( self.model_class.published == True ).filter( self.model_class.slug != None ).filter( self.model_class.deleted == False )
class HistoryController( BaseUIController, SharableMixin, UsesAnnotations, UsesItemRatings,
UsesHistoryMixin, UsesHistoryDatasetAssociationMixin, ExportsHistoryMixin,
ImportsHistoryMixin ):
def __init__( self, app ):
super( HistoryController, self ).__init__( app )
self.mgrs = util.bunch.Bunch(
histories=managers.histories.HistoryManager()
)
@web.expose
def index( self, trans ):
return ""
@web.expose
def list_as_xml( self, trans ):
"""XML history list for functional tests"""
trans.response.set_content_type( 'text/xml' )
return trans.fill_template( "/history/list_as_xml.mako" )
# ......................................................................... lists
stored_list_grid = HistoryListGrid()
shared_list_grid = SharedHistoryListGrid()
published_list_grid = HistoryAllPublishedGrid()
@web.expose
def list_published( self, trans, **kwargs ):
if 'async' in kwargs:
kwargs[ 'embedded' ] = True
return self.published_list_grid( trans, **kwargs )
kwargs[ 'embedded' ] = True
grid = self.published_list_grid( trans, **kwargs )
return trans.fill_template( "history/list_published.mako", embedded_grid=grid )
@web.expose
@web.require_login( "work with multiple histories" )
def list( self, trans, **kwargs ):
"""List all available histories"""
current_history = trans.get_history()
status = message = None
if 'operation' in kwargs:
operation = kwargs['operation'].lower()
if operation == "share or publish":
return self.sharing( trans, **kwargs )
if operation == "rename" and kwargs.get('id', None): # Don't call rename if no ids
if 'name' in kwargs:
del kwargs['name'] # Remove ajax name param that rename method uses
return self.rename( trans, **kwargs )
if operation == "view":
history = self.get_history( trans, kwargs.get( 'id', None ) )
if history:
return trans.response.send_redirect( url_for( controller='history',
action='view',
id=kwargs['id'],
show_deleted=history.deleted,
use_panels=False ) )
#return self.view( trans, id=kwargs['id'], show_deleted=history.deleted, use_panels=False )
if operation == 'copy' and kwargs.get( 'id', None ):
return self.copy( trans, id=kwargs.get( 'id', None ) )
history_ids = galaxy.util.listify( kwargs.get( 'id', [] ) )
# Display no message by default
status, message = None, None
refresh_history = False
# Load the histories and ensure they all belong to the current user
histories = []
for history_id in history_ids:
history = self.get_history( trans, history_id )
if history:
# Ensure history is owned by current user
if history.user_id != None and trans.user:
assert trans.user.id == history.user_id, "History does not belong to current user"
histories.append( history )
else:
log.warn( "Invalid history id '%r' passed to list", history_id )
if histories:
if operation == "switch":
status, message = self._list_switch( trans, histories )
# Take action to update UI to reflect history switch. If
# grid is using panels, it is standalone and hence a redirect
# to root is needed; if grid is not using panels, it is nested
# in the main Galaxy UI and refreshing the history frame
# is sufficient.
use_panels = kwargs.get('use_panels', False) == 'True'
if use_panels:
return trans.response.send_redirect( url_for( "/" ) )
else:
trans.template_context['refresh_frames'] = ['history']
elif operation in ( "delete", "delete permanently" ):
if operation == "delete permanently":
status, message = self._list_delete( trans, histories, purge=True )
else:
status, message = self._list_delete( trans, histories )
if current_history in histories:
# Deleted the current history, so a new, empty history was
# created automatically, and we need to refresh the history frame
trans.template_context['refresh_frames'] = ['history']
elif operation == "undelete":
status, message = self._list_undelete( trans, histories )
elif operation == "unshare":
for history in histories:
for husa in trans.sa_session.query( trans.app.model.HistoryUserShareAssociation ) \
.filter_by( history=history ):
trans.sa_session.delete( husa )
elif operation == "enable import via link":
for history in histories:
if not history.importable:
self._make_item_importable( trans.sa_session, history )
elif operation == "disable import via link":
if history_ids:
histories = [ self.get_history( trans, history_id ) for history_id in history_ids ]
for history in histories:
if history.importable:
history.importable = False
trans.sa_session.flush()
# Render the list view
return self.stored_list_grid( trans, status=status, message=message, **kwargs )
def _list_delete( self, trans, histories, purge=False ):
"""Delete histories"""
n_deleted = 0
deleted_current = False
message_parts = []
status = SUCCESS
for history in histories:
if history.users_shared_with:
message_parts.append( "History (%s) has been shared with others, unshare it before deleting it. " % history.name )
status = ERROR
else:
if not history.deleted:
# We'll not eliminate any DefaultHistoryPermissions in case we undelete the history later
history.deleted = True
# If deleting the current history, make a new current.
if history == trans.get_history():
deleted_current = True
trans.log_event( "History (%s) marked as deleted" % history.name )
n_deleted += 1
if purge and trans.app.config.allow_user_dataset_purge:
for hda in history.datasets:
if trans.user:
trans.user.total_disk_usage -= hda.quota_amount( trans.user )
hda.purged = True
trans.sa_session.add( hda )
trans.log_event( "HDA id %s has been purged" % hda.id )
trans.sa_session.flush()
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.log_event( "Dataset id %s has been purged upon the the purge of HDA id %s" % ( hda.dataset.id, hda.id ) )
trans.sa_session.add( hda.dataset )
except:
log.exception( 'Unable to purge dataset (%s) on purge of hda (%s):' % ( hda.dataset.id, hda.id ) )
history.purged = True
self.sa_session.add( history )
self.sa_session.flush()
trans.sa_session.flush()
if n_deleted:
part = "Deleted %d %s" % ( n_deleted, iff( n_deleted != 1, "histories", "history" ) )
if purge and trans.app.config.allow_user_dataset_purge:
part += " and removed %s dataset%s from disk" % ( iff( n_deleted != 1, "their", "its" ), iff( n_deleted != 1, 's', '' ) )
elif purge:
part += " but the datasets were not removed from disk because that feature is not enabled in this Galaxy instance"
message_parts.append( "%s. " % part )
if deleted_current:
            # note: this needs to come after the commits above or it will use an empty history that was deleted above
trans.get_or_create_default_history()
message_parts.append( "Your active history was deleted, a new empty history is now active. " )
status = INFO
return ( status, " ".join( message_parts ) )
def _list_undelete( self, trans, histories ):
"""Undelete histories"""
n_undeleted = 0
n_already_purged = 0
for history in histories:
if history.purged:
n_already_purged += 1
if history.deleted:
history.deleted = False
if not history.default_permissions:
# For backward compatibility - for a while we were deleting all DefaultHistoryPermissions on
# the history when we deleted the history. We are no longer doing this.
# Need to add default DefaultHistoryPermissions in case they were deleted when the history was deleted
default_action = trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS
private_user_role = trans.app.security_agent.get_private_user_role( history.user )
default_permissions = {}
default_permissions[ default_action ] = [ private_user_role ]
trans.app.security_agent.history_set_default_permissions( history, default_permissions )
n_undeleted += 1
trans.log_event( "History (%s) %d marked as undeleted" % ( history.name, history.id ) )
status = SUCCESS
message_parts = []
if n_undeleted:
message_parts.append( "Undeleted %d %s. " % ( n_undeleted, iff( n_undeleted != 1, "histories", "history" ) ) )
if n_already_purged:
message_parts.append( "%d histories have already been purged and cannot be undeleted." % n_already_purged )
status = WARNING
return status, "".join( message_parts )
def _list_switch( self, trans, histories ):
"""Switch to a new different history"""
new_history = histories[0]
galaxy_session = trans.get_galaxy_session()
try:
association = trans.sa_session.query( trans.app.model.GalaxySessionToHistoryAssociation ) \
.filter_by( session_id=galaxy_session.id, history_id=new_history.id ) \
.first()
except:
association = None
new_history.add_galaxy_session( galaxy_session, association=association )
trans.sa_session.add( new_history )
trans.sa_session.flush()
trans.set_history( new_history )
# No message
return None, None
@web.expose
@web.require_login( "work with shared histories" )
def list_shared( self, trans, **kwargs ):
"""List histories shared with current user by others"""
msg = galaxy.util.restore_text( kwargs.get( 'msg', '' ) )
status = message = None
if 'operation' in kwargs:
ids = galaxy.util.listify( kwargs.get( 'id', [] ) )
operation = kwargs['operation'].lower()
if operation == "view":
# Display history.
history = self.get_history( trans, ids[0], False)
return self.display_by_username_and_slug( trans, history.user.username, history.slug )
elif operation == "copy":
if not ids:
message = "Select a history to copy"
return self.shared_list_grid( trans, status='error', message=message, **kwargs )
# When copying shared histories, only copy active datasets
new_kwargs = { 'copy_choice' : 'active' }
return self.copy( trans, ids, **new_kwargs )
elif operation == 'unshare':
if not ids:
message = "Select a history to unshare"
return self.shared_list_grid( trans, status='error', message=message, **kwargs )
# No need to check security, association below won't yield a
                # hit if the history has not been shared with this user.
histories = [ self.get_history( trans, history_id, check_ownership=False ) for history_id in ids ]
for history in histories:
# Current user is the user with which the histories were shared
association = trans.sa_session.query( trans.app.model.HistoryUserShareAssociation ).filter_by( user=trans.user, history=history ).one()
trans.sa_session.delete( association )
trans.sa_session.flush()
message = "Unshared %d shared histories" % len( ids )
status = 'done'
# Render the list view
return self.shared_list_grid( trans, status=status, message=message, **kwargs )
# ......................................................................... html
@web.expose
def citations( self, trans ):
# Get history
history = trans.history
history_id = trans.security.encode_id( history.id )
return trans.fill_template( "history/citations.mako", history=history, history_id=history_id )
@web.expose
def display_structured( self, trans, id=None ):
"""
Display a history as a nested structure showing the jobs and workflow
invocations that created each dataset (if any).
"""
# Get history
if id is None:
id = trans.history.id
else:
id = trans.security.decode_id( id )
# Expunge history from the session to allow us to force a reload
# with a bunch of eager loaded joins
trans.sa_session.expunge( trans.history )
history = trans.sa_session.query( model.History ).options(
eagerload_all( 'active_datasets.creating_job_associations.job.workflow_invocation_step.workflow_invocation.workflow' ),
eagerload_all( 'active_datasets.children' )
).get( id )
assert history
#TODO: formalize to trans.show_error
assert ( history.user and ( history.user.id == trans.user.id )
or ( history.id == trans.history.id )
or ( trans.user_is_admin() ) )
# Resolve jobs and workflow invocations for the datasets in the history
# items is filled with items (hdas, jobs, or workflows) that go at the
# top level
items = []
        # First go through and group hdas by job; if there is no job, they are
        # added directly to items
jobs = odict()
for hda in history.active_datasets:
if hda.visible == False:
continue
# Follow "copied from ..." association until we get to the original
# instance of the dataset
original_hda = hda
## while original_hda.copied_from_history_dataset_association:
## original_hda = original_hda.copied_from_history_dataset_association
            # Check whether the hda has a creating job; most should, but datasets
            # from before jobs were tracked, or from the upload tool before it
            # created a job, may not
if not original_hda.creating_job_associations:
items.append( ( hda, None ) )
# Attach hda to correct job
# -- there should only be one creating_job_association, so this
# loop body should only be hit once
for assoc in original_hda.creating_job_associations:
job = assoc.job
if job in jobs:
jobs[ job ].append( ( hda, None ) )
else:
jobs[ job ] = [ ( hda, None ) ]
# Second, go through the jobs and connect to workflows
wf_invocations = odict()
for job, hdas in jobs.iteritems():
# Job is attached to a workflow step, follow it to the
# workflow_invocation and group
if job.workflow_invocation_step:
wf_invocation = job.workflow_invocation_step.workflow_invocation
if wf_invocation in wf_invocations:
wf_invocations[ wf_invocation ].append( ( job, hdas ) )
else:
wf_invocations[ wf_invocation ] = [ ( job, hdas ) ]
# Not attached to a workflow, add to items
else:
items.append( ( job, hdas ) )
# Finally, add workflow invocations to items, which should now
# contain all hdas with some level of grouping
items.extend( wf_invocations.items() )
# Sort items by age
items.sort( key=( lambda x: x[0].create_time ), reverse=True )
#
return trans.fill_template( "history/display_structured.mako", items=items, history=history )
@web.expose
def view( self, trans, id=None, show_deleted=False, show_hidden=False, use_panels=True ):
"""
View a history. If a history is importable, then it is viewable by any user.
"""
# Get history to view.
if not id:
return trans.show_error_message( "You must specify a history you want to view." )
show_deleted = galaxy.util.string_as_bool( show_deleted )
show_hidden = galaxy.util.string_as_bool( show_hidden )
use_panels = galaxy.util.string_as_bool( use_panels )
history_dictionary = {}
hda_dictionaries = []
user_is_owner = False
try:
history_to_view = self.get_history( trans, id, check_ownership=False, check_accessible=False )
if not history_to_view:
return trans.show_error_message( "The specified history does not exist." )
if history_to_view.user == trans.user:
user_is_owner = True
if( ( history_to_view.user != trans.user )
# Admin users can view any history
and ( not trans.user_is_admin() )
and ( not history_to_view.importable )
and ( trans.user not in history_to_view.users_shared_with_dot_users ) ):
return trans.show_error_message( "Either you are not allowed to view this history"
+ " or the owner of this history has not made it accessible." )
# include all datasets: hidden, deleted, and purged
history_data = self.mgrs.histories._get_history_data( trans, history_to_view )
history_dictionary = history_data[ 'history' ]
hda_dictionaries = history_data[ 'contents' ]
except Exception, exc:
user_id = str( trans.user.id ) if trans.user else '(anonymous)'
log.exception( 'Error bootstrapping history for user %s: %s', user_id, str( exc ) )
history_dictionary[ 'error' ] = ( 'An error occurred getting the history data from the server. '
+ 'Please contact a Galaxy administrator if the problem persists.' )
return trans.fill_template_mako( "history/view.mako",
history=history_dictionary, hdas=hda_dictionaries, user_is_owner=user_is_owner,
show_deleted=show_deleted, show_hidden=show_hidden, use_panels=use_panels )
@web.expose
def display_by_username_and_slug( self, trans, username, slug ):
"""
Display history based on a username and slug.
"""
# Get history.
session = trans.sa_session
user = session.query( model.User ).filter_by( username=username ).first()
history = trans.sa_session.query( model.History ).filter_by( user=user, slug=slug, deleted=False ).first()
if history is None:
raise web.httpexceptions.HTTPNotFound()
# Security check raises error if user cannot access history.
self.security_check( trans, history, False, True)
# Get rating data.
user_item_rating = 0
if trans.get_user():
user_item_rating = self.get_user_item_rating( trans.sa_session, trans.get_user(), history )
if user_item_rating:
user_item_rating = user_item_rating.rating
else:
user_item_rating = 0
ave_item_rating, num_ratings = self.get_ave_item_rating_data( trans.sa_session, history )
# create ownership flag for template, dictify models
user_is_owner = trans.user == history.user
history_data = self.mgrs.histories._get_history_data( trans, history )
history_dict = history_data[ 'history' ]
hda_dicts = history_data[ 'contents' ]
history_dict[ 'annotation' ] = self.get_item_annotation_str( trans.sa_session, history.user, history )
# note: adding original annotation since this is published - get_dict returns user-based annos
#for hda_dict in hda_dicts:
# hda_dict[ 'annotation' ] = hda.annotation
# dataset.annotation = self.get_item_annotation_str( trans.sa_session, history.user, dataset )
return trans.stream_template_mako( "history/display.mako", item=history, item_data=[],
user_is_owner=user_is_owner, history_dict=history_dict, hda_dicts=hda_dicts,
user_item_rating = user_item_rating, ave_item_rating=ave_item_rating, num_ratings=num_ratings )
# ......................................................................... sharing & publishing
@web.expose
@web.require_login( "share Galaxy histories" )
def sharing( self, trans, id=None, histories=[], **kwargs ):
""" Handle history sharing. """
# Get session and histories.
session = trans.sa_session
# Id values take precedence over histories passed in; last resort is current history.
if id:
ids = galaxy.util.listify( id )
if ids:
histories = [ self.get_history( trans, history_id ) for history_id in ids ]
elif not histories:
histories = [ trans.history ]
# Do operation on histories.
for history in histories:
if 'make_accessible_via_link' in kwargs:
self._make_item_accessible( trans.sa_session, history )
elif 'make_accessible_and_publish' in kwargs:
self._make_item_accessible( trans.sa_session, history )
history.published = True
elif 'publish' in kwargs:
if history.importable:
history.published = True
else:
# TODO: report error here.
pass
elif 'disable_link_access' in kwargs:
history.importable = False
elif 'unpublish' in kwargs:
history.published = False
elif 'disable_link_access_and_unpublish' in kwargs:
history.importable = history.published = False
elif 'unshare_user' in kwargs:
user = trans.sa_session.query( trans.app.model.User ).get( trans.security.decode_id( kwargs[ 'unshare_user' ] ) )
# Look for and delete sharing relation for history-user.
deleted_sharing_relation = False
husas = trans.sa_session.query( trans.app.model.HistoryUserShareAssociation ).filter_by( user=user, history=history ).all()
if husas:
deleted_sharing_relation = True
for husa in husas:
trans.sa_session.delete( husa )
if not deleted_sharing_relation:
message = "History '%s' does not seem to be shared with user '%s'" % ( history.name, user.email )
return trans.fill_template( '/sharing_base.mako', item=history,
message=message, status='error' )
        # Legacy issue: histories made accessible before recent updates may not have a slug. Create slugs for any histories that need them.
for history in histories:
if history.importable and not history.slug:
self._make_item_accessible( trans.sa_session, history )
session.flush()
return trans.fill_template( "/sharing_base.mako", item=history )
@web.expose
@web.require_login( "share histories with other users" )
def share( self, trans, id=None, email="", **kwd ):
# If a history contains both datasets that can be shared and others that cannot be shared with the desired user,
# then the entire history is shared, and the protected datasets will be visible, but inaccessible ( greyed out )
        # in the copied history
params = Params( kwd )
user = trans.get_user()
# TODO: we have too many error messages floating around in here - we need
# to incorporate the messaging system used by the libraries that will display
# a message on any page.
err_msg = galaxy.util.restore_text( params.get( 'err_msg', '' ) )
if not email:
if not id:
# Default to the current history
id = trans.security.encode_id( trans.history.id )
id = galaxy.util.listify( id )
send_to_err = err_msg
histories = []
for history_id in id:
histories.append( self.get_history( trans, history_id ) )
return trans.fill_template( "/history/share.mako",
histories=histories,
email=email,
send_to_err=send_to_err )
histories, send_to_users, send_to_err = self._get_histories_and_users( trans, user, id, email )
if not send_to_users:
if not send_to_err:
send_to_err += "%s is not a valid Galaxy user. %s" % ( email, err_msg )
return trans.fill_template( "/history/share.mako",
histories=histories,
email=email,
send_to_err=send_to_err )
if params.get( 'share_button', False ):
# The user has not yet made a choice about how to share, so dictionaries will be built for display
can_change, cannot_change, no_change_needed, unique_no_change_needed, send_to_err = \
self._populate_restricted( trans, user, histories, send_to_users, None, send_to_err, unique=True )
send_to_err += err_msg
if cannot_change and not no_change_needed and not can_change:
send_to_err = "The histories you are sharing do not contain any datasets that can be accessed by the users with which you are sharing."
return trans.fill_template( "/history/share.mako", histories=histories, email=email, send_to_err=send_to_err )
if can_change or cannot_change:
return trans.fill_template( "/history/share.mako",
histories=histories,
email=email,
send_to_err=send_to_err,
can_change=can_change,
cannot_change=cannot_change,
no_change_needed=unique_no_change_needed )
if no_change_needed:
return self._share_histories( trans, user, send_to_err, histories=no_change_needed )
elif not send_to_err:
# User seems to be sharing an empty history
send_to_err = "You cannot share an empty history. "
return trans.fill_template( "/history/share.mako", histories=histories, email=email, send_to_err=send_to_err )
@web.expose
@web.require_login( "share restricted histories with other users" )
def share_restricted( self, trans, id=None, email="", **kwd ):
if 'action' in kwd:
action = kwd[ 'action' ]
else:
err_msg = "Select an action. "
return trans.response.send_redirect( url_for( controller='history',
action='share',
id=id,
email=email,
err_msg=err_msg,
share_button=True ) )
user = trans.get_user()
user_roles = user.all_roles()
histories, send_to_users, send_to_err = self._get_histories_and_users( trans, user, id, email )
send_to_err = ''
# The user has made a choice, so dictionaries will be built for sharing
can_change, cannot_change, no_change_needed, unique_no_change_needed, send_to_err = \
self._populate_restricted( trans, user, histories, send_to_users, action, send_to_err )
# Now that we've populated the can_change, cannot_change, and no_change_needed dictionaries,
# we'll populate the histories_for_sharing dictionary from each of them.
histories_for_sharing = {}
if no_change_needed:
            # Nothing needs to change for no_change_needed, so populate as is
histories_for_sharing, send_to_err = \
self._populate( trans, histories_for_sharing, no_change_needed, send_to_err )
if cannot_change:
# Can't change anything in cannot_change, so populate as is
histories_for_sharing, send_to_err = \
self._populate( trans, histories_for_sharing, cannot_change, send_to_err )
# The action here is either 'public' or 'private', so we'll continue to populate the
# histories_for_sharing dictionary from the can_change dictionary.
for send_to_user, history_dict in can_change.items():
for history in history_dict:
# Make sure the current history has not already been shared with the current send_to_user
if trans.sa_session.query( trans.app.model.HistoryUserShareAssociation ) \
.filter( and_( trans.app.model.HistoryUserShareAssociation.table.c.user_id == send_to_user.id,
trans.app.model.HistoryUserShareAssociation.table.c.history_id == history.id ) ) \
.count() > 0:
send_to_err += "History (%s) already shared with user (%s)" % ( history.name, send_to_user.email )
else:
# Only deal with datasets that have not been purged
for hda in history.activatable_datasets:
# If the current dataset is not public, we may need to perform an action on it to
# make it accessible by the other user.
if not trans.app.security_agent.can_access_dataset( send_to_user.all_roles(), hda.dataset ):
# The user with which we are sharing the history does not have access permission on the current dataset
if trans.app.security_agent.can_manage_dataset( user_roles, hda.dataset ) and not hda.dataset.library_associations:
# The current user has authority to change permissions on the current dataset because
# they have permission to manage permissions on the dataset and the dataset is not associated
# with a library.
if action == "private":
trans.app.security_agent.privately_share_dataset( hda.dataset, users=[ user, send_to_user ] )
elif action == "public":
trans.app.security_agent.make_dataset_public( hda.dataset )
# Populate histories_for_sharing with the history after performing any requested actions on
# its datasets to make them accessible by the other user.
if send_to_user not in histories_for_sharing:
histories_for_sharing[ send_to_user ] = [ history ]
elif history not in histories_for_sharing[ send_to_user ]:
histories_for_sharing[ send_to_user ].append( history )
return self._share_histories( trans, user, send_to_err, histories=histories_for_sharing )
def _get_histories_and_users( self, trans, user, id, email ):
if not id:
# Default to the current history
id = trans.security.encode_id( trans.history.id )
id = galaxy.util.listify( id )
send_to_err = ""
histories = []
for history_id in id:
histories.append( self.get_history( trans, history_id ) )
send_to_users = []
for email_address in galaxy.util.listify( email ):
email_address = email_address.strip()
if email_address:
if email_address == user.email:
send_to_err += "You cannot send histories to yourself. "
else:
send_to_user = trans.sa_session.query( trans.app.model.User ) \
.filter( and_( trans.app.model.User.table.c.email==email_address,
trans.app.model.User.table.c.deleted==False ) ) \
.first()
if send_to_user:
send_to_users.append( send_to_user )
else:
send_to_err += "%s is not a valid Galaxy user. " % email_address
return histories, send_to_users, send_to_err
def _populate( self, trans, histories_for_sharing, other, send_to_err ):
# This method will populate the histories_for_sharing dictionary with the users and
# histories in other, eliminating histories that have already been shared with the
# associated user. No security checking on datasets is performed.
# If not empty, the histories_for_sharing dictionary looks like:
# { userA: [ historyX, historyY ], userB: [ historyY ] }
# other looks like:
# { userA: {historyX : [hda, hda], historyY : [hda]}, userB: {historyY : [hda]} }
for send_to_user, history_dict in other.items():
for history in history_dict:
# Make sure the current history has not already been shared with the current send_to_user
if trans.sa_session.query( trans.app.model.HistoryUserShareAssociation ) \
.filter( and_( trans.app.model.HistoryUserShareAssociation.table.c.user_id == send_to_user.id,
trans.app.model.HistoryUserShareAssociation.table.c.history_id == history.id ) ) \
.count() > 0:
send_to_err += "History (%s) already shared with user (%s)" % ( history.name, send_to_user.email )
else:
# Build the dict that will be used for sharing
if send_to_user not in histories_for_sharing:
histories_for_sharing[ send_to_user ] = [ history ]
elif history not in histories_for_sharing[ send_to_user ]:
histories_for_sharing[ send_to_user ].append( history )
return histories_for_sharing, send_to_err
def _populate_restricted( self, trans, user, histories, send_to_users, action, send_to_err, unique=False ):
# The user may be attempting to share histories whose datasets cannot all be accessed by other users.
# If this is the case, the user sharing the histories can:
# 1) action=='public': choose to make the datasets public if he is permitted to do so
# 2) action=='private': automatically create a new "sharing role" allowing protected
# datasets to be accessed only by the desired users
# This method will populate the can_change, cannot_change and no_change_needed dictionaries, which
# are used for either displaying to the user, letting them make 1 of the choices above, or sharing
# after the user has made a choice. They will be used for display if 'unique' is True, and will look
# like: {historyX : [hda, hda], historyY : [hda] }
# For sharing, they will look like:
# { userA: {historyX : [hda, hda], historyY : [hda]}, userB: {historyY : [hda]} }
can_change = {}
cannot_change = {}
no_change_needed = {}
unique_no_change_needed = {}
user_roles = user.all_roles()
for history in histories:
for send_to_user in send_to_users:
# Make sure the current history has not already been shared with the current send_to_user
if trans.sa_session.query( trans.app.model.HistoryUserShareAssociation ) \
.filter( and_( trans.app.model.HistoryUserShareAssociation.table.c.user_id == send_to_user.id,
trans.app.model.HistoryUserShareAssociation.table.c.history_id == history.id ) ) \
.count() > 0:
send_to_err += "History (%s) already shared with user (%s)" % ( history.name, send_to_user.email )
else:
# Only deal with datasets that have not been purged
for hda in history.activatable_datasets:
if trans.app.security_agent.can_access_dataset( send_to_user.all_roles(), hda.dataset ):
# The no_change_needed dictionary is a special case. If both of can_change
                            # and cannot_change are empty, no_change_needed will be used for sharing. Otherwise
# unique_no_change_needed will be used for displaying, so we need to populate both.
# Build the dictionaries for display, containing unique histories only
if history not in unique_no_change_needed:
unique_no_change_needed[ history ] = [ hda ]
else:
unique_no_change_needed[ history ].append( hda )
# Build the dictionaries for sharing
if send_to_user not in no_change_needed:
no_change_needed[ send_to_user ] = {}
if history not in no_change_needed[ send_to_user ]:
no_change_needed[ send_to_user ][ history ] = [ hda ]
else:
no_change_needed[ send_to_user ][ history ].append( hda )
else:
# The user with which we are sharing the history does not have access permission on the current dataset
if trans.app.security_agent.can_manage_dataset( user_roles, hda.dataset ):
# The current user has authority to change permissions on the current dataset because
# they have permission to manage permissions on the dataset.
                                # NOTE: (gvk) There may be problems if the dataset also has an ldda, but I don't think so
# because the user with which we are sharing will not have the "manage permission" permission
# on the dataset in their history. Keep an eye on this though...
if unique:
# Build the dictionaries for display, containing unique histories only
if history not in can_change:
can_change[ history ] = [ hda ]
else:
can_change[ history ].append( hda )
else:
# Build the dictionaries for sharing
if send_to_user not in can_change:
can_change[ send_to_user ] = {}
if history not in can_change[ send_to_user ]:
can_change[ send_to_user ][ history ] = [ hda ]
else:
can_change[ send_to_user ][ history ].append( hda )
else:
if action in [ "private", "public" ]:
# The user has made a choice, so 'unique' doesn't apply. Don't change stuff
# that the user doesn't have permission to change
continue
if unique:
# Build the dictionaries for display, containing unique histories only
if history not in cannot_change:
cannot_change[ history ] = [ hda ]
else:
cannot_change[ history ].append( hda )
else:
# Build the dictionaries for sharing
if send_to_user not in cannot_change:
cannot_change[ send_to_user ] = {}
if history not in cannot_change[ send_to_user ]:
cannot_change[ send_to_user ][ history ] = [ hda ]
else:
cannot_change[ send_to_user ][ history ].append( hda )
return can_change, cannot_change, no_change_needed, unique_no_change_needed, send_to_err
def _share_histories( self, trans, user, send_to_err, histories=None ):
# histories looks like: { userA: [ historyX, historyY ], userB: [ historyY ] }
histories = histories or {}
msg = ""
sent_to_emails = []
for send_to_user in histories.keys():
sent_to_emails.append( send_to_user.email )
emails = ",".join( e for e in sent_to_emails )
if not histories:
send_to_err += "No users have been specified or no histories can be sent without changing permissions or associating a sharing role. "
else:
for send_to_user, send_to_user_histories in histories.items():
shared_histories = []
for history in send_to_user_histories:
share = trans.app.model.HistoryUserShareAssociation()
share.history = history
share.user = send_to_user
trans.sa_session.add( share )
self.create_item_slug( trans.sa_session, history )
trans.sa_session.flush()
if history not in shared_histories:
shared_histories.append( history )
if send_to_err:
msg += send_to_err
return self.sharing( trans, histories=shared_histories, msg=msg )
# ......................................................................... actions/orig. async
@web.expose
def delete_hidden_datasets( self, trans ):
"""
This method deletes all hidden datasets in the current history.
"""
count = 0
for hda in trans.history.datasets:
if not hda.visible and not hda.deleted and not hda.purged:
hda.mark_deleted()
count += 1
trans.sa_session.add( hda )
trans.log_event( "HDA id %s has been deleted" % hda.id )
trans.sa_session.flush()
return trans.show_ok_message( "%d hidden datasets have been deleted" % count, refresh_frames=['history'] )
@web.expose
def purge_deleted_datasets( self, trans ):
count = 0
if trans.app.config.allow_user_dataset_purge:
for hda in trans.history.datasets:
if not hda.deleted or hda.purged:
continue
if trans.user:
trans.user.total_disk_usage -= hda.quota_amount( trans.user )
hda.purged = True
trans.sa_session.add( hda )
trans.log_event( "HDA id %s has been purged" % hda.id )
trans.sa_session.flush()
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.log_event( "Dataset id %s has been purged upon the the purge of HDA id %s" % ( hda.dataset.id, hda.id ) )
trans.sa_session.add( hda.dataset )
except:
log.exception( 'Unable to purge dataset (%s) on purge of hda (%s):' % ( hda.dataset.id, hda.id ) )
count += 1
return trans.show_ok_message( "%d datasets have been deleted permanently" % count, refresh_frames=['history'] )
#TODO: use api instead
@web.expose
def delete_current( self, trans, purge=False ):
"""Delete just the active history -- this does not require a logged in user."""
history = trans.get_history()
if history.users_shared_with:
return trans.show_error_message( "History (%s) has been shared with others, unshare it before deleting it. " % history.name )
if not history.deleted:
history.deleted = True
trans.sa_session.add( history )
trans.sa_session.flush()
trans.log_event( "History id %d marked as deleted" % history.id )
if purge and trans.app.config.allow_user_dataset_purge:
for hda in history.datasets:
if trans.user:
trans.user.total_disk_usage -= hda.quota_amount( trans.user )
hda.purged = True
trans.sa_session.add( hda )
trans.log_event( "HDA id %s has been purged" % hda.id )
trans.sa_session.flush()
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.log_event( "Dataset id %s has been purged upon the the purge of HDA id %s" % ( hda.dataset.id, hda.id ) )
trans.sa_session.add( hda.dataset )
except:
log.exception( 'Unable to purge dataset (%s) on purge of hda (%s):' % ( hda.dataset.id, hda.id ) )
history.purged = True
self.sa_session.add( history )
self.sa_session.flush()
for hda in history.datasets:
# Not all datasets have jobs associated with them (e.g., datasets imported from libraries).
if hda.creating_job_associations:
# HDA has associated job, so try marking it deleted.
job = hda.creating_job_associations[0].job
if job.history_id == history.id and job.state in [ trans.app.model.Job.states.QUEUED, trans.app.model.Job.states.RUNNING, trans.app.model.Job.states.NEW ]:
# No need to check other outputs since the job's parent history is this history
job.mark_deleted( trans.app.config.track_jobs_in_database )
trans.app.job_manager.job_stop_queue.put( job.id )
# Regardless of whether it was previously deleted, get the most recent history or create a new one.
most_recent_history = self.mgrs.histories.most_recent( trans, user=trans.user, deleted=False )
if most_recent_history:
trans.set_history( most_recent_history )
return trans.show_ok_message( "History deleted, your most recent history is now active",
refresh_frames=['history'] )
trans.get_or_create_default_history()
return trans.show_ok_message( "History deleted, a new history is active", refresh_frames=['history'] )
@web.expose
def unhide_datasets( self, trans, current=False, ids=None ):
"""Unhide the datasets in the active history -- this does not require a logged in user."""
if not ids and galaxy.util.string_as_bool( current ):
histories = [ trans.get_history() ]
refresh_frames = ['history']
else:
raise NotImplementedError( "You can currently only unhide all the datasets of the current history." )
for history in histories:
history.unhide_datasets()
trans.sa_session.add( history )
trans.sa_session.flush()
return trans.show_ok_message( "Your datasets have been unhidden.", refresh_frames=refresh_frames )
#TODO: used in index.mako
@web.expose
def resume_paused_jobs( self, trans, current=False, ids=None ):
"""Resume paused jobs the active history -- this does not require a logged in user."""
if not ids and galaxy.util.string_as_bool( current ):
histories = [ trans.get_history() ]
refresh_frames = ['history']
else:
raise NotImplementedError( "You can currently only resume all the datasets of the current history." )
for history in histories:
history.resume_paused_jobs()
trans.sa_session.add( history )
trans.sa_session.flush()
return trans.show_ok_message( "Your jobs have been resumed.", refresh_frames=refresh_frames )
#TODO: used in index.mako
@web.expose
@web.require_login( "rate items" )
@web.json
def rate_async( self, trans, id, rating ):
""" Rate a history asynchronously and return updated community data. """
history = self.get_history( trans, id, check_ownership=False, check_accessible=True )
if not history:
return trans.show_error_message( "The specified history does not exist." )
# Rate history.
history_rating = self.rate_item( trans.sa_session, trans.get_user(), history, rating )
return self.get_ave_item_rating_data( trans.sa_session, history )
#TODO: used in display_base.mako
@web.expose
# TODO: Remove require_login when users are warned that, if they are not
# logged in, this will remove their current history.
@web.require_login( "use Galaxy histories" )
def import_archive( self, trans, **kwargs ):
""" Import a history from a file archive. """
# Set archive source and type.
archive_file = kwargs.get( 'archive_file', None )
archive_url = kwargs.get( 'archive_url', None )
archive_source = None
if archive_file:
archive_source = archive_file
archive_type = 'file'
elif archive_url:
archive_source = archive_url
archive_type = 'url'
# If no source to create archive from, show form to upload archive or specify URL.
if not archive_source:
return trans.show_form(
web.FormBuilder( web.url_for(controller='history', action='import_archive'), "Import a History from an Archive", submit_text="Submit" ) \
.add_input( "text", "Archived History URL", "archive_url", value="", error=None )
# TODO: add support for importing via a file.
#.add_input( "file", "Archived History File", "archive_file", value=None, error=None )
)
self.queue_history_import( trans, archive_type=archive_type, archive_source=archive_source )
return trans.show_message( "Importing history from '%s'. \
This history will be visible when the import is complete" % archive_source )
#TODO: used in this file and index.mako
@web.expose
def export_archive( self, trans, id=None, gzip=True, include_hidden=False, include_deleted=False, preview=False ):
""" Export a history to an archive. """
#
# Get history to export.
#
if id:
history = self.get_history( trans, id, check_ownership=False, check_accessible=True )
else:
# Use current history.
history = trans.history
id = trans.security.encode_id( history.id )
if not history:
return trans.show_error_message( "This history does not exist or you cannot export this history." )
#
# If history has already been exported and it has not changed since export, stream it.
#
jeha = history.latest_export
if jeha and jeha.up_to_date:
if jeha.ready:
if preview:
url = url_for( controller='history', action="export_archive", id=id, qualified=True )
return trans.show_message( "History Ready: '%(n)s'. Use this link to download \
the archive or import it to another Galaxy server: \
<a href='%(u)s'>%(u)s</a>" % ( { 'n' : history.name, 'u' : url } ) )
else:
return self.serve_ready_history_export( trans, jeha )
elif jeha.preparing:
return trans.show_message( "Still exporting history %(n)s; please check back soon. Link: <a href='%(s)s'>%(s)s</a>" \
% ( { 'n' : history.name, 's' : url_for( controller='history', action="export_archive", id=id, qualified=True ) } ) )
self.queue_history_export( trans, history, gzip=gzip, include_hidden=include_hidden, include_deleted=include_deleted )
url = url_for( controller='history', action="export_archive", id=id, qualified=True )
return trans.show_message( "Exporting History '%(n)s'. Use this link to download \
the archive or import it to another Galaxy server: \
<a href='%(u)s'>%(u)s</a>" % ( { 'n' : history.name, 'u' : url } ) )
#TODO: used in this file and index.mako
@web.expose
@web.json
@web.require_login( "get history name and link" )
def get_name_and_link_async( self, trans, id=None ):
""" Returns history's name and link. """
history = self.get_history( trans, id, False )
if self.create_item_slug( trans.sa_session, history ):
trans.sa_session.flush()
return_dict = {
"name" : history.name,
"link" : url_for(controller='history', action="display_by_username_and_slug",
username=history.user.username, slug=history.slug ) }
return return_dict
#TODO: used in page/editor.mako
@web.expose
@web.require_login( "set history's accessible flag" )
def set_accessible_async( self, trans, id=None, accessible=False ):
""" Set history's importable attribute and slug. """
history = self.get_history( trans, id, True )
        # Only set if the importable value would change; this prevents a change in
        # the update_time unless the attribute really changed.
        importable = accessible in ['True', 'true', 't', 'T']
if history and history.importable != importable:
if importable:
self._make_item_accessible( trans.sa_session, history )
else:
history.importable = importable
trans.sa_session.flush()
return
#TODO: used in page/editor.mako
@web.expose
def get_item_content_async( self, trans, id ):
""" Returns item content in HTML format. """
history = self.get_history( trans, id, False, True )
if history is None:
raise web.httpexceptions.HTTPNotFound()
# Get datasets.
datasets = self.get_history_datasets( trans, history )
# Get annotations.
history.annotation = self.get_item_annotation_str( trans.sa_session, history.user, history )
for dataset in datasets:
dataset.annotation = self.get_item_annotation_str( trans.sa_session, history.user, dataset )
return trans.stream_template_mako( "/history/item_content.mako", item = history, item_data = datasets )
#TODO: used in embed_base.mako
@web.expose
def name_autocomplete_data( self, trans, q=None, limit=None, timestamp=None ):
"""Return autocomplete data for history names"""
user = trans.get_user()
if not user:
return
ac_data = ""
for history in ( trans.sa_session.query( model.History )
.filter_by( user=user )
.filter( func.lower( model.History.name ).like(q.lower() + "%") ) ):
ac_data = ac_data + history.name + "\n"
return ac_data
#TODO: used in grid_base.mako
@web.expose
def imp( self, trans, id=None, confirm=False, **kwd ):
"""Import another user's history via a shared URL"""
msg = ""
user = trans.get_user()
user_history = trans.get_history()
# Set referer message
if 'referer' in kwd:
referer = kwd['referer']
else:
referer = trans.request.referer
        if referer != "":
referer_message = "<a href='%s'>return to the previous page</a>" % referer
else:
referer_message = "<a href='%s'>go to Galaxy's start page</a>" % url_for( '/' )
# include all datasets when copying?
all_datasets = util.string_as_bool( kwd.get( 'all_datasets', False ) )
# Do import.
if not id:
return trans.show_error_message( "You must specify a history you want to import.<br>You can %s." % referer_message, use_panels=True )
import_history = self.get_history( trans, id, check_ownership=False, check_accessible=False )
if not import_history:
return trans.show_error_message( "The specified history does not exist.<br>You can %s." % referer_message, use_panels=True )
# History is importable if user is admin or it's accessible. TODO: probably want to have app setting to enable admin access to histories.
if not trans.user_is_admin() and not self.security_check( trans, import_history, check_ownership=False, check_accessible=True ):
return trans.show_error_message( "You cannot access this history.<br>You can %s." % referer_message, use_panels=True )
if user:
#dan: I can import my own history.
#if import_history.user_id == user.id:
# return trans.show_error_message( "You cannot import your own history.<br>You can %s." % referer_message, use_panels=True )
new_history = import_history.copy( target_user=user, all_datasets=all_datasets )
new_history.name = "imported: " + new_history.name
new_history.user_id = user.id
galaxy_session = trans.get_galaxy_session()
try:
association = trans.sa_session.query( trans.app.model.GalaxySessionToHistoryAssociation ) \
.filter_by( session_id=galaxy_session.id, history_id=new_history.id ) \
.first()
except:
association = None
new_history.add_galaxy_session( galaxy_session, association=association )
trans.sa_session.add( new_history )
trans.sa_session.flush()
# Set imported history to be user's current history.
trans.set_history( new_history )
return trans.show_ok_message(
message="""History "%s" has been imported. <br>You can <a href="%s" onclick="parent.window.location='%s';">start using this history</a> or %s."""
% ( new_history.name, web.url_for( '/' ), web.url_for( '/' ), referer_message ), use_panels=True )
elif not user_history or not user_history.datasets or confirm:
#TODO:?? should anon-users be allowed to include deleted datasets when importing?
#new_history = import_history.copy( activatable=include_deleted )
new_history = import_history.copy()
new_history.name = "imported: " + new_history.name
new_history.user_id = None
galaxy_session = trans.get_galaxy_session()
try:
association = trans.sa_session.query( trans.app.model.GalaxySessionToHistoryAssociation ) \
.filter_by( session_id=galaxy_session.id, history_id=new_history.id ) \
.first()
except:
association = None
new_history.add_galaxy_session( galaxy_session, association=association )
trans.sa_session.add( new_history )
trans.sa_session.flush()
trans.set_history( new_history )
return trans.show_ok_message(
message="""History "%s" has been imported. <br>You can <a href="%s">start using this history</a> or %s."""
% ( new_history.name, web.url_for( '/' ), referer_message ), use_panels=True )
return trans.show_warn_message( """
Warning! If you import this history, you will lose your current
history. <br>You can <a href="%s">continue and import this history</a> or %s.
""" % ( web.url_for(controller='history', action='imp', id=id, confirm=True, referer=trans.request.referer ), referer_message ), use_panels=True )
#TODO: used in history/view, display, embed
@web.expose
@web.require_login( "rename histories" )
def rename( self, trans, id=None, name=None, **kwd ):
user = trans.get_user()
if not id:
# Default to the current history
history = trans.get_history()
if not history.user:
return trans.show_error_message( "You must save your history before renaming it." )
id = trans.security.encode_id( history.id )
id = galaxy.util.listify( id )
name = galaxy.util.listify( name )
histories = []
cur_names = []
for history_id in id:
history = self.get_history( trans, history_id )
if history and history.user_id == user.id:
histories.append( history )
cur_names.append( history.get_display_name() )
if not name or len( histories ) != len( name ):
return trans.fill_template( "/history/rename.mako", histories=histories )
change_msg = ""
for i in range(len(histories)):
if histories[i].user_id == user.id:
if name[i] == histories[i].get_display_name():
change_msg = change_msg + "<p>History: "+cur_names[i]+" is already named: "+name[i]+"</p>"
elif name[i] not in [None,'',' ']:
name[i] = escape(name[i])
histories[i].name = sanitize_html( name[i] )
trans.sa_session.add( histories[i] )
trans.sa_session.flush()
change_msg = change_msg + "<p>History: "+cur_names[i]+" renamed to: "+name[i]+"</p>"
trans.log_event( "History renamed: id: %s, renamed to: '%s'" % (str(histories[i].id), name[i] ) )
else:
change_msg = change_msg + "<p>You must specify a valid name for History: "+cur_names[i]+"</p>"
else:
change_msg = change_msg + "<p>History: "+cur_names[i]+" does not appear to belong to you.</p>"
return trans.show_message( "<p>%s" % change_msg, refresh_frames=['history'] )
@web.expose
@web.require_login( "copy shared Galaxy history" )
def copy( self, trans, id=None, **kwd ):
"""Copy one or more histories"""
params = Params( kwd )
# If copy_choice was not specified, display form passing along id
# argument
copy_choice = params.get( 'copy_choice', None )
if not copy_choice:
return trans.fill_template( "/history/copy.mako", id_argument=id )
# Extract histories for id argument, defaulting to current
if id is None:
histories = [ trans.history ]
else:
ids = galaxy.util.listify( id )
histories = []
for history_id in ids:
history = self.get_history( trans, history_id, check_ownership=False )
histories.append( history )
user = trans.get_user()
for history in histories:
if history.user == user:
owner = True
else:
if trans.sa_session.query( trans.app.model.HistoryUserShareAssociation ) \
.filter_by( user=user, history=history ) \
.count() == 0:
return trans.show_error_message( "The history you are attempting to copy is not owned by you or shared with you. " )
owner = False
name = "Copy of '%s'" % history.name
if not owner:
name += " shared by '%s'" % history.user.email
if copy_choice == 'activatable':
new_history = history.copy( name=name, target_user=user, activatable=True )
elif copy_choice == 'active':
name += " (active items only)"
new_history = history.copy( name=name, target_user=user )
if len( histories ) == 1:
switch_url = url_for( controller="history", action="switch_to_history", hist_id=trans.security.encode_id( new_history.id ) )
msg = 'New history "<a href="%s" target="_top">%s</a>" has been created.' % ( switch_url, new_history.name )
else:
msg = 'Copied and created %d new histories.' % len( histories )
return trans.show_ok_message( msg )
# ------------------------------------------------------------------------- current history
@web.expose
@web.require_login( "switch to a history" )
def switch_to_history( self, trans, hist_id=None ):
"""
"""
self.set_as_current( trans, id=hist_id )
return trans.response.send_redirect( url_for( "/" ) )
def get_item( self, trans, id ):
return self.get_history( trans, id )
#TODO: override of base ui controller?
def history_data( self, trans, history ):
"""
"""
#TODO: to manager
history_data = self.get_history_dict( trans, history )
encoded_history_id = trans.security.encode_id( history.id )
history_data[ 'contents_url' ] = url_for( 'history_contents', history_id=encoded_history_id )
return history_data
#TODO: combine these next two - poss. with a redirect flag
@web.require_login( "switch to a history" )
@web.json
def set_as_current( self, trans, id=None ):
"""
"""
history = self.get_history( trans, id )
trans.set_history( history )
return self.history_data( trans, history )
@web.json
def current_history_json( self, trans ):
"""
"""
history = trans.get_history( create=True )
return self.history_data( trans, history )
@web.json
def create_new_current( self, trans, name=None ):
"""
"""
return self.history_data( trans, trans.new_history( name ) )
#TODO: /history/current to do all of the above: if ajax, return json; if post, read id and set to current
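    # Illustration only (not part of the original controller): set_as_current and
    # current_history_json both return the result of history_data(), so a client can
    # expect a JSON payload containing the serialized history plus a 'contents_url'
    # key pointing at the history_contents route, e.g.:
    #
    #   {
    #       "id": "<encoded history id>",            # from get_history_dict()
    #       "name": "Unnamed history",               # from get_history_dict()
    #       "contents_url": "<history_contents URL for this history>"
    #   }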
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/controllers/history.py
|
Python
|
gpl-3.0
| 78,352
|
[
"Galaxy"
] |
882cd407fc209f6335b8a6126e91919c079da02eff9f24ce48d74f56f58397c7
|
#!/usr/bin/env python
"""
Scalars
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001-05-31 17:48:54 $
Pearu Peterson
"""
__version__ = "$Id: Scalars.py,v 1.3 2001-05-31 17:48:54 pearu Exp $"
from . import DataSetAttr
from . import common
class Scalars(DataSetAttr.DataSetAttr):
"""Holds VTK scalars.
Usage:
Scalars(<sequence> ,name = <string>, lookup_table = 'default')
Attributes:
scalars
name
lookup_table
Public methods:
get_size()
to_string(format = 'ascii')
"""
def __init__(self,scalars,name=None,lookup_table=None):
self.name = self._get_name(name)
self.lookup_table = self._get_lookup_table(lookup_table)
self.scalars = self.get_seq(scalars,[])
def to_string(self,format='ascii'):
t = self.get_datatype(self.scalars)
ret = ['SCALARS %s %s %s'%(self.name,t,1),
'LOOKUP_TABLE %s'%(self.lookup_table),
self.seq_to_string(self.scalars,format,t)]
return '\n'.join(ret)
def get_size(self):
return len(self.scalars)
def scalars_fromfile(f,n,sl):
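    """Read n scalar values for a dataset attribute from the ASCII VTK stream f.

    sl is the already-split remainder of the SCALARS header line (data name,
    data type and, optionally, the number of components); the following
    LOOKUP_TABLE line is consumed as well. Returns a Scalars instance.
    """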
dataname = sl[0]
datatype = sl[1].lower()
assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
if len(sl)>2:
numcomp = eval(sl[2])
else:
numcomp = 1
l = common._getline(f)
l = l.split(' ')
assert len(l)==2 and l[0].lower() == 'lookup_table'
tablename = l[1]
scalars = []
while len(scalars) < n:
scalars += list(map(eval,common._getline(f).split(' ')))
assert len(scalars)==n
return Scalars(scalars,dataname,tablename)
if __name__ == "__main__":
print(Scalars([3,4,240]).to_string('binary'))
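    # Usage sketch (illustration; 'temperature' is an arbitrary name, not from the module):
    # the ASCII form shows the header layout produced by to_string():
    # 'SCALARS <name> <datatype> 1', then 'LOOKUP_TABLE <table>', then the values.
    temp = Scalars([0.5, 1.5, 2.5], name='temperature')
    print(temp.to_string('ascii'))
    print(temp.get_size())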
|
ddempsey/PyFEHM
|
pyvtk/Scalars.py
|
Python
|
lgpl-2.1
| 2,099
|
[
"VTK"
] |
ba8dd8c79952f192c2cc232637448a34f1a5392c7163aa342f178f3bcd9f208e
|
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ChemLab
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
print(sys.path[0])
from chemlab import reaction_parser
class TestTopologyReader(unittest.TestCase):
def test_parse_exchange_reaction(self):
input_string = 'C(0,1):E(0,1) + W(0,1) -> A(1):Z(1) + E(1)'
reactants, r_type = reaction_parser.parse_exchange_equation(input_string)
self.assertEqual(r_type, reaction_parser.REACTION_EXCHANGE)
# Check reactants structure
self.assertEqual(reactants['type_1']['name'], 'C')
self.assertEqual(reactants['type_1']['new_type'], 'A')
self.assertEqual(reactants['type_1']['min'], '0')
self.assertEqual(reactants['type_1']['max'], '1')
self.assertEqual(reactants['type_1']['delta'], '1')
self.assertEqual(reactants['type_2']['name'], 'E')
self.assertEqual(reactants['type_2']['new_type'], 'E')
self.assertEqual(reactants['type_2']['min'], '0')
self.assertEqual(reactants['type_2']['max'], '1')
self.assertEqual(reactants['type_2']['delta'], '1')
self.assertEqual(reactants['type_3']['name'], 'W')
self.assertEqual(reactants['type_3']['new_type'], 'Z')
self.assertEqual(reactants['type_3']['min'], '0')
self.assertEqual(reactants['type_3']['max'], '1')
self.assertEqual(reactants['type_3']['delta'], '1')
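# Illustration only (not part of the test suite): judging from the assertions above, an
# exchange equation such as
#     'C(0,1):E(0,1) + W(0,1) -> A(1):Z(1) + E(1)'
# maps each left-hand reactant TYPE(min,max) onto its right-hand NEW_TYPE(delta), i.e.
# C -> A, E -> E and W -> Z, each with min 0, max 1 and delta 1.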
if __name__ == '__main__':
unittest.main()
|
cgchemlab/chemlab
|
src/tests/test_reaction_parser.py
|
Python
|
gpl-3.0
| 2,211
|
[
"ESPResSo"
] |
e2fdcb0685e12e5e8e3d705097aa7fbba350d4c55f654b3aa07eb28af08935b6
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Proxy class for the Gramps databases. Filter out all data marked private.
"""
#-------------------------------------------------------------------------
#
# Python libraries
#
#-------------------------------------------------------------------------
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
import logging
LOG = logging.getLogger(".citation")
#-------------------------------------------------------------------------
#
# Gramps libraries
#
#-------------------------------------------------------------------------
from ..lib import (MediaRef, Attribute, Address, EventRef,
Person, Name, Source, RepoRef, Media, Place, Event,
Family, ChildRef, Repository, LdsOrd, Surname, Citation,
SrcAttribute, Note, Tag)
from .proxybase import ProxyDbBase
class PrivateProxyDb(ProxyDbBase):
"""
A proxy to a Gramps database. This proxy will act like a Gramps database,
but all data marked private will be hidden from the user.
"""
def __init__(self, db):
"""
Create a new PrivateProxyDb instance.
"""
ProxyDbBase.__init__(self, db)
def get_person_from_handle(self, handle):
"""
Finds a Person in the database from the passed Gramps ID.
If no such Person exists, None is returned.
"""
person = self.db.get_person_from_handle(handle)
if person and not person.get_privacy():
return sanitize_person(self.db, person)
return None
def get_source_from_handle(self, handle):
"""
Finds a Source in the database from the passed Gramps ID.
If no such Source exists, None is returned.
"""
source = self.db.get_source_from_handle(handle)
if source and not source.get_privacy():
return sanitize_source(self.db, source)
return None
def get_citation_from_handle(self, handle):
"""
Finds a Citation in the database from the passed Gramps ID.
If no such Citation exists, None is returned.
"""
citation = self.db.get_citation_from_handle(handle)
if citation and not citation.get_privacy():
return sanitize_citation(self.db, citation)
return None
def get_media_from_handle(self, handle):
"""
Finds an Object in the database from the passed Gramps ID.
If no such Object exists, None is returned.
"""
media = self.db.get_media_from_handle(handle)
if media and not media.get_privacy():
return sanitize_media(self.db, media)
return None
def get_place_from_handle(self, handle):
"""
Finds a Place in the database from the passed Gramps ID.
If no such Place exists, None is returned.
"""
place = self.db.get_place_from_handle(handle)
if place and not place.get_privacy():
return sanitize_place(self.db, place)
return None
def get_event_from_handle(self, handle):
"""
Finds a Event in the database from the passed Gramps ID.
If no such Event exists, None is returned.
"""
event = self.db.get_event_from_handle(handle)
if event and not event.get_privacy():
return sanitize_event(self.db, event)
return None
def get_family_from_handle(self, handle):
"""
Finds a Family in the database from the passed Gramps ID.
If no such Family exists, None is returned.
"""
family = self.db.get_family_from_handle(handle)
if family and not family.get_privacy():
return sanitize_family(self.db, family)
return None
def get_repository_from_handle(self, handle):
"""
Finds a Repository in the database from the passed Gramps ID.
If no such Repository exists, None is returned.
"""
repository = self.db.get_repository_from_handle(handle)
if repository and not repository.get_privacy():
return sanitize_repository(self.db, repository)
return None
def get_note_from_handle(self, handle):
"""
Finds a Note in the database from the passed Gramps ID.
If no such Note exists, None is returned.
"""
note = self.db.get_note_from_handle(handle)
if note and not note.get_privacy():
return note
return None
def get_person_from_gramps_id(self, val):
"""
Finds a Person in the database from the passed Gramps ID.
If no such Person exists, None is returned.
"""
person = self.db.get_person_from_gramps_id(val)
if person and not person.get_privacy():
return sanitize_person(self.db, person)
return None
def get_family_from_gramps_id(self, val):
"""
Finds a Family in the database from the passed Gramps ID.
If no such Family exists, None is returned.
"""
family = self.db.get_family_from_gramps_id(val)
if family and not family.get_privacy():
return sanitize_family(self.db, family)
return None
def get_event_from_gramps_id(self, val):
"""
Finds an Event in the database from the passed Gramps ID.
If no such Event exists, None is returned.
"""
event = self.db.get_event_from_gramps_id(val)
if event and not event.get_privacy():
return sanitize_event(self.db, event)
return None
def get_place_from_gramps_id(self, val):
"""
Finds a Place in the database from the passed Gramps ID.
If no such Place exists, None is returned.
"""
place = self.db.get_place_from_gramps_id(val)
if place and not place.get_privacy():
return sanitize_place(self.db, place)
return None
def get_source_from_gramps_id(self, val):
"""
Finds a Source in the database from the passed Gramps ID.
If no such Source exists, None is returned.
"""
source = self.db.get_source_from_gramps_id(val)
if source and not source.get_privacy():
return sanitize_source(self.db, source)
return None
def get_citation_from_gramps_id(self, val):
"""
Finds a Citation in the database from the passed Gramps ID.
If no such Citation exists, None is returned.
"""
citation = self.db.get_citation_from_gramps_id(val)
if citation and not citation.get_privacy():
return sanitize_citation(self.db, citation)
return None
def get_media_from_gramps_id(self, val):
"""
Finds a Media in the database from the passed Gramps ID.
If no such Media exists, None is returned.
"""
obj = self.db.get_media_from_gramps_id(val)
if obj and not obj.get_privacy():
return sanitize_media(self.db, obj)
return None
def get_repository_from_gramps_id(self, val):
"""
Finds a Repository in the database from the passed Gramps ID.
If no such Repository exists, None is returned.
"""
repository = self.db.get_repository_from_gramps_id(val)
if repository and not repository.get_privacy():
return sanitize_repository(self.db, repository)
return None
def get_note_from_gramps_id(self, val):
"""
Finds a Note in the database from the passed Gramps ID.
If no such Note exists, None is returned.
"""
note = self.db.get_note_from_gramps_id(val)
if note and not note.get_privacy():
return note
return None
# Define predicate functions for use by default iterator methods
def include_person(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_person(handle)
return obj and not obj.get_privacy()
def include_family(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_family(handle)
return obj and not obj.get_privacy()
def include_event(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_event(handle)
return obj and not obj.get_privacy()
def include_source(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_source(handle)
return obj and not obj.get_privacy()
def include_citation(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_citation(handle)
return obj and not obj.get_privacy()
def include_place(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_place(handle)
return obj and not obj.get_privacy()
def include_media(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_media(handle)
return obj and not obj.get_privacy()
def include_repository(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_repository(handle)
return obj and not obj.get_privacy()
def include_note(self, handle):
"""
Predicate returning True if object is to be included, else False
"""
obj = self.get_unfiltered_note(handle)
return obj and not obj.get_privacy()
def get_default_person(self):
"""returns the default Person of the database"""
person = self.db.get_default_person()
if person and not person.get_privacy():
return sanitize_person(self.db, person)
return None
def get_default_handle(self):
"""returns the default Person of the database"""
handle = self.db.get_default_handle()
if handle:
person = self.db.get_person_from_handle(handle)
if person and not person.get_privacy():
return handle
return None
def has_person_handle(self, handle):
"""
returns True if the handle exists in the current Person database.
"""
person = self.db.get_person_from_handle(handle)
if person and not person.get_privacy():
return True
return False
def has_event_handle(self, handle):
"""
returns True if the handle exists in the current Event database.
"""
event = self.db.get_event_from_handle(handle)
if event and not event.get_privacy():
return True
return False
def has_source_handle(self, handle):
"""
returns True if the handle exists in the current Source database.
"""
source = self.db.get_source_from_handle(handle)
if source and not source.get_privacy():
return True
return False
def has_citation_handle(self, handle):
"""
returns True if the handle exists in the current Citation database.
"""
citation = self.db.get_citation_from_handle(handle)
if citation and not citation.get_privacy():
return True
return False
def has_place_handle(self, handle):
"""
returns True if the handle exists in the current Place database.
"""
place = self.db.get_place_from_handle(handle)
if place and not place.get_privacy():
return True
return False
def has_family_handle(self, handle):
"""
Return True if the handle exists in the current Family database.
"""
family = self.db.get_family_from_handle(handle)
if family and not family.get_privacy():
return True
return False
def has_object_handle(self, handle):
"""
        Return True if the handle exists in the current Media database.
"""
object = self.db.get_media_from_handle(handle)
if object and not object.get_privacy():
return True
return False
def has_repository_handle(self, handle):
"""
Return True if the handle exists in the current Repository database.
"""
repository = self.db.get_repository_from_handle(handle)
if repository and not repository.get_privacy():
return True
return False
def has_note_handle(self, handle):
"""
Return True if the handle exists in the current Note database.
"""
note = self.db.get_note_from_handle(handle)
if note and not note.get_privacy():
return True
return False
def find_backlink_handles(self, handle, include_classes=None):
"""
Find all objects that hold a reference to the object handle.
Returns an iterator over a list of (class_name, handle) tuples.
:param handle: handle of the object to search for.
:type handle: database handle
:param include_classes: list of class names to include in the results.
Default: None means include all classes.
:type include_classes: list of class names
This default implementation does a sequential scan through all
the primary object databases and is very slow. Backends can
override this method to provide much faster implementations that
make use of additional capabilities of the backend.
Note that this is a generator function, it returns a iterator for
use in loops. If you want a list of the results use::
> result_list = list(find_backlink_handles(handle))
"""
# This isn't done yet because it doesn't check if references are
# private (like a MediaRef). It only checks if the
# referenced object is private.
objects = {
'Person' : self.db.get_person_from_handle,
'Family' : self.db.get_family_from_handle,
'Event' : self.db.get_event_from_handle,
'Source' : self.db.get_source_from_handle,
'Citation' : self.db.get_citation_from_handle,
'Place' : self.db.get_place_from_handle,
'Media' : self.db.get_media_from_handle,
'Note' : self.db.get_note_from_handle,
'Repository' : self.db.get_repository_from_handle,
}
handle_itr = self.db.find_backlink_handles(handle, include_classes)
for (class_name, handle) in handle_itr:
if class_name in objects:
obj = objects[class_name](handle)
if obj and not obj.get_privacy():
yield (class_name, handle)
else:
raise NotImplementedError
return
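# A minimal usage sketch for the generator above (illustration only; db and handle are
# assumed to be an existing Gramps database and a primary-object handle):
#
#     proxy = PrivateProxyDb(db)
#     referrers = list(proxy.find_backlink_handles(handle, include_classes=['Person']))
#
# Only (class_name, handle) pairs whose referenced object is not private are yielded;
# as noted above, the privacy of the reference itself is not yet checked.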
def copy_media_ref_list(db, original_obj, clean_obj):
"""
Copies media references from one object to another - excluding private
references and references to private objects.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have private references
:type original_obj: MediaBase
:param clean_obj: Object that will have only non-private references
    :type clean_obj: MediaBase
:returns: Nothing
"""
for media_ref in original_obj.get_media_list():
if media_ref and not media_ref.get_privacy():
handle = media_ref.get_reference_handle()
media = db.get_media_from_handle(handle)
if media and not media.get_privacy():
clean_obj.add_media_reference(sanitize_media_ref(db, media_ref))
def copy_citation_ref_list(db, original_obj, clean_obj):
"""
Copies citation references from one object to another - excluding references
to private citations, and references to citations that refer to private
sources.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have private references
:type original_obj: CitationBase
:param clean_obj: Object that will have only non-private references
    :type clean_obj: CitationBase
:returns: Nothing
"""
for citation_handle in original_obj.get_citation_list():
citation = db.get_citation_from_handle(citation_handle)
if citation and not citation.get_privacy():
handle = citation.get_reference_handle()
source = db.get_source_from_handle(handle)
if source and not source.get_privacy():
clean_obj.add_citation(citation_handle)
def copy_notes(db, original_obj, clean_obj):
"""
Copies notes from one object to another - excluding references to private
notes.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have private references
:type original_obj: NoteBase
:param clean_obj: Object that will have only non-private references
    :type clean_obj: NoteBase
:returns: Nothing
"""
for note_handle in original_obj.get_note_list():
note = db.get_note_from_handle(note_handle)
if note and not note.get_privacy():
clean_obj.add_note(note_handle)
def copy_associations(db, original_obj, clean_obj):
"""
Copies associations from one object to another - excluding
references to private notes.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have private references
:type original_obj: Base
:param clean_obj: Object that will have only non-private references
    :type clean_obj: Base
:returns: Nothing
"""
new_person_ref_list = []
for person_ref in original_obj.get_person_ref_list():
if person_ref and not person_ref.get_privacy():
associated_person = db.get_person_from_handle(person_ref.ref)
if associated_person and not associated_person.get_privacy():
new_person_ref_list.append(person_ref)
clean_obj.set_person_ref_list(new_person_ref_list)
def copy_attributes(db, original_obj, clean_obj):
"""
Copies attributes from one object to another - excluding references to
private attributes.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have private references
:type original_obj: AttributeBase
:param clean_obj: Object that will have only non-private references
    :type clean_obj: AttributeBase
:returns: Nothing
"""
for attribute in original_obj.get_attribute_list():
if attribute and not attribute.get_privacy():
new_attribute = Attribute()
new_attribute.set_type(attribute.get_type())
new_attribute.set_value(attribute.get_value())
copy_notes(db, attribute, new_attribute)
copy_citation_ref_list(db, attribute, new_attribute)
clean_obj.add_attribute(new_attribute)
def copy_srcattributes(db, original_obj, clean_obj):
"""
Copies srcattributes from one object to another - excluding references to
private srcattributes.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have private references
:type original_obj: SrcAttributeBase
:param clean_obj: Object that will have only non-private references
    :type clean_obj: SrcAttributeBase
:returns: Nothing
"""
for attribute in original_obj.get_attribute_list():
if attribute and not attribute.get_privacy():
new_attribute = SrcAttribute()
new_attribute.set_type(attribute.get_type())
new_attribute.set_value(attribute.get_value())
clean_obj.add_attribute(new_attribute)
def copy_urls(db, original_obj, clean_obj):
"""
Copies urls from one object to another - excluding references to
private urls.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have urls
:type original_obj: UrlBase
:param clean_obj: Object that will have only non-private urls
    :type clean_obj: UrlBase
:returns: Nothing
"""
for url in original_obj.get_url_list():
if url and not url.get_privacy():
clean_obj.add_url(url)
def copy_lds_ords(db, original_obj, clean_obj):
"""
Copies LDS ORDs from one object to another - excluding references to
private LDS ORDs.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have LDS ORDs
:type original_obj: LdsOrdBase
:param clean_obj: Object that will have only non-private LDS ORDs
    :type clean_obj: LdsOrdBase
:returns: Nothing
"""
for lds_ord in original_obj.get_lds_ord_list():
if lds_ord and not lds_ord.get_privacy():
clean_obj.add_lds_ord(sanitize_lds_ord(db, lds_ord))
def copy_addresses(db, original_obj, clean_obj):
"""
Copies addresses from one object to another - excluding references to
private addresses.
:param db: Gramps database to which the references belongs
:type db: DbBase
:param original_obj: Object that may have addresses
:type original_obj: AddressBase
:param clean_obj: Object that will have only non-private addresses
    :type clean_obj: AddressBase
:returns: Nothing
"""
for address in original_obj.get_address_list():
if address and not address.get_privacy():
clean_obj.add_address(sanitize_address(db, address))
def sanitize_lds_ord(db, lds_ord):
"""
Create a new LdsOrd instance based off the passed LdsOrd
instance. The returned instance has all private records
removed from it.
:param db: Gramps database to which the LdsOrd object belongs
:type db: DbBase
    :param lds_ord: source LdsOrd object that will be copied with
                    privacy records removed
    :type lds_ord: LdsOrd
:returns: 'cleansed' LdsOrd object
:rtype: LdsOrd
"""
new_lds_ord = LdsOrd()
new_lds_ord.set_type(lds_ord.get_type())
new_lds_ord.set_status(lds_ord.get_status())
new_lds_ord.set_temple(lds_ord.get_temple())
fam_handle = lds_ord.get_family_handle()
if fam_handle:
fam = db.get_family_from_handle(fam_handle)
if fam and not fam.get_privacy():
new_lds_ord.set_family_handle(fam_handle)
new_lds_ord.set_date_object(lds_ord.get_date_object())
place_handle = lds_ord.get_place_handle()
if place_handle:
place = db.get_place_from_handle(place_handle)
if place and not place.get_privacy():
new_lds_ord.set_place_handle(place_handle)
copy_citation_ref_list(db, lds_ord, new_lds_ord)
copy_notes(db, lds_ord, new_lds_ord)
return new_lds_ord
def sanitize_address(db, address):
"""
Create a new Address instance based off the passed Address
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the Address object belongs
:type db: DbBase
    :param address: source Address object that will be copied with
                    privacy records removed
    :type address: Address
:returns: 'cleansed' Address object
:rtype: Address
"""
new_address = Address()
new_address.set_street(address.get_street())
new_address.set_locality(address.get_locality())
new_address.set_city(address.get_city())
new_address.set_county(address.get_county())
new_address.set_state(address.get_state())
new_address.set_country(address.get_country())
new_address.set_postal_code(address.get_postal_code())
new_address.set_phone(address.get_phone())
new_address.set_date_object(address.get_date_object())
copy_citation_ref_list(db, address, new_address)
copy_notes(db, address, new_address)
return new_address
def sanitize_name(db, name):
"""
Create a new Name instance based off the passed Name
instance. The returned instance has all private records
removed from it.
:param db: Gramps database to which the Person object belongs
:type db: DbBase
:param name: source Name object that will be copied with
privacy records removed
:type name: Name
:returns: 'cleansed' Name object
:rtype: Name
"""
new_name = Name()
new_name.set_group_as(name.get_group_as())
new_name.set_sort_as(name.get_sort_as())
new_name.set_display_as(name.get_display_as())
new_name.set_call_name(name.get_call_name())
new_name.set_nick_name(name.get_nick_name())
new_name.set_family_nick_name(name.get_family_nick_name())
new_name.set_type(name.get_type())
new_name.set_first_name(name.get_first_name())
new_name.set_suffix(name.get_suffix())
new_name.set_title(name.get_title())
new_name.set_date_object(name.get_date_object())
new_name.set_surname_list(name.get_surname_list())
copy_citation_ref_list(db, name, new_name)
copy_notes(db, name, new_name)
return new_name
def sanitize_media_ref(db, media_ref):
"""
Create a new MediaRef instance based off the passed MediaRef
instance. The returned instance has all private records
removed from it.
:param db: Gramps database to which the MediaRef object belongs
:type db: DbBase
    :param media_ref: source MediaRef object that will be copied with
                      privacy records removed
    :type media_ref: MediaRef
:returns: 'cleansed' MediaRef object
:rtype: MediaRef
"""
new_ref = MediaRef()
new_ref.set_rectangle(media_ref.get_rectangle())
new_ref.set_reference_handle(media_ref.get_reference_handle())
copy_notes(db, media_ref, new_ref)
copy_attributes(db, media_ref, new_ref)
copy_citation_ref_list(db, media_ref, new_ref)
return new_ref
def sanitize_citation(db, citation):
"""
Create a new Citation instance based off the passed Citation
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the Citation object belongs
:type db: DbBase
:param citation: source Citation object that will be copied with
privacy records removed
:type citation: Citation
:returns: 'cleansed' Citation object
:rtype: Citation
"""
new_citation = Citation()
new_citation.set_date_object(citation.get_date_object())
new_citation.set_page(citation.get_page())
new_citation.set_confidence_level(citation.get_confidence_level())
new_citation.set_reference_handle(citation.get_reference_handle())
new_citation.set_gramps_id(citation.get_gramps_id())
new_citation.set_handle(citation.get_handle())
new_citation.set_change_time(citation.get_change_time())
copy_srcattributes(db, citation, new_citation)
copy_notes(db, citation, new_citation)
copy_media_ref_list(db, citation, new_citation)
return new_citation
def sanitize_event_ref(db, event_ref):
"""
Create a new EventRef instance based off the passed EventRef
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the EventRef object belongs
:type db: DbBase
:param event_ref: source EventRef object that will be copied with
privacy records removed
:type event_ref: EventRef
:returns: 'cleansed' EventRef object
:rtype: EventRef
"""
new_ref = EventRef()
new_ref.set_reference_handle(event_ref.get_reference_handle())
new_ref.set_role(event_ref.get_role())
copy_notes(db, event_ref, new_ref)
copy_attributes(db, event_ref, new_ref)
return new_ref
def sanitize_person(db, person):
"""
Create a new Person instance based off the passed Person
instance. The returned instance has all private records
removed from it.
:param db: Gramps database to which the Person object belongs
:type db: DbBase
:param person: source Person object that will be copied with
privacy records removed
:type person: Person
:returns: 'cleansed' Person object
:rtype: Person
"""
new_person = Person()
# copy gender
new_person.set_gender(person.get_gender())
new_person.set_gramps_id(person.get_gramps_id())
new_person.set_handle(person.get_handle())
new_person.set_change_time(person.get_change_time())
new_person.set_tag_list(person.get_tag_list())
# copy names if not private
name = person.get_primary_name()
if (name and name.get_privacy()) or (person and person.get_privacy()):
# Do this so a person always has a primary name of some sort.
name = Name()
surn = Surname()
surn.set_surname(_('Private'))
name.set_surname_list([surn])
name.set_primary_surname()
else:
name = sanitize_name(db, name)
new_person.set_primary_name(name)
# copy Family reference list
for handle in person.get_family_handle_list():
family = db.get_family_from_handle(handle)
if family and not family.get_privacy():
new_person.add_family_handle(handle)
# copy Family reference list
for handle in person.get_parent_family_handle_list():
family = db.get_family_from_handle(handle)
if not family:
continue
elif family.get_privacy():
continue
child_ref_list = family.get_child_ref_list()
for child_ref in child_ref_list:
if child_ref.get_reference_handle() == person.get_handle():
if child_ref and not child_ref.get_privacy():
new_person.add_parent_family_handle(handle)
break
for name in person.get_alternate_names():
if name and not name.get_privacy():
new_person.add_alternate_name(sanitize_name(db, name))
# copy event list
for event_ref in person.get_event_ref_list():
if event_ref and not event_ref.get_privacy():
event = db.get_event_from_handle(event_ref.ref)
if event and not event.get_privacy():
new_person.add_event_ref(sanitize_event_ref(db, event_ref))
# Copy birth and death after event list to maintain the order.
# copy birth event
event_ref = person.get_birth_ref()
if event_ref and not event_ref.get_privacy():
event = db.get_event_from_handle(event_ref.ref)
if event and not event.get_privacy():
new_person.set_birth_ref(sanitize_event_ref(db, event_ref))
# copy death event
event_ref = person.get_death_ref()
if event_ref and not event_ref.get_privacy():
event = db.get_event_from_handle(event_ref.ref)
if event and not event.get_privacy():
new_person.set_death_ref(sanitize_event_ref(db, event_ref))
copy_addresses(db, person, new_person)
copy_attributes(db, person, new_person)
copy_citation_ref_list(db, person, new_person)
copy_urls(db, person, new_person)
copy_media_ref_list(db, person, new_person)
copy_lds_ords(db, person, new_person)
copy_notes(db, person, new_person)
copy_associations(db, person, new_person)
return new_person
def sanitize_source(db, source):
"""
Create a new Source instance based off the passed Source
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the Source object belongs
:type db: DbBase
:param source: source Source object that will be copied with
privacy records removed
:type source: Source
:returns: 'cleansed' Source object
:rtype: Source
"""
new_source = Source()
new_source.set_author(source.get_author())
new_source.set_title(source.get_title())
new_source.set_publication_info(source.get_publication_info())
new_source.set_abbreviation(source.get_abbreviation())
new_source.set_gramps_id(source.get_gramps_id())
new_source.set_handle(source.get_handle())
new_source.set_change_time(source.get_change_time())
for repo_ref in source.get_reporef_list():
if repo_ref and not repo_ref.get_privacy():
handle = repo_ref.get_reference_handle()
repo = db.get_repository_from_handle(handle)
if repo and not repo.get_privacy():
new_source.add_repo_reference(RepoRef(repo_ref))
copy_srcattributes(db, source, new_source)
copy_media_ref_list(db, source, new_source)
copy_notes(db, source, new_source)
return new_source
def sanitize_media(db, media):
"""
Create a new Media instance based off the passed Media
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the Media object belongs
:type db: DbBase
:param media: source Media object that will be copied with
privacy records removed
:type media: Media
:returns: 'cleansed' Media object
:rtype: Media
"""
new_media = Media()
new_media.set_mime_type(media.get_mime_type())
new_media.set_path(media.get_path())
new_media.set_description(media.get_description())
new_media.set_gramps_id(media.get_gramps_id())
new_media.set_handle(media.get_handle())
new_media.set_change_time(media.get_change_time())
new_media.set_date_object(media.get_date_object())
new_media.set_tag_list(media.get_tag_list())
copy_citation_ref_list(db, media, new_media)
copy_attributes(db, media, new_media)
copy_notes(db, media, new_media)
return new_media
def sanitize_place(db, place):
"""
Create a new Place instance based off the passed Place
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the Place object belongs
:type db: DbBase
:param place: source Place object that will be copied with
privacy records removed
:type place: Place
:returns: 'cleansed' Place object
:rtype: Place
"""
new_place = Place()
new_place.set_title(place.get_title())
new_place.set_gramps_id(place.get_gramps_id())
new_place.set_handle(place.get_handle())
new_place.set_change_time(place.get_change_time())
new_place.set_longitude(place.get_longitude())
new_place.set_latitude(place.get_latitude())
new_place.set_alternate_locations(place.get_alternate_locations())
new_place.set_name(place.get_name())
new_place.set_alternative_names(place.get_alternative_names())
new_place.set_type(place.get_type())
new_place.set_code(place.get_code())
new_place.set_placeref_list(place.get_placeref_list())
copy_citation_ref_list(db, place, new_place)
copy_notes(db, place, new_place)
copy_media_ref_list(db, place, new_place)
copy_urls(db, place, new_place)
return new_place
def sanitize_event(db, event):
"""
Create a new Event instance based off the passed Event
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the Event object belongs
:type db: DbBase
:param event: source Event object that will be copied with
privacy records removed
:type event: Event
:returns: 'cleansed' Event object
:rtype: Event
"""
new_event = Event()
new_event.set_type(event.get_type())
new_event.set_description(event.get_description())
new_event.set_gramps_id(event.get_gramps_id())
new_event.set_handle(event.get_handle())
new_event.set_date_object(event.get_date_object())
new_event.set_change_time(event.get_change_time())
copy_citation_ref_list(db, event, new_event)
copy_notes(db, event, new_event)
copy_media_ref_list(db, event, new_event)
copy_attributes(db, event, new_event)
place_handle = event.get_place_handle()
if place_handle:
place = db.get_place_from_handle(place_handle)
if place and not place.get_privacy():
new_event.set_place_handle(place_handle)
return new_event
def sanitize_family(db, family):
"""
Create a new Family instance based off the passed Family
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the Family object belongs
:type db: DbBase
:param family: source Family object that will be copied with
privacy records removed
:type family: Family
:returns: 'cleansed' Family object
:rtype: Family
"""
new_family = Family()
new_family.set_gramps_id(family.get_gramps_id())
new_family.set_handle(family.get_handle())
new_family.set_relationship(family.get_relationship())
new_family.set_change_time(family.get_change_time())
new_family.set_tag_list(family.get_tag_list())
# Copy the father handle.
father_handle = family.get_father_handle()
if father_handle:
father = db.get_person_from_handle(father_handle)
if father and not father.get_privacy():
new_family.set_father_handle(father_handle)
# Copy the mother handle.
mother_handle = family.get_mother_handle()
if mother_handle:
mother = db.get_person_from_handle(mother_handle)
if mother and not mother.get_privacy():
new_family.set_mother_handle(mother_handle)
# Copy child references.
for child_ref in family.get_child_ref_list():
if child_ref and child_ref.get_privacy():
continue
child_handle = child_ref.get_reference_handle()
child = db.get_person_from_handle(child_handle)
if child and child.get_privacy():
continue
# Copy this reference
new_ref = ChildRef()
new_ref.set_reference_handle(child_ref.get_reference_handle())
new_ref.set_father_relation(child_ref.get_father_relation())
new_ref.set_mother_relation(child_ref.get_mother_relation())
copy_notes(db, child_ref, new_ref)
copy_citation_ref_list(db, child_ref, new_ref)
new_family.add_child_ref(new_ref)
# Copy event ref list.
for event_ref in family.get_event_ref_list():
if event_ref and not event_ref.get_privacy():
event = db.get_event_from_handle(event_ref.ref)
if event and not event.get_privacy():
new_family.add_event_ref(sanitize_event_ref(db, event_ref))
copy_citation_ref_list(db, family, new_family)
copy_notes(db, family, new_family)
copy_media_ref_list(db, family, new_family)
copy_attributes(db, family, new_family)
copy_lds_ords(db, family, new_family)
return new_family
def sanitize_repository(db, repository):
"""
Create a new Repository instance based off the passed Repository
instance. The returned instance has all private records
removed from it.
    :param db: Gramps database to which the Repository object belongs
:type db: DbBase
:param repository: source Repository object that will be copied with
privacy records removed
:type repository: Repository
:returns: 'cleansed' Repository object
:rtype: Repository
"""
new_repository = Repository()
new_repository.set_type(repository.get_type())
new_repository.set_name(repository.get_name())
new_repository.set_gramps_id(repository.get_gramps_id())
new_repository.set_handle(repository.get_handle())
new_repository.set_change_time(repository.get_change_time())
copy_notes(db, repository, new_repository)
copy_addresses(db, repository, new_repository)
copy_urls(db, repository, new_repository)
return new_repository
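# A minimal sketch of the intended use of this module (illustration only; the helper below
# is not part of Gramps and the db / person_handle arguments are assumed to be an existing
# DbBase instance and a person handle):
def _example_private_view(db, person_handle):
    """Wrap a database in PrivateProxyDb and fetch one person through it."""
    proxy = PrivateProxyDb(db)
    # Returns a sanitized copy of the person, or None if the record is marked private.
    return proxy.get_person_from_handle(person_handle)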
|
sam-m888/gramps
|
gramps/gen/proxy/private.py
|
Python
|
gpl-2.0
| 41,102
|
[
"Brian"
] |
2c173b8893df023abc1208667f1cafec788fb78a993c158bba02ad63a045337e
|
"""
Copyright (C) 2014, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Rudolf Streif (rstreif@jaguarlandrover.com)
"""
from django.contrib import admin
from security.models import JSONWebKey
class KeyAdmin(admin.ModelAdmin):
"""
    Administration view for JSON Web Keys.
"""
readonly_fields = ('key_kid', 'key_created', 'key_updated')
list_display = ('key_name', 'key_kty', 'key_kid', 'not_expired')
fieldsets = [
(None, {'fields': ['key_name']}),
('Key Information', {'fields': ['key_kty', 'key_kid', 'key_created', 'key_updated']}),
        ('Key Expiration', {'fields': ['key_valid_from', 'key_valid_to']}),
('Algorithm Information', {'fields': ['key_alg_sig', 'key_alg_enc']}),
('Key Data', {'fields': ['key_pem']}),
]
    def key_kid(self, obj):
        return obj.key_kid
key_kid.short_description = 'Key Fingerprint'
admin.site.register(JSONWebKey, KeyAdmin)
|
rstreif/rvi_backend
|
web/security/admin.py
|
Python
|
mpl-2.0
| 1,149
|
[
"Jaguar"
] |
ac7a9d23cbaba4ef80766949c7adc2c7e43512e82a030a31218648b1634d245d
|
#!/usr/bin/env python3
# coding: utf-8
"""
Subprogram that constitutes the first step of the Mikado pipeline.
"""
import sys
import os
import argparse
import logging
import logging.handlers
from ._utils import check_log_settings_and_create_logger
from ..configuration import MikadoConfiguration, DaijinConfiguration, parse_list_file
from ..configuration.configurator import load_and_validate_config
from ..exceptions import InvalidConfiguration
from collections import Counter
from typing import Tuple, Union
from ..preparation.prepare import prepare
__author__ = 'Luca Venturini'
def parse_gff_args(mikado_config, args):
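    """Validate the input assembly (GFF) files, labels and strand-specificity options
    parsed from the command line and copy them into the prepare section of the
    configuration, rejecting duplicated files and duplicated or empty labels."""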
__gff_counter = Counter()
__gff_counter.update(args.gff)
if __gff_counter.most_common()[0][1] > 1:
raise InvalidConfiguration(
"Repeated elements among the input GFFs! Duplicated files: {}".format(
", ".join(_[0] for _ in __gff_counter.most_common() if _[1] > 1)
))
mikado_config.prepare.files.gff = args.gff
num_files = len(mikado_config.prepare.files.gff)
if args.strand_specific:
mikado_config.prepare.strand_specific = True
elif args.strand_specific_assemblies:
strand_specific_assemblies = args.strand_specific_assemblies.split(",")
if len(strand_specific_assemblies) > num_files:
raise InvalidConfiguration("Incorrect number of strand-specific assemblies specified!")
for member in strand_specific_assemblies:
if member not in mikado_config.prepare.files.gff:
raise InvalidConfiguration("Incorrect assembly file specified as strand-specific")
mikado_config.prepare.files.strand_specific_assemblies = strand_specific_assemblies
if args.labels:
labels = args.labels.split(",")
# Checks labels are unique
if len(set(labels)) < len(labels):
raise InvalidConfiguration("Duplicated labels detected!")
elif any([True for _ in labels if _.strip() == '']):
raise InvalidConfiguration("Empty labels provided!")
elif len(labels) != num_files:
raise InvalidConfiguration("Incorrect number of labels specified")
mikado_config.prepare.files.labels = labels
else:
if not mikado_config.prepare.files.labels:
labels = [str(_) for _ in list(range(1, 1 + num_files))]
mikado_config.prepare.files.labels = labels
mikado_config.prepare.files.exclude_redundant = [False] * len(mikado_config.prepare.files.gff)
mikado_config.prepare.files.reference = [False] * len(mikado_config.prepare.files.gff)
return mikado_config
def parse_prepare_options(args, mikado_config) -> Union[DaijinConfiguration, MikadoConfiguration]:
if args.reference is not None:
if hasattr(args.reference, "close") and hasattr(args.reference, "name"):
args.reference.close()
mikado_config.reference.genome = args.reference.name
elif hasattr(args.reference, "close") and hasattr(args.reference, "filename"):
# Pysam FastaFile. The filename is bytes, not str
args.reference.close()
mikado_config.reference.genome = args.reference.filename.decode()
elif isinstance(args.reference, bytes):
mikado_config.reference.genome = args.reference.decode()
elif isinstance(args.reference, str):
mikado_config.reference.genome = args.reference
else:
raise InvalidConfiguration(f"Invalid value type for the reference: {args.reference} (type "
f"{type(args.reference)}")
if not os.path.exists(mikado_config.reference.genome):
raise InvalidConfiguration("Reference genome file {} is not available. Please double check.".format(
mikado_config.reference.genome))
if args.list:
mikado_config = parse_list_file(mikado_config, args.list)
elif args.gff and args.gff != [""] and args.gff != []:
mikado_config = parse_gff_args(mikado_config, args)
if getattr(args, "exclude_redundant", None) in (True, False):
mikado_config.prepare.exclude_redundant = args.exclude_redundant
mikado_config.prepare.files.exclude_redundant = [args.exclude_redundant] * len(mikado_config.prepare.files.gff)
elif not mikado_config.prepare.files.exclude_redundant:
mikado_config.prepare.files.exclude_redundant = [False] * len(mikado_config.prepare.files.gff)
elif len(mikado_config.prepare.files.exclude_redundant) != len(mikado_config.prepare.files.gff):
raise InvalidConfiguration("Mismatch between exclude_redundant and gff files")
if not mikado_config.prepare.files.reference:
mikado_config.prepare.files.reference = [False] * len(mikado_config.prepare.files.gff)
elif len(mikado_config.prepare.files.reference) != len(mikado_config.prepare.files.gff):
raise InvalidConfiguration("Mismatch between is_reference and gff files")
# Set values from fields
mikado_config.prepare.minimum_cdna_length = getattr(args, "minimum_cdna_length", None) if \
getattr(args, "minimum_cdna_length", None) else mikado_config.prepare.minimum_cdna_length
mikado_config.prepare.max_intron_length = getattr(args, "max_intron_length", None) if \
getattr(args, "max_intron_length", None) else mikado_config.prepare.max_intron_length
mikado_config.prepare.single = getattr(args, "single", None) if getattr(args, "single", None) else \
mikado_config.prepare.single
mikado_config.multiprocessing_method = getattr(args, "start_method", None) if \
getattr(args, "start_method", None) else mikado_config.multiprocessing_method
mikado_config.prepare.files.output_dir = getattr(args, "output_dir", None) if \
getattr(args, "output_dir", None) else mikado_config.prepare.files.output_dir
mikado_config.prepare.lenient = True if getattr(args, "lenient", None) is not None else \
mikado_config.prepare.lenient
mikado_config.prepare.strip_faulty_cds = True if getattr(args, "strip_faulty_cds", None) else \
mikado_config.prepare.strip_faulty_cds
mikado_config.prepare.strip_cds = True if getattr(args, "strip_cds", None) else mikado_config.prepare.strip_cds
mikado_config.serialise.codon_table = str(args.codon_table) if (
getattr(args, "codon_table", None) not in (None, False, True)) else mikado_config.serialise.codon_table
if args.random_seed is True:
mikado_config.seed = None
elif args.seed is not None:
mikado_config.seed = args.seed
else:
pass
mikado_config.check()
assert isinstance(mikado_config.reference.genome, str)
return mikado_config
def setup(args, logger=None) -> Tuple[argparse.Namespace, Union[MikadoConfiguration, DaijinConfiguration],
                                      logging.Logger]:
"""Method to set up the analysis using the JSON configuration
and the command line options.
:param args: the ArgumentParser-derived namespace.
"""
if logger is None or not isinstance(logger, logging.Logger):
logger = logging.getLogger("prepare")
logger.setLevel(logging.INFO)
logger.debug("Starting to get prepare arguments")
mikado_config = load_and_validate_config(args.configuration, logger=logger)
parse_prepare_options(args, mikado_config)
try:
os.makedirs(mikado_config.prepare.files.output_dir, exist_ok=True)
except (OSError, PermissionError, FileExistsError) as exc:
logger.error("Failed to create the output directory!")
logger.exception(exc)
raise exc
if len(mikado_config.prepare.files.gff) == 0:
parser = prepare_parser()
logger.error("No input files found!")
print(parser.format_help())
sys.exit(0)
if args.procs is not None and args.procs > 0:
mikado_config.threads = args.procs
mikado_config, logger = check_log_settings_and_create_logger(mikado_config, args.log, args.log_level,
section="prepare")
logger.info("Command line: %s", " ".join(sys.argv))
logger.info("Random seed: %s", mikado_config.seed)
mikado_config.prepare.files.out = os.path.basename(mikado_config.prepare.files.out)
if getattr(args, "out") not in (None, False):
mikado_config.prepare.files.out = os.path.basename(args.out)
mikado_config.prepare.files.out_fasta = os.path.basename(args.out_fasta) if args.out_fasta is not None else \
os.path.basename(mikado_config.prepare.files.out_fasta)
mikado_config.reference.genome = mikado_config.reference.genome.decode() if \
isinstance(mikado_config.reference.genome, bytes) else mikado_config.reference.genome
return args, mikado_config, logger
def prepare_launcher(args):
args, mikado_config, logger = setup(args)
assert isinstance(mikado_config, (MikadoConfiguration, DaijinConfiguration))
prepare(mikado_config, logger)
sys.exit(0)
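# Rough sketch of the intended flow (illustration only; the argument vector normally comes
# from the Mikado command-line entry point, which lives outside this module):
#
#     args = prepare_parser().parse_args(argv)
#     prepare_launcher(args)    # runs setup() and prepare(), then exits with status 0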
def prepare_parser():
"""
This function defines the parser for the command line interface
of the program.
    :return: the argument parser for the Mikado prepare command
    :rtype: argparse.ArgumentParser
"""
def to_cpu_count(string):
"""
:param string: cpu requested
:rtype: int
"""
return max(1, int(string))
def positive(string):
"""
        Simple function to return the absolute value of the input string, cast to an integer.
:param string:
:return:
"""
return abs(int(string))
parser = argparse.ArgumentParser("""Script to prepare a GTF for the pipeline;
it will perform the following operations:
1- add the "transcript" feature
2- sort by coordinates
3- check the strand""")
parser.add_argument("--fasta", "--reference", dest="reference",
type=argparse.FileType(), help="Genome FASTA file. Required.")
verbosity = parser.add_mutually_exclusive_group()
verbosity.add_argument("--verbose", default=None, dest="log_level", action="store_const", const="DEBUG")
verbosity.add_argument("--quiet", default=None, dest="log_level", action="store_const", const="WARNING")
verbosity.add_argument("-lv", "--log-level", default=None,
choices=["DEBUG", "INFO", "WARN", "ERROR"],
help="Log level. Default: derived from the configuration; if absent, INFO")
parser.add_argument("--start-method", dest="start_method",
choices=["fork", "spawn", "forkserver"],
default=None, help="Multiprocessing start method.")
strand = parser.add_mutually_exclusive_group()
strand.add_argument("-s", "--strand-specific", dest="strand_specific",
action="store_true", default=False,
help="""Flag. If set, monoexonic transcripts
will be left on their strand rather than being
moved to the unknown strand.""")
strand.add_argument("-sa", "--strand-specific-assemblies",
default=None,
type=str,
dest="strand_specific_assemblies",
help="Comma-delimited list of strand specific assemblies.")
parser.add_argument("--list", type=argparse.FileType("r"),
help="""Tab-delimited file containing rows with the following format:
<file> <label> <strandedness(def. False)> <score(optional, def. 0)> <is_reference(optional, def. False)> <exclude_redundant(optional, def. True)> <strip_cds(optional, def. False)> <skip_split(optional, def. False)>
"strandedness", "is_reference", "exclude_redundant", "strip_cds" and "skip_split" must be boolean values (True, False)
"score" must be a valid floating number."""
)
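    # A hypothetical example of the tab-delimited list file described above (file and label
    # are mandatory; the remaining columns fall back to their stated defaults):
    #   stringtie.gtf	st	False	1
    #   cufflinks.gtf	cuff	True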
parser.add_argument("-l", "--log", default=None, help="Log file. Optional.")
parser.add_argument("--lenient", action="store_true", default=None,
help="""Flag. If set, transcripts with only non-canonical
splices will be output as well.""")
parser.add_argument("-m", "--minimum-cdna-length", default=None, dest="minimum_cdna_length", type=positive,
help="Minimum length for transcripts. Default: 200 bps.")
parser.add_argument("-MI", "--max-intron-size", default=None, type=positive, dest="max_intron_length",
help="Maximum intron length for transcripts. Default: 1,000,000 bps.")
parser.add_argument("-p", "--procs",
help="Number of processors to use (default %(default)s)",
type=to_cpu_count, default=None)
parser.add_argument("-scds", "--strip_cds", action="store_true", default=False,
help="Boolean flag. If set, ignores any CDS/UTR segment.")
parser.add_argument("--labels", type=str, default="",
help="""Labels to attach to the IDs of the transcripts of the input files,
separated by comma.""")
parser.add_argument("--codon-table", dest="codon_table", default=None,
help="""Codon table to use. Default: 0 (ie Standard, NCBI #1, but only ATG is considered \
a valid start codon.""")
parser.add_argument("--single", "--single-thread", action="store_true", default=False,
help="Disable multi-threading. Useful for debugging.")
parser.add_argument("-od", "--output-dir", dest="output_dir",
type=str, default=None,
help="Output directory. Default: current working directory")
parser.add_argument("-o", "--out", default=None,
help="Output file. Default: mikado_prepared.gtf.")
parser.add_argument("-of", "--out_fasta", default=None,
help="Output file. Default: mikado_prepared.fasta.")
parser.add_argument("--configuration", "--json-conf", dest="configuration",
type=str, default="",
help="Configuration file.")
parser.add_argument("-er", "--exclude-redundant", default=None,
dest="exclude_redundant", action="store_true",
help="Boolean flag. If invoked, Mikado prepare will exclude redundant models,\
ignoring the per-sample instructions.")
cds_stripping = parser.add_mutually_exclusive_group()
cds_stripping.add_argument("--strip-faulty-cds", default=None, action="store_true",
help="Flag. If set, transcripts with an incorrect CDS will be retained but \
with their CDS stripped. Default behaviour: the whole transcript will be considered invalid and discarded.")
seed_group = parser.add_mutually_exclusive_group()
seed_group.add_argument("--seed", type=int, default=None, help="Random seed number. Default: 0.")
seed_group.add_argument("--random-seed", action="store_true", default=False,
help="Generate a new random seed number (instead of the default of 0)")
parser.add_argument("gff", help="Input GFF/GTF file(s).", nargs="*")
parser.set_defaults(func=prepare_launcher)
return parser
|
lucventurini/mikado
|
Mikado/subprograms/prepare.py
|
Python
|
lgpl-3.0
| 15,276
|
[
"pysam"
] |
e049ff4b89dfe4a583224530c6fa1aedaf0a9eb3baf871c714fca90c68d05b4d
|
def visit_Call(self, node):
""" The visit of a call node.
        It overrides the default visit_Call, ignoring all calls
except for those we need to modify.
:param node: A call node
"""
name = self.__find_call_name(node)
if name in ATOMIC_SOURCES:
id = self.__get_id()
self.__replace_connection(id, node)
elif name in WRAPPERS:
if self.dw_flag:
raise Exception('There is more than one wrapper in this program')
else:
id = self.dw_id
self.__replace_connection(id, node)
self.dw_flag = True
|
Betaboxguugi/P6
|
documentation/presentation/code/CallNode.py
|
Python
|
gpl-3.0
| 658
|
[
"VisIt"
] |
e6d2e0bd2358bcbe7b2c0fe9eb976314f9eee159571f67b5dab9d7abf3bace5f
|
# -*- coding: utf-8 -*-
#############################################################################
# SRWLIB Example: Virtual Beamline: a set of utilities and functions allowing to simulate
# operation of an SR Beamline.
# The standard use of this script is from command line, with some optional arguments,
# e.g. for calculation (with default parameter values) of:
# UR Spectrum Through a Slit (Flux within a default aperture):
# python SRWLIB_VirtBL_*.py --sm
# Single-Electron UR Spectrum (Flux per Unit Surface):
# python SRWLIB_VirtBL_*.py --ss
# UR Power Density (at the first optical element):
# python SRWLIB_VirtBL_*.py --pw
# Input Single-Electron UR Intensity Distribution (at the first optical element):
# python SRWLIB_VirtBL_*.py --si
# Single-Electron Wavefront Propagation:
# python SRWLIB_VirtBL_*.py --ws
# Multi-Electron Wavefront Propagation:
# Sequential Mode:
# python SRWLIB_VirtBL_*.py --wm
# Parallel Mode (using MPI / mpi4py), e.g.:
# mpiexec -n 6 python SRWLIB_VirtBL_*.py --wm
# For changing parameters of all these calculations from the default values, see the definition
# of all options in the list at the end of the script.
# v 0.07
#############################################################################
from __future__ import print_function #Python 2.7 compatibility
from srwl_bl import *
try:
import cPickle as pickle
except:
import pickle
#import time
#*********************************Setting Up Optical Elements and Propagation Parameters
def set_optics(_v):
"""This function describes optical layout of the Coherent Hoard X-ray (CHX) beamline of NSLS-II.
Such function has to be written for every beamline to be simulated; it is specific to a particular beamline.
:param _v: structure containing all parameters allowed to be varied for that particular beamline
"""
#---Nominal Positions of Optical Elements [m] (with respect to straight section center)
zS0 = 20.5 #S0 (primary slit)
zHDM = 27.4 #Horizontally-Deflecting Mirror (HDM)
zS1 = 29.9 #S1 slit
zDCM = 31.6 #DCM (vertically-deflecting)
zS2 = 34.3 #S2 slit
zBPM = 34.6 #BPM for beam visualization
zCRL = 35.4 #+tzCRL*1e-3 #CRL transfocator (corrected by translation)
zKL = 45.0 #44.5 #+tzKL*1e-3 #Kinoform Lens for horizontal focusing (corrected by translation)
zS3 = 48.0 #S3 slit ('pinhole', waist position)
zSample = 48.7 #Sample position, COR of diffractometer
zD = 58.7 #Detector position
#---Instantiation of the Optical Elements
arElNamesAllOpt = [
['S0', 'S0_S1', 'S1', 'S1_S2', 'S2', 'S2_BPM', 'BPM_CRL', 'CRL1', 'CRL2', 'CRL_KL', 'KLA', 'KL', 'KL_S3', 'S3', 'S3_SMP', 'SMP', 'SMP_D'], #1
['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_SMP'], #2
['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_KL', 'KLA', 'KL', 'KL_S3', 'S3', 'SMP', 'SMP_D'], #3
['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_SMP'], #4
['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'FIB', 'CRL_SMP'], #5
]
arElNamesAll = arElNamesAllOpt[int(round(_v.op_BL - 1))]
if(len(_v.op_fin) > 0):
if(_v.op_fin not in arElNamesAll): raise Exception('Optical element with the name specified in the "op_fin" option is not present in this beamline')
#Could be made more general
arElNames = [];
for i in range(len(arElNamesAll)):
arElNames.append(arElNamesAll[i])
if(len(_v.op_fin) > 0):
if(arElNamesAll[i] == _v.op_fin): break
el = []; pp = [] #lists of SRW optical element objects and their corresponding propagation parameters
#S0 (primary slit)
if('S0' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S0_dx, _v.op_S0_dy, _v.op_S0_x, _v.op_S0_y)); pp.append(_v.op_S0_pp)
#Drift S0 -> HDM
if('S0_HDM' in arElNames):
el.append(SRWLOptD(zHDM - zS0)); pp.append(_v.op_S0_HDM_pp)
#Drift S0 -> S1
if('S0_S1' in arElNames):
el.append(SRWLOptD(zS1 - zS0)); pp.append(_v.op_S0_S1_pp)
#HDM (Height Profile Error)
if('HDM' in arElNames):
horApHDM = 0.94e-03 #Projected dimensions
verApHDM = 1.e-03
angHDM = 3.1415926e-03 #? grazing angle
ifnHDM = os.path.join(_v.fdir, _v.op_HDM_ifn) if len(_v.op_HDM_ifn) > 0 else ''
if(len(ifnHDM) > 0):
hProfDataHDM = srwl_uti_read_data_cols(ifnHDM, '\t', 0, 1)
opHDM = srwl_opt_setup_surf_height_1d(hProfDataHDM, 'x', _ang=angHDM, _amp_coef=_v.op_HDM_amp, _nx=1000, _ny=200, _size_x=horApHDM, _size_y=verApHDM, _xc=_v.op_HDM_x, _yc=_v.op_HDM_y)
ofnHDM = os.path.join(_v.fdir, _v.op_HDM_ofn) if len(_v.op_HDM_ofn) > 0 else ''
if(len(ofnHDM) > 0):
pathDifHDM = opHDM.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifHDM, opHDM.mesh, ofnHDM, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
el.append(opHDM); pp.append(_v.op_HDM_pp)
#Drift HDM -> S1
if('HDM_S1' in arElNames):
el.append(SRWLOptD(zS1 - zHDM + _v.op_S1_dz)); pp.append(_v.op_HDM_S1_pp)
#S1 slit
if('S1' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S1_dx, _v.op_S1_dy, _v.op_S1_x, _v.op_S1_y)); pp.append(_v.op_S1_pp)
#Drift S1 -> DCM
if('S1_DCM' in arElNames):
el.append(SRWLOptD(zDCM - zS1)); pp.append(_v.op_S1_DCM_pp)
#Double-Crystal Monochromator
tCr1 = [0, 0, -1] #required for surface error
if('DCM' in arElNames):
tc = 1e-02 # [m] crystal thickness
angAs = 0.*3.1415926/180. # [rad] asymmetry angle
hc = [1,1,1]
dc = srwl_uti_cryst_pl_sp(hc, 'Si')
#print('DCM Interplannar dist.:', dc)
psi = srwl_uti_cryst_pol_f(_v.op_DCM_e, hc, 'Si')
#print('DCM Fourier Components:', psi)
#---------------------- DCM Crystal #1
opCr1 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs)
#Find appropriate orientation of the Crystal #1 and the Output Beam Frame (using a member-function in SRWLOptCryst):
#orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=1.5707963) # Horizontally-deflecting (from HXN)
orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e) # Vertically-deflecting
#Crystal #1 Orientation found:
orientCr1 = orientDataCr1[0]
tCr1 = orientCr1[0] #Tangential Vector to Crystal surface
sCr1 = orientCr1[1] #Sagital Vector to Crystal surface
nCr1 = orientCr1[2] #Normal Vector to Crystal surface
print('DCM Crystal #1 Orientation (original):')
print(' t =', tCr1, 's =', orientCr1[1], 'n =', nCr1)
if(_v.op_DCM_ac1 != 0): #Small rotation of DCM Crystal #1:
rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac1, [0,0,0])
tCr1 = uti_math.matr_prod(rot[0], tCr1)
sCr1 = uti_math.matr_prod(rot[0], sCr1)
nCr1 = uti_math.matr_prod(rot[0], nCr1)
#Set the Crystal #1 orientation:
opCr1.set_orient(nCr1[0], nCr1[1], nCr1[2], tCr1[0], tCr1[1])
#Orientation of the Outgoing Beam Frame being found:
orientCr1OutFr = orientDataCr1[1]
rxCr1 = orientCr1OutFr[0] #Horizontal Base Vector of the Output Beam Frame
ryCr1 = orientCr1OutFr[1] #Vertical Base Vector of the Output Beam Frame
rzCr1 = orientCr1OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
print('DCM Crystal #1 Outgoing Beam Frame:')
print(' ex =', rxCr1, 'ey =', ryCr1, 'ez =', rzCr1)
#Incoming/Outgoing beam frame transformation matrix for the DCM Crystal #1
TCr1 = [rxCr1, ryCr1, rzCr1]
print('Total transformation matrix after DCM Crystal #1:')
uti_math.matr_print(TCr1)
#print(' ')
el.append(opCr1); pp.append(_v.op_DCMC1_pp)
#---------------------- DCM Crystal #2
opCr2 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs)
#Find appropriate orientation of the Crystal #2 and the Output Beam Frame
#orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=-1.5707963) #from HXN
orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=3.1415926) #Vertically-deflecting
#Crystal #2 Orientation found:
orientCr2 = orientDataCr2[0]
tCr2 = orientCr2[0] #Tangential Vector to Crystal surface
sCr2 = orientCr2[1]
nCr2 = orientCr2[2] #Normal Vector to Crystal surface
print('Crystal #2 Orientation (original):')
print(' t =', tCr2, 's =', sCr2, 'n =', nCr2)
if(_v.op_DCM_ac2 != 0): #Small rotation of DCM Crystal #2:
rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac2, [0,0,0])
tCr2 = uti_math.matr_prod(rot[0], tCr2)
sCr2 = uti_math.matr_prod(rot[0], sCr2)
nCr2 = uti_math.matr_prod(rot[0], nCr2)
#Set the Crystal #2 orientation
opCr2.set_orient(nCr2[0], nCr2[1], nCr2[2], tCr2[0], tCr2[1])
#Orientation of the Outgoing Beam Frame being found:
orientCr2OutFr = orientDataCr2[1]
rxCr2 = orientCr2OutFr[0] #Horizontal Base Vector of the Output Beam Frame
ryCr2 = orientCr2OutFr[1] #Vertical Base Vector of the Output Beam Frame
rzCr2 = orientCr2OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
print('DCM Crystal #2 Outgoing Beam Frame:')
print(' ex =', rxCr2, 'ey =', ryCr2, 'ez =',rzCr2)
#Incoming/Outgoing beam transformation matrix for the DCM Crystal #2
TCr2 = [rxCr2, ryCr2, rzCr2]
Ttot = uti_math.matr_prod(TCr2, TCr1)
print('Total transformation matrix after DCM Crystal #2:')
uti_math.matr_print(Ttot)
#print(' ')
el.append(opCr2); pp.append(_v.op_DCMC2_pp)
#DCM Surface Error
horApDCM = 2.e-03 #Projected dimensions
verApDCM = 2.e-03
angDCM = asin(abs(tCr1[2])) #Grazing angle to crystal surface
ifnDCME = os.path.join(_v.fdir, _v.op_DCME_ifn) if len(_v.op_DCME_ifn) > 0 else ''
if(len(ifnDCME) > 0):
hProfDataDCME = srwl_uti_read_data_cols(ifnDCME, '\t', 0, 1)
opDCME = srwl_opt_setup_surf_height_1d(hProfDataDCME, 'y', _ang=angDCM, _amp_coef=_v.op_DCME_amp, _nx=1000, _ny=200, _size_x=horApDCM, _size_y=verApDCM, _xc=_v.op_DCME_x, _yc=_v.op_DCME_y)
ofnDCME = os.path.join(_v.fdir, _v.op_DCME_ofn) if len(_v.op_DCME_ofn) > 0 else ''
if(len(ofnDCME) > 0):
pathDifDCME = opDCME.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifDCME, opDCME.mesh, ofnDCME, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
el.append(opDCME); pp.append(_v.op_DCME_pp)
#Drift DCM -> S2
if('DCM_S2' in arElNames):
el.append(SRWLOptD(zS2 - zDCM + _v.op_S2_dz)); pp.append(_v.op_DCM_S2_pp)
#Boron Fiber (with Tungsten core)
if('FIB' in arElNames):
fpln = 3 #focusing in both planes
if((_v.op_FIB_fpl == 'h') or (_v.op_FIB_fpl == 'H') or (_v.op_FIB_fpl == 'x') or (_v.op_FIB_fpl == 'X')): fpln = 1
elif((_v.op_FIB_fpl == 'v') or (_v.op_FIB_fpl == 'V') or (_v.op_FIB_fpl == 'y') or (_v.op_FIB_fpl == 'Y')): fpln = 2
el.append(srwl_opt_setup_cyl_fiber(fpln, _v.op_FIB_delta_e, _v.op_FIB_delta_c, _v.op_FIB_atnl_e, _v.op_FIB_atnl_c, _v.op_FIB_d_e, _v.op_FIB_d_c, _v.op_FIB_x, _v.op_FIB_y))
pp.append(_v.op_FIB_pp)
#Drift S1 -> S2
if('S1_S2' in arElNames):
el.append(SRWLOptD(zS2 - zS1 + _v.op_S2_dz)); pp.append(_v.op_S1_S2_pp)
#S2 slit
if('S2' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S2_dx, _v.op_S2_dy, _v.op_S2_x, _v.op_S2_y)); pp.append(_v.op_S2_pp)
#Drift S2 -> BPM
if('S2_BPM' in arElNames):
el.append(SRWLOptD(zBPM - zS2 + _v.op_BPM_dz)); pp.append(_v.op_S2_BPM_pp)
#Drift BPM -> CRL
if('BPM_CRL' in arElNames):
el.append(SRWLOptD(zCRL - zBPM + _v.op_CRL_dz)); pp.append(_v.op_BPM_CRL_pp)
#Drift S2 -> CRL
if('S2_CRL' in arElNames):
el.append(SRWLOptD(zCRL - zS2 - _v.op_S2_dz + _v.op_CRL_dz)); pp.append(_v.op_S2_CRL_pp)
#CRL1 (1D, vertically-focusing)
if('CRL1' in arElNames):
if((_v.op_CRL1_n > 0) and (_v.op_CRL1_fpl != '')):
fpln = 3 #focusing in both planes
if((_v.op_CRL1_fpl == 'h') or (_v.op_CRL1_fpl == 'H') or (_v.op_CRL1_fpl == 'x') or (_v.op_CRL1_fpl == 'X')): fpln = 1
elif((_v.op_CRL1_fpl == 'v') or (_v.op_CRL1_fpl == 'V') or (_v.op_CRL1_fpl == 'y') or (_v.op_CRL1_fpl == 'Y')): fpln = 2
el.append(srwl_opt_setup_CRL(fpln, _v.op_CRL1_delta, _v.op_CRL1_atnl, 1, _v.op_CRL1_apnf, _v.op_CRL1_apf, _v.op_CRL1_rmin, _v.op_CRL1_n, _v.op_CRL1_thck, _v.op_CRL1_x, _v.op_CRL1_y))
pp.append(_v.op_CRL1_pp)
#CRL2 (1D, vertically-focusing)
if('CRL2' in arElNames):
if((_v.op_CRL2_n > 0) and (_v.op_CRL2_fpl != '')):
fpln = 3 #focusing in both planes
if((_v.op_CRL2_fpl == 'h') or (_v.op_CRL2_fpl == 'H') or (_v.op_CRL2_fpl == 'x') or (_v.op_CRL2_fpl == 'X')): fpln = 1
elif((_v.op_CRL2_fpl == 'v') or (_v.op_CRL2_fpl == 'V') or (_v.op_CRL2_fpl == 'y') or (_v.op_CRL2_fpl == 'Y')): fpln = 2
el.append(srwl_opt_setup_CRL(fpln, _v.op_CRL2_delta, _v.op_CRL2_atnl, 1, _v.op_CRL2_apnf, _v.op_CRL2_apf, _v.op_CRL2_rmin, _v.op_CRL2_n, _v.op_CRL2_thck, _v.op_CRL2_x, _v.op_CRL2_y))
pp.append(_v.op_CRL2_pp)
#Drift CRL -> KL
if('CRL_KL' in arElNames):
el.append(SRWLOptD(zKL - zCRL - _v.op_CRL_dz + _v.op_KL_dz)); pp.append(_v.op_CRL_KL_pp)
#Drift CRL -> Sample
if('CRL_SMP' in arElNames):
el.append(SRWLOptD(zSample - zCRL - _v.op_CRL_dz + _v.op_SMP_dz)); pp.append(_v.op_CRL_SMP_pp)
#KL Aperture
if('KLA' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_KLA_dx, _v.op_KLA_dy, _v.op_KL_x, _v.op_KL_y)); pp.append(_v.op_KLA_pp)
#KL (1D, horizontally-focusing)
if('KL' in arElNames):
el.append(SRWLOptL(_v.op_KL_fx, _v.op_KL_fy, _v.op_KL_x, _v.op_KL_y)) #KL as Ideal Lens; to make it a transmission element with a profile read from a file
pp.append(_v.op_KL_pp)
#Drift KL -> S3
if('KL_S3' in arElNames):
el.append(SRWLOptD(zS3 - zKL + _v.op_S3_dz)); pp.append(_v.op_KL_S3_pp)
#S3 slit
if('S3' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S3_dx, _v.op_S3_dy, _v.op_S3_x, _v.op_S3_y)); pp.append(_v.op_S3_pp)
#Drift S3 -> Sample
if('S3_SMP' in arElNames):
el.append(SRWLOptD(zSample - zS3 + _v.op_SMP_dz)); pp.append(_v.op_S3_SMP_pp)
#Sample
if('SMP' in arElNames):
ifnSMP = os.path.join(_v.fdir, _v.op_SMP_ifn) if len(_v.op_SMP_ifn) > 0 else ''
if(len(ifnSMP) > 0):
ifSMP = open(ifnSMP, 'rb')
opSMP = pickle.load(ifSMP)
#Implementing transverse shift of sample ??
xSt = opSMP.mesh.xStart
xFi = opSMP.mesh.xFin
halfRangeX = 0.5*(xFi - xSt)
opSMP.mesh.xStart = -halfRangeX + _v.op_SMP_x
opSMP.mesh.xFin = halfRangeX + _v.op_SMP_x
ySt = opSMP.mesh.yStart
yFi = opSMP.mesh.yFin
halfRangeY = 0.5*(yFi - ySt)
opSMP.mesh.yStart = -halfRangeY + _v.op_SMP_y
opSMP.mesh.yFin = halfRangeY + _v.op_SMP_y
ofnSMP = os.path.join(_v.fdir, _v.op_SMP_ofn) if len(_v.op_SMP_ofn) > 0 else ''
if(len(ofnSMP) > 0):
pathDifSMP = opSMP.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifSMP, opSMP.mesh, ofnSMP, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
el.append(opSMP); pp.append(_v.op_SMP_pp)
ifSMP.close()
#Drift Sample -> Detector
if('SMP_D' in arElNames):
el.append(SRWLOptD(zD - zSample + _v.op_D_dz)); pp.append(_v.op_SMP_D_pp)
pp.append(_v.op_fin_pp)
return SRWLOptC(el, pp)
#*********************************List of Parameters allowed to be varied
#---List of supported options / commands / parameters allowed to be varied for this Beamline (comment-out unnecessary):
varParam = [
#---Data Folder
['fdir', 's', os.path.join(os.getcwd(), 'data_CHX'), 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', 'NSLS-II Low Beta ', 'standard electron beam name'],
['ebm_nms', 's', 'Day1', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_de', 'f', 0., 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0., 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0., 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0., 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0., 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.7, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', -1, 'electron beam relative energy spread'],
['ebm_emx', 'f', -1, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', -1, 'electron beam vertical emittance [m]'],
#---Undulator
['und_per', 'f', 0.02, 'undulator period [m]'],
['und_len', 'f', 3., 'undulator length [m]'],
['und_b', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
#['und_bx', 'f', 0., 'undulator horizontal peak magnetic field [T]'],
#['und_by', 'f', 1., 'undulator vertical peak magnetic field [T]'],
#['und_phx', 'f', 1.5708, 'undulator horizontal magnetic field phase [rad]'],
#['und_phy', 'f', 0., 'undulator vertical magnetic field phase [rad]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_zc', 'f', 0., 'undulator center longitudinal position [m]'],
['und_mdir', 's', 'magn_meas', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', 'ivu20_chx_sum.txt', 'name of magnetic measurements for different gaps summary file'],
#['und_g', 'f', 0., 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
#NOTE: the above option/variable names (fdir, ebm*, und*, ss*, sm*, pw*, is*, ws*, wm*) should be the same in all beamline scripts
    #on the other hand, the beamline optics related options below (op*) are specific to a particular beamline (and can differ from beamline to beamline).
#However, the default values of all the options/variables (above and below) can differ from beamline to beamline.
#---Beamline Optics
['op_r', 'f', 20.5, 'longitudinal position of the first optical element [m]'],
['op_fin', 's', 'S3_SMP', 'name of the final optical element wavefront has to be propagated through'],
['op_BL', 'f', 1, 'beamline version/option number'],
['op_S0_dx', 'f', 0.2e-03, 'slit S0: horizontal size [m]'],
['op_S0_dy', 'f', 1.0e-03, 'slit S0: vertical size [m]'],
['op_S0_x', 'f', 0., 'slit S0: horizontal center position [m]'],
['op_S0_y', 'f', 0., 'slit S0: vertical center position [m]'],
['op_HDM_ifn', 's', 'CHX_HDM_height_prof_1d.dat', 'mirror HDM: input file name of height profile data'],
['op_HDM_amp', 'f', 1., 'mirror HDM: amplification coefficient for height profile data'],
['op_HDM_ofn', 's', 'res_CHX_HDM_opt_path_dif.dat', 'mirror HDM: output file name of optical path difference data'],
['op_HDM_x', 'f', 0., 'mirror HDM surface error: horizontal center position [m]'],
['op_HDM_y', 'f', 0., 'mirror HDM surface error: vertical center position [m]'],
['op_S1_dz', 'f', 0., 'S1: offset of longitudinal position [m]'],
['op_S1_dx', 'f', 0.2e-03, 'slit S1: horizontal size [m]'],
['op_S1_dy', 'f', 1.0e-03, 'slit S1: vertical size [m]'],
['op_S1_x', 'f', 0., 'slit S1: horizontal center position [m]'],
['op_S1_y', 'f', 0., 'slit S1: vertical center position [m]'],
['op_DCM_e', 'f', 9000., 'DCM: central photon energy DCM is tuned to [eV]'],
['op_DCM_ac1', 'f', 0., 'DCM: angular deviation of 1st crystal from exact Bragg angle [rad]'],
['op_DCM_ac2', 'f', 0., 'DCM: angular deviation of 2nd crystal from exact Bragg angle [rad]'],
['op_DCME_ifn', 's', 'CHX_DCM_height_prof_1d.dat', 'DCM surface error: input file name of height profile data'],
['op_DCME_amp', 'f', 1., 'DCM surface error: amplification coefficient'],
['op_DCME_ofn', 's', 'res_CHX_DCM_opt_path_dif.dat', 'DCM surface error: output file name of optical path difference data'],
['op_DCME_x', 'f', 0., 'DCM surface error: horizontal center position [m]'],
['op_DCME_y', 'f', 0., 'DCM surface error: vertical center position [m]'],
['op_FIB_fpl', 's', '', 'FIB: focusing plane ("h" or "v" or "hv" or "")'],
['op_FIB_delta_e', 'f', 4.20756805e-06, 'Fiber: refractive index decrement of main (exterior) material'],
['op_FIB_delta_c', 'f', 4.20756805e-06, 'Fiber: refractive index decrement of core material'],
['op_FIB_atnl_e', 'f', 7312.94e-06, 'Fiber: attenuation length of main (exterior) material [m]'],
['op_FIB_atnl_c', 'f', 7312.94e-06, 'Fiber: attenuation length of core material [m]'],
['op_FIB_d_e', 'f', 100.e-06, 'Fiber: ext. diameter [m]'],
['op_FIB_d_c', 'f', 10.e-06, 'Fiber: core diameter [m]'],
['op_FIB_x', 'f', 0., 'Fiber: horizontal center position [m]'],
['op_FIB_y', 'f', 0., 'Fiber: vertical center position [m]'],
['op_S2_dz', 'f', 0., 'S2: offset of longitudinal position [m]'],
['op_S2_dx', 'f', 0.05e-03, 'slit S2: horizontal size [m]'],
['op_S2_dy', 'f', 0.2e-03, 'slit S2: vertical size [m]'], #1.0e-03, 'slit S2: vertical size [m]'],
['op_S2_x', 'f', 0., 'slit S2: horizontal center position [m]'],
['op_S2_y', 'f', 0., 'slit S2: vertical center position [m]'],
['op_BPM_dz', 'f', 0., 'BPM: offset of longitudinal position [m]'],
['op_CRL_dz', 'f', 0., 'CRL: offset of longitudinal position [m]'],
['op_CRL1_fpl', 's', 'v', 'CRL1: focusing plane ("h" or "v" or "hv" or "")'],
['op_CRL1_delta', 'f', 4.20756805e-06, 'CRL1: refractive index decrements of material'],
['op_CRL1_atnl', 'f', 7312.94e-06, 'CRL1: attenuation length of material [m]'],
    ['op_CRL1_apnf', 'f', 1.e-03, 'CRL1: geometrical aperture of 1D CRL in the plane where there is no focusing'],
    ['op_CRL1_apf', 'f', 2.4e-03, 'CRL1: geometrical aperture of 1D CRL in the focusing plane'],
    ['op_CRL1_rmin', 'f', 1.5e-03, 'CRL1: radius of surface curvature at the tip of parabola [m]'],
['op_CRL1_n', 'i', 1, 'CRL1: number of individual lenses'],
['op_CRL1_thck', 'f', 80.e-06, 'CRL1: wall thickness (at the tip of parabola) [m]'],
['op_CRL1_x', 'f', 0., 'CRL1: horizontal center position [m]'],
['op_CRL1_y', 'f', 0., 'CRL1: vertical center position [m]'],
['op_CRL2_fpl', 's', 'v', 'CRL2: focusing plane ("h" or "v" or "hv" or "")'],
['op_CRL2_delta', 'f', 4.20756805e-06, 'CRL2: refractive index decrements of material'],
['op_CRL2_atnl', 'f', 7312.94e-06, 'CRL2: attenuation length of material [m]'],
    ['op_CRL2_apnf', 'f', 1.e-03, 'CRL2: geometrical aperture of 1D CRL in the plane where there is no focusing'],
    ['op_CRL2_apf', 'f', 1.4e-03, 'CRL2: geometrical aperture of 1D CRL in the focusing plane'],
    ['op_CRL2_rmin', 'f', 0.5e-03, 'CRL2: radius of surface curvature at the tip of parabola [m]'],
['op_CRL2_n', 'i', 6, 'CRL2: number of individual lenses'],
['op_CRL2_thck', 'f', 80.e-06, 'CRL2: wall thickness (at the tip of parabola) [m]'],
['op_CRL2_x', 'f', 0., 'CRL2: horizontal center position [m]'],
['op_CRL2_y', 'f', 0., 'CRL2: vertical center position [m]'],
['op_KLA_dx', 'f', 1.0e-03, 'KL aperture: horizontal size [m]'], #1.4e-03, 'KL Aperture: horizontal size [m]'],
['op_KLA_dy', 'f', 0.1e-03, 'KL aperture: vertical size [m]'], #0.2e-03, 'KL Aperture: vertical size [m]'],
['op_KL_dz', 'f', 0., 'KL: offset of longitudinal position [m]'],
['op_KL_fx', 'f', 3.24479, 'KL: horizontal focal length [m]'],
['op_KL_fy', 'f', 1.e+23, 'KL: vertical focal length [m]'],
['op_KL_x', 'f', 0., 'KL: horizontal center position [m]'],
['op_KL_y', 'f', 0., 'KL: vertical center position [m]'],
['op_S3_dz', 'f', 0., 'S3: offset of longitudinal position [m]'],
['op_S3_dx', 'f', 10.e-06, 'slit S3: horizontal size [m]'],
['op_S3_dy', 'f', 10.e-06, 'slit S3: vertical size [m]'],
['op_S3_x', 'f', 0., 'slit S3: horizontal center position [m]'],
['op_S3_y', 'f', 0., 'slit S3: vertical center position [m]'],
['op_SMP_dz', 'f', 0., 'sample: offset of longitudinal position [m]'],
['op_SMP_ifn', 's', 'CHX_SMP_CDI_001.pickle', 'sample: model file name (binary "dumped" SRW transmission object)'],
['op_SMP_ofn', 's', 'res_CHX_SMP_opt_path_dif.dat', 'sample: output file name of optical path difference data'],
['op_SMP_x', 'f', 0., 'sample: horizontal center position [m]'],
['op_SMP_y', 'f', 0., 'sample: vertical center position [m]'],
['op_D_dz', 'f', 0., 'detector: offset of longitudinal position [m]'],
#to add options for different beamline cases, etc.
#Propagation Param.: [0][1][2][3][4] [5] [6] [7] [8] [9][10][11]
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 4.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_HDM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> HDM: propagation parameters'],
['op_S0_S1_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> S1: propagation parameters'],
['op_HDM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HDM: propagation parameters'],
['op_HDM_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift HDM -> S1: propagation parameters'],
['op_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S1: propagation parameters'],
['op_S1_DCM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> DCM: propagation parameters'],
['op_DCMC1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #1: propagation parameters'],
['op_DCMC2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #2: propagation parameters'],
['op_DCME_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #1&2: surface height error'],
['op_FIB_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'fiber: propagation parameters'],
['op_DCM_S2_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift DCM -> S2: propagation parameters'],
['op_S1_S2_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> S2: propagation parameters'],
['op_S2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S2: propagation parameters'],
['op_S2_BPM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S2 -> BPM: propagation parameters'],
['op_S2_CRL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S2 -> BPM: propagation parameters'],
['op_BPM_CRL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift BPM -> CRL: propagation parameters'],
['op_CRL1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'CRL1: propagation parameters'],
['op_CRL2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'CRL2: propagation parameters'],
['op_CRL_KL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift CRL -> KL: propagation parameters'],
['op_CRL_SMP_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift CRL -> sample: propagation parameters'],
['op_KLA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'KL aperture: propagation parameters'],
#['op_KL_pp', 'f', [0, 0, 1, 0, 0, 1.0, 5.0, 1.0, 7.0, 0, 0, 0], 'KL: propagation parameters'],
['op_KL_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'KL: propagation parameters'],
['op_KL_S3_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift KL -> S3: propagation parameters'],
#['op_S3_pp', 'f', [0, 0, 1, 0, 0, 0.3, 3.0, 0.3, 3.0, 0, 0, 0], 'slit S3: propagation parameters'],
['op_S3_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S3: propagation parameters'],
#['op_S3_SMP_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> Sample: propagation parameters'],
['op_S3_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> sample: propagation parameters'],
['op_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample: propagation parameters'],
['op_SMP_D_pp', 'f', [0, 0, 1, 3, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample -> detector: propagation parameters'],
#['op_fin_pp', 'f', [0, 0, 1, 0, 1, 0.1, 5.0, 1.0, 1.5, 0, 0, 0], 'final post-propagation (resize) parameters'],
['op_fin_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
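#An illustrative reading of the 'op_S0_pp' entry above, per the legend: [0, 0, 1, 0, 0, 2.5, 5.0, 1.5, 2.5, 0, 0, 0]
#means no auto-resizing before or after propagation, nominal relative precision (1.), no semi-analytical treatment
#of the quadratic phase, no FFT-side resizing, horizontal range x2.5 with resolution x5.0, and vertical range x1.5
#with resolution x2.5.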
varParam = srwl_uti_ext_options(varParam)
#*********************************Entry
if __name__ == "__main__":
#---Parse options, defining Beamline elements and running calculations
v = srwl_uti_parse_options(varParam)
#---Add some constant "parameters" (not allowed to be varied) for the beamline
#v.und_per = 0.02 #['und_per', 'f', 0.02, 'undulator period [m]'],
#v.und_len = 3. #['und_len', 'f', 3., 'undulator length [m]'],
#v.und_zc = 0. #['und_zc', 'f', 0., 'undulator center longitudinal position [m]'],
    #v.und_sy = -1 #['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
#---Setup optics only if Wavefront Propagation is required:
v.ws = True
op = set_optics(v) if(v.ws or v.wm) else None
#---Run all requested calculations
SRWLBeamline('Coherent Hard X-ray beamline').calc_all(v, op)
|
mrakitin/sirepo
|
tests/template/srw_import_data/chx.py
|
Python
|
apache-2.0
| 32,265
|
[
"CRYSTAL"
] |
06614c7e389d91736ead1e58960f747e88bb11986ef56b21779c9e3939f0c065
|
"""Factor Analysis.
A latent linear variable model.
FactorAnalysis is similar to probabilistic PCA implemented by PCA.score
While PCA assumes Gaussian noise with the same variance for each
feature, the FactorAnalysis model assumes different variances for
each of them.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
# Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Licence: BSD3
import warnings
from math import sqrt, log
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array, check_random_state
from ..utils.extmath import fast_logdet, fast_dot, randomized_svd, squared_norm
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
class FactorAnalysis(BaseEstimator, TransformerMixin):
"""Factor Analysis (FA)
A simple linear generative model with Gaussian latent variables.
The observations are assumed to be caused by a linear transformation of
lower dimensional latent factors and added Gaussian noise.
Without loss of generality the factors are distributed according to a
Gaussian with zero mean and unit covariance. The noise is also zero mean
and has an arbitrary diagonal covariance matrix.
If we would restrict the model further, by assuming that the Gaussian
noise is even isotropic (all diagonal entries are the same) we would obtain
:class:`PPCA`.
FactorAnalysis performs a maximum likelihood estimate of the so-called
`loading` matrix, the transformation of the latent variables to the
observed ones, using expectation-maximization (EM).
Parameters
----------
n_components : int | None
Dimensionality of latent space, the number of components
of ``X`` that are obtained after ``transform``.
If None, n_components is set to the number of features.
tol : float
Stopping tolerance for EM algorithm.
copy : bool
Whether to make a copy of X. If ``False``, the input X gets overwritten
during fitting.
max_iter : int
Maximum number of iterations.
noise_variance_init : None | array, shape=(n_features,)
The initial guess of the noise variance for each feature.
If None, it defaults to np.ones(n_features)
svd_method : {'lapack', 'randomized'}
Which SVD method to use. If 'lapack' use standard SVD from
scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
Defaults to 'randomized'. For most applications 'randomized' will
be sufficiently precise while providing significant speed gains.
Accuracy can also be improved by setting higher values for
`iterated_power`. If this is not sufficient, for maximum precision
you should choose 'lapack'.
iterated_power : int, optional
Number of iterations for the power method. 3 by default. Only used
if ``svd_method`` equals 'randomized'
random_state : int or RandomState
Pseudo number generator state used for random sampling. Only used
if ``svd_method`` equals 'randomized'
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
loglike_ : list, [n_iterations]
The log likelihood at each iteration.
noise_variance_ : array, shape=(n_features,)
The estimated noise variance for each feature.
n_iter_ : int
Number of iterations run.
References
----------
.. David Barber, Bayesian Reasoning and Machine Learning,
Algorithm 21.1
.. Christopher M. Bishop: Pattern Recognition and Machine Learning,
Chapter 12.2.4
See also
--------
PCA: Principal component analysis is also a latent linear variable model
which however assumes equal noise variance for each feature.
This extra assumption makes probabilistic PCA faster as it can be
computed in closed form.
FastICA: Independent component analysis, a latent variable model with
non-Gaussian latent variables.
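    Examples
    --------
    A minimal, illustrative fit on random data (shapes only)::
        >>> import numpy as np
        >>> from sklearn.decomposition import FactorAnalysis
        >>> X = np.random.RandomState(0).randn(100, 10)
        >>> fa = FactorAnalysis(n_components=3).fit(X)
        >>> fa.transform(X).shape
        (100, 3)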
"""
def __init__(self, n_components=None, tol=1e-2, copy=True, max_iter=1000,
noise_variance_init=None, svd_method='randomized',
iterated_power=3, random_state=0):
self.n_components = n_components
self.copy = copy
self.tol = tol
self.max_iter = max_iter
if svd_method not in ['lapack', 'randomized']:
            raise ValueError('SVD method %s is not supported. Please consult'
' the documentation' % svd_method)
self.svd_method = svd_method
self.noise_variance_init = noise_variance_init
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using EM
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self
"""
X = check_array(X, copy=self.copy, dtype=np.float)
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
# some constant terms
nsqrt = sqrt(n_samples)
llconst = n_features * log(2. * np.pi) + n_components
var = np.var(X, axis=0)
if self.noise_variance_init is None:
psi = np.ones(n_features, dtype=X.dtype)
else:
if len(self.noise_variance_init) != n_features:
raise ValueError("noise_variance_init dimension does not "
"with number of features : %d != %d" %
(len(self.noise_variance_init), n_features))
psi = np.array(self.noise_variance_init)
loglike = []
old_ll = -np.inf
SMALL = 1e-12
# we'll modify svd outputs to return unexplained variance
# to allow for unified computation of loglikelihood
if self.svd_method == 'lapack':
def my_svd(X):
_, s, V = linalg.svd(X, full_matrices=False)
return (s[:n_components], V[:n_components],
squared_norm(s[n_components:]))
elif self.svd_method == 'randomized':
random_state = check_random_state(self.random_state)
def my_svd(X):
_, s, V = randomized_svd(X, n_components,
random_state=random_state,
n_iter=self.iterated_power)
return s, V, squared_norm(X) - squared_norm(s)
else:
            raise ValueError('SVD method %s is not supported. Please consult'
' the documentation' % self.svd_method)
for i in xrange(self.max_iter):
# SMALL helps numerics
sqrt_psi = np.sqrt(psi) + SMALL
s, V, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
s **= 2
# Use 'maximum' here to avoid sqrt problems.
W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * V
del V
W *= sqrt_psi
# loglikelihood
ll = llconst + np.sum(np.log(s))
ll += unexp_var + np.sum(np.log(psi))
ll *= -n_samples / 2.
loglike.append(ll)
if (ll - old_ll) < self.tol:
break
old_ll = ll
psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
else:
warnings.warn('FactorAnalysis did not converge.' +
' You might want' +
' to increase the number of iterations.',
ConvergenceWarning)
self.components_ = W
self.noise_variance_ = psi
self.loglike_ = loglike
self.n_iter_ = i + 1
return self
def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
The latent variables of X.
"""
check_is_fitted(self, 'components_')
X = check_array(X)
Ih = np.eye(len(self.components_))
X_transformed = X - self.mean_
Wpsi = self.components_ / self.noise_variance_
cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
tmp = fast_dot(X_transformed, Wpsi.T)
X_transformed = fast_dot(tmp, cov_z)
return X_transformed
def get_covariance(self):
"""Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
-------
cov : array, shape (n_features, n_features)
Estimated covariance of data.
"""
check_is_fitted(self, 'components_')
cov = np.dot(self.components_.T, self.components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the FactorAnalysis model.
Returns
-------
precision : array, shape (n_features, n_features)
Estimated precision of data.
"""
check_is_fitted(self, 'components_')
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components == 0:
return np.diag(1. / self.noise_variance_)
if self.n_components == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
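        # i.e. the Woodbury identity applied to cov = W^T.W + diag(psi), with W = components_:
        #   cov^-1 = diag(1/psi) - diag(1/psi).W^T.(I + W.diag(1/psi).W^T)^-1.W.diag(1/psi)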
components_ = self.components_
precision = np.dot(components_ / self.noise_variance_, components_.T)
precision.flat[::len(precision) + 1] += 1.
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= self.noise_variance_[:, np.newaxis]
precision /= -self.noise_variance_[np.newaxis, :]
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def score_samples(self, X):
"""Compute the log-likelihood of each sample
Parameters
----------
X: array, shape (n_samples, n_features)
The data
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'components_')
Xr = X - self.mean_
precision = self.get_precision()
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
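        # Multivariate Gaussian log-density written with the precision matrix P:
        #   ll_i = -0.5 * (x_i^T P x_i + n_features * log(2*pi) - log det(P))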
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Compute the average log-likelihood of the samples
Parameters
----------
X: array, shape (n_samples, n_features)
The data
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
|
ashhher3/scikit-learn
|
sklearn/decomposition/factor_analysis.py
|
Python
|
bsd-3-clause
| 11,908
|
[
"Gaussian"
] |
d2ae9dc1ae5da7c3629637ecdee28954770930e54f115edaf56ec04b0a06485f
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import, print_function
import logging
import re
import time
from datetime import datetime, timedelta
from builtins import * # pylint: disable=unused-import, redefined-builtin
from dateutil.parser import parse as dateutil_parse
from past.builtins import basestring
from sqlalchemy import Table, Column, Integer, String, Unicode, Date, DateTime, Time, or_, func
from sqlalchemy.orm import relation
from sqlalchemy.schema import ForeignKey
from flexget import db_schema
from flexget import options
from flexget import plugin
from flexget.event import event
from flexget.logger import console
from flexget.manager import Session
from flexget.plugin import get_plugin_by_name
from flexget.utils import requests, json
from flexget.utils.database import with_session
from flexget.utils.simple_persistence import SimplePersistence
from flexget.utils.tools import TimedDict
Base = db_schema.versioned_base('api_trakt', 5)
log = logging.getLogger('api_trakt')
# Production Site
CLIENT_ID = '57e188bcb9750c79ed452e1674925bc6848bd126e02bb15350211be74c6547af'
CLIENT_SECRET = 'db4af7531e8df678b134dbc22445a2c04ebdbdd7213be7f5b6d17dfdfabfcdc2'
API_URL = 'https://api-v2launch.trakt.tv/'
PIN_URL = 'http://trakt.tv/pin/346'
# Stores the last time we checked for updates for shows/movies
updated = SimplePersistence('api_trakt')
# Oauth account authentication
class TraktUserAuth(Base):
__tablename__ = 'trakt_user_auth'
account = Column(Unicode, primary_key=True)
access_token = Column(Unicode)
refresh_token = Column(Unicode)
created = Column(DateTime)
expires = Column(DateTime)
def __init__(self, account, access_token, refresh_token, created, expires):
self.account = account
self.access_token = access_token
self.refresh_token = refresh_token
self.expires = token_expire_date(expires)
self.created = token_created_date(created)
def token_expire_date(expires):
return datetime.now() + timedelta(seconds=expires)
def token_created_date(created):
return datetime.fromtimestamp(created)
def device_auth():
data = {'client_id': CLIENT_ID}
try:
r = requests.post(get_api_url('oauth/device/code'), data=data).json()
device_code = r['device_code']
user_code = r['user_code']
expires_in = r['expires_in']
interval = r['interval']
console('Please visit {0} and authorize Flexget. Your user code is {1}. Your code expires in '
'{2} minutes.'.format(r['verification_url'], user_code, expires_in / 60.0))
log.debug('Polling for user authorization.')
data['code'] = device_code
data['client_secret'] = CLIENT_SECRET
end_time = time.time() + expires_in
console('Waiting...', end='')
# stop polling after expires_in seconds
while time.time() < end_time:
time.sleep(interval)
polling_request = requests.post(get_api_url('oauth/device/token'), data=data,
raise_status=False)
if polling_request.status_code == 200: # success
return polling_request.json()
elif polling_request.status_code == 400: # pending -- waiting for user
console('...', end='')
elif polling_request.status_code == 404: # not found -- invalid device_code
raise plugin.PluginError('Invalid device code. Open an issue on Github.')
elif polling_request.status_code == 409: # already used -- user already approved
raise plugin.PluginError('User code has already been approved.')
elif polling_request.status_code == 410: # expired -- restart process
break
elif polling_request.status_code == 418: # denied -- user denied code
raise plugin.PluginError('User code has been denied.')
elif polling_request.status_code == 429: # polling too fast
log.warning('Polling too quickly. Upping the interval. No action required.')
interval += 1
raise plugin.PluginError('User code has expired. Please try again.')
except requests.RequestException as e:
raise plugin.PluginError('Device authorization with Trakt.tv failed: {0}'.format(e.args[0]))
def token_auth(data):
try:
return requests.post(get_api_url('oauth/token'), data=data).json()
except requests.RequestException as e:
raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e.args[0]))
def get_access_token(account, token=None, refresh=False, re_auth=False, called_from_cli=False):
"""
Gets authorization info from a pin or refresh token.
:param account: Arbitrary account name to attach authorization to.
:param unicode token: The pin or refresh token, as supplied by the trakt website.
:param bool refresh: If True, refresh the access token using refresh_token from db.
:param bool re_auth: If True, account is re-authorized even if it already exists in db.
:raises RequestException: If there is a network error while authorizing.
"""
data = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET
}
with Session() as session:
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
if acc and datetime.now() < acc.expires and not refresh and not re_auth:
return acc.access_token
else:
if acc and (refresh or datetime.now() >= acc.expires) and not re_auth:
log.debug('Using refresh token to re-authorize account %s.', account)
data['refresh_token'] = acc.refresh_token
data['grant_type'] = 'refresh_token'
token_dict = token_auth(data)
elif token:
# We are only in here if a pin was specified, so it's safe to use console instead of logging
console('Warning: PIN authorization has been deprecated. Use Device Authorization instead.')
data['code'] = token
data['grant_type'] = 'authorization_code'
data['redirect_uri'] = 'urn:ietf:wg:oauth:2.0:oob'
token_dict = token_auth(data)
elif called_from_cli:
log.debug('No pin specified for an unknown account %s. Attempting to authorize device.', account)
token_dict = device_auth()
else:
raise plugin.PluginError('Account %s has not been authorized. See `flexget trakt auth -h` on how to.' %
account)
try:
access_token = token_dict['access_token']
refresh_token = token_dict['refresh_token']
created_at = token_dict.get('created_at', time.time())
expires_in = token_dict['expires_in']
if acc:
acc.access_token = access_token
acc.refresh_token = refresh_token
acc.created = token_created_date(created_at)
acc.expires = token_expire_date(expires_in)
else:
acc = TraktUserAuth(account, access_token, refresh_token, created_at,
expires_in)
session.add(acc)
return access_token
except requests.RequestException as e:
raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e.args[0]))
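# Illustrative use (the account name is arbitrary; on first use a device-code prompt is shown):
#   access_token = get_access_token('my_account', called_from_cli=True)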
def make_list_slug(name):
"""Return the slug for use in url for given list name."""
slug = name.lower()
# These characters are just stripped in the url
for char in '!@#$%^*()[]{}/=?+\\|':
slug = slug.replace(char, '')
# These characters get replaced
slug = slug.replace('&', 'and')
slug = slug.replace(' ', '-')
return slug
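# Illustrative result: make_list_slug('My Favorite Movies & Shows!') -> 'my-favorite-movies-and-shows'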
def get_session(account=None, token=None):
"""
Creates a requests session ready to talk to trakt API with FlexGet's api key.
Can also add user level authentication if `account` parameter is given.
:param account: An account authorized via `flexget trakt auth` CLI command. If given, returned session will be
authenticated for that account.
"""
# default to username if account name is not specified
session = requests.Session()
session.headers = {
'Content-Type': 'application/json',
'trakt-api-version': 2,
'trakt-api-key': CLIENT_ID,
}
if account:
access_token = get_access_token(account, token) if account else None
if access_token:
session.headers.update({'Authorization': 'Bearer %s' % access_token})
return session
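# Illustrative use: an authenticated request against the user's watchlist (account is assumed
# to have been authorized beforehand):
#   session = get_session(account='my_account')
#   watchlist = session.get(get_api_url('sync', 'watchlist')).json()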
def get_api_url(*endpoint):
"""
Get the address of a trakt API endpoint.
:param endpoint: Can by a string endpoint (e.g. 'sync/watchlist') or an iterable (e.g. ('sync', 'watchlist')
Multiple parameters can also be specified instead of a single iterable.
:returns: The absolute url to the specified API endpoint.
"""
if len(endpoint) == 1 and not isinstance(endpoint[0], basestring):
endpoint = endpoint[0]
# Make sure integer portions are turned into strings first too
url = API_URL + '/'.join(map(str, endpoint))
return url
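# e.g. get_api_url('shows', 1390, 'translations') -> 'https://api-v2launch.trakt.tv/shows/1390/translations'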
@db_schema.upgrade('api_trakt')
def upgrade(ver, session):
if ver is None or ver <= 4:
raise db_schema.UpgradeImpossible
return ver
def get_entry_ids(entry):
"""Creates a trakt ids dict from id fields on an entry. Prefers already populated info over lazy lookups."""
ids = {}
for lazy in [False, True]:
if entry.get('trakt_movie_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_movie_id']
elif entry.get('trakt_show_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_show_id']
elif entry.get('trakt_episode_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_episode_id']
if entry.get('tmdb_id', eval_lazy=lazy):
ids['tmdb'] = entry['tmdb_id']
if entry.get('tvdb_id', eval_lazy=lazy):
ids['tvdb'] = entry['tvdb_id']
if entry.get('imdb_id', eval_lazy=lazy):
ids['imdb'] = entry['imdb_id']
if entry.get('tvrage_id', eval_lazy=lazy):
ids['tvrage'] = entry['tvrage_id']
if ids:
break
return ids
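# Illustrative result: an entry that eagerly carries tvdb_id=81189 and imdb_id='tt0903747'
# (and no trakt id) yields {'tvdb': 81189, 'imdb': 'tt0903747'}.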
class TraktTranslation(Base):
__tablename__ = 'trakt_translations'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Unicode)
show_trans_table = Table('trakt_show_trans', Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('trans_id', Integer, ForeignKey('trakt_translations.id')))
Base.register_table(show_trans_table)
movie_trans_table = Table('trakt_movie_trans', Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('trans_id', Integer, ForeignKey('trakt_translations.id')))
Base.register_table(movie_trans_table)
class TraktTranslate(Base):
__tablename__ = 'trakt_translate'
id = Column(Integer, primary_key=True, autoincrement=True)
language = Column(Unicode)
overview = Column(Unicode)
tagline = Column(Unicode)
title = Column(Unicode)
def __init__(self, translation, session):
super(TraktTranslate, self).__init__()
self.update(translation, session)
def update(self, translation, session):
for col in translation.keys():
setattr(self, col, translation.get(col))
def get_translation(ident, style):
url = get_api_url(style + 's', ident, 'translations')
translations = []
req_session = get_session()
try:
results = req_session.get(url, params={'extended': 'full,images'}).json()
with Session() as session:
for result in results:
translate = session.query(TraktTranslate).filter(
TraktTranslate.language == result.get('language')).first()
if not translate:
translate = TraktTranslate(result, session)
translations.append(translate)
return translations
except requests.RequestException as e:
        log.debug('Error adding translations to trakt id %s: %s', ident, e)
trans_show_table = Table('show_trans', Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('trans_id', Integer, ForeignKey('trakt_translate.id')))
Base.register_table(trans_show_table)
trans_movie_table = Table('movie_tans', Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('trans_id', Integer, ForeignKey('trakt_translate.id')))
Base.register_table(trans_movie_table)
def get_db_trans(trans, session):
"""Takes a list of genres as strings, returns the database instances for them."""
db_trans = []
for tran in trans:
db_tran = session.query(TraktTranslation).filter(TraktTranslation.name == tran).first()
if not db_tran:
db_tran = TraktTranslation(name=tran)
session.add(db_tran)
db_trans.append(db_tran)
return db_trans
class TraktGenre(Base):
__tablename__ = 'trakt_genres'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Unicode)
show_genres_table = Table('trakt_show_genres', Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
Base.register_table(show_genres_table)
movie_genres_table = Table('trakt_movie_genres', Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
Base.register_table(movie_genres_table)
def get_db_genres(genres, session):
"""Takes a list of genres as strings, returns the database instances for them."""
db_genres = []
for genre in genres:
genre = genre.replace('-', ' ')
db_genre = session.query(TraktGenre).filter(TraktGenre.name == genre).first()
if not db_genre:
db_genre = TraktGenre(name=genre)
session.add(db_genre)
db_genres.append(db_genre)
return db_genres
class TraktImages(Base):
__tablename__ = 'trakt_images'
id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
ident = Column(Unicode)
style = Column(Unicode)
url = Column(Unicode)
def __init__(self, images):
super(TraktImages, self).__init__()
self.update(images)
def update(self, images):
for col in images.keys():
setattr(self, col, images.get(col))
trakt_image_actors = Table('trakt_image_actor', Base.metadata,
Column('trakt_actor', Integer, ForeignKey('trakt_actors.id')),
Column('trakt_image', Integer, ForeignKey('trakt_images.id')))
Base.register_table(trakt_image_actors)
trakt_image_shows = Table('trakt_image_show', Base.metadata,
Column('trakt_show', Integer, ForeignKey('trakt_shows.id')),
Column('trakt_image', Integer, ForeignKey('trakt_images.id')))
Base.register_table(trakt_image_shows)
trakt_image_movies = Table('trakt_image_movie', Base.metadata,
Column('trakt_movie', Integer, ForeignKey('trakt_movies.id')),
Column('trakt_image', Integer, ForeignKey('trakt_images.id')))
Base.register_table(trakt_image_movies)
trakt_image_episodes = Table('trakt_image_episode', Base.metadata,
Column('trakt_episode', Integer, ForeignKey('trakt_episodes.id')),
Column('trakt_image', Integer, ForeignKey('trakt_images.id')))
Base.register_table(trakt_image_episodes)
class TraktActor(Base):
__tablename__ = 'trakt_actors'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode)
slug = Column(Unicode)
tmdb = Column(Integer)
imdb = Column(Unicode)
biography = Column(Unicode)
birthday = Column(Date)
death = Column(Date)
homepage = Column(Unicode)
images = relation(TraktImages, secondary=trakt_image_actors)
def __init__(self, actor, session):
super(TraktActor, self).__init__()
self.update(actor, session)
def update(self, actor, session):
if self.id and self.id != actor.get('ids').get('trakt'):
raise Exception('Tried to update db actors with different actor data')
elif not self.id:
self.id = actor.get('ids').get('trakt')
self.name = actor.get('name')
ids = actor.get('ids')
self.imdb = ids.get('imdb')
self.slug = ids.get('slug')
self.tmdb = ids.get('tmdb')
self.biography = actor.get('biography')
if actor.get('birthday'):
self.birthday = dateutil_parse(actor.get('birthday'))
if actor.get('death'):
self.death = dateutil_parse(actor.get('death'))
self.homepage = actor.get('homepage')
if actor.get('images'):
img = actor.get('images')
self.images = get_db_images(img, session)
def to_dict(self):
return {
'name': self.name,
'trakt_id': self.id,
'imdb_id': self.imdb,
'tmdb_id': self.tmdb,
'images': list_images(self.images)
}
def get_db_images(image, session):
try:
flat = []
images = []
if image:
for i, s in image.items():
for ss, u in s.items():
a = {'ident': i, 'style': ss, 'url': u}
flat.append(a)
for i in flat:
url = i.get('url')
im = session.query(TraktImages).filter(TraktImages.url == url).first()
if not im:
im = TraktImages(i)
images.append(im)
return images
except TypeError as e:
log.debug('Error occurred while processing images: %s', e.args[0])
return
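# Illustrative note (editor's addition, not in the original source): get_db_images expects
# the nested trakt "images" mapping, e.g. {'poster': {'full': 'http://...', 'thumb': 'http://...'}},
# and flattens it into one TraktImages row per (ident, style, url) triple, reusing any row
# whose url is already cached in the database.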
show_actors_table = Table('trakt_show_actors', Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(show_actors_table)
movie_actors_table = Table('trakt_movie_actors', Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
Base.register_table(movie_actors_table)
def get_db_actors(ident, style):
actors = []
url = get_api_url(style + 's', ident, 'people')
req_session = get_session()
try:
results = req_session.get(url, params={'extended': 'full,images'}).json()
with Session() as session:
for result in results.get('cast'):
trakt_id = result.get('person').get('ids').get('trakt')
actor = session.query(TraktActor).filter(TraktActor.id == trakt_id).first()
if not actor:
actor = TraktActor(result.get('person'), session)
actors.append(actor)
return actors
except requests.RequestException as e:
log.debug('Error searching for actors for trakt id %s: %s', ident, e)
return
def list_images(images):
res = {}
for image in images:
res.setdefault(image.ident, {})[image.style] = image.url
return res
def get_translations(translate):
res = {}
for lang in translate:
info = {'overview': lang.overview,
'title': lang.title,
'tagline': lang.tagline,
}
res[lang.language] = info
return res
def list_actors(actors):
res = {}
for actor in actors:
info = {
'trakt_id': actor.id,
'name': actor.name,
'imdb_id': str(actor.imdb),
'trakt_slug': actor.slug,
'tmdb_id': str(actor.tmdb),
'birthday': actor.birthday.strftime("%Y/%m/%d") if actor.birthday else None,
'biography': actor.biography,
'homepage': actor.homepage,
'death': actor.death.strftime("%Y/%m/%d") if actor.death else None,
'images': list_images(actor.images)
}
res[str(actor.id)] = info
return res
class TraktEpisode(Base):
__tablename__ = 'trakt_episodes'
id = Column(Integer, primary_key=True, autoincrement=False)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
title = Column(Unicode)
season = Column(Integer)
number = Column(Integer)
number_abs = Column(Integer)
overview = Column(Unicode)
images = relation(TraktImages, secondary=trakt_image_episodes)
first_aired = Column(DateTime)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
def __init__(self, trakt_episode, session):
super(TraktEpisode, self).__init__()
self.update(trakt_episode, session)
def update(self, trakt_episode, session):
"""Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
if self.id and self.id != trakt_episode['ids']['trakt']:
raise Exception('Tried to update db ep with different ep data')
elif not self.id:
self.id = trakt_episode['ids']['trakt']
self.imdb_id = trakt_episode['ids']['imdb']
self.tmdb_id = trakt_episode['ids']['tmdb']
self.tvrage_id = trakt_episode['ids']['tvrage']
if trakt_episode.get('images'):
self.images = get_db_images(trakt_episode.get('images'), session)
self.tvdb_id = trakt_episode['ids']['tvdb']
self.first_aired = None
if trakt_episode.get('first_aired'):
self.first_aired = dateutil_parse(trakt_episode['first_aired'], ignoretz=True)
self.updated_at = dateutil_parse(trakt_episode.get('updated_at'), ignoretz=True)
self.cached_at = datetime.now()
for col in ['title', 'season', 'number', 'number_abs', 'overview']:
setattr(self, col, trakt_episode.get(col))
@property
def expired(self):
# TODO should episode have its own expiration function?
return False
class TraktShow(Base):
__tablename__ = 'trakt_shows'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
overview = Column(Unicode)
first_aired = Column(DateTime)
air_day = Column(Unicode)
air_time = Column(Time)
timezone = Column(Unicode)
runtime = Column(Integer)
certification = Column(Unicode)
network = Column(Unicode)
images = relation(TraktImages, secondary=trakt_image_shows)
country = Column(Unicode)
status = Column(String)
rating = Column(Integer)
votes = Column(Integer)
language = Column(Unicode)
homepage = Column(Unicode)
trailer = Column(Unicode)
aired_episodes = Column(Integer)
translations = relation(TraktTranslation, secondary=show_trans_table)
episodes = relation(TraktEpisode, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic')
_translate = relation(TraktTranslate, secondary=trans_show_table)
genres = relation(TraktGenre, secondary=show_genres_table)
_actors = relation(TraktActor, secondary=show_actors_table)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"year": self.year,
"slug": self.slug,
"tvdb_id": self.tvdb_id,
"imdb_id": self.imdb_id,
"tmdb_id": self.tmdb_id,
"tvrage_id": self.tvrage_id,
"overview": self.overview,
"first_aired": self.first_aired,
"air_day": self.air_day,
"air_time": self.air_time.strftime("%H:%M"),
"timezone": self.timezone,
"runtime": self.runtime,
"certification": self.certification,
"network": self.network,
"country": self.country,
"status": self.status,
"rating": self.rating,
"votes": self.votes,
"language": self.language,
"homepage": self.homepage,
"number_of_aired_episodes": self.aired_episodes,
"genres": [g.name for g in self.genres],
"actors": list_actors(self.actors),
"updated_at": self.updated_at,
"cached_at": self.cached_at,
"images": list_images(self.images)
}
def __init__(self, trakt_show, session):
super(TraktShow, self).__init__()
self.update(trakt_show, session)
def update(self, trakt_show, session):
"""Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
if self.id and self.id != trakt_show['ids']['trakt']:
raise Exception('Tried to update db show with different show data')
elif not self.id:
self.id = trakt_show['ids']['trakt']
self.slug = trakt_show['ids']['slug']
self.imdb_id = trakt_show['ids']['imdb']
self.tmdb_id = trakt_show['ids']['tmdb']
self.tvrage_id = trakt_show['ids']['tvrage']
self.tvdb_id = trakt_show['ids']['tvdb']
if trakt_show.get('images'):
self.images = get_db_images(trakt_show.get('images'), session)
if trakt_show.get('airs'):
airs = trakt_show.get('airs')
self.air_day = airs.get('day')
self.timezone = airs.get('timezone')
if airs.get('time'):
self.air_time = datetime.strptime(airs.get('time'), '%H:%M').time()
else:
self.air_time = None
if trakt_show.get('first_aired'):
self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
else:
self.first_aired = None
self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
for col in ['overview', 'runtime', 'rating', 'votes', 'language', 'title', 'year',
'runtime', 'certification', 'network', 'country', 'status', 'aired_episodes',
'trailer', 'homepage']:
setattr(self, col, trakt_show.get(col))
self.genres[:] = get_db_genres(trakt_show.get('genres', []), session)
self.translations[:] = get_db_trans(trakt_show.get('available_translations', []), session)
self.cached_at = datetime.now()
def get_episode(self, season, number, session, only_cached=False):
# TODO: Does series data being expired mean all episode data should be refreshed?
episode = self.episodes.filter(TraktEpisode.season == season).filter(TraktEpisode.number == number).first()
if not episode or self.expired:
url = get_api_url('shows', self.id, 'seasons', season, 'episodes', number, '?extended=full,images')
if only_cached:
raise LookupError('Episode %s %s not found in cache' % (season, number))
log.debug('Episode %s %s not found in cache, looking up from trakt.', season, number)
try:
ses = get_session()
data = ses.get(url).json()
except requests.RequestException:
raise LookupError('Error Retrieving Trakt url: %s' % url)
if not data:
raise LookupError('No data in response from trakt %s' % url)
episode = self.episodes.filter(TraktEpisode.id == data['ids']['trakt']).first()
if episode:
episode.update(data, session)
else:
episode = TraktEpisode(data, session)
self.episodes.append(episode)
return episode
@property
def expired(self):
"""
:return: True if show details are considered to be expired, ie. need of update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
if self.cached_at is None:
log.debug('cached_at is None: %s', self)
return True
refresh_interval = 2
# if show has been cancelled or ended, then it is unlikely to be updated often
if self.year and (self.status == 'ended' or self.status == 'canceled'):
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('show `%s` age %i expires in %i days', self.title, age, refresh_interval)
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
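# Worked example (editor's note, not part of the original plugin): a show that ended in
# 2010 and is checked in 2015 has age = 5, so refresh_interval becomes 2 + 5 * 5 = 27 and
# the cached record is only considered expired once it is more than 27 days old.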
@property
def translate(self):
if not self._translate:
self._translate[:] = get_translation(self.id, 'show')
return self._translate
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'show')
return self._actors
def __repr__(self):
return '<name=%s, id=%s>' % (self.title, self.id)
class TraktMovie(Base):
__tablename__ = 'trakt_movies'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tagline = Column(Unicode)
overview = Column(Unicode)
released = Column(Date)
runtime = Column(Integer)
rating = Column(Integer)
votes = Column(Integer)
trailer = Column(Unicode)
homepage = Column(Unicode)
language = Column(Unicode)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
translations = relation(TraktTranslation, secondary=movie_trans_table)
_translate = relation(TraktTranslate, secondary=trans_movie_table)
images = relation(TraktImages, secondary=trakt_image_movies)
genres = relation(TraktGenre, secondary=movie_genres_table)
_actors = relation(TraktActor, secondary=movie_actors_table)
def __init__(self, trakt_movie, session):
super(TraktMovie, self).__init__()
self.update(trakt_movie, session)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"year": self.year,
"slug": self.slug,
"imdb_id": self.imdb_id,
"tmdb_id": self.tmdb_id,
"tagline": self.tagline,
"overview": self.overview,
"released": self.released,
"runtime": self.runtime,
"rating": self.rating,
"votes": self.votes,
"language": self.language,
"homepage": self.homepage,
"trailer": self.trailer,
"genres": [g.name for g in self.genres],
"actors": list_actors(self.actors),
"updated_at": self.updated_at,
"cached_at": self.cached_at,
"images": list_images(self.images)
}
def update(self, trakt_movie, session):
"""Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
if self.id and self.id != trakt_movie['ids']['trakt']:
raise Exception('Tried to update db movie with different movie data')
elif not self.id:
self.id = trakt_movie['ids']['trakt']
self.slug = trakt_movie['ids']['slug']
self.imdb_id = trakt_movie['ids']['imdb']
self.tmdb_id = trakt_movie['ids']['tmdb']
for col in ['title', 'overview', 'runtime', 'rating', 'votes',
'language', 'tagline', 'year', 'trailer', 'homepage']:
setattr(self, col, trakt_movie.get(col))
if trakt_movie.get('released'):
self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True)
self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
self.genres[:] = get_db_genres(trakt_movie.get('genres', []), session)
self.translations[:] = get_db_trans(trakt_movie.get('available_translations', []), session)
self.cached_at = datetime.now()
self.images = get_db_images(trakt_movie.get('images'), session)
@property
def expired(self):
"""
:return: True if movie details are considered to be expired, ie. need of update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
if self.updated_at is None:
log.debug('updated_at is None: %s', self)
return True
refresh_interval = 2
if self.year:
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('movie `%s` age %i expires in %i days', self.title, age, refresh_interval)
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
@property
def translate(self):
if not self._translate:
self._translate[:] = get_translation(self.id, 'movie')
return self._translate
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'movie')
return self._actors
class TraktShowSearchResult(Base):
__tablename__ = 'trakt_show_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=True)
series = relation(TraktShow, backref='search_strings')
class TraktMovieSearchResult(Base):
__tablename__ = 'trakt_movie_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
movie_id = Column(Integer, ForeignKey('trakt_movies.id'), nullable=True)
movie = relation(TraktMovie, backref='search_strings')
def split_title_year(title):
"""Splits title containing a year into a title, year pair."""
# We only recognize years from the 2nd and 3rd millennium, FlexGetters from the year 3000 be damned!
match = re.search(r'[\s(]([12]\d{3})\)?$', title)
if match:
title = title[:match.start()].strip()
year = int(match.group(1))
else:
year = None
return title, year
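# Illustrative examples (editor's note): the regex above strips a trailing year given in
# parentheses or after whitespace, so
#   split_title_year('The Matrix (1999)')  -> ('The Matrix', 1999)
#   split_title_year('Some Show')          -> ('Some Show', None)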
@with_session
def get_cached(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
tvdb_id=None, tvrage_id=None, session=None):
"""
Get the cached info for a given show/movie from the database.
:param style: Either 'show' or 'movie'
"""
ids = {
'id': trakt_id,
'slug': trakt_slug,
'tmdb_id': tmdb_id,
'imdb_id': imdb_id,
}
if style == 'show':
ids['tvdb_id'] = tvdb_id
ids['tvrage_id'] = tvrage_id
model = TraktShow
else:
model = TraktMovie
result = None
if any(ids.values()):
result = session.query(model).filter(
or_(getattr(model, col) == val for col, val in ids.items() if val)).first()
elif title:
title, y = split_title_year(title)
year = year or y
query = session.query(model).filter(model.title == title)
if year:
query = query.filter(model.year == year)
result = query.first()
return result
def get_trakt(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None,
tvdb_id=None, tvrage_id=None):
"""Returns the matching media object from trakt api."""
# TODO: Better error messages
# Trakt api accepts either id or slug (there is a rare possibility for conflict though, e.g. 24)
trakt_id = trakt_id or trakt_slug
req_session = get_session()
last_search_query = None # used if no results are found
last_search_type = None
if not trakt_id:
# Try finding trakt_id based on other ids
ids = {
'imdb': imdb_id,
'tmdb': tmdb_id
}
if style == 'show':
ids['tvdb'] = tvdb_id
ids['tvrage'] = tvrage_id
for id_type, identifier in ids.items():
if not identifier:
continue
try:
last_search_query = identifier
last_search_type = id_type
log.debug('Searching with params: %s=%s', id_type, identifier)
results = req_session.get(get_api_url('search'), params={'id_type': id_type, 'id': identifier}).json()
except requests.RequestException as e:
log.debug('Error searching for trakt id %s', e)
continue
for result in results:
if result['type'] != style:
continue
trakt_id = result[style]['ids']['trakt']
break
if trakt_id:
break
if not trakt_id and title:
last_search_query = title
last_search_type = 'title'
# Try finding trakt id based on title and year
if style == 'show':
parsed_title, y = split_title_year(title)
y = year or y
else:
title_parser = get_plugin_by_name('parsing').instance.parse_movie(title)
y = year or title_parser.year
parsed_title = title_parser.name
try:
params = {'query': parsed_title, 'type': style, 'year': y}
log.debug('Type of title: %s', type(parsed_title))
log.debug('Searching with params: %s', ', '.join('{}={}'.format(k, v) for (k, v) in params.items()))
results = req_session.get(get_api_url('search'), params=params).json()
except requests.RequestException as e:
raise LookupError('Searching trakt for %s failed with error: %s' % (title, e))
for result in results:
if year and result[style]['year'] != year:
continue
if parsed_title.lower() == result[style]['title'].lower():
trakt_id = result[style]['ids']['trakt']
break
# grab the first result if there is no exact match
if not trakt_id and results:
trakt_id = results[0][style]['ids']['trakt']
if not trakt_id:
raise LookupError('Unable to find %s="%s" on trakt.' % (last_search_type, last_search_query))
# Get actual data from trakt
try:
return req_session.get(get_api_url(style + 's', trakt_id), params={'extended': 'full,images'}).json()
except requests.RequestException as e:
raise LookupError('Error getting trakt data for id %s: %s' % (trakt_id, e))
def update_collection_cache(style_ident, username=None, account=None):
if account and not username:
username = 'me'
url = get_api_url('users', username, 'collection', style_ident)
session = get_session(account=account)
try:
data = session.get(url).json()
if not data:
log.warning('No collection data returned from trakt.')
return
cache = get_user_cache(username=username, account=account)['collection'][style_ident]
log.verbose('Received %d records from trakt.tv %s\'s collection', len(data), username)
if style_ident == 'movies':
for movie in data:
movie_id = movie['movie']['ids']['trakt']
cache[movie_id] = movie['movie']
cache[movie_id]['collected_at'] = dateutil_parse(movie['collected_at'], ignoretz=True)
else:
for series in data:
series_id = series['show']['ids']['trakt']
cache[series_id] = series['show']
cache[series_id]['seasons'] = series['seasons']
cache[series_id]['collected_at'] = dateutil_parse(series['last_collected_at'], ignoretz=True)
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def update_watched_cache(style_ident, username=None, account=None):
if account and not username:
username = 'me'
url = get_api_url('users', username, 'watched', style_ident)
session = get_session(account=account)
try:
data = session.get(url).json()
if not data:
log.warning('No watched data returned from trakt.')
return
cache = get_user_cache(username=username, account=account)['watched'][style_ident]
log.verbose('Received %d record(s) from trakt.tv %s\'s watched history', len(data), username)
if style_ident == 'movies':
for movie in data:
movie_id = movie['movie']['ids']['trakt']
cache[movie_id] = movie['movie']
cache[movie_id]['watched_at'] = dateutil_parse(movie['last_watched_at'], ignoretz=True)
cache[movie_id]['plays'] = movie['plays']
else:
for series in data:
series_id = series['show']['ids']['trakt']
cache[series_id] = series['show']
cache[series_id]['seasons'] = series['seasons']
cache[series_id]['watched_at'] = dateutil_parse(series['last_watched_at'], ignoretz=True)
cache[series_id]['plays'] = series['plays']
except requests.RequestException as e:
raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e)
def get_user_cache(username=None, account=None):
identifier = '{}|{}'.format(account, username or 'me')
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('watched', {}).setdefault('shows', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('watched', {}).setdefault('movies', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('collection', {}).setdefault('shows', {})
ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('collection', {}).setdefault('movies', {})
return ApiTrakt.user_cache[identifier]
class ApiTrakt(object):
user_cache = TimedDict(cache_time='15 minutes')
@staticmethod
@with_session
def lookup_series(session=None, only_cached=None, **lookup_params):
series = get_cached('show', session=session, **lookup_params)
title = lookup_params.get('title', '')
found = None
if not series and title:
found = session.query(TraktShowSearchResult).filter(func.lower(TraktShowSearchResult.search) ==
title.lower()).first()
if found and found.series:
log.debug('Found %s in previous search results as %s', title, found.series.title)
series = found.series
if only_cached:
if series:
return series
raise LookupError('Series %s not found from cache' % lookup_params)
if series and not series.expired:
return series
try:
trakt_show = get_trakt('show', **lookup_params)
except LookupError as e:
if series:
log.debug('Error refreshing show data from trakt, using cached. %s', e)
return series
raise
series = session.query(TraktShow).filter(TraktShow.id == trakt_show['ids']['trakt']).first()
if series:
series.update(trakt_show, session)
else:
series = TraktShow(trakt_show, session)
session.add(series)
if series and title.lower() == series.title.lower():
return series
elif series and not found:
if not session.query(TraktShowSearchResult).filter(func.lower(TraktShowSearchResult.search) ==
title.lower()).first():
log.debug('Adding search result to db')
session.add(TraktShowSearchResult(search=title, series=series))
elif series and found:
log.debug('Updating search result in db')
found.series = series
return series
@staticmethod
@with_session
def lookup_movie(session=None, only_cached=None, **lookup_params):
movie = get_cached('movie', session=session, **lookup_params)
title = lookup_params.get('title', '')
found = None
if not movie and title:
found = session.query(TraktMovieSearchResult).filter(func.lower(TraktMovieSearchResult.search) ==
title.lower()).first()
if found and found.movie:
log.debug('Found %s in previous search results as %s', title, found.movie.title)
movie = found.movie
if only_cached:
if movie:
return movie
raise LookupError('Movie %s not found from cache' % lookup_params)
if movie and not movie.expired:
return movie
try:
trakt_movie = get_trakt('movie', **lookup_params)
except LookupError as e:
if movie:
log.debug('Error refreshing movie data from trakt, using cached. %s', e)
return movie
raise
movie = session.query(TraktMovie).filter(TraktMovie.id == trakt_movie['ids']['trakt']).first()
if movie:
movie.update(trakt_movie, session)
else:
movie = TraktMovie(trakt_movie, session)
session.add(movie)
if movie and title.lower() == movie.title.lower():
return movie
if movie and not found:
if not session.query(TraktMovieSearchResult).filter(func.lower(TraktMovieSearchResult.search) ==
title.lower()).first():
log.debug('Adding search result to db')
session.add(TraktMovieSearchResult(search=title, movie=movie))
elif movie and found:
log.debug('Updating search result in db')
found.movie = movie
return movie
@staticmethod
def collected(style, trakt_data, title, username=None, account=None):
style_ident = 'movies' if style == 'movie' else 'shows'
cache = get_user_cache(username=username, account=account)
if not cache['collection'][style_ident]:
log.debug('No collection found in cache.')
update_collection_cache(style_ident, username=username, account=account)
if not cache['collection'][style_ident]:
log.warning('No collection data returned from trakt.')
return
in_collection = False
cache = cache['collection'][style_ident]
if style == 'show':
if trakt_data.id in cache:
series = cache[trakt_data.id]
# specials are not included
number_of_collected_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0)
in_collection = number_of_collected_episodes >= trakt_data.aired_episodes
elif style == 'episode':
if trakt_data.show.id in cache:
series = cache[trakt_data.show.id]
for s in series['seasons']:
if s['number'] == trakt_data.season:
# extract all episode numbers currently in collection for the season number
episodes = [ep['number'] for ep in s['episodes']]
in_collection = trakt_data.number in episodes
break
else:
if trakt_data.id in cache:
in_collection = True
log.debug('The result for entry "%s" is: %s', title,
'Owned' if in_collection else 'Not owned')
return in_collection
@staticmethod
def watched(style, trakt_data, title, username=None, account=None):
style_ident = 'movies' if style == 'movie' else 'shows'
cache = get_user_cache(username=username, account=account)
if not cache['watched'][style_ident]:
log.debug('No watched history found in cache.')
update_watched_cache(style_ident, username=username, account=account)
if not cache['watched'][style_ident]:
log.warning('No watched data returned from trakt.')
return
watched = False
cache = cache['watched'][style_ident]
if style == 'show':
if trakt_data.id in cache:
series = cache[trakt_data.id]
# specials are not included
number_of_watched_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0)
watched = number_of_watched_episodes == trakt_data.aired_episodes
elif style == 'episode':
if trakt_data.show.id in cache:
series = cache[trakt_data.show.id]
for s in series['seasons']:
if s['number'] == trakt_data.season:
# extract all episode numbers currently in collection for the season number
episodes = [ep['number'] for ep in s['episodes']]
watched = trakt_data.number in episodes
break
else:
if trakt_data.id in cache:
watched = True
log.debug('The result for entry "%s" is: %s', title,
'Watched' if watched else 'Not watched')
return watched
def delete_account(account):
with Session() as session:
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
if not acc:
raise plugin.PluginError('Account %s not found.' % account)
session.delete(acc)
def do_cli(manager, options):
if options.action == 'auth':
if not options.account:
console('You must specify an account (local identifier) so we know where to save your access token!')
return
try:
get_access_token(options.account, options.pin, re_auth=True, called_from_cli=True)
console('Successfully authorized Flexget app on Trakt.tv. Enjoy!')
return
except plugin.PluginError as e:
console('Authorization failed: %s' % e)
elif options.action == 'show':
with Session() as session:
if not options.account:
# Print all accounts
accounts = session.query(TraktUserAuth).all()
if not accounts:
console('No trakt authorizations stored in database.')
return
console('{:-^21}|{:-^28}|{:-^28}'.format('Account', 'Created', 'Expires'))
for auth in accounts:
console('{:<21}|{:>28}|{:>28}'.format(
auth.account, auth.created.strftime('%Y-%m-%d'), auth.expires.strftime('%Y-%m-%d')))
return
# Show a specific account
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == options.account).first()
if acc:
console('Authorization expires on %s' % acc.expires)
else:
console('Flexget has not been authorized to access your account.')
elif options.action == 'refresh':
if not options.account:
console('Please specify an account')
return
try:
get_access_token(options.account, refresh=True)
console('Successfully refreshed your access token.')
return
except plugin.PluginError as e:
console('Authorization failed: %s' % e)
elif options.action == 'delete':
if not options.account:
console('Please specify an account')
return
try:
delete_account(options.account)
console('Successfully deleted your access token.')
return
except plugin.PluginError as e:
console('Deletion failed: %s' % e)
@event('options.register')
def register_parser_arguments():
acc_text = 'local identifier which should be used in your config to refer these credentials'
# Register subcommand
parser = options.register_command('trakt', do_cli, help='view and manage trakt authentication.')
# Set up our subparsers
subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='action')
auth_parser = subparsers.add_parser('auth', help='authorize Flexget to access your Trakt.tv account')
auth_parser.add_argument('account', metavar='<account>', help=acc_text)
auth_parser.add_argument('pin', metavar='<pin>', help='get this by authorizing FlexGet to use your trakt account '
'at %s. WARNING: DEPRECATED.' % PIN_URL, nargs='?')
show_parser = subparsers.add_parser('show', help='show expiration date for Flexget authorization(s) (don\'t worry, '
'they will automatically refresh when expired)')
show_parser.add_argument('account', metavar='<account>', nargs='?', help=acc_text)
refresh_parser = subparsers.add_parser('refresh', help='manually refresh your access token associated with your'
' --account <name>')
refresh_parser.add_argument('account', metavar='<account>', help=acc_text)
delete_parser = subparsers.add_parser('delete', help='delete the specified <account> name from local database')
delete_parser.add_argument('account', metavar='<account>', help=acc_text)
@event('plugin.register')
def register_plugin():
plugin.register(ApiTrakt, 'api_trakt', api_ver=2)
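# Hedged usage sketch (editor's addition, not part of the FlexGet plugin): it only shows how
# the lookup helpers defined above could be exercised from a standalone script. Running it
# requires a configured FlexGet database session and network access to trakt.tv, and the
# chosen titles are placeholders, not recommendations.
if __name__ == '__main__':
    try:
        show = ApiTrakt.lookup_series(title='Breaking Bad')
        print('Found show %s (trakt id %s)' % (show.title, show.id))
        movie = ApiTrakt.lookup_movie(title='The Matrix (1999)')
        print('Found movie %s (%s)' % (movie.title, movie.year))
    except LookupError as exc:
        print('Lookup failed: %s' % exc)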
|
qvazzler/Flexget
|
flexget/plugins/api_trakt.py
|
Python
|
mit
| 54,456
|
[
"VisIt"
] |
7822269bd84f994cdc32ee550dbdd283ea74087710a8700097693235331afb6f
|
# -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Andrew I Baznikin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Alex Roitman, largely based on relationship.py by Don Allingham
# and on valuable input from Lars Kr. Lundin
"""
Specific classes for relationships.
"""
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from gprime.lib import Person
import gprime.relationship
#-------------------------------------------------------------------------
#
# Danish-specific definitions of relationships
#
#-------------------------------------------------------------------------
_level_name = [ "", "første", "anden", "tredje", "fjerde", "femte", "sjette",
"syvende", "ottende", "niende", "tiende", "ellevte", "tolvte",
"trettende", "fjortende", "femtende", "sekstende",
"syttende", "attende", "nittende", "tyvende", "enogtyvende", "toogtyvende",
"treogtyvende","fireogtyvende","femogtyvende","seksogtyvende",
"syvogtyvende","otteogtyvende","niogtyvende","tredivte", ]
_parents_level = [ "forældre", "bedsteforældre", "oldeforældre",
"tipoldeforældre", "tiptipoldeforældre" , "tiptiptipoldeforældre", ]
_father_level = [ "", "faderen", "bedstefaderen", "oldefaderen", "tipoldefaderen", ]
_mother_level = [ "", "moderen", "bedstemoderen", "oldemoderen", "tipoldemoderen", ]
_son_level = [ "", "sønnen", "barnebarnet", "oldebarnet", ]
_daughter_level = [ "", "datteren", "barnebarnet", "oldebarnet", ]
_sister_level = [ "", "søsteren", "tanten", "grandtanten", "oldetanten", ]
_brother_level = [ "", "broderen", "onklen", "grandonklen", "oldeonkel", ]
_nephew_level = [ "", "nevøen", "næstsøskendebarnet", "broderens barnebarn", ]
_niece_level = [ "", "niecen", "næstsøskendebarnet", "søsterens barnebarn", ]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gprime.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
def __init__(self):
gprime.relationship.RelationshipCalculator.__init__(self)
def get_parents(self, level):
if level > len(_parents_level)-1:
#return "fjern forfader"
#Instead of "remote ancestors" using "tip (level) oldeforældre" here.
return "tip (%d) oldeforældre" % level
else:
return _parents_level[level]
def pair_up(self, rel_list):
result = []
item = ""
for word in rel_list[:]:
if not word:
continue
if item:
if word == 'søster':
item = item[0:-1]
word = 'ster'
elif word == 'sønne':
word = 'søn'
result.append(item + word)
item = ""
else:
item = word
if item:
result.append(item)
gen_result = [ item + 's' for item in result[0:-1] ]
return ' '.join(gen_result+result[-1:])
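# Illustrative examples (editor's note): pair_up composes the Danish relationship words and
# their genitives, e.g. pair_up(['far', 'far']) -> 'farfar' and
# pair_up(['far', 'mor', 'far']) -> 'farmors far'.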
def get_direct_ancestor(self, person, rel_string):
result = []
for ix in range(len(rel_string)):
if rel_string[ix] == 'f':
result.append('far')
else:
result.append('mor')
return self.pair_up(result)
def get_direct_descendant(self, person, rel_string):
result = []
for ix in range(len(rel_string)-2, -1, -1):
if rel_string[ix] == 'f':
result.append('sønne')
else:
result.append('datter')
if person == Person.MALE:
result.append('søn')
else:
result.append('datter')
return self.pair_up(result)
def get_two_way_rel(self, person, first_rel_string, second_rel_string):
result = []
for ix in range(len(second_rel_string)-1):
if second_rel_string[ix] == 'f':
result.append('far')
else:
result.append('mor')
if len(first_rel_string) > 1:
if first_rel_string[-2] == 'f':
result.append('bror')
else:
result.append('søster')
for ix in range(len(first_rel_string)-3, -1, -1):
if first_rel_string[ix] == 'f':
result.append('sønne')
else:
result.append('datter')
if person == Person.MALE:
result.append('søn')
else:
result.append('datter')
else:
if person == Person.MALE:
result.append('bror')
else:
result.append('søster')
return self.pair_up(result)
def get_relationship(self,
secondRel, firstRel, orig_person, other_person):
common = ""
if not firstRel:
if not secondRel:
return ('', common)
else:
return (self.get_direct_ancestor(other_person, secondRel), common)
elif not secondRel:
return (self.get_direct_descendant(other_person, firstRel), common)
else:
return (self.get_two_way_rel(other_person, firstRel, secondRel), common)
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
return self.get_relationship(reltocommon_a, reltocommon_b, gender_a, gender_b)[0]
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
return self.get_two_way_rel(gender_b, "", "")
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_da.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gprime.relationship import test
RC = RelationshipCalculator()
test(RC, True)
|
sam-m888/gprime
|
gprime/plugins/rel/rel_da.py
|
Python
|
gpl-2.0
| 7,385
|
[
"Brian"
] |
47f36e573f2244fc26259c692354817e67a7e8aaf90942f7a306fb634f5ddc80
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageGaussianSmooth(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageGaussianSmooth(), 'Processing.',
('vtkImageData',), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
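# Hedged usage note (editor's addition): inside DeVIDE this class is instantiated by the
# module manager, but the wrapped filter can also be driven directly with plain VTK, e.g.:
#   smoother = vtk.vtkImageGaussianSmooth()
#   smoother.SetStandardDeviations(2.0, 2.0, 2.0)   # sigma per axis
#   smoother.SetRadiusFactors(1.5, 1.5, 1.5)        # kernel cut-off in sigmas
#   smoother.SetInputConnection(some_reader.GetOutputPort())
#   smoother.Update()
# The reader variable and the parameter values are placeholders, not part of this module.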
|
nagyistoce/devide
|
modules/vtk_basic/vtkImageGaussianSmooth.py
|
Python
|
bsd-3-clause
| 501
|
[
"VTK"
] |
d52d490cc7c00b0409c66cbd260dae950a279caa76e82d4f0f4d54aa849698a7
|
"""
DIRAC.WorkloadManagementSystem.Agent package
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
|
yujikato/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Executor/Base/__init__.py
|
Python
|
gpl-3.0
| 185
|
[
"DIRAC"
] |
cd9d6cb0c0bc5ffe9da2b423909b2b541740942b0d46b140898911ca5bb2032b
|
import numpy as np
class GMM:
"""
Implements the expectation-maximisation (EM) algorithm for the
Gaussian mixture model (GMM). The algorithm is based on the
pseudo-code described in the book by C. Bishop "Pattern Recognition
and Machine Learning", chapter 9.
"""
def __init__(self,
n_components,
means=None,
covariances=None,
mixing_probs=None,
epsilon=1e-6,
callback = None):
"""
Arguments:
n_components -- number of mixtures (components) to fit
means -- (optional) initial array of mean vectors (numpy array of numpy arrays)
covariances -- (optional) initial array of covariance matrices (numpy array of numpy arrays)
mixing_probs -- (optional) initial vector (numpy array) of mixing probabilities
epsilon -- (optional) convergence criterion
"""
self.n_components = n_components
self.means = means
self.covariances = covariances
self.mixing_probs = mixing_probs
self.epsilon = epsilon
self.__callback = callback
def fit(self, features):
"""
Fits a GMM into a set of feature data.
Arguments:
features -- input features data set
"""
# Initialise
n, _ = features.shape
norm_densities = np.empty((n, self.n_components), float)
responsibilities = np.empty((n, self.n_components), float)
old_log_likelihood = 0
self._initialise_parameters(features)
while True:
# Compute normal densities
for i in np.arange(n):
x = features[i]
for j in np.arange(self.n_components):
norm_densities[i][j] = self.multivariate_normal_pdf(x, self.means[j], self.covariances[j])
# Estimate log likelihood
log_vector = np.log(np.array([np.dot(self.mixing_probs.T, norm_densities[i]) for i in np.arange(n)]))
log_likelihood = np.dot(log_vector.T, np.ones(n))
self.call_back()
# Check for convergence
if np.absolute(log_likelihood - old_log_likelihood) < self.epsilon:
break
# E-step: evaluate responsibilities
for i in np.arange(n):
x = features[i]
denominator = np.dot(self.mixing_probs.T, norm_densities[i])
for j in np.arange(self.n_components):
responsibilities[i][j] = self.mixing_probs[j] * norm_densities[i][j] / denominator
# M-step: re-estimate the parameters
for i in np.arange(self.n_components):
responsibility = (responsibilities.T)[i]
# Common denominator
denominator = np.dot(responsibility.T, np.ones(n))
# Update mean
self.means[i] = np.dot(responsibility.T, features) / denominator
# Update covariance
difference = features - np.tile(self.means[i], (n, 1))
self.covariances[i] = np.dot(np.multiply(responsibility.reshape(n,1), difference).T, difference) / denominator
# Update mixing probabilities
self.mixing_probs[i] = denominator / n
old_log_likelihood = log_likelihood
def cluster(self, features):
"""
Returns a numpy array containing partitioned feature data. The
distance measure used to compute the distance between a feature point
and a Gaussian distribution is Mahanalobis distance.
"""
# Initialise
n, _ = features.shape
partition = np.empty(n, int)
distances = np.empty(self.n_components, float)
cov_inverses = [np.linalg.inv(cov) for cov in self.covariances]
# Assign each feature point to a Gaussian distribution
for i in np.arange(n):
x = features[i]
# Compute Mahanalobis distances from each mixture
for j in np.arange(self.n_components):
distances[j] = np.dot(np.dot((x - self.means[j]).T, cov_inverses[j]), x - self.means[j])
# Find index of the minimum distance, and assign to a cluster
partition[i] = np.argmin(distances)
return partition
def call_back(self):
if self.__callback:
dct = {
'mixing_probs': self.mixing_probs,
'means': self.means,
'covariances': self.covariances
}
self.__callback(dct)
def multivariate_normal_pdf(self, x, mean, covariance):
"""
Returns normal density value for an n-dimensional random
vector x.
"""
centered = x - mean
cov_inverse = np.linalg.inv(covariance)
cov_det = np.linalg.det(covariance)
exponent = np.dot(np.dot(centered.T, cov_inverse), centered)
# Normalise with the dimensionality of x rather than the number of mixture components.
return np.exp(-0.5 * exponent) / np.sqrt(cov_det * np.power(2 * np.pi, x.shape[0]))
def _initialise_parameters(self, features):
"""
Initialises parameters: means, covariances, and mixing probabilities
if undefined.
Arguments:
features -- input features data set
"""
if not self.means or not self.covariances:
n, m = features.shape
# Shuffle features set
indices = np.arange(n)
np.random.shuffle(indices)
features_shuffled = np.array([features[i] for i in indices])
# Split into n_components subarrays
divs = int(np.floor(n / self.n_components))
features_split = [features_shuffled[i:i+divs] for i in range(0, n, divs)]
# Estimate means/covariances (or both)
if not self.means:
means = []
for i in np.arange(self.n_components):
means.append(np.mean(features_split[i], axis=0))
self.means = np.array(means)
if not self.covariances:
covariances = []
for i in np.arange(self.n_components):
covariances.append(np.cov(features_split[i].T))
self.covariances = np.array(covariances)
if not self.mixing_probs:
self.mixing_probs = np.repeat(1 / self.n_components, self.n_components)
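# Minimal usage sketch (editor's addition, not part of the original module). It assumes
# Python 3 (so that 1 / n_components is a float) and only illustrates the public
# fit/cluster API on synthetic data; the sample sizes and parameters are arbitrary.
if __name__ == '__main__':
    np.random.seed(0)
    # Two well-separated 2-D Gaussian blobs.
    blob_a = np.random.multivariate_normal([0.0, 0.0], np.eye(2), 100)
    blob_b = np.random.multivariate_normal([5.0, 5.0], np.eye(2), 100)
    data = np.vstack([blob_a, blob_b])
    model = GMM(n_components=2)
    model.fit(data)
    labels = model.cluster(data)
    print('estimated means:\n', model.means)
    print('first/last labels:', labels[0], labels[-1])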
|
kubkon/gmm
|
gmm/algorithm.py
|
Python
|
mit
| 6,433
|
[
"Gaussian"
] |
0a0cc7fdc0350182a8b8cf140b537e189c59974decb01679d04a60cdbd91c4aa
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016-2017 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import unittest
import appleseed as asr
class TestEntityMap(unittest.TestCase):
"""
Basic entity map tests.
"""
def setUp(self):
self.scn = asr.Scene()
self.assembly_map = self.scn.assemblies()
def test_get_by_name(self):
ass = asr.Assembly("assembly", {})
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly", {})
self.assembly_map.insert(ass)
a = self.assembly_map.get_by_name("assembly")
self.assertEqual(a.get_name(), "assembly")
a = self.assembly_map.get_by_name("another_assembly")
self.assertEqual(a.get_name(), "another_assembly")
a = self.assembly_map.get_by_name("no_such_assembly")
self.assertEqual(a, None)
def test_get_by_uuid(self):
ass = asr.Assembly("assembly")
uid1 = ass.get_uid()
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly")
uid2 = ass.get_uid()
self.assembly_map.insert(ass)
a = self.assembly_map.get_by_uid(uid1)
self.assertEqual(a.get_name(), "assembly")
a = self.assembly_map.get_by_uid(uid2)
self.assertEqual(a.get_name(), "another_assembly")
a = self.assembly_map.get_by_uid(77567)
self.assertEqual(a, None)
def test_get_item(self):
ass = asr.Assembly("assembly")
uid1 = ass.get_uid()
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly")
uid2 = ass.get_uid()
self.assembly_map.insert(ass)
self.assertEqual(self.assembly_map["assembly"].get_uid(), uid1)
self.assertEqual(self.assembly_map["another_assembly"].get_uid(), uid2)
def test_insert_remove_by_uid(self):
ass = asr.Assembly("assembly")
self.assembly_map.insert(ass)
self.assertEqual(len(self.assembly_map), 1)
a = self.assembly_map.get_by_name("assembly")
ass = self.assembly_map.remove_by_uid(a.get_uid())
self.assertEqual(len(self.assembly_map), 0)
self.assembly_map.insert(ass)
self.assertEqual(len(self.assembly_map), 1)
def test_keys(self):
self.assertEqual(self.assembly_map.keys(), [])
ass = asr.Assembly("assembly")
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly")
self.assembly_map.insert(ass)
self.assertEqual(self.assembly_map.keys(), ["assembly", "another_assembly"])
def test_values(self):
ass = asr.Assembly("assembly")
uid1 = ass.get_uid()
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly")
uid2 = ass.get_uid()
self.assembly_map.insert(ass)
values = self.assembly_map.values()
self.assertEqual(len(values), 2)
self.assertEqual(values[0].get_uid(), uid1)
self.assertEqual(values[1].get_uid(), uid2)
def test_iters(self):
names = ['assembly', 'assembly2', 'assembly3']
uids = []
for name in names:
ass = asr.Assembly(name)
uids.append(ass.get_uid())
self.assembly_map.insert(ass)
result_names = []
result_uids = []
for ass in self.assembly_map:
result_names.append(ass)
result_uids.append(self.assembly_map[ass].get_uid())
self.assertEqual(sorted(names), sorted(result_names))
self.assertEqual(sorted(uids), sorted(result_uids))
if __name__ == "__main__":
unittest.main()
|
aytekaman/appleseed
|
src/appleseed.python/test/testentitymap.py
|
Python
|
mit
| 4,820
|
[
"VisIt"
] |
a041d11034328a00a9f71c20ea726ad4b88ea788311b17b8165ae875c23834cb
|
#!python
#coding= latin-1
# This script implements the Double Metaphone algorithm (c) 1998, 1999 by Lawrence Philips
# it was translated to Python from the C source written by Kevin Atkinson (http://aspell.net/metaphone/)
# By Andrew Collins - January 12, 2007 who claims no rights to this work
# http://atomboy.isa-geek.com:8080/plone/Members/acoil/programing/double-metaphone
# Tested with Python 2.4.3
# Updated Feb 14, 2007 - Found a typo in the 'gh' section
# Updated Dec 17, 2007 - Bugs fixed in 'S', 'Z', and 'J' sections. Thanks Chris Leong!
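# Illustrative usage (editor's note): the function below is called with a single word and
# returns a (primary, secondary) tuple of phonetic codes, e.g.
#   primary, secondary = dm('Smith')   # expected ('SM0', 'XMT') with the reference algorithm
# Exact outputs depend on the rule tables implemented below.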
def dm(st) :
"""dm(string) -> (string, string or None)
returns the double metaphone codes for given string - always a tuple
there are no checks done on the input string, but it should be a single word or name."""
vowels = ['A', 'E', 'I', 'O', 'U', 'Y']
st = st.decode('ascii', 'ignore')
st = st.upper() # st is short for string. I usually prefer descriptive over short, but this var is used a lot!
is_slavo_germanic = (st.find('W') > -1 or st.find('K') > -1 or st.find('CZ') > -1 or st.find('WITZ') > -1)
length = len(st)
first = 2
st = '-' * first + st + '------' # so we can index beyond the beginning and end of the input string
last = first + length -1
pos = first # pos is short for position
pri = sec = '' # primary and secondary metaphone codes
#skip these silent letters when at start of word
if st[first:first+2] in ["GN", "KN", "PN", "WR", "PS"] :
pos += 1
# Initial 'X' is pronounced 'Z' e.g. 'Xavier'
if st[first] == 'X' :
pri = sec = 'S' #'Z' maps to 'S'
pos += 1
# main loop through chars in st
while pos <= last :
#print str(pos) + '\t' + st[pos]
ch = st[pos] # ch is short for character
# nxt (short for next characters in metaphone code) is set to a tuple of the next characters in
# the primary and secondary codes and how many characters to move forward in the string.
# the secondary code letter is given only when it is different than the primary.
# This is just a trick to make the code easier to write and read.
nxt = (None, 1) # default action is to add nothing and move to next char
if ch in vowels :
nxt = (None, 1)
if pos == first : # all init vowels now map to 'A'
nxt = ('A', 1)
elif ch == 'B' :
#"-mb", e.g", "dumb", already skipped over... see 'M' below
if st[pos+1] == 'B' :
nxt = ('P', 2)
else :
nxt = ('P', 1)
elif ch == 'C' :
# various germanic
if (pos > first and st[pos-2] in vowels and st[pos-1:pos+1] == 'ACH' and \
(st[pos+2] not in ['I', 'E'] or st[pos-2:pos+4] in ['BACHER', 'MACHER'])) :
nxt = ('K', 2)
# special case 'CAESAR'
elif pos == first and st[first:first+6] == 'CAESAR' :
nxt = ('S', 2)
elif st[pos:pos+4] == 'CHIA' : #italian 'chianti'
nxt = ('K', 2)
elif st[pos:pos+2] == 'CH' :
# find 'michael'
if pos > first and st[pos:pos+4] == 'CHAE' :
nxt = ('K', 'X', 2)
elif pos == first and (st[pos+1:pos+6] in ['HARAC', 'HARIS'] or \
st[pos+1:pos+4] in ["HOR", "HYM", "HIA", "HEM"]) and st[first:first+5] != 'CHORE' :
nxt = ('K', 2)
#germanic, greek, or otherwise 'ch' for 'kh' sound
elif st[first:first+4] in ['VAN ', 'VON '] or st[first:first+3] == 'SCH' \
or st[pos-2:pos+4] in ["ORCHES", "ARCHIT", "ORCHID"] \
or st[pos+2] in ['T', 'S'] \
or ((st[pos-1] in ["A", "O", "U", "E"] or pos == first) \
and st[pos+2] in ["L", "R", "N", "M", "B", "H", "F", "V", "W"]) :
nxt = ('K', 1)
else :
if pos == first :
if st[first:first+2] == 'MC' :
nxt = ('K', 2)
else :
nxt = ('X', 'K', 2)
else :
nxt = ('X', 2)
#e.g, 'czerny'
elif st[pos:pos+2] == 'CZ' and st[pos-2:pos+2] != 'WICZ' :
nxt = ('S', 'X', 2)
#e.g., 'focaccia'
elif st[pos+1:pos+4] == 'CIA' :
nxt = ('X', 3)
#double 'C', but not if e.g. 'McClellan'
elif st[pos:pos+2] == 'CC' and not (pos == (first +1) and st[first] == 'M') :
#'bellocchio' but not 'bacchus'
if st[pos+2] in ["I", "E", "H"] and st[pos+2:pos+4] != 'HU' :
#'accident', 'accede' 'succeed'
if (pos == (first +1) and st[first] == 'A') or \
st[pos-1:pos+4] in ['UCCEE', 'UCCES'] :
nxt = ('KS', 3)
#'bacci', 'bertucci', other italian
else:
nxt = ('X', 3)
else :
nxt = ('K', 2)
elif st[pos:pos+2] in ["CK", "CG", "CQ"] :
nxt = ('K', 'K', 2)
elif st[pos:pos+2] in ["CI", "CE", "CY"] :
#italian vs. english
if st[pos:pos+3] in ["CIO", "CIE", "CIA"] :
nxt = ('S', 'X', 2)
else :
nxt = ('S', 2)
else :
#name sent in 'mac caffrey', 'mac gregor
if st[pos+1:pos+3] in [" C", " Q", " G"] :
nxt = ('K', 3)
else :
if st[pos+1] in ["C", "K", "Q"] and st[pos+1:pos+3] not in ["CE", "CI"] :
nxt = ('K', 2)
else : # default for 'C'
nxt = ('K', 1)
elif ch == u'Ç' : # will never get here with st.decode('ascii', 'ignore') above
nxt = ('S', 1)
elif ch == 'D' :
if st[pos:pos+2] == 'DG' :
if st[pos+2] in ['I', 'E', 'Y'] : #e.g. 'edge'
nxt = ('J', 3)
else :
nxt = ('TK', 2)
elif st[pos:pos+2] in ['DT', 'DD'] :
nxt = ('T', 2)
else :
nxt = ('T', 1)
elif ch == 'F' :
if st[pos+1] == 'F' :
nxt = ('F', 2)
else :
nxt = ('F', 1)
elif ch == 'G' :
if st[pos+1] == 'H' :
if pos > first and st[pos-1] not in vowels :
nxt = ('K', 2)
elif pos < (first + 3) :
if pos == first : #'ghislane', ghiradelli
if st[pos+2] == 'I' :
nxt = ('J', 2)
else :
nxt = ('K', 2)
#Parker's rule (with some further refinements) - e.g., 'hugh'
elif (pos > (first + 1) and st[pos-2] in ['B', 'H', 'D'] ) \
or (pos > (first + 2) and st[pos-3] in ['B', 'H', 'D'] ) \
or (pos > (first + 3) and st[pos-3] in ['B', 'H'] ) :
nxt = (None, 2)
else :
# e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
if pos > (first + 2) and st[pos-1] == 'U' \
and st[pos-3] in ["C", "G", "L", "R", "T"] :
nxt = ('F', 2)
else :
if pos > first and st[pos-1] != 'I' :
nxt = ('K', 2)
elif st[pos+1] == 'N' :
if pos == (first +1) and st[first] in vowels and not is_slavo_germanic :
nxt = ('KN', 'N', 2)
else :
# not e.g. 'cagney'
if st[pos+2:pos+4] != 'EY' and st[pos+1] != 'Y' and not is_slavo_germanic :
nxt = ('N', 'KN', 2)
else :
nxt = ('KN', 2)
# 'tagliaro'
elif st[pos+1:pos+3] == 'LI' and not is_slavo_germanic :
nxt = ('KL', 'L', 2)
# -ges-,-gep-,-gel-, -gie- at beginning
elif pos == first and (st[pos+1] == 'Y' \
or st[pos+1:pos+3] in ["ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER"]) :
nxt = ('K', 'J', 2)
# -ger-, -gy-
elif (st[pos+1:pos+3] == 'ER' or st[pos+1] == 'Y') \
and st[first:first+6] not in ["DANGER", "RANGER", "MANGER"] \
and st[pos-1] not in ['E', 'I'] and st[pos-1:pos+2] not in ['RGY', 'OGY'] :
nxt = ('K', 'J', 2)
# italian e.g, 'biaggi'
elif st[pos+1] in ['E', 'I', 'Y'] or st[pos-1:pos+3] in ["AGGI", "OGGI"] :
# obvious germanic
if st[first:first+4] in ['VON ', 'VAN '] or st[first:first+3] == 'SCH' \
or st[pos+1:pos+3] == 'ET' :
nxt = ('K', 2)
else :
# always soft if french ending
if st[pos+1:pos+5] == 'IER ' :
nxt = ('J', 2)
else :
nxt = ('J', 'K', 2)
elif st[pos+1] == 'G' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'H' :
# only keep if first & before vowel or btw. 2 vowels
if (pos == first or st[pos-1] in vowels) and st[pos+1] in vowels :
nxt = ('H', 2)
else : # (also takes care of 'HH')
nxt = (None, 1)
elif ch == 'J' :
# obvious spanish, 'jose', 'san jacinto'
if st[pos:pos+4] == 'JOSE' or st[first:first+4] == 'SAN ' :
if (pos == first and st[pos+4] == ' ') or st[first:first+4] == 'SAN ' :
nxt = ('H',)
else :
nxt = ('J', 'H')
elif pos == first and st[pos:pos+4] != 'JOSE' :
nxt = ('J', 'A') # Yankelovich/Jankelowicz
else :
# spanish pron. of e.g. 'bajador'
if st[pos-1] in vowels and not is_slavo_germanic \
and st[pos+1] in ['A', 'O'] :
nxt = ('J', 'H')
else :
if pos == last :
nxt = ('J', ' ')
else :
if st[pos+1] not in ["L", "T", "K", "S", "N", "M", "B", "Z"] \
and st[pos-1] not in ["S", "K", "L"] :
nxt = ('J',)
else :
nxt = (None, )
if st[pos+1] == 'J' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'K' :
if st[pos+1] == 'K' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'L' :
if st[pos+1] == 'L' :
# spanish e.g. 'cabrillo', 'gallegos'
if (pos == (last - 2) and st[pos-1:pos+3] in ["ILLO", "ILLA", "ALLE"]) \
or (st[last-1:last+1] in ["AS", "OS"] or st[last] in ["A", "O"] \
and st[pos-1:pos+3] == 'ALLE') :
nxt = ('L', ' ', 2)
else :
nxt = ('L', 2)
else :
nxt = ('L', 1)
elif ch == 'M' :
if st[pos+1:pos+4] == 'UMB' \
and (pos + 1 == last or st[pos+2:pos+4] == 'ER') \
or st[pos+1] == 'M' :
nxt = ('M', 2)
else :
nxt = ('M', 1)
elif ch == 'N' :
if st[pos+1] == 'N' :
nxt = ('N', 2)
else :
nxt = ('N', 1)
elif ch == u'Ñ' :
nxt = ('N', 1)
elif ch == 'P' :
if st[pos+1] == 'H' :
nxt = ('F', 2)
elif st[pos+1] in ['P', 'B'] : # also account for "campbell", "raspberry"
nxt = ('P', 2)
else :
nxt = ('P', 1)
elif ch == 'Q' :
if st[pos+1] == 'Q' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'R' :
# french e.g. 'rogier', but exclude 'hochmeier'
if pos == last and not is_slavo_germanic \
and st[pos-2:pos] == 'IE' and st[pos-4:pos-2] not in ['ME', 'MA'] :
nxt = ('', 'R')
else :
nxt = ('R',)
if st[pos+1] == 'R' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'S' :
# special cases 'island', 'isle', 'carlisle', 'carlysle'
if st[pos-1:pos+2] in ['ISL', 'YSL'] :
nxt = (None, 1)
# special case 'sugar-'
elif pos == first and st[first:first+5] == 'SUGAR' :
nxt =('X', 'S', 1)
elif st[pos:pos+2] == 'SH' :
# germanic
if st[pos+1:pos+5] in ["HEIM", "HOEK", "HOLM", "HOLZ"] :
nxt = ('S', 2)
else :
nxt = ('X', 2)
# italian & armenian
elif st[pos:pos+3] in ["SIO", "SIA"] or st[pos:pos+4] == 'SIAN' :
if not is_slavo_germanic :
nxt = ('S', 'X', 3)
else :
nxt = ('S', 3)
# german & anglicisations, e.g. 'smith' match 'schmidt', 'snider' match 'schneider'
# also, -sz- in slavic language altho in hungarian it is pronounced 's'
elif (pos == first and st[pos+1] in ["M", "N", "L", "W"]) or st[pos+1] == 'Z' :
nxt = ('S', 'X')
if st[pos+1] == 'Z' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif st[pos+2:pos+4] == 'SC' :
# Schlesinger's rule
if st[pos+2] == 'H' :
# dutch origin, e.g. 'school', 'schooner'
if st[pos+3:pos+5] in ["OO", "ER", "EN", "UY", "ED", "EM"] :
# 'schermerhorn', 'schenker'
if st[pos+3:pos+5] in ['ER', 'EN'] :
nxt = ('X', 'SK', 3)
else :
nxt = ('SK', 3)
else :
if pos == first and st[first+3] not in vowels and st[first+3] != 'W' :
nxt = ('X', 'S', 3)
else :
nxt = ('X', 3)
elif st[pos+2] in ['I', 'E', 'Y'] :
nxt = ('S', 3)
else :
nxt = ('SK', 3)
# french e.g. 'resnais', 'artois'
elif pos == last and st[pos-2:pos] in ['AI', 'OI'] :
nxt = ('', 'S', 1)
else :
nxt = ('S',)
if st[pos+1] in ['S', 'Z'] :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'T' :
if st[pos:pos+4] == 'TION' :
nxt = ('X', 3)
elif st[pos:pos+3] in ['TIA', 'TCH'] :
nxt = ('X', 3)
elif st[pos:pos+2] == 'TH' or st[pos:pos+3] == 'TTH' :
# special case 'thomas', 'thames' or germanic
if st[pos+2:pos+4] in ['OM', 'AM'] or st[first:first+4] in ['VON ', 'VAN '] \
or st[first:first+3] == 'SCH' :
nxt = ('T', 2)
else :
nxt = ('0', 'T', 2)
elif st[pos+1] in ['T', 'D'] :
nxt = ('T', 2)
else :
nxt = ('T', 1)
elif ch == 'V' :
if st[pos+1] == 'V' :
nxt = ('F', 2)
else :
nxt = ('F', 1)
elif ch == 'W' :
# can also be in middle of word
if st[pos:pos+2] == 'WR' :
nxt = ('R', 2)
elif pos == first and st[pos+1] in vowels or st[pos:pos+2] == 'WH' :
# Wasserman should match Vasserman
if st[pos+1] in vowels :
nxt = ('A', 'F', 1)
else :
nxt = ('A', 1)
# Arnow should match Arnoff
elif (pos == last and st[pos-1] in vowels) \
or st[pos-1:pos+5] in ["EWSKI", "EWSKY", "OWSKI", "OWSKY"] \
or st[first:first+3] == 'SCH' :
nxt = ('', 'F', 1)
# polish e.g. 'filipowicz'
elif st[pos:pos+4] in ["WICZ", "WITZ"] :
nxt = ('TS', 'FX', 4)
else : # default is to skip it
nxt = (None, 1)
elif ch == 'X' :
# french e.g. breaux
nxt = (None,)
if not(pos == last and (st[pos-3:pos] in ["IAU", "EAU"] \
or st[pos-2:pos] in ['AU', 'OU'])):
nxt = ('KS',)
if st[pos+1] in ['C', 'X'] :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'Z' :
# chinese pinyin e.g. 'zhao'
if st[pos+1] == 'H' :
nxt = ('J',)
elif st[pos+1:pos+3] in ["ZO", "ZI", "ZA"] \
or (is_slavo_germanic and pos > first and st[pos-1] != 'T') :
nxt = ('S', 'TS')
else :
nxt = ('S',)
if st[pos+1] == 'Z' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
# ----------------------------------
# --- end checking letters------
# ----------------------------------
#print str(nxt)
if len(nxt) == 2 :
if nxt[0] :
pri += nxt[0]
sec += nxt[0]
pos += nxt[1]
elif len(nxt) == 3 :
if nxt[0] :
pri += nxt[0]
if nxt[1] :
sec += nxt[1]
pos += nxt[2]
if pri == sec :
return (pri, None)
else :
return (pri, sec)
if __name__ == '__main__' :
names = {'maurice':'MRS','aubrey':'APR','cambrillo':'KMPR','heidi':'HT','katherine':'K0RN,KTRN',\
'catherine':'K0RN,KTRN','richard':'RXRT,RKRT','bob':'PP','eric':'ARK','geoff':'JF,KF',\
'dave':'TF','ray':'R','steven':'STFN','bryce':'PRS','randy':'RNT','bryan':'PRN',\
'brian':'PRN','otto':'AT','auto':'AT', 'maisey':'MS, None', 'zhang':'JNK, None', 'solilijs':'SLLS, None'}
for name in names.keys() :
print name + '\t-->\t' + str(dm(name)) + '\t(' +names[name] + ')'
|
zedshaw/librelist
|
lib/metaphone.py
|
Python
|
agpl-3.0
| 14,779
|
[
"Brian"
] |
0e971c6e9257d7425ca109ade8bfd3ec98d58368bf92ca2d1811f7558fc7c524
|
#!/usr/bin/env python
# Python module for simulated annealing - anneal.py - v1.0 - 2 Sep 2009
#
# Copyright (c) 2009, Richard J. Wagner <wagnerr@umich.edu>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This module performs simulated annealing to find a state of a system that
minimizes its energy.
An example program demonstrates simulated annealing with a traveling
salesman problem to find the shortest route to visit the twenty largest
cities in the United States.
Notes:
Matt Perry 6/24/12
Changed to slicing lists instead of deepcopy-ing them.
e.g. state = prevState[:] instead of state = deepcopy(prevState)
Huge performance enhancement (~5-10x faster)
Should be identical behavior if the items in the state list are immutable.
(immutable objects include integers and strings so should be safe)
"""
# How to optimize a system with simulated annealing:
#
# 1) Define a format for describing the state of the system.
#
# 2) Define a function to calculate the energy of a state.
#
# 3) Define a function to make a random change to a state.
#
# 4) Choose a maximum temperature, minimum temperature, and number of steps.
#
# 5) Set the annealer to work with your state and functions.
#
# 6) Study the variation in energy with temperature and duration to find a
# productive annealing schedule.
#
# Or,
#
# 4) Run the automatic annealer which will attempt to choose reasonable values
# for maximum and minimum temperatures and then anneal for the allotted time.
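#
# A minimal sketch of those steps (illustrative only; the toy state, energy()
# and move() below are placeholders, not part of this module's API):
#
#     state = range(20)[::-1]            # 1) state format: a list of ints
#     def energy(s):                     # 2) energy of a state
#         return sum((x - i) ** 2 for i, x in enumerate(s))
#     def move(s):                       # 3) random change to a state
#         i = random.randint(0, len(s) - 2)
#         s[i], s[i + 1] = s[i + 1], s[i]
#     annealer = Annealer(energy, move)  # 5) set the annealer to work
#     best, e = annealer.anneal(state, Tmax=100.0, Tmin=0.01,
#                               steps=50000, updates=10)   # 4) and 6)
#     # or let auto() propose a schedule; as modified in this copy it
#     # returns a dict of parameters instead of running the anneal itself
#     params = annealer.auto(state, minutes=1)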
import copy, math, sys, time
try:
from numpy import random
except ImportError:
import random
def round_figures(x, n):
"""Returns x rounded to n significant figures."""
return round(x, int(n - math.ceil(math.log10(abs(x)))))
def time_string(seconds):
"""Returns time in seconds as a string formatted HHHH:MM:SS."""
s = int(round(seconds)) # round to nearest second
h, s = divmod(s, 3600) # get hours and remainder
m, s = divmod(s, 60) # split remainder into minutes and seconds
return '%4i:%02i:%02i' % (h, m, s)
class Annealer:
"""Performs simulated annealing by calling functions to calculate
energy and make moves on a state. The temperature schedule for
annealing may be provided manually or estimated automatically.
"""
def __init__(self, energy, move):
self.energy = energy # function to calculate energy of a state
self.move = move # function to make a random change to a state
def anneal(self, state, Tmax, Tmin, steps, updates=0):
"""Minimizes the energy of a system by simulated annealing.
Keyword arguments:
state -- an initial arrangement of the system
Tmax -- maximum temperature (in units of energy)
Tmin -- minimum temperature (must be greater than zero)
steps -- the number of steps requested
updates -- the number of updates to print during annealing
Returns the best state and energy found."""
step = 0
start = time.time()
def update(T, E, acceptance, improvement):
"""Prints the current temperature, energy, acceptance rate,
improvement rate, elapsed time, and remaining time.
The acceptance rate indicates the percentage of moves since the last
update that were accepted by the Metropolis algorithm. It includes
moves that decreased the energy, moves that left the energy
unchanged, and moves that increased the energy yet were reached by
thermal excitation.
The improvement rate indicates the percentage of moves since the
last update that strictly decreased the energy. At high
temperatures it will include both moves that improved the overall
state and moves that simply undid previously accepted moves that
            increased the energy by thermal excitation. At low temperatures
it will tend toward zero as the moves that can decrease the energy
are exhausted and moves that would increase the energy are no longer
thermally accessible."""
elapsed = time.time() - start
if step == 0:
print ' Temperature Energy Accept Improve Elapsed Remaining'
print '%12.2f %12.2f %s ' % \
(T, E, time_string(elapsed) )
else:
remain = ( steps - step ) * ( elapsed / step )
print '%12.2f %12.2f %7.2f%% %7.2f%% %s %s' % \
(T, E, 100.0*acceptance, 100.0*improvement,
time_string(elapsed), time_string(remain))
# Precompute factor for exponential cooling from Tmax to Tmin
if Tmin <= 0.0:
print 'Exponential cooling requires a minimum temperature greater than zero.'
sys.exit()
Tfactor = -math.log( float(Tmax) / Tmin )
# Note initial state
T = Tmax
E = self.energy(state)
#prevState = copy.deepcopy(state)
prevState = state[:]
prevEnergy = E
#bestState = copy.deepcopy(state)
bestState = state[:]
bestEnergy = E
trials, accepts, improves = 0, 0, 0
if updates > 0:
updateWavelength = float(steps) / updates
update(T, E, None, None)
# Attempt moves to new states
while step < steps:
step += 1
T = Tmax * math.exp( Tfactor * step / steps )
self.move(state)
E = self.energy(state)
dE = E - prevEnergy
trials += 1
if dE > 0.0 and math.exp(-dE/T) < random.random():
# Restore previous state
#state = copy.deepcopy(prevState)
state = prevState[:]
E = prevEnergy
else:
# Accept new state and compare to best state
accepts += 1
if dE < 0.0:
improves += 1
#prevState = copy.deepcopy(state)
prevState = state[:]
prevEnergy = E
if E < bestEnergy:
#bestState = copy.deepcopy(state)
bestState = state[:]
bestEnergy = E
if updates > 1:
if step // updateWavelength > (step-1) // updateWavelength:
update(T, E, float(accepts)/trials, float(improves)/trials)
trials, accepts, improves = 0, 0, 0
# Return best state and energy
return bestState, bestEnergy
def auto(self, state, minutes, steps=2000):
"""Minimizes the energy of a system by simulated annealing with
automatic selection of the temperature schedule.
Keyword arguments:
state -- an initial arrangement of the system
minutes -- time to spend annealing (after exploring temperatures)
steps -- number of steps to spend on each stage of exploration
Returns the best state and energy found."""
def run(state, T, steps):
"""Anneals a system at constant temperature and returns the state,
energy, rate of acceptance, and rate of improvement."""
E = self.energy(state)
#prevState = copy.deepcopy(state)
prevState = state[:]
prevEnergy = E
accepts, improves = 0, 0
for step in range(steps):
self.move(state)
E = self.energy(state)
dE = E - prevEnergy
if dE > 0.0 and math.exp(-dE/T) < random.random():
#state = copy.deepcopy(prevState)
state = prevState[:]
E = prevEnergy
else:
accepts += 1
if dE < 0.0:
improves += 1
#prevState = copy.deepcopy(state)
prevState = state[:]
prevEnergy = E
return state, E, float(accepts)/steps, float(improves)/steps
step = 0
start = time.time()
print 'Attempting automatic simulated anneal...'
# Find an initial guess for temperature
T = 0.0
E = self.energy(state)
while T == 0.0:
step += 1
self.move(state)
T = abs( self.energy(state) - E )
print 'Exploring temperature landscape:'
print ' Temperature Energy Accept Improve Elapsed'
def update(T, E, acceptance, improvement):
"""Prints the current temperature, energy, acceptance rate,
improvement rate, and elapsed time."""
elapsed = time.time() - start
print '%12.2f %12.2f %7.2f%% %7.2f%% %s' % \
(T, E, 100.0*acceptance, 100.0*improvement, time_string(elapsed))
# Search for Tmax - a temperature that gives 98% acceptance
state, E, acceptance, improvement = run(state, T, steps)
step += steps
while acceptance > 0.98:
T = round_figures(T/1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
while acceptance < 0.98:
T = round_figures(T*1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
Tmax = T
# Search for Tmin - a temperature that gives 0% improvement
while improvement > 0.0:
T = round_figures(T/1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
Tmin = T
# Calculate anneal duration
elapsed = time.time() - start
duration = round_figures(int(60.0 * minutes * step / elapsed), 2)
# MP: Don't perform anneal, just return params
#return self.anneal(state, Tmax, Tmin, duration, 20)
return {'tmax': Tmax, 'tmin': Tmin, 'steps': duration}
if __name__ == '__main__':
"""Test annealer with a traveling salesman problem."""
# List latitude and longitude (degrees) for the twenty largest U.S. cities
cities = { 'New York City': (40.72,74.00), 'Los Angeles': (34.05,118.25),
'Chicago': (41.88,87.63), 'Houston': (29.77,95.38),
'Phoenix': (33.45,112.07), 'Philadelphia': (39.95,75.17),
'San Antonio': (29.53,98.47), 'Dallas': (32.78,96.80),
'San Diego': (32.78,117.15), 'San Jose': (37.30,121.87),
'Detroit': (42.33,83.05), 'San Francisco': (37.78,122.42),
'Jacksonville': (30.32,81.70), 'Indianapolis': (39.78,86.15),
'Austin': (30.27,97.77), 'Columbus': (39.98,82.98),
'Fort Worth': (32.75,97.33), 'Charlotte': (35.23,80.85),
'Memphis': (35.12,89.97), 'Baltimore': (39.28,76.62) }
def distance(a, b):
"""Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos( math.sin(lat1)*math.sin(lat2) +
math.cos(lat1)*math.cos(lat2)*math.cos(lon1-lon2) ) * R
def route_move(state):
"""Swaps two cities in the route."""
a = random.randint( 0, len(state)-1 )
b = random.randint( 0, len(state)-1 )
state[a], state[b] = state[b], state[a]
def route_energy(state):
"""Calculates the length of the route."""
e = 0
for i in range(len(state)):
e += distance( cities[state[i-1]], cities[state[i]] )
return e
# Start with the cities listed in random order
state = cities.keys()
random.shuffle(state)
# Minimize the distance to be traveled by simulated annealing with a
# manually chosen temperature schedule
annealer = Annealer(route_energy, route_move)
state, e = annealer.anneal(state, 10000000, 0.01, 18000*len(state), 9)
while state[0] != 'New York City':
state = state[1:] + state[:1] # rotate NYC to start
print "%i mile route:" % route_energy(state)
for city in state:
print "\t", city
# Minimize the distance to be traveled by simulated annealing with an
# automatically chosen temperature schedule
	# auto() above now returns a schedule dict instead of a state, so run
	# the anneal explicitly with the suggested parameters
	schedule = annealer.auto(state, 4)
	state, e = annealer.anneal(state, schedule['tmax'], schedule['tmin'], schedule['steps'], 9)
while state[0] != 'New York City':
state = state[1:] + state[:1] # rotate NYC to start
print "%i mile route:" % route_energy(state)
for city in state:
print "\t", city
sys.exit()
|
KevinNJ/Projects
|
Sallen Key Solver/anneal.py
|
Python
|
mit
| 13,636
|
[
"COLUMBUS",
"VisIt"
] |
73364631f50e7c0709a0b9f31fdd44753cafeb271547e2a1d103957e89381224
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""SPM wrappers for preprocessing data
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
# Standard library imports
from copy import deepcopy
import os
# Third-party imports
import numpy as np
# Local imports
from nipype.interfaces.base import (OutputMultiPath, TraitedSpec, isdefined,
traits, InputMultiPath, File)
from nipype.interfaces.spm.base import (SPMCommand, scans_for_fname,
func_is_3d,
scans_for_fnames, SPMCommandInputSpec)
from nipype.utils.filemanip import (fname_presuffix, filename_to_list,
list_to_filename, split_filename)
class SliceTimingInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)), field='scans',
desc='list of filenames to apply slice timing',
mandatory=True, copyfile=False)
num_slices = traits.Int(field='nslices',
desc='number of slices in a volume',
mandatory=True)
time_repetition = traits.Float(field='tr',
desc=('time between volume acquisitions '
'(start to start time)'),
mandatory=True)
time_acquisition = traits.Float(field='ta',
desc=('time of volume acquisition. usually '
'calculated as TR-(TR/num_slices)'),
mandatory=True)
slice_order = traits.List(traits.Int(), field='so',
desc='1-based order in which slices are acquired',
mandatory=True)
ref_slice = traits.Int(field='refslice',
desc='1-based Number of the reference slice',
mandatory=True)
out_prefix = traits.String('a', field='prefix', usedefault=True,
desc='slicetimed output prefix')
class SliceTimingOutputSpec(TraitedSpec):
timecorrected_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='slice time corrected files')
class SliceTiming(SPMCommand):
"""Use spm to perform slice timing correction.
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=19
Examples
--------
>>> from nipype.interfaces.spm import SliceTiming
>>> st = SliceTiming()
>>> st.inputs.in_files = 'functional.nii'
>>> st.inputs.num_slices = 32
>>> st.inputs.time_repetition = 6.0
>>> st.inputs.time_acquisition = 6. - 6./32.
>>> st.inputs.slice_order = range(32,0,-1)
>>> st.inputs.ref_slice = 1
>>> st.run() # doctest: +SKIP
"""
input_spec = SliceTimingInputSpec
output_spec = SliceTimingOutputSpec
_jobtype = 'temporal'
_jobname = 'st'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(filename_to_list(val),
keep4d=False,
separate_sessions=True)
return super(SliceTiming, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['timecorrected_files'] = []
filelist = filename_to_list(self.inputs.in_files)
for f in filelist:
if isinstance(f, list):
run = [fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f]
else:
run = fname_presuffix(f, prefix=self.inputs.out_prefix)
outputs['timecorrected_files'].append(run)
return outputs
class RealignInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)), field='data',
mandatory=True, copyfile=True,
desc='list of filenames to realign')
jobtype = traits.Enum('estwrite', 'estimate', 'write',
desc='one of: estimate, write, estwrite',
usedefault=True)
quality = traits.Range(low=0.0, high=1.0, field='eoptions.quality',
desc='0.1 = fast, 1.0 = precise')
fwhm = traits.Range(low=0.0, field='eoptions.fwhm',
desc='gaussian smoothing kernel width')
separation = traits.Range(low=0.0, field='eoptions.sep',
desc='sampling separation in mm')
register_to_mean = traits.Bool(True, field='eoptions.rtm',
mandatory=True, usedefault=True,
desc='Indicate whether realignment is done to the mean image')
weight_img = File(exists=True, field='eoptions.weight',
desc='filename of weighting image')
interp = traits.Range(low=0, high=7, field='eoptions.interp',
desc='degree of b-spline used for interpolation')
wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='eoptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_which = traits.ListInt([2, 1], field='roptions.which',
minlen=2, maxlen=2, usedefault=True,
desc='determines which images to reslice')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='roptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_mask = traits.Bool(field='roptions.mask',
desc='True/False mask output image')
out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
desc='realigned output prefix')
class RealignOutputSpec(TraitedSpec):
mean_image = File(exists=True, desc='Mean image file from the realignment')
modified_in_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='Copies of all files passed to in_files.\
Headers will have been modified to align all\
images with the first, or optionally to first\
do that, extract a mean image, and re-align to\
that mean image.')
realigned_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='If jobtype is write or estwrite, these will be the\
resliced files. Otherwise, they will be copies of\
in_files that have had their headers rewritten.')
realignment_parameters = OutputMultiPath(File(exists=True),
desc='Estimated translation and rotation parameters')
class Realign(SPMCommand):
"""Use spm_realign for estimating within modality rigid body alignment
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=25
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> realign = spm.Realign()
>>> realign.inputs.in_files = 'functional.nii'
>>> realign.inputs.register_to_mean = True
>>> realign.run() # doctest: +SKIP
"""
input_spec = RealignInputSpec
output_spec = RealignOutputSpec
_jobtype = 'spatial'
_jobname = 'realign'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(val,
keep4d=True,
separate_sessions=True)
return super(Realign, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(Realign, self)._parse_inputs()
return [{'%s' % (self.inputs.jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
resliced_all = self.inputs.write_which[0] > 0
resliced_mean = self.inputs.write_which[1] > 0
if isdefined(self.inputs.in_files):
outputs['realignment_parameters'] = []
for imgf in self.inputs.in_files:
if isinstance(imgf, list):
tmp_imgf = imgf[0]
else:
tmp_imgf = imgf
outputs['realignment_parameters'].append(fname_presuffix(tmp_imgf,
prefix='rp_',
suffix='.txt',
use_ext=False))
if not isinstance(imgf, list) and func_is_3d(imgf):
break
if self.inputs.jobtype == "estimate":
outputs['realigned_files'] = self.inputs.in_files
if self.inputs.jobtype == "estimate" or self.inputs.jobtype == "estwrite":
outputs['modified_in_files'] = self.inputs.in_files
if self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
if isinstance(self.inputs.in_files[0], list):
first_image = self.inputs.in_files[0][0]
else:
first_image = self.inputs.in_files[0]
if resliced_mean:
outputs['mean_image'] = fname_presuffix(first_image, prefix='mean')
if resliced_all:
outputs['realigned_files'] = []
for idx, imgf in enumerate(filename_to_list(self.inputs.in_files)):
realigned_run = []
if isinstance(imgf, list):
for i, inner_imgf in enumerate(filename_to_list(imgf)):
newfile = fname_presuffix(inner_imgf,
prefix=self.inputs.out_prefix)
if os.path.exists(newfile):
realigned_run.append(newfile)
continue
if (idx == 0) and (i == 0) and \
func_is_3d(inner_imgf):
realigned_run.append(fname_presuffix(inner_imgf,
prefix=''))
else:
realigned_run = fname_presuffix(imgf,
prefix=self.inputs.out_prefix)
if (idx == 0) and func_is_3d(imgf):
realigned_run = fname_presuffix(imgf, prefix='')
outputs['realigned_files'].append(realigned_run)
return outputs
class CoregisterInputSpec(SPMCommandInputSpec):
target = File(exists=True, field='ref', mandatory=True,
desc='reference file to register to', copyfile=False)
source = InputMultiPath(File(exists=True), field='source',
desc='file to register to target', copyfile=True,
mandatory=True)
jobtype = traits.Enum('estwrite', 'estimate', 'write',
desc='one of: estimate, write, estwrite',
usedefault=True)
apply_to_files = InputMultiPath(File(exists=True), field='other',
desc='files to apply transformation to',
copyfile=True)
cost_function = traits.Enum('mi', 'nmi', 'ecc', 'ncc',
field='eoptions.cost_fun',
desc="""cost function, one of: 'mi' - Mutual Information,
'nmi' - Normalised Mutual Information,
'ecc' - Entropy Correlation Coefficient,
'ncc' - Normalised Cross Correlation""")
fwhm = traits.List(traits.Float(), minlen=2, maxlen=2,
field='eoptions.fwhm',
desc='gaussian smoothing kernel width (mm)')
separation = traits.List(traits.Float(), field='eoptions.sep',
desc='sampling separation in mm')
tolerance = traits.List(traits.Float(), field='eoptions.tol',
desc='acceptable tolerance for each of 12 params')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='roptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_mask = traits.Bool(field='roptions.mask',
desc='True/False mask output image')
out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
desc='coregistered output prefix')
class CoregisterOutputSpec(TraitedSpec):
coregistered_source = OutputMultiPath(File(exists=True),
desc='Coregistered source files')
coregistered_files = OutputMultiPath(File(exists=True),
desc='Coregistered other files')
class Coregister(SPMCommand):
"""Use spm_coreg for estimating cross-modality rigid body alignment
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=39
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> coreg = spm.Coregister()
>>> coreg.inputs.target = 'functional.nii'
>>> coreg.inputs.source = 'structural.nii'
>>> coreg.run() # doctest: +SKIP
"""
input_spec = CoregisterInputSpec
output_spec = CoregisterOutputSpec
_jobtype = 'spatial'
_jobname = 'coreg'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'target' or (opt == 'source' and self.inputs.jobtype != "write"):
return scans_for_fnames(filename_to_list(val),
keep4d=True)
if opt == 'apply_to_files':
return np.array(filename_to_list(val), dtype=object)
if opt == 'source' and self.inputs.jobtype == "write":
if isdefined(self.inputs.apply_to_files):
return scans_for_fnames(val+self.inputs.apply_to_files)
else:
return scans_for_fnames(val)
return super(Coregister, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm coregister options if set to None ignore
"""
if self.inputs.jobtype == "write":
einputs = super(Coregister, self)._parse_inputs(skip=('jobtype', 'apply_to_files'))
else:
            einputs = super(Coregister, self)._parse_inputs(skip=('jobtype',))
jobtype = self.inputs.jobtype
return [{'%s' % (jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
if self.inputs.jobtype == "estimate":
if isdefined(self.inputs.apply_to_files):
outputs['coregistered_files'] = self.inputs.apply_to_files
outputs['coregistered_source'] = self.inputs.source
elif self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
if isdefined(self.inputs.apply_to_files):
outputs['coregistered_files'] = []
for imgf in filename_to_list(self.inputs.apply_to_files):
outputs['coregistered_files'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
outputs['coregistered_source'] = []
for imgf in filename_to_list(self.inputs.source):
outputs['coregistered_source'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
return outputs
class NormalizeInputSpec(SPMCommandInputSpec):
template = File(exists=True, field='eoptions.template',
desc='template file to normalize to',
mandatory=True, xor=['parameter_file'],
copyfile=False)
source = InputMultiPath(File(exists=True), field='subj.source',
desc='file to normalize to template',
xor=['parameter_file'],
mandatory=True, copyfile=True)
jobtype = traits.Enum('estwrite', 'est', 'write',
desc='one of: est, write, estwrite (opt, estwrite)',
usedefault=True)
apply_to_files = InputMultiPath(traits.Either(File(exists=True),
traits.List(File(exists=True))),
field='subj.resample',
desc='files to apply transformation to (opt)',
copyfile=True)
parameter_file = File(field='subj.matname', mandatory=True,
xor=['source', 'template'],
desc='normalization parameter file*_sn.mat', copyfile=False)
source_weight = File(field='subj.wtsrc',
desc='name of weighting image for source (opt)', copyfile=False)
template_weight = File(field='eoptions.weight',
desc='name of weighting image for template (opt)', copyfile=False)
source_image_smoothing = traits.Float(field='eoptions.smosrc',
desc='source smoothing (opt)')
template_image_smoothing = traits.Float(field='eoptions.smoref',
desc='template smoothing (opt)')
    affine_regularization_type = traits.Enum('mni', 'size', 'none', field='eoptions.regtype',
desc='mni, size, none (opt)')
DCT_period_cutoff = traits.Float(field='eoptions.cutoff',
                                     desc='Cutoff for DCT bases (opt)')
nonlinear_iterations = traits.Int(field='eoptions.nits',
desc='Number of iterations of nonlinear warping (opt)')
nonlinear_regularization = traits.Float(field='eoptions.reg',
desc='the amount of the regularization for the nonlinear part of the normalization (opt)')
write_preserve = traits.Bool(field='roptions.preserve',
desc='True/False warped images are modulated (opt,)')
write_bounding_box = traits.List(traits.List(traits.Float(), minlen=3,
maxlen=3),
field='roptions.bb', minlen=2, maxlen=2,
desc='3x2-element list of lists (opt)')
write_voxel_sizes = traits.List(traits.Float(), field='roptions.vox',
minlen=3, maxlen=3,
desc='3-element list (opt)')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), field='roptions.wrap',
desc=('Check if interpolation should wrap in [x,y,z] '
'- list of bools (opt)'))
out_prefix = traits.String('w', field='roptions.prefix', usedefault=True,
desc='normalized output prefix')
class NormalizeOutputSpec(TraitedSpec):
normalization_parameters = OutputMultiPath(File(exists=True), desc='MAT files containing the normalization parameters')
normalized_source = OutputMultiPath(File(exists=True), desc='Normalized source files')
normalized_files = OutputMultiPath(File(exists=True), desc='Normalized other files')
class Normalize(SPMCommand):
"""use spm_normalise for warping an image to a template
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=51
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> norm = spm.Normalize()
>>> norm.inputs.source = 'functional.nii'
>>> norm.run() # doctest: +SKIP
"""
input_spec = NormalizeInputSpec
output_spec = NormalizeOutputSpec
_jobtype = 'spatial'
_jobname = 'normalise'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'template':
return scans_for_fname(filename_to_list(val))
if opt == 'source':
return scans_for_fname(filename_to_list(val))
if opt == 'apply_to_files':
return scans_for_fnames(filename_to_list(val))
if opt == 'parameter_file':
return np.array([list_to_filename(val)], dtype=object)
if opt in ['write_wrap']:
if len(val) != 3:
raise ValueError('%s must have 3 elements' % opt)
return super(Normalize, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(Normalize, self)._parse_inputs(skip=('jobtype',
'apply_to_files'))
if isdefined(self.inputs.apply_to_files):
inputfiles = deepcopy(self.inputs.apply_to_files)
if isdefined(self.inputs.source):
inputfiles.extend(self.inputs.source)
einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles)
jobtype = self.inputs.jobtype
if jobtype in ['estwrite', 'write']:
if not isdefined(self.inputs.apply_to_files):
if isdefined(self.inputs.source):
einputs[0]['subj']['resample'] = scans_for_fname(self.inputs.source)
return [{'%s' % (jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
jobtype = self.inputs.jobtype
if jobtype.startswith('est'):
outputs['normalization_parameters'] = []
for imgf in filename_to_list(self.inputs.source):
outputs['normalization_parameters'].append(fname_presuffix(imgf, suffix='_sn.mat', use_ext=False))
outputs['normalization_parameters'] = list_to_filename(outputs['normalization_parameters'])
if self.inputs.jobtype == "estimate":
if isdefined(self.inputs.apply_to_files):
outputs['normalized_files'] = self.inputs.apply_to_files
outputs['normalized_source'] = self.inputs.source
elif 'write' in self.inputs.jobtype:
outputs['normalized_files'] = []
if isdefined(self.inputs.apply_to_files):
filelist = filename_to_list(self.inputs.apply_to_files)
for f in filelist:
if isinstance(f, list):
run = [fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f]
else:
run = [fname_presuffix(f, prefix=self.inputs.out_prefix)]
outputs['normalized_files'].extend(run)
if isdefined(self.inputs.source):
outputs['normalized_source'] = []
for imgf in filename_to_list(self.inputs.source):
outputs['normalized_source'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
return outputs
class SegmentInputSpec(SPMCommandInputSpec):
data = InputMultiPath(File(exists=True), field='data', desc='one scan per subject',
copyfile=False, mandatory=True)
gm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.GM',
desc="""Options to produce grey matter images: c1*.img, wc1*.img and mwc1*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
wm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.WM',
desc="""Options to produce white matter images: c2*.img, wc2*.img and mwc2*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
csf_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.CSF',
desc="""Options to produce CSF images: c3*.img, wc3*.img and mwc3*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
save_bias_corrected = traits.Bool(field='output.biascor',
desc='True/False produce a bias corrected image')
clean_masks = traits.Enum('no', 'light', 'thorough', field='output.cleanup',
desc="clean using estimated brain mask ('no','light','thorough')")
tissue_prob_maps = traits.List(File(exists=True), field='opts.tpm',
desc='list of gray, white & csf prob. (opt,)')
gaussians_per_class = traits.List(traits.Int(), field='opts.ngaus',
desc='num Gaussians capture intensity distribution')
affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', '', field='opts.regtype',
                                        desc='Possible options: "mni", "eastern", "subj", "none" (no regularisation), "" (no affine registration)')
warping_regularization = traits.Float(field='opts.warpreg',
desc='Controls balance between parameters and data')
warp_frequency_cutoff = traits.Float(field='opts.warpco', desc='Cutoff of DCT bases')
bias_regularization = traits.Enum(0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, field='opts.biasreg',
desc='no(0) - extremely heavy (10)')
bias_fwhm = traits.Enum(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130,
'Inf', field='opts.biasfwhm',
desc='FWHM of Gaussian smoothness of bias')
sampling_distance = traits.Float(field='opts.samp',
desc='Sampling distance on data for parameter estimation')
mask_image = File(exists=True, field='opts.msk',
desc='Binary image to restrict parameter estimation ')
class SegmentOutputSpec(TraitedSpec):
native_gm_image = File(desc='native space grey probability map')
normalized_gm_image = File(desc='normalized grey probability map',)
modulated_gm_image = File(desc='modulated, normalized grey probability map')
native_wm_image = File(desc='native space white probability map')
normalized_wm_image = File(desc='normalized white probability map')
modulated_wm_image = File(desc='modulated, normalized white probability map')
native_csf_image = File(desc='native space csf probability map')
normalized_csf_image = File(desc='normalized csf probability map')
modulated_csf_image = File(desc='modulated, normalized csf probability map')
modulated_input_image = File(deprecated='0.10',
new_name='bias_corrected_image',
desc='bias-corrected version of input image')
bias_corrected_image = File(desc='bias-corrected version of input image')
transformation_mat = File(exists=True, desc='Normalization transformation')
inverse_transformation_mat = File(exists=True,
desc='Inverse normalization info')
class Segment(SPMCommand):
"""use spm_segment to separate structural images into different
tissue classes.
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=43
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> seg = spm.Segment()
>>> seg.inputs.data = 'structural.nii'
>>> seg.run() # doctest: +SKIP
"""
_jobtype = 'spatial'
_jobname = 'preproc'
input_spec = SegmentInputSpec
output_spec = SegmentOutputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
clean_masks_dict = {'no': 0, 'light': 1, 'thorough': 2}
if opt in ['data', 'tissue_prob_maps']:
if isinstance(val, list):
return scans_for_fnames(val)
else:
return scans_for_fname(val)
if 'output_type' in opt:
return [int(v) for v in val]
if opt == 'mask_image':
return scans_for_fname(val)
if opt == 'clean_masks':
return clean_masks_dict[val]
return super(Segment, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
f = self.inputs.data[0]
for tidx, tissue in enumerate(['gm', 'wm', 'csf']):
outtype = '%s_output_type' % tissue
if isdefined(getattr(self.inputs, outtype)):
for idx, (image, prefix) in enumerate([('modulated', 'mw'),
('normalized', 'w'),
('native', '')]):
if getattr(self.inputs, outtype)[idx]:
outfield = '%s_%s_image' % (image, tissue)
outputs[outfield] = fname_presuffix(f,
prefix='%sc%d' % (prefix,
tidx+1))
if isdefined(self.inputs.save_bias_corrected) and \
self.inputs.save_bias_corrected:
outputs['bias_corrected_image'] = fname_presuffix(f, prefix='m')
t_mat = fname_presuffix(f, suffix='_seg_sn.mat', use_ext=False)
outputs['transformation_mat'] = t_mat
invt_mat = fname_presuffix(f, suffix='_seg_inv_sn.mat', use_ext=False)
outputs['inverse_transformation_mat'] = invt_mat
return outputs
class NewSegmentInputSpec(SPMCommandInputSpec):
channel_files = InputMultiPath(File(exists=True),
desc="A list of files to be segmented",
field='channel', copyfile=False, mandatory=True)
channel_info = traits.Tuple(traits.Float(), traits.Float(),
traits.Tuple(traits.Bool, traits.Bool),
desc="""A tuple with the following fields:
            - bias regularisation (0-10)
- FWHM of Gaussian smoothness of bias
- which maps to save (Corrected, Field) - a tuple of two boolean values""",
field='channel')
tissues = traits.List(traits.Tuple(traits.Tuple(File(exists=True), traits.Int()), traits.Int(),
traits.Tuple(traits.Bool, traits.Bool), traits.Tuple(traits.Bool, traits.Bool)),
desc="""A list of tuples (one per tissue) with the following fields:
- tissue probability map (4D), 1-based index to frame
- number of gaussians
- which maps to save [Native, DARTEL] - a tuple of two boolean values
            - which maps to save [Modulated, Unmodulated] - a tuple of two boolean values""",
field='tissue')
affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', field='warp.affreg',
desc='mni, eastern, subj, none ')
warping_regularization = traits.Float(field='warp.reg',
                                          desc='Approximate distance between sampling points.')
sampling_distance = traits.Float(field='warp.samp',
desc='Sampling distance on data for parameter estimation')
write_deformation_fields = traits.List(traits.Bool(), minlen=2, maxlen=2, field='warp.write',
desc="Which deformation fields to write:[Inverse, Forward]")
class NewSegmentOutputSpec(TraitedSpec):
native_class_images = traits.List(traits.List(File(exists=True)), desc='native space probability maps')
dartel_input_images = traits.List(traits.List(File(exists=True)), desc='dartel imported class images')
normalized_class_images = traits.List(traits.List(File(exists=True)), desc='normalized class images')
modulated_class_images = traits.List(traits.List(File(exists=True)), desc='modulated+normalized class images')
transformation_mat = OutputMultiPath(File(exists=True), desc='Normalization transformation')
bias_corrected_images = OutputMultiPath(File(exists=True), desc='bias corrected images')
bias_field_images = OutputMultiPath(File(exists=True), desc='bias field images')
forward_deformation_field = OutputMultiPath(File(exists=True))
inverse_deformation_field = OutputMultiPath(File(exists=True))
class NewSegment(SPMCommand):
"""Use spm_preproc8 (New Segment) to separate structural images into different
tissue classes. Supports multiple modalities.
NOTE: This interface currently supports single channel input only
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=185
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> seg = spm.NewSegment()
>>> seg.inputs.channel_files = 'structural.nii'
>>> seg.inputs.channel_info = (0.0001, 60, (True, True))
>>> seg.run() # doctest: +SKIP
For VBM pre-processing [http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf],
TPM.nii should be replaced by /path/to/spm8/toolbox/Seg/TPM.nii
>>> seg = NewSegment()
>>> seg.inputs.channel_files = 'structural.nii'
>>> tissue1 = (('TPM.nii', 1), 2, (True,True), (False, False))
>>> tissue2 = (('TPM.nii', 2), 2, (True,True), (False, False))
>>> tissue3 = (('TPM.nii', 3), 2, (True,False), (False, False))
>>> tissue4 = (('TPM.nii', 4), 2, (False,False), (False, False))
>>> tissue5 = (('TPM.nii', 5), 2, (False,False), (False, False))
>>> seg.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5]
>>> seg.run() # doctest: +SKIP
"""
input_spec = NewSegmentInputSpec
output_spec = NewSegmentOutputSpec
_jobtype = 'tools'
_jobname = 'preproc8'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['channel_files', 'channel_info']:
# structure have to be recreated, because of some weird traits error
new_channel = {}
new_channel['vols'] = scans_for_fnames(self.inputs.channel_files)
if isdefined(self.inputs.channel_info):
info = self.inputs.channel_info
new_channel['biasreg'] = info[0]
new_channel['biasfwhm'] = info[1]
new_channel['write'] = [int(info[2][0]), int(info[2][1])]
return [new_channel]
elif opt == 'tissues':
new_tissues = []
for tissue in val:
new_tissue = {}
new_tissue['tpm'] = np.array([','.join([tissue[0][0], str(tissue[0][1])])], dtype=object)
new_tissue['ngaus'] = tissue[1]
new_tissue['native'] = [int(tissue[2][0]), int(tissue[2][1])]
new_tissue['warped'] = [int(tissue[3][0]), int(tissue[3][1])]
new_tissues.append(new_tissue)
return new_tissues
elif opt == 'write_deformation_fields':
return super(NewSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
else:
return super(NewSegment, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['native_class_images'] = []
outputs['dartel_input_images'] = []
outputs['normalized_class_images'] = []
outputs['modulated_class_images'] = []
outputs['transformation_mat'] = []
outputs['bias_corrected_images'] = []
outputs['bias_field_images'] = []
outputs['inverse_deformation_field'] = []
outputs['forward_deformation_field'] = []
n_classes = 5
if isdefined(self.inputs.tissues):
n_classes = len(self.inputs.tissues)
for i in range(n_classes):
outputs['native_class_images'].append([])
outputs['dartel_input_images'].append([])
outputs['normalized_class_images'].append([])
outputs['modulated_class_images'].append([])
for filename in self.inputs.channel_files:
pth, base, ext = split_filename(filename)
if isdefined(self.inputs.tissues):
for i, tissue in enumerate(self.inputs.tissues):
if tissue[2][0]:
outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i+1, base)))
if tissue[2][1]:
outputs['dartel_input_images'][i].append(os.path.join(pth, "rc%d%s.nii" % (i+1, base)))
if tissue[3][0]:
outputs['normalized_class_images'][i].append(os.path.join(pth, "wc%d%s.nii" % (i+1, base)))
if tissue[3][1]:
outputs['modulated_class_images'][i].append(os.path.join(pth, "mwc%d%s.nii" % (i+1, base)))
else:
for i in range(n_classes):
outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i+1, base)))
outputs['transformation_mat'].append(os.path.join(pth, "%s_seg8.mat" % base))
if isdefined(self.inputs.write_deformation_fields):
if self.inputs.write_deformation_fields[0]:
outputs['inverse_deformation_field'].append(os.path.join(pth, "iy_%s.nii" % base))
if self.inputs.write_deformation_fields[1]:
outputs['forward_deformation_field'].append(os.path.join(pth, "y_%s.nii" % base))
if isdefined(self.inputs.channel_info):
if self.inputs.channel_info[2][0]:
outputs['bias_corrected_images'].append(os.path.join(pth, "m%s.nii" % (base)))
if self.inputs.channel_info[2][1]:
outputs['bias_field_images'].append(os.path.join(pth, "BiasField_%s.nii" % (base)))
return outputs
class SmoothInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(File(exists=True), field='data',
desc='list of files to smooth',
mandatory=True, copyfile=False)
fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3),
traits.Float(), field='fwhm',
desc='3-list of fwhm for each dimension (opt)')
data_type = traits.Int(field='dtype',
desc='Data type of the output images (opt)')
implicit_masking = traits.Bool(field='im',
desc=('A mask implied by a particular '
'voxel value'))
out_prefix = traits.String('s', field='prefix', usedefault=True,
desc='smoothed output prefix')
class SmoothOutputSpec(TraitedSpec):
smoothed_files = OutputMultiPath(File(exists=True), desc='smoothed files')
class Smooth(SPMCommand):
"""Use spm_smooth for 3D Gaussian smoothing of image volumes.
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=57
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> smooth = spm.Smooth()
>>> smooth.inputs.in_files = 'functional.nii'
>>> smooth.inputs.fwhm = [4, 4, 4]
>>> smooth.run() # doctest: +SKIP
"""
input_spec = SmoothInputSpec
output_spec = SmoothOutputSpec
_jobtype = 'spatial'
_jobname = 'smooth'
def _format_arg(self, opt, spec, val):
if opt in ['in_files']:
return scans_for_fnames(filename_to_list(val))
if opt == 'fwhm':
if not isinstance(val, list):
return [val, val, val]
if isinstance(val, list):
if len(val) == 1:
return [val[0], val[0], val[0]]
else:
return val
return super(Smooth, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['smoothed_files'] = []
for imgf in filename_to_list(self.inputs.in_files):
outputs['smoothed_files'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
return outputs
class DARTELInputSpec(SPMCommandInputSpec):
image_files = traits.List(traits.List(File(exists=True)),
desc="A list of files to be segmented",
field='warp.images', copyfile=False, mandatory=True)
template_prefix = traits.Str('Template', usedefault=True,
field='warp.settings.template',
desc='Prefix for template')
regularization_form = traits.Enum('Linear', 'Membrane', 'Bending',
field='warp.settings.rform',
desc='Form of regularization energy term')
iteration_parameters = traits.List(traits.Tuple(traits.Range(1, 10),
traits.Tuple(traits.Float,
traits.Float,
traits.Float),
traits.Enum(1, 2, 4, 8, 16,
32, 64, 128,
256, 512),
traits.Enum(0, 0.5, 1, 2, 4,
8, 16, 32)),
minlen=3,
maxlen=12,
field='warp.settings.param',
desc="""List of tuples for each iteration
- Inner iterations
- Regularization parameters
- Time points for deformation model
- smoothing parameter
""")
optimization_parameters = traits.Tuple(traits.Float, traits.Range(1, 8),
traits.Range(1, 8),
field='warp.settings.optim',
desc="""Optimization settings a tuple
- LM regularization
- cycles of multigrid solver
- relaxation iterations
""")
class DARTELOutputSpec(TraitedSpec):
final_template_file = File(exists=True, desc='final DARTEL template')
template_files = traits.List(File(exists=True), desc='Templates from different stages of iteration')
dartel_flow_fields = traits.List(File(exists=True), desc='DARTEL flow fields')
class DARTEL(SPMCommand):
"""Use spm DARTEL to create a template and flow fields
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=197
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> dartel = spm.DARTEL()
>>> dartel.inputs.image_files = [['rc1s1.nii','rc1s2.nii'],['rc2s1.nii', 'rc2s2.nii']]
>>> dartel.run() # doctest: +SKIP
"""
input_spec = DARTELInputSpec
output_spec = DARTELOutputSpec
_jobtype = 'tools'
_jobname = 'dartel'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['image_files']:
return scans_for_fnames(val, keep4d=True, separate_sessions=True)
elif opt == 'regularization_form':
mapper = {'Linear': 0, 'Membrane': 1, 'Bending': 2}
return mapper[val]
elif opt == 'iteration_parameters':
params = []
for param in val:
new_param = {}
new_param['its'] = param[0]
new_param['rparam'] = list(param[1])
new_param['K'] = param[2]
new_param['slam'] = param[3]
params.append(new_param)
return params
elif opt == 'optimization_parameters':
new_param = {}
new_param['lmreg'] = val[0]
new_param['cyc'] = val[1]
new_param['its'] = val[2]
return [new_param]
else:
return super(DARTEL, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['template_files'] = []
for i in range(6):
outputs['template_files'].append(os.path.realpath('%s_%d.nii' % (self.inputs.template_prefix, i+1)))
outputs['final_template_file'] = os.path.realpath('%s_6.nii' % self.inputs.template_prefix)
outputs['dartel_flow_fields'] = []
for filename in self.inputs.image_files[0]:
pth, base, ext = split_filename(filename)
outputs['dartel_flow_fields'].append(os.path.realpath('u_%s_%s%s' % (base,
self.inputs.template_prefix,
ext)))
return outputs
class DARTELNorm2MNIInputSpec(SPMCommandInputSpec):
template_file = File(exists=True,
desc="DARTEL template",
field='mni_norm.template', copyfile=False, mandatory=True)
flowfield_files = InputMultiPath(File(exists=True),
desc="DARTEL flow fields u_rc1*",
field='mni_norm.data.subjs.flowfields',
mandatory=True)
apply_to_files = InputMultiPath(File(exists=True),
desc="Files to apply the transform to",
field='mni_norm.data.subjs.images',
mandatory=True, copyfile=False)
voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
desc="Voxel sizes for output file",
field='mni_norm.vox')
bounding_box = traits.Tuple(traits.Float, traits.Float, traits.Float,
traits.Float, traits.Float, traits.Float,
desc="Voxel sizes for output file",
field='mni_norm.bb')
modulate = traits.Bool(field='mni_norm.preserve',
desc="Modulate out images - no modulation preserves concentrations")
fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3),
traits.Float(), field='mni_norm.fwhm',
desc='3-list of fwhm for each dimension')
class DARTELNorm2MNIOutputSpec(TraitedSpec):
normalized_files = OutputMultiPath(File(exists=True), desc='Normalized files in MNI space')
normalization_parameter_file = File(exists=True, desc='Transform parameters to MNI space')
class DARTELNorm2MNI(SPMCommand):
"""Use spm DARTEL to normalize data to MNI space
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=200
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> nm = spm.DARTELNorm2MNI()
>>> nm.inputs.template_file = 'Template_6.nii'
>>> nm.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s3_Template.nii']
>>> nm.inputs.apply_to_files = ['c1s1.nii', 'c1s3.nii']
>>> nm.inputs.modulate = True
>>> nm.run() # doctest: +SKIP
"""
input_spec = DARTELNorm2MNIInputSpec
output_spec = DARTELNorm2MNIOutputSpec
_jobtype = 'tools'
_jobname = 'dartel'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['template_file']:
return np.array([val], dtype=object)
elif opt in ['flowfield_files']:
return scans_for_fnames(val, keep4d=True)
elif opt in ['apply_to_files']:
return scans_for_fnames(val, keep4d=True, separate_sessions=True)
elif opt == 'voxel_size':
return list(val)
elif opt == 'bounding_box':
return list(val)
elif opt == 'fwhm':
if isinstance(val, list):
return val
else:
return [val, val, val]
else:
return super(DARTELNorm2MNI, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
pth, base, ext = split_filename(self.inputs.template_file)
outputs['normalization_parameter_file'] = os.path.realpath(base+'_2mni.mat')
outputs['normalized_files'] = []
prefix = "w"
if isdefined(self.inputs.modulate) and self.inputs.modulate:
prefix = 'm' + prefix
if not isdefined(self.inputs.fwhm) or self.inputs.fwhm > 0:
prefix = 's' + prefix
for filename in self.inputs.apply_to_files:
pth, base, ext = split_filename(filename)
outputs['normalized_files'].append(os.path.realpath('%s%s%s' % (prefix,
base,
ext)))
return outputs
class CreateWarpedInputSpec(SPMCommandInputSpec):
image_files = InputMultiPath(File(exists=True),
desc="A list of files to be warped",
field='crt_warped.images', copyfile=False,
mandatory=True)
flowfield_files = InputMultiPath(File(exists=True),
desc="DARTEL flow fields u_rc1*",
field='crt_warped.flowfields',
copyfile=False,
mandatory=True)
iterations = traits.Range(low=0, high=9,
desc=("The number of iterations: log2(number of "
"time steps)"),
field='crt_warped.K')
interp = traits.Range(low=0, high=7, field='crt_warped.interp',
desc='degree of b-spline used for interpolation')
modulate = traits.Bool(field='crt_warped.jactransf',
desc="Modulate images")
class CreateWarpedOutputSpec(TraitedSpec):
warped_files = traits.List(File(exists=True, desc='final warped files'))
class CreateWarped(SPMCommand):
"""Apply a flow field estimated by DARTEL to create warped images
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=202
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> create_warped = spm.CreateWarped()
>>> create_warped.inputs.image_files = ['rc1s1.nii', 'rc1s2.nii']
>>> create_warped.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s2_Template.nii']
>>> create_warped.run() # doctest: +SKIP
"""
input_spec = CreateWarpedInputSpec
output_spec = CreateWarpedOutputSpec
_jobtype = 'tools'
_jobname = 'dartel'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['image_files']:
return scans_for_fnames(val, keep4d=True,
separate_sessions=True)
if opt in ['flowfield_files']:
return scans_for_fnames(val, keep4d=True)
else:
return super(CreateWarped, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['warped_files'] = []
for filename in self.inputs.image_files:
pth, base, ext = split_filename(filename)
if isdefined(self.inputs.modulate) and self.inputs.modulate:
outputs['warped_files'].append(os.path.realpath('mw%s%s' % (base,
ext)))
else:
outputs['warped_files'].append(os.path.realpath('w%s%s' % (base,
ext)))
return outputs
class ApplyDeformationFieldInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(File(exists=True), mandatory=True, field='fnames')
deformation_field = File(exists=True, mandatory=True, field='comp{1}.def')
reference_volume = File(exists=True, mandatory=True,
field='comp{2}.id.space')
interp = traits.Range(low=0, high=7, field='interp',
desc='degree of b-spline used for interpolation')
class ApplyDeformationFieldOutputSpec(TraitedSpec):
out_files = OutputMultiPath(File(exists=True))
class ApplyDeformations(SPMCommand):
input_spec = ApplyDeformationFieldInputSpec
output_spec = ApplyDeformationFieldOutputSpec
_jobtype = 'util'
_jobname = 'defs'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['deformation_field', 'reference_volume']:
val = [val]
if opt in ['deformation_field']:
return scans_for_fnames(val, keep4d=True, separate_sessions=False)
if opt in ['in_files', 'reference_volume']:
return scans_for_fnames(val, keep4d=False, separate_sessions=False)
else:
return super(ApplyDeformations, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_files'] = []
for filename in self.inputs.in_files:
_, fname = os.path.split(filename)
outputs['out_files'].append(os.path.realpath('w%s' % fname))
return outputs
class VBMSegmentInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(
File(exists=True),
desc="A list of files to be segmented",
field='estwrite.data', copyfile=False, mandatory=True)
tissues = File(
exists=True, field='estwrite.tpm',
desc='tissue probability map')
gaussians_per_class = traits.Tuple(
(2, 2, 2, 3, 4, 2), *([traits.Int()]*6),
usedefault=True,
desc='number of gaussians for each tissue class')
bias_regularization = traits.Enum(
0.0001,
(0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10),
field='estwrite.opts.biasreg', usedefault=True,
desc='no(0) - extremely heavy (10)')
bias_fwhm = traits.Enum(
60,
(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 'Inf'),
field='estwrite.opts.biasfwhm',
usedefault=True,
desc='FWHM of Gaussian smoothness of bias')
sampling_distance = traits.Float(
3, usedefault=True, field='estwrite.opts.samp',
desc='Sampling distance on data for parameter estimation')
warping_regularization = traits.Float(
4, usedefault=True, field='estwrite.opts.warpreg',
desc='Controls balance between parameters and data')
spatial_normalization = traits.Enum(
'high', 'low', usedefault=True,)
dartel_template = File(
exists=True,
field='estwrite.extopts.dartelwarp.normhigh.darteltpm')
use_sanlm_denoising_filter = traits.Range(
0, 2, 2, usedefault=True, field='estwrite.extopts.sanlm',
desc="0=No denoising, 1=denoising,2=denoising multi-threaded")
mrf_weighting = traits.Float(
0.15, usedefault=True, field='estwrite.extopts.mrf')
cleanup_partitions = traits.Int(
1, usedefault=True, field='estwrite.extopts.cleanup',
desc="0=None,1=light,2=thorough")
display_results = traits.Bool(
True, usedefault=True, field='estwrite.extopts.print')
gm_native = traits.Bool(
False, usedefault=True, field='estwrite.output.GM.native',)
gm_normalized = traits.Bool(
False, usedefault=True, field='estwrite.output.GM.warped',)
gm_modulated_normalized = traits.Range(
0, 2, 2, usedefault=True, field='estwrite.output.GM.modulated',
desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
gm_dartel = traits.Range(
0, 2, 0, usedefault=True, field='estwrite.output.GM.dartel',
desc="0=None,1=rigid(SPM8 default),2=affine")
wm_native = traits.Bool(
False, usedefault=True, field='estwrite.output.WM.native',)
wm_normalized = traits.Bool(
False, usedefault=True, field='estwrite.output.WM.warped',)
wm_modulated_normalized = traits.Range(
0, 2, 2, usedefault=True, field='estwrite.output.WM.modulated',
desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
wm_dartel = traits.Range(
0, 2, 0, usedefault=True, field='estwrite.output.WM.dartel',
desc="0=None,1=rigid(SPM8 default),2=affine")
csf_native = traits.Bool(
False, usedefault=True, field='estwrite.output.CSF.native',)
csf_normalized = traits.Bool(
False, usedefault=True, field='estwrite.output.CSF.warped',)
csf_modulated_normalized = traits.Range(
0, 2, 2, usedefault=True, field='estwrite.output.CSF.modulated',
desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
csf_dartel = traits.Range(
0, 2, 0, usedefault=True, field='estwrite.output.CSF.dartel',
desc="0=None,1=rigid(SPM8 default),2=affine")
bias_corrected_native = traits.Bool(
False, usedefault=True, field='estwrite.output.bias.native',)
bias_corrected_normalized = traits.Bool(
True, usedefault=True, field='estwrite.output.bias.warped',)
bias_corrected_affine = traits.Bool(
False, usedefault=True, field='estwrite.output.bias.affine',)
pve_label_native = traits.Bool(
False, usedefault=True, field='estwrite.output.label.native')
pve_label_normalized = traits.Bool(
False, usedefault=True, field='estwrite.output.label.warped')
pve_label_dartel = traits.Range(
0, 2, 0, usedefault=True, field='estwrite.output.label.dartel',
desc="0=None,1=rigid(SPM8 default),2=affine")
jacobian_determinant = traits.Bool(
False, usedefault=True, field='estwrite.jacobian.warped')
deformation_field = traits.Tuple(
(0, 0), traits.Bool, traits.Bool, usedefault=True,
field='estwrite.output.warps',
desc='forward and inverse field')
class VBMSegmentOuputSpec(TraitedSpec):
native_class_images = traits.List(traits.List(File(exists=True)),
desc='native space probability maps')
dartel_input_images = traits.List(traits.List(File(exists=True)),
desc='dartel imported class images')
normalized_class_images = traits.List(traits.List(File(exists=True)),
desc='normalized class images')
modulated_class_images = traits.List(traits.List(File(exists=True)),
desc='modulated+normalized class images')
transformation_mat = OutputMultiPath(File(exists=True),
desc='Normalization transformation')
bias_corrected_images = OutputMultiPath(
File(exists=True),
desc='bias corrected images')
normalized_bias_corrected_images = OutputMultiPath(
File(exists=True),
desc='bias corrected images')
pve_label_native_images = OutputMultiPath(File(exists=True))
pve_label_normalized_images = OutputMultiPath(File(exists=True))
pve_label_registered_images = OutputMultiPath(File(exists=True))
forward_deformation_field = OutputMultiPath(File(exists=True))
inverse_deformation_field = OutputMultiPath(File(exists=True))
jacobian_determinant_images = OutputMultiPath(File(exists=True))
class VBMSegment(SPMCommand):
"""Use VBM8 toolbox to separate structural images into different
tissue classes.
Example
-------
>>> import nipype.interfaces.spm as spm
>>> seg = spm.VBMSegment()
>>> seg.inputs.tissues = 'TPM.nii'
>>> seg.inputs.dartel_template = 'Template_1_IXI550_MNI152.nii'
>>> seg.inputs.bias_corrected_native = True
>>> seg.inputs.gm_native = True
>>> seg.inputs.wm_native = True
>>> seg.inputs.csf_native = True
>>> seg.inputs.pve_label_native = True
>>> seg.inputs.deformation_field = (True, False)
>>> seg.run() # doctest: +SKIP
"""
input_spec = VBMSegmentInputSpec
output_spec = VBMSegmentOuputSpec
_jobtype = 'tools'
_jobname = 'vbm8'
def _list_outputs(self):
outputs = self._outputs().get()
do_dartel = self.inputs.spatial_normalization
dartel_px = ''
if do_dartel:
dartel_px = 'r'
outputs['native_class_images'] = [[], [], []]
outputs['dartel_input_images'] = [[], [], []]
outputs['normalized_class_images'] = [[], [], []]
outputs['modulated_class_images'] = [[], [], []]
outputs['transformation_mat'] = []
outputs['bias_corrected_images'] = []
outputs['normalized_bias_corrected_images'] = []
outputs['inverse_deformation_field'] = []
outputs['forward_deformation_field'] = []
outputs['jacobian_determinant_images'] = []
outputs['pve_label_native_images'] = []
outputs['pve_label_normalized_images'] = []
outputs['pve_label_registered_images'] = []
for filename in self.inputs.in_files:
pth, base, ext = split_filename(filename)
outputs['transformation_mat'].append(
os.path.join(pth, "%s_seg8.mat" % base))
for i, tis in enumerate(['gm', 'wm', 'csf']):
# native space
if getattr(self.inputs, '%s_native' % tis):
outputs['native_class_images'][i].append(
os.path.join(pth, "p%d%s.nii" % (i+1, base)))
if getattr(self.inputs, '%s_dartel' % tis) == 1:
outputs['dartel_input_images'][i].append(
os.path.join(pth, "rp%d%s.nii" % (i+1, base)))
elif getattr(self.inputs, '%s_dartel' % tis) == 2:
outputs['dartel_input_images'][i].append(
os.path.join(pth, "rp%d%s_affine.nii" % (i+1, base)))
# normalized space
if getattr(self.inputs, '%s_normalized' % tis):
outputs['normalized_class_images'][i].append(
os.path.join(pth, "w%sp%d%s.nii" % (dartel_px, i+1, base)))
if getattr(self.inputs, '%s_modulated_normalized' % tis) == 1:
outputs['modulated_class_images'][i].append(os.path.join(
pth, "mw%sp%d%s.nii" % (dartel_px, i+1, base)))
elif getattr(self.inputs, '%s_modulated_normalized' % tis) == 2:
                    outputs['modulated_class_images'][i].append(os.path.join(
pth, "m0w%sp%d%s.nii" % (dartel_px, i+1, base)))
if self.inputs.pve_label_native:
outputs['pve_label_native_images'].append(
os.path.join(pth, "p0%s.nii" % (base)))
if self.inputs.pve_label_normalized:
outputs['pve_label_normalized_images'].append(
os.path.join(pth, "w%sp0%s.nii" % (dartel_px, base)))
if self.inputs.pve_label_dartel == 1:
outputs['pve_label_registered_images'].append(
os.path.join(pth, "rp0%s.nii" % (base)))
elif self.inputs.pve_label_dartel == 2:
outputs['pve_label_registered_images'].append(
os.path.join(pth, "rp0%s_affine.nii" % (base)))
if self.inputs.bias_corrected_native:
outputs['bias_corrected_images'].append(
os.path.join(pth, "m%s.nii" % (base)))
if self.inputs.bias_corrected_normalized:
outputs['normalized_bias_corrected_images'].append(
os.path.join(pth, "wm%s%s.nii" % (dartel_px, base)))
if self.inputs.deformation_field[0]:
outputs['forward_deformation_field'].append(
os.path.join(pth, "y_%s%s.nii" % (dartel_px, base)))
if self.inputs.deformation_field[1]:
outputs['inverse_deformation_field'].append(
os.path.join(pth, "iy_%s%s.nii" % (dartel_px, base)))
if self.inputs.jacobian_determinant and do_dartel:
outputs['jacobian_determinant_images'].append(
os.path.join(pth, "jac_wrp1%s.nii" % (base)))
return outputs
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['in_files']:
return scans_for_fnames(val, keep4d=True)
elif opt in ['spatial_normalization']:
if val == 'low':
return {'normlow': []}
elif opt in ['dartel_template']:
return np.array([val], dtype=object)
elif opt in ['deformation_field']:
return super(VBMSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
else:
return super(VBMSegment, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
if self.inputs.spatial_normalization == 'low':
einputs = super(VBMSegment, self)._parse_inputs(
skip=('spatial_normalization', 'dartel_template'))
einputs[0]['estwrite']['extopts']['dartelwarp'] = {'normlow': 1}
return einputs
else:
            return super(VBMSegment, self)._parse_inputs(skip=('spatial_normalization',))
|
dmordom/nipype
|
nipype/interfaces/spm/preprocess.py
|
Python
|
bsd-3-clause
| 68,149
|
[
"Gaussian"
] |
c61af248891a3323b0cd2bf3b409c544f3ca6b6f5297a3f07f3f20b618097642
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.nao.m_siesta_ev2ha import siesta_ev2ha
def siesta_eig(label='siesta'):
f = open(label+'.EIG', 'r')
f.seek(0)
Fermi_energy_eV = float(f.readline())
Fermi_energy_Ha = Fermi_energy_eV * siesta_ev2ha
f.close()
return Fermi_energy_Ha
|
gkc1000/pyscf
|
pyscf/nao/m_siesta_eig.py
|
Python
|
apache-2.0
| 872
|
[
"PySCF",
"SIESTA"
] |
61ddd51535d5d7d32f701cca7b3d8cba91a9fbcf0f79624aacc71fa2f87d9a72
|
#!/usr/bin/python3.4
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of Web browser user agent strings.
Culled from actual Apache log files.
"""
# The key is a simplified name you use in scripts. The value is the
# actual user agent string.
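# Example usage (a sketch; the import path is inferred from this file's location
# in the source tree and may differ in an installed package):
#
#     from pycopia.http.useragents import USER_AGENTS, get_useragents
#     print(get_useragents())                       # available keyword names
#     headers = {"User-Agent": USER_AGENTS["firefox15_w"]}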
USER_AGENTS = {
'atw_crawl': 'FAST-WebCrawler/3.6 (atw-crawler at fast dot no; http://fast.no/support/crawler.asp)',
'becomebot': 'Mozilla/5.0 (compatible; BecomeBot/3.0; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'cern': 'CERN-LineMode/2.15 libwww/2.17b3',
'chrome_mac': "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10",
'dfind': 'DFind',
'epiphany16': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.13) Gecko/20060418 Epiphany/1.6.1 (Ubuntu) (Ubuntu package 1.0.8)',
'firefox10_fed': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20060202 Fedora/1.0.7-1.2.fc4 Firefox/1.0.7',
'firefox10_ldeb': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.10) Gecko/20060424 Firefox/1.0.4 (Debian package 1.0.4-2sarge6)',
'firefox10_lmand': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.10) Gecko/20050921 Firefox/1.0.7 Mandriva/1.0.6-15mdk (2006.0)',
'firefox10_w_de': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.10) Gecko/20050717 Firefox/1.0.6',
'firefox10_w': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
'firefox15_ldeb': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.3) Gecko/20060326 Firefox/1.5.0.3 (Debian-1.5.dfsg+1.5.0.3-2)',
'firefox15_l': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.2) Gecko/20060308 Firefox/1.5.0.2',
'firefox15_lpango': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.2) Gecko/20060419 Fedora/1.5.0.2-1.2.fc5 Firefox/1.5.0.2 pango-text',
'firefox15_w_fr': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.8.0.3) Gecko/20060426 Firefox/1.5.0.3',
'firefox15_w_gb': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.0.3) Gecko/20060426 Firefox/1.5.0.3',
'firefox15_w_goog': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.3; Google-TR-3) Gecko/20060426 Firefox/1.5.0.3',
'firefox15_w_it': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.0.3) Gecko/20060426 Firefox/1.5.0.3',
'firefox15_w': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.3) Gecko/20060426 Firefox/1.5.0.3',
'firefox15_w_ru': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.8.0.3) Gecko/20060426 Firefox/1.5.0.3',
'firefox_36_64': "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101203 Gentoo Firefox/3.6.12",
'galeon1': 'Mozilla/5.0 Galeon/1.2.1 (X11; Linux i686; U;) Gecko/0',
'gecko': 'TinyBrowser/2.0 (TinyBrowser Comment) Gecko/20201231',
'gigabot': 'Gigabot/2.0; http://www.gigablast.com/spider.html',
'googlebot': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'jeeves': 'Mozilla/2.0 (compatible; Ask Jeeves/Teoma; +http://sp.ask.com/docs/about/tech_crawling.html)',
'konqueror1': 'Mozilla/5.0 (compatible; Konqueror/3.0; i686 Linux; 20020919)',
'konqueror2': 'Mozilla/5.0 (compatible; Konqueror/3.1-rc4; i686 Linux; 20020504)',
'konqueror3': 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux; i686; en_US) KHTML/3.5.2 (like Gecko) (Debian)',
'konqueror': 'Mozilla/5.0 (compatible; Konqueror/3.0.0-10; Linux)',
'links': 'Links (2.0; Linux 2.4.18-6mdkenterprise i686; 80x36)',
'lwp': 'lwp-trivial/1.41',
'lynx': 'Lynx/2.8.5dev.3 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.6c',
'motorola': 'MOT-V600/0B.09.3AR MIB/2.2 Profile/MIDP-2.0 Configuration/CLDC-1.0',
'mozilla10': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.0) Gecko/00200203',
'mozilla5_irix': 'Mozilla/5.25 Netscape/5.0 (X11; U; IRIX 6.3 IP32)',
'mozilla5': 'Mozilla/5.0',
'mozilla5_w': 'Mozilla/5.001 (windows; U; NT4.0; en-us) Gecko/25250101',
'mozilla9': 'Mozilla/9.876 (X11; U; Linux 2.2.12-20 i686, en) Gecko/25250101 Netscape/5.432b1 (C-MindSpring)',
'mozilla_mac': 'Mozilla/4.72 (Macintosh; I; PPC)',
'mozilla_w_de': 'Mozilla/5.0 (Windows; U; Win 9x 4.90; de-AT; rv:1.7.8) Gecko/20050511',
'mscontrol': 'Microsoft URL Control - 6.00.8862',
'msie4_w95': 'Mozilla/4.0 (compatible; MSIE 4.01; Windows 95)',
'msie501_nt': 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)',
'msie501_w98': 'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98)',
'msie50_nt': 'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT 5.0)',
'msie50_w98': 'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; DigExt)',
'msie51_mac': 'Mozilla/4.0 (compatible; MSIE 5.14; Mac_PowerPC)',
'msie55_nt': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; T312461)',
'msie5_w2k': 'Mozilla/5.0 (compatible; MSIE 5.01; Win2000)',
'msie6_2': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312469)',
'msie6_98': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98)',
'msie6': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
'msie6_net2_sp1': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)',
'msie6_net_infopath': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; T312461; .NET CLR 1.1.4322; InfoPath.1)',
'msie6_net': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'msie6_net_mp': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; MathPlayer 2.0; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
'msie6_net_sp1_2': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50215)',
'msie6_net_sp1_maxthon': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Maxthon; .NET CLR 1.1.4322)',
'msie6_net_sp1': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
'msie6_net_sp1_naver': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Naver Desktop Search)',
'msie6_net_ypc2': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; YPC 3.2.0; FunWebProducts; .NET CLR 1.0.3705; yplus 5.1.02b)',
'msie6_net_ypc': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; YPC 3.2.0; FunWebProducts; .NET CLR 1.0.3705)',
'msie6_nt': 'Mozilla/4.0 (compatible; MSIE 6.01; Windows NT 5.0)',
'msie6_sp1': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'msie6_w98_crazy': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Crazy Browser 1.0.5)',
'msie6_w98': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Win 9x 4.90)',
'msnbot': 'msnbot/1.0 (+http://search.msn.com/msnbot.htm)',
'mz5mac_ja': 'Mozilla/5.001 (Macintosh; N; PPC; ja) Gecko/25250101 MegaCorpBrowser/1.0 (MegaCorp, Inc.)',
'netscape4_en': 'Mozilla/4.76 [en] (X11; U; Linux 2.4.17 i686)',
'netscape6_w': 'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; VaioUSSum01) Gecko/20010131 Netscape6/6.01',
'netscape8_w': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20060127 Netscape/8.1',
'nextgen1': 'NextGenSearchBot 1 (for information visit http://www.zoominfo.com/NextGenSearchBot)',
'opera6_l': 'Opera/6.0 (Linux 2.4.18-6mdk i686; U) [en]',
'opera6_w': 'Opera/6.04 (Windows XP; U) [en]',
'opera7_w': 'Opera/7.0 (Windows NT 5.1; U) [en]',
'python': 'Python/2.4 (X11; U; Linux i686; en-US)',
'safari_intel': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en) AppleWebKit/418 (KHTML, like Gecko) Safari/417.9.3',
'safari_ppc': 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/418 (KHTML, like Gecko) Safari/417.9.2',
'slurp': 'Mozilla/3.0 (Slurp/si; slurp@inktomi.com; http://www.inktomi.com/slurp.html)',
'slysearch': 'SlySearch/1.3 (http://www.slysearch.com)',
'surveybot': 'SurveyBot/2.3 (Whois Source)',
'syntryx': 'Syntryx ANT Scout Chassis Pheromone; Mozilla/4.0 compatible crawler',
'wget_rh': 'Wget/1.10.2 (Red Hat modified)',
'wget': 'Wget/1.10.2',
'yahoo': 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'zyborg': 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
}
def get_useragents():
"""Return a list of possible user-agent keywords."""
return sorted(USER_AGENTS.keys())
|
kdart/pycopia3
|
net/pycopia/http/useragents.py
|
Python
|
apache-2.0
| 8,690
|
[
"VisIt"
] |
448dc1a016675746decaa131b921c1ca06cdb1effb790e32e40e00bddea0aee0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class Cyclohexane(MEoS):
"""Multiparameter equation of state for cyclohexane"""
name = "cyclohexane"
CASNumber = "110-82-7"
formula = "cyclo(CH2)6"
synonym = ""
rhoc = unidades.Density(271.33016352)
Tc = unidades.Temperature(553.6)
Pc = unidades.Pressure(4080.5, "kPa")
M = 84.15948 # g/mol
Tt = unidades.Temperature(279.47)
Tb = unidades.Temperature(353.865)
f_acent = 0.2096
momentoDipolar = unidades.DipoleMoment(0.3, "Debye")
id = 38
_Tr = unidades.Temperature(526.231121)
_rhor = unidades.Density(274.647526)
_w = 0.221837522
Fi1 = {"ao_log": [1, 3],
"pow": [0, 1],
"ao_pow": [0.9891140602, 1.6359660572],
"ao_exp": [0.83775, 16.036, 24.636, 7.1715],
"titao": [773/Tc, 941/Tc, 2185/Tc, 4495/Tc],
"ao_hyp": [], "hyp": []}
CP1 = {"ao": 9.3683272,
"an": [-0.56214088e8, 0.15261554e-1, -0.36352468e-5],
"pow": [-3, 1, 2],
"ao_exp": [.23766589e2],
"exp": [2000],
"ao_hyp": [], "hyp": []}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for cyclohexane of Zhou et al. (2014)",
"__doi__": {"autor": "Zhou, Y., Jun Liu, J., Penoncello, S.G., Lemmon, E.W.",
"title": "An Equation of State for the Thermodynamic Properties of Cyclohexane",
"ref": "J. Phys. Chem. Ref. Data 43, 043105 (2014)",
"doi": "10.1063/1.4900538"},
"__test__": """
>>> st=Cyclohexane(T=300, rhom=9.4)
>>> print "%0.1f %0.1f %0.8g %0.8g %0.8g %0.8g %0.8g %0.8g" % (\
st.T, st.rhoM, st.P.MPa, st.cvM.JmolK, st.cpM.JmolK, st.w.ms, st.hM.Jmol, st.sM.JmolK)
300.0 9.4 24.173705 115.286 154.76956 1383.3878 -8400.0834 -28.889069
>>> st=Cyclohexane(T=500, rhom=6.5)
>>> print "%0.1f %0.1f %0.8g %0.8g %0.8g %0.8g %0.8g %0.8g" % (\
st.T, st.rhoM, st.P.MPa, st.cvM.JmolK, st.cpM.JmolK, st.w.ms, st.hM.Jmol, st.sM.JmolK)
500.0 6.5 3.9246630 192.52056 255.57087 434.13064 31070.127 70.891447
>>> st=Cyclohexane(T=500, rhom=0.7)
>>> print "%0.1f %0.1f %0.8g %0.8g %0.8g %0.8g %0.8g %0.8g" % (\
st.T, st.rhoM, st.P.MPa, st.cvM.JmolK, st.cpM.JmolK, st.w.ms, st.hM.Jmol, st.sM.JmolK)
500.0 0.7 1.9981172 191.96446 235.52281 155.348 52757.706 122.92657
>>> st=Cyclohexane(T=600, rhom=3.5)
>>> print "%0.1f %0.1f %0.8g %0.8g %0.8g %0.8g %0.8g %0.8g" % (\
st.T, st.rhoM, st.P.MPa, st.cvM.JmolK, st.cpM.JmolK, st.w.ms, st.hM.Jmol, st.sM.JmolK)
600.0 3.5 6.8225506 232.79222 388.55185 150.53318 70150.132 143.42323
>>> st=Cyclohexane(T=553.6, rhom=3.3)
>>> print "%0.1f %0.1f %0.8g %0.8g %0.8g %0.8g %0.8g %0.8g" % (\
st.T, st.rhoM, st.P.MPa, st.cvM.JmolK, st.cpM.JmolK, st.w.ms, st.hM.Jmol, st.sM.JmolK)
553.6 3.3 4.0805433 224.19555 199224.62 87.913911 58532.604 123.59810
>>> st=Cyclohexane(P=101325, x=0)
>>> print "%0.9g %0.8g %0.8g %0.8g %0.8g %0.8g %0.5f %0.5f" % (\
st.T, st.rhoM, st.P.MPa, st.cvM.JmolK, st.cpM.JmolK, st.w.ms, st.hM.Jmol, st.sM.JmolK)
353.864939 8.5487851 0.101325 134.6163 179.07223 994.05862 0.00000 -0.00000
>>> st=Cyclohexane(P=101325, x=1)
>>> print "%0.9g %0.8g %0.8g %0.8g %0.8g %0.8g %0.8g %0.8g" % (\
st.T, st.rhoM, st.P.MPa, st.cvM.JmolK, st.cpM.JmolK, st.w.ms, st.hM.Jmol, st.sM.JmolK)
353.864939 0.035779032 0.101325 123.4305 133.35895 186.91349 29991.286 84.753484
""", # Table 5, Pag 17
"R": 8.3144621,
"cp": Fi1,
"ref": {"Tref": 300, "Pref": 1., "ho": 23949.01, "so": 104.2926004},
"Tmin": 279.86, "Tmax": 700.0, "Pmax": 250000.0, "rhomax": 10.3,
"Pmin": 5.2402, "rhomin": 9.403,
"nr1": [0.05483581, 1.607734, -2.375928, -0.5137709, 0.1858417],
"d1": [4, 1, 1, 2, 3],
"t1": [1, 0.37, 0.79, 1.075, 0.37],
"nr2": [-0.9007515, -0.5628776, 0.2903717, -0.3279141, -0.03177644],
"d2": [1, 3, 2, 2, 7],
"t2": [2.4, 2.5, 0.5, 3, 1.06],
"c2": [2, 2, 1, 2, 1],
"gamma2": [1]*5,
"nr3": [0.8668676, -0.1962725, -0.1425992, 0.004197016, 0.1776584,
-0.04433903, -0.03861246, 0.07399692, 0.02036006, 0.00272825],
"d3": [1, 1, 3, 3, 2, 2, 3, 2, 3, 2],
"t3": [1.6, 0.37, 1.33, 2.5, 0.9, 0.5, 0.73, 0.2, 1.5, 1.5],
"alfa3": [0.99, 1.43, 0.97, 1.93, 0.92, 1.27, 0.87, 0.82, 1.4, 3],
"beta3": [0.38, 4.2, 1.2, 0.9, 1.2, 2.6, 5.3, 4.4, 4.2, 25],
"gamma3": [0.65, 0.63, 1.14, 0.09, 0.56, 0.4, 1.01, 0.45, 0.85, 0.86],
"epsilon3": [0.73, 0.75, 0.48, 2.32, 0.2, 1.33, 0.68, 1.11, 1.47, 0.99]}
helmholtz2 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for cyclohexane of Penoncello et al. (1995)",
"__doi__": {"autor": "Penoncello, S.G., Goodwin, A.R.H., and Jacobsen, R.T.",
"title": "A Thermodynamic Property Formulation for Cyclohexane",
"ref": "Int. J. Thermophys., 16(2):519-531, 1995.",
"doi": "10.1007/BF01441918"},
"R": 8.31434,
"cp": CP1,
"ref": {"Tref": 279.47, "Pref": 101.325, "ho": 33884.8, "so": 96.612},
"Tt": 279.47, "Tc": 553.64, "rhoc": 3.244, "M": 84.1608,
"Tmin": 279.47, "Tmax": 700.0, "Pmax": 80000.0, "rhomax": 9.77,
"Pmin": 5.2538, "rhomin": 9.4045,
"nr1": [0.8425412659, -0.3138388327e1, 0.1679072631e1, -0.153819249,
0.1984911143, -0.144532594, 0.3746346428e-3, 0.1861479616e-3,
0.1745721652e-3],
"d1": [1, 1, 1, 2, 3, 3, 7, 6, 6],
"t1": [0, 1.5, 2.5, 1.5, 1, 2.5, 2, 0.5, 3],
"nr2": [-0.6427428062, 0.2280757615, -0.1868116802e1, -0.1028243711e1,
0.5821457418, -0.255891152, 0.1276844113e-1, -0.5158613166e-2,
0.6334794755e-1, -0.6014686589e-1, 0.4439056828, -0.6264920642,
0.2132589969e1, -0.3620300991e-2, 0.2534453992,
0.1669144715e-1, 0.3985052291e-2],
"d2": [1, 1, 2, 3, 3, 5, 8, 10, 3, 4, 1, 1, 2, 2, 4, 4, 8],
"t2": [5, 6, 5.5, 3, 7, 6, 6.5, 5.5, 11, 11, 0.5, 1, 4, 4, 1.5, 2, 0.5],
"c2": [2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 3, 3, 2, 6, 2, 4, 2],
"gamma2": [1]*17}
helmholtz3 = {
"__type__": "Helmholtz",
"__name__": "short Helmholtz equation of state for cyclohexane of Span and Wagner (2003)",
"__doi__": {"autor": "Span, R., Wagner, W.",
"title": "Equations of state for technical applications. II. Results for nonpolar fluids.",
"ref": "Int. J. Thermophys. 24 (2003), 41 – 109.",
"doi": "10.1023/A:1022310214958"},
"__test__": """
>>> st=Cyclohexane(T=700, rho=200, eq=2)
>>> print "%0.4f %0.3f %0.4f" % (st.cp0.kJkgK, st.P.MPa, st.cp.kJkgK)
3.0278 9.007 3.5927
>>> st2=Cyclohexane(T=750, rho=100, eq=2)
>>> print "%0.2f %0.5f" % (st2.h.kJkg-st.h.kJkg, st2.s.kJkgK-st.s.kJkgK)
206.82 0.31448
""", # Table III, Pag 46
"R": 8.31451,
"cp": CP1,
"ref": {"Tref": 279.47, "Pref": 101.325, "ho": 33884.8, "so": 96.612},
"Tt": 279.47, "Tc": 553.64, "rhoc": 3.244, "M": 84.1608,
"Tmin": Tt, "Tmax": 600.0, "Pmax": 100000.0, "rhomax": 9.77,
"Pmin": 5.2428, "rhomin":9.3999,
"nr1": [0.10232354e1, -0.29204964e1, 0.10736630e1, -0.19573985,
0.12228111, 0.28943321e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.27231767, -0.4483332e-1, -0.38253334, -0.89835333e-1,
-0.24874965e-1, 0.10836132e-1],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
helmholtz4 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for cyclohexane of Sun and Ely (2004)",
"__doi__": {"autor": "Sun, L. and Ely, J.F.",
"title": "Universal equation of state for engineering application: Algorithm and application to non-polar and polar fluids",
"ref": "Fluid Phase Equilib., 222-223:107-118, 2004.",
"doi": "10.1016/j.fluid.2004.06.028"},
"R": 8.31434,
"cp": CP1,
"ref": {"Tref": 279.47, "Pref": 101.325, "ho": 33884.8, "so": 96.612},
"Tmin": Tt, "Tmax": 620.0, "Pmax": 800000.0, "rhomax": 40.,
"Pmin": 0.1, "rhomin": 40.,
"nr1": [1.27436292, 1.15372124, -3.86726473, 8.84627298e-2,
2.76478090e-4, 7.26682313e-2],
"d1": [1, 1, 1, 3, 7, 2],
"t1": [1.5, 0.25, 1.25, 0.25, 0.875, 1.375],
"nr2": [7.10849914e-2, 4.46376742e-1, 7.64476190e-1, -4.23520282e-2,
-3.96468623e-1, -1.41250071e-2, -1.08371284e-1, -2.50082884e-2],
"d2": [1, 1, 2, 5, 1, 1, 4, 2],
"t2": [0, 2.375, 2., 2.125, 3.5, 6.5, 4.75, 12.5],
"c2": [1, 1, 1, 1, 2, 2, 2, 3],
"gamma2": [1]*8}
eq = helmholtz1, helmholtz2, helmholtz3, helmholtz4
_surface = {"sigma": [0.06485], "exp": [1.263]}
_melting = {"eq": 1, "Tref": 1, "Pref": 700,
"Tmin": Tt, "Tmax": 370.0,
"a1": [0.1329969885, -374.255624], "exp1": [1.41, 0],
"a2": [], "exp2": [], "a3": [], "exp3": []}
_vapor_Pressure = {
"eq": 5,
"ao": [-7.0342, 1.7311, -1.7572, -3.3406],
"exp": [1., 1.5, 2.3, 4.6]}
_liquid_Density = {
"eq": 1,
"ao": [5.5081, -14.486, 38.241, -64.589, 57.919, -20.55],
"exp": [0.51, 0.94, 1.4, 1.9, 2.4, 3.0]}
_vapor_Density = {
"eq": 3,
"ao": [-3.69006, -41.4239, 220.914, -443.72, 491.49, -296.373],
"exp": [0.446, 1.98, 2.75, 3.3, 4.1, 4.8]}
|
edusegzy/pychemqt
|
lib/mEoS/Cyclohexane.py
|
Python
|
gpl-3.0
| 10,322
|
[
"Jmol"
] |
306b67753b21e1acd6578930dd8ae8012b13f464a7051bec01eb6722253effbd
|
"""Class for making a redMaGiC galaxy selection."""
from collections import OrderedDict
import os
import numpy as np
import fitsio
import time
import scipy.optimize
import esutil
import healpy as hp
from ..catalog import Entry, Catalog
from ..galaxy import GalaxyCatalog
from ..configuration import Configuration
from ..volumelimit import VolumeLimitMask, VolumeLimitMaskFixed
from ..redsequence import RedSequenceColorPar
from ..utilities import CubicSpline
class RedmagicSelector(object):
"""
Class to select redMaGiC galaxies.
"""
def __init__(self, conf, vlim_masks=None):
"""
Instantiate a RedmagicSelector
Parameters
----------
        conf: `redmapper.Configuration` or `str`
Configuration object or config filename
vlim_masks: `OrderedDict`, optional
Dictionary of vlim_masks. Will read in if not set.
"""
if not isinstance(conf, Configuration):
self.config = Configuration(conf)
else:
self.config = conf
redmagicfilepath = os.path.dirname(self.config.redmagicfile)
self.calib_data = OrderedDict()
with fitsio.FITS(self.config.redmagicfile) as fits:
# Number of modes is number of binary extentions
self.n_modes = len(fits) - 1
for ext in range(self.n_modes):
data = Entry(fits[ext + 1].read())
try:
name = data.name.decode().rstrip()
except AttributeError:
name = data.name.rstrip()
self.calib_data[name] = data
self.modes = self.calib_data.keys()
self.zredstr = RedSequenceColorPar(self.config.parfile, fine=True)
if vlim_masks is None:
self.vlim_masks = OrderedDict()
for mode in self.modes:
try:
vmaskfile = self.calib_data[mode].vmaskfile.decode().rstrip()
except AttributeError:
vmaskfile = self.calib_data[mode].vmaskfile.rstrip()
if vmaskfile == '':
# There is no vmaskfile, we need to do a fixed area one
self.vlim_masks[mode] = VolumeLimitMaskFixed(self.config)
else:
vmaskfile = os.path.join(redmagicfilepath,
os.path.basename(vmaskfile))
if not os.path.isfile(vmaskfile):
raise RuntimeError("Could not find vmaskfile %s. Must be in same path as redmagic calibration file %s." % (vmaskfile, os.path.abspath(self.config.redmagicfile)))
#if os.path.isfile(vmaskfile):
# vmaskfile = vmaskfile
#elif os.path.isfile(os.path.join(self.config.configpath, vmaskfile)):
# vmaskfile = os.path.join(self.config.configpath, vmaskfile)
#else:
# raise RuntimeError("Could not find vmaskfile %s" % (vmaskfile))
self.vlim_masks[mode] = VolumeLimitMask(self.config,
self.calib_data[mode].etamin,
vlimfile=vmaskfile)
else:
# Check that it's an OrderedDict? Must it be ordered?
self.vlim_masks = vlim_masks
self.spec = None
def select_redmagic_galaxies(self, gals, mode, return_indices=False):
"""
Select redMaGiC galaxies from a galaxy catalog, according to the mode.
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Catalog of galaxies for redMaPPer
mode: `str`
redMaGiC mode to select
return_indices: `bool`, optional
Return the indices of the galaxies selected. Default is False.
Returns
-------
redmagic_catalog: `redmapper.GalaxyCatalog`
Catalog of redMaGiC galaxies
indices: `np.ndarray`
Integer array of selection (if return_indices is True)
"""
# Check if we have to decode mode (py2/py3)
if hasattr(mode, 'decode'):
_mode = mode.decode()
else:
_mode = mode
if _mode not in self.modes:
raise RuntimeError("Requested redMaGiC mode %s not available." % (_mode))
calstr = self.calib_data[_mode]
# Takes in galaxies...
# Which are the possibly red galaxies?
lstar_cushion = calstr.lstar_cushion
z_cushion = calstr.z_cushion
z_buffer = calstr.buffer
mstar_init = self.zredstr.mstar(gals.zred_uncorr)
cut_zrange = [calstr.cost_zrange[0] - z_cushion - z_buffer,
calstr.cost_zrange[1] + z_cushion + z_buffer]
minlstar = np.clip(np.min(calstr.etamin) - lstar_cushion, 0.1, None)
red_poss_mask = ((gals.zred_uncorr > cut_zrange[0]) &
(gals.zred_uncorr < cut_zrange[1]) &
(gals.chisq < calstr.maxchi) &
(gals.refmag < (mstar_init - 2.5*np.log10(minlstar))))
# Creates a new catalog...
zredmagic = np.copy(gals.zred_uncorr)
zredmagic_e = np.copy(gals.zred_uncorr_e)
try:
zredmagic_samp = np.copy(gals.zred_samp)
except (ValueError, AttributeError) as e:
# Sample from zred + zred_e (not optimal, for old catalogs)
zredmagic_samp = np.zeros((zredmagic.size, 1))
zredmagic_samp[:, 0] = np.random.normal(loc=zredmagic,
scale=zredmagic_e,
size=zredmagic.size)
spl = CubicSpline(calstr.nodes, calstr.cmax, fixextrap=True)
chi2max = np.clip(spl(gals.zred_uncorr), 0.1, calstr.maxchi)
if calstr.run_afterburner:
spl = CubicSpline(calstr.corrnodes, calstr.bias, fixextrap=True)
offset = spl(gals.zred_uncorr)
zredmagic -= offset
if calstr.apply_afterburner:
for i in range(zredmagic_samp.shape[1]):
zredmagic_samp[:, i] -= offset
spl = CubicSpline(calstr.corrnodes, calstr.eratio, fixextrap=True)
zredmagic_e *= spl(gals.zred_uncorr)
# Compute mstar
mstar = self.zredstr.mstar(zredmagic)
# Compute the maximum redshift
vmask = self.vlim_masks[_mode]
zmax = vmask.calc_zmax(gals.ra, gals.dec)
# Do the redmagic selection
gd, = np.where((gals.chisq < chi2max) &
(gals.refmag < (mstar - 2.5 * np.log10(calstr.etamin))) &
(zredmagic < zmax) &
(red_poss_mask))
redmagic_catalog = GalaxyCatalog(np.zeros(gd.size, dtype=[('id', 'i8'),
('ra', 'f8'),
('dec', 'f8'),
('refmag', 'f4'),
('refmag_err', 'f4'),
('mag', 'f4', self.config.nmag),
('mag_err', 'f4', self.config.nmag),
('lum', 'f4'),
('zredmagic', 'f4'),
('zredmagic_e', 'f4'),
('zredmagic_samp', 'f4', self.config.zred_nsamp),
('chisq', 'f4'),
('zspec', 'f4')]))
if gd.size == 0:
if return_indices:
return redmagic_catalog, gd
else:
return redmagic_catalog
redmagic_catalog.id = gals.id[gd]
redmagic_catalog.ra = gals.ra[gd]
redmagic_catalog.dec = gals.dec[gd]
redmagic_catalog.refmag = gals.refmag[gd]
redmagic_catalog.refmag_err = gals.refmag_err[gd]
redmagic_catalog.mag[:, :] = gals.mag[gd, :]
redmagic_catalog.mag_err[:, :] = gals.mag_err[gd, :]
redmagic_catalog.zredmagic = zredmagic[gd]
redmagic_catalog.zredmagic_e = zredmagic_e[gd]
redmagic_catalog.zredmagic_samp = zredmagic_samp[gd, :]
redmagic_catalog.chisq = gals.chisq[gd]
# Compute the luminosity
redmagic_catalog.lum = 10.**((mstar[gd] - redmagic_catalog.refmag) / 2.5)
# In the future, add absolute magnitude calculations, but that will
# require some k-corrections.
# Compute the zspec (check this)
if 'ztrue' in gals.dtype.names:
# We have truth zspec
redmagic_catalog.zspec = gals.ztrue[gd]
elif 'zspec' in gals.dtype.names:
# We have already done a zspec match
redmagic_catalog.zspec = gals.zspec[gd]
else:
# We need to do a zspec match here
if self.spec is None:
self.config.logger.info("Reading in spectroscopic information...")
self.spec = GalaxyCatalog.from_fits_file(self.config.specfile)
use, = np.where(self.spec.z_err < 0.001)
self.spec = self.spec[use]
self.config.logger.info("Done reading in spectroscopic information.")
redmagic_catalog.zspec[:] = -1.0
i0, i1, dists = self.spec.match_many(redmagic_catalog.ra, redmagic_catalog.dec, 3./3600., maxmatch=1)
redmagic_catalog.zspec[i0] = self.spec.z[i1]
if return_indices:
return redmagic_catalog, gd
else:
return redmagic_catalog
|
erykoff/redmapper
|
redmapper/redmagic/redmagic_selector.py
|
Python
|
apache-2.0
| 10,043
|
[
"Galaxy"
] |
323595018a156fb9ba22a290eab283c486942508098ed6597a7da089fc583192
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# Introduction: This program is used to batch-run convertGenbank2table.py on GenBank (.gbk) files
# Created by galaxy on 2016/9/8 10:50
import os
my_path = os.getcwd()
gbk_dir = os.path.join(my_path, 'gbk')
batch_lines = []
for root, dirs, files in os.walk(gbk_dir):
for each_file in files:
gbk_path = 'gbk/{0}'.format(each_file)
each_cmd = 'python convertGenbank2table.py -g {0} -v 1'.format(gbk_path)
batch_lines.append(each_cmd)
batch_file = os.path.join(my_path, 'convertGenbank2table.sh')
with open(batch_file, 'w') as f1:
for each_batch_cmd in batch_lines:
f1.write('{0}\n'.format(each_batch_cmd))
convert_cmd = 'sh convertGenbank2table.sh'
os.system(convert_cmd)
|
cvn001/RecentHGT
|
src/convertGenbank2table_InBatch.py
|
Python
|
mit
| 691
|
[
"Galaxy"
] |
43121bc20ba222b0f9f203439e926489129d4199dd8fac9fbabee5dd567feed8
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009-2010 Benny Malengier <benny.malengier@gramps-project.org>
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
ASCII document generator.
"""
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import DOCGEN_OPTIONS
from gramps.gen.errors import ReportError
from gramps.gen.plug.docgen import (BaseDoc, TextDoc,
PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER)
from gramps.gen.plug.menu import NumberOption
from gramps.gen.plug.report import DocOptions
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
LEFT, RIGHT, CENTER = 'LEFT', 'RIGHT', 'CENTER'
#------------------------------------------------------------------------
#
# This routine was written by David Mertz and placed into the public
# domain. It is sample code from his book, "Text Processing in Python"
#
# Modified by Alex Roitman: right-pad with spaces, if right_pad==1;
# return empty string if no text was given
# Another argument: "first" is the first line indent in characters
# _relative_ to the "left" margin. It can be negative!
#
#------------------------------------------------------------------------
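# A small illustration of the behaviour described above (values are a sketch,
# not taken from an actual report run):
#
#     reformat_para("one two three four", left=4, right=14, just=LEFT)
#     # -> "    one two \n    three four"
#
# i.e. words are wrapped so no line extends past column `right`, every line is
# indented by `left` spaces, and a negative `first` would out-dent only the
# first line.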
def reformat_para(para='', left=0, right=72, just=LEFT, right_pad=0, first=0):
if not para.strip():
return "\n"
lines = []
real_left = left+first
alllines = para.split('\n')
for realline in alllines:
words = realline.split()
line = ''
word = 0
end_words = 0
while not end_words:
if not words:
lines.append("\n")
break
if len(words[word]) > right-real_left: # Handle very long words
line = words[word]
word += 1
if word >= len(words):
end_words = 1
else: # Compose line of words
while len(line)+len(words[word]) <= right-real_left:
line += words[word]
word += 1
if word >= len(words):
end_words = 1
break
elif len(line) < right-real_left:
line += ' ' # add a space since there is still room
lines.append(line)
#first line finished, discard first
real_left = left
line = ''
if just == CENTER:
if right_pad:
return '\n'.join(
[' '*(left+first) + ln.center(right-left-first)
for ln in lines[0:1]] +
[' '*left + ln.center(right-left) for ln in lines[1:]]
)
else:
return '\n'.join(
[' '*(left+first) + ln.center(right-left-first).rstrip()
for ln in lines[0:1]] +
[' '*left + ln.center(right-left).rstrip()
for ln in lines[1:]]
)
elif just == RIGHT:
if right_pad:
return '\n'.join([line.rjust(right) for line in lines])
else:
return '\n'.join([line.rjust(right).rstrip() for line in lines])
else: # left justify
if right_pad:
return '\n'.join(
[' '*(left+first) + line.ljust(right-left-first)
for line in lines[0:1]] +
[' '*left + line.ljust(right-left) for line in lines[1:]]
)
else:
return '\n'.join(
[' '*(left+first) + line for line in lines[0:1]] +
[' '*left + line for line in lines[1:]]
)
#------------------------------------------------------------------------
#
# Ascii
#
#------------------------------------------------------------------------
class AsciiDoc(BaseDoc, TextDoc):
"""
ASCII document generator.
"""
def __init__(self, styles, paper_style, options=None):
BaseDoc.__init__(self, styles, paper_style)
self.__note_format = False
self._cpl = 72 # characters per line, in case the options are ignored
if options:
menu = options.menu
self._cpl = menu.get_option_by_name('linechars').get_value()
self.file = None
self.filename = ''
self.text = ''
self.para = None
self.leader = None
self.tbl_style = None
self.in_cell = None
self.ncols = 0
self.cellpars = []
self.cell_lines = []
self.cell_widths = []
self.cellnum = -1
self.maxlines = 0
#--------------------------------------------------------------------
#
# Opens the file, resets the text buffer.
#
#--------------------------------------------------------------------
def open(self, filename):
if filename[-4:] != ".txt":
self.filename = filename + ".txt"
else:
self.filename = filename
try:
self.file = open(self.filename, "w", errors='backslashreplace')
except Exception as msg:
raise ReportError(_("Could not create %s") % self.filename, msg)
self.in_cell = 0
self.text = ""
#--------------------------------------------------------------------
#
# Close the file. Call the app if required.
#
#--------------------------------------------------------------------
def close(self):
self.file.close()
def get_usable_width(self):
"""
Return the usable width of the document in characters.
"""
return self._cpl
#--------------------------------------------------------------------
#
# Force a section page break
#
#--------------------------------------------------------------------
def page_break(self):
self.file.write('\012')
def start_bold(self):
pass
def end_bold(self):
pass
def start_superscript(self):
self.text = self.text + '['
def end_superscript(self):
self.text = self.text + ']'
#--------------------------------------------------------------------
#
# Starts a paragraph.
#
#--------------------------------------------------------------------
def start_paragraph(self, style_name, leader=None):
styles = self.get_style_sheet()
self.para = styles.get_paragraph_style(style_name)
self.leader = leader
#--------------------------------------------------------------------
#
# End a paragraph. First format it to the desired widths.
# If not in table cell, write it immediately. If in the cell,
# add it to the list for this cell after formatting.
#
#--------------------------------------------------------------------
def end_paragraph(self):
if self.para.get_alignment() == PARA_ALIGN_RIGHT:
fmt = RIGHT
elif self.para.get_alignment() == PARA_ALIGN_CENTER:
fmt = CENTER
else:
fmt = LEFT
if self.in_cell:
right = self.cell_widths[self.cellnum]
else:
right = self.get_usable_width()
# Compute indents in characters. Keep first_indent relative!
regular_indent = 0
first_indent = 0
if self.para.get_left_margin():
regular_indent = int(4*self.para.get_left_margin())
if self.para.get_first_indent():
first_indent = int(4*self.para.get_first_indent())
if self.in_cell and self.cellnum < self.ncols - 1:
right_pad = 1
the_pad = ' ' * right
else:
right_pad = 0
the_pad = ''
# Depending on the leader's presence, treat the first line differently
if self.leader:
# If we have a leader then we need to reformat the text
# as if there's no special treatment for the first line.
# Then add leader and eat up the beginning of the first line pad.
# Do not reformat if preformatted notes
if not self.__note_format:
self.leader += ' '
start_at = regular_indent + min(len(self.leader)+first_indent,
0)
this_text = reformat_para(self.text, regular_indent, right, fmt,
right_pad)
this_text = (' ' * (regular_indent+first_indent) +
self.leader +
this_text[start_at:]
)
else:
this_text = self.text
else:
# If no leader then reformat the text according to the first
# line indent, as specified by style.
# Do not reformat if preformatted notes
if not self.__note_format:
this_text = reformat_para(self.text, regular_indent, right, fmt,
right_pad, first_indent)
else:
this_text = ' ' * (regular_indent + first_indent) + self.text
if self.__note_format:
# don't add an extra LF before the_pad if preformatted notes.
if this_text != '\n':
# don't add LF if there is this_text is a LF
this_text += the_pad + '\n'
else:
this_text += '\n' + the_pad + '\n'
if self.in_cell:
self.cellpars[self.cellnum] += this_text
else:
self.file.write(this_text)
self.text = ""
#--------------------------------------------------------------------
#
# Start a table. Grab the table style, and store it.
#
#--------------------------------------------------------------------
def start_table(self, name, style_name):
styles = self.get_style_sheet()
self.tbl_style = styles.get_table_style(style_name)
self.ncols = self.tbl_style.get_columns()
#--------------------------------------------------------------------
#
# End a table. Turn off the self.in_cell flag
#
#--------------------------------------------------------------------
def end_table(self):
self.in_cell = 0
#--------------------------------------------------------------------
#
# Start a row. Initialize lists for cell contents, number of lines,
# and the widths. It is necessary to keep a list of cell contents
# that is to be written after all the cells are defined.
#
#--------------------------------------------------------------------
def start_row(self):
self.cellpars = [''] * self.ncols
self.cell_lines = [0] * self.ncols
self.cell_widths = [0] * self.ncols
self.cellnum = -1
self.maxlines = 0
table_width = (self.get_usable_width() *
self.tbl_style.get_width() / 100.0)
for cell in range(self.ncols):
self.cell_widths[cell] = int(
table_width * self.tbl_style.get_column_width(cell) / 100.0)
#--------------------------------------------------------------------
#
# End a row. Write the cell contents. Write the line of spaces
# if the cell has fewer lines than the maximum number.
#
#--------------------------------------------------------------------
def end_row(self):
self.in_cell = 0
cell_text = [None]*self.ncols
for cell in range(self.ncols):
if self.cell_widths[cell]:
blanks = ' '*self.cell_widths[cell] + '\n'
if self.cell_lines[cell] < self.maxlines:
self.cellpars[cell] += blanks * (
self.maxlines - self.cell_lines[cell]
)
cell_text[cell] = self.cellpars[cell].split('\n')
for line in range(self.maxlines):
for cell in range(self.ncols):
if self.cell_widths[cell]:
self.file.write(cell_text[cell][line])
self.file.write('\n')
#--------------------------------------------------------------------
#
# Start a cell. Set the self.in_cell flag,
# increment the current cell number.
#
#--------------------------------------------------------------------
def start_cell(self, style_name, span=1):
self.in_cell = 1
self.cellnum = self.cellnum + span
span -= 1
while span:
self.cell_widths[self.cellnum] += (
self.cell_widths[self.cellnum-span]
)
self.cell_widths[self.cellnum-span] = 0
span -= 1
#--------------------------------------------------------------------
#
# End a cell. Find out the number of lines in this cell, correct
# the maximum number of lines if necessary.
#
#--------------------------------------------------------------------
def end_cell(self):
self.in_cell = 0
self.cell_lines[self.cellnum] = self.cellpars[self.cellnum].count('\n')
if self.cell_lines[self.cellnum] > self.maxlines:
self.maxlines = self.cell_lines[self.cellnum]
def add_media(self, name, align, w_cm, h_cm, alt='', style_name=None,
crop=None):
this_text = '(photo)'
if self.in_cell:
self.cellpars[self.cellnum] += this_text
else:
self.file.write(this_text)
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the ASCII doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. AsciiDoc prints the html without handling it
links: bool, make the URL in the text clickable (if supported)
"""
if contains_html:
return
text = str(styledtext)
if format:
#Preformatted note, keep all white spaces, tabs, LF's
self.__note_format = True
for line in text.split('\n'):
self.start_paragraph(style_name)
self.write_text(line)
self.end_paragraph()
            # Add an extra empty paragraph after all lines in each preformatted note
self.start_paragraph(style_name)
self.end_paragraph()
self.__note_format = False
else:
for line in text.split('\n\n'):
self.start_paragraph(style_name)
#line = line.replace('\n',' ')
#line = ' '.join(line.split())
self.write_text(line)
self.end_paragraph()
#--------------------------------------------------------------------
#
# Writes text.
#--------------------------------------------------------------------
def write_text(self, text, mark=None, links=False):
self.text = self.text + text
#------------------------------------------------------------------------
#
# AsciiDocOptions class
#
#------------------------------------------------------------------------
class AsciiDocOptions(DocOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
DocOptions.__init__(self, name)
def add_menu_options(self, menu):
"""
Add options to the document menu for the AsciiDoc docgen.
"""
category_name = DOCGEN_OPTIONS
linechars = NumberOption(_('Characters per line'), 72, 20, 9999)
linechars.set_help(_("The number of characters per line"))
menu.add_option(category_name, 'linechars', linechars)
|
beernarrd/gramps
|
gramps/plugins/docgen/asciidoc.py
|
Python
|
gpl-2.0
| 17,303
|
[
"Brian"
] |
bb658db6bbc4a8624650ee26c21d24dff056988fbbaefd21f3eeeb7913411c9a
|
#!/usr/bin/python
"""
Script for parsing Gaussian input files (obtained from the EMSL website for
example) and converting them into PyQuante's (http://pyquante.sourceforge.net/)
format.
It reads a file from the stdin and outputs to the stdout (so you should redirect
it to a file). This file should then be put in the appropriate folder. Read the
README file for more instructions.
This script works for all "normal" orbitals, S, P, D, ... and SP hybrid orbitals.
----------------------------------------------------------------------------
This file is part of atomicCI.
Copyright (c) 2012, Alexandre Lopes
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import fileinput # for reading from the stdin
import re # regular expressions package
# Problems here: detecting if it's an element name or an orbital!!
# Must solve this thing above!! - need a flag
# initialization
basis = {}
comment = re.compile('[*!]{1,}') # so we can search for * and ! characters
atmLine = re.compile('[A-Za-z]{1,}') # so we can search for something containing one letter
orbitLine = re.compile('[A-Za-z]{1,}')
def name2no(element):
# takes an element name (string) and spits out the atomic number
table = {
"H" : 1,
"He": 2,
"Li": 3,
"Be": 4,
"B" : 5,
"C" : 6,
"N" : 7,
"O" : 8,
"F" : 9,
"Ne": 10,
"Na": 11,
"Mg": 12,
"Al": 13,
"Si": 14,
"P": 15,
"S": 16,
"Cl": 17,
"Ar": 18,
"K": 19,
"Ca": 20,
"Sc": 21,
"Ti": 22,
"V": 23,
"Cr": 24,
"Mn": 25,
"Fe": 26,
"Co": 27,
"Ni": 28,
"Cu": 29,
"Zn": 30
}
return table.get(element)
for line in fileinput.input():
# split line string where there are blank spaces
sline = line.split()
if not sline: # in case it's a blank line
continue
if comment.search(line): # in case the line has some * or ! characters
continue
if atmLine.search(line) and len(line.split()) == 2:
# if there is at least one letter and the line has 2 elements
# then we found the element line
element = line.split()[0]
atno = name2no(element) # convert from element name to atomic number
basis[atno] = [] # add key and value to dictionary
# if not, we should have found the orbital letter
elif orbitLine.search(line) and len(line.split()) == 3:
# if there's at least one letter and the line has 3 elements
# then we found the orbital line
sline = line.split()
symbol = sline[0] # the orbital symbol
if symbol == "SP": # then we have found an SP orbital and these are a little bit different...
spflag = 1
nprim = sline[1] # number of primitives - not used
basis[atno].append(("S",[])) # for the S orbital
basis[atno].append(("P",[])) # for the P orbital
else:
spflag = 0
nprim = sline[1] # number of primitives - not used
basis[atno].append((symbol,[]))
# if not, we have a line of exponents and coeff, if spflag = 0 we have a normal orbital
elif spflag == 0:
sline = line.split()
exp = float(sline[0])
coeff = float(sline[1])
basis[atno][-1][1].append((exp,coeff))
# if not, we have a line of exponents and coeff, if spflag = 1 we have an SP orbital
elif spflag == 1:
# this is definitively not finished!
sline = line.split()
exp = float(sline[0])
coeffS = float(sline[1])
coeffP = float(sline[2])
basis[atno][-2][1].append((exp,coeffS))
basis[atno][-1][1].append((exp,coeffP))
print("basis_data = \\")
print(basis)
fileinput.close()
|
aalopes/atomicCI
|
parseGaussian/parseGauss.py
|
Python
|
bsd-3-clause
| 5,423
|
[
"Gaussian"
] |
c5e455def00509ce94cb03f8dc818615c2eefab23e3a9b0486d24ef6fcc34dab
|
"""
Dropbox API Storage object.
"""
import pickle
import os
import tempfile
from shutil import copyfileobj
from .base import BaseStorage, StorageError
from dropbox.rest import ErrorResponse
from django.conf import settings
from dropbox.client import DropboxClient
from dropbox import session
DEFAULT_ACCESS_TYPE = 'app_folder'
MAX_SPOOLED_SIZE = 10 * 1024 * 1024
FILE_SIZE_LIMIT = 145 * 1024 * 1024
################################
# Dropbox Storage Object
################################
class Storage(BaseStorage):
""" Dropbox API Storage. """
name = 'Dropbox'
TOKENS_FILEPATH = getattr(settings, 'DBBACKUP_TOKENS_FILEPATH', None)
DROPBOX_DIRECTORY = getattr(settings, 'DBBACKUP_DROPBOX_DIRECTORY', "/django-dbbackups/")
DROPBOX_DIRECTORY = '/%s/' % DROPBOX_DIRECTORY.strip('/')
DBBACKUP_DROPBOX_APP_KEY = getattr(settings, 'DBBACKUP_DROPBOX_APP_KEY', None)
DBBACKUP_DROPBOX_APP_SECRET = getattr(settings, 'DBBACKUP_DROPBOX_APP_SECRET', None)
DBBACKUP_DROPBOX_ACCESS_TYPE = getattr(settings, 'DBBACKUP_DROPBOX_ACCESS_TYPE', DEFAULT_ACCESS_TYPE)
_request_token = None
_access_token = None
def __init__(self, server_name=None):
self._check_settings()
self.dropbox = self.get_dropbox_client()
BaseStorage.__init__(self)
def _check_settings(self):
""" Check we have all the required settings defined. """
if not self.TOKENS_FILEPATH:
raise StorageError('Dropbox storage requires DBBACKUP_TOKENS_FILEPATH to be defined in settings.')
if not self.DBBACKUP_DROPBOX_APP_KEY:
raise StorageError('%s storage requires DBBACKUP_DROPBOX_APP_KEY to be defined in settings.' % self.name)
if not self.DBBACKUP_DROPBOX_APP_SECRET:
raise StorageError('%s storage requires DBBACKUP_DROPBOX_APP_SECRET to be specified.' % self.name)
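    # Example Django settings (a sketch; key/secret values are placeholders):
    #
    #     DBBACKUP_TOKENS_FILEPATH = '/var/lib/myapp/dropbox_tokens'
    #     DBBACKUP_DROPBOX_APP_KEY = 'your-app-key'
    #     DBBACKUP_DROPBOX_APP_SECRET = 'your-app-secret'
    #     DBBACKUP_DROPBOX_ACCESS_TYPE = 'app_folder'
    #     DBBACKUP_DROPBOX_DIRECTORY = '/django-dbbackups/'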
###################################
# DBBackup Storage Methods
###################################
def backup_dir(self):
return self.DROPBOX_DIRECTORY
def delete_file(self, filepath):
""" Delete the specified filepath. """
files = self.list_directory(raw=True)
to_be_deleted = [x for x in files if os.path.splitext(x)[0] == filepath]
for name in to_be_deleted:
self.run_dropbox_action(self.dropbox.file_delete, name)
def list_directory(self, raw=False):
""" List all stored backups for the specified. """
metadata = self.run_dropbox_action(self.dropbox.metadata, self.DROPBOX_DIRECTORY)
filepaths = [x['path'] for x in metadata['contents'] if not x['is_dir']]
if not raw:
filepaths = [os.path.splitext(x)[0] for x in filepaths]
filepaths = list(set(filepaths))
return sorted(filepaths)
def get_numbered_path(self, path, number):
return "{}.{}".format(path, number)
@staticmethod
def chunked_file(filehandle, chunk_size=FILE_SIZE_LIMIT):
eof = False
while not eof:
with tempfile.SpooledTemporaryFile(max_size=MAX_SPOOLED_SIZE) as t:
chunk_space = chunk_size
while chunk_space > 0:
data = filehandle.read(min(16384, chunk_space))
if not data:
eof = True
break
chunk_space -= len(data)
t.write(data)
if t.tell() > 0:
t.seek(0)
yield t
def write_file(self, filehandle):
""" Write the specified file. """
filehandle.seek(0)
total_files = 0
path = os.path.join(
self.DROPBOX_DIRECTORY,
filehandle.name,
)
for chunk in self.chunked_file(filehandle):
self.run_dropbox_action(
self.dropbox.put_file,
self.get_numbered_path(path, total_files),
chunk,
)
total_files += 1
def read_file(self, filepath):
""" Read the specified file and return it's handle. """
total_files = 0
filehandle = tempfile.SpooledTemporaryFile(max_size=MAX_SPOOLED_SIZE)
try:
while True:
response = self.run_dropbox_action(
self.dropbox.get_file,
self.get_numbered_path(filepath, total_files),
ignore_404=(total_files > 0),
)
if not response:
break
copyfileobj(response, filehandle)
total_files += 1
except:
filehandle.close()
raise
return filehandle
def run_dropbox_action(self, method, *args, **kwargs):
""" Check we have a valid 200 response from Dropbox. """
ignore_404 = kwargs.pop("ignore_404", False)
try:
response = method(*args, **kwargs)
except ErrorResponse, e:
if ignore_404 and e.status == 404:
return None
errmsg = "ERROR %s" % (e,)
raise StorageError(errmsg)
return response
###################################
# Dropbox Client Methods
###################################
def get_dropbox_client(self):
""" Connect and return a Dropbox client object. """
self.read_token_file()
sess = session.DropboxSession(self.DBBACKUP_DROPBOX_APP_KEY,
self.DBBACKUP_DROPBOX_APP_SECRET, self.DBBACKUP_DROPBOX_ACCESS_TYPE)
# Get existing or new access token and use it for this session
access_token = self.get_access_token(sess)
sess.set_token(access_token.key, access_token.secret)
dropbox = DropboxClient(sess)
# Test the connection by making call to get account_info
dropbox.account_info()
return dropbox
def get_request_token(self, sess):
""" Return Request Token. If not available, a new one will be created, saved
and a RequestUrl object will be returned.
"""
if not self._request_token:
return self.create_request_token(sess)
return self._request_token
def create_request_token(self, sess):
""" Return Request Token. If not available, a new one will be created, saved
and a RequestUrl object will be returned.
"""
self._request_token = sess.obtain_request_token()
self.save_token_file()
return self._request_token
def prompt_for_authorization(self, sess, request_token):
""" Generate the authorization url, show it to the user and exit """
message = "Dropbox not authorized, visit the following URL to authorize:\n"
message += sess.build_authorize_url(request_token)
raise StorageError(message)
def get_access_token(self, sess):
""" Return Access Token. If not available, a new one will be created and saved. """
if not self._access_token:
return self.create_access_token(sess)
return self._access_token
def create_access_token(self, sess):
""" Create and save a new access token to self.TOKENFILEPATH. """
request_token = self.get_request_token(sess)
try:
self._access_token = sess.obtain_access_token(request_token)
except ErrorResponse:
# If we get an error, the request token has expired or is not authorized;
# generate a new request token and prompt the user to complete the
# authorization process
request_token = self.create_request_token(sess)
self.prompt_for_authorization(sess, request_token)
# We've got a good access token, save it.
self.save_token_file()
return self._access_token
def save_token_file(self):
""" Save the request and access tokens to disk. """
tokendata = dict(request_token=self._request_token, access_token=self._access_token)
with open(self.TOKENS_FILEPATH, 'wb') as tokenhandle:
pickle.dump(tokendata, tokenhandle, -1)
def read_token_file(self):
""" Reload the request and/or access tokens from disk. """
if os.path.exists(self.TOKENS_FILEPATH):
with open(self.TOKENS_FILEPATH, 'rb') as tokenhandle:
tokendata = pickle.load(tokenhandle)
self._request_token = tokendata.get('request_token')
self._access_token = tokendata.get('access_token')
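# Sketch (added for illustration; not part of the original module): write_file
# and read_file above split one logical backup into sequentially numbered
# Dropbox files via get_numbered_path.  For an assumed 300 MB dump named
# 'db.backup' and the 145 MB FILE_SIZE_LIMIT, the stored objects would be
# db.backup.0, db.backup.1 and db.backup.2.  The helper is never called.
def _example_numbered_paths(total_bytes=300 * 1024 * 1024,
                            chunk_size=FILE_SIZE_LIMIT,
                            name='db.backup'):
    n_chunks = (total_bytes + chunk_size - 1) // chunk_size  # ceiling division
    return ['%s.%d' % (name, i) for i in range(n_chunks)]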
| nimbis/django-dbbackup | dbbackup/storage/dropbox_storage.py | Python | bsd-3-clause | 8,496 | ["VisIt"] | 5251fb73d13afbd95c065f56ba93fb3f0dd91bdc3232b2809d65fc0706cf0aae |
import matplotlib.pyplot as plt
import argparse
import warnings
from orcanet.history import HistoryHandler
from orcanet.utilities.visualization import TrainValPlotter
class Summarizer:
"""
Summarize one or more trainings by giving their orcanet folder(s).
- Plot the training and validation curves in a single plot and show them
- Print info about the best and worst epochs
Attributes
----------
folders : str or List, optional
Path to an orcanet folder, or to multiple folders as a list.
Default: CWD.
metric : str
The metric to plot. Default: loss.
smooth : int, optional
Apply gaussian blur to the train curve with given sigma.
labels : List, optional
Labels for each folder.
noplot : bool
Don't plot the train/val curves [default: False].
width : float
Scaling of the width of the curves and the marker size [default: 1].
"""
def __init__(self, folders,
metric="loss",
smooth=None,
labels=None,
noplot=False,
width=1.):
self.folders = folders
self.metric = metric
self.smooth = smooth
self.labels = labels
self.noplot = noplot
self.width = width
self._tvp = None
def summarize(self):
if not self.noplot:
self._tvp = TrainValPlotter()
min_stats, max_stats = [], []
print("Reading stats of {} trainings...".format(len(self._folders)))
for folder_no in range(len(self._folders)):
try:
min_stat, max_stat = self._summarize_folder(folder_no)
min_stats.append(min_stat)
max_stats.append(max_stat)
except OSError:
warnings.warn("Can not summarize {}, skipping..."
"".format(self._folders[folder_no]))
min_stats.sort()
print("\nMinimum\n-------")
print("{} \t{}\t{}\t{}".format(" ", "Epoch", self._full_metric, "name"))
for i, stat in enumerate(min_stats, 1):
print("{} | \t{}\t{}\t{}".format(i, stat[2], stat[0], stat[1]))
max_stats.sort(reverse=True)
print("\nMaximum\n-------")
print("{} \t{}\t{}\t{}".format(" ", "Epoch", self._full_metric, "name"))
for i, stat in enumerate(max_stats, 1):
print("{} | \t{}\t{}\t{}".format(i, stat[2], stat[0], stat[1]))
if not self.noplot:
self._tvp.apply_layout(x_label="Epoch",
y_label=self._metric_name,
grid=True,
legend=True)
plt.show()
@property
def _metric_name(self):
""" E.g. loss """
if self.metric.startswith("train_"):
metric = self.metric[6:]
elif self.metric.startswith("val_"):
metric = self.metric[4:]
else:
metric = self.metric
return metric
@property
def _full_metric(self):
""" E.g. val_loss """
if not (self.metric.startswith("train_") or
self.metric.startswith("val_")):
full_metric = "val_" + self.metric
else:
full_metric = self.metric
return full_metric
@property
def _folders(self):
""" Get a list of folders. """
if not self.folders:
folders = "./"
else:
folders = self.folders
if isinstance(folders, str):
folders = [folders]
return folders
@property
def _labels(self):
""" Get a list of labels. """
if self.labels is None:
return self._folders
else:
return self.labels
def _summarize_folder(self, folder_no):
label = self._labels[folder_no]
folder = self._folders[folder_no]
if len(self._labels) == 1:
train_label, val_label = "training", "validation"
else:
train_label, val_label = None, label
hist = HistoryHandler(folder)
summary_data = hist.get_summary_data()
full_train_data = hist.get_train_data()
train_data = [full_train_data["Batch_float"],
full_train_data[self._metric_name]]
val_data = [summary_data["Epoch"],
summary_data[self._full_metric]]
smry_met_name = self._full_metric
max_line = hist.get_best_epoch_info(metric=smry_met_name,
mini=False)
min_line = hist.get_best_epoch_info(metric=smry_met_name, mini=True)
min_stat = [min_line[smry_met_name], label, min_line["Epoch"]]
max_stat = [max_line[smry_met_name], label, max_line["Epoch"]]
if not self.noplot:
self._tvp.plot_curves(train_data=train_data,
val_data=val_data,
train_label=train_label,
val_label=val_label,
smooth_sigma=self.smooth,
tlw=0.5*self.width,
vlw=0.5*self.width,
vms=3*self.width**0.5)
return min_stat, max_stat
def summarize_dirs(self):
"""
Get the best and worst epochs of all given folders as a dict.
Returns
-------
minima : dict
Keys : Name of folder.
Values : [Epoch, metric] of where the metric is lowest.
maxima : dict
As above, but for where the metric is highest.
"""
minima, maxima = {}, {}
for folder in self._folders:
hist = HistoryHandler(folder)
smry_met_name = self._full_metric
try:
max_line = hist.get_best_epoch_info(metric=smry_met_name,
mini=False)
min_line = hist.get_best_epoch_info(metric=smry_met_name,
mini=True)
except OSError as e:
warnings.warn(str(e))
continue
minima[folder] = [min_line["Epoch"], min_line[smry_met_name]]
maxima[folder] = [max_line["Epoch"], max_line[smry_met_name]]
return minima, maxima
def main():
parser = argparse.ArgumentParser(
description=str(Summarizer.__doc__),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('folders', type=str, nargs='*')
parser.add_argument('-metric', type=str, nargs="?")
parser.add_argument('-smooth', nargs="?", type=int)
parser.add_argument('-width', nargs="?", type=float)
parser.add_argument('-labels', nargs="*", type=str)
parser.add_argument('-noplot', action="store_true")
args = vars(parser.parse_args())
for key in list(args.keys()):
if args[key] is None:
args.pop(key)
Summarizer(**args).summarize()
if __name__ == '__main__':
main()
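# Usage sketch (added for illustration; the folder paths and labels below are
# assumptions, not taken from the original module).  The helper is defined but
# never called, so the script's behaviour is unchanged.
def _example_compare_two_trainings():
    summarizer = Summarizer(folders=["./output/run1", "./output/run2"],
                            metric="loss",
                            labels=["run 1", "run 2"],
                            noplot=True)
    # Print the best/worst epochs without opening a plot window:
    summarizer.summarize()
    # Or fetch the extrema programmatically:
    minima, maxima = summarizer.summarize_dirs()
    return minima, maxima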
| ViaFerrata/DL_pipeline_TauAppearance | orcanet/utilities/summarize_training.py | Python | agpl-3.0 | 7,103 | ["Gaussian"] | 55eddc47b7d45f303dc081aa7eae463fac20002eb4bb9bed84ca1a9f19f91e46 |
import pandas as pd
import numpy as np
import os
from sys import argv
import copy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import proj3d
from matplotlib.patches import FancyArrowPatch
plot_angle = 0 # plot histogram of maximum angle between NN atom vectors.
class POSCAR:
theta = 90 # surface threshold in degree
ninfile = ''
header = []
lat_vec = []
atom_species = []
atom_numbers = []
total_atom = 0
nb_atoms_list = []
xmin = np.inf
xmax = -np.inf
ymin = np.inf
ymax = -np.inf
zmin = np.inf
zmax = -np.inf
xlen = -1
ylen = -1
zlen = -1
maxveclen = 0
f_radius = 0
data = pd.DataFrame(columns=['xcoord', 'ycoord', 'zcoord', 'atom_num',
'mat_org', 'matnum_org', 'surf_flag', 'num_neighbor',
'nb_vector_x', 'nb_vector_y', 'nb_vector_z', 'nb_vector_sum', 'polar_delta'])
def input_check():
if len(argv) != 2 and len(argv) != 3:
print "\n###\tError!!\t###"
print "#"
print "#\tUSAGE1: > python3 SurfaceExtraction.py vasp_file_name.vasp"
print "#\t ex) python3 SurfaceExtraction.py SiO2.vasp"
print "#\n"
print "#\tUSAGE2: > python3 SurfaceExtraction.py vasp_file_name.vasp surface_threshold_angle_in_degree"
print "# ex) python3 SurfaceExtraction.py SiO2.vasp 60"
exit(1)
if len(argv) == 3:
POSCAR.theta = float(argv[2])
print 'surface threshold angle = ', POSCAR.theta
def readPOSCAR(filename):
POSCAR.surf_th = 360-POSCAR.theta # surface threshold in degree (inside material)
POSCAR.ninfile = argv[1]
line_list = [line.strip() for line in open(POSCAR.ninfile)]
compact_line = [x for x in line_list if x != []]
coordi_start = 0
for line in compact_line:
if len(line.lower()) != 0:
if line.lower()[0] == 'd' or line.lower()[0] == 'c':
coordi_type = line.lower()
coordi_start = compact_line.index(line) + 1
POSCAR.header = line_list[0:coordi_start]
for i in range(3):
POSCAR.lat_vec.append([float(x) for x in compact_line[2+i].split()])
if coordi_start == 8:
POSCAR.atom_species = compact_line[coordi_start - 3].split()
POSCAR.atom_numbers = [int(x) for x in compact_line[coordi_start - 2].split()]
else:
POSCAR.atom_species = compact_line[coordi_start - 4].split()
POSCAR.atom_numbers = [int(x) for x in compact_line[coordi_start - 3].split()]
POSCAR.total_atom = sum(POSCAR.atom_numbers)
scale_factor = float(compact_line[1])
matnum = 0
for i in range(POSCAR.total_atom):
raw_coords = [float(x) for x in compact_line[coordi_start + i].split()]
x_fact = raw_coords[0] * POSCAR.lat_vec[0][0] + raw_coords[1] * POSCAR.lat_vec[1][0] + raw_coords[2] * POSCAR.lat_vec[2][0]
y_fact = raw_coords[0] * POSCAR.lat_vec[0][1] + raw_coords[1] * POSCAR.lat_vec[1][1] + raw_coords[2] * POSCAR.lat_vec[2][1]
z_fact = raw_coords[0] * POSCAR.lat_vec[0][2] + raw_coords[1] * POSCAR.lat_vec[1][2] + raw_coords[2] * POSCAR.lat_vec[2][2]
if coordi_type[0] == 'd':
coords = [x_fact * scale_factor, y_fact * scale_factor, z_fact * scale_factor]
else:
coords = raw_coords
if coords[0] < POSCAR.xmin:
POSCAR.xmin = coords[0]
if coords[0] > POSCAR.xmax:
POSCAR.xmax = coords[0]
if coords[1] < POSCAR.ymin:
POSCAR.ymin = coords[1]
if coords[1] > POSCAR.ymax:
POSCAR.ymax = coords[1]
if coords[2] < POSCAR.zmin:
POSCAR.zmin = coords[2]
if coords[2] > POSCAR.zmax:
POSCAR.zmax = coords[2]
POSCAR.data.at[i, 'xcoord'] = coords[0]
POSCAR.data.at[i, 'ycoord'] = coords[1]
POSCAR.data.at[i, 'zcoord'] = coords[2]
POSCAR.data.at[i, 'atom_num'] = int(i+1)
if i >= sum(POSCAR.atom_numbers[0:matnum+1]):
matnum += 1
POSCAR.data.at[i, 'mat_org'] = POSCAR.atom_species[matnum]
POSCAR.data.at[i, 'matnum_org'] = POSCAR.atom_numbers[matnum]
POSCAR.data.at[i, 'surf_flag'] = 0
POSCAR.xlen = POSCAR.xmax - POSCAR.xmin + 1 # +1 is to avoid atom overlapping
POSCAR.ylen = POSCAR.ymax - POSCAR.ymin + 1
POSCAR.zlen = POSCAR.zmax - POSCAR.zmin + 1
print '\n#\tX range= %.2f ~ %.2f,\tx length= %.2f' %(POSCAR.xmin, POSCAR.xmax, POSCAR.xlen)
print '#\tY range= %.2f ~ %.2f,\ty length= %.2f' %(POSCAR.ymin, POSCAR.ymax, POSCAR.ylen)
print '#\tZ range= %.2f ~ %.2f,\tz length= %.2f' %(POSCAR.zmin, POSCAR.zmax, POSCAR.zlen)
return POSCAR.data
def peakfind(X, Y, X_init, Y_final):
peakind = []
pos = X_init
while X[pos] <= Y_final:
kernal = [pos-3, pos-2, pos-1, pos, pos+1, pos+2, pos+3]
if pos-3 < 0:
kernal[0] = pos+3
if pos-2 < 0:
kernal[1] = pos+2
if pos-1 < 0:
kernal[2] = pos+1
y1 = Y[kernal[0]]
y2 = Y[kernal[1]]
y3 = Y[kernal[2]]
y4 = Y[kernal[3]]
y5 = Y[kernal[4]]
y6 = Y[kernal[5]]
y7 = Y[kernal[6]]
y_1 = [y1, y2, y3, y4, y5, y6, y7]
if (y4 == max(y_1)) and (y4 >= 0.2 * Y[0]):
x_2 = np.arange(pos*2-2, pos*2+3)
y_2 = np.zeros(5)
for i in range(5):
y_2[i] = Y[x_2[i]]
if y_2.max() > 0:
peakind.append(pos)
pos += 1
if len(peakind) < 2:
peakind.append(0)
peakind.append(0)
return peakind
def selfEvaluation(POSCAR = POSCAR):
if os.path.isfile('voro_input_single') is True:
os.remove('voro_input_single')
if os.path.isfile('voro_input_single.vol') is True:
os.remove('voro_input_single.vol')
noutfile = 'voro_input_single'
outfile = open(noutfile, 'w')
for i in range(POSCAR.total_atom):
outfile.write(str(i+1)+'\t'+str(POSCAR.data.xcoord.loc[i])
+'\t'+str(POSCAR.data.ycoord.loc[i])
+'\t'+str(POSCAR.data.zcoord.loc[i])+'\n')
outfile.close()
a = str(np.sqrt(POSCAR.lat_vec[0][0]**2 + POSCAR.lat_vec[0][1]**2 + POSCAR.lat_vec[0][2]**2))
b = str(np.sqrt(POSCAR.lat_vec[1][0]**2 + POSCAR.lat_vec[1][1]**2 + POSCAR.lat_vec[1][2]**2))
c = str(np.sqrt(POSCAR.lat_vec[2][0]**2 + POSCAR.lat_vec[2][1]**2 + POSCAR.lat_vec[2][2]**2))
lat_vec_xmin = min(POSCAR.lat_vec[0][0], POSCAR.lat_vec[1][0], POSCAR.lat_vec[2][0])
lat_vec_ymin = min(POSCAR.lat_vec[0][1], POSCAR.lat_vec[1][1], POSCAR.lat_vec[2][1])
lat_vec_zmin = min(POSCAR.lat_vec[0][2], POSCAR.lat_vec[1][2], POSCAR.lat_vec[2][2])
lat_vec_xmax = max(POSCAR.lat_vec[0][0], POSCAR.lat_vec[1][0], POSCAR.lat_vec[2][0])
lat_vec_ymax = max(POSCAR.lat_vec[0][1], POSCAR.lat_vec[1][1], POSCAR.lat_vec[2][1])
lat_vec_zmax = max(POSCAR.lat_vec[0][2], POSCAR.lat_vec[1][2], POSCAR.lat_vec[2][2])
cmd1 = 'voro++ -c "%i %q %n" '
cmd2 = '-o %s %s %s %s %s %s voro_input_single' \
%(min(POSCAR.xmin, lat_vec_xmin), max(POSCAR.xmax, lat_vec_xmax),
min(POSCAR.ymin, lat_vec_ymin), max(POSCAR.ymax, lat_vec_ymax),
min(POSCAR.zmin, lat_vec_zmin), max(POSCAR.zmax, lat_vec_zmax))
cmd = cmd1 + cmd2
os.system(cmd)
voro_single_list = [line.strip() for line in open('voro_input_single.vol')]
data_single = pd.DataFrame(columns=['xcoord', 'ycoord', 'zcoord', 'atom_num', 'nb_atoms_list'])
voro_single_list_len = len(voro_single_list)
for i in range(voro_single_list_len):
x = voro_single_list[i]
x_split = x.split()
data_single.at[i, 'xcoord'] = float(x_split[1])
data_single.at[i, 'ycoord'] = float(x_split[2])
data_single.at[i, 'zcoord'] = float(x_split[3])
data_single.at[i, 'atom_num'] = int(x_split[0])
data_single.at[i, 'nb_atoms_list'] = []
#print data_single.loc[i]
#print x_split[4:]
data_single.nb_atoms_list[i].append([int(j) for j in x_split[4:]])
vector_list = []
for i in range(voro_single_list_len):
self_position = np.array([data_single.xcoord.loc[i], data_single.ycoord.loc[i], data_single.zcoord.loc[i]])
#print 'i=', i
for k in data_single.nb_atoms_list[i][0]:
if (([i+1, k] not in vector_list) or ([k, i+1] not in vector_list)) and k >= 0:
index = int(data_single[data_single['atom_num'] == k].index[0])
nb_vec_x = data_single.xcoord.loc[index]
nb_vec_y = data_single.ycoord.loc[index]
nb_vec_z = data_single.zcoord.loc[index]
nb_vector = np.array([nb_vec_x, nb_vec_y, nb_vec_z]) - self_position
nb_vector_len = np.linalg.norm(nb_vector)
vector_list.append([i+1, k, nb_vector_len])
if nb_vector_len > POSCAR.maxveclen:
POSCAR.maxveclen = nb_vector_len
print 'threshold vector length =', POSCAR.maxveclen
def strFFT():
### FFT in x, y, z-direction
### x-direction
gridsize = 1e-3
xmin_sc = POSCAR.data.xcoord.min()
xmax_sc = POSCAR.data.xcoord.max()
ymin_sc = POSCAR.data.ycoord.min()
ymax_sc = POSCAR.data.ycoord.max()
zmin_sc = POSCAR.data.zcoord.min()
zmax_sc = POSCAR.data.zcoord.max()
xnum = int((xmax_sc - xmin_sc)/gridsize + 1)
if xnum != 1:
W = np.zeros(xnum)
X = np.arange(xmin_sc, xmax_sc, gridsize)
X = np.append(X, xmax_sc)
for i in range(POSCAR.total_atom*27):
W[int((POSCAR.data.xcoord.loc[i]-xmin_sc)/gridsize)] = 1
plt.plot(X[:xnum], W)
#plt.show()
plt.title('x-directional atom position')
plt.savefig(argv[1][:-5]+'_x.png')
plt.close()
spectrum = np.fft.fft(W)
frequency = np.fft.fftfreq(spectrum.size, d=gridsize)
index = np.where(frequency >= 0.)
clipped_spectrum = gridsize * spectrum[index].real
clipped_frequency = frequency[index]
### peak finding
peakind = peakfind(clipped_frequency, clipped_spectrum, 0, 10)
if clipped_frequency[peakind[1]] == 0:
vec_x = np.sqrt(POSCAR.maxveclen/3)
else:
vec_x = 1/clipped_frequency[peakind[1]]
print 'vec_x=', vec_x
plt.plot(clipped_frequency, clipped_spectrum)
plt.plot(clipped_frequency[peakind], clipped_spectrum[peakind], 'o', color='yellow', alpha=0.5)
plt.xlim(0., 10.)
#plt.show()
plt.title('x-directional fft')
plt.savefig(argv[1][:-5]+'_xfft.png')
plt.close()
else:
vec_x = np.sqrt(POSCAR.maxveclen/3)
### Y-direction
ynum = int((ymax_sc - ymin_sc)/gridsize + 1)
if ynum != 1:
W = np.zeros(ynum)
Y = np.arange(ymin_sc, ymax_sc, gridsize)
Y = np.append(Y, ymax_sc)
for i in range(POSCAR.total_atom*27):
W[int((POSCAR.data.ycoord.loc[i]-ymin_sc)/gridsize)] = 1
plt.plot(Y[:ynum], W)
#plt.show()
plt.title('y-directional atom position')
plt.savefig(argv[1][:-5]+'_y.png')
plt.close()
spectrum = np.fft.fft(W)
frequency = np.fft.fftfreq(spectrum.size, d=gridsize)
index = np.where(frequency >= 0.)
clipped_spectrum = gridsize * spectrum[index].real
clipped_frequency = frequency[index]
### peak finding
peakind = peakfind(clipped_frequency, clipped_spectrum, 0, 10)
if clipped_frequency[peakind[1]] == 0:
vec_y = np.sqrt(POSCAR.maxveclen/3)
else:
vec_y = 1/clipped_frequency[peakind[1]]
print 'vec_y =', vec_y
plt.plot(clipped_frequency, clipped_spectrum)
plt.plot(clipped_frequency[peakind], clipped_spectrum[peakind], 'o', color='yellow', alpha=0.5)
plt.xlim(0., 10.)
#plt.show()
plt.title('y-directional fft')
plt.savefig(argv[1][:-5]+'_yfft.png')
plt.close()
else:
vec_y = np.sqrt(POSCAR.maxveclen/3)
### Z-direction
znum = int((zmax_sc - zmin_sc)/gridsize + 1)
if znum != 1:
W = np.zeros(znum)
Z = np.arange(zmin_sc, zmax_sc, gridsize)
Z = np.append(Z, zmax_sc)
for i in range(POSCAR.total_atom*27):
W[int((POSCAR.data.zcoord.loc[i]-zmin_sc)/gridsize)] = 1
plt.plot(Z[:znum], W)
#plt.show()
plt.title('z-directional atom position')
plt.savefig(argv[1][:-5]+'_z.png')
plt.close()
spectrum = np.fft.fft(W)
frequency = np.fft.fftfreq(spectrum.size, d=gridsize)
index = np.where(frequency >= 0.)
clipped_spectrum = gridsize * spectrum[index].real
clipped_frequency = frequency[index]
### peak finding
peakind = peakfind(clipped_frequency, clipped_spectrum, 0, 10)
if clipped_frequency[peakind[1]] == 0:
vec_z = np.sqrt(POSCAR.maxveclen/3)
else:
vec_z = 1/clipped_frequency[peakind[1]]
print 'vec_z =', vec_z
plt.plot(clipped_frequency, clipped_spectrum)
plt.plot(clipped_frequency[peakind], clipped_spectrum[peakind], 'o', color='yellow', alpha=0.5)
plt.xlim(0., 10.)
#plt.show()
plt.title('z-directional fft')
plt.savefig(argv[1][:-5]+'_zfft.png')
plt.close()
else:
vec_z = np.sqrt(POSCAR.maxveclen/3)
POSCAR.f_radius = np.linalg.norm([vec_x, vec_y, vec_z])
if POSCAR.f_radius == 0:
POSCAR.f_radius = POSCAR.maxveclen
print 'f_radius =', POSCAR.f_radius
def makeSupercell(POSCAR = POSCAR):
# create 26 dummy cells around the original one, in X, Y, Z directions.
tmpdata = copy.deepcopy(POSCAR.data)
supercell = pd.DataFrame()
lattice = np.array(POSCAR.lat_vec)
shift = [-1, 0, 1]
for i in range(3): # x-direction
for j in range(3): # y-direction
for k in range(3): # z-direction
for m in range(POSCAR.data.xcoord.size):
atom_pos = np.array([POSCAR.data.loc[m, 'xcoord'], POSCAR.data.loc[m, 'ycoord'], POSCAR.data.loc[m, 'zcoord']])
supercell_pos = atom_pos + (lattice[0] * shift[i]) + (lattice[1] * shift[j]) + (lattice[2] * shift[k])
tmpdata.at[m, 'xcoord'] = supercell_pos[0]
tmpdata.at[m, 'ycoord'] = supercell_pos[1]
tmpdata.at[m, 'zcoord'] = supercell_pos[2]
supercell = supercell.append(tmpdata, ignore_index=True)
POSCAR.data = copy.deepcopy(supercell)
def runVoro(data = POSCAR):
if os.path.isfile('voro_input') is True:
os.remove('voro_input')
if os.path.isfile('voro_input.vol') is True:
os.remove('voro_input.vol')
noutfile = 'voro_input'
outfile = open(noutfile, 'w')
for i in range(data.total_atom * 27):
outfile.write(str(i+1)+'\t'+str(POSCAR.data.xcoord.loc[i])
+'\t'+str(POSCAR.data.ycoord.loc[i])
+'\t'+str(POSCAR.data.zcoord.loc[i])+'\n')
outfile.close()
a = np.sqrt(data.lat_vec[0][0]**2 + data.lat_vec[0][1]**2 + data.lat_vec[0][2]**2)
b = np.sqrt(data.lat_vec[1][0]**2 + data.lat_vec[1][1]**2 + data.lat_vec[1][2]**2)
c = np.sqrt(data.lat_vec[2][0]**2 + data.lat_vec[2][1]**2 + data.lat_vec[2][2]**2)
cmd1 = 'voro++ -c "%i %q %v %n %m" '
cmd2 = '-o -p %s %s %s %s %s %s voro_input' %(str(-1*a), str(2*a), str(-1*b), str(2*b), str(-1*c), str(2*c))
cmd = cmd1 + cmd2
os.system(cmd)
###
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
def chk_in_plane(i, self_position, vector_list):
#screening vector by magnitude
vector_in_frad = []
for k in vector_list:
nb_vec_x = POSCAR.data.xcoord.loc[k-1]
nb_vec_y = POSCAR.data.ycoord.loc[k-1]
nb_vec_z = POSCAR.data.zcoord.loc[k-1]
nb_vector = np.array([nb_vec_x, nb_vec_y, nb_vec_z]) - self_position
nb_vector_mag = np.linalg.norm(nb_vector)
if nb_vector_mag <= POSCAR.f_radius:
vector_in_frad.append(nb_vector)
vector_frad_len = len(vector_in_frad)
nb_vec = np.zeros(3*vector_frad_len).reshape(3, vector_frad_len)
count = 0
for k in range(vector_frad_len):
nb_vec[0][count] = vector_in_frad[k][0]
nb_vec[1][count] = vector_in_frad[k][1]
nb_vec[2][count] = vector_in_frad[k][2]
count += 1
if count == 0:
pass
else:
mean_x = np.mean(nb_vec[0, :])
mean_y = np.mean(nb_vec[1, :])
mean_z = np.mean(nb_vec[2, :])
mean_vector = np.array([[mean_x], [mean_y], [mean_z]])
if len(nb_vec[0]) > 1:
cov_mat = np.cov([nb_vec[0, :], nb_vec[1, :], nb_vec[2, :]])
eig_val, eig_vec = np.linalg.eig(cov_mat)
#print eig_vec
'''
if i == 13*POSCAR.total_atom+554-1:
#fig = plt.figure(figsize=(7, 7))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax.set_aspect('equal')
ax.plot(nb_vec[0, :], nb_vec[1, :], nb_vec[2, :], 'o', markersize=8, color='green', alpha=0.2)
ax.plot([mean_x], [mean_y], [mean_z], 'o', markersize=10, color='red', alpha=0.5)
color = ['r', 'g', 'b']
index = 0
for v in eig_vec.T:
a = Arrow3D([mean_x, v[0]], [mean_y, v[1]], [mean_z, v[2]], mutation_scale=20, lw=3, arrowstyle="-|>",
color=color[index])
ax.add_artist(a)
index += 1
ax.set_xlabel('x_values')
ax.set_ylabel('y_values')
ax.set_zlabel('z_values')
plt.title('Eigenvectors')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([nb_vec[0, :].max() - nb_vec[0, :].min(), nb_vec[1, :].max() - nb_vec[1, :].min(), nb_vec[2, :].max() - nb_vec[2, :].min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (nb_vec[0, :].max() + nb_vec[0, :].min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (nb_vec[1, :].max() + nb_vec[1, :].min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (nb_vec[2, :].max() + nb_vec[2, :].min())
# Comment or uncomment following both lines to test the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
plt.show()
'''
eig_pairs = [(np.abs(eig_val[ii]), eig_vec[:, ii]) for ii in range(len(eig_val))]
eig_pairs.sort(key=lambda x: x[0], reverse=True)
#for ii in eig_pairs:
# print ii[0]
matrix_w = np.hstack((eig_pairs[0][1].reshape(3, 1), eig_pairs[1][1].reshape(3,1), eig_pairs[2][1].reshape(3, 1)))
transformed = matrix_w.T.dot(nb_vec)
transformed_eigvec = matrix_w.T.dot(eig_vec)
#print(transformed)
polar_min = np.inf
polar_max = -np.inf
for ii in range(vector_frad_len):
r = np.linalg.norm(transformed[:, ii])
polar = (np.arccos(transformed[2][ii]/r) - np.pi/2) * 180/np.pi
if polar > polar_max:
polar_max = polar
if polar < polar_min:
polar_min = polar
#print polar_max, polar_min, polar_max-polar_min
###if i == 13*POSCAR.total_atom+554-1:
### print polar, polar_max, polar_min
polar_delta = polar_max - polar_min
POSCAR.data.at[i, 'polar_delta'] = polar_delta
if polar_delta < POSCAR.theta * 0.5:
POSCAR.data.at[i, 'surf_flag'] = 3
'''
if i == 13*POSCAR.total_atom+554-1:
#fig = plt.figure(figsize=(7, 7))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.pbaspect = [1.0, 1.0, 1.0]
ax.plot(transformed[0, :], transformed[1, :], transformed[2, :], 'o', markersize=8, color='green', alpha=0.2)
ax.plot([mean_x], [mean_y], [mean_z], 'o', markersize=10, color='red', alpha=0.5)
color = ['r', 'g', 'b']
index = 0
for v in transformed_eigvec.T:
a = Arrow3D([mean_x, v[0]], [mean_y, v[1]], [mean_z, v[2]], mutation_scale=20, lw=3, arrowstyle="-|>",
color=color[index])
ax.add_artist(a)
index += 1
ax.set_xlabel('x_values')
ax.set_ylabel('y_values')
ax.set_zlabel('z_values')
plt.title('Eigenvectors')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([transformed[0, :].max() - transformed[0, :].min(), transformed[1, :].max() - transformed[1, :].min(), transformed[2, :].max() - transformed[2, :].min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (transformed[0, :].max() + transformed[0, :].min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (transformed[1, :].max() + transformed[1, :].min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (transformed[2, :].max() + transformed[2, :].min())
# Comment or uncomment following both lines to test the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
plt.show()
'''
def SurfaceExtraction(data = POSCAR):
voro_list = [line.strip() for line in open('voro_input.vol')]
voro_list_len = len(voro_list)
strFFT()
for i in range(voro_list_len):
x = voro_list[i]
data.nb_atoms_list.append([])
data.nb_atoms_list[i].append([int(j) for j in x.split()[5:-1]])
vector_maxinner = []
for i in range(POSCAR.total_atom*13, POSCAR.total_atom*14):
#print '############ atom ',i+1
vector_array = []
self_position = np.array([data.data.xcoord.loc[i], data.data.ycoord.loc[i], data.data.zcoord.loc[i]])
#self_position /= np.linalg.norm(self_position)
nn_list = [] # nearest neighbor list
#1st nearest neighbor
###if POSCAR.data.atom_num.loc[i] == 60:
### print data.nb_atoms_list[i][0]
for k in data.nb_atoms_list[i][0]:
#1st nearest neighbor
nb_vec_x = data.data.xcoord.loc[k-1]
nb_vec_y = data.data.ycoord.loc[k-1]
nb_vec_z = data.data.zcoord.loc[k-1]
nb_vector = np.array([nb_vec_x, nb_vec_y, nb_vec_z]) - self_position
nb_vector_len = np.linalg.norm(nb_vector)
if nb_vector_len <= data.maxveclen:
nn_list.append(k)
nb_vector /= np.linalg.norm(nb_vector)
vector_array.append(nb_vector.tolist())
if nb_vector_len > data.maxveclen:
POSCAR.data.at[i, 'surf_flag'] = 2
#2nd nearest neighbor
for k in data.nb_atoms_list[i][0]:
for m in data.nb_atoms_list[k-1][0]:
if (m not in nn_list) and (i != m-1):
nb_vec_x = data.data.xcoord.loc[m-1]
nb_vec_y = data.data.ycoord.loc[m-1]
nb_vec_z = data.data.zcoord.loc[m-1]
nb_vector = np.array([nb_vec_x, nb_vec_y, nb_vec_z]) - self_position
nb_vector_len = np.linalg.norm(nb_vector)
if nb_vector_len <= data.maxveclen:
nn_list.append(m)
nb_vector /= np.linalg.norm(nb_vector)
vector_array.append(nb_vector.tolist())
### PCA for in-plane check
chk_in_plane(i, self_position, nn_list)
vector_sum = np.sum(np.array(vector_array), axis=0)
vector_sum_mag = np.linalg.norm(vector_sum)
data.data.at[i, 'num_neighbor'] = len(data.nb_atoms_list[i][0])
data.data.at[i, 'nb_vector_x'] = vector_sum[0]
data.data.at[i, 'nb_vector_y'] = vector_sum[1]
data.data.at[i, 'nb_vector_z'] = vector_sum[2]
data.data.at[i, 'nb_vector_sum'] = vector_sum_mag
for ii in np.arange(0, len(vector_array)):
vector_inner = []
maxinner = -np.inf
mininner = np.inf
for jj in np.arange(0, len(vector_array)):
nb_inner = np.inner(vector_array[ii], vector_array[jj])
if nb_inner >1: nb_inner = 1
if nb_inner <-1: nb_inner = -1
if nb_inner > maxinner:
maxinner = nb_inner
if nb_inner < mininner:
mininner = nb_inner
if nb_inner >= np.cos(POSCAR.surf_th/2 * np.pi/180):
vector_inner.append(1)
else:
vector_inner.append(0)
###if POSCAR.data.atom_num.loc[i] == 60:
### print 'ii=', ii, 'jj=', jj, 'nb_inner =', nb_inner
vector_maxinner.append(np.arccos(mininner)*180/np.pi)
if 0 not in vector_inner:
POSCAR.data.at[i, 'surf_flag'] = 1
if plot_angle == 1:
plt.hist(vector_maxinner, bins='auto')
plt.xticks(np.arange(90, 180.1, step=15))
plt.savefig(argv[1][:-5]+'.png')
def writeCSV(input_filename, POSCAR = POSCAR):
input_filename = argv[1]
noutfile = 'Surf_' + input_filename
data_out = POSCAR.data[POSCAR.total_atom*13:POSCAR.total_atom*14]
POSCAR.data.to_csv(noutfile[:-5] + '_supercell.csv', index=False)
data_out.to_csv(noutfile[:-5] + '.csv', index=False)
def writeList(POSCAR = POSCAR):
if os.path.isfile('surfatoms.txt') is True:
os.remove('surfatoms.txt')
noutfile = 'surfatoms.txt'
outfile = open(noutfile, 'w')
count = 0
for i in range(POSCAR.total_atom*13, POSCAR.total_atom*14):
if POSCAR.data.surf_flag.loc[i] > 0:
if count != 0:
outfile.write(',')
outfile.write(str(POSCAR.data.atom_num.loc[i]))
count += 1
outfile.close()
print 'number of surface atoms = ', count
def main():
input_check()
readPOSCAR(argv[1])
selfEvaluation()
makeSupercell()
runVoro()
SurfaceExtraction()
writeCSV(argv[1])
writeList()
if __name__ == '__main__':
main()
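# Sketch (added for illustration; not part of the original script and never
# called): the surface test in chk_in_plane() projects an atom's neighbour
# vectors onto their principal axes and flags the atom as surface when the
# spread of polar angles (polar_delta) stays below theta/2.  The helper below
# reproduces that criterion for an arbitrary (3, N) array of neighbour vectors.
def _example_polar_delta(nb_vec):
    eig_val, eig_vec = np.linalg.eig(np.cov(nb_vec))
    order = np.argsort(eig_val)[::-1]  # largest variance first
    transformed = eig_vec[:, order].T.dot(nb_vec)
    r = np.linalg.norm(transformed, axis=0)
    polar = (np.arccos(transformed[2] / r) - np.pi / 2) * 180 / np.pi
    return polar.max() - polar.min()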
| cwandtj/A2P2 | SurfaceExtraction_PCA_180419.py | Python | mit | 27,865 | ["VASP"] | b0f111e6718550460b545700255f0be6dd5d88a29d2527fb3bac62fc14385a16 |
# Copyright (C) 2013-2017 Paulo V. C. Medeiros
# This file is part of BandUP: Band Unfolding code for Plane-wave based calculations.
#
# BandUP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BandUP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BandUP. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE, STDOUT
import os
import sys
# Imports from within the package
from .constants import BANDUP_BIN, BANDUP_PRE_UNFOLDING_BIN, WORKING_DIR
from .files import (
mkdir,
create_bandup_input,
create_bandup_plot_input,
)
from .plot import make_plot
from .vasp import procar2bandup
from .orbital_contributions import get_unfolded_orb_projs
def run_bandup(args):
#start_dir = os.getcwd()
start_dir = WORKING_DIR
# Running BandUP
os.chdir(args.results_dir)
bandup_run_options = [BANDUP_BIN] + args.argv
with open("out_BandUP.dat", 'w') as f:
bandup_run = Popen(bandup_run_options, stdout=PIPE, stderr=STDOUT)
for line in iter(bandup_run.stdout.readline, ''):
sys.stdout.write(line)
f.write(line)
if(args.orbitals):
get_orbital_projections_and_duals(args)
os.chdir(start_dir)
def run_pre_bandup_tool(args):
start_dir = WORKING_DIR
# Running BandUP pre-unfolding tool
os.chdir(args.inputs_dir)
bandup_pre_unf_run_options = [BANDUP_PRE_UNFOLDING_BIN] + args.argv
with open("out_BandUP_get_SCKPTS_pre_unfolding.dat", 'w') as f:
bandup_pre_unf_run = Popen(bandup_pre_unf_run_options,
stdout=PIPE, stderr=STDOUT)
for line in iter(bandup_pre_unf_run.stdout.readline, ''):
sys.stdout.write(line)
f.write(line)
os.chdir(start_dir)
def run_requested_task(args):
if(args.main_task=='unfold'):
mkdir(args.results_dir, ignore_existing=True)
create_bandup_input(args)
run_bandup(args)
elif(args.main_task=='plot'):
if(args.gui):
from .plot_gui.main_window import open_plot_gui
open_plot_gui()
else:
mkdir(args.plotdir, ignore_existing=True)
create_bandup_plot_input(args)
make_plot(args)
elif(args.main_task=='kpts-sc-get'):
run_pre_bandup_tool(args)
elif(args.main_task=='projected-unfold'):
get_unfolded_orb_projs(args, clip_contributions=True, verbose=True)
else:
print('Task "%s" not available.'%(args.main_task))
def get_orbital_projections_and_duals(args):
if(args.qe or args.castep or args.abinit):
raise ValueError('Orbital projections not yet implemented for current PW code!')
else:
procar2bandup(fpath=os.path.join(args.wavefunc_calc_dir, 'PROCAR'))
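# Sketch (added for illustration; not part of BandUP and never called): the
# run_* helpers above share the same "tee" pattern -- stream a child process'
# stdout to the terminal while also writing it to a log file.  The command and
# log name below are assumptions; universal_newlines keeps the loop text-mode
# under Python 3.
def _example_tee(cmd=('echo', 'hello'), logname='out_example.dat'):
    with open(logname, 'w') as f:
        proc = Popen(list(cmd), stdout=PIPE, stderr=STDOUT,
                     universal_newlines=True)
        for line in iter(proc.stdout.readline, ''):
            sys.stdout.write(line)
            f.write(line)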
| paulovcmedeiros/band_unfolding | src/python_interface/bandupy/runners.py | Python | gpl-3.0 | 3,192 | ["ABINIT", "CASTEP", "VASP"] | e0702954c4085a39910050a498d2f1f923a0541bec671ff0a6c61c577bbacf67 |
# Copyright (c) 2010-12, Pierre-Antoine Delsart, Kurtis Geerlings, Joey Huston,
# Brian Martin, and Christopher Vermilion
#
#----------------------------------------------------------------------
# This file is part of SpartyJet.
#
# SpartyJet is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# SpartyJet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SpartyJet; if not, write to the Free Software
# Foundation, Inc.:
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#----------------------------------------------------------------------
print "WARNING: Importing SpartyJetConfig is a deprecated method of interacting with Python! Do 'import spartyjet' or 'from spartyjet import *' instead!"
from spartyjet import *
| mickypaganini/SSI2016-jet-clustering | spartyjet-4.0.2_mac/python/SpartyJetConfig.py | Python | mit | 1,210 | ["Brian"] | 72641958e78034c8d07695962ad9af0e550e090d4e78922e4d133f2d5fbeadfd |
import os
import numpy as np
from astropy.io import fits
from astropy import wcs
from astropy.convolution import Gaussian2DKernel, convolve_fft, convolve
from skimage import restoration
import matplotlib.pyplot as plt
import blast.util
fwhm = 5.0
mapdir = "/home/wizwit/miscellaneous_projects/carina/carinaData"
mapfile = os.path.join(mapdir, "mopraData/G287_288.-2.0_0.5.13CO.fits")
kernel_size = None
hdu = fits.open(mapfile)
h = hdu[0].header
w = wcs.WCS(h)
if kernel_size is not None:
nx = ny = kernel_size
else:
# number of x pixels
nx = h["NAXIS1"]
print "Number of X pixels:", nx
# number of y pixels
ny = h["NAXIS2"]
print "Number of Y pixels:", ny
nz = h["NAXIS3"]
print "Number of slices:", nz
# distance between pixels
pix = h["CDELT2"] * 3600. # arcseconds
print "Delta Pixel (arcsec):", pix
count = 0
x = np.mgrid[1:h["NAXIS2"]+1, 1:h["NAXIS1"]+1][1]
y = np.mgrid[1:h["NAXIS2"]+1, 1:h["NAXIS1"]+1][0]
# construct gaussian
gauss = blast.util.get_kernel(fwhm, pix_size=pix, map_size=(nx, ny))
smoothed_data = np.zeros_like(hdu[0].data)
while count < nz:
print count
# open FITS file and get map size
new_hdu = fits.PrimaryHDU(hdu[0].data[count], header=w.to_header())
# convolve map by the kernel
print("\t...convolving map")
blast.util.smooth_map(new_hdu, gauss)
smoothed_data[count] = new_hdu.data
count += 1
new_hdu = fits.PrimaryHDU(smoothed_data, header=w.to_header())
outfile = os.path.join('./', "co13_smooth_" + str(fwhm) + "_arcmin.fits")
new_hdu.writeto(outfile)
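# Sketch (added for illustration; not run by this script): the same per-slice
# smoothing could be expressed directly with the astropy objects imported at
# the top.  The FWHM is assumed to be in arcminutes (as the output filename
# suggests) and is converted to a Gaussian sigma in pixels via the CDELT2
# pixel scale.
def _example_smooth_slice(slice2d, fwhm_arcmin, pix_arcsec):
    sigma_pix = fwhm_arcmin * 60.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) / pix_arcsec
    kernel = Gaussian2DKernel(sigma_pix)
    return convolve_fft(slice2d, kernel)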
| sbg2133/miscellaneous_projects | carina/velocityMaps/smoothMopra_13.py | Python | gpl-3.0 | 1,544 | ["BLAST", "Gaussian"] | 40a1313e34e4c8ab3ce2a562ba967c459a446e5b9033153a0a4bb7a6167497bd |
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation includes CLOCK-DRIVEN synapses, for direct
comparison to DynaSim's clock-driven architecture. The synaptic connections
are "high-density", with a 90% probability of connection.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian2_benchmark_COBAHH_clocksyn_hidens_compiled_0128/pbsout/brian2_benchmark_COBAHH_clocksyn_hidens_compiled_0128.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319 in order
to work with version 2 of the Brian simulator (aka Brian2), and also modified
to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
set_device('cpp_standalone')
prefs.codegen.cpp.extra_compile_args = ['-w', '-O3', '-ffast-math', '-march=native']
# Parameters
cells = 128
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# Synaptic strengths
gAMPA = (0.1*msiemens*cmetre**-2)* area
gGABAA = (0.06*msiemens*cmetre**-2)* area
# Synaptic time constants
tauAMPA = 2
tauGABAA = 5
# Synaptic reversal potentials
EAMPA = 1*mV
EGABAA = -80*mV
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
gAMPA/cells*sAMPAtotal*(v-EAMPA)-
gGABAA/cells*sGABAAtotal*(v-EGABAA)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
sAMPAtotal : 1
sGABAAtotal : 1
''')
# Construct intrinsic cells
P = NeuronGroup(cells, model=eqs, method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Construct synaptic network
sAMPA=Synapses(Pe,P,
model='''ds/dt=1000.*5.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - (s)/(2*ms) : 1 (clock-driven)
sAMPAtotal_post = s : 1 (summed)
''')
sAMPA.connect(p=0.90)
sGABAA_RETC=Synapses(Pi,P,
model='''ds/dt=1000.*2.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - s/(5*ms) : 1 (clock-driven)
sGABAAtotal_post = s : 1 (summed)
''')
sGABAA_RETC.connect(p=0.90)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# # If you want to plot:
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# # If you want to save data:
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
| asoplata/dynasim-benchmark-brette-2007 | Brian2/brian2_benchmark_COBAHH_clocksyn_hidens_compiled_0128.py | Python | gpl-3.0 | 3,911 | ["Brian"] | befa0256ea06c3256c04340d6e24e5b1f2ee7d55207fc1060e7c31cf6aec1936 |
# -*- coding: latin1 -*-
from __future__ import print_function
"""
.. currentmodule:: pylayers.antprop.rays
.. autosummary::
:members:
"""
import doctest
import os
import sys
import glob
try:
# from tvtk.api import tvtk
# from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
import pdb
import os
import copy
if sys.version_info.major==2:
import ConfigParser
else:
import configparser
import glob
import doctest
import networkx as nx
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import struct as stru
import pylayers.util.geomutil as geu
import pylayers.util.pyutil as pyu
from pylayers.util.project import *
from pylayers.antprop.interactions import *
from pylayers.antprop.slab import *
from pylayers.antprop.channel import Ctilde
from pylayers.gis.layout import Layout
import pylayers.signal.bsignal as bs
import shapely.geometry as shg
import h5py
import operator
class Rays(PyLayers, dict):
""" Class handling a set of rays
Attributes
----------
pTx : np.array
transmitter (3,)
pRx : np.array
receiver (3,)
B : IntB
B0 : IntB
I : Interactions
I.I : np.array
(f,nI,3,3)
I.T : IntT
I.T.A : np.array
(f,iT,3,3)
I.R : IntR
I.R.A : np.array
(f,iR,3,3)
I.D : IntD
I.D.A : np.array
(f,iD,3,3)
Lfilename : string
Layout name
delays : np.array
ray delays
dis : np.array
ray distance = delays*0.3
nray : int
number of rays
evaluated : boolean
are rays evaluated ?
is3D : boolean
are rays 2d or 3d rays ?
isbased : boolean
locbas has been applied ?
filled : boolean
filled has been applied ?
los : boolean
Line of sight boolean
fGHz : np.array
frequency points for evaluation
origin_sig_name : string
signature file which produces the rays
Notes
-----
The Rays object is obtained from a signature.
It is a container for a set of rays between a source
and a target point defining a radio link.
Once a Rays object has been obtained in 2D, it is transformed
in 3D via the **to3D** method. This method takes two parameters :
the height from floor to ceil, and the number N of
multiple reflections to account for.
Once the 3d rays have been calculated,
the local basis are evaluated along those rays. This is
done through the **locbas** method
Once the local basis have been calculated the different
interactions along rays can be informed via the **fillinter**
method.
Once the interactions are informed the field along rays can
be evaluated via the **eval** method
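A typical call sequence is therefore (sketch only; the argument lists are
indicative assumptions, not taken verbatim from this file) :
r3d = r2d.to3D(L)      # 2D rays -> 3D rays (floor/ceil height, N reflections)
r3d.locbas(L)          # evaluate the local basis along each ray
r3d.fillinter(L)       # inform the interactions from the Layout L
C = r3d.eval(fGHz)     # evaluate the field along the rays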
"""
def __init__(self, pTx, pRx):
""" object constructor
Parameters
----------
pTx : np.array
transmitter coordinates
pRx : np.array
receiver coordinates
"""
self.pTx = pTx
self.pRx = pRx
self.nray = 0
self.nray2D = 0
self.raypt = 0
self.los = False
self.is3D = False
self.isbased = False
self.filled = False
self.evaluated = False
def __len__(self):
Nray = 0
for k in self.keys():
sh = np.shape(self[k]['sig'])
Nray = Nray + sh[2]
return Nray
# def __add__(self,r):
# if (not r.is3D) and (not r.isbased) and (not self.is3D) and (not self.isbased) :
# raise AttributeError('both Ray structures must be 3D and based to be added')
# for ni in r:
# if self.has_key(ni):
# import ipdb
# ipdb.set_trace()
# # check if som rays already exists
# # if so, don't add them
# lur = np.array([])
# for ur in range(self[ni]['pt'].shape[2]):
# udifferent = np.where(np.all(np.all(r[ni]['pt'][...,ur][...,None]!=self[ni]['pt'],axis=0),axis=0))[0]
# lur = np.hstack((lur,udifferent ))
# import ipdb
# ipdb.set_trace()
# self[ni]['pt'] = np.concatenate((self[ni]['pt'],r[ni]['pt']),axis=2)
# self[ni]['sig'] = np.concatenate((self[ni]['sig'],r[ni]['sig']),axis=2)
# self[ni]['si'] = np.concatenate((self[ni]['si'],r[ni]['si']),axis=1)
# self[ni]['rayidx'] = np.concatenate((self[ni]['rayidx'],r[ni]['rayidx']),axis=0)
# self[ni]['dis'] = np.concatenate((self[ni]['dis'],r[ni]['dis']),axis=0)
# self[ni]['vsi'] = np.concatenate((self[ni]['vsi'],r[ni]['vsi']),axis=1)
# self[ni]['nbrays'] += 1
# if ni != 0:
# self[ni]['BiN'] = np.concatenate((self[ni]['BiN'],r[ni]['BiN']),axis=2)
# self[ni]['Bi'] = np.concatenate((self[ni]['Bi'],r[ni]['Bi']),axis=3)
# self[ni]['Bo'] = np.concatenate((self[ni]['Bo'],r[ni]['Bo']),axis=3)
# self[ni]['Bo0'] = np.concatenate((self[ni]['Bo0'],r[ni]['Bo0']),axis=2)
# self[ni]['scpr'] = np.concatenate((self[ni]['scpr'],r[ni]['scpr']),axis=1)
# self[ni]['norm'] = np.concatenate((self[ni]['norm'],r[ni]['norm']),axis=2)
# self[ni]['B'] = np.concatenate((self[ni]['B'],r[ni]['B']),axis=3)
# self[ni]['aod'] = np.concatenate((self[ni]['aod'],r[ni]['aod']),axis=1)
# self[ni]['aoa'] = np.concatenate((self[ni]['aoa'],r[ni]['aoa']),axis=1)
# self[ni]['theta'] = np.concatenate((self[ni]['theta'],r[ni]['theta']),axis=1)
# if r[ni].has_key('diffidx'):
# if self[ni].has_key('diffidx'):
# self[ni]['diffidx'] = np.concatenate((self[ni]['diffidx'],r[ni]['diffidx']))
# self[ni]['diffvect'] = np.concatenate((self[ni]['diffvect'],r[ni]['diffvect']),axis=1)
# self[ni]['diffslabs'].append(r[ni]['diffslabs'])
# else:
# self[ni]['diffidx'] = r['diffidx']
# self[ni]['diffvect'] = r['diffvect']
# self[ni]['diffslabs'] = r['diffslabs']
# else:
# self[ni]=r[ni]
def __repr__(self):
s = ''
ni = 0
nl = 0
lgi = list(self.keys())
lgi.sort()
if self.is3D:
s = self.__class__.__name__ + '3D\n' + '----------'+'\n'
for k in lgi:
r = self[k]['rayidx']
nr = len(r)
s = s + str(k)+' / '+str(nr)+ ' : '+str(r)+'\n'
ni = ni + nr*k
nl = nl + nr*(2*k+1)
nray2D = self.nray2D
else:
s = self.__class__.__name__ + '2D\n' + '----------'+'\n'
nray2D = len(self)
if self.los:
s = s + "LOS "
if self.isbased:
s = s + "based "
if self.filled:
s = s + "filled "
s = s + '\n'
s = s + 'N2Drays : '+ str(nray2D) + '\n'
if hasattr(self,'nb_origin_sig'):
s = s + 'from '+ str(self.nb_origin_sig) + ' signatures\n'
s = s + '#Rays/#Sig: '+ str(nray2D/(1.*self.nb_origin_sig) )
s = s + '\npTx : '+ str(self.pTx) + '\npRx : ' + str(self.pRx)+'\n'
if not self.is3D:
ray_cpt = 0
for k in lgi:
#sk = np.shape(self[k]['sig'])[2]
s = s + str(k) + ':\n'
sig = self[k]['sig'][0,:]
sha0 = sig.shape[0]
sha1 = sig.shape[1]
#pdb.set_trace()
for l in np.arange(sha1):
s = s + ' '+str(ray_cpt)+':'
ray_cpt +=1
for n in np.arange(sha0):
s = s + ' '+str(sig[n,l])
s = s+'\n'
#pdb.set_trace()
#s = s + str(sk) + 'rays with' + str(k) + ' interactions'
return(s)
def saveh5(self,idx=0):
""" save rays in hdf5 format
Parameters
----------
idx : int
See Also
--------
loadh5
"""
filename = self.filename+'_'+str(idx)
filenameh5=pyu.getlong(filename+'.h5',pstruc['DIRR3D'])
# try/except to avoid losing the h5 file if
# read/write error
try:
f=h5py.File(filenameh5,'w')
# keys not saved as attribute of h5py file
notattr = ['I','B','B0','delays','dis']
for a in self.__dict__.keys():
if a not in notattr:
f.attrs[a]=getattr(self,a)
for k in self.keys():
f.create_group(str(k))
for kk in self[k].keys():
if kk == 'sig2d':
# Need to find an efficient way to save the signatures
# 2d which have created the rays
pass
elif kk == 'nbrays':
f[str(k)].create_dataset(kk,shape=(1,),data=np.array([self[k][kk]]))
else:
f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=self[k][kk])
f.close()
except:
f.close()
raise NameError('Rays: issue when writing h5py file')
print(filenameh5)
def loadh5(self,filename=[],idx=0):
""" load rays hdf5 format
Parameters
----------
idx : int
"""
if filename == []:
filenameh5 = self.filename+'_'+str(idx)+'.h5'
else :
filenameh5 = filename
filename=pyu.getlong(filenameh5,pstruc['DIRR3D'])
print(filename)
# try/except to avoid losing the h5 file if
# read/write error
try:
f = h5py.File(filename,'r')
for k in f.keys():
self.update({eval(k):{}})
for kk in f[k].keys():
self[eval(k)].update({kk:f[k][str(kk)][:]})
for a,va in f.attrs.items():
setattr(self,a,va)
f.close()
except:
f.close()
raise NameError('Rays: issue when reading h5py file')
# fill if save was filled
# temporary solution in order to avoid
# creating save for Interactions classes
if self.filled:
#Lname = self.Lfilename
Lname = '_'.join(self.filename.split('_')[0:-1]) + '.lay'
#Lname = self.filename.split('_')[0] + '.lay'
L=Layout(Lname)
self.fillinter(L)
if self.evaluated:
return self.val(self.fGHz)
def _saveh5(self,filenameh5,grpname):
""" Save rays h5py format compliant with Links Class
Parameters
----------
filenameh5 : string
filename of the h5py file (from Links Class)
grpname : string
groupname of the h5py file (from Links Class)
See Also
--------
pylayers.simul.links
"""
filenameh5=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# try/except to avoid losing the h5 file if
# read/write error
#try:
fh5=h5py.File(filenameh5,'a')
if self.is3D:
if not grpname in fh5['ray'].keys():
fh5['ray'].create_group(grpname)
else :
print('ray/'+grpname+' already exists in '+filenameh5)
f = fh5['ray/'+grpname]
else:
if not grpname in fh5['ray2'].keys():
fh5['ray2'].create_group(grpname)
else :
print('ray2/'+grpname+' already exists in '+filenameh5)
f = fh5['ray2/'+grpname]
# keys not saved as attribute of h5py file
notattr = ['I','B','B0','dis']
for a in self.__dict__.keys():
if a not in notattr:
if type(a)==str:
a.encode('utf-8')
if a=='_luw':
la = [ x.encode('utf8') for x in getattr(self,a) ]
f.attrs[a] = la
else:
f.attrs[a] = getattr(self,a)
for k in self.keys():
f.create_group(str(k))
for kk in self[k].keys():
if kk == 'sig2d':
# Need to find an efficient way to save the signatures
# 2d which have created the rays
pass
elif kk == 'nbrays':
f[str(k)].create_dataset(kk,shape=(1,),data=np.array([self[k][kk]]))
else:
if kk=='diffslabs':
ldiffslabs = [ x.encode('utf8') for x in self[k][kk] ]
f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=ldiffslabs)
else:
f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=self[k][kk])
fh5.close()
#except:
# fh5.close()
# raise NameError('Rays: issue when writting h5py file')
def _loadh5(self,filenameh5,grpname,**kwargs):
""" load rays h5py format compliant with Links Class
Parameters
----------
filenameh5 : string
filename of the h5py file (from Links Class)
grpname : string
groupname of the h5py file (from Links Class)
kwargs may contain a L: layout object
if L = [] the layout is loaded from the layout name stored
into the h5 file
if L = Layout the layout passed in arg is used
See Also
--------
pylayers.simul.links
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# try/except to avoid losing the h5 file if
# read/write error
try:
fh5=h5py.File(filename,'r')
if self.is3D:
argfile = 'ray/'+grpname
else:
argfile = 'ray2/'+grpname
f = fh5[argfile]
for k in f.keys():
self.update({eval(k):{}})
for kk in f[k].keys():
self[eval(k)].update({kk:f[k][str(kk)][:]})
for a,va in f.attrs.items():
setattr(self,a,va)
fh5.close()
except:
fh5.close()
raise NameError('Rays: issue when reading h5py file')
# fill if save was filled
# temporary solution in order to avoid
# creating save for Interactions classes
if self.filled:
if 'L' in kwargs:
self.L=kwargs['L']
else:
self.L = Layout(self.Lfilename,bbuild=True)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
# L=Layout(self.Lfilename,bbuild=True)
self.fillinter(self.L)
# if self.evaluated:
# return self.eval(self.fGHz)
def reciprocal(self):
""" switch tx and rx
"""
r = Rays(self.pRx,self.pTx)
r.is3D = self.is3D
r.nray = self.nray
r.origin_sig_name = self.origin_sig_name
r.nb_origin_sig = self.nb_origin_sig
for k in self:
r[k]={}
r[k]['pt']=self[k]['pt'][:,::-1,:]
r[k]['sig']=self[k]['sig'][:,::-1,:]
return(r)
def check_reciprocity(self,r):
""" check ray reciprocity in comparing two reciprocal rays
Parameters
----------
r : rays reciprocal to self
"""
# permutation of all termination points
assert (self.pTx==r.pRx).all()
assert (self.pRx==r.pTx).all()
# for all group of interctions
for k in self:
# same distances
assert (np.allclose(self[k]['dis'],r[k]['dis']))
# same points when reading from right to left
assert (np.allclose(self[k]['pt'],r[k]['pt'][:,::-1,:]))
# same signature reading from right to left
assert (np.allclose(self[k]['sig'],r[k]['sig'][:,::-1,:]))
# if local basis have been evaluated
if (self.isbased) & (r.isbased):
#assert (np.allclose(self[k]['nstrwall'],r[k]['nstrwall'][:,::-1,:]))
assert (np.allclose(self[k]['norm'],r[k]['norm'][:,::-1,:])), 'interaction block:' + str(k)
#assert ((np.mod(self[k]['aoa']-r[k]['aod'],2*np.pi)==0).all())
#assert ((np.mod(self[k]['aod']-r[k]['aoa'],2*np.pi)==0).all())
# 1st output basis is equal to last input basis of the reciprocal ray
assert (np.allclose(self[k]['Bo0'],r[k]['BiN'])), 'interaction block:' + str(k)
# last input basis is equal to 1st output basis of the reciprocal ray
assert (np.allclose(self[k]['BiN'],r[k]['Bo0'])), 'interaction block:' + str(k)
# vsi vectors are inversed
assert (np.allclose(self[k]['vsi'],-r[k]['vsi'][:,::-1,:])), 'interaction block:' + str(k)
assert (np.allclose(abs(self[k]['scpr']),abs(r[k]['scpr'][::-1,:]))), 'interaction block:' + str(k)
assert (np.allclose(self[k]['theta'],r[k]['theta'][::-1,:])), 'interaction block:' + str(k)
assert (np.allclose(self[k]['Bi'],r[k]['Bo'][:,:,::-1,:])), 'interaction block:' + str(k)
assert (np.allclose(self[k]['Bo'],r[k]['Bi'][:,:,::-1,:])), 'interaction block:' + str(k)
assert (np.allclose(self[k]['B'],r[k]['B'][:,:,::-1,:].swapaxes(0,1))), 'interaction block:' + str(k)
if self.evaluated :
for ir in range(self.nray):
iint1 = self.ray(ir)
iint2 = r.ray(ir)
# check Interactions
A1 = self.I.I[:, iint1, :, :]
A2 = r.I.I[:, iint2, :, :][:,::-1,:,:]
assert np.allclose(A1,A2),pdb.set_trace()
# check bases
# ray 1 : B0 | B[0] | B[1] | B[2] | B[3] | B[4]
# ray 2 : B[4] | B[3] | B[2] | B[1] | B[0] | B0
assert np.allclose(self.B0.data[ir,:,:],r.B.data[iint2,:,:][-1,:,:].swapaxes(1,0))
assert np.allclose(r.B0.data[ir,:,:],self.B.data[iint1,:,:][-1,:,:].swapaxes(1,0))
assert np.allclose(self.B.data[iint1,:,:][:-1],r.B.data[iint2,:,:][:-1][::-1,:,:].swapaxes(2,1))
def sort(self):
""" sort rays
TODO : not finished
"""
u = np.argsort(self.dis)
def rayfromtyp_order(self,nD=[1],nR=[1],nT=[1],llo='&&'):
"""
Return the indices of rays having a given number of interactions of each
type (D|R|T).
The two logic operators in llo ('op0op1') combine the three conditions as
nD <op0> nR <op1> nT
Parameters
----------
nD = list|int
requested number of Diffraction
nR = list|int
requested number of Reflection
nT = list|int
requested number of Transmission
llo = list logic operator [op0,op1]
nD <op0> nR <op1> nT
Returns
-------
lr : list
list of ray index matching the typ & order conditions
"""
if not isinstance(nD,list):
nD=[nD]
if not isinstance(nR,list):
nR=[nR]
if not isinstance(nT,list):
nT=[nT]
op = {'and':operator.and_,
'or':operator.or_,
'&':operator.and_,
'|':operator.or_,
}
lr=[]
for ur,r in enumerate(range(self.nray)):
li = self.ray2ityp(r)
nRli = li.count('R')
nTli = li.count('T')
nDli = li.count('D')
cD = (nDli in nD)
cR = (nRli in nR)
cT = (nTli in nT)
# if (nDli in nD) and (nRli in nR) and (nTli in nT) :
if op[llo[1].lower()]( op[llo[0].lower()](cD,cR) , cT):
lr.append(r)
elif (self.los) and (1 in nT ) and (0 in nD) and (0 in nR) and (ur == 0):
lr.append(r)
return lr
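# Example (sketch): select single-reflection rays, excluding any
# transmission or diffraction, from an existing Rays object r3d
#
#   lr = r3d.rayfromtyp_order(nD=[0], nR=[1], nT=[0], llo='&&')
#
# lr is then a list of ray indices usable with extract() or show(rlist=lr).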
def extract_typ_order(self,L,nD=[1],nR=[1],nT=[1],llo='&&'):
""" Extract group of rays from a certain type (R|T|D)
at a order ( <=> given number of interaction)
list logic operator : llo [op0,op1]
nD <op0> nR <op1> nT
Parameters
----------
L : Layout
nD = list|int
requested number of Diffraction
nR = list|int
requested number of Reflection
nT = list|int
requested number of Transmission
llo = list logic operator [op0,op1]
nD <op0> nR <op1> nT
Returns
-------
R : Rays object
New Rays object containing rays matching
the typ/order conditions
"""
lr = self.rayfromtyp_order(nD=nD,nR=nR,nT=nT,llo=llo)
return self.extract(lr,L)
def extract(self,lnr,L):
""" Extract a group of rays
Parameters
----------
lnr : list of rays indexes
L : Layout
"""
if not isinstance(lnr,list):
lnr=[lnr]
r = Rays(self.pTx,self.pRx)
r.is3D = self.is3D
for unr,nr in enumerate(lnr):
#r.nray2D =
#r.nb_origin_sig = 1
ni = self.ray2nbi(nr)
ur = np.where(self[ni]['rayidx']==nr)[0][0]
if ni == 0:
los = True
else:
los = False
if 'D' in self.typ(nr):
diff=True
else:
diff=False
if 'diffvect' in self[ni]:
# check if the ray has diffraction interaction
inter = self.ray2iidx(nr)[:,0]
uD = np.where([i in inter for i in self[ni]['diffidx']])[0]
else:
uD=[]
diffkey = ['diffvect','diffidx','diffslabs']
cray = {}
for k in self[ni].keys():
if ni ==0:
cray = self[ni]
break
elif k not in ['nbrays','rayidx','dis','nstrwall','nstrswall']:
tab = self[ni][k]
if type(tab)==np.ndarray and k not in diffkey:
try:
cray[k] = tab[...,ur][...,np.newaxis]
except:
import ipdb
ipdb.set_trace()
if diff :
if k in diffkey :
if k != 'diffslabs':
cray[k]=tab[...,uD][...,np.newaxis]
else:
if len(uD)>0 :
cray[k]=[tab[uD]]
else:
cray[k]=[]
cray['nbrays'] = unr+1 # running count of extracted rays
r.nray = unr+1
#cray['rayidx']=np.array([self[ni]['rayidx'][nr]]) # ray index in the whole structure
cray['rayidx'] = np.array([unr])
cray['dis'] = np.array([self[ni]['dis'][ur]])
if ni in r:
# R[ni]['sig2d'].append(self[k]['sig2d'][ur])
if not los :
r[ni]['BiN'] = np.concatenate((r[ni]['BiN'],cray['BiN']),axis=2)
r[ni]['Bo'] = np.concatenate((r[ni]['Bo'],cray['Bo']),axis=3)
r[ni]['Bi'] = np.concatenate((r[ni]['Bi'],cray['Bi']),axis=3)
if diff:
if 'diffidx' in r[ni]:
r[ni]['diffidx'] = np.concatenate((r[ni]['diffidx'],cray['diffidx']))
r[ni]['diffvect'] = np.concatenate((r[ni]['diffvect'],cray['diffvect']),axis=1)
r[ni]['diffslabs'].append(cray['diffslabs'])
else:
r[ni]['diffidx'] = cray['diffidx']
r[ni]['diffvect'] = cray['diffvect']
r[ni]['diffslabs'] = cray['diffslabs']
r[ni]['nbrays'] += 1
r[ni]['B'] = np.concatenate((r[ni]['B'], cray['B']), axis=3)
r[ni]['pt'] = np.concatenate((r[ni]['pt'], cray['pt']), axis=2)
r[ni]['rayidx'] = np.concatenate((r[ni]['rayidx'], cray['rayidx']), axis=0)
r[ni]['Bo0'] = np.concatenate((r[ni]['Bo0'],cray['Bo0']), axis=2)
r[ni]['scpr'] = np.concatenate((r[ni]['scpr'], cray['scpr']), axis=1)
r[ni]['aod'] = np.concatenate((r[ni]['aod'], cray['aod']), axis=1)
r[ni]['si'] = np.concatenate((r[ni]['si'], cray['si']), axis=1)
r[ni]['sig'] = np.concatenate((r[ni]['sig'], cray['sig']), axis=2)
# r[ni]['sig2d'] = np.concatenate((r[ni]['sig2d'],cray['sig2d']),axis=2)
r[ni]['aoa'] = np.concatenate((r[ni]['aoa'], cray['aoa']), axis=1)
r[ni]['vsi'] = np.concatenate((r[ni]['vsi'], cray['vsi']), axis=2)
r[ni]['theta'] = np.concatenate((r[ni]['theta'], cray['theta']), axis=1)
r[ni]['norm'] = np.concatenate((r[ni]['norm'], cray['norm']), axis=2)
r[ni]['dis'] = np.concatenate((r[ni]['dis'], cray['dis']), axis=0)
else:
r[ni] = cray
# r[ni]['rays'] = to be done HERE
r.locbas(L)
r.fillinter(L)
return(r)
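# Example (sketch): build a reduced Rays object containing only rays 0 and 3
# (assuming those indices exist) with L the Layout used for the ray tracing
#
#   r_sub = r3d.extract([0, 3], L)
#
# locbas() and fillinter() are re-run internally on the extracted subset.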
def extract_old(self,nr,L):
""" Extract a single ray
Parameters
----------
nr : ray index
L : Layout
"""
r = Rays(self.pTx,self.pRx)
r.is3D = self.is3D
r.nray2D = 1
r.nb_origin_sig = 1
#ni = self._ray2nbi[nr]
#ur = np.where(self[ni]['rayidx']==nr)[0][0]
ni,ur = self.ir2a(nr)
if 'D' in self.typ(nr):
diff=True
else:
diff=False
if 'diffvect' in self[ni]:
# check if the ray has diffraction interaction
inter = self.ray2iidx(nr)[:,0]
uD = np.where([i in inter for i in self[ni]['diffidx']])[0]
else:
uD=[]
diffkey = ['diffvect','diffidx','diffslabs']
r[ni] = {}
for k in self[ni].keys():
if k not in ['nbrays','rayidx','dis','nstrwall','nstrswall']:
tab = self[ni][k]
if type(tab)==np.ndarray and k not in diffkey:
r[ni][k] = tab[...,ur][...,np.newaxis]
if diff :
if k in diffkey :
if k != 'diffslabs':
r[ni][k]=tab[...,uD][...,np.newaxis]
else:
if len(uD)>0 :
r[ni][k]=tab[uD]
else:
r[ni][k]=[]
r[ni]['nbrays'] = 1 # keep only one ray
r.nray = 1
#r[ni]['rayidx']=np.array([self[ni]['rayidx'][nr]]) # ray index in the whole structure
r[ni]['rayidx'] = np.array([0])
r[ni]['dis'] = np.array([self[ni]['dis'][ur]])
r.locbas(L)
r.fillinter(L)
return(r)
def show(self,**kwargs):
""" plot 2D rays within the simulated environment
Parameters
----------
rlist : list (default []= all rays)
list of indices of ray in interaction group
graph : string
type of graph to be displayed
's','r','t',..
fig : figure
ax : axis
L : Layout
alpha : float
1
linewidth : float
0.1
color : string
'black'
ms : int
marker size : 5
layout : boolean
True
points : boolean
True
ER : ray energy
"""
defaults = {'rlist': [],
'fig': [],
'ax': [],
'L': [],
'graph': 's',
'color': 'black',
'alpha': 1,
'linewidth': 0.5,
'ms': 5,
'vmin':0,
'vmax':-70,
'cmap': plt.cm.hot_r,
'layout': True,
'points': True,
'labels': False,
'bcolorbar': False
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
if kwargs['fig'] ==[]:
fig = plt.figure()
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
#
# display the Layout
#
if kwargs['layout'] == True:
if kwargs['L'] != []:
fig,ax = kwargs['L'].showG(**kwargs)
else :
raise AttributeError('Please give a Layout file as argument')
else:
fig = kwargs['fig']
ax = kwargs['ax']
#
# display Tx and Rx
#
if kwargs['points'] ==True:
ax.plot(self.pTx[0], self.pTx[1], 'or',ms=kwargs['ms'])
ax.plot(self.pRx[0], self.pRx[1], 'og',ms=kwargs['ms'])
# i=-1 all rays
# else block of interactions i
# plot all rays
if kwargs['rlist'] == []:
# list of group of interactions
lgrint = self.keys()
for i in lgrint:
# list of rays
lray = range(len(self[i]['pt'][0, 0, :]))
#if self.filled :
# ax.set_title('rays index :'+ str(self[i]['rayidx']))
for j in lray:
addr_ray = (i,j)
index_ray = self.a2ir(addr_ray)
ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
np.hstack((self[i]['pt'][0:2, :, j],
self.pRx[0:2].reshape((2, 1))))
))
if 'ER' not in kwargs:
ax.plot(ray[0, :], ray[1, :],
alpha = kwargs['alpha'],
color = kwargs['color'],
linewidth = kwargs['linewidth'])
else:
EdB = 10*np.log10(kwargs['ER'][index_ray])
ERdB = 10*np.log10(kwargs['ER'])
vscale = 1.-(max(ERdB)-EdB)/(max(ERdB)-min(ERdB))
linewidth = 3*vscale
alpha = vscale
cmap = plt.cm.hot
color = cmap(vscale)
ax.plot(ray[0, :], ray[1, :],
alpha = alpha,
color = color,
linewidth = linewidth)
ax.axis('off')
#if self.filled :
# ax.set_title('rays index :'+ str(self[i]['rayidx'][lray]))
else:
rlist = kwargs['rlist']
# 3D ray
if self.is3D:
nbi = self._ray2nbi[rlist]
nr = np.array((nbi,rlist))
unb = np.unique(nr[0,:])
unr = {int(i):np.where(nr[0,:]==i)[0] for i in unb}
for i in unb:
raynb = (nr[1,unr[i]]).astype(int)
nbr = len(raynb)
ptidx = [np.where(self[i]['rayidx']==x)[0][0] for x in raynb]
for j in ptidx:
ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
np.hstack((self[i]['pt'][0:2, :, j],
self.pRx[0:2].reshape((2, 1))))
))
ax.plot(ray[0, :], ray[1, :],
alpha = kwargs['alpha'],
color = kwargs['color'],
linewidth = kwargs['linewidth'])
ax.axis('off')
# 2D ray
else:
for i in rlist:
lray = range(len(self[i]['pt'][0, 0, :]))
#if self.filled :
# ax.set_title('rays index :'+ str(self[i]['rayidx']))
for j in lray:
ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
np.hstack((self[i]['pt'][0:2, :, j],
self.pRx[0:2].reshape((2, 1))))
))
ax.plot(ray[0, :], ray[1, :],
alpha=kwargs['alpha'],
color=kwargs['color'],
linewidth=kwargs['linewidth'])
ax.axis('off')
if kwargs['bcolorbar']:
# axes : left , bottom , width , height
sm = plt.cm.ScalarMappable(cmap = kwargs['cmap'], norm = plt.Normalize(vmin=kwargs['vmin'],vmax=kwargs['vmax']))
sm._A = [] # necessary set_array
cax = fig.add_axes([0.18,0.35, 0.35, 0.025])
#cb = plt.colorbar(sm,cax=cax,orientation='horizontal')
cb = plt.colorbar(sm,cax=cax,orientation='horizontal')
cb.ax.tick_params(labelsize=24)
cb.set_label('Level (dB)', fontsize=24)
return(fig,ax)
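# Example (sketch): overlay a few rays on the layout, assuming L is the
# Layout used for the ray tracing
#
#   fig, ax = r3d.show(L=L, rlist=[0, 1, 2], color='blue', linewidth=1)
#
# With rlist=[] (default) all rays are drawn.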
def mirror(self, H=3, N=1, za = [], zb= []):
""" mirror a ray termination
Parameters
----------
H : float
ceil height (default 3m)
if H=0 only floor reflection is calculated (outdoor case)
if H=-1 floor and ceil reflection are inhibited (2D test case)
N : int
handle the number of mirror reflexions
za : float
height of the point where the parametrization starts ( e.g. pTx[2])
zb : float
height of the point where the parametrization ends ( e.g. pRx[2])
Returns
-------
d : dict
k : zm v: alpham
k : zp v: alphap
Examples
--------
>>> ptx = np.array([1,1,1.5])
>>> prx = np.array([2,2,1.2])
>>> r = Rays(ptx,prx)
>>> d = r.mirror()
>>> d[-1.5]
array([ 0.55555556])
Notes
-----
d is a dictionary whose keys are the heights along the vertical axis
from which the reflected rays emanate. The values of d are the
parameterizations (0 < alpha < 1) along the ray at which the
reflection points are located.
"""
km = np.arange(-N+1, N+1, 1)
kp = np.arange(-N, N+1, 1)
#
# heights of transmitter and receiver
#
if za == []:
za=self.pTx[2]
if zb == []:
zb=self.pRx[2]
ht = za
hr = zb
assert (hr<H or H==0 or H == -1),"mirror : receiver higher than ceil height"
assert (ht<H or H==0 or H == -1),"mirror : transmitter higher than ceil height"
zkp = 2*kp*H + ht
zkm = 2*km*H - ht
d = {}
if H>0:
for zm in zkm:
if zm < 0:
bup = H
pas = H
km = int(np.ceil(zm/H))
else:
bup = 0
pas = -H
km = int(np.floor(zm/H))
thrm = np.arange(km*H, bup, pas)
d[zm] = abs(thrm-zm)/abs(hr-zm)
for zp in zkp:
if zp < 0:
bup = H
pas = H
kp = int(np.ceil(zp/H))
else:
bup = 0
pas = -H
kp = int(np.floor(zp/H))
thrp = np.arange(kp*H, bup, pas)
d[zp] = abs(thrp-zp)/abs(hr-zp)
elif H==0:
d[-ht] = np.array([ht/(ht+hr)])
d[ht] = np.array([])
elif H==-1:
d[ht] = np.array([])
# print "zp",zp
# print "kp",kp
# print "thrp",thrp
# print "alphap",d[zp]
return d
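# Outdoor sketch (H=0): with the transmitter/receiver heights of the example
# above, only the floor image remains
#
#   d = r.mirror(H=0)    # keys : {-1.5: array([0.5555...]), 1.5: array([])}
#
# and with H=-1 both floor and ceil reflections are inhibited.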
def to3D(self, L, H=3, N=1, rmoutceilR=True):
""" transform 2D ray to 3D ray
Parameters
----------
L : Layout object
H : float
ceil height (default 3m)
if H= 0 only floor reflection is calculated (outdoor case)
if H=-1 floor and ceil reflection are inhibited (2D test case)
N : int
number of mirror reflexions
rmoutceilR : bool
Remove ceil reflexions in cycles (Gt nodes)
with indoor=False attribute
Returns
-------
r3d : Rays
See Also
--------
mirror
"""
if H==-1:
rmoutceilR=False
tx = self.pTx
rx = self.pRx
#
# Phase 1 : calculate Tx images height and parameterization in the
# vertical plane
#
d = self.mirror(H=H, N=N, za=tx[2], zb=rx[2])
#
# Elimination of invalid diffraction points
# If a diffraction point is the separation between 2 air walls
# it should be removed.
#
# Phase 2 : calculate 2D parameterization in the horizontal plane
#
# for all group of interactions
for i in self:
pts = self[i]['pt'][0:2, :, :]
sig = self[i]['sig']
if pts.shape[2]!=0:
# broadcasting of t and r
t = self.pTx[0:2].reshape((2, 1, 1)) * \
np.ones((1, 1, len(pts[0, 0, :])))
r = self.pRx[0:2].reshape((2, 1, 1)) * \
np.ones((1, 1, len(pts[0, 0, :])))
pts1 = np.hstack((t, np.hstack((pts, r))))
else:
t = self.pTx[0:2].reshape((2, 1, 1))
r = self.pRx[0:2].reshape((2, 1, 1))
pts1 = np.hstack((t,r))
# append t and r to interaction points in 2D
si1 = pts1[:, 1:, :] - pts1[:, :-1, :]
# array of all ray segments distances
si = np.sqrt(np.sum(si1 * si1, axis=0))
# array of cumulative distance of 2D ray
al1 = np.cumsum(si, axis=0)
# initialize parameterization parameter alpha
self[i]['alpha'] = np.zeros(np.shape(si[:-1, :]))
for j in range(len(self[i]['alpha'][:, 0])):
# get alpha
self[i]['alpha'][j, :] = np.sum(si[0:j+1, :], axis=0) \
/np.sum(si, axis=0)
# get z coordinate
self[i]['pt'][2, j, :] = tx[2] + self[i]['alpha'][j, :] \
* (rx[2] - tx[2])
#
# Phase 3 : Initialize 3D rays dictionary
#
r3d = Rays(tx, rx)
r3d.los = self.los
r3d.is3D = True
r3d.nray2D = len(self)
r3d.nb_origin_sig = self.nb_origin_sig
#
# Phase 4 : Fill 3D rays information
#
# Two nested loops
#
# for all interaction group
# for all type of 3D rays
# 1) extension
# 2) sort
# 3) coordinates as a function of parameter
#
for k in self: # for all interaction group k
# k = int(k)
# Number of rays in interaction group k
Nrayk = np.shape(self[k]['alpha'])[1]
# get 2D horizontal parameterization
a1 = self[k]['alpha']
#if (k==1):
# pdb.set_trace()
# get 2D signature
sig = self[k]['sig']
#print "signatures 2D ",sig
#print "----"
sigsave = copy.copy(sig)
# add parameterization of tx and rx (0,1)
a1 = np.concatenate((np.zeros((1, Nrayk)), a1, np.ones((1, Nrayk))))
# reshape signature in adding tx and rx
if sig.shape[0]!=0:
sig = np.hstack((np.zeros((2, 1, Nrayk), dtype=int),
sig,
np.zeros((2, 1, Nrayk), dtype=int))) # add signature of Tx and Rx (0,0))
else:
sig = np.hstack((np.zeros((2, 1, Nrayk), dtype=int),
np.zeros((2, 1, Nrayk), dtype=int)))
# broadcast tx and rx
Tx = tx.reshape(3, 1, 1)*np.ones((1, 1, Nrayk))
Rx = rx.reshape(3, 1, 1)*np.ones((1, 1, Nrayk))
if k!=0:
# pte is the sequence of point in 3D ndim =3 ( ndim x k x Nrayk)
pte = self[k]['pt']
# ndim x k+2 x Nrayk
pte = np.hstack((Tx, pte, Rx))
else:
pte = np.hstack((Tx, Rx))
# extension
for l in d: # for each vertical pattern (C,F,CF,FC,....)
#print k,l,d[l]
Nint = len(d[l]) # number of additional interaction
#if ((k==1) & (l==5.0)):print
if Nint > 0: # if new interaction ==> need extension
# a1e : extended horizontal+vertical parameterization
a1e = np.concatenate((a1, d[l].reshape(len(d[l]), 1)*
np.ones((1, Nrayk))))
# get sorted indices
ks = np.argsort(a1e, axis=0)
# a1es : extended sorted horizontal + vertical parameterization
a1es = np.sort(a1e, axis=0)
# #### Check whether the same parameter value exists in both the horizontal
# #### plane and the vertical plane. Shift the parameter if so.
da1es = np.diff(a1es,axis=0)
pda1es = np.where(da1es<1e-10)
a1es[pda1es]=a1es[pda1es]-1e-3
# prepare an extended sequence of points ( ndim x (Nint+k+2) x Nrayk )
ptee = np.hstack((pte, np.zeros((3, Nint, Nrayk))))
#
# Boolean ceil/floor detector
#
# u is 4 (floor interaction )
# 5 (ceil interaction )
# depending on the vertical pattern l.
#
# l <0 corresponds to last reflexion on floor
# l >0 corresponds to last reflexion on ceil
#
# u =0 (floor) or 1 (ceil)
# if l < 0:
# u = np.mod(range(Nint), 2)
# else:
# u = 1 - np.mod(range(Nint), 2)
if l < 0 and Nint%2 ==1: # l<0 Nint odd
u = np.mod(range(Nint), 2)
elif l > 0 and Nint%2 ==1: # l>0 Nint odd
u = 1 - np.mod(range(Nint), 2)
elif l < 0 and Nint%2 ==0: # l<0 Nint even
u = 1 - np.mod(range(Nint), 2)
elif l > 0 and Nint%2 ==0: # l>0 Nint even
u = np.mod(range(Nint), 2)
#
u = u + 4
#
# At that point we introduce the signature of the new
# introduced points on the ceil and/or floor.
#
# A signature is composed of two lines
# esigs sup line : interaction number
# esigi inf line : interaction type
#
esigs = np.zeros((1, Nint, Nrayk), dtype=int)
esigi = u.reshape(1, Nint, 1)* np.ones((1, 1, Nrayk), dtype=int)
# esig : extension of the signature
esig = np.vstack((esigs, esigi))
# sige : signature extended ( 2 x (Nint+k+2) x Nrayk )
sige = np.hstack((sig, esig))
#
# 2 x (Nint+k+2) x Nrayk
#
# sort extended sequence of points
# and extended sequence of signatures with the sorting
# index ks obtained from argsort of merge parametization
#
# sequence of extended sorted points
#
ptees = ptee[:, ks, range(Nrayk)]
siges = sige[:, ks, range(Nrayk)]
# extended and sorted signature
iint_f, iray_f = np.where(siges[ 1, :] == 4) # floor interaction
iint_c, iray_c = np.where(siges[ 1, :] == 5) # ceil interaction
#print siges
#
# find the list of the previous and next point around the
# new ceil or floor point. The case of successive ceil or
# floor reflections requires special handling.
#
# Take all the preceding points that are not ceil or floor
# points and all the following points that are not ceil or
# floor reflection points.
#
# In order to account for the ray and the interaction group
# involved, a tuple concatenating the floor or ceil
# interaction index and the ray index of the associated
# group must be passed (hence the zip).
#
# This sequence of instructions fixes bug #133.
#
# Previously, it was assumed that a known 2D point
# immediately followed.
#
try:
iintm_f = [ np.where( (siges[1,0:x[0],x[1]]!=4) &
(siges[1,0:x[0],x[1]]!=5))[0][-1]
for x in zip(iint_f,iray_f) ]
iintp_f = [ np.where( (siges[1,x[0]:,x[1]]!=4) &
(siges[1,x[0]:,x[1]]!=5))[0][0]+x[0]
for x in zip(iint_f,iray_f) ]
iintm_c = [ np.where( (siges[1,0:x[0],x[1]]!=4) &
(siges[1,0:x[0],x[1]]!=5))[0][-1]
for x in zip(iint_c,iray_c) ]
iintp_c = [ np.where( (siges[1,x[0]:,x[1]]!=4) &
(siges[1,x[0]:,x[1]]!=5))[0][0]+x[0]
for x in zip(iint_c,iray_c) ]
except:
pdb.set_trace()
# Update coordinate in the horizontal plane
#
#
# The new interaction ceil or floor has no coordinates in
# the horizontal plane.
# Those coordinates are evaluated first by finding a sub
# parameterization of the point with respect to the two
# known adjacent interaction points j-1 and j+1 (Thales)
#
#iintm_f = iint_f - 1
#iintp_f = iint_f + 1
#iintm_c = iint_c - 1
#iintp_c = iint_c + 1
#
# If there are floor points
#
if len(iint_f)>0:
a1esm_f = a1es[iintm_f, iray_f]
a1esc_f = a1es[iint_f, iray_f]
a1esp_f = a1es[iintp_f, iray_f]
pteesm_f = ptees[0:2, iintm_f, iray_f]
pteesp_f = ptees[0:2, iintp_f, iray_f]
coeff_f = (a1esc_f-a1esm_f)/(a1esp_f-a1esm_f)
ptees[0:2, iint_f, iray_f] = pteesm_f + coeff_f*(pteesp_f-pteesm_f)
#
# If there are ceil points
#
if len(iint_c)>0:
a1esm_c = a1es[iintm_c, iray_c]
a1esc_c = a1es[iint_c, iray_c]
a1esp_c = a1es[iintp_c, iray_c]
pteesm_c = ptees[0:2, iintm_c, iray_c]
pteesp_c = ptees[0:2, iintp_c, iray_c]
coeff_c = (a1esc_c-a1esm_c)/(a1esp_c-a1esm_c)
ptees[0:2, iint_c, iray_c] = pteesm_c + coeff_c*(pteesp_c-pteesm_c)
if H != 0:
z = np.mod(l+a1es*(rx[2]-l), 2*H)
pz = np.where(z > H)
z[pz] = 2*H-z[pz]
ptees[2, :] = z
# case where ceil reflection are inhibited
elif H==0:
z = abs(l+a1es*(rx[2]-l))
# pz = np.where(z > H)
# z[pz] = 2*H-z[pz]
ptees[2, :] = z
# recopy old 2D parameterization (no extension)
else:
a1es = a1
ks = np.argsort(a1es, axis=0)
ptees = pte
# fixing bug
siges = copy.copy(sig)
#print siges
#---------------------------------
# handling multi segment (iso segments)
# Height of reflexion interaction
# Height of diffraction interaction
#---------------------------------
#
# ptes (3 x i+2 x r )
if len(L.lsss)>0:
#
# lsss : list of sub segments (iso segments)
# lnss : list of diffraction points involving multi segments
lsss = np.array(L.lsss)
lnss = np.array(L.lnss)
# array of structure element (nstr) with TxRx extension (nstr=0)
anstr = siges[0,:,:]
# type of interaction
typi = siges[1,:,:]
# lss : list of subsegments in the current signature
#
# scalability : avoid a loop over all the subsegments in lsss
#
lss = [ x for x in lsss if x in anstr.ravel()]
ray_to_delete = []
for s in lss:
u = np.where(anstr==s)
if len(u)>0:
zs = ptees[2,u[0],u[1]]
zinterval = L.Gs.node[s]['z']
unot_in_interval = ~((zs<=zinterval[1]) & (zs>=zinterval[0]))
ray_to_delete.extend(u[1][unot_in_interval])
# lns : list of diffraction points in the current signature
# involving multi segments (iso)
# scalability : avoid a loop over all the points in lnss
#
lns = [ x for x in lnss if x in anstr.ravel()]
#
# loop over multi diffraction points
#
for npt in lns:
# diffraction corner in espoo.lay
#if npt==-225:
# import ipdb
# ipdb.set_trace()
u = np.where(anstr==npt)
if len(u)>0:
# height of the diffraction point
zp = ptees[2,u[0],u[1]]
#
# To which couple of segments does this height belong ?
# the get_diffslab function answers that question
#
ltu_seg,ltu_slab = L.get_diffslab(npt,zp)
#
# delete rays where diffraction point is connected to
# 2 AIR segments
#
[ray_to_delete.append(u[1][i]) for i in range(len(zp))
if ((ltu_slab[i][0]=='AIR') & (ltu_slab[i][1]=='AIR'))]
# #zinterval = L.Gs.node[s]['z']
# # if (zs<=zinterval[1]) & (zs>=zinterval[0]):
# if ((tu_slab[0]!='AIR') & (tu_slab[1]!='AIR')):
# #print(npt , zp)
# pass
# else:
# ray_to_delete.append(u[1][0])
# # nstr : structure number
# nstr = np.delete(nstr,ray_to_delete,axis=1)
# typi : type of interaction
typi = np.delete(typi,ray_to_delete,axis=1)
# 3d sequence of points
ptees = np.delete(ptees,ray_to_delete,axis=2)
# extended (floor/ceil) signature
siges = np.delete(siges,ray_to_delete,axis=2)
if rmoutceilR:
# 1 determine Ceil reflexion index
# uc (inter x ray)
uc = np.where(siges[1,:,:]==5)
ptc = ptees[:,uc[0],uc[1]]
if len(uc[0]) !=0:
P = shg.MultiPoint(ptc[:2,:].T)
# to determine the cycles where ceil reflections happen
# uinter(nb pt x nb cycles)
mapnode = list(L.Gt.nodes())
uinter = np.array([[L.Gt.node[x]['polyg'].contains(p) for x in mapnode if x>0] for p in P])
# import ipdb
# ipdb.set_trace()
#[plt.scatter(p.xy[0],p.xy[1],c='r') for up,p in enumerate(P) if uinter[0,up]]
#[ plt.scatter(p.xy[0],p.xy[1],c='r') for up,p in enumerate(P) if uinter[0,up]]
# find whether points belong to indoor or outdoor cycles
upt,ucy = np.where(uinter)
uout = np.where([not L.Gt.node[mapnode[u+1]]['indoor'] for u in ucy])[0] #ucy+1 is to manage cycle 0
# 3 remove ceil reflexions of outdoor cycles
if len(uout)>0:
ptees = np.delete(ptees,uc[1][uout],axis=2)
siges = np.delete(siges,uc[1][uout],axis=2)
sigsave = np.delete(sigsave,uc[1][uout],axis=2)
if k+Nint in r3d:
r3d[k+Nint]['pt'] = np.dstack((r3d[k+Nint]['pt'], ptees))
r3d[k+Nint]['sig'] = np.dstack((r3d[k+Nint]['sig'], siges))
r3d[k+Nint]['sig2d'].append(sigsave)
else:
if ptees.shape[2]!=0:
r3d[k+Nint] = {}
r3d[k+Nint]['pt'] = ptees
r3d[k+Nint]['sig'] = siges
r3d[k+Nint]['sig2d'] = [sigsave]
# ax=plt.gca()
# uu = np.where(ptees[2,...]==3.0)
# ax.plot(ptees[0,uu[0],uu[1]],ptees[1,uu[0],uu[1]],'ok')
# import ipdb
# ipdb.set_trace()
#
# Add Line Of Sight ray information
# pt = [tx,rx]
# sig = [0,0]
#
#pdb.set_trace()
# if (self.los) & (np.sqrt(np.sum((tx-rx)**2)) !=0) :
# r3d[0] = {}
# r3d[0]['sig'] = np.zeros((2,2,1))
# r3d[0]['sig2d'] = np.zeros((2,2,1))
# r3d[0]['pt'] = np.zeros((3,2,1))
# r3d[0]['pt'][:,0,:] = tx[:,np.newaxis]
# r3d[0]['pt'][:,1,:] = rx[:,np.newaxis]
# r3d.nray = reduce(lambda x,y : y + np.shape(r3d[x]['sig'])[2],lnint)
# count total number of ray
# evaluate length of ray segment
#
# vsi
# si
# dis
#
val =0
for k in r3d.keys():
nrayk = np.shape(r3d[k]['sig'])[2]
r3d[k]['nbrays'] = nrayk
r3d[k]['rayidx'] = np.arange(nrayk)+val
r3d.nray = r3d.nray + nrayk
val=r3d[k]['rayidx'][-1]+1
# 3 : x,y,z
# i : interaction index
# r : ray index
#
# k : group of interactions index
#
v = r3d[k]['pt'][:, 1:, :]-r3d[k]['pt'][:, 0:-1, :]
lsi = np.sqrt(np.sum(v*v, axis=0))
rlength = np.sum(lsi,axis=0)
if (lsi.any()==0):
pdb.set_trace()
if not (lsi.all()>0):
pdb.set_trace()
#assert(lsi.all()>0)
if (len(np.where(lsi==0.))==0) :
pdb.set_trace()
#
# sort rays w.r.t their length
#
u = np.argsort(rlength)
r3d[k]['pt'] = r3d[k]['pt'][:,:,u]
r3d[k]['sig'] = r3d[k]['sig'][:,:,u]
#r3d[k]['sig2d'] = r3d[k]['sig2d'][:,:,u]
si = v/lsi # ndim , nint - 1 , nray
# vsi : 3 x (i+1) x r
r3d[k]['vsi'] = si[:,:,u]
# si : (i+1) x r
r3d[k]['si'] = lsi[:,u]
r3d[k]['dis'] = rlength[u]
r3d.delays = np.zeros((r3d.nray))
for k in r3d.keys():
ir = r3d[k]['rayidx']
r3d.delays[ir] = r3d[k]['dis']/0.3
r3d.origin_sig_name = self.origin_sig_name
r3d.Lfilename = L._filename
r3d.filename = L._filename.split('.')[0] + '_' + str(r3d.nray)
return(r3d)
def get_rays_slabs(self,L,ir):
""" return the slabs for a given interaction index
Parameters
----------
L : Layout
ir : interaction block
Returns
-------
numpy array of slab strings with shape (i, r)
i : number of interactions (of the interaction block)
r : number of rays
"""
v=np.vectorize( lambda t: L.Gs.node[t]['name'] if (t!=0) and (t>0) else '_')
return v(self[ir]['sig'][0])
def remove_aw(self,L):
""" remove AIR interactions
"""
# def consecutive(data, stepsize=1):
# return np.split(data, np.where(np.diff(data) != stepsize)[0]+1)
R = Rays(self.pTx,self.pRx)
R.__dict__.update(self.__dict__)
# R.is3D=True
# R.nray = self.nray
# R.nray2D = self.nray2D
# R.nray2D = self.nray2D
# R.nray2D = self.nray2D
for k in self:
lr = self[k]['sig'].shape[1]
inter = self.get_rays_slabs(L,k)
for ur,r in enumerate(inter.T):
not_air_mask = ~((r =='_AIR') | (r == 'AIR' ))
nb_air = sum(~not_air_mask)
if nb_air != 0 :
new_bi = k-nb_air
# +2 : add tx & rx interactions
# -1 : 2 interactions correspond to 1 distance
lsi = new_bi + 2 - 1
si = np.zeros(lsi)
si_old = self[k]['si'][:,ur]
vsi = np.zeros((3,lsi))
vsi_old = self[k]['vsi'][...,ur]
sig = self[k]['sig'][:,not_air_mask,ur][...,None]
# sig2d = self[k]['sig2d'][0][...,ur]
pt = self[k]['pt'][:,not_air_mask,ur][...,None]
u = 0
si_aw = 0
# import ipdb
# ipdb.set_trace()
for uold,b in enumerate(not_air_mask[1:]):
if b:
# update new si with sum of all
# distance from preceding airwall
si[u] = si_old[uold] + si_aw
# keep vsi from the last airwall
# because vsi does not change across an airwall
vsi[:,u] = vsi_old[:,uold]
u += 1
si_aw=0
else:
si_aw += si_old[uold]
si = si[...,None]
vsi = vsi[...,None]
dis = np.array([np.sum(si)])
assert np.allclose(dis,np.sum(si_old))
else:
# no air wall case, fill R with self values
new_bi = k
pt = self[k]['pt'][...,ur][...,None]
sig = self[k]['sig'][...,ur][...,None]
# sig2d = self[k]['sig2d'][0][...,ur]
si = self[k]['si'][:,ur][:,None]
vsi = self[k]['vsi'][...,ur][...,None]
dis = np.array([self[k]['dis'][ur]])
if new_bi in R:
# R[new_bi]['sig2d'].append(self[k]['sig2d'][ur])
R[new_bi]['pt'] = np.concatenate((R[new_bi]['pt'],pt),axis=2)
R[new_bi]['sig'] = np.concatenate((R[new_bi]['sig'],sig),axis=2)
R[new_bi]['rayidx'] = np.concatenate((R[new_bi]['rayidx'],np.array([self[k]['rayidx'][ur]])))
R[new_bi]['si'] = np.concatenate((R[new_bi]['si'],si),axis=1)
R[new_bi]['vsi'] = np.concatenate((R[new_bi]['vsi'],vsi),axis=2)
R[new_bi]['dis'] = np.concatenate((R[new_bi]['dis'],dis),axis=0)
else:
R[new_bi] = {}
# R[new_bi]['sig2d'] = [self[k]['sig2d'][0][...,ur]]
R[new_bi]['pt'] = pt
R[new_bi]['sig'] = sig
R[new_bi]['rayidx'] = np.array([self[k]['rayidx'][ur]])
R[new_bi]['si'] = si
R[new_bi]['vsi'] = vsi
R[new_bi]['dis'] = dis
if 0 in R:
R.los=True
X = [[R[k]['rayidx'][u] for u in range(len(R[k]['rayidx']))] for k in R]
R._rayidx_aw = sum(X,[])
return R
def length(self,typ=2):
""" calculate length of rays
Parameters
----------
typ : int
1 : length of each ray segment
2 : accumulated length of the ray
"""
dk = {}
for k in self: # for all interaction group k
# 3 x Ni-1 x Nr
vk = self[k]['pt'][:,1:,:]-self[k]['pt'][:,0:-1,:]
d1 = np.sqrt(np.sum(vk*vk,axis=0))
d2 = np.sum(d1,axis=0)
if typ==1:
dk[k] = d1
if typ==2:
dk[k] = d2
return(dk)
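# Example (sketch): per-group accumulated ray lengths
#
#   dk = r3d.length(typ=2)   # dict : {nb_interactions : array of ray lengths}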
def simplify(self):
if not self.is3D:
return None
for ir in self:
print(self[ir]['si'])
def locbas(self, L):
""" calculate ray local bas
Parameters
----------
L : Layout
Notes
-----
This method adds for each group of interactions the following members
norm : np.array
3 x i x r (interaction vector)
nstrwall : np.array
nstr of interactions
vsi : np.array
3 x (i+1) x r
aod : np.array
2 x r
aoa : np.array
2 x r
Bo0 : np.array
3 x 3 x r
Bi : np.array
3 x 3 x r
Bo : np.array
3 x 3 x r
BiN : np.array
3 x 3 x r
scpr : np.array
i x r
theta : np.array
i x r
rays : int
nbrays : int
rayidx : np.array
diffslabs : list
diffvect : np.array
(phi0,phi,beta,NN)
"""
#
# extract normal in np.array
#
# nsegment x 3
norm = np.array(list(nx.get_node_attributes(L.Gs,'norm').values()))
# nsegment x k
key = np.array(list(dict(nx.get_node_attributes(L.Gs,'norm')).keys()))
# maximum number for referring to a segment
# not to be confused with a segment number
nsmax = max(L.Gs.node.keys())
mapping = np.zeros(nsmax+1, dtype=int)
mapping[key] = np.arange(len(key), dtype=int)
#
# Structure number : nstr
# the structure number is < 0 for points
# > 0 for segments
# A segment can have several subsegments (up to 100)
# nstrs is the nstr of the underlying segment when the interaction
# occurs on a subsegment; nstr is the global index which allows
# recovering the slab values
#
idx = np.array(())
if self.los:
idxts = 1
nbrayt = 1
else:
idxts = 0
nbrayt = 0
# list of used wedges
luw=[]
lgi = list(self.keys())
lgi.sort()
for k in lgi:
#
# k is the number of interactions in the block
#
#print(k,self[11]['rayidx'])
if k != 0:
# structure number (segment or point)
# nstr : i x r
nstr = self[k]['sig'][0, 1:-1, :]
# ityp : i x r
ityp = self[k]['sig'][1, 1:-1, :]
# nstr of underlying segment
# position of interaction corresponding to a sub segment
# print nstr
#
# uss : index of subsegment
# subsegments are not nodes of Gs but have positive nstr index
#
uss = np.where(nstr > nsmax)
# print uss
nstrs = copy.copy(nstr)
#
# if subsegments have been found
#
if len(uss) >0:
ind = nstr[uss]- nsmax-1
nstrs[uss] = np.array(L.lsss)[ind]
# print nstr
#print nstrs
#pdb.set_trace()
nray = np.shape(nstr)[1]
uwall = np.where((ityp == 2) | (ityp == 3))
udiff = np.where((ityp == 1))
ufloor= np.where((ityp == 4))
uceil = np.where((ityp == 5))
nstrwall = nstr[uwall[0], uwall[1]] # nstr of walls
nstrswall = nstrs[uwall[0], uwall[1]] # nstrs of walls
self[k]['nstrwall'] = nstrwall # store nstr without subsegment
self[k]['nstrswall'] = nstrswall # store nstr with subsegment
self[k]['norm'] = np.zeros((3, k, nray)) # 3 x int x nray
# norm : 3 x i x r
#
# norm is the vector associated to the interaction
# For the diffraction case the normal is replaced by the unit
# vector along the wedge directed upward.
#
self[k]['norm'][:, uwall[0], uwall[1]] = norm[mapping[nstrswall],:].T
self[k]['norm'][2, ufloor[0], ufloor[1]] = np.ones(len(ufloor[0]))
self[k]['norm'][2, uceil[0], uceil[1]] = -np.ones(len(uceil[0]))
self[k]['norm'][2, udiff[0], udiff[1]] = np.ones(len(udiff[0]))
normcheck = np.sum(self[k]['norm']*self[k]['norm'],axis=0)
assert normcheck.all()>0.99,pdb.set_trace()
# 3 : x,y,z
# i : interaction index
# r : ray index
#
# k : group of interactions index
#
#v = self[k]['pt'][:, 1:, :]-self[k]['pt'][:, 0:-1, :]
#lsi = np.sqrt(np.sum(v*v, axis=0))
#if (lsi.any()==0):
# pdb.set_trace()
#assert(lsi.all()>0)
#if (len(np.where(lsi==0.))==0) :
# pdb.set_trace()
#si = v/lsi # ndim , nint - 1 , nray
# si : 3 x (i+1) x r
si = self[k]['vsi']
# si : (i+1) x r
#self[k]['si'] = lsi
#self[k]['dis'] = np.sum(lsi,axis=0)
# normal : 3 x i x r
vn = self[k]['norm']
# s_in : 3 x i x r
s_in = si[:, 0:-1, :]
# s_out : 3 x i x r
s_out = si[:, 1:, :]
#
# AOD (rad)
#
# th : ,r
thd = np.arccos(si[2, 0, :])
# ph : ,r
phd = np.arctan2(si[1, 0, :], si[0, 0, :])
# aod : 2 x r (radians)
self[k]['aod'] = np.vstack((thd, phd))
# eth : 3 x r
eth = np.array([np.cos(thd) * np.cos(phd),
np.cos(thd) * np.sin(phd),
-np.sin(thd)])
# eph : 3 x r
eph = np.array([-np.sin(phd),
np.cos(phd),
np.zeros(len(phd))])
# Bo0 : 3 x 3 x r
Bo0 = np.concatenate((si[:, 0, None, :],
eth[:, None, :],
eph[:, None, :]), axis=1)
self[k]['Bo0'] = Bo0
#
# scalar product si . norm
#
# vn : 3 x i x r
# s_in : 3 x i x r
#
# scpr : i x r
#
scpr = np.sum(vn*si[:,0:-1,:], axis=0)
self[k]['scpr'] = scpr
self[k]['theta'] = np.arccos(abs(scpr)) # *180/np.pi
def fix_colinear(w):
"""
w : np.array
cross-product vector ; entries where w vanishes (vector colinear
with the normal) are replaced by a default orthogonal vector
"""
nw = np.sqrt(np.sum(w*w, axis=0))
u = np.where(nw==0)
if len(u[0])!=0:
logger.debug('colinear situation detected')
if (u[0].any() or u[1].any()) \
or (u[0].any()==0 or u[1].any()==0):
uu = np.array([u[0],u[1]]).T
#determine which interaction and rays
#present the colinearity issue
uvv = abs(vn[2,uu[:,0],uu[:,1]])>0.99
# uv : nbi x nbr colinear index
uv = uu[uvv]
# uh : nbi x nbr anti-colinear index
uh = uu[np.logical_not(uvv)]
try:
#fix w for colinear index
w[:,uv[:,0],uv[:,1]] = np.array(([1,0,0]))[:,None]
# update normal
nw[uv[:,0],uv[:,1]] = np.sqrt(np.sum(
w[:,uv[:,0],uh[:,1]]*w[:,uv[:,0],uv[:,1]],axis=0))
except:
pass
try:
# fix w for anti-colinear index
w[:,uh[:,0],uh[:,1]] = np.array(([0,0,1]))[:,None]
# update normal
nw[uh[:,0],uh[:,1]] = \
np.sqrt(np.sum(w[:,uh[:,0],uh[:,1]]*w[:,uh[:,0],uh[:,1]],axis=0))
except:
pass
return w, nw
#
# Warning need to handle singular case when s_in // vn
#
# w : 3 x i x r
#
w = np.cross(s_in, vn, axisa=0, axisb=0, axisc=0)
# nw : i x r
w, nw = fix_colinear(w)
wn = w/nw
v = np.cross(wn, s_in, axisa=0, axisb=0, axisc=0)
es_in = np.expand_dims(s_in, axis=1)
ew = np.expand_dims(wn, axis=1)
ev = np.expand_dims(v, axis=1)
# Bi 3 x 3 x i x r
Bi = np.concatenate((es_in,ew,ev),axis=1)
# self[k]['Bi'] 3 x 3 x i x r
self[k]['Bi'] = Bi
################################
w = np.cross(s_out, vn, axisa=0, axisb=0, axisc=0)
w, nw = fix_colinear(w)
#wn = w/np.sqrt(np.sum(w*w, axis=0))
wn = w/nw
v = np.cross(wn, s_out, axisa=0, axisb=0, axisc=0)
es_out = np.expand_dims(s_out, axis=1)
ew = np.expand_dims(wn, axis=1)
ev = np.expand_dims(v, axis=1)
# Bi 3 x 3 x i x r
Bo = np.concatenate((es_out,ew,ev),axis=1)
# self[k]['Bo'] 3 x 3 x i x r
self[k]['Bo'] = Bo
#
# AOA (rad)
#
# th : ,r
# fix doa/dod reciprocity
#th = np.arccos(si[2, -1, :])
tha = np.arccos(si[2, -1, :])
# th : ,r
#ph = np.arctan2(si[1, -1, :], si[0, -1, :])
pha = np.arctan2(si[1, -1, :], si[0, -1, :])
# aoa : 2 x r (radians)
self[k]['aoa'] = np.vstack((tha, pha))
eth = np.array([np.cos(tha) * np.cos(pha),
np.cos(tha) * np.sin(pha),
-np.sin(tha)])
eph = np.array([-np.sin(pha),
np.cos(pha),
np.zeros(len(pha))])
# Bo0 : 3 x 3 x r
BiN = np.concatenate((si[:,-1,None,:],
eth[:, None, :],
eph[:, None, :]), axis=1)
self[k]['BiN'] = BiN
#self[k]['BiN'] = np.concatenate((-si[:,-1,np.newaxis,:],eth[:,np.newaxis,:],
# eph[:,np.newaxis,:]),axis=1)
# Creation of B from Bi and Bo
# is done after the potential diffraction
# computation
## index creation
##################
# create index for retrieving interactions
# integer offset : total size idx
idxts = idxts + idx.size
idx = idxts + np.arange(ityp.size).reshape(np.shape(ityp),order='F')
nbray = np.shape(idx)[1]
self[k]['rays'] = idx
self[k]['nbrays'] = nbray
self[k]['rayidx'] = nbrayt + np.arange(nbray)
# create a numpy array to relate the ray index to its corresponding
# number of interactions
#pdb.set_trace()
_ray2nbi = np.ones((nbray), dtype=int)
try:
self._ray2nbi = np.hstack((self._ray2nbi,_ray2nbi))
except:
self._ray2nbi = _ray2nbi
self._ray2nbi[self[k]['rayidx']] = k
nbrayt = nbrayt + nbray
self.raypt = self.raypt + self[k]['nbrays']
#################################
# Start diffraction specific case
#################################
if len(udiff[0]) != 0 :
Z = np.where(ityp.T==1)
udiff=Z[1],Z[0]
# diffseg,udiffseg = np.unique(nstr[udiff],return_inverse=True)
diffupt=nstr[udiff]
# position of diff seg (- because iupnt accept > 0 reference to points)
#
# TO BE FIXED
#
#ptdiff = L.pt[:,L.iupnt[-diffupt]]
ptdiff = np.array([ (L.Gs.pos[x][0],L.Gs.pos[x][1]) for x in diffupt ]).T
self[k]['diffidx'] = idx[udiff[0],udiff[1]]
# get tail head position of seg associated to diff point
lair = L.name['AIR'] + L.name['_AIR']
#aseg = map(lambda x : filter(lambda y : y not in lair,
# nx.neighbors(L.Gs,x)),
# diffupt)
aseg = [ [ y for y in nx.neighbors(L.Gs,x) if y not in lair ] for x in diffupt ]
# manage flat angle : diffraction by a flat segment (e.g. door limitation)
[aseg[ix].extend(x) for ix,x in enumerate(aseg) if len(x)==1]
# get points positions
#pdb.set_trace()
pts = np.array([ L.seg2pts([x[0],x[1]]) for x in aseg ])
#self[k]['diffslabs']=[str(L.sl[L.Gs.node[x[0]]['name']])+'_'
# + str(L.sl[L.Gs.node[x[1]]['name']]]) for x in aseg]
self[k]['diffslabs']=[ L.Gs.node[x[0]]['name']+'@'
+ L.Gs.node[x[1]]['name'] for x in aseg]
uwl = np.unique(self[k]['diffslabs']).tolist()
luw.extend(uwl)
pt1 = pts[:,0:2,0] #tail seg1
ph1 = pts[:,2:4,0] #head seg1
pt2 = pts[:,0:2,1] #tail seg2
ph2 = pts[:,2:4,1] #head seg2
#pts is (nb_diffraction_points x 4 x 2)
#- The dimension 4 represents the 2x2 points: t1,h1 and t2,h2,
#  tail and head of segment 1 and 2 respectively
#- The dimension 2 is x,y
#
# The following aims to determine which tails and heads of
# segments associated to a given diffraction point
# are connected
#
#
# point diff is pt1
updpt1 = np.where(np.sum(ptdiff.T==pt1,axis=1)==2)[0]
# point diff is ph1
updph1 = np.where(np.sum(ptdiff.T==ph1,axis=1)==2)[0]
# point diff is pt2
updpt2 = np.where(np.sum(ptdiff.T==pt2,axis=1)==2)[0]
# point diff is ph2
updph2 = np.where(np.sum(ptdiff.T==ph2,axis=1)==2)[0]
pa = np.empty((len(diffupt),2))
pb = np.empty((len(diffupt),2))
####seg 1 :
#if pt1 diff point => ph1 is the other point
pa[updpt1]= ph1[updpt1]
#if ph1 diff point => pt1 is the other point
pa[updph1]= pt1[updph1]
####seg 2 :
#if pt2 diff point => ph2 is the other point
pb[updpt2]= ph2[updpt2]
#if ph2 diff point => pt2 is the other point
pb[updph2]= pt2[updph2]
pt = ptdiff.T
# NN : (nb_diffraction_points)
# alpha wedge (a.k.a. wedge parameter, a.k.a. wedge aperture)
NN = (360.-geu.sector(pa.T,pb.T,pt.T))/180.
# NN = (2.-NN)*np.pi
#angle between face 0, diffraction point and s_in
#s_in[:2,udiff[0],udiff[1]] :
# s_in of interactions udiff (2D) restricted to diffraction points
vptpa = pt-pa
vptpan = vptpa.T / np.sqrt(np.sum((vptpa)*(vptpa),axis=1))
# vpapt= pa-pt # papt : direction vector of face 0
# vpaptn = vpapt.T / np.sqrt(np.sum((vpapt)*(vpapt),axis=1))
sid = s_in[:,udiff[0],udiff[1]] #s_in restricted to diff
sod = s_out[:,udiff[0],udiff[1]] #s_out restricted to diff
vnormz = self[k]['norm'][:, udiff[0], udiff[1]]
#phi0 = arccos(dot(sid*vpavptn))
# phi0 = geu.vecang(sid[:2],vpaptn)
uleft = geu.isleft(pa.T,pt.T,pb.T)
phi0 = geu.vecang(vptpan,sid[:2])
phi0[~uleft] = geu.vecang(sid[:2,~uleft],vptpan[:,~uleft])
# phi0 = np.arccos(np.sum(sid[:2]*vpaptn,axis=0))
#phi = arccos(dot(sod*vpavptn))
# phi = np.arccos(np.sum(-sod[:2]*vpaptn,axis=0))
phi = geu.vecang(vptpan,-sod[:2])
phi[~uleft] = geu.vecang(-sod[:2,~uleft],vptpan[:,~uleft])
# beta
#it is important to check if the sid comes from left or right
#to this end assume that sid vector is composed
#of 2 point : (0,0) and sid
# compared to the position of the diffraction point in x
# with an elevation=0
sidxz = sid[[0,2]]
vnormxz = vnormz[[0,2]]
zero = np.zeros((2,ptdiff.shape[1]))
zdiff = np.vstack((ptdiff[0],zero[0]))
left = geu.isleft(zero,sidxz,zdiff)
beta = np.arccos(np.sum(vnormz*sid,axis=0))
# self[k]['diffvect'] is (4 x Nb_rays )
# for axis 0 lenght 4 represent :
# 0 => phi0
# 1 => phi
# 2 => beta
# 3 => N (wedge parameter)
self[k]['diffvect']=np.array((phi0,phi,beta,NN))
######
#Bi diffract
#####
# w is the perpendicular (soft) polarization unit vector in the diffraction case
w = np.cross(-sid, vnormz, axisa=0, axisb=0, axisc=0)
# nw : i x r
w, nw = fix_colinear(w)
wn = w/nw
# Handling channel reciprocity s_in --> -s_in
#v = np.cross(wn, s_in, axisa=0, axisb=0, axisc=0)
v = np.cross(wn, -sid, axisa=0, axisb=0, axisc=0)
e_sid = np.expand_dims(-sid, axis=1)
ew = np.expand_dims(wn, axis=1)
ev = np.expand_dims(v, axis=1)
# Bid 3 x 3 x (i,r)diff
Bid = np.concatenate((e_sid,ev, ew), axis=1)
#update Bi for diffracted rays
Bi[:,:,udiff[0],udiff[1]] = Bid
######
#Bo diffract
#####
w = np.cross(sod,vnormz, axisa=0, axisb=0, axisc=0)
w, nw = fix_colinear(w)
wn = w/nw
#wn = w/np.sqrt(np.sum(w*w, axis=0))
v = np.cross(wn, sod, axisa=0, axisb=0, axisc=0)
e_sod = np.expand_dims(sod, axis=1)
ew = np.expand_dims(wn, axis=1)
ev = np.expand_dims(v, axis=1)
# Bod 3 x 3 x (i,r)diff
Bod = np.concatenate((e_sod,ev, ew), axis=1)
#update Bo for diffracted rays
Bo[:,:,udiff[0],udiff[1]] = Bod
#################################
# End of diffraction specific case
##################################
#
# pasting (Bo0,B,BiN)
#
# B : 3 x 3 x i x r
Bo = np.concatenate((Bo0[:, :, np.newaxis, :], Bo), axis=2)
Bi = np.concatenate((Bi, BiN[:, :, np.newaxis, :]), axis=2)
# B : 3 x 3 x i x r
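# B[v,w,...] = sum_x Bi[x,v,...] * Bo[x,w,...]  (i.e. Bi^T . Bo) :
# it maps the output basis of interaction i (or the departure basis Bo0
# for the first slot) onto the input basis of the next interaction
# (or the arrival basis BiN for the last slot)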
self[k]['B'] = np.einsum('xv...,xw...->vw...', Bi, Bo)
#self[k]['B'] = np.einsum('vx...,xw...->vw...', Bi, Bo)
#BiN = np.array([si[:,-1,:], eth, eph]) # ndim x 3 x Nray
#self[k]['BiN']=BiN
# self[k]['B']=np.sum(self[k]['Bi'][:2,:2,np.newaxis]*self[k]['Bo'][np.newaxis,:2,:2],axis=1)
# if los exists
else :
self[k]['nstrwall'] = np.array(())
self[k]['norm'] = np.array(())
si = np.sqrt(np.sum((self[0]['pt'][:,0]-self[0]['pt'][:,1])**2,axis=0))
self[k]['si'] = np.vstack((si,0.))
self[k]['vsi'] = (self[0]['pt'][:,1]-self[0]['pt'][:,0])/si
self[k]['dis'] = np.array((si))
vsi = self[k]['vsi']
thd = np.arccos(vsi[2])
phd = np.arctan2(vsi[1], vsi[0])
self[k]['aod'] = np.vstack((thd, phd))
self[k]['Bo0'] = np.array(())
self[k]['scpr'] = np.array(())
self[k]['theta'] = np.zeros((1,1))
#
# The following derivation of the DoA follows the chosen angle convention.
# These angles are relative to the natural spherical coordinate system in the GCS of the scene.
#
# for a LOS path :
# tha = pi - thd
# pha = phd - pi
#
#self[k]['aoa'] = np.vstack((np.pi-thd, phd-np.pi))
self[k]['aoa'] = np.vstack((thd,phd))
E = np.eye(2)[:,:,np.newaxis,np.newaxis]
self[k]['B'] = np.dstack((E,E))
ze = np.array([0])
self[k]['rays'] = np.array(([[0]]))
self[k]['nbrays'] = 1
self[k]['rayidx'] = ze
self.raypt = 1
self._ray2nbi = ze
self._luw = np.unique(luw).tolist()
self.isbased = True
def fillinter(self, L, append=False):
""" fill ray interactions
Parameters
----------
L : Layout
append : Boolean
If True append new rays to existing structure
Notes
-------
This method adds the following members
I : Interactions
B : IntB
B0 : IntB
"""
# reinitialized ray pointer if not in append mode
if not append:
self.raypt = 0
# stacked interactions
I = Interactions(slab=L.sl)
# rotation basis
B = IntB(slab=L.sl)
B0 = IntB(slab=L.sl)
# # LOS Interaction
# Los = IntL()
# Reflexion
R = IntR(slab=L.sl)
# Transmission
T = IntT(slab=L.sl)
# Diffraction
D = IntD(slab=L.sl)
idx = np.array(())
if self.los:
idxts = 1
nbrayt = 1
else:
idxts = 0
nbrayt = 0
# Transform dictionary of slab name to array
# slv = nx.get_node_attributes(L.Gs, "name").values()
# slk = nx.get_node_attributes(L.Gs, "name").keys()
# find all material used in simulation
#uslv = np.unique(L.sla[1:])
uslv = L.sl.keys()
#
# add CEIL and FLOOR
#
#uslv = np.hstack((uslv, np.array(('CEIL', 'FLOOR'))))
# create reverse dictionary with all materials as keys
# and associated point/segment as a value
#dsla = {}
#for s in uslv:
# dsla[s] = np.where(s == np.array(slv))[0]
nsmax = max(L.Gs.node.keys())
#sla = np.zeros((nsmax+1), dtype='S20')
# array type str with more than 1 character
# warning use zeros instead of empty because slab zero
# is virtually used before assigning correct slab to ceil and floor
#
# sla is an array of string.
# each value of Gs node is the index of the corresponding slab
#
#sla[slk] = np.array(slv)
R.dusl = dict.fromkeys(uslv, np.array((), dtype=int))
T.dusl = dict.fromkeys(uslv, np.array((), dtype=int))
#to be specified and limited to used wedges
if hasattr(self,'_luw'):
D.dusl = dict.fromkeys(self._luw, np.array((), dtype=int))
# transmission/reflection slab array
tsl = np.array(())
rsl = np.array(())
# diffraction wedge list
dw = np.array(())
# loop on group of interactions
for k in self:
if k !=0:
uR = uT = uD = uRf = uRc = 0.
# structure number (segment or point)
# nstr : i x r
nstr = self[k]['sig'][0, 1:-1, :]
# ityp : i x r
ityp = self[k]['sig'][1, 1:-1, :]
# theta : i x r ( related to interactions )
theta = self[k]['theta']
# (i+1) x r
si = self[k]['si']
# distance in
s_in = si[0:-1,:]
# distance out
s_out = si[1:,:]
if 'diffvect' in self[k]:
dvec = self[k]['diffvect']
ldsl = self[k]['diffslabs']
dix = self[k]['diffidx']
## flatten information
######################
# flatten nstr (1 dimension)
# size1 = i x r
size1 = nstr.size
# flatten ityp (method faster than np.ravel() )
nstrf = np.reshape(nstr,size1,order='F')
itypf = ityp.reshape(size1,order='F')
thetaf = theta.reshape(size1,order='F')
#sif = si[0, :, :].reshape(si[0, :, :].size)
# ## index creation / already done in rays.locbas
# ##################
# # create index for retrieving interactions
# # integer offset : total size idx
# idxts = idxts + idx.size
# idx = idxts + np.arange(ityp.size).reshape(np.shape(ityp),order='F')
# nbray = np.shape(idx)[1]
# self[k]['rays'] = idx
# self[k]['nbrays'] = nbray
# self[k]['rayidx'] = nbrayt + np.arange(nbray)
# # create a numpy array to relate the ray index to its corresponding
# # number of interactions
# # _ray2nbi = np.ones((nbray))
# #try:
# # self._ray2nbi=np.hstack((self._ray2nbi,_ray2nbi))
# #except:
# # self._ray2nbi=_ray2nbi
# #self._ray2nbi[self[k]['rayidx']] = k
# nbrayt = nbrayt + nbray
# #self.raypt = self.raypt + self[k]['nbrays']
idxf = self[k]['rays'].reshape(self[k]['rays'].size,order='F')
# (i+1)xr
#
size2 = si[:, :].size
nbray = self[k]['nbrays']
# TODO
# dirty fix
# nbray is either an int or an array. why ?
if type(nbray)==np.ndarray:
nbray=nbray[0]
# ,(i+1)xr
# sif = si[:, :].reshape(size2,order='F') # TO BE REMOVE
s_inf = s_in[:, :].reshape(ityp.size,order='F')
s_outf = s_out[:, :].reshape(ityp.size,order='F')
# 2x2,(i+1)xr
#
# self[k]['B'] 3 x 3 x i x r
#
# first unitary matrix (3x3xr)
b0 = self[k]['B'][:,:,0,:]
# first unitary matrix 1:
# dimension i and r are merged
b = self[k]['B'][:,:,1:,:].reshape(3, 3, size2-nbray,order='F')
## find used slab
##################
# find slab type for the rnstr
# nstrf is a number of slab
# this is a problem for handling subsegment
#
# seek for interactions position
################################
uD = np.where((itypf == 1))[0]
uR = np.where((itypf == 2))[0]
uT = np.where((itypf == 3))[0]
uRf = np.where((itypf == 4))[0]
uRc = np.where((itypf == 5))[0]
# assign floor and ceil slab
############################
slT = [ L.Gs.node[x]['name'] for x in nstrf[uT] ]
slR = [ L.Gs.node[x]['name'] for x in nstrf[uR] ]
# WARNING
# in future versions floor and ceil could be different for each cycle.
# this information would be directly obtained from L.Gs
# then the two following lines would have to be modified
slRf = np.array(['FLOOR']*len(uRf))
slRc = np.array(['CEIL']*len(uRc))
# Fill the used slab
#####################
tsl = np.hstack((tsl, slT))
rsl = np.hstack((rsl, slR, slRf, slRc))
if 'diffvect' in self[k]:
dw = np.hstack((dw,self[k]['diffslabs']))
## for s in uslv:
##
## T.dusl[s]=np.hstack((T.dusl[s],len(T.idx) + np.where(sl[uT]==s)[0]))
## R.dusl[s]=np.hstack((R.dusl[s],len(R.idx) + np.where(sl[uR]==s)[0]))
## R.dusl['FLOOR']=np.hstack((R.dusl['FLOOR'],len(R.idx)+len(uR) + np.where(sl[uRf]=='FLOOR')[0]))
# R.dusl['CEIL']=np.hstack((R.dusl['CEIL'],len(R.idx)+len(uR)+len(uRf) +
# np.where(sl[uRc]=='CEIL')[0]))
# Basis
# Hugr issue with B index
# Friedman version Bs was entering in the index
# maybe B can have the same index that interactions
# but this must be managed when evaluation of CIR is made
# BU 10/4/2013
# .. todo:: idxf is no longer the right index here
# why the transposition b is first 2x2x(i+1)xr
# idxf is (ixr)
#
# need to check how B is used in eval()
#
# Warning
# -------
# B.idx refers to an interaction index
# whereas B0.idx refers to a ray number
# B.stack(data=b.T, idx=idxf)
# B0.stack(data=b0.T,idx=self[k]['rayidx'])
B.stack(data=b.T, idx=idxf)
B0.stack(data=b0.T,idx=self[k]['rayidx'])
### Reflexion
############
### wall reflexion
#(theta, s_in,s_out)
R.stack(data=np.array((thetaf[uR], s_inf[uR], s_outf[uR])).T,
idx=idxf[uR])
# floor reflexion
R.stack(data=np.array((thetaf[uRf], s_inf[uRf], s_outf[uRf])).T,
idx=idxf[uRf])
# ceil reflexion
R.stack(data=np.array((thetaf[uRc], s_inf[uRc], s_outf[uRc])).T,
idx=idxf[uRc])
# R.stack(data=np.array((thetaf[uR], sif[uR], sif[uR+1])).T,
# idx=idxf[uR])
# # floor reflexion
# R.stack(data=np.array((thetaf[uRf], sif[uRf], sif[uRf+1])).T,
# idx=idxf[uRf])
# # ceil reflexion
# R.stack(data=np.array((thetaf[uRc], sif[uRc], sif[uRc+1])).T,
# idx=idxf[uRc])
### sl[idxf[uT]]
# Transmision
############
# (theta, s_in,s_out)
# T.stack(data=np.array((thetaf[uT], sif[uT], sif[uT+1])).T, idx=idxf[uT])
T.stack(data=np.array((thetaf[uT], s_inf[uT], s_outf[uT])).T, idx=idxf[uT])
###
#Diffraction
#phi0,phi,si,sd,N,mat0,matN,beta
#
if 'diffvect' in self[k]:
# self[k]['diffvect'] = ((phi0,phi,beta,N) x (nb_rayxnb_interactions) )
#si and so are stacked at the end of self[k]['diffvect']
#as well:
#data = (6 x (nb_rayxnb_interactions) )
# ((phi0,phi,beta,N,sin,sout) x (nb_rayxnb_interactions) )
data = np.vstack((self[k]['diffvect'],s_inf[uD],s_outf[uD]))
D.stack(data=data.T,idx=self[k]['diffidx'])#idxf[uD])
elif self.los:
ze = np.array([0])
#self[k]['rays'] = np.array(([[0]]))
#self[k]['nbrays'] = 1
#self[k]['rayidx'] = ze
#self.raypt = 1
#self._ray2nbi=ze
B.stack(data=np.eye(3)[np.newaxis,:,:], idx=ze)
B0.stack(data=np.eye(3)[np.newaxis,:,:],idx=ze)
if len(tsl)>0:
T.create_dusl(tsl)
if len(rsl)>0:
R.create_dusl(rsl)
if len(dw)>0:
D.create_dusl(dw)
# create interactions structure
self.I = I
self.I.add([T, R, D])
# create rotation base B
self.B = B
# create rotation base B0
self.B0 = B0
self.filled = True
def eval(self,fGHz=np.array([2.4]),bfacdiv=False,ib=[]):
""" field evaluation of rays
Parameters
----------
fGHz : array
frequency in GHz
ib : list of interactions block
"""
#print 'Rays evaluation'
self.fGHz=fGHz
# evaluation of all interactions
#
# core calculation of all interactions is done here
#
self.I.eval(fGHz)
# if np.isnan(self.I.I).any():
# pdb.set_trace()
# evaluation of base B (2x2)
# B and B0 do no depend on frequency
# just an axis extension (np.newaxis)
#pdb.set_trace()
# 1 x i x 3 x 3
B = self.B.data[np.newaxis,...]
B = B.swapaxes(2,3)
# 1 x r x 3 x 3
B0 = self.B0.data[np.newaxis,...]
B0 = B0.swapaxes(2,3)
# Ct : f x r x 3 x 3
Ct = np.zeros((self.I.nf, self.nray, 3, 3), dtype=complex)
# delays : ,r
self.delays = np.zeros((self.nray))
# dis : ,r
self.dis = np.zeros((self.nray))
#nf : number of frequency point
nf = self.I.nf
aod= np.empty((2,self.nray))
aoa= np.empty((2,self.nray))
# loop on interaction blocks
if ib==[]:
ib=self.keys()
# loop over group of interactions
for l in ib:
# ir : ray index
ir = self[l]['rayidx']
aoa[:,ir]=self[l]['aoa']
aod[:,ir]=self[l]['aod']
if l != 0:
# l stands for the number of interactions
r = self[l]['nbrays']
# dirty fix should not be an array
if type(r)==np.ndarray:
r = r[0]
# reshape in order to have a 1D list of index
# reshape ray index
rrl = self[l]['rays'].reshape(r*l,order='F')
# get the corresponding evaluated interactions
#
# reshape error can be tricky to debug.
#
# f , r , l , 2 , 2
A = self.I.I[:, rrl, :, :].reshape(self.I.nf, r, l, 3, 3)
# get the corresponding unitary matrix B
# 1 , r , l , 2 , 2
#Bl = B[:, rrl, :, :].reshape(self.I.nf, r, l, 2, 2,order='F')
Bl = B[:, rrl, :, :].reshape(1, r, l, 3, 3)
# get the first unitary matrix B0l
B0l = B0[:,ir,:, :]
# get alpha
alpha = self.I.alpha[rrl].reshape(r, l,order='F')
# # get gamma
gamma = self.I.gamma[rrl].reshape(r, l,order='F')
# # get si0
si0 = self.I.si0[rrl].reshape(r, l,order='F')
# # get sout
sout = self.I.sout[rrl].reshape(r, l,order='F')
try:
del Z
except:
pass
#print "\nrays",ir
#print "-----------------------"
## loop on all the interactions of ray with l interactions
for i in range(0, l):
############################################
## # Divergence factor D
### not yet implemented
############################################
# if i == 0:
# pdb.set_trace()
# D0 = 1./si0[:,1]
# rho1 = si0[:,1]*alpha[:,i]
# rho2 = si0[:,1]*alpha[:,i]*gamma[:,i]
# D =np.sqrt(
# ( (rho1 ) / (rho1 + sout[:,i]) )
# *( (rho2) / (rho2 + sout[:,i])))
# D=D*D0
# rho1=rho1+(sout[:,i]*alpha[:,i])
# rho2=rho2+(sout[:,i]*alpha[:,i]*gamma[:,i])
#
# ## handle the loss
# if np.isnan(D).any():
# p=np.nonzero(np.isnan(D))[0]
# D[p]=1./sout[p,1]
# else :
# D=np.sqrt(
# ( (rho1 ) / (rho1 + sout[:,i]) )
# *( (rho2) / (rho2 + sout[:,i])))
#
# rho1=rho1+(sout[:,i]*alpha[:,i])
# rho2=rho2+(sout[:,i]*alpha[:,i]*gamma[:,i])
############################################
# A0 (X dot Y)
# | | |
# v v v
##########################
## B # I # B # I # B #
##########################
# \_____/ \______/
# | |
# Atmp(i) Atmp(i+1)
#
# Z=Atmp(i) dot Atmp(i+1)
#X = A [:, :, i, :, :]
#Y = Bl[:, :, i, :, :]
# pdb.set_trace()
if i == 0:
## First Basis added
Atmp = A[:, :, i, :, :]
B00 = B0l[:, :, :, :]
Z = np.sum(Atmp[..., :, :, np.newaxis]
*B00[..., np.newaxis, :, :], axis=-2)
else:
Atmp = A[:, :, i, :, :]
BB = Bl[:, :, i-1, :, :]
Ztmp = np.sum(Atmp[..., :, :, np.newaxis]
*BB[..., np.newaxis, :, :], axis=-2)
Z = np.sum(Ztmp[..., :, :, np.newaxis]
*Z[..., np.newaxis, :, :], axis=-2)
if i == l-1:
BB = Bl[:, :, i, :, :]
Z = np.sum(BB[..., :, :, np.newaxis]
*Z[..., np.newaxis, :, :], axis=-2)
# fill the C tilde MDA
Ct[:,ir, :, :] = Z[:, :, :, :]
#
if bfacdiv:
Ct[:,ir, :, :] = Ct[:, ir, :, :]*1./(self[l]['dis'][np.newaxis, :, np.newaxis, np.newaxis])
else:
Ct[:,ir, :, :] = Ct[:, ir, :, :]*1./(self[l]['dis'][np.newaxis, :, np.newaxis, np.newaxis])
self.delays[ir] = self[l]['dis']/0.3
self.dis[ir] = self[l]['dis']
#
# true LOS when no interaction
#
if self.los:
Ct[:,0, :, :]= np.eye(3,3)[None,None,:,:]
#self[0]['dis'] = self[0]['si'][0]
# Friis (1/d spreading factor)
Ct[:,0, :, :] = Ct[:,0, :, :]*1./(self[0]['dis'][None, :, None, None])
self.delays[0] = self[0]['dis']/0.3
self.dis[0] = self[0]['dis']
# To be corrected in a future version
#
# Ct : nf , Nray , theta , phi
#
# to
#
# Ct : Nray x nf , theta , phi
#
Ct = np.swapaxes(Ct, 1, 0)
#c11 = Ct[:,:,0,0]
#c12 = Ct[:,:,0,1]
#c21 = Ct[:,:,1,0]
#c22 = Ct[:,:,1,1]
c11 = Ct[:,:,1,1]
c12 = Ct[:,:,1,2]
c21 = Ct[:,:,2,1]
c22 = Ct[:,:,2,2]
#
# Construction of the Ctilde propagation channel structure
#
Cn = Ctilde()
# Cn.Cpp = bs.FUsignal(self.I.fGHz, c11)
# Cn.Cpt = bs.FUsignal(self.I.fGHz, c12)
# Cn.Ctp = bs.FUsignal(self.I.fGHz, c21)
# Cn.Ctt = bs.FUsignal(self.I.fGHz, c22)
Cn.Ctt = bs.FUsignal(self.I.fGHz, c11)
Cn.Ctp = bs.FUsignal(self.I.fGHz, c12)
Cn.Cpt = bs.FUsignal(self.I.fGHz, c21)
Cn.Cpp = bs.FUsignal(self.I.fGHz, c22)
Cn.nfreq = self.I.nf
Cn.nray = self.nray
Cn.tauk = self.delays
Cn.fGHz = self.I.fGHz
# r x 2
Cn.tang = aod.T
Cn.tangl = aod.T
# r x 2
#
# recover angle of arrival convention
#
Cn.rang = np.hstack([np.pi-aoa.T[:,[0]],aoa.T[:,[1]]-np.pi])
Cn.rangl = np.hstack([np.pi-aoa.T[:,[0]],aoa.T[:,[1]]-np.pi])
# add aoa and aod
self.evaluated = True
return(Cn)
def rayfromseg(self,ls):
''' DEPRECATED
use rayfromnstr instead
'''
DeprecationWarning('function name update: use rayfromnstr instead')
return self.rayfromnstr(ls)
def rayfromnstr(self,ls):
""" returns the indexes of rays for a given interaction list
"""
if not isinstance(ls,list):
ls = [ls]
lur = []
for k in self:
aib = self[k]['sig'][0,...]
for i in ls :
# import ipdb
# ipdb.set_trace()
ui, ur = np.where(aib == i)
lur.extend(self[k]['rayidx'][ur].tolist())
return np.sort(lur)
def rayfromdelay(self,t0=0,t1=[]):
""" returns the indexes of rays between 2 timestamps t0 and t1
"""
if t1 == []:
t1 = self.delays.max()
u = np.where((self.delays>t0) & (self.delays<t1))[0]
return u
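# Example (sketch): indices of rays whose delay lies between 10 and 50 ns
#
#   u = r3d.rayfromdelay(t0=10, t1=50)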
def ray2slab(self,L,ir):
""" return the slabs for a given interaction index
Parameters
----------
L : Layout
ir : interaction block
Returns
-------
numpy array of slab strings with shape (i, r)
i : number of interactions (of the interaction block)
r : number of rays
"""
v=np.vectorize( lambda t: L.Gs.node[t]['name'] if (t!=0) and (t>0) else '_')
return v(self[ir]['sig'][0])
def ray(self, r):
""" returns the index of interactions of r
Parameters
----------
r : integer
ray index
Returns
-------
ir : nd.array
index of interactions of r
Examples
--------
"""
raypos = np.nonzero(self[self._ray2nbi[r]]['rayidx'] == r)[0]
return(self[self._ray2nbi[r]]['rays'][:,raypos][:,0])
def ir2a(self,ir):
""" index ray 2 address ray
Parameters
----------
ir : integer
Returns
-------
(ni,ux) : tuple address (group of interactions, index)
"""
assert ir < self.nray, "wrong ray index"
ni = self._ray2nbi[ir]
ur = np.where(self[ni]['rayidx']==ir)[0][0]
return(ni,ur)
def a2ir(self,t):
""" address ray 2 index ray
Parameters
----------
t = (ni,ux) : tuple address (group of interactions, index)
ray address
Returns
-------
ir : integer
ray index
"""
assert t[0] in self.keys(), "wrong number of interactions"
ir = self[t[0]]['rayidx'][t[1]]
return(ir)
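# Example (sketch): ir2a / a2ir are inverse mappings between the global ray
# index and the (interaction-block, position) address
#
#   ni, ur = r3d.ir2a(10)
#   assert r3d.a2ir((ni, ur)) == 10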
def ray2ityp(self,r):
""" return interaction type for a given ray
Parameters
----------
r : integer
ray index
Returns
-------
lt : list
list of type of interactions
"""
di = {1:'D',2:'R',3:'T',4:'R',5:'R'}
sig = self.ray2sig(r)
sig = sig[1,1:-1]
return [di[s] for s in sig]
def ray2nbi(self,r):
""" Get interaction block/number of interactions of a given ray
Parameters
----------
r : integer
ray index
Returns
-------
nbi : int
interaction block number
"""
i = self._ray2nbi[r]
return i
def ray2iidx(self,ir):
""" Get interactions index of a given ray
Parameters
----------
ir : integer
ray index
Returns
-------
iidx : array
interaction index
"""
unbi = self.ray2nbi(ir)
ur = np.where(self[unbi]['rayidx']==ir)[0]
return self[unbi]['rays'][:,ur]
def ray2sig(self,ir):
""" get signature to corresponding ray
"""
unbi = self.ray2nbi(ir)
ur = np.where(self[unbi]['rayidx']==ir)[0]
return self[unbi]['sig'][:,:,ur].squeeze()
def ray2sig2d(self,ir):
""" get signature to corresponding ray
"""
sig = self.ray2sig(ir)
sig = sig.squeeze()
sig = sig[:,1:-1] # remove extremal 0
unfc = np.where(sig[1,:]<4)[0]# index floor cell
sig2d = sig[:,unfc]
return sig2d
def ray2inter(self,ir,L,Si):
""" get interaction list (Gi style) from a ray
Parameters
----------
ir : ray index
L : Layout
Si : Signatures object
"""
sig = self.ray2sig2d(ir)
return Si.sig2inter(L,sig)
def slab_nb(self, ir):
""" returns the slab numbers of r
Parameters
----------
ir : integer
ray index
Returns
-------
isl : slabs number
"""
raypos = np.nonzero(self[self._ray2nbi[ir]]['rayidx'] == ir)[0]
return(self[self._ray2nbi[ir]]['sig'][0,1:-1,raypos[0]])
def vis(self,ir,L):
typ = ['Tx'] + self.typ(ir) + ['Rx']
slab_nb = self.slab_nb(ir)
slab_nb = np.insert(slab_nb,0,0)
slab_nb = np.insert(slab_nb,len(slab_nb),0)
nbi = self._ray2nbi[ir]
raypos = np.nonzero(self[nbi]['rayidx'] == ir)[0]
pt = self[nbi]['pt'][:,:,raypos]
tz = pt[2].ravel()
slab = [ L.Gs.node[x]['name'] for x in slab_nb if x > 0]
st = ''
for t in typ:
st = st + t+' '
print(st)
st = ''
for s in slab_nb:
st = st + str(s)+' '
print(st)
st = ''
for z in tz:
st = st + str(z)+' '
print(st)
print(slab)
def typ(self, ir,fromR=True):
""" returns interactions list type of a given ray
Parameters
----------
ir : integer
ray index
fromR : bool
True : get information from signature in R
False: get information in R.I
"""
#
# In this function we can see that the ceil and floor
# are hard coded as reflection. This is going to evolve
# for implementation of multi floor
#
if fromR:
di = {0:'L',1:'D',2:'R',3:'T',4:'R',5:'R'}
nbi = self._ray2nbi[ir]
raypos = np.nonzero(self[nbi]['rayidx'] == ir)[0]
inter = self[nbi]['sig'][1,1:-1,raypos][0]
return [di[i] for i in inter]
else:
a = self.ray(ir)
return(self.I.typ[a])
def dump(self,ir,L,ifGHz=0,filename='dumpray.ray'):
""" dump the full information of a ray in a file
"""
nbi = self._ray2nbi[ir]
ur = np.where(self[nbi]['rayidx']==ir)[0][0]
fd=open(filename,'w')
fd.write('ray #'+str(ir)+'\n')
fd.write(str(ur)+ ' th ray from the group of ' + str(nbi)+' Interactions' +'\n')
cy_a = L.pt2cy(self.pTx)
cy_b = L.pt2cy(self.pRx)
#fd.write('Tx #'+str(self.pTx)+'\n')
#fd.write('Rx #'+str(self.pRx)+'\n')
if self.evaluated:
ray = self.ray(ir)
typ = self.typ(ir)
slabnb = self.slab_nb(ir)
fd.write(' ray #'+str(ray)+'\n')
#fd.write(' typ #'+str(typ)+'\n')
fd.write(' slab #'+str(slabnb)+'\n')
for k in range(nbi+2):
if k==0:
fd.write('Tx : ')
elif k==(nbi+1):
fd.write('Rx : ')
else:
six = slabnb[k-1]
if six==0:
slabname='FLOOR'
cyc =[-2,-3]
else:
slabname = L.Gs.node[six]['name']
cyc = L.Gs.node[six]['ncycles']
if typ[k-1]=='T':
fd.write('T '+slabname +' ('+str(six)+','+str(cyc[0])+','+str(cyc[1])+')')
if typ[k-1]=='R':
fd.write('R '+slabname +' ('+str(six)+',)')
if typ[k-1]=='D':
fd.write('D ('+str(six)+') :')
fd.write(str(self[nbi]['pt'][:,k,ur])+'\n' )
if k==0:
fd.write(' '+str(cy_a)+'\n')
elif k==(nbi+1):
fd.write(' '+str(cy_b)+'\n')
if k==0:
for l in range(3):
if l<2:
fd.write('\t'+str(self[nbi]['Bo0'][l,:,ur])
+'\t'+str(self[nbi]['B'][l,:,0,ur])+'\n')
else:
fd.write('\t'+str(self[nbi]['Bo0'][l,:,ur]) +'\n')
elif k==(nbi+1):
for l in range(3):
fd.write('\t'+str(self[nbi]['BiN'][l,:,ur])+'\n')
else:
for l in range(3):
if l<2:
fd.write('\t'+str(self[nbi]['Bi'][l,:,k-1,ur])+'\t'+
str(self[nbi]['Bo'][l,:,k-1,ur])
+'\t'+str(self[nbi]['B'][l,:,k-1,ur])+'\n')
else:
fd.write('\t'+str(self[nbi]['Bi'][l,:,k-1,ur])+'\t'+
str(self[nbi]['Bo'][l,:,k-1,ur])+'\n')
fd.close()
def info(self,ir,ifGHz=0,bB=True,matrix=False):
""" provides information for a given ray r
Parameters
----------
ir : int
ray index
ifGHz : int
frequency index
bB: boolean
display Basis
matrix :
display matrix
"""
if self.evaluated:
print('-------------------------')
print('Informations of ray #', ir)
print('-------------------------\n')
ray = self.ray(ir)
typ = self.typ(ir)
slabnb = self.slab_nb(ir)
# if there is a diffraction, phi0, phi, beta are shown
if 'D' in typ:
diff =True
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7}, {5:10}, {6:10}, {7:4}, {8:4}, {9:4}'\
.format('Index',
'type',
'slab',
'nstr' ,
'th(rad)',
'alpha',
'gamma2',
'phi0',
'phi',
'beta'))
else :
diff =False
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7}, {5:10}, {6:10}'\
.format('Index',
'type',
'slab',
'nstr',
'th(rad)',
'alpha',
'gamma2'))
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'\
.format(ir, 'B0','-', '-', '-', '-', '-'))
for iidx, i in enumerate(typ):
# import ipdb
# ipdb.set_trace()
if i == 'T' or i == 'R' or i =='D':
I = getattr(self.I, i)
for slab in I.dusl.keys():
# print slab
midx = I.dusl[slab]
# print midx
Iidx = np.array((I.idx))[midx]
if i != 'D':
th = I.data[I.dusl[slab], 0]
gamma = I.gamma[midx]
alpha = I.alpha[midx]
else :
# from IPython.core.debugger import Tracer
# Tracer()()
th=['-']*max(max(Iidx),1)
gamma = ['NC']*max(max(Iidx),1)
alpha = ['NC']*max(max(Iidx),1)
udiff = np.where(self.I.D.idx==ray[iidx])[0]
phi0 = self.I.D.phi0[udiff][0]
phi=self.I.D.phi[udiff][0]
beta=self.I.D.beta[udiff][0]
for ii, Ii in enumerate(Iidx):
if Ii == ray[iidx]:
if i=='D':
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10}, {6:10}, {7:3.4}, {8:3.4}, {9:3.4}'\
.format(Ii, i, slab, slabnb[iidx], th[ii], alpha[ii], gamma[ii],phi0,phi,beta))
else:
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'\
.format(Ii, i, slab, slabnb[iidx], th[ii], alpha[ii], gamma[ii]))
else:
if bB:
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'.format(ray[iidx], 'B', '-', '-', '-', '-', '-'))
# print '{0:5} , {1:4}, {2:10}, {3:7}, {4:10}, {5:10}'.format(ray[iidx], i, '-', '-', '-', '-')
if matrix:
print('\n----------------------------------------')
print(' Matrix of ray #', ir, 'at f=', self.I.fGHz[ifGHz])
print('----------------------------------------')
lmat = []
ltran = []
if bB:
print('rotation matrix#', 'type: B0')
B0 = self.B0.data[ir,:,:]
addr = self.ir2a(ir)
Bo0 = self[addr[0]]['Bo0'][:,:,addr[1]]
Bi1 = self[addr[0]]['Bi'][:,:,0,addr[1]]
U = np.dot(Bi1.T,Bo0)
assert np.allclose(B0,U)
lmat.append(B0)
ltran.append(B0)
print(B0)
for iidx, i in enumerate(typ):
print('interaction #', ray[iidx], 'type:', i)
# f x l x 2 x 2
I = self.I.I[ifGHz, ray[iidx], :, :]
print(I)
lmat.append(I)
if bB:
print('rotation matrix#',[ray[iidx]], 'type: B')
B = self.B.data[ray[iidx], :, :]
print(B)
lmat.append(B)
ltran.append(B)
# evaluate matrix product
PM0=np.eye(3)
PM1=np.eye(3)
for m in lmat[::-1]:
PM0=np.dot(PM0,m)
for m in ltran[::-1]:
PM1=np.dot(PM1,m)
print("matrix product with interactions (dB)")
print(20*np.log10(np.abs(PM0[1,1])),' ',20*np.log10(np.abs(PM0[1,2])))
print(20*np.log10(np.abs(PM0[2,1])),' ',20*np.log10(np.abs(PM0[2,2])))
print("matrix product without interactions (dB)")
print(20*np.log10(np.abs(PM1[1,1])),' ',20*np.log10(np.abs(PM1[1,2])))
print(20*np.log10(np.abs(PM1[2,1])),' ',20*np.log10(np.abs(PM1[2,2])))
return(PM0)
else:
print('\nto display matrix, use matrix=True on call')
else:
print('Rays have not been evaluated yet')
def signature(self, u , typ='full'):
""" extract ray signature
Parameters
----------
u : tuple or int
if tuple addr
if int index
Returns
-------
sig : ndarray
Notes
-----
The signature of a ray is stored as a member
r[nint]['sig']
"""
if type(u)==tuple:
addr = u
else:
addr = self.ir2a(u)
if typ=='full':
sig = self[addr[0]]['sig'][:,:,addr[1]]
else:
pass
return(sig)
def show3d(self,
ray,
bdis=True,
bbas=False,
bstruc=True,
col=np.array([1, 0, 1]),
id=0,
linewidth=1):
""" plot a set of 3D rays
Parameters
----------
ray :
block : int
interaction block
bdis : Boolean
if False return .vect filename (True)
bbas : Boolean
display local basis (False)
bstruc : Boolean
display structure (True)
col : ndarray() 1x3
color of the ray ([1,0,1])
id : Integer
id of the ray (default 0)
linewidth : Integer
default 1
"""
filerac = pyu.getlong("ray" + str(id), pstruc['DIRGEOM'])
_filerac = pyu.getshort(filerac)
filename_list = filerac + '.list'
filename_vect = filerac + '.vect'
try:
fo = open(filename_vect, "w")
except:
raise NameError(filename_vect)
fo.write("appearance { linewidth %d }\n" % linewidth)
fo.write("VECT\n")
fo.write("1 %d 1\n\n" % len(ray[0, :]))
fo.write("%d\n" % len(ray[0, :]))
fo.write("1\n")
for i in range(len(ray[0, :])):
fo.write("%g %g %g\n" % (ray[0, i], ray[1, i],
ray[2, i]))
# fo.write("%d %d %d 0\n" % (col[0],col[1],col[2]))
fo.write("%g %g %g 0\n" % (col[0], col[1], col[2]))
fo.close()
#
# Add the local bases
#
fo = open(filename_list, "w")
fo.write("LIST\n")
fo.write("{<" + filename_vect + "}\n")
if (bstruc):
# fo.write("{<strucTxRx.off}\n")
fo.write("{<" + _filestr + ".off}\n")
filename = filename_list
fo.close()
if (bdis):
#
# Geomview Visualisation
#
chaine = "geomview -nopanel -b 1 1 1 " + filename + \
" 2>/dev/null &"
os.system(chaine)
else:
return(filename)
def _show3(self,L=[],rlist=[],newfig=False,cmap='hot',**kwargs):
""" plot 3D rays in environment using Mayavi
Parameters
----------
L : Layout object
Layout to be displayed
rlist : list
list of index rays
newfig : boolean (default: False)
if true create a new mayavi figure
else : use the current
ER: Ray energy
"""
if newfig:
mlab.clf()
f = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
else :
f = mlab.gcf()
# view=mlab.view()
if L != []:
try:
L._filename
except:
raise NameError('L argument must be a layout object')
L._show3()
if 'ER' in kwargs:
ER = kwargs['ER']
color_range = np.linspace( 0, 1., len(ER))#np.linspace( 0, np.pi, len(ER))
uER = ER.argsort()[::-1]
colors= color_range[uER]
if rlist ==[]:
nbi = self.keys()
for i in nbi:
r = range(np.shape(self[i]['pt'])[2])
ridx = self[i]['rayidx']
# number of rays
nbr = len(r)
# current number of interactions
cnbi = i + 2
# import ipdb
# ipdb.set_trace()
pt = self[i]['pt'][:,:,r].reshape(3,cnbi*nbr,order='F')
l0 = np.array([np.arange(0,cnbi-1)+k*cnbi for k in range(nbr)]).ravel()
l1 = l0+1
connection = np.vstack((l0,l1)).T
if 'ER' in kwargs:
rc = np.repeat(colors[ridx],cnbi)
rc[::cnbi]=0
src = mlab.pipeline.scalar_scatter(pt[0,:], pt[1,:], pt[2,:],rc,colormap=cmap)
else:
src = mlab.pipeline.scalar_scatter(pt[0,:], pt[1,:], pt[2,:])
src.mlab_source.dataset.lines=connection
src.update()
lines = mlab.pipeline.stripper(src)
mlab.pipeline.surface(lines,opacity=0.5,colormap=cmap)
f.children[-1].name='Rays with ' + str(i) + ' interactions'
else :
nbi = self._ray2nbi[rlist]
nr = np.array((nbi,rlist))
unb = np.unique(nr[0,:])
unr = {int(i):np.where(nr[0,:]==i)[0] for i in unb}
for i in unb:
raynb = (nr[1,unr[i]]).astype(int)
nbr=len(raynb)
ptidx = [np.where(self[i]['rayidx']==x)[0][0] for x in raynb]
# current number of interactions
cnbi = i + 2
pt = self[i]['pt'][:,:,ptidx].reshape(3,cnbi*nbr,order='F')
# lines = np.arange(cnbi*nbr).reshape(cnbi,nbr)
lines = np.arange(cnbi*nbr).reshape(nbr,cnbi)
# mesh = tvtk.PolyData(points=pt.T, polys=lines)
mesh = tvtk.PolyData(points=pt.T, polys=lines)
mlab.pipeline.surface(mlab.pipeline.extract_edges(mesh),
color=(0, 0, 0), )
f.children[-1].name='Rays with ' + str(int(i)) + ' interactions'
# mlab.view(view[0],view[1],view[2],view[3])
return(f)
def show3(self,
L=[],
bdis=True,
bstruc=True,
bbasi = False,
bbaso = False,
id=0,
ilist=[],
raylist=[],centered=True):
""" plot 3D rays within the simulated environment
Parameters
----------
bdis : boolean
True
bstruc : boolean
True
bbasi : boolean
display input basis of each interaction of rays
bbaso : boolean
display output basis of each interaction of rays
id : int
L : Layout object
Layout to be displayed
ilist : list of group of interactions
raylist : list of index rays
centered : boolean
if True center the layout before display
"""
try:
L._filename
except:
raise NameError('L argument must be a layout object')
if not centered:
pg=np.array([[0],[0],[0]])
strucname= L._filename.split('.')[0]
pg = L.geomfile(centered=centered)
pg = np.hstack((pg,0.)).reshape(3,1)
if ilist == []:
ilist = self.keys()
pTx = self.pTx.reshape((3, 1))-pg
pRx = self.pRx.reshape((3, 1))-pg
filename = pyu.getlong("grRay" + str(id) + ".list", pstruc['DIRGEOM'])
fo = open(filename, "w")
fo.write("LIST\n")
if bstruc:
fo.write("{<"+strucname+".off}\n")
if bbasi:
if not self.isbased:
raise NameError('Bases have not been computed (self.locbas(Layout))')
else:
base_listi = geu.Geomlist('baselisti',clear=True)
base_listi.append("LIST\n")
if bbaso:
if not self.isbased:
raise NameError('Bases have not been computed (self.locbas(Layout))')
else:
base_listo = geu.Geomlist('baselisto',clear=True)
base_listo.append("LIST\n")
# fo.write("{<strucTxRx.off}\n")
k = 0
for i in ilist:
if raylist == []:
rlist = range(np.shape(self[i]['pt'])[2])
else:
rlist = raylist
for j in rlist:
ray = np.hstack((pTx,np.hstack((self[i]['pt'][:, :, j]-pg, pRx))))
# ray = rays[i]['pt'][:,:,j]
col = np.array([0, 0, 0])
# print ray
fileray = self.show3d(ray=ray, bdis=False,
bstruc=False, col=col, id=k)
k += 1
fo.write("{< " + fileray + " }\n")
if bbasi:
for inter in range(i):
filebi = 'bi_' + str(j) + '_' + str(i) + '_' +str(inter)
basi = geu.GeomVect(filebi)
basi.geomBase(self[i]['Bi'][:,:,inter,j],pt=self[i]['pt'][:,inter+1,j]-pg[:,0])
base_listi.append("{<" + filebi +'.vect' "}\n")
filebi = 'bi_' + str(j) + '_' + str(i) + '_' +str(inter-1)
basi = geu.GeomVect(filebi)
basi.geomBase(self[i]['BiN'][:,:,j],pt=self[i]['pt'][:,-1,j]-pg[:,0])
base_listi.append("{<" + filebi +'.vect' "}\n")
if bbaso:
for inter in range(i):
filebo = 'bo_' + str(j) + '_' + str(i) + '_' +str(inter)
baso = geu.GeomVect(filebo)
baso.geomBase(self[i]['Bo'][:,:,inter,j],pt=self[i]['pt'][:,inter+1,j]-pg[:,0])
base_listo.append("{<" + filebo +'.vect' "}\n")
filebo = 'bo_' + str(j) + '_' + str(i) + '_' +str(inter+1)
baso = geu.GeomVect(filebo)
baso.geomBase(self[i]['Bo0'][:,:,j],pt=self[i]['pt'][:,0,j]-pg[:,0])
base_listo.append("{<" + filebo +'.vect' "}\n")
if bbasi:
fo.write("{< " + "baselisti.list}\n")
if bbaso:
fo.write("{< " + "baselisto.list}\n")
fo.close()
if (bdis):
chaine = "geomview " + filename + " 2>/dev/null &"
os.system(chaine)
else:
return(filename)
if __name__ == "__main__":
doctest.testmod()
|
pylayers/pylayers
|
pylayers/antprop/rays.py
|
Python
|
mit
| 132,523
|
[
"Mayavi"
] |
cac3cafe33ef01707109df174c7d6c73a6f608db985cae225d8258e6e2837c64
|
"""
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from scipy.io.harwell_boeing._fortran_format_parser import \
FortranFormatParser, IntFormat, ExpFormat
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
"HBMatrixType"]
class MalformedHeader(Exception):
pass
class LineOverflow(Warning):
pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
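# Illustrative note (not part of the upstream module): for a hypothetical
# parsed format with repeat=8 and width=10 spread over nlines=3, the two
# leading full lines account for (8*10 + 1)*2 = 162 bytes, the +1 covering
# each newline; the trailing, possibly partial, line is then consumed
# separately with readline() in _read_hb_data below.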
class HBInfo(object):
@classmethod
def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
"""Create a HBInfo instance from an existing sparse matrix.
Parameters
----------
m : sparse matrix
the HBInfo instance will derive its parameters from m
title : str
Title to put in the HB header
key : str
Key
mxtype : HBMatrixType
type of the input matrix
fmt : dict
not implemented
Returns
-------
hb_info : HBInfo instance
"""
m = m.tocsc(copy=False)
pointer = m.indptr
indices = m.indices
values = m.data
nrows, ncols = m.shape
nnon_zeros = m.nnz
if fmt is None:
# +1 because HB use one-based indexing (Fortran), and we will write
# the indices /pointer as such
pointer_fmt = IntFormat.from_number(np.max(pointer+1))
indices_fmt = IntFormat.from_number(np.max(indices+1))
if values.dtype.kind in np.typecodes["AllFloat"]:
values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
elif values.dtype.kind in np.typecodes["AllInteger"]:
values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
else:
raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
else:
raise NotImplementedError("fmt argument not supported yet.")
if mxtype is None:
if not np.isrealobj(values):
raise ValueError("Complex values not supported yet")
if values.dtype.kind in np.typecodes["AllInteger"]:
tp = "integer"
elif values.dtype.kind in np.typecodes["AllFloat"]:
tp = "real"
else:
raise NotImplementedError("type %s for values not implemented"
% values.dtype)
mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
else:
raise ValueError("mxtype argument not handled yet.")
def _nlines(fmt, size):
nlines = size // fmt.repeat
if nlines * fmt.repeat != size:
nlines += 1
return nlines
pointer_nlines = _nlines(pointer_fmt, pointer.size)
indices_nlines = _nlines(indices_fmt, indices.size)
values_nlines = _nlines(values_fmt, values.size)
total_nlines = pointer_nlines + indices_nlines + values_nlines
return cls(title, key,
total_nlines, pointer_nlines, indices_nlines, values_nlines,
mxtype, nrows, ncols, nnon_zeros,
pointer_fmt.fortran_format, indices_fmt.fortran_format,
values_fmt.fortran_format)
@classmethod
def from_file(cls, fid):
"""Create a HBInfo instance from a file object containing a matrix in the
HB format.
Parameters
----------
fid : file-like object
File or file-like object containing a matrix in the HB format.
Returns
-------
hb_info : HBInfo instance
"""
# First line
line = fid.readline().strip("\n")
if not len(line) > 72:
raise ValueError("Expected at least 72 characters for first line, "
"got: \n%s" % line)
title = line[:72]
key = line[72:]
# Second line
line = fid.readline().strip("\n")
if not len(line.rstrip()) >= 56:
raise ValueError("Expected at least 56 characters for second line, "
"got: \n%s" % line)
total_nlines = _expect_int(line[:14])
pointer_nlines = _expect_int(line[14:28])
indices_nlines = _expect_int(line[28:42])
values_nlines = _expect_int(line[42:56])
rhs_nlines = line[56:72].strip()
if rhs_nlines == '':
rhs_nlines = 0
else:
rhs_nlines = _expect_int(rhs_nlines)
if not rhs_nlines == 0:
raise ValueError("Only files without right hand side supported for "
"now.")
# Third line
line = fid.readline().strip("\n")
if not len(line) >= 70:
raise ValueError("Expected at least 72 character for third line, got:\n"
"%s" % line)
mxtype_s = line[:3].upper()
if not len(mxtype_s) == 3:
raise ValueError("mxtype expected to be 3 characters long")
mxtype = HBMatrixType.from_fortran(mxtype_s)
if mxtype.value_type not in ["real", "integer"]:
raise ValueError("Only real or integer matrices supported for "
"now (detected %s)" % mxtype)
if not mxtype.structure == "unsymmetric":
raise ValueError("Only unsymmetric matrices supported for "
"now (detected %s)" % mxtype)
if not mxtype.storage == "assembled":
raise ValueError("Only assembled matrices supported for now")
if not line[3:14] == " " * 11:
raise ValueError("Malformed data for third line: %s" % line)
nrows = _expect_int(line[14:28])
ncols = _expect_int(line[28:42])
nnon_zeros = _expect_int(line[42:56])
nelementals = _expect_int(line[56:70])
if not nelementals == 0:
raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
% nelementals)
# Fourth line
line = fid.readline().strip("\n")
ct = line.split()
if not len(ct) == 3:
raise ValueError("Expected 3 formats, got %s" % ct)
return cls(title, key,
total_nlines, pointer_nlines, indices_nlines, values_nlines,
mxtype, nrows, ncols, nnon_zeros,
ct[0], ct[1], ct[2],
rhs_nlines, nelementals)
def __init__(self, title, key,
total_nlines, pointer_nlines, indices_nlines, values_nlines,
mxtype, nrows, ncols, nnon_zeros,
pointer_format_str, indices_format_str, values_format_str,
right_hand_sides_nlines=0, nelementals=0):
"""Do not use this directly, but the class ctrs (from_* functions)."""
self.title = title
self.key = key
if title is None:
title = "No Title"
if len(title) > 72:
raise ValueError("title cannot be > 72 characters")
if key is None:
key = "|No Key"
if len(key) > 8:
warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)
self.total_nlines = total_nlines
self.pointer_nlines = pointer_nlines
self.indices_nlines = indices_nlines
self.values_nlines = values_nlines
parser = FortranFormatParser()
pointer_format = parser.parse(pointer_format_str)
if not isinstance(pointer_format, IntFormat):
raise ValueError("Expected int format for pointer format, got %s"
% pointer_format)
indices_format = parser.parse(indices_format_str)
if not isinstance(indices_format, IntFormat):
raise ValueError("Expected int format for indices format, got %s" %
indices_format)
values_format = parser.parse(values_format_str)
if isinstance(values_format, ExpFormat):
if mxtype.value_type not in ["real", "complex"]:
raise ValueError("Inconsistency between matrix type %s and "
"value type %s" % (mxtype, values_format))
values_dtype = np.float64
elif isinstance(values_format, IntFormat):
if mxtype.value_type not in ["integer"]:
raise ValueError("Inconsistency between matrix type %s and "
"value type %s" % (mxtype, values_format))
# XXX: fortran int -> dtype association ?
values_dtype = int
else:
raise ValueError("Unsupported format for values %r" % (values_format,))
self.pointer_format = pointer_format
self.indices_format = indices_format
self.values_format = values_format
self.pointer_dtype = np.int32
self.indices_dtype = np.int32
self.values_dtype = values_dtype
self.pointer_nlines = pointer_nlines
self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
self.indices_nlines = indices_nlines
self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
self.values_nlines = values_nlines
self.values_nbytes_full = _nbytes_full(values_format, values_nlines)
self.nrows = nrows
self.ncols = ncols
self.nnon_zeros = nnon_zeros
self.nelementals = nelementals
self.mxtype = mxtype
def dump(self):
"""Gives the header corresponding to this instance as a string."""
header = [self.title.ljust(72) + self.key.ljust(8)]
header.append("%14d%14d%14d%14d" %
(self.total_nlines, self.pointer_nlines,
self.indices_nlines, self.values_nlines))
header.append("%14s%14d%14d%14d%14d" %
(self.mxtype.fortran_format.ljust(14), self.nrows,
self.ncols, self.nnon_zeros, 0))
pffmt = self.pointer_format.fortran_format
iffmt = self.indices_format.fortran_format
vffmt = self.values_format.fortran_format
header.append("%16s%16s%20s" %
(pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value)
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError as e:
raise e
def _write_data(m, fid, header):
m = m.tocsc(copy=False)
def write_array(f, ar, nlines, fmt):
# ar_nlines is the number of full lines, n is the number of items per
# line, ffmt the fortran format
pyfmt = fmt.python_format
pyfmt_full = pyfmt * fmt.repeat
# for each array to write, we first write the full lines, and special
# case for partial line
full = ar[:(nlines - 1) * fmt.repeat]
for row in full.reshape((nlines-1, fmt.repeat)):
f.write(pyfmt_full % tuple(row) + "\n")
nremain = ar.size - full.size
if nremain > 0:
f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
fid.write(header.dump())
fid.write("\n")
# +1 is for Fortran one-based indexing
write_array(fid, m.indptr+1, header.pointer_nlines,
header.pointer_format)
write_array(fid, m.indices+1, header.indices_nlines,
header.indices_format)
write_array(fid, m.data, header.values_nlines,
header.values_format)
class HBMatrixType(object):
"""Class to hold the matrix type."""
# q2f* translates qualified names to Fortran character
_q2f_type = {
"real": "R",
"complex": "C",
"pattern": "P",
"integer": "I",
}
_q2f_structure = {
"symmetric": "S",
"unsymmetric": "U",
"hermitian": "H",
"skewsymmetric": "Z",
"rectangular": "R"
}
_q2f_storage = {
"assembled": "A",
"elemental": "E",
}
_f2q_type = dict([(j, i) for i, j in _q2f_type.items()])
_f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()])
_f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()])
@classmethod
def from_fortran(cls, fmt):
if not len(fmt) == 3:
raise ValueError("Fortran format for matrix type should be 3 "
"characters long")
try:
value_type = cls._f2q_type[fmt[0]]
structure = cls._f2q_structure[fmt[1]]
storage = cls._f2q_storage[fmt[2]]
return cls(value_type, structure, storage)
except KeyError:
raise ValueError("Unrecognized format %s" % fmt)
def __init__(self, value_type, structure, storage="assembled"):
self.value_type = value_type
self.structure = structure
self.storage = storage
if value_type not in self._q2f_type:
raise ValueError("Unrecognized type %s" % value_type)
if structure not in self._q2f_structure:
raise ValueError("Unrecognized structure %s" % structure)
if storage not in self._q2f_storage:
raise ValueError("Unrecognized storage %s" % storage)
@property
def fortran_format(self):
return self._q2f_type[self.value_type] + \
self._q2f_structure[self.structure] + \
self._q2f_storage[self.storage]
def __repr__(self):
return "HBMatrixType(%s, %s, %s)" % \
(self.value_type, self.structure, self.storage)
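# Example (sketch): the three-letter Fortran code "RUA" decodes to a real,
# unsymmetric, assembled matrix, i.e.
#   HBMatrixType.from_fortran("RUA") -> HBMatrixType(real, unsymmetric, assembled)
# and the fortran_format property round-trips it back to "RUA".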
class HBFile(object):
def __init__(self, file, hb_info=None):
"""Create a HBFile instance.
Parameters
----------
file : file-object
StringIO work as well
hb_info : HBInfo, optional
Should be given as an argument for writing, in which case the file
should be writable.
"""
self._fid = file
if hb_info is None:
self._hb_info = HBInfo.from_file(file)
else:
#raise IOError("file %s is not writable, and hb_info "
# "was given." % file)
self._hb_info = hb_info
@property
def title(self):
return self._hb_info.title
@property
def key(self):
return self._hb_info.key
@property
def type(self):
return self._hb_info.mxtype.value_type
@property
def structure(self):
return self._hb_info.mxtype.structure
@property
def storage(self):
return self._hb_info.mxtype.storage
def read_matrix(self):
return _read_hb_data(self._fid, self._hb_info)
def write_matrix(self, m):
return _write_data(m, self._fid, self._hb_info)
def hb_read(path_or_open_file):
"""Read HB-format file.
Parameters
----------
path_or_open_file : path-like or file-like
If a file-like object, it is used as-is. Otherwise, it is opened
before reading.
Returns
-------
data : scipy.sparse.csc_matrix instance
The data read from the HB file as a sparse matrix.
Notes
-----
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
Examples
--------
We can read and write a harwell-boeing format file:
>>> from scipy.io.harwell_boeing import hb_read, hb_write
>>> from scipy.sparse import csr_matrix, eye
>>> data = csr_matrix(eye(3)) # create a sparse matrix
>>> hb_write("data.hb", data) # write a hb file
>>> print(hb_read("data.hb")) # read a hb file
(0, 0) 1.0
(1, 1) 1.0
(2, 2) 1.0
"""
def _get_matrix(fid):
hb = HBFile(fid)
return hb.read_matrix()
if hasattr(path_or_open_file, 'read'):
return _get_matrix(path_or_open_file)
else:
with open(path_or_open_file) as f:
return _get_matrix(f)
def hb_write(path_or_open_file, m, hb_info=None):
"""Write HB-format file.
Parameters
----------
path_or_open_file : path-like or file-like
If a file-like object, it is used as-is. Otherwise, it is opened
before writing.
m : sparse-matrix
the sparse matrix to write
hb_info : HBInfo
contains the meta-data for write
Returns
-------
None
Notes
-----
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
Examples
--------
We can read and write a harwell-boeing format file:
>>> from scipy.io.harwell_boeing import hb_read, hb_write
>>> from scipy.sparse import csr_matrix, eye
>>> data = csr_matrix(eye(3)) # create a sparse matrix
>>> hb_write("data.hb", data) # write a hb file
>>> print(hb_read("data.hb")) # read a hb file
(0, 0) 1.0
(1, 1) 1.0
(2, 2) 1.0
"""
m = m.tocsc(copy=False)
if hb_info is None:
hb_info = HBInfo.from_data(m)
def _set_matrix(fid):
hb = HBFile(fid, hb_info)
return hb.write_matrix(m)
if hasattr(path_or_open_file, 'write'):
return _set_matrix(path_or_open_file)
else:
with open(path_or_open_file, 'w') as f:
return _set_matrix(f)
|
pizzathief/scipy
|
scipy/io/harwell_boeing/hb.py
|
Python
|
bsd-3-clause
| 19,230
|
[
"exciting"
] |
b60b79e9f5929314d15ddd120b67f19b56fccda43a91b7504012f538a1ffd466
|
__all__ = ['Neuron', 'Simulation', 'Integrator', 'Stimulus',
'plotting_tools', 'pretty_plotting',
'tuning_analysis', 'data_extractor']
|
miladh/lgn-simulator
|
tools/analysis/__init__.py
|
Python
|
gpl-3.0
| 157
|
[
"NEURON"
] |
226e7223a6bf1383e04ba753ebd1838c23575519fd37e22caa6df751873c4862
|
"""
OLD -- this module has not been fully modernized, and most likely will not
import.
forcing.py: construct forcing (and eventually initialization?) for suntans runs.
the total process is something like this:
1) Write a forcing_conf.py file that goes with a specific grid
This file constructs groups of edges that will be forced and the datasets
that are used to force them. Only need the original points/edges/cells.dat,
not the partitioning. This description is built up using classes from this
file.
2) For a particular partitioning, run forcing_conf.py partition to dice up the forcing
information and write boundaries.dat.nnn files for each partition.
done.
"""
from __future__ import print_function
import sys, os, glob
import logging
import netCDF4
from . import sunreader
from . import timeseries
from ...grid import trigrid
from ... import filters as lp_filter
from ...spatial import wkb2shp
try:
if netCDF4.__version__ >= '1.2.6':
# kludge:
# recent netCDF4 isn't compatible with cfunits due to renaming
# some datetime internals, which cfunits tries to reach in and grab.
# monkey patch in shame.
nct = netCDF4.netcdftime.netcdftime = netCDF4.netcdftime
nct._DateFromNoLeapDay = nct.DatetimeNoLeap
nct._DateFromAllLeap = nct.DatetimeAllLeap
nct._DateFrom360Day = nct.Datetime360Day
from cfunits.units import Units
except ImportError:
Units=None
try:
import qnc
except ImportError:
qnc=None
from ... import (tide_consts, utils)
# cache_handler relies on some pieces which are no longer easily accessible.
# from cache_handler import urlopen_delay as urlopen
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import norm
from scipy.interpolate import interp1d
import datetime
from shapely import geometry
forcing_dir= os.path.join( os.environ['HOME'], "models/forcing")
try:
from shapely.prepared import prep as geom_prep
except ImportError:
geom_prep = None
hydro_forcings = ['FORCE_H','FORCE_U', 'FORCE_U_VECTOR', 'FORCE_Q','FORCE_SURFACE_Q']
class GlobalForcing(object):
def __init__(self,datadir=None,sun=None,proc=None):
self.log=logging.getLogger(self.__class__.__name__)
if sun:
self.sun = sun
else:
if datadir is None:
datadir='.'
self.sun = sunreader.SunReader(datadir)
# if this is None, we're looking at forcing for the whole domain
# otherwise, it's forcing after domain decomposition
self.proc = proc
self.forcing_groups = []
self.datasources = []
# some functions to make it look more like the old forcing class
def has_forced_edges(self):
""" true if there are actual forced edges or cells """
return len(self.forcing_groups) > 0
def uses_gages(self):
""" kludge: guess if it uses gages based on whether the datasources
are timeseries or harmonics
"""
for ds in self.datasources:
if isinstance(ds,Timeseries):
return True
return False
def uses_predictions(self):
""" kludge: guess if it uses harmonics based on whether the datasources
are timeseries or harmonics
"""
for ds in self.datasources:
if isinstance(ds,Harmonics):
return True
return False
def new_group(self,**kwargs):
""" create an edge group, add it to the GlobalForcing, and return the object
"""
self.forcing_groups.append( ForcingGroup(self,**kwargs) )
return self.forcing_groups[-1]
def add_groups_bulk(self,defs=None,shp=None,bc_type_field='bc_type'):
""" defs: array as returned by wkb2shp.shp2geom
has a geom field which will be used to match against edges/cells.
if there is a field matching bc_type_field, it will be used to
discern the type of forcing, and specifically whether to identify
cells or edges.
"""
if defs is None:
assert shp is not None
defs=wkb2shp.shp2geom(shp)
if bc_type_field in defs.dtype.names:
bc_types=defs[bc_type_field]
else:
# default to edges
bc_types=['BOUNDARY']*len(defs)
groups=[None]*len(defs)
warn_on_fake=True
for feat_id in range(len(defs)):
fields={ fld:defs[fld][feat_id]
for fld in defs.dtype.names }
geo = fields['geom']
points = np.array(geo)
typ=bc_types[feat_id]
if typ=='BED':
typ='SURFACE'
if warn_on_fake:
self.log.debug("Will fake the bed source with a surface source")
warn_on_fake=False
# Define the forcing group:
if typ in ['SURFACE','BED']:
grp = self.new_group( nearest_cell=points[0] )
else:
if len(points) == 1:
grp = self.new_group( nearest=points[0] )
else:
# just use endpoints to define an arc along the boundary.
grp = self.new_group( points=[points[0],
points[-1]] )
groups[feat_id]=grp
return groups
def add_datasource(self,ds):
""" returns the index of the datasource
"""
if ds not in self.datasources:
self.datasources.append( ds )
ds.filename ='%04d'%self.datasources.index(ds)
# UPDATE: go ahead and try moving as much of the downloading
# and time-consuming work to right before the data is written
# out. This should help avoid unnecessary processing when
# all we need to know is which edges are being forced.
# OLD NOTES:
# this could also be moved to right before writing out config files,
# so that information from other datasources or which forcing_group
# a datasource is tied to could be utilized.
# the downside is that we might want to call plot() or similar before
# or instead of writing a config file.
# Also, with the new Kriging code, ds.prepare is where the Kriging class
# adds its subsources into the global forcing.
ds.prepare(self)
return self.datasources.index(ds)
def write_fs_initial_dat(self):
print("Attempting to figure out a good initial freesurface")
best_ds=None
best_n_edges=0
for fg in self.forcing_groups:
for bc_type,ds in fg.datasources:
if bc_type == 'FORCE_H':
n_edges = fg.Nedges()
if n_edges > best_n_edges:
best_ds = ds
best_n_edges = n_edges
if best_n_edges > 0:
print("Found a good datasource for getting the freesurface")
# evaluate the datasource at the desired time,
start = self.sun.time_zero()
start_val = self.datasources[best_ds].calc_values( date2num(start) )
fp = open( os.path.join(self.sun.datadir,'fs_initial.dat'), 'wt')
fp.write("%8f\n"%start_val)
fp.close()
def write_boundaries_dat(self):
""" write boundaries.dat.* for each processor
if possible, also write an fs_initial.dat file with a reasonable choice
of initial freesurface elevation.
"""
# The trick here is to map global edges to local edges
# luckily the points don't change
self.write_fs_initial_dat()
# Write out the datasources first, in their own directory
ds_subdir = 'datasources'
datasource_dir = os.path.join( self.sun.datadir,ds_subdir)
if not os.path.exists(datasource_dir):
os.mkdir(datasource_dir)
for i,d in enumerate(self.datasources):
## d.filename is now populated at time of creation to ease referencing from one datasource
## to another
ds_path = os.path.join( self.sun.datadir, ds_subdir, d.filename )
fp = open( ds_path, 'wt')
d.write_config(fp,self.sun)
fp.close()
for proc in range(self.sun.num_processors()):
print("Writing boundary data for processors %d"%proc)
self.write_boundaries_dat_proc(proc)
def write_boundaries_dat_proc(self,proc):
#print "loading global grid"
g = self.sun.grid()
#print "loading per-proc grid"
gproc = self.sun.grid(proc)
fp = open(self.sun.file_path('BoundaryInput',proc),'wt')
fp.write("BOUNDARY_FORCING 6\n")
# for now, each processor gets all of the data sections
#print "Mapping groups"
# first cycle through to figure out which groups have edges on this processor
mapped_groups = [fg.map_to_grid(g,gproc) for fg in self.forcing_groups]
mapped_groups = [fg for fg in mapped_groups if fg.nonempty()]
# Split those into edge-based and cell-based:
edge_based = [fg for fg in mapped_groups if fg.edge_based()]
cell_based = [fg for fg in mapped_groups if fg.cell_based()]
fp.write("ITEMLIST_COUNT %d\n"%(len(edge_based)+len(cell_based)))
## Write Edge based
for fg in edge_based:
fg.write_config(fp,self.sun)
## Write Cell based
all_cells = []
for fg in cell_based:
fg.write_config(fp,self.sun)
if fg.cells is not "all":
all_cells.append( fg.cells )
if len(all_cells)>0:
all_cells = concatenate(all_cells)
if len(all_cells) > len(unique(all_cells)):
print("All cells for proc %d: %s"%(proc,all_cells))
print("Looks like there are duplicates, for which we are not prepared!")
raise Exception("Duplicate cells in forcing")
fp.close()
def update_grid(self,target_path=None):
""" Given the forcing groups defined, rewrite edges.dat, adjusting
edge markers as necessary.
Currently this only operates on the global grid, so if anything
changes, you will have to repartition
target_path: if specified, write the new grid here, instead of overwriting the
old one.
Returns 1 if the grid changed and needs to be repartitioned, 0 otherwise
"""
g = self.sun.grid()
# keep track of who should be marked
marked = []
changed = 0
for fg in self.forcing_groups:
edges = fg.edges
if edges is None or len(edges)==0:
continue
if fg.hydro_bctype() in ["FORCE_H"]:
marker = 3
elif fg.hydro_bctype() in ["FORCE_U","FORCE_U_VECTOR","FORCE_Q"]:
marker = 2
else:
print("update_grid: no hydrodynamic forcing found")
continue
if len(edges) == 0:
print("WARNING: forcing group %s has no edges"%fg)
continue
marked.append(edges)
print("update_grid: edges is ",edges)
if any( g.edges[edges,2] != marker ):
print("Writing in new markers=%d"%marker)
changed = 1
g.edges[edges,2] = marker
# the ones we expect to have >1 markers
if len(marked) > 0:
marked = concatenate( marked )
else:
marked = array([],int32)
extras = setdiff1d( where(g.edges[:,2] > 1)[0], marked )
if len(extras)>0:
print("There were extra markers in places - they will be set to closed=1")
g.edges[extras,2] = 1
changed = 1
if changed or target_path is not None:
if target_path is None:
target_path = self.sun.datadir
print("There were changes - writing out new global grid")
g.write_suntans(target_path)
print("Reloading grid")
self.sun = sunreader.SunReader(target_path)
print("You will need to repartition the grid!")
return 1
else:
print("No changes in grid markers.")
return 0
class ForcingGroup(object):
""" A group of features (cells or edges) that will get the same forcing.
"""
def __init__(self,
gforce,
edges=None, # list of ids of edges - *must* match the global-ness of
# GlobalForcing.
nodes=None, # pair of global node ids, connected by shortest path along edges on the boundary
points=None, # pair of coordinates, connect by shortest path
nearest=None, # single coordinate pair, force the one edge closest
cells=None, # list of cell ids, or 'all'
boundary_cells=False, # if true, after finding boundary edges, use the cells just inside.
nearest_cell=None,# choose cell closest to the given coord pair
edges_in_polygon = None): # edges with centers within the given shapely.Polygon
self.gforce = gforce
self.edges = None
self.cells = None
# Record how the edges/cells were specified:
self.spec = {'edges':edges,
'nodes':nodes,
'points':points,
'nearest':nearest,
'cells':cells,
'nearest_cell':nearest_cell,
'edges_in_polygon':edges_in_polygon}
# each datasource is a tuple of (bctype,datasource_id)
self.datasources = []
if edges is not None:
# if type(edges) == str and edges == 'all':
# g = self.gforce.sun.grid()
# self.edges = arange(g.Nedges())
# else:
# self.edges = edges
self.edges = edges
elif nodes is not None:
self.edges = self.find_edges(nodes)
elif points is not None:
g = self.gforce.sun.grid()
nodes = [g.closest_point(p,boundary=1) for p in points]
if len(nodes) > 2:
raise Exception("For now, line segments must be 1 or 2 nodes only")
if nodes[0] == nodes[1]:
self.edges = self.find_nearest_edges(points[0])
else:
self.edges = self.find_edges(nodes)
elif nearest is not None:
self.edges = self.find_nearest_edges(nearest)
elif cells is not None:
self.cells = cells
elif nearest_cell is not None:
self.cells = self.find_nearest_cell(nearest_cell)
elif edges_in_polygon is not None:
self.edges = self.find_edges_in_polygon(edges_in_polygon)
if boundary_cells:
if self.cells is not None:
raise Exception("boundary_cells specified, but cells have already been chosen")
if self.edges is None:
raise Exception("boundary_cells specified, but no edges have been chosen")
if self.edges == 'all':
raise Exception("can't use boundary_cells with edges='all'")
g = self.gforce.sun.grid()
edges = array(self.edges)
self.cells = g.edges[edges,3]
self.edges = None
if (self.edges is None or len(self.edges) == 0) and (self.cells is None or len(self.cells) == 0):
print("ForcingGroup(edges=%s,"%edges)
print(" nodes=%s,"%nodes)
print(" points=%s,"%points)
print(" nearest=%s,"%nearest)
print(" cells=%s)"%cells)
print("FOUND NO EDGES")
def Nedges(self):
if self.edge_based():
if self.edges == 'all':
return self.gforce.sun.grid().Nedges()
else:
return len(self.edges)
else:
return 0
def Ncells(self):
if self.cell_based():
if self.cells == 'all':
return self.gforce.sun.grid().Ncells()
else:
return len(self.cells)
else:
return 0
def edge_based(self):
return (self.edges is not None)
def cell_based(self):
return (self.cells is not None)
def nonempty(self):
return (self.edges is not None and len(self.edges)>0) or \
(self.cells is not None and len(self.cells)>0)
def copy(self):
fg = ForcingGroup(gforce = self.gforce,
edges = self.edges,
cells = self.cells)
fg.datasources = deepcopy( self.datasources )
fg.spec = {'copy':self}
return fg
def find_edges(self,nodes):
# get the global grid:
g = self.gforce.sun.grid()
# print "Searching for shortest path along boundary between nodes %d %d"%(nodes[0],nodes[1])
path = g.shortest_path(nodes[0],nodes[1],boundary_only=1)
edges = []
for i in range(len(path)-1):
edges.append(g.find_edge( path[i:i+2] ))
return array(edges)
def find_nearest_edges(self,point):
g = self.gforce.sun.grid()
n = g.closest_point(point, boundary=1)
possible_edges = g.pnt2edges(n)
best_dist = inf
best_edge = -1
for e in possible_edges:
if g.edges[e,2] == 0:
# skip internal edges, but allow for edges that
# are currently marked 1 (closed), b/c in the future
# this code may be responsible for setting edge markers.
continue
if g.edges[e,0] == n:
nbr = g.edges[e,1]
else:
nbr = g.edges[e,0]
dist = norm( g.points[nbr,:2] - point )
if dist < best_dist:
best_dist = dist
best_edge = e
if best_edge < 0:
raise Exception("Didn't find a good edge near %s"%point)
return array( [best_edge] )
def find_edges_in_polygon(self,edges_in_polygon):
g = self.gforce.sun.grid()
ec = g.edge_centers()
if geom_prep is not None:
poly = geom_prep(edges_in_polygon)
else:
poly = edges_in_polygon
in_poly = []
for j in range(g.Nedges()):
if j % 100000 == 0:
print("%d / %d edge centers checked"%(j,g.Nedges()))
if poly.contains(geometry.Point(ec[j])):
in_poly.append(j)
return array( in_poly )
def find_nearest_cell(self,nearest_cell):
g = self.gforce.sun.grid()
c = g.closest_cell( nearest_cell )
return [c]
def add_datasource(self,ds,bctype):
""" ties the given datasource to this group, for the given boundary
condition type ( 'FORCE_H','FORCE_WIND', etc.)
"""
# gets the integer index
# if type(ds) == tuple:
# myds = (bctype,
# self.gforce.add_datasource(ds[0]),
# self.gforce.add_datasource(ds[1]))
# else:
myds = (bctype,self.gforce.add_datasource(ds))
self.datasources.append(myds)
self.check_bctypes()
def hydro_bctype(self):
""" returns the bctype of just hydrodynamic forcing, if it is set
"""
for tup in self.datasources:
if tup[0] in hydro_forcings:
return tup[0]
return None
def hydro_datasource(self):
""" returns the datasource that specifies hydrodynamics on this
group, or None if none exists
"""
for tup in self.datasources:
if tup[0] in hydro_forcings:
print("NB: just returning the first datasource. there could be a second one")
return tup[1]
return None
def check_bctypes(self):
# at most, one hydro forcing:
n_hydro_forcing = 0
for tup in self.datasources:
if tup[0] in hydro_forcings:
n_hydro_forcing += 1
if n_hydro_forcing > 1:
raise Exception("Looks like more than one type of hydrodynamic forcing")
return True
def write_config(self,fp,sun):
fp.write("BEGIN_ITEMLIST\n")
if self.edges == 'all':
fp.write(" ITEM_TYPE ALL_EDGES\n")
# for now, all_cells and all_edges only works for 2-D fields
fp.write(" DIMENSIONS xy\n")
elif self.edges is not None:
fp.write(" ITEM_TYPE EDGE\n")
fp.write(" ITEM_COUNT %d\n"%len(self.edges))
fp.write(" ITEMS")
for e in self.edges:
fp.write(" %d"%e)
fp.write("\n")
elif self.cells == 'all':
fp.write(" ITEM_TYPE ALL_CELLS\n")
# see note above for ALL_EDGES
fp.write(" DIMENSIONS xy\n")
else:
fp.write(" ITEM_TYPE CELL\n")
fp.write(" ITEM_COUNT %d\n"%len(self.cells))
fp.write(" ITEMS")
for c in self.cells:
fp.write(" %d"%c)
fp.write("\n")
fp.write(" BC_COUNT %d\n"%len(self.datasources))
for tup in self.datasources:
ds = self.gforce.datasources[tup[1]]
fp.write(" BCTYPE %s\n"%tup[0])
fp.write(" DATA %s\n"%ds.filename)
fp.write("END_ITEMLIST\n")
def map_to_grid(self,oldg,newg):
""" requires that the point arrays are the same, and then translates edge or cell indices,
making a copy of self
"""
c = self.copy()
if c.edges is not None and c.edges != 'all':
#print "Mapping edges"
if 'edges' in self.spec and self.spec['edges'] == 'all':
#print "Fast mapping of edges='all'"
c.edges = arange(newg.Nedges())
else:
new_edges = []
for e in c.edges:
try:
new_e = newg.find_edge( oldg.edges[e,:2])
new_edges.append(new_e)
except trigrid.NoSuchEdgeError:
pass
c.edges = array(new_edges)
if c.cells is not None and c.cells != 'all':
print("Mapping cells")
if 'cells' in self.spec and self.spec['cells'] == 'all':
#print "Fast mapping of cells='all'"
c.cells = arange(newg.Ncells())
else:
new_cells = []
for i in c.cells:
try:
new_c = newg.find_cell( oldg.cells[i])
print("Mapped global cell %d to local cell %d on grids %s,%s"%(i, new_c, oldg.processor,newg.processor))
new_cells.append(new_c)
except trigrid.NoSuchCellError:
pass
c.cells = array(new_cells)
#print "done with mapping"
return c
class DataSource(object):
n_components = 1
def __init__(self,label):
self.label = label
def write_config(self,fp,sun):
raise Exception("Missing!")
def prepare(self,gforce):
pass
def plot(self,t,**kwargs):
" t should be a vector of absdays "
v = self.calc_values(t)
plot(t,v,**kwargs)
def calc_values(self,t):
raise Exception("Missing!")
def plot_overview(self,tmin,tmax):
# plot something representative into a single axes,
# for the given simulation period, as specified by absdays
axvline( date2num(tmin),c='k')
axvline( date2num(tmax),c='k')
grid()
title(self.label)
gca().xaxis_date()
gcf().autofmt_xdate()
class Constant(DataSource):
def __init__(self,label,value):
DataSource.__init__(self,label)
self.value = value
def write_config(self,fp,sun):
fp.write("# %s\n"%self.label)
fp.write("BEGIN_DATA\n")
fp.write(" CONSTANT\n")
fp.write(" VALUE %g\n"%self.value)
fp.write("END_DATA\n")
def calc_values(self,t):
return self.value * ones_like(t)
def plot_overview(self,tmin,tmax):
# plot something representative into a single axes,
# for the given simulation period, as specified by absdays
plot( [date2num(tmin),date2num(tmax)],[self.value,self.value], 'r')
annotate( "Constant: %f"%self.value,
[0.5*(date2num(tmin)+date2num(tmax)),self.value] )
DataSource.plot_overview(self,tmin,tmax)
class Constant2Vector(DataSource):
n_components = 2
def __init__(self,label,value1,value2):
DataSource.__init__(self,label)
self.value = array([value1,value2])
def write_config(self,fp,sun):
fp.write("# %s\n"%self.label)
fp.write("BEGIN_DATA\n")
fp.write(" CONSTANT_2VEC\n")
fp.write(" VALUE %g %g\n"%(self.value[0],self.value[1]))
fp.write("END_DATA\n")
def calc_values(self,t):
return self.value * ones( (len(t),2) )
def plot_overview(self,tmin,tmax):
# plot something representative into a single axes,
# for the given simulation period, as specified by absdays
quiver( [0.5*(date2num(tmin)+date2num(tmax))],[0],[self.value[0]],[self.value[1]] )
annotate( "Constant 2-vector: (%f,%f)"%(self.value[0],self.value[1]),
[0.5*(date2num(tmin)+date2num(tmax)),0.0] )
axis(ymin=-1,ymax=1)
DataSource.plot_overview(self,tmin,tmax)
class Constant3Vector(DataSource):
""" For wave forcing, the values are Hsig, thetamean, sigma_mean
"""
n_components = 3
def __init__(self,label,value1,value2,value3):
DataSource.__init__(self,label)
self.value = array([value1,value2,value3])
def write_config(self,fp,sun):
fp.write("# %s\n"%self.label)
fp.write("BEGIN_DATA\n")
fp.write(" CONSTANT_3VEC\n")
fp.write(" VALUE %g %g %g\n"%(self.value[0],self.value[1],self.value[2]))
fp.write("END_DATA\n")
def calc_values(self,t):
return self.value * ones( (len(t),3) )
def plot_overview(self,tmin,tmax):
# plot something representative into a single axes,
# for the given simulation period, as specified by absdays
annotate( "Constant 3-vector: (%f,%f,%f)"%(self.value[0],self.value[1],self.value[2]),
[0.5*(date2num(tmin)+date2num(tmax)),0.0] )
axis(ymin=-1,ymax=1)
DataSource.plot_overview(self,tmin,tmax)
class Harmonics(DataSource):
start_year = 2000 # really this should be set to sun.conf_int('start_year')
def __init__(self,label,omegas=None,phases=None,amplitudes=None):
DataSource.__init__(self,label)
self.omegas = array(omegas)
self.phases = array(phases)
self.amplitudes = array(amplitudes)
def prepare(self,gforce):
self.start_year = gforce.sun.conf_int('start_year')
def calc_values(self,t):
""" t should be a datenum, python.datetime / pylab style.
self.omegas are in rad/sec
self.phases are radians, relative t=0 at midnight, 1/1/<start year>
"""
t = array(t)
# convert to yeardays:
t = t - date2num( datetime.datetime(self.start_year,1,1) )
# and yearseconds
t = 24*3600.*t
if t.shape:
v = (self.amplitudes * cos(self.omegas*t[...,newaxis]+self.phases)).sum(axis=1)
else:
v = (self.amplitudes * cos(self.omegas*t+self.phases)).sum()
v = float(v)
return v
def write_config(self,fp,sun):
if len(self.omegas) != len(self.phases) or len(self.phases) != len(self.amplitudes):
raise Exception("Mismatch in number of harmonics")
fp.write("# %s\n"%self.label)
fp.write("BEGIN_DATA\n")
fp.write(" HARMONICS\n")
nc = len(self.omegas)
fp.write(" CONSTITUENTS_COUNT %d\n"%nc)
fp.write(" OMEGAS")
for omega in self.omegas:
fp.write( " %7g"%omega)
fp.write("\n")
fp.write(" PHASES")
for phase in self.phases:
fp.write( " %7g"%phase)
fp.write("\n")
fp.write(" AMPLITUDES")
for amp in self.amplitudes:
fp.write( " %7g"%amp)
fp.write("\n")
fp.write("END_DATA\n")
class OtisHarmonics(Harmonics):
""" Read the output of OTPS extract_HC and write suntans boundaries.c compatible
harmonics.
"""
def __init__(self,label,otis_output,h_offset=0.0):
self.otis_output = otis_output
omegas,phases,amplitudes = self.parse_otis(otis_output)
self.h_offset = h_offset # this is taken care of during prepare()
# print omegas,phases,amplitudes
Harmonics.__init__(self,label=label,omegas=omegas,phases=phases,amplitudes=amplitudes)
def parse_otis(self,fn):
fp = open(fn,'rt')
model = fp.readline()
units = fp.readline()
if units.strip() != 'Elevations (m)':
print("Expected meters - units line was",units)
headers = fp.readline().split()
values = {}
line = fp.readline() # first line of numbers
for h,s in zip(headers,line.split()):
values[h] = float(s)
constituents = [s[:-4] for s in headers if s[-4:] == '_amp']
amplitudes = array( [values[c+'_amp'] for c in constituents] )
phases = array( [values[c+'_ph'] for c in constituents] )
phases *= pi/180 # convert to radians
self.constituents = constituents
omegas = []
tide_db_indexes = []
# find this constituent in the tide database, so we can get speed and
# eventually equilibrium phase
for constituent in constituents:
# hopefully the constituent names are consistent - we just have to upcase
# the otis names:
i = tide_consts.const_names.index(constituent.upper())
speed = tide_consts.speeds[i] # degrees per hour
period = 1./(speed/360.)
omega = speed/3600. * pi/180.
omegas.append( omega ) # rad/s
tide_db_indexes.append( i )
# print constituent,i,speed,omega,period
omegas = array(omegas)
self.tide_db_indexes = array(tide_db_indexes)
# since we have to correct these during prepare()
self.original_phases = phases.copy()
self.original_amps = amplitudes.copy()
return omegas,phases,amplitudes
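# Sketch of the extract_HC output layout parse_otis() expects (column names
# and values are illustrative; only the *_amp / *_ph pairs are used):
#   <model description line>
#   Elevations (m)
#   lat       lon        m2_amp  m2_ph  s2_amp  s2_ph ...
#   37.8000  -122.4000   0.58    210.3  0.14    195.7 ...
# amplitudes are taken from the *_amp columns and phases (degrees, converted
# to radians above) from the matching *_ph columns.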
def prepare(self,gforce):
Harmonics.prepare(self,gforce)
self.adjust_for_year(self.start_year)
if self.h_offset != 0.0:
self.omegas = concatenate( [self.omegas,[0.0]])
self.phases = concatenate( [self.phases,[0.0]])
self.amplitudes = concatenate( [self.amplitudes,[self.h_offset]] )
def adjust_for_year(self,start_year):
if start_year not in tide_consts.years:
raise Exception('constants for prediction year are not available')
year_i = tide_consts.years.searchsorted(start_year)
# extract just the constituents OTIS provided, just for this year
v0u=tide_consts.v0u[self.tide_db_indexes,year_i]
lun_nod=tide_consts.lun_nodes[self.tide_db_indexes,year_i]
# self.phases = (v0u*(pi/180)) - self.original_phases
self.phases = (v0u*(pi/180)) - self.original_phases
self.amplitudes = lun_nod*self.original_amps
class Timeseries(DataSource):
def __init__(self,label,t0=None,dt=None,data=None,lag_s=None):
DataSource.__init__(self,label)
if t0 is not None and not isinstance(t0,datetime.datetime):
raise Exception("t0 should be a datetime. It was %s"%t0)
self.t0 = t0
self.dt = dt # should be in seconds
self.data = data
if lag_s is not None:
self.lag_s = lag_s
else:
self.lag_s = 0.0
# if we already have the pieces, go ahead and populate self.absdays
if self.data is not None and self.t0 is not None and self.dt is not None:
self.absdays = date2num(self.t0) + arange(len(self.data))*self.dt/(24*3600.)
def calc_values(self,t):
""" Evaluate this datasource at the given times t, an array of absdays
"""
self.get_data()
# so t is in absdays, self.t0 is offset in seconds between
my_t = date2num(self.t0) + (self.dt/(24.*3600.)) *arange(len(self.data))
v = interp(t, my_t, self.data, left=self.data[0], right = self.data[-1] )
return v
def write_config(self,fp,sun):
self.get_data()
# t0 in the file is seconds since the beginning of the year the simulation
# started
base_absdays = date2num( datetime.datetime( self.sun.time_zero().year, 1, 1) )
t0_sun_seconds = (date2num(self.t0) - base_absdays)*24*3600
fp.write("# %s\n"%self.label)
fp.write("BEGIN_DATA\n")
fp.write(" TIMESERIES\n")
fp.write(" SAMPLE_COUNT %d\n"%len(self.data))
fp.write(" DT %g\n"%self.dt)
fp.write(" TZERO %g\n"%t0_sun_seconds)
fp.write(" VALUES")
for v in self.data:
fp.write(" %g"%v)
fp.write("\n")
fp.write("END_DATA\n")
def set_times(self,sun,Tabsdays):
""" convenience routine: set t0 and dt from an array of absdays"""
if len(Tabsdays)>0:
self.t0 = num2date( Tabsdays[0] )
if len(Tabsdays) > 1:
t_first = sunreader.dt_round( num2date(Tabsdays[0]) )
t_last = sunreader.dt_round( num2date(Tabsdays[-1]) )
tdelta = t_last - t_first
total_seconds = tdelta.days*86400 + tdelta.seconds + tdelta.microseconds*1e-6
dt_s = total_seconds / (len(Tabsdays)-1)
# self.dt = 24*3600*median(diff(Tabsdays))
self.dt = dt_s
else:
sim_start,sim_end = sun.simulation_period()
# make it one timestep the length of the simulation
self.dt = 24*3600*( date2num(sim_end) - date2num(sim_start))
print("TimeSeries: Found t0 = ",date2num(self.t0))
print("TimeSeries: Found dt = ",self.dt)
else:
raise Exception("No data at all found for this data source: %s"%str(self))
def prepare(self,gforce):
self.sun = gforce.sun
def get_data(self):
""" populate self.data, according to the period of the simulation.
"""
if self.data is None:
sim_start,sim_end = self.sun.simulation_period()
## This used to use the real t0 for sim_start - the beginning of the whole series
# of runs. This is no good for long-term runs since every time we have to reconstruct
# the entire history. Not sure why that was a good idea in the past...
# but for a restart, sim_start will be the beginning of the
# restart, so pull out the real t0 from sun
# sim_start = datetime.datetime(self.sun.conf_int('start_year'),1,1) + datetime.timedelta(self.sun.conf_float('start_day'))
# a negative lag means that at model time t, the forcing is from
# real time t+delta,
lag_days = self.lag_s / (24*3600.0)
sim_start += datetime.timedelta(-lag_days)
sim_end += datetime.timedelta(-lag_days)
absdays, values = self.raw_data(sim_start,sim_end)
# here is where we 'lie' about the timing of gage data
self.absdays = absdays + lag_days
# set the variables that Timeseries will need:
self.set_times(self.sun,self.absdays)
self.data = values
def plot_overview(self,tmin,tmax):
self.get_data()
# plot something representative into a single axes,
# for the given simulation period, as specified by absdays
if min(self.data) < 0 and max(self.data)>0: # go with linear
plot(self.absdays,self.data,'r')
else:
if min(self.data) < 0 and max(self.data)<0:
data = -self.data
ylabel('negated')
else:
data = self.data
# attempt to be clever about applying log scale:
log_range = log10(data.max()) - log10(data.min())
if log_range < 1.5:
# go back to linear, with original sign
ylabel('')
plot(self.absdays,self.data,'r')
else:
if log_range > 3.5:
ymin = data.max() / 10**3.5
data = clip(data,ymin,inf)
gca().set_yscale('log')
plot(self.absdays,data,'r')
DataSource.plot_overview(self,tmin,tmax)
class TimeseriesFunction(Timeseries):
""" A timeseries, but the data is supplied as a callable function
"""
def __init__(self,label,func,dt):
Timeseries.__init__(self,label)
self.func = func
self.dt = dt # seconds! used to be interpreted as days
def raw_data(self,sim_start,sim_end):
dt_days = self.dt / 86400.0
absdays = arange(date2num(sim_start),date2num(sim_end)+dt_days,dt_days)
data = self.func(absdays)
return absdays, data
def plot_overview(self,tmin,tmax):
# plot something representative into a single axes,
# for the given simulation period, as specified by absdays
annotate( "TimeseriesFunction", [self.absdays[0],self.data[0]])
Timeseries.plot_overview(self,tmin,tmax)
class TimeseriesVector(Timeseries):
def write_config(self,fp,sun):
self.get_data()
base_absdays = date2num( datetime.datetime( sun.time_zero().year,1,1 ) )
t0_sun_seconds = (date2num(self.t0) - base_absdays) *24*3600
fp.write("# %s\n"%self.label)
fp.write("BEGIN_DATA\n")
fp.write(" TIMESERIES_%dVEC\n"%self.n_components)
fp.write(" SAMPLE_COUNT %d\n"%len(self.data))
fp.write(" DT %g\n"%self.dt)
fp.write(" TZERO %g\n"%t0_sun_seconds)
fp.write(" VALUES")
for v in self.data:
for i in range(self.n_components):
fp.write(" %g"%v[i] )
fp.write("\n")
fp.write("END_DATA\n")
class Timeseries2Vector(TimeseriesVector):
n_components = 2
def plot_overview(self,tmin,tmax):
self.get_data()
# plot something representative into a single axes,
# for the given simulation period, as specified by absdays
quiver(self.absdays,0*self.absdays,
self.data[:,0],self.data[:,1])
axis(ymin=-1,ymax=1)
DataSource.plot_overview(self,tmin,tmax)
def plot_components(self,tmin,tmax):
#self.get_data()
plot(self.absdays,self.data[:,0],'r')
plot(self.absdays,self.data[:,1],'b')
#DataSource.plot_overview(self,tmin,tmax)
class Timeseries3Vector(TimeseriesVector):
n_components = 3
def plot_overview(self,tmin,tmax):
self.get_data()
plot(self.absdays,self.data[:,0],'r')
plot(self.absdays,self.data[:,1],'g')
plot(self.absdays,self.data[:,2],'b')
DataSource.plot_overview(self,tmin,tmax)
import gage_data, opendap
class NoaaHarmonics(Harmonics):
""" Specify harmonic constituents fetched from NOAA
usage: NoaaHarmonics('Point Reyes, CA',2006)
n_consts: if this is a number, take the first n_consts constituents
if a list, it is the names of the constituents desired
"""
def __init__(self,station_name,n_consts=None,
amplification=1.0,
raise_h=0.0,
lag_s=0.0,
include_bathy_offset=1):
self.station_name = station_name
self.gage = gage_data.gage(self.station_name)
self.n_consts = n_consts
self.amplification = amplification
self.raise_h = raise_h
self.lag_s = lag_s
self.include_bathy_offset = include_bathy_offset
self.omegas = None
self.amplitudes = None
self.phases = None
Harmonics.__init__(self,"%s harmonics"%station_name)
def prepare(self,gforce):
self.sun = gforce.sun
def get_data(self):
if self.omegas is not None:
# assume that we've already loaded stuff
return
# Grab the constituents:
start_year = self.sun.conf_int('start_year')
self.start_year = start_year
[self.omegas,
self.amplitudes,
self.phases] = opendap.convert_noaa_to_otis_xtides(int(self.gage.external_id),
start_year)
self.amplitudes *= self.amplification
if self.lag_s != 0.0:
print("NoaaHarmonics: lag is not yet supported")
if self.n_consts is not None:
self.choose_constituents(self.n_consts)
offset = self.raise_h
if self.include_bathy_offset:
offset -= sunreader.read_bathymetry_offset()
# The sign here has gone back and forth, but Kevin says that it worked
# for him recently (5/2011) as offset += msl_to_navd88.
offset += self.gage.msl_to_navd88()
if offset != 0.0:
# add a constant term to the harmonic decomposition
self.omegas = concatenate( (self.omegas,[0.0] ) )
self.amplitudes = concatenate( (self.amplitudes,[offset]) )
self.phases = concatenate( (self.phases,[0.0] ) )
def choose_constituents(self,n_consts):
if type(n_consts) == int:
self.omegas = self.omegas[:n_consts]
self.amplitudes = self.amplitudes[:n_consts]
self.phases = self.phases[:n_consts]
elif type(n_consts) == list:
noaa_names, consts = opendap.get_prediction_consts(int(self.gage.external_id))
idxs = array([noaa_names.index(const_name) for const_name in n_consts])
self.omegas = self.omegas[idxs]
self.amplitudes = self.amplitudes[idxs]
self.phases = self.phases[idxs]
# sanity checking, print out the periods ~[h] of the selected
# constituents
hours = 1.0 / (self.omegas * 3600 / (2*pi))
print("Selected tidal periods [h]:",hours)
else:
raise "Bad n_consts: %s "%n_consts
class NoaaGage(Timeseries):
""" Get NOAA gage data and create a timeseries Datasource with it
amplification will multiply by the given factor, centered around the
mean of the highest and lowest tides
raise_h adds the given amount to the freesurface, and lag_s introduces
a lag specified in seconds (i.e. if the model lags reality, specify a
negative lag here so that the forcing is shifted back in time)
include_bathy_offset: automatically incorporate the bathymetry offset, as queried
from the sunreader instance
"""
max_missing_samples = 5
def __init__(self,station_name,
amplification=1.0,
raise_h=0.0,
lag_s=0.0,
include_bathy_offset=1):
Timeseries.__init__(self,label="%s observed tidal height"%station_name,
lag_s=lag_s)
self.station_name = station_name
self.amplification = amplification
self.raise_h = raise_h
self.include_bathy_offset = include_bathy_offset
self.gage = gage_data.gage(station_name)
def raw_data(self,sim_start,sim_end):
""" return the array of times and array of values for the real start/end dates
given.
code in here should *not* perform the lagging
this *is* the right place to perform amplification or shifts in value,
or tidal filtering
should return a vector of AbsDays
"""
print("Fetch data for gage: ",self.gage)
print(" start: ",sim_start)
print(" stop: ",sim_end)
vals = self.gage.data(sim_start,sim_end,'h')
vals = timeseries.fill_holes(vals,max_missing_samples=self.max_missing_samples)
absdays = vals[:,0]
values = vals[:,1]
# remember some of the intermediate values just in case we need to debug
self.raw_h = values
# filter:
#self.h = tidal_filter.filter_tidal_data(self.raw_h, absdays*24*3600)
self.h = lp_filter.lowpass(self.raw_h,
absdays*24, # Time in hours
cutoff=3.0) # cutoff of 3 hours
if self.include_bathy_offset:
self.h -= sunreader.read_bathymetry_offset()
# add amplification (note that this is pretty much wrong, since we really
# ought to amplify it around MSL, which we probably aren't coming anywhere
# close to computing). At least averaging the max/min isn't as vulnerable
# to the period of the gage data.
mean_h = 0.5*(self.h.max() + self.h.min())
self.h = mean_h + (self.h - mean_h)*self.amplification + self.raise_h
return absdays, self.h
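# Example usage (a sketch; the station name reuses the one from the NoaaHarmonics
# docstring, and lag_s=-3600 illustrates shifting the forcing back one hour):
#   gage = NoaaGage('Point Reyes, CA', amplification=1.0, raise_h=0.0, lag_s=-3600)
#   t_absdays, h = gage.raw_data(datetime.datetime(2006,3,1), datetime.datetime(2006,4,1))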
class CompositeNoaaGage(NoaaGage):
lp_hours = 35.0
def __init__(self,*args,**kwargs):
self.backup_station_name = kwargs.pop('backup_station_name',None)
NoaaGage.__init__(self,*args,**kwargs)
def fill_in_missing_data(self,sim_start,sim_end,data):
# Find the holes that are bigger than our limit for filling in just by interpolation
# Same logic as in timeseries.py
# First, get everything onto a common time line, with nans for the missing spots
basic_dt = median( diff(data[:,0]) )
t0 = data[0,0]
tN = data[-1,0]
if date2num(sim_start) < t0:
# push back the start time, but as a whole number of timesteps
nsteps = int(ceil( (t0 - date2num(sim_start))/basic_dt))
t0 = t0 - nsteps*basic_dt
if date2num(sim_end) > tN:
# push ahead the end time, as a whole number of timesteps
nsteps = int(ceil( (date2num(sim_end) - tN)/basic_dt))
tN = tN + nsteps*basic_dt
nsteps = int(ceil( (tN-t0)/basic_dt))
new_data = zeros( (nsteps,2), float64 )
new_data[:,0] = linspace(t0,tN,nsteps)
new_data[:,1] = nan
# populate the original data, just choosing the nearest timestep (thus the 0.5*dt)
new_data[searchsorted( new_data[:,0]+0.5*basic_dt, data[:,0] ),1] = data[:,1]
# Small missing chunks we just interpolate over:
missing = isnan(new_data[:,1])
idx = arange(len(missing))
# find the indices into new_data for each place a gap starts or ends
# this gives the index right before the transition
gap_bounds, = nonzero( (missing[:-1] != missing[1:]) )
if missing[0]:
gap_bounds = concatenate( ([-1],gap_bounds) )
if missing[-1]:
gap_bounds = concatenate( (gap_bounds, [missing.shape[0]-1] ) )
if len(gap_bounds) %2 != 0:
raise Exception("How can there be an odd number of gap bounds?")
gap_starts = gap_bounds[0::2]+1 # index of first missing
gap_ends = gap_bounds[1::2]+1 # index after last missing
# Fill in short gaps with interpolation:
i_to_interp = []
f = interp1d(data[:,0],data[:,1])
for i in range(len(gap_starts)):
# For gaps that have valid data on both sides, and aren't too long, we interpolate
if gap_starts[i] != 0 and \
gap_ends[i] != data.shape[0] and \
(gap_ends[i] - gap_starts[i] <= self.max_missing_samples):
data[gap_starts[i]:gap_ends[i],1] = f( data[gap_starts[i]:gap_ends[i],0] )
else:
i_to_interp.append(i)
if len(i_to_interp) == 0:
return data
print("Will have to go to the backup datasource")
gap_starts = gap_starts[i_to_interp]
gap_ends = gap_ends[i_to_interp]
mask = zeros( (new_data.shape[0]), bool8 )
mask[:] = False
for i in range(len(gap_starts)):
mask[gap_starts[i]:gap_ends[i]] = True
backup_t = new_data[mask,0]
backup_t0 = num2date(backup_t[0])
backup_tN = num2date(backup_t[-1])
# First, we get harmonic predictions for this period:
my_harmonic_vals = self.gage.data( backup_t0, backup_tN, 'p' )
f = interp1d( my_harmonic_vals[:,0], my_harmonic_vals[:,1] )
new_data[mask,1] = f( new_data[mask,0] )
# Second, come back and add some subtidal fluctuations
if self.backup_station_name is not None:
print("Getting subtidal from %s"%self.backup_station_name)
backup_gage = gage_data.gage(self.backup_station_name)
# harmonics from that station:
backup_harmonics = backup_gage.data( backup_t0,backup_tN,'p')
# observed tides:
backup_observations = backup_gage.data( backup_t0,backup_tN,'h')
f = interp1d( backup_observations[:,0], backup_observations[:,1],bounds_error=False,fill_value=nan)
backup_subtidal = f(backup_harmonics[:,0]) - backup_harmonics[:,1]
f = interp1d(backup_harmonics[:,0],backup_subtidal)
new_data[mask,1] += f( new_data[mask,0] )
return new_data
def raw_data(self,sim_start,sim_end):
print("Fetch data for gage: ",self.gage)
print(" start: ",sim_start)
print(" stop: ",sim_end)
vals = self.gage.data(sim_start,sim_end,'h')
self.primary_vals = vals
# Intervene, and check for possibly missing data, take care of fill_holes stuff
vals = self.fill_in_missing_data(sim_start,sim_end,vals)
# And back to the usual:
absdays = vals[:,0]
values = vals[:,1]
# remember some of the intermediate values just in case we need to debug
self.raw_h = values
# filter:
#self.h = tidal_filter.filter_tidal_data(self.raw_h, absdays*24*3600)
self.h = lp_filter.lowpass(self.raw_h, absdays*24, cutoff=3.0)
if self.include_bathy_offset:
self.h -= sunreader.read_bathymetry_offset()
# add amplification (note that this is pretty much wrong, since we really
# ought to amplify it around MSL, which we probably aren't coming anywhere
# close to computing). At least averaging the max/min isn't as vulnerable
# to the period of the gage data.
mean_h = 0.5*(self.h.max() + self.h.min())
self.h = mean_h + (self.h - mean_h)*self.amplification + self.raise_h
return absdays, self.h
class MergeTidalTimeseriesFilter(Timeseries):
"""
taken largely from CompositeNoaaGage, but updated to work with the DatabaseGage
classes.
Represents a time series by combining two gages. When the primary gage has
data, all is well. Gaps no longer than max_missing_samples are filled by linear
interpolation using just the primary gage's data.
Larger gaps are filled from the secondary gage; the secondary
timeseries is adjusted to match the primary at the start and end of the
gap.
"""
max_missing_samples = 5
lp_hours = 35.0
# as much as possible pull the timestep from the primary or secondary gages,
# but if there are no hints, use this value:
default_dt_days = 360./86400.
def __init__(self,label,primary_gage,secondary_gage,offset=0.0):
self.pri = primary_gage
self.sec = secondary_gage
self.offset = offset
super(MergeTidalTimeseriesFilter,self).__init__(label)
def raw_data(self,sim_start,sim_end):
# use the query interface for the primary/secondary gages, to avoid
# any interpolation happening too soon
sim_start = date2num(sim_start)
sim_end = date2num(sim_end)
vals = self.pri.query(sim_start, sim_end,
interpolate_gaps = True, # just gets enough to data to allow for interpolation,
extrapolate = False, # not sure about that one
autopopulate = True)
# Intervene, and check for possibly missing data, take care of fill_holes stuff
self.vals = vals = self.fill_in_missing_data(sim_start,sim_end,vals)
vals[:,1] += self.offset
# And back to the usual:
return vals[:,0],vals[:,1]
def fill_in_missing_data(self,sim_start,sim_end,data):
# Find the holes that are bigger than our limit for filling in just by interpolation
# Same logic as in timeseries.py
# First, get everything onto a common time line, with nans for the missing spots
# if there isn't any good data, this will be nan:
basic_dt = median( diff(data[:,0]) )
if isnan(basic_dt):
try:
basic_dt = self.pri.parms['dt_s'] / 86400.
except:
pass
if isnan(basic_dt):
try:
basic_dt = self.sec.parms['dt_s'] / 86400.
except:
pass
if isnan(basic_dt):
print("Having to fall back to default dt for tide datasource")
basic_dt = self.default_dt_days
t0 = data[0,0]
tN = data[-1,0]
## The idea here is to make sure that t0<= sim_start, tN >= sim_end,
# that the dt in the new data is the same as the basic_dt of the old
# data, and t0 falls on an integral time step of the new data.
print("t0:",num2date(t0))
print("tN:",num2date(tN))
# is 0 if t0 is early enough, or some negative number if we need
# more steps before data starts.
step_start = min(0, int( -ceil( (data[0,0]-sim_start)/basic_dt)))
# keep data through at least what we have, and extend to the simulation period if
# that's longer
tEnd = max( data[-1,0],sim_end)
# count steps based solely on duration and timestep, not how many steps came in
# (since they might not be evenly spaced, missing, etc.)
# this is _inclusive_
step_end = int( ceil( (tEnd - data[0,0])/basic_dt))
nsteps = step_end + 1 - step_start #
new_data = zeros( (nsteps,2), float64 )
new_data[:,0] = arange(step_start,step_end+1) * basic_dt + data[0,0]
new_data[:,1] = nan
# place the original data in reasonable spots in the old data,
# just choosing the nearest new-data timestep (thus 0.5*dt)
new_data[searchsorted( new_data[:,0]+0.5*basic_dt, data[:,0] ),1] = data[:,1]
# Small missing chunks we just interpolate over:
missing = isnan(new_data[:,1])
idx = arange(len(missing))
# find the indices into new_data for each place a gap starts or ends
# this gives the index right before the transition
gap_bounds, = nonzero( (missing[:-1] != missing[1:]) )
if missing[0]:
gap_bounds = concatenate( ([-1],gap_bounds) )
if missing[-1]:
gap_bounds = concatenate( (gap_bounds, [missing.shape[0]-1] ) )
if len(gap_bounds) %2 != 0:
raise Exception("How can there be an odd number of gap bounds?")
gap_starts = gap_bounds[0::2]+1 # index of first missing
gap_ends = gap_bounds[1::2]+1 # index after last missing
# Fill in short gaps with interpolation:
i_to_interp = []
f = None
if len(data) > 1: # this fails when there isn't enough data
valid = ~isnan(data[:,1])
if sum(valid) > 1:
f = interp1d(data[valid,0],data[valid,1])
f_tmin = data[valid,0].min()
f_tmax = data[valid,0].max()
# otherwise, can't do basic linear interpolation - will fall back to
# the backup datasource below:
for i in range(len(gap_starts)):
# For gaps that have valid data on both sides, and aren't too long, we interpolate
gap_t_start = new_data[gap_starts[i],0] # time of first missing sample
gap_t_end = new_data[gap_ends[i]-1,0] # time of last missing sample
if f and \
gap_t_start > f_tmin and \
gap_t_end < f_tmax and \
(gap_ends[i] - gap_starts[i] <= self.max_missing_samples):
new_data[gap_starts[i]:gap_ends[i],1] = f( new_data[gap_starts[i]:gap_ends[i],0] )
else:
# fill in by going to backup datasource
# to match the DC & linear component, query for some extra data
# choose an index into the real data before the period to fill in
left_w_overlap = max(gap_starts[i]-self.max_missing_samples,0)
# and an index into the real data after the period to fill
right_w_overlap = min(gap_ends[i]+self.max_missing_samples,len(new_data))
# pull the whole chunk
win_with_overlap = new_data[left_w_overlap:right_w_overlap]
t_start = new_data[left_w_overlap,0]
t_end = new_data[right_w_overlap-1,0]
# NB: the slightest bit of round off and backup_vals will not entirely
# cover the range of dates - so request and extra basic_dt on each end
backup_vals = self.sec.query( t_start-basic_dt, t_end+basic_dt, autopopulate=True,interpolate_gaps=True )
fbackup = interp1d( backup_vals[:,0], backup_vals[:,1] )
t_fill = new_data[gap_starts[i]:gap_ends[i],0]
# and narrow that down to valid data in the overlapping region
overlap_data = win_with_overlap[ ~isnan(win_with_overlap[:,1]) ]
# if we only have data on one side of the gap, fit only the DC component -
# otherwise fit a line
if len(overlap_data) > 0:
if all(overlap_data[:,0]<= new_data[gap_starts[i],0]) or \
all(overlap_data[:,0]>= new_data[gap_ends[i],0]):
degree = 0
else:
degree = 1
error = overlap_data[:,1] - fbackup( overlap_data[:,0] )
fit = polyfit( overlap_data[:,0],
error,
degree )
correction = polyval(fit,t_fill)
else:
correction = 0.0
new_data[gap_starts[i]:gap_ends[i],1] = fbackup( t_fill ) + correction
return new_data
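# Example usage (a sketch; pri_gage/sec_gage stand in for DatabaseGage-style objects
# that provide the query(...) interface used above):
#   merged = MergeTidalTimeseriesFilter('merged stage', pri_gage, sec_gage, offset=0.0)
#   t, h = merged.raw_data(datetime.datetime(2009,1,1), datetime.datetime(2009,2,1))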
def read_cimis_csv(filename,col_names):
""" returns an array of data, with the requested columns plus
an absdays column.
"""
if filename is None:
filename = os.path.join( os.environ['HOME'], 'classes/research/suntans/forcing/wind/cimis/union_city_171_2005_2010.csv')
fp = open(filename,'rt')
headers = [s.strip() for s in fp.readline().split(",")]
# date/time always pulled:
date_col = headers.index('Date')
hour_col = headers.index('Hour')
col_indexes = [headers.index(col_name) for col_name in col_names]
records = []
for line in fp:
cols = line.split(",")
d = datetime.datetime.strptime(cols[date_col].strip(),"%m/%d/%Y")
hour_minute = cols[hour_col].strip()
h = int(hour_minute[:2])
m = int(hour_minute[2:])
record = [date2num(d) + (h+m/60.)/24.]
for col_index in col_indexes:
try:
val = float( cols[col_index] )
except ValueError:
val = nan
record.append(val)
records.append(record)
fp.close()
return array(records)
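# The CSV is expected to look roughly like an hourly CIMIS export with headers
# (the column order and numbers below are invented; only 'Date', 'Hour' and the
# requested columns need to be present):
#   Station Id,Date,Hour,Precip (mm),Wind Speed (m/s),Wind Dir (0-360)
#   171,01/15/2006,0100,0.0,2.1,250
#   171,01/15/2006,0200,0.2,2.4,245
# and would be read with, e.g.:
#   data = read_cimis_csv('union_city_171.csv', ['Precip (mm)'])
#   # data[:,0] is absdays, data[:,1] is the precip column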
def read_Japanese_met(filename,col_names):
""" returns an array of data, with the requested columns plus
an absdays column. The date is given in julian day of year starting
at 0 on Jan. 1.
A single row is manually added to each wind file to specify the
year of the data.
"""
if filename is None:
print("No wind filename given")
exit(1)
# hardwired column definitions
column_list = ["Day","Precip (mm)","Air Temp (deg C)","Wind Speed (m/s)",
"Wind Dir (0-360)"]
col_indexes = [column_list.index(col_name) for col_name in col_names]
fp = open(filename,'rt')
year_string = fp.readline().split()
year = int(year_string[0])
# date/time always pulled:
# get time of Jan 1 of input year at 0:00
d = datetime.datetime.strptime(year_string[0] + "-01-01 00:00","%Y-%m-%d %H:%M")
Jan1_of_year = date2num(d)
records = []
for line in fp:
cols = line.split()
record = []
record.append( Jan1_of_year + float(cols[0]))
for col_index in col_indexes:
try:
val = float( cols[col_index] )
except ValueError:
val = nan
record.append(val)
records.append(record)
fp.close()
return array(records)
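# Per the parsing above, the expected file layout is (numbers invented): the first
# line gives the year, and each subsequent whitespace-delimited line holds
# Day, Precip (mm), Air Temp (deg C), Wind Speed (m/s), Wind Dir (0-360):
#   2009
#   0.0000  0.0  5.2  3.1  270
#   0.0417  0.5  5.0  3.4  265
# read with, e.g.:
#   data = read_Japanese_met('met_2009.txt', ['Wind Speed (m/s)','Wind Dir (0-360)'])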
class CimisEvapPrecip(Timeseries):
""" References a CSV file from CIMIS to get hourly precipitation, and
a climatology of evaporation, creating a timeseries where evaporation is
positive, and precipitation is negative, in m/s
"""
def __init__(self,name,csv_file=None,lag_s=0.0):
""" Load CIMIS precip data and merge with evap. climatology. if csv_file is
not specified, it defaults to a 2005-2010 dataset.
"""
self.name = name
self.csv_file = csv_file
Timeseries.__init__(self,label="%s CIMIS evap-precip"%name,
lag_s=lag_s)
def raw_data(self,sim_start,sim_end):
""" return the array of times and array of values for C:\Program Files\GnuWin32the real start/end dates
given.
code in here should *not* perform the lagging
this *is* the right place to perform amplification or shifts in value,
or tidal filtering
"""
data = read_cimis_csv(self.csv_file,['Precip (mm)'])
# nan's get replaced by 0
valid = isfinite(data[:,1])
data[~valid,1] = 0.0
# convert to a rate in m/s
data[0,1] = 0.0
# mm/period -> m dt [d] h seconds
data[1:,1] = data[1:,1] * 1e-3 / ( diff(data[:,0])*24*3600 )
# just to make sure that we have evenly spaced data
vals = timeseries.fill_holes(data)
# trim down to the period in question:
valid = (vals[:,0] > date2num(sim_start) -1) & (vals[:,0] < date2num(sim_end) + 1)
vals = vals[valid,:]
# and add in evaporation:
evap = SFBayMeanEvaporation()
# combine, with evaporation being positive, precip negative
vals[:,1] = evap.interp_to_absdays(vals[:,0]) - vals[:,1]
return vals[:,0], vals[:,1]
class JapaneseEvapPrecip(Timeseries):
""" References Japanese met file to get precipitation
Calculated precipitation is negative, in m/s
Hardwired evaporation to zero.
"""
def __init__(self,name,tab_delim_file=None,lag_s=0.0):
self.name = name
self.tab_delim_file = tab_delim_file
Timeseries.__init__(self,label="%s Japanese precip"%name,
lag_s=lag_s)
def raw_data(self,sim_start,sim_end):
""" return the array of times and array of values for C:\Program Files\GnuWin32the real start/end dates
given.
code in here should *not* perform the lagging
this *is* the right place to perform amplification or shifts in value,
or tidal filtering
"""
data = read_Japanese_met(self.tab_delim_file,['Precip (mm)'])
# nan's get replaced by 0
valid = isfinite(data[:,1])
data[~valid,1] = 0.0
# convert to a rate in m/s
data[0,1] = 0.0
# mm/period -> m dt [d] h seconds
data[1:,1] = data[1:,1] * 1e-3 / ( diff(data[:,0])*24*3600 )
# just to make sure that we have evenly spaced data
vals = timeseries.fill_holes(data)
# trim down to the period in question:
valid = (vals[:,0] > date2num(sim_start) -1) & (vals[:,0] < date2num(sim_end) + 1)
vals = vals[valid,:]
# evaporation is hardwired to zero, subtract precip from zero evap to get net evap:
vals[:,1] = -vals[:,1]
return vals[:,0], vals[:,1]
class CimisWind(Timeseries2Vector):
""" References a CSV file downloaded from wwwcimis.water.ca.gov
Choose 'CSV with Headers' when saving the file, and save to metric units.
"""
def __init__(self,name,csv_file=None,lag_s=0.0):
""" Read wind from a single station CIMIS csv file. If csv_file is not
given, defaults to a Union City, 2005-2010 dataset.
"""
self.name = name
self.csv_file = csv_file
Timeseries2Vector.__init__(self,label="%s CIMIS wind"%name,
lag_s=lag_s)
def raw_data(self,sim_start,sim_end):
""" return the array of times and array of values for the real start/end dates
given.
code in here should *not* perform the lagging
this *is* the right place to perform amplification or shifts in value,
or tidal filtering
"""
data = read_cimis_csv(self.csv_file,['Wind Speed (m/s)',
'Wind Dir (0-360)'])
# remove nan values - they'll be interpolated in
valid = isfinite(data[:,1])
data = data[valid,:]
#
wind_dir = (180 + 90-data[:,2]) * pi / 180. # radians CCW from +x, velocity vector
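# A note on the conversion above (assuming CIMIS reports the compass bearing the
# wind blows *from*, i.e. the usual meteorological convention): adding 180 gives the
# direction the air moves *toward*, and (90 - bearing) converts compass degrees to
# math-convention degrees CCW from +x. For example, a wind reported from 270 deg
# (a westerly) maps to (180+90-270)=0 deg, i.e. a velocity vector pointing +x (east).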
wind_spd = data[:,1].copy()
data[:,1] = cos(wind_dir) * wind_spd
data[:,2] = sin(wind_dir) * wind_spd
vals = timeseries.fill_holes(data)
# trim down to the period in question:
valid = (vals[:,0] > date2num(sim_start) -1) & (vals[:,0] < date2num(sim_end) + 1)
vals = vals[valid,:]
return vals[:,0], vals[:,1:]
class JapaneseWind(Timeseries2Vector):
""" References a tab delimited file
units are m/s and degrees.
"""
def __init__(self,name,tab_delim_file=None,lag_s=0.0):
""" Read wind from a single station Japanese weather file.
"""
self.name = name
self.tab_delim_file = tab_delim_file
Timeseries2Vector.__init__(self,label="%s Japanese wind"%name,
lag_s=lag_s)
def raw_data(self,sim_start,sim_end):
""" return the array of times and array of values for the real
start/end dates given.
code in here should *not* perform the lagging
this *is* the right place to perform amplification or shifts in value,
or tidal filtering
"""
data = read_Japanese_met(self.tab_delim_file,['Wind Speed (m/s)',
'Wind Dir (0-360)'])
# remove nan values - they'll be interpolated in
valid = isfinite(data[:,1])
#
wind_dir = (180 + 90-data[:,2]) * pi / 180. # radians CCW from +x, velocity vector
wind_spd = data[:,1].copy()
data[:,1] = cos(wind_dir) * wind_spd
data[:,2] = sin(wind_dir) * wind_spd
vals = timeseries.fill_holes(data)
# trim down to the period in question:
valid = (vals[:,0] > date2num(sim_start) -1) & (vals[:,0] < date2num(sim_end) + 1)
vals = vals[valid,:]
return vals[:,0], vals[:,1:]
# class NDBCWind(Timeseries2Vector):
# """ Retrieve wind timeseries from National Data Buoy Center
# buoys. At least for 46026, just beyond the sand bar outside Golden
# Gate, this is hourly, near-realtime data, measured at 5m above sea surface.
# """
class NoaaWind(Timeseries2Vector):
""" Get NOAA wind data and create a timeseries Datasource with it
lag_s introduces
a lag specified in seconds (i.e. if the model lags reality, specify a
negative lag here so that the forcing is shifted back in time)
"""
def __init__(self,station_name,lag_s=0.0,lowpass_hours=0):
Timeseries2Vector.__init__(self,label="%s vector wind"%station_name,
lag_s=lag_s)
self.station_name = station_name
self.gage = gage_data.gage(station_name)
self.lowpass_hours = lowpass_hours
def raw_data(self,sim_start,sim_end):
""" return the array of times and array of values for the real start/end dates
given.
code in here should *not* perform the lagging
this *is* the right place to perform amplification or shifts in value,
or tidal filtering
"""
if self.lowpass_hours > 0:
td = datetime.timedelta(2* self.lowpass_hours / 24.0)
sim_start = sim_start - td
sim_end = sim_end + td
vals = self.gage.data(sim_start,sim_end,'w')
vals = timeseries.fill_holes(vals)
absdays = vals[:,0]
uv_values = vals[:,1:]
# hopefully 4th order is okay -
# typical cutoff will probably be ~1h or less, and typical data sampling rate
# will be 10 per hour.
if len(absdays) < 12:
print("NOAA Wind Data for station %s, period %s - %s is no good"%(self.station_name,
sim_start,sim_end))
print(absdays)
print(uv_values)
print("Last request key: ",self.gage.last_request_key)
raise Exception("Missing forcing data")
if self.lowpass_hours > 0:
uv_values[:,0] = lp_filter.lowpass(uv_values[:,0],absdays, self.lowpass_hours / 24.0, order=4 )
uv_values[:,1] = lp_filter.lowpass(uv_values[:,1],absdays, self.lowpass_hours / 24.0, order=4 )
return absdays, uv_values
class SFBayMeanEvaporation(Timeseries):
""" Kludge for estimating evaporation rates based at Oakland airport.
Eventually this will need to be calculated using some form of the Penman
equation.
These data are in several places on the web, notably
http://www.calclim.dri.edu/ccda/comparative/avgpan.html
While the units are not given, I'm pretty sure they should be inches/month.
"""
# the Burlingame data (OAK and SFO data are calculated, this is measured)
orig_data = array( [1.27, 1.81, 3.60, 5.28, 6.85, 7.82, 8.42, 7.39, 5.74, 3.78, 1.98, 1.28] )
# Burlingame: [1.27 1.81 3.60 5.28 6.85 7.82 8.42 7.39 5.74 3.78 1.98 1.28]
# Oakland AP: [1.8 2.3 3.8 4.8 5.7 6.4 6.4 6.0 5.4 4.0 2.4 1.8 ]
# SFO: [1.7 2.4 3.8 5.3 6.4 7.1 6.7 6.6 5.9 4.4 2.4 1.7 ]
# Grizzly Island: [1.45 2.25 4.00 5.72 8.07 9.82 10.69 8.93 6.88 4.33 2.10 1.55]
# Mandeville Isl, Delta: [1.10 2.38 4.77 6.95 8.55 10.44 11.22 9.71 7.41 5.12 2.47 1.13 ]
# Panoche Cr, San Jose: [1.74 2.86 5.72 7.50 11.83 13.58 15.04 14.29 10.45 7.61 2.72 1.81 ]
# Tracy Pumps: [1.54 2.48 5.31 8.16 12.00 14.88 16.92 14.52 10.62 6.59 2.95 1.47]
def __init__(self,lag_s=0.0):
Timeseries.__init__(self,label="SF Bay evaporation, monthly climatology",
lag_s=lag_s)
def raw_data(self,sim_start,sim_end):
# Take the original data at the middle of each month, so here we need
# to synthesize the ides-timeseries between sim_start and sim_end
# remember months are counted 1-based
start_month = sim_start.year * 12 + (sim_start.month - 1)
end_month = sim_end.year*12 + (sim_end.month - 1)
# a bit of padding to make sure we have a measurement before the beginning
# and after the end.
all_months = arange( start_month - 1, end_month + 2 )
mapping = all_months % 12 # indexes the monthly average values - NOT IDES!
# the Timeseries code wants a constant time-step -
start_absday = date2num(datetime.datetime( all_months[0] // 12, (all_months[0] % 12) + 1, 15 ) )
end_absday = date2num(datetime.datetime( all_months[-1] // 12, (all_months[-1] % 12) + 1, 15 ) )
absdays = linspace(start_absday,end_absday,len(all_months))
# Just inch/month -> meters/second
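# (0.0254 m/inch divided by an average month of 365.2425/12 days * 86400 s/day is
# approximately 9.6587e-9, which is where the constant below comes from.)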
evap_mps = self.orig_data[mapping] * 9.6587355e-09
return absdays,evap_mps
def interp_to_absdays(self,absdays):
""" Return evaporation rates, sampled onto the given absdays values.
This is used by precipitation datasources which combine with evaporation
"""
sim_start = num2date(absdays[0])
sim_end = num2date(absdays[-1])
my_absdays,my_data = self.raw_data(sim_start,sim_end)
f = interp1d(my_absdays,my_data)
return f(absdays)
class NDOI(Timeseries):
""" Flow data for net delta output index. This can probably be ignored, and use the FlowCsvMgd class instead.
"""
ndoi_fn = os.path.join(forcing_dir,"flows/ndoi-1994-2009.txt")
dayflow_fn = os.path.join(forcing_dir,"flows/dayflow.csv")
def __init__(self,
amplification=1.0,
lag_s=0.0):
Timeseries.__init__(self,label="NDOI",lag_s=lag_s)
self.amplification = amplification
def raw_data(self,sim_start,sim_end):
# NDOI from IEP appears to no longer be supported. Switching to data from dayflow, which
# should be about the same, although there is some small discrepancy
if 0:
ts = timeseries.IEPFile(self.ndoi_fn)
# important to clip because the timeseries determines the starting point
# for year days by looking at the year of the first entry in the timeseries.
else:
d = loadtxt(self.dayflow_fn,
skiprows=1,delimiter=',',
converters={0: lambda s: date2num(datetime.datetime.strptime(s,'%d-%b-%y')) } )
# [cfs] => [m3/s]
d[:,1] *= 0.028316847
ts = timeseries.Timeseries( d[:,0],d[:,1] )
ts = ts.clip(sim_start,sim_end)
return ts.t_in(units='absdays'), ts.x * self.amplification
class FlowCsvMgd(Timeseries):
""" Basic CSV format for flows.
Assumes first line is headers, subsequent lines are YYYY-MM-DD HH:MM,24.5234
where the flow is in mgd.
timestamps are assumed to be at the center of the averaging interval, already adjusted
to be UTC. The suntans boundaries.c code doesn't do it yet, but it would be more appropriate
to take the nearest value rather than interpolating between values (because these data are
generally daily averages already).
"""
def __init__(self,csv_fn,amplification=1.0,lag_s=0.0):
Timeseries.__init__(self,label=os.path.basename(csv_fn),lag_s=lag_s)
self.amplification = amplification
self.fn = csv_fn
def raw_data(self,sim_start,sim_end):
ts = timeseries.Timeseries.load_csv(self.fn,
skiprows=1,date_fmt="%Y-%m-%d %H:%M")
# important to clip because the timeseries determines the starting point
# for year days by looking at the year of the first entry in the timeseries.
ts = ts.clip(sim_start,sim_end)
# amplify and convert [mgd] -> [m3/s]
return ts.t_in(units='absdays'), ts.x * self.amplification * 0.043812636
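# A minimal example of the expected CSV (values invented; flows are daily averages
# in mgd, timestamps already shifted to UTC at the center of the averaging window):
#   datetime,flow_mgd
#   2009-06-01 12:00,24.5
#   2009-06-02 12:00,26.1
# The factor 0.043812636 above is 1e6 US gallons (3785.411784 m3) per 86400 s.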
class NCFlow(Timeseries):
""" Read flow data from a netcdf file
"""
def __init__(self,nc_fn,amplification=1.0,lag_s=0.0):
super(NCFlow,self).__init__(label=os.path.basename(nc_fn),lag_s=lag_s)
self.amplification = amplification
self.fn = nc_fn
def raw_data(self,sim_start,sim_end):
if not os.path.exists(self.fn):
raise Exception("Forcing file doesn't exist: %s"%self.fn)
nc=qnc.QDataset(self.fn)
# assumes that time is called time, and is CF-like
t=nc.time.as_datenum()
Q=nc.flow[:]
# convert to m3/s
Q=Units.conform(Q,Units(nc.flow.units),Units('m3/s'))
sel=utils.within(t,[date2num(sim_start),date2num(sim_end)])
t=t[sel]
Q=Q[sel]
#ts = timeseries.Timeseries(t,Q)
#ts = ts.clip(sim_start,sim_end)
# amplify
# return ts.t_in(units='absdays'), ts.x * self.amplification
return t,Q
class EBDA_MDF(Timeseries):
""" Flow data for the East Bay Dischargers Assoc.
This is data from Mike Connor, mconnor@ebda.org, for MDF (Marina Dechlorination Facility),
which includes EBDA contributors, plus some LAVWMA, less some diversions. I'm pretty sure
it's the last point before it's pumped into the bay.
This can also be ignored, and use FlowCsvMgd instead.
"""
fn = os.path.join(forcing_dir,"flows/ebda/ebda_mdf_flow.csv")
def __init__(self,
amplification=1.0,
lag_s=0.0):
Timeseries.__init__(self,label="EBDA",lag_s=lag_s)
self.amplification = amplification
def read_timeseries(self):
fp = open(self.fn,'rt')
fp.readline() # column names
t = []
f = []
for line in fp:
date,flow = line.split(',')
flow = float(flow) * 0.043812636 # [MGD]->[m3/s]
date = date2num( datetime.datetime.strptime(date,'%m/%d/%Y') ) + 0.5
t.append(date)
f.append(flow)
return timeseries.Timeseries(array(t),array(f))
def raw_data(self,sim_start,sim_end):
ts = self.read_timeseries()
# important to clip because the timeseries determines the starting point
# for year days by looking at the year of the first entry in the timeseries.
ts = ts.clip(sim_start,sim_end)
return ts.t_in(units='absdays'), ts.x * self.amplification
class SJWWTP(Timeseries):
""" Flow data for San Jose wastewater treatment plant inputs into
Artesian slough. Data obtained from Peter.Schafer@sanjoseca.gov
Likewise, use FlowCsvMgd instead.
"""
fn=os.path.join(forcing_dir,"flows/sjwwtp2005_2009.csv")
def __init__(self,
amplification=1.0,
lag_s=0.0):
Timeseries.__init__(self,label="SJWWTP",lag_s=lag_s)
self.amplification = amplification
def read_timeseries(self):
fp = open(self.fn,'rt')
fp.readline() # header info
fp.readline() # column names
t = []
f = []
for line in fp:
date,flow = line.split(',')
flow = float(flow) * 0.043812636 # [MGD]->[m3/s]
date = date2num( datetime.datetime.strptime(date,'%m/%d/%Y') ) + 0.5
t.append(date)
f.append(flow)
return timeseries.Timeseries(array(t),array(f))
def raw_data(self,sim_start,sim_end):
ts = self.read_timeseries()
# important to clip because the timeseries determines the starting point
# for year days by looking at the year of the first entry in the timeseries.
ts = ts.clip(sim_start,sim_end)
return ts.t_in(units='absdays'), ts.x * self.amplification
from rdb import Rdb
class UsgsGage(Timeseries):
""" fetches data from waterdata.usgs.gov
for now, just get daily information.
"""
def __init__(self,station_code,lag_s = None,label=None,amplification=1.0):
if label is None:
label = "USGS streamflow, #%s"%station_code
Timeseries.__init__(self, label=label, lag_s=lag_s)
self.station_code = station_code
self.amplification = amplification
def raw_data(self,sim_start,sim_end):
# cb_00060=on gives us daily mean
sim_start=utils.to_datetime(sim_start)
sim_end =utils.to_datetime(sim_end)
begin_date = sim_start.strftime("%Y-%m-%d")
end_date = (sim_end+datetime.timedelta(1)).strftime("%Y-%m-%d")
self.url = "http://waterdata.usgs.gov/nwis/dv?referred_module=sw&" + \
"site_no=%d&cb_00060=on&begin_date=%s&end_date=%s&format=rdb"%(self.station_code,
begin_date,end_date)
print(self.url)
fp = urlopen(self.url)
self.reader = Rdb(fp=fp)
# this assumes that only one numeric data column is in the rdb file
daily_mean_cfs = self.reader.data()
cumecs = daily_mean_cfs * 0.028316847 * self.amplification
absdays = self.reader['datetime']
# fake any missing data by interpolation:
invalid = isnan(cumecs)
if any(invalid):
print("USGS gage %s has some missing data. will attempt to interpolate"%self.station_code)
cumecs[invalid] = interp( absdays[invalid],
absdays[~invalid],cumecs[~invalid],
left = cumecs[~invalid][0], right = cumecs[~invalid][-1] )
return absdays,cumecs
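# Example usage (a sketch; the station code is illustrative - any 8-digit USGS site
# number with daily streamflow should work):
#   gage = UsgsGage(11447650)
#   absdays, q_m3s = gage.raw_data(datetime.datetime(2009,1,1), datetime.datetime(2009,2,1))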
class CompositeUsgsGage(Timeseries):
""" A weighted average of multiple usgs gages. This is used in conjunction with the watershed-based
flow forcing where each un-gaged source is correlated to gaged sources using watershed area.
"""
def __init__(self,gage_ids,weights,lag_s = None,label=None,amplification=1.0):
if label is None:
label = "USGS streamflow, composite"
Timeseries.__init__(self, label=label, lag_s=lag_s)
self.gage_ids = gage_ids
self.weights = weights
self.amplification = amplification
def raw_data(self,sim_start,sim_end):
all_absdays = []
all_cumecs = []
for gage_id,weight in zip(self.gage_ids,self.weights):
one_gage = UsgsGage(station_code=gage_id,label="temp",amplification=self.amplification)
absdays,cumecs = one_gage.raw_data(sim_start,sim_end)
if len(all_absdays) > 0:
# For now, assert that they are all the same size and time period
if len(absdays) != len(all_absdays[0]):
print(sim_start,sim_end)
print("While processing composite gage for %s"%self.label)
raise Exception("Lengths of absdays didn't match")
if any( absdays != all_absdays[0] ):
print(sim_start,sim_end)
print("While processing composite gage for %s"%self.label)
raise Exception("Values of absdays didn't match")
all_absdays.append(absdays)
all_cumecs.append(cumecs)
absdays = all_absdays[0]
total_cumecs = 0*all_cumecs[0]
for cumecs,weight in zip(all_cumecs,self.weights):
total_cumecs += weight * cumecs
return absdays,total_cumecs
class KrigedSource(DataSource):
""" A spatially variable field, based on Kriging between a given set
of sources. The other sources may themselves be time-varying.
"""
def __init__(self,label,station_list):
""" label: string giving short descriptive name of this field
station_list: [ ([x,y],datasource), ... ]
"""
DataSource.__init__(self,label)
self.station_list = station_list
def write_config(self,fp,sun):
fp.write("# %s\n"%self.label)
fp.write("BEGIN_DATA\n")
fp.write(" KRIGED\n")
fp.write(" STATION_COUNT %d\n"%len(self.station_list))
for xy,subsrc in self.station_list:
fp.write(" STATION_SPEC %s %f %f\n"%(subsrc.filename, xy[0], xy[1]) )
fp.write("END_DATA\n")
def prepare(self,gforce):
""" For Kriging sources, this is where subsources are registered, and
we can get the proper references for them before write_config is called
"""
for xy,subsrc in self.station_list:
gforce.add_datasource(subsrc)
## Filters for modifying and combining data sources:
class LowpassTimeseries(Timeseries):
pad_factor = 2.0 # assume that the filter transients decay within time pad_factor*cutoff_days
def __init__(self,source,cutoff_days,order=4):
""" Returns a new forcing timeseries object which is a low-passed
version of the source timeseries
handles fetching a bit of extra data to pad out the input before filtering,
"""
super(LowpassTimeseries,self).__init__(label="LP"+source.label)
self.source=source
self.cutoff_days = cutoff_days
self.order = order
def raw_data(self,start_datetime,end_datetime):
""" Fetch data from the underlying source for a slightly larger time window,
make sure it's evenly spaced, low-pass filter, truncate, and return
"""
pad = datetime.timedelta(self.cutoff_days * self.pad_factor)
source_times,source_data = self.source.raw_data(start_datetime - pad,
end_datetime+pad)
lp_data = lp_filter.lowpass(source_data,source_times,self.cutoff_days,order=self.order)
i_start = searchsorted(source_times,date2num(start_datetime),side='left')
i_end = searchsorted(source_times,date2num(end_datetime),side='right')
# need to include an extra sample to completely enclose the range
i_start = max(0,i_start-1)
i_end += 1
return source_times[i_start:i_end],lp_data[i_start:i_end]
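# Example usage (a sketch): wrap an existing datasource so the model only sees the
# subtidal signal (the NoaaGage sketch earlier in this file would do as a source):
#   stage = NoaaGage('Point Reyes, CA')
#   subtidal_stage = LowpassTimeseries(stage, cutoff_days=1.5)  # ~36 hour cutoff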
class FillByLastValid(Timeseries):
""" Wrap a timeseries, and when there is missing data, use the most
recent valid data from before the missing data.
"""
stride_days=1.0
max_backwards_days=30
def __init__(self,source,stride_days=None,max_backwards_days=None,fallback=0.0):
""" source: a Timeseries object
stride_days: when the request period starts with invalid data, this
gives the stride for checking past periods
max_backwards_days: if no valid data is found within this amount of
time, then leading invalid values are given the next valid value.
if there are no valid values anywhere, then returns fallback
"""
super(FillByLastValid,self).__init__(label="Fill"+source.label)
self.source=source
if stride_days:
self.stride_days=stride_days
if max_backwards_days:
self.max_backwards_days=max_backwards_days
self.fallback=fallback
def raw_data(self,start_datetime,end_datetime):
""" Fetch data from the underlying source for a slightly larger time window,
make sure it's evenly spaced, low-pass filter, truncate, and return
"""
source_times,source_data = self.source.raw_data(start_datetime,
end_datetime)
if isnan(source_data[0]):
print("FillByLastValid: looking backwards in time")
Nbacks=int(self.max_backwards_days/self.stride_days)
last_valid = nan
for i in range(1,Nbacks+1):
start_dt=start_datetime - datetime.timedelta(i*self.stride_days)
end_dt=start_datetime - datetime.timedelta((i-1)*self.stride_days)
back_times,back_data = self.source.raw_data(start_dt,end_dt)
if any( isfinite(back_data) ):
last_valid= back_data[isfinite(back_data)][-1]
break
if isnan(last_valid):
print("FillByLastValid: found no past, useable data.")
if any(isfinite(source_data)):
last_valid=source_data[isfinite(source_data)][0]
else:
print("No valid data anywhere - using fallback value")
last_valid=self.fallback
else:
last_valid=source_data[0]
for i in range(len(source_data)):
if isnan(source_data[i]):
source_data[i]=last_valid
else:
last_valid=source_data[i]
return source_times,source_data
class ShiftTimeseries(Timeseries):
""" Apply time/value shift/scaling
"""
def __init__(self,source,amplify=1.0,delay_s=0.0,offset=0.0,center=None):
""" Returns a new forcing timeseries object which is has time/value shifts
relative the source timeseries.
amplify scales the data about center, which if unspecified is taken as the mean.
(i.e. good for tides, bad for wind)
delay_s will shift the data in time
"""
super(ShiftTimeseries,self).__init__(label="Shift"+source.label)
self.source=source
self.amplify = amplify
self.delay_s = delay_s
self.offset = offset
self.center = center
def raw_data(self,start_datetime,end_datetime):
""" Fetch data from the underlying source for a slightly larger time window,
make sure it's evenly spaced, low-pass filter, truncate, and return
"""
delay_delta = datetime.timedelta(self.delay_s/86400.)
print("ShiftTimeSeries: end_datetime: %s"%end_datetime)
print(" shifted end: %s"%(end_datetime - delay_delta))
source_times,source_data = self.source.raw_data(start_datetime - delay_delta,
end_datetime - delay_delta)
source_times += self.delay_s/86400.
print("Resulting range of data: %s - %s"%( num2date(source_times[0]),
num2date(source_times[-1])))
if self.center is None:
center = mean(source_data)
else:
center = self.center
new_data = (source_data - center)*self.amplify + center + self.offset
return source_times,new_data
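# Example usage (a sketch): amplify a stage record by 10% about its mean and delay
# it 30 minutes ('stage' stands in for any Timeseries datasource defined above):
#   shifted = ShiftTimeseries(stage, amplify=1.1, delay_s=1800.)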
def read_boundaries_dat(sun,proc):
fp = open(sun.file_path('BoundaryInput',proc),'rt')
gforce = GlobalForcing(sun=sun,proc=proc)
# simple tokenizer
# able to handle comments that start with a #
def tok_gen():
for line in fp:
for t in line.split():
if t[0] == '#':
break # skip the rest of the line
yield t
tok = tok_gen().__next__
def tok_tag(s):
t = tok()
if t != s:
print("Expected %s, got %s"%(s,t))
def tok_int(tag=None):
if tag:
tok_tag(tag)
return int(tok())
def tok_float(tag=None):
if tag:
tok_tag(tag)
return float(tok())
def tok_str(tag=None):
if tag:
tok_tag(tag)
return tok()
version = tok_int('BOUNDARY_FORCING')
print("reading boundaries.dat version %d"%version)
if version == 2:
# format, something like:
ntides = tok_int()
ncells = tok_int()
ngages = tok_int()
gage_t0 = tok_float()
gage_dt = tok_float()
ngage_steps = tok_int()
if ntides > 0:
raise Exception("New forcing code not tested with old format and harmonics")
omegas = [tok_float() for tide_i in range(ntides)]
gage_weights = zeros( (ncells,ngages), float64 )
for c in range(ncells):
# this is where we should be doing something smarter...
for x in range(ntides*6):
tok_float()
# this is what we want:
for gi in range(ngages):
gage_weights[c] = [tok_float() for x in range(ngages)]
datasources = []
if ngages > 0:
gage_data = zeros( (ngage_steps,ngages,3), float64)
for gage_step in range(ngage_steps):
for gage_i in range(ngages):
# read u,v,h
gage_data[gage_step,gage_i] = [tok_float(),tok_float(),tok_float()]
for i in range(ngages):
datasources.append( Timeseries("gage%i"%i,
t0=gage_t0,
dt=gage_dt,
data=gage_data[:,i],
lag_s=0) )
# For now, assume all edges are getting the same weight
# Still, the old code starts with the boundary edges, gets the boundary cells,
# and those are what are listed in the boundaries.dat files.
raise Exception("Really not prepared for reading the old forcing file.")
# BOUNDARY_FORCING version
# <number of tidal components>
# <number of boundary cells>
# <number of gages>
# <gage t0 - simulation_start, in seconds>
# <gage timestep>
# <num gage timesteps>
# ntides * <omega>
# ncells * [ ntides * <uamp>
# ntides * <uphase>
# ntides * <vamp>
# ntides * <vphase>
# ntides * <hamp>
# ntides * <hphase>
# ngages * <gage weight> ]
# ngagetimesteps * [ ngages * [ u,v,h ] ]
elif version == 6:
# read in the datasources first, stored into a dict:
ds_dir = os.path.join(sun.datadir,'datasources')
datasources = {}
for f in glob.glob(os.path.join(ds_dir,"*")):
ds_name = os.path.basename(f)
try:
datasources[ds_name] = read_datasource(f,sun)
except Exception as e:
print("Couldn't read datasource %s (file %s)"%(ds_name,f))
datasources[ds_name] = None
itemlist_count = tok_int('ITEMLIST_COUNT')
print("itemlist_count is" ,itemlist_count)
for itemlist_index in range(itemlist_count):
tok_tag('BEGIN_ITEMLIST')
# identify which model elements are being forced, and define a forcing
# group
item_type = tok_str('ITEM_TYPE')
if item_type in ('EDGE','CELL'):
item_count = tok_int('ITEM_COUNT')
tok_tag('ITEMS')
items = [tok_int() for i in range(item_count)]
if item_type == 'EDGE':
group = gforce.new_group(edges = items)
elif item_type == 'CELL':
group = gforce.new_group(cells = items)
else:
raise Exception("unknown item type %s"%item_type)
elif item_type in ('ALL_CELLS','ALL_EDGES'):
items = item_type
dimensions = tok_str('DIMENSIONS')
if item_type == 'ALL_CELLS':
group = gforce.new_group(cells='all')
elif item_type == 'ALL_EDGES':
group = gforce.new_group(edges='all')
# then read what parameters are forced, and what datasource is used.
bc_count = tok_int('BC_COUNT')
for bc_index in range(bc_count):
bctype = tok_str('BCTYPE')
data_index = tok_str('DATA')
group.add_datasource(datasources[data_index],bctype)
tok_tag('END_ITEMLIST')
else:
# READ DATA SECTIONS:
data_count = tok_int('DATA_COUNT')
datasources = [None] * data_count
# print "Reading %d data sections"%data_count
for data_i in range(data_count):
dsource = None
tok_tag('BEGIN_DATA')
dtype = tok()
if dtype in ('TIMESERIES','TIMESERIES_2VEC'):
sample_count = tok_int('SAMPLE_COUNT')
dt = tok_float('DT')
t0_sun_seconds = tok_float('TZERO')
# convert to datetime:
base_date = datetime.datetime(sun.time_zero().year,1,1)
t0 = base_date + datetime.timedelta(t0_sun_seconds / (24.*3600.))
tok_tag('VALUES')
if dtype == 'TIMESERIES':
values = zeros( sample_count, float64)
for i in range(sample_count):
values[i] = tok_float()
dsource = Timeseries("timeseries%i"%data_i,
t0=t0,
dt=dt,
data=values,lag_s=0)
else:
values = zeros( (sample_count,2), float64)
for i in range(sample_count):
values[i,0] = tok_float()
values[i,1] = tok_float()
dsource = Timeseries2Vector("timeseries2vec%i"%data_i,
t0=t0,
dt=dt,
data=values,lag_s=0)
elif dtype == 'CONSTANT':
value = tok_float('VALUE')
dsource = Constant("const%i"%data_i,
value=value)
elif dtype == 'HARMONICS':
constituents_count = tok_int('CONSTITUENTS_COUNT')
omegas = zeros(constituents_count,float64)
phases = zeros_like(omegas)
amps = zeros_like(omegas)
for sec,vals in zip(['OMEGAS','PHASES','AMPLITUDES'],
[omegas,phases,amps]):
tok_tag(sec)
for i in range(constituents_count):
vals[i] = tok_float()
dsource = Harmonics("harmonics%i"%data_i,
omegas=omegas,
phases=phases,
amplitudes=amps)
else:
raise Exception("Unrecognized data type: %s"%dtype)
datasources[data_i] = dsource
tok_tag('END_DATA')
# READ EDGELIST SECTIONS:
edgelist_count = tok_int('EDGELIST_COUNT')
for elist_i in range(edgelist_count):
tok_tag('BEGIN_EDGELIST')
edge_count = tok_int('EDGE_COUNT')
edges = zeros(edge_count,int32)
tok_tag('EDGES')
for i in range(edge_count):
edges[i] = tok_int()
group = gforce.new_group(edges = edges)
bc_count = tok_int('BC_COUNT')
bcs = [None]*bc_count
for i in range(bc_count):
tok_tag('BCTYPE')
bctype = tok()
dsource_index = tok_int('DATA')
group.add_datasource(datasources[dsource_index],bctype)
tok_tag('END_EDGELIST')
if version == 5:
print("Version 5, reading cell lists")
celllist_count = tok_int('CELLLIST_COUNT')
for clist_i in range(celllist_count):
tok_tag('BEGIN_CELLLIST')
cell_count = tok_int('CELL_COUNT')
cells = zeros(cell_count,int32)
tok_tag('CELLS')
for i in range(cell_count):
cells[i] = tok_int()
group = gforce.new_group(cells = cells)
bc_count = tok_int('BC_COUNT')
bcs = [None]*bc_count
for i in range(bc_count):
tok_tag('BCTYPE')
bctype = tok()
dsource_index = tok_int('DATA')
group.add_datasource(datasources[dsource_index],bctype)
tok_tag('END_CELLLIST')
return gforce
def read_datasource(fn,sun):
#
fp = open(fn,'rt')
# Read any header lines, and concatenate to make a comment
comment = [os.path.basename(fn)]
while 1:
txt = fp.readline().strip()
if len(txt) == 0:
pass
elif txt[0] == '#':
comment.append( txt[1:].strip() )
else:
break
comment = " ".join(comment)
# simple tokenizer
# able to handle comments that start with a #
def tok_gen():
for t in txt.split():
if t[0] == '#':
break
yield t
for line in fp:
for t in line.split():
if t[0] == '#':
break # skip the rest of the line
yield t
tok = tok_gen().__next__
def tok_tag(s):
t = tok()
if t != s:
print("Expected %s, got %s"%(s,t))
def tok_int(tag=None):
if tag:
tok_tag(tag)
return int(tok())
def tok_float(tag=None):
if tag:
tok_tag(tag)
return float(tok())
tok_tag('BEGIN_DATA')
ds_type = tok()
if ds_type == "TIMESERIES":
sample_count = tok_int('SAMPLE_COUNT')
dt = tok_float('DT')
t0_sun_seconds = tok_float('TZERO')
# convert to datetime:
base_date = datetime.datetime(sun.time_zero().year,1,1)
t0 = base_date + datetime.timedelta(t0_sun_seconds / (24.*3600.))
tok_tag('VALUES')
data = array( [tok_float() for i in range(sample_count)] )
tok_tag('END_DATA')
dsource = Timeseries(comment,
t0=t0,
dt=dt,
data=data,lag_s=0)
else:
dsource = Constant(comment + "FAKE", value=1)
return dsource
|
rustychris/stompy
|
stompy/model/suntans/forcing.py
|
Python
|
mit
| 103,813
|
[
"NetCDF"
] |
97af0b18f9b2f43d3e4dd08b1776da6ac6cb71db58cfdd1206ee03c3089c06f5
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDose(RPackage):
"""This package implements five methods proposed by Resnik, Schlicker,
Jiang, Lin and Wang respectively for measuring semantic similarities
among DO terms and gene products. Enrichment analyses including
hypergeometric model and gene set enrichment analysis are also
implemented for discovering disease associations of high-throughput
biological data."""
homepage = "https://www.bioconductor.org/packages/DOSE/"
git = "https://git.bioconductor.org/packages/DOSE.git"
version('3.2.0', commit='71f563fc39d02dfdf65184c94e0890a63b96b86b')
depends_on('r@3.4.0:3.4.9', when='@3.2.0')
depends_on('r-scales', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-reshape2', type=('build', 'run'))
depends_on('r-qvalue', type=('build', 'run'))
depends_on('r-igraph', type=('build', 'run'))
depends_on('r-gosemsim', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-fgsea', type=('build', 'run'))
depends_on('r-do-db', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
|
mfherbst/spack
|
var/spack/repos/builtin/packages/r-dose/package.py
|
Python
|
lgpl-2.1
| 2,475
|
[
"Bioconductor"
] |
702215a1837e54212e6d4d39353a3abe60025c4daf6aaf2aabd5fb5cc067451b
|
# -*- coding: utf-8 -*-
"""This module contains functions to be called from console script entry points.
"""
# symbols which are imported by "from mse.command import *"
__all__ = ['bootstrap', 'ConfigurationError', 'start']
import sys
import optparse
from os import getcwd
from os.path import dirname, exists, join
import pkg_resources
try:
pkg_resources.require('TurboGears>=1.5.1')
except pkg_resources.DistributionNotFound:
print """\
This is a TurboGears 1.5.1 (http://www.turbogears.org) application.
It seems that you either don't have TurboGears installed or it can not be found.
Please check if your PYTHONPATH is set correctly. To install TurboGears, go to
http://www.turbogears.org/en/documentation and follow the instructions there. If
you are stuck, visit http://www.turbogears.org/en/resources for support options."""
sys.exit(1)
try:
pkg_resources.require('SQLObject>=0.10.1')
except pkg_resources.DistributionNotFound:
from turbogears.util import missing_dependency_error
print missing_dependency_error('SQLObject')
sys.exit(1)
import cherrypy
import turbogears
from mse.release import version
cherrypy.lowercase_api = True
class ConfigurationError(Exception):
"""Configuration error."""
def _read_config(args):
"""Read deployment configuration file.
    First looks on the command line for a desired config file; if none is
    given there, it then looks for 'setup.py' in the parent of the directory
    where this module is located.
If 'setup.py' is there, assumes that the application is started from
the project directory and should run in development mode and so loads the
configuration from a file called 'dev.cfg' in the current directory.
If 'setup.py' is not there, the project is probably installed and the code
looks first for a file called 'prod.cfg' in the current directory and, if
this isn't found either, for a default config file called 'default.cfg'
packaged in the egg.
"""
setupdir = dirname(dirname(__file__))
curdir = getcwd()
if args:
configfile = args[0]
elif exists(join(setupdir, 'setup.py')):
configfile = join(setupdir, 'dev.cfg')
elif exists(join(curdir, 'prod.cfg')):
configfile = join(curdir, 'prod.cfg')
else:
try:
configfile = pkg_resources.resource_filename(
pkg_resources.Requirement.parse('mse'),
'config/default.cfg')
except pkg_resources.DistributionNotFound:
raise ConfigurationError("Could not find default configuration.")
turbogears.update_config(configfile=configfile,
modulename='mse.config')
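# Illustrative summary of the lookup order implemented above, assuming the
# standard TurboGears quickstart project layout:
#   1. a config file given explicitly on the command line
#   2. <project>/dev.cfg  when setup.py is present (development checkout)
#   3. ./prod.cfg         in the current working directory
#   4. config/default.cfg packaged inside the 'mse' egg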
def bootstrap():
"""Example function for loading bootstrap data into the database
You can adapt this to your needs to e.g. accept more options or to
run more functions for bootstrapping other parts of your application.
By default this runs the function 'mse.model.bootstrap_model', which
creates all database tables and optionally adds a user.
The following line in your project's 'setup.py' file takes care of
installing a command line script when you install your application via
easy_install which will run this function:
'bootstrap-mse = mse.command:bootstrap',
"""
optparser = optparse.OptionParser(usage="%prog [options] [config-file]",
description="Load bootstrap data into the database defined in "
"config-file.", version="mse %s" % version)
optparser.add_option('-C', '--clean', dest="clean", action="store_true",
        help="Purge all data in the database before loading the bootstrap data.")
optparser.add_option('-u', '--user', dest="user", metavar="USERNAME",
help="Create a default user USERNAME (prompts for password).")
options, args = optparser.parse_args()
user = getattr(options, 'user', None)
if user:
options.user = user.decode(sys.getfilesystemencoding())
_read_config(args)
from mse.model import bootstrap_model
bootstrap_model(options.clean, options.user)
def start():
"""Start the CherryPy application server."""
_read_config(sys.argv[1:])
# Running the async task
#from mse import async
#turbogears.startup.call_on_startup.append(async.schedule)
from mse.controllers import Root
return turbogears.start_server(Root())
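# The console scripts that call bootstrap() and start() are wired up through
# setuptools entry points. A minimal sketch of the relevant 'setup.py' section
# follows; 'bootstrap-mse' is quoted from the bootstrap() docstring above,
# while 'start-mse' is an assumed name used only for illustration:
#
#   entry_points={
#       'console_scripts': [
#           'start-mse = mse.command:start',
#           'bootstrap-mse = mse.command:bootstrap',
#       ],
#   },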
|
jinmingda/MicroorganismSearchEngine
|
mse/command.py
|
Python
|
mit
| 4,370
|
[
"VisIt"
] |
a55ca05be16a95b2d317f6c7a71f470d6f74c646afef4b15ce6ad8e5f8e59938
|
"""
Acceptance tests for the teams feature.
"""
import json
import random
import time
from dateutil.parser import parse
import ddt
from flaky import flaky
from nose.plugins.attrib import attr
from selenium.common.exceptions import TimeoutException
from uuid import uuid4
from ..helpers import get_modal_alert, EventsTestMixin, UniqueCourseTest
from ...fixtures import LMS_BASE_URL
from ...fixtures.course import CourseFixture
from ...fixtures.discussion import (
Thread,
MultipleThreadFixture
)
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.teams import (
TeamsPage,
MyTeamsPage,
BrowseTopicsPage,
BrowseTeamsPage,
TeamManagementPage,
EditMembershipPage,
TeamPage
)
from ...pages.common.utils import confirm_prompt
TOPICS_PER_PAGE = 12
class TeamsTabBase(EventsTestMixin, UniqueCourseTest):
"""Base class for Teams Tab tests"""
def setUp(self):
super(TeamsTabBase, self).setUp()
self.tab_nav = TabNavPage(self.browser)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.teams_page = TeamsPage(self.browser, self.course_id)
# TODO: Refactor so resetting events database is not necessary
self.reset_event_tracking()
def create_topics(self, num_topics):
"""Create `num_topics` test topics."""
return [{u"description": i, u"name": i, u"id": i} for i in map(str, xrange(num_topics))]
def create_teams(self, topic, num_teams, time_between_creation=0):
"""Create `num_teams` teams belonging to `topic`."""
teams = []
for i in xrange(num_teams):
team = {
'course_id': self.course_id,
'topic_id': topic['id'],
'name': 'Team {}'.format(i),
'description': 'Description {}'.format(i),
'language': 'aa',
'country': 'AF'
}
teams.append(self.post_team_data(team))
# Sadly, this sleep is necessary in order to ensure that
# sorting by last_activity_at works correctly when running
# in Jenkins.
time.sleep(time_between_creation)
return teams
def post_team_data(self, team_data):
"""Given a JSON representation of a team, post it to the server."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/teams/',
data=json.dumps(team_data),
headers=self.course_fixture.headers
)
self.assertEqual(response.status_code, 200)
return json.loads(response.text)
def create_memberships(self, num_memberships, team_id):
"""Create `num_memberships` users and assign them to `team_id`. The
last user created becomes the current user."""
memberships = []
for __ in xrange(num_memberships):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
memberships.append(user_info)
self.create_membership(user_info['username'], team_id)
#pylint: disable=attribute-defined-outside-init
self.user_info = memberships[-1]
return memberships
def create_membership(self, username, team_id):
"""Assign `username` to `team_id`."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/team_membership/',
data=json.dumps({'username': username, 'team_id': team_id}),
headers=self.course_fixture.headers
)
return json.loads(response.text)
def set_team_configuration(self, configuration, enroll_in_course=True, global_staff=False):
"""
Sets team configuration on the course and calls auto-auth on the user.
"""
#pylint: disable=attribute-defined-outside-init
self.course_fixture = CourseFixture(**self.course_info)
if configuration:
self.course_fixture.add_advanced_settings(
{u"teams_configuration": {u"value": configuration}}
)
self.course_fixture.install()
enroll_course_id = self.course_id if enroll_in_course else None
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, course_id=enroll_course_id, staff=global_staff).visit().user_info
self.course_info_page.visit()
def verify_teams_present(self, present):
"""
Verifies whether or not the teams tab is present. If it should be present, also
checks the text on the page (to ensure view is working).
"""
if present:
self.assertIn("Teams", self.tab_nav.tab_names)
self.teams_page.visit()
self.assertEqual(self.teams_page.active_tab(), 'browse')
else:
self.assertNotIn("Teams", self.tab_nav.tab_names)
def verify_teams(self, page, expected_teams):
"""Verify that the list of team cards on the current page match the expected teams in order."""
def assert_team_equal(expected_team, team_card_name, team_card_description):
"""
Helper to assert that a single team card has the expected name and
description.
"""
self.assertEqual(expected_team['name'], team_card_name)
self.assertEqual(expected_team['description'], team_card_description)
team_card_names = page.team_names
team_card_descriptions = page.team_descriptions
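        # In Python 2, map() with several iterables calls the function with one
        # item from each list in lockstep, so a length mismatch between the
        # expected and displayed teams makes this helper fail rather than being
        # silently ignored.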
map(assert_team_equal, expected_teams, team_card_names, team_card_descriptions)
def verify_my_team_count(self, expected_number_of_teams):
""" Verify the number of teams shown on "My Team". """
# We are doing these operations on this top-level page object to avoid reloading the page.
self.teams_page.verify_my_team_count(expected_number_of_teams)
def only_team_events(self, event):
"""Filter out all non-team events."""
return event['event_type'].startswith('edx.team.')
@ddt.ddt
@attr('shard_5')
class TeamsTabTest(TeamsTabBase):
"""
Tests verifying when the Teams tab is present.
"""
def test_teams_not_enabled(self):
"""
Scenario: teams tab should not be present if no team configuration is set
Given I am enrolled in a course without team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(None)
self.verify_teams_present(False)
def test_teams_not_enabled_no_topics(self):
"""
Scenario: teams tab should not be present if team configuration does not specify topics
Given I am enrolled in a course with no topics in the team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": []})
self.verify_teams_present(False)
def test_teams_not_enabled_not_enrolled(self):
"""
Scenario: teams tab should not be present if student is not enrolled in the course
Given there is a course with team configuration and topics
And I am not enrolled in that course, and am not global staff
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False
)
self.verify_teams_present(False)
def test_teams_enabled(self):
"""
Scenario: teams tab should be present if user is enrolled in the course and it has team configuration
Given I am enrolled in a course with team configuration and topics
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(1)})
self.verify_teams_present(True)
def test_teams_enabled_global_staff(self):
"""
Scenario: teams tab should be present if user is not enrolled in the course, but is global staff
Given there is a course with team configuration
And I am not enrolled in that course, but am global staff
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False,
global_staff=True
)
self.verify_teams_present(True)
@ddt.data(
'topics/{topic_id}',
'topics/{topic_id}/search',
'teams/{topic_id}/{team_id}/edit-team',
'teams/{topic_id}/{team_id}'
)
def test_unauthorized_error_message(self, route):
"""Ensure that an error message is shown to the user if they attempt
to take an action which makes an AJAX request while not signed
in.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration(
{u'max_team_size': 10, u'topics': topics},
global_staff=True
)
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
self.browser.delete_cookie('sessionid')
url = self.browser.current_url.split('#')[0]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
)
)
)
self.teams_page.wait_for_ajax()
self.assertEqual(
self.teams_page.warning_message,
u"Your request could not be completed. Reload the page and try again."
)
@ddt.data(
('browse', '.topics-list'),
# TODO: find a reliable way to match the "My Teams" tab
# ('my-teams', 'div.teams-list'),
('teams/{topic_id}/{team_id}', 'div.discussion-module'),
('topics/{topic_id}/create-team', 'div.create-team-instructions'),
('topics/{topic_id}', '.teams-list'),
('not-a-real-route', 'div.warning')
)
@ddt.unpack
def test_url_routing(self, route, selector):
"""Ensure that navigating to a URL route correctly updates the page
content.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration({
u'max_team_size': 10,
u'topics': topics
})
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
# Get the base URL (the URL without any trailing fragment)
url = self.browser.current_url
fragment_index = url.find('#')
if fragment_index >= 0:
url = url[0:fragment_index]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
))
)
self.teams_page.wait_for_page()
self.teams_page.wait_for_ajax()
self.assertTrue(self.teams_page.q(css=selector).present)
self.assertTrue(self.teams_page.q(css=selector).visible)
@attr('shard_5')
class MyTeamsTest(TeamsTabBase):
"""
Tests for the "My Teams" tab of the Teams page.
"""
def setUp(self):
super(MyTeamsTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.my_teams_page = MyTeamsPage(self.browser, self.course_id)
self.page_viewed_event = {
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'my-teams',
'topic_id': None,
'team_id': None
}
}
def test_not_member_of_any_teams(self):
"""
        Scenario: Visiting the My Teams page when the user is not a member of any team should not display any teams.
Given I am enrolled in a course with a team configuration and a topic but am not a member of a team
When I visit the My Teams page
And I should see no teams
And I should see a message that I belong to no teams.
"""
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.assertEqual(len(self.my_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertEqual(
self.my_teams_page.q(css='.page-content-main').text,
[u'You are not currently a member of any team.']
)
def test_member_of_a_team(self):
"""
        Scenario: Visiting the My Teams page when the user is a member of a team should display that team.
Given I am enrolled in a course with a team configuration and a topic and am a member of a team
When I visit the My Teams page
Then I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should not see a pagination footer
"""
teams = self.create_teams(self.topic, 1)
self.create_membership(self.user_info['username'], teams[0]['id'])
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.verify_teams(self.my_teams_page, teams)
def test_multiple_team_members(self):
"""
        Scenario: The team card on the My Teams page should show the correct membership count.
Given I am a member of a team with multiple members
When I visit the My Teams page
Then I should see the correct number of team members on my membership
"""
teams = self.create_teams(self.topic, 1)
self.create_memberships(4, teams[0]['id'])
self.my_teams_page.visit()
self.assertEqual(self.my_teams_page.team_memberships[0], '4 / 10 Members')
@attr('shard_5')
@ddt.ddt
class BrowseTopicsTest(TeamsTabBase):
"""
Tests for the Browse tab of the Teams page.
"""
def setUp(self):
super(BrowseTopicsTest, self).setUp()
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
@ddt.data(('name', False), ('team_count', True))
@ddt.unpack
def test_sort_topics(self, sort_order, reverse):
"""
Scenario: the user should be able to sort the list of topics by name or team count
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
When I choose a sort order
Then I should see the paginated list of topics in that order
"""
topics = self.create_topics(TOPICS_PER_PAGE + 1)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
for i, topic in enumerate(random.sample(topics, len(topics))):
self.create_teams(topic, i)
topic['team_count'] = i
self.topics_page.visit()
self.topics_page.sort_topics_by(sort_order)
topic_names = self.topics_page.topic_names
self.assertEqual(len(topic_names), TOPICS_PER_PAGE)
self.assertEqual(
topic_names,
[t['name'] for t in sorted(topics, key=lambda t: t[sort_order], reverse=reverse)][:TOPICS_PER_PAGE]
)
def test_sort_topics_update(self):
"""
Scenario: the list of topics should remain sorted after updates
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics and choose a sort order
Then I should see the paginated list of topics in that order
When I create a team in one of those topics
And I return to the topics list
Then I should see the topics in the correct sorted order
"""
topics = self.create_topics(3)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
self.topics_page.visit()
self.topics_page.sort_topics_by('team_count')
topic_name = self.topics_page.topic_names[-1]
topic = [t for t in topics if t['name'] == topic_name][0]
self.topics_page.browse_teams_for_topic(topic_name)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
self.assertTrue(browse_teams_page.is_browser_on_page())
browse_teams_page.click_create_team_link()
create_team_page = TeamManagementPage(self.browser, self.course_id, topic)
create_team_page.value_for_text_field(field_id='name', value='Team Name', press_enter=False)
create_team_page.set_value_for_textarea_field(
field_id='description',
value='Team description.'
)
create_team_page.submit_form()
team_page = TeamPage(self.browser, self.course_id)
self.assertTrue(team_page.is_browser_on_page())
team_page.click_all_topics()
self.assertTrue(self.topics_page.is_browser_on_page())
self.topics_page.wait_for_ajax()
self.assertEqual(topic_name, self.topics_page.topic_names[0])
def test_list_topics(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(2)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 2)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-2 out of 2 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_topic_pagination(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab, paginated 12 per page
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see only the first 12 topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(20)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 20 total'))
self.assertTrue(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertTrue(self.topics_page.is_next_page_button_enabled())
def test_go_to_numbered_page(self):
"""
        Scenario: the user should be able to navigate topics by page number
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter a valid page number in the page number input
Then I should see that page of topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(25)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_go_to_invalid_page(self):
"""
Scenario: browsing topics should not respond to invalid page numbers
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter an invalid page number in the page number input
Then I should stay on the current page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(self.topics_page.get_current_page_number(), 1)
def test_page_navigation_buttons(self):
"""
        Scenario: the user should be able to page through topics with the next and previous page buttons
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
When I press the next page button
Then I should move to the next page
When I press the previous page button
Then I should move to the previous page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.press_next_page_button()
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 13-13 out of 13 total'))
self.topics_page.press_previous_page_button()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 13 total'))
def test_topic_description_truncation(self):
"""
Scenario: excessively long topic descriptions should be truncated so
as to fit within a topic card.
Given I am enrolled in a course with a team configuration and a topic
with a long description
When I visit the Teams page
And I browse topics
Then I should see a truncated topic description
"""
initial_description = "A" + " really" * 50 + " long description"
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [{"name": "", "id": "", "description": initial_description}]}
)
self.topics_page.visit()
truncated_description = self.topics_page.topic_descriptions[0]
self.assertLess(len(truncated_description), len(initial_description))
self.assertTrue(truncated_description.endswith('...'))
self.assertIn(truncated_description.split('...')[0], initial_description)
def test_go_to_teams_list(self):
"""
Scenario: Clicking on a Topic Card should take you to the
teams list for that Topic.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
And I browse topics
And I click on the arrow link to view teams for the first topic
Then I should be on the browse teams page
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
self.topics_page.visit()
self.topics_page.browse_teams_for_topic('Example Topic')
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
self.assertTrue(browse_teams_page.is_browser_on_page())
self.assertEqual(browse_teams_page.header_name, 'Example Topic')
self.assertEqual(browse_teams_page.header_description, 'Description')
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse topics page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the browse topics page
Then my browser should post a page viewed event
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'browse',
'topic_id': None,
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.topics_page.visit()
@attr('shard_5')
@ddt.ddt
class BrowseTeamsWithinTopicTest(TeamsTabBase):
"""
Tests for browsing Teams within a Topic on the Teams page.
"""
TEAMS_PAGE_SIZE = 10
def setUp(self):
super(BrowseTeamsWithinTopicTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.max_team_size = 10
self.set_team_configuration({
'course_id': self.course_id,
'max_team_size': self.max_team_size,
'topics': [self.topic]
})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
def teams_with_default_sort_order(self, teams):
"""Return a list of teams sorted according to the default ordering
(last_activity_at, with a secondary sort by open slots).
"""
return sorted(
sorted(teams, key=lambda t: len(t['membership']), reverse=True),
key=lambda t: parse(t['last_activity_at']).replace(microsecond=0),
reverse=True
)
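    # Note: the nested sorted() calls above rely on Python's stable sort. The
    # inner sort establishes the secondary ordering (by membership size), and
    # the outer sort by last_activity_at keeps that order for teams whose
    # timestamps tie once microseconds are stripped.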
def verify_page_header(self):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(self.browse_teams_page.header_name, self.topic['name'])
self.assertEqual(self.browse_teams_page.header_description, self.topic['description'])
def verify_search_header(self, search_results_page, search_query):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(search_results_page.header_name, 'Team Search')
self.assertEqual(
search_results_page.header_description,
'Showing results for "{search_query}"'.format(search_query=search_query)
)
def verify_on_page(self, teams_page, page_num, total_teams, pagination_header_text, footer_visible):
"""
Verify that we are on the correct team list page.
Arguments:
teams_page (BaseTeamsPage): The teams page object that should be the current page.
page_num (int): The one-indexed page number that we expect to be on
total_teams (list): An unsorted list of all the teams for the
current topic
pagination_header_text (str): Text we expect to see in the
pagination header.
footer_visible (bool): Whether we expect to see the pagination
footer controls.
"""
sorted_teams = self.teams_with_default_sort_order(total_teams)
self.assertTrue(teams_page.get_pagination_header_text().startswith(pagination_header_text))
self.verify_teams(
teams_page,
sorted_teams[(page_num - 1) * self.TEAMS_PAGE_SIZE:page_num * self.TEAMS_PAGE_SIZE]
)
self.assertEqual(
teams_page.pagination_controls_visible(),
footer_visible,
            msg='Expected paging footer to be ' + ('visible' if footer_visible else 'invisible')
)
@ddt.data(
('open_slots', 'last_activity_at', True),
('last_activity_at', 'open_slots', True)
)
@ddt.unpack
def test_sort_teams(self, sort_order, secondary_sort_order, reverse):
"""
Scenario: the user should be able to sort the list of teams by open slots or last activity
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic
When I choose a sort order
Then I should see the paginated list of teams in that order
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
for i, team in enumerate(random.sample(teams, len(teams))):
for _ in range(i):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
self.create_membership(user_info['username'], team['id'])
team['open_slots'] = self.max_team_size - i
# Re-authenticate as staff after creating users
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=True
).visit()
self.browse_teams_page.visit()
self.browse_teams_page.sort_teams_by(sort_order)
team_names = self.browse_teams_page.team_names
self.assertEqual(len(team_names), self.TEAMS_PAGE_SIZE)
sorted_teams = [
team['name']
for team in sorted(
sorted(teams, key=lambda t: t[secondary_sort_order], reverse=reverse),
key=lambda t: t[sort_order],
reverse=reverse
)
][:self.TEAMS_PAGE_SIZE]
self.assertEqual(team_names, sorted_teams)
def test_default_sort_order(self):
"""
Scenario: the list of teams should be sorted by last activity by default
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic, sorted by last activity
"""
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
self.browse_teams_page.visit()
self.assertEqual(self.browse_teams_page.sort_order, 'last activity')
def test_no_teams(self):
"""
Scenario: Visiting a topic with no teams should not display any teams.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing no teams
And I should see no teams
And I should see a button to add a team
And I should not see a pagination footer
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.assertEqual(len(self.browse_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_one_page(self):
"""
Scenario: Visiting a topic with fewer teams than the page size should
        display all those teams on one page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should see a button to add a team
And I should not see a pagination footer
"""
teams = self.teams_with_default_sort_order(
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE, time_between_creation=1)
)
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
self.verify_teams(self.browse_teams_page, teams)
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_navigation_buttons(self):
"""
Scenario: The user should be able to page through a topic's team list
using navigation buttons when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I click on the next page button
Then I should see that I am on the second page of results
And when I click on the previous page button
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
self.browse_teams_page.press_next_page_button()
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-11 out of 11 total', True)
self.browse_teams_page.press_previous_page_button()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
def test_teams_page_input(self):
"""
Scenario: The user should be able to page through a topic's team list
using the page input when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I input the second page
Then I should see that I am on the second page of results
When I input the first page
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 10, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
self.browse_teams_page.go_to_page(2)
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-20 out of 20 total', True)
self.browse_teams_page.go_to_page(1)
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
def test_browse_team_topics(self):
"""
Scenario: User should be able to navigate to "browse all teams" and "search team description" links.
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic
Then I should see the correct page header
And I should see the link to "browse teams in other topics"
        When I navigate to that link
Then I should see the topic browse page
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.browse_teams_page.click_browse_all_teams_link()
self.assertTrue(self.topics_page.is_browser_on_page())
def test_search(self):
"""
Scenario: User should be able to search for a team
Given I am enrolled in a course with teams enabled
When I visit the Teams page for that topic
And I search for 'banana'
Then I should see the search result page
And the search header should be shown
And 0 results should be shown
And my browser should fire a page viewed event for the search page
And a searched event should have been fired
"""
# Note: all searches will return 0 results with the mock search server
# used by Bok Choy.
search_text = 'banana'
self.create_teams(self.topic, 5)
self.browse_teams_page.visit()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'search-teams',
'topic_id': self.topic['id'],
'team_id': None
}
}, {
'event_type': 'edx.team.searched',
'event': {
'search_text': search_text,
'topic_id': self.topic['id'],
'number_of_results': 0
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events, in_order=False):
search_results_page = self.browse_teams_page.search(search_text)
self.verify_search_header(search_results_page, search_text)
self.assertTrue(search_results_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
Then my browser should post a page viewed event for the teams page
"""
self.create_teams(self.topic, 5)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-topic',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.browse_teams_page.visit()
def test_team_name_xss(self):
"""
Scenario: Team names should be HTML-escaped on the teams page
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic, with a team name containing JS code
Then I should not see any alerts
"""
self.post_team_data({
'course_id': self.course_id,
'topic_id': self.topic['id'],
'name': '<script>alert("XSS")</script>',
'description': 'Description',
'language': 'aa',
'country': 'AF'
})
with self.assertRaises(TimeoutException):
self.browser.get(self.browse_teams_page.url)
alert = get_modal_alert(self.browser)
alert.accept()
@attr('shard_5')
class TeamFormActions(TeamsTabBase):
"""
Base class for create, edit, and delete team.
"""
TEAM_DESCRIPTION = 'The Avengers are a fictional team of superheroes.'
topic = {'name': 'Example Topic', 'id': 'example_topic', 'description': 'Description'}
TEAMS_NAME = 'Avengers'
def setUp(self):
super(TeamFormActions, self).setUp()
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
def verify_page_header(self, title, description, breadcrumbs):
"""
Verify that the page header correctly reflects the
create team header, description and breadcrumb.
"""
self.assertEqual(self.team_management_page.header_page_name, title)
self.assertEqual(self.team_management_page.header_page_description, description)
self.assertEqual(self.team_management_page.header_page_breadcrumbs, breadcrumbs)
def verify_and_navigate_to_create_team_page(self):
"""Navigates to the create team page and verifies."""
self.browse_teams_page.click_create_team_link()
self.verify_page_header(
title='Create a New Team',
description='Create a new team if you can\'t find an existing team to join, '
'or if you would like to learn with friends you know.',
breadcrumbs='All Topics {topic_name}'.format(topic_name=self.topic['name'])
)
def verify_and_navigate_to_edit_team_page(self):
"""Navigates to the edit team page and verifies."""
# pylint: disable=no-member
self.assertEqual(self.team_page.team_name, self.team['name'])
self.assertTrue(self.team_page.edit_team_button_present)
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
# Edit page header.
self.verify_page_header(
title='Edit Team',
description='If you make significant changes, make sure you notify '
'members of the team before making these changes.',
breadcrumbs='All Topics {topic_name} {team_name}'.format(
topic_name=self.topic['name'],
team_name=self.team['name']
)
)
def verify_team_info(self, name, description, location, language):
"""Verify the team information on team page."""
# pylint: disable=no-member
self.assertEqual(self.team_page.team_name, name)
self.assertEqual(self.team_page.team_description, description)
self.assertEqual(self.team_page.team_location, location)
self.assertEqual(self.team_page.team_language, language)
def fill_create_or_edit_form(self):
"""Fill the create/edit team form fields with appropriate values."""
self.team_management_page.value_for_text_field(
field_id='name',
value=self.TEAMS_NAME,
press_enter=False
)
self.team_management_page.set_value_for_textarea_field(
field_id='description',
value=self.TEAM_DESCRIPTION
)
self.team_management_page.value_for_dropdown_field(field_id='language', value='English')
self.team_management_page.value_for_dropdown_field(field_id='country', value='Pakistan')
def verify_all_fields_exist(self):
"""
Verify the fields for create/edit page.
"""
self.assertEqual(
self.team_management_page.message_for_field('name'),
'A name that identifies your team (maximum 255 characters).'
)
self.assertEqual(
self.team_management_page.message_for_textarea_field('description'),
'A short description of the team to help other learners understand '
'the goals or direction of the team (maximum 300 characters).'
)
self.assertEqual(
self.team_management_page.message_for_field('country'),
'The country that team members primarily identify with.'
)
self.assertEqual(
self.team_management_page.message_for_field('language'),
'The language that team members primarily use to communicate with each other.'
)
@ddt.ddt
class CreateTeamTest(TeamFormActions):
"""
Tests for creating a new Team within a Topic on the Teams page.
"""
def setUp(self):
super(CreateTeamTest, self).setUp()
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.browse_teams_page.visit()
def test_user_can_see_create_team_page(self):
"""
Scenario: The user should be able to see the create team page via teams list page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the Create Team page link on bottom
And When I click create team link
Then I should see the create team page.
And I should see the create team header
And I should also see the help messages for fields.
"""
self.verify_and_navigate_to_create_team_page()
self.verify_all_fields_exist()
def test_user_can_see_error_message_for_missing_data(self):
"""
        Scenario: The user should see an error message when required fields are missing.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
And When I click create team button without filling required fields
Then I should see the error message and highlighted fields.
"""
self.verify_and_navigate_to_create_team_page()
self.team_management_page.submit_form()
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
self.assertTrue(self.team_management_page.error_for_field(field_id='description'))
def test_user_can_see_error_message_for_incorrect_data(self):
"""
        Scenario: The user should see an error message when a required field exceeds its maximum length.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I add text > than 255 characters for name field
And I click Create button
Then I should see the error message for exceeding length.
"""
self.verify_and_navigate_to_create_team_page()
# Fill the name field with >255 characters to see validation message.
self.team_management_page.value_for_text_field(
field_id='name',
value='EdX is a massive open online course (MOOC) provider and online learning platform. '
'It hosts online university-level courses in a wide range of disciplines to a worldwide '
'audience, some at no charge. It also conducts research into learning based on how '
                  'people use its platform. EdX was created for students and institutions that seek to '
                  'transform themselves through cutting-edge technologies, innovative pedagogy, and '
                  'rigorous courses. More than 70 schools, nonprofits, corporations, and international '
                  'organizations offer or plan to offer courses on the edX website. As of 22 October 2014, '
'edX has more than 4 million users taking more than 500 courses online.',
press_enter=False
)
self.team_management_page.submit_form()
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
def test_user_can_create_new_team_successfully(self):
"""
Scenario: The user should be able to create new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I fill all the fields present with appropriate data
And I click Create button
Then I expect analytics events to be emitted
And I should see the page for my team
        And I should see the message that says "You are a member of this team"
And the new team should be added to the list of teams within the topic
And the number of teams should be updated on the topic card
And if I switch to "My Team", the newly created team is displayed
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.browse_teams_page.visit()
self.verify_and_navigate_to_create_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.created'
},
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'added_on_create',
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_management_page.submit_form()
# Verify that the page is shown for the new team
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
self.assertEqual(team_page.team_name, self.TEAMS_NAME)
self.assertEqual(team_page.team_description, self.TEAM_DESCRIPTION)
self.assertEqual(team_page.team_user_membership_text, 'You are a member of this team.')
# Verify the new team was added to the topic list
self.teams_page.click_specific_topic("Example Topic")
self.teams_page.verify_topic_team_count(1)
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(1)
# Verify that if one switches to "My Team" without reloading the page, the newly created team is shown.
self.verify_my_team_count(1)
def test_user_can_cancel_the_team_creation(self):
"""
Scenario: The user should be able to cancel the creation of new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I click Cancel button
Then I should see teams list page without any new team.
And if I switch to "My Team", it shows no teams
"""
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.verify_and_navigate_to_create_team_page()
self.team_management_page.cancel_team()
self.assertTrue(self.browse_teams_page.is_browser_on_page())
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(0)
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the create team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the create team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'new-team',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_create_team_page()
@ddt.ddt
class DeleteTeamTest(TeamFormActions):
"""
Tests for deleting teams.
"""
def setUp(self):
super(DeleteTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
#need to have a membership to confirm it gets deleted as well
self.create_membership(self.user_info['username'], self.team['id'])
self.team_page.visit()
def test_cancel_delete(self):
"""
Scenario: The user should be able to cancel the Delete Team dialog
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I cancel the prompt
And I refresh the page
Then I should still see the team
"""
self.delete_team(cancel=True)
self.assertTrue(self.team_management_page.is_browser_on_page())
self.browser.refresh()
self.team_management_page.wait_for_page()
self.assertEqual(
' '.join(('All Topics', self.topic['name'], self.team['name'])),
self.team_management_page.header_page_breadcrumbs
)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_delete_team(self, role):
"""
Scenario: The user should be able to see and navigate to the delete team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I confirm the prompt
Then I should see the browse teams page
And the team should not be present
"""
# If role is None, remain logged in as global staff
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.delete_team(require_notification=False)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.assertTrue(browse_teams_page.is_browser_on_page())
self.assertNotIn(self.team['name'], browse_teams_page.team_names)
def delete_team(self, **kwargs):
"""
Delete a team. Passes `kwargs` to `confirm_prompt`.
Expects edx.team.deleted event to be emitted, with correct course_id.
Also expects edx.team.learner_removed event to be emitted for the
membership that is removed as a part of the delete operation.
"""
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.team_management_page.delete_team_button.click()
if 'cancel' in kwargs and kwargs['cancel'] is True:
confirm_prompt(self.team_management_page, **kwargs)
else:
expected_events = [
{
'event_type': 'edx.team.deleted',
'event': {
'team_id': self.team['id']
}
},
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'team_deleted',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
confirm_prompt(self.team_management_page, **kwargs)
def test_delete_team_updates_topics(self):
"""
Scenario: Deleting a team should update the team count on the topics page
Given I am staff user for a course with a team
And I delete a team
When I navigate to the browse topics page
        Then the team count for the deleted team's topic should be updated
"""
self.delete_team(require_notification=False)
BrowseTeamsPage(self.browser, self.course_id, self.topic).click_all_topics()
topics_page = BrowseTopicsPage(self.browser, self.course_id)
self.assertTrue(topics_page.is_browser_on_page())
self.teams_page.verify_topic_team_count(0)
@ddt.ddt
class EditTeamTest(TeamFormActions):
"""
Tests for editing the team.
"""
def setUp(self):
super(EditTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
self.team_page.visit()
def test_staff_can_navigate_to_edit_team_page(self):
"""
Scenario: The user should be able to see and navigate to the edit team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And I should see the edit team header
And I should also see the help messages for fields
"""
self.verify_and_navigate_to_edit_team_page()
self.verify_all_fields_exist()
def test_staff_can_edit_team_successfully(self):
"""
Scenario: The staff should be able to edit team successfully.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And an analytics event should be fired
When I edit all the fields with appropriate data
And I click Update button
Then I should see the page for my team with updated data
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'country',
'old': 'AF',
'new': 'PK',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'name',
'old': self.team['name'],
'new': self.TEAMS_NAME,
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'language',
'old': 'aa',
'new': 'en',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'description',
'old': self.team['description'],
'new': self.TEAM_DESCRIPTION,
'truncated': [],
}
},
]
with self.assert_events_match_during(
event_filter=self.only_team_events,
expected_events=expected_events,
):
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_staff_can_cancel_the_team_edit(self):
"""
Scenario: The user should be able to cancel the editing of team.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
Then I should see the Edit Team header
When I click Cancel button
        Then I should see the team page without changes.
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.cancel_team()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
def test_student_cannot_see_edit_button(self):
"""
Scenario: The student should not see the edit team button.
Given I am student for a course with a team
When I visit the Team profile page
Then I should not see the Edit Team button
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page.visit()
self.assertFalse(self.team_page.edit_team_button_present)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged_user_can_edit_team(self, role):
"""
Scenario: The user with specified role should see the edit team button.
Given I am user with privileged role for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
"""
kwargs = {
'course_id': self.course_id,
'staff': False
}
if role is not None:
kwargs['roles'] = role
AutoAuthPage(self.browser, **kwargs).visit()
self.team_page.visit()
self.teams_page.wait_for_page()
self.assertTrue(self.team_page.edit_team_button_present)
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_page_viewed_event(self):
"""
Scenario: Visiting the edit team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the edit team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'edit-team',
'topic_id': self.topic['id'],
'team_id': self.team['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_edit_team_page()
@ddt.ddt
class EditMembershipTest(TeamFormActions):
"""
Tests for administrating from the team membership page
"""
def setUp(self):
super(EditMembershipTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
self.team = self.create_teams(self.topic, num_teams=1)[0]
#make sure a user exists on this team so we can edit the membership
self.create_membership(self.user_info['username'], self.team['id'])
self.edit_membership_page = EditMembershipPage(self.browser, self.course_id, self.team)
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
def edit_membership_helper(self, role, cancel=False):
"""
Helper for common functionality in edit membership tests.
Checks for all relevant assertions about membership being removed,
including verify edx.team.learner_removed events are emitted.
"""
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.assertTrue(
self.team_management_page.membership_button_present
)
self.team_management_page.click_membership_button()
self.edit_membership_page.wait_for_page()
self.edit_membership_page.click_first_remove()
if cancel:
self.edit_membership_page.cancel_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 1)
else:
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'removed_by_admin',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
self.edit_membership_page.confirm_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 0)
        self.assertTrue(self.edit_membership_page.is_browser_on_page())
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_remove_membership(self, role):
"""
Scenario: The user should be able to remove a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and confirm the dialog
Then my membership should be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=False)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_cancel_remove_membership(self, role):
"""
Scenario: The user should be able to remove a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and cancel the dialog
Then my membership should not be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=True)
@attr('shard_5')
@ddt.ddt
class TeamPageTest(TeamsTabBase):
"""Tests for viewing a specific team"""
SEND_INVITE_TEXT = 'Send this link to friends so that they can join too.'
def setUp(self):
super(TeamPageTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
def _set_team_configuration_and_membership(
self,
max_team_size=10,
membership_team_index=0,
visit_team_index=0,
create_membership=True,
another_user=False):
"""
Set team configuration.
Arguments:
max_team_size (int): number of users a team can have
membership_team_index (int): index of team user will join
visit_team_index (int): index of team user will visit
create_membership (bool): whether to create membership or not
another_user (bool): whether a different user should visit the team
"""
#pylint: disable=attribute-defined-outside-init
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': max_team_size, 'topics': [self.topic]}
)
self.teams = self.create_teams(self.topic, 2)
if create_membership:
self.create_membership(self.user_info['username'], self.teams[membership_team_index]['id'])
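# Optionally sign in as a freshly created user so the team page is viewed by a non-member.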
if another_user:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page = TeamPage(self.browser, self.course_id, self.teams[visit_team_index])
def setup_thread(self):
"""
Create and return a thread for this test's discussion topic.
"""
thread = Thread(
id="test_thread_{}".format(uuid4().hex),
commentable_id=self.teams[0]['discussion_topic_id'],
body="Dummy text body."
)
thread_fixture = MultipleThreadFixture([thread])
thread_fixture.push()
return thread
def setup_discussion_user(self, role=None, staff=False):
"""Set this test's user to have the given role in its
discussions. Role is one of 'Community TA', 'Moderator',
'Administrator', or 'Student'.
"""
kwargs = {
'course_id': self.course_id,
'staff': staff
}
if role is not None:
kwargs['roles'] = role
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, **kwargs).visit().user_info
def verify_teams_discussion_permissions(self, should_have_permission):
"""Verify that the teams discussion component is in the correct state
for the test user. If `should_have_permission` is True, assert that
the user can see controls for posting replies, voting, editing, and
deleting. Otherwise, assert that those controls are hidden.
"""
thread = self.setup_thread()
self.team_page.visit()
self.assertEqual(self.team_page.discussion_id, self.teams[0]['discussion_topic_id'])
discussion = self.team_page.discussion_page
self.assertTrue(discussion.is_browser_on_page())
self.assertTrue(discussion.is_discussion_expanded())
self.assertEqual(discussion.get_num_displayed_threads(), 1)
self.assertTrue(discussion.has_thread(thread['id']))
assertion = self.assertTrue if should_have_permission else self.assertFalse
assertion(discussion.q(css='.post-header-actions').present)
assertion(discussion.q(css='.add-response').present)
assertion(discussion.q(css='.new-post-btn').present)
def test_discussion_on_my_team_page(self):
"""
Scenario: Team Page renders a discussion for a team to which I belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the existing thread
And I should see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership()
self.verify_teams_discussion_permissions(True)
@ddt.data(True, False)
def test_discussion_on_other_team_page(self, is_staff):
"""
Scenario: Team Page renders a team discussion for a team to which I do
not belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the team's thread
And I should not see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(staff=is_staff)
self.verify_teams_discussion_permissions(False)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged(self, role):
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(role=role)
self.verify_teams_discussion_permissions(True)
def assert_team_details(self, num_members, is_member=True, max_size=10):
"""
Verifies that the user sees the information on the detail page appropriate to their membership status.
Arguments:
num_members (int): number of users in a team
is_member (bool) default True: True if request user is member else False
max_size (int): number of users a team can have
"""
self.assertEqual(
self.team_page.team_capacity_text,
self.team_page.format_capacity_text(num_members, max_size)
)
self.assertEqual(self.team_page.team_location, 'Afghanistan')
self.assertEqual(self.team_page.team_language, 'Afar')
self.assertEqual(self.team_page.team_members, num_members)
if num_members > 0:
self.assertTrue(self.team_page.team_members_present)
else:
self.assertFalse(self.team_page.team_members_present)
if is_member:
self.assertEqual(self.team_page.team_user_membership_text, 'You are a member of this team.')
self.assertTrue(self.team_page.team_leave_link_present)
self.assertTrue(self.team_page.new_post_button_present)
else:
self.assertEqual(self.team_page.team_user_membership_text, '')
self.assertFalse(self.team_page.team_leave_link_present)
self.assertFalse(self.team_page.new_post_button_present)
def test_team_member_can_see_full_team_details(self):
"""
Scenario: Team member can see full info for team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see the full team detail
And I should see the team members
And I should see my team membership text
And I should see the language & country
And I should see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assert_team_details(
num_members=1,
)
def test_other_users_can_see_limited_team_details(self):
"""
Scenario: Users who are not members of this team can see only limited info for this team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When I visit the Team page for that team
Then I should not see full team detail
And I should not see any team members
And I should not see my team membership text
And I should not see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership(create_membership=False)
self.team_page.visit()
self.assert_team_details(is_member=False, num_members=0)
def test_user_can_navigate_to_members_profile_page(self):
"""
Scenario: User can navigate to profile page via team member profile image.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see profile images for the team members
When I click on the first profile image
Then I should be taken to the user's profile page
And I should see the username on profile page
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
learner_name = self.team_page.first_member_username
self.team_page.click_first_profile_image()
learner_profile_page = LearnerProfilePage(self.browser, learner_name)
learner_profile_page.wait_for_page()
learner_profile_page.wait_for_field('username')
self.assertTrue(learner_profile_page.field_is_visible('username'))
def test_join_team(self):
"""
Scenario: User can join a team if they are not already a member.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I visit the Team page for that team
Then I should see Join Team button
And I should not see New Post button
When I click on Join Team button
Then there should be no Join Team button and no message
And an analytics event should be emitted
And I should see the updated information under Team Details
And I should see New Post button
And if I switch to "My Team", the team I have joined is displayed
"""
self._set_team_configuration_and_membership(create_membership=False)
teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
teams_page.visit()
teams_page.view_first_team()
self.assertTrue(self.team_page.join_team_button_present)
expected_events = [
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'joined_from_team_view'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_page.click_join_team_button()
self.assertFalse(self.team_page.join_team_button_present)
self.assertFalse(self.team_page.join_team_message_present)
self.assert_team_details(num_members=1, is_member=True)
# Verify that if one switches to "My Team" without reloading the page, the newly joined team is shown.
self.teams_page.click_all_topics()
self.verify_my_team_count(1)
def test_already_member_message(self):
"""
Scenario: User should see `You already belong to another team` if they are a
member of another team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am already a member of a team
And I visit a team other than mine
Then I should see the `You already belong to another team` message
"""
self._set_team_configuration_and_membership(membership_team_index=0, visit_team_index=1)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'You already belong to another team.')
self.assert_team_details(num_members=0, is_member=False)
def test_team_full_message(self):
"""
Scenario: User should see `Team is full` message when team is full.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And the team has no space left
And I am not a member of any team
And I visit the team
Then I should see `Team is full` message
"""
self._set_team_configuration_and_membership(
create_membership=True,
max_team_size=1,
membership_team_index=0,
visit_team_index=0,
another_user=True
)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'This team is full.')
self.assert_team_details(num_members=1, is_member=False, max_size=1)
def test_leave_team(self):
"""
Scenario: User can leave a team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am a member of the team
And I visit the team
And I should not see Join Team button
And I should see New Post button
Then I should see Leave Team link
When I click on Leave Team link
Then user should be removed from team
And an analytics event should be emitted
And I should see Join Team button
And I should not see New Post button
And if I switch to "My Team", the team I have left is not displayed
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assertFalse(self.team_page.join_team_button_present)
self.assert_team_details(num_members=1)
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'remove_method': 'self_removal'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_page.click_leave_team_link()
self.assert_team_details(num_members=0, is_member=False)
self.assertTrue(self.team_page.join_team_button_present)
# Verify that if one switches to "My Team" without reloading the page, the old team no longer shows.
self.teams_page.click_all_topics()
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the team profile page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the team profile page
Then my browser should post a page viewed event
"""
self._set_team_configuration_and_membership()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-team',
'topic_id': self.topic['id'],
'team_id': self.teams[0]['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.team_page.visit()
|
IndonesiaX/edx-platform
|
common/test/acceptance/tests/lms/test_teams.py
|
Python
|
agpl-3.0
| 82,589
|
[
"VisIt"
] |
a093fb62bda848c19ee52857c9a2053885ca7327cb2814044bd4d00c3637f17d
|
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from os.path import relpath, join
from setuptools import find_packages, setup
from setuptools.command.install import install
import versioneer
assert sys.version_info[:2] == (2, 7), "Sorry, this package requires Python 2.7."
PACKAGE_NAME = 'moldesign'
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
Intended Audience :: Education
License :: OSI Approved :: Apache Software License
Programming Language :: Python :: 2.7
Programming Language :: Python :: 2 :: Only
Topic :: Scientific/Engineering :: Chemistry
Topic :: Scientific/Engineering :: Physics
Topic :: Scientific/Engineering :: Visualization
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
HOME = os.environ['HOME']
CONFIG_DIR = os.path.join(HOME, '.moldesign')
PYEXT = set('.py .pyc .pyo'.split())
with open('requirements.txt', 'r') as reqfile:
requirements = [x.strip() for x in reqfile if x.strip()]
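# Each non-empty line of requirements.txt becomes an install_requires entry below.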
def find_package_data(pkgdir):
""" Just include all files that won't be included as package modules.
"""
files = []
for root, dirnames, filenames in os.walk(pkgdir):
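# A directory without __init__.py is not an importable package, so everything in it is shipped as package data.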
not_a_package = '__init__.py' not in filenames
for fn in filenames:
basename, fext = os.path.splitext(fn)
if not_a_package or (fext not in PYEXT) or ('static' in fn):
files.append(relpath(join(root, fn), pkgdir))
return files
class PostInstall(install):
def run(self):
install.run(self)
self.prompt_intro()
def prompt_intro(self): # this doesn't actually display - print statements don't work?
print('Thank you for installing the Molecular Design Toolkit!!!')
print('For help, documentation, and any questions, visit us at ')
print(' http://moldesign.bionano.autodesk.com/')
print('\nTo get started, please run:')
print(' >>> python -m moldesign intro')
cmdclass = versioneer.get_cmdclass()
cmdclass['install'] = PostInstall
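# Reuse versioneer's command classes and swap in PostInstall so the standard 'install' command also prints the intro message.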
setup(
name=PACKAGE_NAME,
version=versioneer.get_version(),
classifiers=CLASSIFIERS.splitlines(),
packages=find_packages(),
package_data={PACKAGE_NAME: find_package_data(PACKAGE_NAME)},
install_requires=requirements,
url='http://moldesign.bionano.autodesk.com',
cmdclass=cmdclass,
license='Apache 2.0',
author='Aaron Virshup, BioNano Research at Autodesk',
author_email='moleculardesigntoolkit@autodesk.com',
description='The Molecular Design Toolkit: Dead-simple chemical simulation, visualization, '
'and cloud computing in a notebook'
)
|
tkzeng/molecular-design-toolkit
|
setup.py
|
Python
|
apache-2.0
| 3,223
|
[
"VisIt"
] |
a4bc2e46adbe5866bfa6ff3ea31a6b793fb1acd88616953456b301df711cce71
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import functools
import itertools
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.compat import compat
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
class RGBToYIQTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
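# Weighted luma conversion; these are the ITU-R BT.601 coefficients that the rgb_to_grayscale op is expected to match.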
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testGrayscaleToRGBInputValidation(self):
# tests whether the grayscale_to_rgb function raises
# an exception if the input images' last dimension is
# not of size 1, i.e. the images have shape
# [batch size, height, width] or [height, width]
# tests if an exception is raised if a three dimensional
# input is used, i.e. the images have shape [batch size, height, width]
with self.cached_session(use_gpu=True):
# 3-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "Last dimension of a grayscale image should be size 1"
with self.assertRaisesRegexp(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
# tests if an exception is raised if a one dimensional
# input is used, i.e. the image has shape [width]
with self.cached_session(use_gpu=True):
# 1-D input without batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "A grayscale image must be at least two-dimensional"
with self.assertRaisesRegexp(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
@test_util.run_deprecated_v1
def testShapeInference(self):
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.cached_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.cached_session(use_gpu=True):
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.cached_session(use_gpu=True):
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.cached_session(use_gpu=True):
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def test_adjust_gamma_less_zero_float32(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegexp(ValueError, err_msg):
image_ops.adjust_gamma(x, gamma=-1)
@test_util.run_deprecated_v1
def test_adjust_gamma_less_zero_uint8(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegexp(ValueError, err_msg):
image_ops.adjust_gamma(x, gamma=-1)
@test_util.run_deprecated_v1
def test_adjust_gamma_less_zero_tensor(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = constant_op.constant(-1.0, dtype=dtypes.float32)
image = image_ops.adjust_gamma(x, gamma=y)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegexp(errors.InvalidArgumentError, err_msg):
self.evaluate(image)
def _test_adjust_gamma_uint8(self, gamma):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = np.trunc(y.eval())
# calculate gamma correction using numpy
# firstly, transform uint8 to float representation
# then perform correction
y_np = np.power(x_np / 255.0, gamma)
# convert correct numpy image back to uint8 type
y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))
self.assertAllClose(y_tf, y_np, 1e-6)
def _test_adjust_gamma_float32(self, gamma):
"""Verifying the output with expected results for gamma
correction for float32 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 1.0, (8, 8))
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = y.eval()
y_np = np.clip(np.power(x_np, gamma), 0, 1.0)
self.assertAllClose(y_tf, y_np, 1e-6)
@test_util.run_deprecated_v1
def test_adjust_gamma_one_float32(self):
"""Same image should be returned for gamma equal to one"""
self._test_adjust_gamma_float32(1.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_one_uint8(self):
self._test_adjust_gamma_uint8(1.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_zero_uint8(self):
"""White image should be returned for gamma equal
to zero for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_less_one_uint8(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.5)
@test_util.run_deprecated_v1
def test_adjust_gamma_greater_one_uint8(self):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=1.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_less_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for float32 images
"""
self._test_adjust_gamma_float32(0.5)
@test_util.run_deprecated_v1
def test_adjust_gamma_greater_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two for float32 images
"""
self._test_adjust_gamma_float32(2.0)
@test_util.run_deprecated_v1
def test_adjust_gamma_zero_float32(self):
"""White image should be returned for gamma equal
to zero for float32 images
"""
self._test_adjust_gamma_float32(0.0)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
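# Adding 10.0 keeps the argument positive so fmod wraps the hue into [0, 1) even for negative deltas.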
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = self.evaluate(y)
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class FlipImageBenchmark(test.Benchmark):
def _benchmarkFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
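# Only the benchmark_rounds iterations after the warmup are included in the measured time.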
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
image_shape = [16, 299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
"%.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkFlipLeftRightCpu1(self):
self._benchmarkFlipLeftRight("/cpu:0", 1)
def benchmarkFlipLeftRightCpuAll(self):
self._benchmarkFlipLeftRight("/cpu:0", None)
def benchmarkFlipLeftRightGpu(self):
self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
def benchmarkRandomFlipLeftRightCpu1(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
def benchmarkRandomFlipLeftRightCpuAll(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", None)
def benchmarkRandomFlipLeftRightGpu(self):
self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
def benchmarkBatchedRandomFlipLeftRightCpu1(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)
def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)
def benchmarkBatchedRandomFlipLeftRightGpu(self):
self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
self.evaluate(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchSaturation(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
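# Clamp the scaled saturation into [0, 1] so the numpy baseline stays within the valid HSV range.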
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
@test_util.run_deprecated_v1
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.cached_session(use_gpu=True):
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = image_ops.adjust_saturation(x_np, scale).eval()
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
@test_util.run_deprecated_v1
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
self.assertTrue(y.op.name.startswith("flip_left_right"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
@test_util.run_deprecated_v1
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
@test_util.run_deprecated_v1
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
# 100 trials, each containing batch_size elements
# Mean: 50 * batch_size
# Std Dev: ~5 * sqrt(batch_size)
# Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
# = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
self.assertGreaterEqual(count_flipped, six_sigma)
self.assertGreaterEqual(count_unflipped, six_sigma)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
@test_util.run_deprecated_v1
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
self.assertTrue(y.op.name.startswith("flip_up_down"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
@test_util.run_deprecated_v1
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
@test_util.run_deprecated_v1
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
# 100 trials, each containing batch_size elements
# Mean: 50 * batch_size
# Std Dev: ~5 * sqrt(batch_size)
# Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
# = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
self.assertGreaterEqual(count_flipped, six_sigma)
self.assertGreaterEqual(count_unflipped, six_sigma)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
@test_util.run_deprecated_v1
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
self.assertTrue(y.op.name.startswith("transpose"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
@test_util.run_deprecated_v1
def testPartialShapes(self):
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
# Ops that support 3D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegexp(ValueError, "must be > 0"):
op(p_zero_dim)
    # Ops that support 4D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegexp(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
@test_util.run_deprecated_v1
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
@test_util.run_deprecated_v1
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
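    # Reference implementation: scale each pixel's deviation from the
    # per-image, per-channel spatial mean by contrast_factor.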
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegexp(
ValueError, 'Shape must be rank 0 but is rank 1'):
image_ops.adjust_contrast(x_np, [2.0])
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, tol)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat32(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat16(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
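    # Reference implementation of per_image_standardization: subtract the mean
    # and divide by max(stddev, 1/sqrt(num_pixels)), which avoids dividing by
    # zero for uniform images.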
num_pixels = np.prod(x.shape)
mn = np.mean(x)
std = np.std(x)
stddev = max(std, 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
@test_util.run_deprecated_v1
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.float32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_standardization(x)
self.assertTrue(y.op.name.startswith("per_image_standardization"))
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.cached_session(use_gpu=True):
whiten_np = self.evaluate(whiten)
self.assertFalse(np.any(np.isnan(whiten_np)))
def testBatchWhitening(self):
imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
with self.cached_session(use_gpu=True):
imgs = constant_op.constant(imgs_np)
whiten = image_ops.per_image_standardization(imgs)
whiten_tf = self.evaluate(whiten)
for w_tf, w_np in zip(whiten_tf, whiten_np):
self.assertAllClose(w_tf, w_np, atol=1e-4)
def testPreservesDtype(self):
imgs_npu8 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.uint8)
imgs_tfu8 = constant_op.constant(imgs_npu8)
whiten_tfu8 = image_ops.per_image_standardization(imgs_tfu8)
self.assertEqual(whiten_tfu8.dtype, dtypes.uint8)
imgs_npf16 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.float16)
imgs_tff16 = constant_op.constant(imgs_npf16)
whiten_tff16 = image_ops.per_image_standardization(imgs_tff16)
self.assertEqual(whiten_tff16.dtype, dtypes.float16)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
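    # Exercise both code paths: static Python/NumPy inputs and tensor inputs
    # fed through placeholders with fully unknown shapes.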
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
@test_util.run_deprecated_v1
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
@test_util.run_deprecated_v1
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
@test_util.run_deprecated_v1
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"assertion failed:",
use_tensor_inputs_options=[True])
@test_util.run_deprecated_v1
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
    test_config = (([-1, 0, 3, 3], "offset_height must be >= 0"),
                   ([0, -1, 3, 3], "offset_width must be >= 0"),
                   ([0, 0, 0, 3], "target_height must be > 0"),
                   ([0, 0, 3, 0], "target_width must be > 0"),
                   ([2, 0, 3, 3], "height must be >= target + offset"),
                   ([0, 2, 3, 3], "width must be >= target + offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
@test_util.run_deprecated_v1
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
self.assertEqual(y.op.name, x.op.name)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
@test_util.run_deprecated_v1
def testCropping2(self):
    # Test case for issue 10315.
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = array_ops.placeholder(shape=x_shape, dtype=dtypes.int32)
y = image_ops.central_crop(x, 0.33)
y_tf = y.eval(feed_dict={x: x_np})
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
@test_util.run_deprecated_v1
def testShapeInference(self):
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
    # Test fraction=0.5, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
    # Test fraction=0.5, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
@test_util.run_deprecated_v1
def testNameScope(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
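    # Mirror of the crop helper above: run with static inputs and with
    # placeholder-fed tensor inputs.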
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
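    # Pass the offsets and target size as slices of an int64 tensor to check
    # that pad_to_bounding_box accepts int64 arguments.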
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
with self.cached_session(use_gpu=True):
self.assertAllClose(y, self.evaluate(y_tf))
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
@test_util.run_deprecated_v1
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
@test_util.run_deprecated_v1
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
@test_util.run_deprecated_v1
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
# The original error message does not contain back slashes. However, they
# are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
@test_util.run_deprecated_v1
def testBadParams(self):
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"),
(0, -1, 4, 4, "offset_width must be >= 0"),
(2, 0, 4, 4, "height must be <= target - offset"),
(0, 2, 4, 4, "width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(x, x_shape, *config_item)
@test_util.run_deprecated_v1
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
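    # Draw many samples and record per-crop statistics so that distributional
    # properties (aspect ratio, area, object coverage) can be checked below.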
with self.cached_session(use_gpu=True):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# min_object_covered as tensor
min_object_covered_placeholder = array_ops.placeholder(dtypes.float32)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered_placeholder,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = y.eval(feed_dict={
min_object_covered_placeholder: min_object_covered
})
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
# a stricter bound. Or, ideally, the sample_distorted_bounding_box Op
      # should be fixed to not use rejection sampling and generate correctly
      # uniform aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
      # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
@test_util.run_deprecated_v1
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
@test_util.run_deprecated_v1
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
    # Create an object with 1's in a region with area A and require that
    # the total pixel values be >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
@test_util.run_deprecated_v1
def testSampleDistortedBoundingBoxShape(self):
with self.cached_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
# By default min_object_covered=0.1 if not provided
with self.cached_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
class ResizeImagesV2Test(test_util.TensorFlowTestCase):
METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
]
  # Some resize methods, such as Gaussian, are non-interpolating in that they
  # change the image even if there is no scale change. For some tests, we only
  # check the values for the value-preserving methods.
INTERPOLATING_METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
@test_util.run_deprecated_v1
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
# half_pixel_centers unsupported in ResizeBilinear
@test_util.run_deprecated_v1
@test_util.disable_xla("b/127616992")
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, new_size, method)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, new_size, self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images_v2(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [6, 4.0],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [None, 4],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images_v2(image, [6, None],
image_ops.ResizeMethod.BILINEAR)
@test_util.run_deprecated_v1
def testReturnDtype(self):
target_shapes = [[6, 4], [3, 2],
[
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images_v2(image, target_shape, method)
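          # Only nearest-neighbor resizing is expected to preserve the input
          # dtype in the v2 API; every other method returns float32.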
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(
image, [target_height, target_width], method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
87.5, 100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.LANCZOS3] = [
75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
]
expected_data[image_ops.ResizeMethod.LANCZOS5] = [
77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
]
expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
]
expected_data[image_ops.ResizeMethod.BICUBIC] = [
70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
]
expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
]
for nptype in self.TYPES:
for method in expected_data:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-04)
# XLA doesn't implement half_pixel_centers
@test_util.disable_xla("b/127616992")
def testLegacyBicubicMethodsMatchNewMethods(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
(gen_image_ops.resize_bicubic, "keyscubic"))
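    # With half_pixel_centers=True and antialias disabled, each legacy resize
    # op should match scale_and_translate with the corresponding kernel.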
for legacy_method, new_method in methods_to_test:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=np.float32).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
legacy_result = legacy_method(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
half_pixel_centers=True)
scale = (
constant_op.constant([target_height, target_width],
dtype=dtypes.float32) /
math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
new_result = gen_image_ops.scale_and_translate(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
scale,
array_ops.zeros([2]),
kernel_type=new_method,
antialias=False)
self.assertAllClose(
self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
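      # Run the same nearest-neighbor resize on GPU and CPU; the two kernels
      # should agree to within floating-point tolerance.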
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
@test_util.run_deprecated_v1
def testNameScope(self):
with self.cached_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
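    # Resize toward a (max_h, max_w) target, optionally preserving the aspect
    # ratio, with the target given either as Python ints or as a tensor.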
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
target_max = [max_h, max_w]
x_tensor = x
feed_dict = {}
y = image_ops.resize_images(
x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
@test_util.run_deprecated_v1
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(
x, x_shape, [250, 250], [10, 250, 250, 10], preserve_aspect_ratio=False)
@test_util.run_deprecated_v1
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImagesTest(test_util.TensorFlowTestCase):
METHODS = [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
@test_util.run_deprecated_v1
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
@test_util.run_deprecated_v1
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, new_size, method)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, new_size, self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, 4.0],
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [None, 4],
image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, None],
image_ops.ResizeMethodV1.BILINEAR)
@test_util.run_deprecated_v1
def testReturnDtype(self):
target_shapes = [[6, 4], [3, 2], [
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, method)
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session() as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=False)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
img_shape = [1, 3, 2, 1]
data = [6, 3, 3, 6, 6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
6.5, 7.5, 6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
9.0, 9.0, 6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethodV1.AREA] = [
6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
3.0, 6.0, 6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=True)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [
128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [
128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.BICUBIC)
resized = self.evaluate(y)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.BILINEAR,
align_corners=align_corners)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
@test_util.run_deprecated_v1
def testNameScope(self):
img_shape = [1, 3, 2, 1]
with self.cached_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
target_max = [max_h, max_w]
x_tensor = x
feed_dict = {}
y = image_ops.resize_images(x_tensor, target_max,
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
@test_util.run_deprecated_v1
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
@test_util.run_deprecated_v1
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmaller(self):
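# With preserve_aspect_ratio=True the smaller scale factor wins:
# min(75/100, 50/100) = 0.5, so the 100x100 input comes out as 50x50.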
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
@test_util.run_deprecated_v1
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_pad_v1(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e: # pylint: disable=broad-except
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v1(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPad(self):
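# resize_image_with_pad resizes while preserving the aspect ratio and then
# zero-pads to the target size. For the 2x4 input below resized into a 1x4
# frame, the image first becomes 1x2 (the legacy, non-half-pixel sampling
# lands on the values 1 and 3) and a zero column is padded on each side,
# which is why [0, 1, 3, 0] is expected.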
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 3, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# half_pixel_centers not supported by XLA
@test_util.for_all_test_methods(test_util.disable_xla, "b/127616992")
class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_pad_v2(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e: # pylint: disable=broad-except
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v2(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPad(self):
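# The v2 op uses half-pixel-centered bilinear sampling for the
# aspect-preserving resize, so shrinking the 2x4 input to 1x2 averages the
# 2x2 neighborhoods to 3.5 and 5.5 before zero-padding to the target size,
# e.g. [0, 3.5, 5.5, 0] for a 1x4 target.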
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 3.5, 5.5, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.cached_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
@test_util.run_deprecated_v1
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
@test_util.run_deprecated_v1
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
@test_util.run_deprecated_v1
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3, 6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4, 8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [3, 4, 5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
x_shape = [8, 2, 1]
y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
@test_util.run_deprecated_v1
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 2, 3, 6, 7, 0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0, 0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
@test_util.run_deprecated_v1
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
@test_util.run_deprecated_v1
def testNon3DInput(self):
# Input image is neither 3-D nor 4-D.
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
@test_util.run_deprecated_v1
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
# The original error message does not contain backslashes. However, they
# are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
@test_util.run_deprecated_v1
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
@test_util.run_deprecated_v1
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
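# Returns the mean absolute difference per element between the two images.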
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.cached_session(use_gpu=True) as sess:
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = self.evaluate([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
with self.cached_session() as sess:
# Encode it, then decode it, then encode it
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
# Combined decode+crop should have the same shape inference
self.assertAllEqual(image1_crop.get_shape().as_list(),
image2.get_shape().as_list())
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = self.evaluate([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
@test_util.run_deprecated_v1
def testCropAndDecodeJpegWithInvalidCropWindow(self):
with self.cached_session() as sess:
# Encode it, then decode it, then encode it
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
lambda e: "Invalid JPEG data or crop window" in str(e)):
self.evaluate(result)
def testSynthetic(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.cached_session(use_gpu=True) as sess:
# Compare decoding with both dct_method=INTEGER_FAST and
# default. They should be the same.
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = self.evaluate([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session(use_gpu=True) as sess:
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
@test_util.run_deprecated_v1
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
self.assertEqual(image_shape.tolist(), [256, 128, 3])
@test_util.run_deprecated_v1
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.cached_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
# Cmyk jpeg image has 4 channels.
self.assertEqual(image_shape.tolist(), [256, 128, 4])
def testRandomJpegQuality(self):
# A previous implementation of random_jpeg_quality had a bug.
# This unit test exercises the fixed version, but due to forward
# compatibility it can only run once the fixed version is in use.
if compat.forward_compatible(2019, 4, 4):
# Test jpeg quality dynamic randomization.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
with self.cached_session(use_gpu=True) as sess:
# Test randomization.
random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
are_images_equal = []
for i in range(1, len(random_jpeg_images)):
# Most of them should be different if randomization is occurring
# correctly.
are_images_equal.append(
np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))
self.assertFalse(all(are_images_equal))
def testAdjustJpegQuality(self):
# Test if image_ops.adjust_jpeg_quality works when jpeg quality
# is an int (not tensor) for backward compatibility.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
jpeg_quality = np.random.randint(40, 100)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(
image, jpeg_quality)
with self.cached_session(use_gpu=True) as sess:
sess.run(adjust_jpeg_quality_image)
@test_util.run_deprecated_v1
def testAdjustJpegQualityShape(self):
with self.cached_session(use_gpu=True):
image = constant_op.constant(
np.arange(24, dtype=np.uint8).reshape([2, 4, 3]))
adjusted_image = image_ops.adjust_jpeg_quality(image, 80)
self.assertListEqual(adjusted_image.shape.as_list(),
[None, None, 3])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.cached_session(use_gpu=True) as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = self.evaluate([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, self.evaluate(image1))
def testSynthetic(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.cached_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.cached_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session(use_gpu=True):
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def _testValid(self, filename):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
with self.cached_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = self.evaluate([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
print(frame_idx)
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testValid(self):
self._testValid("scan.gif")
self._testValid("optimized.gif")
@test_util.run_deprecated_v1
def testShape(self):
with self.cached_session(use_gpu=True) as sess:
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.cached_session(use_gpu=True):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5)
@test_util.run_deprecated_v1
def testNoConvert(self):
# Make sure converting to the same data type creates only an identity op
with self.cached_session(use_gpu=True):
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEquals(y.op.type, "Identity")
self.assertEquals(y.op.inputs[0], image)
@test_util.run_deprecated_v1
def testConvertBetweenInteger(self):
# Make sure converting between integer types scales appropriately
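# The expected values below correspond to scaling by (roughly) the ratio of
# the dtypes' value ranges, e.g. uint8 -> int16 multiplies by 32768 // 256 =
# 128 and int64 -> int32 divides by 2**32.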
with self.cached_session(use_gpu=True):
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
@test_util.run_deprecated_v1
def testConvertBetweenFloat(self):
# Make sure converting between float types does nothing interesting
with self.cached_session(use_gpu=True):
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
@test_util.run_deprecated_v1
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
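# Integer images are mapped onto [0, 1] by dividing by the source dtype's
# maximum (255 for uint8); the reverse conversion scales back up by the
# target maximum, so 1.1 / 255.0 rounds back to 1.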
with self.cached_session(use_gpu=True):
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
@test_util.run_deprecated_v1
def testConvertBetweenInt16AndInt8(self):
with self.cached_session(use_gpu=True):
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be floating-point numbers, so we only test
for approximate equality within some narrow error bound.
"""
# Create a TensorFlow session.
with self.cached_session(use_gpu=True):
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = self.evaluate(y)
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum for all axis.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum for the last 3 axis.
sum_axis = (1, 2, 3)
else:
# This should not occur in this test-code.
pass
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
# TODO(b/133851381): re-enable this test.
def disabledtestTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2], [4, 7]]
# Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18], [29, 47]]
# Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120], [193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var)
# Scale the pixel-values by a float. This scales the total variation as
# well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testFormats(self):
prefix = "tensorflow/core/lib"
paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
"gif/testdata/lena.gif")
decoders = {
"jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
"png": functools.partial(image_ops.decode_png, channels=3),
"gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
}
with self.cached_session():
for path in paths:
contents = io_ops.read_file(os.path.join(prefix, path)).eval()
images = {}
for name, decode in decoders.items():
image = decode(contents).eval()
self.assertEqual(image.ndim, 3)
for prev_name, prev in images.items():
print("path %s, names %s %s, shapes %s %s" %
(path, name, prev_name, image.shape, prev.shape))
self.assertAllEqual(image, prev)
images[name] = image
def testError(self):
path = "tensorflow/core/lib/gif/testdata/scan.gif"
with self.cached_session():
for decode in image_ops.decode_jpeg, image_ops.decode_png:
with self.assertRaisesOpError(r"Got 12 frames"):
decode(io_ops.read_file(path)).eval()
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testSelectFromThreeClusters(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices.eval(), [3, 0, 5])
@test_util.run_deprecated_v1
def testInvalidShape(self):
# The boxes should be 2D of shape [num_boxes, 4].
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 2 but is rank 1"):
boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
with self.assertRaisesRegexp(ValueError, "Dimension must be 4 but is 3"):
boxes = constant_op.constant([[0.0, 0.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
# The boxes tensor is of shape [num_boxes, 4] and the scores tensor of
# shape [num_boxes], so a mismatch in num_boxes raises an error.
with self.assertRaisesRegexp(ValueError,
"Dimensions must be equal, but are 1 and 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9, 0.75])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
# The scores should be 1D of shape [num_boxes].
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 1 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([[0.9]])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
# The max_output_size should be a scalar (0-D).
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 0 but is rank 1"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, [3], 0.5)
# The iou_threshold should be a scalar (0-D).
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 0 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])
@test_util.run_deprecated_v1
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testDataTypes(self):
# Test case for GitHub issue 20199.
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = float("-inf")
# Note: There are multiple versions of non_max_suppression: v2, v3, v4 and v5.
# gen_image_ops.non_max_suppression_v2:
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
selected_indices = gen_image_ops.non_max_suppression_v2(
boxes, scores, max_output_size, iou_threshold).eval()
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v3
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
selected_indices = gen_image_ops.non_max_suppression_v3(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v4.
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
selected_indices, _ = gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v5.
soft_nms_sigma_np = float(0.0)
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)
selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersWithSoftNMS(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 6
iou_threshold_np = 1.0
score_threshold_np = 0.0
soft_nms_sigma_np = 0.5
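# With Gaussian soft-NMS no box is discarded outright; each selection
# decays the scores of overlapping boxes, consistent with a factor of
# roughly exp(-iou**2 / (2 * sigma)), which is where expected scores such
# as 0.384 and 0.256 below come from.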
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np)
selected_indices, selected_scores = \
image_ops.non_max_suppression_with_scores(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold,
soft_nms_sigma)
selected_indices, selected_scores = self.evaluate(
[selected_indices, selected_scores])
self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])
self.assertAllClose(selected_scores,
[0.95, 0.9, 0.384, 0.3, 0.256, 0.197],
rtol=1e-2, atol=1e-2)
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClusters(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded.eval(), [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices.eval(), [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
@test_util.run_deprecated_v1
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLap(self):
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testSelectOneFromThree(self):
overlaps_np = [
[1.0, 0.7, 0.2],
[0.7, 1.0, 0.0],
[0.2, 0.0, 1.0],
]
scores_np = [0.7, 0.9, 0.1]
max_output_size_np = 3
overlaps = constant_op.constant(overlaps_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
overlap_threshold = 0.6
score_threshold = 0.4
selected_indices = image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
"""Tests utility function used by ssim() and psnr()."""
@test_util.run_deprecated_v1
def testWrongDims(self):
img = array_ops.placeholder(dtype=dtypes.float32)
img_np = np.array((2, 2))
with self.cached_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img: img_np})
@test_util.run_deprecated_v1
def testShapeMismatch(self):
img1 = array_ops.placeholder(dtype=dtypes.float32)
img2 = array_ops.placeholder(dtype=dtypes.float32)
img1_np = np.array([1, 2, 2, 1])
img2_np = np.array([1, 3, 3, 1])
with self.cached_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img1, img2)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img1: img1_np, img2: img2_np})
class PSNRTest(test_util.TensorFlowTestCase):
"""Tests for PSNR."""
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/psnr/testdata", filename))
im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
q20 = self._LoadTestImage(sess, "cat_q20.jpg")
q72 = self._LoadTestImage(sess, "cat_q72.jpg")
q95 = self._LoadTestImage(sess, "cat_q95.jpg")
return q20, q72, q95
def _PSNR_NumPy(self, orig, target, max_value):
"""Numpy implementation of PSNR."""
mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
return 20 * np.log10(max_value) - 10 * np.log10(mse)
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
@test_util.run_deprecated_v1
def testPSNRSingleImage(self):
image1 = self._RandomImage((8, 8, 1), 1)
image2 = self._RandomImage((8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
@test_util.run_deprecated_v1
def testPSNRMultiImage(self):
image1 = self._RandomImage((10, 8, 8, 1), 1)
image2 = self._RandomImage((10, 8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
@test_util.run_deprecated_v1
def testGoldenPSNR(self):
q20, q72, q95 = self._LoadTestImages()
# Verify NumPy implementation first.
# Golden values are generated using GNU Octave's psnr() function.
psnr1 = self._PSNR_NumPy(q20, q72, 1)
self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
psnr2 = self._PSNR_NumPy(q20, q95, 1)
self.assertNear(29.994, psnr2, 0.001)
psnr3 = self._PSNR_NumPy(q72, q95, 1)
self.assertNear(35.302, psnr3, 0.001)
# Test TensorFlow implementation.
with self.cached_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
tf_psnr1 = image_ops.psnr(tf_q20, tf_q72, 1, "psnr1").eval()
tf_psnr2 = image_ops.psnr(tf_q20, tf_q95, 1, "psnr2").eval()
tf_psnr3 = image_ops.psnr(tf_q72, tf_q95, 1, "psnr3").eval()
self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
@test_util.run_deprecated_v1
def testInfinity(self):
q20, _, _ = self._LoadTestImages()
psnr = self._PSNR_NumPy(q20, q20, 1)
with self.cached_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_q20, tf_q20, 1, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
@test_util.run_deprecated_v1
def testInt(self):
img1 = self._RandomImage((10, 8, 8, 1), 255)
img2 = self._RandomImage((10, 8, 8, 1), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
psnr_uint8 = image_ops.psnr(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
psnr_float32 = image_ops.psnr(img1, img2, 1.0)
with self.cached_session(use_gpu=True):
self.assertAllClose(
psnr_uint8.eval(), self.evaluate(psnr_float32), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
@test_util.run_deprecated_v1
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
ssim = image_ops.ssim(
*ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
scores = [ssim.eval(dict(zip(ph, t)))
for t in itertools.combinations_with_replacement(img, 2)]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
@test_util.run_deprecated_v1
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertLess(ssim.eval(), 0)
@test_util.run_deprecated_v1
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(
ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
@test_util.run_deprecated_v1
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
msssim = image_ops.ssim_multiscale(
*ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
scores = [msssim.eval(dict(zip(ph, t)))
for t in itertools.combinations_with_replacement(img, 2)]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
@test_util.run_deprecated_v1
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
scaled_ph = [x * scalar for x in ph]
msssim = image_ops.ssim_multiscale(
*scaled_ph,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
grads = gradients.gradients(msssim, scalar)
with self.cached_session(use_gpu=True) as sess:
np_grads = sess.run(grads, feed_dict={ph[0]: img[0], ph[1]: img[1]})
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
If any of the value is negative so that the geometric mean is not
well-defined, then treat the MS-SSIM score as zero.
"""
with self.cached_session(use_gpu=True) as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [
image_ops.ssim_multiscale(
x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
for x, y in itertools.combinations(images, 2)
]
msssim = self.evaluate(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
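  # Illustrative sketch (added note, not part of the original test): the
  # clamping described in the docstring above amounts to roughly
  #   msssim = np.prod(np.maximum(per_scale_scores, 0.0) ** power_factors)
  # so any negative per-scale score drives the product to zero, which is why
  # every value asserted above must lie in [0, 1].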
@test_util.run_deprecated_v1
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(
ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)
def testNumpyInput(self):
"""Test case for GitHub issue 28241."""
image = np.random.random([512, 512, 1])
score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
with self.cached_session(use_gpu=True):
_ = self.evaluate(score_tensor)
class ImageGradientsTest(test_util.TensorFlowTestCase):
def testImageGradients(self):
shape = [1, 2, 4, 1]
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
img = array_ops.reshape(img, shape)
expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)
dy, dx = image_ops.image_gradients(img)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsMultiChannelBatch(self):
batch = [[[[1, 2], [2, 5], [3, 3]],
[[8, 4], [5, 1], [9, 8]]],
[[[5, 3], [7, 9], [1, 6]],
[[1, 2], [6, 3], [6, 3]]]]
expected_dy = [[[[7, 2], [3, -4], [6, 5]],
[[0, 0], [0, 0], [0, 0]]],
[[[-4, -1], [-1, -6], [5, -3]],
[[0, 0], [0, 0], [0, 0]]]]
expected_dx = [[[[1, 3], [1, -2], [0, 0]],
[[-3, -3], [4, 7], [0, 0]]],
[[[2, 6], [-6, -3], [0, 0]],
[[5, 1], [0, 0], [0, 0]]]]
batch = constant_op.constant(batch)
assert batch.get_shape().as_list() == [2, 2, 3, 2]
dy, dx = image_ops.image_gradients(batch)
with self.cached_session(use_gpu=True):
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsBadShape(self):
# [2 x 4] image but missing batch and depth dimensions.
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
with self.assertRaises(ValueError):
image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
def disabled_testSobelEdges1x2x3x1(self):
img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
dtype=dtypes.float32, shape=[1, 2, 3, 1])
expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
[[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
sobel = image_ops.sobel_edges(img)
with self.cached_session(use_gpu=True):
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected, actual_sobel)
def testSobelEdges5x3x4x2(self):
batch_size = 5
plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
[1, 3, 4, 1])
two_channel = np.concatenate([plane, plane], axis=3)
batch = np.concatenate([two_channel] * batch_size, axis=0)
img = constant_op.constant(batch, dtype=dtypes.float32,
shape=[batch_size, 3, 4, 2])
expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
[[6, 0], [0, 6], [-6, 10], [-6, 0]],
[[0, 0], [0, 0], [0, 10], [0, 0]]],
[1, 3, 4, 1, 2])
expected_two_channel = np.concatenate(
[expected_plane, expected_plane], axis=3)
expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
sobel = image_ops.sobel_edges(img)
with self.cached_session(use_gpu=True):
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected_batch, actual_sobel)
class DecodeImageTest(test_util.TensorFlowTestCase):
def testJpegUint16(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifUint16(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testExpandAnimations(self):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(
gif0, dtype=dtypes.float32, expand_animations=False)
# image_ops.decode_png() handles GIFs and returns 3D tensors
animation = image_ops.decode_gif(gif0)
first_frame = array_ops.gather(animation, 0)
image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertEqual(len(image0.shape), 3)
self.assertAllEqual(list(image0.shape), [40, 20, 3])
self.assertAllEqual(image0, image1)
if __name__ == "__main__":
googletest.main()
|
DavidNorman/tensorflow
|
tensorflow/python/ops/image_ops_test.py
|
Python
|
apache-2.0
| 203,147
|
[
"Gaussian"
] |
b929f99f53d0c27c85b10d3730ea0d33f8f14cd2c2f8ae51248a0eb80ddce4e2
|
# TO-DO: to be moved to tests directory
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import cmd
import sys
import os.path
from DIRAC.DataManagementSystem.Client.CmdDirCompletion.AbstractFileSystem import UnixLikeFileSystem
from DIRAC.DataManagementSystem.Client.CmdDirCompletion.DirectoryCompletion import DirectoryCompletion
class DirCompletion(cmd.Cmd):
ulfs = UnixLikeFileSystem()
dc = DirectoryCompletion(ulfs)
def do_exit(self, args):
sys.exit(0)
def _listdir(self, args):
if os.path.isdir(args):
return os.listdir(args)
else:
return [args]
def _ls(self, args):
try:
return self._listdir(args)
    except Exception:
return []
def do_ls(self, args):
print()
print(" ".join(self._ls(args)))
def complete_ls(self, text, line, begidx, endidx):
#print
result = []
cur_input_line = line.split()
cur_path = "."
if (len(cur_input_line) == 2):
cur_path = cur_input_line[1]
#print "cur_path:", cur_path
result = self.dc.parse_text_line(text, cur_path, os.getcwd() )
return result
if __name__ == "__main__":
cli = DirCompletion()
cli.cmdloop()
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/Client/test/new_dir_completion.py
|
Python
|
gpl-3.0
| 1,215
|
[
"DIRAC"
] |
e80fd082b1e3287b1502c647e8cc771802b6b51d7406894601ea3036263d0061
|
# -*- encoding: utf-8 -*-
import sys
import os.path
import logging
def precheck( configs ):
"""
checks configs for problems
return value specifies number of errors found
Checks:
  - if the full paths to the executables have not been provided, are these the right ones?
  - if the aligner is bowtie/bowtie2, are the indexes bowtie/bowtie2 indexes?
- is the FastQC file transfer host accessible?
- are all paths working?
- have mapping and quantification resources per sample (e.g. SRR*****=1,2) been specified and are they valid?
- look out for qupto option for bowtie
- SPOT_TYPE=single|paired|mixed
- if SPOT_TYPE=single then PAIRED_SAMPLES should be blank
- if SPOT_TYPE=paired then PAIRED_SAMPLES can be blank => all samples are paired
- if SPOT_TYPE=mixed then samples excluded in PAIRED_SAMPLES are single
- check the MAPPING_TYPE variable [non-spliced|spliced] only
Ask the user if s/he would like to proceed given the warnings shown
"""
config_status = dict()
logging.info( "Running pre-checks..." )
overall_status = 0
  for key, value in config_status.items():
if value != 0:
overall_status += value
return overall_status
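
# Illustrative sketch only (added, not part of the original module): one way a
# single item from the checklist above could be implemented.  It assumes
# ``configs`` behaves like a dict; the keys "SPOT_TYPE" and "PAIRED_SAMPLES"
# are assumptions taken from the docstring above.
def _check_spot_type(configs):
  """Return the number of problems found with SPOT_TYPE/PAIRED_SAMPLES."""
  errors = 0
  spot_type = configs.get("SPOT_TYPE", "")
  if spot_type not in ("single", "paired", "mixed"):
    logging.warning("SPOT_TYPE must be single|paired|mixed, got %r", spot_type)
    errors += 1
  if spot_type == "single" and configs.get("PAIRED_SAMPLES"):
    logging.warning("PAIRED_SAMPLES should be blank when SPOT_TYPE=single")
    errors += 1
  return errors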
|
polarise/breeze
|
breeze/precheck.py
|
Python
|
gpl-2.0
| 1,136
|
[
"Bowtie"
] |
9349bc6f613856cd15f703c3837da4c89557aeb50a40f36be32d0d8e0d92011c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
# Version number
version = '0.0.1'
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name = 'GPSVI',
version = version,
author = 'Ziang Zhu',
author_email = 'zhu.ziang.1990@gmail.com',
description = ('Gaussian Process with Stochastic Variational Inference'),
license = 'MIT',
keywords = 'machine-learning gaussian-processes stochastic variational inference',
url = 'https://github.com/AlchemicalChest/Gaussian-Process-with-Stochastic-Variational-Inference',
packages = ['GPSVI.core',
'GPSVI.util',
'GPSVI.test'],
package_dir={'GPSVI': 'GPSVI'},
include_package_data = True,
py_modules = ['GPSVI.__init__'],
long_description=read('README.md'),
install_requires=['numpy>=1.7', 'scipy>=0.12'],
extras_require = {'docs':['matplotlib >=1.3','Sphinx','IPython']}
)
|
AlchemicalChest/Gaussian-Process-with-Stochastic-Variational-Inference
|
setup.py
|
Python
|
mit
| 1,017
|
[
"Gaussian"
] |
6261ad2951904d5a51912700284ed84c1e4f91f6f8df446c5a7f0eb67f1ef0b1
|
"""This demo program solves Poisson's equation
- div C grad u(x, y) = f(x, y)
on the unit square with source f given by
f(x, y) = 10*exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02)
and boundary conditions given by
u(x, y) = 0 for x = 0 or x = 1
du/dn(x, y) = 0 for y = 0 or y = 1
The conductivity C is a symmetric 2 x 2 matrix which
varies throughout the domain. In the left part of the
domain, the conductivity is
C = ((1, 0.3), (0.3, 2))
and in the right part it is
C = ((3, 0.5), (0.5, 4))
The data files where these values are stored are generated
by the program generate_data.py
This demo is dedicated to BF and Marius... ;-)
"""
# Copyright (C) 2009-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2009-12-16
# Last changed: 2011-06-28
# Begin demo
from dolfin import *
# Read mesh from file and create function space
mesh = Mesh("mesh.xml.gz")
V = FunctionSpace(mesh, "Lagrange", 1)
# Define Dirichlet boundary (x = 0 or x = 1)
def boundary(x):
return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS
# Define boundary condition
u0 = Constant(0.0)
bc = DirichletBC(V, u0, boundary)
# Code for C++ evaluation of conductivity
conductivity_code = """
class Conductivity : public Expression
{
public:
// Create expression with 3 components
Conductivity() : Expression(3) {}
// Function for evaluating expression on each cell
void eval(Array<double>& values, const Array<double>& x, const ufc::cell& cell) const
{
const uint D = cell.topological_dimension;
const uint cell_index = cell.index;
values[0] = (*c00)[cell_index];
values[1] = (*c01)[cell_index];
values[2] = (*c11)[cell_index];
}
// The data stored in mesh functions
std::shared_ptr<MeshFunction<double> > c00;
std::shared_ptr<MeshFunction<double> > c01;
std::shared_ptr<MeshFunction<double> > c11;
};
"""
# Define conductivity expression and matrix
c00 = MeshFunction("double", mesh, "c00.xml.gz")
c01 = MeshFunction("double", mesh, "c01.xml.gz")
c11 = MeshFunction("double", mesh, "c11.xml.gz")
c = Expression(cppcode=conductivity_code)
c.c00 = c00
c.c01 = c01
c.c11 = c11
C = as_matrix(((c[0], c[1]), (c[1], c[2])))
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)")
a = inner(C*grad(u), grad(v))*dx
L = f*v*dx
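# (Added explanatory note: multiplying -div(C grad u) = f by a test function v
#  and integrating by parts gives the weak form
#      integral( (C grad u) . grad v ) dx = integral( f v ) dx,
#  which is exactly the bilinear form `a` and the linear form `L` above; the
#  homogeneous natural boundary condition makes the boundary term vanish on
#  y = 0 and y = 1, and v = 0 on the Dirichlet part x = 0, x = 1.)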
# Compute solution
u = Function(V)
solve(a == L, u, bc)
# Save solution in VTK format
file = File("poisson.pvd")
file << u
# Plot solution
plot(u, interactive=True)
|
akshmakov/Dolfin-Fijee-Fork
|
demo/documented/tensor-weighted-poisson/python/demo_tensorweighted-poisson.py
|
Python
|
lgpl-3.0
| 3,177
|
[
"VTK"
] |
261f24153cca54e09c436d52fd31f2f49f77c5e5a8b8245a087e73903934a761
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointer."""
import os
from absl.testing import parameterized
from lingvo import model_registry
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import checkpointer
from lingvo.core import cluster_factory
from lingvo.core import learner
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.core import test_utils
FLAGS = tf.flags.FLAGS
def _GetCheckpointKeys(save_path):
reader = tf.train.load_checkpoint(save_path)
shapes = reader.get_variable_to_shape_map()
return set(shapes.keys())
class SimpleInputGenerator(base_input_generator.BaseInputGenerator):
def GetPreprocessedInputBatch(self):
return py_utils.NestedMap(x=tf.constant([1]))
class LinearModel(base_model.BaseTask):
"""A basic linear model."""
@classmethod
def Params(cls):
p = super().Params()
p.name = 'linear_model'
return p
def _CreateLayerVariables(self):
super()._CreateLayerVariables()
p = self.params
w = py_utils.WeightParams(
shape=[3],
init=py_utils.WeightInit.Gaussian(scale=1.0, seed=123456),
dtype=p.dtype)
b = py_utils.WeightParams(
shape=[],
init=py_utils.WeightInit.Gaussian(scale=1.0, seed=234567),
dtype=p.dtype)
self.CreateVariable('w', w)
self.CreateVariable('b', b)
def FPropTower(self, theta, unused_input_batch):
return py_utils.NestedMap(
loss=(tf.reduce_sum(theta.w) + theta.b, 1.0),
loss2=(tf.reduce_sum(theta.w) - theta.b, 1.0)), py_utils.NestedMap()
@model_registry.RegisterSingleTaskModel
class LinearModelParams(base_model_params.SingleTaskModelParams):
def Train(self):
return SimpleInputGenerator.Params()
def Task(self):
p = LinearModel.Params()
p.train.learner = [
learner.Learner.Params().Set(
name='loss', optimizer=optimizer.Adam.Params().Set(name='Adam')),
learner.Learner.Params().Set(
name='loss2', optimizer=optimizer.Adam.Params().Set(name='Adam2'))
]
p.train.ema_decay = 0.999
return p
# We cannot use `variables_to_restore()` because it will create new EMA
# variables if they don't exist. Here we just want existing EMA variables.
def _GetModelEMAVariablePairs(model, ema):
res = {}
for v in model.variables:
shadow_v = ema.average(v)
if shadow_v is not None:
res[v.ref()] = shadow_v
return res
class EagerCheckpointerTest(test_utils.TestCase, parameterized.TestCase):
def testEagerEMACheckpointCompatibility(self):
self.assertTrue(tf.executing_eagerly())
cfg = model_registry.GetParams('test.LinearModelParams', 'Train')
# Use non-zero learning rate so that the weights are updated
cfg.task.train.learner[0].learning_rate = 0.1
cfg.task.train.learner[1].learning_rate = 0.1
eager_v1_logdir = os.path.join(self.get_temp_dir(), 'eager_v1')
eager_v2_logdir = os.path.join(self.get_temp_dir(), 'eager_v2')
mdl = cfg.Instantiate()
@tf.function
def _Update():
with py_utils.GradientTape(persistent=True):
mdl.ConstructFPropBPropGraph()
# Step 1
_Update()
# Save V1 checkpoints at step 1.
ckpt_v1 = checkpointer.EagerCheckpointerV1(eager_v1_logdir, mdl)
ckpt_v1.Save(gsteps=1)
ema = mdl.ema
model_to_ema_map = _GetModelEMAVariablePairs(mdl, ema)
model_to_ema_map_snapshot_step1 = {
k: v.value() for k, v in model_to_ema_map.items()
}
# Step 2
_Update()
# Save V2 checkpoints at step 2.
ckpt_v2 = checkpointer.EagerCheckpointerV2(eager_v2_logdir, mdl)
ckpt_v2.Save(gsteps=2)
model_to_ema_map = _GetModelEMAVariablePairs(mdl, ema)
model_to_ema_map_snapshot_step2 = {
k: v.value() for k, v in model_to_ema_map.items()
}
with cluster_factory.SetEval(True):
# Restores variables to values saved in `eager_v1_logdir`
ckpt_v1.Restore()
# Verify that the EMA variables from V1 checkpoints at step 1 successfully
# overwrite the model variables.
for v in mdl.variables:
if v.ref() in model_to_ema_map_snapshot_step1:
self.assertAllEqual(v, model_to_ema_map_snapshot_step1[v.ref()])
with cluster_factory.SetEval(True):
# Restores variables to values saved in `eager_v2_logdir`
ckpt_v2.Restore()
# Verify that the EMA variables from V2 checkpoints at step 2 successfully
# overwrite the model variables.
for v in mdl.variables:
if v.ref() in model_to_ema_map_snapshot_step2:
self.assertAllEqual(v, model_to_ema_map_snapshot_step2[v.ref()])
def testEagerMultiLearnerCheckpointCompatibility(self):
self.assertTrue(tf.executing_eagerly())
cfg = model_registry.GetParams('test.LinearModelParams', 'Train')
mdl = cfg.Instantiate()
with py_utils.GradientTape(persistent=True):
mdl.ConstructFPropBPropGraph()
eager_v1_logdir = os.path.join(self.get_temp_dir(), 'eager_v1')
eager_v2_logdir = os.path.join(self.get_temp_dir(), 'eager_v2')
checkpointer.EagerCheckpointerV1(eager_v1_logdir, mdl).Save(gsteps=0)
checkpointer.EagerCheckpointerV2(eager_v2_logdir, mdl).Save(gsteps=0)
eager_v1_keys = _GetCheckpointKeys(
os.path.join(eager_v1_logdir, 'ckpt_V1', 'ckpt-00000000'))
eager_v2_keys = _GetCheckpointKeys(
os.path.join(eager_v2_logdir, 'ckpt_V2', 'ckpt-0'))
# Expecting two more variables in V2 checkpoints:
# _CHECKPOINTABLE_OBJECT_GRAPH
# save_counter
self.assertEqual(len(eager_v1_keys) + 2, len(eager_v2_keys)) # pylint:disable=g-generic-assert
py_utils.SetEagerMode(False)
self.assertFalse(tf.executing_eagerly())
graph_logdir = os.path.join(self.get_temp_dir(), 'graph')
os.mkdir(graph_logdir)
with self.session(graph=tf.Graph()) as sess:
mdl = cfg.Instantiate()
for lrn in mdl.GetTask().learners:
lrn.optimizer.params.clear_variable_scope = False
mdl.ConstructFPropBPropGraph()
sess.run(tf.global_variables_initializer())
checkpointer.Checkpointer(graph_logdir, mdl).Save(sess)
graph_keys = _GetCheckpointKeys(os.path.join(graph_logdir, 'ckpt'))
self.assertEqual(eager_v1_keys, graph_keys)
if __name__ == '__main__':
py_utils.SetEagerMode(True)
tf.test.main()
|
tensorflow/lingvo
|
lingvo/core/checkpointer_eager_test.py
|
Python
|
apache-2.0
| 7,045
|
[
"Gaussian"
] |
4e147d6d980750617d6314c3169d803680e95282a700c9dd359e0d114d14ea2e
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2005 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012,2017 Paul Franklin
# Copyright (C) 2014 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Paragraph/Font style editor
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".")
import re
#------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#------------------------------------------------------------------------
from gi.repository import Gtk, Gdk
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.plug.docgen import (StyleSheet, FONT_SERIF, FONT_SANS_SERIF,
PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER, PARA_ALIGN_LEFT,
PARA_ALIGN_JUSTIFY, ParagraphStyle, TableStyle, TableCellStyle,
GraphicsStyle)
from ...listmodel import ListModel
from ...managedwindow import ManagedWindow
from ...glade import Glade
from ...dialog import ErrorDialog
#------------------------------------------------------------------------
#
# StyleListDisplay class
#
#------------------------------------------------------------------------
class StyleListDisplay(ManagedWindow):
"""
Shows the available paragraph/font styles. Allows the user to select,
add, edit, and delete styles from a StyleSheetList.
"""
def __init__(self, stylesheetlist, uistate, track, callback=None):
"""
Create a StyleListDisplay object that displays the styles in the
StyleSheetList.
stylesheetlist - styles for editing: a :class:`.StyleSheetList` instance
callback - task called when an object has been added.
"""
ManagedWindow.__init__(self, uistate, track, self.__class__, modal=True)
# the self.window.run() below makes Gtk make it modal, so any change
# to the previous line's "modal" would require that line to be changed
self.callback = callback
self.sheetlist = stylesheetlist
self.top = Glade(toplevel='styles')
self.set_window(self.top.toplevel, self.top.get_object('title'),
_('Document Styles'))
self.setup_configs('interface.stylelistdisplay', 400, 300)
self.show()
self.top.connect_signals({
"on_ok_clicked" : self.on_ok_clicked,
"on_add_clicked" : self.on_add_clicked,
"on_delete_clicked" : self.on_delete_clicked,
"on_button_press" : self.on_button_press,
"on_edit_clicked" : self.on_edit_clicked,
"on_cancel_clicked" : self.__cancel,
"on_cancel_style_clicked" : dummy_callback,
"on_save_style_clicked" : dummy_callback,
})
self.list = ListModel(self.top.get_object("list"),
[(_('Style'), -1, 10)], )
self.redraw()
# the self.window.run() makes Gtk make it modal, so any change to that
# line would require the ManagedWindow.__init__ to be changed also
self.window.run()
if self.opened:
self.close()
def build_menu_names(self, obj): # meaningless while it's modal
"""Override :class:`.ManagedWindow` method."""
return (_('Document Styles'), ' ')
def __cancel(self, obj):
pass
def redraw(self):
"""Redraws the list of styles that are currently available"""
self.list.model.clear()
self.list.add([_("default")])
index = 1
for style in sorted(self.sheetlist.get_style_names()):
if style == "default":
continue
self.list.add([style])
index += 1
def on_add_clicked(self, obj):
"""Called when the ADD button is clicked. Invokes the StyleEditor to
create a new style"""
style = self.sheetlist.get_style_sheet("default")
StyleEditor(_("New Style"), style, self)
def on_ok_clicked(self, obj):
"""Called when the OK button is clicked; Calls the callback task,
then saves the stylesheet."""
if self.callback is not None:
self.callback()
try:
self.sheetlist.save()
except IOError as msg:
ErrorDialog(_("Error saving stylesheet"), str(msg),
parent=self.window)
except:
log.error("Failed to save stylesheet", exc_info=True)
def on_button_press(self, obj, event):
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
self.on_edit_clicked(obj)
def on_edit_clicked(self, obj):
"""
Called when the EDIT button is clicked.
Calls the StyleEditor to edit the selected style.
"""
store, node = self.list.selection.get_selected()
if not node:
ErrorDialog(_("Missing information"), _("Select a style"),
parent=self.window)
return
name = str(self.list.model.get_value(node, 0))
if name == _('default'): # the default style cannot be edited
return
style = self.sheetlist.get_style_sheet(name)
StyleEditor(name, style, self)
def on_delete_clicked(self, obj):
"""Deletes the selected style."""
store, node = self.list.selection.get_selected()
if not node:
ErrorDialog(_("Missing information"), _("Select a style"),
parent=self.window)
return
name = str(self.list.model.get_value(node, 0))
if name == _('default'): # the default style cannot be removed
return
self.sheetlist.delete_style_sheet(name)
self.redraw()
#------------------------------------------------------------------------
#
# StyleEditor class
#
#------------------------------------------------------------------------
class StyleEditor(ManagedWindow):
"""
Edits the current style definition.
Presents a dialog allowing the values in the style to be altered.
"""
def __init__(self, name, style, parent):
"""
Create the StyleEditor.
name - name of the style that is to be edited
style - style object to be edited: a :class:`.StyleSheet` instance
parent - StyleListDisplay object that called the editor
"""
ManagedWindow.__init__(self, parent.uistate, parent.track,
self.__class__, modal=True)
# the self.window.run() below makes Gtk make it modal, so any change
# to the previous line's "modal" would require that line to be changed
self.current_style = None
self.current_name = None
self.style = StyleSheet(style)
self.parent = parent
self.top = Glade(
toplevel='editor',
also_load=[
"adjustment1", "adjustment2", "adjustment3", "adjustment4",
"adjustment5", "adjustment6", "adjustment7", "adjustment8",
"adjustment9", "adjustment10", "adjustment11"])
self.set_window(self.top.toplevel, self.top.get_object('title'),
_('Style editor'))
self.setup_configs('interface.styleeditor', 550, 610)
self.show()
self.top.connect_signals({
"on_save_style_clicked" : self.on_save_style_clicked,
"on_cancel_style_clicked" : self.__cancel,
"on_cancel_clicked" : dummy_callback,
"on_ok_clicked" : dummy_callback,
"on_add_clicked" : dummy_callback,
"on_delete_clicked" : dummy_callback,
"on_button_press" : dummy_callback,
"on_edit_clicked" : dummy_callback,
})
self.pname = self.top.get_object('pname')
self.pdescription = self.top.get_object('pdescription')
self.notebook = self.top.get_object('notebook1')
self.vbox = self.top.get_object('column_widths')
self.line_style = self.top.get_object('line_style')
line_styles = Gtk.ListStore(int, str)
line_styles.append([0, "Solid"])
line_styles.append([1, "Dashed"])
line_styles.append([2, "Dotted"])
self.line_style.set_model(line_styles)
renderer_text = Gtk.CellRendererText()
self.line_style.pack_start(renderer_text, True)
self.line_style.add_attribute(renderer_text, "text", 1)
self.top.get_object("label6").set_text(_("point size|pt"))
titles = [(_('Style'), 0, 130)]
self.plist = ListModel(self.top.get_object("ptree"), titles,
self.change_display)
for widget_name in ('color', 'bgcolor', 'line_color', 'fill_color'):
color = self.top.get_object(widget_name)
label = self.top.get_object(widget_name + '_code')
color.connect('notify::color', self.color_changed, label)
self.top.get_object("style_name").set_text(name)
def _alphanumeric_sort(iterable):
""" sort the given iterable in the way that humans expect """
convert = lambda text: int(text) if text.isdigit() else text
sort_key = lambda k: [convert(c) for c in re.split('([0-9]+)', k)]
return sorted(iterable, key=sort_key)
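        # Added illustrative note: e.g. ["Table 10", "Table 2", "Table 1"]
        # sorts to ["Table 1", "Table 2", "Table 10"] rather than lexically.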
names = _alphanumeric_sort(self.style.get_paragraph_style_names())
for p_name in names:
self.plist.add([p_name], self.style.get_paragraph_style(p_name))
names = _alphanumeric_sort(self.style.get_table_style_names())
for t_name in names:
self.plist.add([t_name], self.style.get_table_style(t_name))
names = _alphanumeric_sort(self.style.get_cell_style_names())
for c_name in names:
self.plist.add([c_name], self.style.get_cell_style(c_name))
names = _alphanumeric_sort(self.style.get_draw_style_names())
for d_name in names:
self.plist.add([d_name], self.style.get_draw_style(d_name))
self.plist.select_row(0)
# the self.window.run() makes Gtk make it modal, so any change to that
# line would require the ManagedWindow.__init__ to be changed also
self.window.run()
if self.opened:
self.close()
def build_menu_names(self, obj): # meaningless while it's modal
"""Override :class:`.ManagedWindow` method."""
return (_('Style editor'), None)
def __cancel(self, obj):
pass
def show_pages(self, show_pages):
"""
Make the given pages visible.
"""
for page_num in range(self.notebook.get_n_pages()):
page = self.notebook.get_nth_page(page_num)
if page_num in show_pages:
page.show()
else:
page.hide()
def draw(self):
"""
Updates the display with the selected style.
"""
if isinstance(self.current_style, ParagraphStyle):
self.show_pages([0, 1, 2])
self.draw_paragraph()
elif isinstance(self.current_style, TableStyle):
self.show_pages([0, 3])
self.draw_table()
elif isinstance(self.current_style, TableCellStyle):
self.show_pages([0, 4])
self.draw_cell()
elif isinstance(self.current_style, GraphicsStyle):
self.show_pages([0, 5])
self.draw_graphics()
def draw_graphics(self):
"""
Updates the display with the selected graphics style.
"""
g = self.current_style
self.pname.set_text( '<span size="larger" weight="bold">%s</span>' %
self.current_name)
self.pname.set_use_markup(True)
descr = g.get_description()
descr = descr or _("No description available")
p_style = g.get_paragraph_style()
if p_style:
para_note = _("(Embedded style '%s' must be edited separately)")
descr += '\n\n' + para_note % p_style
self.pdescription.set_text(descr)
self.top.get_object("line_style").set_active(g.get_line_style())
self.top.get_object("line_width").set_value(g.get_line_width())
self.line_color = rgb2color(g.get_color())
self.top.get_object("line_color").set_color(self.line_color)
self.fill_color = rgb2color(g.get_fill_color())
self.top.get_object("fill_color").set_color(self.fill_color)
self.top.get_object("shadow").set_active(g.get_shadow())
self.top.get_object("shadow_space").set_value(g.get_shadow_space())
def draw_cell(self):
"""
Updates the display with the selected cell style.
"""
c = self.current_style
self.pname.set_text( '<span size="larger" weight="bold">%s</span>' %
self.current_name)
self.pname.set_use_markup(True)
descr = c.get_description()
self.pdescription.set_text(descr or _("No description available"))
self.top.get_object("cell_lborder").set_active(c.get_left_border())
self.top.get_object("cell_rborder").set_active(c.get_right_border())
self.top.get_object("cell_tborder").set_active(c.get_top_border())
self.top.get_object("cell_bborder").set_active(c.get_bottom_border())
self.top.get_object("cell_padding").set_value(c.get_padding())
def draw_table(self):
"""
Updates the display with the selected table style.
"""
t = self.current_style
self.pname.set_text( '<span size="larger" weight="bold">%s</span>' %
self.current_name)
self.pname.set_use_markup(True)
descr = t.get_description()
self.pdescription.set_text(descr or _("No description available"))
self.top.get_object("table_width").set_value(t.get_width())
self.column = []
for widget in self.vbox.get_children():
self.vbox.remove(widget)
for i in range(t.get_columns()):
hbox = Gtk.Box()
label = Gtk.Label(label=_('Column %d:') % (i + 1))
hbox.pack_start(label, False, False, 6)
spin = Gtk.SpinButton()
spin.set_range(0, 100)
spin.set_increments(1, 10)
spin.set_numeric(True)
spin.set_value(t.get_column_width(i))
self.column.append(spin)
hbox.pack_start(spin, False, False, 6)
hbox.pack_start(Gtk.Label('%'), False, False, 6)
hbox.show_all()
self.vbox.pack_start(hbox, False, False, 3)
def draw_paragraph(self):
"""
Updates the display with the selected paragraph style.
"""
p = self.current_style
self.pname.set_text( '<span size="larger" weight="bold">%s</span>' %
self.current_name)
self.pname.set_use_markup(True)
descr = p.get_description()
self.pdescription.set_text(descr or _("No description available") )
font = p.get_font()
self.top.get_object("size").set_value(font.get_size())
if font.get_type_face() == FONT_SERIF:
self.top.get_object("roman").set_active(1)
else:
self.top.get_object("swiss").set_active(1)
self.top.get_object("bold").set_active(font.get_bold())
self.top.get_object("italic").set_active(font.get_italic())
self.top.get_object("underline").set_active(font.get_underline())
if p.get_alignment() == PARA_ALIGN_LEFT:
self.top.get_object("lalign").set_active(1)
elif p.get_alignment() == PARA_ALIGN_RIGHT:
self.top.get_object("ralign").set_active(1)
elif p.get_alignment() == PARA_ALIGN_CENTER:
self.top.get_object("calign").set_active(1)
else:
self.top.get_object("jalign").set_active(1)
self.top.get_object("rmargin").set_value(p.get_right_margin())
self.top.get_object("lmargin").set_value(p.get_left_margin())
self.top.get_object("pad").set_value(p.get_padding())
self.top.get_object("tmargin").set_value(p.get_top_margin())
self.top.get_object("bmargin").set_value(p.get_bottom_margin())
self.top.get_object("indent").set_value(p.get_first_indent())
self.top.get_object("tborder").set_active(p.get_top_border())
self.top.get_object("lborder").set_active(p.get_left_border())
self.top.get_object("rborder").set_active(p.get_right_border())
self.top.get_object("bborder").set_active(p.get_bottom_border())
color = rgb2color(font.get_color())
self.top.get_object("color").set_color(color)
bg_color = rgb2color(p.get_background_color())
self.top.get_object("bgcolor").set_color(bg_color)
def color_changed(self, color, name, label):
"""
Called to set the color code when a color is changed.
"""
        rgb = color2rgb(color.get_color())
        label.set_text("#%02X%02X%02X" % rgb)
def save(self):
"""
Saves the current style displayed on the dialog.
"""
if isinstance(self.current_style, ParagraphStyle):
self.save_paragraph()
elif isinstance(self.current_style, TableStyle):
self.save_table()
elif isinstance(self.current_style, TableCellStyle):
self.save_cell()
elif isinstance(self.current_style, GraphicsStyle):
self.save_graphics()
def save_graphics(self):
"""
Saves the current graphics style displayed on the dialog.
"""
g = self.current_style
g.set_line_style(self.top.get_object("line_style").get_active())
g.set_line_width(self.top.get_object("line_width").get_value())
line_color = self.top.get_object("line_color").get_color()
g.set_color(color2rgb(line_color))
fill_color = self.top.get_object("fill_color").get_color()
g.set_fill_color(color2rgb(fill_color))
shadow = self.top.get_object("shadow").get_active()
shadow_space = self.top.get_object("shadow_space").get_value()
g.set_shadow(shadow, shadow_space)
self.style.add_draw_style(self.current_name, self.current_style)
def save_cell(self):
"""
Saves the current cell style displayed on the dialog.
"""
c = self.current_style
c.set_left_border(self.top.get_object("cell_lborder").get_active())
c.set_right_border(self.top.get_object("cell_rborder").get_active())
c.set_top_border(self.top.get_object("cell_tborder").get_active())
c.set_bottom_border(self.top.get_object("cell_bborder").get_active())
c.set_padding(self.top.get_object("cell_padding").get_value())
self.style.add_cell_style(self.current_name, self.current_style)
def save_table(self):
"""
Saves the current table style displayed on the dialog.
"""
t = self.current_style
t.set_width(self.top.get_object("table_width").get_value_as_int())
for i in range(t.get_columns()):
t.set_column_width(i, self.column[i].get_value_as_int())
self.style.add_table_style(self.current_name, self.current_style)
def save_paragraph(self):
"""
Saves the current paragraph style displayed on the dialog.
"""
p = self.current_style
font = p.get_font()
font.set_size(self.top.get_object("size").get_value_as_int())
if self.top.get_object("roman").get_active():
font.set_type_face(FONT_SERIF)
else:
font.set_type_face(FONT_SANS_SERIF)
font.set_bold(self.top.get_object("bold").get_active())
font.set_italic(self.top.get_object("italic").get_active())
font.set_underline(self.top.get_object("underline").get_active())
if self.top.get_object("lalign").get_active():
p.set_alignment(PARA_ALIGN_LEFT)
elif self.top.get_object("ralign").get_active():
p.set_alignment(PARA_ALIGN_RIGHT)
elif self.top.get_object("calign").get_active():
p.set_alignment(PARA_ALIGN_CENTER)
else:
p.set_alignment(PARA_ALIGN_JUSTIFY)
p.set_right_margin(self.top.get_object("rmargin").get_value())
p.set_left_margin(self.top.get_object("lmargin").get_value())
p.set_top_margin(self.top.get_object("tmargin").get_value())
p.set_bottom_margin(self.top.get_object("bmargin").get_value())
p.set_padding(self.top.get_object("pad").get_value())
p.set_first_indent(self.top.get_object("indent").get_value())
p.set_top_border(self.top.get_object("tborder").get_active())
p.set_left_border(self.top.get_object("lborder").get_active())
p.set_right_border(self.top.get_object("rborder").get_active())
p.set_bottom_border(self.top.get_object("bborder").get_active())
color = self.top.get_object("color").get_color()
font.set_color(color2rgb(color))
bg_color = self.top.get_object("bgcolor").get_color()
p.set_background_color(color2rgb(bg_color))
self.style.add_paragraph_style(self.current_name, self.current_style)
def on_save_style_clicked(self, obj):
"""
Saves the current style sheet and causes the parent to be updated with
the changes.
"""
name = str(self.top.get_object("style_name").get_text())
self.save()
self.style.set_name(name)
self.parent.sheetlist.set_style_sheet(name, self.style)
self.parent.redraw()
def change_display(self, obj):
"""
Called when the paragraph selection has been changed. Saves the
old paragraph, then draws the newly selected paragraph.
"""
# Don't save until current_name is defined
# If it's defined, save under the current paragraph name
if self.current_name:
self.save()
# Then change to new paragraph
objs = self.plist.get_selected_objects()
store, node = self.plist.get_selected()
self.current_name = store.get_value(node, 0)
self.current_style = objs[0]
self.draw()
def rgb2color(rgb):
"""
Convert a tuple containing RGB values into a Gdk Color.
"""
return Gdk.Color(rgb[0] << 8, rgb[1] << 8, rgb[2] << 8)
def color2rgb(color):
"""
Convert a Gdk Color into a tuple containing RGB values.
"""
return (color.red >> 8, color.green >> 8, color.blue >> 8)
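# Added illustrative note: these helpers convert between 8-bit channel values
# and Gdk's 16-bit range, e.g. rgb2color((255, 0, 0)) gives Gdk.Color(65280, 0, 0)
# and color2rgb() of that color returns (255, 0, 0).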
def dummy_callback(obj):
"""Dummy callback to satisfy gtkbuilder on connect of signals.
    There are two widgets in the glade file although only one is needed;
    the signals of the other must be connected too.
"""
pass
|
jralls/gramps
|
gramps/gui/plug/report/_styleeditor.py
|
Python
|
gpl-2.0
| 23,885
|
[
"Brian"
] |
7aeeffb21516eade9bdb097db5fde16a00a5bb598c9e11d14f6b0defc5967a5f
|
# Copyright 2000-2002 by Andrew Dalke.
# Revisions copyright 2007-2010 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Alphabets used in Seq objects etc to declare sequence type and letters.
This is used by sequences which contain a finite number of similar words.
"""
class Alphabet(object):
"""Generic alphabet base class.
This class is used as a base class for other types of alphabets.
Attributes:
- letters - list-like object containing the letters of the alphabet.
Usually it is a string when letters are single characters.
- size - size of the alphabet's letters (e.g. 1 when letters are
single characters).
"""
size = None # default to no fixed size for words
letters = None # default to no fixed alphabet
# In general, a list-like object. However,
# assuming letters are single characters, use a
# string. This is expected for use with Seq like
# objects.
def __repr__(self):
return self.__class__.__name__ + "()"
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy only, and does not check the letters property.
This isn't ideal, and doesn't seem to work as intended
with the AlphabetEncoder classes."""
return isinstance(other, self.__class__)
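    # Added illustrative note: because this only checks the class hierarchy,
    # generic_nucleotide.contains(generic_dna) is True while
    # generic_dna.contains(generic_nucleotide) is False.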
def _case_less(self):
"""Return a case-less variant of the current alphabet (PRIVATE)."""
# TODO - remove this method by dealing with things in subclasses?
if isinstance(self, ProteinAlphabet):
return generic_protein
elif isinstance(self, DNAAlphabet):
return generic_dna
elif isinstance(self, RNAAlphabet):
return generic_rna
elif isinstance(self, NucleotideAlphabet):
return generic_nucleotide
elif isinstance(self, SingleLetterAlphabet):
return single_letter_alphabet
else:
return generic_alphabet
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
if not self.letters or self.letters == self.letters.upper():
# Easy case, no letters or already upper case!
return self
else:
# TODO - Raise NotImplementedError and handle via subclass?
return self._case_less()
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
if not self.letters or self.letters == self.letters.lower():
# Easy case, no letters or already lower case!
return self
else:
# TODO - Raise NotImplementedError and handle via subclass?
return self._case_less()
generic_alphabet = Alphabet()
class SingleLetterAlphabet(Alphabet):
"""Generic alphabet with letters of size one."""
size = 1
letters = None # string of all letters in the alphabet
single_letter_alphabet = SingleLetterAlphabet()
# ########## Protein
class ProteinAlphabet(SingleLetterAlphabet):
"""Generic single letter protein alphabet."""
pass
generic_protein = ProteinAlphabet()
# ########## DNA
class NucleotideAlphabet(SingleLetterAlphabet):
"""Generic single letter nucleotide alphabet."""
pass
generic_nucleotide = NucleotideAlphabet()
class DNAAlphabet(NucleotideAlphabet):
"""Generic single letter DNA alphabet."""
pass
generic_dna = DNAAlphabet()
# ########## RNA
class RNAAlphabet(NucleotideAlphabet):
"""Generic single letter RNA alphabet."""
pass
generic_rna = RNAAlphabet()
# ########## Other per-sequence encodings
class SecondaryStructure(SingleLetterAlphabet):
"""Alphabet used to describe secondary structure.
Letters are 'H' (helix), 'S' (strand), 'T' (turn) and 'C' (coil).
"""
letters = "HSTC"
class ThreeLetterProtein(Alphabet):
"""Three letter protein alphabet."""
size = 3
letters = [
"Ala", "Asx", "Cys", "Asp", "Glu", "Phe", "Gly", "His", "Ile",
"Lys", "Leu", "Met", "Asn", "Pro", "Gln", "Arg", "Ser", "Thr",
"Sec", "Val", "Trp", "Xaa", "Tyr", "Glx",
]
def _upper(self):
raise NotImplementedError("We don't have an uppercase three letter protein alphabet.")
def _lower(self):
raise NotImplementedError("We don't have a lowercase three letter protein alphabet.")
# ##### Non per-sequence modifications
# (These are Decorator classes)
class AlphabetEncoder(object):
def __init__(self, alphabet, new_letters):
self.alphabet = alphabet
self.new_letters = new_letters
if alphabet.letters is not None:
self.letters = alphabet.letters + new_letters
else:
self.letters = None
def __getattr__(self, key):
if key[:2] == "__" and key[-2:] == "__":
raise AttributeError(key)
return getattr(self.alphabet, key)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.alphabet,
self.new_letters)
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
        This isn't implemented for the base AlphabetEncoder,
which will always return 0 (False)."""
return 0
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return AlphabetEncoder(self.alphabet._upper(), self.new_letters.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return AlphabetEncoder(self.alphabet._lower(), self.new_letters.lower())
class Gapped(AlphabetEncoder):
def __init__(self, alphabet, gap_char="-"):
AlphabetEncoder.__init__(self, alphabet, gap_char)
self.gap_char = gap_char
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy, and attempts to check the gap character. This fails
if the other alphabet does not have a gap character!
"""
return other.gap_char == self.gap_char and \
self.alphabet.contains(other.alphabet)
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return Gapped(self.alphabet._upper(), self.gap_char.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return Gapped(self.alphabet._lower(), self.gap_char.lower())
class HasStopCodon(AlphabetEncoder):
def __init__(self, alphabet, stop_symbol="*"):
AlphabetEncoder.__init__(self, alphabet, stop_symbol)
self.stop_symbol = stop_symbol
def contains(self, other):
"""Does this alphabet 'contain' the other (OBSOLETE?).
Returns a boolean. This relies on the Alphabet subclassing
hierarchy, and attempts to check the stop symbol. This fails
if the other alphabet does not have a stop symbol!
"""
return other.stop_symbol == self.stop_symbol and \
self.alphabet.contains(other.alphabet)
def _upper(self):
"""Return an upper case variant of the current alphabet (PRIVATE)."""
return HasStopCodon(self.alphabet._upper(), self.stop_symbol.upper())
def _lower(self):
"""Return a lower case variant of the current alphabet (PRIVATE)."""
return HasStopCodon(self.alphabet._lower(), self.stop_symbol.lower())
def _get_base_alphabet(alphabet):
"""Returns the non-gapped non-stop-codon Alphabet object (PRIVATE)."""
a = alphabet
while isinstance(a, AlphabetEncoder):
a = a.alphabet
assert isinstance(a, Alphabet), \
"Invalid alphabet found, %s" % repr(a)
return a
def _ungap(alphabet):
"""Returns the alphabet without any gap encoder (PRIVATE)."""
# TODO - Handle via method of the objects?
if not hasattr(alphabet, "gap_char"):
return alphabet
elif isinstance(alphabet, Gapped):
return alphabet.alphabet
elif isinstance(alphabet, HasStopCodon):
return HasStopCodon(_ungap(alphabet.alphabet), stop_symbol=alphabet.stop_symbol)
elif isinstance(alphabet, AlphabetEncoder):
        return AlphabetEncoder(_ungap(alphabet.alphabet), new_letters=alphabet.new_letters)
else:
raise NotImplementedError
def _consensus_base_alphabet(alphabets):
"""Returns a common but often generic base alphabet object (PRIVATE).
This throws away any AlphabetEncoder information, e.g. Gapped alphabets.
Note that DNA+RNA -> Nucleotide, and Nucleotide+Protein-> generic single
letter. These DO NOT raise an exception!"""
common = None
for alpha in alphabets:
a = _get_base_alphabet(alpha)
if common is None:
common = a
elif common == a:
pass
elif isinstance(a, common.__class__):
pass
elif isinstance(common, a.__class__):
common = a
elif isinstance(a, NucleotideAlphabet) \
and isinstance(common, NucleotideAlphabet):
# e.g. Give a mix of RNA and DNA alphabets
common = generic_nucleotide
elif isinstance(a, SingleLetterAlphabet) \
and isinstance(common, SingleLetterAlphabet):
# This is a pretty big mis-match!
common = single_letter_alphabet
else:
# We have a major mis-match... take the easy way out!
return generic_alphabet
if common is None:
# Given NO alphabets!
return generic_alphabet
return common
def _consensus_alphabet(alphabets):
"""Returns a common but often generic alphabet object (PRIVATE).
>>> from Bio.Alphabet import IUPAC
>>> _consensus_alphabet([IUPAC.extended_protein, IUPAC.protein])
ExtendedIUPACProtein()
>>> _consensus_alphabet([generic_protein, IUPAC.protein])
ProteinAlphabet()
Note that DNA+RNA -> Nucleotide, and Nucleotide+Protein-> generic single
letter. These DO NOT raise an exception!
>>> _consensus_alphabet([generic_dna, generic_nucleotide])
NucleotideAlphabet()
>>> _consensus_alphabet([generic_dna, generic_rna])
NucleotideAlphabet()
>>> _consensus_alphabet([generic_dna, generic_protein])
SingleLetterAlphabet()
>>> _consensus_alphabet([single_letter_alphabet, generic_protein])
SingleLetterAlphabet()
This is aware of Gapped and HasStopCodon and new letters added by
other AlphabetEncoders. This WILL raise an exception if more than
one gap character or stop symbol is present.
>>> from Bio.Alphabet import IUPAC
>>> _consensus_alphabet([Gapped(IUPAC.extended_protein), HasStopCodon(IUPAC.protein)])
HasStopCodon(Gapped(ExtendedIUPACProtein(), '-'), '*')
>>> _consensus_alphabet([Gapped(IUPAC.protein, "-"), Gapped(IUPAC.protein, "=")])
Traceback (most recent call last):
...
ValueError: More than one gap character present
>>> _consensus_alphabet([HasStopCodon(IUPAC.protein, "*"), HasStopCodon(IUPAC.protein, "+")])
Traceback (most recent call last):
...
ValueError: More than one stop symbol present
"""
base = _consensus_base_alphabet(alphabets)
gap = None
stop = None
new_letters = ""
for alpha in alphabets:
# Gaps...
if not hasattr(alpha, "gap_char"):
pass
elif gap is None:
gap = alpha.gap_char
elif gap == alpha.gap_char:
pass
else:
raise ValueError("More than one gap character present")
# Stops...
if not hasattr(alpha, "stop_symbol"):
pass
elif stop is None:
stop = alpha.stop_symbol
elif stop == alpha.stop_symbol:
pass
else:
raise ValueError("More than one stop symbol present")
# New letters...
if hasattr(alpha, "new_letters"):
for letter in alpha.new_letters:
if letter not in new_letters \
and letter != gap and letter != stop:
new_letters += letter
alpha = base
if new_letters:
alpha = AlphabetEncoder(alpha, new_letters)
if gap:
alpha = Gapped(alpha, gap_char=gap)
if stop:
alpha = HasStopCodon(alpha, stop_symbol=stop)
return alpha
def _check_type_compatible(alphabets):
"""Returns True except for DNA+RNA or Nucleotide+Protein (PRIVATE).
>>> _check_type_compatible([generic_dna, generic_nucleotide])
True
>>> _check_type_compatible([generic_dna, generic_rna])
False
>>> _check_type_compatible([generic_dna, generic_protein])
False
>>> _check_type_compatible([single_letter_alphabet, generic_protein])
True
This relies on the Alphabet subclassing hierarchy. It does not
check things like gap characters or stop symbols."""
dna, rna, nucl, protein = False, False, False, False
for alpha in alphabets:
a = _get_base_alphabet(alpha)
if isinstance(a, DNAAlphabet):
dna = True
nucl = True
if rna or protein:
return False
elif isinstance(a, RNAAlphabet):
rna = True
nucl = True
if dna or protein:
return False
elif isinstance(a, NucleotideAlphabet):
nucl = True
if protein:
return False
elif isinstance(a, ProteinAlphabet):
protein = True
if nucl:
return False
return True
def _verify_alphabet(sequence):
"""Check all letters in sequence are in the alphabet (PRIVATE).
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import IUPAC
>>> my_seq = Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
... IUPAC.protein)
>>> _verify_alphabet(my_seq)
True
This example has an X, which is not in the IUPAC protein alphabet
(you should be using the IUPAC extended protein alphabet):
>>> bad_seq = Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVFX",
... IUPAC.protein)
>>> _verify_alphabet(bad_seq)
False
This replaces Bio.utils.verify_alphabet() since we are deprecating
that. Potentially this could be added to the Alphabet object, and
I would like it to be an option when creating a Seq object... but
that might slow things down.
"""
letters = sequence.alphabet.letters
if not letters:
raise ValueError("Alphabet does not define letters.")
for letter in sequence:
if letter not in letters:
return False
return True
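# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demonstration of how the encoder wrappers above compose and how
# the private helpers unwrap them; it only uses names defined in this file.
if __name__ == "__main__":
    wrapped = HasStopCodon(Gapped(generic_protein, "-"), "*")
    # _get_base_alphabet strips every AlphabetEncoder layer.
    assert _get_base_alphabet(wrapped) is generic_protein
    # _consensus_alphabet keeps the gap decoration while generalising
    # DNA + generic nucleotide to a plain NucleotideAlphabet.
    merged = _consensus_alphabet([Gapped(generic_dna), generic_nucleotide])
    print(repr(merged))  # Gapped(NucleotideAlphabet(), '-')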
|
zjuchenyuan/BioWeb
|
Lib/Bio/Alphabet/__init__.py
|
Python
|
mit
| 14,959
|
[
"Biopython"
] |
18f9dfb2fba8034131569836bc4a1e88e9133f2d41680baa0f35f08135e5be29
|
import os
from tempfile import mkdtemp, NamedTemporaryFile
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pandas as pd
import oddt
from oddt.utils import method_caller
from oddt.spatial import rmsd
from oddt.scoring import scorer
from oddt.scoring.functions import rfscore, nnscore
from oddt.virtualscreening import virtualscreening
test_data_dir = os.path.dirname(os.path.abspath(__file__))
# common file names
dude_data_dir = os.path.join(test_data_dir, 'data', 'dude', 'xiap')
xiap_crystal_ligand = os.path.join(dude_data_dir, 'crystal_ligand.sdf')
xiap_protein = os.path.join(dude_data_dir, 'receptor_rdkit.pdb')
xiap_actives_docked = os.path.join(dude_data_dir, 'actives_docked.sdf')
def test_vs_scoring_vina():
"""VS scoring (Vina) tests"""
vs = virtualscreening(n_cpu=1)
vs.load_ligands('sdf', xiap_crystal_ligand)
vs.score(function='autodock_vina', protein=xiap_protein)
mols = list(vs.fetch())
assert len(mols) == 1
mol_data = mols[0].data
assert 'vina_affinity' in mol_data
assert 'vina_gauss1' in mol_data
assert 'vina_gauss2' in mol_data
assert 'vina_hydrogen' in mol_data
assert 'vina_hydrophobic' in mol_data
assert 'vina_repulsion' in mol_data
assert mol_data['vina_affinity'] == '-3.57594'
assert mol_data['vina_gauss1'] == '63.01213'
assert mol_data['vina_gauss2'] == '999.07625'
assert mol_data['vina_hydrogen'] == '0.0'
assert mol_data['vina_hydrophobic'] == '26.12648'
assert mol_data['vina_repulsion'] == '3.63178'
def test_vs_docking():
"""VS docking (Vina) tests"""
vs = virtualscreening(n_cpu=1)
vs.load_ligands('sdf', xiap_crystal_ligand)
# bad docking engine
with pytest.raises(ValueError):
vs.dock('srina', 'prot.pdb')
vs.dock(engine='autodock_vina',
protein=xiap_protein,
auto_ligand=xiap_crystal_ligand,
exhaustiveness=1,
energy_range=6,
num_modes=7,
size=(20, 20, 20),
seed=0)
mols = list(vs.fetch())
assert len(mols) == 7
mol_data = mols[0].data
assert 'vina_affinity' in mol_data
assert 'vina_rmsd_lb' in mol_data
assert 'vina_rmsd_ub' in mol_data
if oddt.toolkit.backend == 'ob':
vina_scores = [-5.3, -4.0, -3.8, -3.7, -3.4, -3.4, -3.0]
else:
vina_scores = [-6.3, -6.0, -5.8, -5.8, -3.9, -3.0, -1.1]
assert_array_equal([float(m.data['vina_affinity']) for m in mols], vina_scores)
# verify the SMILES of molecules
ref_mol = next(oddt.toolkit.readfile('sdf', xiap_crystal_ligand))
if oddt.toolkit.backend == 'ob':
# OB 2.3.2 will fail the following, since Hs are removed, etc.
# OB 2.4 recognizes the smiles chirality wrong
pass
else:
vina_rmsd = [8.153314, 5.32554, 8.514586, 8.510169, 9.060128, 8.995098,
8.626776]
assert_array_equal([mol.smiles for mol in mols],
[ref_mol.smiles] * len(mols))
assert_array_almost_equal([rmsd(ref_mol, mol, method='min_symmetry')
for mol in mols], vina_rmsd)
def test_vs_empty():
vs = virtualscreening(n_cpu=1)
with pytest.raises(StopIteration, match='no molecules loaded'):
vs.fetch()
def test_vs_docking_empty():
vs = virtualscreening(n_cpu=1)
vs.load_ligands('smi', os.path.join(dude_data_dir, 'actives_rdkit.smi'))
vs.dock(engine='autodock_vina',
protein=xiap_protein,
auto_ligand=xiap_crystal_ligand,
exhaustiveness=1,
energy_range=5,
num_modes=9,
size=(20, 20, 20),
seed=0)
with pytest.raises(ValueError, match='has no 3D coordinates'):
next(vs.fetch())
def test_vs_multithreading_fallback():
vs = virtualscreening(n_cpu=8)
vs.load_ligands('sdf', xiap_crystal_ligand)
vs.score(function='autodock_vina', protein=xiap_protein)
with pytest.warns(UserWarning, match='Falling back to sub-methods multithreading'):
method_caller(vs, 'fetch')
if oddt.toolkit.backend == 'ob': # RDKit rewrite needed
def test_vs_filtering():
"""VS preset filtering tests"""
vs = virtualscreening(n_cpu=1)
vs.load_ligands('sdf', xiap_actives_docked)
vs.apply_filter('ro5', soft_fail=1)
assert len(list(vs.fetch())) == 49
vs.load_ligands('sdf', xiap_actives_docked)
vs.apply_filter('ro3', soft_fail=2)
assert len(list(vs.fetch())) == 9
def test_vs_pains():
"""VS PAINS filter tests"""
vs = virtualscreening(n_cpu=1)
# TODO: add some failing molecules
vs.load_ligands('sdf', xiap_actives_docked)
vs.apply_filter('pains', soft_fail=0)
assert len(list(vs.fetch())) == 100
def test_vs_similarity():
"""VS similarity filter (USRs, IFPs) tests"""
ref_mol = next(oddt.toolkit.readfile('sdf', xiap_crystal_ligand))
receptor = next(oddt.toolkit.readfile('pdb', xiap_protein))
# following toolkit differences is due to different Hs treatment
vs = virtualscreening(n_cpu=1, chunksize=10)
vs.load_ligands('sdf', xiap_actives_docked)
vs.similarity('usr', cutoff=0.4, query=ref_mol)
if oddt.toolkit.backend == 'ob':
assert len(list(vs.fetch())) == 11
else:
assert len(list(vs.fetch())) == 6
vs = virtualscreening(n_cpu=1)
vs.load_ligands('sdf', xiap_actives_docked)
vs.similarity('usr_cat', cutoff=0.3, query=ref_mol)
if oddt.toolkit.backend == 'ob':
assert len(list(vs.fetch())) == 16
else:
assert len(list(vs.fetch())) == 11
vs = virtualscreening(n_cpu=1)
vs.load_ligands('sdf', xiap_actives_docked)
vs.similarity('electroshape', cutoff=0.45, query=ref_mol)
if oddt.toolkit.backend == 'ob':
assert len(list(vs.fetch())) == 55
else:
assert len(list(vs.fetch())) == 95
vs = virtualscreening(n_cpu=1)
vs.load_ligands('sdf', xiap_actives_docked)
vs.similarity('ifp', cutoff=0.95, query=ref_mol, protein=receptor)
if oddt.toolkit.backend == 'ob':
assert len(list(vs.fetch())) == 3
else:
assert len(list(vs.fetch())) == 6
vs = virtualscreening(n_cpu=1)
vs.load_ligands('sdf', xiap_actives_docked)
vs.similarity('sifp', cutoff=0.9, query=ref_mol, protein=receptor)
if oddt.toolkit.backend == 'ob':
assert len(list(vs.fetch())) == 14
else:
assert len(list(vs.fetch())) == 21
# test wrong method error
with pytest.raises(ValueError):
vs.similarity('sift', query=ref_mol)
def test_vs_scoring():
protein = next(oddt.toolkit.readfile('pdb', xiap_protein))
protein.protein = True
data_dir = os.path.join(test_data_dir, 'data')
home_dir = mkdtemp()
pdbbind_versions = (2007, 2013, 2016)
pdbbind_dir = os.path.join(data_dir, 'pdbbind')
for pdbbind_v in pdbbind_versions:
version_dir = os.path.join(data_dir, 'v%s' % pdbbind_v)
if not os.path.isdir(version_dir):
os.symlink(pdbbind_dir, version_dir)
filenames = []
# train mocked SFs
for model in [nnscore(n_jobs=1)] + [rfscore(version=v, n_jobs=1)
for v in [1, 2, 3]]:
model.gen_training_data(data_dir, pdbbind_versions=pdbbind_versions,
home_dir=home_dir)
filenames.append(model.train(home_dir=home_dir))
vs = virtualscreening(n_cpu=-1, chunksize=10)
vs.load_ligands('sdf', xiap_actives_docked)
# error if no protein is fed
with pytest.raises(ValueError):
vs.score('nnscore')
# bad sf name
with pytest.raises(ValueError):
vs.score('bad_sf', protein=protein)
vs.score('nnscore', protein=xiap_protein)
vs.score('nnscore_pdbbind2016', protein=protein)
vs.score('rfscore_v1', protein=protein)
vs.score('rfscore_v1_pdbbind2016', protein=protein)
vs.score('rfscore_v2', protein=protein)
vs.score('rfscore_v3', protein=protein)
vs.score('pleclinear', protein=protein)
vs.score('pleclinear_p5_l1_s65536_pdbbind2016', protein=protein)
# use pickle directly
vs.score(filenames[0], protein=protein)
# pass SF object directly
vs.score(scorer.load(filenames[0]), protein=protein)
# pass wrong object (sum is not an instance of scorer)
with pytest.raises(ValueError):
vs.score(sum, protein=protein)
mols = list(vs.fetch())
assert len(mols) == 100
mol_data = mols[0].data
assert 'nnscore' in mol_data
assert 'rfscore_v1' in mol_data
assert 'rfscore_v2' in mol_data
assert 'rfscore_v3' in mol_data
assert 'PLEClinear_p5_l1_s65536' in mol_data
vs = virtualscreening(n_cpu=-1, chunksize=10)
vs.load_ligands('sdf', xiap_actives_docked)
vs.score('nnscore', protein=protein)
vs.score('rfscore_v1', protein=protein)
vs.score('rfscore_v2', protein=protein)
vs.score('rfscore_v3', protein=protein)
with NamedTemporaryFile('w', suffix='.sdf') as molfile:
with NamedTemporaryFile('w', suffix='.csv') as csvfile:
vs.write('sdf', molfile.name, csv_filename=csvfile.name)
data = pd.read_csv(csvfile.name)
assert 'nnscore' in data.columns
assert 'rfscore_v1' in data.columns
assert 'rfscore_v2' in data.columns
assert 'rfscore_v3' in data.columns
mols = list(oddt.toolkit.readfile('sdf', molfile.name))
assert len(mols) == 100
vs.write_csv(csvfile.name, fields=['nnscore', 'rfscore_v1',
'rfscore_v2', 'rfscore_v3'])
data = pd.read_csv(csvfile.name)
assert len(data.columns) == 4
assert len(data) == len(mols)
assert 'nnscore' in data.columns
assert 'rfscore_v1' in data.columns
assert 'rfscore_v2' in data.columns
assert 'rfscore_v3' in data.columns
# remove files
for f in filenames:
os.unlink(f)
# remove symlinks
for pdbbind_v in pdbbind_versions:
version_dir = os.path.join(data_dir, 'v%s' % pdbbind_v)
if os.path.islink(version_dir):
os.unlink(version_dir)
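# --- Illustrative sketch (not part of the original test module) ---
# A minimal end-to-end use of the pipeline these tests exercise: load the
# XIAP crystal ligand, rescore it with Vina against the receptor and collect
# the affinities. It reuses only the module-level paths and imports above.
def _example_vina_rescoring():
    vs = virtualscreening(n_cpu=1)
    vs.load_ligands('sdf', xiap_crystal_ligand)
    vs.score(function='autodock_vina', protein=xiap_protein)
    # fetch() is lazy; iterating it triggers the actual scoring.
    return [float(mol.data['vina_affinity']) for mol in vs.fetch()]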
|
mkukielka/oddt
|
tests/test_virtualscreening.py
|
Python
|
bsd-3-clause
| 10,272
|
[
"RDKit"
] |
1bbf03ac0dc77c13dcc357191e53c18485b15a4d2c19c7c494823aa3e987ee19
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espresso.tools.decomp import *
from espresso.tools.timers import *
from espresso.tools.replicate import *
from espresso.tools.pdb import *
from espresso.tools.init_cfg import *
from espresso.tools.topology import *
from espresso.tools.vmd import *
from espresso.tools.info import *
from espresso.tools.DumpConfigurations import *
from espresso.tools.convert import *
from espresso.tools.analyse import *
from espresso.tools.tabulated import *
from espresso.tools.prepareAdress import *
from espresso.tools.warmup import *
from espresso.tools.lammpsfilewrite import *
from espresso.tools.povwrite import *
from espresso.tools.pathintegral import *
|
BackupTheBerlios/espressopp
|
src/tools/__init__.py
|
Python
|
gpl-3.0
| 1,523
|
[
"ESPResSo",
"VMD"
] |
d962969f2f8eea37a0f86379135785fcd83a73a1c297766089604aa4656b9d7e
|
''' master file '''
''' imports needed for doing bash commands in python '''
import os
import shutil
''' imports needed for getRestartTime '''
from scipy.io import netcdf
import glob
''' imports needed for email '''
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def copySingleFile(sourcefile,destination):
shutil.copy(sourcefile,destination)
def copyDirectory(sourcefile,destination):
shutil.copytree(sourcefile,destination)
def makeDirectory(directoryName):
os.mkdir(directoryName)
def move(sourcefile,destination):
shutil.move(sourcefile,destination)
def removeSingleFile(sourcefile):
os.remove(sourcefile)
def removeDirectory(sourcefile):
shutil.rmtree(sourcefile)
def mail(gmail_user,gmail_pwd,to, subject='ACE-net job status', text='Run has started'):
''' Sends an email from a gmail account to a user with a subject and a
message '''
msg = MIMEMultipart()
msg['From'] = gmail_user
msg['To'] = to
msg['Subject'] = subject
msg.attach(MIMEText(text))
mailServer = smtplib.SMTP("smtp.gmail.com", 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmail_user, gmail_pwd)
mailServer.sendmail(gmail_user, to, msg.as_string())
mailServer.close()
def readInFiles():
''' Read in the email and password. A userInfo.txt file is needed in the
same directory. userInfo.txt has the users email and password in it. '''
with open("userInfo.txt",'r') as f:
user=f.readline()
passwd=f.readline()
user = user.rstrip('\n')
passwd = passwd.rstrip('\n')
return user,passwd
def getRestartTime():
datadir = '/home/daugue6/capeislerestart/output/'
#get the correct restart file, the second newest one (if there is more than one)
files = glob.glob(datadir + "*.nc")
#find the restart files
restart_files = []
for i in files:
if "restart" in i:
restart_files.append(i)
#get the latest restart file
filenums = []
for i in restart_files:
filenums.append(int(i[-7:-3]))
latest = filenums.index(max(filenums))
#we need the times data from the restart file
    ncid = netcdf.netcdf_file(restart_files[latest],'r')
Times = ncid.variables['Times'].data
ind = Times.shape[0] - 1
#join the elements of the list into a single string
time = "\'"
for i in Times[ind,:]:
if i == 'T':
time += ' '
else:
time += i
time += "\'"
name="\'{}\'".format(files[latest])
return time, name
def buildRestart():
oldRunFile = 'capeisle_run.nml'
start, restartDir = getRestartTime()
restart = restartDir.split('/')[-1]
restart = "\'{}" .format(restart)
outputFile = oldRunFile
moveOldTo = './runfiles/'
renameOld='capeisle_run{}.nml'.format(start)
oldOutputRunFile = 'run_output{}'.format(start)
try:
makeDirectory(moveOldTo)
except OSError:
pass
move(oldRunFile, renameOld)
move(renameOld, moveOldTo)
move(oldOutputRunFile, moveOldTo)
top = '''
!================================================================!
_______ _ _ _______ _______ _______ ______ _____
(_______)(_) (_)(_______)(_______)(_______)(_____ \ (_____)
_____ _ _ _ _ _ _ _ _ _____) ) _ __ _
| ___) | | | || | | | | || ||_|| |(_____ ( | |/ /| |
| | \ \ / / | |_____ | |___| || | | | _____) )_| /_| |
|_| \___/ \______) \_____/ |_| |_|(______/(_)\_____/
-- Beta Release
! !
!========DOMAIN DECOMPOSITION USING: METIS 4.0.1 ================!
!======Copyright 1998, Regents of University of Minnesota========!
! !
'''
change = '''
&NML_CASE
CASE_TITLE = 'Cape Isle Site'
TIMEZONE = 'UTC',
DATE_FORMAT = 'YMD'
START_DATE = {0}
END_DATE = '2011-10-16 00:00:00'
/
&NML_STARTUP
STARTUP_TYPE = 'hotstart'
STARTUP_FILE = {1}
STARTUP_UV_TYPE = 'set values'
STARTUP_TURB_TYPE = 'set values'
STARTUP_TS_TYPE = 'constant'
STARTUP_T_VALS = 18
STARTUP_S_VALS = 35.0
STARTUP_DMAX = -10.0
/
&NML_IO
INPUT_DIR = './input/'
 OUTPUT_DIR      = './output/'
IREPORT = 720,
VISIT_ALL_VARS = F,
WAIT_FOR_VISIT = F,
USE_MPI_IO_MODE = F
/
&NML_INTEGRATION
EXTSTEP_SECONDS = 0.5,
ISPLIT = 1
IRAMP = 34560
MIN_DEPTH = 0.5
STATIC_SSH_ADJ = 0.0
/
&NML_RESTART
RST_ON = T,
RST_FIRST_OUT = {0}
RST_OUT_INTERVAL = 'days = 1.0'
RST_OUTPUT_STACK = 1
/
&NML_NETCDF
NC_ON = T,
NC_FIRST_OUT = {0}
NC_OUT_INTERVAL = 'seconds=600.0',
NC_OUTPUT_STACK = 0,
NC_GRID_METRICS = T,
NC_VELOCITY = F,
NC_SALT_TEMP = F,
NC_TURBULENCE = F,
NC_AVERAGE_VEL = T,
NC_VERTICAL_VEL = F,
NC_WIND_VEL = F,
NC_WIND_STRESS = F,
NC_EVAP_PRECIP = F,
NC_SURFACE_HEAT = F,
NC_GROUNDWATER = F
/
'''.format(start, restart)
rest = '''
&NML_NETCDF_AV
NCAV_ON = F,
NCAV_FIRST_OUT = 'none'
NCAV_OUT_INTERVAL = 0.0,
NCAV_OUTPUT_STACK = 0,
NCAV_GRID_METRICS = F,
NCAV_FILE_DATE = F,
NCAV_VELOCITY = F,
NCAV_SALT_TEMP = F,
NCAV_TURBULENCE = F,
NCAV_AVERAGE_VEL = F,
NCAV_VERTICAL_VEL = F,
NCAV_WIND_VEL = F,
NCAV_WIND_STRESS = F,
NCAV_EVAP_PRECIP = F,
NCAV_SURFACE_HEAT = F,
NCAV_GROUNDWATER = F,
NCAV_BIO = F,
NCAV_WQM = F,
NCAV_VORTICITY = F
/
&NML_SURFACE_FORCING
WIND_ON = F,
HEATING_ON = F,
PRECIPITATION_ON = F,
/
&NML_PHYSICS
HORIZONTAL_MIXING_TYPE = 'closure'
HORIZONTAL_MIXING_KIND = 'constant'
HORIZONTAL_MIXING_COEFFICIENT = 0.3
HORIZONTAL_PRANDTL_NUMBER = 1.0
VERTICAL_MIXING_TYPE = 'closure'
VERTICAL_MIXING_COEFFICIENT = 1.0E-3,
VERTICAL_PRANDTL_NUMBER = 1.0
BOTTOM_ROUGHNESS_MINIMUM = 0.0025
BOTTOM_ROUGHNESS_LENGTHSCALE = 0.001
BOTTOM_ROUGHNESS_KIND = 'constant'
BOTTOM_ROUGHNESS_TYPE = 'orig'
CONVECTIVE_OVERTURNING = F,
SCALAR_POSITIVITY_CONTROL = T,
BAROTROPIC = T,
BAROCLINIC_PRESSURE_GRADIENT = 'sigma levels'
SEA_WATER_DENSITY_FUNCTION = 'dens2'
RECALCULATE_RHO_MEAN = F
INTERVAL_RHO_MEAN = 'seconds=1800.'
TEMPERATURE_ACTIVE = F,
SALINITY_ACTIVE = F,
SURFACE_WAVE_MIXING = F,
WETTING_DRYING_ON = T
/
&NML_RIVER_TYPE
RIVER_NUMBER = 0,
/
&NML_OPEN_BOUNDARY_CONTROL
OBC_ON = T,
OBC_NODE_LIST_FILE = 'capeisle_obc.dat'
OBC_ELEVATION_FORCING_ON = T,
OBC_ELEVATION_FILE = 'capeisle_el_obc.nc'
OBC_TS_TYPE = 3
OBC_TEMP_NUDGING = F,
OBC_TEMP_FILE = 'none'
OBC_TEMP_NUDGING_TIMESCALE = 0.0000000E+00,
OBC_SALT_NUDGING = F,
OBC_SALT_FILE = 'none'
OBC_SALT_NUDGING_TIMESCALE = 0.0000000E+00,
OBC_MEANFLOW = F,
/
&NML_GRID_COORDINATES
GRID_FILE = 'capeisle_grd.dat'
GRID_FILE_UNITS = 'meters'
PROJECTION_REFERENCE = 'proj=lcc +lon_0=-6.461692e+01 +lat_0=4.545973e+01 +lat_1=4.514367e+01 +lat_2=4.577579e+01'
SIGMA_LEVELS_FILE = 'sigma.dat'
DEPTH_FILE = 'capeisle_dep.dat'
CORIOLIS_FILE = 'capeisle_cor.dat'
SPONGE_FILE = 'capeisle_spg.dat'
BFRIC_FILE='capeisle_bfric.dat'
VVCOE_FILE='capeisle_vvcoe.dat'
/
&NML_GROUNDWATER
GROUNDWATER_ON = F,
GROUNDWATER_FLOW = 0.0,
GROUNDWATER_FILE = 'none'
/
&NML_LAG
LAG_PARTICLES_ON = F,
LAG_START_FILE = 'none'
LAG_OUT_FILE = 'none'
LAG_RESTART_FILE = 'none'
LAG_OUT_INTERVAL = 0.000000000000000E+000,
LAG_SCAL_CHOICE = 'none'
/
&NML_ADDITIONAL_MODELS
DATA_ASSIMILATION = F,
BIOLOGICAL_MODEL = F,
SEDIMENT_MODEL = F,
SEDIMENT_PARAMETER_TYPE = 'constant'
SEDIMENT_MODEL_FILE = 'generic_sediment.inp'
ICING_MODEL = F,
ICE_MODEL = F,
/
&NML_PROBES
PROBES_ON = T,
PROBES_NUMBER = 64,
PROBES_FILE = 'capeisle_timeseries01.nml',
/
&NML_TURBINE
TURBINE_ON = F,
TURBINE_FILE = 'capeisle_turbines.dat'
/
&NML_NESTING
NESTING_ON = F
/
&NML_NCNEST
NCNEST_ON = F
/
&NML_BOUNDSCHK
BOUNDSCHK_ON = F
/
&NML_STATION_TIMESERIES
OUT_STATION_TIMESERIES_ON = F,
STATION_FILE='NONE'
LOCATION_TYPE='NONE'
OUT_ELEVATION=F,
OUT_VELOCITY_3D=F,
OUT_VELOCITY_2D=F,
OUT_SALT_TEMP =F,
OUT_WIND_VELOCITY=F,
OUT_INTERVAL= 'seconds=1000.0'
/
'''
top=top.split('\n')
change=change.split('\n')
rest=rest.split('\n')
with open(outputFile, 'w') as f:
for t in top:
print >> f, t
for c in change:
print >> f, c
for r in rest:
print >> f, r
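''' Illustrative sketch (not part of the original script): how the helpers
above are meant to be chained when a run is restarted. The recipient
address is a placeholder. '''
def exampleRestartAndNotify():
    user, passwd = readInFiles()
    start, restartFile = getRestartTime()
    buildRestart()
    mail(user, passwd, 'someone@example.com',
         subject='ACE-net job status',
         text='Restarting from {0} at {1}'.format(restartFile, start))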
|
wesleybowman/aidan-projects
|
placentia/master.py
|
Python
|
gpl-2.0
| 9,667
|
[
"NetCDF"
] |
e661d625a83f2cd17bae964a2a3652daf9e1a97c99306e1a55bcbe6f8f79443f
|
"""
desitarget.lyazcat
==================
Post-redrock ML processing for LyA Quasar object identification.
"""
import os
import numpy as np
import time
import fitsio
from desitarget.geomask import match, match_to
from desitarget.internal import sharedmem
from desitarget.io import write_with_units
from desispec.io import read_spectra
from desiutil.depend import add_dependencies
from desitarget.targets import main_cmx_or_sv, switch_main_cmx_or_sv
from quasarnp.io import load_model
from quasarnp.io import load_desi_coadd
from quasarnp.utils import process_preds
from prospect.coaddcam import coadd_brz_cameras
from operator import itemgetter
from itertools import groupby
from astropy.modeling import fitting
from astropy.modeling import models
from astropy.table import Table
from scipy.signal import medfilt
from astropy.convolution import Gaussian1DKernel
from astropy.convolution import convolve
# ADM set up the DESI default logger.
from desiutil.log import get_logger
log = get_logger()
# ADM data models for the various afterburners.
zcatdatamodel = np.array([], [('RA', '>f8'), ('DEC', '>f8'), ('TARGETID', '>i8'),
('DESI_TARGET', '>i8'), ('BGS_TARGET', '>i8'),
('MWS_TARGET', '>i8'), ('SCND_TARGET', '>i8'),
('Z', '>f8'), ('ZWARN', '>i8'),
('SPECTYPE', '<U6'), ('DELTACHI2', '>f8'),
('NUMOBS', '>i4'), ('ZTILEID', '>i4')])
qndm = [('Z_QN', '>f8'), ('Z_QN_CONF', '>f8'), ('IS_QSO_QN', '>i2')]
sqdm = [('Z_SQ', '>f8'), ('Z_SQ_CONF', '>f8')]
absdm = [('Z_ABS', '>f8'), ('Z_ABS_CONF', '>f8')]
combdm = [('Z_COMB', '>f8'), ('Z_COMB_PROB', '>f8')]
def tmark(istring):
"""A function to mark the time an operation starts or ends.
Parameters
----------
istring : :class:'str'
The input string to print to the terminal.
Notes
-----
    - Logs the `istring` followed by the date and time in ISO 8601
      format.
"""
t0 = time.time()
t_start = time.strftime('%Y-%m-%d | %H:%M:%S')
log.info('\n{}: {}'.format(istring, t_start))
def make_new_zcat(zbestname, qn_flag=False, sq_flag=False, abs_flag=False,
zcomb_flag=False):
"""Make the initial zcat array with redrock data.
Parameters
----------
zbestname : :class:`str`
Full filename and path for the zbest file to process.
    qn_flag : :class:`bool`, optional
Flag to add QuasarNP data (or not) to the zcat file.
sq_flag : :class:`bool`, optional
Flag to add SQUEzE data (or not) to the zcat file.
abs_flag : :class:`bool`, optional
Flag to add MgII Absorption data (or not) to the zcat file.
zcomb_flag : :class:`bool`, optional
Flag if a combined redshift (or not) was added to the zcat file.
Returns
-------
:class:`~numpy.array` or `bool`
        A zcat in the official format (`zcatdatamodel`) compiled from
        the `tile`, `night`, and `petal_num` in `zcatdir`. If the zbest
file for that petal doesn't exist, returns ``False``.
"""
tmark(' Making redrock zcat')
# ADM read in the zbest and fibermap extensions for the RR
# ADM redshift catalog, if they exist.
try:
zs = fitsio.read(zbestname, "ZBEST")
fms = fitsio.read(zbestname, "FIBERMAP")
log.info(f'Read {zbestname}')
except (FileNotFoundError, OSError):
log.error(f'Missing {zbestname}')
return False
# ADM recover the information for unique targets based on the
# ADM first entry for each TARGETID.
_, ii = np.unique(fms['TARGETID'], return_index=True)
fms = fms[ii]
# ADM check for some glitches.
if len(zs) != len(set(zs["TARGETID"])):
msg = "a target is duplicated in file {}!!!".format(zbestname)
log.critical(msg)
raise ValueError(msg)
# ADM check for some glitches.
if len(zs) != len(fms):
msg = "TARGETID mismatch for extensions in file {}!!!".format(zbestname)
log.critical(msg)
raise ValueError(msg)
# ADM Strictly match the targets in the z catalog and fibermap.
zid = match_to(fms["TARGETID"], zs["TARGETID"])
# ADM set up the output zqso file, which differs depending on which
# ADM afterburners were specified.
dtswitched = switch_main_cmx_or_sv(zcatdatamodel, fms)
dt = dtswitched.dtype.descr
for flag, dm in zip([qn_flag, sq_flag, abs_flag, zcomb_flag],
[qndm, sqdm, absdm, combdm]):
if flag:
dt += dm
zcat = np.full(len(zs), -1, dtype=dt)
# ADM add the columns from the original zbest file.
zcat["RA"] = fms[zid]["TARGET_RA"]
zcat["DEC"] = fms[zid]["TARGET_DEC"]
zcat["ZTILEID"] = fms[zid]["TILEID"]
zcat["NUMOBS"] = zs["NUMTILE"]
# ADM also add the appropriate bit-columns.
Mxcols, _, _, = main_cmx_or_sv(fms, scnd=True)
for col in Mxcols:
if col in fms.dtype.names:
zcat[col] = fms[zid][col]
# SB fail on missing required columns ...
elif col in zcatdatamodel.dtype.names:
msg = f'Input fibermap missing {col}, which is required by zqso datamodel'
log.critical(msg)
raise ValueError(msg)
# SB ... but only log error about unexpectedly missing optional columns
else:
log.error(f'Input fibermap missing optional {col}; leaving it blank')
# ADM write out the unwritten columns.
allcols = set(dtswitched.dtype.names)
usedcols = set(['RA', 'DEC', 'NUMOBS', 'ZTILEID'] + Mxcols)
for col in allcols - usedcols:
zcat[col] = zs[col]
return zcat
def get_qn_model_fname(qnmodel_fname=None):
"""Convenience function to grab the $QN_MODEL_FILE environment variable.
Parameters
----------
qnmodel_fname : :class:`str`, optional, defaults to $QN_MODEL_FILE
If `qnmodel_fname` is passed, it is returned from this function. If it's
not passed, the $QN_MODEL_FILE variable is returned.
Returns
-------
:class:`str`
        If `qnmodel_fname` is passed, it is returned from this function. If it's
        not passed, the filename stored in the $QN_MODEL_FILE environment
        variable is returned.
"""
if qnmodel_fname is None:
qnmodel_fname = os.environ.get('QN_MODEL_FILE')
# EBL check that the $QN_MODEL_FILE environment variable is set.
if qnmodel_fname is None:
msg = "Pass qnmodel_fname or set $QN_MODEL_FILE environment variable!"
log.critical(msg)
raise ValueError(msg)
return qnmodel_fname
def load_qn_model(model_filename):
"""Convenience function to load the QuasarNP model and line lists.
Parameters
----------
model_filename : :class:`str`
The filename and path of the QuasarNP model. Either input by user or defaults
to get_qn_model_fname().
Returns
-------
:class:`~numpy.array`
The QuasarNP model file loaded as an array.
:class:`~numpy.array`
An array of the emission line names to be used for quasarnp.process_preds().
:class:`~numpy.array`
An array of the BAL emission line names to be used by quasarnp.process_preds().
"""
lines = ['LYA', 'CIV(1548)', 'CIII(1909)', 'MgII(2796)', 'Hbeta', 'Halpha']
lines_bal = ['CIV(1548)']
model = load_model(model_filename)
return model, lines, lines_bal
def add_qn_data(zcat, coaddname, qnp_model, qnp_lines, qnp_lines_bal):
"""Apply the QuasarNP model to the input zcat and add data to columns.
Parameters
----------
zcat : :class:`~numpy.array`
The structured array that was created by make_new_zcat()
coaddname : :class:`str`
The name of the coadd file corresponding to the zbest file used
in make_new_zcat()
qnp_model : :class:`h5.array`
The array containing the pre-trained QuasarNP model.
qnp_lines : :class:`list`
A list containing the names of the emission lines that
quasarnp.process_preds() should use.
qnp_lines_bal : :class:`list`
A list containing the names of the emission lines to check
for BAL troughs.
Returns
-------
:class:`~numpy.array`
The zcat array with QuasarNP data included in the columns:
* Z_QN - The best QuasarNP redshift for the object
* Z_QN_CONF - The confidence of Z_QN
* IS_QSO_QN - A binary flag indicated object is a quasar
"""
tmark(' Adding QuasarNP data')
data, w = load_desi_coadd(coaddname)
data = data[:, :, None]
p = qnp_model.predict(data)
c_line, z_line, zbest, *_ = process_preds(p, qnp_lines, qnp_lines_bal,
verbose=False)
cbest = np.array(c_line[c_line.argmax(axis=0), np.arange(len(zbest))])
c_thresh = 0.5
n_thresh = 1
is_qso = np.sum(c_line > c_thresh, axis=0) >= n_thresh
zcat['Z_QN'][w] = zbest
zcat['Z_QN_CONF'][w] = cbest
zcat['IS_QSO_QN'][w] = is_qso
return zcat
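# Illustrative sketch (not part of the original module): a toy worked example
# of the IS_QSO_QN criterion used in add_qn_data(). With c_thresh=0.5 and
# n_thresh=1, an object is flagged when any line confidence exceeds 0.5.
def _example_is_qso_qn():
    c_line = np.array([[0.9, 0.1],   # e.g. LYA confidence for two objects
                       [0.2, 0.3]])  # e.g. CIV(1548) confidence
    return np.sum(c_line > 0.5, axis=0) >= 1   # array([ True, False])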
def get_sq_model_fname(sqmodel_fname=None):
"""Convenience function to grab the $SQ_MODEL_FILE environment variable.
Parameters
----------
sqmodel_fname : :class:`str`, optional, defaults to $SQ_MODEL_FILE
If `sqmodel_fname` is passed, it is returned from this function. If it's
not passed, the $SQ_MODEL_FILE environment variable is returned.
Returns
-------
:class:`str`
If `sqmodel_fname` is passed, it is returned from this function. If it's
not passed, the directory stored in the $SQ_MODEL_FILE environment
variable is returned.
"""
if sqmodel_fname is None:
sqmodel_fname = os.environ.get('SQ_MODEL_FILE')
# EBL check that the $SQ_MODEL_FILE environment variable is set.
if sqmodel_fname is None:
msg = "Pass sqmodel_fname or set $SQ_MODEL_FILE environment variable!"
log.critical(msg)
raise ValueError(msg)
return sqmodel_fname
def load_sq_model(model_filename):
"""Convenience function for loading the SQUEzE model file.
Parameters
----------
model_filename : :class:`str`
The filename and path of the SQUEzE model file. Either input by user
or defaults to get_sq_model_fname().
Returns
-------
:class:`~numpy.array`
A numpy array of the SQUEzE model.
Notes
-----
- The input model file needs to be in the json file format.
"""
from squeze.common_functions import load_json
from squeze.model import Model
model = Model.from_json(load_json(model_filename))
return model
def add_sq_data(zcat, coaddname, squeze_model):
"""Apply the SQUEzE model to the input zcat and add data to columns.
Parameters
----------
zcat : :class:`~numpy.array`
The structured array that was created by make_new_zcat()
coaddname : class:`str`
The name of the coadd file corresponding to the zbest file used
in make_new_zcat()
squeze_model : :class:`numpy.array`
The loaded SQUEzE model file
Returns
-------
:class:`~numpy.array`
The zcat array with SQUEzE data included in the columns:
* Z_SQ - The best redshift from SQUEzE for each object.
* Z_SQ_CONF - The confidence value of this redshift.
"""
tmark(' Adding SQUEzE data')
from squeze.candidates import Candidates
from squeze.desi_spectrum import DesiSpectrum
from squeze.spectra import Spectra
mdata = ['TARGETID']
single_exposure = False
sq_cols_keep = ['PROB', 'Z_TRY', 'TARGETID']
tmark(' Reading spectra')
desi_spectra = read_spectra(coaddname)
# EBL Initialize squeze Spectra class
squeze_spectra = Spectra([])
# EBL Get TARGETIDs
targetid = np.unique(desi_spectra.fibermap['TARGETID'])
# EBL Loop over TARGETIDs to build the Spectra objects
for targid in targetid:
# EBL Select objects
pos = np.where(desi_spectra.fibermap['TARGETID'] == targid)
# EBL Prepare column metadata
metadata = {col.upper(): desi_spectra.fibermap[col][pos[0][0]] for col in mdata}
# EBL Add the SPECID as the TARGETID
metadata['SPECID'] = targid
# EBL Extract the data
flux = {}
wave = {}
ivar = {}
mask = {}
for band in desi_spectra.bands:
flux[band] = desi_spectra.flux[band][pos]
wave[band] = desi_spectra.wave[band]
ivar[band] = desi_spectra.ivar[band][pos]
mask[band] = desi_spectra.mask[band][pos]
# EBL Format each spectrum for the model application
spectrum = DesiSpectrum(flux, wave, ivar, mask, metadata, single_exposure)
# EBL Append the spectrum to the Spectra object
squeze_spectra.append(spectrum)
# EBL Initialize candidate object. This takes a while with no feedback
# so we want a time output for benchmarking purposes.
tmark(' Initializing candidates')
candidates = Candidates(mode='operation', model=squeze_model)
# EBL Look for candidate objects. This also takes a while.
tmark(' Looking for candidates')
candidates.find_candidates(squeze_spectra.spectra_list(), save=False)
# EBL Compute the probabilities of the line/model matches to the spectra
tmark(' Computing probabilities')
candidates.classify_candidates(save=False)
# EBL Filter the results by removing the duplicate entries for each
# TARGETID. Merge the remaining with the zcat data.
tmark(' Merging SQUEzE data with zcat')
data_frame = candidates.candidates()
data_frame = data_frame[~data_frame['DUPLICATED']][sq_cols_keep]
# EBL Strip the pandas data frame structure and put it into a numpy
# structured array first.
sqdata_arr = np.zeros(len(data_frame), dtype=[('TARGETID', 'int64'),
('Z_SQ', 'float64'),
('Z_SQ_CONF', 'float64')])
sqdata_arr['TARGETID'] = data_frame['TARGETID'].values
sqdata_arr['Z_SQ'] = data_frame['Z_TRY'].values
sqdata_arr['Z_SQ_CONF'] = data_frame['PROB'].values
# EBL SQUEzE will reorder the objects, so match on TARGETID.
zcat_args, sqdata_args = match(zcat['TARGETID'], sqdata_arr['TARGETID'])
zcat['Z_SQ'][zcat_args] = sqdata_arr['Z_SQ'][sqdata_args]
zcat['Z_SQ_CONF'][zcat_args] = sqdata_arr['Z_SQ_CONF'][sqdata_args]
return zcat
def add_abs_data(zcat, coaddname):
"""Add the MgII absorption line finder data to the input zcat array.
Parameters
----------
zcat : :class:'~numpy.array`
The structured array that was created by make_new_zcat()
coaddname : class:`str`
The name of the coadd file corresponding to the zbest file used
in make_new_zcat()
Returns
-------
:class:`~numpy.array`
The zcat array with MgII Absorption data included in the columns:
* Z_ABS - The highest redshift of MgII absorption
* Z_ABS_CONF - The confidence value for this redshift.
Notes
-----
- The original function was written by Lucas Napolitano (LGN) and
modified for this script by Eleanor Lyke (EBL).
"""
fitter = fitting.LevMarLSQFitter()
model = models.Gaussian1D()
# LGN Define constants
first_line_wave = 2796.3543
second_line_wave = 2803.5315
rf_line_sep = second_line_wave - first_line_wave
# LGN Define hyperparameters
rf_err_margain = 0.50
kernel_smooth = 2
kernel = Gaussian1DKernel(stddev=kernel_smooth)
med_filt_size = 19
snr_threshold = 3.0
qi_min = 0.01
sim_fudge = 0.94
# LGN Intialize output array.
out_arr = []
# LGN Read the coadd file and find targetid.
specobj = read_spectra(coaddname)
redrockfile = coaddname.replace('coadd', 'redrock').replace('.fits', '.h5')
# LGN Get all targetids
tids = specobj.target_ids()
# LGN Run for every quasar target on the petal.
num_rows = len(zcat)
for specnum in range(num_rows):
# LGN Grab a single targetid.
targetid = tids[specnum]
# LGN Open the redrock file and read in model fits for specific
# targetid.
targpath = f'/zfit/{targetid}/zfit'
zalt = Table.read(redrockfile, path=targpath)
# LGN If best spectype is a star we shouldn't process it.
if zalt['spectype'][0] == 'STAR':
out_arr.append([targetid, 0, 0])
continue
# LGN Define wavelength range and flux values.
# LGN Check to see if b,r, and z cameras are already coadded.
if "brz" in specobj.wave:
x_spc = specobj.wave["brz"]
y_flx = specobj.flux["brz"][specnum]
y_err = np.sqrt(specobj.ivar["brz"][specnum])**(-1.0)
# LGN If not, coadd them into "brz" using coadd_brz_cameras from
# prospect docs.
else:
wave_arr = [specobj.wave["b"],
specobj.wave["r"],
specobj.wave["z"]]
flux_arr = [specobj.flux["b"][specnum],
specobj.flux["r"][specnum],
specobj.flux["z"][specnum]]
noise_arr = [np.sqrt(specobj.ivar["b"][specnum])**(-1.0),
np.sqrt(specobj.ivar["r"][specnum])**(-1.0),
np.sqrt(specobj.ivar["z"][specnum])**(-1.0)]
x_spc, y_flx, y_err = coadd_brz_cameras(wave_arr, flux_arr,
noise_arr)
# LGN Apply a gaussian smoothing kernel using hyperparameters
# defined above.
smooth_yflx = convolve(y_flx, kernel)
# LGN Estimate the continuum using median filter.
continuum = medfilt(y_flx, med_filt_size)
# LGN Run the doublet finder.
residual = continuum - y_flx
# LGN Generate groups of data with positive residuals.
# LGN/EBL: The following is from a stackoverlow thread:
# https://stackoverflow.com/questions/3149440/python-splitting-list-based-on-missing-numbers-in-a-sequence
groups = []
for k, g in groupby(enumerate(np.where(residual > 0)[0]), lambda x: x[0] - x[1]):
groups.append(list(map(itemgetter(1), g)))
# LGN Intialize the absorbtion line list.
absorb_lines = []
for group in groups:
# LGN Skip groups of 1 or 2 data vals, these aren't worthwhile
# peaks and cause fitting issues.
if len(group) < 3:
continue
# LGN Calculate the S/N value.
snr = np.sum(residual[group]) * np.sqrt(np.sum(y_err[group]))**(-1.0)
if snr > snr_threshold:
# LGN Fit a gaussian model.
model = models.Gaussian1D(amplitude=np.nanmax(residual[group]),
mean=np.average(x_spc[group]))
fm = fitter(model=model, x=x_spc[group], y=residual[group])
# LGN Unpack the model fit data.
amp, cen, stddev = fm.parameters
absorb_lines.append([amp, cen, stddev, snr])
# LGN Extract the highest z feature and associated quality index (QI)
hz = 0
hz_qi = 0
# LGN This is particuarly poorly implemented, using range(len) so
# I can slice to higher redshift lines only more easily.
for counter in range(len(absorb_lines)):
line1 = absorb_lines[counter]
# LGN Determine redshift from model parameters.
ztemp = (line1[1] * first_line_wave**(-1.0)) - 1
# LGN If redshift is in any of the masked regions ignore it.
if 2.189 < ztemp < 2.191 or 2.36 < ztemp < 2.40:
continue
# LGN Determine line seperation and error margain scaled to
# redshift.
line_sep = rf_line_sep * (1 + ztemp)
err_margain = rf_err_margain * (1 + ztemp)
# LGN for all lines at higher redshifts.
for line2 in absorb_lines[counter+1:]:
# LGN calculate error from expected line seperation
# given the redshift of the first line.
sep_err = np.abs(line2[1] - line1[1] - line_sep)
# LGN Keep if within error margains.
if sep_err < err_margain:
# LGN Calculate the QI.
# LGN S/N similarity of lines. sim_fudge is defined
# in the hyperparameters above and
# adjusts for the first line being larger,
# kind of a fudge, won't lie.
snr_sim = sim_fudge * line1[3] * line2[3]**(-1.0)
# LGN Rescale to peak at lines having exact same S/N.
if snr_sim > 1:
snr_sim = snr_sim**(-1.0)
                    # LGN separation accuracy.
                    # Is '1' if expected separation = actual separation.
                    # Decreases to 0 as the error approaches the margin.
                    sep_acc = 1 - sep_err * err_margain**(-1.0)
qi = snr_sim * sep_acc
if ztemp > hz and qi > qi_min:
hz = ztemp
hz_qi = qi
out_arr.append([targetid, hz, hz_qi])
# EBL Add the redshift and quality index for each targetid to the
# zcat file passed to the function.
out_arr = np.array(out_arr)
    zcat_args, abs_args = match(zcat['TARGETID'], out_arr[:, 0])
    zcat['Z_ABS'][zcat_args] = out_arr[:, 1][abs_args]
    zcat['Z_ABS_CONF'][zcat_args] = out_arr[:, 2][abs_args]
return zcat
def zcomb_selector(zcat, proc_flag=False):
"""Compare results from redrock, QuasarNP, SQUEzE, and MgII data.
Parameters
----------
zcat : :class:`~numpy.array`
The structured array that was created by make_new_zcat()
proc_flag : :class:`bool`
Turn on extra comparison procedure.
Returns
-------
:class:`~numpy.array`
The zcat array with SQUEzE data included in the columns:
* Z_COMB - The best models-combined redshift for each object.
* Z_COMB_PROB - The combined probability value of that redshift.
"""
zcat['Z_COMB'][:] = zcat['Z']
zcat['Z_COMB_PROB'][:] = 0.95
return zcat
def zcat_writer(zcat, outputdir, outputname,
qn_flag=False, sq_flag=False, abs_flag=False, zcomb_flag=False,
qnp_model_file=None, squeze_model_file=None):
"""Writes the zcat structured array out as a FITS file.
Parameters
----------
zcat : :class:`~numpy.array`
The structured array that was created by make_new_zcat()
outputdir : :class:`str`
The directory where the zcat file will be written.
outputname : :class:`str`
The filename of the zqso output file.
qn_flag : :class:`bool`
Flag if QuasarNP data (or not) was added to the zcat file.
sq_flag : :class:`bool`
Flag if SQUEzE data (or not) was added to the zcat file.
abs_flag : :class:`bool`
Flag if MgII Absorption data (or not) was added to the zcat file.
zcomb_flag : :class:`bool`
Flag if a combined redshift (or not) was added to the zcat file.
qnp_model_file : :class:`str`, optional
File from which the QuasarNP model was loaded. Written to the
output header.
squeze_model_file : :class:`str`, optional
File from which the SQUEzE model was loaded. Written to the
output header.
Returns
-------
:class:`str`
The filename, with path, of the FITS file written out.
"""
tmark(' Creating output file...')
# ADM create the necessary output directory, if it doesn't exist.
os.makedirs(outputdir, exist_ok=True)
# ADM construct the fill filename.
full_outputname = os.path.join(outputdir, outputname)
# ADM create the header and add the standard DESI dependencies.
hdr = {}
add_dependencies(hdr)
add_dependencies(hdr, module_names=['quasarnp', ])
# ADM add the specific lyazcat dependencies
hdr['QN_ADDED'] = qn_flag
hdr['SQ_ADDED'] = sq_flag
hdr['AB_ADDED'] = abs_flag
hdr['ZC_ADDED'] = zcomb_flag
if qn_flag:
hdr['QNMODFIL'] = qnp_model_file
if sq_flag:
hdr['SQMODFIL'] = squeze_model_file
# ADM write out the data to the full file name.
write_with_units(full_outputname, zcat, extname='QSOZCAT', header=hdr)
return full_outputname
def create_zcat(zcatdir, outputdir, tile=None, night=None, petal_num=None,
qn_flag=False, qnp_model=None, qnp_model_file=None,
qnp_lines=None, qnp_lines_bal=None,
sq_flag=False, squeze_model=None, squeze_model_file=None,
abs_flag=False, zcomb_flag=False):
"""This will create a single zqso file from a set of user inputs.
Parameters
----------
zcatdir : :class:`str`
If any of `tile`, `night` or `petal_num` are ``None``:
The name of a redrock `zbest` file.
If none of `tile`, `night` and `petal_num` are ``None``:
The root directory from which to read `zbest` and `coadd`
spectro files. The full directory is constructed as
`zcatdir` + `tile` + `night`, with files
zbest-/coadd-`petal_num`*`night`.fits.
outputdir : :class:`str`
If any of `tile`, `night` or `petal_num` are ``None``:
The name of an output file.
If none of `tile`, `night` and `petal_num` are ``None``:
The output directory to which to write the output file.
The full directory is constructed as `outputdir` + `tile` +
`night`, with file zqso-`petal_num`*`night`.fits.
tile : :class:`str`
The TILEID of the tile to process.
night : :class:`str`
The date associated with the observation of the 'tile' used.
* Must be in YYYYMMDD format
petal_num : :class:`int`
If 'all_petals' isn't used, the single petal to create a zcat for.
qn_flag : :class:`bool`, optional
Flag to add QuasarNP data (or not) to the zcat file.
qnp_model : :class:`h5 array`, optional
The QuasarNP model file to be used for line predictions.
qnp_model_file : :class:`str`, optional
File from which to load the QuasarNP model (`qnp_model`),
`qnp_lines` and `qnp_lines_bal` if `qnp_model` is ``None``. Also
written to the output header of the zqso file.
qnp_lines : :class:`list`, optional
The list of lines to use in the QuasarNP model to test against.
qnp_lines_bal : :class:`list`, optional
The list of BAL lines to use for QuasarNP to identify BALs.
sq_flag : :class:`bool`, optional
Flag to add SQUEzE data (or not) to the zcat file.
squeze_model : :class:`numpy.array`, optional
The numpy array for the SQUEzE model file.
squeze_model_file : :class:`str`, optional
File from which to load the SQUEzE model if `squeze_model` is
``None``. Also written to the output header of the zqso file.
abs_flag : :class:`bool`, optional
Flag to add MgII Absorption data (or not) to the zcat file.
zcomb_flag : :class:`bool`, optional
Flag if a combined redshift (or not) was added to the zcat file.
Notes
-----
- Writes a FITS catalog that incorporates redrock, and a range of
afterburner redshifts and confidence values. This will write to the
same directory of the zbest and coadd files unless a different
output directory is passed.
"""
# ADM load the model files, if needed.
if qn_flag and qnp_model is None:
tmark(' Loading QuasarNP Model file and lines of interest')
qnp_model, qnp_lines, qnp_lines_bal = load_qn_model(qnp_model_file)
tmark(' QNP model file loaded')
if sq_flag and squeze_model is None:
tmark(' Loading SQUEzE Model file')
        squeze_model = load_sq_model(squeze_model_file)
tmark(' Model file loaded')
# ADM simply read/write files if tile/night/petal_num not specified.
if tile is None or night is None or petal_num is None:
zbestfn = zcatdir
coaddfn = zbestfn.replace("zbest", "coadd")
outputdir, outputname = os.path.split(outputdir)
# EBL Create the filepath for the input tile/night combination
else:
tiledir = os.path.join(zcatdir, tile)
ymdir = os.path.join(tiledir, night)
# ADM Create the corresponding output directory.
outputdir = os.path.join(outputdir, tile, night)
# EBL Create the filename tag that appends to zbest-*, coadd-*,
# and zqso-* files.
filename_tag = f'{petal_num}-{tile}-{night}.fits'
# ADM try a couple of generic options for the file names.
if not os.path.isfile(os.path.join(ymdir, f'zbest-{filename_tag}')):
filename_tag = f'{petal_num}-{tile}-thru{night}.fits'
zbestname = f'zbest-{filename_tag}'
coaddname = f'coadd-{filename_tag}'
outputname = f'zqso-{filename_tag}'
zbestfn = os.path.join(ymdir, zbestname)
coaddfn = os.path.join(ymdir, coaddname)
zcat = make_new_zcat(zbestfn, qn_flag, sq_flag, abs_flag, zcomb_flag)
if isinstance(zcat, bool):
log.info('Petal Number has no corresponding zbest file: {}'.format(zbestfn))
if not os.path.isdir(ymdir):
msg = "Directory doesn't exist: {}".format(ymdir)
log.error(msg)
raise FileNotFoundError(msg)
else:
if qn_flag:
zcat = add_qn_data(zcat, coaddfn, qnp_model, qnp_lines, qnp_lines_bal)
if sq_flag:
zcat = add_sq_data(zcat, coaddfn, squeze_model)
if abs_flag:
zcat = add_abs_data(zcat, coaddfn)
if zcomb_flag:
zcat = zcomb_selector(zcat)
full_outputname = zcat_writer(zcat, outputdir, outputname, qn_flag,
sq_flag, abs_flag, zcomb_flag,
qnp_model_file, squeze_model_file)
tmark(' --{} written out correctly.'.format(full_outputname))
log.info('='*79)
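# Illustrative sketch (not part of the original module): driving create_zcat()
# for a single petal of one tile/night. The directories, tile, night and petal
# number are placeholders; the QuasarNP model path comes from $QN_MODEL_FILE.
def _example_single_petal_zqso():
    create_zcat(zcatdir='/path/to/tiles', outputdir='/path/to/zqso-output',
                tile='80607', night='20201219', petal_num=0,
                qn_flag=True, qnp_model_file=get_qn_model_fname())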
|
desihub/desitarget
|
py/desitarget/lyazcat.py
|
Python
|
bsd-3-clause
| 30,371
|
[
"Gaussian"
] |
94b5d738f7fdd1fec41139523ae6673f0bbe007a02e8dd55c81bd669163f6246
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from adaptivemd.task import Task
from adaptivemd.file import Location, File
from adaptivemd.engine import Engine, Frame, Trajectory
class ACEMDEngine(Engine):
def __init__(self, conf_file, pdb_file, args=None):
"""
Implementation of the AceMD engine
Parameters
----------
conf_file : `File`
reference to the .conf file
pdb_file : `File`
reference to a .pdb file
args : str
arguments passed to the AceMD command line
"""
super(ACEMDEngine, self).__init__()
self._items = dict()
self['pdb_file'] = pdb_file
self['conf_file'] = conf_file
for name, f in self.files.items():
stage = f.transfer(Location('staging:///'))
self[name + '_stage'] = stage.target
self.initial_staging.append(stage)
if args is None:
args = ''
self.args = args
@property
def call_format_str(self):
return 'acemd %s {0}' % self.args
def run(self, target):
return None
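# Illustrative sketch (not part of the original module): constructing the
# engine from input files. The paths and the acemd argument string are
# placeholders, and real use requires an adaptivemd Project for staging.
def _example_engine():
    pdb = File('file://input.pdb')
    conf = File('file://input.conf')
    return ACEMDEngine(conf_file=conf, pdb_file=pdb, args='--device 0')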
|
markovmodel/adaptivemd
|
adaptivemd/engine/acemd/acemd.py
|
Python
|
lgpl-2.1
| 2,101
|
[
"ACEMD",
"MDTraj"
] |
8cc7f777fe01fba588634e0374bf7de5c540b150943caaf711ca8858f5624b8b
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of solus-sc
#
# Copyright © 2014-2018 Ikey Doherty <ikey@solus-project.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
from gi.repository import Gio, Gtk, GLib
from .screenshot_view import ScScreenshotView
from .util.markdown import SpecialMarkdownParser
from .plugins.base import ItemStatus, ItemLink
from gi.repository import AppStreamGlib as As
class ScLinkLabel(Gtk.Label):
""" Simple widget to provide links between items """
__gtype_name__ = "ScLinkLabel"
item = None # The item we link to
def __init__(self, context, item):
Gtk.Label.__init__(self)
self.item = item
self.set_halign(Gtk.Align.START)
id = item.get_id()
# Get an AppSystem name for the item
name = context.appsystem.get_name(
id,
item.get_name(),
item.get_store())
# Mark this guy installed already
if item.has_status(ItemStatus.INSTALLED):
name += " " + _("(installed)")
self.get_style_context().add_class("dim-label")
name = " • {}".format(name)
self.set_markup(name)
class ScLinksBox(Gtk.Box):
""" Links to foreign packages """
__gtype_name__ = "ScLinksBox"
listbox_links = None
scroller = None
def __init__(self, context, title):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)
self.context = context
build_header_section(title, self)
# Build scrolledwindow to take nasty background off listbox
self.scroller = Gtk.ScrolledWindow.new(None, None)
self.scroller.set_margin_start(30)
self.scroller.set_margin_end(150)
self.scroller.set_margin_top(6)
self.scroller.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.NEVER)
self.pack_start(self.scroller, False, False, 0)
self.listbox_links = Gtk.ListBox.new()
self.listbox_links.set_selection_mode(Gtk.SelectionMode.NONE)
self.scroller.add(self.listbox_links)
def update(self, item, reason):
""" Update our links based on the new item """
# Kill old widgets
for sprog in self.listbox_links.get_children():
sprog.destroy()
# No reason to show. As it were
if reason not in item.links:
self.hide()
return
# Walk links and render them
for link in item.links[reason]:
lab = ScLinkLabel(self.context, link)
lab.show_all()
self.listbox_links.add(lab)
# Make sure we're now visible
self.show()
class ScDetailsView(Gtk.Box):
""" Shows details for a selected ProviderItem
The details view is effectively the pretty view with all the relevant
package/software details, screenshots, and actions to invoke removal,
installation, etc.
"""
__gtype_name__ = "ScDetailsView"
context = None
item = None
# Header widgets
header_name = None
header_image = None
header_summary = None
# TODO: Make less dumb
header_action_remove = None
header_action_install = None
header_action_upgrade = None
header_action_launch = None
launch_info = None
stack = None
stack_switcher = None
screenie_view = None
# We actually put lots of labels in this guy.
description_box = None
parser = None
label_version = None
label_version_id = None
label_website = None
label_bugsite = None
label_donate = None
label_developer = None
changelog_view = None
links_virtual = None # Providers on virtual packages
links_enhance = None # Software that enhances this software..
def get_page_name(self):
return self.header_name.get_text()
def __init__(self, context):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)
self.context = context
self.parser = SpecialMarkdownParser()
self.build_header()
self.show_all()
def set_item(self, item):
""" Update our UI for the current item """
if item == self.item:
return
# Only show changelog for supported items
self.changelog_view.set_visible(
item.has_status(ItemStatus.META_CHANGELOG))
self.launch_info = None
self.item = item
# Grab the app
apps = self.context.appsystem
store = item.get_store()
id = item.get_id()
# Update main header
self.header_name.set_markup(apps.get_name(id, item.get_name(), store))
self.header_summary.set_markup(
apps.get_summary(id, item.get_summary(), store))
apps.set_image_from_item(self.header_image, item, store)
self.header_image.set_pixel_size(64)
if self.item.has_status(ItemStatus.INSTALLED):
launch_id = apps.get_launchable_id(id, store)
if launch_id is not None:
try:
self.launch_info = Gio.DesktopAppInfo.new(launch_id)
except Exception as e:
self.launch_info = None
print("Request AppStream data rebuild for: {}".format(
launch_id))
print(e)
# Now set the screenshot ball in motion
self.screenie_view.set_item(item)
self.update_description()
self.update_actions()
self.update_details()
self.update_links()
# Always re-focus to details
self.stack.set_visible_child_name("details")
def build_header(self):
""" Build our main header area """
box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)
box.set_margin_top(15)
box.set_margin_left(15)
box.set_margin_right(15)
box_main_wrap = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
box_main_wrap.pack_start(box, False, False, 0)
ebox = Gtk.EventBox()
ebox.add(box_main_wrap)
ebox.get_style_context().add_class("details-header")
self.pack_start(ebox, False, False, 0)
self.header_name = Gtk.Label("")
self.header_name.get_style_context().add_class("huge-label")
self.header_image = Gtk.Image()
self.header_image.set_pixel_size(64)
self.header_image.set_margin_end(24)
self.header_image.set_margin_start(12)
box.pack_start(self.header_image, False, False, 0)
details_box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
box.pack_start(details_box, True, True, 0)
# name
self.header_name.set_halign(Gtk.Align.START)
details_box.pack_start(self.header_name, True, True, 0)
# summary
self.header_summary = Gtk.Label("")
self.header_summary.set_margin_top(6)
self.header_summary.set_margin_bottom(3)
self.header_summary.set_halign(Gtk.Align.START)
details_box.pack_start(self.header_summary, False, False, 0)
# Install thing
self.header_action_install = Gtk.Button("Install")
self.header_action_install.connect('clicked',
self.on_install_clicked)
self.header_action_install.set_valign(Gtk.Align.CENTER)
self.header_action_install.set_no_show_all(True)
self.header_action_install.get_style_context().add_class(
"suggested-action")
self.header_action_install.set_margin_end(2)
box.pack_end(self.header_action_install, False, False, 0)
# Remove thing
self.header_action_remove = Gtk.Button("Remove")
self.header_action_remove.set_margin_end(2)
self.header_action_remove.connect('clicked',
self.on_remove_clicked)
self.header_action_remove.set_valign(Gtk.Align.CENTER)
self.header_action_remove.set_no_show_all(True)
self.header_action_remove.get_style_context().add_class(
"destructive-action")
box.pack_end(self.header_action_remove, False, False, 0)
# Upgrade thing
self.header_action_upgrade = Gtk.Button("Upgrade")
self.header_action_upgrade.set_margin_end(2)
self.header_action_upgrade.set_valign(Gtk.Align.CENTER)
self.header_action_upgrade.set_no_show_all(True)
self.header_action_upgrade.get_style_context().add_class(
"suggested-action")
box.pack_end(self.header_action_upgrade, False, False, 0)
self.header_action_launch = Gtk.Button.new_from_icon_name(
"document-open-symbolic", Gtk.IconSize.BUTTON)
self.header_action_launch.set_margin_end(4)
self.header_action_launch.set_tooltip_text(_("Launch"))
self.header_action_launch.connect('clicked',
self.on_launch_clicked)
self.header_action_launch.set_valign(Gtk.Align.CENTER)
self.header_action_launch.set_no_show_all(True)
self.header_action_launch.set_relief(Gtk.ReliefStyle.NONE)
box.pack_end(self.header_action_launch, False, False, 0)
self.stack = Gtk.Stack()
self.stack.set_homogeneous(False)
self.stack_switcher = Gtk.StackSwitcher()
self.stack_switcher.show_all()
self.stack_switcher.set_no_show_all(True)
self.stack_switcher.set_halign(Gtk.Align.CENTER)
self.stack_switcher.set_stack(self.stack)
self.stack.set_transition_type(
Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
box_main_wrap.pack_start(self.stack_switcher, False, False, 0)
self.pack_start(self.stack, True, True, 0)
# Dummy pages for now
self.build_details()
self.changelog_view = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
self.changelog_view.show_all()
self.changelog_view.set_no_show_all(True)
self.stack.add_titled(self.changelog_view, "changelog", "Changelog")
def build_details(self):
""" Build the main 'Details' view """
box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
box.set_margin_start(40)
box.set_margin_end(40)
box.set_margin_bottom(40)
self.stack.add_titled(box, "details", "Details")
# Allocate our screenshot view area
self.screenie_view = ScScreenshotView(self.context)
self.screenie_view.set_halign(Gtk.Align.CENTER)
box.pack_start(self.screenie_view, False, False, 0)
build_header_section(_("Description"), box)
# A place to have our description
self.description_box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
self.description_box.set_margin_end(150)
self.description_box.set_margin_start(30)
box.pack_start(self.description_box, False, False, 0)
self.build_links(box)
self.build_details_grid(box)
def build_links(self, box):
""" Build a set of links to foreign packages """
# First up is our virtual providers
self.links_virtual = ScLinksBox(self.context, _("Providers"))
box.pack_start(self.links_virtual, False, False, 0)
self.links_virtual.show_all()
self.links_virtual.set_no_show_all(True)
# Now we'll have our enhance list
self.links_enhance = ScLinksBox(self.context, _("Related software"))
box.pack_start(self.links_enhance, False, False, 0)
self.links_enhance.show_all()
self.links_enhance.set_no_show_all(True)
def build_details_grid(self, box):
""" Build the detailed information grid for each item """
grid = Gtk.Grid.new()
grid.set_margin_end(150)
grid.set_margin_start(30)
grid.set_column_spacing(6)
grid.set_row_spacing(12)
grid.set_valign(Gtk.Align.START)
build_header_section(_("Information"), box)
# Attach grid to the view
box.pack_start(grid, False, False, 0)
self.label_version = Gtk.Label.new("")
self.label_version.set_halign(Gtk.Align.START)
self.label_version.show_all()
self.label_version.set_no_show_all(True)
desc = Gtk.Label.new(_("Version"))
desc.set_halign(Gtk.Align.START)
desc.set_use_markup(True)
self.label_version_id = desc
self.label_version_id.show_all()
self.label_version_id.set_no_show_all(True)
# column row
grid.attach(desc, 0, 0, 1, 1)
grid.attach(self.label_version, 1, 0, 1, 1)
# create buttons for website, donations, etc
self.label_website = Gtk.LinkButton.new(_("Visit website"))
self.label_website.get_style_context().add_class("flat")
self.label_bugsite = Gtk.LinkButton.new(_("Report a bug"))
self.label_bugsite.get_style_context().add_class("flat")
self.label_donate = Gtk.LinkButton.new(_("Make a donation"))
self.label_donate.get_style_context().add_class("flat")
button_box = Gtk.ButtonBox.new(Gtk.Orientation.HORIZONTAL)
button_box.set_halign(Gtk.Align.END)
button_box.set_hexpand(True)
button_box.set_layout(Gtk.ButtonBoxStyle.END)
button_box.add(self.label_website)
button_box.add(self.label_bugsite)
button_box.add(self.label_donate)
button_box.show_all()
self.label_website.set_no_show_all(True)
self.label_bugsite.set_no_show_all(True)
self.label_donate.set_no_show_all(True)
grid.attach(button_box, 2, 0, 1, 1)
self.label_developer = Gtk.Label.new("")
self.label_developer.set_margin_top(6)
self.label_developer.set_margin_end(6)
self.label_developer.set_halign(Gtk.Align.END)
self.label_developer.set_hexpand(True)
self.label_developer.show_all()
self.label_developer.set_no_show_all(True)
grid.attach(self.label_developer, 2, 1, 1, 1)
def update_description(self):
# I have become GTK - Destroyer Of Children
for child in self.description_box.get_children():
child.destroy()
id = self.item.get_id()
fallback = self.item.get_description()
store = self.item.get_store()
desc = self.context.appsystem.get_description(id, fallback, store)
plain = As.markup_convert(desc, As.MarkupConvertFormat.MARKDOWN)
lines = []
try:
self.parser.consume(plain)
lines = self.parser.emit()
except Exception as e:
print("Parsing error: {}".format(e))
plain = As.markup_convert_simple(desc)
lines = plain.split("\n")
for line in lines:
lab = Gtk.Label(line)
lab.set_use_markup(True)
lab.set_halign(Gtk.Align.START)
lab.set_line_wrap(True)
lab.set_property("xalign", 0.0)
lab.set_property("margin", 2)
lab.set_margin_bottom(4)
self.description_box.pack_start(lab, False, False, 0)
lab.show_all()
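    # Hedged aside (added for illustration; not part of solus-sc): As.markup_convert
    # is the same AppStreamGlib call used above; the XML snippet here is invented.
    def _demo_markup_convert(self):
        xml = "<p>Fast viewer.</p><ul><li>Zoom</li><li>Rotate</li></ul>"
        return As.markup_convert(xml, As.MarkupConvertFormat.MARKDOWN)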
def update_actions(self):
""" Update actions for the given item """
# Special case for hardware, none of the buttons will do anything.
if self.item.has_status(ItemStatus.META_VIRTUAL):
self.header_action_install.hide()
self.header_action_launch.hide()
self.header_action_remove.hide()
self.header_action_upgrade.hide()
return
# Normal software
if self.item.has_status(ItemStatus.INSTALLED):
self.header_action_remove.show()
self.header_action_install.hide()
if self.launch_info is not None:
self.header_action_launch.show()
else:
self.header_action_remove.hide()
self.header_action_install.show()
# Disable remove button if dangerous!
if self.item.has_status(ItemStatus.META_ESSENTIAL):
self.header_action_remove.set_sensitive(False)
else:
self.header_action_remove.set_sensitive(True)
if self.item.has_status(ItemStatus.UPDATE_NEEDED):
self.header_action_upgrade.show()
else:
self.header_action_upgrade.hide()
# Hide launch info once more
if not self.launch_info:
self.header_action_launch.hide()
def update_details(self):
""" Update extra detail labels from the selected package """
version = self.item.get_version()
# Only render version if we have one.
if not version:
self.label_version.hide()
self.label_version_id.hide()
else:
self.label_version.show()
self.label_version_id.show()
self.label_version.set_markup("<b>{}</b>".format(
self.item.get_version()))
id = self.item.get_id()
store = self.item.get_store()
# Main website
site = self.context.appsystem.get_website(id, store)
if site:
self.label_website.set_uri(site)
self.label_website.set_visited(False)
self.label_website.set_visible(site is not None)
# Bug website
site = self.context.appsystem.get_bug_site(id, store)
if site:
self.label_bugsite.set_uri(site)
self.label_bugsite.set_visited(False)
self.label_bugsite.set_visible(site is not None)
# Donate website
site = self.context.appsystem.get_donation_site(id, store)
if site:
self.label_donate.set_uri(site)
self.label_donate.set_visited(False)
self.label_donate.set_visible(site is not None)
dev = self.context.appsystem.get_developers(id, store)
if dev:
developers = GLib.markup_escape_text(dev)
self.label_developer.set_markup("Developed by <b>{}</b>".format(
developers))
else:
self.label_developer.set_markup("")
self.label_developer.set_visible(dev is not None)
def update_links(self):
""" Deal with ItemLink reasons """
self.links_virtual.update(self.item, ItemLink.PROVIDES)
self.links_enhance.update(self.item, ItemLink.ENHANCES)
def on_install_clicked(self, btn, udata=None):
""" User clicked install """
self.context.begin_install(self.item)
def on_remove_clicked(self, btn, udata=None):
""" User clicked remove """
self.context.begin_remove(self.item)
def on_launch_clicked(self, btn, udata=None):
""" User clicked launch """
self.launch_info.launch(None, None)
def build_header_section(label, pack_target):
""" Build a fancy header section and put it into pack_target """
# Header for the information
lab = Gtk.Label.new(label)
lab.set_use_markup(True)
lab.set_halign(Gtk.Align.START)
lab.set_margin_start(30)
lab.set_margin_top(30)
lab.get_style_context().add_class("dim-label")
pack_target.pack_start(lab, False, False, 0)
# Visually separate this information now
sep = Gtk.Separator.new(Gtk.Orientation.HORIZONTAL)
sep.set_margin_start(30)
sep.set_margin_end(150)
sep.set_margin_top(8)
sep.set_margin_bottom(15)
pack_target.pack_start(sep, False, False, 0)
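# Hedged usage sketch (added; not part of solus-sc): build_header_section()
# only needs a label string and a container that supports pack_start(), so a
# minimal standalone use looks like this (assumes GTK has been initialised):
def _demo_header_section():
    box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
    build_header_section("Information", box)
    return box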
|
solus-project/evolve-sc
|
xng/details.py
|
Python
|
gpl-2.0
| 19,486
|
[
"VisIt"
] |
3744d2ceb3a3faca59f29b7b196e2c966ba6b33ac129954c7b547761e3799872
|
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2015 Hans Hoogenboom, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import json
import glob
import os.path
import sys
import argparse
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
import datetime
import getpass
import subprocess
FileTypes = {'.oso' : "openshadinglanguage"}
# metadata according to the OSL specification
_shaderTypes = ["surface", "displacement", "light", "volume", "shader"]
_dataTypes = ["int", "float", "point", "vector", "normal", "color", "matrix", "string", "void"]
_shaderKeys = ["name", "label", "type", "help", "url", "value", "page", "widget", "units"]
# These OSL parameters are not part of the official shading language; they are guidelines
# for how to build the shader's interface inside a third-party program. Not yet decided what to do with them...
#_parmWidgets = ["number", "string", "boolean", "checkBox", "popup", "mapper", "filename", "null"]
#_parmInteger = ["min", "max", "sensitivity", "slider"]
#_parmFloat = _parmInteger + ["digits"]
#_parmSlider = ["slidermin", "slidermax", "slidercenter", "sliderexponent"]
#_parmKeyword = ["output"]
#----------------------------------------------------------
# Functions to sanitize olsinfo output
#----------------------------------------------------------
def _error(msg, crash=False):
sys.stderr.write(msg)
sys.stderr.write('\n')
if crash:
sys.exit(1)
return False
def _fatalError(msg):
_error(msg,True)
def _formatVal(st):
value = st.replace('"','',2)
value = value.strip()
return value
def _getKeyValue(st):
signPos = st.index('=')
value = st[signPos+1:]
key = st[:signPos-1]
key = key.split()
key = key[-1].strip()
return (key, value)
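# Hedged illustration (the metadata line below is invented, not captured from a
# real oslinfo run): shows how the two helpers above split and clean a line.
def _demo_key_value():
    key, value = _getKeyValue('metadata: string help = "Diffuse layer weight"')
    return key, _formatVal(value)      # -> ('help', 'Diffuse layer weight')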
#----------------------------------------------------------
# File handling
#----------------------------------------------------------
def isValidFile(filename, filetypes):
(head, tail) = os.path.splitext(filename)
return (os.path.isfile(filename) and tail in filetypes)
def isValidExtension(fp, filetypes):
return (os.path.splitext(fp)[1] in filetypes)
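# Hedged illustration: only the '.oso' extension registered in FileTypes passes.
def _demo_extension_check():
    return (isValidExtension('matte.oso', FileTypes),
            isValidExtension('matte.osl', FileTypes))   # -> (True, False)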
def createFileList(filetypes, osl_cfg, recursive=False, args=None, pathfile=None):
filelist = list()
# files/dirs from external file
if pathfile:
for fp in pathfile:
try:
fp = open(pathfile)
for line in fp:
filelist.append(line)
fp.close()
except:
_error("Could not read from file %s" % pathfile)
# files/dirs from command line arguments
if args:
for arg in args:
filelist.append(arg)
# files/dirs from config file
osl_dir = osl_cfg.get('settings', 'osldir')
if len(osl_dir) > 0:
osldir_list = osl_dir.split(',')
for arg in osldir_list:
filelist.append(arg)
# expand vars
args_expanded = list()
for arg in filelist:
args_expanded.append(os.path.expandvars(arg))
# clear filelist and glob
filelist = list()
for arg in args_expanded:
filelist.extend([x for x in glob.iglob(arg)])
# split files from directories
dirlist = list()
dirlist = [x for x in filelist if os.path.isdir(x)]
filelist[:] = [x for x in filelist if isValidFile(x, filetypes)]
# travel directories and add shader files to filelist
for directory in dirlist:
if recursive:
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
(head, tail) = os.path.splitext(filename)
if tail in filetypes:
filelist.append(os.path.join(dirpath, filename))
else:
dirpath, dirnames, filenames = next(os.walk(directory))
for filename in filenames:
(head, tail) = os.path.splitext(filename)
if tail in filetypes:
filelist.append(os.path.join(dirpath, filename))
# clear duplicate entries, do not care for order
filelist = list (set(filelist))
# if there are no files/paths quit
if len(filelist) < 1:
_fatalError("No files or directories found, exiting.")
return filelist
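# Hedged usage sketch (paths are made up; the 'settings' section mirrors what
# main() reads from oslextractmeta.conf). Note that createFileList() exits via
# _fatalError() when nothing matches.
def _demo_file_list():
    cfg = ConfigParser({'oslpath': '/usr/bin/oslinfo'})
    cfg.add_section('settings')
    cfg.set('settings', 'osldir', '$HOME/shaders/*.oso')
    return createFileList(FileTypes, cfg, recursive=False, args=['extra/*.oso'])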
#----------------------------------------------------------
# Functions for parsing *.oso files
#----------------------------------------------------------
def parseOslInfo(compiledShader, osl_cfg):
oslpath = osl_cfg.get('settings', 'oslpath')
if os.path.isfile(oslpath):
cmd = str(oslpath) + ' -v %s' % compiledShader
else:
cmd = 'oslinfo -v %s' % compiledShader
cmd = cmd.split()
try:
# universal_newlines gives text (not bytes) output on both Python 2.7 and 3
fp = subprocess.check_output(cmd, universal_newlines=True)
except subprocess.CalledProcessError as fp_ret:
_fatalError("Could not run oslinfo, exiting.\nReturncode: %s" % fp_ret.returncode)
# check if output of oslinfo is correct
# if false skip shader and write error message to console
lines = fp.splitlines()
if not lines:
_error('Missing shader definition for %s' % compiledShader)
return False
count = 0
shaderDef = lines[ count ]
args = shaderDef.split()
# tempShader stores all the data
tempShader = dict()
# store the order in which oslinfo outputs its data
# and separate the parameters from general shader data
parmlist = list()
if args[0] not in _shaderTypes:
_error("Not a valid shader type: %s" % args[0])
return False
else:
tempShader['type'] = _formatVal(args[0])
tempShader['name'] = _formatVal(args[1])
tempShader['hasMetaData'] = False
tempShader['hasParmHelp'] = False
# parse the rest of the file to get parameters
# number of entries in lines
length = len(lines) - 1
# lines iterator
count = 1
while True:
line = lines[ count ]
if not line:
_error("No more lines to read, invalid shader %s?" % compiledShader)
break
args = line.split()
# find parameter name
if args[0] not in ["Default", "metadata:"]: # or args[0] == "export":
tempparm = dict()
if len(args) < 3:
tempparm['name'] = _formatVal(args[0])
tempparm['type'] = _formatVal(args[1])
else:
tempparm['output'] = True
tempparm['name'] = _formatVal(args[0])
tempparm['type'] = _formatVal(args[2])
condition = True
widget = str()
while condition:
# read next line
count += 1
if count > length:
break
line = lines[count]
parmargs = line.split()
if parmargs[0] == "Default":
tempparm['value'] = _formatVal(' '.join(parmargs[2:]))
elif parmargs[0] == "metadata:":
(key, value) = _getKeyValue(line)
value = _formatVal(value)
if key != 'widget':
tempparm[key] = value
else:
widget = value
else:
condition = False
# move one line back
count -= 1
if len(widget) > 0 and 'widget' not in tempparm:
tempparm['widget'] = widget
tempShader[tempparm['name']] = tempparm
parmlist.append(tempparm['name'])
if 'help' in tempparm:
tempShader['hasParmHelp'] = True
# we didn't find a parameter yet, so there must be some general stuff
else:
    if args[0] == "metadata:":
        (key, value) = _getKeyValue(line)
        value = _formatVal(value)
        # shader-level metadata belongs on the shader itself, not on the last parameter
        tempShader[key] = value
        tempShader['hasMetaData'] = True
if count > length:
break
else:
count += 1
# parsed all lines
tempShader['parmlist'] = parmlist
return tempShader
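# Hedged illustration (made-up shader, not real oslinfo output): parseOslInfo()
# returns a flat dict roughly shaped like this for a one-parameter shader.
_EXAMPLE_PARSED_SHADER = {
    'type': 'surface',
    'name': 'matte',
    'hasMetaData': False,
    'hasParmHelp': True,
    'parmlist': ['Kd'],
    'Kd': {'name': 'Kd', 'type': 'float', 'value': '1', 'help': 'diffuse weight'},
}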
def parseShaderInfo(compiledShader, FileTypes, osl_cfg):
(name, extension) = os.path.splitext(compiledShader)
shaderUI = None
if extension == '.oso':
shaderUI = parseOslInfo(compiledShader, osl_cfg)
if not shaderUI:
_error("Could not process %s" % compiledShader)
return None
else:
compShader = dict()
compShader['name'] = shaderUI['name']
compShader['path'] = compiledShader
compShader['mtime'] = str(os.path.getmtime(compiledShader))
compShader['ctime'] = str(datetime.datetime.now())
compShader['language']= FileTypes[extension]
# holds the output of parseOslInfo (the actual shader metadata/ui)
compShader['ui'] = shaderUI
return compShader
#----------------------------------------------------------
# Functions for handling the shader dictionary
#----------------------------------------------------------
def getNumberOfShaders(jsonFile):
return len(jsonFile['shaders'])
def cleanJsonShaders(jsonDict):
    num_del = 0
    # iterate over a copy of the keys so entries can be deleted safely
    for shaderpath in list(jsonDict.keys()):
        if not os.path.isfile(shaderpath):
            del jsonDict[shaderpath]
            num_del += 1
    return (num_del, jsonDict)
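# Hedged illustration (the path is deliberately nonexistent):
def _demo_clean():
    removed, remaining = cleanJsonShaders({'/no/such/shader.oso': {'name': 'gone'}})
    return removed, remaining          # -> (1, {})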
def existsJsonShader(jsonFile, shaderName):
    # only report False once every stored shader has been checked
    for shader in jsonFile['shaders'].values():
        if shader['name'] == shaderName:
            return True
    return False
def writeJsonHeader(filename, numElements):
headerDict = dict()
headerDict['creator'] = getpass.getuser()
headerDict['creation date'] = str(datetime.datetime.now())
headerDict['name'] = os.path.basename(filename)
headerDict['elements'] = numElements
headerDict['last update'] = str(datetime.datetime.now())
return headerDict
def updateJsonHeader(jsonFile, numElements):
headerDict = jsonFile
headerDict['last update'] = str(datetime.datetime.now())
headerDict['elements'] = numElements
return headerDict
def cli():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description = '''
oslextractmetadata stores the user interface and metadata of a
compiled OSL (openshadinglanguage) shader(s) into a JSON file.
The JSON dictionary consists of a 'header' and a 'shaders' part.
jsondict['shaders'] will return a dictionary with all shaders. The
user interface of the shader is stored as a sub-dictionary; the
metadata can be retrieved using the 'ui' key on the elements, e.g.:
for x in jsondict['shaders'].values():
    print(x['ui'])
''')
parser.add_argument('-i', '--input', nargs='+', action='store', dest='files', metavar='compiled shaders',help='List of file(s) to parse.')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbosity', help='Increase output verbosity.')
parser.add_argument('-o', '--output', nargs=1, action='store', dest='output', required=True, metavar='output file', help="Store shader UI in file.")
parser.add_argument('-f', '--file', nargs='+', action='store', dest='read_file', metavar='file', help="Read file paths from file(s).")
parser.add_argument('-U', '--update', action='store_true', dest='update', help="Update existing shader file.")
parser.add_argument('-O', '--overwrite', action='store_true', dest='overwrite', help="Overwrite existing files.")
parser.add_argument('-c', '--clean', action='store_true', dest='clean', help="Clean file, remove non existant shaders.")
parser.add_argument('-r', '--recursive', action='store_true', dest='recursive', help="Add directories recursively.")
args = parser.parse_args()
# user input checks
output = args.output[0]
existingFile = os.path.exists(output)
if not existingFile:
args.overwrite = False
args.update = False
args.clean = False
if args.overwrite:
args.update = False
args.clean = False
return (args, output, existingFile)
#----------------------------------------------------------
# Main body
#----------------------------------------------------------
def main():
(args, output, existingFile) = cli()
# read configuration file
cfg_defaults = {'oslpath' : '/usr/bin/oslinfo'}
osl_cfg = ConfigParser(cfg_defaults)
osl_cfg.read('oslextractmeta.conf')
# create list of files specified on cli or read from file
files = createFileList(FileTypes, osl_cfg, args.recursive, args.files, args.read_file)
# parse files for shader metadata
shaders = dict()
for shaderfile in files:
if args.verbosity:
print("Processing file %s" % shaderfile)
shaderUI = parseShaderInfo(shaderfile, FileTypes, osl_cfg)
if shaderUI:
shaders[shaderUI['path']] = shaderUI
jsonDict = dict()
# retrieve existing values in case of updating or cleaning
if existingFile and not args.overwrite:
with open(output, 'r') as fp:
try:
jsonDict = json.load(fp)
except:
_fatalError("JSON object could not be decoded.")
# create/update/clean json shader and header dictionaries
changes = 0
if args.clean:
(changes, jsonDict['shaders']) = cleanJsonShaders(jsonDict['shaders'])
if args.verbosity:
print("Removed %s shaders." % changes)
if args.update:
changes = len(shaders)
jsonDict['shaders'].update(shaders)
if args.verbosity:
print("%s shaders updated." % changes)
if args.overwrite:
changes = len(shaders)
jsonDict['header'] = writeJsonHeader(output, changes)
jsonDict['shaders'] = shaders
if args.verbosity:
print("%s shaders added to %s" % (changes, output))
# only adding new shaders
else:
temp_changes = changes
if 'shaders' in jsonDict:
existing_keys = jsonDict['shaders'].keys()
for key in shaders:
if key not in existing_keys:
jsonDict['shaders'][key] = shaders[key]
changes += 1
else:
jsonDict['shaders'] = shaders
changes = len(shaders)
if args.verbosity:
added_shaders = changes - temp_changes
print("Added %s shaders." % added_shaders)
# write to file shaders to file if changed
if existingFile and changes:
with open(output, 'w') as fp:
fp.seek(0)
fp.truncate()
jsonDict['header'] = updateJsonHeader(jsonDict['header'], len(jsonDict['shaders']))
json.dump(jsonDict, fp)
elif not existingFile and changes:
with open(output, 'w') as fp:
jsonDict['header'] = writeJsonHeader(output, len(shaders))
json.dump(jsonDict, fp)
elif args.verbosity:
print("No shaders found for adding to %s, exiting." % output)
return 0
# call main function
if __name__ == "__main__":
main()
|
Vertexwahn/appleseed
|
scripts/oslextractmeta.py
|
Python
|
mit
| 16,460
|
[
"VisIt"
] |
88db088d6436a500423fab97f6287cbb0907f03a514ae58e7db23057a0a4f11b
|
# This code simulates something the user would like to do. In this
# case the code allows a user to create a 3D cube of data (a numpy
# array), specify an equation for the scalars and view it using the
# mayavi plugin. The only "envisage bits" are the code that lets one
# grab the running mayavi instance and script it. The application
# trait is set by Envisage and we use the application to get hold of
# the mayavi engine. Then we show the data once the mayavi engine has
# started.
# Standard library imports.
import numpy
# Enthought library imports
from traits.api import HasTraits, Button, Instance, \
Any, Str, Array
from traitsui.api import Item, View, TextEditor
######################################################################
# `Explorer3D` class.
######################################################################
class Explorer3D(HasTraits):
"""This class basically allows you to create a 3D cube of data (a
numpy array), specify an equation for the scalars and view it
using the mayavi plugin.
"""
########################################
# Traits.
# Set by envisage when this is offered as a service offer.
window = Instance('pyface.workbench.api.WorkbenchWindow')
# The equation that generates the scalar field.
equation = Str('sin(x*y*z)/(x*y*z)',
desc='equation to evaluate (enter to set)',
auto_set=False,
enter_set=True)
# Dimensions of the cube of data.
dimensions = Array(value=(128, 128, 128),
dtype=int,
shape=(3,),
cols=1,
labels=['nx', 'ny', 'nz'],
desc='the array dimensions')
# The volume of interest (VOI).
volume = Array(dtype=float,
value=(-5,5,-5,5,-5,5),
shape=(6,),
cols=2,
labels=['xmin','xmax','ymin','ymax','zmin','zmax'],
desc='the volume of interest')
# Clicking this button resets the data with the new dimensions and
# VOI.
update_data = Button('Update data')
########################################
# Private traits.
# Our data source.
_x = Array
_y = Array
_z = Array
data = Array
source = Any
_ipw1 = Any
_ipw2 = Any
_ipw3 = Any
########################################
# Our UI view.
view = View(Item('equation', editor=TextEditor(auto_set=False,
enter_set=True)),
Item('dimensions'),
Item('volume'),
Item('update_data', show_label=False),
resizable=True,
scrollable=True,
)
######################################################################
# `object` interface.
######################################################################
def __init__(self, **traits):
super(Explorer3D, self).__init__(**traits)
# Make some default data.
if len(self.data) == 0:
self._make_data()
# Note: to show the visualization by default we must wait till
# the mayavi engine has started. To do this we hook into the
# mayavi engine's started event and setup our visualization.
# Now, when this object is constructed (i.e. when this method
# is invoked), the services are not running yet and our own
# application instance has not been set. So we can't even
# get hold of the mayavi instance. So, we do the hooking up
# when our application instance is set by listening for
# changes to our application trait.
def get_mayavi(self):
from mayavi.plugins.script import Script
return self.window.get_service(Script)
######################################################################
# Non-public methods.
######################################################################
def _make_data(self):
dims = self.dimensions.tolist()
np = dims[0]*dims[1]*dims[2]
xmin, xmax, ymin, ymax, zmin, zmax = self.volume
x, y, z = numpy.ogrid[xmin:xmax:dims[0]*1j,
ymin:ymax:dims[1]*1j,
zmin:zmax:dims[2]*1j]
self._x = x.astype('f')
self._y = y.astype('f')
self._z = z.astype('f')
self._equation_changed('', self.equation)
def _show_data(self):
if self.source is not None:
return
mayavi = self.get_mayavi()
if mayavi.engine.current_scene is None:
mayavi.new_scene()
from mayavi.sources.array_source import ArraySource
vol = self.volume
origin = vol[::2]
spacing = (vol[1::2] - origin)/(self.dimensions -1)
src = ArraySource(transpose_input_array=False,
scalar_data=self.data,
origin=origin,
spacing=spacing)
self.source = src
mayavi.add_source(src)
from mayavi.modules.outline import Outline
from mayavi.modules.image_plane_widget import ImagePlaneWidget
from mayavi.modules.axes import Axes
# Visualize the data.
o = Outline()
mayavi.add_module(o)
a = Axes()
mayavi.add_module(a)
self._ipw1 = ipw = ImagePlaneWidget()
mayavi.add_module(ipw)
ipw.module_manager.scalar_lut_manager.show_scalar_bar = True
self._ipw2 = ipw_y = ImagePlaneWidget()
mayavi.add_module(ipw_y)
ipw_y.ipw.plane_orientation = 'y_axes'
self._ipw3 = ipw_z = ImagePlaneWidget()
mayavi.add_module(ipw_z)
ipw_z.ipw.plane_orientation = 'z_axes'
######################################################################
# Traits static event handlers.
######################################################################
def _equation_changed(self, old, new):
try:
g = numpy.__dict__
s = eval(new, g, {'x':self._x,
'y':self._y,
'z':self._z})
# The copy makes the data contiguous and the transpose
# makes it suitable for display via tvtk.
s = s.transpose().copy()
# Reshaping the array is needed since the transpose
# messes up the dimensions of the data. The scalars
# themselves are ravel'd and used internally by VTK so the
# dimension does not matter for the scalars.
s.shape = s.shape[::-1]
self.data = s
except:
pass
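    # Hedged aside (added; not part of the original example): the same
    # ogrid / eval / transpose pattern as above, with fixed sample values so it
    # can be exercised without any traits having been set.
    def _demo_eval(self):
        x, y, z = numpy.ogrid[-5:5:32j, -5:5:32j, -5:5:32j]
        s = numpy.sin(x * y * z) / (x * y * z)   # matches the default equation
        return s.transpose().copy()              # contiguous and VTK-friendly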
def _dimensions_changed(self):
"""This does nothing and only changes to update_data do
anything.
"""
return
def _volume_changed(self):
return
def _update_data_fired(self):
self._make_data()
src = self.source
if src is not None:
vol = self.volume
origin = vol[::2]
spacing = (vol[1::2] - origin)/(self.dimensions -1)
# Set the source spacing and origin.
src.set(spacing=spacing, origin=origin)
# Update the sources data.
src.update_image_data = True
self._reset_ipw()
def _reset_ipw(self):
ipw1, ipw2, ipw3 = self._ipw1, self._ipw2, self._ipw3
if ipw1.running:
ipw1.ipw.place_widget()
if ipw2.running:
ipw2.ipw.place_widget()
ipw2.ipw.plane_orientation = 'y_axes'
if ipw3.running:
ipw3.ipw.place_widget()
ipw3.ipw.plane_orientation = 'z_axes'
self.source.render()
def _data_changed(self, value):
if self.source is None:
return
self.source.scalar_data = value
def _window_changed(self):
m = self.get_mayavi()
if m.engine.running:
if len(self.data) == 0:
# Happens since the window may be set on __init__ at
# which time the data is not created.
self._make_data()
self._show_data()
else:
# Show the data once the mayavi engine has started.
m.engine.on_trait_change(self._show_data, 'started')
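# Hedged aside (added; not part of the Envisage example): the same scalar cube
# can also be inspected without Envisage by handing it to mlab directly.
def _demo_mlab_view(data):
    from mayavi import mlab               # assumes a working mayavi install
    src = mlab.pipeline.scalar_field(data)
    mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes')
    mlab.show()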
|
dmsurti/mayavi
|
examples/mayavi/explorer/explorer_app.py
|
Python
|
bsd-3-clause
| 8,360
|
[
"Mayavi",
"VTK"
] |
fb10371092649edb0209e5b8a43a6ae92651841ba2c4575549136e06d6330888
|