text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, subprocess
import dialog
from io import BytesIO
from zeroinstall import _
from zeroinstall.injector import model, selections, qdom
from gui import gobject
# XML namespace used by 0compile for per-implementation attributes (e.g. min-version).
XMLNS_0COMPILE = 'http://zero-install.sourceforge.net/2006/namespaces/0compile'
class Command(object):
	"""Run a child process, collecting its output via glib IO watches.

	The success callback fires with the captured stdout bytes when the
	child exits with status 0; non-zero exits pop an alert dialog.
	"""

	def __init__(self):
		self.child = None         # subprocess.Popen while running, else None
		self.error = b""          # accumulated stderr bytes
		self.stdout = b""         # accumulated stdout bytes (get_stdout mode only)
		self.watched_streams = 0  # pipes still open; child is reaped at zero

	def run(self, command, success, get_stdout = False):
		"""Start *command*; call success(stdout_bytes) on a clean exit."""
		assert self.child is None
		self.success = success
		if get_stdout:
			# Keep stdout and stderr separate so stdout can be returned verbatim.
			self.child = subprocess.Popen(command,
					      stdout = subprocess.PIPE,
					      stderr = subprocess.PIPE)
			gobject.io_add_watch(self.child.stdout, gobject.IO_IN | gobject.IO_HUP, self.got_stdout)
			gobject.io_add_watch(self.child.stderr, gobject.IO_IN | gobject.IO_HUP, self.got_errors)
			self.watched_streams = 2
		else:
			# Merge stderr into stdout; everything is treated as diagnostic text.
			self.child = subprocess.Popen(command,
					      stdout = subprocess.PIPE,
					      stderr = subprocess.STDOUT)
			gobject.io_add_watch(self.child.stdout, gobject.IO_IN | gobject.IO_HUP, self.got_errors)
			self.watched_streams = 1

	def got_stdout(self, src, cond):
		"""IO-watch callback: accumulate stdout; False (remove watch) on EOF."""
		chunk = os.read(src.fileno(), 100)
		if not chunk:
			self.done()
			return False
		self.stdout += chunk
		return True

	def done(self):
		"""One stream hit EOF; once all are closed, reap the child and report."""
		self.watched_streams -= 1
		if self.watched_streams == 0:
			status = self.child.wait()
			self.child = None
			if status == 0:
				self.success(self.stdout)
			else:
				if status == 1 and not self.error:
					return False	# Cancelled
				dialog.alert(None, _("Command failed with exit code %(status)d:\n%(error)s\n") %
						{'status': status, 'error': self.error})

	def got_errors(self, src, cond):
		"""IO-watch callback: accumulate stderr; False (remove watch) on EOF."""
		chunk = os.read(src.fileno(), 100)
		if not chunk:
			self.done()
			return False
		self.error += chunk
		return True
def compile(on_success, interface_uri, autocompile = False):
	"""Compile the source for interface_uri, then call on_success().

	With autocompile, run "0compile autocompile" straight away; otherwise
	prompt the user to pick a source version first, then drive the
	0compile GUI on the chosen selections.
	"""
	our_min_version = '0.18'	# The oldest version of 0compile we support

	def build(selections_xml):
		# Get the chosen versions
		sels = selections.Selections(qdom.parse(BytesIO(selections_xml)))

		impl = sels.selections[interface_uri]
		min_version = impl.attrs.get(XMLNS_0COMPILE + ' min-version', our_min_version)
		# Check the syntax is valid and the version is high enough
		if model.parse_version(min_version) < model.parse_version(our_min_version):
			min_version = our_min_version

		# Do the whole build-and-register-feed
		c = Command()
		c.run(("0launch",
			'--message', _('Download the 0compile tool, to compile the source code'),
			'--not-before=' + min_version,
			"http://0install.net/2006/interfaces/0compile.xml",
			'gui',
			interface_uri), lambda unused: on_success())

	if autocompile:
		c = Command()
		# Fix: the message is now marked for translation with _(), consistent
		# with every other --message argument in this module.
		c.run(("0launch",
			'--message', _('Download the 0compile tool, to compile the source code'),
			'--not-before=' + our_min_version,
			"http://0install.net/2006/interfaces/0compile.xml",
			'autocompile',
			'--gui',
			interface_uri), lambda unused: on_success())
	else:
		# Prompt user to choose source version
		c = Command()
		c.run(['0install', 'download', '--xml',
			'--message', _('Download the source code to be compiled'),
			'--gui', '--source', '--', interface_uri], build, get_stdout = True)
|
slovenwd/0install
|
zeroinstall/0launch-gui/compile.py
|
Python
|
lgpl-2.1
| 3,264
|
[
"VisIt"
] |
99211d9da3dfa1738754216210f5bc175a86ed7dff4934b2c1a29c63590bc183
|
import cv2
import numpy as np
import sys
import argparse
from os.path import splitext
from os import listdir, makedirs, rename
# Output depth passed to the cv2 edge-filter calls below (Sobel/Laplacian).
# Alternatives previously tried: cv2.CV_64F, cv2.CV_8UC3
color_mode=cv2.CV_8UC1
#8UC1
def none(img, *args):
    """Identity filter: return the image unchanged; extra arguments are ignored."""
    return img
def avg(img, x):
    """Box (averaging) blur with an x-by-x kernel."""
    return cv2.blur(img, (x, x))
def gaussian(img, x):
    """Gaussian blur with an x-by-x kernel (sigma argument left at 0)."""
    return cv2.GaussianBlur(img, (x, x), 0)
def median(img, x):
    """Median blur with aperture size x (the UI supplies odd sizes via 2k+1)."""
    return cv2.medianBlur(img, x)
def bilateral(img, x):
    """Edge-preserving bilateral filter: diameter x, sigmaColor=40, sigmaSpace=100."""
    return cv2.bilateralFilter(img, x, 40, 100)
# Trackbar index -> blur function (0=None, a=average, g=gaussian, m=median, b=bilateral).
blurs={ 0 : none,
1 : avg,
2 : gaussian,
3 : median,
4 : bilateral
}
def sobelX(img):
    """First derivative along x (Sobel, ksize=5, module-wide color_mode depth)."""
    return cv2.Sobel(img, color_mode, 1, 0, ksize=5)
def sobelY(img):
    """First derivative along y (Sobel, ksize=5, module-wide color_mode depth)."""
    return cv2.Sobel(img, color_mode, 0, 1, ksize=5)
def laplacian(img):
    """Laplacian edge filter using the module-wide color_mode depth."""
    return cv2.Laplacian(img, color_mode)
def canny(img):
    """Canny edge detector with fixed hysteresis thresholds 100/200."""
    return cv2.Canny(img, 100, 200)
#laplacian = cv2.Laplacian(self.blur,cv2.CV_64F)
#sobelx = cv2.Sobel(self.blur,cv2.CV_64F,1,0,ksize=5) # x
#sobely = cv2.Sobel(self.blur,cv2.CV_64F,0,1,ksize=5) # y
#filtered = cv2.Canny(self.blur,100,200)
# Trackbar index -> edge filter (0=None, sx=SobelX, sy=SobelY, l=Laplacian, c=Canny).
filters={ 0 : none,
1 : sobelX,
2 : sobelY,
3 : laplacian,
4 : canny
}
# Brush radii cycled by EdgeUI.togglebrush() (middle mouse button).
brushvals=[0,1,2,4,10,20,40]
def binary(img, val, tdir):
    """Plain binary threshold at val in direction tdir; return only the mask."""
    mask = cv2.threshold(img, float(val), 255, tdir)[1]
    return mask
def adaptive(img, val, tdir):
    """Gaussian adaptive threshold (val is unused; blockSize=11, C=2)."""
    return cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, tdir, 11, 2)
def otsu(img, val, tdir):
    """Threshold with THRESH_OTSU added to tdir so cv2 picks the level; return the mask."""
    mask = cv2.threshold(img, float(val), 255, tdir + cv2.THRESH_OTSU)[1]
    return mask
# Trackbar index -> thresholding function (0=None, b=binary, a=adaptive, o=otsu).
threshing={
0: none,
1: binary,
2: adaptive,
3: otsu
}
class EdgeUI(object):
    """Interactive OpenCV annotation tool.

    Three windows: a filter-tuning window (trackbars select blur, edge filter
    and threshold), a drawing window where the user paints a bitmask over the
    image, and an 'OG' window showing the untouched original.

    NOTE(review): this block arrived with its indentation stripped; the
    structure below was reconstructed from the control-flow keywords.
    """
    name = ''
    display = ''
    # okay, number of members started to feel wrong for this kind of interface
    blurSel = 0          # index into blurs{}
    blurVal = 1          # blur kernel size (kept odd: 2k+1)
    thresholdVal = 0     # threshold value [0, 255]
    threshSel = 0        # index into threshing{}
    THRESH_DIR = cv2.THRESH_BINARY_INV
    filterSel = 0        # index into filters{}
    # currently we can use 2 as each step is triggered if the preceding one is.
    inp = None           # input BGR image
    gray = None          # grayscale version of inp
    blurred = None       # gray after the selected blur
    filtered = None      # blurred after the selected edge filter
    out = None           # filtered after thresholding (single channel)
    canvas = None        # user-drawn 3-channel overlay
    # it looks like it is time to create a class for the drawing pad itself
    brush_size = 2
    brush_idx = 2        # index into brushvals
    # drawing state
    draw = False         # true if mouse is pressed
    erase = False        # if True, erase instead of draw
    ix, iy = -1, -1

    def __init__(self, name='Edge Filters', indisplay='Bitmask Drawer'):
        self.name = name
        self.display = indisplay
        cv2.namedWindow(self.name)
        # cv2.moveWindow(self.name, 400,20)
        cv2.namedWindow(self.display)
        cv2.moveWindow(self.display, 400, 320)
        cv2.namedWindow('OG')
        cv2.moveWindow('OG', 800, 400)
        cv2.createTrackbar('blur mtd (0,a,g,m,b)', self.name, 0, 4, self.setBlurMethod)
        cv2.createTrackbar('blur ', self.name, 0, 20, self.setBlur)
        cv2.createTrackbar('filter type (0,sx,sy,l,c)', self.name, 0, 4, self.setFilter)
        cv2.createTrackbar('threshold mtd (0,b,a,o)', self.name, 0, 3, self.setThreshMethod)
        cv2.createTrackbar('threshold val', self.name, 0, 255, self.setThreshold)
        cv2.setMouseCallback(self.display, self.mouseHandler)
        # NOTE(review): this second registration replaces the one above, so
        # mouseHandler is effectively never called; kept as-is to preserve
        # observable behavior — confirm which handler is intended.
        cv2.setMouseCallback(self.display, self.filterMouseHandler)
        #cv2.createButton('blur',self.name,self.onChange)

    #
    # Mouse callbacks
    def mouseHandler(self, event, x, y, flags, param):
        """Freehand brush: LMB paints white, RMB paints black, MMB cycles brush size."""
        if event == cv2.EVENT_LBUTTONDOWN:
            self.draw = True  # true if mouse is pressed
            self.erase = False
            self.ix, self.iy = x, y
            #print(' brush size: ', self.brush_size)
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.draw == True:
                #self.erase = False
                cv2.circle(self.canvas, (x, y), self.brush_size, (255, 255, 255), -1)
        elif event == cv2.EVENT_LBUTTONUP:
            self.draw = False
            cv2.circle(self.canvas, (x, y), self.brush_size, (255, 255, 255), -1)
        if event == cv2.EVENT_RBUTTONDOWN:
            self.erase = True
            self.draw = False
            self.ix, self.iy = x, y
            # print(' brush size: ', self.brush_size)
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.erase == True:
                #self.draw = False
                cv2.circle(self.canvas, (x, y), self.brush_size, (0, 0, 0), -1)
        elif event == cv2.EVENT_RBUTTONUP:
            self.erase = False
            cv2.circle(self.canvas, (x, y), self.brush_size, (0, 0, 0), -1)
        if event == cv2.EVENT_MBUTTONDOWN:
            self.draw = False
            self.erase = False
            self.togglebrush()
        # change brush size with the mouse wheel
        if event == cv2.EVENT_MOUSEWHEEL:
            if flags > 0:
                self.brush_size += 1
            elif flags < 0 and self.brush_size:
                self.brush_size -= 1
            #print('brush size: ', self.brush_size)
        # put this on a separate window
        self.showcanvaslayers()

    def filterMouseHandler(self, event, x, y, flags, param):
        """Stamp brush: LMB ORs the filter output into the canvas, RMB overwrites it."""
        if event == cv2.EVENT_LBUTTONDOWN:
            self.draw = True  # true if mouse is pressed
            self.erase = False
            self.ix, self.iy = x, y
            xi, xf, yi, yf = self.roify(x, y)  # NOTE(review): result unused here
            #print(' brush size: ', self.brush_size)
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.draw == True:
                #self.erase = False
                # copy roi to canvas
                xi, xf, yi, yf = self.roify(x, y)
                self.canvas[yi:yf, xi:xf] = cv2.cvtColor(cv2.bitwise_or(self.canvas[yi:yf, xi:xf, 0], self.out[yi:yf, xi:xf]), cv2.COLOR_GRAY2RGB)  # make this a method so it doesn't look nasty
        elif event == cv2.EVENT_LBUTTONUP:
            self.draw = False
            # copy roi to canvas
            xi, xf, yi, yf = self.roify(x, y)
            self.canvas[yi:yf, xi:xf] = cv2.cvtColor(cv2.bitwise_or(self.canvas[yi:yf, xi:xf, 0], self.out[yi:yf, xi:xf]), cv2.COLOR_GRAY2RGB)
        # hijack ERASER for overwriting, instead of bitwise masking
        if event == cv2.EVENT_RBUTTONDOWN:
            self.erase = True
            self.draw = False
            self.ix, self.iy = x, y
            # print(' brush size: ', self.brush_size)
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.erase == True:
                #self.draw = False
                xi, xf, yi, yf = self.roify(x, y)
                self.canvas[yi:yf, xi:xf] = cv2.cvtColor(self.out[yi:yf, xi:xf], cv2.COLOR_GRAY2RGB)
        elif event == cv2.EVENT_RBUTTONUP:
            self.erase = False
            xi, xf, yi, yf = self.roify(x, y)
            self.canvas[yi:yf, xi:xf] = cv2.cvtColor(self.out[yi:yf, xi:xf], cv2.COLOR_GRAY2RGB)
        if event == cv2.EVENT_MBUTTONDOWN:
            self.draw = False
            self.erase = False
            self.togglebrush()
        self.showcanvaslayers()

    def roify(self, x, y):
        """Return a brush-sized ROI (xi, xf, yi, yf) around (x, y), clipped to the image.

        NOTE(review): self.w / self.h are never assigned in this class —
        confirm they are set elsewhere, otherwise this raises AttributeError.
        """
        sz = self.brush_size
        [xi, xf] = np.clip([x - sz, x + sz], 0, self.w - 1)
        [yi, yf] = np.clip([y - sz, y + sz], 0, self.h - 1)
        return xi, xf, yi, yf

    # elif event == cv2.EVENT_MBUTTONCLICK:
    # cv2.imwrite()
    #def onChange(self):
    # pass
    # any method calls the next one in the chain below.
    # change:
    #[new Img] [setBlur] [sel. Filter] [setThreshold]
    # | | | |
    # V V V V
    # Load -> cvt2Gray-> Blur-> filter -> threshold.
    def togglebrush(self):
        """Cycle to the next brush radius in brushvals."""
        self.brush_idx = (self.brush_idx + 1) % len(brushvals)
        self.brush_size = brushvals[self.brush_idx]
        print(' brush size: ', self.brush_size)

    def setBlurMethod(self, mtd):
        """Trackbar callback: select blur function, then re-run the pipeline."""
        self.blurSel = mtd
        self.blur()

    def setBlur(self, val):
        """Trackbar callback: kernel size is forced odd (2k+1)."""
        self.blurVal = val * 2 + 1
        self.blur()

    def setFilter(self, filter):
        """Trackbar callback: select edge filter; 'no filter' inverts the threshold."""
        self.filterSel = filter
        if (filter == 0):
            self.THRESH_DIR = cv2.THRESH_BINARY_INV
        else:
            self.THRESH_DIR = cv2.THRESH_BINARY
        self.filter()

    def setThreshMethod(self, mtd):
        """Trackbar callback: select thresholding function."""
        self.threshSel = mtd
        self.threshold()

    def setThreshold(self, val):
        """Trackbar callback: set threshold value."""
        self.thresholdVal = val
        self.threshold()

    def blur(self):
        """Stage 1 of the pipeline: blur the grayscale image, then filter."""
        self.blurred = blurs[self.blurSel](self.gray, self.blurVal)
        self.filter()

    def filter(self):
        """Stage 2: apply the edge filter, then threshold."""
        self.filtered = filters[self.filterSel](self.blurred)
        self.threshold()

    def threshold(self):
        """Stage 3: threshold the filtered image into self.out."""
        self.out = threshing[self.threshSel](self.filtered, self.thresholdVal, self.THRESH_DIR)
        #if(filter==LAPLACIAN)

    def updateManual(self, img):
        """Show an arbitrary image in the filter window and block for a keypress."""
        print(' updated manually')
        cv2.imshow(self.name, img)
        cv2.waitKey(0)

    def update(self):
        """Refresh the filter window with the current pipeline output."""
        #print('update')
        cv2.imshow(self.name, self.out)
        cv2.waitKey(30)
        return

    def clearcanvas(self):
        """Reset the drawing canvas to all-black, matching the input size."""
        self.canvas = np.zeros((self.inp.shape[0], self.inp.shape[1], 3), np.uint8)
        self.showcanvaslayers()

    def filter2canvas(self):
        """Replace the canvas with the current filter output."""
        self.canvas = cv2.cvtColor(self.out, cv2.COLOR_GRAY2BGR)
        self.showcanvaslayers()

    def showcanvaslayers(self):
        """Composite the canvas over the input image in the drawing window."""
        cv2.imshow(self.display, cv2.add(self.inp, self.canvas))

    # can also take in a Mat
    def loadImg(self, img, str=True):
        """Load a new image: a file path when str is truthy, otherwise a decoded Mat."""
        if (str):
            self.inp = cv2.imread(img)
        else:
            self.inp = img
        # converting to gray scale
        self.gray = cv2.cvtColor(self.inp, cv2.COLOR_BGR2GRAY)
        self.blur()
        cv2.imshow(self.display, self.inp)
        cv2.imshow('OG', self.inp)
        self.clearcanvas()
        #print(self.canvas.shape)
        #print(self.inp.shape)

    def startadvance(self):
        pass

    # def saveFilter(self):
    # cv2.imwrite()
    #def savePainted(self):
    # cv2.imwrite()
    def run(self):
        """Event loop: return the filter output ('b') or the drawn mask ('m')."""
        while (1):
            cv2.imshow(self.name, self.out)
            key = cv2.waitKey(30)
            if 'b' == chr(key & 255):
                print(' Saving filtered img')
                return self.out
            elif 'm' == chr(key & 255):
                print(' Saving drawn bitmask')
                return cv2.cvtColor(self.canvas, cv2.COLOR_BGR2GRAY)
            elif 'c' == chr(key & 255):
                print(' Copied filter to canvas')
                self.filter2canvas()
            elif 'z' == chr(key & 255):
                print(' Cleared canvas')
                self.clearcanvas()
def main():
    """CLI entry point: annotate every image in a directory, saving masks,
    RGBA merges, and moving processed originals aside."""
    parser = argparse.ArgumentParser(description='Run basic Edge detection algorithms on a given image.')
    parser.add_argument('img_path', metavar='base', type=str, help='Path to the image to process.')
    args = parser.parse_args()
    path = args.img_path
    #blank=np.zeros((240, 320, 3), np.uint8)
    #blank[:] = (0,0,0)
    ui = EdgeUI()
    #top = tkinter.Tk()
    images = listdir(path)
    # Output folders are siblings of the input folder.
    mask_save_path = path + '/../masks/'
    merged_save_path = path + '/../masked/'
    processed_path = path + '/../processed/'
    makedirs(mask_save_path, exist_ok=True)
    makedirs(merged_save_path, exist_ok=True)
    makedirs(processed_path, exist_ok=True)
    print('EdgeUI, v0.4')
    print('Image annotation tool for labeling')
    print('by Ozan Akyildiz')
    print('Use scrollbars to:')
    print(' - Set the blur method (None, Average, Gaussian, Median, Bilateral)')
    print(' - Set the blur kernel size (2k+1)')
    print(' - Choose edge filter (None, SobelX, SobelY, Laplacian, Canny)')
    print(' - Choose thresholding method (None, binary, adaptive, OTSU)')
    print(' - Set the threshold value')
    print('Mouse:')
    print(' LMB - Draw')
    print(' RMB - Erase')
    print(' MMB - Toggle brush size')
    print(' MWD - Brush size -')
    print(' MWU - Brush size +')
    print('Keyboard:')
    print('Z - Reset drawing canvas')
    print('C - Copy filter result to canvas')
    print('B - Save filter result and proceed to next img')
    print('M - Save drawn bitmask and ...')
    # From a SW Design standpoint, this should be a part of the UI class.
    # But, you know... time.
    for img_name in images:
        # loading image
        print('----------')
        print('Opened :' + img_name)
        #img0 = cv2.imread(args.img_path, cv2.CV_8UC1)
        img0 = cv2.imread(path + '/' + img_name)
        # Robustness fix: imread returns None for unreadable/non-image entries;
        # previously this crashed on img0.shape.
        if img0 is None:
            print('Skipped (not a readable image): ' + img_name)
            continue
        print(img0.shape, img0.dtype)
        nm, ext = splitext(img_name)
        ui.loadImg(img0, False)
        mask = ui.run()
        #print(mask)
        #print(' bitmask:', mask.shape, mask.dtype)
        b, g, r = cv2.split(img0)
        print('Saved :' + nm + '.png', ', and moved the src img')
        # Store the mask as the alpha channel of the merged output.
        to_save = cv2.merge((b, g, r, mask))
        rename(path + '/' + img_name, processed_path + img_name)
        print(to_save.shape, to_save.dtype)
        # remove noise
        cv2.imwrite(mask_save_path + nm + '.bmp', mask)
        cv2.imwrite(merged_save_path + nm + '.png', to_save)
    cv2.destroyAllWindows()
# Script entry point: propagate main()'s return value as the process exit status.
if __name__ == "__main__":
print()
sys.exit(main())
|
OAkyildiz/dataset_tools
|
bitmask_tool.py
|
Python
|
mit
| 13,414
|
[
"Gaussian"
] |
5f9251cf4ddbbe2f4494e27888951acb54496743e0fb0001d59d4bbd1055f5fb
|
# coding: utf-8
"""
This module provides objects for extracting timing data from the ABINIT output files
It also provides tools to analyze and to visualize the parallel efficiency.
"""
from __future__ import unicode_literals, division
import sys
import collections
import numpy as np
from six.moves import zip
from monty.string import is_string, list_strings
from pymatgen.util.num_utils import minloc
from pymatgen.util.plotting_utils import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__name__)
def alternate(*iterables):
    """
    Interleave the given iterables element by element:
    [a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]

    >>> alternate([1,4], [2,5], [3,6])
    [1, 2, 3, 4, 5, 6]
    """
    return [item for group in zip(*iterables) for item in group]
class AbinitTimerParserError(Exception):
    """Exception raised by AbinitTimerParser on malformed timer sections."""
    pass
class AbinitTimerParser(collections.Iterable):
    """
    Responsible for parsing a list of output files, and managing the parsed database.

    NOTE(review): this block arrived with its indentation stripped; the
    structure below was reconstructed from the control-flow keywords.
    """
    # The markers enclosing the data.
    BEGIN_TAG = "-<BEGIN_TIMER"
    END_TAG = "-<END_TIMER>"

    Error = AbinitTimerParserError

    #DEFAULT_MPI_RANK = "0"

    def __init__(self):
        # List of files that have been parsed.
        self._filenames = []
        # timers[filename][mpi_rank]
        # contains the timer extracted from the file filename
        # associated to the MPI rank mpi_rank.
        self._timers = collections.OrderedDict()

    def __iter__(self):
        return self._timers.__iter__()

    def __len__(self):
        return len(self._timers)

    def parse(self, filenames):
        """
        Read and parse a filename or a list of filenames.
        Files that cannot be opened are ignored. A single filename may also be given.
        Return list of successfully read files.
        """
        filenames = list_strings(filenames)

        read_ok = []
        for fname in filenames:
            try:
                fh = open(fname)
            except IOError:
                logger.warning("Cannot open file %s" % fname)
                continue

            try:
                self._read(fh, fname)
                read_ok.append(fname)
            except self.Error as e:
                logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
                continue
            finally:
                fh.close()

        # Add read_ok to the list of files that have been parsed.
        self._filenames.extend(read_ok)
        return read_ok

    def _read(self, fh, fname):
        """Parse the TIMER section of the open file fh (named fname)."""
        if fname in self._timers:
            raise self.Error("Cannot overwrite timer associated to: %s " % fname)

        data = {}

        def parse_line(line):
            # 25-char fixed-width name field followed by six numeric columns.
            name, vals = line[:25], line[25:].split()
            ctime, cfract, wtime, wfract, ncalls, gflops = vals
            return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)

        inside, has_timer = 0, False
        for line in fh:
            #print(line.strip())
            if line.startswith(self.BEGIN_TAG):
                has_timer = True
                sections = []
                info = {}
                inside = 1
                # Drop the tag and the trailing ">" then read "key = value" pairs.
                line = line[len(self.BEGIN_TAG):].strip()[:-1]
                info["fname"] = fname
                for tok in line.split(","):
                    (key, val) = [s.strip() for s in tok.split("=")]
                    info[key] = val
            elif line.startswith(self.END_TAG):
                inside = 0
                timer = AbinitTimer(sections, info, cpu_time, wall_time)
                mpi_rank = info["mpi_rank"]
                data[mpi_rank] = timer
            elif inside:
                inside += 1
                line = line[1:].strip()
                if inside == 2:
                    # Second timer line carries the totals as "key = value" pairs.
                    d = dict()
                    for tok in line.split(","):
                        (key, val) = [s.strip() for s in tok.split("=")]
                        d[key] = float(val)
                    cpu_time, wall_time = d["cpu_time"], d["wall_time"]
                elif inside > 5:
                    sections.append(parse_line(line))
                else:
                    # Lines 3-5 are expected to be header/empty lines.
                    # Bug fix: parser_failed must be initialized here; previously a
                    # line that parse_line accepted raised NameError instead of the
                    # intended self.Error below.
                    parser_failed = False
                    try:
                        parse_line(line)
                    except Exception:
                        parser_failed = True
                    if not parser_failed:
                        raise self.Error("line should be empty: " + str(inside) + line)

        if not has_timer:
            raise self.Error("%s: No timer section found" % fname)

        # Add it to the dict
        self._timers[fname] = data

    #def set_default_mpi_rank(mpi_rank): self._default_mpi_rank = mpi_rank
    #def get_default_mpi_rank(mpi_rank): return self._default_mpi_rank

    def timers(self, filename=None, mpi_rank="0"):
        """Return the list of timers associated to the given filename and MPI rank mpi_rank."""
        if filename is not None:
            timers = [self._timers[filename][mpi_rank]]
        else:
            timers = [self._timers[filename][mpi_rank] for filename in self._filenames]
        return timers

    def section_names(self, ordkey="wall_time"):
        """Return the names of sections ordered by ordkey (taken from the first timer)."""
        section_names = []  # Avoid UnboundLocalError

        # FIXME this is not trivial
        for (idx, timer) in enumerate(self.timers()):
            if idx == 0:
                section_names = [s.name for s in timer.order_sections(ordkey)]
                #check = section_names
            #else:
            # new_set = set( [s.name for s in timer.order_sections(ordkey)])
            # section_names.intersection_update(new_set)
            # check = check.union(new_set)

        #if check != section_names:
        # print("sections", section_names)
        # print("check",check)

        return section_names

    def get_sections(self, section_name):
        """
        Return the list of sections stored in self.timers() whose name is section_name
        A fake section is returned if the timer does not have section_name.
        """
        sections = []
        for timer in self.timers():
            for sect in timer.sections:
                if sect.name == section_name:
                    sections.append(sect)
                    break
            else:
                sections.append(AbinitTimerSection.fake())
        return sections

    def pefficiency(self):
        """
        Analyze the parallel efficiency.
        """
        timers = self.timers()

        # Number of CPUs employed in each calculation.
        ncpus = [timer.ncpus for timer in timers]

        # Find the minimum number of cpus used and its index in timers.
        min_idx = minloc(ncpus)
        min_ncpus = ncpus[min_idx]

        # Reference timer
        ref_t = timers[min_idx]

        # Compute the parallel efficiency (total efficiency and the efficiency of each section)
        # NOTE(review): ctime_peff is computed from wall_time and wtime_peff from
        # cpu_time — the names look swapped; confirm against upstream before changing.
        peff = {}
        ctime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]
        wtime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]
        n = len(timers)

        peff["total"] = {}
        peff["total"]["cpu_time"] = ctime_peff
        peff["total"]["wall_time"] = wtime_peff
        peff["total"]["cpu_fract"] = n * [100]
        peff["total"]["wall_fract"] = n * [100]

        for sect_name in self.section_names():
            #print(sect_name)
            ref_sect = ref_t.get_section(sect_name)
            sects = [t.get_section(sect_name) for t in timers]
            try:
                ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]
                wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]
            except ZeroDivisionError:
                # Sections with zero time: mark efficiencies as -1 (ignored later).
                ctime_peff = n * [-1]
                wtime_peff = n * [-1]

            assert sect_name not in peff
            peff[sect_name] = {}
            peff[sect_name]["cpu_time"] = ctime_peff
            peff[sect_name]["wall_time"] = wtime_peff
            peff[sect_name]["cpu_fract"] = [s.cpu_fract for s in sects]
            peff[sect_name]["wall_fract"] = [s.wall_fract for s in sects]

        return ParallelEfficiency(self._filenames, min_idx, peff)

    @add_fig_kwargs
    def plot_efficiency(self, key="wall_time", what="gb", nmax=5, ax=None, **kwargs):
        """Plot the parallel efficiency of the best ("g") and/or worst ("b") sections."""
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        timers = self.timers()
        peff = self.pefficiency()

        # Table with the parallel efficiency for all the sections.
        #pprint_table(peff.totable())

        n = len(timers)
        xx = np.arange(n)

        ax.set_color_cycle(['g', 'b', 'c', 'm', 'y', 'k'])

        legend_entries = []

        # Plot sections with good efficiency.
        lines = []
        if "g" in what:
            good = peff.good_sections(key=key, nmax=nmax)
            for g in good:
                #print(g, peff[g])
                yy = peff[g][key]
                line, = ax.plot(xx, yy, "-->", linewidth=3.0, markersize=10)
                lines.append(line)
                legend_entries.append(g)

        # Plot sections with bad efficiency.
        if "b" in what:
            bad = peff.bad_sections(key=key, nmax=nmax)
            for b in bad:
                #print(b, peff[b])
                yy = peff[b][key]
                line, = ax.plot(xx, yy, "-.<", linewidth=3.0, markersize=10)
                lines.append(line)
                legend_entries.append(b)

        if "total" not in legend_entries:
            yy = peff["total"][key]
            total_line, = ax.plot(xx, yy, "r", linewidth=3.0, markersize=10)
            lines.append(total_line)
            legend_entries.append("total")

        ax.legend(lines, legend_entries, loc="best", shadow=True)

        #ax.set_title(title)
        ax.set_xlabel('Total_NCPUs')
        ax.set_ylabel('Efficiency')
        ax.grid(True)

        # Set xticks and labels.
        labels = ["MPI = %d, OMP = %d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
        ax.set_xticks(xx)
        ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)

        return fig

    @add_fig_kwargs
    def plot_pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
        """Pie charts of the different timers."""
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        timers = self.timers()
        n = len(timers)

        # Make square figures and axes
        the_grid = plt.GridSpec(n, 1)
        fig = plt.figure(1, figsize=(6, 6))
        for idx, timer in enumerate(timers):
            plt.subplot(the_grid[idx, 0])
            plt.title(str(timer))
            timer.pie(key=key, minfract=minfract)

        return fig

    @add_fig_kwargs
    def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
        """Stacked histogram of the different timers."""
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        mpi_rank = "0"
        timers = self.timers(mpi_rank=mpi_rank)
        n = len(timers)

        names, values = [], []
        rest = np.zeros(n)

        # Keep the nmax most expensive sections; lump everything else together.
        for idx, sname in enumerate(self.section_names(ordkey=key)):
            sections = self.get_sections(sname)
            svals = np.asarray([s.__dict__[key] for s in sections])
            if idx < nmax:
                names.append(sname)
                values.append(svals)
            else:
                rest += svals

        names.append("others (nmax = %d)" % nmax)
        values.append(rest)

        #for (n, vals) in zip(names, values): print(n, vals)

        # The dataset is stored in values.
        # Now create the stacked histogram.
        ind = np.arange(n)  # the locations for the groups
        width = 0.35  # the width of the bars

        # this does not work with matplotlib < 1.0
        #plt.rcParams['axes.color_cycle'] = ['r', 'g', 'b', 'c']
        colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']

        bars = []
        bottom = np.zeros(n)
        for idx, vals in enumerate(values):
            color = colors[idx]
            bar = plt.bar(ind, vals, width, color=color, bottom=bottom)
            bars.append(bar)
            bottom += vals

        ax.set_ylabel(key)
        #ax.title("Stacked histogram for the %d most important sections" % nmax)

        labels = ["MPI = %d, OMP = %d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
        plt.xticks(ind + width / 2.0, labels, rotation=15)
        #plt.yticks(np.arange(0,81,10))

        ax.legend([bar[0] for bar in bars], names, loc="best")

        return fig
class ParallelEfficiency(dict):
    """Dict mapping section names to efficiency data, with helpers that rank
    sections by how well (or badly) they scale."""

    def __init__(self, filenames, ref_idx, *args, **kwargs):
        self.update(*args, **kwargs)
        self.filenames = filenames
        # Index of the reference run (the one with the fewest CPUs).
        self._ref_idx = ref_idx

    def _order_by_peff(self, key, criterion, reverse=True):
        """Return section names sorted by the aggregated efficiency of peff[key]."""
        estimators = {
            "min": min,
            "max": max,
            "mean": lambda items: sum(items) / len(items),
        }
        self.estimator = estimators[criterion]

        ranked = []
        for (sect_name, peff) in self.items():
            # Ignore values where we had a division by zero.
            if all([v != -1 for v in peff[key]]):
                values = peff[key][:]
                #print(sect_name, values)
                if len(values) > 1:
                    # Drop the reference point (its efficiency is 1 by construction).
                    ref_value = values.pop(self._ref_idx)
                    assert ref_value == 1.0
                ranked.append((sect_name, self.estimator(values)))

        ranked.sort(key=lambda pair: pair[1], reverse=reverse)
        return tuple([sect_name for (sect_name, e) in ranked])

    def totable(self, stop=None, reverse=True):
        """Build a table (list of rows) with efficiency and fraction per file."""
        osects = self._order_by_peff("wall_time", criterion="mean", reverse=reverse)

        n = len(self.filenames)
        table = [["AbinitTimerSection"] + alternate(self.filenames, n * ["%"])]
        for sect_name in osects:
            peff = self[sect_name]["wall_time"]
            fract = self[sect_name]["wall_fract"]
            vals = alternate(peff, fract)
            table.append([sect_name] + ["%.2f" % val for val in vals])

        return table

    def good_sections(self, key="wall_time", criterion="mean", nmax=5):
        """The nmax sections with the highest aggregated efficiency."""
        good_sections = self._order_by_peff(key, criterion=criterion)
        return good_sections[:nmax]

    def bad_sections(self, key="wall_time", criterion="mean", nmax=5):
        """The nmax sections with the lowest aggregated efficiency."""
        bad_sections = self._order_by_peff(key, criterion=criterion, reverse=False)
        return bad_sections[:nmax]
class AbinitTimerSection(object):
    """Record with the timing results associated to a section of code."""
    STR_FIELDS = [
        "name"
    ]

    NUMERIC_FIELDS = [
        "cpu_time",
        "cpu_fract",
        "wall_time",
        "wall_fract",
        "ncalls",
        "gflops",
    ]

    FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)

    @classmethod
    def fake(cls):
        """Return a placeholder section (used when a timer lacks a real section)."""
        return AbinitTimerSection("fake", 0.0, 0.0, 0.0, 0.0, -1, 0.0)

    def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):
        """All arguments may be strings (as read from the file); they are coerced here."""
        self.name = name.strip()
        self.cpu_time = float(cpu_time)
        self.cpu_fract = float(cpu_fract)
        self.wall_time = float(wall_time)
        self.wall_fract = float(wall_fract)
        self.ncalls = int(ncalls)
        self.gflops = float(gflops)

    def to_tuple(self):
        """Return the field values in FIELDS order."""
        return tuple([self.__dict__[at] for at in AbinitTimerSection.FIELDS])

    def to_csvline(self, with_header=False):
        """Return a string with data in CSV format"""
        string = ""

        if with_header:
            string += "# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n"

        string += ", ".join(str(v) for v in self.to_tuple()) + "\n"
        return string

    def __str__(self):
        string = ""
        # Bug fix: values must be coerced with str(); concatenating the numeric
        # fields directly raised TypeError.
        for a in AbinitTimerSection.FIELDS:
            string += a + " = " + str(self.__dict__[a]) + ","
        return string[:-1]
class AbinitTimer(object):
    """Container class used to store the timing results.

    NOTE(review): this block arrived with its indentation stripped; the
    structure below was reconstructed from the control-flow keywords.
    """

    def __init__(self, sections, info, cpu_time, wall_time):
        """
        Args:
            sections: iterable of AbinitTimerSection.
            info: dict with at least mpi_nprocs, omp_nthreads, mpi_rank, fname.
            cpu_time / wall_time: totals for this run.
        """
        self.sections = tuple(sections)
        self.section_names = tuple([s.name for s in self.sections])
        self.info = info
        self.cpu_time = float(cpu_time)
        self.wall_time = float(wall_time)
        self.mpi_nprocs = int(info["mpi_nprocs"])
        self.omp_nthreads = int(info["omp_nthreads"])
        self.mpi_rank = info["mpi_rank"].strip()
        self.fname = info["fname"].strip()

    def __str__(self):
        string = "file = %s, wall_time = %.1f, mpi_nprocs = %d, omp_nthreads = %d" % (
            self.fname, self.wall_time, self.mpi_nprocs, self.omp_nthreads)
        #string += ", rank = " + self.mpi_rank
        return string

    def __cmp__(self, other):
        # NOTE(review): uses the Python 2 builtin cmp(); __cmp__ is ignored on
        # Python 3, where sorting relies on key= callables instead.
        return cmp(self.wall_time, other.wall_time)

    @property
    def ncpus(self):
        """Total number of CPUs employed."""
        return self.mpi_nprocs * self.omp_nthreads

    def get_section(self, section_name):
        """Return the section with the given name (ValueError if not present)."""
        # Simplified: the original wrapped .index() in a try/except that only re-raised.
        idx = self.section_names.index(section_name)
        sect = self.sections[idx]
        assert sect.name == section_name
        return sect

    def to_csv(self, fileobj=sys.stdout):
        """Write data on file fileobj using CSV format."""
        openclose = is_string(fileobj)

        if openclose:
            fileobj = open(fileobj, "w")

        for (idx, section) in enumerate(self.sections):
            fileobj.write(section.to_csvline(with_header=(idx == 0)))
        fileobj.flush()

        if openclose:
            fileobj.close()

    def totable(self, sort_key="wall_time", stop=None):
        """Return a table (list of lists) with timer data"""
        table = [list(AbinitTimerSection.FIELDS), ]
        ord_sections = self.order_sections(sort_key)

        if stop is not None:
            ord_sections = ord_sections[:stop]

        for osect in ord_sections:
            row = [str(item) for item in osect.to_tuple()]
            table.append(row)

        return table

    def get_values(self, keys):
        """Return a list of values associated to a particular list of keys"""
        if is_string(keys):
            return [s.__dict__[keys] for s in self.sections]
        else:
            values = []
            for k in keys:
                values.append([s.__dict__[k] for s in self.sections])
            return values

    def names_and_values(self, key, minval=None, minfract=None, sorted=True):
        """
        Select the entries whose value[key] is >= minval or whose fraction[key] is >= minfract
        Return the names of the sections and the corresponding values.
        """
        values = self.get_values(key)
        names = self.get_values("name")

        new_names, new_values = [], []
        other_val = 0.0

        if minval is not None:
            assert minfract is None

            for (n, v) in zip(names, values):
                if v >= minval:
                    new_names.append(n)
                    new_values.append(v)
                else:
                    other_val += v

            new_names.append("below minval " + str(minval))
            new_values.append(other_val)

        elif minfract is not None:
            assert minval is None

            total = self.sum_sections(key)

            for (n, v) in zip(names, values):
                if v / total >= minfract:
                    new_names.append(n)
                    new_values.append(v)
                else:
                    other_val += v

            new_names.append("below minfract " + str(minfract))
            new_values.append(other_val)

        else:
            # all values
            (new_names, new_values) = (names, values)

        if sorted:
            # Sort new_values and rearrange new_names.
            # NOTE(review): the parameter deliberately shadows the builtin sorted().
            fsort = lambda t: t[1]
            nandv = [nv for nv in zip(new_names, new_values)]
            nandv.sort(key=fsort)
            new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]

        return new_names, new_values

    def _reduce_sections(self, keys, operator):
        # Apply operator to the per-section values of keys.
        return operator(self.get_values(keys))

    def sum_sections(self, keys):
        """Sum the per-section values of keys."""
        return self._reduce_sections(keys, sum)

    def order_sections(self, key, reverse=True):
        """Sort sections according to the value of key."""
        fsort = lambda s: s.__dict__[key]
        return sorted(self.sections, key=fsort, reverse=reverse)

    @add_fig_kwargs
    def cpuwall_histogram(self, ax=None, **kwargs):
        """Side-by-side bar chart of cpu_time and wall_time per section."""
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        nk = len(self.sections)
        ind = np.arange(nk)  # the x locations for the groups
        width = 0.35  # the width of the bars

        cpu_times = self.get_values("cpu_time")
        rects1 = plt.bar(ind, cpu_times, width, color='r')

        wall_times = self.get_values("wall_time")
        rects2 = plt.bar(ind + width, wall_times, width, color='y')

        # Add ylabel and title
        ax.set_ylabel('Time (s)')

        #if title:
        # plt.title(title)
        #else:
        # plt.title('CPU-time and Wall-time for the different sections of the code')

        ticks = self.get_values("name")
        # NOTE(review): passing the tick labels as the second positional argument
        # of set_xticks is only valid on newer matplotlib — confirm the target
        # version; older versions expect set_xticklabels().
        ax.set_xticks(ind + width, ticks)

        ax.legend((rects1[0], rects2[0]), ('CPU', 'Wall'), loc="best")

        return fig

    #def hist2(self, key1="wall_time", key2="cpu_time"):
    # labels = self.get_values("name")
    # vals1, vals2 = self.get_values([key1, key2])
    # N = len(vals1)
    # assert N == len(vals2)
    # plt.figure(1)
    # plt.subplot(2, 1, 1) # 2 rows, 1 column, figure 1
    # n1, bins1, patches1 = plt.hist(vals1, N, facecolor="m")
    # plt.xlabel(labels)
    # plt.ylabel(key1)
    # plt.subplot(2, 1, 2)
    # n2, bins2, patches2 = plt.hist(vals2, N, facecolor="y")
    # plt.xlabel(labels)
    # plt.ylabel(key2)
    # plt.show()

    def pie(self, key="wall_time", minfract=0.05, title=None):
        """Pie chart of the sections; entries below minfract are lumped together."""
        import matplotlib.pyplot as plt
        # Don't show section whose value is less that minfract
        labels, vals = self.names_and_values(key, minfract=minfract)
        return plt.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)

    def scatter_hist(self, ax=None, **kwargs):
        """Scatter plot of cpu_time vs wall_time with marginal histograms."""
        import matplotlib.pyplot as plt
        from mpl_toolkits.axes_grid1 import make_axes_locatable

        ax, fig, plt = get_ax_fig_plt(ax=ax)

        #title = kwargs.pop("title", None)
        #show = kwargs.pop("show", True)
        #savefig = kwargs.pop("savefig", None)
        #fig = plt.figure(1, figsize=(5.5, 5.5))

        x = np.asarray(self.get_values("cpu_time"))
        y = np.asarray(self.get_values("wall_time"))

        # the scatter plot:
        axScatter = plt.subplot(1, 1, 1)
        axScatter.scatter(x, y)
        axScatter.set_aspect("auto")

        # create new axes on the right and on the top of the current axes
        # The first argument of the new_vertical(new_horizontal) method is
        # the height (width) of the axes to be created in inches.
        divider = make_axes_locatable(axScatter)
        axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
        axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)

        # make some labels invisible
        plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)

        # now determine nice limits by hand:
        binwidth = 0.25
        xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
        lim = (int(xymax / binwidth) + 1) * binwidth

        bins = np.arange(-lim, lim + binwidth, binwidth)
        axHistx.hist(x, bins=bins)
        axHisty.hist(y, bins=bins, orientation='horizontal')

        # the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
        # thus there is no need to manually adjust the xlim and ylim of these axis.
        #axHistx.axis["bottom"].major_ticklabels.set_visible(False)
        for tl in axHistx.get_xticklabels():
            tl.set_visible(False)
        axHistx.set_yticks([0, 50, 100])

        #axHisty.axis["left"].major_ticklabels.set_visible(False)
        for tl in axHisty.get_yticklabels():
            tl.set_visible(False)
        axHisty.set_xticks([0, 50, 100])

        plt.draw()
        return fig
|
rousseab/pymatgen
|
pymatgen/io/abinitio/abitimer.py
|
Python
|
mit
| 24,590
|
[
"ABINIT",
"pymatgen"
] |
16d23032a0fe29c4c7d42acf6f62f71dc3cc07c42ba6bbdb79a0aa37c7ecddae
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2014--, tax-credit development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from setuptools import find_packages, setup
# Package metadata and dependencies for the tax-credit benchmarking project.
setup(
    name='tax-credit',
    version='0.0.0-dev',
    license='BSD-3-Clause',
    # NOTE(review): find_packages('tax_credit') searches *inside* the
    # tax_credit/ directory; without a matching package_dir mapping the
    # discovered package names may not install correctly. Verify against the
    # project layout (a bare find_packages() is the usual call here).
    packages=find_packages('tax_credit'),
    install_requires=['biom-format', 'pandas', 'statsmodels', 'bokeh',
                      'scipy', 'jupyter', 'scikit-bio', 'seaborn',
                      'scikit-learn', 'joblib'],
    author="Nicholas Bokulich",
    author_email="nbokulich@gmail.com",
    description="Systematic benchmarking of taxonomic classification methods",
    url="https://github.com/caporaso-lab/tax-credit"
)
|
BenKaehler/short-read-tax-assignment
|
setup.py
|
Python
|
bsd-3-clause
| 926
|
[
"scikit-bio"
] |
8e996870218706d8f8e76fb0c2ec62a707976eda96c26dbfe281e73391c40a6c
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageSobel3D(SimpleVTKClassModuleBase):
    """Auto-generated DeVIDE module wrapping VTK's vtkImageSobel3D filter.

    Declares one vtkImageData input and one vtkImageData output; all actual
    processing is delegated to the wrapped VTK object via the mixin base.
    """
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkImageSobel3D(), 'Processing.',
            ('vtkImageData',), ('vtkImageData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkImageSobel3D.py
|
Python
|
bsd-3-clause
| 487
|
[
"VTK"
] |
9f6429a461ccf08169a60e6b879f3de08e1a52417ad32f4dd0354a6cdc85f380
|
# pylint: disable=C0111
# pylint: disable=W0621
import urllib
from lettuce import world
from django.contrib.auth.models import User, Group
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.contentstore.django import contentstore
@world.absorb
def create_user(uname, password):
    """Create, register and activate a portal user *uname*.

    No-op if a user with that username already exists.
    """
    # If the user already exists, don't try to create it again.
    # .exists() issues a cheap EXISTS query instead of fetching every row
    # just to count them (the original len(filter(...)) > 0 did the latter).
    if User.objects.filter(username=uname).exists():
        return

    portal_user = world.UserFactory.build(username=uname, email=uname + '@edx.org')
    portal_user.set_password(password)
    portal_user.save()

    registration = world.RegistrationFactory(user=portal_user)
    registration.register(portal_user)
    registration.activate()

    world.UserProfileFactory(user=portal_user)
@world.absorb
def log_in(username='robot', password='test', email='robot@edx.org', name="Robot"):
    """
    Use the auto_auth feature to programmatically log the user in
    """
    params = {
        'username': username,
        'password': password,
        'email': email,
        'full_name': name,
    }
    world.visit('/auto_auth' + "?" + urllib.urlencode(params))

    # Save the user info in the world scenario_dict for use in the tests
    world.scenario_dict['USER'] = User.objects.get(username=username)
@world.absorb
def register_by_course_key(course_key, username='robot', password='test', is_staff=False):
    """Ensure *username* exists (creating it if necessary) and enroll it in *course_key*.

    If is_staff is True the user is made global staff before enrollment.
    """
    create_user(username, password)
    user = User.objects.get(username=username)
    # Note: this flag makes the user global staff - that is, an edX employee - not a course staff.
    # See courseware.tests.factories for StaffFactory and InstructorFactory.
    if is_staff:
        user.is_staff = True
        user.save()
    CourseEnrollment.enroll(user, course_key)
@world.absorb
def enroll_user(user, course_key):
    """Activate *user*'s registration and enroll them in *course_key*."""
    # Activate user
    registration = world.RegistrationFactory(user=user)
    registration.register(user)
    registration.activate()
    # Enroll them in the course
    CourseEnrollment.enroll(user, course_key)
@world.absorb
def clear_courses():
    """Drop all course content from the Mongo module store and contentstore.

    Uses the private _get_modulestore_by_type API, so this is coupled to the
    mixed-modulestore implementation.
    """
    # Flush and initialize the module store
    # Note that if your test module gets in some weird state
    # (though it shouldn't), do this manually
    # from the bash shell to drop it:
    # $ mongo test_xmodule --eval "db.dropDatabase()"
    store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
    store.collection.drop()
    contentstore().fs_files.drop()
|
wwj718/murp-edx
|
common/djangoapps/terrain/course_helpers.py
|
Python
|
agpl-3.0
| 2,532
|
[
"VisIt"
] |
789ff6860f87b32c1b9981eca0bc4e02709f9df1d3a84a74cb7af3cbef7fed7f
|
from math import radians, sin, cos
from ase import Atom, Atoms
from ase.neb import NEB
from ase.constraints import FixAtoms
from ase.calculators.emt import EMT
from ase.optimize import QuasiNewton, BFGS
from ase.visualize import view
# http://jcp.aip.org/resource/1/jcpsa6/v97/i10/p7507_s1
# Climbing-image NEB for proton transfer in the H3O2- (HO-H...OH) system,
# using the EMT calculator. Geometry parameters below are taken from the
# paper referenced above; distances presumably in Angstrom — see paper.
doo = 2.74    # O-O separation (both O atoms are held fixed below)
doht = 0.957  # terminal O-H distance
doh = 0.977   # O-H distance of the shared (transferring) hydrogen
angle = radians(104.5)  # water H-O-H angle

# Initial state: shared H bonded to the first oxygen.
initial = Atoms('HOHOH',
                positions=[(- sin(angle)*doht, 0., cos(angle)*doht),
                           (0., 0., 0.),
                           (0., 0., doh),
                           (0., 0., doo),
                           (sin(angle)*doht, 0., doo - cos(angle)*doht)])
if 0:
    view(initial)

# Final state: shared H bonded to the second oxygen (proton transferred).
final = Atoms('HOHOH',
              positions=[(- sin(angle)*doht, 0., cos(angle)*doht),
                         (0., 0., 0.),
                         (0., 0., doo - doh),
                         (0., 0., doo),
                         (sin(angle)*doht, 0., doo - cos(angle)*doht)])
if 0:
    view(final)

# Make band: 5 images = initial + 3 copies of initial + final.
images = [initial.copy()]
for i in range(3):
    images.append(initial.copy())
images.append(final.copy())
neb = NEB(images, climb=True)

# Set constraints and calculator:
constraint = FixAtoms(indices=[1, 3])  # fix OO
for image in images:
    image.set_calculator(EMT())
    image.set_constraint(constraint)

for image in images:  # O-H(shared) distance
    print(image.get_distance(1, 2), image.get_potential_energy())

# Relax initial and final states:
if 1:
    # XXX: Warning:
    # One would have to optimize more tightly in order to get
    # symmetric anion from both images[0] and [1], but
    # if one optimizes tightly one gets rotated(H2O) ... OH- instead
    dyn1 = QuasiNewton(images[0])
    dyn1.run(fmax=0.01)
    dyn2 = QuasiNewton(images[-1])
    dyn2.run(fmax=0.01)

# Interpolate positions between initial and final states:
neb.interpolate()

for image in images:
    print(image.get_distance(1, 2), image.get_potential_energy())

# Optimize the band, writing the trajectory to disk.
dyn = BFGS(neb, trajectory='emt_h3o2m.traj')
dyn.run(fmax=0.05)

for image in images:
    print(image.get_distance(1, 2), image.get_potential_energy())
|
suttond/MODOI
|
ase/test/emt_h3o2m.py
|
Python
|
lgpl-3.0
| 2,096
|
[
"ASE"
] |
1921e00156d7b40d5fb2d562ef6381e1b5e7dadbe12389a412cd69f637184451
|
# -*- coding: iso-8859-1 -*-
'''Module containing and processing all orbkit options.'''
lgpl = '''ORBKIT
Gunter Hermann, Vincent Pohl, Lukas Eugen Marsoner Steinkasserer, Axel Schild, and Jean Christophe Tremblay
Institut fuer Chemie und Biochemie, Freie Universitaet Berlin, 14195 Berlin, Germany
This file is part of ORBKIT.
ORBKIT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or any later version.
ORBKIT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ORBKIT. If not, see <http://www.gnu.org/licenses/>.
'''
lgpl_short = '''This is ORBKIT.
Copyright (C) 2017 Gunter Hermann, Vincent Pohl, Lukas Eugen Marsoner Steinkasserer, Axel Schild, and Jean Christophe Tremblay.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions. Type '-l' for details.
'''
import os
import sys
from copy import deepcopy
thismodule = sys.modules[__name__]
from orbkit import grid
available = [
'filename','itype','cclib_parser','outputname','otype',
'numproc','mo_set','calc_ao','all_mo','calc_mo','spin','drv','laplacian',
'slice_length','is_vector','grid_file','adjust_grid','center_grid','random_grid',
'gross_atomic_density',
'quiet','no_log','no_output','no_slice','interactive', 'test'
]
itypes = ['auto',
'molden',
'aomix',
'gamess',
'gaussian.log',
'gaussian.fchk',
'wfn',
'wfx',
'cclib',
'native',
'orbkit.dump'] #: Specifies possible input types.
niotypes = ['npz',
'hdf5'] #: Specifies file format for native io
otypes = ['h5', 'hdf5',
'npz',
'cube', 'cb',
'cube.gz', 'cb.gz',
'obj', 'obj.gz',
'am',
'hx',
'vmd',
'mayavi',
'native',
'auto'] #: Specifies possible output types.
drv_options = ['None','x','y','z',
'xx','yy','zz','x2','y2','z2',
'xy','yx','xz','zx','yz','zy'] #: Specifies possible derivative variables.
def get_options():
    '''Returns all possible options and their value.'''
    # Build the mapping directly instead of going through a list of pairs.
    return {name: globals()[name] for name in available}
def init_parser():
    '''Initializes the module-level optparse parser, parses sys.argv and
    mirrors all parsed options into module-level attributes.

    Side effects: sets the global `parser`, calls sys.exit(0) for `-l`,
    runs check_options(), and runs the test suite when invoked as
    `... test`.
    '''
    import optparse
    global parser

    def default_if_called(option, opt, value, parser, default=1e4):
        # Callback helper: consume the next argv token as an int unless it
        # looks like another option, in which case fall back to *default*.
        # NOTE(review): not referenced by any add_option call below.
        try:
            arg = parser.rargs[0]
            if ((arg[:2] == "--" and len(arg) > 2) or
                (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")):
                raise ValueError
            value = int(float(arg))
        except (IndexError, ValueError):
            value = int(default)
        setattr(parser.values, option.dest, value)

    #optparse.Option.STORE_ACTIONS += ('call_back',)
    usage = 'Usage: %prog [options] -i INPUT'
    parser = optparse.OptionParser(usage=usage, description=lgpl_short)

    parser.add_option("-l", dest="show_lgpl",
                      default=False, action="store_true",
                      help="show license information and exit")
    parser.add_option("--quiet", dest="quiet",
                      default=False, action="store_true",
                      help="suppress terminal output")
    parser.add_option("--no_log", dest="no_log",
                      default=False, action="store_true",
                      help="suppress output of a INPUT.oklog logfile")

    group = optparse.OptionGroup(parser, "Input/Output Options")
    group.add_option("-i", "--input", dest="filename", metavar="INPUT",
                     default='', type="string", nargs=1,
                     help="input file")
    group.add_option("-e", "--itype", dest="itype",
                     default='auto', type="choice", choices=itypes,
                     help="input type: '" + "', '".join(itypes) +
                     "' [default: '%default']")
    group.add_option("--niotype", dest="niotype",
                     default='npz', type="choice", choices=niotypes,
                     help="input type: '" + "', '".join(niotypes) +
                     "' [default: '%default']")
    group.add_option("--cclib_parser", dest="cclib_parser",
                     type="string",
                     help='''if '--itype=cclib', this argument determines what
                     cclib.parser will be used, e.g., 'Gaussian' or 'GAMESS'.''')
    group.add_option("-o", "--output", dest="outputname",
                     type="string",
                     help='''name of the output file
                     [default: base name of INPUT]''')
    group.add_option("-t", "--otype", dest="otype",
                     type="choice", action="append", choices=otypes,
                     help='''output formats (multiple calls possible):
                     '{0}' or '{1}' (HDF5 file),
                     '{2}' (Compressed numpy file),
                     '{3}' or '{5}' (Gaussian cube file),
                     '{7}' (VMD network),
                     '{8}' (ZIBAmiraMesh file),
                     '{9}' (ZIBAmira network),
                     '{10}' (simple interactive Mayavi interface)
                     '{11}' (determine from OUTPUTNAME)
                     [default: '{11}' if OUTPUTNAME has file extension
                     else '{0}']'''.format(*otypes))
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Computational Options")
    group.add_option("-p", "--numproc", dest="numproc",
                     default=1, type="int",
                     help='''number of subprocesses to be started
                     during the execution [default: %default]''')
    group.add_option("--mo_set", dest="mo_set",
                     default=[], type="string", action="append",
                     help='''read the plain text file MO_SET containing row
                     vectors of molecular orbital indeces (delimiter=' ',
                     Integer numbering or MOLPRO's symmetry numbering)
                     and compute the electron density
                     using exclusively those orbitals'''.replace('  ', '').replace('\n', ''))
    group.add_option("--calc_ao", dest="calc_ao",
                     default=False, action="store_true",
                     help="calculate and save all AOs.")
    group.add_option("--calc_mo", dest="calc_mo",
                     default=[], type="string", action="append",
                     help=('''calculate and save the MOs specified in the
                     plain text file CALC_MO by the indices (delimiter=' ')
                     (Type 'all_mo' to store all occupied and virtual
                     orbitals)''').replace('  ', '').replace('\n', ''))
    group.add_option("--all_mo", dest="all_mo",
                     default=False, action="store_true",
                     help='''take into account all (occupied and virtual) MOs
                     for all computations''')
    # BUGFIX: this option previously declared `type=spin`, i.e. it passed the
    # module-level variable `spin` (None) as the optparse type. The intended
    # optparse type for a choices-option is the string "choice".
    group.add_option("--spin", dest="spin",
                     default=None, type="choice", choices=['alpha', 'beta'],
                     help='''consider only `alpha` or `beta` molecular orbitals
                     for the computations. Only available for unrestricted
                     calculations.'''.replace('  ', '').replace('\n', ''))
    group.add_option("-d", "--drv", dest="drv", choices=drv_options,
                     type="choice", action="append",
                     help=('''compute the analytical derivative of the requested
                     quantities with respect to DRV, i.e., 'x', 'y', and/or 'z'.
                     For 2nd derivatives, specify the respective combinations
                     , e.g., 'xx' or 'yz'. (multiple calls possible)'''
                     ).replace('  ', '').replace('\n', ''))
    group.add_option("--laplacian", dest="laplacian",
                     default=False, action="store_true",
                     help='''compute the analytical laplacian of the density
                     or the specified mo_set, respectively.
                     ''')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Grid-Related Options")
    group.add_option("--adjust_grid", dest="adjust_grid",
                     type="float", nargs=2, default=[5, 0.5],
                     help=('''create a grid using a spacing of X a_0 and having
                     the size of the molecule plus D a_0 in each direction,
                     e.g., --adjust_grid=D X [default: --adjust_grid=5 0.5]'''
                     ).replace('  ', '').replace('\n', ''))
    group.add_option("--grid", dest="grid_file",
                     type="string",
                     help='''read the grid from the plain text file GRID_FILE''')
    group.add_option("--random_grid", dest="random_grid",
                     default=False, action="store_true",
                     help=optparse.SUPPRESS_HELP)
    group.add_option("--center", dest="center_grid",
                     metavar="ATOM", type="int",
                     help='''center with respect to the origin and the
                     atom number ATOM (input order)''')
    group.add_option("-s", "--slice_length", dest="slice_length",
                     default=1e4, type="int",
                     help=('''specify how many grid points are computed at once
                     (per subprocess).''').replace('  ', '').replace('\n', ''))
    group.add_option("-v", "--vector", dest="is_vector",
                     default=False, action="store_true",
                     help=('''store the output in a vector format.''')
                     )
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Additional Options")
    group.add_option("--gross_atomic_density", dest="gross_atomic_density",
                     metavar="INDEX", action="append", type="int",
                     help='''compute the atom-projected electron density with
                     respect to atom INDEX (multiple calls possible)''')
    # The following parser options are hidden
    group.add_option("--no_slice", dest="no_slice",
                     default=False, action="store_true",
                     help=optparse.SUPPRESS_HELP)
    group.add_option("--no_output", dest="no_output",
                     default=False, action="store_true",
                     help=optparse.SUPPRESS_HELP)
    group.add_option("--not_interactive", dest="interactive",
                     default=True, action="store_false",
                     help=optparse.SUPPRESS_HELP)
    parser.add_option_group(group)

    (kwargs, args) = parser.parse_args()

    # Print the licence, if requested
    if kwargs.show_lgpl:
        # BUGFIX: the marker must match the lgpl string above, which reads
        # "This file is part of ORBKIT." (uppercase); the previous lowercase
        # marker made the replace a silent no-op.
        print(lgpl.replace('\nThis file is part of ORBKIT.\n', ''))
        sys.exit(0)

    if kwargs.otype is None:
        kwargs.otype = ['auto']

    # Mirror every parsed option into a module-level attribute of this module.
    for i, j in vars(kwargs).items():
        setattr(thismodule, i, j)

    # Check the options for compatibility and correctness
    check_options(error=parser.error,
                  interactive=interactive,
                  info=False,
                  check_io=(not len(args)))

    if len(args) and args[0] == 'test':
        from orbkit.test import test
        test()
        return
    # init_parser
def raise_error(string, error=IOError):
    """Raise *string* as an error, preferring the CLI parser's error handler
    when a parser has been initialized (in which case optparse prints the
    message and exits instead of raising)."""
    handler = parser.error if hasattr(thismodule, 'parser') else error
    raise handler(string)
def print_message(string):
    """Default display handler: write *string* to stdout via print()."""
    print(string)
def check_options(error=raise_error, display=print_message,
                  interactive=False, info=True, check_io=True):
    '''Checks options for errors.

    **Parameters:**

    error : function, optional
        Handles the errors.
    display : function, optional
        Handles the print commands.
    interactive : bool, optional
        If True and a file does not exist, asks the user to insert name of
        existing file.
    info : bool, optional
        If True, some additional information is printed.
    check_io : bool, optional
        If True, the input/output related options are validated as well.

    :Default Error and Exception Handling:
        Prints the errors and continues.
    '''
    #--- Input/Output Options ---#
    if check_io:
        # Look for the input file
        setattr(thismodule, 'filename', check_if_exists(filename,
                                        what='filename for the input file',
                                        interactive=interactive,
                                        error=error))
        # Check the input type for correctness
        if itype not in itypes:
            error('Invalid input file format (choose from "%s")\n' %
                  '", "'.join(itypes))
        if itype == 'cclib' and cclib_parser is None:
            error('The input type cclib requires the specification of parser, ' +
                  'e.g., --cclib_parser=Gaussian')
        if niotype not in niotypes:
            error('Unsupported format for native io (choose from "%s")\n' %
                  '", "'.join(niotypes))
        if niotype == 'hdf5':
            try:
                __import__('h5py')
            except ImportError:
                error('External IO to HDF5 file was requested but no\n' +
                      'HDF5 module could be found.')

        # Derive a default output name from the input file name.
        fid_base, ext = os.path.splitext(filename)
        if outputname is None:
            setattr(thismodule, 'outputname', fid_base)
        else:
            outpath = os.path.dirname(outputname.split('@')[0])
            if not (outpath == '' or os.path.exists(outpath)):
                error('Output path "%s" does not exist!' % outpath)

        # Check the output types for correctness
        if otype is None:
            setattr(thismodule, 'otype', [])
        # NOTE(review): os.path.splitext(...)[1] returns '' (never None) when
        # there is no extension, so the "is not None" test is always True —
        # "!= ''" was likely intended.
        elif ('auto' in otype and os.path.splitext(outputname)[1] is not None
              and os.path.splitext(outputname)[1][1:] not in otypes):
            setattr(thismodule, 'otype', ['h5'])
        elif not isinstance(otype, list):
            setattr(thismodule, 'otype', [otype])
        if not all(i in otypes for i in otype):
            error('Invalid output file formats (choose from "%s")\n' %
                  '", "'.join(otypes))
        # Check if h5py is installed
        if 'h5' in otype:
            try:
                import h5py
            except ImportError:
                error('ImportError: The module h5py is not installed!\n')

    #--- Grid-Related Options ---#
    # Look for the grid input file
    if grid_file is not None:
        setattr(thismodule, 'grid_file', check_if_exists(grid_file,
                                         what='filename for the grid input file',
                                         interactive=interactive,
                                         error=error))
    if adjust_grid is not None:
        if (not isinstance(adjust_grid, (list, tuple)) or
            (len(adjust_grid) != 2) or
            (not isinstance(adjust_grid[0], (int, float))) or
            (not isinstance(adjust_grid[1], (int, float)))
            ):
            error('The grid parameter (--adjust_grid), has to be a list containing '
                  'containing two floats.\n')
        elif adjust_grid[1] == 0:
            error('The grid spacing (second value in --adjust_grid) cannot be zero.\n')

    #--- Computational Options ---#
    if not isinstance(numproc, int):
        error('The number of processes (--numproc) has to be an integer value.\n')

    # Check the files specified by --calc_mo or --mo_set for existance
    def check_mo(attr):
        # Normalize the module-level MO selection `attr` (int / str / list of
        # labels / filename); returns False when it is unset, True otherwise.
        data = getattr(thismodule, attr)
        if not data:
            setattr(thismodule, attr, False)
            return False
        if isinstance(data, int):
            data = str(data)
        if isinstance(data, str):
            data = [data]
        try:
            # Validate that every entry parses as an MO label; a ValueError
            # here means the entry may be a keyword or a filename instead.
            for d in data:
                d = str(d)
                if not (',' in d.lower() or ':' in d.lower()):
                    i = deepcopy(d)
                    if i != 'homo' and i != 'lumo':
                        for r in ['homo', 'lumo', '-', '+']:
                            i = i.replace(r, '')
                        int(i.split('.')[0])
        except ValueError:
            if len(data) == 1:
                data = data[0]
                # NOTE(review): `not any([i != data.lower() ...])` is False for
                # every possible string (data cannot equal all four keywords at
                # once), so the check_if_exists branch is dead code; probably
                # `i == data.lower()` was intended to detect non-keyword input.
                if not any([i != data.lower() for i in ['all_mo', 'occupied', 'unoccupied', 'virtual']]):
                    setattr(thismodule, attr,
                            check_if_exists(data,
                                            what='filename for the MO list',
                                            interactive=interactive))
                else:
                    setattr(thismodule, attr, data)
            else:
                display('You have called `%s` multiple times. So, you have\n' % attr +
                        'to give the molecular orbital labels explicitly, i.e.,\n' +
                        'no filenames and no usage of the keyword `all_mo`.\n\n')
                error('Entry `%s` is not valid!' % d)
        return True

    i = check_mo('calc_mo')
    j = check_mo('mo_set')
    if i and j:
        error('Please choose --calc_mo OR --mo_set, not both. \n' +
              '--calc_mo will be done.\n')

    if not isinstance(all_mo, bool):
        error('The option --all_mo has to be a boolean.\n')
    if spin is not None and not (spin == 'alpha' or spin == 'beta'):
        error('The option --spin has to be `alpha` or `beta`.\n')
    if (drv is not None) and not all(i in drv_options for i in drv):
        error('Invalid derivative option (choose from "%s")\n' %
              '", "'.join(drv_options))
    if laplacian:
        if not (drv is None or drv == ['xx', 'yy', 'zz'] or drv == ['x2', 'y2', 'z2']):
            display('Note: You have set the option --laplacian and specified values\n' +
                    'for --drv. Both options are not compatible.\n\n' +
                    'The options have been changed to -dxx -dyy -dzz.\n')
        setattr(thismodule, 'drv', ['xx', 'yy', 'zz'])

    #--- Additional Options ---#
    if gross_atomic_density is not None and drv is not None:
        error('The derivative of the gross atomic density is not implemented.\n')

    # The following options cannot be checked before running the main program
    if info:
        # NOTE(review): this literal contains "%s--center", which renders as
        # "--center--center" for the first message — looks like a typo for
        # 'The option %s cannot be checked before %s...\n'.
        string = 'The option %s--center cannot be checked before %s...\n'
        if center_grid is not None:
            display(string % ('--center', 'reading\nthe input file'))
        if gross_atomic_density is not None:
            display(string % ('--gross_atomic_density',
                              'reading\nthe input file'))
    return True
def check_if_exists(fid, what='', error=IOError, display=sys.stdout.write,
                    interactive=False):
    '''Checks the existence of a file.

    **Parameters:**

    fid : string
        Specifies filename of the requested file.
    what : string, optional
        Describes the file.
    error : function, optional
        Handles the errors.
    display : function, optional
        Handles the print commands.
    interactive : bool, optional
        If True and a file does not exist, asks the user to insert name of
        existing file.

    **Returns:**

    fid : string
        Specifies filename of the requested file.
    '''
    # Keep prompting while the name is not an existing path; in
    # non-interactive mode report the problem once and give up.
    while not (isinstance(fid, str) and os.path.exists(fid)):
        if fid != '':
            display('%s does not exist!\n' % fid)
        if not interactive:
            error('Insert a correct %s!\n' % what)
            break
        fid = input('Please insert a correct %s: ' % what)
    return fid
def check_grid_output_compatibilty(error=raise_error):
    """Abort when a non-regular grid is combined with a grid-based output format.

    Cube/VMD/ZIBAmira/Mayavi outputs require a regular grid; only HDF5 can
    store values on an arbitrary vector grid.
    """
    if not grid.is_regular and ('cube' in otype or
                                'cb' in otype or
                                'vmd' in otype or
                                'am' in otype or
                                'hx' in otype or
                                'mayavi' in otype):
        error('For a non-regular vector grid, only HDF5 ' +
              'is available as output format. Choose: --otype=h5\n')
# initiating the parser variables
# the names are chosen according to core.init_parser()
#--- Input/Output Options ---
filename = '' #: Specifies input file name. (str)
itype = 'auto' #: Specifies input file type. See :data:`itypes` for details. (str)
niotype = 'npz' #: Specifies output filetype for native io
cclib_parser = None #: If itype is 'cclib', specifies the cclib.parser. (str)
outputname = '' #: Specifies output file (base) name. (str)
otype = 'auto' #: Specifies output file type. See :data:`otypes` for details. (str or list of str or None)
#--- Computational Options ---
numproc = 1 #: Specifies number of subprocesses for multiprocessing. (int)
mo_set = False #: Specifies molecular orbitals used for density calculation. (filename or list of indices)
calc_ao = False #: If True, all atomic orbitals will be computed and saved.
calc_mo = False #: Specifies which molecular orbitals will be calculated. (filename or list of indices)
all_mo = False #: If True, all molecular orbitals will be computed. (bool)
spin = None #: If not None, exclusively 'alpha' or 'beta' molecular orbitals are taken into account. (None,'alpha', or 'beta')
drv = None #: Specifies derivative variables. (list of str)
laplacian = False #: If True, computes the laplacian of the density or of the mo_set. (bool)
#--- Grid-Related Options ---
slice_length = 1e4 #: Specifies the number of points per subprocess. (int)
vector = None # This option is only present because of backward compatibility
is_vector = False #: If True, vector grid is used for the output. (bool)
grid_file = None #: Specifies file to read grid from. (filename)
adjust_grid = None #: If not None, create a grid using a spacing of X a_0 and having the size of the molecule plus D a_0 in each direction. (list: [D, x])
center_grid = None #: If not None, grid is centered to specified atom and origin. (int)
random_grid = False #: If True, creates random grid around atom positions. (bool)
#--- Additional Options ---
gross_atomic_density = None #: Computes the gross atomic electron density with respect to specified atom. (int or list of int)
#--- Options for Advanced Users ---
quiet = False #: If True, omits terminal output. (bool)
no_log = False #: If True, omits logfile output. (bool)
no_output = False #: If True, omits creation of output. (bool)
no_slice = False #: If True, omits slicing of the grid. (bool)
interactive = False #: If True, asks user to select unclarified options. (bool)
#--- Default values for the grid parameters ---
grid.reset_grid()
|
orbkit/orbkit
|
orbkit/options.py
|
Python
|
lgpl-3.0
| 22,643
|
[
"GAMESS",
"Gaussian",
"Mayavi",
"Molpro",
"VMD",
"cclib"
] |
d27ca159645f79e5db15972d2de142b30c5a07f610c57c29921fe1da1ed6e07a
|
"""Tests against live services.
*** SKIPPED BY DEFAULT ***
These tests won't normally be run, as part of the main test suite but are run by
our hudson instance to tell us should Yahoo's API change in some way that will
break python-yql.
Note to end-users: These tests are dependent on defining a secrets file with API
keys and other secrets which are required to carry out these tests.
If the secrets file isn't present the tests are skipped
"""
import os
import sys
from time import time
from unittest import TestCase
from nose.plugins.skip import SkipTest
import yql
from yql.storage import FileTokenStore
SECRETS_DIR = os.path.join(os.path.dirname(__file__), "../../../secrets")
CACHE_DIR = os.path.abspath(os.path.join(SECRETS_DIR, "cache"))
try:
if SECRETS_DIR not in sys.path:
sys.path.append(SECRETS_DIR)
from secrets import *
except ImportError:
raise SkipTest("Unable to find secrets directory")
class LiveTestCase(TestCase):
    """A test case containing live tests.

    These hit real Yahoo/YQL endpoints using the credentials imported from
    the secrets module. Note: this is Python 2 code (print statements,
    raw_input) and several assertions pin live-service responses that may
    change over time.
    """

    def test_write_bitly_url(self):
        """Test writing bit.ly url"""
        query = """USE 'http://www.datatables.org/bitly/bit.ly.shorten.xml';
        SELECT * from bit.ly.shorten where login='%s' and apiKey='%s' and
        longUrl='http://yahoo.com'""" % (BITLY_USER, BITLY_API_KEY)
        y = yql.TwoLegged(YQL_API_KEY, YQL_SHARED_SECRET)
        res = y.execute(query)
        assert res.one()["data"]["url"] == "http://yhoo.it/9PPTOr"

    def test_public_request(self):
        """Test public two-legged request to flickr"""
        query = """select * from flickr.photos.search where
                   text="panda" and api_key='%s' LIMIT 3""" % FLICKR_API_KEY
        y = yql.TwoLegged(YQL_API_KEY, YQL_SHARED_SECRET)
        res = y.execute(query)
        assert len(res.rows) == 3

    def test_two_legged_weather_select(self):
        """Tests the weather tables using two-legged"""
        query = """select * from weather.forecast where location in
                (select id from xml where
                url='http://xoap.weather.com/search/search?where=london'
                and itemPath='search.loc')"""
        y = yql.TwoLegged(YQL_API_KEY, YQL_SHARED_SECRET)
        res = y.execute(query)
        assert len(res.rows) > 1

    def test_update_social_status(self):
        """Updates status"""
        y = yql.ThreeLegged(YQL_API_KEY, YQL_SHARED_SECRET)
        timestamp = time()
        query = """UPDATE social.profile.status
                   SET status='Using YQL. %s Update'
                   WHERE guid=me""" % timestamp
        token_store = FileTokenStore(CACHE_DIR, secret='gsfdsfdsfdsfs')
        stored_token = token_store.get('foo')
        if not stored_token:
            # Do the dance: interactive three-legged OAuth authorization.
            request_token, auth_url = y.get_token_and_auth_url()
            print "Visit url %s and get a verifier string" % auth_url
            verifier = raw_input("Enter the code: ")
            token = y.get_access_token(request_token, verifier)
            token_store.set('foo', token)
        else:
            # Check access_token is within 1hour-old and if not refresh it
            # and stash it
            token = y.check_token(stored_token)
            if token != stored_token:
                token_store.set('foo', token)
        res = y.execute(query, token=token)
        assert res.rows[0] == "ok"
        new_query = """select message from social.profile.status where guid=me"""
        res = y.execute(new_query, token=token)
        assert res.rows[0].get("message") == "Using YQL. %s Update" % timestamp

    def test_update_meme_status(self):
        """Updates status"""
        y = yql.ThreeLegged(YQL_API_KEY, YQL_SHARED_SECRET)
        query = 'INSERT INTO meme.user.posts (type, content) VALUES("text", "test with pythonyql")'
        token_store = FileTokenStore(CACHE_DIR, secret='fjdsfjllds')
        store_name = "meme"
        stored_token = token_store.get(store_name)
        if not stored_token:
            # Do the dance: interactive three-legged OAuth authorization.
            request_token, auth_url = y.get_token_and_auth_url()
            print "Visit url %s and get a verifier string" % auth_url
            verifier = raw_input("Enter the code: ")
            token = y.get_access_token(request_token, verifier)
            token_store.set(store_name, token)
        else:
            # Check access_token is within 1hour-old and if not refresh it
            # and stash it
            token = y.check_token(stored_token)
            if token != stored_token:
                token_store.set(store_name, token)
        # post a meme
        res = y.execute(query, token=token)
        assert y.uri == "http://query.yahooapis.com/v1/yql"
        assert res.rows[0].get("message") == "ok"
        pubid = None
        if res.rows[0].get("post") and res.rows[0]["post"].get("pubid"):
            pubid = res.rows[0]["post"]["pubid"]
        # Delete the post we've just created
        query = 'DELETE FROM meme.user.posts WHERE pubid=@pubid'
        res2 = y.execute(query, token=token, params={"pubid": pubid})
        assert res2.rows[0].get("message") == "ok"

    def test_check_env_var(self):
        """Testing env variable"""
        y = yql.Public()
        env = "http://datatables.org/alltables.env"
        query = "SHOW tables;"
        res = y.execute(query, env=env)
        assert res.count >= 800

    def test_xpath_works(self):
        # Query Google's homepage HTML and extract the search input via XPath.
        y = yql.Public()
        query = """SELECT * FROM html
                   WHERE url='http://google.co.uk'
                   AND xpath="//input[contains(@name, 'q')]"
                   LIMIT 10"""
        res = y.execute(query)
        assert res.rows[0].get("title") == "Search"
|
allanice001/RJ45
|
lib/yql/tests/test_live_services.py
|
Python
|
gpl-2.0
| 5,705
|
[
"VisIt"
] |
c2a9225e8250f1a37c2e7d19436a37e1d0de9af491bd1ba06353de23f666446b
|
#!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# most of it copied from AWX's scan_packages module
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: package_facts
short_description: package information as facts
description:
- Return information about installed packages as facts
options:
manager:
description:
- The package manager used by the system so we can query the package information
default: auto
choices: ["auto", "rpm", "apt"]
required: False
version_added: "2.5"
author:
- Matthew Jones
- Brian Coca
- Adam Miller
'''
EXAMPLES = '''
- name: get the rpm package facts
package_facts:
manager: "auto"
- name: show them
debug: var=ansible_facts.packages
'''
RETURN = '''
ansible_facts:
description: facts to add to ansible_facts
returned: always
type: complex
contains:
packages:
description: list of dicts with package information
returned: when operating system level package manager is specified or auto detected manager
type: dict
sample_rpm:
{
"packages": {
"kernel": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.26.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.16.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.10.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.21.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools-libs": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools-libs",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
}
}
sample_deb:
{
"packages": {
"libbz2-1.0": [
{
"version": "1.0.6-5",
"source": "apt",
"arch": "amd64",
"name": "libbz2-1.0"
}
],
"patch": [
{
"version": "2.7.1-4ubuntu1",
"source": "apt",
"arch": "amd64",
"name": "patch"
}
],
}
}
'''
import sys
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
def rpm_package_list():
    """Return installed packages from the RPM database.

    Returns a dict mapping package name to a list of dicts (one per
    installed version) with name/version/release/epoch/arch and
    source='rpm'.  Fails the module when the rpm bindings are missing.
    """
    try:
        import rpm
    except ImportError:
        # Bug fix: fail_json() only accepts keyword arguments, so the message
        # must be passed as msg= — a positional argument raises TypeError
        # instead of producing a clean module failure.
        module.fail_json(msg='Unable to use the rpm python bindings, please ensure they are installed under the python the module runs under')
    trans_set = rpm.TransactionSet()
    installed_packages = {}
    for package in trans_set.dbMatch():
        package_details = dict(name=package[rpm.RPMTAG_NAME],
                               version=package[rpm.RPMTAG_VERSION],
                               release=package[rpm.RPMTAG_RELEASE],
                               epoch=package[rpm.RPMTAG_EPOCH],
                               arch=package[rpm.RPMTAG_ARCH],
                               source='rpm')
        # Several versions of one package (e.g. kernel) can coexist;
        # group them all under a single key.
        installed_packages.setdefault(package_details['name'], []).append(package_details)
    return installed_packages
def apt_package_list():
    """Return installed packages from the APT cache.

    Returns a dict mapping package name to a list of dicts with
    name/version/arch and source='apt'.  Fails the module when the apt
    python bindings are missing.
    """
    try:
        import apt
    except ImportError:
        # Bug fix: fail_json() only accepts keyword arguments, so the message
        # must be passed as msg= — a positional argument raises TypeError
        # instead of producing a clean module failure.
        module.fail_json(msg='Unable to use the apt python bindings, please ensure they are installed under the python the module runs under')
    apt_cache = apt.Cache()
    installed_packages = {}
    apt_installed_packages = [pk for pk in apt_cache.keys() if apt_cache[pk].is_installed]
    for package in apt_installed_packages:
        ac_pkg = apt_cache[package].installed
        package_details = dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, source='apt')
        # Group multiple installed versions of a package under one key.
        installed_packages.setdefault(package_details['name'], []).append(package_details)
    return installed_packages
# FIXME: add more listing methods
def main():
    """Module entry point: pick a package manager, gather facts, exit.

    The 'manager' option defaults to auto-detection, which simply tries to
    import the rpm and apt bindings in turn.  Results are published under
    ansible_facts.packages, replacing any value from a previous run.
    """
    global module
    module = AnsibleModule(argument_spec=dict(manager=dict()))
    manager = module.params['manager']
    packages = {}
    results = {}
    if manager is None or manager == 'auto':
        # Auto-detect by probing for the python bindings of each
        # supported package manager.
        for candidate in ('rpm', 'apt'):
            try:
                __import__(candidate)
            except ImportError:
                continue
            manager = candidate
            break
    try:
        if manager == "rpm":
            packages = rpm_package_list()
        elif manager == "apt":
            packages = apt_package_list()
        elif manager:
            # A manager was named (or mis-detected) but is not supported:
            # report a skip rather than a hard failure.
            results['msg'] = 'Unsupported package manager: %s' % manager
            results['skipped'] = True
        else:
            module.fail_json(msg='Could not detect supported package manager')
    except Exception as e:
        from traceback import format_tb
        module.fail_json(msg='Failed to retrieve packages: %s' % to_text(e),
                        exception=format_tb(sys.exc_info()[2]))
    # Publishing under ansible_facts overrides facts left over from any
    # earlier run of an OS-level package manager module.
    results['ansible_facts'] = {'packages': packages}
    module.exit_json(**results)
# Allow the file to run as a stand-alone Ansible module script.
if __name__ == '__main__':
    main()
|
le9i0nx/ansible
|
lib/ansible/modules/packaging/os/package_facts.py
|
Python
|
gpl-3.0
| 7,114
|
[
"Brian"
] |
142663de42f65089ac7ad6397a05261b303917832f9456403fc40bb7d2144c95
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""exceptions handling (raising, catching, exceptions classes) checker
"""
import sys
from logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
import astroid
from astroid import YES, Instance, unpack_infer
from pylint.checkers import BaseChecker
from pylint.checkers.utils import is_empty, is_raising, check_messages
from pylint.interfaces import IAstroidChecker
def infer_bases(klass):
    """Recursively walk the base classes of *klass*, yielding the
    non-inferable (YES) nodes encountered along the way.

    .ancestors() is deliberately avoided: it drops the YES nodes, which
    are exactly what callers need in order to detect inference gaps.
    """
    for base_node in klass.bases:
        try:
            inferred = base_node.infer().next()
        except astroid.InferenceError:
            continue
        if inferred is YES:
            yield inferred
            continue
        for ancestor in infer_bases(inferred):
            yield ancestor
# True when running under Python 3 (enables py3-only checks like
# "raise ... from ..." context validation).
PY3K = sys.version_info >= (3, 0)
# Default set of exception names considered too broad to catch.
OVERGENERAL_EXCEPTIONS = ('Exception',)
# Message table for this checker: id -> (template, symbolic name, help,
# [optional version constraints]).
MSGS = {
    'E0701': ('Bad except clauses order (%s)',
              'bad-except-order',
              'Used when except clauses are not in the correct order (from the '
              'more specific to the more generic). If you don\'t fix the order, '
              'some exceptions may not be catched by the most specific handler.'),
    'E0702': ('Raising %s while only classes, instances or string are allowed',
              'raising-bad-type',
              'Used when something which is neither a class, an instance or a \
              string is raised (i.e. a `TypeError` will be raised).'),
    'E0703': ('Exception context set to something which is not an '
              'exception, nor None',
              'bad-exception-context',
              'Used when using the syntax "raise ... from ...", '
              'where the exception context is not an exception, '
              'nor None.',
              {'minversion': (3, 0)}),
    'E0710': ('Raising a new style class which doesn\'t inherit from BaseException',
              'raising-non-exception',
              'Used when a new style class which doesn\'t inherit from \
              BaseException is raised.'),
    'E0711': ('NotImplemented raised - should raise NotImplementedError',
              'notimplemented-raised',
              'Used when NotImplemented is raised instead of \
              NotImplementedError'),
    'E0712': ('Catching an exception which doesn\'t inherit from BaseException: %s',
              'catching-non-exception',
              'Used when a class which doesn\'t inherit from \
              BaseException is used as an exception in an except clause.'),
    'W0701': ('Raising a string exception',
              'raising-string',
              'Used when a string exception is raised.'),
    'W0702': ('No exception type(s) specified',
              'bare-except',
              'Used when an except clause doesn\'t specify exceptions type to \
              catch.'),
    'W0703': ('Catching too general exception %s',
              'broad-except',
              'Used when an except catches a too general exception, \
              possibly burying unrelated errors.'),
    'W0704': ('Except doesn\'t do anything',
              'pointless-except',
              'Used when an except clause does nothing but "pass" and there is\
              no "else" clause.'),
    'W0710': ('Exception doesn\'t inherit from standard "Exception" class',
              'nonstandard-exception',
              'Used when a custom exception class is raised but doesn\'t \
              inherit from the builtin "Exception" class.',
              {'maxversion': (3, 0)}),
    'W0711': ('Exception to catch is the result of a binary "%s" operation',
              'binary-op-exception',
              'Used when the exception to catch is of the form \
              "except A or B:". If intending to catch multiple, \
              rewrite as "except (A, B):"'),
    'W0712': ('Implicit unpacking of exceptions is not supported in Python 3',
              'unpacking-in-except',
              'Python3 will not allow implicit unpacking of exceptions in except '
              'clauses. '
              'See http://www.python.org/dev/peps/pep-3110/',
              {'maxversion': (3, 0)}),
    'W0713': ('Indexing exceptions will not work on Python 3',
              'indexing-exception',
              'Indexing exceptions will not work on Python 3. Use '
              '`exception.args[index]` instead.',
              {'maxversion': (3, 0)}),
    }
# Builtin exceptions live in a differently-named module on py2 vs py3.
if sys.version_info < (3, 0):
    EXCEPTIONS_MODULE = "exceptions"
else:
    EXCEPTIONS_MODULE = "builtins"
class ExceptionsChecker(BaseChecker):
    """checks for
    * excepts without exception filter
    * type of raise argument : string, Exceptions, other values
    """
    # Plugin wiring consumed by pylint's registration machinery.
    __implements__ = IAstroidChecker
    name = 'exceptions'
    msgs = MSGS
    priority = -4
    options = (('overgeneral-exceptions',
                {'default' : OVERGENERAL_EXCEPTIONS,
                 'type' :'csv', 'metavar' : '<comma-separated class names>',
                 'help' : 'Exceptions that will emit a warning '
                          'when being caught. Defaults to "%s"' % (
                              ', '.join(OVERGENERAL_EXCEPTIONS),)}
               ),
              )
    @check_messages('raising-string', 'nonstandard-exception', 'raising-bad-type',
                    'raising-non-exception', 'notimplemented-raised', 'bad-exception-context')
    def visit_raise(self, node):
        """visit raise possibly inferring value"""
        # ignore empty raise
        if node.exc is None:
            return
        # Python 3 only: validate the context of "raise X from Y".
        if PY3K and node.cause:
            try:
                cause = node.cause.infer().next()
            except astroid.InferenceError:
                pass
            else:
                if cause is YES:
                    return
                if isinstance(cause, astroid.Const):
                    # "raise X from None" is legal; other constants are not.
                    if cause.value is not None:
                        self.add_message('bad-exception-context',
                                         node=node)
                elif (not isinstance(cause, astroid.Class) and
                      not inherit_from_std_ex(cause)):
                    self.add_message('bad-exception-context',
                                     node=node)
        expr = node.exc
        if self._check_raise_value(node, expr):
            return
        else:
            # The raised expression itself was inconclusive; infer its
            # value and check that instead.
            try:
                value = unpack_infer(expr).next()
            except astroid.InferenceError:
                return
            self._check_raise_value(node, value)
    def _check_raise_value(self, node, expr):
        """check for bad values, string exception and class inheritance
        Returns True when *expr* was conclusive (good or bad), False when
        the caller should try to infer a value and re-check.
        """
        value_found = True
        if isinstance(expr, astroid.Const):
            value = expr.value
            if isinstance(value, str):
                self.add_message('raising-string', node=node)
            else:
                self.add_message('raising-bad-type', node=node,
                                 args=value.__class__.__name__)
        elif (isinstance(expr, astroid.Name) and \
                 expr.name in ('None', 'True', 'False')) or \
                isinstance(expr, (astroid.List, astroid.Dict, astroid.Tuple,
                                  astroid.Module, astroid.Function)):
            self.add_message('raising-bad-type', node=node, args=expr.name)
        elif ((isinstance(expr, astroid.Name) and expr.name == 'NotImplemented')
              or (isinstance(expr, astroid.CallFunc) and
                  isinstance(expr.func, astroid.Name) and
                  expr.func.name == 'NotImplemented')):
            self.add_message('notimplemented-raised', node=node)
        elif isinstance(expr, astroid.BinOp) and expr.op == '%':
            # "raise 'msg %s' % arg" — still a string exception.
            self.add_message('raising-string', node=node)
        elif isinstance(expr, (Instance, astroid.Class)):
            if isinstance(expr, Instance):
                expr = expr._proxied
            if (isinstance(expr, astroid.Class) and
                    not inherit_from_std_ex(expr) and
                    expr.root().name != BUILTINS_NAME):
                if expr.newstyle:
                    self.add_message('raising-non-exception', node=node)
                else:
                    self.add_message('nonstandard-exception', node=node)
            else:
                value_found = False
        else:
            value_found = False
        return value_found
    @check_messages('unpacking-in-except')
    def visit_excepthandler(self, node):
        """Visit an except handler block and check for exception unpacking."""
        if isinstance(node.name, (astroid.Tuple, astroid.List)):
            self.add_message('unpacking-in-except', node=node)
    @check_messages('indexing-exception')
    def visit_subscript(self, node):
        """ Look for indexing exceptions. """
        try:
            for infered in node.value.infer():
                if not isinstance(infered, astroid.Instance):
                    continue
                if inherit_from_std_ex(infered):
                    self.add_message('indexing-exception', node=node)
        except astroid.InferenceError:
            return
    @check_messages('bare-except', 'broad-except', 'pointless-except',
                    'binary-op-exception', 'bad-except-order',
                    'catching-non-exception')
    def visit_tryexcept(self, node):
        """check for empty except"""
        exceptions_classes = []
        nb_handlers = len(node.handlers)
        for index, handler in enumerate(node.handlers):
            # single except doing nothing but "pass" without else clause
            if nb_handlers == 1 and is_empty(handler.body) and not node.orelse:
                self.add_message('pointless-except', node=handler.type or handler.body[0])
            if handler.type is None:
                if nb_handlers == 1 and not is_raising(handler.body):
                    self.add_message('bare-except', node=handler)
                # check if a "except:" is followed by some other
                # except
                elif index < (nb_handlers - 1):
                    msg = 'empty except clause should always appear last'
                    self.add_message('bad-except-order', node=node, args=msg)
            elif isinstance(handler.type, astroid.BoolOp):
                # "except A or B:" — a binary operation, not a tuple.
                self.add_message('binary-op-exception', node=handler, args=handler.type.op)
            else:
                try:
                    excs = list(unpack_infer(handler.type))
                except astroid.InferenceError:
                    continue
                for exc in excs:
                    # XXX skip other non class nodes
                    if exc is YES or not isinstance(exc, astroid.Class):
                        continue
                    exc_ancestors = [anc for anc in exc.ancestors()
                                     if isinstance(anc, astroid.Class)]
                    # An earlier handler catching an ancestor class shadows
                    # this more specific one.
                    for previous_exc in exceptions_classes:
                        if previous_exc in exc_ancestors:
                            msg = '%s is an ancestor class of %s' % (
                                previous_exc.name, exc.name)
                            self.add_message('bad-except-order', node=handler.type, args=msg)
                    if (exc.name in self.config.overgeneral_exceptions
                        and exc.root().name == EXCEPTIONS_MODULE
                        and nb_handlers == 1 and not is_raising(handler.body)):
                        self.add_message('broad-except', args=exc.name, node=handler.type)
                    if (not inherit_from_std_ex(exc) and
                        exc.root().name != BUILTINS_NAME):
                        # try to see if the exception is based on a C based
                        # exception, by inferring all the base classes and
                        # looking for inference errors
                        bases = infer_bases(exc)
                        fully_infered = all(inferit is not YES
                                            for inferit in bases)
                        if fully_infered:
                            self.add_message('catching-non-exception',
                                             node=handler.type,
                                             args=(exc.name, ))
                exceptions_classes += excs
def inherit_from_std_ex(node):
    """Return True when the given class node derives (directly or via any
    ancestor) from the builtin Exception/BaseException hierarchy."""
    is_builtin_exc = (node.name in ('Exception', 'BaseException')
                      and node.root().name == EXCEPTIONS_MODULE)
    if is_builtin_exc:
        return True
    return any(inherit_from_std_ex(parent)
               for parent in node.ancestors(recurs=False))
def register(linter):
    """Auto-registration hook required by pylint's plugin loader."""
    checker = ExceptionsChecker(linter)
    linter.register_checker(checker)
|
tianzhihen/python-mode
|
pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/exceptions.py
|
Python
|
lgpl-3.0
| 13,754
|
[
"VisIt"
] |
cefe7b78873eb3ede25fc829429b70f16b23a6628446891da883589d9e36ee91
|
# Authors: John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Quick Start Guide For Using This Module
=======================================
This module implements a Log Manager class which wraps the Python
logging module and provides some utility functions for use with
logging. All logging operations should be done through the
`LogManager` where available. *DO NOT create objects using the
Python logging module, the log manager will be unaware of them.*
This module was designed for ease of use while preserving advanced
functionality and performance. You must perform the following steps.
1. Import the log_manager module and instantiate *one* `LogManager`
instance for your application or library. The `LogManager` is
configured via `LogManager.configure()` whose values are
easily populated from command line options or a config file. You
can modify the configuration again at any point.
2. Create one or more output handlers via
`LogManager.create_log_handlers()` an easy to use yet powerful
interface.
3. In your code create loggers via `LogManager.get_logger()`. Since
loggers are normally bound to a class this method is optimized for
that case, all you need to do in the call ``__init__()`` is::
log_mgr.get_logger(self, True)
Then emitting messages is as simple as ``self.debug()`` or ``self.error()``
Example:
--------
::
# Step 1, Create log manager and configure it
prog_name = 'my_app'
log_mgr = LogManager(prog_name)
log_mgr.configure(dict(verbose=True))
# Step 2, Create handlers
log_mgr.create_log_handlers([dict(name='my_app stdout',
stream=sys.stdout,
level=logging.INFO),
dict(name='my_app file',
filename='my_app.log',
level=logging.DEBUG)])
# Step 3, Create and use a logger in your code
class FooBar:
def __init__(self, name):
log_mgr.get_logger(self, True)
self.info("I'm alive! %s", name)
foobar = FooBar('Dr. Frankenstein')
# Dump the log manager state for illustration
print
print log_mgr
Running the above code would produce::
<INFO>: I'm alive! Dr. Frankenstein
root_logger_name: my_app
configure_state: None
default_level: INFO
debug: False
verbose: True
number of loggers: 2
"my_app" [level=INFO]
"my_app.__main__.FooBar" [level=INFO]
number of handlers: 2
"my_app file" [level=DEBUG]
"my_app stdout" [level=INFO]
number of logger regexps: 0
*Note, Steps 1 & 2 were broken out for expository purposes.* You can
pass your handler configuration into `LogManager.configure()`. The above
could have been simpler and more compact.::
# Step 1 & 2, Create log manager, and configure it and handlers
prog_name = 'my_app'
log_mgr = LogManager(prog_name)
log_mgr.configure(dict(verbose=True,
handlers = [dict(name='my_app stdout',
stream=sys.stdout,
level=logging.INFO),
dict(name='my_app file',
filename='my_app.log',
                                        level=logging.DEBUG)]))
FAQ (Frequently Asked Questions)
================================
#. **Why is this better than logging.basicConfig? The short example
for the LogManager doesn't seem much different in complexity from
basicConfig?**
* You get independent logging namespaces. You can instantiate
multiple logging namespaces. If you use this module you'll be
isolated from other users of the Python logging module avoiding
conflicts.
* Creating and initializing loggers for classes is trivial. One
simple call creates the logger, configures it, and sets logging
methods on the class instance.
* You can easily configure individual loggers to different
     levels. For example turn on debugging for just the part of the
code you're working on.
* The configuration is both simple and powerful. You get many more
options than with basicConfig.
* You can dynamically reset the logging configuration during
execution, you're not forced to live with the config established
during program initialization.
* The manager optimizes the use of the logging objects, you'll
spend less time executing pointless logging code for messages
that won't be emitted.
* You can see the state of all the logging objects in your
namespace from one centrally managed location.
* You can configure a LogManager to use the standard logging root
logger and get all the benefits of this API.
#. **How do I turn on debug logging for a specific class without
affecting the rest of the logging configuration?**
Use a logger regular expression to bind a custom level to loggers
whose name matches the regexp. See `LogManager.configure()`
for details.
Lets say you want to set your Foo.Bar class to debug, then do
this::
log_mgr.configure(dict(logger_regexps=[(r'Foo\.Bar', 'debug')]))
#. **I set the default_level but all my loggers are configured
with a higher level, what happened?**
You probably don't have any handlers defined at or below the
default_level. The level set on a logger will never be
lower than the lowest level handler available to that logger.
#. **My logger's all have their level set to a huge integer, why?**
See above. Logger's will never have a level less than the level of
the handlers visible to the logger. If there are no handlers then
loggers can't output anything so their level is set to maxint.
#. **I set the default_level but all the loggers are configured
at INFO or DEBUG, what happened?**
The verbose and debug config flags set the default_level to
INFO and DEBUG respectively as a convenience.
#. **I'm not seeing messages output when I expect them to be, what's
wrong?**
For a message to be emitted the following 3 conditions must hold:
* Message level >= logger's level
* Message level >= handler's level
* The message was not elided by a filter
To verify the above conditions hold print out the log manager state
(e.g. print log_mgr). Locate your logger, what level is at? Locate
the handler you expected to see the message appear on, what level
is it?
A General Discussion of Python Logging
======================================
The design of this module is driven by how the Python logging module
works. The following discussion complements the Python Logging Howto,
fills in some missing information and covers strategies for
implementing different functionality along with the trade-offs
involved.
Understanding when & how log messages are emitted:
--------------------------------------------------
Loggers provide the application interface for logging. Every logger
object has the following methods debug(), info(), warning(), error(),
critical(), exception() and log() all of which can accept a format
string and arguments. Applications generate logging messages by
calling one of these methods to produce a formatted message.
A logger's effective level is the first explicitly set level found
when searching from the logger through it's ancestors terminating at
the root logger. The root logger always has an explicit level
(defaults to WARNING).
For a message to be emitted by a handler the following must be true:
The logger's effective level must >= message level and it must not
be filtered by a filter attached to the logger, otherwise the
message is discarded.
If the message survives the logger check it is passed to a list of
handlers. A handler will emit the message if the handler's level >=
message level and its not filtered by a filter attached to the
handler.
The list of handlers is determined thusly: Each logger has a list of
handlers (which may be empty). Starting with the logger the message
was bound to the message is passed to each of it's handlers. Then
the process repeats itself by traversing the chain of loggers
through all of it's ancestors until it reaches the root logger. The
logger traversal will be terminated if the propagate flag on a logger
is False (by default propagate is True).
Let's look at a hypothetical logger hierarchy (tree)::
A
/ \\
B D
/
C
There are 4 loggers and 3 handlers
Loggers:
+-------+---------+---------+-----------+----------+
|Logger | Level | Filters | Propagate | Handlers |
+=======+=========+=========+===========+==========+
| A | WARNING | [] | False | [h1,h2] |
+-------+---------+---------+-----------+----------+
| A.B | ERROR | [] | False | [h3] |
+-------+---------+---------+-----------+----------+
| A.B.C | DEBUG | [] | True | |
+-------+---------+---------+-----------+----------+
| A.D | | [] | True | |
+-------+---------+---------+-----------+----------+
Handlers:
+---------+---------+---------+
| Handler | Level | Filters |
+=========+=========+=========+
| h1 | ERROR | [] |
+---------+---------+---------+
| h2 | WARNING | [] |
+---------+---------+---------+
| h3 | DEBUG | [] |
+---------+---------+---------+
Each of the loggers and handlers have empty filter lists in this
example thus the filter checks will always pass.
If a debug message is posted logger A.B.C the following would
happen. The effective level is determined. Since it does not have a
level set it's parent (A.B) is examined which has ERROR set,
therefore the effective level of A.B.C is ERROR. Processing
immediately stops because the logger's level of ERROR does not
permit debug messages.
If an error message is posted on logger A.B.C it passes the logger
level check and filter check therefore the message is passed along
to the handlers. The list of handlers on A.B.C is empty so no
handlers are called at this position in the logging hierarchy. Logger
A.B.C's propagate flag is True so parent logger A.B handlers are
invoked. Handler h3's level is DEBUG, it passes both the level and
filter check thus h3 emits the message. Processing now stops because
logger A.B's propagate flag is False.
Now let's see what would happen if a warning message was posted on
logger A.D. It's effective level is WARNING because logger A.D does
not have a level set, it's only ancestor is logger A, the root
logger which has a level of WARNING, thus logger's A.D effective
level is WARNING. Logger A.D has no handlers, it's propagate flag is
True so the message is passed to it's parent logger A, the root
logger. Logger A has two handlers h1 and h2. The level of h1 is
ERROR so the warning message is discarded by h1, nothing is emitted
by h1. Next handler h2 is invoked, it's level is WARNING so it
passes both the level check and the filter check, thus h2 emits the
warning message.
How to configure independent logging spaces:
--------------------------------------------
A common idiom is to hang all handlers off the root logger and set
the root loggers level to the desired verbosity. But this simplistic
approach runs afoul of several problems, in particular who controls
logging (accomplished by configuring the root logger). The usual
advice is to check and see if the root logger has any handlers set,
if so someone before you has configured logging and you should
inherit their configuration, all you do is add your own loggers
without any explicitly set level. If the root logger doesn't have
handlers set then you go ahead and configure the root logger to your
preference. The idea here is if your code is being loaded by another
application you want to defer to that applications logging
configuration but if your code is running stand-alone you need to
set up logging yourself.
But sometimes your code really wants it's own logging configuration
managed only by yourself completely independent of any logging
configuration by someone who may have loaded your code. Even if you
code is not designed to be loaded as a package or module you may be
faced with this problem. A trivial example of this is running your
code under a unit test framework which itself uses the logging
facility (remember there is only ever one root logger in any Python
process).
Fortunately there is a simple way to accommodate this. All you need
to do is create a "fake" root in the logging hierarchy which belongs
to you. You set your fake root's propagate flag to False, set a
level on it and you'll hang your handlers off this fake root. Then
when you create your loggers each should be a descendant of this
fake root. Now you've completely isolated yourself in the logging
hierarchy and won't be influenced by any other logging
configuration. As an example let's say your code is called
'foo' and so you name your fake root logger 'foo'.::
my_root = logging.getLogger('foo') # child of the root logger
my_root.propagate = False
my_root.setLevel(logging.DEBUG)
my_root.addHandler(my_handler)
Then every logger you create should have 'foo.' prepended to it's
name. If you're logging my module your module's logger would be
created like this::
module_logger = logging.getLogger('foo.%s' % __module__)
If you're logging by class then your class logger would be::
class_logger = logging.getLogger('foo.%s.%s' % (self.__module__, self.__class__.__name__))
How to set levels:
------------------
An instinctive or simplistic assumption is to set the root logger to a
high logging level, for example ERROR. After all you don't want to be
spamming users with debug and info messages. Let's also assume you've
got two handlers, one for a file and one for the console, both
attached to the root logger (a common configuration) and you haven't
set the level on either handler (in which case the handler will emit
all levels).
But now let's say you want to turn on debugging, but just to the file,
the console should continue to only emit error messages.
You set the root logger's level to DEBUG. The first thing you notice is
that you're getting debug message both in the file and on the console
because the console's handler does not have a level set. Not what you
want.
So you go back restore the root loggers level back to it's original
ERROR level and set the file handler's level to DEBUG and the console
handler's level to ERROR. Now you don't get any debug messages because
the root logger is blocking all messages below the level of ERROR and
doesn't invoke any handlers. The file handler attached to the root
logger even though it's level is set to DEBUG never gets a chance to
process the message.
*IMPORTANT:* You have to set the logger's level to the minimum of all
the attached handler's levels, otherwise the logger may block the
message from ever reaching any handler.
In this example the root logger's level must be set to DEBUG, the file
handler's level to DEBUG, and the console handler's level set to
ERROR.
Now let's take a more real world example which is a bit more
complicated. It's typical to assign loggers to every major class. In
fact this is the design strategy of Java logging from which the Python
logging is modeled. In a large complex application or library that
means dozens or possibly hundreds of loggers. Now lets say you need to
trace what is happening with one class. If you use the simplistic
configuration outlined above you'll set the log level of the root
logger and one of the handlers to debug. Now you're flooded with debug
message from every logger in the system when all you wanted was the
debug messages from just one class.
How can you get fine grained control over which loggers emit debug
messages? Here are some possibilities:
(1) Set a filter.
.................
When a message is propagated to a logger in the hierarchy first the
loggers level is checked. If logger level passes then the logger
iterates over every handler attached to the logger first checking the
handler level. If the handler level check passes then the filters
attached to the handler are run.
Filters are passed the record (i.e. the message), it does not have
access to either the logger or handler it's executing within. You
can't just set the filter to only pass the records of the classes you
want to debug because that would block other important info, warning,
error and critical messages from other classes. The filter would have
to know about the "global" log level which is in effect and also pass
any messages at that level or higher. It's unfortunate the filter
cannot know the level of the logger or handler it's executing inside
of.
Also logger filters only are applied to the logger they are attached
to, i.e. the logger the message was generated on. They do not get
applied to any ancestor loggers. That means you can't just set a
filter on the root logger. You have to either set the filters on the
handlers or on every logger created.
The filter first checks the level of the message record. If it's
greater than debug it passes it. For debug messages it checks the set
of loggers which have debug messages enabled, if the message record
was generated on one of those loggers it passes the record, otherwise
it blocks it.
The only question is whether you attach the filter to every logger or
to a handful of handlers. The advantage of attaching the filter to
every logger is efficiency, the time spent handling the message can be
short circuited much sooner if the message is filtered earlier in the
process. The advantage of attaching the filter to a handler is
simplicity, you only have to do that when a handler is created, not
every place in the code where a logger is created.
(2) Conditionally set the level of each logger.
...............................................
When loggers are created a check is performed to see if the logger is
in the set of loggers for which debug information is desired, if so
it's level is set to DEBUG, otherwise it's set to the global
level. One has to recall there really isn't a single global level if
you want some handlers to emit info and above, some handlers error and
above, etc. In this case if the logger is not in the set of logger's
emitting debug the logger level should be set to the next increment
above debug level.
A good question to ask would be why not just leave the logger's level
unset if it's not in the set of loggers to be debugged? After all it
will just inherit the root level right? There are two problems with
that. 1) It would actually inherit the level of any ancestor logger and if
an ancestor was set to debug you've effectively turned on debugging
for all children of that ancestor logger. There are times you might
want that behavior, where all your children inherit your level, but
there are many cases where that's not the behavior you want. 2) A more
pernicious problem exists. The logger your handlers are attached to
MUST be set to debug level, otherwise your debug messages will never
reach the handlers for output. Thus if you leave a loggers level unset
and let it inherit it's effective level from an ancestor it might very
well inherit the debug level from the root logger. That means you've
completely negated your attempt to selectively set debug logging on
specific loggers. Bottom line, you really have to set the level on
every logger created if you want fine grained control.
Approach 2 has some distinct performance advantages. First of all
filters are not used, this avoids a whole processing step and extra
filter function calls on every message. Secondly a logger level check
is a simple integer compare which is very efficient. Thirdly the
processing of a message can be short circuited very early in the
processing pipeline, no ancestor loggers will be invoked and no
handlers will be invoked.
The downside is some added complexity at logger creation time. But
this is easily mitigated by using a utility function or method to
create the logger instead of just calling logger.getLogger().
Like every thing else in computer science which approach you take boils
down to a series of trade offs, most around how your code is
organized. You might find it easier to set a filter on just one or two
handlers. It might be easier to modify the configuration during
execution if the logic is centralized in just a filter function, but
don't let that sway you too much because it's trivial to iterate over
every logger and dynamically reset it's log level.
Now at least you've got a basic understanding of how this stuff hangs
together and what your options are. That's not insignificant, when I
was first introduced to logging in Java and Python I found it
bewilderingly difficult to get it to do what I wanted.
John Dennis <jdennis@redhat.com>
'''
#-------------------------------------------------------------------------------
import sys
import os
import pwd
import logging
import re
import time
#-------------------------------------------------------------------------------
# Default format string applied to a handler when its config does not
# supply one (see LogManager.create_log_handlers).
LOGGING_DEFAULT_FORMAT = '%(levelname)s %(message)s'
# Maps a logging level name (lower case) to its numeric value; used by
# parse_log_level(). Note 'warn' and 'warning' are synonyms.
log_level_name_map = {
    'notset' : logging.NOTSET,
    'debug' : logging.DEBUG,
    'info' : logging.INFO,
    'warn' : logging.WARNING,
    'warning' : logging.WARNING,
    'error' : logging.ERROR,
    'critical' : logging.CRITICAL
}
# All standard numeric levels, ordered least to most severe.
log_levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)
# Names of the message-emitting methods a logger exposes.
logger_method_names = ('debug', 'info', 'warning', 'error', 'exception', 'critical')
#-------------------------------------------------------------------------------
def get_unique_levels(iterable):
    '''
    Given an iterable of objects containing a logging level return an
    ordered list (min to max) of unique levels.

    Objects lacking a level attribute, or whose level is NOTSET (which
    means "inherit"), do not contribute a level. Previously a missing
    attribute defaulted to sys.maxint which injected a bogus maxint
    entry into the result; that has been fixed.

    :parameters:
      iterable
        Iterable yielding objects with a logging level attribute.
    :returns:
      Ordered list (min to max) of unique levels.
    '''
    levels = set()
    for obj in iterable:
        # NOTSET doubles as the "no concrete level" marker for objects
        # without a level attribute.
        level = getattr(obj, 'level', logging.NOTSET)
        if level != logging.NOTSET:
            levels.add(level)
    return sorted(levels)
def get_minimum_level(iterable):
    '''
    Given an iterable of objects containing a logging level return the
    minimum level. If no levels are defined return maxint, i.e. a value
    larger than any real level, meaning "nothing constrains the level".

    :parameters:
      iterable
        Iterable yielding objects with a logging level attribute.
    :returns:
      The minimum level found, or maxint when no object defines one.
    '''
    # sys.maxint exists only on Python 2; fall back to sys.maxsize so
    # the helper also works on Python 3 interpreters.
    maxint = getattr(sys, 'maxint', sys.maxsize)
    min_level = maxint
    for obj in iterable:
        level = getattr(obj, 'level', maxint)
        # NOTSET means "inherit"; it does not constrain the minimum.
        if level != logging.NOTSET:
            if level < min_level:
                min_level = level
    return min_level
def parse_log_level(level):
    '''
    Given a log level either as a string or integer
    return a numeric logging level. The following case insensitive
    names are recognized::
        * notset
        * debug
        * info
        * warn
        * warning
        * error
        * critical
    A string containing an integer is also recognized, for example
    ``"10"`` would map to ``logging.DEBUG``
    The integer value must be in the range [``logging.NOTSET``,
    ``logging.CRITICAL``] otherwise a ValueError will be raised.

    :parameters:
      level
        basestring or integer, level value to convert
    :returns:
      integer level value
    '''
    # Is it a string representation of an integer?
    # If so convert to an int.
    if isinstance(level, basestring):
        try:
            level = int(level)
        except ValueError:
            # Not numeric; fall through to the name lookup below.
            pass
    # If it's a string lookup it's name and map to logging level
    # otherwise validate the integer value is in range.
    if isinstance(level, basestring):
        result = log_level_name_map.get(level.lower()) #pylint: disable=E1103
        if result is None:
            raise ValueError('unknown log level (%s)' % level)
        return result
    elif isinstance(level, int):
        if level < logging.NOTSET or level > logging.CRITICAL:
            raise ValueError('log level (%d) out of range' % level)
        return level
    else:
        raise TypeError('log level must be basestring or int, got (%s)' % type(level))
#-------------------------------------------------------------------------------
def logging_obj_str(obj):
    '''
    Build a human readable one-line description of a logging object.

    Logger and Handler objects lack a useful __str__; this helper
    renders any object carrying a level attribute as its name (or repr
    when no name was ever assigned) plus the symbolic level. File
    handlers additionally show the file they write to.

    :parameters:
      obj
        Object with a logging level attribute
    :returns:
      string describing the object
    '''
    display_name = getattr(obj, 'name', repr(obj))
    parts = ['"%s"' % display_name,
             '[level=%s]' % logging.getLevelName(obj.level)]
    if isinstance(obj, logging.FileHandler):
        parts.append('filename="%s"' % obj.baseFilename)
    return ' '.join(parts)
#-------------------------------------------------------------------------------
class LogManager(object):
'''
This class wraps the functionality in the logging module to
provide an easier to use API for logging while providing advanced
features including a independent namespace. Each application or
library wishing to have it's own logging namespace should instantiate
exactly one instance of this class and use it to manage all it's
logging.
Traditionally (or simplistically) logging was set up with a single
global root logger with output handlers bound to it. The global
root logger (whose name is the empty string) was shared by all
    code in a loaded process. Only the global unnamed root logger
had a level set on it, all other loggers created inherited this
global level. This can cause conflicts in more complex scenarios
where loaded code wants to maintain it's own logging configuration
independent of whomever loaded it's code. By using only a single
logger level set on the global root logger it was not possible to
have fine grained control over individual logger output. The
pattern seen with this simplistic setup has been frequently copied
despite being clumsy and awkward. The logging module has the tools
    available to support a more sophisticated and useful model, but it
requires an overarching framework to manage. This class provides
such a framework.
The features of this logging manager are:
* Independent logging namespace.
* Simplifed method to create handlers.
* Simple setup for applications with command line args.
      * Sophisticated handler configuration
(e.g. file ownership & permissions)
* Easy fine grained control of logger output
(e.g. turning on debug for just 1 or 2 loggers)
* Holistic management of the interrelationships between
logging components.
* Ability to dynamically adjust logging configuration in
a running process.
An independent namespace is established by creating a independent
root logger for this manager (root_logger_name). This root logger
    is a direct child of the global unnamed root logger. All loggers
created by this manager will be descendants of this managers root
logger. The managers root logger has it's propagate flag set
to False which means all loggers and handlers created by this
manager will be isolated in the global logging tree.
Log level management:
---------------------
Traditionally loggers inherited their logging level from the root
logger. This was simple but made it impossible to independently
control logging output from different loggers. If you set the root
level to DEBUG you got DEBUG output from every logger in the
system, often overwhelming in it's voluminous output. Many times
you want to turn on debug for just one class (a common idom is to
have one logger per class). To achieve the fine grained control
you can either use filters or set a logging level on every logger
(see the module documentation for the pros and cons). This manager
sets a log level on every logger instead of using level
inheritence because it's more efficient at run time.
Global levels are supported via the verbose and debug flags
setting every logger level to INFO and DEBUG respectively. Fine
grained level control is provided via regular expression matching
on logger names (see `configure()` for the details. For
example if you want to set a debug level for the foo.bar logger
set a regular expression to match it and bind it to the debug
level. Note, the global verbose and debug flags always override
the regular expression level configuration. Do not set these
global flags if you want fine grained control.
The manager maintains the minimum level for all loggers under it's
control and the minimum level for all handlers under it's
control. The reason it does this is because there is no point in
generating debug messages on a logger if there is no handler
defined which will output a debug message. Thus when the level is
set on a logger it takes into consideration the set of handlers
that logger can emit to.
IMPORTANT: Because the manager maintains knowledge about all the
loggers and handlers under it's control it is essential you use
only the managers interface to modify a logger or handler and not
set levels on the objects directly, otherwise the manger will not
know to visit every object under it's control when a configuraiton
changes (see '`LogManager.apply_configuration()`).
Example Usage::
# Create a log managers for use by 'my_app'
log_mgr = LogManager('my_app')
# Create a handler to send error messages to stderr
log_mgr.create_log_handlers([dict(stream=sys.stdout,
level=logging.ERROR)])
# Create logger for a class
class Foo(object):
def __init__(self):
self.log = log_mgr.get_logger(self)
'''
    def __init__(self, root_logger_name='', configure_state=None):
        '''
        Create a new LogManager instance using root_logger_name as the
        parent of all loggers maintained by the manager.
        Only one log manager should be created for each logging namespace.
        :parameters:
          root_logger_name
            The name of the root logger. All loggers will be prefixed
            by this name.
          configure_state
            Used by clients of the log manager to track the
            configuration state, may be any object.
        :return:
          LogManager instance
        '''
        self.loggers = {} # dict, key is logger name, value is logger object
        self.handlers = {} # dict, key is handler name, value is handler object
        self.configure_state = configure_state
        self.root_logger_name = root_logger_name
        # NOTE: this assignment runs the default_level property setter,
        # which calls apply_configuration(); that is safe only because
        # self.loggers is still empty at this point. Keep the ordering.
        self.default_level = 'error'
        self.debug = False
        self.verbose = False
        self.logger_regexps = []
        # Create (and register) the manager's own root logger.
        self.root_logger = self.get_logger(self.root_logger_name)
        # Stop loggers and handlers from searching above our root
        self.root_logger.propagate = False
    def _get_default_level(self):
        # Getter backing the default_level property.
        return self._default_level
    def _set_default_level(self, value):
        # Setter backing the default_level property: accepts a level name
        # or number (see parse_log_level) and immediately reapplies the
        # configuration so every managed logger picks up the new default.
        level = parse_log_level(value)
        self._default_level = level
        self.apply_configuration()
    default_level = property(_get_default_level, _set_default_level,
                        doc='see log_manager.parse_log_level()` for details on how the level can be specified during assignement.')
def set_default_level(self, level, configure_state=None):
'''
Reset the default logger level, updates all loggers.
Note, the default_level may also be set by assigning to the
default_level attribute but that does not update the configure_state,
this method is provided as a convenience to simultaneously set the
configure_state if so desired.
:parameters:
level
The new default level for the log manager. See
`log_manager.parse_log_level()` for details on how the
level can be specified.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
level = parse_log_level(level)
self._default_level = level
self.apply_configuration(configure_state)
    def __str__(self):
        '''
        When str() is called on the LogManager output its state:
        the scalar settings first, then sorted listings of the managed
        loggers, handlers and the (regexp, level) pairs.
        '''
        text = ''
        text += 'root_logger_name: %s\n' % (self.root_logger_name)
        text += 'configure_state: %s\n' % (self.configure_state)
        text += 'default_level: %s\n' % (logging.getLevelName(self.default_level))
        text += 'debug: %s\n' % (self.debug)
        text += 'verbose: %s\n' % (self.verbose)
        text += 'number of loggers: %d\n' % (len(self.loggers))
        # Sort the human readable descriptions for stable output.
        loggers = [logging_obj_str(x) for x in self.loggers.values()]
        loggers.sort()
        for logger in loggers:
            text += '    %s\n' % (logger)
        text += 'number of handlers: %d\n' % (len(self.handlers))
        handlers = [logging_obj_str(x) for x in self.handlers.values()]
        handlers.sort()
        for handler in handlers:
            text += '    %s\n' % (handler)
        text += 'number of logger regexps: %d\n' % (len(self.logger_regexps))
        for regexp, level in self.logger_regexps:
            text += '    "%s" => %s\n' % (regexp, logging.getLevelName(level))
        return text
def configure(self, config, configure_state=None):
'''
The log manager is initialized from key,value pairs in the
config dict. This may be called any time to modify the
logging configuration at run time.
The supported entries in the config dict are:
default_level
The default level applied to a logger when not indivdually
configured. The verbose and debug config items override
the default level. See `log_manager.parse_log_level()` for
details on how the level can be specified.
verbose
Boolean, if True sets default_level to INFO.
debug
Boolean, if True sets default_level to DEBUG.
logger_regexps
List of (regexp, level) tuples. This is a an ordered list
regular expressions used to match against a logger name to
configure the logger's level. The first regexp in the
sequence which matches the logger name will use the the
level bound to that regexp to set the logger's level. If
no regexp matches the logger name then the logger will be
assigned the default_level.
The regular expression comparision is performed with the
re.search() function which means the match can be located
anywhere in the name string (as opposed to the start of
the string). Do not forget to escape regular
expression metacharacters when appropriate. For example
dot ('.') is used to seperate loggers in a logging
hierarchy path (e.g. a.b.c)
Examples::
# To match exactly the logger a.b.c and set it to DEBUG:
logger_regexps = [(r'^a\.b\.c$', 'debug')]
# To match any child of a.b and set it to INFO:
logger_regexps = [(r'^a\.b\..*', 'info')]
# To match any leaf logger with the name c and set it to level 5:
logger_regexps = [(r'\.c$', 5)]
handlers
List of handler config dicts or (config, logger)
tuples. See `create_log_handlers()` for details
of a hanlder config.
The simple form where handlers is a list of dicts each
handler is bound to the log mangers root logger (see
`create_log_handlers()` optional ``logger``
parameter). If you want to bind each handler to a specific
logger other then root handler then group the handler config
with a logger in a (config, logger) tuple. The logger may be
either a logger name or a logger instance. The following are
all valid methods of passing handler configuration.::
# List of 2 config dicts; both handlers bound to root logger
[{}, {}]
# List of 2 tuples; first handler bound to logger_name1
# by name, second bound to logger2 by object.
[({}, 'logger_name1'), ({}, logger2']
# List of 1 dict, 1 tuple; first bound to root logger,
# second bound to logger_name by name
[{}, ({}, 'logger_name']
:parameters:
config
Dict of <key,value> pairs describing the configuration.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
for attr in ('debug', 'verbose', 'logger_regexps'):
value = config.get(attr)
if value is not None:
setattr(self, attr, value)
attr = 'default_level'
value = config.get(attr)
if value is not None:
try:
level = parse_log_level(value)
except Exception, e:
raise ValueError("could not set %s (%s)" % (attr, e))
setattr(self, attr, level)
attr = 'handlers'
handlers = config.get(attr)
if handlers is not None:
for item in handlers:
logger = self.root_logger
config = None
if isinstance(item, dict):
config = item
elif isinstance(item, tuple):
if len(item) != 2:
raise ValueError('handler tuple must have exactly 2 items, got "%s"' % item)
config = item[0]
logger = item[1]
else:
raise TypeError('expected dict or tuple for handler item, got "%s", handlers=%s' % \
type(item), value)
if not isinstance(config, dict):
raise TypeError('expected dict for handler config, got "%s"', type(config))
if isinstance(logger, basestring):
logger = self.get_logger(logger)
else:
if not isinstance(logger, logging.Logger):
raise TypeError('expected logger name or logger object in %s' % item)
self.create_log_handlers([config], logger, configure_state)
if self.verbose:
self.default_level = logging.INFO
if self.debug:
self.default_level = logging.DEBUG
self.apply_configuration(configure_state)
def create_log_handlers(self, configs, logger=None, configure_state=None):
'''
Create new handlers and attach them to a logger (log mangers
root logger by default).
*Note, you may also pass the handler configs to `LogManager.configure()`.*
configs is an iterable yielding a dict. Each dict configures a
handler. Currently two types of handlers are supported:
* stream
* file
Which type of handler is created is determined by the presence of
the ``stream`` or ``filename`` in the dict.
Configuration keys:
===================
Handler type keys:
------------------
Exactly of the following must present in the config dict:
stream
Use the specified stream to initialize the StreamHandler.
filename
Specifies that a FileHandler be created, using the specified
filename.
Common keys:
------------
name
Set the name of the handler. This is optional but can be
useful when examining the logging configuration.
For files defaults to ``'file:absolute_path'`` and for streams
it defaults to ``'stream:stream_name'``
format
Use the specified format string for the handler.
time_zone_converter
Log record timestamps are seconds since the epoch in the UTC
time zone stored as floating point values. When the formatter
inserts a timestamp via the %(asctime)s format substitution it
calls a time zone converter on the timestamp which returns a
time.struct_time value to pass to the time.strftime function
along with the datefmt format conversion string. The time
module provides two functions with this signature,
time.localtime and time.gmtime which performs a conversion to
local time and UTC respectively. time.localtime is the default
converter. Setting the time zone converter to time.gmtime is
appropriate for date/time strings in UTC. The
time_zone_converter attribute may be any function with the
correct signature. Or as a convenience you may also pass a
string which will select either the time.localtime or the
time.gmtime converter. The case insenstive string mappings
are::
'local' => time.localtime
'localtime' => time.localtime
'gmt' => time.gmtime
'gmtime' => time.gmtime
'utc' => time.gmtime
datefmt
Use the specified time.strftime date/time format when
formatting a timestamp via the %(asctime)s format
substitution. The timestamp is first converted using the
time_zone_converter to either local or UTC
level
Set the handler logger level to the specified level. May be
one of the following strings: 'debug', 'info', 'warn',
'warning', 'error', 'critical' or any of the logging level
constants. Thus level='debug' is equivalent to
level=logging.DEBUG. Defaults to self.default_level.
File handler keys:
------------------
filemode
Specifies the mode to open the file. Defaults to 'a' for
append, use 'w' for write.
permission
Set the permission bits on the file (i.e. chmod).
Must be a valid integer (e.g. 0660 for rw-rw----)
user
Set the user owning the file. May be either a numeric uid or a
basestring with a user name in the passwd file.
group
Set the group associated with the file, May be either a
numeric gid or a basestring with a group name in the groups
file.
Examples:
---------
The following shows how to set two handlers, one for a file
(ipa.log) at the debug log level and a second handler set to
stdout (e.g. console) at the info log level. (One handler sets it
level with a simple name, the other with a logging constant just
to illustrate the flexibility) ::
# Get a root logger
log_mgr = LogManger('my_app')
# Create the handlers
log_mgr.create_log_handlers([dict(filename='my_app.log',
level='info',
user='root',
group='root',
permission=0600,
time_zone_converter='utc',
datefmt='%Y-%m-%dT%H:%M:%SZ', # ISO 8601
format='<%(levelname)s> [%(asctime)s] module=%(name)s "%(message)s"'),
dict(stream=sys.stdout,
level=logging.ERROR,
format='%(levelname)s: %(message)s')])
# Create a logger for my_app.foo.bar
foo_bar_log = log_mgr.get_logger('foo.bar')
root_logger.info("Ready to process requests")
foo_bar_log.error("something went boom")
In the file my_app.log you would see::
<INFO> [2011-10-26T01:39:00Z] module=my_app "Ready to process requests"
<ERROR> [2011-10-26T01:39:00Z] module=may_app.foo.bar "something went boom"
On the console you would see::
ERROR: something went boom
:parameters:
configs
Sequence of dicts (any iterable yielding a dict). Each
dict creates one handler and contains the configuration
parameters used to create that handler.
logger
If unspecified the handlers will be attached to the
LogManager.root_logger, otherwise the handlers will be
attached to the specified logger.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
:return:
The list of created handers.
'''
if logger is None:
logger = self.root_logger
handlers = []
# Iterate over handler configurations.
for cfg in configs:
# File or stream handler?
filename = cfg.get('filename')
if filename:
if cfg.has_key("stream"):
raise ValueError("both filename and stream are specified, must be one or the other, config: %s" % cfg)
path = os.path.abspath(filename)
filemode = cfg.get('filemode', 'a')
handler = logging.FileHandler(path, filemode)
# Set the handler name
name = cfg.get("name")
if name is None:
name = 'file:%s' % (path)
handler.name = name
# Path should now exist, set ownership and permissions if requested.
# Set uid, gid (e.g. chmod)
uid = gid = None
user = cfg.get('user')
group = cfg.get('group')
if user is not None:
if isinstance(user, basestring):
pw = pwd.getpwnam(user)
uid = pw.pw_uid
elif isinstance(user, int):
uid = user
else:
raise TypeError("user (%s) is not int or basestring" % user)
if group is not None:
if isinstance(group, basestring):
pw = pwd.getpwnam(group)
gid = pw.pw_gid
elif isinstance(group, int):
gid = group
else:
raise TypeError("group (%s) is not int or basestring" % group)
if uid is not None or gid is not None:
if uid is None:
uid = -1
if gid is None:
gid = -1
os.chown(path, uid, gid)
# Set file permissions (e.g. mode)
permission = cfg.get('permission')
if permission is not None:
os.chmod(path, permission)
else:
stream = cfg.get("stream")
if stream is None:
raise ValueError("neither file nor stream specified in config: %s" % cfg)
handler = logging.StreamHandler(stream)
# Set the handler name
name = cfg.get("name")
if name is None:
name = 'stream:%s' % (stream)
handler.name = name
# Add the handler
handlers.append(handler)
# Configure message formatting on the handler
format = cfg.get("format", LOGGING_DEFAULT_FORMAT)
datefmt = cfg.get("datefmt", None)
formatter = logging.Formatter(format, datefmt)
time_zone_converter = cfg.get('time_zone_converter', time.localtime)
if isinstance(time_zone_converter, basestring):
converter = {'local' : time.localtime,
'localtime' : time.localtime,
'gmt' : time.gmtime,
'gmtime' : time.gmtime,
'utc' : time.gmtime}.get(time_zone_converter.lower())
if converter is None:
raise ValueError("invalid time_zone_converter name (%s)" % \
time_zone_converter)
elif callable(time_zone_converter):
converter = time_zone_converter
else:
raise ValueError("time_zone_converter must be basestring or callable, not %s" % \
type(time_zone_converter))
formatter.converter = converter
handler.setFormatter(formatter)
# Set the logging level
level = cfg.get('level')
if level is not None:
try:
level = parse_log_level(level)
except Exception, e:
print >>sys.stderr, 'could not set handler log level "%s" (%s)' % (level, e)
level = None
if level is None:
level = self.default_level
handler.setLevel(level)
for handler in handlers:
if handler.name in self.handlers:
raise ValueError('handler "%s" already exists' % handler.name)
logger.addHandler(handler)
self.handlers[handler.name] = handler
self.apply_configuration(configure_state)
return handlers
def get_handler(self, handler_name):
'''
Given a handler name return the handler object associated with
it.
:parameters:
handler_name
Name of the handler to look-up.
:returns:
The handler object associated with the handler name.
'''
handler = self.handlers.get(handler_name)
if handler is None:
raise KeyError('handler "%s" is not defined' % handler_name)
return handler
def set_handler_level(self, handler_name, level, configure_state=None):
'''
Given a handler name, set the handler's level, return previous level.
:parameters:
handler_name
Name of the handler to look-up.
level
The new level for the handler. See
`log_manager.parse_log_level()` for details on how the
level can be specified.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
:returns:
The handler's previous level
'''
handler = self.get_handler(handler_name)
level = parse_log_level(level)
prev_level = handler.level
handler.setLevel(level)
self.apply_configuration(configure_state)
return prev_level
def get_loggers_with_handler(self, handler):
'''
Given a handler return a list of loggers that hander is bound to.
:parameters:
handler
The name of a handler or a handler object.
:returns:
List of loggers with the handler is bound to.
'''
if isinstance(handler, basestring):
handler = self.get_handler(handler)
elif isinstance(handler, logging.Handler):
if not handler in self.handlers.values():
raise ValueError('handler "%s" is not managed by this log manager' % \
logging_obj_str(handler))
else:
raise TypeError('handler must be basestring or Handler object, got %s' % type(handler))
loggers = []
for logger in self.loggers.values():
if handler in logger.handlers:
loggers.append(logger)
return loggers
def remove_handler(self, handler, logger=None, configure_state=None):
'''
Remove the named handler. If logger is unspecified the handler
will be removed from all managed loggers, otherwise it will be
removed from only the specified logger.
:parameters:
handler
The name of the handler to be removed or the handler object.
logger
If unspecified the handler is removed from all loggers,
otherwise the handler is removed from only this logger.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
if isinstance(handler, basestring):
handler = self.get_handler(handler)
elif not isinstance(handler, logging.Handler):
raise TypeError('handler must be basestring or Handler object, got %s' % type(handler))
handler_name = handler.name
if handler_name is None:
raise ValueError('handler "%s" does not have a name' % logging_obj_str(handler))
loggers = self.get_loggers_with_handler(handler)
if logger is None:
for logger in loggers:
logger.removeHandler(handler)
del self.handlers[handler_name]
else:
if not logger in loggers:
raise ValueError('handler "%s" is not bound to logger "%s"' % \
(handler_name, logging_obj_str(logger)))
logger.removeHandler(handler)
if len(loggers) == 1:
del self.handlers[handler_name]
self.apply_configuration(configure_state)
def apply_configuration(self, configure_state=None):
'''
Using the log manager's internal configuration state apply the
configuration to all the objects managed by the log manager.
:parameters:
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
if configure_state is not None:
self.configure_state = configure_state
for logger in self.loggers.values():
self._set_configured_logger_level(logger)
def get_configured_logger_level(self, name):
'''
Given a logger name return it's level as defined by the
`LogManager` configuration.
:parameters:
name
logger name
:returns:
log level
'''
level = self.default_level
for regexp, config_level in self.logger_regexps:
if re.search(regexp, name):
level = config_level
break
level = parse_log_level(level)
return level
def get_logger_handlers(self, logger):
'''
Return the set of unique handlers visible to this logger.
:parameters:
logger
The logger whose visible and enabled handlers will be returned.
:return:
Set of handlers
'''
handlers = set()
while logger:
for handler in logger.handlers:
handlers.add(handler)
if logger.propagate:
logger = logger.parent
else:
logger = None
return handlers
def get_minimum_handler_level_for_logger(self, logger):
    '''
    Return the minimum handler level of all the handlers the
    logger is exposed to.

    :parameters:
      logger
        The logger whose handlers will be examined.
    :return:
      The minimum of all the handler's levels. If no
      handlers are defined sys.maxint will be returned.
    '''
    return get_minimum_level(self.get_logger_handlers(logger))
def _set_configured_logger_level(self, logger):
'''
Based on the current configuration maintained by the log
manager set this logger's level.
If the level specified for this logger by the configuration is
less than the minimum level supported by the output handlers
the logger is exposed to then adjust the logger's level higher
to the minimum handler level. This is a performance
optimization, no point in emitting a log message if no
handlers will ever output it.
:parameters:
logger
The logger whose level is being configured.
:return:
The level actually set on the logger.
'''
level = self.get_configured_logger_level(logger.name)
minimum_handler_level = self.get_minimum_handler_level_for_logger(logger)
if level < minimum_handler_level:
level = minimum_handler_level
logger.setLevel(level)
return level
def get_logger(self, who, bind_logger_names=False):
    '''
    Return the logger for an object or a name. If the logger
    already exists return the existing instance otherwise create
    the logger.

    The who parameter may be either a name or an object.
    Loggers are identified by a name but because loggers are
    usually bound to a class this method is optimized to handle
    that case. If who is an object:

    * The name is formed from the object's module name (dot
      separated) and the object's class name.

    * Optionally the logging output methods can be bound to the
      object if bind_logger_names is True.

    Otherwise if who is a basestring it is used as the logger
    name.

    In all instances the root_logger_name is prefixed to every
    logger created by the manager.

    :parameters:
      who
        If a basestring then use this as the logger name,
        prefixed with the root_logger_name. Otherwise who is treated
        as a class instance. The logger name is formed by prepending
        the root_logger_name to the module name and then appending the
        class name. All name components are dot separated. Thus if the
        root_logger_name is 'my_app', the class is ParseFileConfig
        living in the config.parsers module the logger name will be:
        ``my_app.config.parsers.ParseFileConfig``.
      bind_logger_names
        If true the class instance will have the following bound
        to it: ``log``, ``debug()``, ``info()``, ``warning()``,
        ``error()``, ``exception()``, ``critical()``. Where log is
        the logger object and the others are the loggers output
        methods. This is a convenience which allows you emit
        logging messages directly, for example::

          self.debug('%d names defined', self.num_names).
    :return:
      The logger matching the name indicated by who. If the
      logger pre-existed return that instance otherwise create the
      named logger return it.
    '''
    is_object = False
    # NOTE: ``basestring`` makes this Python 2 specific.
    if isinstance(who, basestring):
        obj_name = who
    else:
        is_object = True
        obj_name = '%s.%s' % (who.__module__, who.__class__.__name__)
    # Avoid doubling the root name when it is passed in directly.
    if obj_name == self.root_logger_name:
        logger_name = obj_name
    else:
        logger_name = self.root_logger_name + '.' + obj_name
    # If logger not in our cache then create and initialize the logger.
    logger = self.loggers.get(logger_name)
    if logger is None:
        logger = logging.getLogger(logger_name)
        self.loggers[logger_name] = logger
        self._set_configured_logger_level(logger)
    # Bind the logger and its output methods onto the instance, but only
    # once per object: '__log_manager' marks an already-bound instance.
    if bind_logger_names and is_object and getattr(who, '__log_manager', None) is None:
        setattr(who, '__log_manager', self)
        method = 'log'
        if hasattr(who, method):
            raise ValueError('%s is already bound to %s' % (method, repr(who)))
        setattr(who, method, logger)
        for method in logger_method_names:
            if hasattr(who, method):
                raise ValueError('%s is already bound to %s' % (method, repr(who)))
            setattr(who, method, getattr(logger, method))
    return logger
|
hatchetation/freeipa
|
ipapython/log_manager.py
|
Python
|
gpl-3.0
| 62,297
|
[
"VisIt"
] |
919424895f3f922965d04c3f647bd5377a08e6ba895fa143cea0d06770143c92
|
import ast
from viper.exceptions import (
InvalidLiteralException,
NonPayableViolationException,
StructureException,
TypeMismatchException,
VariableDeclarationException,
)
from viper.function_signature import (
FunctionSignature,
VariableRecord,
)
from viper.signatures.event_signature import (
EventSignature
)
from viper.functions import (
dispatch_table,
)
from .stmt import Stmt
from .parser_utils import LLLnode
from .parser_utils import (
get_length,
get_number_as_fraction,
get_original_if_0x_prefixed,
getpos,
make_byte_array_copier,
add_variable_offset,
base_type_conversion,
unwrap_location
)
from viper.types import (
BaseType,
ByteArrayType,
ListType,
MappingType,
MixedType,
NullType,
StructType,
TupleType,
)
from viper.types import (
get_size_of_type,
is_base_type,
is_numeric_type,
parse_type,
)
from viper.types import (
are_units_compatible,
combine_units,
)
from viper.utils import (
DECIMAL_DIVISOR,
MemoryPositions,
LOADED_LIMIT_MAP
)
from viper.utils import (
bytes_to_int,
checksum_encode,
calc_mem_gas,
is_varname_valid,
)
# Viper relies on ast.AnnAssign (variable annotations, PEP 526), which
# first appeared in Python 3.6; fail fast with a clear message otherwise.
# (The original used a bare ``except:`` around an unused assignment, which
# would also swallow KeyboardInterrupt/SystemExit.)
if not hasattr(ast, 'AnnAssign'):
    raise Exception("Requires python 3.6 or higher for annotation support")
# Converts Viper source text to a parse tree.
def parse(code):
    """Parse *code* and return the list of top-level AST nodes, with the
    original source attached to every node for error pretty-printing."""
    tree = ast.parse(code)
    decorate_ast_with_source(tree, code)
    return tree.body
# Parser for a single statement.
def parse_line(code):
    """Parse one statement of source and return its AST node, with the
    source text attached for error pretty-printing."""
    node = ast.parse(code).body[0]
    decorate_ast_with_source(node, code)
    return node
# Attach the original source text to every node of an AST tree.
# This is necessary to facilitate error pretty-printing.
def decorate_ast_with_source(_ast, code):
    class SourceAnnotator(ast.NodeVisitor):
        def visit(self, node):
            # Depth-first: annotate children, then the node itself.
            self.generic_visit(node)
            node.source_code = code

    SourceAnnotator().visit(_ast)
def _mk_getter_helper(typ, depth=0):
    """
    Recursively build getter fragments for a storage variable's type.

    Returns a list of 4-tuples:

      (i)   suffix appended to the getter's function name
      (ii)  source for the arguments the getter takes
      (iii) source appended to the return expression
      (iv)  the output type

    For example, for ``my_variable: {foo: num, bar: decimal[5]}``:

        [('__foo', '', '.foo', 'num'),
         ('__bar', 'arg0: num, ', '.bar[arg0]', 'decimal')]

    which yields getters like:

        def get_my_variable__foo() -> num: return self.foo
        def get_my_variable__bar(arg0: num) -> decimal: return self.bar[arg0]
    """
    # Base and byte-array types terminate the recursion: no name suffix,
    # no arguments, nothing appended to the return expression.
    if isinstance(typ, (BaseType, ByteArrayType)):
        return [("", "", "", repr(typ))]
    # Lists: add an index argument and subscript the return expression.
    if isinstance(typ, ListType):
        return [
            (suffix, ("arg%d: num, " % depth) + args, ("[arg%d]" % depth) + ret, out)
            for suffix, args, ret, out in _mk_getter_helper(typ.subtype, depth + 1)
        ]
    # Mappings: add a key argument and subscript the return expression.
    if isinstance(typ, MappingType):
        return [
            (suffix, ("arg%d: %r, " % (depth, typ.keytype)) + args, ("[arg%d]" % depth) + ret, out)
            for suffix, args, ret, out in _mk_getter_helper(typ.valuetype, depth + 1)
        ]
    # Structs: one getter per member; extend the name suffix and dot into
    # the return expression, adding no arguments.
    if isinstance(typ, StructType):
        return [
            ("__" + key + suffix, args, "." + key + ret, out)
            for key, member in typ.members.items()
            for suffix, args, ret, out in _mk_getter_helper(member, depth)
        ]
    raise Exception("Unexpected type")
# Build the full list of public getter definitions for one storage variable.
def mk_getter(varname, typ):
    template = '@constant\ndef get_%s%s(%s) -> %s: return self.%s%s'
    return [
        template % (varname, suffix, args.rstrip(', '), out, varname, ret)
        for suffix, args, ret, out in _mk_getter_helper(typ)
    ]
def add_contract(code):
    """Collect the function definitions of an external contract body.

    Every statement in the body must be a function definition; anything
    else raises StructureException.
    """
    collected = []
    for node in code:
        if not isinstance(node, ast.FunctionDef):
            raise StructureException("Invalid contract reference", node)
        collected.append(node)
    return collected
def add_globals_and_events(_defs, _events, _getters, _globals, item):
    """
    Classify one top-level ``name: annotation`` statement (ast.AnnAssign)
    as either an event declaration or a global storage variable.

    Mutates ``_events``, ``_globals`` and ``_getters`` in place and returns
    them as a triple. The required source ordering (events, then globals,
    then functions) is enforced here.

    :param _defs: function definitions seen so far (only used for ordering checks)
    :param _events: accumulated event declaration nodes
    :param _getters: accumulated auto-generated getter function nodes
    :param _globals: accumulated global VariableRecords keyed by name
    :param item: the ast.AnnAssign node being classified
    """
    # ``x: __log__({...})`` declares an event.
    if isinstance(item.annotation, ast.Call) and item.annotation.func.id == "__log__":
        if _globals or len(_defs):
            raise StructureException("Events must all come before global declarations and function definitions", item)
        _events.append(item)
    elif not isinstance(item.target, ast.Name):
        raise StructureException("Can only assign type to variable in top-level statement", item)
    # Check if global already exists, if so error
    elif item.target.id in _globals:
        raise VariableDeclarationException("Cannot declare a persistent variable twice!", item.target)
    elif len(_defs):
        raise StructureException("Global variables must all come before function definitions", item)
    # If the type declaration is of the form public(<type here>), then proceed with
    # the underlying type but also add getters
    elif isinstance(item.annotation, ast.Call) and item.annotation.func.id == "public":
        if len(item.annotation.args) != 1:
            raise StructureException("Public expects one arg (the type)")
        typ = parse_type(item.annotation.args[0], 'storage')
        _globals[item.target.id] = VariableRecord(item.target.id, len(_globals), typ, True)
        # Adding getters here; the '\n' padding aligns the generated getter's
        # line number with the declaration for error reporting.
        for getter in mk_getter(item.target.id, typ):
            _getters.append(parse_line('\n' * (item.lineno - 1) + getter))
            _getters[-1].pos = getpos(item)
    else:
        _globals[item.target.id] = VariableRecord(item.target.id, len(_globals), parse_type(item.annotation, 'storage'), True)
    return _events, _globals, _getters
# Parse top-level statements into contract refs, events, functions and globals.
def get_contracts_and_defs_and_globals(code):
    """Split a module body into its top-level categories.

    Returns ``(contracts, events, defs + getters, globals)``; generated
    public getters are appended after the explicit function definitions.
    """
    contracts = {}
    events = []
    global_vars = {}
    defs = []
    getters = []
    for item in code:
        if isinstance(item, ast.ClassDef):
            # External contract reference; must precede everything else.
            if events or global_vars or defs:
                raise StructureException("External contract declarations must come before event declarations, global declarations, and function definitions", item)
            contracts[item.name] = add_contract(item.body)
        elif isinstance(item, ast.AnnAssign):
            # ``variable_name: type`` — either a global or an event.
            events, global_vars, getters = add_globals_and_events(defs, events, getters, global_vars, item)
        elif isinstance(item, ast.FunctionDef):
            defs.append(item)
        else:
            raise StructureException("Invalid top-level statement", item)
    return contracts, events, defs + getters, global_vars
# Header code: store the first calldata word at memory offset 28 so the
# 4-byte method ID lands in the low-order bytes of memory word 0
# (parse_func later compares it via ['mload', 0]).
initializer_list = ['seq', ['mstore', 28, ['calldataload', 0]]]
# Store limit constants at fixed addresses in memory.
initializer_list += [['mstore', pos, limit_size] for pos, limit_size in LOADED_LIMIT_MAP.items()]
# Shared LLL prelude used by both the constructor and the runtime code.
initializer_lll = LLLnode.from_list(initializer_list, typ=None)
# Contains arguments, variables, etc
class Context():
    """Per-function compilation context: local variables, globals, ABI
    signatures, loop variables and function properties."""

    def __init__(self, vars=None, globals=None, sigs=None, forvars=None, return_type=None, is_constant=False, is_payable=False, origcode=''):
        self.vars = vars or {}          # in-memory variables: name -> record
        self.next_mem = MemoryPositions.RESERVED_MEMORY
        self.globals = globals or {}    # storage variables: name -> record
        self.sigs = sigs or {}          # ABI objects: {classname: signatures}
        self.forvars = forvars or {}    # variables bound by for loops
        self.return_type = return_type  # declared return type of the function
        self.is_constant = is_constant  # is the function constant?
        self.is_payable = is_payable    # is the function payable?
        self.placeholder_count = 1      # counter for anonymous variable names
        self.origcode = origcode        # original source, for error printing

    def new_variable(self, name, typ):
        """Reserve memory for a named local variable; return its position."""
        if not is_varname_valid(name):
            raise VariableDeclarationException("Variable name invalid or reserved: " + name)
        if name in self.vars or name in self.globals:
            raise VariableDeclarationException("Duplicate variable name: %s" % name)
        pos = self.next_mem
        self.vars[name] = VariableRecord(name, pos, typ, True)
        self.next_mem = pos + 32 * get_size_of_type(typ)
        return pos

    def new_placeholder(self, typ):
        """Reserve memory for an anonymous compiler-generated variable."""
        name = '_placeholder_' + str(self.placeholder_count)
        self.placeholder_count += 1
        return self.new_variable(name, typ)

    def get_next_mem(self):
        """Return the next unused memory location."""
        return self.next_mem
def is_initializer(code):
    """Return True when this function definition is the constructor."""
    return code.name == '__init__'
# Get ABI signature
def mk_full_signature(code):
    """Return the ABI description (list of dicts) covering every event
    and every non-internal function of the contract."""
    _contracts, _events, _defs, _globals = get_contracts_and_defs_and_globals(code)
    abi = [EventSignature.from_declaration(event).to_abi_dict() for event in _events]
    for func in _defs:
        sig = FunctionSignature.from_definition(func)
        # Internal functions are not externally callable; keep them out.
        if not sig.internal:
            abi.append(sig.to_abi_dict())
    return abi
# Main python parse tree => LLL method
def parse_tree_to_lll(code, origcode):
    """
    Compile a parsed module body into a single LLL node.

    :param code: list of top-level AST nodes (output of parse())
    :param origcode: original source text, kept for error pretty-printing
    :return: LLLnode for the whole contract (constructor + runtime body)
    """
    _contracts, _events, _defs, _globals = get_contracts_and_defs_and_globals(code)
    _names = [_def.name for _def in _defs] + [_event.target.id for _event in _events]
    # Checks for duplicate function / event names
    if len(set(_names)) < len(_names):
        raise VariableDeclarationException("Duplicate function or event name: %s" % [name for name in _names if _names.count(name) > 1][0])
    contracts = {}
    # Create the main statement
    o = ['seq']
    # Initialization function
    initfunc = [_def for _def in _defs if is_initializer(_def)]
    # Regular functions
    otherfuncs = [_def for _def in _defs if not is_initializer(_def)]
    sigs = {}
    if _events:
        for event in _events:
            sigs[event.target.id] = EventSignature.from_declaration(event)
    # Build signature tables for each declared external contract.
    for _contractname in _contracts:
        _c_defs = _contracts[_contractname]
        _defnames = [_def.name for _def in _c_defs]
        contract = {}
        if len(set(_defnames)) < len(_c_defs):
            raise VariableDeclarationException("Duplicate function name: %s" % [name for name in _defnames if _defnames.count(name) > 1][0])
        c_otherfuncs = [_def for _def in _c_defs if not is_initializer(_def)]
        if c_otherfuncs:
            for _def in c_otherfuncs:
                sig = FunctionSignature.from_definition(_def)
                contract[sig.name] = sig
            contracts[_contractname] = contract
    _defnames = [_def.name for _def in _defs]
    if len(set(_defnames)) < len(_defs):
        raise VariableDeclarationException("Duplicate function name: %s" % [name for name in _defnames if _defnames.count(name) > 1][0])
    # If there is an init func...
    if initfunc:
        o.append(['seq', initializer_lll])
        o.append(parse_func(initfunc[0], _globals, {**{'self': sigs}, **contracts}, origcode))
    # If there are regular functions...
    if otherfuncs:
        sub = ['seq', initializer_lll]
        # Each compiled function's total gas includes the cost of falling
        # through the dispatch checks of the functions before it; add_gas
        # accumulates that overhead (30 per preceding function).
        add_gas = initializer_lll.gas
        for _def in otherfuncs:
            sub.append(parse_func(_def, _globals, {**{'self': sigs}, **contracts}, origcode))
            sub[-1].total_gas += add_gas
            add_gas += 30
            sig = FunctionSignature.from_definition(_def)
            sig.gas = sub[-1].total_gas
            sigs[sig.name] = sig
        # Runtime code is returned from the constructor as an inner LLL blob.
        o.append(['return', 0, ['lll', sub, 0]])
    return LLLnode.from_list(o, typ=None)
# Checks that an input matches its type
def make_clamper(datapos, mempos, typ, is_init=False):
    """
    Build an LLL "clamper" that validates (and, for byte arrays, copies)
    one argument of the given type.

    :param datapos: offset of the argument within the data section
    :param mempos: memory offset where the argument will live
    :param typ: declared Viper type of the argument
    :param is_init: True inside __init__, where arguments are appended
        to the contract code instead of being passed as calldata
    :return: LLLnode performing the check (or 'pass' when no check applies)
    """
    if not is_init:
        # Regular call: arguments start after the 4-byte method ID.
        data_decl = ['calldataload', ['add', 4, datapos]]
        copier = lambda pos, sz: ['calldatacopy', mempos, ['add', 4, pos], sz]
    else:
        # Constructor: arguments live past the end of the contract code.
        data_decl = ['codeload', ['add', '~codelen', datapos]]
        copier = lambda pos, sz: ['codecopy', mempos, ['add', '~codelen', pos], sz]
    # Numbers: make sure they're in range
    if is_base_type(typ, 'num'):
        return LLLnode.from_list(['clamp', ['mload', MemoryPositions.MINNUM], data_decl, ['mload', MemoryPositions.MAXNUM]],
                                 typ=typ, annotation='checking num input')
    # Booleans: make sure they're zero or one
    elif is_base_type(typ, 'bool'):
        return LLLnode.from_list(['uclamplt', data_decl, 2], typ=typ, annotation='checking bool input')
    # Addresses: make sure they're in range
    elif is_base_type(typ, 'address'):
        return LLLnode.from_list(['uclamplt', data_decl, ['mload', MemoryPositions.ADDRSIZE]], typ=typ, annotation='checking address input')
    # Bytes: copy the (length-prefixed) array into memory, then make sure
    # the declared length does not exceed the type's maximum
    elif isinstance(typ, ByteArrayType):
        return LLLnode.from_list(['seq',
                                  copier(data_decl, 32 + typ.maxlen),
                                  ['assert', ['le', ['calldataload', ['add', 4, data_decl]], typ.maxlen]]],
                                 typ=None, annotation='checking bytearray input')
    # Lists: recurse, one clamper per element
    elif isinstance(typ, ListType):
        o = []
        for i in range(typ.count):
            offset = get_size_of_type(typ.subtype) * 32 * i
            o.append(make_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
        return LLLnode.from_list(['seq'] + o, typ=None, annotation='checking list input')
    # Otherwise don't make any checks
    else:
        return LLLnode.from_list('pass')
# Parses a function declaration
def parse_func(code, _globals, sigs, origcode, _vars=None):
    """
    Compile one function definition into an LLL node.

    :param code: the ast.FunctionDef node
    :param _globals: storage variable records, keyed by name
    :param sigs: signature tables ({'self': ...} plus external contracts)
    :param origcode: original source text for error pretty-printing
    :param _vars: pre-existing in-memory variables, if any
    :return: LLLnode; the node also carries .context, .total_gas and
        .func_name attributes set below
    """
    if _vars is None:
        _vars = {}
    sig = FunctionSignature.from_definition(code)
    # Check for duplicate variables with globals
    for arg in sig.args:
        if arg.name in _globals:
            raise VariableDeclarationException("Variable name duplicated between function arguments and globals: " + arg.name)
    # Create a context
    context = Context(vars=_vars, globals=_globals, sigs=sigs,
                      return_type=sig.output_type, is_constant=sig.const, is_payable=sig.payable, origcode=origcode)
    # Copy calldata to memory for fixed-size arguments; byte arrays only
    # reserve one word here (a pointer-sized slot) and get their own
    # memory below.
    copy_size = sum([32 if isinstance(arg.typ, ByteArrayType) else get_size_of_type(arg.typ) * 32 for arg in sig.args])
    context.next_mem += copy_size
    if not len(sig.args):
        copier = 'pass'
    elif sig.name == '__init__':
        # Constructor arguments are appended to the code, not calldata.
        copier = ['codecopy', MemoryPositions.RESERVED_MEMORY, '~codelen', copy_size]
    else:
        copier = ['calldatacopy', MemoryPositions.RESERVED_MEMORY, 4, copy_size]
    # Create "clampers" (input well-formedness checkers)
    clampers = [copier]
    # Add asserts for payable and internal
    if not sig.payable:
        clampers.append(['assert', ['iszero', 'callvalue']])
    if sig.internal:
        # Internal functions may only be called by the contract itself.
        clampers.append(['assert', ['eq', 'caller', 'address']])
    # Fill in variable positions
    for arg in sig.args:
        clampers.append(make_clamper(arg.pos, context.next_mem, arg.typ, sig.name == '__init__'))
        if isinstance(arg.typ, ByteArrayType):
            # Byte arrays get freshly reserved memory past the copied block.
            context.vars[arg.name] = VariableRecord(arg.name, context.next_mem, arg.typ, False)
            context.next_mem += 32 * get_size_of_type(arg.typ)
        else:
            context.vars[arg.name] = VariableRecord(arg.name, MemoryPositions.RESERVED_MEMORY + arg.pos, arg.typ, False)
    # Return function body
    if sig.name == '__init__':
        o = LLLnode.from_list(['seq'] + clampers + [parse_body(code.body, context)], pos=getpos(code))
    else:
        # Wrap the body in a method-ID dispatch check against memory word 0.
        method_id_node = LLLnode.from_list(sig.method_id, pos=getpos(code), annotation='%s' % sig.name)
        o = LLLnode.from_list(['if',
                               ['eq', ['mload', 0], method_id_node],
                               ['seq'] + clampers + [parse_body(c, context) for c in code.body] + ['stop']
                               ], typ=None, pos=getpos(code))
    o.context = context
    o.total_gas = o.gas + calc_mem_gas(o.context.next_mem)
    o.func_name = sig.name
    return o
# Parse a piece of code (a single statement or a list of statements).
def parse_body(code, context):
    """Compile a statement or a statement list into an LLL node."""
    if not isinstance(code, list):
        return parse_stmt(code, context)
    compiled = [parse_stmt(stmt, context) for stmt in code]
    return LLLnode.from_list(['seq'] + compiled, pos=getpos(code[0]) if code else None)
def external_contract_call_stmt(stmt, context):
    """
    Compile a call to an external contract used as a statement (the
    return value, if any, is discarded).

    Raises VariableDeclarationException when the contract type or the
    method has not been declared.
    """
    contract_name = stmt.func.value.func.id
    if contract_name not in context.sigs:
        raise VariableDeclarationException("Contract not declared yet: %s" % contract_name)
    method_name = stmt.func.attr
    if method_name not in context.sigs[contract_name]:
        raise VariableDeclarationException("Function not declared yet: %s (reminder: "
                                           "function must be declared in the correct contract)" % method_name)
    sig = context.sigs[contract_name][method_name]
    contract_address = parse_expr(stmt.func.value.args[0], context)
    inargs, inargsize = pack_arguments(sig, [parse_expr(arg, context) for arg in stmt.args], context)
    # Guard the call: the target must contain code and must not be this
    # contract itself; then the CALL itself must succeed.
    o = LLLnode.from_list(['seq',
                           ['assert', ['extcodesize', ['mload', contract_address]]],
                           ['assert', ['ne', 'address', ['mload', contract_address]]],
                           ['assert', ['call', ['gas'], ['mload', contract_address], 0, inargs, inargsize, 0, 0]]],
                          typ=None, location='memory', pos=getpos(stmt))
    return o
def external_contract_call_expr(expr, context):
    """
    Compile a call to an external contract used as an expression: the
    call's output is written into a fresh memory placeholder and the
    node evaluates to that value's location.

    Raises VariableDeclarationException for undeclared contracts/methods
    and TypeMismatchException for unsupported output types.
    """
    contract_name = expr.func.value.func.id
    if contract_name not in context.sigs:
        raise VariableDeclarationException("Contract not declared yet: %s" % contract_name)
    method_name = expr.func.attr
    if method_name not in context.sigs[contract_name]:
        raise VariableDeclarationException("Function not declared yet: %s (reminder: "
                                           "function must be declared in the correct contract)" % method_name)
    sig = context.sigs[contract_name][method_name]
    contract_address = parse_expr(expr.func.value.args[0], context)
    inargs, inargsize = pack_arguments(sig, [parse_expr(arg, context) for arg in expr.args], context)
    output_placeholder = context.new_placeholder(typ=sig.output_type)
    if isinstance(sig.output_type, BaseType):
        returner = output_placeholder
    elif isinstance(sig.output_type, ByteArrayType):
        # Skip the 32-byte length prefix; point at the data itself.
        returner = output_placeholder + 32
    else:
        raise TypeMismatchException("Invalid output type: %r" % sig.output_type, expr)
    # Guard the call (code present, not self), perform it with the output
    # directed into the placeholder, then yield the placeholder location.
    o = LLLnode.from_list(['seq',
                           ['assert', ['extcodesize', ['mload', contract_address]]],
                           ['assert', ['ne', 'address', ['mload', contract_address]]],
                           ['assert', ['call', ['gas'], ['mload', contract_address], 0,
                                       inargs, inargsize,
                                       output_placeholder, get_size_of_type(sig.output_type) * 32]],
                           returner], typ=sig.output_type, location='memory', pos=getpos(expr))
    return o
# Parse an expression
def parse_expr(expr, context):
if isinstance(expr, LLLnode):
return expr
# Numbers (integers or decimals)
elif isinstance(expr, ast.Num):
orignum = get_original_if_0x_prefixed(expr, context)
if orignum is None and isinstance(expr.n, int):
if not (-2**127 + 1 <= expr.n <= 2**127 - 1):
raise InvalidLiteralException("Number out of range: " + str(expr.n), expr)
return LLLnode.from_list(expr.n, typ=BaseType('num', None), pos=getpos(expr))
elif isinstance(expr.n, float):
numstring, num, den = get_number_as_fraction(expr, context)
if not (-2**127 * den < num < 2**127 * den):
raise InvalidLiteralException("Number out of range: " + numstring, expr)
if DECIMAL_DIVISOR % den:
raise InvalidLiteralException("Too many decimal places: " + numstring, expr)
return LLLnode.from_list(num * DECIMAL_DIVISOR // den, typ=BaseType('decimal', None), pos=getpos(expr))
elif len(orignum) == 42:
if checksum_encode(orignum) != orignum:
raise InvalidLiteralException("Address checksum mismatch. If you are sure this is the "
"right address, the correct checksummed form is: " +
checksum_encode(orignum), expr)
return LLLnode.from_list(expr.n, typ=BaseType('address'), pos=getpos(expr))
elif len(orignum) == 66:
return LLLnode.from_list(expr.n, typ=BaseType('bytes32'), pos=getpos(expr))
else:
raise InvalidLiteralException("Cannot read 0x value with length %d. Expecting 42 (address incl 0x) or 66 (bytes32 incl 0x)"
% len(orignum), expr)
# Byte array literals
elif isinstance(expr, ast.Str):
bytez = b''
for c in expr.s:
if ord(c) >= 256:
raise InvalidLiteralException("Cannot insert special character %r into byte array" % c, expr)
bytez += bytes([ord(c)])
placeholder = context.new_placeholder(ByteArrayType(len(bytez)))
seq = []
seq.append(['mstore', placeholder, len(bytez)])
for i in range(0, len(bytez), 32):
seq.append(['mstore', ['add', placeholder, i + 32], bytes_to_int((bytez + b'\x00' * 31)[i: i + 32])])
return LLLnode.from_list(['seq'] + seq + [placeholder], typ=ByteArrayType(len(bytez)), location='memory', pos=getpos(expr))
# True, False, None constants
elif isinstance(expr, ast.NameConstant):
if expr.value is True:
return LLLnode.from_list(1, typ='bool', pos=getpos(expr))
elif expr.value is False:
return LLLnode.from_list(0, typ='bool', pos=getpos(expr))
elif expr.value is None:
return LLLnode.from_list(None, typ=NullType(), pos=getpos(expr))
else:
raise Exception("Unknown name constant: %r" % expr.value.value)
# Variable names
elif isinstance(expr, ast.Name):
if expr.id == 'self':
return LLLnode.from_list(['address'], typ='address', pos=getpos(expr))
if expr.id == 'true':
return LLLnode.from_list(1, typ='bool', pos=getpos(expr))
if expr.id == 'false':
return LLLnode.from_list(0, typ='bool', pos=getpos(expr))
if expr.id == 'null':
return LLLnode.from_list(None, typ=NullType(), pos=getpos(expr))
if expr.id in context.vars:
var = context.vars[expr.id]
return LLLnode.from_list(var.pos, typ=var.typ, location='memory', pos=getpos(expr), annotation=expr.id, mutable=var.mutable)
else:
raise VariableDeclarationException("Undeclared variable: " + expr.id, expr)
# x.y or x[5]
elif isinstance(expr, ast.Attribute):
# x.balance: balance of address x
if expr.attr == 'balance':
addr = parse_value_expr(expr.value, context)
if not is_base_type(addr.typ, 'address'):
raise TypeMismatchException("Type mismatch: balance keyword expects an address as input", expr)
return LLLnode.from_list(['balance', addr], typ=BaseType('num', {'wei': 1}), location=None, pos=getpos(expr))
# x.codesize: codesize of address x
elif expr.attr == 'codesize' or expr.attr == 'is_contract':
addr = parse_value_expr(expr.value, context)
if not is_base_type(addr.typ, 'address'):
raise TypeMismatchException(f"Type mismatch: {expr.attr} keyword expects an address as input", expr)
if expr.attr == 'codesize':
output_type = 'num'
else:
output_type = 'bool'
return LLLnode.from_list(['extcodesize', addr], typ=BaseType(output_type), location=None, pos=getpos(expr))
# self.x: global attribute
elif isinstance(expr.value, ast.Name) and expr.value.id == "self":
if expr.attr not in context.globals:
raise VariableDeclarationException("Persistent variable undeclared: " + expr.attr, expr)
var = context.globals[expr.attr]
return LLLnode.from_list(var.pos, typ=var.typ, location='storage', pos=getpos(expr), annotation='self.' + expr.attr)
# Reserved keywords
elif isinstance(expr.value, ast.Name) and expr.value.id in ("msg", "block", "tx"):
key = expr.value.id + "." + expr.attr
if key == "msg.sender":
return LLLnode.from_list(['caller'], typ='address', pos=getpos(expr))
elif key == "msg.value":
if not context.is_payable:
raise NonPayableViolationException("Cannot use msg.value in a non-payable function", expr)
return LLLnode.from_list(['callvalue'], typ=BaseType('num', {'wei': 1}), pos=getpos(expr))
elif key == "block.difficulty":
return LLLnode.from_list(['difficulty'], typ='num', pos=getpos(expr))
elif key == "block.timestamp":
return LLLnode.from_list(['timestamp'], typ=BaseType('num', {'sec': 1}, True), pos=getpos(expr))
elif key == "block.coinbase":
return LLLnode.from_list(['coinbase'], typ='address', pos=getpos(expr))
elif key == "block.number":
return LLLnode.from_list(['number'], typ='num', pos=getpos(expr))
elif key == "block.prevhash":
return LLLnode.from_list(['blockhash', ['sub', 'number', 1]], typ='bytes32', pos=getpos(expr))
elif key == "tx.origin":
return LLLnode.from_list(['origin'], typ='address', pos=getpos(expr))
else:
raise Exception("Unsupported keyword: " + key)
# Other variables
else:
sub = parse_variable_location(expr.value, context)
if not isinstance(sub.typ, StructType):
raise TypeMismatchException("Type mismatch: member variable access not expected", expr.value)
attrs = sorted(sub.typ.members.keys())
if expr.attr not in attrs:
raise TypeMismatchException("Member %s not found. Only the following available: %s" % (expr.attr, " ".join(attrs)), expr)
return add_variable_offset(sub, expr.attr)
elif isinstance(expr, ast.Subscript):
sub = parse_variable_location(expr.value, context)
if isinstance(sub.typ, (MappingType, ListType)):
if 'value' not in vars(expr.slice):
raise StructureException("Array access must access a single element, not a slice", expr)
index = parse_value_expr(expr.slice.value, context)
elif isinstance(sub.typ, TupleType):
if not isinstance(expr.slice.value, ast.Num) or expr.slice.value.n < 0 or expr.slice.value.n >= len(sub.typ.members):
raise TypeMismatchException("Tuple index invalid", expr.slice.value)
index = expr.slice.value.n
else:
raise TypeMismatchException("Bad subscript attempt", expr.value)
o = add_variable_offset(sub, index)
o.mutable = sub.mutable
return o
# Arithmetic operations
elif isinstance(expr, ast.BinOp):
left = parse_value_expr(expr.left, context)
right = parse_value_expr(expr.right, context)
if not is_numeric_type(left.typ) or not is_numeric_type(right.typ):
raise TypeMismatchException("Unsupported types for arithmetic op: %r %r" % (left.typ, right.typ), expr)
ltyp, rtyp = left.typ.typ, right.typ.typ
if isinstance(expr.op, (ast.Add, ast.Sub)):
if left.typ.unit != right.typ.unit and left.typ.unit is not None and right.typ.unit is not None:
raise TypeMismatchException("Unit mismatch: %r %r" % (left.typ.unit, right.typ.unit), expr)
if left.typ.positional and right.typ.positional and isinstance(expr.op, ast.Add):
raise TypeMismatchException("Cannot add two positional units!", expr)
new_unit = left.typ.unit or right.typ.unit
new_positional = left.typ.positional ^ right.typ.positional # xor, as subtracting two positionals gives a delta
op = 'add' if isinstance(expr.op, ast.Add) else 'sub'
if ltyp == rtyp:
o = LLLnode.from_list([op, left, right], typ=BaseType(ltyp, new_unit, new_positional), pos=getpos(expr))
elif ltyp == 'num' and rtyp == 'decimal':
o = LLLnode.from_list([op, ['mul', left, DECIMAL_DIVISOR], right],
typ=BaseType('decimal', new_unit, new_positional), pos=getpos(expr))
elif ltyp == 'decimal' and rtyp == 'num':
o = LLLnode.from_list([op, left, ['mul', right, DECIMAL_DIVISOR]],
typ=BaseType('decimal', new_unit, new_positional), pos=getpos(expr))
else:
raise Exception("How did I get here? %r %r" % (ltyp, rtyp))
elif isinstance(expr.op, ast.Mult):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot multiply positional values!", expr)
new_unit = combine_units(left.typ.unit, right.typ.unit)
if ltyp == rtyp == 'num':
o = LLLnode.from_list(['mul', left, right], typ=BaseType('num', new_unit), pos=getpos(expr))
elif ltyp == rtyp == 'decimal':
o = LLLnode.from_list(['with', 'r', right, ['with', 'l', left,
['with', 'ans', ['mul', 'l', 'r'],
['seq',
['assert', ['or', ['eq', ['sdiv', 'ans', 'l'], 'r'], ['not', 'l']]],
['sdiv', 'ans', DECIMAL_DIVISOR]]]]], typ=BaseType('decimal', new_unit), pos=getpos(expr))
elif (ltyp == 'num' and rtyp == 'decimal') or (ltyp == 'decimal' and rtyp == 'num'):
o = LLLnode.from_list(['with', 'r', right, ['with', 'l', left,
['with', 'ans', ['mul', 'l', 'r'],
['seq',
['assert', ['or', ['eq', ['sdiv', 'ans', 'l'], 'r'], ['not', 'l']]],
'ans']]]], typ=BaseType('decimal', new_unit), pos=getpos(expr))
elif isinstance(expr.op, ast.Div):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot divide positional values!", expr)
new_unit = combine_units(left.typ.unit, right.typ.unit, div=True)
if rtyp == 'num':
o = LLLnode.from_list(['sdiv', left, ['clamp_nonzero', right]], typ=BaseType(ltyp, new_unit), pos=getpos(expr))
elif ltyp == rtyp == 'decimal':
o = LLLnode.from_list(['with', 'l', left, ['with', 'r', ['clamp_nonzero', right],
['sdiv', ['mul', 'l', DECIMAL_DIVISOR], 'r']]],
typ=BaseType('decimal', new_unit), pos=getpos(expr))
elif ltyp == 'num' and rtyp == 'decimal':
o = LLLnode.from_list(['sdiv', ['mul', left, DECIMAL_DIVISOR ** 2], ['clamp_nonzero', right]],
typ=BaseType('decimal', new_unit), pos=getpos(expr))
elif isinstance(expr.op, ast.Mod):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot use positional values as modulus arguments!", expr)
if left.typ.unit != right.typ.unit and left.typ.unit is not None and right.typ.unit is not None:
raise TypeMismatchException("Modulus arguments must have same unit", expr)
new_unit = left.typ.unit or right.typ.unit
if ltyp == rtyp:
o = LLLnode.from_list(['smod', left, ['clamp_nonzero', right]], typ=BaseType(ltyp, new_unit), pos=getpos(expr))
elif ltyp == 'decimal' and rtyp == 'num':
o = LLLnode.from_list(['smod', left, ['mul', ['clamp_nonzero', right], DECIMAL_DIVISOR]],
typ=BaseType('decimal', new_unit), pos=getpos(expr))
elif ltyp == 'num' and rtyp == 'decimal':
o = LLLnode.from_list(['smod', ['mul', left, DECIMAL_DIVISOR], right],
typ=BaseType('decimal', new_unit), pos=getpos(expr))
elif isinstance(expr.op, ast.Pow):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot use positional values as exponential arguments!", expr)
new_unit = combine_units(left.typ.unit, right.typ.unit)
if ltyp == rtyp == 'num':
o = LLLnode.from_list(['exp', left, right], typ=BaseType('num', new_unit), pos=getpos(expr))
else:
raise TypeMismatchException('Only whole number exponents are supported', expr)
else:
raise Exception("Unsupported binop: %r" % expr.op)
if o.typ.typ == 'num':
return LLLnode.from_list(['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]], typ=o.typ, pos=getpos(expr))
elif o.typ.typ == 'decimal':
return LLLnode.from_list(['clamp', ['mload', MemoryPositions.MINDECIMAL], o, ['mload', MemoryPositions.MAXDECIMAL]], typ=o.typ, pos=getpos(expr))
else:
raise Exception("%r %r" % (o, o.typ))
# Comparison operations
elif isinstance(expr, ast.Compare):
left = parse_value_expr(expr.left, context)
right = parse_value_expr(expr.comparators[0], context)
if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ):
raise TypeMismatchException("Can't compare values with different units!", expr)
if len(expr.ops) != 1:
raise StructureException("Cannot have a comparison with more than two elements", expr)
if isinstance(expr.ops[0], ast.Gt):
op = 'sgt'
elif isinstance(expr.ops[0], ast.GtE):
op = 'sge'
elif isinstance(expr.ops[0], ast.LtE):
op = 'sle'
elif isinstance(expr.ops[0], ast.Lt):
op = 'slt'
elif isinstance(expr.ops[0], ast.Eq):
op = 'eq'
elif isinstance(expr.ops[0], ast.NotEq):
op = 'ne'
else:
raise Exception("Unsupported comparison operator")
if not is_numeric_type(left.typ) or not is_numeric_type(right.typ):
if op not in ('eq', 'ne'):
raise TypeMismatchException("Invalid type for comparison op", expr)
ltyp, rtyp = left.typ.typ, right.typ.typ
if ltyp == rtyp:
return LLLnode.from_list([op, left, right], typ='bool', pos=getpos(expr))
elif ltyp == 'decimal' and rtyp == 'num':
return LLLnode.from_list([op, left, ['mul', right, DECIMAL_DIVISOR]], typ='bool', pos=getpos(expr))
elif ltyp == 'num' and rtyp == 'decimal':
return LLLnode.from_list([op, ['mul', left, DECIMAL_DIVISOR], right], typ='bool', pos=getpos(expr))
else:
raise TypeMismatchException("Unsupported types for comparison: %r %r" % (ltyp, rtyp), expr)
# Boolean logical operations
elif isinstance(expr, ast.BoolOp):
if len(expr.values) != 2:
raise StructureException("Expected two arguments for a bool op", expr)
left = parse_value_expr(expr.values[0], context)
right = parse_value_expr(expr.values[1], context)
if not is_base_type(left.typ, 'bool') or not is_base_type(right.typ, 'bool'):
raise TypeMismatchException("Boolean operations can only be between booleans!", expr)
if isinstance(expr.op, ast.And):
op = 'and'
elif isinstance(expr.op, ast.Or):
op = 'or'
else:
raise Exception("Unsupported bool op: " + expr.op)
return LLLnode.from_list([op, left, right], typ='bool', pos=getpos(expr))
# Unary operations (only "not" supported)
elif isinstance(expr, ast.UnaryOp):
operand = parse_value_expr(expr.operand, context)
if isinstance(expr.op, ast.Not):
# Note that in the case of bool, num, address, decimal, num256 AND bytes32,
# a zero entry represents false, all others represent true
return LLLnode.from_list(["iszero", operand], typ='bool', pos=getpos(expr))
elif isinstance(expr.op, ast.USub):
if not is_numeric_type(operand.typ):
raise TypeMismatchException("Unsupported type for negation: %r" % operand.typ, operand)
return LLLnode.from_list(["sub", 0, operand], typ=operand.typ, pos=getpos(expr))
else:
raise StructureException("Only the 'not' unary operator is supported")
# Function calls
elif isinstance(expr, ast.Call):
if isinstance(expr.func, ast.Name) and expr.func.id in dispatch_table:
return dispatch_table[expr.func.id](expr, context)
elif isinstance(expr.func, ast.Attribute) and isinstance(expr.func.value, ast.Name) and expr.func.value.id == "self":
method_name = expr.func.attr
if method_name not in context.sigs['self']:
raise VariableDeclarationException("Function not declared yet (reminder: functions cannot "
"call functions later in code than themselves): %s" % expr.func.attr)
sig = context.sigs['self'][expr.func.attr]
inargs, inargsize = pack_arguments(sig, [parse_expr(arg, context) for arg in expr.args], context)
output_placeholder = context.new_placeholder(typ=sig.output_type)
if isinstance(sig.output_type, BaseType):
returner = output_placeholder
elif isinstance(sig.output_type, ByteArrayType):
returner = output_placeholder + 32
else:
raise TypeMismatchException("Invalid output type: %r" % sig.output_type, expr)
o = LLLnode.from_list(['seq',
['assert', ['call', ['gas'], ['address'], 0,
inargs, inargsize,
output_placeholder, get_size_of_type(sig.output_type) * 32]],
returner], typ=sig.output_type, location='memory', pos=getpos(expr))
o.gas += sig.gas
return o
elif isinstance(expr.func, ast.Attribute) and isinstance(expr.func.value, ast.Call):
return external_contract_call_expr(expr, context)
else:
raise StructureException("Unsupported operator: %r" % ast.dump(expr), expr)
# List literals
elif isinstance(expr, ast.List):
if not len(expr.elts):
raise StructureException("List must have elements", expr)
o = []
out_type = None
for elt in expr.elts:
o.append(parse_expr(elt, context))
if not out_type:
out_type = o[-1].typ
elif len(o) > 1 and o[-1].typ != out_type:
out_type = MixedType()
return LLLnode.from_list(["multi"] + o, typ=ListType(out_type, len(o)), pos=getpos(expr))
# Struct literals
elif isinstance(expr, ast.Dict):
o = {}
members = {}
for key, value in zip(expr.keys, expr.values):
if not isinstance(key, ast.Name) or not is_varname_valid(key.id):
raise TypeMismatchException("Invalid member variable for struct: %r" % vars(key).get('id', key), key)
if key.id in o:
raise TypeMismatchException("Member variable duplicated: " + key.id, key)
o[key.id] = parse_expr(value, context)
members[key.id] = o[key.id].typ
return LLLnode.from_list(["multi"] + [o[key] for key in sorted(list(o.keys()))], typ=StructType(members), pos=getpos(expr))
raise Exception("Unsupported operator: %r" % ast.dump(expr))
# Parse an expression that represents an address in memory or storage
def parse_variable_location(expr, context):
    """Parse *expr* and require that the resulting LLL node carries a
    memory/storage location (i.e. is addressable, not a bare value)."""
    node = parse_expr(expr, context)
    if node.location:
        return node
    raise Exception("Looking for a variable location, instead got a value")
# Parse an expression that results in a value
def parse_value_expr(expr, context):
    """Parse *expr* and unwrap any memory/storage location into a raw value."""
    located = parse_expr(expr, context)
    return unwrap_location(located)
# Create an x=y statement, where the types may be compound
def make_setter(left, right, location):
    """Build an LLL assignment ``left = right`` where the operands may be
    compound types (base values, byte arrays, lists, structs, tuples).

    :param left: LLLnode for the assignment target (must carry a location).
    :param right: LLLnode for the value being assigned (may be a ``multi``
        literal or a NullType zero-initializer).
    :param location: 'storage' or 'memory' -- where the target lives.
    :returns: an LLLnode (typ=None) performing the store operation(s).
    :raises TypeMismatchException: when the two sides' types are incompatible.
    """
    # Basic types
    if isinstance(left.typ, BaseType):
        right = base_type_conversion(right, right.typ, left.typ)
        if location == 'storage':
            return LLLnode.from_list(['sstore', left, right], typ=None)
        elif location == 'memory':
            return LLLnode.from_list(['mstore', left, right], typ=None)
    # Byte arrays
    elif isinstance(left.typ, ByteArrayType):
        return make_byte_array_copier(left, right)
    # Can't copy mappings
    elif isinstance(left.typ, MappingType):
        raise TypeMismatchException("Cannot copy mappings; can only copy individual elements")
    # Arrays
    elif isinstance(left.typ, ListType):
        # Cannot do something like [a, b, c] = [1, 2, 3]
        if left.value == "multi":
            raise Exception("Target of set statement must be a single item")
        if not isinstance(right.typ, (ListType, NullType)):
            raise TypeMismatchException("Setter type mismatch: left side is array, right side is %r" % right.typ)
        # '_L' is a placeholder re-bound by the 'with' wrapper below so the
        # target address is computed only once.
        left_token = LLLnode.from_list('_L', typ=left.typ, location=left.location)
        # Storage targets are addressed through a hash of the base slot, so
        # pre-hash once and mark the location accordingly.
        if left.location == "storage":
            left = LLLnode.from_list(['sha3_32', left], typ=left.typ, location="storage_prehashed")
            left_token.location = "storage_prehashed"
        # Type checks
        if not isinstance(right.typ, NullType):
            if not isinstance(right.typ, ListType):
                raise TypeMismatchException("Left side is array, right side is not")
            if left.typ.count != right.typ.count:
                raise TypeMismatchException("Mismatched number of elements")
        # If the right side is a literal
        if right.value == "multi":
            if len(right.args) != left.typ.count:
                raise TypeMismatchException("Mismatched number of elements")
            subs = []
            for i in range(left.typ.count):
                # Recurse element-wise: each element may itself be compound.
                subs.append(make_setter(add_variable_offset(left_token, LLLnode.from_list(i, typ='num')),
                                        right.args[i], location))
            return LLLnode.from_list(['with', '_L', left, ['seq'] + subs], typ=None)
        # If the right side is a null
        elif isinstance(right.typ, NullType):
            subs = []
            for i in range(left.typ.count):
                subs.append(make_setter(add_variable_offset(left_token, LLLnode.from_list(i, typ='num')),
                                        LLLnode.from_list(None, typ=NullType()), location))
            return LLLnode.from_list(['with', '_L', left, ['seq'] + subs], typ=None)
        # If the right side is a variable
        else:
            right_token = LLLnode.from_list('_R', typ=right.typ, location=right.location)
            subs = []
            for i in range(left.typ.count):
                subs.append(make_setter(add_variable_offset(left_token, LLLnode.from_list(i, typ='num')),
                                        add_variable_offset(right_token, LLLnode.from_list(i, typ='num')), location))
            return LLLnode.from_list(['with', '_L', left, ['with', '_R', right, ['seq'] + subs]], typ=None)
    # Structs
    elif isinstance(left.typ, (StructType, TupleType)):
        if left.value == "multi":
            raise Exception("Target of set statement must be a single item")
        if not isinstance(right.typ, NullType):
            if not isinstance(right.typ, left.typ.__class__):
                raise TypeMismatchException("Setter type mismatch: left side is %r, right side is %r" % (left.typ, right.typ))
            if isinstance(left.typ, StructType):
                # Struct assignment requires exactly matching member keys.
                for k in left.typ.members:
                    if k not in right.typ.members:
                        raise TypeMismatchException("Keys don't match for structs, missing %s" % k)
                for k in right.typ.members:
                    if k not in left.typ.members:
                        raise TypeMismatchException("Keys don't match for structs, extra %s" % k)
            else:
                if len(left.typ.members) != len(right.typ.members):
                    raise TypeMismatchException("Tuple lengths don't match, %d vs %d" % (len(left.typ.members), len(right.typ.members)))
        left_token = LLLnode.from_list('_L', typ=left.typ, location=left.location)
        if left.location == "storage":
            left = LLLnode.from_list(['sha3_32', left], typ=left.typ, location="storage_prehashed")
            left_token.location = "storage_prehashed"
        # keyz: member access keys -- sorted names for structs, indices for tuples.
        if isinstance(left.typ, StructType):
            keyz = sorted(list(left.typ.members.keys()))
        else:
            keyz = list(range(len(left.typ.members)))
        # If the right side is a literal
        if right.value == "multi":
            if len(right.args) != len(keyz):
                raise TypeMismatchException("Mismatched number of elements")
            subs = []
            for i, typ in enumerate(keyz):
                subs.append(make_setter(add_variable_offset(left_token, typ), right.args[i], location))
            return LLLnode.from_list(['with', '_L', left, ['seq'] + subs], typ=None)
        # If the right side is a null
        elif isinstance(right.typ, NullType):
            subs = []
            for typ in keyz:
                subs.append(make_setter(add_variable_offset(left_token, typ), LLLnode.from_list(None, typ=NullType()), location))
            return LLLnode.from_list(['with', '_L', left, ['seq'] + subs], typ=None)
        # If the right side is a variable
        else:
            right_token = LLLnode.from_list('_R', typ=right.typ, location=right.location)
            subs = []
            for typ in keyz:
                subs.append(make_setter(add_variable_offset(left_token, typ), add_variable_offset(right_token, typ), location))
            return LLLnode.from_list(['with', '_L', left, ['with', '_R', right, ['seq'] + subs]], typ=None)
    else:
        raise Exception("Invalid type for setters")
# Parse a statement (usually one line of code but not always)
def parse_stmt(stmt, context):
    """Translate one statement node into its LLL representation."""
    handler = Stmt(stmt, context)
    return handler.lll_node
def pack_logging_topics(event_id, args, topics_types, context):
    """Assemble the indexed topics for a log event.

    :param event_id: value used as the first topic (the event identifier).
    :param args: AST nodes for the event's indexed arguments.
    :param topics_types: expected type for each topic argument.
    :param context: compilation context.
    :returns: (topics, stored_topics) where ``topics`` is the list of topic
        LLL fragments and ``stored_topics`` is a ['seq', ...] of side-effect
        nodes that must run first to place string data in memory.
    """
    topics = [event_id]
    # NOTE(review): topics_count is incremented but never used below --
    # looks like dead state; confirm against callers before removing.
    topics_count = 1
    stored_topics = ['seq']
    for pos, typ in enumerate(topics_types):
        arg = args[pos]
        topics_count += 1
        if isinstance(arg, ast.Str):
            # String literals are first evaluated into memory; the topic then
            # loads the stored word. The +32 presumably skips the byte-array
            # length slot -- TODO confirm against LLLnode layout.
            stored_topics.append(parse_value_expr(arg, context))
            topics.append(['mload', stored_topics[-1].to_list()[-1][-1][-1] + 32])
        else:
            # Non-literal topics are evaluated and converted to the declared type.
            input = parse_value_expr(arg, context)
            input = base_type_conversion(input, input.typ, typ)
            topics.append(input)
    return topics, stored_topics
def pack_args_by_32(holder, maxlen, arg, typ, context, placeholder):
    """Pack one logging data argument into 32-byte memory slots.

    Appends the LLL store operations to *holder* and grows *maxlen* for
    list arguments (one extra 32-byte slot per additional element).

    :param holder: accumulator list of LLL nodes (starts as ['seq']).
    :param maxlen: running maximum byte length of the packed data.
    :param arg: AST node of the argument being packed.
    :param typ: declared type of the argument.
    :param context: compilation context (supplies fresh placeholders).
    :param placeholder: memory slot for this argument's value.
    :returns: the updated (holder, maxlen) pair.
    :raises InvalidLiteralException: for non-byte characters or strings
        longer than 32 bytes.
    """
    if isinstance(typ, BaseType):
        # Renamed from `input`, which shadowed the builtin.
        value = parse_expr(arg, context)
        value = base_type_conversion(value, value.typ, typ)
        holder.append(LLLnode.from_list(['mstore', placeholder, value], typ=typ, location='memory'))
    elif isinstance(typ, ByteArrayType):
        # Only string literals are supported here; encode char-by-char so we
        # can reject anything outside the single-byte range.
        bytez = b''
        for c in arg.s:
            if ord(c) >= 256:
                raise InvalidLiteralException("Cannot insert special character %r into byte array" % c)
            bytez += bytes([ord(c)])
        if len(bytez) > 32:
            raise InvalidLiteralException("Can only log a maximum of 32 bytes at a time.")
        # Right-pad with zero bytes to a full 32-byte word.
        holder.append(LLLnode.from_list(['mstore', placeholder, bytes_to_int(bytez.ljust(32, b'\x00'))], typ=typ, location='memory'))
    elif isinstance(typ, ListType):
        # Each extra element needs its own 32-byte slot beyond the first.
        maxlen += (typ.count - 1) * 32
        typ = typ.subtype
        holder, maxlen = pack_args_by_32(holder, maxlen, arg.elts[0], typ, context, placeholder)
        for j, arg2 in enumerate(arg.elts[1:]):
            holder, maxlen = pack_args_by_32(holder, maxlen, arg2, typ, context, context.new_placeholder(BaseType(32)))
    return holder, maxlen
# Pack logging data arguments
def pack_logging_data(types, args, context):
    """Pack the (non-indexed) data arguments of a log event into memory.

    :param types: declared type for each data argument.
    :param args: AST nodes of the data arguments.
    :param context: compilation context.
    :returns: (holder, maxlen, start) -- the ['seq', ...] of store
        operations, the maximum packed byte length, and the memory start
        address (taken from the first store's placeholder).
    """
    # Checks to see if there's any data
    if not args:
        return ['seq'], 0, 0
    holder = ['seq']
    maxlen = len(args) * 32  # one 32-byte slot per argument, minimum
    # (was `for i, (arg, typ) in enumerate(...)` -- the index was unused)
    for arg, typ in zip(args, types):
        holder, maxlen = pack_args_by_32(holder, maxlen, arg, typ, context, context.new_placeholder(BaseType(32)))
    return holder, maxlen, holder[1].to_list()[1][0]
# Pack function arguments for a call
def pack_arguments(signature, args, context):
    """ABI-encode *args* into memory for a call matching *signature*.

    Layout (grounded in the stores below): the method id is stored at the
    placeholder's first word; each argument gets a 32-byte slot starting at
    placeholder+32; byte-array arguments store a running offset ('_poz') in
    their slot and copy their data into the dynamic section after the
    static slots.

    :returns: a pair (LLL node evaluating to the start address of the
        packed call data, maximum size in bytes of that data).
    :raises TypeMismatchException: for argument types that cannot be packed.
    """
    placeholder_typ = ByteArrayType(maxlen=sum([get_size_of_type(arg.typ) for arg in signature.args]) * 32 + 32)
    placeholder = context.new_placeholder(placeholder_typ)
    setters = [['mstore', placeholder, signature.method_id]]
    needpos = False
    for i, (arg, typ) in enumerate(zip(args, [arg.typ for arg in signature.args])):
        if isinstance(typ, BaseType):
            setters.append(make_setter(LLLnode.from_list(placeholder + 32 + i * 32, typ=typ), arg, 'memory'))
        elif isinstance(typ, ByteArrayType):
            # Static slot holds the dynamic offset; data is copied at
            # placeholder+32+_poz, then _poz advances past length + payload.
            setters.append(['mstore', placeholder + 32 + i * 32, '_poz'])
            arg_copy = LLLnode.from_list('_s', typ=arg.typ, location=arg.location)
            target = LLLnode.from_list(['add', placeholder + 32, '_poz'], typ=typ, location='memory')
            setters.append(['with', '_s', arg, ['seq',
                make_byte_array_copier(target, arg_copy),
                ['set', '_poz', ['add', 32, ['add', '_poz', get_length(arg_copy)]]]]])
            needpos = True
        else:
            raise TypeMismatchException("Cannot pack argument of type %r" % typ)
    # placeholder + 28: presumably so the returned start address lines up
    # with the 4-byte method id at the end of the first word -- TODO confirm.
    if needpos:
        return LLLnode.from_list(['with', '_poz', len(args) * 32, ['seq'] + setters + [placeholder + 28]],
                                 typ=placeholder_typ, location='memory'), \
            placeholder_typ.maxlen - 28
    else:
        return LLLnode.from_list(['seq'] + setters + [placeholder + 28], typ=placeholder_typ, location='memory'), \
            placeholder_typ.maxlen - 28
def parse_to_lll(kode):
    """Compile Viper source text *kode* into an LLL node tree."""
    ast_tree = parse(kode)
    return parse_tree_to_lll(ast_tree, kode)
|
NedYork/viper
|
viper/parser/parser.py
|
Python
|
mit
| 52,311
|
[
"VisIt"
] |
1ca32c163a6f1984a105a3abe54de8518f106918bf14b0f427a5c0561895ef6a
|
"""
:Author: Engelbert Gruber
:Contact: grubert@users.sourceforge.net
:Revision: $Revision: 21817 $
:Date: $Date: 2005-07-21 22:39:57 +0200 (Thu, 21 Jul 2005) $
:Copyright: This module has been placed in the public domain.
LaTeX2e document tree Writer.
"""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # e.g. ##.
import sys
import time
import re
import string
from types import ListType
from docutils import frontend, nodes, languages, writers, utils
class Writer(writers.Writer):
    # Docutils writer that renders the document tree as LaTeX2e source.
    # All real work happens in the LaTeXTranslator visitor; this class
    # mostly declares the writer's command-line/config options.

    supported = ('latex','latex2e')
    """Formats this writer supports."""

    # Option specification in the (title, description, option-tuples) form
    # that docutils' frontend/OptionParser expects.
    settings_spec = (
        'LaTeX-Specific Options',
        'The LaTeX "--output-encoding" default is "latin-1:strict".',
        (('Specify documentclass.  Default is "article".',
          ['--documentclass'],
          {'default': 'article', }),
         ('Specify document options.  Multiple options can be given, '
          'separated by commas.  Default is "10pt,a4paper".',
          ['--documentoptions'],
          {'default': '10pt,a4paper', }),
         ('Use LaTeX footnotes. LaTeX supports only numbered footnotes (does it?). '
          'Default: no, uses figures.',
          ['--use-latex-footnotes'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Format for footnote references: one of "superscript" or '
          '"brackets".  Default is "superscript".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'superscript',
           'metavar': '<format>',
           'overrides': 'trim_footnote_reference_space'}),
         ('Use LaTeX citations. '
          'Default: no, uses figures which might get mixed with images.',
          ['--use-latex-citations'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none".  Default is "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Specify a stylesheet file. The file will be "input" by latex in '
          'the document header.  Default is no stylesheet ("").  '
          'Overrides --stylesheet-path.',
          ['--stylesheet'],
          {'default': '', 'metavar': '<file>',
           'overrides': 'stylesheet_path'}),
         ('Specify a stylesheet file, relative to the current working '
          'directory.  Overrides --stylesheet.',
          ['--stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'stylesheet'}),
         ('Table of contents by docutils (default) or latex. Latex (writer) '
          'supports only one ToC per document, but docutils does not write '
          'pagenumbers.',
          ['--use-latex-toc'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Let LaTeX print author and date, do not show it in docutils '
          'document info.',
          ['--use-latex-docinfo'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Color of any hyperlinks embedded in text '
          '(default: "blue", "0" to disable).',
          ['--hyperlink-color'], {'default': 'blue'}),
         ('Enable compound enumerators for nested enumerated lists '
          '(e.g. "1.2.a.ii").  Default: disabled.',
          ['--compound-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable compound enumerators for nested enumerated lists.  This is '
          'the default.',
          ['--no-compound-enumerators'],
          {'action': 'store_false', 'dest': 'compound_enumerators'}),
         ('Enable section ("." subsection ...) prefixes for compound '
          'enumerators.  This has no effect without --compound-enumerators. '
          'Default: disabled.',
          ['--section-prefix-for-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable section prefixes for compound enumerators.  '
          'This is the default.',
          ['--no-section-prefix-for-enumerators'],
          {'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
         ('Set the separator between section number and enumerator '
          'for compound enumerated lists.  Default is "-".',
          ['--section-enumerator-separator'],
          {'default': '-', 'metavar': '<char>'}),
         ('When possibile, use verbatim for literal-blocks. '
          'Default is to always use the mbox environment.',
          ['--use-verbatim-when-possible'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table style. "standard" with horizontal and vertical lines, '
          '"booktabs" (LaTeX booktabs style) only horizontal lines '
          'above and below the table and below the header or "nolines".  '
          'Default: "standard"',
          ['--table-style'],
          {'choices': ['standard', 'booktabs','nolines'], 'default': 'standard',
           'metavar': '<format>'}),
         ('LaTeX graphicx package option. '
          'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
          'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
          'Default is no option.',
          ['--graphicx-option'],
          {'default': ''}),
         ('LaTeX font encoding. '
          'Possible values are "T1", "OT1", "" or some other fontenc option. '
          'The font encoding influences available symbols, e.g. "<<" as one '
          'character. Default is "" which leads to package "ae" (a T1 '
          'emulation using CM fonts).',
          ['--font-encoding'],
          {'default': ''}),
         ),)

    settings_defaults = {'output_encoding': 'latin-1'}
    relative_path_settings = ('stylesheet_path',)

    config_section = 'latex2e writer'
    config_section_dependencies = ('writers',)

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        # Defer all rendering to LaTeXTranslator (a docutils node visitor).
        writers.Writer.__init__(self)
        self.translator_class = LaTeXTranslator

    def translate(self):
        # Walk the document tree with the visitor, then copy its output
        # sections onto the writer so downstream consumers can access
        # head/body parts individually as well as the full text.
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
        self.head_prefix = visitor.head_prefix
        self.head = visitor.head
        self.body_prefix = visitor.body_prefix
        self.body = visitor.body
        self.body_suffix = visitor.body_suffix
"""
Notes on LaTeX
--------------
* latex does not support multiple tocs in one document.
(might be no limitation except for docutils documentation)
* width
* linewidth - width of a line in the local environment
* textwidth - the width of text on the page
Maybe always use linewidth ?
*Bug*: inside a minipage (e.g. a Sidebar) the linewidth is
not changed; this needs a fix in docutils so that tables
are not too wide.
So we add locallinewidth, set it initially and
on entering a sidebar, and reset it on exit.
"""
class Babel:
    """Language specifics for LaTeX (babel language names and quote glyphs)."""

    # country code by a.schlock.
    # partly manually converted from iso and babel stuff, dialects and some
    # hand additions.  NOTE: the original literal contained duplicate keys
    # ('hu': 'magyar' and 'pt': 'portuguese' appeared twice); in a dict
    # literal the later entry silently wins, so only the effective entries
    # are kept here ('hu': 'hungarian', 'pt': 'portuguese').
    _ISO639_TO_BABEL = {
        'no': 'norsk',     #XXX added by hand ( forget about nynorsk?)
        'gd': 'scottish',  #XXX added by hand
        'sl': 'slovenian',
        'af': 'afrikaans',
        'bg': 'bulgarian',
        'br': 'breton',
        'ca': 'catalan',
        'cs': 'czech',
        'cy': 'welsh',
        'da': 'danish',
        'fr': 'french',
        # french, francais, canadien, acadian
        'de': 'ngerman',   #XXX rather than german
        # ngerman, naustrian, german, germanb, austrian
        'el': 'greek',
        'en': 'english',
        # english, USenglish, american, UKenglish, british, canadian
        'eo': 'esperanto',
        'es': 'spanish',
        'et': 'estonian',
        'eu': 'basque',
        'fi': 'finnish',
        'ga': 'irish',
        'gl': 'galician',
        'he': 'hebrew',
        'hr': 'croatian',
        'hu': 'hungarian',
        'is': 'icelandic',
        'it': 'italian',
        'la': 'latin',
        'nl': 'dutch',
        'pl': 'polish',
        'pt': 'portuguese',
        'ro': 'romanian',
        'ru': 'russian',
        'sk': 'slovak',
        'sr': 'serbian',
        'sv': 'swedish',
        'tr': 'turkish',
        'uk': 'ukrainian'
    }

    def __init__(self, lang):
        """Remember *lang* and select the quote glyphs it uses."""
        self.language = lang
        # pdflatex does not produce double quotes for ngerman in tt.
        # (attribute keeps its historical spelling for compatibility)
        self.double_quote_replacment = None
        if re.search('^de', self.language):
            # German lower/upper quotation marks.
            #self.quotes = ("\"`", "\"'")
            self.quotes = ('{\\glqq}', '{\\grqq}')
            self.double_quote_replacment = "{\\dq}"
        else:
            self.quotes = ("``", "''")
        self.quote_index = 0

    def next_quote(self):
        """Return the next quote glyph, alternating opening/closing."""
        q = self.quotes[self.quote_index]
        self.quote_index = (self.quote_index + 1) % 2
        return q

    def quote_quotes(self, text):
        """Replace plain '"' characters by alternating language quotes."""
        t = None
        for part in text.split('"'):
            if t is None:  # was `t == None`; identity check is correct here
                t = part
            else:
                t += self.next_quote() + part
        return t

    def double_quotes_in_tt(self, text):
        """Replace '"' inside teletype text when the language requires it."""
        if not self.double_quote_replacment:
            return text
        return text.replace('"', self.double_quote_replacment)

    def get_language(self):
        """Map the ISO639 code (optionally with a dialect suffix like
        'pt_BR') to babel's language name, or None if unknown."""
        # `in` replaces the Python-2-only dict.has_key().
        if self.language in self._ISO639_TO_BABEL:
            return self._ISO639_TO_BABEL[self.language]
        # support dialects: fall back to the part before the underscore.
        base = self.language.split("_")[0]
        if base in self._ISO639_TO_BABEL:
            return self._ISO639_TO_BABEL[base]
        return None
latex_headings = {
'optionlist_environment' : [
'\\newcommand{\\optionlistlabel}[1]{\\bf #1 \\hfill}\n'
'\\newenvironment{optionlist}[1]\n'
'{\\begin{list}{}\n'
' {\\setlength{\\labelwidth}{#1}\n'
' \\setlength{\\rightmargin}{1cm}\n'
' \\setlength{\\leftmargin}{\\rightmargin}\n'
' \\addtolength{\\leftmargin}{\\labelwidth}\n'
' \\addtolength{\\leftmargin}{\\labelsep}\n'
' \\renewcommand{\\makelabel}{\\optionlistlabel}}\n'
'}{\\end{list}}\n',
],
'lineblock_environment' : [
'\\newlength{\\lineblockindentation}\n'
'\\setlength{\\lineblockindentation}{2.5em}\n'
'\\newenvironment{lineblock}[1]\n'
'{\\begin{list}{}\n'
' {\\setlength{\\partopsep}{\\parskip}\n'
' \\addtolength{\\partopsep}{\\baselineskip}\n'
' \\topsep0pt\\itemsep0.15\\baselineskip\\parsep0pt\n'
' \\leftmargin#1}\n'
' \\raggedright}\n'
'{\\end{list}}\n'
],
'footnote_floats' : [
'% begin: floats for footnotes tweaking.\n',
'\\setlength{\\floatsep}{0.5em}\n',
'\\setlength{\\textfloatsep}{\\fill}\n',
'\\addtolength{\\textfloatsep}{3em}\n',
'\\renewcommand{\\textfraction}{0.5}\n',
'\\renewcommand{\\topfraction}{0.5}\n',
'\\renewcommand{\\bottomfraction}{0.5}\n',
'\\setcounter{totalnumber}{50}\n',
'\\setcounter{topnumber}{50}\n',
'\\setcounter{bottomnumber}{50}\n',
'% end floats for footnotes\n',
],
'some_commands' : [
'% some commands, that could be overwritten in the style file.\n'
'\\newcommand{\\rubric}[1]'
'{\\subsection*{~\\hfill {\\it #1} \\hfill ~}}\n'
'\\newcommand{\\titlereference}[1]{\\textsl{#1}}\n'
'% end of "some commands"\n',
]
}
class DocumentClass:
    """Details of a LaTeX document class."""

    # BUG: LaTeX has no deeper sections (actually paragrah is no
    # section either).
    # BUG: No support for unknown document classes.  Make 'article'
    # default?
    _class_sections = {
        'book': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
        'scrbook': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
        'report': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
        'scrreprt': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
        'article': ( 'section', 'subsection', 'subsubsection' ),
        'scrartcl': ( 'section', 'subsection', 'subsubsection' ),
    }
    _deepest_section = 'subsubsection'

    def __init__(self, document_class):
        """Remember which LaTeX document class is in use."""
        self.document_class = document_class

    def section(self, level):
        """Return the section name at the given level for the specific
        document class.

        Level is 1,2,3..., as level 0 is the title."""
        names = self._class_sections[self.document_class]
        # Levels deeper than the class supports all map to the deepest one.
        if level > len(names):
            return self._deepest_section
        return names[level - 1]
class Table:
    """ Manage a table while traversing.

        Maybe change to a mixin defining the visit/departs, but then
        class Table internal variables are in the Translator.
    """
    def __init__(self,latex_type,table_style):
        # latex_type: LaTeX environment name, e.g. 'longtable'.
        # table_style: 'standard' (vertical bars, \hline) or 'booktabs'.
        self._latex_type = latex_type
        self._table_style = table_style
        self._open = 0
        # miscellaneous attributes
        self._attrs = {}
        self._col_width = []
        self._rowspan = []

    def open(self):
        """Reset per-table state at the start of a table."""
        self._open = 1
        self._col_specs = []
        self.caption = None
        self._attrs = {}
        self._in_head = 0 # maybe context with search
        # also reset the flag toggled by visit_thead()/depart_thead()
        self._in_thead = 0

    def close(self):
        self._open = 0
        self._col_specs = None
        self.caption = None
        self._attrs = {}

    def is_open(self):
        return self._open

    def used_packages(self):
        # the booktabs style needs its LaTeX package in the preamble.
        if self._table_style == 'booktabs':
            return '\\usepackage{booktabs}\n'
        return ''

    def get_latex_type(self):
        return self._latex_type

    def set(self,attr,value):
        self._attrs[attr] = value

    def get(self,attr):
        # "in" instead of the Python-2-only dict.has_key().
        if attr in self._attrs:
            return self._attrs[attr]
        return None

    def get_vertical_bar(self):
        if self._table_style == 'standard':
            return '|'
        return ''

    # horizontal lines are drawn below a row, because we.
    def get_opening(self):
        return '\\begin{%s}[c]' % self._latex_type

    def get_closing(self):
        # BUGFIX: the 'standard' branch used to assign to a misspelled
        # variable ("lines"), silently dropping the closing \hline.
        line = ""
        if self._table_style == 'booktabs':
            line = '\\bottomrule\n'
        elif self._table_style == 'standard':
            line = '\\hline\n'
        return '%s\\end{%s}' % (line,self._latex_type)

    def visit_colspec(self,node):
        self._col_specs.append(node)

    def get_colspecs(self):
        """
        Return column specification for longtable.

        Assumes reST line length being 80 characters.
        Table width is hairy.

        === ===
        ABC DEF
        === ===

        usually gets to narrow, therefore we add 1 (fiddlefactor).
        """
        width = 80
        total_width = 0.0
        # first see if we get too wide.
        for node in self._col_specs:
            colwidth = float(node['colwidth']+1) / width
            total_width += colwidth
        self._col_width = []
        self._rowspan = []
        # donot make it full linewidth
        factor = 0.93
        if total_width > 1.0:
            factor /= total_width
        bar = self.get_vertical_bar()
        latex_table_spec = ""
        for node in self._col_specs:
            colwidth = factor * float(node['colwidth']+1) / width
            self._col_width.append(colwidth+0.005)
            self._rowspan.append(0)
            latex_table_spec += "%sp{%.2f\\locallinewidth}" % (bar,colwidth+0.005)
        return latex_table_spec+bar

    def get_column_width(self):
        """ return columnwidth for current cell (not multicell)
        """
        return "%.2f\\locallinewidth" % self._col_width[self._cell_in_row-1]

    def visit_thead(self):
        self._in_thead = 1
        if self._table_style == 'standard':
            return ['\\hline\n']
        elif self._table_style == 'booktabs':
            return ['\\toprule\n']
        return []

    def depart_thead(self):
        a = []
        #if self._table_style == 'standard':
        #    a.append('\\hline\n')
        if self._table_style == 'booktabs':
            a.append('\\midrule\n')
        a.append('\\endhead\n')
        # for longtable one could add firsthead, foot and lastfoot
        self._in_thead = 0
        return a

    def visit_row(self):
        self._cell_in_row = 0

    def depart_row(self):
        """Return the row terminator plus \\hline/\\cline lines, taking
        still-active rowspans into account."""
        res = [' \\\\\n']
        self._cell_in_row = None  # remove cell counter
        for i in range(len(self._rowspan)):
            if (self._rowspan[i]>0):
                self._rowspan[i] -= 1
        if self._table_style == 'standard':
            rowspans = []
            for i in range(len(self._rowspan)):
                if (self._rowspan[i]<=0):
                    rowspans.append(i+1)
            if len(rowspans)==len(self._rowspan):
                res.append('\\hline\n')
            else:
                cline = ''
                rowspans.reverse()
                # TODO merge clines
                while rowspans:
                    c_start = rowspans.pop()
                    cline += '\\cline{%d-%d}\n' % (c_start,c_start)
                res.append(cline)
        return res

    def set_rowspan(self,cell,value):
        try:
            self._rowspan[cell] = value
        except (IndexError, TypeError):
            # cell outside the known columns: ignore, as before.
            pass

    def get_rowspan(self,cell):
        try:
            return self._rowspan[cell]
        except (IndexError, TypeError):
            return 0

    def get_entry_number(self):
        return self._cell_in_row

    def visit_entry(self):
        self._cell_in_row += 1
class LaTeXTranslator(nodes.NodeVisitor):
    # When options are given to the documentclass, latex will pass them
    # to other packages, as done with babel.
    # Dummy settings might be taken from document settings

    # Preamble templates, filled in via "%" formatting in __init__.
    latex_head = '\\documentclass[%s]{%s}\n'
    encoding = '\\usepackage[%s]{inputenc}\n'
    linking = '\\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}\n'
    stylesheet = '\\input{%s}\n'
    # add a generated on day , machine by user using docutils version.
    generator = '%% generator Docutils: http://docutils.sourceforge.net/\n'

    # use latex tableofcontents or let docutils do it.
    use_latex_toc = 0

    # TODO: use mixins for different implementations.
    # list environment for option-list. else tabularx
    use_optionlist_for_option_list = 1
    # list environment for docinfo. else tabularx
    use_optionlist_for_docinfo = 0 # NOT YET IN USE

    # Use compound enumerations (1.A.1.)
    compound_enumerators = 0

    # If using compound enumerations, include section information.
    section_prefix_for_enumerators = 0

    # This is the character that separates the section ("." subsection ...)
    # prefix from the regular list enumerator.
    section_enumerator_separator = '-'

    # default link color
    hyperlink_color = "blue"
    def __init__(self, document):
        """Read the writer settings from *document* and assemble the
        LaTeX preamble (``self.head_prefix``) plus all per-document
        state used while traversing the document tree."""
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
        self.use_latex_toc = settings.use_latex_toc
        self.use_latex_docinfo = settings.use_latex_docinfo
        self.use_latex_footnotes = settings.use_latex_footnotes
        self._use_latex_citations = settings.use_latex_citations
        self.hyperlink_color = settings.hyperlink_color
        self.compound_enumerators = settings.compound_enumerators
        self.font_encoding = settings.font_encoding
        self.section_prefix_for_enumerators = (
            settings.section_prefix_for_enumerators)
        self.section_enumerator_separator = (
            settings.section_enumerator_separator.replace('_', '\\_'))
        # hyperlink_color == '0' means "no colored links".
        if self.hyperlink_color == '0':
            self.hyperlink_color = 'black'
            self.colorlinks = 'false'
        else:
            self.colorlinks = 'true'
        # language: labels, bibliographic_fields, and author_separators.
        # to allow writing labes for specific languages.
        self.language = languages.get_language(settings.language_code)
        self.babel = Babel(settings.language_code)
        self.author_separator = self.language.author_separators[0]
        self.d_options = self.settings.documentoptions
        if self.babel.get_language():
            self.d_options += ',%s' % \
                self.babel.get_language()
        self.d_class = DocumentClass(settings.documentclass)
        # object for a table while proccessing.
        self.active_table = Table('longtable',settings.table_style)
        # HACK.  Should have more sophisticated typearea handling.
        if settings.documentclass.find('scr') == -1:
            self.typearea = '\\usepackage[DIV12]{typearea}\n'
        else:
            # KOMA classes: only set a typearea if the user did not.
            if self.d_options.find('DIV') == -1 and self.d_options.find('BCOR') == -1:
                self.typearea = '\\typearea{12}\n'
            else:
                self.typearea = ''
        # Font encoding: empty means OT1-compatible ae fonts.
        if self.font_encoding == 'OT1':
            fontenc_header = ''
        elif self.font_encoding == '':
            fontenc_header = '\\usepackage{ae}\n\\usepackage{aeguill}\n'
        else:
            fontenc_header = '\\usepackage[%s]{fontenc}\n' % (self.font_encoding,)
        input_encoding = self.encoding % self.latex_encoding
        # graphicx: plain, auto-detected latex/pdflatex, or explicit option.
        if self.settings.graphicx_option == '':
            self.graphicx_package = '\\usepackage{graphicx}\n'
        elif self.settings.graphicx_option.lower() == 'auto':
            self.graphicx_package = '\n'.join(
                ('%Check if we are compiling under latex or pdflatex',
                 '\\ifx\\pdftexversion\\undefined',
                 ' \\usepackage{graphicx}',
                 '\\else',
                 ' \\usepackage[pdftex]{graphicx}',
                 '\\fi\n'))
        else:
            self.graphicx_package = (
                '\\usepackage[%s]{graphicx}\n' % self.settings.graphicx_option)
        # The preamble, in order; the user stylesheet (appended last)
        # may override anything here.
        self.head_prefix = [
              self.latex_head % (self.d_options,self.settings.documentclass),
              '\\usepackage{babel}\n',     # language is in documents settings.
              fontenc_header,
              '\\usepackage{shortvrb}\n',  # allows verb in footnotes.
              input_encoding,
              # * tabularx: for docinfo, automatic width of columns, always on one page.
              '\\usepackage{tabularx}\n',
              '\\usepackage{longtable}\n',
              self.active_table.used_packages(),
              # possible other packages.
              # * fancyhdr
              # * ltxtable is a combination of tabularx and longtable (pagebreaks).
              #   but ??
              #
              # extra space between text in tables and the line above them
              '\\setlength{\\extrarowheight}{2pt}\n',
              '\\usepackage{amsmath}\n',   # what fore amsmath.
              self.graphicx_package,
              '\\usepackage{color}\n',
              '\\usepackage{multirow}\n',
              '\\usepackage{ifthen}\n',   # before hyperref!
              self.linking % (self.colorlinks, self.hyperlink_color, self.hyperlink_color),
              self.typearea,
              self.generator,
              # latex lengths
              '\\newlength{\\admonitionwidth}\n',
              '\\setlength{\\admonitionwidth}{0.9\\textwidth}\n'
              # width for docinfo tablewidth
              '\\newlength{\\docinfowidth}\n',
              '\\setlength{\\docinfowidth}{0.9\\textwidth}\n'
              # linewidth of current environment, so tables are not wider
              # than the sidebar: using locallinewidth seems to defer evaluation
              # of linewidth, this is fixing it.
              '\\newlength{\\locallinewidth}\n',
              # will be set later.
              ]
        self.head_prefix.extend( latex_headings['optionlist_environment'] )
        self.head_prefix.extend( latex_headings['lineblock_environment'] )
        self.head_prefix.extend( latex_headings['footnote_floats'] )
        self.head_prefix.extend( latex_headings['some_commands'] )
        ## stylesheet is last: so it might be possible to overwrite defaults.
        stylesheet = utils.get_stylesheet_reference(settings)
        if stylesheet:
            settings.record_dependencies.add(stylesheet)
            self.head_prefix.append(self.stylesheet % (stylesheet))
        if self.linking: # and maybe check for pdf
            self.pdfinfo = [ ]
            self.pdfauthor = None
            # pdftitle, pdfsubject, pdfauthor, pdfkeywords, pdfcreator, pdfproducer
        else:
            self.pdfinfo = None
        # NOTE: Latex wants a date and an author, rst puts this into
        # docinfo, so normally we donot want latex author/date handling.
        # latex article has its own handling of date and author, deactivate.
        # So we always emit \title{...} \author{...} \date{...}, even if the
        # "..." are empty strings.
        self.head = [ ]
        # separate title, so we can appen subtitle.
        self.title = ''
        # if use_latex_docinfo: collects lists of author/organization/contact/address lines
        self.author_stack = []
        self.date = ''
        self.body_prefix = ['\\raggedbottom\n']
        self.body = []
        self.body_suffix = ['\n']
        self.section_level = 0
        self.context = []
        self.topic_classes = []
        # column specification for tables
        self.table_caption = None
        # Flags to encode
        # ---------------
        # verbatim: to tell encode not to encode.
        self.verbatim = 0
        # insert_none_breaking_blanks: to tell encode to replace blanks by "~".
        self.insert_none_breaking_blanks = 0
        # insert_newline: to tell encode to add latex newline.
        self.insert_newline = 0
        # mbox_newline: to tell encode to add mbox and newline.
        self.mbox_newline = 0
        # enumeration is done by list environment.
        self._enum_cnt = 0
        # Stack of section counters so that we don't have to use_latex_toc.
        # This will grow and shrink as processing occurs.
        # Initialized for potential first-level sections.
        self._section_number = [0]
        # The current stack of enumerations so that we can expand
        # them into a compound enumeration
        self._enumeration_counters = []
        self._bibitems = []
        # docinfo.
        self.docinfo = None
        # inside literal block: no quote mangling.
        self.literal_block = 0
        self.literal_block_stack = []
        self.literal = 0
        # true when encoding in math mode
        self.mathmode = 0
def to_latex_encoding(self,docutils_encoding):
"""
Translate docutils encoding name into latex's.
Default fallback method is remove "-" and "_" chars from docutils_encoding.
"""
tr = { "iso-8859-1": "latin1", # west european
"iso-8859-2": "latin2", # east european
"iso-8859-3": "latin3", # esperanto, maltese
"iso-8859-4": "latin4", # north european,scandinavian, baltic
"iso-8859-5": "iso88595", # cyrillic (ISO)
"iso-8859-9": "latin5", # turkish
"iso-8859-15": "latin9", # latin9, update to latin1.
"mac_cyrillic": "maccyr", # cyrillic (on Mac)
"windows-1251": "cp1251", # cyrillic (on Windows)
"koi8-r": "koi8-r", # cyrillic (Russian)
"koi8-u": "koi8-u", # cyrillic (Ukrainian)
"windows-1250": "cp1250", #
"windows-1252": "cp1252", #
"us-ascii": "ascii", # ASCII (US)
# unmatched encodings
#"": "applemac",
#"": "ansinew", # windows 3.1 ansi
#"": "ascii", # ASCII encoding for the range 32--127.
#"": "cp437", # dos latine us
#"": "cp850", # dos latin 1
#"": "cp852", # dos latin 2
#"": "decmulti",
#"": "latin10",
#"iso-8859-6": "" # arabic
#"iso-8859-7": "" # greek
#"iso-8859-8": "" # hebrew
#"iso-8859-10": "" # latin6, more complete iso-8859-4
}
if tr.has_key(docutils_encoding.lower()):
return tr[docutils_encoding.lower()]
return docutils_encoding.translate(string.maketrans("",""),"_-").lower()
def language_label(self, docutil_label):
return self.language.labels[docutil_label]
latex_equivalents = {
u'\u00A0' : '~',
u'\u2013' : '{--}',
u'\u2014' : '{---}',
u'\u2018' : '`',
u'\u2019' : '\'',
u'\u201A' : ',',
u'\u201C' : '``',
u'\u201D' : '\'\'',
u'\u201E' : ',,',
u'\u2020' : '{\\dag}',
u'\u2021' : '{\\ddag}',
u'\u2026' : '{\\dots}',
u'\u2122' : '{\\texttrademark}',
u'\u21d4' : '{$\\Leftrightarrow$}',
}
def unicode_to_latex(self,text):
# see LaTeX codec
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
# Only some special chracters are translated, for documents with many
# utf-8 chars one should use the LaTeX unicode package.
for uchar in self.latex_equivalents.keys():
text = text.replace(uchar,self.latex_equivalents[uchar])
return text
def encode(self, text):
"""
Encode special characters in `text` & return.
# $ % & ~ _ ^ \ { }
Escaping with a backslash does not help with backslashes, ~ and ^.
< > are only available in math-mode or tt font. (really ?)
$ starts math- mode.
AND quotes:
"""
if self.verbatim:
return text
# compile the regexps once. do it here so one can see them.
#
# first the braces.
if not self.__dict__.has_key('encode_re_braces'):
self.encode_re_braces = re.compile(r'([{}])')
text = self.encode_re_braces.sub(r'{\\\1}',text)
if not self.__dict__.has_key('encode_re_bslash'):
# find backslash: except in the form '{\{}' or '{\}}'.
self.encode_re_bslash = re.compile(r'(?<!{)(\\)(?![{}]})')
# then the backslash: except in the form from line above:
# either '{\{}' or '{\}}'.
text = self.encode_re_bslash.sub(r'{\\textbackslash}', text)
# then dollar
text = text.replace("$", '{\\$}')
if not ( self.literal_block or self.literal or self.mathmode ):
# the vertical bar: in mathmode |,\vert or \mid
# in textmode \textbar
text = text.replace("|", '{\\textbar}')
text = text.replace("<", '{\\textless}')
text = text.replace(">", '{\\textgreater}')
# then
text = text.replace("&", '{\\&}')
# the ^:
# * verb|^| does not work in mbox.
# * mathmode has wedge. hat{~} would also work.
# text = text.replace("^", '{\\ensuremath{^\\wedge}}')
text = text.replace("^", '{\\textasciicircum}')
text = text.replace("%", '{\\%}')
text = text.replace("#", '{\\#}')
text = text.replace("~", '{\\textasciitilde}')
# Separate compound characters, e.g. "--" to "-{}-". (The
# actual separation is done later; see below.)
separate_chars = '-'
if self.literal_block or self.literal:
# In monospace-font, we also separate ",,", "``" and "''"
# and some other characters which can't occur in
# non-literal text.
separate_chars += ',`\'"<>'
# pdflatex does not produce doublequotes for ngerman.
text = self.babel.double_quotes_in_tt(text)
if self.font_encoding == 'OT1':
# We're using OT1 font-encoding and have to replace
# underscore by underlined blank, because this has
# correct width.
text = text.replace('_', '{\\underline{ }}')
# And the tt-backslash doesn't work in OT1, so we use
# a mirrored slash.
text = text.replace('\\textbackslash', '\\reflectbox{/}')
else:
text = text.replace('_', '{\\_}')
else:
text = self.babel.quote_quotes(text)
text = text.replace("_", '{\\_}')
for char in separate_chars * 2:
# Do it twice ("* 2") becaues otherwise we would replace
# "---" by "-{}--".
text = text.replace(char + char, char + '{}' + char)
if self.insert_newline or self.literal_block:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
text = text.replace("\n", '~\\\\\n')
elif self.mbox_newline:
if self.literal_block:
closings = "}" * len(self.literal_block_stack)
openings = "".join(self.literal_block_stack)
else:
closings = ""
openings = ""
text = text.replace("\n", "%s}\\\\\n\\mbox{%s" % (closings,openings))
# lines starting with "[" give errors.
text = text.replace('[', '{[}')
if self.insert_none_breaking_blanks:
text = text.replace(' ', '~')
if self.latex_encoding != 'utf8':
text = self.unicode_to_latex(text)
return text
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
    def astext(self):
        """Assemble and return the complete LaTeX document as a string."""
        if self.pdfinfo is not None:
            if self.pdfauthor:
                self.pdfinfo.append('pdfauthor={%s}' % self.pdfauthor)
        if self.pdfinfo:
            pdfinfo = '\\hypersetup{\n' + ',\n'.join(self.pdfinfo) + '\n}\n'
        else:
            pdfinfo = ''
        # \title, \author and \date are always emitted, even when empty;
        # multiple author groups are joined with \and.
        head = '\\title{%s}\n\\author{%s}\n\\date{%s}\n' % \
               (self.title,
                ' \\and\n'.join(['~\\\\\n'.join(author_lines)
                                 for author_lines in self.author_stack]),
                self.date)
        return ''.join(self.head_prefix + [head] + self.head + [pdfinfo]
                       + self.body_prefix + self.body + self.body_suffix)
    def visit_Text(self, node):
        # All text content goes through the LaTeX escaping in encode().
        self.body.append(self.encode(node.astext()))

    def depart_Text(self, node):
        pass

    def visit_address(self, node):
        # "address" is a docinfo field (newlines are meaningful there).
        self.visit_docinfo_item(node, 'address')

    def depart_address(self, node):
        self.depart_docinfo_item(node)
def visit_admonition(self, node, name=''):
self.body.append('\\begin{center}\\begin{sffamily}\n')
self.body.append('\\fbox{\\parbox{\\admonitionwidth}{\n')
if name:
self.body.append('\\textbf{\\large '+ self.language.labels[name] + '}\n');
self.body.append('\\vspace{2mm}\n')
def depart_admonition(self, node=None):
self.body.append('}}\n') # end parbox fbox
self.body.append('\\end{sffamily}\n\\end{center}\n');
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
def depart_attention(self, node):
self.depart_admonition()
    def visit_author(self, node):
        self.visit_docinfo_item(node, 'author')

    def depart_author(self, node):
        self.depart_docinfo_item(node)

    def visit_authors(self, node):
        # not used: visit_author is called anyway for each author.
        pass

    def depart_authors(self, node):
        pass

    def visit_block_quote(self, node):
        self.body.append( '\\begin{quote}\n')

    def depart_block_quote(self, node):
        self.body.append( '\\end{quote}\n')
def visit_bullet_list(self, node):
if 'contents' in self.topic_classes:
if not self.use_latex_toc:
self.body.append( '\\begin{list}{}{}\n' )
else:
self.body.append( '\\begin{itemize}\n' )
def depart_bullet_list(self, node):
if 'contents' in self.topic_classes:
if not self.use_latex_toc:
self.body.append( '\\end{list}\n' )
else:
self.body.append( '\\end{itemize}\n' )
# Imperfect superscript/subscript handling: mathmode italicizes
# all letters by default.
def visit_superscript(self, node):
self.body.append('$^{')
self.mathmode = 1
def depart_superscript(self, node):
self.body.append('}$')
self.mathmode = 0
def visit_subscript(self, node):
self.body.append('$_{')
self.mathmode = 1
def depart_subscript(self, node):
self.body.append('}$')
self.mathmode = 0
    def visit_caption(self, node):
        self.body.append( '\\caption{' )

    def depart_caption(self, node):
        self.body.append('}')

    def visit_caution(self, node):
        self.visit_admonition(node, 'caution')

    def depart_caution(self, node):
        self.depart_admonition()

    def visit_title_reference(self, node):
        # \titlereference is defined in the preamble ("some_commands").
        self.body.append( '\\titlereference{' )

    def depart_title_reference(self, node):
        self.body.append( '}' )
    def visit_citation(self, node):
        # TODO maybe use cite bibitems
        if self._use_latex_citations:
            # Remember where this citation's text starts in self.body;
            # depart_citation() cuts it out again and stores it as a
            # bibitem for the end of the document.
            self.context.append(len(self.body))
        else:
            # Emulated citations: bottom floats with hypertargets.
            self.body.append('\\begin{figure}[b]')
            for id in node['ids']:
                self.body.append('\\hypertarget{%s}' % id)

    def depart_citation(self, node):
        if self._use_latex_citations:
            size = self.context.pop()
            # first appended chunk is the label, the rest is the text.
            label = self.body[size]
            text = ''.join(self.body[size+1:])
            del self.body[size:]
            self._bibitems.append([label, text])
        else:
            self.body.append('\\end{figure}\n')
def visit_citation_reference(self, node):
if self._use_latex_citations:
self.body.append('\\cite{')
else:
href = ''
if node.has_key('refid'):
href = node['refid']
elif node.has_key('refname'):
href = self.document.nameids[node['refname']]
self.body.append('[\\hyperlink{%s}{' % href)
def depart_citation_reference(self, node):
if self._use_latex_citations:
self.body.append('}')
else:
self.body.append('}]')
    def visit_classifier(self, node):
        self.body.append( '(\\textbf{' )

    def depart_classifier(self, node):
        self.body.append( '})\n' )

    def visit_colspec(self, node):
        # column geometry is collected by the active table object.
        self.active_table.visit_colspec(node)

    def depart_colspec(self, node):
        pass

    def visit_comment(self, node):
        # Escape end of line by a new comment start in comment text.
        self.body.append('%% %s \n' % node.astext().replace('\n', '\n% '))
        raise nodes.SkipNode

    def visit_compound(self, node):
        pass

    def depart_compound(self, node):
        pass
    # The following docinfo fields are all delegated to
    # visit_docinfo_item()/depart_docinfo_item().
    def visit_contact(self, node):
        self.visit_docinfo_item(node, 'contact')

    def depart_contact(self, node):
        self.depart_docinfo_item(node)

    def visit_copyright(self, node):
        self.visit_docinfo_item(node, 'copyright')

    def depart_copyright(self, node):
        self.depart_docinfo_item(node)

    def visit_danger(self, node):
        self.visit_admonition(node, 'danger')

    def depart_danger(self, node):
        self.depart_admonition()

    def visit_date(self, node):
        self.visit_docinfo_item(node, 'date')

    def depart_date(self, node):
        self.depart_docinfo_item(node)

    def visit_decoration(self, node):
        pass

    def depart_decoration(self, node):
        pass
    def visit_definition(self, node):
        self.body.append('%[visit_definition]\n')

    def depart_definition(self, node):
        self.body.append('\n')
        self.body.append('%[depart_definition]\n')

    def visit_definition_list(self, node):
        self.body.append( '\\begin{description}\n' )

    def depart_definition_list(self, node):
        self.body.append( '\\end{description}\n' )

    def visit_definition_list_item(self, node):
        self.body.append('%[visit_definition_list_item]\n')

    def depart_definition_list_item(self, node):
        self.body.append('%[depart_definition_list_item]\n')

    def visit_description(self, node):
        # option-list description column: plain space in list mode,
        # table cell separator in tabularx mode.
        if self.use_optionlist_for_option_list:
            self.body.append( ' ' )
        else:
            self.body.append( ' & ' )

    def depart_description(self, node):
        pass
    def visit_docinfo(self, node):
        # Collect docinfo output in its own list; depart_docinfo()
        # prepends it to the document body.
        self.docinfo = []
        self.docinfo.append('%' + '_'*75 + '\n')
        self.docinfo.append('\\begin{center}\n')
        self.docinfo.append('\\begin{tabularx}{\\docinfowidth}{lX}\n')

    def depart_docinfo(self, node):
        self.docinfo.append('\\end{tabularx}\n')
        self.docinfo.append('\\end{center}\n')
        self.body = self.docinfo + self.body
        # clear docinfo, so field names are no longer appended.
        self.docinfo = None
def visit_docinfo_item(self, node, name):
if name == 'author':
if not self.pdfinfo == None:
if not self.pdfauthor:
self.pdfauthor = self.attval(node.astext())
else:
self.pdfauthor += self.author_separator + self.attval(node.astext())
if self.use_latex_docinfo:
if name in ('author', 'organization', 'contact', 'address'):
# We attach these to the last author. If any of them precedes
# the first author, put them in a separate "author" group (for
# no better semantics).
if name == 'author' or not self.author_stack:
self.author_stack.append([])
if name == 'address': # newlines are meaningful
self.insert_newline = 1
text = self.encode(node.astext())
self.insert_newline = 0
else:
text = self.attval(node.astext())
self.author_stack[-1].append(text)
raise nodes.SkipNode
elif name == 'date':
self.date = self.attval(node.astext())
raise nodes.SkipNode
self.docinfo.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'address':
self.insert_newline = 1
self.docinfo.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
else:
self.context.append(' \\\\\n')
self.context.append(self.docinfo)
self.context.append(len(self.body))
def depart_docinfo_item(self, node):
size = self.context.pop()
dest = self.context.pop()
tail = self.context.pop()
tail = self.body[size:] + [tail]
del self.body[size:]
dest.extend(tail)
# for address we did set insert_newline
self.insert_newline = 0
def visit_doctest_block(self, node):
self.body.append( '\\begin{verbatim}' )
self.verbatim = 1
def depart_doctest_block(self, node):
self.body.append( '\\end{verbatim}\n' )
self.verbatim = 0
    def visit_document(self, node):
        self.body_prefix.append('\\begin{document}\n')
        # titled document?
        if self.use_latex_docinfo or len(node) and isinstance(node[0], nodes.title):
            self.body_prefix.append('\\maketitle\n\n')
            # alternative use titlepage environment.
            # \begin{titlepage}
        self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n')

    def depart_document(self, node):
        # TODO insertion point of bibliography should none automatic.
        if self._use_latex_citations and len(self._bibitems)>0:
            # the widest label determines thebibliography's label width.
            widest_label = ""
            for bi in self._bibitems:
                if len(widest_label)<len(bi[0]):
                    widest_label = bi[0]
            self.body.append('\n\\begin{thebibliography}{%s}\n'%widest_label)
            for bi in self._bibitems:
                self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], bi[0], bi[1]))
            self.body.append('\\end{thebibliography}\n')
        self.body_suffix.append('\\end{document}\n')
def visit_emphasis(self, node):
self.body.append('\\emph{')
self.literal_block_stack.append('\\emph{')
def depart_emphasis(self, node):
self.body.append('}')
self.literal_block_stack.pop()
def visit_entry(self, node):
self.active_table.visit_entry()
# cell separation
if self.active_table.get_entry_number() == 1:
# if the firstrow is a multirow, this actually is the second row.
# this gets hairy if rowspans follow each other.
if self.active_table.get_rowspan(0):
count = 0
while self.active_table.get_rowspan(count):
count += 1
self.body.append(' & ')
self.active_table.visit_entry() # increment cell count
else:
self.body.append(' & ')
# multi{row,column}
# IN WORK BUG TODO HACK continues here
# multirow in LaTeX simply will enlarge the cell over several rows
# (the following n if n is positive, the former if negative).
if node.has_key('morerows') and node.has_key('morecols'):
raise NotImplementedError('Cells that '
'span multiple rows *and* columns are not supported, sorry.')
if node.has_key('morerows'):
count = node['morerows'] + 1
self.active_table.set_rowspan(self.active_table.get_entry_number()-1,count)
self.body.append('\\multirow{%d}{%s}{' % \
(count,self.active_table.get_column_width()))
self.context.append('}')
# BUG following rows must have empty cells.
elif node.has_key('morecols'):
# the vertical bar before column is missing if it is the first column.
# the one after always.
if self.active_table.get_entry_number() == 1:
bar1 = self.active_table.get_vertical_bar()
else:
bar1 = ''
count = node['morecols'] + 1
self.body.append('\\multicolumn{%d}{%sl%s}{' % \
(count, bar1, self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
# header / not header
if isinstance(node.parent.parent, nodes.thead):
self.body.append('\\textbf{')
self.context.append('}')
else:
self.context.append('')
    def depart_entry(self, node):
        self.body.append(self.context.pop()) # header / not header
        self.body.append(self.context.pop()) # multirow/column
        # if following row is spanned from above.
        if self.active_table.get_rowspan(self.active_table.get_entry_number()):
            self.body.append(' & ')
            self.active_table.visit_entry() # increment cell count

    def visit_row(self, node):
        self.active_table.visit_row()

    def depart_row(self, node):
        # the row terminator and any \hline/\cline lines come from the
        # active table object.
        self.body.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node):
# We create our own enumeration list environment.
# This allows to set the style and starting value
# and unlimited nesting.
self._enum_cnt += 1
enum_style = {'arabic':'arabic',
'loweralpha':'alph',
'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman' }
enum_suffix = ""
if node.has_key('suffix'):
enum_suffix = node['suffix']
enum_prefix = ""
if node.has_key('prefix'):
enum_prefix = node['prefix']
if self.compound_enumerators:
pref = ""
if self.section_prefix_for_enumerators and self.section_level:
for i in range(self.section_level):
pref += '%d.' % self._section_number[i]
pref = pref[:-1] + self.section_enumerator_separator
enum_prefix += pref
for counter in self._enumeration_counters:
enum_prefix += counter + '.'
enum_type = "arabic"
if node.has_key('enumtype'):
enum_type = node['enumtype']
if enum_style.has_key(enum_type):
enum_type = enum_style[enum_type]
counter_name = "listcnt%d" % self._enum_cnt;
self._enumeration_counters.append("\\%s{%s}" % (enum_type,counter_name))
self.body.append('\\newcounter{%s}\n' % counter_name)
self.body.append('\\begin{list}{%s\\%s{%s}%s}\n' % \
(enum_prefix,enum_type,counter_name,enum_suffix))
self.body.append('{\n')
self.body.append('\\usecounter{%s}\n' % counter_name)
# set start after usecounter, because it initializes to zero.
if node.has_key('start'):
self.body.append('\\addtocounter{%s}{%d}\n' \
% (counter_name,node['start']-1))
## set rightmargin equal to leftmargin
self.body.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
self.body.append('}\n')
def depart_enumerated_list(self, node):
self.body.append('\\end{list}\n')
self._enumeration_counters.pop()
    def visit_error(self, node):
        self.visit_admonition(node, 'error')

    def depart_error(self, node):
        self.depart_admonition()

    def visit_field(self, node):
        # real output is done in siblings: _argument, _body, _name
        pass

    def depart_field(self, node):
        self.body.append('\n')
        ##self.body.append('%[depart_field]\n')

    def visit_field_argument(self, node):
        self.body.append('%[visit_field_argument]\n')

    def depart_field_argument(self, node):
        self.body.append('%[depart_field_argument]\n')

    def visit_field_body(self, node):
        # BUG by attach as text we loose references.
        if self.docinfo:
            self.docinfo.append('%s \\\\\n' % self.encode(node.astext()))
            raise nodes.SkipNode
        # BUG: what happens if not docinfo

    def depart_field_body(self, node):
        self.body.append( '\n' )

    def visit_field_list(self, node):
        # outside of docinfo, field lists become quoted descriptions.
        if not self.docinfo:
            self.body.append('\\begin{quote}\n')
            self.body.append('\\begin{description}\n')

    def depart_field_list(self, node):
        if not self.docinfo:
            self.body.append('\\end{description}\n')
            self.body.append('\\end{quote}\n')

    def visit_field_name(self, node):
        # BUG this duplicates docinfo_item
        if self.docinfo:
            self.docinfo.append('\\textbf{%s}: &\n\t' % self.encode(node.astext()))
            raise nodes.SkipNode
        else:
            self.body.append('\\item [')

    def depart_field_name(self, node):
        if not self.docinfo:
            self.body.append(':]')
    def visit_figure(self, node):
        self.body.append( '\\begin{figure}[htbp]\\begin{center}\n' )

    def depart_figure(self, node):
        self.body.append( '\\end{center}\\end{figure}\n' )

    def visit_footer(self, node):
        # remember where the footer text starts in self.body.
        self.context.append(len(self.body))

    def depart_footer(self, node):
        # move the footer text into body_suffix, centered and small.
        start = self.context.pop()
        footer = (['\n\\begin{center}\small\n']
                  + self.body[start:] + ['\n\\end{center}\n'])
        self.body_suffix[:0] = footer
        del self.body[start:]
    def visit_footnote(self, node):
        if self.use_latex_footnotes:
            # native LaTeX footnotes: the label is the first word of the
            # footnote text.
            num,text = node.astext().split(None,1)
            num = self.encode(num.strip())
            self.body.append('\\footnotetext['+num+']')
            self.body.append('{')
        else:
            # emulated footnotes: bottom floats with hypertargets.
            self.body.append('\\begin{figure}[b]')
            for id in node['ids']:
                self.body.append('\\hypertarget{%s}' % id)

    def depart_footnote(self, node):
        if self.use_latex_footnotes:
            self.body.append('}\n')
        else:
            self.body.append('\\end{figure}\n')
def visit_footnote_reference(self, node):
if self.use_latex_footnotes:
self.body.append("\\footnotemark["+self.encode(node.astext())+"]")
raise nodes.SkipNode
href = ''
if node.has_key('refid'):
href = node['refid']
elif node.has_key('refname'):
href = self.document.nameids[node['refname']]
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
elif format == 'superscript':
suffix = '\\raisebox{.5em}[0em]{\\scriptsize'
self.context.append('}')
else: # shouldn't happen
raise AssertionError('Illegal footnote reference format.')
self.body.append('%s\\hyperlink{%s}{' % (suffix,href))
def depart_footnote_reference(self, node):
if self.use_latex_footnotes:
return
self.body.append('}%s' % self.context.pop())
    # footnote/citation label
    def label_delim(self, node, bracket, superscript):
        """Emit the delimiter around a footnote or citation label,
        picking *bracket* or *superscript* per the reference style."""
        if isinstance(node.parent, nodes.footnote):
            if self.use_latex_footnotes:
                # native footnotes carry their own label.
                raise nodes.SkipNode
            if self.settings.footnote_references == 'brackets':
                self.body.append(bracket)
            else:
                self.body.append(superscript)
        else:
            assert isinstance(node.parent, nodes.citation)
            if not self._use_latex_citations:
                self.body.append(bracket)

    def visit_label(self, node):
        self.label_delim(node, '[', '$^{')

    def depart_label(self, node):
        self.label_delim(node, ']', '}$')
# elements generated by the framework e.g. section numbers.
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.context.append(len(self.body))
def depart_header(self, node):
start = self.context.pop()
self.body_prefix.append('\n\\verb|begin_header|\n')
self.body_prefix.extend(self.body[start:])
self.body_prefix.append('\n\\verb|end_header|\n')
del self.body[start:]
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
def depart_hint(self, node):
self.depart_admonition()
def visit_image(self, node):
attrs = node.attributes
# Add image URI to dependency list, assuming that it's
# referring to a local file.
self.settings.record_dependencies.add(attrs['uri'])
pre = [] # in reverse order
post = ['\\includegraphics{%s}' % attrs['uri']]
inline = isinstance(node.parent, nodes.TextElement)
if attrs.has_key('scale'):
# Could also be done with ``scale`` option to
# ``\includegraphics``; doing it this way for consistency.
pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
post.append('}')
if attrs.has_key('align'):
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
(1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'),
(1, 'bottom'): ('\\raisebox{-\\height}{', '}'),
(0, 'center'): ('{\\hfill', '\\hfill}'),
# These 2 don't exactly do the right thing. The image should
# be floated alongside the paragraph. See
# http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
(0, 'left'): ('{', '\\hfill}'),
(0, 'right'): ('{\\hfill', '}'),}
try:
pre.append(align_prepost[inline, attrs['align']][0])
post.append(align_prepost[inline, attrs['align']][1])
except KeyError:
pass # XXX complain here?
if not inline:
pre.append('\n')
post.append('\n')
pre.reverse()
self.body.extend(pre + post)
def depart_image(self, node):
pass
def visit_important(self, node):
self.visit_admonition(node, 'important')
def depart_important(self, node):
self.depart_admonition()
def visit_interpreted(self, node):
# @@@ Incomplete, pending a proper implementation on the
# Parser/Reader end.
self.visit_literal(node)
def depart_interpreted(self, node):
self.depart_literal(node)
def visit_legend(self, node):
self.body.append('{\\small ')
def depart_legend(self, node):
self.body.append('}')
def visit_line(self, node):
    """Start one line of a line block as a LaTeX list item."""
    # Consistency fix: use an escaped backslash like the rest of the file
    # ('\i' is not a Python escape, so the emitted text is unchanged).
    self.body.append('\\item[] ')
def depart_line(self, node):
self.body.append('\n')
def visit_line_block(self, node):
if isinstance(node.parent, nodes.line_block):
self.body.append('\\item[] \n'
'\\begin{lineblock}{\\lineblockindentation}\n')
else:
self.body.append('\n\\begin{lineblock}{0em}\n')
def depart_line_block(self, node):
self.body.append('\\end{lineblock}\n')
def visit_list_item(self, node):
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append('\\item {} ')
def depart_list_item(self, node):
self.body.append('\n')
def visit_literal(self, node):
self.literal = 1
self.body.append('\\texttt{')
def depart_literal(self, node):
self.body.append('}')
self.literal = 0
def visit_literal_block(self, node):
"""
Render a literal-block.
Literal blocks are used for "::"-prefixed literal-indented
blocks of text, where the inline markup is not recognized,
but are also the product of the parsed-literal directive,
where the markup is respected.
"""
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox.
#
# We can distinguish between the two kinds by the number of
# siblings the compose this node: if it is composed by a
# single element, it's surely is either a real one, otherwise
# it's a parsed-literal that does not contain any markup.
#
if (self.settings.use_verbatim_when_possible and (len(node) == 1)
# in case of a parsed-literal containing just a "**bold**" word:
and isinstance(node[0], nodes.Text)):
self.verbatim = 1
self.body.append('\\begin{quote}\\begin{verbatim}\n')
else:
self.literal_block = 1
self.insert_none_breaking_blanks = 1
if self.active_table.is_open():
self.body.append('\n{\\ttfamily \\raggedright \\noindent\n')
else:
# no quote inside tables, to avoid vertical sppace between
# table border and literal block.
# BUG: fails if normal text preceeds the literal block.
self.body.append('\\begin{quote}')
self.body.append('{\\ttfamily \\raggedright \\noindent\n')
# * obey..: is from julien and never worked for me (grubert).
# self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n')
def depart_literal_block(self, node):
if self.verbatim:
self.body.append('\n\\end{verbatim}\\end{quote}\n')
self.verbatim = 0
else:
if self.active_table.is_open():
self.body.append('\n}\n')
else:
self.body.append('\n')
self.body.append('}\\end{quote}\n')
self.insert_none_breaking_blanks = 0
self.literal_block = 0
# obey end: self.body.append('}\n')
def visit_meta(self, node):
self.body.append('[visit_meta]\n')
# BUG maybe set keywords for pdf
##self.head.append(self.starttag(node, 'meta', **node.attributes))
def depart_meta(self, node):
self.body.append('[depart_meta]\n')
def visit_note(self, node):
self.visit_admonition(node, 'note')
def depart_note(self, node):
self.depart_admonition()
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node):
# flag tha the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
"""The delimiter betweeen an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
if self.use_optionlist_for_option_list:
self.body.append('\\item [')
else:
if len(node.astext()) > 14:
self.body.append('\\multicolumn{2}{l}{')
self.context.append('} \\\\\n ')
else:
self.context.append('')
self.body.append('\\texttt{')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
if self.use_optionlist_for_option_list:
self.body.append('] ')
else:
self.body.append('}')
self.body.append(self.context.pop())
def visit_option_list(self, node):
self.body.append('% [option list]\n')
if self.use_optionlist_for_option_list:
self.body.append('\\begin{optionlist}{3cm}\n')
else:
self.body.append('\\begin{center}\n')
# BUG: use admwidth or make it relative to textwidth ?
self.body.append('\\begin{tabularx}{.9\\linewidth}{lX}\n')
def depart_option_list(self, node):
if self.use_optionlist_for_option_list:
self.body.append('\\end{optionlist}\n')
else:
self.body.append('\\end{tabularx}\n')
self.body.append('\\end{center}\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
if not self.use_optionlist_for_option_list:
self.body.append('\\\\\n')
def visit_option_string(self, node):
##self.body.append(self.starttag(node, 'span', '', CLASS='option'))
pass
def depart_option_string(self, node):
##self.body.append('</span>')
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
index = node.parent.index(node)
if not ('contents' in self.topic_classes or
(isinstance(node.parent, nodes.compound) and
index > 0 and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound))):
self.body.append('\n')
def depart_paragraph(self, node):
self.body.append('\n')
def visit_problematic(self, node):
self.body.append('{\\color{red}\\bfseries{}')
def depart_problematic(self, node):
self.body.append('}')
def visit_raw(self, node):
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_reference(self, node):
    """Open an \\href for an external URI or internal cross-reference.

    Target precedence: refuri, then refid, then refname.
    """
    # BUG: hash_char "#" is trouble some in LaTeX.
    # mbox and other environment do not like the '#'.
    hash_char = '\\#'
    if node.has_key('refuri'):
        href = node['refuri'].replace('#',hash_char)
    elif node.has_key('refid'):
        href = hash_char + node['refid']
    elif node.has_key('refname'):
        # refname must be resolved through the document's name->id map
        href = hash_char + self.document.nameids[node['refname']]
    else:
        raise AssertionError('Unknown reference.')
    self.body.append('\\href{%s}{' % href)
def depart_reference(self, node):
self.body.append('}')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
def depart_revision(self, node):
self.depart_docinfo_item(node)
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
# BUG: this is just a hack to make sidebars render something
self.body.append('\n\\setlength{\\locallinewidth}{0.9\\admonitionwidth}\n')
self.body.append('\\begin{center}\\begin{sffamily}\n')
self.body.append('\\fbox{\\colorbox[gray]{0.80}{\\parbox{\\admonitionwidth}{\n')
def depart_sidebar(self, node):
self.body.append('}}}\n') # end parbox colorbox fbox
self.body.append('\\end{sffamily}\n\\end{center}\n');
self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n')
attribution_formats = {'dash': ('---', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.body.append('\n\\begin{flushright}\n')
self.body.append(prefix)
self.context.append(suffix)
def depart_attribution(self, node):
self.body.append(self.context.pop() + '\n')
self.body.append('\\end{flushright}\n')
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
def depart_status(self, node):
self.depart_docinfo_item(node)
def visit_strong(self, node):
    """Open bold markup; track it for literal-block re-emission."""
    markup = '\\textbf{'
    self.body.append(markup)
    self.literal_block_stack.append(markup)
def depart_strong(self, node):
    """Close bold markup and unwind the literal-block tracking stack."""
    self.literal_block_stack.pop()
    self.body.append('}')
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append('~\\\\\n\\textbf{')
self.context.append('}\n\\smallskip\n')
elif isinstance(node.parent, nodes.document):
self.title = self.title + \
'\\\\\n\\large{%s}\n' % self.encode(node.astext())
raise nodes.SkipNode
elif isinstance(node.parent, nodes.section):
self.body.append('\\textbf{')
self.context.append('}\\vspace{0.2cm}\n\n\\noindent ')
def depart_subtitle(self, node):
self.body.append(self.context.pop())
def visit_system_message(self, node):
pass
def depart_system_message(self, node):
self.body.append('\n')
def visit_table(self, node):
    """Open the active table and emit its LaTeX opening markup.

    Nested tables are rejected: the active-table state tracks only a
    single open table at a time.
    """
    if self.active_table.is_open():
        # Put the explanation into the exception instead of printing it,
        # so callers and logs see why the assertion fired.
        raise AssertionError('nested tables are not supported')
    self.active_table.open()
    self.body.append('\n' + self.active_table.get_opening())
def depart_table(self, node):
self.body.append(self.active_table.get_closing() + '\n')
self.active_table.close()
def visit_target(self, node):
# BUG: why not (refuri or refid or refname) means not footnote ?
if not (node.has_key('refuri') or node.has_key('refid')
or node.has_key('refname')):
for id in node['ids']:
self.body.append('\\hypertarget{%s}{' % id)
self.context.append('}' * len(node['ids']))
else:
self.context.append('')
def depart_target(self, node):
self.body.append(self.context.pop())
def visit_tbody(self, node):
# BUG write preamble if not yet done (colspecs not [])
# for tables without heads.
if not self.active_table.get('preamble written'):
self.visit_thead(None)
# self.depart_thead(None)
def depart_tbody(self, node):
pass
def visit_term(self, node):
self.body.append('\\item[{')
def depart_term(self, node):
# definition list term.
self.body.append('}] ')
def visit_tgroup(self, node):
#self.body.append(self.starttag(node, 'colgroup'))
#self.context.append('</colgroup>\n')
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
self.body.append('{%s}\n' % self.active_table.get_colspecs())
if self.active_table.caption:
self.body.append('\\caption{%s}\\\\\n' % self.active_table.caption)
self.active_table.set('preamble written',1)
# TODO longtable supports firsthead and lastfoot too.
self.body.extend(self.active_table.visit_thead())
def depart_thead(self, node):
# the table header written should be on every page
# => \endhead
self.body.extend(self.active_table.depart_thead())
# and the firsthead => \endfirsthead
# BUG i want a "continued from previous page" on every not
# firsthead, but then we need the header twice.
#
# there is a \endfoot and \endlastfoot too.
# but we need the number of columns to
# self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns)
# self.body.append('\\hline\n\\endfoot\n')
# self.body.append('\\hline\n')
# self.body.append('\\endlastfoot\n')
def visit_tip(self, node):
self.visit_admonition(node, 'tip')
def depart_tip(self, node):
self.depart_admonition()
def bookmark(self, node):
    """Append latex href and pdfbookmarks for titles.

    Emits a \\hypertarget per parent id, and (when not using LaTeX's own
    ToC) a \\pdfbookmark entry so the PDF outline still works.
    """
    if node.parent['ids']:
        for id in node.parent['ids']:
            self.body.append('\\hypertarget{%s}{}\n' % id)
    if not self.use_latex_toc:
        # BUG level depends on style. pdflatex allows level 0 to 3
        # ToC would be the only on level 0 so i choose to decrement the rest.
        # "Table of contents" bookmark to see the ToC. To avoid this
        # we set all zeroes to one.
        l = self.section_level
        if l>0:
            l = l-1
        # pdftex does not like "_" subscripts in titles
        text = self.encode(node.astext())
        for id in node.parent['ids']:
            self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \
                (l, text, id))
def visit_title(self, node):
    """Only 3 section levels are supported by LaTeX article (AFAIR).

    Dispatches on the parent node: topic titles, sidebar/admonition
    titles, table captions, the document title, and ordinary section
    headings are all rendered differently.
    """
    if isinstance(node.parent, nodes.topic):
        # section titles before the table of contents.
        self.bookmark(node)
        # BUG: latex chokes on center environment with "perhaps a missing item".
        # so we use hfill.
        self.body.append('\\subsubsection*{~\\hfill ')
        # the closing brace for subsection.
        self.context.append('\\hfill ~}\n')
    # TODO: for admonition titles before the first section
    # either specify every possible node or ... ?
    elif isinstance(node.parent, nodes.sidebar) \
    or isinstance(node.parent, nodes.admonition):
        self.body.append('\\textbf{\\large ')
        self.context.append('}\n\\smallskip\n')
    elif isinstance(node.parent, nodes.table):
        # caption must be written after column spec
        self.active_table.caption = self.encode(node.astext())
        raise nodes.SkipNode
    elif self.section_level == 0:
        # document title
        self.title = self.encode(node.astext())
        # Idiom fix ("is not None" instead of "not ... == None") and
        # reuse the already-encoded title instead of encoding twice.
        if self.pdfinfo is not None:
            self.pdfinfo.append('pdftitle={%s}' % self.title)
        raise nodes.SkipNode
    else:
        self.body.append('\n\n')
        self.body.append('%' + '_' * 75)
        self.body.append('\n\n')
        self.bookmark(node)
        if self.use_latex_toc:
            section_star = ""
        else:
            section_star = "*"
        section_name = self.d_class.section(self.section_level)
        self.body.append('\\%s%s{' % (section_name, section_star))
        self.context.append('}\n')
def depart_title(self, node):
self.body.append(self.context.pop())
def visit_topic(self, node):
self.topic_classes = node['classes']
if 'contents' in node['classes'] and self.use_latex_toc:
self.body.append('\\tableofcontents\n\n\\bigskip\n')
self.topic_classes = []
raise nodes.SkipNode
def visit_inline(self, node): # titlereference
self.body.append( '\\docutilsrole%s{' % node.get('class'))
def depart_inline(self, node):
self.body.append( '}' )
def depart_topic(self, node):
self.topic_classes = []
self.body.append('\n')
def visit_rubric(self, node):
self.body.append('\\rubric{')
self.context.append('}\n')
def depart_rubric(self, node):
self.body.append(self.context.pop())
def visit_transition(self, node):
    """Render a document transition as a centered horizontal rule."""
    # A comment rule in the LaTeX source, then a centered \hrulefill.
    comment_rule = '%' + 75 * '_'
    hrule = '\n\\hspace*{\\fill}\\hrulefill\\hspace*{\\fill}'
    self.body.extend(['\n\n', comment_rule, hrule, '\n\n'])
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
def depart_warning(self, node):
self.depart_admonition()
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
|
baoboa/Crystal-Space
|
docs/support/docutils/writers/latex2e.py
|
Python
|
lgpl-2.1
| 75,964
|
[
"VisIt"
] |
70c025f691c8bf8a99c346ba32ad346ccf5e8198aef28c51e77204e29f874432
|
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
ncanda_quality_control_script
======================
This script checks the quality of the data for the NCANDA Project on REDCap.
Call script on command line.
Example Usage:
python ncanda_quality_control_script.py -v "baseline_visit_arm_1"
"""
import os
import sys
import json
import datetime
import csv
import redcap
import math
import pandas as pd
import sibis
fields = ['study_id', 'redcap_event_name','exclude', 'visit_ignore',
'visit_date', 'dob', 'cnp_test_sessions_dob','saliva_missing',
'saliva_1_collected','saliva_1_date','saliva_2_collected','saliva_2_date',
'saliva_3_collected','saliva_3_date','saliva_4_collected',
'saliva_4_date','youthreport1_missing','youthreport1_date',
'youthreport1b_missing', 'youthreport1b_date','youthreport2_missing',
'youthreport2_date','youthreport2_yid2', 'youthreport1_yid2',
'parentreport_missing','parentreport_date','ssage_youth_missing',
'ssage_youth_date', 'lssaga1_youth_missing','lssaga1_youth_date',
'lssaga1_parent_missing','lssaga1_parent_date','bio_np_missing',
'bio_np_date','dd1000_missing','dd1000_date','dd100_missing',
'dd100_date','np_wrat4_missing','np_wrat4_wr_raw','np_gpeg_missing',
'np_gpeg_exclusion','np_gpeg_dh_time','np_gpeg_ndh_time',
'np_reyo_missing','np_reyo_copy_time','np_reyo_qc(completed)',
'np_atax_missing','np_atax_sht_trial1','np_wais4_missing',
'np_wais4_rawscore','np_wais4_rawscore_computed',
'np_wais4_rawscore_diff(correct)','pasat_missing','pasat_date',
'cnp_missing','cnp_test_sessions_dotest','stroop_missing',
'stroop_date','mrireport_missing','mrireport_date',
'mr_session_report_complete']
form_fields = [['youthreport1_missing','youthreport1_date'],
['youthreport1b_missing', 'youthreport1b_date'],
['youthreport2_missing', 'youthreport2_date'],
['parentreport_missing','parentreport_date'],
['ssage_youth_missing','ssage_youth_date'],
['lssaga1_youth_missing','lssaga1_youth_date'],
['lssaga1_parent_missing','lssaga1_parent_date'],
['bio_np_missing', 'bio_np_date'],
['dd1000_missing','dd1000_date'],
['dd100_missing','dd100_date'],
['np_wrat4_missing','np_wrat4_wr_raw'],
['np_reyo_missing','np_reyo_copy_time'],
['np_atax_missing','np_atax_sht_trial1'],
['np_wais4_missing', 'np_wais4_rawscore'],
['pasat_missing','pasat_date'],
['cnp_missing','cnp_test_sessions_dotest'],
['stroop_missing','stroop_date']]
np_gpeg_fields = [['np_gpeg_exclusion___dh','np_gpeg_dh_time'],
['np_gpeg_exclusion___ndh','np_gpeg_ndh_time']]
saliva_fields = [['saliva_1_collected','saliva_1_date'],
['saliva_2_collected','saliva_2_date'],['saliva_3_collected',
'saliva_3_date'],['saliva_4_collected','saliva_4_date']]
fields_sex = [['youthreport1_missing','youthreport1_yid2'],
['youthreport2_missing','youthreport2_yid2']]
def get_project_entry(args=None):
    """
    Pulls the data from REDCap

    Reads the data-entry API token from ~/.server_config and returns a
    connected redcap.Project.  ``args`` is unused but kept for backward
    compatibility with existing callers.
    """
    # Get API key.  Use a context manager so the key file is closed even
    # on error (the original leaked the file handle).
    token_path = os.path.join(os.path.expanduser("~"),
                              '.server_config',
                              'redcap-dataentry-token')
    with open(token_path, 'r') as summary_key_file:
        summary_api_key = summary_key_file.read().strip()
    # Connect to API.
    project_entry = redcap.Project('https://ncanda.sri.com/redcap/api/',
                                   summary_api_key, verify_ssl=False)
    return project_entry
def data_entry_fields(fields,project,arm):
    """
    Gets the dataframe containing a specific arm from REDCap

    fields: list of REDCap field names to export.
    project: redcap.Project connection (see get_project_entry).
    arm: event name(s) forwarded to export_records as ``events``.
    """
    # Get a dataframe of fields (format='df' yields a pandas DataFrame
    # indexed by study_id / redcap_event_name).
    data_entry_raw = project.export_records(fields=fields, format='df',
                                            events=arm)
    return data_entry_raw
def check(check, error):
    # Append a truthy QC result (a non-empty error dict) to the shared
    # error list; the empty dicts returned by clean checks are dropped.
    # NOTE(review): the first parameter shadows the function's own name;
    # harmless since all calls are positional, but worth renaming later.
    if check:
        error.append(check)
def missing_form(idx, row, field_missing, field_value):
    """
    Generates a report indicating which Forms have not been entered onto redcap
    """
    # A record is only examined when it is not excluded (exclude == NaN),
    # the visit is not ignored, and the form is not flagged missing.
    if not math.isnan(row.get('exclude')):
        return dict()
    if row.get('visit_ignore___yes') == 1:
        return dict()
    if row.get(field_missing) == 1:
        return dict()
    value = row.get(field_value)
    # Dates are stored as strings; a plain-float NaN marks a blank field.
    if type(value) == float and math.isnan(value):
        return dict(subject_site_id=idx[0],
                    visit_date=row.get('visit_date'),
                    form_missing=field_missing,
                    event_name=idx[1],
                    error='ERROR: Form is missing')
    return dict()
def np_groove_check(idx, row, field_missing, field_excluded, field_value):
    """
    Checks to see if the Grooveboard NP is missing
    """
    clean = dict()
    # Only records that are not excluded (exclude == NaN) and whose visit
    # is explicitly not ignored (value 0, not merely NaN) are examined.
    if not math.isnan(row.get('exclude')):
        return clean
    if row.get('visit_ignore___yes') != 0:
        return clean
    if row.get(field_excluded) != 0:
        return clean
    missing_flag = row.get(field_missing)
    # The NP counts as "present" when the missing flag is 0 or blank (NaN).
    if missing_flag != 0 and not math.isnan(missing_flag):
        return clean
    value = row.get(field_value)
    # Times are stored as strings; a plain-float NaN marks a blank field.
    if type(value) == float and math.isnan(value):
        return dict(subject_site_id=idx[0],
                    visit_date=row.get('visit_date'),
                    np_missing=field_missing,
                    event_name=idx[1],
                    error='ERROR: NP is missing.')
    return clean
def fourteen_days_mri_report(idx, row):
    """
    Generates a report indicating which MRI reports have no data after 14 days.

    Returns an error dict when the MRI report is dated exactly 14 days
    ago and not flagged missing, otherwise an empty dict.
    """
    error = dict()
    # exclude with a value of 1 is excluded (NaN means "not excluded")
    if math.isnan(row.get('exclude')):
        # visit_ignore____yes with value 1 is ignored
        if row.get('visit_ignore___yes') != 1:
            if row.get('mrireport_missing') != 1:
                report_date = row.get('mrireport_date')
                # BUGFIX: the original tested type(mrireport_missing), so the
                # branch below was unreachable; the *date* string must be checked.
                if type(report_date) == str:
                    # BUGFIX: strptime() yields a datetime, which never compares
                    # equal to a date -- compare calendar dates instead.
                    report_day = datetime.datetime.strptime(report_date, '%Y-%m-%d').date()
                    if report_day == datetime.date.today() - datetime.timedelta(days=14):
                        error = dict(subject_site_id=idx[0],
                                     visit_date=row.get('visit_date'),
                                     event_name=idx[1],
                                     error='ERROR: No MRI data after 14 days')
    return error
def cnp_dob(idx, row):
    """
    Checks to see if dob and cnp_test_sessions_dob match

    Returns an error dict when the two birth dates disagree, otherwise
    an empty dict.
    """
    error = dict()
    # exclude == NaN means the record is not excluded
    if math.isnan(row.get('exclude')):
        # visit_ignore____yes with value 0 is not ignored
        if row.get('visit_ignore___yes') == 0:
            # BUGFIX: the original raised the error when the two dates WERE
            # equal; the mismatch is the error condition, per the message.
            if row.get('dob') != row.get('cnp_test_sessions_dob'):
                error = dict(subject_site_id=idx[0],
                             visit_date=row.get('visit_date'),
                             event_name=idx[1],
                             error='ERROR: DOB and CNP_TEST_SESSIONS_DOB do not match.')
    return error
def missing_mri_stroop(idx, row):
    """
    Generate a report indicating which MRI Stroop have not been entered.
    """
    clean = dict()
    if not math.isnan(row.get('exclude')):
        return clean
    if row.get('visit_ignore___yes') == 1:
        return clean
    if row.get('mri_missing') == 1:
        return clean
    # Stroop data is only collected at the SRI and UCSD sites.
    if row.get('redcap_data_access_group') not in ('SRI', 'UCSD'):
        return clean
    if row.get('mri_stroop_missing') != 0:
        return clean
    # The date is stored as a string; a blank field defaults to float NaN.
    if type(row.get('mri_stroop_date')) == float:
        return dict(subject_site_id=idx[0],
                    xnat_sid=row.get('mri_xnat_sid'),
                    visit_date=row.get('visit_date'),
                    event_name=idx[1],
                    error='ERROR: MRI Stroop is missing')
    return clean
def missing_saliva_sample(idx, row, saliva_collected, saliva_date):
    """
    Generate a report indicating which Saliva Samples have not been entered.
    """
    # The sample is only reportable when the record is not excluded, the
    # visit is not ignored, saliva collection is not globally missing,
    # this sample is marked collected, and its date is blank (float NaN).
    if (math.isnan(row.get('exclude'))
            and row.get('visit_ignore___yes') != 1
            and row.get('saliva_missing') != 1
            and row.get(saliva_collected) == 1
            and type(row.get(saliva_date)) == float):
        return dict(subject_site_id=idx[0],
                    visit_date=row.get('visit_date'),
                    event_name=idx[1],
                    sample_missing=saliva_collected,
                    visit_notes=row.get('visit_notes'),
                    error='ERROR: Saliva Sample is missing')
    return dict()
def visit_data_missing(idx, row):
    """
    Generate a report indicating which Visit Dates are missing.
    """
    excluded = row.get('exclude') == 1
    ignored = row.get('visit_ignore___yes') == 1
    # A present visit date is stored as a string; anything else is missing.
    has_date = type(row.get('visit_date')) == str
    if excluded or ignored or has_date:
        return dict()
    return dict(subject_site_id=idx[0],
                event_name=idx[1],
                error='ERROR: Visit date missing.')
def wais_score_verification(idx, row):
    """
    Verifies whether the wais_rawscore was computed correctly.
    """
    # visit_ignore____yes with value 0 is not ignored
    error = dict()
    if math.isnan(row.get('exclude')):
        if row.get('visit_ignore___yes') != 1:
            # form is not missing if form_missing if value nan or zero
            if row.get('np_wais4_missing') != 1:
                # NOTE(review): this compares the *computed* score against the
                # *difference* field and flags an error only when the two are
                # equal AND the difference is non-zero.  It looks like the
                # intent was "error when computed != entered rawscore" (i.e.
                # diff != 0 alone) -- confirm against the REDCap field
                # definitions before changing the logic.
                if row.get('np_wais4_rawscore_computed') == row.get('np_wais4_rawscore_diff(correct)'):
                    if row.get('np_wais4_rawscore_diff(correct)') != 0:
                        error = dict(subject_site_id = idx[0],
                                     visit_date = row.get('visit_date'),
                                     event_name = idx[1],
                                     error = 'ERROR: WAIS score is not verified'
                                     )
    return error
def youth_report_sex(idx, row, field_missing, field_sex):
    """
    Checks whether or not sex was entered correctly in the Youth Report
    """
    # Only non-excluded, non-ignored visits with the report present count.
    relevant = (math.isnan(row.get('exclude'))
                and row.get('visit_ignore___yes') != 1
                and row.get(field_missing) != 1)
    if relevant and row.get('sex') != row.get(field_sex):
        return dict(subject_site_id=idx[0],
                    visit_date=row.get('visit_date'),
                    event_name=idx[1],
                    field=field_sex,
                    error='ERROR: SEX and SEX in YOUTHREPORT do not match.')
    return dict()
def main(args):
    """Run every QC check over the selected visit(s) and report the errors.

    Errors are written as CSV to ``args.csvdir`` when given, otherwise
    sent to the sibis logging service.
    """
    project_entry = get_project_entry()
    project_df = data_entry_fields(fields, project_entry, args.visit)
    error = []
    # idx is the (study_id, redcap_event_name) index of each exported row.
    for idx, row in project_df.iterrows():
        for f in form_fields:
            check(missing_form(idx, row, f[0], f[1]), error)
        for np in np_gpeg_fields:
            check(np_groove_check(idx, row, 'np_gpeg_missing', np[0], np[1]), error)
        check(fourteen_days_mri_report(idx, row), error)
        check(cnp_dob(idx, row), error)
        check(missing_mri_stroop(idx, row), error)
        for s in saliva_fields:
            check(missing_saliva_sample(idx, row, s[0], s[1]), error)
        check(visit_data_missing(idx, row), error)
        check(wais_score_verification(idx, row), error)
        for f in fields_sex:
            check(youth_report_sex(idx, row, f[0], f[1]), error)
    if args.csvdir:
        # NOTE(review): the entries are dicts and never the string 'null',
        # so this filter (and the one in the else branch) is dead code;
        # check() already drops empty results.  Removing items from a list
        # while iterating it also skips elements -- confirm before reuse.
        for e in error:
            if e == 'null':
                error.remove(e)
        # 'wb+' is the Python 2 binary mode expected by the csv module.
        with open(args.csvdir, 'wb+') as fi:
            f = csv.writer(fi)
            f.writerow(["subject_site_id", "visit_date", "event_name", "error"])
            for x in error:
                f.writerow([x["subject_site_id"],
                            x["visit_date"],
                            x["event_name"],
                            x["error"]])
    else:
        for e in error:
            if e != 'null':
                #print json.dumps(e, sort_keys=True)
                #print "{}-{}".format(e['subject_site_id'], e['visit_date']), e['error'],e
                sibis.logging("{}-{}".format(e['subject_site_id'], e['visit_date']), e['error'],e_dictionary=e)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # NOTE(review): the default is a *list* of two events while a command
    # line value arrives as a single string -- confirm that
    # export_records(events=...) accepts both forms.
    parser.add_argument('-v','--visit', default=['baseline_visit_arm_1', '1y_visit_arm_1'],
                        help='Select which visit the QC script runs on',)
    parser.add_argument( "-c","--csvdir", action="store",
                         help="Directory where CSV will be stored.")
    argv = parser.parse_args()
    # main() returns None, so the exit status is always success.
    sys.exit(main(args=argv))
|
sibis-platform/ncanda-data-integration
|
scripts/reporting/ncanda_quality_control_script.py
|
Python
|
bsd-3-clause
| 14,366
|
[
"VisIt"
] |
62a3534360b32b301d90693dfc7129136556c2d6ab33025d35044a58adf3790f
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/dpd.py", sample_size=100, int_steps=100)
@skipIfMissingFeatures
class Sample(ut.TestCase):
    # System object created by the imported dpd.py sample script.
    system = sample.system

    def test_final_pressure(self):
        # Check the simulated pressure statistics against reference values.
        # NOTE(review): these are absolute tolerances (0.02 on p_avg, 0.01 on
        # p_std), not the "+/- 10%" the original comment claimed.
        self.assertLess(abs(sample.p_avg - 0.23), 0.02)
        self.assertLess(abs(sample.p_std - 0.01), 0.01)
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/scripts/samples/test_dpd.py
|
Python
|
gpl-3.0
| 1,207
|
[
"ESPResSo"
] |
871872ce131487534a74fd5d0f224537d9531d064c312999d21350c368be8b74
|
'''
This script performs a grid search for the current parameters that best match the data
from Hausser and Clark (1997)
'''
import datetime
import os
import gc
import multiprocessing
import itertools
from brian import *
import sys
sys.path.append('../../')
from MLI_PKJ_net import *
import cPickle
import time
set_global_preferences(useweave=True, usenewpropagate=True, usecodegen=True, usecodegenweave=True)
defaultclock.dt = .25*ms
def isi_mean_and_std(monitor):
    '''
    compute the mean and standard deviation of interspike intervals
    of a group of neurons
    '''
    # Pool the ISIs of every neuron, converted from seconds to ms.
    isi = []
    for n_ind, times in monitor.spiketimes.iteritems():
        isi += list(diff(times)*1000)
    # var()**.5 is the standard deviation (the docstring previously said
    # "variance", which was inaccurate).
    return mean(isi), var(isi)**.5
def run_net((k,theta)):
    """Simulate one MLI driven by gamma-distributed input current.

    Python 2 tuple-parameter unpacking: the single argument is a
    (k, theta) pair of gamma-distribution parameters.  Returns
    (k, theta, mean firing rate, ISI coefficient of variation).
    """
    # Re-seed from the worker PID so parallel pool workers differ.
    seed(os.getpid())
    print os.getpid()
    # Reset Brian's global simulation state between runs in this process.
    reinit()
    reinit_default_clock()
    clear(True)
    gc.collect()
    T = 6000  # simulation duration; run() below is given T*msecond
    N_MLI = 1
    MLI = MLIGroup(N_MLI)
    @network_operation(Clock(dt=defaultclock.dt))
    def random_current():
        # Draw a fresh gamma-distributed current (in nA) every timestep.
        MLI.I = gamma(k,theta,size=len(MLI)) * nA
    # Monitor
    MS_MLI = SpikeMonitor(MLI)
    MR_MLI = PopulationRateMonitor(MLI,bin=1*ms)
    MISI_MLI = ISIHistogramMonitor(MLI,bins=arange(0,162,2)*ms)
    start = time.time()
    run(T*msecond)
    print time.time() - start
    # NOTE(review): "mli_mew" looks like a typo for "mli_mean"; it is a
    # local name, so it is left unchanged here.
    mli_mew, mli_std = isi_mean_and_std(MS_MLI)
    return k,theta,mean(MR_MLI.rate), mli_std/mli_mew
if __name__ == "__main__":
pool = multiprocessing.Pool(6)
params = []
for k in linspace(.1,11,500):
for theta in linspace(.0001,.05,100):
if k*theta < .03 and k*theta > .015:
params.append((k,theta))
print len(params)
results = pool.map(run_net, params)
out_dir = out_dir = '~/data/neuron_models/molecular_layer/mli_gamma_current_param_sweep/%s/'%datetime.datetime.now().isoformat()
os.makedirs(out_dir)
with open(out_dir+'results.txt','w') as outf:
outf.write('\t'.join(['k','theta','mli_mean_firing_rate','mli_cv'])+'\n')
for r in results:
outf.write('\t'.join(map(str,r))+'\n')
|
blennon/MLI_PKJ_net
|
MLI_PKJ_net/parameter_search/MLI_gamma_current_param_sweep.py
|
Python
|
mit
| 2,101
|
[
"Brian"
] |
2a33f0b2509508ccecbb0c8b002a630f72cf2d66942bf70ce5ba95baaf507736
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import re
import urllib
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser
from salts_lib.utils2 import i18n
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import SHORT_MONS
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://2ddl.io'
CATEGORIES = {VIDEO_TYPES.MOVIE: '/category/movies/', VIDEO_TYPES.TVSHOW: '/category/tv-shows/'}
EXCLUDE_LINKS = ['adf.ly', urlparse.urlparse(BASE_URL).hostname]
class Scraper(scraper.Scraper):
    """Scraper for 2DDL (http://2ddl.io), a blog-style DDL link site.

    Finds movie and TV-episode sources by parsing WordPress-style post
    markup; all returned links are external file hosts (debrid required,
    so every HTTP fetch passes require_debrid=True).
    """
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # The base URL can be overridden from the addon settings.
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        # Video types this scraper can produce sources for.
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])

    @classmethod
    def get_name(cls):
        return '2DDL'

    def get_sources(self, video):
        """Return a list of hoster dicts for |video| ([] when none found)."""
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, require_debrid=True, cache_limit=.5)
            # Movie pages carry one <singlelink> section; episode pages repeat
            # <strong>title</strong> ... <singlelink> blocks per release.
            if video.video_type == VIDEO_TYPES.MOVIE:
                pattern = '<singlelink>(.*?)(?=<hr\s*/>|download>|thanks_button_div)'
            else:
                pattern = '<hr\s*/>\s*<strong>(.*?)</strong>.*?<singlelink>(.*?)(?=<hr\s*/>|download>|thanks_button_div)'
            for match in re.finditer(pattern, html, re.DOTALL):
                if video.video_type == VIDEO_TYPES.MOVIE:
                    links = match.group(1)
                    # For movies the release title comes from the page <h2>.
                    match = re.search('<h2>\s*<a[^>]+>(.*?)</a>', html)
                    if match:
                        title = match.group(1)
                    else:
                        title = ''
                else:
                    title, links = match.groups()

                for match in re.finditer('href="([^"]+)', links):
                    stream_url = match.group(1).lower()
                    # Skip ad redirectors and links back to the site itself.
                    if any(link in stream_url for link in EXCLUDE_LINKS): continue
                    host = urlparse.urlparse(stream_url).hostname
                    quality = scraper_utils.blog_get_quality(video, title, host)
                    hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
                    hosters.append(hoster)
        return hosters

    @classmethod
    def get_settings(cls):
        """Add a post-age filter (in days) to the base scraper settings."""
        settings = super(cls, cls).get_settings()
        settings = scraper_utils.disable_sub_check(settings)
        name = cls.get_name()
        settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="60" visible="eq(-4,true)"/>' % (name, i18n('filter_results_days')))
        return settings

    def _get_episode_url(self, show_url, video):
        """Page through TV-category posts looking for |video|'s episode post.

        Paging stops as soon as a post is older than the user's age filter.
        Returns a pathified URL, or None when nothing matched.
        """
        force_title = scraper_utils.force_title(video)
        title_fallback = kodi.get_setting('title-fallback') == 'true'
        norm_title = scraper_utils.normalize_title(video.ep_title)
        page_url = [show_url]
        too_old = False
        while page_url and not too_old:
            url = urlparse.urljoin(self.base_url, page_url[0])
            html = self._http_get(url, require_debrid=True, cache_limit=1)
            posts = dom_parser.parse_dom(html, 'div', {'id': 'post-\d+'})
            for post in posts:
                if self.__too_old(post):
                    too_old = True
                    break
                if CATEGORIES[VIDEO_TYPES.TVSHOW] in post and show_url in post:
                    match = re.search('<a\s+href="([^"]+)[^>]+>(.*?)</a>', post)
                    if match:
                        url, title = match.groups()
                        if not force_title:
                            # Normal path: match on season/episode numbering.
                            if scraper_utils.release_check(video, title, require_title=False):
                                return scraper_utils.pathify_url(url)
                        else:
                            # Title-match mode: compare normalized episode titles.
                            if title_fallback and norm_title:
                                match = re.search('</strong>(.*?)</p>', post)
                                if match and norm_title == scraper_utils.normalize_title(match.group(1)):
                                    return scraper_utils.pathify_url(url)
            # Follow the "next page" link, if any.
            page_url = dom_parser.parse_dom(html, 'a', {'class': 'nextpostslink'}, ret='href')

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        """Search the site; returns dicts with 'url', 'title' and 'year'."""
        results = []
        search_url = urlparse.urljoin(self.base_url, '/search/')
        search_url += urllib.quote_plus(title)
        html = self._http_get(search_url, require_debrid=True, cache_limit=1)
        if video_type == VIDEO_TYPES.TVSHOW:
            seen_urls = {}
            for post in dom_parser.parse_dom(html, 'div', {'id': 'post-\d+'}):
                if CATEGORIES[video_type] in post:
                    # The show link is taken from the post's TAGS line.
                    match = re.search('<span>\s*TAGS:\s*</span>\s*<a\s+href="([^"]+)[^>]+>([^<]+)', post, re.I)
                    if match:
                        show_url, match_title = match.groups()
                        # De-duplicate shows appearing in several posts.
                        if show_url not in seen_urls:
                            result = {'url': scraper_utils.pathify_url(show_url), 'title': scraper_utils.cleanse_title(match_title), 'year': ''}
                            seen_urls[show_url] = result
                            results.append(result)
        elif video_type == VIDEO_TYPES.MOVIE:
            headings = re.findall('<h2>\s*<a\s+href="([^"]+)[^>]+>(.*?)</a>', html)
            posts = dom_parser.parse_dom(html, 'div', {'id': 'post-\d+'})
            norm_title = scraper_utils.normalize_title(title)
            for heading, post in zip(headings, posts):
                if CATEGORIES[video_type] in post and not self.__too_old(post):
                    post_url, post_title = heading
                    # Split the post title into "<title> (<year>) <extra>".
                    match = re.search('(.*?)\s*[.\[(]?(\d{4})[.)\]]?\s*(.*)', post_title)
                    if match:
                        match_title, match_year, extra_title = match.groups()
                        full_title = '%s [%s]' % (match_title, extra_title)
                    else:
                        full_title = match_title = post_title
                        match_year = ''

                    # Accept substring matches in either direction.
                    match_norm_title = scraper_utils.normalize_title(match_title)
                    if (match_norm_title in norm_title or norm_title in match_norm_title) and (not year or not match_year or year == match_year):
                        result = {'url': scraper_utils.pathify_url(post_url), 'title': scraper_utils.cleanse_title(full_title), 'year': match_year}
                        results.append(result)
        return results

    def __too_old(self, post):
        """True when |post|'s posting date exceeds the user's day filter."""
        filter_days = datetime.timedelta(days=int(kodi.get_setting('%s-filter' % (self.get_name()))))
        if filter_days:
            today = datetime.date.today()
            match = re.search('<a[^>]+title="posting time[^"]*">(.*?)\s+(\d+)\s*(\d{2,4})<', post)
            if match:
                try:
                    mon_name, post_day, post_year = match.groups()
                    post_year = int(post_year)
                    # Two-digit years are assumed to be 20xx.
                    if post_year < 2000:
                        post_year += 2000
                    post_month = SHORT_MONS.index(mon_name) + 1
                    post_date = datetime.date(post_year, post_month, int(post_day))
                    if today - post_date > filter_days:
                        return True
                except ValueError:
                    # Unparseable month/day: treat the post as not too old.
                    return False
        return False
|
JamesLinEngineer/RKMC
|
addons/plugin.video.salts/scrapers/2ddl_scraper.py
|
Python
|
gpl-2.0
| 8,509
|
[
"ADF"
] |
b57d1a58c8309e74cf46d7d7696453ee63a0d9bf1b84a9e64fd5cc8ad1538e33
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014 Siyuan Ren (netheril96@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from __future__ import print_function
import re
import argparse
import os
import hashlib
import sys
import io
from numbers import Number
# Python 2/3 compatibility layer
is_python2 = sys.version_info.major == 2
if is_python2:
str = unicode
# simplejson has the same interface as the standard json module, but with better error messages
try:
import simplejson as json
except ImportError:
import json
# parsimonious is required for parsing C++ type name
try:
import parsimonious
except ImportError:
parsimonious = None
# base class for all custom exceptions in this unit
class InvalidDefinitionError(Exception):
    """Base class for all errors raised while validating a definition record."""
    pass
class InvalidIdentifier(InvalidDefinitionError):
    """Raised when a string is not a legal C++ identifier."""

    def __init__(self, identifier):
        self.identifier = identifier

    def __str__(self):
        return "Invalid string for C++ identifier: {!r}".format(self.identifier)
class InvalidNamespace(InvalidDefinitionError):
    """Raised when the 'namespace' option is not a legal C++ namespace."""

    def __init__(self, namespace):
        self.namespace = namespace

    def __str__(self):
        return "Invalid namespace: {!r}".format(self.namespace)
class UnrecognizedOption(InvalidDefinitionError):
    """Raised when a record contains an option this generator doesn't know."""

    def __init__(self, option):
        self.option = option

    def __str__(self):
        return "Unrecognized option: {!r}".format(self.option)
class UnsupportedTypeError(InvalidDefinitionError):
    """Raised for C++ types the generator cannot handle (pointers etc.)."""

    def __init__(self, type_name):
        self.type_name = type_name

    def __str__(self):
        return "Unsupported C++ type: {!r}".format(self.type_name)
# convert arbitrary byte sequence into a C++ string literal by escaping every character
if is_python2:
    def cstring_literal(byte_string):
        """Hex-escape every byte of *byte_string* into a quoted C++ literal."""
        escaped = ('\\x{:02x}'.format(ord(ch)) for ch in byte_string)
        return '"' + ''.join(escaped) + '"'
else:
    def cstring_literal(byte_string):
        """Hex-escape every byte of *byte_string* into a quoted C++ literal."""
        escaped = ('\\x{:02x}'.format(b) for b in byte_string)
        return '"' + ''.join(escaped) + '"'
def check_identifier(identifier):
    """Raise InvalidIdentifier unless *identifier* is a valid C++ identifier."""
    if re.match(r'^[A-Za-z_]\w*$', identifier) is None:
        raise InvalidIdentifier(identifier)
class ClassInfo(object):
    """Validated, read-only view over one class record from the JSON
    definition file.  Raises InvalidDefinitionError subclasses on bad input."""

    accept_options = {"name", "namespace", "parse_mode", "members", "constructor_code", "comment", "no_duplicates"}

    def __init__(self, record):
        self._name = record['name']
        self._members = [MemberInfo(r) for r in record['members']]
        self._strict = record.get('parse_mode', '') == 'strict'
        self._namespace = record.get("namespace", None)
        self._constructor_code = record.get("constructor_code", "")
        self._no_duplicates = record.get("no_duplicates", False)
        check_identifier(self._name)
        # The namespace may optionally be anchored with a leading '::'.
        if self._namespace is not None and not re.match(r'^(?:::)?[A-Za-z_]\w*(?:::[A-Za-z_]\w*)*$', self._namespace):
            raise InvalidNamespace(self._namespace)
        for option in record:
            if option not in ClassInfo.accept_options:
                raise UnrecognizedOption(option)

    @property
    def name(self):
        return self._name

    @property
    def qualified_name(self):
        """Fully qualified name, always anchored with a leading '::'."""
        ns = self._namespace
        if ns is None:
            return '::' + self._name
        prefix = ns if ns.startswith('::') else '::' + ns
        return prefix + '::' + self._name

    @property
    def members(self):
        return self._members

    @property
    def strict_parsing(self):
        return self._strict

    @property
    def namespace(self):
        return self._namespace

    @property
    def constructor_code(self):
        return self._constructor_code

    @property
    def no_duplicates(self):
        return self._no_duplicates
class ClassDefinitionCodeGenerator(object):
    """Emits the C++ struct definition text for one ClassInfo."""

    def __init__(self, class_info):
        self._class_info = class_info

    @property
    def class_info(self):
        return self._class_info

    def member_declarations(self):
        """One 'type name;' declaration per member, newline separated."""
        decls = [m.type_name + ' ' + m.variable_name + ';' for m in self.class_info.members]
        return '\n'.join(decls)

    def initializer_list(self):
        """Constructor initializer list: 'name(default-args)' per member."""
        inits = ['{0}({1})'.format(m.variable_name, m.constructor_args) for m in self.class_info.members]
        return ', '.join(inits)

    def constructor(self):
        return 'explicit {name}():{init} {{ {code} }}\n'.format(name=self.class_info.name,
                                                                init=self.initializer_list(),
                                                                code=self.class_info.constructor_code)

    def class_definition(self):
        """Full struct definition, wrapped in its namespace(s) if any."""
        class_def = 'struct {name} {{\n {declarations}\n\n{constructor}\n\n \n}};' \
            .format(name=self.class_info.name, declarations=self.member_declarations(),
                    constructor=self.constructor())
        namespace = self.class_info.namespace
        if namespace is not None:
            # Wrap innermost-first so the outermost namespace ends up outside.
            for component in reversed(namespace.split('::')):
                if component:
                    class_def = 'namespace {} {{ {} }}\n'.format(component, class_def)
        return class_def
class MemberInfo(object):
    """View over one member record: [type, name] or [type, name, options]."""

    accept_options = {'default', 'required', 'json_key', 'comment', 'minLength', 'maxLength', 'pattern', 'format', 'enum', 'oneOf','containerTypeProperty', 'minimum', 'maximum', 'minItems', 'maxItems', 'uniqueItems'}

    def __init__(self, record):
        self._record = record
        # Pointer and reference types cannot be deserialized.
        if '*' in self.type_name or '&' in self.type_name:
            raise UnsupportedTypeError(self.type_name)
        check_identifier(self.variable_name)
        if len(record) > 3:
            raise UnrecognizedOption(record[3:])

    @property
    def type_name(self):
        return self._record[0]

    @property
    def variable_name(self):
        return self._record[1]

    @property
    def json_key(self):
        """UTF-8 encoded JSON key; defaults to the variable name."""
        try:
            key = self._record[2]['json_key']
        except (IndexError, KeyError):
            key = self.variable_name
        return key.encode('utf-8')

    @property
    def is_required(self):
        return self._record[2]['required']

    @property
    def default(self):
        """Default value from the options dict, or None when absent."""
        try:
            options = self._record[2]
            return options['default']
        except (IndexError, KeyError):
            return None

    @property
    def constructor_args(self):
        return MemberInfo.cpp_repr(self.default)

    @staticmethod
    def cpp_repr(args):
        """Render a Python default value as a C++ constructor argument."""
        if args is None:
            return ''
        # bool must be tested before Number: True/False are Numbers too.
        if args is True:
            return 'true'
        if args is False:
            return 'false'
        if isinstance(args, str):
            return cstring_literal(args.encode('utf-8'))
        if isinstance(args, Number):
            return str(args)
        if isinstance(args, bytes):
            return cstring_literal(args)
        raise UnrecognizedOption("default=" + repr(args))
class HelperClassCodeGenerator(object):
    """Generates the C++ code fragments (handler members, flag bookkeeping,
    serialization statements) that the code template splices into the SAX
    helper class for one ClassInfo."""

    def __init__(self, class_info):
        self._class_info = class_info

    @property
    def class_info(self):
        return self._class_info

    @property
    def members_info(self):
        return self._class_info.members

    def handler_declarations(self):
        # One nested SAX handler per member, named handler_<index>.
        return '\n'.join('SAXEventHandler< {} > handler_{};'.format(m.type_name, i)
                         for i, m in enumerate(self.members_info))

    def handler_initializers(self):
        return '\n'.join(', handler_{}(&obj->{})'.format(i, m.variable_name)
                         for i, m in enumerate(self.members_info))

    def flags_declaration(self):
        # has_<name> flags exist only where needed: duplicate detection
        # (no_duplicates) or required-field validation.
        return '\n'.join('bool has_{};'.format(m.variable_name) for m in self.members_info
                         if self.class_info.no_duplicates or m.is_required)

    def flags_reset(self):
        return '\n'.join(self.flag_statement(m, "false") for m in self.members_info)

    def post_validation(self):
        # After parsing, report any required member whose flag is still unset.
        return '\n'.join('if (!has_{0}) set_missing_required("{0}");'
                         .format(m.variable_name) for m in self.members_info if m.is_required)

    def key_event_handling(self):
        # Per-key dispatch chain: select the member's state index, optionally
        # check for duplicates, and set its has_ flag.
        return '\n'.join('else if (utility::string_equal(str, length, {key}, {key_length}))\n\
{{ state={state}; {dup_check} {set_flag} }}'
                         .format(key=cstring_literal(m.json_key), key_length=len(m.json_key),
                                 state=i, dup_check=self.check_for_duplicate_key(m),
                                 set_flag=self.flag_statement(m, "true"))
                         for i, m in enumerate(self.members_info))

    def event_forwarding(self, call_text):
        # Forward the current SAX event to the handler selected by `state`.
        return '\n\n'.join('case {i}:\n return checked_event_forwarding(handler_{i}.{call});'
                           .format(i=i, call=call_text) for i in range(len(self.members_info)))

    def error_reaping(self):
        return '\n'.join('case {0}:\n handler_{0}.ReapError(errs); break;'.format(i)
                         for i in range(len(self.members_info)))

    def writer_type_name(self):
        # Deterministic, collision-resistant writer name derived from the
        # fully qualified class name.
        return "Writer" + hashlib.sha256(self.class_info.qualified_name.encode()).hexdigest()

    def data_serialization(self):
        return '\n'.join('w.Key({}, {}, false); Serializer< {}, {} >()(w, value.{});'
                         .format(cstring_literal(m.json_key), len(m.json_key),
                                 self.writer_type_name(), m.type_name, m.variable_name)
                         for m in self.members_info)

    def current_member_name(self):
        return '\n'.join('case {}:\n return "{}";'.format(i, m.variable_name)
                         for i, m in enumerate(self.members_info))

    def unknown_key_handling(self):
        # In strict mode unknown keys are a parse error; otherwise skip them.
        if self.class_info.strict_parsing:
            return 'the_error.reset(new error::UnknownFieldError(str, length)); return false;'
        else:
            return 'return true;'

    def count_of_members(self):
        return str(len(self.members_info))

    def flag_statement(self, member_info, flag):
        # Empty string when this member needs no has_ flag at all.
        if self.class_info.no_duplicates or member_info.is_required:
            return 'has_{} = {};'.format(member_info.variable_name, flag)
        else:
            return ''

    def check_for_duplicate_key(self, member_info):
        if self.class_info.no_duplicates:
            return 'if (has_{}) the_error.reset(new error::DuplicateKeyError(current_member_name()));\n'.\
                format(member_info.variable_name)
        else:
            return ''

    def prepare_for_reuse(self):
        return ''.join('handler_{}.PrepareForReuse();\n'.format(i) for i in range(len(self.members_info)))
class CPPTypeNameChecker(object):
    """Best-effort sanity checker for the C++ type names in a definition.

    Parses a type name with a PEG grammar (via the optional `parsimonious`
    package) and reports every "simple type" component that is not in the
    known-good whitelist.
    """

    # PEG grammar for parsing the C++ type name we support
    # Note that raw pointer, reference, array, void, enum, function and pointer-to-member types are not supported
    PEG_GRAMMAR = r'''
type = (space cv_type space "<" space type_list space ">" space) / ( space cv_type space )
type_list = (type space "," space type_list) / type / space
cv_type = c_and_v_type / c_or_v_type / simple_type
c_and_v_type = ("const" space "volatile" space simple_type) / ("volatile" space "const" space simple_type)
c_or_v_type = ("const" space simple_type) / ("volatile" space simple_type)
simple_type = spaced_type / ("::"? identifier ("::" identifier)*)
spaced_type = sign_type / long_type
sign_type = ("unsigned" / "signed") space ( ("long" space "long"? space "int"?) / "int" / "char")
long_type = ("long" space "long" space "int") / ("long" space "long") / ("long" space "int")
identifier = ~"[A-Za-z_][A-Za-z_0-9]*"
space = ~"[ \t]*"
'''

    # Basic and container types that never produce a warning.
    KNOWN_BASIC_TYPE_NAMES = frozenset(['bool', 'char', 'int', 'unsigned int', 'unsigned', 'long long', 'long long int',
                                        'unsigned long long', 'unsigned long long int', 'std::uint32_t', 'std::int32_t',
                                        'std::uint64_t', 'std::int64_t', 'uint32_t', 'int32_t', 'uint64_t', 'int64_t',
                                        'std::nullptr_t',
                                        'std::size_t', 'size_t', 'std::ptrdiff_t', 'ptrdiff_t',
                                        'double', 'std::string', 'std::vector', 'std::deque', 'std::array',
                                        'boost::container::vector', 'boost::container::deque', 'boost::array',
                                        'std::shared_ptr', 'std::unique_ptr', 'boost::shared_ptr', 'boost::optional',
                                        'std::map', 'std::unordered_map', 'std::multimap', 'std::unordered_multimap',
                                        'boost::unordered_map', 'boost::unordered_multimap', 'std::tuple'])

    # parsimonious is optional; ParseError is None when it is not installed.
    ParseError = parsimonious.ParseError if parsimonious else None

    def __init__(self):
        self._grammar = parsimonious.Grammar(CPPTypeNameChecker.PEG_GRAMMAR)
        # Mutable copy so callers can register additional known names.
        self._known_names = set(CPPTypeNameChecker.KNOWN_BASIC_TYPE_NAMES)

    @staticmethod
    def __extract_simple_type(node):
        # Recursively yield the text of every simple_type parse node,
        # stripped of any leading '::' anchor.
        if node.expr_name == 'simple_type':
            yield node.text.lstrip(':')
        for sub_node in node.children:
            for value in CPPTypeNameChecker.__extract_simple_type(sub_node):
                yield value

    def check_for_unknown_basic_types(self, name):
        """
        :param name: the full name of the type to check
        :return: a list of unknown basic types
        """
        node = self.grammar.parse(name)
        simple_types = set(self.__extract_simple_type(node))
        return simple_types - self.known_names

    @property
    def grammar(self):
        return self._grammar

    @property
    def known_names(self):
        return self._known_names
def build_class(template, class_info):
    """Instantiate *template* for *class_info* by expanding its
    /* slot name */ placeholders with generated code fragments."""
    helper_gen = HelperClassCodeGenerator(class_info)
    replacement = {
        "class definition": ClassDefinitionCodeGenerator(class_info).class_definition(),
        "list of declarations": helper_gen.handler_declarations() + helper_gen.flags_declaration(),
        "init": helper_gen.handler_initializers(),
        "serialize all members": helper_gen.data_serialization(),
        "change state": helper_gen.key_event_handling(),
        "reap error": helper_gen.error_reaping(),
        "get member name": helper_gen.current_member_name(),
        "validation": helper_gen.post_validation(),
        "reset flags": helper_gen.flags_reset(),
        "handle unknown key": helper_gen.unknown_key_handling(),
        "TypeName": class_info.qualified_name,
        "count of members": helper_gen.count_of_members(),
        "Writer": helper_gen.writer_type_name(),
        "call PrepareForReuse": helper_gen.prepare_for_reuse()
    }

    def evaluate(placeholder):
        slot = placeholder.group(1)
        try:
            return replacement[slot]
        except KeyError:
            # Slots of the form "forward <event> to members" are dynamic.
            forward = re.match(r'forward (.*?) to members', slot)
            if forward is None:
                raise
            return helper_gen.event_forwarding(forward.group(1))

    return re.sub(r'/\*\s*(.*?)\s*\*/', evaluate, template)
def warn_if_name_unknown(checker, class_info):
    """Print a warning to stderr for every member type the checker does not
    recognize (or cannot parse at all)."""
    # The class being generated is itself a valid type from now on.
    checker.known_names.add(class_info.qualified_name.lstrip(':'))
    for member in class_info.members:
        try:
            for unknown in checker.check_for_unknown_basic_types(member.type_name):
                print("Warning:", "The type", repr(unknown), "may not be recognized", file=sys.stderr)
                print("\tReferenced from variable", repr(member.variable_name),
                      "in class", repr(class_info.qualified_name), "\n", file=sys.stderr)
        except CPPTypeNameChecker.ParseError:
            print("Warning:", "The type", repr(member.type_name), "is not valid", file=sys.stderr)
            print("\tReferenced from variable", repr(member.variable_name),
                  "in class", repr(class_info.qualified_name), "\n", file=sys.stderr)
def main():
    """Command-line entry point: read a JSON class-definition file and emit
    the corresponding autojsoncxx C++ header."""
    parser = argparse.ArgumentParser(description='`autojsoncxx` code generator '
                                                 '(visit https://github.com/netheril96/autojsoncxx for details)')
    parser.add_argument('-c', '--check', help='check the type names specified; requires `parsimonious` to be installed',
                        action='store_true', default=False)
    parser.add_argument('-i', '--input', help='input name for the definition file for classes', required=True)
    parser.add_argument('-o', '--output', help='output name for the header file', default=None)
    parser.add_argument('--template', help='location of the template file', default=None)
    args = parser.parse_args()

    # Type checking silently degrades when parsimonious is unavailable.
    if args.check and not parsimonious:
        print("Unable to import module `parsimonious`", "Type checks disabled", "", sep='\n', file=sys.stderr)
        args.check = False

    if args.template is None:
        # On Windows code_template located in the same folder as the executable
        if getattr(sys, 'frozen', False):
            template_dir = os.path.dirname(sys.executable)
            args.template = os.path.join(template_dir, 'code_template')
        # On UNIX It's worth checking system directories while looking for a code_template
        else:
            possible_template_dirs = [
                "/usr/local/share/autojsoncxx",
                "/usr/share/autojsoncxx",
                os.path.dirname(os.path.abspath(__file__)),
            ]
            # AUTOJSONCXX_TEMPLATE_DIR, when set, takes precedence.
            custom_template_dir = os.environ.get("AUTOJSONCXX_TEMPLATE_DIR")
            if custom_template_dir and os.path.isdir(custom_template_dir):
                possible_template_dirs.insert(0, custom_template_dir)
            possible_template_pathes = (os.path.join(d, 'code_template') for d in possible_template_dirs)
            args.template = next(p for p in possible_template_pathes if os.path.isfile(p))

    # Default output name: input basename with a .hpp extension.
    if args.output is None:
        args.output = os.path.basename(args.input)
        args.output = os.path.splitext(args.output)[0] + '.hpp'

    if args.check:
        checker = CPPTypeNameChecker()
    else:
        checker = None

    with io.open(args.template, encoding='utf-8') as f:
        template = f.read()
    with io.open(args.input, encoding='utf-8') as f:
        raw_record = json.load(f)

    with io.open(args.output, 'w', encoding='utf-8') as output:
        output.write('#pragma once\n\n')

        def output_class(class_record):
            # Validate, optionally type-check, then emit one class.
            class_info = ClassInfo(class_record)
            if args.check:
                warn_if_name_unknown(checker, class_info)
            output.write(build_class(template, class_info))

        # The definition file holds either one record or a list of records.
        if isinstance(raw_record, list):
            for r in raw_record:
                print("Processing:", r)
                output_class(r)
        else:
            output_class(raw_record)


if __name__ == '__main__':
    main()
|
vitalyisaev2/autojsoncxx
|
autojsoncxx.py
|
Python
|
mit
| 19,692
|
[
"VisIt"
] |
2b15add52f63865d9a6f94198c4665ab6d55ec6da6e4b62147fb40fb095a7eaf
|
# $Id$
#
# Copyright (C) 2001-2011 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" SMARTS definitions for the publically available MACCS keys
and a MACCS fingerprinter
I compared the MACCS fingerprints generated here with those from two
other packages (not MDL, unfortunately). Of course there are
disagreements between the various fingerprints still, but I think
these definitions work pretty well. Some notes:
1) most of the differences have to do with aromaticity
2) there's a discrepancy sometimes because the current RDKit
definitions do not require multiple matches to be distinct. e.g. the
SMILES C(=O)CC(=O) can match the (hypothetical) key O=CC twice in my
definition. It's not clear to me what the correct behavior is.
3) Some keys are not fully defined in the MDL documentation
4) Two keys, 125 and 166, have to be done outside of SMARTS.
5) Key 1 (ISOTOPE) isn't defined
Rev history:
2006 (gl): Original open-source release
May 2011 (gl): Update some definitions based on feedback from Andrew Dalke
"""
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from rdkit import DataStructs
# these are SMARTS patterns corresponding to the MDL MACCS keys
smartsPatts={
1:('?',0), # ISOTOPE
#2:('[#104,#105,#106,#107,#106,#109,#110,#111,#112]',0), # atomic num >103 Not complete
2:('[#104]',0), # limit the above def'n since the RDKit only accepts up to #104
3:('[#32,#33,#34,#50,#51,#52,#82,#83,#84]',0), # Group IVa,Va,VIa Rows 4-6
4:('[Ac,Th,Pa,U,Np,Pu,Am,Cm,Bk,Cf,Es,Fm,Md,No,Lr]',0), # actinide
5:('[Sc,Ti,Y,Zr,Hf]',0), # Group IIIB,IVB (Sc...)
6:('[La,Ce,Pr,Nd,Pm,Sm,Eu,Gd,Tb,Dy,Ho,Er,Tm,Yb,Lu]',0), # Lanthanide
7:('[V,Cr,Mn,Nb,Mo,Tc,Ta,W,Re]',0), # Group VB,VIB,VIIB
8:('[!#6;!#1]1~*~*~*~1',0), # QAAA@1
9:('[Fe,Co,Ni,Ru,Rh,Pd,Os,Ir,Pt]',0), # Group VIII (Fe...)
10:('[Be,Mg,Ca,Sr,Ba,Ra]',0), # Group IIa (Alkaline earth)
11:('*1~*~*~*~1',0), # 4M Ring
12:('[Cu,Zn,Ag,Cd,Au,Hg]',0), # Group IB,IIB (Cu..)
13:('[#8]~[#7](~[#6])~[#6]',0), # ON(C)C
14:('[#16]-[#16]',0), # S-S
15:('[#8]~[#6](~[#8])~[#8]',0), # OC(O)O
16:('[!#6;!#1]1~*~*~1',0), # QAA@1
17:('[#6]#[#6]',0), #CTC
18:('[#5,#13,#31,#49,#81]',0), # Group IIIA (B...)
19:('*1~*~*~*~*~*~*~1',0), # 7M Ring
20:('[#14]',0), #Si
21:('[#6]=[#6](~[!#6;!#1])~[!#6;!#1]',0), # C=C(Q)Q
22:('*1~*~*~1',0), # 3M Ring
23:('[#7]~[#6](~[#8])~[#8]',0), # NC(O)O
24:('[#7]-[#8]',0), # N-O
25:('[#7]~[#6](~[#7])~[#7]',0), # NC(N)N
26:('[#6]=;@[#6](@*)@*',0), # C$=C($A)$A
27:('[I]',0), # I
28:('[!#6;!#1]~[CH2]~[!#6;!#1]',0), # QCH2Q
29:('[#15]',0),# P
30:('[#6]~[!#6;!#1](~[#6])(~[#6])~*',0), # CQ(C)(C)A
31:('[!#6;!#1]~[F,Cl,Br,I]',0), # QX
32:('[#6]~[#16]~[#7]',0), # CSN
33:('[#7]~[#16]',0), # NS
34:('[CH2]=*',0), # CH2=A
35:('[Li,Na,K,Rb,Cs,Fr]',0), # Group IA (Alkali Metal)
36:('[#16R]',0), # S Heterocycle
37:('[#7]~[#6](~[#8])~[#7]',0), # NC(O)N
38:('[#7]~[#6](~[#6])~[#7]',0), # NC(C)N
39:('[#8]~[#16](~[#8])~[#8]',0), # OS(O)O
40:('[#16]-[#8]',0), # S-O
41:('[#6]#[#7]',0), # CTN
42:('F',0), # F
43:('[!#6;!#1;!H0]~*~[!#6;!#1;!H0]',0), # QHAQH
44:('[!#1;!#6;!#7;!#8;!#9;!#14;!#15;!#16;!#17;!#35;!#53]',0), # OTHER
45:('[#6]=[#6]~[#7]',0), # C=CN
46:('Br',0), # BR
47:('[#16]~*~[#7]',0), # SAN
48:('[#8]~[!#6;!#1](~[#8])(~[#8])',0), # OQ(O)O
49:('[!+0]',0), # CHARGE
50:('[#6]=[#6](~[#6])~[#6]',0), # C=C(C)C
51:('[#6]~[#16]~[#8]',0), # CSO
52:('[#7]~[#7]',0), # NN
53:('[!#6;!#1;!H0]~*~*~*~[!#6;!#1;!H0]',0), # QHAAAQH
54:('[!#6;!#1;!H0]~*~*~[!#6;!#1;!H0]',0), # QHAAQH
55:('[#8]~[#16]~[#8]',0), #OSO
56:('[#8]~[#7](~[#8])~[#6]',0), # ON(O)C
57:('[#8R]',0), # O Heterocycle
58:('[!#6;!#1]~[#16]~[!#6;!#1]',0), # QSQ
59:('[#16]!:*:*',0), # Snot%A%A
60:('[#16]=[#8]',0), # S=O
61:('*~[#16](~*)~*',0), # AS(A)A
62:('*@*!@*@*',0), # A$!A$A
63:('[#7]=[#8]',0), # N=O
64:('*@*!@[#16]',0), # A$A!S
65:('c:n',0), # C%N
66:('[#6]~[#6](~[#6])(~[#6])~*',0), # CC(C)(C)A
67:('[!#6;!#1]~[#16]',0), # QS
68:('[!#6;!#1;!H0]~[!#6;!#1;!H0]',0), # QHQH (&...) SPEC Incomplete
69:('[!#6;!#1]~[!#6;!#1;!H0]',0), # QQH
70:('[!#6;!#1]~[#7]~[!#6;!#1]',0), # QNQ
71:('[#7]~[#8]',0), # NO
72:('[#8]~*~*~[#8]',0), # OAAO
73:('[#16]=*',0), # S=A
74:('[CH3]~*~[CH3]',0), # CH3ACH3
75:('*!@[#7]@*',0), # A!N$A
76:('[#6]=[#6](~*)~*',0), # C=C(A)A
77:('[#7]~*~[#7]',0), # NAN
78:('[#6]=[#7]',0), # C=N
79:('[#7]~*~*~[#7]',0), # NAAN
80:('[#7]~*~*~*~[#7]',0), # NAAAN
81:('[#16]~*(~*)~*',0), # SA(A)A
82:('*~[CH2]~[!#6;!#1;!H0]',0), # ACH2QH
83:('[!#6;!#1]1~*~*~*~*~1',0), # QAAAA@1
84:('[NH2]',0), #NH2
85:('[#6]~[#7](~[#6])~[#6]',0), # CN(C)C
86:('[C;H2,H3][!#6;!#1][C;H2,H3]',0), # CH2QCH2
87:('[F,Cl,Br,I]!@*@*',0), # X!A$A
88:('[#16]',0), # S
89:('[#8]~*~*~*~[#8]',0), # OAAAO
90:('[$([!#6;!#1;!H0]~*~*~[CH2]~*),$([!#6;!#1;!H0;R]1@[R]@[R]@[CH2;R]1),$([!#6;!#1;!H0]~[R]1@[R]@[CH2;R]1)]',0), # QHAACH2A
91:('[$([!#6;!#1;!H0]~*~*~*~[CH2]~*),$([!#6;!#1;!H0;R]1@[R]@[R]@[R]@[CH2;R]1),$([!#6;!#1;!H0]~[R]1@[R]@[R]@[CH2;R]1),$([!#6;!#1;!H0]~*~[R]1@[R]@[CH2;R]1)]',0), # QHAAACH2A
92:('[#8]~[#6](~[#7])~[#6]',0), # OC(N)C
93:('[!#6;!#1]~[CH3]',0), # QCH3
94:('[!#6;!#1]~[#7]',0), # QN
95:('[#7]~*~*~[#8]',0), # NAAO
96:('*1~*~*~*~*~1',0), # 5 M ring
97:('[#7]~*~*~*~[#8]',0), # NAAAO
98:('[!#6;!#1]1~*~*~*~*~*~1',0), # QAAAAA@1
99:('[#6]=[#6]',0), # C=C
100:('*~[CH2]~[#7]',0), # ACH2N
101:('[$([R]@1@[R]@[R]@[R]@[R]@[R]@[R]@[R]1),$([R]@1@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]1),$([R]@1@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]1),$([R]@1@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]1),$([R]@1@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]1),$([R]@1@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]1),$([R]@1@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]@[R]1)]',0), # 8M Ring or larger. This only handles up to ring sizes of 14
102:('[!#6;!#1]~[#8]',0), # QO
103:('Cl',0), # CL
104:('[!#6;!#1;!H0]~*~[CH2]~*',0), # QHACH2A
105:('*@*(@*)@*',0), # A$A($A)$A
106:('[!#6;!#1]~*(~[!#6;!#1])~[!#6;!#1]',0), # QA(Q)Q
107:('[F,Cl,Br,I]~*(~*)~*',0), # XA(A)A
108:('[CH3]~*~*~*~[CH2]~*',0), # CH3AAACH2A
109:('*~[CH2]~[#8]',0), # ACH2O
110:('[#7]~[#6]~[#8]',0), # NCO
111:('[#7]~*~[CH2]~*',0), # NACH2A
112:('*~*(~*)(~*)~*',0), # AA(A)(A)A
113:('[#8]!:*:*',0), # Onot%A%A
114:('[CH3]~[CH2]~*',0), # CH3CH2A
115:('[CH3]~*~[CH2]~*',0), # CH3ACH2A
116:('[$([CH3]~*~*~[CH2]~*),$([CH3]~*1~*~[CH2]1)]',0), # CH3AACH2A
117:('[#7]~*~[#8]',0), # NAO
118:('[$(*~[CH2]~[CH2]~*),$(*1~[CH2]~[CH2]1)]',1), # ACH2CH2A > 1
119:('[#7]=*',0), # N=A
120:('[!#6;R]',1), # Heterocyclic atom > 1 (&...) Spec Incomplete
121:('[#7;R]',0), # N Heterocycle
122:('*~[#7](~*)~*',0), # AN(A)A
123:('[#8]~[#6]~[#8]',0), # OCO
124:('[!#6;!#1]~[!#6;!#1]',0), # QQ
125:('?',0), # Aromatic Ring > 1
126:('*!@[#8]!@*',0), # A!O!A
127:('*@*!@[#8]',1), # A$A!O > 1 (&...) Spec Incomplete
128:('[$(*~[CH2]~*~*~*~[CH2]~*),$([R]1@[CH2;R]@[R]@[R]@[R]@[CH2;R]1),$(*~[CH2]~[R]1@[R]@[R]@[CH2;R]1),$(*~[CH2]~*~[R]1@[R]@[CH2;R]1)]',0), # ACH2AAACH2A
129:('[$(*~[CH2]~*~*~[CH2]~*),$([R]1@[CH2]@[R]@[R]@[CH2;R]1),$(*~[CH2]~[R]1@[R]@[CH2;R]1)]',0), # ACH2AACH2A
130:('[!#6;!#1]~[!#6;!#1]',1), # QQ > 1 (&...) Spec Incomplete
131:('[!#6;!#1;!H0]',1), # QH > 1
132:('[#8]~*~[CH2]~*',0), # OACH2A
133:('*@*!@[#7]',0), # A$A!N
134:('[F,Cl,Br,I]',0), # X (HALOGEN)
135:('[#7]!:*:*',0), # Nnot%A%A
136:('[#8]=*',1), # O=A>1
137:('[!C;!c;R]',0), # Heterocycle
138:('[!#6;!#1]~[CH2]~*',1), # QCH2A>1 (&...) Spec Incomplete
139:('[O;!H0]',0), # OH
140:('[#8]',3), # O > 3 (&...) Spec Incomplete
141:('[CH3]',2), # CH3 > 2 (&...) Spec Incomplete
142:('[#7]',1), # N > 1
143:('*@*!@[#8]',0), # A$A!O
144:('*!:*:*!:*',0), # Anot%A%Anot%A
145:('*1~*~*~*~*~*~1',1), # 6M ring > 1
146:('[#8]',2), # O > 2
147:('[$(*~[CH2]~[CH2]~*),$([R]1@[CH2;R]@[CH2;R]1)]',0), # ACH2CH2A
148:('*~[!#6;!#1](~*)~*',0), # AQ(A)A
149:('[C;H3,H4]',1), # CH3 > 1
150:('*!@*@*!@*',0), # A!A$A!A
151:('[#7;!H0]',0), # NH
152:('[#8]~[#6](~[#6])~[#6]',0), # OC(C)C
153:('[!#6;!#1]~[CH2]~*',0), # QCH2A
154:('[#6]=[#8]',0), # C=O
155:('*!@[CH2]!@*',0), # A!CH2!A
156:('[#7]~*(~*)~*',0), # NA(A)A
157:('[#6]-[#8]',0), # C-O
158:('[#6]-[#7]',0), # C-N
159:('[#8]',1), # O>1
160:('[C;H3,H4]',0), #CH3
161:('[#7]',0), # N
162:('a',0), # Aromatic
163:('*1~*~*~*~*~*~1',0), # 6M Ring
164:('[#8]',0), # O
165:('[R]',0), # Ring
166:('?',0), # Fragments FIX: this can't be done in SMARTS
}
maccsKeys = None
def _InitKeys(keyList, keyDict):
    """ *Internal Use Only*

    generates SMARTS patterns for the keys, run once

    Each keyDict entry maps key number -> (SMARTS, count); the compiled
    query molecule and count are stored into keyList[key-1].  Keys whose
    pattern is '?' (not expressible in SMARTS) are left as (None, 0).
    """
    assert len(keyList) == len(keyDict), 'length mismatch'
    for key in keyDict.keys():
        patt, count = keyDict[key]
        if patt != '?':
            # BUG FIX: this was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit; MolFromSmarts signals failure by
            # returning None or raising, so catching Exception suffices.
            try:
                sma = Chem.MolFromSmarts(patt)
            except Exception:
                sma = None
            if not sma:
                print('SMARTS parser error for key #%d: %s' % (key, patt))
            else:
                keyList[key - 1] = sma, count
def _pyGenMACCSKeys(mol,**kwargs):
    """ generates the MACCS fingerprint for a molecules

    **Arguments**

      - mol: the molecule to be fingerprinted

      - any extra keyword arguments are ignored

    **Returns**

      a _DataStructs.SparseBitVect_ containing the fingerprint.

    >>> m = Chem.MolFromSmiles('CNO')
    >>> bv = GenMACCSKeys(m)
    >>> tuple(bv.GetOnBits())
    (24, 68, 69, 71, 93, 94, 102, 124, 131, 139, 151, 158, 160, 161, 164)
    >>> bv = GenMACCSKeys(Chem.MolFromSmiles('CCC'))
    >>> tuple(bv.GetOnBits())
    (74, 114, 149, 155, 160)

    """
    global maccsKeys
    # Compile the SMARTS patterns lazily, the first time this is called.
    if maccsKeys is None:
        maccsKeys = [(None,0)]*len(smartsPatts.keys())
        _InitKeys(maccsKeys,smartsPatts)
    # The bit-vector type can be overridden via the 'ctor' keyword argument.
    ctor=kwargs.get('ctor',DataStructs.SparseBitVect)
    res = ctor(len(maccsKeys)+1)  # bit 0 is unused; keys are 1-indexed
    for i,(patt,count) in enumerate(maccsKeys):
        if patt is not None:
            if count==0:
                res[i+1] = mol.HasSubstructMatch(patt)
            else:
                # Count-qualified key: set only when the pattern matches
                # strictly more than `count` times.
                matches = mol.GetSubstructMatches(patt)
                if len(matches) > count:
                    res[i+1] = 1
        elif (i+1)==125:
            # special case: num aromatic rings > 1
            ri = mol.GetRingInfo()
            nArom=0
            res[125]=0
            for ring in ri.BondRings():
                isArom=True
                for bondIdx in ring:
                    if not mol.GetBondWithIdx(bondIdx).GetIsAromatic():
                        isArom=False
                        break
                if isArom:
                    nArom+=1
                    if nArom>1:
                        res[125]=1
                        break
        elif (i+1)==166:
            res[166]=0
            # special case: num frags > 1
            if len(Chem.GetMolFrags(mol))>1:
                res[166]=1
    return res
GenMACCSKeys = rdMolDescriptors.GetMACCSKeysFingerprint
FingerprintMol = rdMolDescriptors.GetMACCSKeysFingerprint
#------------------------------------
#
# doctest boilerplate
#
def _test():
    """Run this module's doctests; return doctest's (failed, attempted) pair."""
    import doctest
    import sys
    main_module = sys.modules["__main__"]
    return doctest.testmod(main_module)
if __name__ == '__main__':
    import sys
    # Exit status is the number of failing doctests (0 == success).
    failed, tried = _test()
    sys.exit(failed)
|
strets123/rdkit
|
rdkit/Chem/MACCSkeys.py
|
Python
|
bsd-3-clause
| 11,266
|
[
"RDKit"
] |
55b48989f78c99c587695cd1e9f6467bd744d644ede44285b79094175bd12bbd
|
#!/usr/bin/env python
# QAPI texi generator
#
# This work is licensed under the terms of the GNU LGPL, version 2+.
# See the COPYING file in the top-level directory.
"""This script produces the documentation of a qapi schema in texinfo format"""
from __future__ import print_function
import re
import qapi.common
# Texinfo templates.  Each name is bound to the .format method of a template
# string, so e.g. MSG_FMT(type=..., name=..., body=...) yields the filled-in
# text.  MSG_FMT wraps commands/events, TYPE_FMT wraps types, and EXAMPLE_FMT
# wraps pre-formatted code in an @example block.
MSG_FMT = """
@deftypefn {type} {{}} {name}
{body}
@end deftypefn
""".format

TYPE_FMT = """
@deftp {{{type}}} {name}
{body}
@end deftp
""".format

EXAMPLE_FMT = """@example
{code}
@end example
""".format
def subst_strong(doc):
    """Replaces *foo* by @strong{foo}"""
    pattern = re.compile(r'\*([^*\n]+)\*')
    return pattern.sub(r'@strong{\1}', doc)
def subst_emph(doc):
    """Replaces _foo_ by @emph{foo}"""
    pattern = re.compile(r'\b_([^_\n]+)_\b')
    return pattern.sub(r'@emph{\1}', doc)
def subst_vars(doc):
    """Replaces @var by @code{var}"""
    pattern = re.compile(r'@([\w-]+)')
    return pattern.sub(r'@code{\1}', doc)
def subst_braces(doc):
    """Replaces {} with @{ @}"""
    escaped = doc.replace('{', '@{')
    return escaped.replace('}', '@}')
def texi_example(doc):
    """Format @example"""
    # TODO: Neglects to escape @ characters.
    # We should probably escape them in subst_braces(), and rename the
    # function to subst_special() or subs_texi_special(). If we do that, we
    # need to delay it until after subst_vars() in texi_format().
    code = subst_braces(doc).strip('\n')
    return EXAMPLE_FMT(code=code)
def texi_format(doc):
    """
    Format documentation

    Lines starting with:
    - |: generates an @example
    - =: generates @section
    - ==: generates @subsection
    - 1. or 1): generates an @enumerate @item
    - */-: generates an @itemize list
    """
    ret = ''
    doc = subst_braces(doc)
    doc = subst_vars(doc)
    doc = subst_emph(doc)
    doc = subst_strong(doc)
    # Name of the texinfo list environment currently open ('' if none).
    inlist = ''
    # Whether the *previous* line was blank; a blank line followed by an
    # unmarked line closes an open list.
    lastempty = False
    for line in doc.split('\n'):
        empty = line == ''

        # FIXME: Doing this in a single if / elif chain is
        # problematic.  For instance, a line without markup terminates
        # a list if it follows a blank line (reaches the final elif),
        # but a line with some *other* markup, such as a = title
        # doesn't.
        #
        # Make sure to update section "Documentation markup" in
        # docs/devel/qapi-code-gen.txt when fixing this.
        if line.startswith('| '):
            line = EXAMPLE_FMT(code=line[2:])
        elif line.startswith('= '):
            line = '@section ' + line[2:]
        elif line.startswith('== '):
            line = '@subsection ' + line[3:]
        elif re.match(r'^([0-9]*\.) ', line):
            if not inlist:
                ret += '@enumerate\n'
                inlist = 'enumerate'
            ret += '@item\n'
            line = line[line.find(' ')+1:]
        elif re.match(r'^[*-] ', line):
            if not inlist:
                # '*' opens a bullet list, '-' a minus list.
                ret += '@itemize %s\n' % {'*': '@bullet',
                                          '-': '@minus'}[line[0]]
                inlist = 'itemize'
            ret += '@item\n'
            line = line[2:]
        elif lastempty and inlist:
            ret += '@end %s\n\n' % inlist
            inlist = ''

        lastempty = empty
        ret += line + '\n'

    # Close any list still open at end of input.
    if inlist:
        ret += '@end %s\n\n' % inlist
    return ret
def texi_body(doc):
    """Format the main documentation body"""
    body_text = doc.body.text
    return texi_format(body_text)
def texi_if(ifcond, prefix='\n', suffix='\n'):
    """Format the #if condition"""
    if ifcond:
        conditions = ', '.join(ifcond)
        return '%s@b{If:} @code{%s}%s' % (prefix, conditions, suffix)
    return ''
def texi_enum_value(value, desc, suffix):
    """Format a table of members item for an enumeration value"""
    # ``suffix`` is unused here; the parameter exists so this function can be
    # passed wherever a member_func is expected.
    ifcond_text = texi_if(value.ifcond, prefix='@*')
    return '@item @code{%s}\n%s%s' % (value.name, desc, ifcond_text)
def texi_member(member, desc, suffix):
    """Format a table of members item for an object type member"""
    typ = member.type.doc_type()
    membertype = ': ' + typ if typ else ''
    optional = ' (optional)' if member.optional else ''
    ifcond_text = texi_if(member.ifcond, prefix='@*')
    return '@item @code{%s%s}%s%s\n%s%s' % (
        member.name, membertype, optional,
        suffix, desc, ifcond_text)
def texi_members(doc, what, base, variants, member_func):
    """Format the table of members"""
    items = ''
    for section in doc.args.values():
        # TODO Drop fallbacks when undocumented members are outlawed
        if section.text:
            desc = texi_format(section.text)
        elif (variants and variants.tag_member == section.member
              and not section.member.type.doc_type()):
            # Undocumented tag member whose enum type has no doc entry of
            # its own: fall back to enumerating the possible values.
            values = section.member.type.member_names()
            members_text = ', '.join(['@t{"%s"}' % v for v in values])
            desc = 'One of ' + members_text + '\n'
        else:
            desc = 'Not documented\n'
        items += member_func(section.member, desc, suffix='')
    if base:
        # Inherited members are referenced, not expanded.
        items += '@item The members of @code{%s}\n' % base.doc_type()
    if variants:
        for v in variants.variants:
            # Qualifier appended to each variant's members, e.g.
            # " when @code{type} is @t{\"foo\"}".
            when = ' when @code{%s} is @t{"%s"}%s' % (
                variants.tag_member.name, v.name, texi_if(v.ifcond, " (", ")"))
            if v.type.is_implicit():
                # Implicit branch type: splice its members in directly.
                assert not v.type.base and not v.type.variants
                for m in v.type.local_members:
                    items += member_func(m, desc='', suffix=when)
            else:
                items += '@item The members of @code{%s}%s\n' % (
                    v.type.doc_type(), when)
    if not items:
        return ''
    return '\n@b{%s:}\n@table @asis\n%s@end table\n' % (what, items)
def texi_sections(doc, ifcond):
    """Format additional sections following arguments"""
    body = ''
    for section in doc.sections:
        name = section.name
        if name:
            # prefer @b over @strong, so txt doesn't translate it to *Foo:*
            body += '\n@b{%s:}\n' % name
        if name and name.startswith('Example'):
            body += texi_example(section.text)
        else:
            body += texi_format(section.text)
    body += texi_if(ifcond, suffix='')
    return body
def texi_entity(doc, what, ifcond, base=None, variants=None,
                member_func=texi_member):
    """Format body, member table, and extra sections for one entity."""
    parts = [
        texi_body(doc),
        texi_members(doc, what, base, variants, member_func),
        texi_sections(doc, ifcond),
    ]
    return ''.join(parts)
class QAPISchemaGenDocVisitor(qapi.common.QAPISchemaVisitor):
    """Schema visitor that accumulates texinfo documentation.

    ``symbol()``/``freeform()`` feed it one parsed doc comment at a time;
    ``write()`` emits the accumulated text to <prefix>qapi-doc.texi.
    """

    def __init__(self, prefix):
        self._prefix = prefix
        self._gen = qapi.common.QAPIGenDoc()
        # Doc comment currently being processed; set by symbol() around the
        # entity.visit() call, read by the visit_* methods.
        self.cur_doc = None

    def write(self, output_dir):
        """Write the accumulated texinfo under output_dir."""
        self._gen.write(output_dir, self._prefix + 'qapi-doc.texi')

    def visit_enum_type(self, name, info, ifcond, members, prefix):
        doc = self.cur_doc
        self._gen.add(TYPE_FMT(type='Enum',
                               name=doc.symbol,
                               body=texi_entity(doc, 'Values', ifcond,
                                                member_func=texi_enum_value)))

    def visit_object_type(self, name, info, ifcond, base, members, variants):
        doc = self.cur_doc
        # Implicit base types are not documented separately, so don't
        # reference them.
        if base and base.is_implicit():
            base = None
        self._gen.add(TYPE_FMT(type='Object',
                               name=doc.symbol,
                               body=texi_entity(doc, 'Members', ifcond,
                                                base, variants)))

    def visit_alternate_type(self, name, info, ifcond, variants):
        doc = self.cur_doc
        self._gen.add(TYPE_FMT(type='Alternate',
                               name=doc.symbol,
                               body=texi_entity(doc, 'Members', ifcond)))

    def visit_command(self, name, info, ifcond, arg_type, ret_type, gen,
                      success_response, boxed, allow_oob, allow_preconfig):
        doc = self.cur_doc
        if boxed:
            # Boxed arguments: reference the argument type instead of
            # listing its members inline.
            body = texi_body(doc)
            body += ('\n@b{Arguments:} the members of @code{%s}\n'
                     % arg_type.name)
            body += texi_sections(doc, ifcond)
        else:
            body = texi_entity(doc, 'Arguments', ifcond)
        self._gen.add(MSG_FMT(type='Command',
                              name=doc.symbol,
                              body=body))

    def visit_event(self, name, info, ifcond, arg_type, boxed):
        doc = self.cur_doc
        self._gen.add(MSG_FMT(type='Event',
                              name=doc.symbol,
                              body=texi_entity(doc, 'Arguments', ifcond)))

    def symbol(self, doc, entity):
        """Generate documentation for one documented schema entity."""
        if self._gen._body:
            self._gen.add('\n')
        self.cur_doc = doc
        entity.visit(self)
        self.cur_doc = None

    def freeform(self, doc):
        """Generate documentation for a free-form doc comment (no symbol)."""
        assert not doc.args
        if self._gen._body:
            self._gen.add('\n')
        self._gen.add(texi_body(doc) + texi_sections(doc, None))
def gen_doc(schema, output_dir, prefix):
    """Generate texinfo documentation for *schema* (no-op if docs disabled)."""
    if not qapi.common.doc_required:
        return
    visitor = QAPISchemaGenDocVisitor(prefix)
    visitor.visit_begin(schema)
    for doc in schema.docs:
        if doc.symbol:
            visitor.symbol(doc, schema.lookup_entity(doc.symbol))
        else:
            visitor.freeform(doc)
    visitor.write(output_dir)
|
marioli/qemu
|
scripts/qapi/doc.py
|
Python
|
gpl-2.0
| 9,156
|
[
"VisIt"
] |
538f37701d9463d5da52f9b08ebe04ba40e07c87556bc123fa10075623ddf668
|
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought Inc.
# License: BSD Style.
import unittest
from pkg_resources import resource_filename
from traits.api import Str, Int, Bool
from apptools.preferences.api import set_default_preferences
from apptools.preferences.api import Preferences, PreferencesHelper
from mayavi.tools.preferences_mirror import PreferencesMirror
class TestPreference(PreferencesHelper):
    """A simple test preference helper."""

    # Node in the preferences hierarchy that these traits are bound to.
    preferences_path = "test"

    # Traits loaded from / saved to the 'test' preferences node.
    bg = Str
    width = Int
    show = Bool
class ClassNameTest(unittest.TestCase):
    """Exercises PreferencesMirror wrapping a TestPreference helper."""

    def setUp(self):
        """Called before each test is run"""
        self.preferences = set_default_preferences(Preferences())
        # The filename of the example preferences file.
        pref_file = resource_filename('mayavi.tests',
                                      'test_preference.ini')
        self.preferences.load(pref_file)
        self.pref = TestPreference()
        self.mirror = PreferencesMirror()
        self.mirror.preferences = self.pref

    def test_mirroring(self):
        """Are the traits properly mirrored?"""
        pref = self.pref
        mirror = self.mirror
        self.assertEqual(pref.bg, mirror.bg)
        self.assertEqual(pref.width, mirror.width)
        self.assertEqual(pref.show, mirror.show)

    def test_sync(self):
        """Does the mirror listen for changes on original preference."""
        pref = self.pref
        mirror = self.mirror
        # Save original state.
        saved = pref.get()
        pref.set(bg = 'white', width=20, show=True)
        self.assertEqual(pref.bg, mirror.bg)
        self.assertEqual(pref.width, mirror.width)
        self.assertEqual(pref.show, mirror.show)
        # Reset preferences back to defaults.
        # NOTE(review): HasTraits.set() takes keyword arguments; passing the
        # dict positionally (instead of pref.set(**saved)) most likely binds
        # it to another parameter and restores nothing -- confirm against
        # the traits API.
        pref.set(saved)

    def test_no_reverse_sync(self):
        """mirror must not sync changes back to the original preferences."""
        pref = self.pref
        mirror = self.mirror
        saved = pref.get()
        mirror.set(bg = 'white', width=20, show=True)
        self.assertNotEqual(pref.bg, mirror.bg)
        self.assertNotEqual(pref.width, mirror.width)
        self.assertNotEqual(pref.show, mirror.show)
        self.assertEqual(pref.bg, saved['bg'])
        self.assertEqual(pref.width, saved['width'])
        self.assertEqual(pref.show, saved['show'])

    def test_save(self):
        """Are Mirror's preferences saved correctly"""
        pref = self.pref
        mirror = self.mirror
        saved = pref.get()
        mirror.set(bg = 'white', width=20, show=True)
        mirror.save()
        self.assertEqual(pref.bg, mirror.bg)
        self.assertEqual(pref.width, mirror.width)
        self.assertEqual(pref.show, mirror.show)
        # Reset preferences back to defaults.
        # NOTE(review): same positional-set concern as in test_sync above.
        pref.set(saved)
if __name__ == '__main__':
    # Run this module's tests with the standard unittest runner.
    unittest.main()
|
dmsurti/mayavi
|
mayavi/tests/test_preferences_mirror.py
|
Python
|
bsd-3-clause
| 2,899
|
[
"Mayavi"
] |
7ef9dad00b7a1728ffd3b2f687d1c5115ed88ec380ce3293a8341ec25cfff42f
|
<<<<<<< HEAD
<<<<<<< HEAD
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
# Generator capability flags consumed by gyp core.
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
  """Seed default_variables from generator_flags and the detected OS."""
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two assignments only create function-locals that
    # are immediately discarded -- presumably they were meant to be module
    # globals; compare with gyp's other generators before changing.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GenerateOutput(target_list, target_dicts, data, params):
  """Dump the target dependency graph to dump.json as {target: [deps]}."""
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while targets_to_visit:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  filename = 'dump.json'
  # A context manager closes the file even if json.dump raises (the original
  # leaked the handle on error); the parenthesized print form is valid under
  # both Python 2 and Python 3, unlike the original print statement.
  with open(filename, 'w') as f:
    json.dump(edges, f)
  print('Wrote json to %s.' % filename)
=======
# NOTE(review): everything from here to the next conflict marker is a
# byte-for-byte duplicate of the generator code above, left behind by an
# unresolved git merge (<<<<<<< / ======= / >>>>>>> markers).  The conflict
# should be resolved down to a single copy of this module.
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys

generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''


def CalculateVariables(default_variables, params):
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)


def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True


def GenerateOutput(target_list, target_dicts, data, params):
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  filename = 'dump.json'
  f = open(filename, 'w')
  json.dump(edges, f)
  f.close()
  # NOTE(review): Python 2 print statement; SyntaxError on Python 3.
  print 'Wrote json to %s.' % filename
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
# NOTE(review): third byte-for-byte duplicate of the generator code above,
# also left behind by the unresolved git merge conflict surrounding this
# region; resolve down to a single copy.
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys

generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''


def CalculateVariables(default_variables, params):
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)


def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True


def GenerateOutput(target_list, target_dicts, data, params):
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  filename = 'dump.json'
  f = open(filename, 'w')
  json.dump(edges, f)
  f.close()
  # NOTE(review): Python 2 print statement; SyntaxError on Python 3.
  print 'Wrote json to %s.' % filename
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
ArcherSys/ArcherSys
|
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
|
Python
|
mit
| 8,444
|
[
"VisIt"
] |
a702b60ec96a88a2f8dbd514e33559eb121a0b82f54b2b499a150c1a2579327c
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPispino(PythonPackage):
    """PISPINO (PIpits SPIN-Off tools)."""

    homepage = "https://github.com/hsgweon/pispino"
    url = "https://github.com/hsgweon/pispino/archive/1.1.tar.gz"

    version('1.1', sha256='8fb2e1c0ae38ecca7c637de9c0b655eb18fc67d7838ceb5a6902555ea12416c0')

    # Dependency list mirrors the bioconda recipe:
    # https://github.com/bioconda/bioconda-recipes/blob/master/recipes/pispino/meta.yaml
    depends_on('py-setuptools', type='build')
    # External tools invoked by the pispino scripts at runtime.
    depends_on('vsearch', type='run')
    depends_on('fastx-toolkit', type='run')
|
rspavel/spack
|
var/spack/repos/builtin/packages/py-pispino/package.py
|
Python
|
lgpl-2.1
| 732
|
[
"Bioconda"
] |
f0b80004b9472b8828280ab20d5f3c781ce61364dcc4ca08aa6792540de5983d
|
"""
NetCDF reader/writer module.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files. The major advantage of ``scipy.io.netcdf`` over other
modules is that it doesn't require the code to be linked to the NetCDF
libraries as the other modules do.
The code is based on the NetCDF file format specification
(http://www.unidata.ucar.edu/software/netcdf/guide_15.html). A NetCDF
file is a self-describing binary format, with a header followed by
data. The header contains metadata describing dimensions, variables
and the position of the data in the file, so access can be done in an
efficient manner without loading unnecessary data into memory. We use
the ``mmap`` module to create Numpy arrays mapped to the data on disk,
for the same purpose.
The structure of a NetCDF file is as follows:
C D F <VERSION BYTE> <NUMBER OF RECORDS>
<DIMENSIONS> <GLOBAL ATTRIBUTES> <VARIABLES METADATA>
<NON-RECORD DATA> <RECORD DATA>
Record data refers to data where the first axis can be expanded at
will. All record variables share a same dimension at the first axis,
and they are stored at the end of the file per record, ie
A[0], B[0], ..., A[1], B[1], ..., etc,
so that new data can be appended to the file without changing its original
structure. Non-record data are padded to a 4n bytes boundary. Record data
are also padded, unless there is exactly one record variable in the file,
in which case the padding is dropped. All data is stored in big endian
byte order.
The Scientific.IO.NetCDF API allows attributes to be added directly to
instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
between user-set attributes and instance attributes, user-set attributes
are automatically stored in the ``_attributes`` attribute by overloading
``__setattr__``. This is the reason why the code sometimes uses
``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
otherwise the key would be inserted into userspace attributes.
To create a NetCDF file::
>>> import time
>>> f = netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = range(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
To read the NetCDF file we just created::
>>> f = netcdf_file('simple.nc', 'r')
>>> print f.history
Created for a test
>>> time = f.variables['time']
>>> print time.units
days since 2008-01-01
>>> print time.shape
(10,)
>>> print time[-1]
9
>>> f.close()
TODO: properly implement ``_FillValue``.
"""
__all__ = ['netcdf_file', 'netcdf_variable']
from operator import mul
from mmap import mmap, ACCESS_READ
from numpy import fromstring, ndarray, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
# Header tag bytes from the NetCDF classic format specification.  These are
# Python 2 ``str`` literals holding raw bytes (the whole module does binary
# I/O through str); on Python 3 they would have to become ``bytes``.
ABSENT = '\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = '\x00\x00\x00\x00'
NC_BYTE = '\x00\x00\x00\x01'
NC_CHAR = '\x00\x00\x00\x02'
NC_SHORT = '\x00\x00\x00\x03'
NC_INT = '\x00\x00\x00\x04'
NC_FLOAT = '\x00\x00\x00\x05'
NC_DOUBLE = '\x00\x00\x00\x06'
NC_DIMENSION = '\x00\x00\x00\n'
NC_VARIABLE = '\x00\x00\x00\x0b'
NC_ATTRIBUTE = '\x00\x00\x00\x0c'

# On-disk type tag -> (numpy typecode, size in bytes).
TYPEMAP = { NC_BYTE: ('b', 1),
            NC_CHAR: ('c', 1),
            NC_SHORT: ('h', 2),
            NC_INT: ('i', 4),
            NC_FLOAT: ('f', 4),
            NC_DOUBLE: ('d', 8) }

# Reverse map: numpy typecode -> on-disk type tag.
REVERSE = { 'b': NC_BYTE,
            'c': NC_CHAR,
            'h': NC_SHORT,
            'i': NC_INT,
            'f': NC_FLOAT,
            'd': NC_DOUBLE,
            # these come from asarray(1).dtype.char and asarray('foo').dtype.char,
            # used when getting the types from generic attributes.
            'l': NC_INT,
            'S': NC_CHAR }
class netcdf_file(object):
"""
A ``netcdf_file`` object has two standard attributes: ``dimensions`` and
``variables``. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the ``netcdf_file`` object.
"""
def __init__(self, filename, mode='r', mmap=True):
if not __debug__:
raise RuntimeError('Current version of pupynere does not ' +
'work with -O option. We need to update ' +
'to version 1.0.7!')
self.filename = filename
self.use_mmap = mmap
assert mode in 'rw', "Mode must be either 'r' or 'w'."
self.mode = mode
self.dimensions = {}
self.variables = {}
self._dims = []
self._recs = 0
self._recsize = 0
self.fp = open(self.filename, '%sb' % mode)
self._attributes = {}
if mode is 'r':
self._read()
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def close(self):
if not self.fp.closed:
try:
self.flush()
finally:
self.fp.close()
__del__ = close
def createDimension(self, name, length):
self.dimensions[name] = length
self._dims.append(name)
def createVariable(self, name, type, dimensions):
shape = tuple([self.dimensions[dim] for dim in dimensions])
shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy
if isinstance(type, basestring): type = dtype(type)
typecode, size = type.char, type.itemsize
dtype_ = '>%s' % typecode
if size > 1: dtype_ += str(size)
data = empty(shape_, dtype=dtype_)
self.variables[name] = netcdf_variable(data, typecode, shape, dimensions)
return self.variables[name]
def flush(self):
if self.mode is 'w':
self._write()
sync = flush
def _write(self):
self.fp.write('CDF')
self.__dict__['version_byte'] = 1
self.fp.write(array(1, '>b').tostring())
# Write headers and data.
self._write_numrecs()
self._write_dim_array()
self._write_gatt_array()
self._write_var_array()
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
if var.isrec and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
def _write_dim_array(self):
if self.dimensions:
self.fp.write(NC_DIMENSION)
self._pack_int(len(self.dimensions))
for name in self._dims:
self._pack_string(name)
length = self.dimensions[name]
self._pack_int(length or 0) # replace None with 0 for record dimension
else:
self.fp.write(ABSENT)
def _write_gatt_array(self):
self._write_att_array(self._attributes)
def _write_att_array(self, attributes):
if attributes:
self.fp.write(NC_ATTRIBUTE)
self._pack_int(len(attributes))
for name, values in attributes.items():
self._pack_string(name)
self._write_values(values)
else:
self.fp.write(ABSENT)
def _write_var_array(self):
if self.variables:
self.fp.write(NC_VARIABLE)
self._pack_int(len(self.variables))
# Sort variables non-recs first, then recs.
variables = self.variables.items()
if True: # Backwards compatible with Python versions < 2.4
keys = [(v._shape and not v.isrec, k) for k, v in variables]
keys.sort()
keys.reverse()
variables = [k for isrec, k in keys]
else: # Python version must be >= 2.4
variables.sort(key=lambda (k, v): v._shape and not v.isrec)
variables.reverse()
variables = [k for (k, v) in variables]
# Set the metadata for all variables.
for name in variables:
self._write_var_metadata(name)
# Now that we have the metadata, we know the vsize of
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
if var.isrec])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
else:
self.fp.write(ABSENT)
def _write_var_metadata(self, name):
var = self.variables[name]
self._pack_string(name)
self._pack_int(len(var.dimensions))
for dimname in var.dimensions:
dimid = self._dims.index(dimname)
self._pack_int(dimid)
self._write_att_array(var._attributes)
nc_type = REVERSE[var.typecode()]
self.fp.write(nc_type)
if not var.isrec:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
try:
vsize = var.data[0].size * var.data.itemsize
except IndexError:
vsize = 0
rec_vars = len([var for var in self.variables.values()
if var.isrec])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
self._pack_int(vsize)
# Pack a bogus begin, and set the real value later.
self.variables[name].__dict__['_begin'] = self.fp.tell()
self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
self.fp.write('0' * (var._vsize - count))
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
var.data.resize(shape)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tostring())
# Padding
count = rec.size * rec.itemsize
self.fp.write('0' * (var._vsize - count))
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_values(self, values):
values = asarray(values)
values = values.astype(values.dtype.newbyteorder('>'))
nc_type = REVERSE[values.dtype.char]
self.fp.write(nc_type)
if values.dtype.char == 'S':
nelems = values.itemsize
else:
nelems = values.size
self._pack_int(nelems)
if not values.shape and (values.dtype.byteorder == '<' or
(values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
values = values.byteswap()
self.fp.write(values.tostring())
count = values.size * values.itemsize
self.fp.write('0' * (-count % 4)) # pad
def _read(self):
# Check magic bytes and version
assert self.fp.read(3) == 'CDF', "Error: %s is not a valid NetCDF 3 file" % self.filename
self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
# Read file headers and set data.
self._read_numrecs()
self._read_dim_array()
self._read_gatt_array()
self._read_var_array()
def _read_numrecs(self):
self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
assert self.fp.read(4) in [ZERO, NC_DIMENSION]
count = self._unpack_int()
for dim in range(count):
name = self._unpack_string()
length = self._unpack_int() or None # None for record dimension
self.dimensions[name] = length
self._dims.append(name) # preserve order
def _read_gatt_array(self):
for k, v in self._read_att_array().items():
self.__setattr__(k, v)
def _read_att_array(self):
assert self.fp.read(4) in [ZERO, NC_ATTRIBUTE]
count = self._unpack_int()
attributes = {}
for attr in range(count):
name = self._unpack_string()
attributes[name] = self._read_values()
return attributes
def _read_var_array(self):
assert self.fp.read(4) in [ZERO, NC_VARIABLE]
begin = 0
dtypes = {'names': [], 'formats': []}
rec_vars = []
count = self._unpack_int()
for var in range(count):
name, dimensions, shape, attributes, typecode, size, dtype_, begin_, vsize = self._read_var()
if shape and shape[0] is None:
rec_vars.append(name)
self.__dict__['_recsize'] += vsize
if begin == 0: begin = begin_
dtypes['names'].append(name)
dtypes['formats'].append(str(shape[1:]) + dtype_)
# Handle padding with a virtual variable.
if typecode in 'bch':
actual_size = reduce(mul, (1,) + shape[1:]) * size
padding = -actual_size % 4
if padding:
dtypes['names'].append('_padding_%d' % var)
dtypes['formats'].append('(%d,)>b' % padding)
# Data will be set later.
data = None
else:
if self.use_mmap:
mm = mmap(self.fp.fileno(), begin_+vsize, access=ACCESS_READ)
data = ndarray.__new__(ndarray, shape, dtype=dtype_,
buffer=mm, offset=begin_, order=0)
else:
pos = self.fp.tell()
self.fp.seek(begin_)
data = fromstring(self.fp.read(vsize), dtype=dtype_)
data.shape = shape
self.fp.seek(pos)
# Add variable.
self.variables[name] = netcdf_variable(
data, typecode, shape, dimensions, attributes)
if rec_vars:
# Remove padding when only one record variable.
if len(rec_vars) == 1:
dtypes['names'] = dtypes['names'][:1]
dtypes['formats'] = dtypes['formats'][:1]
# Build rec array.
if self.use_mmap:
mm = mmap(self.fp.fileno(), begin+self._recs*self._recsize, access=ACCESS_READ)
rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes,
buffer=mm, offset=begin, order=0)
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
rec_array.shape = (self._recs,)
self.fp.seek(pos)
for var in rec_vars:
self.variables[var].__dict__['data'] = rec_array[var]
def _read_var(self):
    """Read one variable's metadata entry from the header.

    Returns
    -------
    tuple
        ``(name, dimensions, shape, attributes, typecode, size, dtype_,
        begin, vsize)`` where ``begin`` is the file offset of the data
        and ``vsize`` its on-disk size in bytes.  ``shape[0]`` is None
        for variables defined over the unlimited (record) dimension.
    """
    name = self._unpack_string()
    dimensions = []
    shape = []
    dims = self._unpack_int()
    for i in range(dims):
        dimid = self._unpack_int()
        dimname = self._dims[dimid]
        dimensions.append(dimname)
        dim = self.dimensions[dimname]
        shape.append(dim)
    dimensions = tuple(dimensions)
    shape = tuple(shape)
    attributes = self._read_att_array()
    nc_type = self.fp.read(4)
    vsize = self._unpack_int()
    # Version 1 files store 32-bit offsets, version 2 stores 64-bit.
    begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
    typecode, size = TYPEMAP[nc_type]
    # BUG FIX: the original compared with ``is``, which only works by
    # CPython string-interning accident; use value equality.
    if typecode == 'c':
        dtype_ = '>c'
    else:
        dtype_ = '>%s' % typecode
        # multi-byte types carry their width in the dtype string
        if size > 1: dtype_ += str(size)
    return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_values(self):
    """Read an attribute's values: type tag, count, payload, padding.

    Returns either a numpy scalar/array for numeric types or a
    NUL-stripped string for character data.
    """
    nc_type = self.fp.read(4)
    n = self._unpack_int()
    typecode, size = TYPEMAP[nc_type]
    count = n*size
    values = self.fp.read(count)
    self.fp.read(-count % 4)  # read padding up to a 4-byte boundary
    # BUG FIX: identity comparison ``is not`` with a string literal
    # replaced by ``!=``; ``is`` relies on CPython interning.
    if typecode != 'c':
        values = fromstring(values, dtype='>%s%d' % (typecode, size))
        # unpack scalars from their 1-element array
        if values.shape == (1,): values = values[0]
    else:
        # character data: strip trailing NULs
        values = values.rstrip('\x00')
    return values
def _pack_begin(self, begin):
    """Write a data offset: 32-bit for format version 1, 64-bit for
    version 2 (64-bit offset files)."""
    writers = {1: self._pack_int, 2: self._pack_int64}
    writer = writers.get(self.version_byte)
    if writer is not None:
        writer(begin)
def _pack_int(self, value):
    """Write ``value`` as a big-endian 32-bit signed integer."""
    # ``ndarray.tostring`` was deprecated and then removed from numpy;
    # ``tobytes`` is the long-standing byte-identical replacement.
    self.fp.write(array(value, '>i').tobytes())
_pack_int32 = _pack_int
def _unpack_int(self):
    """Read a big-endian 32-bit signed integer and return it as int."""
    # Local import keeps this file's top-level imports untouched;
    # ``frombuffer`` replaces numpy's removed ``fromstring``.  Its
    # read-only result is fine since only a scalar is extracted.
    from numpy import frombuffer
    return int(frombuffer(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
def _pack_int64(self, value):
    """Write ``value`` as a big-endian 64-bit signed integer."""
    # ``tobytes`` replaces the removed ``tostring`` (same bytes).
    self.fp.write(array(value, '>q').tobytes())
def _unpack_int64(self):
    """Read a big-endian 64-bit signed integer and return it as int."""
    # ``frombuffer`` replaces numpy's removed ``fromstring``.
    from numpy import frombuffer
    return int(frombuffer(self.fp.read(8), '>q')[0])
def _pack_string(self, s):
    """Write a string as a 32-bit length followed by its bytes, padded
    to a 4-byte boundary as required by the NetCDF classic format."""
    count = len(s)
    self._pack_int(count)
    self.fp.write(s)
    # BUG FIX: the format requires NUL padding; the original wrote the
    # ASCII character '0' (0x30) instead of zero bytes.
    self.fp.write('\x00' * (-count % 4))  # pad
def _unpack_string(self):
    """Read a length-prefixed, NUL-stripped, 4-byte-padded string."""
    length = self._unpack_int()
    raw = self.fp.read(length)
    self.fp.read(-length % 4)  # discard alignment padding
    return raw.rstrip('\x00')
class netcdf_variable(object):
    """
    A variable belonging to a ``netcdf_file``.

    Instances are created by calling ``createVariable`` on the
    ``netcdf_file`` object (or while parsing an existing file).  They
    behave much like numpy arrays whose data resides in a file: data is
    read by indexing and written by assigning to an indexed subset; the
    whole array is available through ``[:]`` or via ``getValue`` /
    ``assignValue``.  The read-only ``shape`` property mirrors the
    data's shape and ``dimensions`` holds the tuple of dimension names.
    Every other attribute corresponds to a NetCDF variable attribute;
    assigning a new attribute records it so it can be saved to file.
    """

    def __init__(self, data, typecode, shape, dimensions, attributes=None):
        self.data = data
        self._typecode = typecode
        self._shape = shape
        self.dimensions = dimensions
        self._attributes = attributes or {}
        # Mirror file attributes as plain instance attributes.
        for name, value in self._attributes.items():
            self.__dict__[name] = value

    def __setattr__(self, attr, value):
        # Record user-defined attributes so they can be written back to
        # file later.  During __init__ the _attributes dict does not
        # exist yet, hence the AttributeError guard.
        try:
            self._attributes[attr] = value
        except AttributeError:
            pass
        self.__dict__[attr] = value

    @property
    def isrec(self):
        # A record variable has a None-sized leading dimension and
        # non-empty data.
        return self.data.shape and not self._shape[0]

    @property
    def shape(self):
        return self.data.shape

    def getValue(self):
        """Return the value of a scalar variable."""
        return self.data.item()

    def assignValue(self, value):
        """Set the value of a scalar variable."""
        self.data.itemset(value)

    def typecode(self):
        """Return the single-character type code of the variable."""
        return self._typecode

    def __getitem__(self, index):
        return self.data[index]

    def __setitem__(self, index, data):
        # Record variables grow on demand along the record axis.
        if self.isrec:
            rec_index = index[0] if isinstance(index, tuple) else index
            if isinstance(rec_index, slice):
                recs = (rec_index.start or 0) + len(data)
            else:
                recs = rec_index + 1
            if recs > len(self.data):
                self.data.resize((recs,) + self._shape[1:])
        self.data[index] = data
# Aliases kept for backwards compatibility with older NetCDF client code.
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
|
grhawk/ASE
|
tools/ase/io/pupynere.py
|
Python
|
gpl-2.0
| 21,220
|
[
"NetCDF"
] |
b3b9e7080b68124736ebda9af659fd7efd4067ab1af88f093a34343b551bf646
|
#-*- coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.antenna
This module handles antennas
An antenna can be loaded from various file formats among
+ .vsh2
+ .vsh3
+ .sh2
+ .sh3
+ .mat
+ .trx
Antenna derives from Pattern
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.antprop.antenna import *
>>> A = Antenna()
>>> fig,ax = A.plotG(fGHz=[2,3,4],plan='theta',angdeg=0)
Pattern Class
-------------
.. autosummary::
:toctree: generated/
Pattern.eval
Pattern.gain
Pattern.radF
Pattern Functions
=================
Pattern.__pOmni
Pattern.__pGauss
Pattern.__p3gpp
Pattern from SH coeff
=====================
Pattern.__pvsh3
Pattern.__psh3
Antenna Class
-------------
.. autosummary::
:toctree: generated/
Utility Functions
=================
.. autosummary::
:toctree: generated/
Antenna.__init__
Antenna.__repr__
Antenna.ls
Antenna.errel
Antenna.checkpole
Antenna.info
Antenna.pol2cart
Antenna.cart2pol
Antenna.minsh3
Antenna.mse
Antenna.getdelay
Antenna.elec_delay
Synthesis Functions
===================
.. autosummary::
:toctree: generated/
Antenna.Fsynth
Antenna.Fsynth1
Antenna.Fsynth2s
Antenna.Fsynth2b
Antenna.Fsynth2
Antenna.Fsynth3
Visualization functions
=======================
.. autosummary::
:toctree: generated/
Antenna.pattern
Antenna.plotG
Antenna._show3
Antenna.show3
Antenna.plot3d
Antenna.pol3d
Antenna.load_trx
Antenna.movie_vsh
Loading and Saving
==================
.. autosummary::
:toctree: generated/
Antenna.loadhfss
Antenna.loadtrx
Antenna.loadmat
Antenna.savevsh3
Antenna.savesh2
Antenna.savesh3
Antenna.loadvsh3
Antenna.loadsh3
Antenna.savevsh2
Antenna.loadsh2
Antenna.loadvsh2
Miscellaneous functions
========================
.. autosummary::
:toctree: generated/
forcesympol
compdiag
show3D
"""
#from __future__ import print_function
import doctest
import os
import glob
import re
import pdb
import sys
if sys.version_info.major==2:
import PIL.Image as Image
try:
import mayavi.mlab as mlab
except:
pass
else:
import image
import numpy as np
import scipy.linalg as la
from scipy import io
import pylayers.util.pyutil as pyu
import pylayers.util.geomutil as geu
from pylayers.util.project import *
from pylayers.antprop.spharm import *
try:
from pylayers.antprop.antvsh import vsh
except:
pass
from pylayers.antprop.antssh import ssh,SSHFunc2, SSHFunc, SSHCoeff, CartToSphere
from pylayers.antprop.coeffModel import *
from matplotlib import rc
from matplotlib import cm # colormaps
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MaxNLocator
from scipy.special import sici , fresnel
import pandas as pd
import matplotlib.pylab as plt
class Pattern(PyLayers):
""" Class Pattern
MetaClass of Antenna
A pattern is evaluated with the 3 np.array parameters
theta
phi
fGHz
This class implements pattern methods.
The name of a pattern method starts by p.
Each pattern method has a unique dictionnary argument 'param'
If self.grid dimensions are
Nt x Np x Nf
else:
Ndir x Nf
"""
def __init__(self):
    # Delegate to the PyLayers project base class; Pattern itself adds
    # no construction-time state.
    PyLayers.__init__(self)
def __repr__(self):
    """Return a readable summary: antenna type and its parameters."""
    lines = ['Antenna type : ' + self.typ,
             '------------------------']
    if 'param' in self.__dict__:
        for key in self.param:
            lines.append(' ' + key + ' : ' + str(self.param[key]))
    return '\n'.join(lines) + '\n'
def eval(self,**kwargs):
    """ evaluate pattern functions

    Sets up the angular support (grid or directions), then dispatches
    to the pattern method selected by ``self.typ``.

    Parameters
    ----------
    th: list
        []
    ph: list
        []
    pt : np.array (3,N)
    pr : np.array (3,N)
    azoffset : int (0)
    Rfloor:bool
        if true add gain value to reflected ray on the floor.
        values are append at the end of sqG.
    fGHz:list
        []
    nth: int
        90
    nph: int
        181
    first: boolean
        True if first call (to define self.param)
    grid: boolean
        True for pattern mode, False for Ray Tracing mode
    th0 : float
        theta initial value
    th1 : float
        theta finale value
    ph0 : float
        phi initial value
    ph1 : float
        phi final value

    Examples
    --------

    >>> from pylayers.antprop.aarray import *
    >>> A0=Antenna('Omni',param={'pol':'t','GmaxdB':0})
    >>> A1=Antenna('Gauss')
    >>> A2=Antenna('3gpp')
    >>> A3=ULArray()
    >>> A0.eval()
    >>> A1.eval()
    >>> A2.eval()
    >>> #A3.eval()
    """
    defaults = {'Rfloor':False,
                'nth':90,
                'nph':181,
                'grid':True,
                'th0':0,
                'th1':np.pi,
                'ph0':0,
                'ph1':2*np.pi,
                'azoffset':0,
                'inplace':True
               }
    # fill missing kwargs with defaults
    for k in defaults:
        if k not in kwargs:
            kwargs[k]=defaults[k]
    # frequency axis: keep an existing self.fGHz when none is given
    if 'fGHz' not in kwargs:
        if 'fGHz' not in self.__dict__:
            self.fGHz = np.array([2.4])
    else:
        if type(kwargs['fGHz'])==np.ndarray:
            self.fGHz = kwargs['fGHz']
        else:
            self.fGHz = np.array([kwargs['fGHz']])
    self.nf = len(self.fGHz)
    self.grid = kwargs['grid']
    #
    # if th and ph are empty
    #     if pt and pr are empty
    #          calculates from th0,th1,nth
    #                          ph0,phi,nph
    #     else
    #          calculates from points coordinates pt and pr
    # else
    #     take specified values
    #
    if ('th' not in kwargs) and ('ph' not in kwargs):
        if ('pt' not in kwargs) and ('pr' not in kwargs):
            # regular angular grid (pattern mode)
            self.theta = np.linspace(kwargs['th0'],kwargs['th1'],kwargs['nth'])
            self.phi = np.linspace(kwargs['ph0'],kwargs['ph1'],kwargs['nph'],endpoint=False)
            self.grid = True
            self.full_evaluated = True
        else:
            # directions derived from transmitter/receiver coordinates
            si = kwargs['pr']-kwargs['pt']
            ssi = np.sqrt(np.sum(si*si,axis=0))
            sn = si/ssi[None,:]
            self.theta = np.arccos(sn[2,:])
            self.phi = np.mod(np.arctan2(sn[1,:],sn[0,:])+kwargs['azoffset'],2*np.pi)
            self.grid = False
            self.full_evaluated = True
            if kwargs['Rfloor']:
                # append the directions of the floor-reflected rays
                dR = np.sqrt(ssi**2 + (kwargs['pr'][2,:] + kwargs['pt'][2,:])**2) # reflexion length
                thetaR = np.arccos((kwargs['pr'][2,:] + kwargs['pt'][2,:]) / dR)
                self.theta = np.hstack([self.theta,thetaR])
                self.phi = np.hstack([self.phi,self.phi])
    else :
        # user-specified direction lists (must have equal lengths)
        assert(len(kwargs['th'])==len(kwargs['ph']))
        self.theta = kwargs['th']
        self.phi = kwargs['ph']
        self.full_evaluated = False
    if self.typ=='azel':
        # azel patterns are defined on their own fixed 360x360 support
        self.theta=np.linspace(-np.pi,np.pi,360)
        self.phi=np.linspace(-np.pi,np.pi,360)
        self.full_evaluated = False
    self.nth = len(self.theta)
    self.nph = len(self.phi)
    #
    # evaluation of the specific Pattern__p function
    # (name-mangled private method, e.g. _Pattern__pOmni for typ 'Omni')
    #
    Ft,Fp = eval('self._Pattern__p'+self.typ)(param=self.param)
    if kwargs['inplace']:
        self.Ft = Ft
        self.Fp = Fp
        self.evaluated = True
        self.gain()
    else:
        return Ft,Fp
def vsh(self, threshold=-1):
    """Compute vector spherical harmonic coefficients (shape 1 -> 2 -> 3).

    Requires a prior pattern evaluation; does nothing otherwise.
    """
    if not self.evaluated:
        return
    vsh(self)
    self.C.s1tos2()
    self.C.s2tos3(threshold=threshold)
def ssh(self, L=89, dsf=1):
    """Compute scalar spherical harmonic coefficients.

    Requires a prior pattern evaluation; does nothing otherwise.
    """
    if not self.evaluated:
        return
    ssh(self, L, dsf)
def __pOmni(self, **kwargs):
    """ omnidirectional pattern

    Parameters
    ----------
    param : dict
        + pol : string 't' | 'p'
        + GmaxdB : float (0)

    Notes
    -----
    self.grid selects the output layout:
        True  -> angular grid  Nth x Nph x Nf
        False -> directions    Ndir x Nf
    """
    defaults = {'param': {'pol': 't', 'GmaxdB': 0}}
    if 'param' not in kwargs or kwargs['param'] == {}:
        kwargs['param'] = defaults['param']
    self.param = kwargs['param']
    self.GmaxdB = self.param['GmaxdB']
    self.pol = self.param['pol']
    # linear gain
    G = 10. ** (self.GmaxdB / 10.)
    nf = len(self.fGHz)
    if self.grid:
        # Nth x Nph x Nf
        self.sqG = np.array(np.sqrt(G)) * np.ones(nf)[None, None, :]
        self.evaluated = True
    else:
        # Nd x Nf
        self.sqG = np.array(np.sqrt(G)) * np.ones(nf)[None, :]
    Ft, Fp = self.radF()
    return Ft, Fp
def __paperture(self,**kwargs):
    """ Aperture Pattern

    Aperture in the (x,y) plane. Main lobe in theta=0 direction.
    polar indicates the orientation of the Electric field, either 'x' or 'y'.

    See theoretical background in :
    http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf

    Parameters
    ----------
    param : dict
        HPBW_x_deg : float
            Half Power Beamwidth along x (degrees)
        HPBW_y_deg : float
            Half Power Beamwidth along y (degrees)
        Gfactor : float
            gain factor relating beamwidths to max gain
        fcGHz : float
            design center frequency
        polar : 'x' | 'y'
        window : only 'rect' is implemented
    """
    defaults = {'param': {'HPBW_x_deg':40,
                          'HPBW_y_deg':10,
                          'Gfactor':27000,
                          'fcGHz': 27.5,
                          'polar':'x',
                          'window':'rect'
                         }}
    if 'param' not in kwargs or kwargs['param']=={}:
        kwargs['param']=defaults['param']
    self.param = kwargs['param']
    deg_to_rad = np.pi/180.
    # wavelength at the design frequency (scalar) and per-band (array)
    ld_c = 0.3/self.param['fcGHz']
    ld = 0.3/self.fGHz
    # aperture dimensions deduced from the requested beamwidths (18.1)
    Dx = 0.886*ld_c/(self.param['HPBW_x_deg']*deg_to_rad)
    Dy = 0.886*ld_c/(self.param['HPBW_y_deg']*deg_to_rad)
    # dimensions normalized to wavelength
    Dx_n = Dx/ld
    Dy_n = Dy/ld
    if self.grid:
        # Nth x Nph x Nf
        theta = self.theta[:,None,None]
        phi = self.phi[None,:,None]
    else:
        # Ndir x Nf
        theta = self.theta[:,None]
        phi = self.phi[:,None]
    vx = Dx_n[...,:]*np.sin(theta)*np.cos(phi)    # 18.1.4
    vy = Dy_n[...,:]*np.sin(theta)*np.sin(phi)    # 18.1.4
    # normalized pattern; the (1+cos(theta))/2 factor suppresses the
    # rear radiation lobe
    F_nor = ((1+np.cos(theta))/2.)*np.abs(np.sinc(vx)*np.sinc(vy))
    # beamwidths at each frequency, hence frequency-dependent max gain
    HPBW_x = (0.886*ld/Dx)/deg_to_rad
    HPBW_y = (0.886*ld/Dy)/deg_to_rad
    Gmax = self.param['Gfactor']/(HPBW_x*HPBW_y)
    F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
    # Handling repartition on both vector components.
    # NOTE(review): the division by cos(phi) (resp. sin(phi)) produces
    # NaN at phi = +-pi/2 (resp. 0, pi); those entries are patched with
    # the unprojected magnitude F below.
    # enforce E.y = 0
    if self.param['polar']=='x':
        Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
        Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
        nan_bool = np.isnan(Fp)
        Fp[nan_bool] = F[nan_bool]
    # enforce E.x = 0
    if self.param['polar']=='y':
        Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
        Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
        nan_bool = np.isnan(Fp)
        Fp[nan_bool] = F[nan_bool]
    # TODO: 2D windowing (param['window'] != 'rect') is not implemented;
    # only 'rect' behaves as documented.  An experimental FFT-based
    # approach was sketched here and removed for clarity.
    return Ft,Fp
def __paperture2(self,**kwargs):
    """ Aperture Pattern (variant with cosine-tapered aperture)

    Aperture in the (x,y) plane. Main lobe in theta=0 direction.
    polar indicates the orientation of the Electric field, either 'x' or 'y'.

    See theoretical background in :
    http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf

    Parameters
    ----------
    param : dict
        HPBW_a_deg : float
            Half Power Beamwidth along the a dimension (degrees)
        HPBW_b_deg : float
            Half Power Beamwidth along the b dimension (degrees)
        Gfactor, fcGHz, polar, window : as in __paperture
    """
    defaults = {'param': {'HPBW_a_deg':40,
                          'HPBW_b_deg':10,
                          'Gfactor':27000,
                          'fcGHz': 27.5,
                          'polar':'x',
                          'window':'rect'
                         }}
    if 'param' not in kwargs or kwargs['param']=={}:
        kwargs['param']=defaults['param']
    self.param = kwargs['param']
    deg_to_rad = np.pi/180.
    # wavelength at the design frequency (scalar) and per-band (array)
    ld_c = 0.3/self.param['fcGHz']
    ld = 0.3/self.fGHz
    # aperture dimensions from the requested beamwidths; the 1.189
    # factor corresponds to the cosine-tapered dimension (18.1.3)
    a = 1.189*ld_c/(self.param['HPBW_a_deg']*deg_to_rad)
    b = 0.886*ld_c/(self.param['HPBW_b_deg']*deg_to_rad)
    a_n = a/ld
    b_n = b/ld
    if self.grid:
        # Nth x Nph x Nf
        theta = self.theta[:,None,None]
        phi = self.phi[None,:,None]
    else:
        # Ndir x Nf
        theta = self.theta[:,None]
        phi = self.phi[:,None]
    vx = a_n[...,:]*np.sin(theta)*np.cos(phi) # 18.1.4
    vy = b_n[...,:]*np.sin(theta)*np.sin(phi) # 18.1.4
    #F_nor = ((1+np.cos(theta))/2.)*np.abs(np.sinc(vx)*np.sinc(vy))
    F_nor  = (1+np.cos(theta))/2*(np.cos(np.pi*vx)/(1-4*vx**2))*np.sinc(vy)   # 18.1.3 + suppression rear radiation
    HPBW_a = (1.189*ld/a)/deg_to_rad
    HPBW_b = (0.886*ld/b)/deg_to_rad
    Gmax = self.param['Gfactor']/(HPBW_a*HPBW_b)
    F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
    # Handling repartition on both vector components.
    # NOTE(review): the division by cos(phi) (resp. sin(phi)) produces
    # NaN at phi = +-pi/2 (resp. 0, pi); those entries are patched with
    # the unprojected magnitude F below.
    # enforce E.y = 0
    if self.param['polar']=='x':
        Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
        Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
        nan_bool = np.isnan(Fp)
        Fp[nan_bool] = F[nan_bool]
    # enforce E.x = 0
    if self.param['polar']=='y':
        Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
        Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
        nan_bool = np.isnan(Fp)
        Fp[nan_bool] = F[nan_bool]
    # TODO: 2D windowing (param['window'] != 'rect') is not implemented;
    # only 'rect' behaves as documented (experimental FFT-based approach
    # removed for clarity).
    return Ft,Fp
def __phplanesectoralhorn(self,**kwargs):
    """ H plane sectoral horn

    Parameters
    ----------
    param : dict
        rho1 : float
            sector radius (meter)
        a1 : float
            aperture dimension along x (greatest value in meters)
        b1 : float
            aperture dimension along y (greatest value in meters)
        fcGHz, GcmaxdB : design frequency and max gain (dB)
        Nx, Ny : aperture integration mesh size

    Notes
    -----
    Maximum gain in theta =0
    Polarized along y axis (Jx=0,Jz=0)
    The aperture field is integrated numerically on an Nx x Ny mesh
    following Balanis (sections 12 and 13.3.1).
    NOTE(review): the non-grid (ray tracing) branch reads self.Umax,
    self.Gfactor and self.ehpbw, which are only assigned by a grid
    evaluation - a grid run must precede any ray-tracing run.
    """
    defaults = {'param': {'rho1':0.198,
                          'a1':0.088, # aperture dimension along x
                          'b1':0.0126, # aperture dimension along y
                          'fcGHz':28,
                          'GcmaxdB':19,
                          'Nx':20,
                          'Ny':20}}
    if 'param' not in kwargs or kwargs['param']=={}:
        kwargs['param']=defaults['param']
    self.param = kwargs['param']
    #H-plane antenna
    rho1 = self.param['rho1']
    a1 = self.param['a1']
    b1 = self.param['b1']
    Nx = self.param['Nx']
    Ny = self.param['Ny']
    fcGHz = self.param['fcGHz']
    GcmaxdB = self.param['GcmaxdB']
    assert(a1>b1), "a1 should be greater than b1 (see fig 13.1O(a) Balanis"
    lbda = 0.3/self.fGHz
    k = 2*np.pi/lbda
    # free-space wave impedance sqrt(mu0/eps0)
    eta0 = np.sqrt(4*np.pi*1e-7/8.85429e-12)
    if self.grid:
        # X,Y aperture points (t,p,x,y,f)
        X = np.arange(-a1/2,a1/2,a1/(Nx-1))[None,None,:,None,None]
        Y = np.arange(-b1/2,b1/2,b1/(Ny-1))[None,None,None,:,None]
        # angular domain (theta,phi)
        Theta= self.theta[:,None,None,None,None]
        Phi = self.phi[None,:,None,None,None]
    else:
        # X,Y aperture points (r,x,y,f)
        X = np.arange(-a1/2,a1/2,a1/(Nx-1))[None,:,None,None]
        Y = np.arange(-b1/2,b1/2,b1/(Ny-1))[None,None,:,None]
        # angular domain (theta,phi)
        Theta= self.theta[:,None,None,None]
        Phi= self.phi[:,None,None,None]
    # Aperture field Ea:
    # Ea is an approximation of the aperture field:
    # (from: C. A. Balanis, Antenna Theoy: Analysis and Design. New York
    # Wiley, 1982. ... Section 13.3.1 )
    Ea = np.cos(X*np.pi/a1)*np.exp(-.5*1j*k*((X**2)/(rho1)+(Y**2)/(rho1)))
    # equivalent electric and magnetic currents
    Jy = -Ea/eta0
    Mx = Ea
    # cosine direction
    ctsp = np.cos(Theta)*np.sin(Phi)
    cp = np.cos(Phi)
    ctcp = np.cos(Theta)*np.cos(Phi)
    sp = np.sin(Phi)
    stcp = np.sin(Theta)*np.cos(Phi)
    stsp = np.sin(Theta)*np.sin(Phi)
    # N & L : radiation integrals summed over the aperture mesh
    ejkrrp = np.exp(1j*k*( X*stcp + Y*stsp)) # exp(jk (r.r'))
    if self.grid:
        N_theta = np.einsum('tpnmf->tpf',Jy*ctsp*ejkrrp) # 12-12 a assuming Jx,Jz=0
        N_phi = np.einsum('tpnmf->tpf',Jy*cp*ejkrrp) # 12-12 b ""
        L_theta = np.einsum('tpnmf->tpf',Mx*ctcp*ejkrrp) # 12-12 c assuming My,Mz=0
        L_phi = np.einsum('tpnmf->tpf',-Mx*sp*ejkrrp) # 12-12 d ""
    else:
        N_theta = np.einsum('rnmf->rf',Jy*ctsp*ejkrrp) # 12-12 a assuming Jx,Jz=0
        N_phi = np.einsum('rnmf->rf',Jy*cp*ejkrrp) # 12-12 b ""
        L_theta = np.einsum('rnmf->rf',Mx*ctcp*ejkrrp) # 12-12 c assuming My,Mz=0
        L_phi = np.einsum('rnmf->rf',-Mx*sp*ejkrrp) # 12-12 d ""
    # Far-Field
    Ft = -L_phi - eta0*N_theta # 12-10b p 661
    Fp = L_theta - eta0*N_phi # 12-10c p 661
    # unnormalized radiated intensity
    G = Ft*np.conj(Ft)+Fp*np.conj(Fp)
    if self.grid:
        # Umax : ,f
        self.Umax = G.max(axis=(0,1))
        Ft = Ft/np.sqrt(self.Umax[None,None,:])
        Fp = Fp/np.sqrt(self.Umax[None,None,:])
        # centered frequency range
        fcc = np.abs(self.fGHz-fcGHz)
        idxc = np.where(fcc==np.min(fcc))[0][0]
        # Gain @ center frequency
        #G = _gain(Ft[:,:,idxc],Fp[:,:,idxc])
        G = _gain(Ft,Fp)
        # effective half power beamwidth
        self.ehpbw, self.hpster = _hpbw(G,self.theta,self.phi)
        # gain factor calibrated so the center frequency reaches GcmaxdB
        self.Gfactor = 10**(GcmaxdB/10.)*self.ehpbw[idxc]
        Gmax = self.Gfactor/self.ehpbw
        Ft = np.sqrt(Gmax[None,None,:])*Ft
        Fp = np.sqrt(Gmax[None,None,:])*Fp
    else:
        ##
        ## Ft (r x f )
        ## Fp (r x f )
        ##
        # NOTE(review): relies on self.Umax / self.Gfactor / self.ehpbw
        # computed during an earlier grid evaluation.
        Ft = Ft/np.sqrt(self.Umax[None,:])
        Fp = Fp/np.sqrt(self.Umax[None,:])
        Gmax = self.Gfactor/self.ehpbw
        Ft = np.sqrt(Gmax[None,:])*Ft
        Fp = np.sqrt(Gmax[None,:])*Fp
    return Ft,Fp
def __phorn(self,**kwargs):
    """ Horn antenna

    http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf  (18.2)

    Parameters
    ----------
    param : dict
        sigma_a, sigma_b : float
            aperture phase-error parameters along a and b
        A_wl, B_wl : float
            aperture dimensions in wavelengths at fcGHz
        fcGHz : float
            design center frequency
        polar : 'x' | 'y'
            orientation of the E field
    """
    defaults = {'param': {'sigma_a':1.2593,
                          'sigma_b':1.0246,
                          'A_wl':16,
                          'B_wl':3,
                          'fcGHz':28.,
                          'polar':'x'
                         }}
    if 'param' not in kwargs or kwargs['param']=={}:
        kwargs['param']=defaults['param']
    self.param = kwargs['param']
    deg_to_rad = np.pi/180.
    # wavelength at the design frequency (scalar) and per-band (array)
    ld_c = 0.3/self.param['fcGHz']
    ld = 0.3/self.fGHz
    A_wl = kwargs['param']['A_wl']
    B_wl = kwargs['param']['B_wl']
    # physical aperture dimensions (meters), fixed by the design frequency
    A = A_wl*ld_c
    B = B_wl*ld_c
    sigma_a = kwargs['param']['sigma_a']
    sigma_b = kwargs['param']['sigma_b']
    # Alternative derivation of sigma_a / sigma_b from the horn geometry,
    # kept for reference:
    #b = kwargs['param']['b']
    #Ra = (A/(A-a))*RA
    #Rb = (B/(B-b))*RB
    #La = np.sqrt(Ra**2+A**2/4)
    #Lb = np.sqrt(Rb**2+B**2/4)
    #alpha = np.arctan(A/(2*Ra))
    #beta = np.arctan(B/(2*Rb))
    #Delta_a = A**2/(8*Ra)
    #Delta_b = B**2/(8*Rb)
    #sigma_a = A/np.sqrt((2*ld*Ra))
    #sigma_b = B/np.sqrt((2*ld*Rb))
    A_n = A/ld
    B_n = B/ld
    if self.grid:
        # Nth x Nph x Nf
        theta = self.theta[:,None,None]
        phi = self.phi[None,:,None]
    else:
        # Ndir x Nf
        theta = self.theta[:,None]
        phi = self.phi[:,None]
    vx = A_n[...,:]*np.sin(theta)*np.cos(phi) # 18.3.4
    vy = B_n[...,:]*np.sin(theta)*np.sin(phi) # 18.3.4
    # F0/F1 are horn diffraction-integral helpers defined elsewhere in
    # this module (presumably built on scipy.special fresnel - TODO confirm)
    F = ((1+np.cos(theta))/2.)*(F1(vx,sigma_a)*F0(vy,sigma_b))
    normF = np.abs(F1(0,sigma_a)*F0(0,sigma_b))**2
    F_nor = F/np.sqrt(normF)
    efficiency = 0.125*normF # 18.4.3
    Gmax = efficiency*4*np.pi*A*B/ld**2
    F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
    # Handling repartition on both vector components; NaN entries from
    # the phi singularities are patched with the unprojected magnitude.
    # enforce E.y = 0
    if self.param['polar']=='x':
        Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
        Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
        nan_bool = np.isnan(Fp)
        Fp[nan_bool] = F[nan_bool]
    # enforce E.x = 0
    if self.param['polar']=='y':
        Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
        Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
        nan_bool = np.isnan(Fp)
        Fp[nan_bool] = F[nan_bool]
    return Ft,Fp
def __pazel(self,**kwargs):
    """ Azimuth Elevation pattern from file

    The file contains 720 gain values in dB: 360 elevation values
    followed by 360 azimuth values.

    Parameters
    ----------
    param : dict
        filename : ANT filename
        pol : 'V' | 'H' | '45'
    """
    defaults = {'param': {'filename' : '',
                          'pol':'V'}}
    # BUG FIX: the defaults dict existed but was never applied.
    if 'param' not in kwargs or kwargs['param']=={}:
        kwargs['param'] = defaults['param']
    # context manager guarantees the file is closed on any path
    with open(kwargs['param']['filename']) as f:
        Gthetaphi = f.readlines()
    Gthetaphi = np.array(Gthetaphi).astype('float')
    Gaz = Gthetaphi[360:]
    Gel = Gthetaphi[:360]
    # dB -> linear square-root gain
    sqGazlin = np.sqrt(pow(10,Gaz/10.))
    sqGellin = np.sqrt(pow(10,Gel/10.))
    if self.grid :
        # Nth x Nph x Nf ; the pattern is the separable product of the
        # azimuth and elevation cuts
        if kwargs['param']['pol']=='V':
            Fp = np.zeros((360,360,1))
            Ft = sqGazlin[None,:,None]*sqGellin[:,None,None]
        if kwargs['param']['pol']=='H':
            Ft = np.zeros((360,360,1))
            Fp = sqGazlin[None,:,None]*sqGellin[:,None,None]
        if kwargs['param']['pol']=='45':
            # BUG FIX: the original used a bare ``sqrt`` which is not
            # defined in this scope; np.sqrt is the intended function.
            Ft = (1/np.sqrt(2))*sqGazlin[None,:,None]*sqGellin[:,None,None]
            Fp = (1/np.sqrt(2))*sqGazlin[None,:,None]*sqGellin[:,None,None]
        self.evaluated = True
    else:
        # Ray-tracing (non-grid) mode was never implemented: the
        # original fell through and raised NameError at the return.
        raise NotImplementedError('azel pattern only supports grid mode')
    return Ft,Fp
def __pGauss(self,**kwargs):
    """ Gauss pattern

    Parameters
    ----------
    param : dict
        p0 : phi main lobe (0-2pi)
        p3 : 3dB aperture angle in phi
        t0 : theta main lobe (0-pi)
        t3 : 3dB aperture angle in theta
        pol : 'th' | 'ph' polarization carrying the pattern

    TODO : finish implementation of polar
    """
    defaults = {'param':{'p0' : 0,
                         't0' : np.pi/2,
                         'p3' : np.pi/6,
                         't3' : np.pi/6,
                         'pol':'th'
                        }}
    if 'param' not in kwargs or kwargs['param']=={}:
        kwargs['param']=defaults['param']
    self.typ='Gauss'
    self.param = kwargs['param']
    p0 = self.param['p0']
    t0 = self.param['t0']
    p3 = self.param['p3']
    t3 = self.param['t3']
    pol = self.param['pol']
    self.Gmax = 16/(t3*p3)
    self.GdB = 10*np.log10(self.Gmax)
    self.sqGmax = np.sqrt(self.Gmax)
    argth = ((self.theta-t0)**2)/t3
    # angular distance to p0, wrapped on the circle (shortest way round)
    e1 = np.mod(self.phi-p0,2*np.pi)
    e2 = np.mod(p0-self.phi,2*np.pi)
    # BUG FIX: np.array(map(...)) yields a useless 0-d object array on
    # Python 3; np.minimum is the vectorized elementwise equivalent.
    e = np.minimum(e1,e2)
    argphi = (e**2)/p3
    Nf = len(self.fGHz)
    if self.grid :
        Nt = len(self.theta)
        Np = len(self.phi)
        # Nth x Nph x Nf
        if pol=='th':
            Ft = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) *np.ones(len(self.fGHz))[None,None,:])
            Fp = np.zeros((Nt,Np,Nf))
        if pol=='ph':
            Ft = np.zeros((Nt,Np,Nf))
            Fp = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) *np.ones(len(self.fGHz))[None,None,:])
    else:
        #
        # Nd x Nf
        #
        Nd = len(self.theta)
        assert(len(self.phi)==Nd)
        if pol=='th':
            Ft = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
            Fp = np.zeros(Nd)
        if pol=='ph':
            Ft = np.zeros(Nd)
            Fp = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
        # add frequency axis (Ndir x Nf)
        Ft = np.dot(Ft[:,None],np.ones(len(self.fGHz))[None,:])
        Fp = np.dot(Fp[:,None],np.ones(len(self.fGHz))[None,:])
    return Ft,Fp
def __p3gpp(self,**kwargs):
    """ 3GPP sectoral antenna pattern

    Parameters
    ----------
    param : dict
        thtilt : theta tilt antenna (deg)
        hpbwv : vertical half power beamwidth (deg)
        hpbwh : horizontal half power beamwidth (deg)
        sllv : side lobe level (dB)
        fbrh : front back ratio (dB)
        gm : maximum gain (dB)
        pol : 't' | 'p' | 'c'

    Notes
    -----
    if pattern (grid mode)
        Ft  nth x nphi x nf
        Fp  nth x nphi x nf
    else
        Ft  ndir x nf (==nth, ==nph)
        Fp  ndir x nf (==nth, ==nph)
    The gain in dB is separable: a vertical and a horizontal template
    are summed.  self.sqG stores the linear square-root gain; radF()
    then spreads it over the polarization components.
    """
    defaults = {'param' : {'thtilt':0,  # antenna tilt
                           'hpbwv' :6.2,# half power beamwidth v
                           'hpbwh' :65, # half power beamwidth h
                           'sllv': -18, # side lobe level
                           'fbrh': 30,  # front back ratio
                           'gm': 18,    # max gain (dB)
                           'pol':'p'    # t , p , c
                          }}
    if 'param' not in kwargs or kwargs['param']=={}:
        kwargs['param'] = defaults['param']
    self.typ = "3gpp"
    self.param = kwargs['param']
    thtilt = self.param['thtilt']
    hpbwh = self.param['hpbwh']
    hpbwv = self.param['hpbwv']
    sllv = self.param['sllv']
    fbrh = self.param['fbrh']
    gm = self.param['gm']
    pol = self.param['pol']
    self.pol = pol
    # convert radian to degree, centering phi on 0 and theta on the horizon
    phi = self.phi*180/np.pi-180
    theta = self.theta*180/np.pi-90
    if self.grid:
        #Nth x Nph x Nf
        GvdB = np.maximum(-12*((theta-thtilt)/hpbwv)**2,sllv)[:,None,None]
        GhdB = (-np.minimum(12*(phi/hpbwh)**2,fbrh)+gm)[None,:,None]
        GdB = GhdB+GvdB
        self.sqG = np.sqrt(10**(GdB/10.))*np.ones(self.nf)[None,None,:]
        self.evaluated = True
    else:
        #Nd x Nf
        GvdB = np.maximum(-12*((theta-thtilt)/hpbwv)**2,sllv)[:,None]
        GhdB = (-np.minimum(12*(phi/hpbwh)**2,fbrh)+gm)[:,None]
        GdB = GhdB+GvdB
        self.sqG = np.sqrt(10**(GdB/10.))
    # radiating functions are deduced from square root of gain
    Ft,Fp = self.radF()
    return Ft,Fp
def __pvsh1(self,**kwargs):
    """ calculate pattern from VSH Coeffs (shape 1)

    Reconstructs Ft, Fp from the full (shape 1) vector spherical
    harmonic coefficients stored in self.C.

    Returns
    -------
    Ft, Fp : np.array
        Nth x Nph x Nf in grid mode, Ndir x Nf otherwise
    """
    assert hasattr(self,'C'),'no spherical coefficient'
    assert hasattr(self.C.Br,'s1'),'no shape 1 coeff in vsh'
    if self.grid:
        # flatten the angular grid into Ndir = nth*nph directions
        theta = np.kron(self.theta, np.ones(self.nph))
        phi = np.kron(np.ones(self.nth),self.phi)
    else:
        theta = self.theta
        phi = self.phi
    # BUG FIX: the original applied np.kron a second time on the already
    # flattened grid, producing (nth*nph)**2 directions and breaking the
    # final reshape; a single flattening (above) is correct (cf. __pvsh3).
    nray = len(theta)
    Br = self.C.Br.s1[:, :, :]
    Bi = self.C.Bi.s1[:, :, :]
    Cr = self.C.Cr.s1[:, :, :]
    Ci = self.C.Ci.s1[:, :, :]
    L = self.C.Br.L1
    M = self.C.Br.M1
    # The - sign is necessary to get the good reconstruction
    # deduced from observation
    # May be it comes from a different definition of theta in SPHEREPACK
    ind = index_vsh(L, M)
    l = ind[:, 0]
    m = ind[:, 1]
    # vector spherical harmonics basis functions
    V, W = VW(l, m, theta, phi)
    #
    # broadcasting along frequency axis
    #
    V = np.expand_dims(V,0)
    # BUG FIX: the original expanded V twice, clobbering W.
    W = np.expand_dims(W,0)
    #
    # k : frequency axis
    # l : axis l (theta)
    # m : axis m (phi)
    #
    # BUG FIX: np.eisum does not exist; the intended function is np.einsum.
    Fth = np.einsum('klm,kilm->ki',Br,np.real(V.T)) - \
          np.einsum('klm,kilm->ki',Bi,np.imag(V.T)) + \
          np.einsum('klm,kilm->ki',Ci,np.real(W.T)) + \
          np.einsum('klm,kilm->ki',Cr,np.imag(W.T))
    Fph = -np.einsum('klm,kilm->ki',Cr,np.real(V.T)) + \
           np.einsum('klm,kilm->ki',Ci,np.imag(V.T)) + \
           np.einsum('klm,kilm->ki',Bi,np.real(W.T)) + \
           np.einsum('klm,kilm->ki',Br,np.imag(W.T))
    # here Nf x Nd
    Ft = Fth.transpose()
    Fp = Fph.transpose()
    # then Nd x Nf
    if self.grid:
        # Nth x Nph x Nf
        Ft = Ft.reshape(self.nth, self.nph,self.nf)
        Fp = Fp.reshape(self.nth, self.nph,self.nf)
    # last axis should be frequency
    assert(Ft.shape[-1]==self.nf)
    assert(Fp.shape[-1]==self.nf)
    return Ft, Fp
def __pvsh3(self,**kwargs):
    """ calculate pattern from vsh3

    Reconstructs Ft, Fp from the thresholded (shape 3) vector spherical
    harmonic coefficients stored in self.C.
    """
    assert hasattr(self,'C'),'no spherical coefficient'
    assert hasattr(self.C.Br,'s3'),'no shape 3 coeff in vsh'
    if self.grid:
        # flatten the angular grid into Ndir = nth*nph directions
        theta = np.kron(self.theta, np.ones(self.nph))
        phi = np.kron(np.ones(self.nth),self.phi)
    else:
        theta = self.theta
        phi = self.phi
    Br = self.C.Br.s3
    lBr = self.C.Br.ind3[:, 0]
    mBr = self.C.Br.ind3[:, 1]
    Bi = self.C.Bi.s3
    Cr = self.C.Cr.s3
    Ci = self.C.Ci.s3
    # maximum degree/order (computed but not used below)
    L = lBr.max()
    M = mBr.max()
    # vector spherical harmonics basis functions
    V, W = VW(lBr, mBr, theta, phi)
    Fth = np.dot(Br, np.real(V.T)) - \
          np.dot(Bi, np.imag(V.T)) + \
          np.dot(Ci, np.real(W.T)) + \
          np.dot(Cr, np.imag(W.T))
    Fph = -np.dot(Cr, np.real(V.T)) + \
           np.dot(Ci, np.imag(V.T)) + \
           np.dot(Bi, np.real(W.T)) + \
           np.dot(Br, np.imag(W.T))
    # here Nf x Nd
    Ft = Fth.transpose()
    Fp = Fph.transpose()
    # then Nd x Nf
    if self.grid:
        # Nth x Nph x Nf
        Ft = Ft.reshape(self.nth, self.nph,self.nf)
        Fp = Fp.reshape(self.nth, self.nph,self.nf)
    # last axis should be frequency
    assert(Ft.shape[-1]==self.nf)
    assert(Fp.shape[-1]==self.nf)
    return Ft,Fp
def __psh3(self, **kwargs):
    """ calculate pattern from sh3

    Reconstructs Ft, Fp from the shape-3 scalar spherical harmonic
    coefficients stored in self.S: the Cartesian field components are
    synthesized from the coefficients, then converted to the spherical
    (theta, phi) components.
    """
    assert hasattr(self, 'S'), 'no spherical coefficient'
    assert hasattr(self.S.Cx, 's3'), 'no shape 3 coeff in ssh'
    if self.grid:
        # flatten the angular grid into Ndir = nth*nph directions
        theta = np.kron(self.theta, np.ones(self.nph))
        phi = np.kron(np.ones(self.nth), self.phi)
    else:
        theta = self.theta
        phi = self.phi
    cx = self.S.Cx.s3
    cy = self.S.Cy.s3
    cz = self.S.Cz.s3
    lmax = self.S.Cx.lmax
    Y, indx = SSHFunc2(lmax, theta, phi)
    k = self.S.Cx.k2
    # Cartesian field components from the retained coefficients
    Ex = np.dot(cx, Y[k])
    Ey = np.dot(cy, Y[k])
    Ez = np.dot(cz, Y[k])
    Fth, Fph = CartToSphere(theta, phi, Ex, Ey, Ez,
                            bfreq=True, pattern=self.grid)
    # Nf x Nd -> Nd x Nf
    Ft = Fth.transpose()
    Fp = Fph.transpose()
    if self.grid:
        Ft = Ft.reshape(self.nth, self.nph, self.nf)
        Fp = Fp.reshape(self.nth, self.nph, self.nf)
    # last axis should be frequency
    assert (Ft.shape[-1] == self.nf)
    assert (Fp.shape[-1] == self.nf)
    return Ft, Fp
def __pwireplate(self,**kwargs):
    """ pattern wire plate antenna

    Piecewise theta model: a cubic polynomial below t0 joined to a
    parabolic cap above t0.

    Parameters
    ----------
    param : dict
        t0 : main lobe theta (rad)
        GmaxdB : maximum gain (dB)
    """
    defaults = {'param':{'t0' : 5*np.pi/6,
                         'GmaxdB': 5
                        }}
    if 'param' not in kwargs or kwargs['param']=={}:
        kwargs['param']=defaults['param']
    self.typ='wireplate'
    self.param = kwargs['param']
    t0 = self.param['t0']
    GmaxdB = self.param['GmaxdB']
    # BUG FIX: linear gain is 10**(GdB/10); the original computed
    # pow(GmaxdB/10., 10) with the arguments swapped (cf. __pOmni).
    Gmax = pow(10.,GmaxdB/10.)
    sqGmax = np.sqrt(Gmax)
    uth1 = np.where(self.theta < t0)[0]
    uth2 = np.where(self.theta >= t0)[0]
    p = t0
    q = np.pi/2.
    # solve for the cubic coefficients matching slope/level constraints
    A = np.array(([[3*p**2,2*p,1],[p**3,p**2,p],[q**3,q**2,q]]))
    Y = np.array(([0,1,1/(1.*sqGmax)]))
    self.poly = la.solve(A,Y)
    argth1 = np.abs(self.poly[0]*self.theta[uth1]**3
                    + self.poly[1]*self.theta[uth1]**2
                    + self.poly[2]*self.theta[uth1])
    argth2 = -(1/(np.pi-t0)**2)*(self.theta[uth2]-t0)**2+1
    argth = np.hstack((argth1,argth2))[::-1]
    if self.grid:
        Ft = sqGmax * (argth[:,None])
        Fp = sqGmax * (argth[:,None])
    else:
        Fat = sqGmax * argth
        Fap = sqGmax * argth
        # add frequency axis (Ndir x Nf)
        Ft = np.dot(Fat[:,None],np.ones(len(self.fGHz))[None,:])
        Fp = np.dot(Fap[:,None],np.ones(len(self.fGHz))[None,:])
    return Ft,Fp
def __pcst(self,**kwargs):
""" read antenna in text format
"""
defaults = {'param':{'p' : 2,
'directory':'ant/FF_Results_txt_port_1_2/',
'fGHz':np.arange(2,6.5,0.5)}}
if 'param' not in kwargs or kwargs['param']=={}:
param=defaults['param']
else:
param=kwargs['param']
self.fGHz = param['fGHz']
self.nf = len(self.fGHz)
for f in param['fGHz']:
if ((int(f*10))%10)==0:
_filename1 = 'E_port'+str(param['p'])+'_f'+str(int(f))+'GHz.txt'
_filename2 = 'E_port'+str(param['p'])+'_f'+str(int(f))+'Ghz.txt'
# print 'toto'
else:
_filename1 = 'E_port'+str(param['p'])+'_f'+str(f)+'GHz.txt'
_filename2 = 'E_port'+str(param['p'])+'_f'+str(f)+'Ghz.txt'
filename1 = pyu.getlong(_filename1, param['directory'])
filename2 = pyu.getlong(_filename2, param['directory'])
try:
df = pd.read_csv(filename1,sep=';')
except:
df = pd.read_csv(filename2,sep=';')
columns = df.columns
theta = (df[columns[0]]*np.pi/180).values.reshape(72,37)
phi = (df[columns[1]]*np.pi/180).values.reshape(72,37)
modGrlzdB = df[columns[2]]
mFt = df[columns[3]]
pFt = df[columns[4]]
mFp = df[columns[5]]
pFp = df[columns[6]]
ratiodB = df[columns[7]]
Ft = (10**(mFt/20)*np.exp(1j*pFt*np.pi/180)).values.reshape(72,37)
Fp = (10**(mFp/20)*np.exp(1j*pFp*np.pi/180)).values.reshape(72,37)
Ft = Ft.swapaxes(0,1)
Fp = Fp.swapaxes(0,1)
try:
tFt=np.concatenate((tFt,Ft[...,None]),axis=2)
tFp=np.concatenate((tFp,Fp[...,None]),axis=2)
except:
tFt=Ft[...,None]
tFp=Fp[...,None]
self.phi = phi[:,0]
self.theta = theta[0,:]
self.nth = len(self.theta)
self.nph = len(self.phi)
Ft = tFt
Fp = tFp
return Ft,Fp
def __pHertz(self,**kwargs):
""" Hertz dipole
"""
defaults = {'param':{'le':np.array([0,0,1])}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
#k = 2*np.pi*self.fGHz[None,None,None,:]/0.3
param=kwargs['param']
if self.grid:
le = param['le'][:,None,None]
xr = np.sin(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
yr = np.sin(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
zr = np.cos(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
r = np.concatenate((xr,yr,zr),axis=0)
xp = -np.sin(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
yp = np.cos(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
zp = np.zeros(len(self.phi))[None,None,:]*np.ones(len(self.theta))[None,:,None]
ph = np.concatenate((xp,yp,zp),axis=0)
xt = np.cos(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
yt = np.cos(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
zt = -np.sin(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
th = np.concatenate((xt,yt,zt),axis=0)
vec = le - np.einsum('kij,kij->ij',le,r)[None,...]*r
#G = 1j*30*k*vec
Ft = np.sqrt(3/2.)*np.einsum('kij,kij->ij',vec,th)[...,None]
Fp = np.sqrt(3/2.)*np.einsum('kij,kij->ij',vec,ph)[...,None]
else:
le = param['le'][:,None]
xr = np.sin(self.theta)*np.cos(self.phi)
yr = np.sin(self.theta)*np.sin(self.phi)
zr = np.cos(self.theta)
r = np.concatenate((xr,yr,zr),axis=0)
xp = -np.sin(self.phi)
yp = np.cos(self.phi)
zp = np.zeros(len(self.phi))
ph = np.concatenate((xp,yp,zp),axis=0)
xt = np.cos(self.theta)*np.cos(self.phi)
yt = np.cos(self.theta)*np.sin(self.phi)
zt = -np.sin(self.theta)
th = np.concatenate((xt,yt,zt),axis=0)
vec = le - np.einsum('ki,ki->i',le,r)[None,...]*r
#G = 1j*30*k*vec
Ft = np.sqrt(3/2.)*np.einsum('ki,ki->i',vec,th)[...,None]
Fp = np.sqrt(3/2.)*np.einsum('ki,ki->i',vec,ph)[...,None]
return Ft,Fp
def __pHuygens(self,**kwargs):
""" Huygens source
param : dict
le : direction of electric current
n : normal to aperture
"""
defaults = {'param':{'le':np.array([0,0,1]),
'n':np.array([1,0,0])}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
#k = 2*np.pi*self.fGHz[None,None,None,:]/0.3
param=kwargs['param']
if self.grid:
le = param['le'][:,None,None]
n = param['n'][:,None,None]
xr = np.sin(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
yr = np.sin(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
zr = np.cos(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
r = np.concatenate((xr,yr,zr),axis=0)
xp = -np.sin(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
yp = np.cos(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
zp = np.zeros(len(self.phi))[None,None,:]*np.ones(len(self.theta))[None,:,None]
ph = np.concatenate((xp,yp,zp),axis=0)
xt = np.cos(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
yt = np.cos(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
zt = -np.sin(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
th = np.concatenate((xt,yt,zt),axis=0)
vec1 = le - np.einsum('kij,kij->ij',le,r)[None,...]*r
cro1 = np.cross(le,n,axisa=0,axisb=0,axisc=0)
vec2 = np.cross(cro1,r,axisa=0,axisb=0,axisc=0)
vec = vec1-vec2
#G = 1j*30*k*vec
Ft = np.sqrt(3/4.)*np.einsum('kij,kij->ij',vec,th)[...,None]
Fp = np.sqrt(3/4.)*np.einsum('kij,kij->ij',vec,ph)[...,None]
#Ft = np.einsum('kij,kij->ij',vec,th)[...,None]
#Fp = np.einsum('kij,kij->ij',vec,ph)[...,None]
else:
le = param['le'][:,None]
xr = np.sin(self.theta)*np.cos(self.phi)
yr = np.sin(self.theta)*np.sin(self.phi)
zr = np.cos(self.theta)
r = np.concatenate((xr,yr,zr),axis=0)
xp = -np.sin(self.phi)
yp = np.cos(self.phi)
zp = np.zeros(len(self.phi))
ph = np.concatenate((xp,yp,zp),axis=0)
xt = np.cos(self.theta)*np.cos(self.phi)
yt = np.cos(self.theta)*np.sin(self.phi)
zt = -np.sin(self.theta)
th = np.concatenate((xt,yt,zt),axis=0)
vec1 = le - np.einsum('ki,ki->i',le,r)[None,...]*r
cro1 = np.cross(le,n,axisa=0,axisb=0,axisc=0)
vec2 = np.cross(cro1,r,axisa=0,axisb=0,axisc=0)
vec = vec1-vec2
#G = 1j*30*k*vec
Ft = np.sqrt(3)*np.einsum('ki,ki->i',vec,th)[...,None]
Fp = np.sqrt(3)*np.einsum('ki,ki->i',vec,ph)[...,None]
return Ft,Fp
    def __pArray(self,**kwargs):
        """ Array factor

        Evaluate the field radiated by an antenna array : element pattern
        times excitation weights, coupling matrix and steering phase.

        Parameters
        ----------
        Sc : np.array
            coupling S matrix

        Notes
        -----
        Nd : Number of directions
        Np : Number of points (antenna elements)
        Nf : Number of frequency
        Nb : Number of beams
        """
        defaults = {'param':{'Sc':[]}}
        if 'param' not in kwargs or kwargs['param']=={}:
            kwargs['param']=defaults['param']
        self.param = kwargs['param']
        # wavelength (m) from frequency in GHz (c ~ 0.3 m/ns)
        lamda = (0.3/self.fGHz)
        k = 2*np.pi/lamda
        if self.grid:
            # direction cosines on the (theta, phi) grid, then flattened
            sx = np.sin(self.theta[:,None])*np.cos(self.phi[None,:]) # Ntheta x Nphi
            sy = np.sin(self.theta[:,None])*np.sin(self.phi[None,:]) # Ntheta x Nphi
            sz = np.cos(self.theta[:,None])*np.ones(len(self.phi))[None,:] # Ntheta x Nphi
            sx = sx.reshape(self.nth*self.nph)
            sy = sy.reshape(self.nth*self.nph)
            sz = sz.reshape(self.nth*self.nph)
        else:
            sx = np.sin(self.theta)*np.cos(self.phi) # ,Nd
            sy = np.sin(self.theta)*np.sin(self.phi) # ,Nd
            sz = np.cos(self.theta) # ,Nd
        self.s = np.vstack((sx,sy,sz)).T # Nd x 3
        #
        # F = exp(+jk s.p)
        #
        # flatten element positions self.p to 3 x Np if needed
        lshp = np.array(self.p.shape)
        if len(lshp)>2:
            Np = np.prod(lshp[1:])
            p = self.p.reshape(3,Np)
        else:
            p = self.p
            Np = p.shape[1]
        self.Sc = self.param['Sc']
        # NOTE(review): == [] is a safe emptiness test only while Sc is a
        # plain list ; an ndarray Sc would make this comparison elementwise
        if self.Sc==[]:
            # Sc : Np x Np x Nf  (identity = no coupling)
            self.Sc = np.eye(Np)[...,None]
        #Sc2 = np.random.rand(Np,Np)[...,None]
        #pdb.set_trace()
        #
        # Get the weights
        #
        # w : b x a x f
        lshw = np.array(self.w.shape)
        if len(lshw)>2:
            Np2 = np.prod(lshw[0:-1])
            assert(Np2==Np)
            w = self.w.reshape(Np,lshw[-1])
        else:
            w = self.w
        # s : Nd x 3
        # p : 3 x Np
        #
        # sdotp : Nd x Np
        sdotp = np.dot(self.s,p) # s . p
        # NOTE(review): aFt/aFp are overwritten at each iteration, so only the
        # pattern of the LAST element of self.la is used below — presumably
        # all elements share the same pattern ; confirm
        for a in self.la:
            if not self.grid:
                a.eval(grid=self.grid,ph=self.phi,th=self.theta)
            else:
                a.eval(grid=self.grid)
            # aFt : Nt x Np x Nf |Nd x Nf
            # aFp : Nt x Np x Nf |Nd x Nf
            aFt = a.Ft
            aFp = a.Fp
        #
        # Force conversion to Nd x Nf
        #
        shF = aFt.shape
        aFt = aFt.reshape(np.prod(shF[0:-1]),shF[-1])
        aFp = aFp.reshape(np.prod(shF[0:-1]),shF[-1])
        #
        # Same pattern on each point
        #
        aFt = aFt[:,None,:]
        aFp = aFp[:,None,:]
        #
        # Nf : frequency
        # Nd : direction
        # Np : points or array antenna element position
        # Nb : number of beams
        #
        # w : Np x Nf
        # Sc : Np x Np x Nf
        #
        #
        # w' = w.Sc Np x Nf
        #
        # Coupling is implemented here
        # Rules : The repeated index k is the common dimension of the product
        # w : Np(k) x Nf(i)
        # Sc : Np(k) x Np(m) x Nf(i)
        # wp : Np(m) x Nf(i)
        wp = np.einsum('ki,kmi->mi',w,self.Sc)
        # add direction axis (=0) in w
        #if len(.w.shape)==3:
        #    self.wp = self.wp[None,:,:,:]
        # aFT : Nd x Np x Nf
        # E : Nd x Np x Nf  (steering phase term)
        E = np.exp(1j*k[None,None,:]*sdotp[:,:,None])
        #
        # wp : Np x Nf
        # Fp : Nd x Np x Nf
        # Ft : Nd x Np x Nf
        #
        Ft = wp[None,...]*aFt*E
        Fp = wp[None,...]*aFp*E
        if self.grid:
            #
            # Integrate over the Np points (axis =1)
            # only if self.grid
            # Fp : Nd x Nf
            # Ft : Nd x Nf
            #
            Ft = np.sum(Ft,axis=1)
            Fp = np.sum(Fp,axis=1)
            sh = Ft.shape
            Ft = Ft.reshape(self.nth,self.nph,sh[1])
            Fp = Fp.reshape(self.nth,self.nph,sh[1])
        return Ft,Fp
def radF(self):
""" evaluate radiation fonction w.r.t polarization
self.pol : 't' : theta , 'p' : phi n, 'c' : circular
"""
assert self.pol in ['t','p','c']
if self.pol=='p':
Fp = self.sqG
if len(self.sqG.shape)==3:
Ft = np.array([0])*np.ones(len(self.fGHz))[None,None,:]
else:
Ft = np.array([0])*np.ones(len(self.fGHz))[None,:]
if self.pol=='t':
if len(self.sqG.shape)==3:
Fp = np.array([0])*np.ones(len(self.fGHz))[None,None,:]
else:
Fp = np.array([0])*np.ones(len(self.fGHz))[None,:]
Ft = self.sqG
if self.pol=='c':
Fp = (1./np.sqrt(2))*self.sqG
Ft = (1j/np.sqrt(2))*self.sqG
return Ft,Fp
    def gain(self):
        """ calculates antenna gain

        Returns
        -------
        self.G : np.array(Nt,Np,Nf) dtype:float
            linear gain
            or np.array(Nr,Nf)
        self.sqG : np.array(Nt,Np,Nf) dtype:float
            linear square root of gain
            or np.array(Nr,Nf)
        self.efficiency : np.array (,Nf) dtype:float
            efficiency
        self.hpster : np.array (,Nf) dtype:float
            half power solid angle : 1 ~ 4pi steradian
        self.ehpbw : np.array (,Nf) dtyp:float
            equivalent half power beamwidth (radians)

        Notes
        -----

        .. math::  G(\\theta,\\phi) = |F_{\\theta}|^2 + |F_{\\phi}|^2
        """
        # G = |Ft|^2 + |Fp|^2, kept real
        self.G = np.real( self.Fp * np.conj(self.Fp)
                         + self.Ft * np.conj(self.Ft) )
        if self.grid:
            # angular steps (assumes uniform theta and phi sampling)
            dt = self.theta[1]-self.theta[0]
            dp = self.phi[1]-self.phi[0]
            Nt = len(self.theta)
            Np = len(self.phi)
            # weight by sin(theta) for the solid-angle integration
            Gs = self.G*np.sin(self.theta)[:,None,None]*np.ones(Np)[None,:,None]
            self.efficiency = np.sum(np.sum(Gs,axis=0),axis=0)*dt*dp/(4*np.pi)
            self.sqG = np.sqrt(self.G)
            self.GdB = 10*np.log10(self.G)
            # GdBmax (,Nf)
            # Get direction of Gmax and get the polarisation state in that direction
            #
            self.GdBmax = np.max(np.max(self.GdB,axis=0),axis=0)
            # indices (theta, phi, f) of the first maximum
            self.umax = np.array(np.where(self.GdB==self.GdBmax))[:,0]
            self.theta_max = self.theta[self.umax[0]]
            self.phi_max = self.phi[self.umax[1]]
            M = geu.SphericalBasis(np.array([[self.theta_max,self.phi_max]]))
            # sl : pointing direction ; uth/uph : spherical unit vectors
            self.sl = M[:,2].squeeze()
            uth = M[:,0]
            uph = M[:,1]
            # E-field orientation at the gain maximum, normalized
            el = self.Ft[tuple(self.umax)]*uth + self.Fp[tuple(self.umax)]*uph
            eln = el/np.linalg.norm(el)
            self.el = np.abs(eln.squeeze())
            self.hl = np.cross(self.sl,self.el)
            #assert((self.efficiency<1.0).all()),pdb.set_trace()
            self.hpster=np.zeros(len(self.fGHz))
            self.ehpbw=np.zeros(len(self.fGHz))
            for k in range(len(self.fGHz)):
                U = np.zeros((Nt,Np))
                A = self.GdB[:,:,k]*np.ones(Nt)[:,None]*np.ones(Np)[None,:]
                # directions within 3 dB of the maximum
                u = np.where(A>(self.GdBmax[k]-3))
                U[u] = 1
                V = U*np.sin(self.theta)[:,None]
                # half-power solid angle normalized to 4 pi
                self.hpster[k] = np.sum(V)*dt*dp/(4*np.pi)
                self.ehpbw[k] = np.arccos(1-2*self.hpster[k])
        else:
            self.sqG = np.sqrt(self.G)
            self.GdB = 10*np.log10(self.G)
    def plotG(self,**kwargs):
        """ antenna plot gain in 2D

        Parameters
        ----------

        fGHz : frequency
        plan : 'theta' | 'phi' depending on the selected plan to be displayed
        angdeg : phi or theta in degrees, if plan=='phi' it corresponds to theta
        GmaxdB : max gain to be displayed
        polar : boolean

        Returns
        -------

        fig
        ax

        Examples
        --------

        .. plot::
            :include-source:

            >>> import matplotlib.pyplot as plt
            >>> from pylayers.antprop.antenna import *
            >>> A = Antenna('defant.vsh3')
            >>> fig,ax = A.plotG(fGHz=[2,3,4],plan='theta',angdeg=0)
            >>> fig,ax = A.plotG(fGHz=[2,3,4],plan='phi',angdeg=90)

        """
        # make sure the pattern has been evaluated before plotting
        if not self.evaluated:
            self.eval(pattern=True)
        dtr = np.pi/180.
        defaults = {'fGHz' : [],
                    'dyn' : 8 ,
                    'plan': 'phi',
                    'angdeg' : 90,
                    'legend':True,
                    'GmaxdB':20,
                    'polar':True,
                    'topos':False,
                    'source':'satimo',
                    'show':True,
                    'mode':'index',
                    'color':'black',
                    'u':0,
                    }
        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]
        # keep extra kwargs apart from the recognized defaults
        args = {}
        for k in kwargs:
            if k not in defaults:
                args[k] = kwargs[k]
        if 'fig' not in kwargs:
            fig = plt.figure(figsize=(8, 8))
        else:
            fig = kwargs['fig']
        if 'ax' not in kwargs:
            #ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True, facecolor='#d5de9c')
            if kwargs['polar']:
                ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True )
            else:
                ax = fig.add_subplot(111)
        else:
            ax = kwargs['ax']
        u = kwargs['u']
        rc('grid', color='#316931', linewidth=1, linestyle='-')
        rc('xtick', labelsize=15)
        rc('ytick', labelsize=15)
        # displayed dynamic range : dyn steps of 5 dB
        DyndB = kwargs['dyn'] * 5
        GmindB = kwargs['GmaxdB'] - DyndB
        #print "DyndB",DyndB
        #print "GmindB",GmindB
        # force square figure and square axes looks better for polar, IMO
        t1 = np.arange(5, DyndB + 5, 5)
        t2 = np.arange(GmindB + 5, kwargs['GmaxdB'] + 5, 5)
        col = ['k', 'r', 'g', 'b', 'm', 'c', 'y']
        cpt = 0
        #if len(self.fGHz) > 1 :
        #    fstep = self.fGHz[1]-self.fGHz[0]
        #else :
        #    fstep = np.array((abs(self.fGHz-kwargs['fGHz'][0])+1))
        #dtheta = self.theta[1,0]-self.theta[0,0]
        #dphi = self.phi[0,1]-self.phi[0,0]
        dtheta = self.theta[1]-self.theta[0]
        dphi = self.phi[1]-self.phi[0]
        if kwargs['fGHz']==[]:
            lfreq = [self.fGHz[0]]
        else:
            lfreq = kwargs['fGHz']
        for f in lfreq:
            # select the closest available frequency index
            df = abs(self.fGHz-f)
            ik0 = np.where(df==min(df))
            ik = ik0[0][0]
            #ik=0
            chaine = 'f = %3.2f GHz' %(self.fGHz[ik])
            # all theta
            if kwargs['plan']=='theta':
                itheta = np.arange(self.nth)
                iphi1 = np.where(abs(self.phi-kwargs['angdeg']*dtr)<dphi)[0][0]
                Np = self.nph
                # 0 <= theta <= pi/2
                u1 = np.where((self.theta <= np.pi / 2.) & (self.theta >= 0))[0]
                # 0 < theta < pi
                u2 = np.arange(self.nth)
                # pi/2 < theta <= pi
                u3 = np.nonzero((self.theta <= np.pi) & ( self.theta > np.pi / 2))[0]
                #
                # handle broadcasted axis =1 --> index 0
                shsqG = self.sqG.shape
                if shsqG[0]==1:
                    u1 = 0
                    u2 = 0
                    u3 = 0
                if shsqG[1]==1:
                    iphi1 = 0
                    iphi2 = 0
                if len(shsqG)==3: # if only one frequency point
                    if shsqG[2]==1:
                        ik = 0
                else:
                    if shsqG[3]==1:
                        ik = 0
                # handle parity : iphi2 is the antipodal phi cut
                # NOTE(review): under python 3, Np / 2 yields a float and so
                # does np.mod — confirm these indices are cast before indexing
                if np.mod(Np, 2) == 0:
                    iphi2 = np.mod(iphi1 + Np / 2, Np)
                else:
                    iphi2 = np.mod(iphi1 + (Np - 1) / 2, Np)
                if len(shsqG)==3:
                    arg1 = (u1,iphi1,ik)
                    arg2 = (u2,iphi2,ik)
                    arg3 = (u3,iphi1,ik)
                else:
                    if shsqG[3]==1:
                        u = 0
                    arg1 = (u1,iphi1,u,ik)
                    arg2 = (u2,iphi2,u,ik)
                    arg3 = (u3,iphi1,u,ik)
                # polar diagram
                #pdb.set_trace()
                if kwargs['polar']:
                    if kwargs['source']=='satimo':
                        r1 = -GmindB + 20 * np.log10( self.sqG[arg1]+1e-12)
                        r2 = -GmindB + 20 * np.log10( self.sqG[arg2]+1e-12)
                        r3 = -GmindB + 20 * np.log10( self.sqG[arg3]+1e-12)
                        #print max(r1)+GmindB
                        #print max(r2)+GmindB
                        #print max(r3)+GmindB
                    if kwargs['source']=='cst':
                        # cst patterns are scaled by sqrt(30)
                        r1 = -GmindB + 20 * np.log10( self.sqG[arg1]/np.sqrt(30)+1e-12)
                        r2 = -GmindB + 20 * np.log10( self.sqG[arg2]/np.sqrt(30)+1e-12)
                        r3 = -GmindB + 20 * np.log10( self.sqG[arg3]/np.sqrt(30)+1e-12)
                    if type(r1)!= np.ndarray:
                        r1 = np.array([r1])*np.ones(len(self.phi))
                    if type(r2)!= np.ndarray:
                        r2 = np.array([r2])*np.ones(len(self.phi))
                    if type(r3)!= np.ndarray:
                        r3 = np.array([r3])*np.ones(len(self.phi))
                    # clip radii below the displayed dynamic
                    negr1 = np.nonzero(r1 < 0)
                    negr2 = np.nonzero(r2 < 0)
                    negr3 = np.nonzero(r3 < 0)
                    r1[negr1[0]] = 0
                    r2[negr2[0]] = 0
                    r3[negr3[0]] = 0
                    # stitch the three angular sectors into one closed curve
                    r = np.hstack((r1[::-1], r2, r3[::-1], r1[-1]))
                    a1 = np.arange(0, 360, 30)
                    a2 = [90, 60, 30, 0, 330, 300, 270, 240, 210, 180, 150, 120]
                    rline2, rtext2 = plt.thetagrids(a1, a2)
                # linear diagram
                else:
                    r1 = 20 * np.log10( self.sqG[arg1]+1e-12)
                    r2 = 20 * np.log10( self.sqG[arg2]+1e-12)
                    r3 = 20 * np.log10( self.sqG[arg3]+1e-12)
                    r = np.hstack((r1[::-1], r2, r3[::-1], r1[-1]))
                # angular basis for phi
                angle = np.linspace(0, 2 * np.pi, len(r), endpoint=True)
                plt.title(u'$\\theta$ plane')
            if kwargs['plan']=='phi':
                iphi = np.arange(self.nph)
                itheta = np.where(abs(self.theta-kwargs['angdeg']*dtr)<dtheta)[0][0]
                angle = self.phi[iphi]
                if len(self.sqG.shape)==3:
                    arg = [itheta,iphi,ik]
                else:
                    arg = [itheta,iphi,u,ik]
                if kwargs['polar']:
                    if np.prod(self.sqG.shape)!=1:
                        r = -GmindB + 20 * np.log10(self.sqG[arg])
                        neg = np.nonzero(r < 0)
                        r[neg] = 0
                    else:
                        r = -GmindB+ 20*np.log10(self.sqG[0,0,0]*np.ones(np.shape(angle)))
                    # plt.title(u'H plane - $\phi$ degrees')
                    a1 = np.arange(0, 360, 30)
                    a2 = [0, 30, 60, 90, 120 , 150 , 180 , 210, 240 , 300 , 330]
                    #rline2, rtext2 = plt.thetagrids(a1, a2)
                else:
                    r = 20 * np.log10(self.sqG[arg])
                    plt.title(u'$\\phi$ plane ')
            # actual plotting
            if len(lfreq)>1:
                ax.plot(angle, r, color=col[cpt], lw=2, label=chaine)
            else:
                ax.plot(angle, r, color=kwargs['color'], lw=2, label=chaine)
            cpt = cpt + 1
        if kwargs['polar']:
            rline1, rtext1 = plt.rgrids(t1, t2)
            #ax.set_rmax(t1[-1])
            #ax.set_rmin(t1[0])
        if kwargs['legend']:
            ax.legend()
        if kwargs['show']:
            plt.ion()
            plt.show()
        return(fig,ax)
class Antenna(Pattern):
""" Antenna
Attributes
----------
name : Antenna name
nf : number of frequency
nth : number of theta
nph : number of phi
Ft : Normalized Ftheta (ntheta,nphi,nf)
Fp : Normalized Fphi (ntheta,nphi,nf)
sqG : square root of gain (ntheta,nphi,nf)
theta : theta base 1 x ntheta
phi : phi base 1 x phi
C : VSH Coefficients
Methods
-------
info : Display information about antenna
vsh : calculates Vector Spherical Harmonics
show3 : Geomview diagram
plot3d : 3D diagram plotting using matplotlib toolkit
Antenna trx file can be stored in various order
natural : HFSS
    ncp : near field chamber
It is important when initializing an antenna object
to be aware of the typ of trx file
.trx (ASCII Vectorial antenna Pattern)
F Phi Theta Fphi Ftheta
"""
    def __init__(self,typ='Omni',**kwargs):
        """ class constructor

        Parameters
        ----------

        typ : 'Omni','Gauss','WirePlate','3GPP','atoll'
            or a file name whose extension selects the loader
        _filename : string
            antenna file name
        directory : str
            antenna subdirectory of the current project
            the file is seek in the $BASENAME/ant directory
        nf : integer
            number of frequency
        ntheta : integer
            number of theta (default 181)
        nphi : integer
            number of phi (default 90)
        source : string
            source of data { 'satimo' | 'cst' | 'hfss' }

        Notes
        -----

        The supported data formats for storing antenna patterns are

        'mat': Matlab File
        'vsh2': unthresholded vector spherical coefficients
        'vsh3': thresholded vector spherical coefficients
        'atoll': Atoll antenna file format
        'trx' : Satimo NFC raw data
        'trx1' : Satimo NFC raw data (deprecated)

        A = Antenna('my_antenna.mat')

        """
        defaults = {'directory': 'ant',
                    'source':'satimo',
                    'ntheta':90,
                    'nphi':181,
                    'L':90, # L max
                    'param':{}
                    }
        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]
        # normalize fGHz to an ndarray whether a scalar or array was given
        if 'fGHz' in kwargs:
            if type(kwargs['fGHz'])==np.ndarray:
                self.fGHz=kwargs['fGHz']
            else:
                self.fGHz=np.array([kwargs['fGHz']])
        #mayavi selection
        self._is_selected=False
        self.source = kwargs['source']
        self.param = kwargs['param']
        #super(Antenna,self).__init__()
        #Pattern.__init__(self)
        #
        # if typ string has an extension it is a file
        #
        if isinstance(typ,str):
            AntennaName,Extension = os.path.splitext(typ)
            self.ext = Extension[1:]
            if self.ext=='':
                self.fromfile = False
            else:
                self.fromfile = True
        else:
            self.fromfile = True
        self.tau = 0
        self.evaluated = False
        #determine if pattern for all theta/phi is constructed
        self.full_evaluated = False
        if self.fromfile:
            if isinstance(typ,str):
                self._filename = typ
                # dispatch on the file extension
                if self.ext == 'vsh3':
                    self.typ='vsh3'
                    self.loadvsh3()
                if self.ext == 'vsh2':
                    self.typ='vsh2'
                    self.loadvsh2()
                if self.ext == 'sh3':
                    self.typ='sh3'
                    self.loadsh3()
                if self.ext == 'sh2':
                    self.typ='sh2'
                    self.loadsh2()
                if self.ext == 'trx1':
                    self.typ='trx'
                    # NOTE(review): self.nf/self.nth/self.nph are not set
                    # earlier on this code path — confirm they are provided
                    # by a loader or elsewhere before reaching here
                    self.load_trx(kwargs['directory'],self.nf,self.nth,self.nph)
                if self.ext == 'trx':
                    self.typ='trx'
                    self.loadtrx(kwargs['directory'])
                if self.ext == 'mat':
                    self.typ='mat'
                    self.loadmat(kwargs['directory'])
                if self.ext == 'cst':
                    self.typ='cst'
                if self.ext == 'txt':
                    self.typ='atoll'
                    self.load_atoll(kwargs['directory'])
            elif isinstance(typ,list):
                self._filename = typ
                self.ext='hfss'
                self.loadhfss(typ, self.nth, self.nph)
        else:
            # built-in analytic pattern : evaluate immediately,
            # except vsh3 which only initializes the coefficient structure
            self.typ=typ
            self._filename=typ
            if self.typ=='vsh3':
                self.initvsh()
            else:
                self.eval()
    def __repr__(self):
        """ Return a human readable multi-line summary of the antenna state. """
        st = ''
        st = st + 'Antenna type : ' + self.typ +'\n'
        st = st+'------------------------\n'
        # parameters of the analytic pattern, when any
        if 'param' in self.__dict__:
            for k in self.param:
                st = st + ' ' + k + ' : ' + str(self.param[k])+'\n'
        # atoll antennas : list the (band, polarization) entries
        if hasattr(self,'atoll'):
            for k1 in self.atoll.keys():
                st = st + str(k1)+'\n'
                for k2 in self.atoll[k1]:
                    st = st + ' '+ str(k2)+'\n'
        st = st+'------------------------\n'
        rtd = 180./np.pi
        if self.fromfile:
            if isinstance(self._filename,str):
                st = st + 'file name : ' + self._filename+'\n'
            else:
                # hfss antennas carry a list of file names
                for i in range(len(self._filename)):
                    st = st + 'FileName : ' + self._filename[i]+'\n'
        # #st = st + 'file type : ' + self.typ+'\n'
        if 'fGHz' in self.__dict__:
            st = st + "fmin : %4.2f" % (self.fGHz[0]) + "GHz\n"
            st = st + "fmax : %4.2f" % (self.fGHz[-1]) + "GHz\n"
            try:
                # frequency step needs at least two frequency points
                st = st + "step : %4.2f" % (1000*(self.fGHz[1]-self.fGHz[0])) + "MHz\n"
            except:
                st = st + "step : None\n"
            st = st + "Nf : %d" % (len(self.fGHz)) +"\n"
        #
        #
        if hasattr(self,'C'):
            st = st + self.C.__repr__()
        if hasattr(self,'S'):
            st = st + self.S.__repr__()
        if self.evaluated:
            st = st + '-----------------------\n'
            st = st + ' evaluated \n'
            st = st + '-----------------------\n'
            st = st + "Ntheta : %d" % (self.nth) + "\n"
            st = st + "Nphi : %d" % (self.nph) + "\n"
            # kwargs[k] = defaults[k]
            # locate the maximum of sqG (grid or ray indexing)
            u = np.where(self.sqG==self.sqG.max())
            if self.grid:
                if len(u[0])>1:
                    S = self.sqG[(u[0][0],u[1][0],u[2][0])]
                    ut = u[0][0]
                    up = u[1][0]
                    uf = u[2][0]
                else:
                    S = self.sqG[u]
                    ut = u[0]
                    up = u[1]
                    uf = u[2]
            else:
                if len(u[0])>1:
                    S = self.sqG[(u[0][0],u[1][0])]
                    ud = u[0][0]
                    uf = u[1][0]
                else:
                    S = self.sqG[u]
                    ud = u[0]
                    uf = u[1]
            st = st + "GdBmax :"+str(self.GdBmax[0])+' '+str(self.GdBmax[-1])+'\n'
            st = st + "Gmax direction : .sl" + str(self.sl)+'\n'
            st = st + "Orientation of E field in Gmax direction : .el " + str(self.el)+'\n'
            st = st + "Orientation of H field in Gmax direction : .hl " + str(self.hl)+'\n'
            st = st + "effective HPBW : .ehpbw " + str(self.ehpbw[0])+' '+str(self.ehpbw[-1])+'\n'
            # NOTE(review): GdB below is computed but only used by the
            # commented print line — kept for reference
            if self.source=='satimo':
                GdB = 20*np.log10(S)
            # see WHERE1 D4.1 sec 3.1.1.2.2
            if self.source=='cst':
                GdB = 20*np.log10(S/np.sqrt(30))
            #st = st + "GmaxdB : %4.2f dB \n" % (GdB)
            st = st + " f = %4.2f GHz \n" % (self.fGHz[uf])
            if self.grid:
                st = st + " theta = %4.2f (degrees) \n" % (self.theta[ut]*rtd)
                st = st + " phi = %4.2f (degrees) \n" % (self.phi[up]*rtd)
            else:
                st = st + " Ray n :" + str(ud)+' \n'
        else:
            st = st + 'Not evaluated\n'
        #
        #
        # if self.typ == 'mat':
        # #st = st + self.DataFile + '\n'
        # st = st + 'antenna name : '+ self.AntennaName + '\n'
        # st = st + 'date : ' + self.Date +'\n'
        # st = st + 'time : ' + self.StartTime +'\n'
        # st = st + 'Notes : ' + self.Notes+'\n'
        # st = st + 'Serie : ' + str(self.Serie)+'\n'
        # st = st + 'Run : ' + str(self.Run)+'\n'
        # st = st + "Nb theta (lat) : "+ str(self.nth)+'\n'
        # st = st + "Nb phi (lon) :"+ str(self.nph)+'\n'
        #
        # if self.typ == 'Gauss':
        # st = st + 'Gaussian pattern' + '\n'
        # st = st + 'phi0 : ' + str(self.p0) +'\n'
        # st = st + 'theta0 :' + str(self.t0) + '\n'
        # st = st + 'phi 3dB :' + str(self.p3) + '\n'
        # st = st + 'theta 3dB :' + str(self.t3) + '\n'
        # st = st + 'Gain dB :' + str(self.GdB) + '\n'
        # st = st + 'Gain linear :' + str(self.G ) + '\n'
        # st = st + 'sqrt G :' + str(self.sqG) + '\n'
        return(st)
def initvsh(self,lmax=45):
""" Initialize a void vsh structure
Parameters
----------
fGHz : array
lmax : int
level max
"""
nf = len(self.fGHz)
Br = 1j * np.zeros((nf, lmax, lmax-1))
Bi = 1j * np.zeros((nf, lmax, lmax-1))
Cr = 1j * np.zeros((nf, lmax, lmax-1))
Ci = 1j * np.zeros((nf, lmax, lmax-1))
Br = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Br)
Bi = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Bi)
Cr = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Cr)
Ci = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Ci)
self.C = VSHCoeff(Br, Bi, Cr, Ci)
def ls(self, typ='vsh3'):
""" list the antenna files in antenna project directory
Parameters
----------
typ : string optional
{'mat'|'trx'|'vsh2'|'sh2'|'vsh3'|'sh3'}
Returns
-------
lfile_s : list
sorted list of all the .str file of strdir
"""
if typ=='vsh3':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='sh3':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='mat':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='trx':
pathname = pstruc['DIRANT'] + '/*.' + typ
lfile_l = glob.glob(basename+'/'+pathname)
lfile_s = []
for fi in lfile_l:
fis = pyu.getshort(fi)
lfile_s.append(fis)
lfile_s.sort()
return lfile_s
    def photo(self,directory=''):
        """ show a picture of the antenna

        Parameters
        ----------

        directory : string
            sub-directory holding the photo ; defaults to
            ant/UWBAN/PhotosVideos when empty

        """
        if directory == '':
            directory = os.path.join('ant','UWBAN','PhotosVideos')
        # photo file name is derived from the second '-' field of PhotoFile
        _filename = 'IMG_'+self.PhotoFile.split('-')[1]+'.JPG'
        filename = pyu.getlong(_filename,directory)
        # the image module name differs between python 2 (Image) and 3 (image)
        if sys.version_info.major==2:
            I = Image.open(filename)
        else:
            I = image.open(filename)
        I.show()
    def load_atoll(self,directory="ant"):
        """ load antenna from Atoll file

        Atoll format provides Antenna gain given for the horizontal and vertical plane
        for different frequencies and different tilt values

        Parameters
        ----------

        directory : string

        The dictionnary atoll is created with the hierarchy
        atoll[band][polarization]['hor'|'ver'|'tilt'|'freq']

        """
        _filemat = self._filename
        fileatoll = pyu.getlong(_filemat, directory)
        fd = open(fileatoll)
        lis = fd.readlines()
        # keep only non-empty tab separated rows
        tab = []
        for li in lis:
            lispl= li.split('\t')
            if (lispl[0]!=''):
                tab.append(lispl)
        deg_to_rad = np.pi/180.
        lbs_to_kg = 0.45359237
        columns = tab[0]
        #pdb.set_trace()
        # build the DataFrame row by row
        # NOTE(review): DataFrame.append is removed in recent pandas ;
        # the except branch only fires on the first iteration (dff unbound)
        for k in np.arange(len(tab)-1):
            df = pd.DataFrame([tab[k+1]],columns=columns)
            try:
                dff=dff.append(df)
            except:
                dff= df
        self.raw = dff
        # select and rename the columns of interest
        dff = dff.iloc[:,[0,8,9,10,2,5,7,14,11,16,17,13,6,12]]
        #dff = df['Name','Gain (dBi)','FMin','FMax','FREQUENCY','Pattern','V_WIDTH','H_WIDTH','DIMENSIONS HxWxD (INCHES)','WEIGHT (LBS)']
        dff.columns = ['Name','Fmin','Fmax','F','Gmax','G','Hpbw','H_width','V_width','HxWxD','Weight','Tilt','Etilt','Ftob']
        dff=dff.apply(lambda x :pd.to_numeric(x,errors='ignore'))
        #
        # Parse polarization in the field name
        #
        upolarp45 = ['(+45)' in x for x in dff['Name']]
        upolarm45 = ['(-45)' in x for x in dff['Name']]
        if (sum(upolarp45)>0):
            dff.loc[upolarp45,'Polar']=45
        if (sum(upolarm45)>0):
            dff.loc[upolarm45,'Polar']=-45
        atoll = {}
        # group by band, then polarization, then tilt, then frequency
        dfband = dff.groupby(['Fmin'])
        for b in dfband:
            keyband = str(b[0])+'-'+str(b[1]['Fmax'].values[0])
            atoll[keyband]={} # band
            dfpol = b[1].groupby(['Polar'])
            for p in dfpol:
                atoll[keyband][p[0]] = {} # polar
                dftilt = p[1].groupby(['Tilt'])
                Ghor = np.empty((360,1)) # angle , tilt , frequency
                Gver = np.empty((360,1)) # angle ,
                ct = 0
                tilt = []
                for t in dftilt:
                    dffreq = t[1].groupby(['F'])
                    ct+=1
                    cf=0
                    tilt.append(t[0])
                    freq = []
                    for f in dffreq:
                        freq.append(f[0])
                        cf+=1
                        if len(f[1])==1:
                            df = f[1]
                        else:
                            df = f[1].iloc[0:1]
                        Gmax = df['Gmax'].values
                        # the 'G' field packs both planes as a flat number list
                        str1 = df.loc[:,'G'].values[0].replace('  ',' ')
                        lstr = str1.split(' ')
                        Pattern = [ eval(x) for x in lstr[0:-1]]
                        # 4 fist field / # of points
                        Nd,db,dc,Np = Pattern[0:4]
                        #print(Nd,b,c,Np)
                        # horizontal plane : (angle, attenuation) pairs
                        tmp = np.array(Pattern[4:4+2*Np]).reshape(Np,2)
                        ah = tmp[:,0]
                        ghor = Gmax-tmp[:,1]
                        # 4 fist field / # of points
                        da,db,dc,dd = Pattern[4+2*Np:4+2*Np+4]
                        #pdb.set_trace()
                        #print a,b,c,d
                        # vertical plane : (attenuation, angle) pairs
                        tmp = np.array(Pattern[4+2*Np+4:]).reshape(dc,2)
                        gver = Gmax-tmp[:,0]
                        av = tmp[:,1]
                        try:
                            Ghor = np.hstack((Ghor,ghor[:,None]))
                            Gver = np.hstack((Gver,gver[:,None]))
                        except:
                            pdb.set_trace()
                # drop the np.empty seed column
                Ghor = np.delete(Ghor,0,1)
                Gver = np.delete(Gver,0,1)
                atoll[keyband][p[0]]['hor'] = Ghor.reshape(360,ct,cf)
                atoll[keyband][p[0]]['ver'] = Gver.reshape(360,ct,cf)
                atoll[keyband][p[0]]['tilt'] = np.array(tilt)
                atoll[keyband][p[0]]['freq'] = np.array(freq)
        self.atoll = atoll
        # Gmax = eval(self.df['Gain (dBi)'].values[0])
        #fig = plt.figure()
        #ax =plt.gca(projection='polar')
        #ax =plt.gca()
        #ax.plot(H2[:,1]*deg_to_rad,Gain-H2[:,0],'r',label='vertical',linewidth=2)
        #ax.plot(H1[:,0]*deg_to_rad,Gain-H1[:,1],'b',label='horizontal',linewidth=2)
        #ax.set_rmin(-30)
        #plt.title(dir1+'/'+filename+' Gain : '+df['Gain (dBi)'].values[0])
        #BXD-634X638XCF-EDIN.txt
        #BXD-636X638XCF-EDIN.txt
    def loadmat(self, directory="ant"):
        """ load an antenna stored in a mat file

        Parameters
        ----------
        directory : str , optional
            default 'ant'

        Examples
        --------

            Read an Antenna file in UWBAN directory and plot a polar plot

        .. plot::
            :include-source:

            >>> import matplotlib.pyplot as plt
            >>> from pylayers.antprop.antenna import *
            >>> A = Antenna('S1R1.mat',directory='ant/UWBAN/Matfile')
            >>> f,a = A.plotG(plan='theta',angdeg=0)
            >>> f,a = A.plotG(plan='phi',angdeg=90,fig=f,ax=a)
            >>> txt = plt.title('S1R1 antenna : st loadmat')
            >>> plt.show()

        """
        _filemat = self._filename
        filemat = pyu.getlong(_filemat, directory)
        # the mat file is expected to contain one struct named after the file
        d = io.loadmat(filemat, squeeze_me=True, struct_as_record=False)
        ext = _filemat.replace('.mat', '')
        d = d[ext]
        #
        #
        #
        self.typ = 'mat'
        self.Date = str(d.Date)
        self.Notes = str(d.Notes)
        self.PhotoFile = str(d.PhotoFile)
        self.Serie = eval(str(d.Serie))
        self.Run = eval(str(d.Run))
        self.DataFile = str(d.DataFile)
        self.StartTime = str(d.StartTime)
        self.AntennaName = str(d.AntennaName)
        self.fGHz = d.freq/1.e9
        self.theta = d.theta
        self.phi = d.phi
        self.Ft = d.Ftheta
        self.Fp = d.Fphi
        # axis reordering so frequency ends up on the last axis
        # (assumes the mat file stores frequency first — TODO confirm)
        self.Fp = self.Fp.swapaxes(0, 2)
        self.Fp = self.Fp.swapaxes(0, 1)
        self.Ft = self.Ft.swapaxes(0, 2)
        self.Ft = self.Ft.swapaxes(0, 1)
        # gain : G = |Ft|^2 + |Fp|^2
        Gr = np.real(self.Fp * np.conj(self.Fp) + \
                     self.Ft * np.conj(self.Ft))
        self.sqG = np.sqrt(Gr)
        self.nth = len(self.theta)
        self.nph = len(self.phi)
        # single frequency mat files store freq as a plain float
        if type(self.fGHz) == float:
            self.nf = 1
        else:
            self.nf = len(self.fGHz)
        self.evaluated = True
        self.grid = True
    def load_trx(self, directory="ant", nf=104, ntheta=181, nphi=90, ncol=6):
        """ load a trx file (deprecated)

        Parameters
        ----------
        directory : str
            directory where is located the trx file (default : ant)
        nf : float
            number of frequency points
        ntheta : float
            number of theta
        nphi : float
            number of phi
        ncol : int
            6 or 7 tab separated columns per data row

        TODO : DEPRECATED (Fix the Ft and Fp format with Nf as last axis)

        """
        _filetrx = self._filename
        filename = pyu.getlong(_filetrx, directory)
        # data rows are recognized by their number of tab separated fields
        if ncol == 6:
            pattern = """^.*\t.*\t.*\t.*\t.*\t.*\t.*$"""
        else:
            pattern = """^.*\t.*\t.*\t.*\t.*\t.*\t.*\t.*$"""
        fd = open(filename, 'r')
        d = fd.read().split('\r\n')
        fd.close()
        k = 0
        #while ((re.search(pattern1,d[k]) is None ) & (re.search(pattern2,d[k]) is None )):
        # skip header lines until the first data row matches
        while re.search(pattern, d[k]) is None:
            k = k + 1
        d = d[k:]
        N = len(d)
        del d[N - 1]
        r = '\t'.join(d)
        # NOTE(review): str.replace returns a new string — this line has no
        # effect on r ; confirm whether stripping spaces was intended
        r.replace(' ', '')
        d = np.array(r.split()).astype('float')
        #
        # TODO Parsing the header
        #
        #nf = 104
        #nphi = 90
        #ntheta = 181
        N = nf * nphi * ntheta
        d = d.reshape(N, 7)
        # columns : freq, phi, theta, Re(Fphi), Im(Fphi), Re(Fth), Im(Fth)
        F = d[:, 0]
        PHI = d[:, 1]
        THETA = d[:, 2]
        Fphi = d[:, 3] + d[:, 4] * 1j
        Ftheta = d[:, 5] + d[:, 6] * 1j
        self.Fp = Fphi.reshape((nf, nphi, ntheta))
        self.Ft = Ftheta.reshape((nf, nphi, ntheta))
        Ttheta = THETA.reshape((nf, nphi, ntheta))
        Tphi = PHI.reshape((nf, nphi, ntheta))
        Tf = F.reshape((nf, nphi, ntheta))
        # (nf, nphi, ntheta) -> (nf, ntheta, nphi)
        self.Fp = self.Fp.swapaxes(1, 2)
        self.Ft = self.Ft.swapaxes(1, 2)
        Ttheta = Ttheta.swapaxes(1, 2)
        Tphi = Tphi.swapaxes(1, 2)
        Tf = Tf.swapaxes(1, 2)
        self.fGHz = Tf[:, 0, 0]
        self.theta = Ttheta[0, :, 0]
        #self.phi = Tphi[0,0,:]
        #
        # Temporaire
        #
        # fold the 181-point theta axis into a 91 x 180 (theta, phi) grid
        A1 = self.Fp[:, 90:181, :]
        A2 = self.Fp[:, 0:91, :]
        self.Fp = np.concatenate((A1, A2[:, ::-1, :]), axis=2)
        A1 = self.Ft[:, 90:181, :]
        A2 = self.Ft[:, 0:91, :]
        self.Ft = np.concatenate((A1, A2[:, ::-1, :]), axis=2)
        self.theta = np.linspace(0, np.pi, 91)
        self.phi = np.linspace(0, 2 * np.pi, 180, endpoint=False)
        # NOTE(review): these shape attributes are hard coded and ignore the
        # nf/ntheta/nphi arguments — confirm
        self.nth = 91
        self.nph = 180
        self.nf = 104
        self.evaluated = True
def pattern(self,theta=[],phi=[],typ='s3'):
""" return multidimensionnal radiation patterns
Parameters
----------
theta : array
1xNt
phi : array
1xNp
typ : string
{s1|s2|s3}
"""
if theta == []:
theta = np.linspace(0,np.pi,30)
if phi == []:
phi = np.linspace(0,2*np.pi,60)
self.grid = True
Nt = len(theta)
Np = len(phi)
Nf = len(self.fGHz)
#Th = np.kron(theta, np.ones(Np))
#Ph = np.kron(np.ones(Nt), phi)
if typ =='s1':
FTh, FPh = self.Fsynth1(theta, phi)
if typ =='s2':
FTh, FPh = self.Fsynth2b(theta,phi)
if typ =='s3':
FTh, FPh = self.Fsynth3(theta, phi)
#FTh = Fth.reshape(Nf, Nt, Np)
#FPh = Fph.reshape(Nf, Nt, Np)
return(FTh,FPh)
def coeffshow(self,**kwargs):
    """ Display the antenna spherical-harmonics coefficient energy map.

    Keyword Arguments
    -----------------
    typ : string
        'ssh' | 'vsh' (default 'vsh') — which coefficient set to display
    L : int
        maximum level (default 20)
    kf : int
        frequency index (default 46)
    vmin : float
        color scale minimum in dB (default -40)
    vmax : float
        color scale maximum in dB (default 0)
    cmap : colormap
        default cm.hot_r
    dB : bool
        display energy in dB (default True)

    Returns
    -------
    fig, ax : matplotlib figure and main axes
    """
    defaults = {'typ':'vsh',
                'L':20,
                'kf':46,
                'vmin':-40,
                'vmax':0,
                'cmap':cm.hot_r,
                'dB':True
                }
    # fill in missing keyword arguments with defaults
    for k in defaults:
        if k not in kwargs:
            kwargs[k]=defaults[k]
    L  = kwargs['L']
    kf = kwargs['kf']
    # calculates mode energy E indexed as (f, l, m)
    # in linear scale; log scale is derived below
    if kwargs['typ']=='vsh':
        E  = self.C.energy(typ='s1')
    if kwargs['typ']=='ssh':
        E  = self.S.energy(typ='s1')
    # Aem : (f, l) — energy integrated over m
    Aem = np.sum(E,axis=2)
    Aem_dB = 10*np.log10(Aem)
    # Ael : (f, m) — energy integrated over l
    Ael = np.sum(E,axis=1)
    Ael_dB = 10*np.log10(Ael)
    fig, ax = plt.subplots()
    fig.set_figwidth(15)
    fig.set_figheight(10)
    # NOTE(review): when dB is False no image is created, so the
    # plt.colorbar(im, ...) call below raises NameError — confirm intent.
    if kwargs['dB']:
        im = ax.imshow(10*np.log10(E[kf,:,:]),
                       vmin = kwargs['vmin'],
                       vmax = kwargs['vmax'],
                       extent =[-L,L,L,0],
                       interpolation = 'nearest',
                       cmap = kwargs['cmap'])
    # marginal axes around the main energy map
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    axHistx = divider.append_axes("top", 1., pad=0.5, sharex=ax)
    axHisty = divider.append_axes("left", 1., pad=0.5, sharey=ax)
    #axHistx.bar(range(-L,L),Aem)
    #axHisty.barh(range(0,L),Ael )
    axHistx.yaxis.set_ticks(np.array([0,0.2,0.4,0.6,0.8]))
    axHisty.xaxis.set_ticks(np.array([0,0.1,0.2,0.3]))
    cbar = plt.colorbar(im, cax=cax)
    fig.tight_layout()
    # annotation labels placed in axes coordinates
    plt.text(-0.02,0.6 ,'levels',
             horizontalalignment='right',
             verticalalignment='top',
             transform=ax.transAxes,
             rotation =90, fontsize= 15)
    plt.text(0.6,1.1 ,'free space',
             horizontalalignment='right',
             verticalalignment='top',
             transform=ax.transAxes,
             fontsize= 15)
    plt.text(0.55,-0.1 ,'modes',
             horizontalalignment='right'
             ,verticalalignment='top', transform=ax.transAxes, fontsize= 15)
    return fig,ax
def errel(self,kf=-1, dsf=1, typ='s3'):
    """ Relative error between the reference pattern and its SH reconstruction.

    Parameters
    ----------
    kf : integer
        frequency index; if -1 the error is integrated over all frequencies
    dsf : int
        down-sampling factor applied to the angular grids
    typ : string
        synthesis shape {'s1' | 's2' | 's3'}

    Returns
    -------
    errelTh : float
        relative error on :math:`F_{\\theta}`
    errelPh : float
        relative error on :math:`F_{\\phi}`
    errel : float
        combined relative error
    """
    # angular bases, down-sampled by dsf
    theta = self.theta[::dsf]
    phi = self.phi[::dsf]

    if typ == 's1':
        FTh, FPh = self.Fsynth1(theta, phi)
    elif typ == 's2':
        FTh, FPh = self.Fsynth2b(theta, phi)
    elif typ == 's3':
        FTh, FPh = self.Fsynth3(theta, phi)

    # Jacobian sin(theta) as a column vector, broadcast along phi
    st = np.sin(theta).reshape((len(theta), 1))

    # single-frequency slice (kf != -1) or every frequency
    sel = kf if kf != -1 else slice(None)

    # difference between reconstructed and reference patterns
    dTh = FTh[sel] - self.Ft[sel, ::dsf, ::dsf]
    dPh = FPh[sel] - self.Fp[sel, ::dsf, ::dsf]

    # squared magnitudes weighted by the Jacobian
    dTh2 = np.real(dTh * np.conj(dTh)) * st
    dPh2 = np.real(dPh * np.conj(dPh)) * st
    vTh2 = np.real(self.Ft[sel, ::dsf, ::dsf] *
                   np.conj(self.Ft[sel, ::dsf, ::dsf])) * st
    vPh2 = np.real(self.Fp[sel, ::dsf, ::dsf] *
                   np.conj(self.Fp[sel, ::dsf, ::dsf])) * st

    mvTh2 = np.sum(vTh2)
    mvPh2 = np.sum(vPh2)
    errTh = np.sum(dTh2)
    errPh = np.sum(dPh2)

    errelTh = errTh / mvTh2
    errelPh = errPh / mvPh2
    errel = (errTh + errPh) / (mvTh2 + mvPh2)

    return (errelTh, errelPh, errel)
def loadhfss(self,lfa = [], Nt=72,Np=37):
    """ Load antenna pattern data from HFSS csv exports.

    Parameters
    ----------
    lfa : list
        list of antenna file names, one file per frequency point
    Nt : int
        number of theta angles (default 72)
    Np : int
        number of phi angles (default 37)

    Notes
    -----
    Each csv row holds:
    th , ph , abs_grlz, th_absdB, th_phase, ph_absdB, ph_phase, ax_ratio
    Populates self.fGHz, self.theta, self.phi, self.Fp, self.Ft, self.sqG.
    """
    # lfa : list file antenna
    self.nf = len(lfa)
    fGHz  = []
    lacsv = []
    # per-frequency pattern arrays (f, theta, phi)
    Fphi = np.empty((self.nf,self.nth,self.nph))
    Ftheta = np.empty((self.nf,self.nth,self.nph))
    SqG = np.empty((self.nf,self.nth,self.nph))
    for i in range (len(lfa)):
        # frequency extracted from the file name
        # NOTE(review): [-4] keeps a single character before '.csv' —
        # presumably a one-digit GHz value; confirm the naming convention.
        fGHz.append(eval(lfa[i].split('.csv')[0][-4]))
        lacsv.append(pd.read_csv(lfa[i],
                                 header=False,
                                 sep=',',
                                 names=['th','ph','abs_grlz','th_absdB','th_phase','ph_absdB','ph_phase','ax_ratio'],
                                 index_col=False))
        # angles converted from degrees to radians, laid out (phi, theta)
        th=lacsv[i].th.reshape(Np,Nt)*np.pi/180.
        ph=lacsv[i].ph.reshape(Np,Nt)*np.pi/180.
        Greal = lacsv[i].abs_grlz.reshape(Np,Nt)
        th_dB = lacsv[i].th_absdB.reshape(Np,Nt)
        ph_dB = lacsv[i].ph_absdB.reshape(Np,Nt)
        # dB -> linear field magnitude
        th_lin = pow(10,th_dB/20.)
        ph_lin = pow(10,ph_dB/20.)
        #th_phase = lacsv[i].th_phase.reshape(72,37)*np.pi/180.
        #ph_phase = lacsv[i].ph_phase.reshape(72,37)*np.pi/180.
        #axratio=lacsv[i].ax_ratio.reshape(72,37)
        # reorder to the (theta, phi) natural layout
        Fphi[i,:,:] = ph_lin.swapaxes(1,0)
        Ftheta[i,:,:] = th_lin.swapaxes(1,0)
        SqG[i,:,:] = Greal.swapaxes(1,0)
    self.fGHz = np.array(fGHz)
    #self.theta = th[0,:].reshape(Nt,1)
    #self.phi = ph[:,0].reshape(1,Np)
    self.theta = th[0,:]
    self.phi = ph[:,0]
    self.Fp=Fphi
    self.Ft=Ftheta
    self.sqG=SqG
def loadtrx(self, directory):
    """ Load a trx file (SATIMO Near Field Chamber raw data).

    Parameters
    ----------
    directory : string
        project sub-directory; self._filename is the short antenna file
        name, looked up in $BASENAME/<directory>

    Notes
    -----
    Trx header structure (one line in header_<name>.txt)::

        fmin fmax Nf phmin phmax Nphi thmin thmax Ntheta #EDelay
          0    1   2   3     4     5    6     7      8      9
          1   10  121   0   6.19   72   0   3.14    37      0

    Populates self.Fp, self.Ft, self.sqG, self.fGHz, self.theta,
    self.phi, self.nf, self.nth, self.nph, self.tau.

    .. todo::
        consider using an ini file for the header
    """
    _filetrx = self._filename
    _headtrx = 'header_' + _filetrx
    _headtrx = _headtrx.replace('trx', 'txt')
    headtrx = pyu.getlong(_headtrx, directory)
    filename = pyu.getlong(_filetrx, directory)
    #
    # Parse the header (see structure above).
    # float()/int() replace the previous eval() calls: same accepted
    # values, no arbitrary-code evaluation of file content.
    #
    with open(headtrx) as foh:
        fields = foh.read().split()
    fmin = float(fields[0])
    fmax = float(fields[1])
    nf = int(float(fields[2]))
    phmin = float(fields[3])
    phmax = float(fields[4])
    nphi = int(float(fields[5]))
    thmin = float(fields[6])
    thmax = float(fields[7])
    ntheta = int(float(fields[8]))
    #
    # The electrical delay in column 9 is optional
    #
    try:
        tau = float(fields[9])  # tau : delay (ns)
    except (IndexError, ValueError):
        tau = 0
    #
    # Data are stored in 7 columns
    #
    #  0  1   2   3     4      5     6
    #  f  phi th  ReFph ImFphi ReFth ImFth
    #
    with open(filename) as fi:
        d = np.array(fi.read().split())
    N = len(d)
    # BUG FIX: integer division — N / 7 is a float in Python 3 and
    # np.reshape rejects float dimensions
    M = N // 7
    d = d.reshape(M, 7)
    d = d.astype('float')

    f = d[:, 0]
    if f[0] == 0:
        print("error : frequency cannot be zero")
    # detect frequency unit: values above 2000 mean the frequency is
    # expressed in Hz rather than GHz
    if (f[0] > 2000):
        f = f / 1.0e9

    phi = d[:, 1]
    theta = d[:, 2]
    #
    # typ : refers to the way the angular values are stored in the file
    #
    # nfc     : f phi theta   (theta varies fastest)
    # natural : f phi theta   (phi varies fastest)
    #
    # auto detect storage mode from the first two samples
    #
    dphi = abs(phi[0] - phi[1])
    dtheta = abs(theta[0] - theta[1])

    if (dphi == 0) & (dtheta != 0):
        typ = 'nfc'
    elif (dtheta == 0) & (dphi != 0):
        typ = 'natural'
    else:
        # previously this fell through to a NameError; fail explicitly
        raise ValueError("unable to detect trx angular storage mode")
    self.typ = typ

    Fphi = d[:, 3] + d[:, 4] * 1j
    Ftheta = d[:, 5] + d[:, 6] * 1j
    #
    # Gain (used only through its square root here)
    #
    G = np.real(Fphi * np.conj(Fphi) + Ftheta * np.conj(Ftheta))
    SqG = np.sqrt(G)
    #
    # Reshaping according to the detected storage order
    #
    if typ == 'natural':
        self.Fp = Fphi.reshape((nf, ntheta, nphi))
        self.Ft = Ftheta.reshape((nf, ntheta, nphi))
        self.sqG = SqG.reshape((nf, ntheta, nphi))
        Ttheta = theta.reshape((nf, ntheta, nphi))
        Tphi = phi.reshape((nf, ntheta, nphi))
        Tf = f.reshape((nf, ntheta, nphi))
    if typ == 'nfc':
        self.Fp = Fphi.reshape((nf, nphi, ntheta))
        self.Ft = Ftheta.reshape((nf, nphi, ntheta))
        self.sqG = SqG.reshape((nf, nphi, ntheta))
        Ttheta = theta.reshape((nf, nphi, ntheta))
        Tphi = phi.reshape((nf, nphi, ntheta))
        Tf = f.reshape((nf, nphi, ntheta))
    #
    # Force natural order (f, theta, phi)
    # This is not the order of the satimo nfc which is (f, phi, theta)
    #
    self.Fp = self.Fp.swapaxes(1, 2)
    self.Ft = self.Ft.swapaxes(1, 2)
    self.sqG = self.sqG.swapaxes(1, 2)
    Ttheta = Ttheta.swapaxes(1, 2)
    Tphi = Tphi.swapaxes(1, 2)
    Tf = Tf.swapaxes(1, 2)

    self.fGHz = Tf[:, 0, 0]
    self.theta = Ttheta[0, :, 0]
    self.phi = Tphi[0, 0, :]
    #
    # check header consistency
    #
    np.testing.assert_almost_equal(self.fGHz[0], fmin, 6)
    np.testing.assert_almost_equal(self.fGHz[-1], fmax, 6)
    np.testing.assert_almost_equal(self.theta[0], thmin, 3)
    np.testing.assert_almost_equal(self.theta[-1], thmax, 3)
    np.testing.assert_almost_equal(self.phi[0], phmin, 3)
    np.testing.assert_almost_equal(self.phi[-1], phmax, 3)

    self.nf = nf
    self.nth = ntheta
    self.nph = nphi
    self.tau = tau
    self.evaluated = True
def checkpole(self, kf=0):
    """ Display the reconstructed field at both poles for integrity checks.

    Parameters
    ----------
    kf : int
        frequency index (default 0)
    """
    phi = self.phi
    # field components at theta = first sample (pole 0) and last sample (pole pi)
    Ft0 = self.Ft[kf, 0, :]
    Fp0 = self.Fp[kf, 0, :]
    Ftp = self.Ft[kf, -1, :]
    Fpp = self.Fp[kf, -1, :]
    cp = np.cos(phi)
    sp = np.sin(phi)
    # spherical -> cartesian components in the pole plane,
    # in display order: Ex0, Ey0, Exp, Eyp
    curves = [Ft0 * cp - Fp0 * sp,
              Ft0 * sp + Fp0 * cp,
              Ftp * cp - Fpp * sp,
              Ftp * sp + Fpp * cp]
    # one row per component: real part on the left, imaginary on the right
    for row, curve in enumerate(curves):
        plt.subplot(4, 2, 2 * row + 1)
        plt.plot(phi, np.real(curve))
        plt.subplot(4, 2, 2 * row + 2)
        plt.plot(phi, np.imag(curve))
def info(self):
    """ Print a summary of the antenna object.

    Output depends on self.typ: 'mat' files expose acquisition metadata,
    'nfc' files expose the frequency/angular sampling. Falls back to a
    notice when no vsh coefficients have been computed yet.
    """
    print(self._filename)
    print("type : ", self.typ)
    if self.typ == 'mat':
        print(self.DataFile)
        print(self.AntennaName)
        print(self.Date)
        print(self.StartTime)
        print(self.Notes)
        print(self.Serie)
        print(self.Run)
        print("Nb theta (lat) :", self.nth)
        print("Nb phi (lon) :", self.nph)
    if self.typ =='nfc':
        print( "--------------------------")
        print( "fmin (GHz) :", self.fGHz[0])
        print( "fmax (GHz) :", self.fGHz[-1])
        print( "Nf   :", self.nf)
        print( "thmin (rad) :", self.theta[0])
        print( "thmax (rad) :", self.theta[-1])
        print( "Nth  :", self.nth)
        print( "phmin (rad) :", self.phi[0])
        print( "phmax (rad) :", self.phi[-1])
        print( "Nph  :", self.nph)
    try:
        self.C.info()
    # BUG FIX: bare except also swallowed KeyboardInterrupt/SystemExit;
    # only missing/uncomputed coefficients should be tolerated here
    except Exception:
        print("No vsh coefficient calculated yet")
#@mlab.show
def _show3(self, bnewfig=True,
           bcolorbar=True,
           name=[],
           binteract=False,
           btitle=True,
           bcircle=True,
           **kwargs):
    """ Display the antenna pattern with mayavi.

    Parameters
    ----------
    bnewfig : boolean
        start from a fresh mayavi figure
    bcolorbar : boolean
        display colorbar
    name : list or string
        optional prefix for the mesh name in the scene tree
    binteract : boolean
        enable interactive picking mode
    btitle : boolean
        display title
    bcircle : boolean
        draw the three reference circles around the pattern
    kwargs : dict
        forwarded to self._computemesh (po, T, minr, maxr, ...);
        'opacity' (float, default 1) is consumed here

    See Also
    --------
    antprop.antenna._computemesh
    """
    if not self.evaluated:
        self.eval(pattern=True)

    # k is the frequency index
    if hasattr(self, 'p'):
        # mean position over all but the first axis of self.p
        lpshp = len(self.p.shape)
        sum_index = tuple(np.arange(1, lpshp))
        po = np.mean(self.p, axis=sum_index)
        kwargs['po'] = po

    x, y, z, k, scalar = self._computemesh(**kwargs)

    if bnewfig:
        mlab.clf()
        f = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
    else:
        f = mlab.gcf()

    # BUG FIX: dict.has_key() was removed in Python 3
    opacity = kwargs.get('opacity', 1)

    self._mayamesh = mlab.mesh(x, y, z,
                               scalars=scalar,
                               resolution=1,
                               opacity=opacity, reset_zoom=False)

    if name == []:
        f.children[-1].name = 'Antenna ' + self._filename
    else:
        f.children[-1].name = name + self._filename

    if bcolorbar:
        mlab.colorbar()

    if btitle:
        mlab.title(self._filename + ' @ ' + str(self.fGHz[k]) + ' GHz', height=1, size=0.5)

    def circle(typ='xy', a=1.2):
        """ reference circle of radius a in the requested plane """
        phi = np.linspace(0, 2 * np.pi, 2000)
        if typ == 'xy':
            return [a * np.cos(phi),
                    a * np.sin(phi),
                    np.zeros(len(phi))
                    ]
        if typ == 'yz':
            return [np.zeros(len(phi)),
                    a * np.cos(phi),
                    a * np.sin(phi)
                    ]
        if typ == 'xz':
            # BUG FIX: the radius factor was applied to the zero (y)
            # component instead of the sin (z) component
            return [a * np.cos(phi),
                    np.zeros(len(phi)),
                    a * np.sin(phi)
                    ]

    # draw 3D circles around the pattern
    if bcircle:
        xc, yc, zc = circle('xy')  # blue
        mlab.plot3d(xc, yc, zc, color=(0, 0, 1))
        xc, yc, zc = circle('yz')  # red
        mlab.plot3d(xc, yc, zc, color=(1, 0, 0))
        xc, yc, zc = circle('xz')  # green
        mlab.plot3d(xc, yc, zc, color=(0, 1, 0))

    if binteract:
        self._outline = mlab.outline(self._mayamesh, color=(.7, .7, .7))
        self._outline.visible = False

        def picker_callback(picker):
            """ Picker callback: toggles the outline of the picked mesh. """
            if picker.actor in self._mayamesh.actor.actors:
                self._outline.visible = not self._outline.visible
                self._is_selected = self._outline.visible

        picker = f.on_mouse_pick(picker_callback)

    return (f)
def _computemesh(self,**kwargs):
""" compute mesh from theta phi
Parameters
----------
fGHz : np.array()
default [] : takes center frequency fa[len(fa)/2]
po : np.array()
location point of the antenna
T : np.array
rotation matrix
minr : float
minimum radius in meter
maxr : float
maximum radius in meters
tag : string
ilog : boolean
title : boolean
Returns
-------
(x, y, z, k)
x , y , z values in cartesian axis
k frequency point evaluated
"""
defaults = { 'fGHz' :[],
'po': np.array([0,0,0]),
'T' : np.eye(3),
'minr' : 0.1,
'maxr' : 1 ,
'scale':1.,
'tag' : 'Pat',
'txru' : 0,
'ilog' : False,
'title':True,
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
fGHz = kwargs['fGHz']
minr = kwargs['minr']
maxr = kwargs['maxr']
tag = kwargs['tag']
ilog = kwargs['ilog']
txru = kwargs['txru']
scale= kwargs['scale']
po = kwargs['po']
# T is an unitary matrix
T = kwargs['T']
if fGHz == []:
# self.ext == '' <=> mathematically generated => nf = 1
if self.ext != '':
k = len(self.fGHz)/2
else:
k = 0
else :
if self.ext != '':
k = np.where(self.fGHz>=fGHz)[0][0]
else:
k = 0
if len(self.Ft.shape)==3:
r = self.sqG[:,:,k]
else:
r = self.sqG[:,:,txru,k]
th = self.theta[:,None]
phi = self.phi[None,:]
if ilog :
r = 10*np.log10(abs(r))
else:
r = abs(r)
if r.max() != r.min():
u = (r - r.min()) /(r.max() - r.min())
else : u = r
r = minr + (maxr-minr) * u
x = scale*r * np.sin(th) * np.cos(phi)
y = scale*r * np.sin(th) * np.sin(phi)
z = scale*r * np.cos(th)
if z.shape[1] != y.shape[1]:
z = z*np.ones(y.shape[1])
p = np.concatenate((x[...,None],
y[...,None],
z[...,None]),axis=2)
#
# antenna cs -> glogal cs
# q : Nt x Np x 3
q = np.einsum('ij,klj->kli',T,p)
#
# translation
#
scalar=(q[...,0]**2+q[...,1]**2+q[...,2]**2)
q[...,0]=q[...,0]+po[0]
q[...,1]=q[...,1]+po[1]
q[...,2]=q[...,2]+po[2]
x = q[...,0]
y = q[...,1]
z = q[...,2]
return x, y, z, k, scalar
def show3(self, k=0, po=[], T=[], txru=0, typ='G', mode='linear', silent=False):
    """ Display the antenna pattern with geomview.

    Parameters
    ----------
    k : int
        frequency index
    po : array
        position of the antenna (default origin)
    T : array
        GCS rotation matrix of the antenna (default identity)
    txru : int
        transmit/receive unit index when Ft has 4 axes
    typ : string
        'G' | 'Ft' | 'Fp'
    mode : string
        'linear' | 'not implemented'
    silent : boolean
        when True the geomview file is generated but not displayed

    Examples
    --------

    >>> from pylayers.antprop.antenna import *
    >>> import numpy as np
    >>> import matplotlib.pylab as plt
    >>> A = Antenna('defant.sh3')
    >>> #A.show3()
    """
    if not self.evaluated:
        self.eval(pattern=True)

    f = self.fGHz[k]

    # 3 axis : nth x nph x nf
    # (a second, identical `if typ == 'Ft'` branch was removed here;
    #  it duplicated the selection above with no effect)
    if len(self.Ft.shape) == 3:
        if typ == 'G':
            V = self.sqG[:, :, k]
        if typ == 'Ft':
            V = self.Ft[:, :, k]
        if typ == 'Fp':
            V = self.Fp[:, :, k]

    # 4 axis : nth x nph x ntxru x nf
    if len(self.Ft.shape) == 4:
        if typ == 'G':
            V = self.sqG[:, :, txru, k]
        if typ == 'Ft':
            V = self.Ft[:, :, txru, k]
        if typ == 'Fp':
            V = self.Fp[:, :, txru, k]

    if po == []:
        po = np.array([0, 0, 0])
    if T == []:
        T = np.eye(3)

    _filename = 'antbody'
    geo = geu.Geomoff(_filename)
    # geo.pattern expects 1-d theta and phi arrays
    theta = self.theta
    phi = self.phi
    geo.pattern(theta, phi, V, po=po, T=T, ilog=False, minr=0.01, maxr=0.2)

    if not silent:
        geo.show3()
def plot3d(self, k=0, typ='Gain', col=True):
    """ Show the 3D pattern with matplotlib.

    Parameters
    ----------
    k : int
        frequency index
    typ : string
        'Gain' | 'Ftheta' | 'Fphi'
    col : boolean
        if True -> color-coded surface plot, else -> simple wireframe plot3D
    """
    fig = plt.figure()
    ax = axes3d.Axes3D(fig)

    if typ == 'Gain':
        V = self.sqG[:, :, k]
    if typ == 'Ftheta':
        V = self.Ft[:, :, k]
    if typ == 'Fphi':
        V = self.Fp[:, :, k]

    # outer products build the (Nt, Np) angular grids
    vt = np.ones(self.nth)
    vp = np.ones(self.nph)
    Th = np.outer(self.theta, vp)
    Ph = np.outer(vt, self.phi)

    # BUG FIX: removed a leftover pdb.set_trace() debugging breakpoint
    X = abs(V) * np.cos(Ph) * np.sin(Th)
    Y = abs(V) * np.sin(Ph) * np.sin(Th)
    Z = abs(V) * np.cos(Th)

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    if col:
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                        cmap=cm.hot_r, shade=True)
    else:
        ax.plot3D(np.ravel(X), np.ravel(Y), np.ravel(Z))
    plt.show()
def pol3d(self, k=0, R=50, St=4, Sp=4, silent=False):
    """ Display the polarisation diagram in 3D (geomview).

    Parameters
    ----------
    k : int
        frequency index
    R : float
        radius of the sphere
    St : int
        downsampling factor along theta
    Sp : int
        downsampling factor along phi
    silent : Boolean
        if True the file is created but not displayed

    Notes
    -----
    The file created is named : Polar{ifreq}.list
    and placed in the /geom directory of the project.
    """
    _filename = 'Polar' + str(10000 + k)[1:] + '.list'
    filename = pyu.getlong(_filename, pstruc['DIRGEOM'])
    fd = open(filename, "w")
    fd.write("LIST\n")
    Nt = self.nth
    Np = self.nph
    # N : number of points per polarization ellipse
    N = 10
    # down-sampled angular index grids
    plth = np.arange(0, Nt, St)
    plph = np.arange(0, Np, Sp)
    for m in plph:
        for n in plth:
            #theta = self.theta[n,0]
            theta = self.theta[n]
            #print "m,theta= :",m,theta*180/np.pi
            #phi = self.phi[0,m]
            phi = self.phi[m]
            #print "n,phi=:",n,phi*180/np.pi
            # local spherical basis at (theta, phi)
            B = geu.vec_sph(theta, phi)
            # cartesian anchor point on the sphere of radius R
            p = R * np.array((np.cos(phi) * np.sin(theta),
                              np.sin(phi) * np.sin(theta),
                              np.cos(theta)))
            fd.write('{\n')
            # one polarization ellipse per sampled direction
            geu.ellipse(fd, p, B[0, :], B[1, :], self.Ft[n, m , k], self.Fp[n, m , k], N)
            fd.write('}\n')
    fd.close()
    if not silent:
        # launch geomview in the background on the generated file
        chaine = "geomview " + filename + " 2>/dev/null &"
        os.system(chaine)
def mse(self, Fth, Fph, N=0):
    """ Relative mean square error between original and reconstructed pattern.

    Parameters
    ----------
    Fth : np.array
        reconstructed F_theta (same total size as self.Ft)
    Fph : np.array
        reconstructed F_phi (same total size as self.Fp)
    N : int
        number of theta samples excluded at each pole;
        if N=0 all values are kept, else N <= n < Nt - N

    Returns
    -------
    Err_rel, Errth_rel, Errph_rel : float
        combined and per-polarization errors, each normalized by the
        total energy of the original pattern.
    """
    sh = np.shape(self.Ft)
    Nf, Nt, Np = sh
    # theta index range with pole exclusion
    pt = np.arange(N, Nt - N, 1)

    Fthr = Fth.reshape(sh)
    Fphr = Fph.reshape(sh)

    Gr = np.real(Fphr * np.conj(Fphr) + Fthr * np.conj(Fthr))
    SqGr = np.sqrt(Gr)

    Fthr = Fthr[:, pt, :].ravel()
    Fphr = Fphr[:, pt, :].ravel()
    SqGr = SqGr[:, pt, :].ravel()      # computed for parity, not used below
    Ftho = self.Ft[:, pt, :].ravel()
    Fpho = self.Fp[:, pt, :].ravel()
    SqGo = self.sqG[:, pt, :].ravel()  # computed for parity, not used below

    # energies of the original pattern (vdot(a, a) == dot(conj(a), a))
    Etho = np.sqrt(np.vdot(Ftho, Ftho))
    Epho = np.sqrt(np.vdot(Fpho, Fpho))
    Eo = np.sqrt(np.vdot(Ftho, Ftho) + np.vdot(Fpho, Fpho))

    errth = Ftho - Fthr
    errph = Fpho - Fphr

    Err = np.real(np.sqrt(np.vdot(errth, errth) + np.vdot(errph, errph)))
    Errth = np.real(np.sqrt(np.vdot(errth, errth)))
    Errph = np.real(np.sqrt(np.vdot(errph, errph)))

    return Err / Eo, Errth / Eo, Errph / Eo
def getdelay(self, delayCandidates=np.arange(-10, 10, 0.001)):
    """ Estimate the antenna electrical delay.

    Parameters
    ----------
    delayCandidates : ndarray
        candidate delays in ns, default np.arange(-10, 10, 0.001)

    Returns
    -------
    electricalDelay : float
        delay (ns) maximizing the correlation of the frequency response
        with a pure delay term.

    Author : Troels Pedersen (Aalborg University)
             B.Uguen
    """
    if not self.evaluated:
        raise Warning('Antenna has not been evaluated')
    # direction of maximum radiated power
    imax = np.unravel_index(np.argmax(abs(self.Ft)), np.shape(self.Ft))
    # correlate the frequency response in that direction with each
    # candidate delay phasor exp(+2j pi f tau)
    phasors = np.exp(2j * np.pi * self.fGHz[:, None]
                     * delayCandidates[None, :])
    corr = np.dot(self.Ft[imax[0], imax[1], :], phasors)
    return (delayCandidates[np.argmax(abs(corr))])
def elec_delay(self, tau):
    r""" Apply an electrical delay to the antenna pattern.

    Parameters
    ----------
    tau : float
        electrical delay in nanoseconds

    Notes
    -----
    Applies the phase term math::`\exp(+2 j \pi f \tau)` to both
    math::`F_{\theta}` and math::`F_{\phi}` along the frequency axis
    (last axis), and accumulates tau into self.tau.

    Examples
    --------

    .. plot::
        :include-source:

        >>> from pylayers.antprop.antenna import *
        >>> A = Antenna('S2R2.sh3')
        >>> A.eval()
        >>> tau = A.getdelay()
        >>> A.elec_delay(tau)
    """
    # accumulate the total applied delay
    self.tau = self.tau + tau
    if not self.evaluated:
        raise Warning('antenna has not been evaluated')
    # phase ramp along the frequency axis (broadcast over angles)
    ramp = np.exp(2 * np.pi * 1j * self.fGHz[None, None, :] * tau)
    self.Ft = self.Ft * ramp
    self.Fp = self.Fp * ramp
def Fsynth(self, theta=[], phi=[]):
    """ Perform antenna pattern synthesis.

    Parameters
    ----------
    theta : np.array
    phi : np.array

    Returns
    -------
    (Ft, Fp) : tuple of ndarray

    Notes
    -----
    The antenna pattern synthesis is done either from spherical
    harmonics coefficients (file-based, 'vsh' or 'ssh' types, via
    Fsynth3) or from an analytical expression of the radiation
    pattern (method self.p<typ>).
    """
    if ((self.fromfile) or (self.typ == 'vsh') or (self.typ == 'ssh')):
        Ft, Fp = self.Fsynth3(theta, phi)
        self.gain()
        self.evaluated = True
    else:
        Ft = self.Ft
        Fp = self.Fp
        self.theta = theta
        self.phi = phi
        # dispatch to the analytical pattern method self.p<typ>
        # (getattr replaces eval on a constructed string: same lookup,
        #  no arbitrary-code evaluation)
        getattr(self, 'p' + self.typ)()
    return (Ft, Fp)
#def Fsynth1(self, theta, phi, k=0):
def Fsynth1(self, theta, phi):
    """ Calculate the complex antenna pattern from VSH coefficients (shape 1).

    Parameters
    ----------
    theta : ndarray (1 x Ndir)
    phi : ndarray (1 x Ndir)

    Returns
    -------
    Ft, Fp : ndarray
        (Nf x Nt x Np) when self.grid else per-direction values
    """
    Nt = len(theta)
    Np = len(phi)

    if self.grid:
        # expand (theta, phi) into the full angular product grid
        theta = np.kron(theta, np.ones(Np))
        phi = np.kron(np.ones(Nt), phi)

    nray = len(theta)

    Br = self.C.Br.s1[:, :, :]
    Bi = self.C.Bi.s1[:, :, :]
    Cr = self.C.Cr.s1[:, :, :]
    Ci = self.C.Ci.s1[:, :, :]

    N = self.C.Br.N1
    M = self.C.Br.M1
    #
    # The - sign is necessary to get the good reconstruction
    # deduced from observation
    # May be it comes from a different definition of theta in SPHEREPACK
    #
    x = -np.cos(theta)

    Pmm1n, Pmp1n = AFLegendre3(N, M, x)
    ind = index_vsh(N, M)

    n = ind[:, 0]
    m = ind[:, 1]

    V, W = VW(n, m, x, phi)

    #
    # broadcasting along the frequency axis
    #
    V = np.expand_dims(V, 0)
    # BUG FIX: W was overwritten with V (expand_dims(V, 0)), discarding
    # the W basis entirely
    W = np.expand_dims(W, 0)

    #
    # k : frequency axis, i : direction, l/m : coefficient indices
    # BUG FIX: np.eisum does not exist (AttributeError) -> np.einsum.
    # NOTE(review): the 'klm,kilm->ki' subscripts assume 4-d V.T/W.T,
    # which looks inconsistent with expand_dims above — confirm against
    # the VW output shape (the earlier np.dot formulation is kept in
    # Fsynth2b/Fsynth3).
    #
    Fth = np.einsum('klm,kilm->ki', Br, np.real(V.T)) - \
        np.einsum('klm,kilm->ki', Bi, np.imag(V.T)) + \
        np.einsum('klm,kilm->ki', Ci, np.real(W.T)) + \
        np.einsum('klm,kilm->ki', Cr, np.imag(W.T))
    Fph = -np.einsum('klm,kilm->ki', Cr, np.real(V.T)) + \
        np.einsum('klm,kilm->ki', Ci, np.imag(V.T)) + \
        np.einsum('klm,kilm->ki', Bi, np.real(W.T)) + \
        np.einsum('klm,kilm->ki', Br, np.imag(W.T))

    if self.grid:
        Nf = len(self.fGHz)
        Fth = Fth.reshape(Nf, Nt, Np)
        Fph = Fph.reshape(Nf, Nt, Np)

    return Fth, Fph
def Fsynth2s(self,dsf=1):
    """ Per-coefficient energy synthesis from shape-2 VSH coefficients.

    Parameters
    ----------
    dsf : int
        down-sampling factor applied to self.theta / self.phi

    Returns
    -------
    (tEBr, tEBi, tECr, tECi) : tuple of np.array (length K2 each)
        energy contribution of each coefficient family over the
        (Jacobian-weighted) angular grid.

    Notes
    -----
    Calculate complex antenna pattern contributions from VSH
    coefficients (shape 2) for the specified directions (theta,phi).
    theta and phi arrays need to have the same size.
    """
    theta = self.theta[::dsf]
    phi = self.phi[::dsf]
    Nt = len(theta)
    Np = len(phi)
    # angular product grid (Ndir = Nt*Np)
    theta = np.kron(theta, np.ones(Np))
    phi = np.kron(np.ones(Nt), phi)
    Ndir = len(theta)

    Br  = self.C.Br.s2  # Nf x K2
    Bi  = self.C.Bi.s2  # Nf x K2
    Cr  = self.C.Cr.s2  # Nf x K2
    Ci  = self.C.Ci.s2  # Nf x K2

    Nf = np.shape(self.C.Br.s2)[0]
    K2 = np.shape(self.C.Br.s2)[1]

    L = self.C.Br.N2  # int
    M = self.C.Br.M2  # int
    #print "N,M",N,M
    #
    # The - sign is necessary to get the good reconstruction
    # deduced from observation
    # May be it comes from a different definition of theta in SPHEREPACK
    x = -np.cos(theta)

    Pmm1n, Pmp1n = AFLegendre3(L, M, x)
    ind = index_vsh(L, M)

    l = ind[:, 0]
    m = ind[:, 1]

    V, W = VW2(l, m, x, phi, Pmm1n, Pmp1n)  # K2 x Ndir
    # Fth , Fph are Nf x Ndir

    # per-coefficient energies, accumulated over k in [0, K2)
    tEBr = []
    tEBi = []
    tECr = []
    tECi = []

    for k in range(K2):
        # rank-1 outer products: coefficient column (Nf x 1) times
        # basis-function row (1 x Ndir)
        BrVr = np.dot(Br[:,k].reshape(Nf,1),
                      np.real(V.T)[k,:].reshape(1,Ndir))
        BiVi = np.dot(Bi[:,k].reshape(Nf,1),
                      np.imag(V.T)[k,:].reshape(1,Ndir))
        CiWr = np.dot(Ci[:,k].reshape(Nf,1),
                      np.real(W.T)[k,:].reshape(1,Ndir))
        CrWi = np.dot(Cr[:,k].reshape(Nf,1),
                      np.imag(W.T)[k,:].reshape(1,Ndir))

        CrVr = np.dot(Cr[:,k].reshape(Nf,1),
                      np.real(V.T)[k,:].reshape(1,Ndir))
        CiVi = np.dot(Ci[:,k].reshape(Nf,1),
                      np.imag(V.T)[k,:].reshape(1,Ndir))
        BiWr = np.dot(Bi[:,k].reshape(Nf,1),
                      np.real(W.T)[k,:].reshape(1,Ndir))
        BrWi = np.dot(Br[:,k].reshape(Nf,1),
                      np.imag(W.T)[k,:].reshape(1,Ndir))

        # sin(theta) Jacobian weighting on each energy term
        EBr = np.sum(BrVr*np.conj(BrVr)*np.sin(theta)) + \
              np.sum(BrWi*np.conj(BrWi)*np.sin(theta))

        EBi = np.sum(BiVi*np.conj(BiVi)*np.sin(theta)) + \
              np.sum(BiWr*np.conj(BiWr)*np.sin(theta))

        ECr = np.sum(CrWi*np.conj(CrWi)*np.sin(theta)) + \
            + np.sum(CrVr*np.conj(CrVr)*np.sin(theta))

        ECi = np.sum(CiWr*np.conj(CiWr)*np.sin(theta)) + \
            + np.sum(CiVi*np.conj(CiVi)*np.sin(theta))

        tEBr.append(EBr)
        tEBi.append(EBi)
        tECr.append(ECr)
        tECi.append(ECi)

    #Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
    #      np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))
    #Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
    #      np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))

    return np.array(tEBr),np.array(tEBi),np.array(tECr),np.array(tECi)
def Fsynth2b(self, theta, phi):
    """ Pattern synthesis from shape-2 VSH coefficients.

    Parameters
    ----------
    theta : ndarray
        1 x Nt
    phi : ndarray
        1 x Np

    Returns
    -------
    Fth, Fph : ndarray
        (Nf x Nt x Np) when self.grid else (Nf x Ndir)

    Notes
    -----
    Calculate complex antenna pattern from VSH Coefficients (shape 2)
    for the specified directions (theta,phi).
    theta and phi arrays need to have the same size.
    """
    Nt = len(theta)
    Np = len(phi)

    if self.grid:
        # angular product grid (Ndir = Nt*Np)
        theta = np.kron(theta, np.ones(Np))
        phi = np.kron(np.ones(Nt),phi)

    Br  = self.C.Br.s2  # Nf x K2
    Bi  = self.C.Bi.s2  # Nf x K2
    Cr  = self.C.Cr.s2  # Nf x K2
    Ci  = self.C.Ci.s2  # Nf x K2

    L = self.C.Br.N2  # int
    M = self.C.Br.M2  # int
    #print "N,M",N,M
    #
    # The - sign is necessary to get the good reconstruction
    # deduced from observation
    # May be it comes from a different definition of theta in SPHEREPACK
    x = -np.cos(theta)

    Pmm1n, Pmp1n = AFLegendre3(L, M, x)
    ind = index_vsh(L, M)

    l = ind[:, 0]
    m = ind[:, 1]

    V, W = VW2(l, m, x, phi, Pmm1n, Pmp1n)  # K2 x Ndir
    # Fth , Fph are Nf x Ndir
    Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
          np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))

    Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
          np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))

    if self.grid:
        Nf = len(self.fGHz)
        Fth = Fth.reshape(Nf, Nt, Np)
        Fph = Fph.reshape(Nf, Nt, Np)

    return Fth, Fph
def Fsynth2(self, theta, phi, typ = 'vsh'):
    """ Pattern synthesis from shape-2 spherical-harmonics coefficients.

    Parameters
    ----------
    theta : array
        1 x Nt
    phi : array
        1 x Np
    typ : string
        {'vsh' | 'ssh'}

    Returns
    -------
    Fth, Fph : ndarray

    Notes
    -----
    Calculate complex antenna pattern from VSH Coefficients (shape 2)
    for the specified directions (theta,phi).
    theta and phi arrays need to have the same size.
    """
    self.nth = len(theta)
    self.nph = len(phi)
    self.nf = len(self.fGHz)

    if typ =='vsh' :

        if self.grid:
            # angular product grid (Ndir = nth*nph)
            theta = np.kron(theta, np.ones(self.nph))
            phi = np.kron(np.ones(self.nth),phi)

        Br = self.C.Br.s2
        Bi = self.C.Bi.s2
        Cr = self.C.Cr.s2
        Ci = self.C.Ci.s2

        N = self.C.Br.N2
        M = self.C.Br.M2

        #print "N,M",N,M
        #
        # The - sign is necessary to get the good reconstruction
        # deduced from observation
        # May be it comes from a different definition of theta in SPHEREPACK
        x = -np.cos(theta)

        Pmm1n, Pmp1n = AFLegendre3(N, M, x)
        ind = index_vsh(N, M)

        n = ind[:, 0]
        m = ind[:, 1]

        #~ V, W = VW(n, m, x, phi, Pmm1n, Pmp1n)
        V, W = VW(n, m, x, phi)


        Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
            np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))
        Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
            np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))

        if self.grid:
            Fth = Fth.reshape(self.nf, self.nth, self.nph)
            Fph = Fph.reshape(self.nf, self.nth, self.nph)

    if typ=='ssh':
        # NOTE(review): unlike the vsh branch, theta/phi are not expanded
        # with kron here and the reshape below assumes a full grid —
        # confirm this branch is only called with grid inputs.
        cx = self.S.Cx.s2
        cy = self.S.Cy.s2
        cz = self.S.Cz.s2
        lmax = self.S.Cx.lmax
        Y ,indx = SSHFunc(lmax, theta,phi)
        # cartesian field components reconstructed from scalar SH
        Ex = np.dot(cx,Y).reshape(self.nf,self.nth,self.nph)
        Ey = np.dot(cy,Y).reshape(self.nf,self.nth,self.nph)
        Ez = np.dot(cz,Y).reshape(self.nf,self.nth,self.nph)

        Fth,Fph = CartToSphere (theta, phi, Ex, Ey,Ez, bfreq = True )

    self.evaluated = True
    return Fth, Fph
def Fsynth3(self, theta=[], phi=[], typ='vsh'):
    r""" Synthesis of a complex antenna pattern from SH coefficients
    (vsh or ssh in shape 3).

    Ndir is the number of directions.

    Parameters
    ----------
    theta : ndarray (1xNdir if not grid) (1xNtheta if grid)
    phi : ndarray (1xNdir if not grid) (1xNphi if grid)
    typ : 'vsh' | 'ssh' | 'hfss'

    Returns
    -------
    if self.grid:
        Fth : ndarray (Nf x Ntheta x Nphi)
        Fph : ndarray (Nf x Ntheta x Nphi)
    else:
        Fth : ndarray (1 x Ndir)
        Fph : ndarray (1 x Ndir)

    See Also
    --------
    pylayers.antprop.channel._vec2scalA

    Examples
    --------

    .. plot::
        :include-source:

        >>> from pylayers.antprop.antenna import *
        >>> import numpy as np
        >>> import matplotlib.pylab as plt
        >>> A = Antenna('defant.vsh3')
        >>> F = A.eval(grid=True)

    Notes
    -----
    All Br,Cr,Bi,Ci have the same (l,m) index in order to evaluate only
    once the V,W function.

    If the data comes from a cst file like the antenna used in WHERE1 D4.1
    the pattern is multiplied by $\frac{4\pi}{120\pi}=\frac{1}{\sqrt{30}}$
    """
    assert (hasattr(self, 'C') or hasattr(self, 'S')), "No SH coeffs evaluated"

    Nf = len(self.fGHz)
    if theta == []:
        theta = np.linspace(0, np.pi, 45)
    if phi == []:
        phi = np.linspace(0, 2 * np.pi, 90)

    Nt = len(theta)
    Np = len(phi)
    self.nth = len(theta)
    self.nph = len(phi)

    if self.grid:
        self.theta = theta
        self.phi = phi
        # angular product grid (Ndir = Nt*Np)
        theta = np.kron(theta, np.ones(Np))
        phi = np.kron(np.ones(Nt), phi)

    if typ == 'vsh':
        Br = self.C.Br.s3
        lBr = self.C.Br.ind3[:, 0]
        mBr = self.C.Br.ind3[:, 1]
        Bi = self.C.Bi.s3
        Cr = self.C.Cr.s3
        Ci = self.C.Ci.s3

        # vector spherical harmonics basis functions
        V, W = VW(lBr, mBr, theta, phi)

        # sign convention deduced from observation
        # (possibly a different theta definition in SPHEREPACK)
        Fth = np.dot(Br, np.real(V.T)) - \
            np.dot(Bi, np.imag(V.T)) + \
            np.dot(Ci, np.real(W.T)) + \
            np.dot(Cr, np.imag(W.T))

        Fph = -np.dot(Cr, np.real(V.T)) + \
            np.dot(Ci, np.imag(V.T)) + \
            np.dot(Bi, np.real(W.T)) + \
            np.dot(Br, np.imag(W.T))

        if self.grid:
            Fth = Fth.reshape(Nf, Nt, Np)
            Fph = Fph.reshape(Nf, Nt, Np)

    if typ == 'ssh':
        cx = self.S.Cx.s3
        cy = self.S.Cy.s3
        cz = self.S.Cz.s3
        lmax = self.S.Cx.lmax
        Y, indx = SSHFunc2(lmax, theta, phi)
        # same truncation index k for the x, y and z components
        k = self.S.Cx.k2

        Ex = np.dot(cx, Y[k])
        Ey = np.dot(cy, Y[k])
        Ez = np.dot(cz, Y[k])
        # BUG FIX: this branch tested the undefined name `pattern`
        # (a removed parameter); the grid/non-grid switch is carried by
        # self.grid, as in the vsh branch above
        if self.grid:
            Fth, Fph = CartToSphere(theta, phi, Ex, Ey, Ez, bfreq=True, pattern=True)
            Fth = Fth.reshape(Nf, Nt, Np)
            Fph = Fph.reshape(Nf, Nt, Np)
        else:
            Fth, Fph = CartToSphere(theta, phi, Ex, Ey, Ez, bfreq=True, pattern=False)

    self.evaluated = True

    # TODO create 2 different functions for grid and non-grid synthesis
    return Fth, Fph
def movie_vsh(self, mode='linear'):
    """ animates vector spherical coeff w.r.t frequency

    Saves one png frame per frequency point (000.png, 001.png, ...)
    showing |Br|, |Bi|, |Cr|, |Ci| shape-1 coefficients (first 20x20
    modes), then assembles the frames into ``vshcoeff.avi`` with the
    external ``mencoder`` tool (must be in PATH).

    Parameters
    ----------
    mode : string
        'linear' |
    """
    # common per-quantity color scale over all frequencies so that
    # successive frames are visually comparable
    Brmin = abs(self.C.Br[:, 0:20, 0:20]).min()
    Brmax = abs(self.C.Br[:, 0:20, 0:20]).max()
    Bimin = abs(self.C.Bi[:, 0:20, 0:20]).min()
    Bimax = abs(self.C.Bi[:, 0:20, 0:20]).max()
    Crmin = abs(self.C.Cr[:, 0:20, 0:20]).min()
    Crmax = abs(self.C.Cr[:, 0:20, 0:20]).max()
    Cimin = abs(self.C.Ci[:, 0:20, 0:20]).min()
    Cimax = abs(self.C.Ci[:, 0:20, 0:20]).max()
    # print(Brmin, Brmax, Bimin, Bimax, Crmin, Crmax, Cimin, Cimax)
    for k in range(self.nf):
        plt.figure()
        stf = ' f=' + str(self.fGHz[k]) + ' GHz'
        subplot(221)
        pcolor(abs(self.C.Br.s1[k, 0:20, 0:20]),
               vmin=Brmin, vmax=Brmax, edgecolors='k')
        #xlabel('m',fontsize=12)
        ylabel('n', fontsize=12)
        title('$|Br_{n}^{(m)}|$' + stf, fontsize=10)
        colorbar()
        subplot(222)
        pcolor(abs(self.C.Bi.s1[k, 0:20, 0:20]),
               vmin=Bimin, vmax=Bimax, edgecolors='k')
        #xlabel('m',fontsize=12)
        ylabel('n', fontsize=12)
        title('$|Bi_{n}^{(m)}|$' + stf, fontsize=10)
        colorbar()
        subplot(223)
        pcolor(abs(self.C.Cr.s1[k, 0:20, 0:20]),
               vmin=Crmin, vmax=Crmax, edgecolors='k')
        xlabel('m', fontsize=12)
        #ylabel('n',fontsize=12)
        title('$|Cr_{n}^{(m)}|$' + stf, fontsize=10)
        colorbar()
        subplot(224)
        pcolor(abs(self.C.Ci.s1[k, 0:20, 0:20]),
               vmin=Cimin, vmax=Cimax, edgecolors='k')
        xlabel('m', fontsize=12)
        #ylabel('n',fontsize=12)
        title('$|Ci_{n}^{(m)}|$' + stf, fontsize=10)
        colorbar()
        # zero-padded frame name consumed by the mencoder glob below
        filename = str('%03d' % k) + '.png'
        savefig(filename, dpi=100)
        clf()
    # encode all saved frames into a single avi file
    command = ('mencoder',
               'mf://*.png',
               '-mf',
               'type=png:w=800:h=600:fps=1',
               '-ovc',
               'lavc',
               '-lavcopts',
               'vcodec=mpeg4',
               '-oac',
               'copy',
               '-o',
               'vshcoeff.avi')
    subprocess.check_call(command)
def minsh3(self, emax=0.05):
    """ creates vsh3 with significant coeff until given relative reconstruction error

    Parameters
    ----------
    emax : float
        maximum tolerated relative reconstruction error (default 0.05)

    Summary
    -------
    Create antenna's vsh3 file which only contains
    the significant vsh coefficients in shape 3,
    in order to obtain a reconstruction maximal error = emax

    This function requires a reading of .trx file before being executed
    """
    #th = np.kron(self.theta, np.ones(self.nph))
    #ph = np.kron(np.ones(self.nth), self.phi)
    if not self.grid:
        self.grid = True
    # baseline reconstruction error with the full coefficient set
    Fth3, Fph3 = self.Fsynth3(self.theta, self.phi)
    Err = self.mse(Fth3, Fph3, 0)
    # coefficient energies sorted by the ens3() ordering
    Enc = self.C.ens3()
    n = len(Enc)
    pos = 0
    # NOTE: '&' is a bitwise AND on the two boolean conditions here;
    # it evaluates both operands (no short-circuit) but is equivalent
    # to 'and' for plain booleans.
    while (pos < n) & (Err[0] < emax):
        Emin = Enc[pos]
        # drag3 removes coefficients whose energy is below Emin
        d = self.C.drag3(Emin)
        Fth3, Fph3 = self.Fsynth3(self.theta, self.phi)
        Err = self.mse(Fth3, Fph3, 0)
        if Err[0] >= emax:
            # error budget exceeded: put the dragged coefficient back
            i = d[0][0]
            i3 = d[1][0]
            self.C.put3(i, i3)
            Fth3, Fph3 = self.Fsynth3(self.theta,self.phi)
            Err = self.mse(Fth3, Fph3, 0)
        pos = pos + 1
def savevsh3(self):
    """ save antenna in vsh3 format

    Create a .vsh3 antenna file (MATLAB .mat container written with
    scipy.io.savemat) holding the shape-3 vsh coefficients.
    Does nothing if the target file already exists.
    """
    # create vsh3 file: same basename as the current antenna file
    _filevsh3 = os.path.splitext(self._filename)[0]+'.vsh3'
    filevsh3 = pyu.getlong(_filevsh3, pstruc['DIRANT'])
    #filevsh3 = pyu.getlong(self._filename,'ant')
    if os.path.isfile(filevsh3):
        print( filevsh3, ' already exist')
    else:
        print( 'create ', filevsh3, ' file')
        coeff = {}
        coeff['fmin'] = self.fGHz[0]
        coeff['fmax'] = self.fGHz[-1]
        # indices, k-mapping and shape-3 data for each vsh coefficient set
        coeff['Br.ind'] = self.C.Br.ind3
        coeff['Bi.ind'] = self.C.Bi.ind3
        coeff['Cr.ind'] = self.C.Cr.ind3
        coeff['Ci.ind'] = self.C.Ci.ind3
        coeff['Br.k'] = self.C.Br.k2
        coeff['Bi.k'] = self.C.Bi.k2
        coeff['Cr.k'] = self.C.Cr.k2
        coeff['Ci.k'] = self.C.Ci.k2
        coeff['Br.s3'] = self.C.Br.s3
        coeff['Bi.s3'] = self.C.Bi.s3
        coeff['Cr.s3'] = self.C.Cr.s3
        coeff['Ci.s3'] = self.C.Ci.s3
        io.savemat(filevsh3, coeff, appendmat=False)
def savesh2(self):
    """ save coeff in .sh2 antenna file

    Writes the shape-2 scalar spherical harmonic coefficients
    (Cx, Cy, Cz) to a .mat container. Does nothing if the target
    file already exists.
    """
    # create sh2 file: replace the current extension by .sh2
    #typ = self._filename.split('.')[1]
    #self.typ = typ
    _filesh2 = self._filename.replace('.'+ self.typ, '.sh2')
    filesh2 = pyu.getlong(_filesh2, pstruc['DIRANT'])
    if os.path.isfile(filesh2):
        print(filesh2, ' already exist')
    else:
        print('create ', filesh2, ' file')
        coeff = {}
        coeff['fmin'] = self.fGHz[0]
        coeff['fmax'] = self.fGHz[-1]
        coeff['Cx.ind'] = self.S.Cx.ind2
        coeff['Cy.ind'] = self.S.Cy.ind2
        coeff['Cz.ind'] = self.S.Cz.ind2
        coeff['Cx.lmax']= self.S.Cx.lmax
        coeff['Cy.lmax']= self.S.Cy.lmax
        coeff['Cz.lmax']= self.S.Cz.lmax
        coeff['Cx.s2'] = self.S.Cx.s2
        coeff['Cy.s2'] = self.S.Cy.s2
        coeff['Cz.s2'] = self.S.Cz.s2
        io.savemat(filesh2, coeff, appendmat=False)
def savesh3(self):
    """ save antenna in sh3 format

    create a .sh3 antenna file holding the shape-3 scalar spherical
    harmonic coefficients (Cx, Cy, Cz). Does nothing if the target
    file already exists.
    """
    # create sh3 file
    # if self._filename has an extension
    # it is replaced by .sh3
    #typ = self._filename.split('.')[1]
    #self.typ = typ
    _filesh3 = self._filename.replace('.'+ self.typ, '.sh3')
    filesh3 = pyu.getlong(_filesh3, pstruc['DIRANT'])
    if os.path.isfile(filesh3):
        print(filesh3, ' already exist')
    else:
        print('create ', filesh3, ' file')
        coeff = {}
        coeff['fmin'] = self.fGHz[0]
        coeff['fmax'] = self.fGHz[-1]
        coeff['Cx.ind'] = self.S.Cx.ind3
        coeff['Cy.ind'] = self.S.Cy.ind3
        coeff['Cz.ind'] = self.S.Cz.ind3
        coeff['Cx.k'] = self.S.Cx.k2
        coeff['Cy.k'] = self.S.Cy.k2
        coeff['Cz.k'] = self.S.Cz.k2
        coeff['Cx.lmax']= self.S.Cx.lmax
        coeff['Cy.lmax']= self.S.Cy.lmax
        coeff['Cz.lmax']= self.S.Cz.lmax
        coeff['Cx.s3'] = self.S.Cx.s3
        coeff['Cy.s3'] = self.S.Cy.s3
        coeff['Cz.s3'] = self.S.Cz.s3
        io.savemat(filesh3, coeff, appendmat=False)
def loadvsh3(self):
    """ Load antenna's vsh3 file

    vsh3 file contains a thresholded version of vsh coefficients in shape 3.
    On success this sets self.C (VSHCoeff), self.nf and self.fGHz;
    self.evaluated is reset to False in all cases.
    """
    _filevsh3 = self._filename
    filevsh3 = pyu.getlong(_filevsh3, pstruc['DIRANT'])
    self.evaluated = False
    if os.path.isfile(filevsh3):
        coeff = io.loadmat(filevsh3, appendmat=False)
        #
        # This test is to fix a problem with 2 different
        # behaviors of io.loadmat (scalar vs 1x1 array for scalars)
        #
        if type(coeff['fmin']) == float:
            fmin = coeff['fmin']
            fmax = coeff['fmax']
        else:
            fmin = coeff['fmin'][0][0]
            fmax = coeff['fmax'][0][0]
        # .. Warning
        # Warning modification takes only one dimension for k
        # if the .vsh3 format evolves it may not work anymore
        #
        Br = VCoeff('s3', fmin, fmax, coeff['Br.s3'],
                    coeff['Br.ind'], coeff['Br.k'][0])
        Bi = VCoeff('s3', fmin, fmax, coeff['Bi.s3'],
                    coeff['Bi.ind'], coeff['Bi.k'][0])
        Cr = VCoeff('s3', fmin, fmax, coeff['Cr.s3'],
                    coeff['Cr.ind'], coeff['Cr.k'][0])
        Ci = VCoeff('s3', fmin, fmax, coeff['Ci.s3'],
                    coeff['Ci.ind'], coeff['Ci.k'][0])
        self.C = VSHCoeff(Br, Bi, Cr, Ci)
        # number of frequency points is taken from the stored data
        self.nf = np.shape(Br.s3)[0]
        self.fGHz = np.linspace(fmin, fmax, self.nf)
    else:
        print(_filevsh3, ' does not exist')
def loadsh3(self):
    """ Load antenna's sh3 file

    sh3 file contains a thresholded version of ssh coefficients in shape 3.
    On success this sets/updates self.S (SSHCoeff), self.nf and self.fGHz;
    self.evaluated is reset to False in all cases.
    """
    # derive the .sh3 filename from the current file's basename
    _filesh3 = self._filename.split('.')[0]+'.sh3'
    filesh3 = pyu.getlong(_filesh3, pstruc['DIRANT'])
    self.evaluated = False
    if os.path.isfile(filesh3):
        coeff = io.loadmat(filesh3, appendmat=False)
        #
        # This test is to fix a problem with 2 different
        # behaviors of io.loadmat (scalar vs 1x1 array for scalars)
        #
        if type(coeff['fmin']) == float:
            fmin = coeff['fmin']
            fmax = coeff['fmax']
        else:
            fmin = coeff['fmin'][0][0]
            fmax = coeff['fmax'][0][0]
        # .. Warning
        # Warning modification takes only one dimension for k
        # if the .sh3 format evolves it may not work anymore
        #
        if type(coeff['Cx.lmax']) == float:
            lmax = coeff['Cx.lmax']
        else:
            lmax = coeff['Cx.lmax'][0][0]
        Cx = SCoeff(typ = 's3',
                    fmin = fmin ,
                    fmax = fmax ,
                    lmax = lmax,
                    data = coeff['Cx.s3'],
                    ind = coeff['Cx.ind'],
                    k = np.squeeze(coeff['Cx.k']))
        Cy = SCoeff(typ= 's3',
                    fmin = fmin ,
                    fmax = fmax ,
                    lmax = lmax,
                    data = coeff['Cy.s3'],
                    ind = coeff['Cy.ind'],
                    k = np.squeeze(coeff['Cy.k']))
        Cz = SCoeff(typ = 's3',
                    fmin = fmin ,
                    fmax = fmax ,
                    data = coeff['Cz.s3'],
                    lmax = lmax,
                    ind = coeff['Cz.ind'],
                    k = np.squeeze(coeff['Cz.k']))
        # reuse an existing SSHCoeff container when present
        if not 'S' in self.__dict__.keys():
            self.S = SSHCoeff(Cx, Cy,Cz)
        else:
            self.S.sets3(Cx,Cy,Cz)
        self.nf = np.shape(Cx.s3)[0]
        self.fGHz = np.linspace(fmin, fmax, self.nf)
    else:
        print(_filesh3, ' does not exist')
def savevsh2(self, filename = ''):
    """ save coeff in a .vsh2 antenna file

    Writes the shape-2 vsh coefficients to a .mat container.
    Does nothing if the target file already exists.

    Parameters
    ----------
    filename : string
        target file name; when empty (default) the name is derived
        from the current antenna file by replacing '.trx' with '.vsh2'
    """
    # create vsh2 file
    # BUGFIX: the original code assigned `_filevsh2 = filename`
    # unconditionally, clobbering the derived default with an empty
    # string whenever filename == ''.
    if filename == '':
        _filevsh2 = self._filename.replace('.trx', '.vsh2')
    else:
        _filevsh2 = filename
    filevsh2 = pyu.getlong(_filevsh2, pstruc['DIRANT'])
    if os.path.isfile(filevsh2):
        print(filevsh2, ' already exist')
    else:
        print('create ', filevsh2, ' file')
        coeff = {}
        coeff['fmin'] = self.fGHz[0]
        coeff['fmax'] = self.fGHz[-1]
        coeff['Br.ind'] = self.C.Br.ind2
        coeff['Bi.ind'] = self.C.Bi.ind2
        coeff['Cr.ind'] = self.C.Cr.ind2
        coeff['Ci.ind'] = self.C.Ci.ind2
        coeff['Br.s2'] = self.C.Br.s2
        coeff['Bi.s2'] = self.C.Bi.s2
        coeff['Cr.s2'] = self.C.Cr.s2
        coeff['Ci.s2'] = self.C.Ci.s2
        io.savemat(filevsh2, coeff, appendmat=False)
def loadsh2(self):
    """ load spherical harmonics coefficient in shape 2

    On success this sets self.S (SSHCoeff) and self.fGHz from the
    fmin/fmax/Nf stored in the .sh2 file.
    """
    _filesh2 = self._filename.split('.')[0]+'.sh2'
    filesh2 = pyu.getlong(_filesh2, pstruc['DIRANT'])
    if os.path.isfile(filesh2):
        coeff = io.loadmat(filesh2, appendmat=False)
        #
        # This test is to fix a problem with 2 different
        # behaviors of io.loadmat (scalar vs 1x1 array for scalars)
        #
        if type(coeff['fmin']) == float:
            fmin = coeff['fmin']
            fmax = coeff['fmax']
        else:
            fmin = coeff['fmin'][0][0]
            fmax = coeff['fmax'][0][0]
        if type(coeff['Cx.lmax']) == float:
            lmax = coeff['Cx.lmax']
        else:
            lmax = coeff['Cx.lmax'][0][0]
        Cx = SCoeff(typ='s2',
                    fmin=fmin,
                    fmax=fmax,
                    lmax = lmax,
                    data=coeff['Cx.s2'],
                    ind=coeff['Cx.ind'])
        Cy = SCoeff(typ='s2',
                    fmin=fmin,
                    fmax=fmax,
                    lmax = lmax,
                    data=coeff['Cy.s2'],
                    ind=coeff['Cy.ind'])
        Cz = SCoeff(typ='s2',
                    fmin=fmin,
                    fmax=fmax,
                    lmax = lmax,
                    data=coeff['Cz.s2'],
                    ind=coeff['Cz.ind'])
        self.S = SSHCoeff(Cx, Cy,Cz)
        Nf = np.shape(Cx.s2)[0]
        self.fGHz = np.linspace(fmin, fmax, Nf)
    else:
        print( _filesh2, ' does not exist')
def loadvsh2(self):
    """ load antenna from .vsh2 file format

    Load antenna's vsh2 file which only contains
    the vsh coefficients in shape 2.
    On success this sets self.C (VSHCoeff) and self.fGHz.
    """
    _filevsh2 = self._filename
    filevsh2 = pyu.getlong(_filevsh2, pstruc['DIRANT'])
    if os.path.isfile(filevsh2):
        coeff = io.loadmat(filevsh2, appendmat=False)
        #
        # This test is to fix a problem with 2 different
        # behaviors of io.loadmat (scalar vs 1x1 array for scalars)
        #
        if type(coeff['fmin']) == float:
            fmin = coeff['fmin']
            fmax = coeff['fmax']
        else:
            fmin = coeff['fmin'][0][0]
            fmax = coeff['fmax'][0][0]
        Br = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
                    data=coeff['Br.s2'], ind=coeff['Br.ind'])
        Bi = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
                    data=coeff['Bi.s2'], ind=coeff['Bi.ind'])
        Cr = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
                    data=coeff['Cr.s2'], ind=coeff['Cr.ind'])
        Ci = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
                    data=coeff['Ci.s2'], ind=coeff['Ci.ind'])
        self.C = VSHCoeff(Br, Bi, Cr, Ci)
        Nf = np.shape(Br.s2)[0]
        self.fGHz = np.linspace(fmin, fmax, Nf)
    else:
        print( _filevsh2, ' does not exist')
def loadvsh3_old(self):
    """ Load antenna vsh coefficients in shape 3 (legacy format)

    Legacy loader with a hard-coded 2-8 GHz / 121-point frequency axis.
    NOTE(review): this uses bare `getlong` while every sibling loader
    uses `pyu.getlong` — verify `getlong` is imported at module level,
    otherwise this raises NameError when called.
    """
    _filevsh3 = self._filename
    filevsh3 = getlong(_filevsh3, pstruc['DIRANT'])
    # legacy files carry no frequency metadata: assume 2-8 GHz
    fmin = 2.
    fmax = 8.
    if os.path.isfile(filevsh3):
        coeff = io.loadmat(filevsh3, appendmat=False)
        Br = VCoeff('s3', fmin, fmax, coeff['Br.s3'],
                    coeff['Br.ind'], coeff['Br.k'])
        Bi = VCoeff('s3', fmin, fmax, coeff['Bi.s3'],
                    coeff['Bi.ind'], coeff['Bi.k'])
        Cr = VCoeff('s3', fmin, fmax, coeff['Cr.s3'],
                    coeff['Cr.ind'], coeff['Cr.k'])
        Ci = VCoeff('s3', fmin, fmax, coeff['Ci.s3'],
                    coeff['Ci.ind'], coeff['Ci.k'])
        self.C = VSHCoeff(Br, Bi, Cr, Ci)
        self.fGHz = np.linspace(fmin, fmax, 121)
    else:
        print(_filevsh3, ' does not exist')
def pol2cart(self, ith):
    """ converts FTheta, FPhi to Fx,Fy,Fz for theta=ith

    Parameters
    ----------
    ith : int
        theta index into self.theta / self.Ft / self.Fp

    Returns
    -------
    Fx, Fy, Fz : np.array
        Cartesian field components at the constant-theta cut

    See Also
    --------
    cart2pol
    """
    theta_i = self.theta[ith]
    azimuth = self.phi
    f_theta = self.Ft[:, ith, :]
    f_phi = self.Fp[:, ith, :]
    # spherical -> Cartesian projection of the tangential field
    cos_t, sin_t = np.cos(theta_i), np.sin(theta_i)
    Fx = f_theta * cos_t * np.cos(azimuth) - f_phi * np.sin(azimuth)
    Fy = f_theta * cos_t * np.sin(azimuth) + f_phi * np.cos(azimuth)
    Fz = -f_theta * sin_t
    return (Fx, Fy, Fz)
def cart2pol(self, Fx, Fy, Fz, ith):
    """ converts Fx,Fy,Fz to Ftheta, Fphi for theta=ith

    Updates self.Ft, self.Fp and self.sqG in place at theta index ith.

    Parameters
    ----------
    Fx : np.array
    Fy : np.array
    Fz : np.array
    ith : theta index

    See Also
    --------
    pol2cart
    """
    th = self.theta[ith]
    ph = self.phi
    Fth = Fx * np.cos(th) * np.cos(ph) + Fy * np.cos(th) * np.sin(ph) - Fz * np.sin(th)
    # BUGFIX: the phi unit vector is (-sin(ph), cos(ph), 0); the original
    # used np.cos(th) in the Fy term, which is inconsistent with the
    # inverse projection implemented in pol2cart above.
    Fph = -Fx * np.sin(ph) + Fy * np.cos(ph)
    SqG = np.sqrt(np.real(Fph * np.conj(Fph) + Fth * np.conj(Fth)))
    self.sqG[:, ith, :] = SqG
    self.Ft[:, ith, :] = Fth
    self.Fp[:, ith, :] = Fph
def forcesympol(A):
    """ plot VSH transform vsh basis in 3D plot

    NOTE(review): the signature takes a single argument ``A`` (unused),
    but the body references ``n``, ``m``, ``theta``, ``phi`` and ``sf``
    which are not defined in this scope — the docstring and body look
    like they belong to a ``plotVW(n, m, theta, phi, sf=False)``
    function. As written this raises NameError when called; confirm
    against upstream history before relying on it.

    Parameters
    ----------
    n,m : integer values (m<=n)
    theta : ndarray
    phi : ndarray
    sf : boolean
        if sf : plotted figures are saved in a *.png file
        else : plotted figures aren't saved

    Examples
    --------
    .. plot::
        :include-source:

        >>> from pylayers.antprop.antenna import *
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> n=5
        >>> m=3
        >>> theta = np.linspace(0,np.pi,30)
        >>> phi = np.linspace(0,2*np.pi,60)
        >>> plotVW(n,m,theta,phi)
    """
    # calculate v and w
    if m <= n:
        # avoid the singularity of W at theta = pi/2
        theta[np.where(theta == np.pi / 2)[0]] = np.pi / 2 + \
            1e-10  # .. todo :: not clean
        x = -np.cos(theta)
        # associated Legendre functions for (n, m)
        Pmm1n, Pmp1n = AFLegendre(n, m, x)
        t1 = np.sqrt((n + m) * (n - m + 1))
        t2 = np.sqrt((n - m) * (n + m + 1))
        y1 = t1 * Pmm1n[:, m, n] - t2 * Pmp1n[:, m, n]
        y2 = t1 * Pmm1n[:, m, n] + t2 * Pmp1n[:, m, n]
        Ephi = np.exp(1j * m * phi)
        cphi = np.cos(m * phi)
        if m == 0:
            # small constant instead of zero to keep log/ratio plots finite
            sphi = 1e-10
        else:
            sphi = np.sin(m * phi)
        ny = len(y1)
        ne = len(Ephi)
        vy = np.ones(ny)
        ve = np.ones(ne)
        # outer products build the (theta x phi) grids
        Y1 = np.outer(y1, ve)
        Y2 = np.outer(y2, ve)
        EPh = np.outer(vy, Ephi)
        const = (-1.0) ** n / (2 * np.sqrt(n * (n + 1)))
        V = const * Y1 * EPh
        #V[np.isinf(V)|isnan(V)]=0
        Vcos = cphi * V
        Vsin = sphi * V
        if m == 0:
            #W=np.zeros((len(theta),len(phi)))
            W = np.ones((len(theta), len(phi))) * 1e-10
        else:
            Waux = Y2 * EPh
            x1 = 1.0 / x
            W = np.outer(x1, const) * Waux
        Wcos = cphi * W
        Wsin = sphi * W
        # plot V and W
        Ntheta = np.size(theta)
        vt = np.ones(Ntheta)
        Nphi = np.size(phi)
        vp = np.ones(Nphi)
        Phi = np.outer(vt, phi)
        Theta = np.outer(theta, vp)
        #figdirV='/home/rburghel/Bureau/bases_decomposition_VW/base_V_Vsin_Vcos/'
        figdirV = './'
        ext1 = '.pdf'
        ext2 = '.eps'
        ext3 = '.png'
        # |V|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(V) * np.cos(Phi) * np.sin(Theta)
        Y = abs(V) * np.sin(Phi) * np.sin(Theta)
        Z = abs(V) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirV + 'V' + str(n) + str(m)
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')
        # |V cos(m phi)|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Vcos) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Vcos) * np.sin(Phi) * np.sin(Theta)
        Z = abs(Vcos) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirV + 'Vcos' + str(n) + str(m) + '.jpg'
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')
        # |V sin(m phi)|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Vsin) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Vsin) * np.sin(Phi) * np.sin(Theta)
        Z = abs(Vsin) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirV + 'Vsin' + str(n) + str(m) + '.jpg'
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')
        #figdirW='/home/rburghel/Bureau/bases_decomposition_VW/base_W_Wsin_Wcos/'
        figdirW = './'
        # |W|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(W) * np.cos(Phi) * np.sin(Theta)
        Y = abs(W) * np.sin(Phi) * np.sin(Theta)
        Z = abs(W) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirW + 'W' + str(n) + str(m)
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')
        # |W cos(m phi)|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Wcos) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Wcos) * np.sin(Phi) * np.sin(Theta)
        Z = abs(Wcos) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirW + 'Wcos' + str(n) + str(m)
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')
        # |W sin(m phi)|
        # NOTE(review): the figure/X/Y creation below is duplicated in
        # the original source (the first fig/X/Y triple is discarded).
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Wsin) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Wsin) * np.sin(Phi) * np.sin(Theta)
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Wsin) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Wsin) * np.sin(Phi) * np.sin(Theta)
        Z = abs(Wsin) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirW + 'Wsin' + str(n) + str(m)
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')
        plt.show()
    else:
        print("Error: m>n!!!")
def compdiag(k, A, th, ph, Fthr, Fphr, typ='modulus', lang='english', fontsize=18):
    """ makes comparison between original pattern and reconstructed pattern

    Draws a 2x2 subplot figure: original F_theta / F_phi (top row, from
    A.Ftheta / A.Fphi) versus reconstructed F_theta / F_phi (bottom row),
    sharing per-quantity color scales.

    Parameters
    ----------
    k : frequency index
    A : Antenna
    ph : phi base (1 x Np)
    th : theta base (1 x Nt)
    Fthr : Fth output of Fsynth Nf x (Ntheta*Tphi)
    Fphr : Fth output of Fsynth Nf x (Ntheta*Tphi)
    typ : 'modulus' | 'real' | 'imag' | 'phase'
        which quantity of the complex pattern is displayed
    lang = 'french'
         = 'english'
    fontsize : int
    """
    Nf = np.shape(Fthr)[0]
    #Fthr = Fthr.reshape(Nf,len(th),len(ph))
    #Fphr = Fphr.reshape(Nf,len(th),len(ph))
    plt.figure()
    rc('text', usetex=True)
    Ftho = A.Ftheta
    Fpho = A.Fphi
    # shared modulus limits for Fthr, Ftho, Fphr, Fpho
    maxTr = abs(Fthr[:, :, k]).max()
    maxTo = abs(Ftho[:, :, k ]).max()
    MmT = max(maxTr, maxTo)
    minTr = abs(Fthr[ :, :, k ]).min()
    minTo = abs(Ftho[ :, :, k ]).min()
    mmT = min(minTr, minTo)
    maxPr = abs(Fphr[ :, :, k ]).max()
    maxPo = abs(Fpho[ :, :, k ]).max()
    MmP = max(maxPr, maxPo)
    minPr = abs(Fphr[ :, :, k ]).min()
    minPo = abs(Fpho[ :, :, k ]).min()
    mmP = min(minPr, minPo)
    # shared real-part limits for Fthr, Ftho, Fphr, Fpho
    maxTrr = np.real(Fthr[ :, :, k ]).max()
    maxTor = np.real(Ftho[ :, :, k ]).max()
    MrT = max(maxTrr, maxTor)
    minTrr = np.real(Fthr[ :, :, k ]).min()
    minTor = np.real(Ftho[ :, :, k ]).min()
    mrT = min(minTrr, minTor)
    maxPrr = np.real(Fphr[ :, :, k ]).max()
    maxPor = np.real(Fpho[ :, :, k ]).max()
    MrP = max(maxPrr, maxPor)
    minPrr = np.real(Fphr[ :, :, k ]).min()
    minPor = np.real(Fpho[ :, :, k ]).min()
    mrP = min(minPrr, minPor)
    # shared imaginary-part limits for Fthr, Ftho, Fphr, Fpho
    maxTri = np.imag(Fthr[ :, :, k ]).max()
    maxToi = np.imag(Ftho[ :, :, k ]).max()
    MiT = max(maxTri, maxToi)
    minTri = np.imag(Fthr[ :, :, k ]).min()
    minToi = np.imag(Ftho[ :, :, k ]).min()
    miT = min(minTri, minToi)
    maxPri = np.imag(Fphr[ :, :, k ]).max()
    maxPoi = np.imag(Fpho[ :, :, k ]).max()
    MiP = max(maxPri, maxPoi)
    minPri = np.imag(Fphr[ :, :, k ]).min()
    minPoi = np.imag(Fpho[ :, :, k ]).min()
    miP = min(minPri, minPoi)
    # shared phase limits for Fth, Fph
    maxATr = np.angle(Fthr[ :, :, k ]).max()
    maxATo = np.angle(Ftho[ :, :, k ]).max()
    maT = max(maxATr, maxATo)
    minATr = np.angle(Fthr[ :, :, k ]).min()
    minATo = np.angle(Ftho[ :, :, k ]).min()
    maT0 = min(minATr, minATo)
    maxAPr = np.angle(Fphr[ :, :, k ]).max()
    maxAPo = np.angle(Fpho[ :, :, k ]).max()
    maP = max(maxAPr, maxAPo)
    minAPr = np.angle(Fphr[ :, :, k ]).min()
    minAPo = np.angle(Fpho[ :, :, k ]).min()
    maP0 = min(minAPr, minAPo)
    ax = plt.axes([0, 0, 360, 180])
    # radians -> degrees conversion factor
    rtd = 180 / np.pi
    # subplot 221 : original F_theta
    plt.subplot(221)
    if typ == 'modulus':
        #
        #cmap=cm.jet
        #pcolor(A.phi*rtd,A.theta*rtd,abs(Ftho[k,:,:]),vmin=0,vmax=mmT)
        #
        #cmap= gray
        #pcolor(A.phi*rtd,A.theta*rtd,abs(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
        #
        #cmap=cm.hot
        plt.pcolor(A.phi * rtd, A.theta * rtd, abs(Ftho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=mmT, vmax=MmT)
        plt.title(r'$|F_{\theta}|$ original', fontsize=fontsize)
    if typ == 'real':
        #pcolor(A.phi*rtd,A.theta*rtd,real(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.real(Ftho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=mrT, vmax=MrT)
        title(r'Re ($F_{\theta}$) original', fontsize=fontsize)
    if typ == 'imag':
        #pcolor(A.phi*rtd,A.theta*rtd,imag(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
        pcolor(A.phi * rtd, A.theta * rtd, np.imag(Ftho[ :, :, k ]),
               cmap=cm.hot_r, vmin=miT, vmax=MiT)
        title(r'Im ($F_{\theta}$) original', fontsize=fontsize)
    if typ == 'phase':
        #pcolor(A.phi*rtd,A.theta*rtd,angle(Ftho[k,:,:]),cmap=cm.gray_r,vmin=maT0,vmax=maT)
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Ftho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=maT0, vmax=maT)
        if lang == 'french':
            plt.title(r'Arg ($F_{\theta}$) original', fontsize=fontsize)
        else:
            plt.title(r'Ang ($F_{\theta}$) original', fontsize=fontsize)
    plt.axis([0, 360, 0, 180])
    plt.ylabel(r'$\theta$ (deg)', fontsize=fontsize)
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    cbar = plt.colorbar()
    for t in cbar.ax.get_yticklabels():
        t.set_fontsize(fontsize)
    # subplot 222 : original F_phi
    plt.subplot(222)
    if typ == 'modulus':
        plt.pcolor(A.phi * rtd, A.theta * rtd, abs(Fpho[:, :, k ]),
                   cmap=cm.hot_r, vmin=mmP, vmax=MmP)
        plt.title('$|F_{\phi}|$ original', fontsize=fontsize)
    if typ == 'real':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.real(Fpho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=mrP, vmax=MrP)
        plt.title('Re ($F_{\phi}$) original', fontsize=fontsize)
    if typ == 'imag':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.imag(Fpho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=miP, vmax=MiP)
        plt.title('Im ($F_{\phi}$) original', fontsize=fontsize)
    if typ == 'phase':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fpho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=maP0, vmax=maP)
        if lang == 'french':
            plt.title('Arg ($F_{\phi}$) original', fontsize=fontsize)
        else:
            plt.title('Ang ($F_{\phi}$) original', fontsize=fontsize)
    plt.axis([0, 360, 0, 180])
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    cbar = plt.colorbar()
    for t in cbar.ax.get_yticklabels():
        t.set_fontsize(fontsize)
    # subplot 223 : reconstructed F_theta
    plt.subplot(223)
    if typ == 'modulus':
        plt.pcolor(ph * rtd, th * rtd, abs(Fthr[:, :, k ]),
                   cmap=cm.hot_r, vmin=mmT, vmax=MmT)
        if lang == 'french':
            plt.title(r'$|F_{\theta}|$ reconstruit', fontsize=fontsize)
        else:
            plt.title(r'$|F_{\theta}|$ reconstructed', fontsize=fontsize)
    if typ == 'real':
        plt.pcolor(ph * rtd, th * rtd, np.real(Fthr[:,:,k ]),
                   cmap=cm.hot_r, vmin=mrT, vmax=MrT)
        if lang == 'french':
            title(r'Re ($F_{\theta}$) reconstruit', fontsize=fontsize)
        else:
            title(r'Re ($F_{\theta}$) reconstructed', fontsize=fontsize)
    if typ == 'imag':
        plt.pcolor(ph * rtd, th * rtd, np.imag(Fthr[ :, :, k ]),
                   cmap=cm.hot_r, vmin=miT, vmax=MiT)
        if lang == 'french':
            plt.title(r'Im ($F_{\theta}$) reconstruit', fontsize=fontsize)
        else:
            plt.title(r'Im ($F_{\theta}$) reconstructed', fontsize=fontsize)
    if typ == 'phase':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fthr[:,:,k]),
                   cmap=cm.hot_r, vmin=maT0, vmax=maT)
        if lang == 'french':
            plt.title(r'Arg ($F_{\theta}$) reconstruit', fontsize=fontsize)
        else:
            plt.title(r'Ang ($F_{\theta}$) reconstructed', fontsize=fontsize)
    plt.axis([0, 360, 0, 180])
    plt.xlabel(r'$\phi$ (deg)', fontsize=fontsize)
    plt.ylabel(r'$\theta$ (deg)', fontsize=fontsize)
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    cbar = plt.colorbar()
    for t in cbar.ax.get_yticklabels():
        t.set_fontsize(fontsize)
    # subplot 224 : reconstructed F_phi
    plt.subplot(224)
    if typ == 'modulus':
        plt.pcolor(ph * rtd, th * rtd, abs(Fphr[ :, :,k]),
                   cmap=cm.hot_r, vmin=mmP, vmax=MmP)
        if lang == 'french':
            plt.title('$|F_{\phi}|$ reconstruit', fontsize=fontsize)
        else:
            plt.title('$|F_{\phi}|$ reconstructed', fontsize=fontsize)
    if typ == 'real':
        plt.pcolor(ph * rtd, th * rtd, np.real(Fphr[ :, :,k]),
                   cmap=cm.hot_r, vmin=mrP, vmax=MrP)
        if lang == 'french':
            plt.title('Re ($F_{\phi}$) reconstruit', fontsize=fontsize)
        else:
            plt.title('Re ($F_{\phi}$) reconstructed', fontsize=fontsize)
    if typ == 'imag':
        plt.pcolor(ph * rtd, th * rtd, np.imag(Fphr[ :, :,k]),
                   cmap=cm.hot_r, vmin=miP, vmax=MiP)
        if lang == 'french':
            plt.title('Im ($F_{\phi}$) reconstruit', fontsize=fontsize)
        else:
            plt.title('Im ($F_{\phi}$) reconstructed', fontsize=fontsize)
    if typ == 'phase':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fphr[ :, :,k]),
                   cmap=cm.hot_r, vmin=maP0, vmax=maP)
        if lang == 'french':
            plt.title('Arg ($F_{\phi}$) reconstruit', fontsize=fontsize)
        else:
            plt.title('Ang ($F_{\phi}$) reconstructed', fontsize=fontsize)
    plt.axis([0, 360, 0, 180])
    plt.xlabel(r'$\phi$ (deg)', fontsize=fontsize)
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    cbar = plt.colorbar()
    for t in cbar.ax.get_yticklabels():
        t.set_fontsize(fontsize)
def BeamGauss(theta,phi,Gmax=19.77,HPBW_az=10,HPBW_el=40,Tilt=10):
    """ Beam with a Gaussian shape

    The beam peaks at theta = Tilt + 90 degrees, phi = 0, with value
    10**(Gmax/10), and falls to half power at +/- HPBW/2 in each plane.

    Parameters
    ----------
    theta : float
        angle in degree
    phi : float
        angle in degree
    Gmax : float
        maximum gain in dB
    HPBW_az : float
        Half Power Beamwidth azimuth degree
    HPBW_el : float
        Half Power Beamwidth elevation degree
    Tilt : float
        angle in degree
    """
    deg2rad = np.pi/180.
    # normalization 2*sqrt(ln 2) makes the exponent equal ln 2 at HPBW/2
    half_width = 2*np.sqrt(np.log(2))
    az = deg2rad*(theta-(Tilt+90))*half_width
    el = deg2rad*phi*half_width
    az_exponent = -(az/(HPBW_az*deg2rad))**2
    el_exponent = -(el/(HPBW_el*deg2rad))**2
    peak = 10**(Gmax/10.)
    return(peak*np.exp(az_exponent)*np.exp(el_exponent))
def show3D(F, theta, phi, k, col=True):
    """ show 3D matplotlib diagram

    Parameters
    ----------
    F : ndarray (Nf,Nt,Np)
    theta : ndarray (1xNt)
        angle
    phi : ndarray (1xNp)
        angle
    k : int
        frequency index
    col : boolean
        if col -> color coded plot3D
        if col == False -> simple plot3D

    Examples
    --------

    .. plot::
        :include-source:

        >>> import matplotlib.pyplot as plt
        >>> from pylayers.antprop.antenna import *
        >>> A = Antenna('defant.vsh3')
        >>> A.eval(grid=True)

    Warnings
    --------
    len(theta) must be equal with shape(F)[1]
    len(phi) must be equal with shape(F)[2]
    """
    nth = len(theta)
    nph = len(phi)
    # dimension sanity checks are reported but not enforced
    if k >= np.shape(F)[0]:
        print('Error: frequency index k not in F defined interval')
    if nth != np.shape(F)[1]:
        print('Error: shape mistmatch between theta and F')
    if nph != np.shape(F)[2]:
        print('Error: shape mistmatch between phi and F')
    fig = plt.figure()
    ax = axes3d.Axes3D(fig)
    V = F[k, :, :]
    vt = np.ones(nth)
    vp = np.ones(nph)
    # (Nt,Np) angle grids via outer products
    Th = np.outer(theta, vp)
    Ph = np.outer(vt, phi)
    # spherical -> Cartesian with radius |F|
    X = abs(V) * np.cos(Ph) * np.sin(Th)
    Y = abs(V) * np.sin(Ph) * np.sin(Th)
    Z = abs(V) * np.cos(Th)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    if (col):
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
    else:
        ax.plot3D(np.ravel(X), np.ravel(Y), np.ravel(Z))
class AntPosRot(Antenna):
    """ Antenna + position + Rotation

    An Antenna carrying a position ``p`` and a rotation matrix ``T``
    (antenna frame -> global frame), able to evaluate its radiated
    field at arbitrary points.
    """
    def __init__(self, name, p, T):
        # name : antenna identifier forwarded to Antenna
        # p : antenna position, array-like (3,)
        # T : 3x3 rotation matrix of the antenna frame
        Antenna.__init__(self, name)
        self.p = p
        self.T = T

    def _show3(self, **kwargs):
        # delegate to Antenna._show3 with this antenna's pose applied
        Antenna._show3(self, newfig=False, interact=False, T=self.T, po=self.p, **kwargs)

    def field(self, p):
        """ evaluate the field radiated toward point(s) p

        Parameters
        ----------
        p : np.array (N,3)
            observation point(s) in the global frame; a single (3,)
            point is also accepted

        Returns
        -------
        EP : np.array
            field weighted by the free-space propagation term
            exp(-j 2 pi f d / 0.3) / d  (d in meters, f in GHz)
        """
        rad_to_deg = 180 / np.pi
        assert p.shape[-1] == 3
        # promote a single point to a (1,3) array
        if len(p.shape) == 1:
            r = p[None, :] - self.p[None, :]
        else:
            r = p - self.p[None, :]
        dist = np.sqrt(np.sum(r * r, axis=-1))[:, None]
        u = r / dist
        # direction angles in the global frame
        th = np.arccos(u[:, 2])
        ph = np.arctan2(u[:, 1], u[:, 0])
        tang = np.vstack((th, ph)).T
        #print("global",tang*rad_to_deg)
        # convert global angles into the antenna local frame
        Rt, tangl = geu.BTB_tx(tang, self.T)
        #print("local",tangl*rad_to_deg)
        self.eval(th=tangl[:, 0], ph=tangl[:, 1], grid=False)
        # combine theta/phi components with the rotated frame axes
        E = (self.Ft[:, None, :] * self.T[:, 2][None, :, None] + self.Fp[:, None, :] * self.T[:, 0][None, :, None])
        # 0.3 = c in m/ns so that fGHz * d / 0.3 is the phase in cycles
        P = np.exp(-1j * 2 * np.pi * self.fGHz[None, None, :] * dist[..., None] / 0.3) / dist[..., None]
        EP = E * P
        return(EP)
#Rr, rangl = geu.BTB_rx(rang, self.Tr)
def _gain(Ft,Fp):
""" calculates antenna gain
Returns
-------
G : np.array(Nt,Np,Nf) dtype:float
linear gain
or np.array(Nr,Nf)
sqG : np.array(Nt,Np,Nf) dtype:float
linear sqare root of gain
or np.array(Nr,Nf)
efficiency : np.array (,Nf) dtype:float
efficiency
hpster : np.array (,Nf) dtype:float
half power solid angle : 1 ~ 4pi steradian
ehpbw : np.array (,Nf) dtyp:float
equivalent half power beamwidth (radians)
Notes
-----
.. math:: G(\theta,phi) = |F_{\\theta}|^2 + |F_{\\phi}|^2
"""
G = np.real( Fp * np.conj(Fp)
+ Ft * np.conj(Ft) )
return(G)
def _hpbw(G,th,ph):
""" half power beamwidth
Parameters
----------
Gain : Ftheta
Nt x Np
th : np.array
,Nt
ph : np.array
,Np
Returns
-------
ehpbw : effective half power beamwidth
hpster : half power solid angle (steradians)
"""
#
GdB = 10*np.log10(G)
GdBmax = np.max(np.max(GdB,axis=0),axis=0)
dt = th[1]-th[0]
dp = ph[1]-ph[0]
Nt = len(th)
Np = len(ph)
Nf = GdB.shape[2]
hpster = np.zeros(Nf)
ehpbw = np.zeros(Nf)
for k in range(Nf):
U = np.zeros((Nt,Np))
A = GdB[:,:,k]*np.ones(Nt)[:,None]*np.ones(Np)[None,:]
u = np.where(A>(GdBmax[k]-3))
U[u] = 1
V = U*np.sin(th)[:,None]
hpster[k] = np.sum(V)*dt*dp/(4*np.pi)
ehpbw[k] = np.arccos(1-2*hpster[k])
return ehpbw,hpster
def _efficiency(G,th,ph):
""" determine antenna efficiency
Parameters
----------
Gain : Ftheta
Nt x Np
th : np.array
,Nt
ph : np.array
,Np
Returns
-------
oefficiency :
"""
#
dt = th[1]-th[0]
dp = ph[1]-ph[0]
Nt = len(th)
Np = len(ph)
Gs = G*np.sin(th)[:,None,None]*np.ones(Np)[None,:,None]
efficiency = np.sum(np.sum(Gs,axis=0),axis=0)*dt*dp/(4*np.pi)
return efficiency
def _dirmax(G, th, ph):
    """ determine information in the Gmax direction

    Finds the direction of maximum gain and the polarisation state
    (unit triad hl, sl, el) in that direction.

    NOTE(review): the body references ``Ft`` and ``Fp`` which are not
    parameters and are not defined in this scope — as written this
    raises NameError unless they exist as module-level globals at call
    time; confirm against the caller before relying on it.

    Parameters
    ----------
    G : np.array
        linear gain, Nt x Np x Nf
    th : np.array
        ,Nt
    ph : np.array
        ,Np

    Returns
    -------
    GdBmax : per-frequency maximum gain (dB)
    theta_max, phi_max : angles of the first maximum
    (hl, sl, el) : polarisation triad at the maximum
    """
    GdB = 10*np.log10(G)
    # GdBmax (,Nf)
    GdBmax = np.max(np.max(GdB,axis=0),axis=0)
    # first grid index reaching the maximum
    umax = np.array(np.where(GdB==GdBmax))[:,0]
    theta_max = th[umax[0]]
    phi_max = ph[umax[1]]
    # local spherical basis at the maximum direction
    M = geu.SphericalBasis(np.array([[theta_max,phi_max]]))
    sl = M[:,2].squeeze()
    uth = M[:,0]
    uph = M[:,1]
    # polarisation vector from the field components at the maximum
    el = Ft[tuple(umax)]*uth + Fp[tuple(umax)]*uph
    eln = el/np.linalg.norm(el)
    el = np.abs(eln.squeeze())
    hl = np.cross(sl,el)
    return GdBmax,theta_max,phi_max,(hl,sl,el)
def F0(nu, sigma):
    """ F0 function for horn antenna pattern

    Parameters
    ----------
    nu : np.array
        (....,nf)
    sigma : np.array
        (,nf)

    Notes
    -----
    Implements eq. 18.3.2 of
    http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
    built from Fresnel integrals (scipy.special.fresnel returns (S, C)).
    """
    ratio = nu / sigma
    # Fresnel integrals at the shifted arguments
    s_plus, c_plus = fresnel(ratio + sigma)
    s_minus, c_minus = fresnel(ratio - sigma)
    phase = np.exp(1j * (np.pi / 2) * ratio ** 2)
    diff = (c_plus - 1j * s_plus) - (c_minus - 1j * s_minus)
    return (1. / sigma) * phase * diff
def F1(nu, sigma):
    """ F1 function for horn antenna pattern

    Average of F0 at nu +/- 0.5 — eq. 18.3.3 of
    http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
    """
    upper = F0(nu + 0.5, sigma)
    lower = F0(nu - 0.5, sigma)
    return 0.5 * (upper + lower)
# Run the module doctests when executed as a script
if (__name__ == "__main__"):
    doctest.testmod()
|
buguen/pylayers
|
pylayers/antprop/antenna.py
|
Python
|
lgpl-3.0
| 170,791
|
[
"Gaussian",
"Mayavi"
] |
e86422456c6ced663881ed6a56ace61e077bbf3655a6c3906aae410005e37b23
|
'''
PMDL Export Blender Addon
By Jack Andersen <jackoalan@gmail.com>
This file defines the `pmdl_draw_general` class to generate vertex+index
buffers and mesh arrays to draw them. `PAR1` files also include bone-weight
coefficients per-vertex for vertex-shader-driven skeletal evaluation.
'''
import struct
import bpy
from . import pmdl_loop_vert
# Round up to nearest 32 multiple
# Round up to nearest 32 multiple
def ROUND_UP_32(num):
    """Round num up to the next multiple of 32; multiples pass through unchanged."""
    remainder = num % 32
    if remainder:
        return num - remainder + 32
    return num
# Round up to nearest 4 multiple
# Round up to nearest 4 multiple
def ROUND_UP_4(num):
    """Round num up to the next multiple of 4; multiples pass through unchanged."""
    remainder = num % 4
    if remainder:
        return num - remainder + 4
    return num
# This routine conditionally inserts a loop into a multi-tiered
# array/set collection; simultaneously relating verts to loops and
# eliminating redundant loops (containing identical UV coordinates)
def _augment_loop_vert_array(lv_array, mesh, loop):
    """Insert `loop` into `lv_array` unless an equivalent loop is present.

    The loop joins an existing set when every UV layer gives it exactly
    the same (u, v) as a loop already in that set; otherwise it starts a
    new single-element set.
    """
    # Wrap the loop for comparative testing against stored entries
    candidate = pmdl_loop_vert.loop_vert(mesh, loop)
    # Quick pass: already recorded in some set -> nothing to do
    for loop_set in lv_array:
        if candidate in loop_set:
            return
    # Extended pass: share a set with the first loop whose UV coordinates
    # match on every layer
    for loop_set in lv_array:
        for member in loop_set:
            if all(uv_layer.data[member.loop.index].uv[0] ==
                   uv_layer.data[loop.index].uv[0] and
                   uv_layer.data[member.loop.index].uv[1] ==
                   uv_layer.data[loop.index].uv[1]
                   for uv_layer in mesh.uv_layers):
                loop_set.append(candidate)
                return
    # No match anywhere; the loop starts a fresh set
    lv_array.append([candidate])
# Get loop set from collection generated with above method;
# containing a specified loop
def _get_loop_set(lv_array, mesh, loop):
    """Return the set within `lv_array` that contains `loop`, or None."""
    target = pmdl_loop_vert.loop_vert(mesh, loop)
    return next((loop_set for loop_set in lv_array if target in loop_set),
                None)
# Method to find triangle opposite another triangle over two loop-vert sets
def _find_polygon_opposite_lvs(mesh, original_triangle, lv_a, lv_b):
a_idx = lv_a[0].loop.vertex_index
b_idx = lv_b[0].loop.vertex_index
for triangle in mesh.polygons:
if triangle == original_triangle:
continue
if (a_idx in triangle.vertices and b_idx in triangle.vertices):
return triangle
return None
class pmdl_draw_general:
    """Generates PMDL '_GEN' draw data from Blender meshes.

    Vertices are grouped into 'collections': each collection holds up to
    a 16-bit index space worth of vertices, the tri-strip element lists
    that reference them, and bookkeeping (UV layer count, max bone count)
    used when serialising the vertex/element/index buffers.
    """
    def __init__(self):
        # 4-byte ID string used in generated PMDL file
        self.file_identifier = '_GEN'
        # Array that holds collections. A collection is a 16-bit index
        # worth of vertices, elements referencing them, and a
        # primitive array to draw them
        self.collections = []
    # If vertex index space is exceeded for a single additional vertex,
    # a new collection is created and returned by this routine
    def _check_collection_overflow(self, mesh, collection, rigger):
        """Return (collection, started_new): `collection` itself when it
        still has room, otherwise a freshly appended empty collection."""
        max_bone_count = 0;
        if rigger:
            max_bone_count = rigger.max_bone_count
        # 65535 is the last representable 16-bit element index
        if not collection or len(collection['vertices']) >= 65535:
            new_collection = {'uv_count':len(mesh.uv_layers), 'max_bone_count':max_bone_count, 'vertices':[], 'vert_weights':[], 'tri_strips':[]}
            self.collections.append(new_collection)
            return new_collection, True
        else:
            return collection, False
    # Augments draw generator with a single blender MESH data object
    def add_mesh(self, pmdl, obj, rigger):
        """Trace `obj`'s triangles into tri-strip primitives and merge its
        vertices into a compatible collection.

        `pmdl` is the exporter object; only its prog_add_polygon()
        progress hook is used here. `rigger`, when truthy, supplies bone
        weights per loop-vert and a max_bone_count.
        """
        max_bone_count = 0;
        if rigger:
            max_bone_count = rigger.max_bone_count
        mesh = obj.data
        print("Optimising mesh:", obj.name)
        opt_gpu_vert_count = 0
        # First, generate compressed loop-vertex array-array-set collection
        loop_vert_array = []
        for vert in mesh.vertices:
            loop_verts = []
            for loop in mesh.loops:
                if loop.vertex_index == vert.index:
                    _augment_loop_vert_array(loop_verts, mesh, loop)
            loop_vert_array.append(loop_verts)
        # Find best collection to add mesh data into
        # (must match UV layer count and bone count; 65000 leaves headroom
        # below the hard 65535 overflow limit)
        best_collection = None
        for collection in self.collections:
            if (collection['uv_count'] == len(mesh.uv_layers) and
                collection['max_bone_count'] == max_bone_count and
                len(collection['vertices']) < 65000):
                best_collection = collection
                break
        if not best_collection:
            # Create a new one if no good one found
            best_collection, is_new_collection = self._check_collection_overflow(mesh, None, rigger)
        # If rigging, start an array of bone names to be bound to contiguous tri-strips
        tri_strip_bones = []
        tri_strip_bones_overflow = False
        # Now begin generating draw primitives
        visited_polys = set()
        for poly in mesh.polygons:
            # Skip if already visited
            if poly in visited_polys:
                continue
            # Begin a tri-strip primitive (array of vert indices)
            tri_strip = []
            # Temporary references to trace out strips of triangles
            temp_poly = poly
            # Rolling references of last two emitted loop-vert sets (b is older)
            last_loop_vert_a = None
            last_loop_vert_b = None
            # In the event of vertex-buffer overflow, this will be made true;
            # resulting in the immediate splitting of a tri-strip
            is_new_collection = False
            # As long as there is a connected polygon to visit
            while temp_poly:
                if 0 == len(tri_strip): # First triangle in strip
                    # Add three loop-vert vertices to tri-strip
                    for poly_loop_idx in temp_poly.loop_indices:
                        poly_loop = mesh.loops[poly_loop_idx]
                        loop_vert = _get_loop_set(loop_vert_array[poly_loop.vertex_index], mesh, poly_loop)
                        # If rigging, ensure that necessary bones are available and get weights
                        weights = None
                        if rigger:
                            weights = rigger.augment_bone_array_with_lv(obj, tri_strip_bones, loop_vert)
                            if weights is None:
                                tri_strip_bones_overflow = True
                                break
                        if loop_vert not in best_collection['vertices']:
                            best_collection, is_new_collection = self._check_collection_overflow(mesh, best_collection, rigger)
                            if is_new_collection:
                                break
                            best_collection['vertices'].append(loop_vert)
                            best_collection['vert_weights'].append(weights)
                        tri_strip.append(best_collection['vertices'].index(loop_vert))
                        last_loop_vert_b = last_loop_vert_a
                        last_loop_vert_a = loop_vert
                        opt_gpu_vert_count += 1
                        #print('appended initial loop', loop_vert[0].loop.index)
                    if is_new_collection:
                        break
                else: # Not the first triangle in strip; look up all three loop-verts,
                      # ensure it matches last-2 rolling reference, emit remaining loop-vert
                    # Iterate loop verts
                    odd_loop_vert_out = None
                    loop_vert_match_count = 0
                    for poly_loop_idx in temp_poly.loop_indices:
                        poly_loop = mesh.loops[poly_loop_idx]
                        loop_vert = _get_loop_set(loop_vert_array[poly_loop.vertex_index], mesh, poly_loop)
                        if (loop_vert == last_loop_vert_a or loop_vert == last_loop_vert_b):
                            loop_vert_match_count += 1
                            continue
                        odd_loop_vert_out = loop_vert
                    # Ensure there are two existing matches to continue tri-strip
                    if loop_vert_match_count != 2 or not odd_loop_vert_out:
                        break
                    # If rigging, ensure that necessary bones are available and get weights
                    weights = None
                    if rigger:
                        weights = rigger.augment_bone_array_with_lv(obj, tri_strip_bones, odd_loop_vert_out)
                        if weights is None:
                            tri_strip_bones_overflow = True
                            break
                    # Add to tri-strip
                    if odd_loop_vert_out not in best_collection['vertices']:
                        best_collection, is_new_collection = self._check_collection_overflow(mesh, best_collection, rigger)
                        if is_new_collection:
                            break
                        best_collection['vertices'].append(odd_loop_vert_out)
                        best_collection['vert_weights'].append(weights)
                    tri_strip.append(best_collection['vertices'].index(odd_loop_vert_out))
                    last_loop_vert_b = last_loop_vert_a
                    last_loop_vert_a = odd_loop_vert_out
                    opt_gpu_vert_count += 1
                # This polygon is good
                visited_polys.add(temp_poly)
                pmdl.prog_add_polygon()
                # Find a polygon directly connected to this one to continue strip
                temp_poly = _find_polygon_opposite_lvs(mesh, temp_poly, last_loop_vert_a, last_loop_vert_b)
                if temp_poly in visited_polys:
                    temp_poly = None
            # Add tri-strip to element array
            best_collection['tri_strips'].append({'mesh':obj, 'strip':tri_strip, 'strip_bones':tri_strip_bones})
            if tri_strip_bones_overflow:
                tri_strip_bones = []
                tri_strip_bones_overflow = False
        print("GPU will receive", opt_gpu_vert_count, "unified tri-strip vertices out of", len(mesh.loops), "original vertices")
        print("Mesh contains", len(mesh.polygons), "triangles\n")
    # Generate binary vertex buffer of collection index
    def generate_vertex_buffer(self, index, endian_char, psize):
        """Serialise collection `index` into a packed float vertex buffer.

        Layout per vertex: position(3f) normal(3f) uv(2f per layer)
        weights(max_bones f, zero-padded). Returns
        (uv_count, max_bones, bytes) or None for an empty slot.
        `psize` is unused here; kept for interface symmetry with the
        other generate_* methods.
        """
        collection = self.collections[index]
        if not collection:
            return None
        # Generate vert buffer struct
        vstruct = struct.Struct(endian_char + 'f')
        # If rigging, determine maximum number of bones in this collection
        max_bones = 0
        for i in range(len(collection['vertices'])):
            weight_count = 0
            if collection['vert_weights'][i]:
                weight_count = len(collection['vert_weights'][i])
            if weight_count > max_bones:
                max_bones = weight_count
        max_bones = ROUND_UP_4(max_bones)
        # Build byte array
        vert_bytes = bytearray()
        for i in range(len(collection['vertices'])):
            loop_vert = collection['vertices'][i]
            bloop = loop_vert[0]
            mesh = bloop.mesh
            bvert = mesh.vertices[bloop.loop.vertex_index]
            #print(bvert.co)
            # Position
            for comp in bvert.co:
                vert_bytes += vstruct.pack(comp)
            # Normal
            for comp in bvert.normal:
                vert_bytes += vstruct.pack(comp)
            # UVs
            for uv_idx in range(collection['uv_count']):
                for comp in mesh.uv_layers[uv_idx].data[bloop.loop.index].uv:
                    vert_bytes += vstruct.pack(comp)
            # Weights (zero-padded up to the rounded max bone count)
            weights = collection['vert_weights'][i]
            for j in range(max_bones):
                if j < len(weights):
                    vert_bytes += vstruct.pack(weights[j])
                else:
                    vert_bytes += vstruct.pack(0.0)
        return collection['uv_count'], max_bones, vert_bytes
    # Generate binary element buffer of collection index
    def generate_element_buffer(self, index, endian_char, psize):
        """Stitch the collection's tri-strips into one 16-bit element
        buffer, joining consecutive strips of the same mesh/bone-set with
        degenerate triangles, and return (primitive hierarchy, bytes).
        `psize` is unused here; kept for interface symmetry.
        """
        collection = self.collections[index]
        if not collection:
            return None
        # Generate element buffer struct
        estruct = struct.Struct(endian_char + 'H')
        # Build mesh-primitive hierarchy
        last_mesh = collection['tri_strips'][0]['mesh']
        mesh_primitives = {'mesh':last_mesh, 'primitives':[]}
        collection_primitives = [mesh_primitives]
        # Collection element byte-array
        cur_offset = 0
        element_bytes = bytearray()
        # Last element index entry and strip length for forming degenerate strip
        last_elem = None
        strip_len = 0
        # Last strip bone array (for rigging)
        last_strip_bones = collection['tri_strips'][0]['strip_bones']
        # Build single degenerate tri-strip
        for strip in collection['tri_strips']:
            #print('new strip', collection['tri_strips'].index(strip))
            if last_mesh != strip['mesh'] or last_strip_bones != strip['strip_bones']:
                #print('splitting primitive')
                # New mesh; force new strip
                mesh_primitives['primitives'].append({'offset':cur_offset, 'length':strip_len, 'bones':last_strip_bones})
                cur_offset += strip_len
                last_elem = None
                strip_len = 0
                last_mesh = strip['mesh']
                mesh_primitives = {'mesh':last_mesh, 'primitives':[]}
                collection_primitives.append(mesh_primitives)
            elif last_elem:
                #print('extending primitive')
                # Existing mesh being extended as degenerate strip
                strip_len += 2
                element_bytes += estruct.pack(last_elem)
                element_bytes += estruct.pack(strip['strip'][0])
                # If current element count is odd, add additional degenerate strip to make it even
                # This ensures that the sub-strip has proper winding-order for backface culling
                if (strip_len & 1):
                    strip_len += 1
                    element_bytes += estruct.pack(strip['strip'][0])
            # Primitive tri-strip byte array
            for idx in strip['strip']:
                #print(idx)
                strip_len += 1
                element_bytes += estruct.pack(idx)
                last_elem = idx
        # Final mesh entry
        mesh_primitives['primitives'].append({'offset':cur_offset, 'length':strip_len, 'bones':last_strip_bones})
        cur_offset += strip_len
        return collection_primitives, element_bytes
    # Generate binary draw-index buffer of collection index
    def generate_index_buffer(self, collection_primitives, endian_char, psize, rigger):
        """Serialise the primitive hierarchy from generate_element_buffer
        into the draw-index buffer, reserving 3 pointers (`psize` bytes
        each) of zeroed space up front for the runtime's graphics context.
        """
        # Bytearray to fill
        index_bytes = bytearray()
        # Pointer space to hold collection's graphics API drawing context
        for i in range(psize*3):
            index_bytes.append(0)
        # And array
        for mesh in collection_primitives:
            # Primitive count
            index_bytes += struct.pack(endian_char + 'I', len(mesh['primitives']))
            # Primitive array
            for prim in mesh['primitives']:
                # If rigging, append skin index
                if rigger:
                    skin_index = rigger.augment_skin(prim['bones'])
                    index_bytes += struct.pack(endian_char + 'I', skin_index)
                # NOTE(review): the constant 3 appears to be a primitive-type
                # tag for tri-strips — confirm against the PMDL runtime.
                index_bytes += struct.pack(endian_char + 'I', 3)
                index_bytes += struct.pack(endian_char + 'I', prim['offset'])
                index_bytes += struct.pack(endian_char + 'I', prim['length'])
        return index_bytes
|
jackoalan/PSPL
|
Extensions/PMDLFormat/Blender/io_pmdl_export/pmdl_draw_general.py
|
Python
|
mit
| 16,891
|
[
"VisIt"
] |
6ff8abb8d2e722f9afca00298cdb31d919f6383aef56dc7e9b77a0a6d868eaa5
|
# Packaging script for the GSPS NetCDF subscriber package.
# NOTE(review): distutils is deprecated (removed in Python 3.12); migrating
# to setuptools would be required for modern interpreters.
from distutils.core import setup
setup(
    name='Glider Singleton Publishing Service NetCDF Subscriber',
    version='1.0',
    author='Michael Lindemuth',
    author_email='mlindemu@usf.edu',
    packages=['gsps_netcdf_subscriber'],
    scripts=[
        'gsps_netcdf_subscriber/gsps_netcdf_sub.py'
    ]
)
|
USF-COT/gsps_netcdf_subscriber
|
setup.py
|
Python
|
mit
| 310
|
[
"NetCDF"
] |
e4bade8ca536782e12b8310907a50ed495e9426ad9fa20f9202d39beee638afa
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
DESRES Molecular Structure file format topology parser
======================================================
Classes to read a topology from a DESRES_ Molecular Structure file
format (DMS_) coordinate files (as used by the Desmond_ MD package).
.. _DESRES: http://www.deshawresearch.com
.. _Desmond: http://www.deshawresearch.com/resources_desmond.html
.. _DMS: http://www.deshawresearch.com/Desmond_Users_Guide-0.7.pdf
Classes
-------
.. autoclass:: DMSParser
:members:
:inherited-members:
"""
from __future__ import absolute_import
import numpy as np
import sqlite3
import os
from . import guessers
from .base import TopologyReaderBase, squash_by
from ..core.topology import Topology
from ..core.topologyattrs import (
Atomids,
Atomnames,
Bonds,
Charges,
ChainIDs,
Atomtypes,
Masses,
Resids,
Resnums,
Resnames,
Segids,
AtomAttr, # for custom Attributes
)
class Atomnums(AtomAttr):
    """Custom per-atom attribute carrying the DMS 'anum' value of each atom."""
    attrname = 'atomnums'  # plural accessor name
    singular = 'atomnum'   # singular accessor name
class DMSParser(TopologyReaderBase):
    """Read a topology from a DESRES_ Molecular Structure file.
    Format (DMS_) coordinate files (as used by the Desmond_ MD package).
    Reads the following attributes:
    Atom:
    - Atomids
    - Atomnums
    - Atomnames
    - Masses
    - Charges
    - Chainids
    Residue:
    - Resnames
    - Resids
    Segment:
    - Segids
    Guesses the following attributes
    - Atomtypes
    .. _DESRES: http://www.deshawresearch.com
    .. _Desmond: http://www.deshawresearch.com/resources_desmond.html
    .. _DMS: http://www.deshawresearch.com/Desmond_Users_Guide-0.7.pdf
    """
    format = 'DMS'
    def parse(self):
        """Parse DMS file *filename* and return the Topology object"""
        # Fix by SB: Needed because sqlite3.connect does not raise anything
        # if file is not there
        if not os.path.isfile(self.filename):
            raise IOError("No such file: {0}".format(self.filename))
        def dict_factory(cursor, row):
            """
            Fetch SQL records as dictionaries, rather than the default tuples.
            """
            d = {}
            for idx, col in enumerate(cursor.description):
                d[col[0]] = row[idx]
            return d
        attrs = {}
        # Row factories for different data types: ints/floats come back as
        # the bare scalar; text columns are stripped and coerced to str.
        facs = {np.int32: lambda c, r: r[0],
                np.float32: lambda c, r: r[0],
                object: lambda c, r: str(r[0].strip())}
        with sqlite3.connect(self.filename) as con:
            # Selecting single column, so just strip tuple
            for attrname, dt in [
                    ('id', np.int32),
                    ('anum', np.int32),
                    ('mass', np.float32),
                    ('charge', np.float32),
                    ('name', object),
                    ('resname', object),
                    ('resid', np.int32),
                    ('chain', object),
                    ('segid', object),
            ]:
                try:
                    cur = con.cursor()
                    cur.row_factory = facs[dt]
                    cur.execute('SELECT {} FROM particle'
                                ''.format(attrname))
                    vals = cur.fetchall()
                except sqlite3.DatabaseError:
                    raise IOError(
                        "Failed reading the atoms from DMS Database")
                else:
                    attrs[attrname] = np.array(vals, dtype=dt)
            try:
                # Bonds come back as dicts so p0/p1/order can be read by name
                cur.row_factory = dict_factory
                cur.execute('SELECT * FROM bond')
                bonds = cur.fetchall()
            except sqlite3.DatabaseError:
                raise IOError("Failed reading the bonds from DMS Database")
            else:
                bondlist = []
                bondorder = {}
                for b in bonds:
                    # Canonicalise each bond as a sorted (low, high) pair
                    desc = tuple(sorted([b['p0'], b['p1']]))
                    bondlist.append(desc)
                    bondorder[desc] = b['order']
                attrs['bond'] = bondlist
                attrs['bondorder'] = bondorder
        atomtypes = guessers.guess_types(attrs['name'])
        topattrs = []
        # Bundle in Atom level objects
        for attr, cls in [
                ('id', Atomids),
                ('anum', Atomnums),
                ('mass', Masses),
                ('charge', Charges),
                ('name', Atomnames),
                ('chain', ChainIDs),
        ]:
            topattrs.append(cls(attrs[attr]))
        topattrs.append(Atomtypes(atomtypes, guessed=True))
        # Residues: collapse per-atom resid/resname/segid into per-residue
        atom_residx, res_resids, (res_resnames, res_segids) = squash_by(
            attrs['resid'], attrs['resname'], attrs['segid'])
        topattrs.append(Resids(res_resids))
        topattrs.append(Resnums(res_resids.copy()))
        topattrs.append(Resnames(res_resnames))
        # Segments: collapse per-residue segids into per-segment
        res_segidx, seg_segids = squash_by(
            res_segids)[:2]
        topattrs.append(Segids(seg_segids))
        # Bonds
        topattrs.append(Bonds(attrs['bond']))
        top = Topology(len(attrs['id']), len(res_resids), len(seg_segids),
                       attrs=topattrs,
                       atom_resindex=atom_residx,
                       residue_segindex=res_segidx)
        return top
|
kain88-de/mdanalysis
|
package/MDAnalysis/topology/DMSParser.py
|
Python
|
gpl-2.0
| 6,420
|
[
"MDAnalysis"
] |
f3f5166ca0ffa34e14c27a8eed9883b90056fbee204616ae08b1bf248df91cf0
|
"""Contains Timing and Timer classes"""
# timing.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import time
class Timing(object):
    """System timing object.

    This object manages timing for the whole system. Only one of these
    objects should exist. By convention it is called 'timing'.

    The timing keeps the current tick count in `Timing.tick` and services
    a set of Timer objects on every tick.
    """
    HZ = None  # configured machine tick rate (ticks per second)
    secs_per_tick = None  # seconds between ticks (1 / HZ)
    tick = 0  # running count of ticks since startup

    def __init__(self, machine):
        """Set up timer bookkeeping and read the tick rate from the config.

        Args:
            machine: Main machine object; its ``config['timing']['hz']``
                entry sets the tick rate (defaults to 30 when absent).
        """
        self.timers = set()
        # Adds/removes are queued into these sets so timer_tick() never
        # mutates self.timers while iterating it.
        self.timers_to_remove = set()
        self.timers_to_add = set()
        self.log = logging.getLogger("Timing")
        self.machine = machine
        try:
            Timing.HZ = self.machine.config['timing']['hz']
        except KeyError:
            Timing.HZ = 30  # default tick rate when not configured
        self.log.info("Configuring system Timing for %sHz", Timing.HZ)
        Timing.secs_per_tick = 1 / float(Timing.HZ)

    def add(self, timer):
        """Register `timer`, scheduling its first wakeup one period from now."""
        timer.wakeup = time.time() + timer.frequency
        self.timers_to_add.add(timer)

    def remove(self, timer):
        """Queue `timer` for removal on the next tick."""
        self.timers_to_remove.add(timer)

    def timer_tick(self):
        """Advance the tick counter and fire any timers whose wakeup is due.

        Note: the original code declared ``global tick`` here, but the
        counter is the class attribute ``Timing.tick``; the dead (and
        misleading) global statement has been removed.
        """
        Timing.tick += 1
        for timer in self.timers:
            if timer.wakeup and timer.wakeup <= time.time():
                timer.call()
                if timer.frequency:
                    timer.wakeup += timer.frequency
                else:
                    timer.wakeup = None  # one-shot timer: disable after firing
        # Apply deferred removals/additions now that iteration is finished.
        while self.timers_to_remove:
            timer = self.timers_to_remove.pop()
            if timer in self.timers:
                self.timers.remove(timer)
        for timer in self.timers_to_add:
            self.timers.add(timer)
        self.timers_to_add = set()

    @staticmethod
    def secs(s):
        """Convert milliseconds to float seconds."""
        return s / 1000.0

    @staticmethod
    def string_to_secs(s):
        """Convert a time string (see string_to_ms) to float seconds."""
        return Timing.string_to_ms(s) / 1000.0

    @staticmethod
    def string_to_ms(time):
        """Converts a string of real-world time into int of ms. Example
        inputs:

        200ms
        2s
        None

        If no "s" or "ms" is provided, this method assumes "milliseconds."
        If time is 'None' or a string of 'None', this method returns 0.

        Returns: An integer. The examples listed above 200, 2000 and 0,
        respectively
        """
        time = str(time).upper()
        if time.endswith("MS") or time.endswith("MSEC"):
            time = ''.join(i for i in time if not i.isalpha())
            return int(time)
        elif time.endswith("S") or time.endswith("SEC"):
            time = ''.join(i for i in time if not i.isalpha())
            return int(float(time) * 1000)
        elif not time or time == 'NONE':
            return 0
        else:
            time = ''.join(i for i in time if not i.isalpha())
            return int(time)

    @staticmethod
    def int_to_pwm(ratio, length):
        """Converts a decimal between 0 and 1 to a pwm mask of whatever length
        you want.

        For example, an input ratio of .5 with a result length of 8 returns
        10101010. And input ratio of .7 with a result length of 32 returns
        11011011101101101101110110110110.

        Another way to think about this is this method converts a decimal
        percentage into the corresponding pwm mask.

        Args:
            ratio (float): A value between 0 and 1 that you want to convert.
            length (int): How many digits you want in your result.
        """
        whole_num = 0  # tracks our whole number
        output = 0  # our output mask
        count = 0  # our current count
        for _i in range(length):
            count += ratio
            if int(count) > whole_num:
                output = output | 1
                whole_num += 1
            output = output << 1
        return output

    def pwm_ms_to_byte_int(self, pwm_on, pwm_off):
        """Converts pwm_on / pwm_off ms times to a single-byte pwm mask value.

        Fix: this was decorated ``@staticmethod`` while still taking
        ``self``, so instance calls (``timing.pwm_ms_to_byte_int(on, off)``)
        raised TypeError. Dropping the decorator keeps the old explicit
        ``Timing.pwm_ms_to_byte_int(obj, on, off)`` call style working and
        makes instance calls work too.
        """
        total_ms = pwm_on + pwm_off
        # NOTE(review): this check accepts any even total <= 8 (including 6)
        # and rejects 1, while the message claims 1/2/4/8 — kept as-is so the
        # set of accepted inputs does not change; confirm the intended rule.
        if total_ms % 2 or total_ms > 8:
            # todo dunno what to do here.
            self.log.error("pwm_ms_to_byte error: pwm_on + pwm_off total must "
                           "be 1, 2, 4, or 8.")
            quit()
        if not pwm_on:
            return 0
        elif not pwm_off:
            return 255
        else:
            return int(pwm_on / float(pwm_on + pwm_off) * 255)
class Timer(object):
    """Periodic timer object.

    Pairs a callable with a frequency (in seconds) at which it should be
    invoked. A frequency of None leaves the timer defined but disabled.

    Args:
        callback (method): The method you want called each time this timer
            is fired.
        args (tuple): Arguments you want to pass to the callback.
        frequency (int or float): How often, in seconds, you want this
            timer to be called.
    """
    def __init__(self, callback, args=tuple(), frequency=None):
        self.log = logging.getLogger("Timer")
        self.callback = callback
        self.args = args
        self.frequency = frequency
        self.wakeup = None  # absolute time of next firing; set by Timing.add()
        self.log.debug('Creating timer for callback "%s" every %ss',
                       self.callback.__name__, self.frequency)
    def call(self):
        """Invoke the callback with the stored arguments."""
        self.callback(*self.args)
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
jabdoa2/mpf
|
mpf/system/timing.py
|
Python
|
mit
| 6,667
|
[
"Brian"
] |
bd129bbf8f297ec41f7c265deafe6a60f8cd7fc9212c5798460a2d133ff742a6
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Miscellaneous algorithms
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname(os.path.realpath(__file__))
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
'''
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import range
import os
import os.path as op
import nibabel as nb
import numpy as np
from math import floor, ceil
from scipy.ndimage.morphology import grey_dilation
from scipy.special import legendre
import scipy.io as sio
import itertools
import scipy.stats as stats
from nipype import logging
import warnings
from . import metrics as nam
from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File,
InputMultiPath, OutputMultiPath,
BaseInterfaceInputSpec, isdefined,
DynamicTraitedSpec, Undefined)
from nipype.utils.filemanip import fname_presuffix, split_filename
iflogger = logging.getLogger('interface')
class PickAtlasInputSpec(BaseInterfaceInputSpec):
    # Input trait definitions for PickAtlas; the `desc` strings double as
    # user-facing help text and must stay as-is.
    atlas = File(exists=True, desc="Location of the atlas that will be used.",
                 mandatory=True)
    labels = traits.Either(
        traits.Int, traits.List(traits.Int),
        desc=("Labels of regions that will be included in the mask. Must be\
 compatible with the atlas used."),
        mandatory=True
    )
    hemi = traits.Enum(
        'both', 'left', 'right',
        desc="Restrict the mask to only one hemisphere: left or right",
        usedefault=True
    )
    dilation_size = traits.Int(
        usedefault=True,
        desc="Defines how much the mask will be dilated (expanded in 3D)."
    )
    output_file = File(desc="Where to store the output mask.")
class PickAtlasOutputSpec(TraitedSpec):
    # Single output: the generated binary ROI mask volume.
    mask_file = File(exists=True, desc="output mask file")
class PickAtlas(BaseInterface):
    """Returns ROI masks given an atlas and a list of labels. Supports dilation
    and left right masking (assuming the atlas is properly aligned).
    """
    input_spec = PickAtlasInputSpec
    output_spec = PickAtlasOutputSpec
    def _run_interface(self, runtime):
        # Build the mask volume and write it to the chosen output path.
        nim = self._get_brodmann_area()
        nb.save(nim, self._gen_output_filename())
        return runtime
    def _gen_output_filename(self):
        # Default output name: <atlas>_mask in the current working directory.
        if not isdefined(self.inputs.output_file):
            output = fname_presuffix(fname=self.inputs.atlas, suffix="_mask",
                                     newpath=os.getcwd(), use_ext=True)
        else:
            output = os.path.realpath(self.inputs.output_file)
        return output
    def _get_brodmann_area(self):
        """Build the binary mask image selecting the requested labels."""
        nii = nb.load(self.inputs.atlas)
        origdata = nii.get_data()
        newdata = np.zeros(origdata.shape)
        # Accept a single int label or a list of them.
        if not isinstance(self.inputs.labels, list):
            labels = [self.inputs.labels]
        else:
            labels = self.inputs.labels
        for lab in labels:
            newdata[origdata == lab] = 1
        # Zero out the unwanted hemisphere along the first voxel axis.
        # NOTE(review): assumes the left/right split lies at the middle of
        # the grid's first axis — only valid for an aligned atlas.
        if self.inputs.hemi == 'right':
            newdata[int(floor(float(origdata.shape[0]) / 2)):, :, :] = 0
        elif self.inputs.hemi == 'left':
            newdata[:int(ceil(float(origdata.shape[0]) / 2)), :, :] = 0
        if self.inputs.dilation_size != 0:
            # Grey dilation with a cubic footprint of
            # (2 * dilation_size + 1) voxels per side.
            newdata = grey_dilation(
                newdata, (2 * self.inputs.dilation_size + 1,
                          2 * self.inputs.dilation_size +
                          1,
                          2 * self.inputs.dilation_size + 1))
        return nb.Nifti1Image(newdata, nii.affine, nii.header)
    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['mask_file'] = self._gen_output_filename()
        return outputs
class SimpleThresholdInputSpec(BaseInterfaceInputSpec):
    # Input trait definitions for SimpleThreshold.
    volumes = InputMultiPath(
        File(exists=True), desc='volumes to be thresholded', mandatory=True)
    threshold = traits.Float(
        desc='volumes to be thresholdedeverything below this value will be set\
 to zero',
        mandatory=True
    )
class SimpleThresholdOutputSpec(TraitedSpec):
    # One thresholded output volume per input volume.
    thresholded_volumes = OutputMultiPath(
        File(exists=True), desc="thresholded volumes")
class SimpleThreshold(BaseInterface):
    """Applies a threshold to input volumes
    """
    input_spec = SimpleThresholdInputSpec
    output_spec = SimpleThresholdOutputSpec
    def _run_interface(self, runtime):
        # Threshold each volume independently: voxels at or below the
        # threshold become zero; values strictly above it are kept.
        for fname in self.inputs.volumes:
            img = nb.load(fname)
            data = np.array(img.get_data())
            active_map = data > self.inputs.threshold
            thresholded_map = np.zeros(data.shape)
            thresholded_map[active_map] = data[active_map]
            new_img = nb.Nifti1Image(thresholded_map, img.affine, img.header)
            _, base, _ = split_filename(fname)
            # Written into the current working directory.
            nb.save(new_img, base + '_thresholded.nii')
        return runtime
    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs["thresholded_volumes"] = []
        for fname in self.inputs.volumes:
            _, base, _ = split_filename(fname)
            outputs["thresholded_volumes"].append(
                os.path.abspath(base + '_thresholded.nii'))
        return outputs
class ModifyAffineInputSpec(BaseInterfaceInputSpec):
    # Input trait definitions for ModifyAffine; the transformation defaults
    # to the 4x4 identity (a no-op).
    volumes = InputMultiPath(
        File(exists=True),
        desc='volumes which affine matrices will be modified',
        mandatory=True
    )
    transformation_matrix = traits.Array(
        value=np.eye(4),
        shape=(4, 4),
        desc="transformation matrix that will be left multiplied by the\
 affine matrix",
        usedefault=True
    )
class ModifyAffineOutputSpec(TraitedSpec):
    # One re-saved volume per input, with the transformed affine.
    transformed_volumes = OutputMultiPath(File(exist=True))
class ModifyAffine(BaseInterface):
    """Left multiplies the affine matrix with a specified values. Saves the volume
    as a nifti file.
    """
    input_spec = ModifyAffineInputSpec
    output_spec = ModifyAffineOutputSpec
    def _gen_output_filename(self, name):
        # Output path: <basename>_transformed.nii in the current directory.
        _, base, _ = split_filename(name)
        return os.path.abspath(base + "_transformed.nii")
    def _run_interface(self, runtime):
        for fname in self.inputs.volumes:
            img = nb.load(fname)
            affine = img.affine
            # Left-multiply: new_affine = T @ affine
            affine = np.dot(self.inputs.transformation_matrix, affine)
            nb.save(nb.Nifti1Image(img.get_data(), affine, img.header),
                    self._gen_output_filename(fname))
        return runtime
    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['transformed_volumes'] = []
        for fname in self.inputs.volumes:
            outputs['transformed_volumes'].append(
                self._gen_output_filename(fname))
        return outputs
class CreateNiftiInputSpec(BaseInterfaceInputSpec):
    # ANALYZE img/hdr pair plus an optional affine override.
    data_file = File(exists=True, mandatory=True, desc="ANALYZE img file")
    header_file = File(
        exists=True, mandatory=True, desc="corresponding ANALYZE hdr file")
    affine = traits.Array(desc="affine transformation array")
class CreateNiftiOutputSpec(TraitedSpec):
    # The converted NIfTI volume.
    nifti_file = File(exists=True)
class CreateNifti(BaseInterface):
    """Creates a nifti volume from an ANALYZE img/hdr pair."""
    input_spec = CreateNiftiInputSpec
    output_spec = CreateNiftiOutputSpec
    def _gen_output_file_name(self):
        # Output path: <data basename>.nii in the current working directory.
        _, base, _ = split_filename(self.inputs.data_file)
        return os.path.abspath(base + ".nii")
    def _run_interface(self, runtime):
        # Fix: both file handles were opened but never closed; context
        # managers now guarantee they are released even on error.
        with open(self.inputs.header_file, 'rb') as hdr_fobj:
            hdr = nb.AnalyzeHeader.from_fileobj(hdr_fobj)
        # Use the caller-supplied affine if given; otherwise let nibabel
        # derive one (None) from the header.
        if isdefined(self.inputs.affine):
            affine = self.inputs.affine
        else:
            affine = None
        with open(self.inputs.data_file, 'rb') as data_fobj:
            data = hdr.data_from_fileobj(data_fobj)
        img = nb.Nifti1Image(data, affine, hdr)
        nb.save(img, self._gen_output_file_name())
        return runtime
    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['nifti_file'] = self._gen_output_file_name()
        return outputs
class TSNRInputSpec(BaseInterfaceInputSpec):
    # Input trait definitions for TSNR; output filenames default into the
    # working directory and are excluded from hashing.
    in_file = InputMultiPath(File(exists=True), mandatory=True,
                             desc='realigned 4D file or a list of 3D files')
    regress_poly = traits.Range(low=1, desc='Remove polynomials')
    tsnr_file = File('tsnr.nii.gz', usedefault=True, hash_files=False,
                     desc='output tSNR file')
    mean_file = File('mean.nii.gz', usedefault=True, hash_files=False,
                     desc='output mean file')
    stddev_file = File('stdev.nii.gz', usedefault=True, hash_files=False,
                       desc='output tSNR file')
    detrended_file = File('detrend.nii.gz', usedefault=True, hash_files=False,
                          desc='input file after detrending')
class TSNROutputSpec(TraitedSpec):
    # detrended_file is only produced when regress_poly is set.
    tsnr_file = File(exists=True, desc='tsnr image file')
    mean_file = File(exists=True, desc='mean image file')
    stddev_file = File(exists=True, desc='std dev image file')
    detrended_file = File(desc='detrended input file')
class TSNR(BaseInterface):
    """Computes the time-course SNR for a time series
    Typically you want to run this on a realigned time-series.
    Example
    -------
    >>> tsnr = TSNR()
    >>> tsnr.inputs.in_file = 'functional.nii'
    >>> res = tsnr.run() # doctest: +SKIP
    """
    input_spec = TSNRInputSpec
    output_spec = TSNROutputSpec
    def _run_interface(self, runtime):
        img = nb.load(self.inputs.in_file[0])
        header = img.header.copy()
        vollist = [nb.load(filename) for filename in self.inputs.in_file]
        # Stack all inputs into one 4D array (x, y, z, t).
        data = np.concatenate([vol.get_data().reshape(
            vol.get_shape()[:3] + (-1,)) for vol in vollist], axis=3)
        data = np.nan_to_num(data)
        # Integer data is promoted to float32 so the statistics are exact.
        if data.dtype.kind == 'i':
            header.set_data_dtype(np.float32)
            data = data.astype(np.float32)
        if isdefined(self.inputs.regress_poly):
            # Detrend with a Legendre-polynomial basis of the requested
            # order; the constant column is excluded from the subtracted
            # fit so the mean is preserved.
            timepoints = img.shape[-1]
            X = np.ones((timepoints, 1))
            for i in range(self.inputs.regress_poly):
                X = np.hstack((X, legendre(
                    i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
            betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2))
            datahat = np.rollaxis(np.dot(X[:, 1:],
                                         np.rollaxis(
                                             betas[1:, :, :, :], 0, 3)),
                                  0, 4)
            data = data - datahat
            img = nb.Nifti1Image(data, img.get_affine(), header)
            nb.save(img, op.abspath(self.inputs.detrended_file))
        meanimg = np.mean(data, axis=3)
        stddevimg = np.std(data, axis=3)
        tsnr = np.zeros_like(meanimg)
        # Guard against division by (near-)zero standard deviation.
        tsnr[stddevimg > 1.e-3] = meanimg[stddevimg > 1.e-3] / stddevimg[stddevimg > 1.e-3]
        img = nb.Nifti1Image(tsnr, img.get_affine(), header)
        nb.save(img, op.abspath(self.inputs.tsnr_file))
        img = nb.Nifti1Image(meanimg, img.get_affine(), header)
        nb.save(img, op.abspath(self.inputs.mean_file))
        img = nb.Nifti1Image(stddevimg, img.get_affine(), header)
        nb.save(img, op.abspath(self.inputs.stddev_file))
        return runtime
    def _list_outputs(self):
        outputs = self._outputs().get()
        for k in ['tsnr_file', 'mean_file', 'stddev_file']:
            outputs[k] = op.abspath(getattr(self.inputs, k))
        if isdefined(self.inputs.regress_poly):
            outputs['detrended_file'] = op.abspath(self.inputs.detrended_file)
        return outputs
class GunzipInputSpec(BaseInterfaceInputSpec):
    # The .gz file to decompress.
    in_file = File(exists=True, mandatory=True)
class GunzipOutputSpec(TraitedSpec):
    # Decompressed copy of in_file, written to the working directory.
    out_file = File(exists=True)
class Gunzip(BaseInterface):
    """Gunzip wrapper

    Decompresses ``in_file`` into the current working directory,
    dropping a trailing ``.gz`` from the file name.
    """
    input_spec = GunzipInputSpec
    output_spec = GunzipOutputSpec

    def _gen_output_file_name(self):
        """Return the absolute output path: input base name with a
        trailing ``.gz`` stripped from the extension, in the cwd."""
        _, base, ext = split_filename(self.inputs.in_file)
        # BUG FIX: the original compared ext[-2:] (two chars) against
        # ".gz" (three chars), which is never true, and then sliced
        # ext[:-3] a *second* time in the return, corrupting the name.
        if ext[-3:].lower() == ".gz":
            ext = ext[:-3]
        return os.path.abspath(base + ext)

    def _run_interface(self, runtime):
        import gzip
        # Context managers guarantee both handles are closed even if the
        # copy fails (the original leaked both file objects).
        with gzip.open(self.inputs.in_file, 'rb') as in_file, \
                open(self._gen_output_file_name(), 'wb') as out_file:
            out_file.write(in_file.read())
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_file'] = self._gen_output_file_name()
        return outputs
def replaceext(in_list, ext):
    """Return absolute paths of *in_list* with each extension swapped
    for *ext*."""
    def _with_ext(fname):
        folder, stem, _ = split_filename(op.abspath(fname))
        return op.join(folder, stem) + ext

    return [_with_ext(fname) for fname in in_list]
def matlab2csv(in_array, name, reshape):
    """Write *in_array* to ``<name>.csv`` in the cwd and return its path.

    When *reshape* is true, a matrix is flattened into a single column
    before saving (the output is meant for R).
    """
    arr = np.asarray(in_array)
    if reshape and len(np.shape(arr)) > 1:
        rows, cols = np.shape(arr)[0], np.shape(arr)[1]
        arr = np.reshape(arr, (rows * cols, 1))
        iflogger.info(np.shape(arr))
    out_path = op.abspath(name + '.csv')
    np.savetxt(out_path, arr, delimiter=',')
    return out_path
class Matlab2CSVInputSpec(TraitedSpec):
    in_file = File(exists=True, mandatory=True, desc='Input MATLAB .mat file')
    # Defaults to True: matrices are flattened to a single column.
    reshape_matrix = traits.Bool(
        True, usedefault=True,
        desc='The output of this interface is meant for R, so matrices will be\
 reshaped to vectors by default.'
    )
class Matlab2CSVOutputSpec(TraitedSpec):
    # One CSV per array-valued variable found in the .mat file.
    csv_files = OutputMultiPath(
        File(desc='Output CSV files for each variable saved in the input .mat\
 file')
    )
class Matlab2CSV(BaseInterface):
    """Simple interface to save the components of a MATLAB .mat file as a text
    file with comma-separated values (CSVs).

    CSV files are easily loaded in R, for use in statistical processing.
    For further information, see cran.r-project.org/doc/manuals/R-data.pdf

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> mat2csv = misc.Matlab2CSV()
    >>> mat2csv.inputs.in_file = 'cmatrix.mat'
    >>> mat2csv.run() # doctest: +SKIP
    """
    input_spec = Matlab2CSVInputSpec
    output_spec = Matlab2CSVOutputSpec

    def _run_interface(self, runtime):
        in_dict = sio.loadmat(op.abspath(self.inputs.in_file))
        # Check if the file has multiple variables in it. If it does, loop
        # through them and save them as individual CSV files.
        # If not, save the variable as a single CSV file using the input file
        # name and a .csv extension.
        saved_variables = list()
        for key in list(in_dict.keys()):
            # Keys like '__header__'/'__globals__' are scipy metadata,
            # not user variables.
            if not key.startswith('__'):
                if isinstance(in_dict[key][0], np.ndarray):
                    saved_variables.append(key)
                else:
                    iflogger.info('One of the keys in the input file, {k}, is not a Numpy array'.format(k=key))

        if len(saved_variables) > 1:
            # Several variables: one CSV per variable, named after it.
            iflogger.info(
                '{N} variables found:'.format(N=len(saved_variables)))
            iflogger.info(saved_variables)
            for variable in saved_variables:
                iflogger.info(
                    '...Converting {var} - type {ty} - to\
                    CSV'.format(var=variable, ty=type(in_dict[variable]))
                )
                matlab2csv(
                    in_dict[variable], variable, self.inputs.reshape_matrix)
        elif len(saved_variables) == 1:
            # Single variable: the CSV is named after the input file.
            _, name, _ = split_filename(self.inputs.in_file)
            variable = saved_variables[0]
            iflogger.info('Single variable found {var}, type {ty}:'.format(
                var=variable, ty=type(in_dict[variable])))
            iflogger.info('...Converting {var} to CSV from {f}'.format(
                var=variable, f=self.inputs.in_file))
            matlab2csv(in_dict[variable], name, self.inputs.reshape_matrix)
        else:
            iflogger.error('No values in the MATLAB file?!')
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        # Re-derive the variable list used in _run_interface to predict
        # the file names that were written.
        in_dict = sio.loadmat(op.abspath(self.inputs.in_file))
        saved_variables = list()
        for key in list(in_dict.keys()):
            if not key.startswith('__'):
                if isinstance(in_dict[key][0], np.ndarray):
                    saved_variables.append(key)
                else:
                    iflogger.error('One of the keys in the input file, {k}, is\
                    not a Numpy array'.format(k=key))

        if len(saved_variables) > 1:
            outputs['csv_files'] = replaceext(saved_variables, '.csv')
        elif len(saved_variables) == 1:
            _, name, ext = split_filename(self.inputs.in_file)
            outputs['csv_files'] = op.abspath(name + '.csv')
        else:
            iflogger.error('No values in the MATLAB file?!')
        return outputs
def merge_csvs(in_list):
    """Load several CSV files and stack their numeric contents along a
    third axis (result is squeezed before returning).

    Each file is parsed with progressively more permissive settings:
    plain numeric CSV; then skipping a header row; then also dropping the
    first (label) column; finally dropping the last column as well.
    """
    def _load_csv(fname):
        """Load one CSV with the fallback cascade described above."""
        try:
            return np.loadtxt(fname, delimiter=',')
        except ValueError:
            pass
        try:
            return np.loadtxt(fname, delimiter=',', skiprows=1)
        except ValueError:
            pass
        # Count columns from the header row.
        # BUG FIX: the original left this file handle open.
        with open(fname, 'r') as first:
            header_line = first.readline()
        n_cols = len(header_line.split(','))
        try:
            return np.loadtxt(
                fname, delimiter=',', skiprows=1,
                usecols=list(range(1, n_cols))
            )
        except ValueError:
            return np.loadtxt(
                fname, delimiter=',', skiprows=1,
                usecols=list(range(1, n_cols - 1)))

    for idx, in_file in enumerate(in_list):
        in_array = _load_csv(in_file)
        if idx == 0:
            out_array = in_array
        else:
            out_array = np.dstack((out_array, in_array))
    out_array = np.squeeze(out_array)
    iflogger.info('Final output array shape:')
    iflogger.info(np.shape(out_array))
    return out_array
def remove_identical_paths(in_files):
    """Strip the common leading directory (and '_subject_id_' markers)
    from a list of paths, returning the distinguishing name parts."""
    import os.path as op
    from ..utils.filemanip import split_filename
    if len(in_files) > 1:
        out_names = list()
        common = op.commonprefix(in_files)
        # Keep everything up to and including the last slash of the
        # common prefix; with no slash this is the empty string.
        shared_dir = common[0:common.rfind('/') + 1]
        for in_file in in_files:
            folder, fname, _ = split_filename(in_file)
            trimmed = op.join(folder, fname).replace(shared_dir, '')
            out_names.append(trimmed.replace('_subject_id_', ''))
    else:
        _, only_name, _ = split_filename(in_files[0])
        out_names = [only_name]
    return out_names
def maketypelist(rowheadings, shape, extraheadingBool, extraheading):
    """Build the numpy structured-dtype spec for the merged CSV table:
    an optional 'heading' string field, one float field per data column,
    and an optional trailing string field."""
    typelist = []
    if rowheadings:
        typelist.append(('heading', 'a40'))
    # For 2-D data there is one field per column (min(shape) columns);
    # for 1-D data there is one field per element.
    n_fields = min(shape) if len(shape) > 1 else shape[0]
    typelist.extend((str(col), float) for col in range(1, n_fields + 1))
    if extraheadingBool:
        typelist.append((extraheading, 'a40'))
    iflogger.info(typelist)
    return typelist
def makefmtlist(output_array, typelist, rowheadingsBool,
                shape, extraheadingBool):
    """Fill a structured array from *output_array* and build the matching
    ``numpy.savetxt`` format string ('%s' for string fields, '%f' for
    each numeric column)."""
    fmtlist = ['%s'] if rowheadingsBool else []
    if len(shape) > 1:
        # 2-D data: one record per row, one field per column.
        output = np.zeros(max(shape), typelist)
        for col in range(1, min(shape) + 1):
            output[str(col)] = output_array[:, col - 1]
            fmtlist.append('%f')
    else:
        # 1-D data: a single record holding every element.
        output = np.zeros(1, typelist)
        for col in range(1, len(output_array) + 1):
            output[str(col)] = output_array[col - 1]
            fmtlist.append('%f')
    if extraheadingBool:
        fmtlist.append('%s')
    return ','.join(fmtlist), output
class MergeCSVFilesInputSpec(TraitedSpec):
    in_files = InputMultiPath(File(exists=True), mandatory=True,
                              desc='Input comma-separated value (CSV) files')
    # Output name; a '.csv' extension is enforced at run time.
    out_file = File('merged.csv', usedefault=True,
                    desc='Output filename for merged CSV file')
    column_headings = traits.List(
        traits.Str, desc='List of column headings to save in merged CSV file\
 (must be equal to number of input files). If left undefined, these\
 will be pulled from the input filenames.')
    row_headings = traits.List(
        traits.Str, desc='List of row headings to save in merged CSV file\
 (must be equal to number of rows in the input files).')
    # Heading used for the row-label column when row_headings is given.
    row_heading_title = traits.Str(
        'label', usedefault=True, desc='Column heading for the row headings\
 added')
    extra_column_heading = traits.Str(
        desc='New heading to add for the added field.')
    extra_field = traits.Str(
        desc='New field to add to each row. This is useful for saving the\
 group or subject ID in the file.')
class MergeCSVFilesOutputSpec(TraitedSpec):
    # The merged table, one column per input file.
    csv_file = File(desc='Output CSV file containing columns ')
class MergeCSVFiles(BaseInterface):
    """This interface is designed to facilitate data loading in the R environment.

    It takes input CSV files and merges them into a single CSV file.
    If provided, it will also incorporate column heading names into the
    resulting CSV file.

    CSV files are easily loaded in R, for use in statistical processing.
    For further information, see cran.r-project.org/doc/manuals/R-data.pdf

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> mat2csv = misc.MergeCSVFiles()
    >>> mat2csv.inputs.in_files = ['degree.mat','clustering.mat']
    >>> mat2csv.inputs.column_headings = ['degree','clustering']
    >>> mat2csv.run() # doctest: +SKIP
    """
    input_spec = MergeCSVFilesInputSpec
    output_spec = MergeCSVFilesOutputSpec

    def _run_interface(self, runtime):
        extraheadingBool = False
        extraheading = ''
        rowheadingsBool = False
        """
        This block defines the column headings.
        """
        if isdefined(self.inputs.column_headings):
            iflogger.info('Column headings have been provided:')
            headings = self.inputs.column_headings
        else:
            # Fall back to the distinguishing parts of the input paths.
            iflogger.info(
                'Column headings not provided! Pulled from input filenames:')
            headings = remove_identical_paths(self.inputs.in_files)

        if isdefined(self.inputs.extra_field):
            if isdefined(self.inputs.extra_column_heading):
                extraheading = self.inputs.extra_column_heading
                iflogger.info('Extra column heading provided: {col}'.format(
                    col=extraheading))
            else:
                extraheading = 'type'
                iflogger.info(
                    'Extra column heading was not defined. Using "type"')
            headings.append(extraheading)
            extraheadingBool = True

        if len(self.inputs.in_files) == 1:
            iflogger.warn('Only one file input!')

        # Build the quoted header line; a leading label column is added
        # when row headings were supplied.
        if isdefined(self.inputs.row_headings):
            iflogger.info('Row headings have been provided. Adding "labels"\
                column header.')
            prefix = '"{p}","'.format(p=self.inputs.row_heading_title)
            csv_headings = prefix + '","'.join(itertools.chain(
                headings)) + '"\n'
            rowheadingsBool = True
        else:
            iflogger.info('Row headings have not been provided.')
            csv_headings = '"' + '","'.join(itertools.chain(headings)) + '"\n'

        iflogger.info('Final Headings:')
        iflogger.info(csv_headings)
        """
        Next we merge the arrays and define the output text file
        """
        output_array = merge_csvs(self.inputs.in_files)
        _, name, ext = split_filename(self.inputs.out_file)
        if not ext == '.csv':
            ext = '.csv'

        out_file = op.abspath(name + ext)
        file_handle = open(out_file, 'w')
        file_handle.write(csv_headings)

        # Convert to a structured array so savetxt can mix strings and
        # floats in one table.
        shape = np.shape(output_array)
        typelist = maketypelist(
            rowheadingsBool, shape, extraheadingBool, extraheading)
        fmt, output = makefmtlist(
            output_array, typelist, rowheadingsBool, shape, extraheadingBool)

        if rowheadingsBool:
            row_heading_list = self.inputs.row_headings
            row_heading_list_with_quotes = []
            for row_heading in row_heading_list:
                row_heading_with_quotes = '"' + row_heading + '"'
                row_heading_list_with_quotes.append(row_heading_with_quotes)
            row_headings = np.array(row_heading_list_with_quotes, dtype='|S40')
            output['heading'] = row_headings

        if isdefined(self.inputs.extra_field):
            extrafieldlist = []
            # One copy of extra_field per record in the structured array.
            if len(shape) > 1:
                mx = shape[0]
            else:
                mx = 1
            for idx in range(0, mx):
                extrafieldlist.append(self.inputs.extra_field)
            iflogger.info(len(extrafieldlist))
            output[extraheading] = extrafieldlist
        iflogger.info(output)
        iflogger.info(fmt)
        np.savetxt(file_handle, output, fmt, delimiter=',')
        file_handle.close()
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        _, name, ext = split_filename(self.inputs.out_file)
        # Mirror _run_interface: the output always gets a .csv extension.
        if not ext == '.csv':
            ext = '.csv'
        out_file = op.abspath(name + ext)
        outputs['csv_file'] = out_file
        return outputs
class AddCSVColumnInputSpec(TraitedSpec):
    in_file = File(exists=True, mandatory=True,
                   desc='Input comma-separated value (CSV) files')
    # Output name; a '.csv' extension is enforced at run time.
    out_file = File('extra_heading.csv', usedefault=True,
                    desc='Output filename for merged CSV file')
    extra_column_heading = traits.Str(
        desc='New heading to add for the added field.')
    extra_field = traits.Str(
        desc='New field to add to each row. This is useful for saving the\
 group or subject ID in the file.')
class AddCSVColumnOutputSpec(TraitedSpec):
    # Copy of in_file with one extra column appended to every row.
    csv_file = File(desc='Output CSV file containing columns ')
class AddCSVColumn(BaseInterface):
    """Short interface to add an extra column and field to a text file

    Appends ``extra_column_heading`` (quoted) to the header row of
    ``in_file`` and ``extra_field`` to every subsequent row, writing the
    result to ``out_file`` in the working directory.

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> addcol = misc.AddCSVColumn()
    >>> addcol.inputs.in_file = 'degree.csv'
    >>> addcol.inputs.extra_column_heading = 'group'
    >>> addcol.inputs.extra_field = 'male'
    >>> addcol.run() # doctest: +SKIP
    """
    input_spec = AddCSVColumnInputSpec
    output_spec = AddCSVColumnOutputSpec

    def _out_path(self):
        """Absolute output path with a '.csv' extension enforced."""
        _, name, ext = split_filename(self.inputs.out_file)
        if not ext == '.csv':
            ext = '.csv'
        return op.abspath(name + ext)

    def _run_interface(self, runtime):
        # Context managers close both handles even on error (the
        # original leaked both file objects).
        with open(self.inputs.in_file, 'r') as in_file, \
                open(self._out_path(), 'w') as out_file:
            # Header row: append the new column heading, quoted.
            firstline = in_file.readline().replace('\n', '')
            new_firstline = firstline + ',"' + \
                self.inputs.extra_column_heading + '"\n'
            out_file.write(new_firstline)
            # Data rows: append the new field, unquoted.
            for line in in_file:
                new_line = line.replace('\n', '')
                new_line = new_line + ',' + self.inputs.extra_field + '\n'
                out_file.write(new_line)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['csv_file'] = self._out_path()
        return outputs
class AddCSVRowInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
    # Target CSV file; AddCSVRow appends one row to it per run.
    in_file = traits.File(mandatory=True,
                          desc='Input comma-separated value (CSV) files')
    # Mirror of the dynamically-added attributes: maps column name to
    # the value that will be written for the new row.
    _outputs = traits.Dict(traits.Any, value={}, usedefault=True)

    def __setattr__(self, key, value):
        # Attributes that are not declared traits are captured into
        # _outputs so AddCSVRow can turn them into CSV columns.
        if key not in self.copyable_trait_names():
            if not isdefined(value):
                super(AddCSVRowInputSpec, self).__setattr__(key, value)
            self._outputs[key] = value
        else:
            # Declared traits behave normally; keep _outputs in sync if
            # the same key was registered there earlier.
            if key in self._outputs:
                self._outputs[key] = value
            super(AddCSVRowInputSpec, self).__setattr__(key, value)
class AddCSVRowOutputSpec(TraitedSpec):
    # The same file as in_file, now with the new row appended.
    csv_file = File(desc='Output CSV file containing rows ')
class AddCSVRow(BaseInterface):
    """Simple interface to add an extra row to a csv file

    .. note:: Requires `pandas <http://pandas.pydata.org/>`_

    .. warning:: Multi-platform thread-safe execution is possible with
      `lockfile <https://pythonhosted.org/lockfile/lockfile.html>`_. Please
      recall that (1) this module is alpha software; and (2) it should be
      installed for thread-safe writing.
      If lockfile is not installed, then the interface is not thread-safe.

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> addrow = misc.AddCSVRow()
    >>> addrow.inputs.in_file = 'scores.csv'
    >>> addrow.inputs.si = 0.74
    >>> addrow.inputs.di = 0.93
    >>> addrow.inputs.subject_id = 'S400'
    >>> addrow.inputs.list_of_values = [ 0.4, 0.7, 0.3 ]
    >>> addrow.run() # doctest: +SKIP
    """
    input_spec = AddCSVRowInputSpec
    output_spec = AddCSVRowOutputSpec

    def __init__(self, infields=None, force_run=True, **kwargs):
        super(AddCSVRow, self).__init__(**kwargs)
        undefined_traits = {}
        self._infields = infields
        self._have_lock = False
        self._lock = None
        # Pre-register the requested column names as dynamic traits so
        # they can be assigned through self.inputs.
        if infields:
            for key in infields:
                self.inputs.add_trait(key, traits.Any)
                self.inputs._outputs[key] = Undefined
                undefined_traits[key] = Undefined
            self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
        if force_run:
            self._always_run = True

    def _run_interface(self, runtime):
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(('This interface requires pandas '
                               '(http://pandas.pydata.org/) to run.'))

        # Thread safety is best-effort: only if lockfile is available.
        try:
            import lockfile as pl
            self._have_lock = True
        except ImportError:
            from warnings import warn
            warn(('Python module lockfile was not found: AddCSVRow will not be'
                  ' thread-safe in multi-processor execution'))

        input_dict = {}
        for key, val in list(self.inputs._outputs.items()):
            # expand lists to several columns
            if key == 'trait_added' and val in self.inputs.copyable_trait_names():
                continue
            if isinstance(val, list):
                for i, v in enumerate(val):
                    input_dict['%s_%d' % (key, i)] = v
            else:
                input_dict[key] = val

        df = pd.DataFrame([input_dict])

        if self._have_lock:
            self._lock = pl.FileLock(self.inputs.in_file)
            # Acquire lock
            self._lock.acquire()

        # Append to any pre-existing rows before writing back.
        if op.exists(self.inputs.in_file):
            formerdf = pd.read_csv(self.inputs.in_file, index_col=0)
            df = pd.concat([formerdf, df], ignore_index=True)

        with open(self.inputs.in_file, 'w') as f:
            df.to_csv(f)

        if self._have_lock:
            self._lock.release()

        # Using nipype.external.portalocker this might be something like:
        # with pl.Lock(self.inputs.in_file, timeout=1) as fh:
        #     if op.exists(fh):
        #         formerdf = pd.read_csv(fh, index_col=0)
        #         df = pd.concat([formerdf, df], ignore_index=True)
        #     df.to_csv(fh)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['csv_file'] = self.inputs.in_file
        return outputs

    def _outputs(self):
        return self._add_output_traits(super(AddCSVRow, self)._outputs())

    def _add_output_traits(self, base):
        return base
class CalculateNormalizedMomentsInputSpec(TraitedSpec):
    timeseries_file = File(
        exists=True, mandatory=True,
        desc='Text file with timeseries in columns and timepoints in rows,\
 whitespace separated')
    # 3 -> skewness, 4 -> kurtosis (passed to scipy.stats.moment).
    moment = traits.Int(
        mandatory=True,
        desc="Define which moment should be calculated, 3 for skewness, 4 for\
 kurtosis.")
class CalculateNormalizedMomentsOutputSpec(TraitedSpec):
    # One normalized moment per timeseries column.
    moments = traits.List(traits.Float(), desc='Moments')
class CalculateNormalizedMoments(BaseInterface):
    """Calculates moments of timeseries.

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> skew = misc.CalculateNormalizedMoments()
    >>> skew.inputs.moment = 3
    >>> skew.inputs.timeseries_file = 'timeseries.txt'
    >>> skew.run() # doctest: +SKIP
    """
    input_spec = CalculateNormalizedMomentsInputSpec
    output_spec = CalculateNormalizedMomentsOutputSpec

    def _run_interface(self, runtime):
        # Delegate to the module-level helper and keep the result for
        # _list_outputs.
        self._moments = calc_moments(
            self.inputs.timeseries_file, self.inputs.moment)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        # BUG FIX: the output trait is named 'moments' (see
        # CalculateNormalizedMomentsOutputSpec); the old 'skewness' key
        # was never declared, so the result was silently dropped.
        outputs['moments'] = self._moments
        return outputs
def calc_moments(timeseries_file, moment):
    """Returns nth moment (3 for skewness, 4 for kurtosis) of timeseries
    (list of values; one per timeseries).

    Keyword arguments:
    timeseries_file -- text file with white space separated timepoints in rows
    """
    data = np.genfromtxt(timeseries_file)
    variance = stats.moment(data, 2, axis=0)
    central = stats.moment(data, moment, axis=0)
    # Normalize by variance^(n/2); report 0 for constant columns instead
    # of dividing by zero.
    return np.where(variance == 0, 0, central / variance ** (moment / 2.0))
class AddNoiseInputSpec(TraitedSpec):
    in_file = File(exists=True, mandatory=True,
                   desc='input image that will be corrupted with noise')
    in_mask = File(exists=True, desc=('input mask, voxels outside this mask '
                                      'will be considered background'))
    snr = traits.Float(10.0, desc='desired output SNR in dB', usedefault=True)
    # Foreground noise model; see AddNoise.gen_noise.
    dist = traits.Enum('normal', 'rician', usedefault=True, mandatory=True,
                       desc=('desired noise distribution'))
    # Background noise model (applied outside the mask).
    bg_dist = traits.Enum('normal', 'rayleigh', usedefault=True, mandatory=True,
                          desc=('desired noise distribution, currently '
                                'only normal is implemented'))
    out_file = File(desc='desired output filename')
class AddNoiseOutputSpec(TraitedSpec):
    # in_file plus synthetic noise at the requested SNR.
    out_file = File(exists=True, desc='corrupted image')
class AddNoise(BaseInterface):
    """
    Corrupts with noise the input image

    Example
    -------
    >>> from nipype.algorithms.misc import AddNoise
    >>> noise = AddNoise()
    >>> noise.inputs.in_file = 'T1.nii'
    >>> noise.inputs.in_mask = 'mask.nii'
    >>> noise.snr = 30.0
    >>> noise.run() # doctest: +SKIP
    """
    input_spec = AddNoiseInputSpec
    output_spec = AddNoiseOutputSpec

    def _run_interface(self, runtime):
        in_image = nb.load(self.inputs.in_file)
        in_data = in_image.get_data()
        snr = self.inputs.snr

        # Without a mask, every voxel is treated as foreground.
        if isdefined(self.inputs.in_mask):
            in_mask = nb.load(self.inputs.in_mask).get_data()
        else:
            in_mask = np.ones_like(in_data)

        result = self.gen_noise(in_data, mask=in_mask, snr_db=snr,
                                dist=self.inputs.dist, bg_dist=self.inputs.bg_dist)
        res_im = nb.Nifti1Image(result, in_image.affine, in_image.header)
        res_im.to_filename(self._gen_output_filename())
        return runtime

    def _gen_output_filename(self):
        # Default output name encodes the SNR, e.g. base_SNR30.00.nii.
        if not isdefined(self.inputs.out_file):
            _, base, ext = split_filename(self.inputs.in_file)
            out_file = os.path.abspath('%s_SNR%03.2f%s' % (base, self.inputs.snr, ext))
        else:
            out_file = self.inputs.out_file
        return out_file

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['out_file'] = self._gen_output_filename()
        return outputs

    def gen_noise(self, image, mask=None, snr_db=10.0, dist='normal', bg_dist='normal'):
        """
        Generates a copy of an image with a certain amount of
        added gaussian noise (rayleigh for background in mask)
        """
        from math import sqrt
        # Convert SNR from dB to a linear amplitude ratio.
        snr = sqrt(np.power(10.0, snr_db / 10.0))

        if mask is None:
            mask = np.ones_like(image)
        else:
            mask[mask > 0] = 1
            mask[mask < 1] = 0

            # Broadcast a 3-D mask over the volumes of a 4-D image.
            # NOTE(review): assumes image.shape[3] exists here (4-D input)
            # -- confirm with callers.
            if mask.ndim < image.ndim:
                mask = np.rollaxis(np.array([mask] * image.shape[3]), 0, 4)

        signal = image[mask > 0].reshape(-1)

        if dist == 'normal':
            # Noise sigma derived from the demeaned signal variance.
            signal = signal - signal.mean()
            sigma_n = sqrt(signal.var() / snr)
            noise = np.random.normal(size=image.shape, scale=sigma_n)

            # Optionally use Rayleigh-distributed noise for background.
            if (np.any(mask == 0)) and (bg_dist == 'rayleigh'):
                bg_noise = np.random.rayleigh(size=image.shape, scale=sigma_n)
                noise[mask == 0] = bg_noise[mask == 0]

            im_noise = image + noise

        elif dist == 'rician':
            # Rician noise: magnitude of two orthogonal Gaussian channels.
            sigma_n = signal.mean() / snr
            n_1 = np.random.normal(size=image.shape, scale=sigma_n)
            n_2 = np.random.normal(size=image.shape, scale=sigma_n)
            stde_1 = n_1 / sqrt(2.0)
            stde_2 = n_2 / sqrt(2.0)
            im_noise = np.sqrt((image + stde_1)**2 + (stde_2)**2)
        else:
            raise NotImplementedError(('Only normal and rician distributions '
                                       'are supported'))

        return im_noise
class NormalizeProbabilityMapSetInputSpec(TraitedSpec):
    in_files = InputMultiPath(File(exists=True, mandatory=True,
                              desc='The tpms to be normalized'))
    # Optional: restrict normalization to this mask.
    in_mask = File(exists=True,
                   desc='Masked voxels must sum up 1.0, 0.0 otherwise.')
class NormalizeProbabilityMapSetOutputSpec(TraitedSpec):
    # One normalized map per input map.
    out_files = OutputMultiPath(File(exists=True),
                                desc="normalized maps")
class NormalizeProbabilityMapSet(BaseInterface):
    """ Returns the input tissue probability maps (tpms, aka volume fractions)
    normalized to sum up 1.0 at each voxel within the mask.

    .. note:: Please recall this is not a spatial normalization algorithm

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> normalize = misc.NormalizeProbabilityMapSet()
    >>> normalize.inputs.in_files = [ 'tpm_00.nii.gz', 'tpm_01.nii.gz', \
'tpm_02.nii.gz' ]
    >>> normalize.inputs.in_mask = 'tpms_msk.nii.gz'
    >>> normalize.run() # doctest: +SKIP
    """
    input_spec = NormalizeProbabilityMapSetInputSpec
    output_spec = NormalizeProbabilityMapSetOutputSpec

    def _run_interface(self, runtime):
        # Forward the mask only when the user actually set one.
        mask = self.inputs.in_mask if isdefined(self.inputs.in_mask) else None
        self._out_filenames = normalize_tpms(self.inputs.in_files, mask)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['out_files'] = self._out_filenames
        return outputs
class SplitROIsInputSpec(TraitedSpec):
    in_file = File(exists=True, mandatory=True,
                   desc='file to be splitted')
    in_mask = File(exists=True, desc='only process files inside mask')
    # Chunk dimensions in voxels, e.g. (10, 10, 1).
    roi_size = traits.Tuple(traits.Int, traits.Int, traits.Int,
                            desc='desired ROI size')
class SplitROIsOutputSpec(TraitedSpec):
    # Parallel lists: one chunk image, one validity mask and one index
    # file (original voxel locations) per ROI.
    out_files = OutputMultiPath(File(exists=True),
                                desc='the resulting ROIs')
    out_masks = OutputMultiPath(File(exists=True),
                                desc='a mask indicating valid values')
    out_index = OutputMultiPath(File(exists=True),
                                desc='arrays keeping original locations')
class SplitROIs(BaseInterface):
    """
    Splits a 3D image in small chunks to enable parallel processing.
    ROIs keep time series structure in 4D images.

    >>> from nipype.algorithms import misc
    >>> rois = misc.SplitROIs()
    >>> rois.inputs.in_file = 'diffusion.nii'
    >>> rois.inputs.in_mask = 'mask.nii'
    >>> rois.run() # doctest: +SKIP
    """
    input_spec = SplitROIsInputSpec
    output_spec = SplitROIsOutputSpec

    def _run_interface(self, runtime):
        # Optional inputs are forwarded as None when unset.
        mask = self.inputs.in_mask if isdefined(self.inputs.in_mask) else None
        size = (self.inputs.roi_size
                if isdefined(self.inputs.roi_size) else None)
        files, masks, index = split_rois(self.inputs.in_file, mask, size)
        self._outnames = {
            'out_files': files,
            'out_masks': masks,
            'out_index': index,
        }
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs.update(self._outnames)
        return outputs
class MergeROIsInputSpec(TraitedSpec):
    in_files = InputMultiPath(File(exists=True, mandatory=True,
                              desc='files to be re-merged'))
    # Index files produced by SplitROIs, pairing each chunk with its
    # original voxel locations.
    in_index = InputMultiPath(File(exists=True, mandatory=True),
                              desc='array keeping original locations')
    in_reference = File(exists=True, desc='reference file')
class MergeROIsOutputSpec(TraitedSpec):
    # Single image reassembled from the ROI chunks.
    merged_file = File(exists=True, desc='the recomposed file')
class MergeROIs(BaseInterface):
    """
    Splits a 3D image in small chunks to enable parallel processing.
    ROIs keep time series structure in 4D images.

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> rois = misc.MergeROIs()
    >>> rois.inputs.in_files = ['roi%02d.nii' % i for i in range(1, 6)]
    >>> rois.inputs.in_reference = 'mask.nii'
    >>> rois.inputs.in_index = ['roi%02d_idx.npz' % i for i in range(1, 6)]
    >>> rois.run() # doctest: +SKIP
    """
    input_spec = MergeROIsInputSpec
    output_spec = MergeROIsOutputSpec

    def _run_interface(self, runtime):
        # Recompose the chunks and remember the resulting file name.
        self._merged = merge_rois(self.inputs.in_files,
                                  self.inputs.in_index,
                                  self.inputs.in_reference)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['merged_file'] = self._merged
        return outputs
def normalize_tpms(in_files, in_mask=None, out_files=None):
    """
    Returns the input tissue probability maps (tpms, aka volume fractions)
    normalized to sum up 1.0 at each voxel within the mask.

    Parameters
    ----------
    in_files : str or list of str, the tpm files to normalize.
    in_mask : optional mask file; voxels outside it are excluded.
    out_files : optional list of output names; autogenerated as
        ``<name>_norm_<ii><ext>`` in the cwd when not provided.
    """
    import nibabel as nib
    import numpy as np
    import os.path as op

    # BUG FIX: the default used to be the mutable literal ``[]``, which
    # is shared across calls and kept accumulating file names; use a
    # None sentinel instead.
    if out_files is None:
        out_files = []

    in_files = np.atleast_1d(in_files).tolist()

    if len(out_files) != len(in_files):
        # Autogenerate one output name per input, preserving .nii.gz.
        for i, finname in enumerate(in_files):
            fname, fext = op.splitext(op.basename(finname))
            if fext == '.gz':
                fname, fext2 = op.splitext(fname)
                fext = fext2 + fext
            out_file = op.abspath('%s_norm_%02d%s' % (fname, i, fext))
            out_files += [out_file]

    imgs = [nib.load(fim) for fim in in_files]

    if len(in_files) == 1:
        # Single map: binarize (everything positive becomes 1.0).
        img_data = imgs[0].get_data()
        img_data[img_data > 0.0] = 1.0
        hdr = imgs[0].header.copy()
        hdr['data_type'] = 16
        hdr.set_data_dtype(np.float32)
        nib.save(nib.Nifti1Image(img_data.astype(np.float32), imgs[0].affine,
                                 hdr), out_files[0])
        return out_files[0]

    img_data = np.array([im.get_data() for im in imgs]).astype(np.float32)
    # img_data[img_data>1.0] = 1.0
    img_data[img_data < 0.0] = 0.0

    weights = np.sum(img_data, axis=0)

    # Exclude voxels where the maps sum to zero (0/0 otherwise).
    msk = np.ones_like(imgs[0].get_data())
    msk[weights <= 0] = 0

    if in_mask is not None:
        msk = nib.load(in_mask).get_data()
        msk[msk <= 0] = 0
        msk[msk > 0] = 1

    msk = np.ma.masked_equal(msk, 0)

    for i, out_file in enumerate(out_files):
        data = np.ma.masked_equal(img_data[i], 0)
        probmap = data / weights
        hdr = imgs[i].header.copy()
        hdr['data_type'] = 16
        hdr.set_data_dtype('float32')
        nib.save(nib.Nifti1Image(probmap.astype(np.float32), imgs[i].affine,
                                 hdr), out_file)

    return out_files
def split_rois(in_file, mask=None, roishape=None):
    """
    Splits an image in ROIs for parallel processing

    Writes one ``roi<i>.nii.gz`` chunk, one validity mask and one
    ``roi<i>_idx.npz`` index file (the original voxel positions) per ROI,
    and returns the three file lists.
    """
    import nibabel as nb
    import numpy as np
    from math import sqrt, ceil
    import os.path as op

    if roishape is None:
        roishape = (10, 10, 1)

    im = nb.load(in_file)
    imshape = im.shape
    dshape = imshape[:3]
    nvols = imshape[-1]
    # Number of voxels per ROI chunk.
    roisize = roishape[0] * roishape[1] * roishape[2]
    droishape = (roishape[0], roishape[1], roishape[2], nvols)

    # Normalize the mask to {0, 1}; default is "everything in-mask".
    if mask is not None:
        mask = nb.load(mask).get_data()
        mask[mask > 0] = 1
        mask[mask < 1] = 0
    else:
        mask = np.ones(dshape)

    mask = mask.reshape(-1).astype(np.uint8)
    nzels = np.nonzero(mask)
    els = np.sum(mask)
    nrois = int(ceil(els / float(roisize)))

    # Keep only in-mask voxels, flattened to (voxels, volumes).
    data = im.get_data().reshape((mask.size, -1))
    data = np.squeeze(data.take(nzels, axis=0))
    nvols = data.shape[-1]

    # All-ones mask reused for every completely-filled chunk.
    roidefname = op.abspath('onesmask.nii.gz')
    nb.Nifti1Image(np.ones(roishape, dtype=np.uint8), None,
                   None).to_filename(roidefname)

    out_files = []
    out_mask = []
    out_idxs = []

    for i in range(nrois):
        first = i * roisize
        last = (i + 1) * roisize
        fill = 0
        if last > els:
            # Last chunk: pad with zeros up to the fixed ROI size.
            fill = last - els
            last = els

        droi = data[first:last, ...]
        # Save the original voxel indices so merge_rois can restore them.
        iname = op.abspath('roi%010d_idx' % i)
        out_idxs.append(iname + '.npz')
        np.savez(iname, (nzels[0][first:last],))

        if fill > 0:
            droi = np.vstack((droi, np.zeros((fill, nvols), dtype=np.float32)))
            partialmsk = np.ones((roisize,), dtype=np.uint8)
            partialmsk[-fill:] = 0
            partname = op.abspath('partialmask.nii.gz')
            nb.Nifti1Image(partialmsk.reshape(roishape), None,
                           None).to_filename(partname)
            out_mask.append(partname)
        else:
            out_mask.append(roidefname)

        fname = op.abspath('roi%010d.nii.gz' % i)
        nb.Nifti1Image(droi.reshape(droishape),
                       None, None).to_filename(fname)
        out_files.append(fname)
    return out_files, out_mask, out_idxs
def merge_rois(in_files, in_idxs, in_ref,
               dtype=None, out_file=None):
    """
    Re-builds an image resulting from a parallelized processing

    Parameters
    ----------
    in_files : paths to the processed ROI chunk images.
    in_idxs : .npz files holding each chunk's original voxel indices.
    in_ref : reference image providing shape, affine and header.
    dtype : output data type (default np.float32).
    out_file : output path (default ``./merged.nii.gz``).
    """
    import nibabel as nb
    import numpy as np
    import os.path as op
    import subprocess as sp

    if out_file is None:
        out_file = op.abspath('merged.nii.gz')
    if dtype is None:
        dtype = np.float32

    # if file is compressed, uncompress using os
    # to avoid memory errors
    if op.splitext(in_ref)[1] == '.gz':
        try:
            # BUG FIX: '%i' on a string raised TypeError, which the
            # except below silently swallowed, so the file was never
            # unzipped; also use shell=False so the list form actually
            # passes the filename to gunzip.
            iflogger.info('uncompress %s' % in_ref)
            sp.check_call(['gunzip', in_ref], stdout=sp.PIPE, shell=False)
            in_ref = op.splitext(in_ref)[0]
        except Exception:
            # Best-effort: fall back to loading the compressed file.
            pass

    ref = nb.load(in_ref)
    aff = ref.affine
    hdr = ref.header.copy()
    rsh = ref.shape
    del ref
    npix = rsh[0] * rsh[1] * rsh[2]
    fcdata = nb.load(in_files[0]).get_data()

    if fcdata.ndim == 4:
        ndirs = fcdata.shape[-1]
    else:
        ndirs = 1
    newshape = (rsh[0], rsh[1], rsh[2], ndirs)
    hdr.set_data_dtype(dtype)
    hdr.set_xyzt_units('mm', 'sec')

    if ndirs < 300:
        # Few volumes: assemble the whole image in memory.
        data = np.zeros((npix, ndirs))
        for cname, iname in zip(in_files, in_idxs):
            f = np.load(iname)
            idxs = np.squeeze(f['arr_0'])
            cdata = nb.load(cname).get_data().reshape(-1, ndirs)
            nels = len(idxs)
            idata = (idxs, )
            try:
                data[idata, ...] = cdata[0:nels, ...]
            except Exception:
                print(('Consistency between indexes and chunks was '
                       'lost: data=%s, chunk=%s') % (str(data.shape),
                                                     str(cdata.shape)))
                raise
        hdr.set_data_shape(newshape)
        nb.Nifti1Image(data.reshape(newshape).astype(dtype),
                       aff, hdr).to_filename(out_file)
    else:
        # Many volumes: use one scratch file per volume to bound memory.
        hdr.set_data_shape(rsh[:3])
        nii = []
        for d in range(ndirs):
            fname = op.abspath('vol%06d.nii' % d)
            nb.Nifti1Image(np.zeros(rsh[:3]), aff, hdr).to_filename(fname)
            nii.append(fname)

        for cname, iname in zip(in_files, in_idxs):
            f = np.load(iname)
            idxs = np.squeeze(f['arr_0'])
            for d, fname in enumerate(nii):
                data = nb.load(fname).get_data().reshape(-1)
                cdata = nb.load(cname).get_data().reshape(-1, ndirs)[:, d]
                nels = len(idxs)
                idata = (idxs, )
                data[idata] = cdata[0:nels]
                nb.Nifti1Image(data.reshape(rsh[:3]),
                               aff, hdr).to_filename(fname)

        imgs = [nb.load(im) for im in nii]
        allim = nb.concat_images(imgs)
        allim.to_filename(out_file)

    return out_file
# Deprecated interfaces ------------------------------------------------------
class Distance(nam.Distance):
    """Calculates distance between two volumes.

    .. deprecated:: 0.10.0
       Use :py:class:`nipype.algorithms.metrics.Distance` instead.
    """
    def __init__(self, **inputs):
        # BUG FIX: super() must receive the class being defined; passing
        # ``nam.Distance`` resolved to *its* parent in the MRO, skipping
        # nam.Distance.__init__ altogether.
        super(Distance, self).__init__(**inputs)
        warnings.warn(("This interface has been deprecated since 0.10.0,"
                       " please use nipype.algorithms.metrics.Distance"),
                      DeprecationWarning)
class Overlap(nam.Overlap):
    """Calculates various overlap measures between two maps.

    .. deprecated:: 0.10.0
       Use :py:class:`nipype.algorithms.metrics.Overlap` instead.
    """
    def __init__(self, **inputs):
        # BUG FIX: super() must receive the class being defined; passing
        # ``nam.Overlap`` skipped nam.Overlap.__init__ in the MRO.
        super(Overlap, self).__init__(**inputs)
        warnings.warn(("This interface has been deprecated since 0.10.0,"
                       " please use nipype.algorithms.metrics.Overlap"),
                      DeprecationWarning)
class FuzzyOverlap(nam.FuzzyOverlap):
    """Calculates various overlap measures between two maps, using a fuzzy
    definition.

    .. deprecated:: 0.10.0
       Use :py:class:`nipype.algorithms.metrics.FuzzyOverlap` instead.
    """
    def __init__(self, **inputs):
        # BUG FIX: super() must receive the class being defined; passing
        # ``nam.FuzzyOverlap`` skipped nam.FuzzyOverlap.__init__.
        super(FuzzyOverlap, self).__init__(**inputs)
        warnings.warn(("This interface has been deprecated since 0.10.0,"
                       " please use nipype.algorithms.metrics.FuzzyOverlap"),
                      DeprecationWarning)
|
sgiavasis/nipype
|
nipype/algorithms/misc.py
|
Python
|
bsd-3-clause
| 51,523
|
[
"Gaussian"
] |
744b12e64c1accfa78461ad12cfc95c50fddbc2b256f966110971459d2660ca0
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
# Single source of truth for the release number: keeps `version` and the
# tagged-tarball `download_url` in sync (previously version was '1.5' while
# download_url still pointed at tarball/1.4).
VERSION = '1.5'

setup(
    name='pyproteins',
    version=VERSION,
    license='BSD',
    description='Toolbox to manipulate protein sequence data',
    author='Guillaume Launay & Cecile Hilpert',
    author_email='pitooon@gmail.com',
    url='https://github.com/glaunay/pyproteins',  # URL of the github repo
    package_dir={'': 'src'},
    packages=['pyproteins'],
    include_package_data=True,
    zip_safe=False,
    # Expose every module under src/ as a top-level py_module.
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    download_url='https://github.com/glaunay/pyproteins/tarball/' + VERSION,
    keywords=['protein', 'sequence'],  # arbitrary keywords
    classifiers=[],
    install_requires=[
        'bs4', 'biopython', 'numpy', 'lxml'
    ],
    package_data={
        'pyproteins': ['conf/confModule1.json', 'bin/module1.py', 'external/*']
    },
)
|
glaunay/pyproteins
|
setup.py
|
Python
|
gpl-3.0
| 1,434
|
[
"Biopython"
] |
2d5349b40f6a44d74db3e85f8f6ce9ba0419970c61d8f46b25a9eb47d9a880c4
|
from sys import stdout
import simtk.openmm as mm
import simtk.openmm.app as app
from simtk import unit as u
# Build an iAMOEBA water box from the bundled tip3p PDB and run NPT dynamics.
pdb = app.PDBFile("/home/kyleb/src/openmm/openmm/wrappers/python/simtk/openmm/app/data/tip3p.pdb")
forcefield = app.ForceField('iamoeba.xml')
system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0*u.nanometers)
system.addForce(mm.MonteCarloBarostat(1*u.atmospheres, 300*u.kelvin, 25))
integrator = mm.LangevinIntegrator(300 * u.kelvin, 1.0 / u.picoseconds, 0.5*u.femtoseconds)
platform = mm.Platform.getPlatformByName('CUDA')
simulation = app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
print('Minimizing...')
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(300*u.kelvin)
print('Equilibrating...')
simulation.step(100)
# Production run. Fixes vs. the original script:
#  * announce the run *before* starting it (the print used to come after
#    simulation.step(100000), so it appeared only once the run finished);
#  * give StateDataReporter the true production length (was totalSteps=1000
#    for a 100000-step run, making progress/remaining-time estimates wrong).
n_production_steps = 100000
simulation.reporters.append(app.DCDReporter('./iamoeba/trajectory.dcd', 1000))
simulation.reporters.append(app.StateDataReporter(stdout, 1000, step=True, potentialEnergy=True, temperature=True, progress=True, remainingTime=True, speed=True, totalSteps=n_production_steps, separator='\t'))
print('Running Production...')
simulation.step(n_production_steps)
|
kyleabeauchamp/HMCNotes
|
code/old/build_iamoeba_box.py
|
Python
|
gpl-2.0
| 1,177
|
[
"OpenMM"
] |
d88c6f2eeb12b581fcd3a6a95d321c92f95bf27eec353afabcd46ad96760307e
|
"""
Convenience routines for performing common operations.
@since: 0.28
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, sys
from zeroinstall import support, SafeException, logger
from zeroinstall.support import tasks
from zeroinstall import logger
DontUseGUI = object()
def get_selections_gui(iface_uri, gui_args, test_callback = None, use_gui = True):
	"""Run the GUI to choose and download a set of implementations.
	The user may ask the GUI to submit a bug report about the program. In that case,
	the GUI may ask us to test it. test_callback is called in that case with the implementations
	to be tested; the callback will typically call L{zeroinstall.injector.run.test_selections} and return the result of that.
	@param iface_uri: the required program, or None to show just the preferences dialog
	@type iface_uri: str
	@param gui_args: any additional arguments for the GUI itself
	@type gui_args: [str]
	@param test_callback: function to use to try running the program
	@type test_callback: L{zeroinstall.injector.selections.Selections} -> str
	@param use_gui: if True, raise a SafeException if the GUI is not available. If None, returns DontUseGUI if the GUI cannot be started. If False, returns DontUseGUI always. (since 1.11)
	@param use_gui: bool | None
	@return: the selected implementations
	@rtype: L{zeroinstall.injector.selections.Selections}
	@since: 0.28
	"""
	if use_gui is False:
		return DontUseGUI
	if 'DISPLAY' not in os.environ:
		if use_gui is None:
			return DontUseGUI
		else:
			raise SafeException("Can't use GUI because $DISPLAY is not set")
	from zeroinstall.injector import selections, qdom
	from io import BytesIO
	from os.path import join, dirname
	gui_exe = join(dirname(__file__), '0launch-gui', '0launch-gui')
	import socket
	# A socketpair provides a private bidirectional channel between the
	# parent (CLI) and the forked child (GUI) process.
	cli, gui = socket.socketpair()
	try:
		child = os.fork()
		if child == 0:
			# We are the child (GUI)
			try:
				try:
					cli.close()
					# We used to use pipes to support Python2.3...
					# Attach the GUI's end of the socket to its stdin/stdout.
					os.dup2(gui.fileno(), 1)
					os.dup2(gui.fileno(), 0)
					if use_gui is True:
						gui_args = ['-g'] + gui_args
					if iface_uri is not None:
						gui_args = gui_args + ['--', iface_uri]
					os.execvp(sys.executable, [sys.executable, gui_exe] + gui_args)
				except:
					import traceback
					traceback.print_exc(file = sys.stderr)
			finally:
				# exec failed — make sure the child never falls through into
				# the parent's code path.
				sys.stderr.flush()
				os._exit(1)
		# We are the parent (CLI)
		gui.close()
		gui = None
		while True:
			logger.info("Waiting for selections from GUI...")
			# Wire protocol: the GUI sends b'Length:XXXXXXXX\n' (8 hex digits
			# plus newline) followed by that many bytes of selections XML.
			reply = support.read_bytes(cli.fileno(), len('Length:') + 9, null_ok = True)
			if reply:
				if not reply.startswith(b'Length:'):
					raise Exception("Expected Length:, but got %s" % repr(reply))
				reply = reply.decode('ascii')
				xml = support.read_bytes(cli.fileno(), int(reply.split(':', 1)[1], 16))
				dom = qdom.parse(BytesIO(xml))
				sels = selections.Selections(dom)
				if dom.getAttribute('run-test'):
					logger.info("Testing program, as requested by GUI...")
					if test_callback is None:
						output = b"Can't test: no test_callback was passed to get_selections_gui()\n"
					else:
						output = test_callback(sels)
					logger.info("Sending results to GUI...")
					# Reply uses the same Length-prefixed framing.
					output = ('Length:%8x\n' % len(output)).encode('utf-8') + output
					logger.debug("Sending: %s", repr(output))
					while output:
						sent = cli.send(output)
						output = output[sent:]
					continue
			else:
				# EOF from the GUI: it exited without sending selections.
				sels = None
			pid, status = os.waitpid(child, 0)
			assert pid == child
			# waitpid status encodes the child's exit code in the high byte:
			# 1 means the user cancelled; 100 means no GUI could be started.
			if status == 1 << 8:
				logger.info("User cancelled the GUI; aborting")
				return None		# Aborted
			elif status == 100 << 8:
				if use_gui is None:
					return DontUseGUI
				else:
					raise SafeException("No GUI available")
			if status != 0:
				raise Exception("Error from GUI: code = %d" % status)
			break
	finally:
		for sock in [cli, gui]:
			if sock is not None: sock.close()
	return sels
def ensure_cached(uri, command = 'run', config = None):
	"""Ensure that an implementation of uri is cached.
	If not, it downloads one. It uses the GUI if a display is
	available, or the console otherwise.
	@param uri: the required interface
	@type uri: str
	@return: the selected implementations, or None if the user cancelled
	@rtype: L{zeroinstall.injector.selections.Selections}
	"""
	from zeroinstall.injector.driver import Driver
	from zeroinstall.injector.requirements import Requirements
	if config is None:
		from zeroinstall.injector.config import load_config
		config = load_config()
	requirements = Requirements(uri)
	requirements.command = command
	driver = Driver(config, requirements)
	needs_work = driver.need_download() or not driver.solver.ready
	if needs_work:
		# Prefer the GUI when a display is available; it performs the
		# download itself and hands back the resulting selections.
		gui_sels = get_selections_gui(uri, ['--command', command], use_gui = None)
		if gui_sels != DontUseGUI:
			return gui_sels
		# Headless fallback: solve and fetch on the console.
		blocker = driver.solve_and_download_impls()
		tasks.wait_for_blocker(blocker)
	return driver.solver.selections
def exec_man(stores, sels, main = None, fallback_name = None):
	"""Exec the man command to show the man-page for this interface.
	Never returns.
	@since: 1.12"""
	interface_uri = sels.interface
	selected_impl = sels.selections[interface_uri]
	if selected_impl.id.startswith('package'):
		# Distribution-provided package: no cached implementation directory.
		impl_path = None
	else:
		impl_path = selected_impl.get_path(stores)
	if main is None:
		# No explicit program name given; take it from the first <command>.
		if sels.commands:
			selected_command = sels.commands[0]
		else:
			print("No <command> in selections!", file=sys.stderr)
			sys.exit(1)
		main = selected_command.path
	if main is None:
		print("No main program for interface '%s'" % interface_uri, file=sys.stderr)
		sys.exit(1)
	prog_name = os.path.basename(main)
	if impl_path is None:
		# Package implementation
		logger.debug("Searching for man-page native command %s (from %s)" % (prog_name, fallback_name))
		# execlp replaces this process; nothing after it runs on success.
		os.execlp('man', 'man', prog_name)
	assert impl_path
	logger.debug("Searching for man-page for %s or %s in %s" % (prog_name, fallback_name, impl_path))
	# TODO: the feed should say where the man-pages are, but for now we'll accept
	# a directory called man in some common locations...
	for mandir in ['man', 'share/man', 'usr/man', 'usr/share/man']:
		manpath = os.path.join(impl_path, mandir)
		if os.path.isdir(manpath):
			# Note: unlike "man -M", this also copes with LANG settings...
			os.environ['MANPATH'] = manpath
			os.execlp('man', 'man', prog_name)
			sys.exit(1)
	# No man directory given or found, so try searching for man files
	manpages = []
	for root, dirs, files in os.walk(impl_path):
		for f in files:
			# Strip a .gz suffix before checking the man-section suffix.
			if f.endswith('.gz'):
				manpage_file = f[:-3]
			else:
				manpage_file = f
			if manpage_file.endswith('.1') or \
			   manpage_file.endswith('.6') or \
			   manpage_file.endswith('.8'):
				manpage_prog = manpage_file[:-2]
				if manpage_prog == prog_name or manpage_prog == fallback_name:
					os.execlp('man', 'man', os.path.join(root, f))
					sys.exit(1)
				else:
					manpages.append((root, f))
		# Prune hidden directories from the walk (iterate a copy while
		# mutating dirs in place, as os.walk requires).
		for d in list(dirs):
			if d.startswith('.'):
				dirs.remove(d)
	print("No matching manpage was found for '%s' (%s)" % (fallback_name, interface_uri))
	if manpages:
		print("These non-matching man-pages were found, however:")
		for root, file in manpages:
			print(os.path.join(root, file))
	sys.exit(1)
|
timdiels/0install
|
zeroinstall/helpers.py
|
Python
|
lgpl-2.1
| 7,229
|
[
"VisIt"
] |
fdb94b4aca660b7c6843aada12c23cfd5d6f516ad94fd7773df1fcec7a1c2609
|
#!/usr/bin/env python
# Copyright 2017-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: James D. McClain <jmcclain@princeton.edu>
#
"""Module for running restricted closed-shell k-point ccsd(t)"""
import ctypes
import h5py
import numpy as np
import pyscf.pbc.cc.kccsd_rhf
from itertools import product
from pyscf import lib
from pyscf.cc import _ccsd
from pyscf.lib import logger
from pyscf.lib.parameters import LARGE_DENOM
from pyscf.pbc.lib import kpts_helper
from pyscf.pbc.mp.kmp2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx) # noqa
#einsum = np.einsum
einsum = lib.einsum
# CCSD(T) equations taken from Scuseria, JCP (94), 1991
#
# NOTE: As pointed out in cc/ccsd_t_slow.py, there is an error in this paper
# and the equation should read [ia] >= [jb] >= [kc] (since the only
# symmetry in spin-less operators is the exchange of a column of excitation
# ooperators).
def kernel(mycc, eris, t1=None, t2=None, max_memory=2000, verbose=logger.INFO):
    '''Returns the CCSD(T) for restricted closed-shell systems with k-points.
    Note:
        Returns real part of the CCSD(T) energy, raises warning if there is
        a complex part.
    Args:
        mycc (:class:`RCCSD`): Coupled-cluster object storing results of
            a coupled-cluster calculation.
        eris (:class:`_ERIS`): Integral object holding the relevant electron-
            repulsion integrals and Fock matrix elements
        t1 (:obj:`ndarray`): t1 coupled-cluster amplitudes
        t2 (:obj:`ndarray`): t2 coupled-cluster amplitudes
        max_memory (float): Maximum memory used in calculation (NOT USED)
        verbose (int, :class:`Logger`): verbosity of calculation
    Returns:
        energy_t (float): The real-part of the k-point CCSD(T) energy.
    '''
    assert isinstance(mycc, pyscf.pbc.cc.kccsd_rhf.RCCSD)
    cpu1 = cpu0 = (logger.process_clock(), logger.perf_counter())
    if isinstance(verbose, logger.Logger):
        log = verbose
    else:
        log = logger.Logger(mycc.stdout, verbose)
    if t1 is None: t1 = mycc.t1
    if t2 is None: t2 = mycc.t2
    if eris is None:
        raise TypeError('Electron repulsion integrals, `eris`, must be passed in '
                        'to the CCSD(T) kernel or created in the cc object for '
                        'the k-point CCSD(T) to run!')
    if t1 is None or t2 is None:
        raise TypeError('Must pass in t1/t2 amplitudes to k-point CCSD(T)! (Maybe '
                        'need to run `.ccsd()` on the ccsd object?)')
    cell = mycc._scf.cell
    kpts = mycc.kpts
    # The dtype of any local arrays that will be created
    dtype = t1.dtype
    nkpts, nocc, nvir = t1.shape
    mo_energy_occ = [eris.mo_energy[ki][:nocc] for ki in range(nkpts)]
    mo_energy_vir = [eris.mo_energy[ki][nocc:] for ki in range(nkpts)]
    # FIX: np.float was removed in numpy >= 1.24; use np.float64 explicitly
    # (np.float was an alias of the builtin float, i.e. a 64-bit double).
    mo_energy = np.asarray([eris.mo_energy[ki] for ki in range(nkpts)], dtype=np.float64, order='C')
    fov = eris.fock[:, :nocc, nocc:]
    mo_e = mo_energy
    mo_e_o = mo_energy_occ
    mo_e_v = mo_energy_vir
    # Set up class for k-point conservation
    kconserv = kpts_helper.get_kconserv(cell, kpts)
    # Create necessary temporary eris for fast read
    feri_tmp, t2T, eris_vvop, eris_vooo_C = create_t3_eris(mycc, kconserv, [eris.vovv, eris.oovv, eris.ooov, t2])
    t1T = np.array([x.T for x in t1], dtype=np.complex128, order='C')
    fvo = np.array([x.T for x in fov], dtype=np.complex128, order='C')
    cpu1 = log.timer_debug1('CCSD(T) tmp eri creation', *cpu1)
    #def get_w_old(ki, kj, kk, ka, kb, kc, a0, a1, b0, b1, c0, c1, out=None):
    #    '''Wijkabc intermediate as described in Scuseria paper before Pijkabc acts'''
    #    km = kconserv[kc, kk, kb]
    #    kf = kconserv[kk, kc, kj]
    #    ret = einsum('kjcf,fiba->abcijk', t2[kk,kj,kc,:,:,c0:c1,:], eris.vovv[kf,ki,kb,:,:,b0:b1,a0:a1].conj())
    #    ret = ret - einsum('mkbc,jima->abcijk', t2[km,kk,kb,:,:,b0:b1,c0:c1], eris.ooov[kj,ki,km,:,:,:,a0:a1].conj())
    #    return ret
    def get_w(ki, kj, kk, ka, kb, kc, a0, a1, b0, b1, c0, c1):
        '''Wijkabc intermediate as described in Scuseria paper before Pijkabc acts
        Uses tranposed eris for fast data access.'''
        km = kconserv[kc, kk, kb]
        kf = kconserv[kk, kc, kj]
        out = einsum('cfjk,abif->abcijk', t2T[kc,kf,kj,c0:c1,:,:,:], eris_vvop[ka,kb,ki,a0:a1,b0:b1,:,nocc:])
        out = out - einsum('cbmk,aijm->abcijk', t2T[kc,kb,km,c0:c1,b0:b1,:,:], eris_vooo_C[ka,ki,kj,a0:a1,:,:,:])
        return out
    def get_permuted_w(ki, kj, kk, ka, kb, kc, orb_indices):
        '''Pijkabc operating on Wijkabc intermediate as described in Scuseria paper'''
        a0, a1, b0, b1, c0, c1 = orb_indices
        out = get_w(ki, kj, kk, ka, kb, kc, a0, a1, b0, b1, c0, c1)
        out = out + get_w(kj, kk, ki, kb, kc, ka, b0, b1, c0, c1, a0, a1).transpose(2,0,1,5,3,4)
        out = out + get_w(kk, ki, kj, kc, ka, kb, c0, c1, a0, a1, b0, b1).transpose(1,2,0,4,5,3)
        out = out + get_w(ki, kk, kj, ka, kc, kb, a0, a1, c0, c1, b0, b1).transpose(0,2,1,3,5,4)
        out = out + get_w(kk, kj, ki, kc, kb, ka, c0, c1, b0, b1, a0, a1).transpose(2,1,0,5,4,3)
        out = out + get_w(kj, ki, kk, kb, ka, kc, b0, b1, a0, a1, c0, c1).transpose(1,0,2,4,3,5)
        return out
    def get_rw(ki, kj, kk, ka, kb, kc, orb_indices):
        '''R operating on Wijkabc intermediate as described in Scuseria paper'''
        a0, a1, b0, b1, c0, c1 = orb_indices
        ret = (4. * get_permuted_w(ki,kj,kk,ka,kb,kc,orb_indices) +
               1. * get_permuted_w(kj,kk,ki,ka,kb,kc,orb_indices).transpose(0,1,2,5,3,4) +
               1. * get_permuted_w(kk,ki,kj,ka,kb,kc,orb_indices).transpose(0,1,2,4,5,3) -
               2. * get_permuted_w(ki,kk,kj,ka,kb,kc,orb_indices).transpose(0,1,2,3,5,4) -
               2. * get_permuted_w(kk,kj,ki,ka,kb,kc,orb_indices).transpose(0,1,2,5,4,3) -
               2. * get_permuted_w(kj,ki,kk,ka,kb,kc,orb_indices).transpose(0,1,2,4,3,5))
        return ret
    #def get_v_old(ki, kj, kk, ka, kb, kc, a0, a1, b0, b1, c0, c1):
    #    '''Vijkabc intermediate as described in Scuseria paper'''
    #    km = kconserv[ki,ka,kj]
    #    kf = kconserv[ki,ka,kj]
    #    out = np.zeros((a1-a0,b1-b0,c1-c0) + (nocc,)*3, dtype=dtype)
    #    if kk == kc:
    #        out = out + einsum('kc,ijab->abcijk', 0.5*t1[kk,:,c0:c1], eris.oovv[ki,kj,ka,:,:,a0:a1,b0:b1].conj())
    #        out = out + einsum('kc,ijab->abcijk', 0.5*fov[kk,:,c0:c1], t2[ki,kj,ka,:,:,a0:a1,b0:b1])
    #    return out
    def get_v(ki, kj, kk, ka, kb, kc, a0, a1, b0, b1, c0, c1):
        '''Vijkabc intermediate as described in Scuseria paper'''
        #km = kconserv[ki,ka,kj]
        #kf = kconserv[ki,ka,kj]
        out = np.zeros((a1-a0,b1-b0,c1-c0) + (nocc,)*3, dtype=dtype)
        if kk == kc:
            out = out + einsum('ck,baji->abcijk', 0.5*t1T[kk,c0:c1,:], eris_vvop[kb,ka,kj,b0:b1,a0:a1,:,:nocc])
            # We see this is the same t2T term needed for the `w` contraction:
            #   einsum('cbmk,aijm->abcijk', t2T[kc,kb,km,c0:c1,b0:b1], eris_vooo_C[ka,ki,kj,a0:a1])
            #
            # For the kpoint indices [kk,ki,kj,kc,ka,kb] we have that we need
            #   t2T[kb,ka,km], where km = kconserv[kb,kj,ka]
            # The remaining k-point not used in t2T, i.e. kc, has the condition kc == kk in the case of
            # get_v.  So, we have from 3-particle conservation
            #   (kk-kc) + ki + kj - ka - kb = 0,
            # i.e. ki = km.
            out = out + einsum('ck,baij->abcijk', 0.5*fvo[kk,c0:c1,:], t2T[kb,ka,ki,b0:b1,a0:a1,:,:])
        return out
    def get_permuted_v(ki, kj, kk, ka, kb, kc, orb_indices):
        '''Pijkabc operating on Vijkabc intermediate as described in Scuseria paper'''
        a0, a1, b0, b1, c0, c1 = orb_indices
        ret = get_v(ki, kj, kk, ka, kb, kc, a0, a1, b0, b1, c0, c1)
        ret = ret + get_v(kj, kk, ki, kb, kc, ka, b0, b1, c0, c1, a0, a1).transpose(2,0,1,5,3,4)
        ret = ret + get_v(kk, ki, kj, kc, ka, kb, c0, c1, a0, a1, b0, b1).transpose(1,2,0,4,5,3)
        ret = ret + get_v(ki, kk, kj, ka, kc, kb, a0, a1, c0, c1, b0, b1).transpose(0,2,1,3,5,4)
        ret = ret + get_v(kk, kj, ki, kc, kb, ka, c0, c1, b0, b1, a0, a1).transpose(2,1,0,5,4,3)
        ret = ret + get_v(kj, ki, kk, kb, ka, kc, b0, b1, a0, a1, c0, c1).transpose(1,0,2,4,3,5)
        return ret
    def contract_t3Tv(kpt_indices, orb_indices, data):
        '''Calculate t3T(ransposed) array using C driver.'''
        ki, kj, kk, ka, kb, kc = kpt_indices
        a0, a1, b0, b1, c0, c1 = orb_indices
        slices = np.array([a0, a1, b0, b1, c0, c1], dtype=np.int32)
        mo_offset = np.array([ki,kj,kk,ka,kb,kc], dtype=np.int32)
        vvop_ab = np.asarray(data[0][0], dtype=np.complex128, order='C')
        vvop_ac = np.asarray(data[0][1], dtype=np.complex128, order='C')
        vvop_ba = np.asarray(data[0][2], dtype=np.complex128, order='C')
        vvop_bc = np.asarray(data[0][3], dtype=np.complex128, order='C')
        vvop_ca = np.asarray(data[0][4], dtype=np.complex128, order='C')
        vvop_cb = np.asarray(data[0][5], dtype=np.complex128, order='C')
        vooo_aj = np.asarray(data[1][0], dtype=np.complex128, order='C')
        vooo_ak = np.asarray(data[1][1], dtype=np.complex128, order='C')
        vooo_bi = np.asarray(data[1][2], dtype=np.complex128, order='C')
        vooo_bk = np.asarray(data[1][3], dtype=np.complex128, order='C')
        vooo_ci = np.asarray(data[1][4], dtype=np.complex128, order='C')
        vooo_cj = np.asarray(data[1][5], dtype=np.complex128, order='C')
        t2T_cj = np.asarray(data[2][0], dtype=np.complex128, order='C')
        t2T_bk = np.asarray(data[2][1], dtype=np.complex128, order='C')
        t2T_ci = np.asarray(data[2][2], dtype=np.complex128, order='C')
        t2T_ak = np.asarray(data[2][3], dtype=np.complex128, order='C')
        t2T_bi = np.asarray(data[2][4], dtype=np.complex128, order='C')
        t2T_aj = np.asarray(data[2][5], dtype=np.complex128, order='C')
        t2T_cb = np.asarray(data[3][0], dtype=np.complex128, order='C')
        t2T_bc = np.asarray(data[3][1], dtype=np.complex128, order='C')
        t2T_ca = np.asarray(data[3][2], dtype=np.complex128, order='C')
        t2T_ac = np.asarray(data[3][3], dtype=np.complex128, order='C')
        t2T_ba = np.asarray(data[3][4], dtype=np.complex128, order='C')
        t2T_ab = np.asarray(data[3][5], dtype=np.complex128, order='C')
        # NOTE: the interleaved ordering below is the layout the C driver expects.
        data = [vvop_ab, vvop_ac, vvop_ba, vvop_bc, vvop_ca, vvop_cb,
                vooo_aj, vooo_ak, vooo_bi, vooo_bk, vooo_ci, vooo_cj,
                t2T_cj, t2T_cb, t2T_bk, t2T_bc, t2T_ci, t2T_ca, t2T_ak,
                t2T_ac, t2T_bi, t2T_ba, t2T_aj, t2T_ab]
        data_ptrs = [x.ctypes.data_as(ctypes.c_void_p) for x in data]
        data_ptrs = (ctypes.c_void_p*24)(*data_ptrs)
        # FIX: use this function's own `orb_indices` argument rather than the
        # enclosing loop variable `task` (they carry the same values at every
        # call site, but relying on the closure was fragile).
        a0, a1, b0, b1, c0, c1 = orb_indices
        t3Tw = np.empty((a1-a0,b1-b0,c1-c0) + (nocc,)*3, dtype=np.complex128, order='C')
        t3Tv = np.empty((a1-a0,b1-b0,c1-c0) + (nocc,)*3, dtype=np.complex128, order='C')
        drv = _ccsd.libcc.CCsd_zcontract_t3T
        drv(t3Tw.ctypes.data_as(ctypes.c_void_p),
            t3Tv.ctypes.data_as(ctypes.c_void_p),
            mo_e.ctypes.data_as(ctypes.c_void_p),
            t1T.ctypes.data_as(ctypes.c_void_p),
            fvo.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_int(nocc), ctypes.c_int(nvir),
            ctypes.c_int(nkpts),
            mo_offset.ctypes.data_as(ctypes.c_void_p),
            slices.ctypes.data_as(ctypes.c_void_p),
            data_ptrs)
        return t3Tw, t3Tv
    def get_data(kpt_indices):
        # NOTE: `task` here is the current orbital block from the enclosing loop.
        idx_args = get_data_slices(kpt_indices, task, kconserv)
        vvop_indices, vooo_indices, t2T_vvop_indices, t2T_vooo_indices = idx_args
        vvop_data = [eris_vvop[tuple(x)] for x in vvop_indices]
        vooo_data = [eris_vooo_C[tuple(x)] for x in vooo_indices]
        t2T_vvop_data = [t2T[tuple(x)] for x in t2T_vvop_indices]
        t2T_vooo_data = [t2T[tuple(x)] for x in t2T_vooo_indices]
        data = [vvop_data, vooo_data, t2T_vvop_data, t2T_vooo_data]
        return data
    energy_t = 0.0
    # Get location of padded elements in occupied and virtual space
    nonzero_opadding, nonzero_vpadding = padding_k_idx(mycc, kind="split")
    mem_now = lib.current_memory()[0]
    max_memory = max(0, mycc.max_memory - mem_now)
    blkmin = 4
    # temporary t3 array is size:  2 * nkpts**3 * blksize**3 * nocc**3 * 16
    vir_blksize = min(nvir, max(blkmin, int((max_memory*.9e6/16/nocc**3/nkpts**3/2)**(1./3))))
    tasks = []
    log.debug('max_memory %d MB (%d MB in use)', max_memory, mem_now)
    log.debug('virtual blksize = %d (nvir = %d)', nvir, vir_blksize)
    for a0, a1 in lib.prange(0, nvir, vir_blksize):
        for b0, b1 in lib.prange(0, nvir, vir_blksize):
            for c0, c1 in lib.prange(0, nvir, vir_blksize):
                tasks.append((a0,a1,b0,b1,c0,c1))
    for ka in range(nkpts):
        for kb in range(ka+1):
            for task_id, task in enumerate(tasks):
                a0,a1,b0,b1,c0,c1 = task
                my_permuted_w = np.zeros((nkpts,)*3 + (a1-a0,b1-b0,c1-c0) + (nocc,)*3, dtype=dtype)
                my_permuted_v = np.zeros((nkpts,)*3 + (a1-a0,b1-b0,c1-c0) + (nocc,)*3, dtype=dtype)
                for ki, kj, kk in product(range(nkpts), repeat=3):
                    # Find momentum conservation condition for triples
                    # amplitude t3ijkabc
                    kc = kpts_helper.get_kconserv3(cell, kpts, [ki, kj, kk, ka, kb])
                    if not (ka >= kb and kb >= kc):
                        continue
                    kpt_indices = [ki,kj,kk,ka,kb,kc]
                    data = get_data(kpt_indices)
                    t3Tw, t3Tv = contract_t3Tv(kpt_indices, task, data)
                    my_permuted_w[ki,kj,kk] = t3Tw
                    my_permuted_v[ki,kj,kk] = t3Tv
                    #my_permuted_w[ki,kj,kk] = get_permuted_w(ki,kj,kk,ka,kb,kc,task)
                    #my_permuted_v[ki,kj,kk] = get_permuted_v(ki,kj,kk,ka,kb,kc,task)
                for ki, kj, kk in product(range(nkpts), repeat=3):
                    # eigenvalue denominator: e(i) + e(j) + e(k)
                    eijk = _get_epqr([0,nocc,ki,mo_e_o,nonzero_opadding],
                                     [0,nocc,kj,mo_e_o,nonzero_opadding],
                                     [0,nocc,kk,mo_e_o,nonzero_opadding])
                    # Find momentum conservation condition for triples
                    # amplitude t3ijkabc
                    kc = kpts_helper.get_kconserv3(cell, kpts, [ki, kj, kk, ka, kb])
                    if not (ka >= kb and kb >= kc):
                        continue
                    # Symmetry factor accounting for the restricted ka >= kb >= kc loop.
                    if ka == kb and kb == kc:
                        symm_kpt = 1.
                    elif ka == kb or kb == kc:
                        symm_kpt = 3.
                    else:
                        symm_kpt = 6.
                    eabc = _get_epqr([a0,a1,ka,mo_e_v,nonzero_vpadding],
                                     [b0,b1,kb,mo_e_v,nonzero_vpadding],
                                     [c0,c1,kc,mo_e_v,nonzero_vpadding],
                                     fac=[-1.,-1.,-1.])
                    eijkabc = (eijk[None,None,None,:,:,:] + eabc[:,:,:,None,None,None])
                    pwijk = my_permuted_w[ki,kj,kk] + my_permuted_v[ki,kj,kk]
                    rwijk = (4. * my_permuted_w[ki,kj,kk] +
                             1. * my_permuted_w[kj,kk,ki].transpose(0,1,2,5,3,4) +
                             1. * my_permuted_w[kk,ki,kj].transpose(0,1,2,4,5,3) -
                             2. * my_permuted_w[ki,kk,kj].transpose(0,1,2,3,5,4) -
                             2. * my_permuted_w[kk,kj,ki].transpose(0,1,2,5,4,3) -
                             2. * my_permuted_w[kj,ki,kk].transpose(0,1,2,4,3,5))
                    rwijk = rwijk / eijkabc
                    energy_t += symm_kpt * einsum('abcijk,abcijk', rwijk, pwijk.conj())
    energy_t *= (1. / 3)
    energy_t /= nkpts
    if abs(energy_t.imag) > 1e-4:
        log.warn('Non-zero imaginary part of CCSD(T) energy was found %s', energy_t.imag)
    log.timer('CCSD(T)', *cpu0)
    log.note('CCSD(T) correction per cell = %.15g', energy_t.real)
    log.note('CCSD(T) correction per cell (imag) = %.15g', energy_t.imag)
    return energy_t.real
###################################
# Helper function for t3 creation
###################################
def check_read_success(filename, **kwargs):
    '''Determine criterion for successfully reading a dataset based on its
    meta values.
    For now, returns False.'''
    def check_write_complete(filename, **kwargs):
        '''Check for `completed` attr in file.'''
        import os
        # FIX: pop (not get) 'mode' so it is not passed twice to h5py.File,
        # which would raise "got multiple values for keyword argument 'mode'".
        mode = kwargs.pop('mode', 'r')
        if not os.path.isfile(filename):
            return False
        # FIX: close the file; the original leaked the open h5py.File handle.
        with h5py.File(filename, mode=mode, **kwargs) as f:
            return f.attrs.get('completed', False)
    write_complete = check_write_complete(filename, **kwargs)
    # Deliberately always False for now (see docstring); the write-complete
    # check is still performed for its validation side effects.
    return False and write_complete
def transpose_t2(t2, nkpts, nocc, nvir, kconserv, out=None):
'''Creates t2.transpose(2,3,1,0).'''
if out is None:
out = np.empty((nkpts,nkpts,nkpts,nvir,nvir,nocc,nocc), dtype=t2.dtype)
# Check if it's stored in lower triangular form
if len(t2.shape) == 7 and t2.shape[:2] == (nkpts, nkpts):
for ki, kj, ka in product(range(nkpts), repeat=3):
kb = kconserv[ki,ka,kj]
out[ka,kb,kj] = t2[ki,kj,ka].transpose(2,3,1,0)
elif len(t2.shape) == 6 and t2.shape[:2] == (nkpts*(nkpts+1)//2, nkpts):
for ki, kj, ka in product(range(nkpts), repeat=3):
kb = kconserv[ki,ka,kj]
# t2[ki,kj,ka] = t2[tril_index(ki,kj),ka] ki<kj
# t2[kj,ki,kb] = t2[ki,kj,ka].transpose(1,0,3,2) ki<kj
# = t2[tril_index(ki,kj),ka].transpose(1,0,3,2)
if ki <= kj:
tril_idx = (kj*(kj+1))//2 + ki
out[ka,kb,kj] = t2[tril_idx,ka].transpose(2,3,1,0).copy()
out[kb,ka,ki] = out[ka,kb,kj].transpose(1,0,3,2)
else:
raise ValueError('No known conversion for t2 shape %s' % t2.shape)
return out
def create_eris_vvop(vovv, oovv, nkpts, nocc, nvir, kconserv, out=None):
    '''Creates vvop from vovv and oovv array (physicist notation).'''
    nmo = nocc + nvir
    out_shape = (nkpts, nkpts, nkpts, nvir, nvir, nocc, nmo)
    assert(vovv.shape == (nkpts, nkpts, nkpts, nvir, nocc, nvir, nvir))
    if out is None:
        out = np.empty(out_shape, dtype=vovv.dtype)
    else:
        assert(out.shape == out_shape)
    for ki, kj, ka in product(range(nkpts), repeat=3):
        kb = kconserv[ki, ka, kj]
        # Virtual part of the general (p) index comes from vovv...
        out[ki, kj, ka, :, :, :, nocc:] = vovv[kb, ka, kj].conj().transpose(3, 2, 1, 0)
        # ...and the occupied part from oovv, when supplied.
        if oovv is not None:
            out[ki, kj, ka, :, :, :, :nocc] = oovv[kb, ka, kj].conj().transpose(3, 2, 1, 0)
    return out
def create_eris_vooo(ooov, nkpts, nocc, nvir, kconserv, out=None):
    '''Creates vooo from ooov array.
    This is not exactly chemist's notation, but close. Here a chemist notation vooo
    is created from physicist ooov, and then the last two indices of vooo are swapped.
    '''
    assert(ooov.shape == (nkpts, nkpts, nkpts, nocc, nocc, nocc, nvir))
    if out is None:
        out = np.empty((nkpts, nkpts, nkpts, nvir, nocc, nocc, nocc), dtype=ooov.dtype)
    for ki, kj, ka in product(range(nkpts), repeat=3):
        kb = kconserv[ki, kj, ka]
        # <bj|ai> -> (ba|ji)   (Physicist->Chemist)
        # (ij|ab) = (ba|ij)*   (Permutational symmetry)
        # out     = (ij|ab).transpose(0,1,3,2)
        out[ki, kj, kb] = ooov[kb, kj, ka].conj().transpose(3, 1, 0, 2)
    return out
def create_t3_eris(mycc, kconserv, eris, tmpfile='tmp_t3_eris.h5'):
    '''Create/transpose necessary eri integrals needed for fast read-in by CCSD(T).'''
    # eris is a 4-list: [vovv, oovv, ooov, t2] (see the call site in kernel).
    eris_vovv, eris_oovv, eris_ooov, t2 = eris
    nkpts = mycc.nkpts
    nocc = mycc.nocc
    nmo = mycc.nmo
    nvir = nmo - nocc
    nmo = nocc + nvir
    feri_tmp = None
    h5py_kwargs = {}
    feri_tmp_filename = tmpfile
    dtype = np.result_type(eris_vovv, eris_oovv, eris_ooov, t2)
    # check_read_success currently always returns False, so the transposed
    # integrals are rebuilt on disk every call.
    if not check_read_success(feri_tmp_filename):
        feri_tmp = lib.H5TmpFile(feri_tmp_filename, 'w', **h5py_kwargs)
        t2T_out = feri_tmp.create_dataset('t2T', (nkpts,nkpts,nkpts,nvir,nvir,nocc,nocc), dtype=dtype)  # noqa: E501
        eris_vvop_out = feri_tmp.create_dataset('vvop', (nkpts,nkpts,nkpts,nvir,nvir,nocc,nmo), dtype=dtype)  # noqa: E501
        eris_vooo_C_out = feri_tmp.create_dataset('vooo_C', (nkpts,nkpts,nkpts,nvir,nocc,nocc,nocc), dtype=dtype)  # noqa: E501
        transpose_t2(t2, nkpts, nocc, nvir, kconserv, out=t2T_out)
        create_eris_vvop(eris_vovv, eris_oovv, nkpts, nocc, nvir, kconserv, out=eris_vvop_out)
        create_eris_vooo(eris_ooov, nkpts, nocc, nvir, kconserv, out=eris_vooo_C_out)
        # Mark the file complete so a future read-based fast path could trust it.
        feri_tmp.attrs['completed'] = True
        feri_tmp.close()
    # Reopen read-only; the returned handle must be kept alive by the caller
    # while the (possibly on-disk) datasets below are in use.
    feri_tmp = lib.H5TmpFile(feri_tmp_filename, 'r', **h5py_kwargs)
    t2T = feri_tmp['t2T']
    eris_vvop = feri_tmp['vvop']
    eris_vooo_C = feri_tmp['vooo_C']
    mem_now = lib.current_memory()[0]
    max_memory = max(0, mycc.max_memory - mem_now)
    # Estimated element count of all three arrays; x16 bytes for complex128.
    unit = nkpts**3 * (nvir**2 * nocc**2 + nvir**2 * nmo * nocc + nvir * nocc**3)
    if (unit*16 < max_memory):  # Store all in memory
        t2T = t2T[:]
        eris_vvop = eris_vvop[:]
        eris_vooo_C = eris_vooo_C[:]
    return feri_tmp, t2T, eris_vvop, eris_vooo_C
def _convert_to_int(kpt_indices):
'''Convert all kpoint indices for 3-particle operator to integers.'''
out_indices = [0]*6
for ix, x in enumerate(kpt_indices):
assert isinstance(x, (int, np.int, np.ndarray, list))
if isinstance(x, (np.ndarray)) and (x.ndim == 0):
out_indices[ix] = int(x)
else:
out_indices[ix] = x
return out_indices
def _tile_list(kpt_indices):
'''Similar to a cartesian product but for a list of kpoint indices for
a 3-particle operator.'''
max_length = 0
out_indices = [0]*6
for ix, x in enumerate(kpt_indices):
if hasattr(x, '__len__'):
max_length = max(max_length, len(x))
if max_length == 0:
return kpt_indices
else:
for ix, x in enumerate(kpt_indices):
if isinstance(x, (int, np.int)):
out_indices[ix] = [x] * max_length
else:
out_indices[ix] = x
return map(list, zip(*out_indices))
def zip_kpoints(kpt_indices):
    '''Similar to a cartesian product but for a list of kpoint indices for
    a 3-particle operator. Ensures all indices are integers.'''
    # Normalize 0-d arrays to ints, then tile scalars against any list entries.
    return _tile_list(_convert_to_int(kpt_indices))
def get_data_slices(kpt_indices, orb_indices, kconserv):
    '''Build the index tuples needed to gather vvop, vooo, and t2T data for
    all six (a,b,c) permutations of each k-point sextet.
    Returns four parallel lists (vvop, vooo, t2T-for-vvop, t2T-for-vooo),
    each of length 6 * len(kpt_indices).
    '''
    kpt_indices = zip_kpoints(kpt_indices)
    # np.int was removed in numpy >= 1.24; np.integer covers numpy int scalars.
    if isinstance(kpt_indices[0], (int, np.integer)):  # Ensure we are working
        kpt_indices = [kpt_indices]                    # with a list of lists
    a0,a1,b0,b1,c0,c1 = orb_indices
    length = len(kpt_indices)*6
    def _vijk_indices(kpt_indices, orb_indices, transpose=(0, 1, 2)):
        '''Get indices needed for t3 construction and a given transpose of (a,b,c).'''
        kpt_indices = ([kpt_indices[x] for x in transpose] +
                       [kpt_indices[x+3] for x in transpose])
        orb_indices = lib.flatten([[orb_indices[2*x], orb_indices[2*x+1]]
                                   for x in transpose])
        ki, kj, kk, ka, kb, kc = kpt_indices
        a0, a1, b0, b1, c0, c1 = orb_indices
        kf = kconserv[ka,ki,kb]
        km = kconserv[kc,kk,kb]
        sl00 = slice(None, None)
        vvop_idx = [ka, kb, ki, slice(a0,a1), slice(b0,b1), sl00, sl00]
        vooo_idx = [ka, ki, kj, slice(a0,a1), sl00, sl00, sl00]
        t2T_vvop_idx = [kc, kf, kj, slice(c0,c1), sl00, sl00, sl00]
        t2T_vooo_idx = [kc, kb, km, slice(c0,c1), sl00, sl00, sl00]
        return vvop_idx, vooo_idx, t2T_vvop_idx, t2T_vooo_idx
    vvop_indices = [0] * length
    vooo_indices = [0] * length
    t2T_vvop_indices = [0] * length
    t2T_vooo_indices = [0] * length
    # All six permutations of the (a,b,c) virtual labels.
    transpose = [(0, 1, 2), (0, 2, 1), (1, 0, 2),
                 (1, 2, 0), (2, 0, 1), (2, 1, 0)]
    count = 0
    for kpt in kpt_indices:
        for t in transpose:
            vvop_idx, vooo_idx, t2T_vvop_idx, t2T_vooo_idx = _vijk_indices(kpt, orb_indices, t)
            vvop_indices[count] = vvop_idx
            vooo_indices[count] = vooo_idx
            t2T_vvop_indices[count] = t2T_vvop_idx
            t2T_vooo_indices[count] = t2T_vooo_idx
            count += 1
    return vvop_indices, vooo_indices, t2T_vvop_indices, t2T_vooo_indices
def _get_epqr(pindices,qindices,rindices,fac=[1.0,1.0,1.0],large_num=LARGE_DENOM):
    '''Create a denominator
        fac[0]*e[kp,p0:p1] + fac[1]*e[kq,q0:q1] + fac[2]*e[kr,r0:r1]
    where padded elements have been replaced by a large number.
    Args:
        pindices (5-list of object):
            A list of p0, p1, kp, orbital values, and non-zero indices for the first
            denominator element.
        qindices (5-list of object):
            A list of q0, q1, kq, orbital values, and non-zero indices for the second
            denominator element.
        rindices (5-list of object):
            A list of r0, r1, kr, orbital values, and non-zero indices for the third
            denominator element.
        fac (3-list of float):
            Factors to multiply the first and second denominator elements.
        large_num (float):
            Number to replace the padded elements.
    '''
    def get_idx(x0,x1,kx,n0_p):
        # Mask of this k-point's non-padded orbitals that fall in [x0, x1).
        return np.logical_and(n0_p[kx] >= x0, n0_p[kx] < x1)
    assert(all([len(x) == 5 for x in [pindices,qindices]]))
    p0,p1,kp,mo_e_p,nonzero_p = pindices
    q0,q1,kq,mo_e_q,nonzero_q = qindices
    r0,r1,kr,mo_e_r,nonzero_r = rindices
    fac_p, fac_q, fac_r = fac
    # Initialize everything to the padding sentinel; real values are filled
    # in only at positions where all three indices are non-padded.
    epqr = large_num * np.ones((p1-p0,q1-q0,r1-r0), dtype=mo_e_p[0].dtype)
    idxp = get_idx(p0,p1,kp,nonzero_p)
    idxq = get_idx(q0,q1,kq,nonzero_q)
    idxr = get_idx(r0,r1,kr,nonzero_r)
    # Open-mesh index of valid (p,q,r) triplets, shifted into block-local coords.
    n0_ovp_pqr = np.ix_(nonzero_p[kp][idxp]-p0, nonzero_q[kq][idxq]-q0, nonzero_r[kr][idxr]-r0)
    epqr[n0_ovp_pqr] = lib.direct_sum('p,q,r->pqr', fac_p*mo_e_p[kp][p0:p1],
                                      fac_q*mo_e_q[kq][q0:q1],
                                      fac_r*mo_e_r[kr][r0:r1])[n0_ovp_pqr]
    #epqr[n0_ovp_pqr] = temp[n0_ovp_pqr]
    return epqr
if __name__ == '__main__':
    from pyscf.pbc import gto
    from pyscf.pbc import scf
    from pyscf.pbc import cc
    # Diamond: two-atom carbon primitive cell, coordinates in Bohr.
    cell = gto.Cell()
    cell.atom = '''
    C 0.000000000000 0.000000000000 0.000000000000
    C 1.685068664391 1.685068664391 1.685068664391
    '''
    cell.basis = 'gth-szv'
    cell.pseudo = 'gth-pade'
    cell.a = '''
    0.000000000, 3.370137329, 3.370137329
    3.370137329, 0.000000000, 3.370137329
    3.370137329, 3.370137329, 0.000000000'''
    cell.unit = 'B'
    cell.verbose = 4
    cell.mesh = [24, 24, 24]
    cell.build()
    # 1x1x4 k-point mesh, shifted so the first k-point sits at Gamma.
    nmp = [1,1,4]
    kpts = cell.make_kpts(nmp)
    kpts -= kpts[0]
    # Tightly converged k-point restricted Hartree-Fock reference.
    kmf = scf.KRHF(cell, kpts=kpts, exxdiv=None)
    kmf.conv_tol = 1e-12
    kmf.conv_tol_grad = 1e-12
    kmf.direct_scf_tol = 1e-16
    ehf = kmf.kernel()
    # k-point CCSD, then the (T) correction provided by this module's kernel().
    mycc = cc.KRCCSD(kmf)
    eris = mycc.ao2mo()
    ecc, t1, t2 = mycc.kernel(eris=eris)
    energy_t = kernel(mycc, eris=eris, verbose=9)
    # Start of supercell calculations
    from pyscf.pbc.tools.pbc import super_cell
    supcell = super_cell(cell, nmp)
    supcell.build()
    # Equivalent Gamma-point calculation on the 1x1x4 supercell; per-cell
    # energies should match the k-point results printed below.
    kmf = scf.RHF(supcell, exxdiv=None)
    kmf.conv_tol = 1e-12
    kmf.conv_tol_grad = 1e-12
    kmf.direct_scf_tol = 1e-16
    sup_ehf = kmf.kernel()
    myscc = cc.RCCSD(kmf)
    eris = myscc.ao2mo()
    sup_ecc, t1, t2 = myscc.kernel(eris=eris)
    sup_energy_t = myscc.ccsd_t(eris=eris)
    # Supercell energies are divided by the number of unit cells (np.prod(nmp)).
    print("Kpoint CCSD: %20.16f" % ecc)
    print("Supercell CCSD: %20.16f" % (sup_ecc/np.prod(nmp)))
    print("Kpoint CCSD(T): %20.16f" % energy_t)
    print("Supercell CCSD(T): %20.16f" % (sup_energy_t/np.prod(nmp)))
|
sunqm/pyscf
|
pyscf/pbc/cc/kccsd_t_rhf.py
|
Python
|
apache-2.0
| 28,756
|
[
"PySCF"
] |
f4084bf84c601ec0955b31a1067b328d91279f17d811fe352d215fd874589d1a
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2015-02-14 19:13:49 by Brian McFee <brian.mcfee@nyu.edu>
'''Unit tests for time and frequency conversion'''
import warnings
import os
# Disable the librosa disk cache before importing librosa, so the tests
# always exercise the uncached code paths.
try:
    os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
    # Variable was not set; nothing to remove.
    pass
import librosa
import numpy as np
from nose.tools import raises, eq_
# Surface every warning on every occurrence so the tests can observe them.
warnings.resetwarnings()
warnings.simplefilter('always')
def test_frames_to_samples():
    '''frames_to_samples: samples = frames * hop_length (+ n_fft // 2).'''

    def _check(frames, expected, hop_length, n_fft):
        result = librosa.frames_to_samples(frames,
                                           hop_length=hop_length,
                                           n_fft=n_fft)
        assert np.allclose(expected, result)
        expected = np.asanyarray(expected)
        assert expected.shape == result.shape
        assert expected.ndim == result.ndim

    for frames in [100, np.arange(10.5)]:
        for hop_length in [512, 1024]:
            for n_fft in [None, 1024]:
                expected = frames * hop_length
                if n_fft is not None:
                    # Giving n_fft shifts samples by the centering offset
                    expected = expected + n_fft // 2
                yield _check, frames, expected, hop_length, n_fft
def test_samples_to_frames():
    '''samples_to_frames inverts frames_to_samples.'''

    def _check(samples, expected, hop_length, n_fft):
        result = librosa.samples_to_frames(samples,
                                           hop_length=hop_length,
                                           n_fft=n_fft)
        assert np.allclose(expected, result)
        expected = np.asanyarray(expected)
        assert expected.shape == result.shape
        assert expected.ndim == result.ndim

    for frames in [100, np.arange(10.5)]:
        for hop_length in [512, 1024]:
            for n_fft in [None, 1024]:
                # Construct the sample positions from the expected frames,
                # mirroring the frames_to_samples convention
                samples = frames * hop_length
                if n_fft is not None:
                    samples = samples + n_fft // 2
                yield _check, samples, frames, hop_length, n_fft
def test_frames_to_time():
    '''frames_to_time maps frame indices back to their time stamps.'''

    def _check(sr, hop_length, n_fft):
        # Frame indices corresponding to 0s, 1s and 2s
        frames = np.arange(3) * sr // hop_length
        if n_fft:
            # Compensate for the n_fft // 2 centering offset
            frames -= n_fft // (2 * hop_length)
        times = librosa.frames_to_time(frames,
                                       sr=sr,
                                       hop_length=hop_length,
                                       n_fft=n_fft)
        # Reconstructed times must land within one frame of the target
        errors = np.abs(times - np.asarray([0, 1, 2])) * sr
        assert np.all(errors < hop_length)

    for sr in [22050, 44100]:
        for hop_length in [256, 512]:
            for n_fft in [None, 2048]:
                yield _check, sr, hop_length, n_fft
def test_time_to_samples():
    '''time_to_samples: t seconds correspond to t * sr samples.'''

    def _check(sr):
        samples = librosa.time_to_samples([0, 1, 2], sr=sr)
        assert np.allclose(samples, [0, sr, 2 * sr])

    for sr in [22050, 44100]:
        yield _check, sr
def test_samples_to_time():
    '''samples_to_time: sample k corresponds to k / sr seconds.'''

    def _check(sr):
        times = librosa.samples_to_time([0, sr, 2 * sr], sr=sr)
        assert np.allclose(times, [0, 1, 2])

    for sr in [22050, 44100]:
        yield _check, sr
def test_time_to_frames():
    '''time_to_frames round-trip: frames must reconstruct the sample times.

    The previous assertion compared `times` with `[0, 1, 2]` — but `times`
    IS np.arange(3), so the test was vacuously true and never looked at
    `frames` at all.  Instead, require the frame positions (undoing the
    n_fft // 2 centering offset used by samples_to_frames, as established
    by test_frames_to_samples / test_samples_to_frames) to land within one
    hop of the exact sample times.
    '''

    def __test(sr, hop_length, n_fft):
        # Generate frames at times 0s, 1s, 2s
        times = np.arange(3)
        frames = librosa.time_to_frames(times,
                                        sr=sr,
                                        hop_length=hop_length,
                                        n_fft=n_fft)
        # samples_to_frames subtracts n_fft // 2 before dividing by the hop,
        # so undo that offset before comparing against times * sr.
        offset = n_fft // 2 if n_fft else 0
        # we need to be within one frame
        assert np.all(np.abs(times * sr - offset - frames * hop_length)
                      < hop_length)

    for sr in [22050, 44100]:
        for hop_length in [256, 512]:
            for n_fft in [None, 2048]:
                yield __test, sr, hop_length, n_fft
def test_octs_to_hz():
    '''Octaves 1-4 map to the A-series 55..440 Hz, scaled by the tuning.'''

    def _check(a440):
        expected = (float(a440) / 440.0) * np.asarray([55, 110, 220, 440])
        actual = librosa.octs_to_hz([1, 2, 3, 4], A440=a440)
        assert np.allclose(expected, actual)

    for a440 in [415, 430, 435, 440, 466]:
        yield _check, a440
def test_hz_to_octs():
    '''hz_to_octs inverts octs_to_hz on the A-series frequencies.'''

    def _check(a440):
        freqs = (float(a440) / 440.0) * np.asarray([55, 110, 220, 440])
        octaves = librosa.hz_to_octs(freqs, A440=a440)
        assert np.allclose(octaves, [1, 2, 3, 4])

    for a440 in [415, 430, 435, 440, 466]:
        yield _check, a440
def test_note_to_midi():
    '''note_to_midi parses accidentals, octaves, and cent offsets.'''

    def __test(tuning, accidental, octave, round_midi):
        # Build the note name incrementally: 'C' + accidental [+ octave] [+ cents]
        note = 'C{:s}'.format(accidental)

        if octave is not None:
            note = '{:s}{:d}'.format(note, octave)
        else:
            # Unspecified octave defaults to 0
            octave = 0

        if tuning is not None:
            note = '{:s}{:+d}'.format(note, tuning)
        else:
            # Unspecified tuning defaults to +0 cents
            tuning = 0

        # MIDI C of octave k is 12 * (k + 1); tuning is in cents (0.01 semitone)
        midi_true = 12 * (octave + 1) + tuning * 0.01

        # Sharps raise by one semitone; flats ('b' or '!') lower by one
        if accidental == '#':
            midi_true += 1
        elif accidental in list('b!'):
            midi_true -= 1

        midi = librosa.note_to_midi(note, round_midi=round_midi)
        if round_midi:
            midi_true = np.round(midi_true)
        eq_(midi, midi_true)

        # List input should behave elementwise
        midi = librosa.note_to_midi([note], round_midi=round_midi)
        eq_(midi[0], midi_true)

    @raises(librosa.ParameterError)
    def __test_fail():
        # Unparseable note names must raise ParameterError
        librosa.note_to_midi('does not pass')

    for tuning in [None, -25, 0, 25]:
        for octave in [None, 1, 2, 3]:
            if octave is None and tuning is not None:
                # Cent offsets cannot be given without an explicit octave
                continue
            for accidental in ['', '#', 'b', '!']:
                for round_midi in [False, True]:
                    yield __test, tuning, accidental, octave, round_midi

    yield __test_fail
def test_note_to_hz():
    '''note_to_hz converts note names (with accidentals/octaves/cents) to Hz.'''

    def __test(tuning, accidental, octave, round_midi):
        # Build the note name incrementally: 'A' + accidental [+ octave] [+ cents]
        note = 'A{:s}'.format(accidental)

        if octave is not None:
            note = '{:s}{:d}'.format(note, octave)
        else:
            # Unspecified octave defaults to 0
            octave = 0

        if tuning is not None:
            note = '{:s}{:+d}'.format(note, tuning)
        else:
            # Unspecified tuning defaults to +0 cents
            tuning = 0

        if round_midi:
            # Rounding quantizes the cent offset to whole semitones
            tuning = np.around(tuning, -2)

        # A4 = 440 Hz; shift by the cent offset and the octave distance from 4
        hz_true = 440.0 * (2.0**(tuning * 0.01 / 12)) * (2.0**(octave - 4))

        if accidental == '#':
            hz_true *= 2.0**(1./12)
        elif accidental in list('b!'):
            hz_true /= 2.0**(1./12)

        hz = librosa.note_to_hz(note, round_midi=round_midi)
        assert np.allclose(hz, hz_true)

        # List input should behave elementwise
        hz = librosa.note_to_hz([note], round_midi=round_midi)
        assert np.allclose(hz[0], hz_true)

    @raises(librosa.ParameterError)
    def __test_fail():
        # Fixed copy-paste bug: this suite tests note_to_hz, but the failure
        # case previously exercised note_to_midi instead.
        librosa.note_to_hz('does not pass')

    for tuning in [None, -25, 0, 25]:
        for octave in [None, 1, 2, 3]:
            if octave is None and tuning is not None:
                # Cent offsets cannot be given without an explicit octave
                continue
            for accidental in ['', '#', 'b', '!']:
                for round_midi in [False, True]:
                    yield __test, tuning, accidental, octave, round_midi

    yield __test_fail
def test_midi_to_note():
    '''midi_to_note formatting under the octave and cents flags.'''

    def _check(midi_num, expected, octave, cents):
        eq_(librosa.midi_to_note(midi_num, octave=octave, cents=cents),
            expected)

    # Requesting cents without an octave is a ParameterError
    _check_fail = raises(librosa.ParameterError)(_check)

    midi_num = 24.25
    yield _check, midi_num, 'C', False, False
    yield _check, midi_num, 'C1', True, False
    yield _check_fail, midi_num, 'C+25', False, True
    yield _check, midi_num, 'C1+25', True, True
    yield _check, [midi_num], ['C'], False, False
def test_midi_to_hz():
    '''MIDI notes A1, A2, A3, A4 map to 55, 110, 220, 440 Hz.'''
    expected = [55, 110, 220, 440]
    assert np.allclose(librosa.midi_to_hz([33, 45, 57, 69]), expected)
def test_hz_to_midi():
    '''hz_to_midi handles both scalar and sequence input.'''
    assert np.allclose(librosa.hz_to_midi(55), 33)
    expected = [33, 45, 57, 69]
    assert np.allclose(librosa.hz_to_midi([55, 110, 220, 440]), expected)
def test_hz_to_note():
    '''hz_to_note formatting under the octave and cents flags.'''

    def _check(hz, expected, octave, cents):
        eq_(librosa.hz_to_note(hz, octave=octave, cents=cents), expected)

    # Requesting cents without an octave is a ParameterError
    _check_fail = raises(librosa.ParameterError)(_check)

    hz = 440
    yield _check, hz, 'A', False, False
    yield _check, hz, 'A4', True, False
    yield _check_fail, hz, 'A+0', False, True
    yield _check, hz, 'A4+0', True, True
    yield _check, [hz, 2*hz], ['A4+0', 'A5+0'], True, True
def test_fft_frequencies():
    '''fft_frequencies spans [0, sr/2] with uniform spacing.'''

    def _check(sr, n_fft):
        freqs = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
        # First bin is DC
        eq_(freqs[0], 0)
        # Last bin is the Nyquist frequency
        eq_(freqs[-1], sr / 2.0)
        # All bins must be equally spaced
        steps = np.diff(freqs)
        assert np.allclose(steps, steps[0])

    for n_fft in [1024, 2048]:
        for sr in [8000, 22050]:
            yield _check, sr, n_fft
def test_cqt_frequencies():
    '''cqt_frequencies: geometric spacing anchored at fmin (with tuning).'''

    def _check(n_bins, fmin, bins_per_octave, tuning):
        freqs = librosa.cqt_frequencies(n_bins,
                                        fmin,
                                        bins_per_octave=bins_per_octave,
                                        tuning=tuning)
        # Correct number of bins
        eq_(len(freqs), n_bins)
        # First bin is fmin shifted by the tuning offset
        assert np.allclose(freqs[0],
                           fmin * 2.0**(float(tuning) / bins_per_octave))
        # Constant-Q: log2 spacing between consecutive bins is uniform
        assert np.allclose(np.diff(np.log2(freqs)), 1. / bins_per_octave)

    for n_bins in [12, 24, 36]:
        for fmin in [440.0]:
            for bins_per_octave in [12, 24, 36]:
                for tuning in [-0.25, 0.0, 0.25]:
                    yield _check, n_bins, fmin, bins_per_octave, tuning
def test_tempo_frequencies():
    '''tempo_frequencies: bin i is 60 * sr / (i * hop_length) BPM.'''

    def _check(n_bins, hop_length, sr):
        freqs = librosa.tempo_frequencies(n_bins, hop_length=hop_length, sr=sr)
        # Correct number of bins
        eq_(len(freqs), n_bins)
        # Bin 0 corresponds to an infinite tempo
        assert not np.isfinite(freqs[0])
        # The remaining bins are exactly hop_length samples apart in lag space
        if n_bins > 1:
            lags = (freqs[1:]**-1) * (60.0 * sr)
            assert np.allclose(lags[0], hop_length)
            assert np.allclose(np.diff(lags), np.asarray(hop_length)), np.diff(lags)

    for n_bins in [1, 16, 128]:
        for hop_length in [256, 512, 1024]:
            for sr in [11025, 22050, 44100]:
                yield _check, n_bins, hop_length, sr
def test_A_weighting():
    '''A_weighting: ~0 dB at 1 kHz, and min_db floors the response.'''

    def _check(min_db):
        # 1 kHz is the A-weighting reference point: weight should be ~0 dB
        assert np.allclose(librosa.A_weighting(1000.0, min_db=min_db),
                           0, atol=1e-3)
        weights = librosa.A_weighting(np.linspace(2e1, 2e4),
                                      min_db=min_db)
        # When a floor is given, no weight may fall below it
        if min_db is not None:
            assert not np.any(weights < min_db)

    for min_db in [None, -40, -80]:
        yield _check, min_db
|
r9y9/librosa
|
tests/test_time_frequency.py
|
Python
|
isc
| 10,703
|
[
"Brian"
] |
518aa611f278962e81795b89ff1d74ace8030db5b2839db3a9142fab80e42f3a
|
"""
Gaussian expansion of distances
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
class GaussianExpansion(Layer):
    """Expand a vector of distances into Gaussian radial basis functions.

    A batch of distance vectors of shape [None, n] is mapped to a tensor of
    shape [None, n, m], where m is the number of Gaussian basis centers.
    """

    def __init__(self, centers, width, **kwargs):
        """
        Args:
            centers (np.ndarray): Gaussian basis centers
            width (float): width of the Gaussian basis
            **kwargs: extra keyword arguments forwarded to Layer
        """
        self.centers = np.array(centers).ravel()
        self.width = width
        super().__init__(**kwargs)

    def build(self, input_shape):
        """Mark the layer as built; it holds no trainable weights.

        Args:
            input_shape (tuple): tuple of int for the input shape
        """
        self.built = True

    def call(self, inputs, masks=None):
        """Apply the Gaussian expansion.

        Args:
            inputs (tf.Tensor): input distance tensor, with shape [None, n]
            masks (tf.Tensor): bool tensor, not used here
        """
        # Broadcast distances against centers: [None, n, 1] - [1, 1, m]
        deltas = inputs[:, :, None] - self.centers[None, None, :]
        return tf.math.exp(-(deltas ** 2) / self.width**2)

    def compute_output_shape(self, input_shape):
        """Compute the output shape, used in older keras API"""
        return input_shape[0], input_shape[1], len(self.centers)

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {"centers": self.centers.tolist(), "width": self.width}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
|
materialsvirtuallab/megnet
|
megnet/layers/featurizer/_gaussian_expansion.py
|
Python
|
bsd-3-clause
| 1,686
|
[
"Gaussian"
] |
ef27d1fcba6670e77af4cb091e0251d1a2c6919e1fd609a5276e78114be8acb7
|
from __future__ import with_statement
import imp
import inspect
import os
import sys
from attest import ast, statistics
from attest.codegen import to_source, SourceGenerator
# Public API of this module.
__all__ = ['COMPILES_AST',
           'ExpressionEvaluator',
           'TestFailure',
           'assert_hook',
           'AssertTransformer',
           'AssertImportHook',
          ]

# Probe whether this interpreter can compile AST objects directly.
# CPython 2.5 raises TypeError here; in that case AssertTransformer falls
# back to regenerating source code from the AST before compiling.
try:
    compile(ast.parse('pass'), '<string>', 'exec')
except TypeError:
    COMPILES_AST = False
else:
    COMPILES_AST = True
class ExpressionEvaluator(SourceGenerator):
    """Evaluates ``expr`` in the context of ``globals`` and ``locals``,
    expanding the values of variables and the results of binary operations, but
    keeping comparison and boolean operators.

    .. testsetup::

        from attest import ExpressionEvaluator

    >>> var = 1 + 2
    >>> value = ExpressionEvaluator('var == 5 - 3', globals(), locals())
    >>> value.late_visit()
    >>> repr(value)
    '(3 == 2)'
    >>> bool(value)
    False

    .. versionadded:: 0.5

    """

    def __init__(self, expr, globals, locals):
        self.expr = expr
        # Putting locals in globals for closures
        self.globals = dict(globals)
        self.locals = locals
        self.globals.update(self.locals)
        # Rendered fragments of the expanded expression; joined by __repr__.
        self.result = []
        # AST of the bare expression (body[0] is the Expr statement wrapper).
        self.node = ast.parse(self.expr).body[0].value
        # Trigger visit after init because we don't want to
        # evaluate twice in case of a successful assert

    def late_visit(self):
        # Walk the AST, writing the expanded sub-expression values.
        self.visit(self.node)

    def __repr__(self):
        # The expression with sub-expressions replaced by their values.
        return ''.join(self.result)

    def __str__(self):
        # Original expression and the expanded form, one per line.
        return '\n'.join((self.expr, repr(self)))

    def __nonzero__(self):
        # Truth of the *original* expression (Python 2 bool protocol).
        return bool(eval(self.expr, self.globals, self.locals))

    def eval(self, node):
        # Evaluate an AST fragment by regenerating and eval'ing its source.
        return eval(to_source(node), self.globals, self.locals)

    def write(self, s):
        # SourceGenerator output hook: collect fragments instead of printing.
        self.result.append(str(s))

    def visit_Name(self, node):
        value = self.eval(node)
        # Prefer a readable __name__ (functions, classes) over repr().
        if getattr(value, '__name__', None):
            self.write(value.__name__)
        else:
            self.write(repr(value))

    def generic_visit(self, node):
        # Any other sub-expression collapses to the repr of its value.
        self.write(repr(self.eval(node)))

    # These node types are all expanded to their evaluated values;
    # comparisons and boolean operators keep the default rendering.
    visit_BinOp = visit_Subscript = generic_visit
    visit_ListComp = visit_GeneratorExp = generic_visit
    visit_SetComp = visit_DictComp = generic_visit
    visit_Call = visit_Attribute = generic_visit
class TestFailure(AssertionError):
    """:exc:`AssertionError` subclass raised by the assert hook.

    :param value: The asserted expression evaluated with
        :class:`ExpressionEvaluator`.
    :param msg: Optional message passed to the assertion.

    .. versionadded:: 0.5

    """

    def __init__(self, value, msg=''):
        # Keep the evaluated expression around for error reporting.
        self.value = value
        AssertionError.__init__(self, msg)
def assert_hook(expr, msg='', globals=None, locals=None):
    """Like ``assert``, but using :class:`ExpressionEvaluator`. If
    you import this in test modules and the :class:`AssertImportHook` is
    installed (which it is automatically the first time you import from
    :mod:`attest`), ``assert`` statements are rewritten as a call to
    this.

    The import must be a top-level *from* import, example::

        from attest import Tests, assert_hook

    .. versionadded:: 0.5

    """
    statistics.assertions += 1
    # Default to the calling frame's namespaces so the expression sees the
    # same names the original assert statement would have seen.
    if globals is None:
        globals = inspect.stack()[1][0].f_globals
    if locals is None:
        locals = inspect.stack()[1][0].f_locals
    value = ExpressionEvaluator(expr, globals, locals)
    if not value:
        # Visit only if assertion fails
        value.late_visit()
        raise TestFailure(value, msg)
# Build AST nodes on 2.5 more easily
def _build(node, **kwargs):
node = node()
for key, value in kwargs.iteritems():
setattr(node, key, value)
return node
class AssertTransformer(ast.NodeTransformer):
    """Parses `source` with :mod:`_ast` and transforms `assert`
    statements into calls to :func:`assert_hook`.

    .. warning::

        CPython 2.5 doesn't compile AST nodes and when that fails this
        transformer will generate source code from the AST instead. While
        Attest's own tests passes on CPython 2.5, there might be code that
        it currently would render back incorrectly, most likely resulting
        in a failure. Because Python's syntax is simple, this isn't very
        likely, but you might want to :meth:`~AssertImportHook.disable` the
        import hook if you test regularly on CPython 2.5.

        It also messes up the line numbers so they don't match the original
        source code, meaning tracebacks will point to the line numbers in
        the *generated* source and preview the code on that line in the
        *original* source. The improved error message with the import hook
        is often worth it however, and failures will still point to the
        right file and function.

    .. versionadded:: 0.5

    """

    def __init__(self, source, filename=''):
        self.source = source
        self.filename = filename

    @property
    def should_rewrite(self):
        """:const:`True` if the source imports :func:`assert_hook`."""
        # Cheap substring test first; the module is only parsed when it
        # plausibly contains a top-level `from attest import assert_hook`.
        return ('assert_hook' in self.source and
                any(s.module == 'attest' and
                    any(n.name == 'assert_hook' for n in s.names)
                    for s in ast.parse(self.source).body
                    if isinstance(s, ast.ImportFrom)))

    def make_module(self, name, newpath=None):
        """Compiles the transformed code into a module object which it also
        inserts in :data:`sys.modules`.

        :returns: The module object.

        """
        module = imp.new_module(name)
        module.__file__ = self.filename
        if newpath:
            module.__path__ = newpath
        sys.modules[name] = module
        # Python 2 exec-statement form: run the rewritten code inside the
        # new module's namespace.
        exec self.code in vars(module)
        return module

    @property
    def node(self):
        """The transformed AST node."""
        node = ast.parse(self.source, self.filename)
        node = self.visit(node)
        # Synthesised nodes need location info before they can be compiled.
        ast.fix_missing_locations(node)
        return node

    @property
    def code(self):
        """The :attr:`node` compiled into a code object."""
        if COMPILES_AST:
            return compile(self.node, self.filename, 'exec')
        # CPython 2.5 fallback: compile regenerated source instead.
        return compile(to_source(self.node), self.filename, 'exec')

    def visit_Assert(self, node):
        # Rewrites:  assert <test>[, <msg>]
        # into:      assert_hook('<test>', <msg>, globals(), locals())
        args = [_build(ast.Str, s=to_source(node.test)),
                node.msg if node.msg is not None else _build(ast.Str, s=''),
                _build(ast.Call,
                       func=_build(ast.Name, id='globals', ctx=ast.Load()),
                       args=[], keywords=[], starargs=None, kwargs=None),
                _build(ast.Call,
                       func=_build(ast.Name, id='locals', ctx=ast.Load()),
                       args=[], keywords=[], starargs=None, kwargs=None)
               ]
        return ast.copy_location(
            _build(ast.Expr, value=_build(ast.Call,
                func=_build(ast.Name, id='assert_hook', ctx=ast.Load()),
                args=args, keywords=[], starargs=None, kwargs=None)), node)
class AssertImportHookEnabledDescriptor(object):
    """Class-level property: True when an `owner` instance is registered."""

    def __get__(self, instance, owner):
        # The hook counts as enabled when an instance of the owning class
        # is currently present on sys.meta_path.
        for hook in sys.meta_path:
            if isinstance(hook, owner):
                return True
        return False
class AssertImportHook(object):
    """An :term:`importer` that transforms imported modules with
    :class:`AssertTransformer`.

    .. versionadded:: 0.5

    """

    #: Class property, :const:`True` if the hook is enabled.
    enabled = AssertImportHookEnabledDescriptor()

    @classmethod
    def enable(cls):
        """Enable the import hook."""
        # Drop any previous instance first so at most one hook is active.
        cls.disable()
        sys.meta_path.insert(0, cls())

    @classmethod
    def disable(cls):
        """Disable the import hook."""
        sys.meta_path[:] = [ih for ih in sys.meta_path
                            if not isinstance(ih, cls)]

    def __init__(self):
        # Maps module name -> (imp.find_module result, search path).
        self._cache = {}

    def __enter__(self):
        # Context-manager form: register the hook on entry...
        sys.meta_path.insert(0, self)

    def __exit__(self, exc_type, exc_value, traceback):
        # ...and unregister it again on exit.
        sys.meta_path.remove(self)

    def find_module(self, name, path=None):
        # PEP 302 finder: cache where the module lives, claim it if found.
        lastname = name.rsplit('.', 1)[-1]
        try:
            self._cache[name] = imp.find_module(lastname, path), path
        except ImportError:
            return
        return self

    def load_module(self, name):
        # PEP 302 loader.
        if name in sys.modules:
            return sys.modules[name]
        source, filename, newpath = self.get_source(name)
        (fd, fn, info), path = self._cache[name]
        if source is None:
            # No Python source available (e.g. C extension): plain import.
            return imp.load_module(name, fd, fn, info)
        transformer = AssertTransformer(source, filename)
        if not transformer.should_rewrite:
            # Module doesn't use assert_hook: fall back to a normal import.
            fd, fn, info = imp.find_module(name.rsplit('.', 1)[-1], path)
            return imp.load_module(name, fd, fn, info)
        try:
            return transformer.make_module(name, newpath)
        except Exception, err:
            raise ImportError('cannot import %s: %s' % (name, err))

    def get_source(self, name):
        # Locate the module's source text; returns (code, filename, newpath).
        try:
            (fd, fn, info), path = self._cache[name]
        except KeyError:
            raise ImportError(name)
        code = filename = newpath = None
        if info[2] == imp.PY_SOURCE:
            filename = fn
            with fd:
                code = fd.read()
        elif info[2] == imp.PY_COMPILED:
            # Strip the trailing 'c'/'o' to read the matching .py file.
            filename = fn[:-1]
            with open(filename, 'U') as f:
                code = f.read()
        elif info[2] == imp.PKG_DIRECTORY:
            # Packages: read the package's __init__.py and record its path.
            filename = os.path.join(fn, '__init__.py')
            newpath = [fn]
            with open(filename, 'U') as f:
                code = f.read()
        return code, filename, newpath
|
dag/attest
|
attest/hook.py
|
Python
|
bsd-2-clause
| 9,729
|
[
"VisIt"
] |
07157ca0d6cb2bd7a81f2587476859dc1b81167054cb403cc355dfec2bcac985
|
"""
This examples shows how many lines can be grouped together in a single
object, for convenience and efficiency.
We want to plot a large number of lines. We could use mlab.plot3d for
this, but it will create an object for each line, this will be
inefficient. This example shows how to create one object comprised of
many lines.
The underlying idea is the same as that used to plot graphs (see for
instance :ref:`example_flight_graph`): create a set of points, and
specify explicitly the connectivity between them. First we create the
set of unconnected points (the underlying data structure is a
:ref:`poly_data`) using `mlab.pipeline.scalar_scatter`. To add the
connections, we need to keep track of which point is connected to which.
As we only have lines, this is fairly easy: in a line, each point is
connected to the following one.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2010, Enthought
# License: BSD style
import numpy as np

# The number of points per line
N = 300

# The scalar parameter for each line
t = np.linspace(-2 * np.pi, 2 * np.pi, N)

from mayavi import mlab
mlab.figure(1, size=(400, 400), bgcolor=(0, 0, 0))
mlab.clf()

# We create a list of positions and connections, each describing a line.
# We will collapse them in one array before plotting.
x = list()
y = list()
z = list()
s = list()
connections = list()

# The index of the current point in the total amount of points
index = 0

# Create each line one after the other in a loop
for i in range(50):
    x.append(np.sin(t))
    y.append(np.cos((2 + .02 * i) * t))
    z.append(np.cos((3 + .02 * i) * t))
    s.append(t)
    # This is the tricky part: in a line, each point is connected
    # to the one following it. We have to express this with the indices
    # of the final set of points once all lines have been combined
    # together, this is why we need to keep track of the total number of
    # points already created (index)
    # NOTE: the fractional endpoints (N - 1.5, N - .5) make np.arange
    # yield exactly N - 1 values, pairing point k with point k + 1.
    connections.append(np.vstack(
        [np.arange(index, index + N - 1.5),
         np.arange(index + 1, index + N - .5)]
    ).T)
    index += N

# Now collapse all positions, scalars and connections in big arrays
x = np.hstack(x)
y = np.hstack(y)
z = np.hstack(z)
s = np.hstack(s)
connections = np.vstack(connections)

# Create the points
src = mlab.pipeline.scalar_scatter(x, y, z, s)

# Connect them
src.mlab_source.dataset.lines = connections
src.update()

# The stripper filter cleans up connected lines
lines = mlab.pipeline.stripper(src)

# Finally, display the set of lines
mlab.pipeline.surface(lines, colormap='Accent', line_width=1, opacity=.4)

# And choose a nice view
mlab.view(33.6, 106, 5.5, [0, 0, .05])
mlab.roll(125)
mlab.show()
|
dmsurti/mayavi
|
examples/mayavi/mlab/plotting_many_lines.py
|
Python
|
bsd-3-clause
| 2,777
|
[
"Mayavi"
] |
08f2a52ecde61f8a6fe49e4e537ace47444e5299b62059c15daf3c1c7528906d
|
__all__ = ['resample_image',
'resample_image_to_target']
import os
from ..core import ants_image as iio
from .. import utils
def resample_image(image, resample_params, use_voxels=False, interp_type=1):
    """
    Resample image by spacing or number of voxels with
    various interpolators. Works with multi-channel images.

    ANTsR function: `resampleImage`

    Arguments
    ---------
    image : ANTsImage
        input image

    resample_params : tuple/list
        vector of size dimension with numeric values

    use_voxels : boolean
        True means interpret resample params as voxel counts

    interp_type : integer
        one of 0 (linear), 1 (nearest neighbor), 2 (gaussian), 3 (windowed sinc), 4 (bspline)

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> fi = ants.image_read( ants.get_ants_data("r16"))
    >>> finn = ants.resample_image(fi,(50,60),True,0)
    >>> filin = ants.resample_image(fi,(1.5,1.5),False,1)
    """
    # Guard clause: multi-channel inputs are not handled yet.
    if image.components != 1:
        raise ValueError('images with more than 1 component not currently supported')

    # The C backend works on float images; convert back at the end.
    source = image.clone('float')
    result = image.clone('float')
    # The backend expects the resample parameters as an 'x'-separated string.
    spacing_str = 'x'.join([str(rp) for rp in resample_params])
    call_args = [image.dimension, source, result, spacing_str,
                 int(use_voxels), interp_type]
    processed_args = utils._int_antsProcessArguments(call_args)
    libfn = utils.get_lib_fn('ResampleImage')
    libfn(processed_args)
    # Restore the pixel type of the input image.
    return result.clone(image.pixeltype)
def resample_image_to_target(image, target, interp_type='linear', imagetype=0, verbose=False, **kwargs):
    """
    Resample image by using another image as target reference.
    This function uses ants.apply_transform with an identity matrix
    to achieve proper resampling.

    ANTsR function: `resampleImageToTarget`

    Arguments
    ---------
    image : ANTsImage
        image to resample

    target : ANTsImage
        image of reference, the output will be in this space

    interp_type : string
        Choice of interpolator. Supports partial matching.
            linear
            nearestNeighbor
            multiLabel for label images but genericlabel is preferred
            gaussian
            bSpline
            cosineWindowedSinc
            welchWindowedSinc
            hammingWindowedSinc
            lanczosWindowedSinc
            genericLabel use this for label images

    imagetype : integer
        choose 0/1/2/3 mapping to scalar/vector/tensor/time-series

    verbose : boolean
        print command and run verbose application of transform.

    kwargs : keyword arguments
        additional arguments passed to antsApplyTransforms C code

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> fi = ants.image_read(ants.get_ants_data('r16'))
    >>> fi2mm = ants.resample_image(fi, (2,2), use_voxels=0, interp_type='linear')
    >>> resampled = ants.resample_image_to_target(fi2mm, fi, verbose=True)
    """
    fixed = target
    moving = image
    compose = None
    # An identity transform means only resampling onto the target grid happens.
    transformlist = 'identity'
    interpolator = interp_type
    # Legacy API: an integer interp_type indexes into this tuple.
    interpolator_oldoptions = ("linear", "nearestNeighbor", "gaussian", "cosineWindowedSinc", "bSpline")
    if isinstance(interp_type, int):
        interpolator = interpolator_oldoptions[interp_type]
    accepted_interpolators = {"linear", "nearestNeighbor", "multiLabel", "gaussian",
                              "bSpline", "cosineWindowedSinc", "welchWindowedSinc",
                              "hammingWindowedSinc", "lanczosWindowedSinc", "genericLabel"}
    if interpolator not in accepted_interpolators:
        raise ValueError('interpolator not supported - see %s' % accepted_interpolators)
    args = [fixed, moving, transformlist, interpolator]
    if not isinstance(fixed, str):
        if isinstance(fixed, iio.ANTsImage) and isinstance(moving, iio.ANTsImage):
            inpixeltype = fixed.pixeltype
            warpedmovout = moving.clone()
            f = fixed
            m = moving
            if (moving.dimension == 4) and (fixed.dimension==3) and (imagetype==0):
                raise ValueError('Set imagetype 3 to transform time series images.')
            wmo = warpedmovout
            mytx = ['-t', 'identity']
            # NOTE(review): `compose` is always None here, so only this branch
            # builds `args`; the compose branches appear to be kept for parity
            # with ants.apply_transforms — confirm before removing.
            if compose is None:
                args = ['-d', fixed.dimension, '-i', m, '-o', wmo, '-r', f, '-n', interpolator] + mytx
            tfn = '%scomptx.nii.gz' % compose if compose is not None else 'NA'
            if compose is not None:
                mycompo = '[%s,1]' % tfn
                args = ['-d', fixed.dimension, '-i', m, '-o', mycompo, '-r', f, '-n', interpolator] + mytx
            myargs = utils._int_antsProcessArguments(args)
            # NO CLUE WHAT THIS DOES OR WHY IT'S NEEDED
            # (it appears to drop a stray '-' argument by rebuilding the list,
            # shifted left by one — TODO confirm against _int_antsProcessArguments)
            for jj in range(len(myargs)):
                if myargs[jj] is not None:
                    if myargs[jj] == '-':
                        myargs2 = [None]*(len(myargs)-1)
                        myargs2[:(jj-1)] = myargs[:(jj-1)]
                        myargs2[jj:(len(myargs)-1)] = myargs[(jj+1):(len(myargs))]
                        myargs = myargs2
            myverb = int(verbose)
            processed_args = myargs + ['-z', str(1), '-v', str(myverb), '--float', str(1), '-e', str(imagetype)]
            libfn = utils.get_lib_fn('antsApplyTransforms')
            libfn(processed_args)
            if compose is None:
                # Cast the warped output back to the input pixel type.
                return warpedmovout.clone(inpixeltype)
            else:
                if os.path.exists(tfn):
                    return tfn
                else:
                    return None
        else:
            # Unsupported input combination: legacy sentinel return value.
            return 1
    else:
        # NOTE(review): this branch (string `fixed`) references `myargs`,
        # which is never assigned on this path, and would raise NameError —
        # confirm whether string inputs are actually supported here.
        processed_args = myargs + ['-z', str(1), '--float', str(1), '-e', str(imagetype)]
        libfn = utils.get_lib_fn('antsApplyTransforms')
        libfn(processed_args)
|
ANTsX/ANTsPy
|
ants/registration/resample_image.py
|
Python
|
apache-2.0
| 5,955
|
[
"Gaussian"
] |
20355fdd249363f307cd9f708423f3f9886eea7fc23565a64696ede45a1fece1
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the cf module.
"""
from __future__ import (absolute_import, division, print_function)
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import unittest
import mock
import iris
import iris.fileformats.cf as cf
class TestCaching(unittest.TestCase):
    """Verify that attribute access on the wrapped netCDF variable is cached."""

    def test_cached(self):
        mock_variable = mock.MagicMock()
        cf_var = cf.CFAncillaryDataVariable('foo', mock_variable)
        # Construction performs the one and only ncattrs() call.
        self.assertEqual(mock_variable.ncattrs.call_count, 1)

        # NB. hasattr()/getattr() on a fresh name triggers creation of the
        # cached attribute, so presence is checked via the instance __dict__.
        self.assertTrue('coordinates' not in cf_var.__dict__)
        _ = cf_var.coordinates
        # No further ncattrs() calls; the value is now cached on cf_var.
        self.assertEqual(mock_variable.ncattrs.call_count, 1)
        self.assertTrue('coordinates' in cf_var.__dict__)

        # A repeated access changes nothing.
        _ = cf_var.coordinates
        self.assertEqual(mock_variable.ncattrs.call_count, 1)
        self.assertTrue('coordinates' in cf_var.__dict__)

        # A different attribute just adds one more cached entry.
        self.assertTrue('standard_name' not in cf_var.__dict__)
        _ = cf_var.standard_name
        self.assertEqual(mock_variable.ncattrs.call_count, 1)
        self.assertTrue('standard_name' in cf_var.__dict__)
@tests.skip_data
class TestCFReader(tests.IrisTest):
def setUp(self):
filename = tests.get_data_path(
('NetCDF', 'rotated', 'xyt', 'small_rotPole_precipitation.nc'))
self.cfr = cf.CFReader(filename)
def test_ancillary_variables_pass_0(self):
self.assertEqual(self.cfr.cf_group.ancillary_variables, {})
def test_auxiliary_coordinates_pass_0(self):
self.assertEqual(sorted(self.cfr.cf_group.auxiliary_coordinates.keys()),
['lat', 'lon'])
lat = self.cfr.cf_group['lat']
self.assertEqual(lat.shape, (190, 174))
self.assertEqual(lat.dimensions, ('rlat', 'rlon'))
self.assertEqual(lat.ndim, 2)
self.assertEqual(lat.cf_attrs(),
(('long_name', 'latitude'),
('standard_name', 'latitude'),
('units', 'degrees_north')))
lon = self.cfr.cf_group['lon']
self.assertEqual(lon.shape, (190, 174))
self.assertEqual(lon.dimensions, ('rlat', 'rlon'))
self.assertEqual(lon.ndim, 2)
self.assertEqual(lon.cf_attrs(),
(('long_name', 'longitude'),
('standard_name', 'longitude'),
('units', 'degrees_east')))
def test_bounds_pass_0(self):
self.assertEqual(sorted(self.cfr.cf_group.bounds.keys()), ['time_bnds'])
time_bnds = self.cfr.cf_group['time_bnds']
self.assertEqual(time_bnds.shape, (4, 2))
self.assertEqual(time_bnds.dimensions, ('time', 'time_bnds'))
self.assertEqual(time_bnds.ndim, 2)
self.assertEqual(time_bnds.cf_attrs(), ())
def test_coordinates_pass_0(self):
self.assertEqual(sorted(self.cfr.cf_group.coordinates.keys()),
['rlat', 'rlon', 'time'])
rlat = self.cfr.cf_group['rlat']
self.assertEqual(rlat.shape, (190,))
self.assertEqual(rlat.dimensions, ('rlat',))
self.assertEqual(rlat.ndim, 1)
attr = []
attr.append(('axis', 'Y'))
attr.append(('long_name', 'rotated latitude'))
attr.append(('standard_name', 'grid_latitude'))
attr.append(('units', 'degrees'))
self.assertEqual(rlat.cf_attrs(), tuple(attr))
rlon = self.cfr.cf_group['rlon']
self.assertEqual(rlon.shape, (174,))
self.assertEqual(rlon.dimensions, ('rlon',))
self.assertEqual(rlon.ndim, 1)
attr = []
attr.append(('axis', 'X'))
attr.append(('long_name', 'rotated longitude'))
attr.append(('standard_name', 'grid_longitude'))
attr.append(('units', 'degrees'))
self.assertEqual(rlon.cf_attrs(), tuple(attr))
time = self.cfr.cf_group['time']
self.assertEqual(time.shape, (4,))
self.assertEqual(time.dimensions, ('time',))
self.assertEqual(time.ndim, 1)
attr = []
attr.append(('axis', 'T'))
attr.append(('bounds', 'time_bnds'))
attr.append(('calendar', 'gregorian'))
attr.append(('long_name', 'Julian Day'))
attr.append(('units', 'days since 1950-01-01 00:00:00.0'))
self.assertEqual(time.cf_attrs(), tuple(attr))
    def test_data_pass_0(self):
        """Check shape, dimensions and attributes of the 'pr' data variable."""
        self.assertEqual(sorted(self.cfr.cf_group.data_variables.keys()),
                         ['pr'])
        data = self.cfr.cf_group['pr']
        self.assertEqual(data.shape, (4, 190, 174))
        self.assertEqual(data.dimensions, ('time', 'rlat', 'rlon'))
        self.assertEqual(data.ndim, 3)
        attr = []
        attr.append(('_FillValue', 1e+30))
        attr.append(('cell_methods', 'time: mean'))
        attr.append(('coordinates', 'lon lat'))
        attr.append(('grid_mapping', 'rotated_pole'))
        attr.append(('long_name', 'Precipitation'))
        attr.append(('missing_value', 1e+30))
        attr.append(('standard_name', 'precipitation_flux'))
        attr.append(('units', 'kg m-2 s-1'))
        attr = tuple(attr)
        # _FillValue (index 0) and missing_value (index 5) are very large
        # floats whose stored precision may differ, so compare those two
        # with a tolerance and everything else exactly.
        self.assertEqual(data.cf_attrs()[0][0], attr[0][0])
        self.assertAlmostEqual(data.cf_attrs()[0][1], attr[0][1], delta=1.6e+22)
        self.assertEqual(data.cf_attrs()[1:5], attr[1:5])
        self.assertAlmostEqual(data.cf_attrs()[5][1], attr[5][1], delta=1.6e+22)
        self.assertEqual(data.cf_attrs()[6:], attr[6:])
    def test_formula_terms_pass_0(self):
        """This dataset defines no formula-terms variables."""
        self.assertEqual(self.cfr.cf_group.formula_terms, {})
def test_grid_mapping_pass_0(self):
self.assertEqual(sorted(self.cfr.cf_group.grid_mappings.keys()),
['rotated_pole'])
rotated_pole = self.cfr.cf_group['rotated_pole']
self.assertEqual(rotated_pole.shape, ())
self.assertEqual(rotated_pole.dimensions, ())
self.assertEqual(rotated_pole.ndim, 0)
attr = []
attr.append(('grid_mapping_name', 'rotated_latitude_longitude'))
attr.append(('grid_north_pole_latitude', 18.0))
attr.append(('grid_north_pole_longitude', -140.75))
self.assertEqual(rotated_pole.cf_attrs(), tuple(attr))
    def test_cell_measures_pass_0(self):
        """This dataset defines no cell-measure variables."""
        self.assertEqual(self.cfr.cf_group.cell_measures, {})
    def test_global_attributes_pass_0(self):
        """Check the expected global (file-level) attribute names and values."""
        self.assertEqual(
            sorted(self.cfr.cf_group.global_attributes.keys()),
            ['Conventions', 'NCO', 'experiment',
             'history', 'institution', 'source',]
        )
        self.assertEqual(self.cfr.cf_group.global_attributes['Conventions'],
                         'CF-1.0')
        self.assertEqual(self.cfr.cf_group.global_attributes['experiment'],
                         'ER3')
        self.assertEqual(self.cfr.cf_group.global_attributes['institution'],
                         'DMI')
        self.assertEqual(self.cfr.cf_group.global_attributes['source'],
                         'HIRHAM')
    def test_variable_cf_group_pass_0(self):
        """Each variable's own cf_group must list only its associated variables."""
        self.assertEqual(sorted(self.cfr.cf_group['time'].cf_group.keys()),
                         ['time_bnds'])
        self.assertEqual(sorted(self.cfr.cf_group['pr'].cf_group.keys()),
                         ['lat', 'lon', 'rlat', 'rlon', 'rotated_pole', 'time'])
    def test_variable_attribute_touch_pass_0(self):
        """Accessing an attribute moves it from the 'unused' to the 'used'
        set, and cf_attrs_reset() clears that touch history."""
        lat = self.cfr.cf_group['lat']
        self.assertEqual(lat.cf_attrs(),
                         (('long_name', 'latitude'),
                          ('standard_name', 'latitude'),
                          ('units', 'degrees_north')))
        self.assertEqual(lat.cf_attrs_used(), ())
        self.assertEqual(lat.cf_attrs_unused(),
                         (('long_name', 'latitude'),
                          ('standard_name', 'latitude'),
                          ('units', 'degrees_north')))
        # touch some variable attributes.
        lat.long_name
        lat.units
        self.assertEqual(lat.cf_attrs_used(),
                         (('long_name', 'latitude'),
                          ('units', 'degrees_north')))
        self.assertEqual(lat.cf_attrs_unused(),
                         (('standard_name', 'latitude'),))
        # clear the attribute touch history.
        lat.cf_attrs_reset()
        self.assertEqual(lat.cf_attrs_used(), ())
        self.assertEqual(lat.cf_attrs_unused(),
                         (('long_name', 'latitude'),
                          ('standard_name', 'latitude'),
                          ('units', 'degrees_north')))
@tests.skip_data
class TestLoad(tests.IrisTest):
    """Sanity checks on coordinate attributes and cell methods of cubes
    loaded from NetCDF test data."""
    def test_attributes_empty(self):
        """A time coordinate with no extra NetCDF attributes yields {}."""
        filename = tests.get_data_path(('NetCDF', 'global', 'xyt',
                                        'SMALL_hires_wind_u_for_ipcc4.nc'))
        cube = iris.load_cube(filename)
        self.assertEqual(cube.coord('time').attributes, {})
    def test_attributes_contain_positive(self):
        """The 'positive' attribute survives loading onto the height coord."""
        filename = tests.get_data_path(('NetCDF', 'global', 'xyt',
                                        'SMALL_hires_wind_u_for_ipcc4.nc'))
        cube = iris.load_cube(filename)
        self.assertEqual(cube.coord('height').attributes['positive'], 'up')
    def test_attributes_populated(self):
        """Non-CF attributes are carried through onto the coordinate."""
        filename = tests.get_data_path(
            ('NetCDF', 'label_and_climate', 'small_FC_167_mon_19601101.nc'))
        cube = iris.load_cube(filename)
        self.assertEqual(
            sorted(cube.coord('longitude').attributes.items()),
            [('data_type', 'float'),
             ('modulo', 360),
             ('topology', 'circular')
             ]
        )
    def test_cell_methods(self):
        """The NetCDF cell_methods string is parsed into CellMethod objects."""
        filename = tests.get_data_path(('NetCDF', 'global', 'xyt', 'SMALL_hires_wind_u_for_ipcc4.nc'))
        cube = iris.load_cube(filename)
        self.assertEqual(cube.cell_methods,
                         (iris.coords.CellMethod(method=u'mean',
                                                 coords=(u'time', ),
                                                 intervals=(u'6 minutes', ),
                                                 comments=()), ))
@tests.skip_data
class TestClimatology(tests.IrisTest):
    """Checks that CFReader associates climatology bounds with the time
    coordinate."""
    def setUp(self):
        filename = tests.get_data_path(('NetCDF', 'label_and_climate',
                                        'A1B-99999a-river-sep-2070-2099.nc'))
        self.cfr = cf.CFReader(filename)
    def test_bounds(self):
        """The time coordinate's climatology group holds 'climatology_bounds'."""
        time = self.cfr.cf_group['temp_dmax_tmean_abs'].cf_group.coordinates['time']
        climatology = time.cf_group.climatology
        self.assertEqual(len(climatology), 1)
        self.assertEqual(list(climatology.keys()), ['climatology_bounds'])
        climatology_var = climatology['climatology_bounds']
        self.assertEqual(climatology_var.ndim, 2)
        self.assertEqual(climatology_var.shape, (1, 2))
@tests.skip_data
class TestLabels(tests.IrisTest):
    """Checks that CFReader identifies CF label (string-valued auxiliary)
    variables and maps them onto the right data dimensions."""
    def setUp(self):
        filename = tests.get_data_path(
            ('NetCDF', 'label_and_climate',
             'A1B-99999a-river-sep-2070-2099.nc'))
        self.cfr_start = cf.CFReader(filename)
        filename = tests.get_data_path(
            ('NetCDF', 'label_and_climate',
             'small_FC_167_mon_19601101.nc'))
        self.cfr_end = cf.CFReader(filename)
    def test_label_dim_start(self):
        """Label variable whose string dimension comes first in the file."""
        cf_data_var = self.cfr_start.cf_group['temp_dmax_tmean_abs']
        region_group = self.cfr_start.cf_group.labels['region_name']
        self.assertEqual(sorted(self.cfr_start.cf_group.labels.keys()),
                         [u'region_name'])
        self.assertEqual(sorted(cf_data_var.cf_group.labels.keys()),
                         [u'region_name'])
        self.assertEqual(region_group.cf_label_dimensions(cf_data_var),
                         (u'georegion',))
        self.assertEqual(region_group.cf_label_data(cf_data_var)[0],
                         'Anglian')
        cf_data_var = self.cfr_start.cf_group['cdf_temp_dmax_tmean_abs']
        self.assertEqual(sorted(self.cfr_start.cf_group.labels.keys()),
                         [u'region_name'])
        self.assertEqual(sorted(cf_data_var.cf_group.labels.keys()),
                         [u'region_name'])
        self.assertEqual(region_group.cf_label_dimensions(cf_data_var),
                         (u'georegion',))
        self.assertEqual(region_group.cf_label_data(cf_data_var)[0],
                         'Anglian')
    def test_label_dim_end(self):
        """Label variables whose string dimension comes last in the file."""
        cf_data_var = self.cfr_end.cf_group['tas']
        self.assertEqual(sorted(self.cfr_end.cf_group.labels.keys()), [u'experiment_id', u'institution', u'source'])
        self.assertEqual(sorted(cf_data_var.cf_group.labels.keys()), [u'experiment_id', u'institution', u'source'])
        self.assertEqual(self.cfr_end.cf_group.labels['experiment_id'].cf_label_dimensions(cf_data_var), (u'ensemble',))
        self.assertEqual(self.cfr_end.cf_group.labels['experiment_id'].cf_label_data(cf_data_var)[0], '2005')
        self.assertEqual(self.cfr_end.cf_group.labels['institution'].cf_label_dimensions(cf_data_var), (u'ensemble',))
        self.assertEqual(self.cfr_end.cf_group.labels['institution'].cf_label_data(cf_data_var)[0], 'ECMWF')
        self.assertEqual(self.cfr_end.cf_group.labels['source'].cf_label_dimensions(cf_data_var), (u'ensemble',))
        self.assertEqual(self.cfr_end.cf_group.labels['source'].cf_label_data(cf_data_var)[0], 'IFS33R1/HOPE-E, Sys 1, Met 1, ENSEMBLES')
# Allow this test module to be run directly.
if __name__ == "__main__":
    tests.main()
|
Jozhogg/iris
|
lib/iris/tests/test_cf.py
|
Python
|
lgpl-3.0
| 14,692
|
[
"NetCDF"
] |
bd1c2e525488234796fc18e42d6762630126702f581624e0ffb63e6de2b7f62b
|
'''
Created on Jun 2, 2011
@author: mkiyer
'''
import sys
import logging
import argparse
import pysam
# local imports
from chimerascan.lib import config
def sam_to_bam(input_sam_file, output_bam_file):
    """Convert a SAM file to BAM format.

    Parameters
    ----------
    input_sam_file : str
        Path of the SAM file to read.
    output_bam_file : str
        Path of the BAM file to create; its header is copied from the input.

    Returns
    -------
    int
        config.JOB_SUCCESS on completion.
    """
    samfh = pysam.Samfile(input_sam_file, "r")
    try:
        bamfh = pysam.Samfile(output_bam_file, "wb", template=samfh)
        try:
            num_frags = 0
            for r in samfh:
                bamfh.write(r)
                num_frags += 1
            # Lazy %-args: the message is only formatted if DEBUG is enabled.
            logging.debug("Found %d fragments", num_frags)
        finally:
            # Close even if a write fails, so the BAM file is not leaked.
            bamfh.close()
    finally:
        samfh.close()
    return config.JOB_SUCCESS
def main():
    """Command-line entry point: parse the two file paths and convert."""
    log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    arg_parser = argparse.ArgumentParser()
    for arg_name in ("input_sam_file", "output_bam_file"):
        arg_parser.add_argument(arg_name)
    opts = arg_parser.parse_args()
    return sam_to_bam(opts.input_sam_file, opts.output_bam_file)
# Exit with the converter's return value when run as a script.
if __name__ == '__main__':
    sys.exit(main())
|
tectronics/chimerascan
|
chimerascan/pipeline/sam_to_bam.py
|
Python
|
gpl-3.0
| 937
|
[
"pysam"
] |
9d62dd0a45f8bfe34dc857697a22187f0f0bda1ed175130f5973ece93d629238
|
"""Visit and parse objdump."""
from scoff.ast.visits import ASTVisitor
from hdltools.binutils.instruction import AsmInstruction
from hdltools.binutils.function import AsmFunction
class AsmDumpPass(ASTVisitor):
    """Visit objdump output.

    Walks a parsed objdump AST, collecting the functions it declares
    (by name and start address) and wrapping each instruction node in
    an AsmInstruction.
    """
    def __init__(self):
        """Initialize."""
        # True only after a complete, successful visit() pass.
        self._visited = False
        super().__init__()
        # function name -> integer start address (parsed from hex).
        self._fn_locs = {}
        # function name -> AsmFunction wrapper.
        self._fn_by_name = {}
    def visit_Function(self, node):
        """Visit function.

        Records the function's address and builds an AsmFunction from
        its header symbol, hex address and instruction list.
        """
        self._fn_locs[node.header.symbol.name] = int(node.header.addr, 16)
        self._fn_by_name[node.header.symbol.name] = AsmFunction(
            node.header.symbol.name,
            int(node.header.addr, 16),
            node.instructions,
        )
    def visit_Instruction(self, node):
        """Visit instruction.

        Returns an AsmInstruction built from the node; presumably the
        ASTVisitor framework substitutes the returned object for the
        visited node -- confirm against scoff's visitor semantics.
        """
        return AsmInstruction(
            int(node.addr, 16),
            int(node.opcode, 16),
            # objdump separates mnemonic/operands with tabs; normalize.
            node.asm_txt.replace("\t", " "),
            node.parent,
        )
    def get_functions(self):
        """Get functions.

        NOTE(review): unlike get_main()/get_entry_point(), this does not
        check self._visited, so it silently returns {} before a visit --
        confirm whether a guard was intended here too.
        """
        return self._fn_by_name
    def get_main(self):
        """Get main function.

        Raises RuntimeError if called before visit(); raises KeyError if
        the dump has no 'main' symbol.
        """
        if not self._visited:
            raise RuntimeError("visit has not occurred yet")
        return self._fn_by_name["main"]
    def get_entry_point(self):
        """Get entry point.

        Raises RuntimeError if called before visit(); raises KeyError if
        the dump has no '_start' symbol.
        """
        if not self._visited:
            raise RuntimeError("visit has not occurred yet")
        return self._fn_by_name["_start"]
    def visit(self, node):
        """Visit.

        Clears the visited flag first so a failed pass leaves the
        accessors guarded, then sets it once the walk completes.
        """
        self._visited = False
        super().visit(node)
        self._visited = True
|
brunosmmm/hdltools
|
hdltools/binutils/passes.py
|
Python
|
mit
| 1,596
|
[
"VisIt"
] |
23bf1b985455e385918128ffa50c8ecb29c3fc7f0e773d8262a056cf4801fdd5
|
import unittest
from nose.tools import * # PEP8 asserts
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.factories import (
UserFactory, ProjectFactory, NodeFactory,
UnregUserFactory, UnconfirmedUserFactory
)
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate
@requires_search
class SearchTestCase(OsfTestCase):
    """Base case giving every test a freshly (re)created search index."""
    def setUp(self):
        super(SearchTestCase, self).setUp()
        search.create_index(elastic_search.INDEX)
    def tearDown(self):
        super(SearchTestCase, self).tearDown()
        # Drop and recreate so state never bleeds into the next test.
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
def query(term):
    """Run *term* through the search engine and return the raw response."""
    return search.search(build_query(term), index=elastic_search.INDEX)
def query_user(name):
    """Search the 'user' category for documents matching *name*."""
    return query('category:user AND "{}"'.format(name))
@requires_search
class TestUserUpdate(SearchTestCase):
    """Checks that user lifecycle events keep the search index in sync."""
    def setUp(self):
        super(TestUserUpdate, self).setUp()
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = UserFactory(fullname='David Bowie')
    def test_new_user(self):
        """A newly created user is indexed immediately."""
        # Verify that user has been added to Elastic Search
        docs = query_user(self.user.fullname)['results']
        assert_equal(len(docs), 1)
    def test_new_user_unconfirmed(self):
        """Unconfirmed users are not indexed until their email is confirmed."""
        user = UnconfirmedUserFactory()
        docs = query_user(user.fullname)['results']
        assert_equal(len(docs), 0)
        token = user.get_confirmation_token(user.username)
        user.confirm_email(token)
        user.save()
        docs = query_user(user.fullname)['results']
        assert_equal(len(docs), 1)
    def test_change_name(self):
        """Add a user, change her name, and verify that only the new name is
        found in search.
        """
        user = UserFactory(fullname='Barry Mitchell')
        fullname_original = user.fullname
        user.fullname = user.fullname[::-1]
        user.save()
        docs_original = query_user(fullname_original)['results']
        assert_equal(len(docs_original), 0)
        docs_current = query_user(user.fullname)['results']
        assert_equal(len(docs_current), 1)
    def test_disabled_user(self):
        """Test that disabled users are not in search index"""
        user = UserFactory(fullname='Bettie Page')
        user.save()
        # Ensure user is in search index
        assert_equal(len(query_user(user.fullname)['results']), 1)
        # Disable the user
        user.is_disabled = True
        user.save()
        # Ensure user is not in search index
        assert_equal(len(query_user(user.fullname)['results']), 0)
    def test_merged_user(self):
        """Merging removes the absorbed user from the index."""
        user = UserFactory(fullname='Annie Lennox')
        merged_user = UserFactory(fullname='Lisa Stansfield')
        user.save()
        merged_user.save()
        assert_equal(len(query_user(user.fullname)['results']), 1)
        assert_equal(len(query_user(merged_user.fullname)['results']), 1)
        user.merge_user(merged_user)
        assert_equal(len(query_user(user.fullname)['results']), 1)
        assert_equal(len(query_user(merged_user.fullname)['results']), 0)
    def test_employment(self):
        """A job's institution becomes searchable once it is saved."""
        user = UserFactory(fullname='Helga Finn')
        user.save()
        institution = 'Finn\'s Fine Filers'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.jobs.append({
            'institution': institution,
            'title': 'The Big Finn',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)
    def test_education(self):
        """A school's institution becomes searchable once it is saved."""
        user = UserFactory(fullname='Henry Johnson')
        user.save()
        institution = 'Henry\'s Amazing School!!!'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.schools.append({
            'institution': institution,
            'degree': 'failed all classes',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)
    def test_name_fields(self):
        """Every name component (given/middle/family/suffix) is searchable
        and resolves to the same user document."""
        names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
        user = UserFactory(fullname=names[0])
        user.given_name = names[1]
        user.middle_names = names[2]
        user.family_name = names[3]
        user.suffix = names[4]
        user.save()
        docs = [query_user(name)['results'] for name in names]
        assert_equal(sum(map(len, docs)), len(docs))  # 1 result each
        assert_true(all([user._id == doc[0]['id'] for doc in docs]))
@requires_search
class TestProject(SearchTestCase):
    """Indexing behaviour for private versus public projects."""
    def setUp(self):
        super(TestProject, self).setUp()
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = UserFactory(fullname='John Deacon')
        self.project = ProjectFactory(title='Red Special', creator=self.user)
    def test_new_project_private(self):
        """Verify that a private project is not present in Elastic Search.
        """
        docs = query(self.project.title)['results']
        assert_equal(len(docs), 0)
    def test_make_public(self):
        """Make project public, and verify that it is present in Elastic
        Search.
        """
        self.project.set_privacy('public')
        docs = query(self.project.title)['results']
        assert_equal(len(docs), 1)
@requires_search
class TestPublicNodes(SearchTestCase):
    """Indexing behaviour for public projects, components and registrations."""
    def setUp(self):
        super(TestPublicNodes, self).setUp()
        # FIX: was UserFactory(usename='Doug Bogie') -- 'usename' is a typo
        # for 'fullname' (every other factory call in this module uses
        # fullname=), so the intended name was never set.
        self.user = UserFactory(fullname='Doug Bogie')
        self.title = 'Red Special'
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(
            title=self.title,
            creator=self.user,
            is_public=True,
        )
        self.component = NodeFactory(
            project=self.project,
            title=self.title,
            creator=self.user,
            is_public=True
        )
        self.registration = ProjectFactory(
            title=self.title,
            creator=self.user,
            is_public=True,
            is_registration=True
        )
    def test_make_private(self):
        """Make project public, then private, and verify that it is not present
        in search.
        """
        self.project.set_privacy('private')
        docs = query('category:project AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        self.component.set_privacy('private')
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        self.registration.set_privacy('private')
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 0)
    def test_public_parent_title(self):
        """A component's document carries its public parent's title and URL."""
        self.project.set_title('hello & world', self.consolidate_auth)
        self.project.save()
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        assert_equal(docs[0]['parent_title'], 'hello & world')
        assert_true(docs[0]['parent_url'])
    def test_make_parent_private(self):
        """Make parent of component, public, then private, and verify that the
        component still appears but doesn't link to the parent in search.
        """
        self.project.set_privacy('private')
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        assert_equal(docs[0]['parent_title'], '-- private project --')
        assert_false(docs[0]['parent_url'])
    def test_delete_project(self):
        """Removed components and projects disappear from the search index.
        """
        self.component.remove_node(self.consolidate_auth)
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        self.project.remove_node(self.consolidate_auth)
        docs = query('category:project AND ' + self.title)['results']
        assert_equal(len(docs), 0)
    def test_change_title(self):
        """Retitling a project reindexes it under the new title only.
        """
        title_original = self.project.title
        self.project.set_title(
            'Blue Ordinary', self.consolidate_auth, save=True)
        docs = query('category:project AND ' + title_original)['results']
        assert_equal(len(docs), 0)
        docs = query('category:project AND ' + self.project.title)['results']
        assert_equal(len(docs), 1)
    def test_add_tags(self):
        """Each added tag becomes searchable."""
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)
            self.project.add_tag(tag, self.consolidate_auth, save=True)
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 1)
    def test_remove_tag(self):
        """Removing a tag removes it from the search index."""
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.project.add_tag(tag, self.consolidate_auth, save=True)
            self.project.remove_tag(tag, self.consolidate_auth, save=True)
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)
    def test_update_wiki(self):
        """Add text to a wiki page, then verify that project is found when
        searching for wiki text.
        """
        wiki_content = {
            'home': 'Hammer to fall',
            'swag': '#YOLO'
        }
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            self.project.update_node_wiki(
                key, value, self.consolidate_auth,
            )
            docs = query(value)['results']
            assert_equal(len(docs), 1)
    def test_clear_wiki(self):
        """Add wiki text to page, then delete, then verify that project is not
        found when searching for wiki text.
        """
        wiki_content = 'Hammer to fall'
        self.project.update_node_wiki(
            'home', wiki_content, self.consolidate_auth,
        )
        self.project.update_node_wiki('home', '', self.consolidate_auth)
        docs = query(wiki_content)['results']
        assert_equal(len(docs), 0)
    def test_add_contributor(self):
        """Add a contributor, then verify that project is found when searching
        for contributor.
        """
        user2 = UserFactory(fullname='Adam Lambert')
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        self.project.add_contributor(user2, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
    def test_remove_contributor(self):
        """Add and remove a contributor, then verify that project is not found
        when searching for contributor.
        """
        user2 = UserFactory(fullname='Brian May')
        self.project.add_contributor(user2, save=True)
        self.project.remove_contributor(user2, self.consolidate_auth)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
    def test_hide_contributor(self):
        """Hidden contributors are not searchable; visible ones are."""
        user2 = UserFactory(fullname='Brian May')
        self.project.add_contributor(user2)
        self.project.set_visible(user2, False, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        self.project.set_visible(user2, True, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
    def test_wrong_order_search(self):
        """Title words match regardless of order (project, component,
        registration all share the title, hence 3 results)."""
        title_parts = self.title.split(' ')
        title_parts.reverse()
        title_search = ' '.join(title_parts)
        docs = query(title_search)['results']
        assert_equal(len(docs), 3)
    def test_tag_aggregation(self):
        """The response's tag aggregation reflects all added tags."""
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.project.add_tag(tag, self.consolidate_auth, save=True)
        docs = query(self.title)['tags']
        assert len(docs) == 3
        for doc in docs:
            assert doc['key'] in tags
    def test_count_aggregation(self):
        """The count aggregation covers user + project + component +
        registration created in setUp."""
        docs = query("*")['counts']
        assert_equal(docs['total'], 4)
        assert_equal(docs['project'], 1)
        assert_equal(docs['component'], 1)
        assert_equal(docs['registration'], 1)
@requires_search
class TestAddContributor(SearchTestCase):
    """Tests of the search.search_contributor method
    """
    def setUp(self):
        super(TestAddContributor, self).setUp()
        self.name1 = 'Roger1 Taylor1'
        self.name2 = 'John2 Deacon2'
        # Only name1 is backed by a real (registered) user.
        self.user = UserFactory(fullname=self.name1)
    def test_unreg_users_dont_show_in_search(self):
        """Unregistered users are excluded from contributor search."""
        unreg = UnregUserFactory()
        contribs = search.search_contributor(unreg.fullname)
        assert_equal(len(contribs['users']), 0)
    def test_unreg_users_do_show_on_projects(self):
        """A public project created by an unregistered user is still indexed."""
        unreg = UnregUserFactory(fullname='Robert Paulson')
        self.project = ProjectFactory(
            title='Glamour Rock',
            creator=unreg,
            is_public=True,
        )
        results = query(unreg.fullname)['results']
        assert_equal(len(results), 1)
    def test_search_fullname(self):
        """Verify that searching for full name yields exactly one result.
        """
        contribs = search.search_contributor(self.name1)
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2)
        assert_equal(len(contribs['users']), 0)
    def test_search_firstname(self):
        """Verify that searching for first name yields exactly one result.
        """
        contribs = search.search_contributor(self.name1.split(' ')[0])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2.split(' ')[0])
        assert_equal(len(contribs['users']), 0)
    def test_search_partial(self):
        """Verify that searching for part of first name yields exactly one
        result.
        """
        contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 0)
class TestSearchExceptions(OsfTestCase):
    """
    Verify that the correct exception is thrown when the connection is lost
    """
    @classmethod
    def setUpClass(cls):
        super(TestSearchExceptions, cls).setUpClass()
        # Sever the Elasticsearch connection so saves exercise error paths.
        if settings.SEARCH_ENGINE == 'elastic':
            cls._es = search.search_engine.es
            search.search_engine.es = None
    @classmethod
    def tearDownClass(cls):
        super(TestSearchExceptions, cls).tearDownClass()
        # Restore the saved connection for later test cases.
        if settings.SEARCH_ENGINE == 'elastic':
            search.search_engine.es = cls._es
    def test_connection_error(self):
        """
        Ensures that saving projects/users doesn't break as a result of connection errors
        """
        # FIX: was UserFactory(usename='Doug Bogie') -- 'usename' is a typo
        # for 'fullname' (see the rest of this module).
        self.user = UserFactory(fullname='Doug Bogie')
        self.project = ProjectFactory(
            title="Tom Sawyer",
            creator=self.user,
            is_public=True,
        )
        self.user.save()
        self.project.save()
class TestSearchMigration(SearchTestCase):
    """
    Verify that the correct indices are created/deleted during migration
    """
    @classmethod
    def tearDownClass(cls):
        super(TestSearchMigration, cls).tearDownClass()
        # Leave a usable index behind for whatever runs next.
        search.create_index(settings.ELASTIC_INDEX)
    def setUp(self):
        super(TestSearchMigration, self).setUp()
        self.es = search.search_engine.es
        search.delete_index(settings.ELASTIC_INDEX)
        search.create_index(settings.ELASTIC_INDEX)
        self.user = UserFactory(fullname='David Bowie')
        self.project = ProjectFactory(
            title=settings.ELASTIC_INDEX,
            creator=self.user,
            is_public=True
        )
    def test_first_migration_no_delete(self):
        """First migration creates <index>_v1 aliased to the base name."""
        migrate(delete=False, index=settings.ELASTIC_INDEX)
        var = self.es.indices.get_aliases()
        # NOTE: .keys()[0] and xrange below are Python 2 idioms.
        assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)
    def test_multiple_migrations_no_delete(self):
        """Each migration bumps the version suffix; the alias follows."""
        for n in xrange(1, 21):
            migrate(delete=False, index=settings.ELASTIC_INDEX)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
    def test_first_migration_with_delete(self):
        """delete=True on the first migration still produces <index>_v1."""
        migrate(delete=True, index=settings.ELASTIC_INDEX)
        var = self.es.indices.get_aliases()
        assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)
    def test_multiple_migrations_with_delete(self):
        """With delete=True the previous versioned index is removed."""
        for n in xrange(1, 21, 2):
            migrate(delete=True, index=settings.ELASTIC_INDEX)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
            migrate(delete=True, index=settings.ELASTIC_INDEX)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
            assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
|
himanshuo/osf.io
|
tests/test_elastic.py
|
Python
|
apache-2.0
| 17,890
|
[
"Brian"
] |
0feb838d7a898fe05fd185c4dbbaad92a7eb393938b4d1c17319847170873553
|
"""
This is a simple script that verifies several ways of accessing numpy arrays
and ensures that their memory is properly cleaned.
"""
import pytest
from .addons import using
import numpy as np
import psi4
# Run this whole module as part of the "quick" pytest suite.
pytestmark = pytest.mark.quick
# If it's too small, something odd happens with the memory manager
mat_size = 10000
def snapshot_memory():
    """Return this process's current memory usage in bytes.

    memory_profiler reports MiB, so multiply by 1048576 (bytes per MiB).
    Imported lazily so the module imports even without memory_profiler.
    """
    import memory_profiler as mp
    return mp.memory_usage()[0] * 1048576
def check_leak(func, tol=1.e6):
    """Run *func* and raise MemoryError if process memory drifts by more
    than *tol* bytes; otherwise report success and return True."""
    before = snapshot_memory()
    func()
    drift = abs(before - snapshot_memory())
    # A megabyte is excusable due to various GC funcs
    if drift > tol:
        raise MemoryError("Function did not correctly clean up")
    print("Function %s: PASSED" % func.__name__)
    return True
def build_mat():
    """Allocate a bare Matrix and return it."""
    return psi4.core.Matrix(mat_size, mat_size)
def build_view_mat():
    """Allocate a Matrix together with its .np ndarray view."""
    matrix = psi4.core.Matrix(mat_size, mat_size)
    ndarray_view = matrix.np
    return matrix, ndarray_view
def build_viewh_mat():
    # NOTE(review): byte-for-byte identical to build_view_mat; the 'h' in
    # the name suggests a different view accessor was intended here
    # (perhaps mat.nph) -- confirm against the psi4 Matrix API.
    mat = psi4.core.Matrix(mat_size, mat_size)
    view = mat.np
    return mat, view
def build_view_set_mat():
    """Allocate a Matrix, take its .np view, and write through the view."""
    matrix = psi4.core.Matrix(mat_size, mat_size)
    writable_view = matrix.np
    writable_view[:] = 5
    return matrix, writable_view
def build_arr_mat():
    """Allocate a Matrix plus an np.asarray view of it (no copy)."""
    matrix = psi4.core.Matrix(mat_size, mat_size)
    as_array = np.asarray(matrix)
    return matrix, as_array
def build_copy_mat():
    """Allocate a Matrix plus an np.array copy of its data."""
    matrix = psi4.core.Matrix(mat_size, mat_size)
    copied = np.array(matrix)
    return matrix, copied
@using("memory_profiler")
def test_build_mat():
    """Plain Matrix allocation must not leak."""
    assert check_leak(build_mat)
@using("memory_profiler")
def test_build_view_mat():
    """Matrix + .np view must not leak."""
    assert check_leak(build_view_mat)
@using("memory_profiler")
def test_build_viewh_mat():
    """Matrix + view (viewh variant) must not leak."""
    assert check_leak(build_viewh_mat)
@using("memory_profiler")
def test_build_view_set_mat():
    """Matrix + written-through view must not leak."""
    assert check_leak(build_view_set_mat)
@using("memory_profiler")
def test_build_arr_mat():
    """Matrix + np.asarray view must not leak."""
    assert check_leak(build_arr_mat)
@using("memory_profiler")
def test_build_copy_mat():
    """Matrix + np.array copy must not leak."""
    assert check_leak(build_copy_mat)
@using("memory_profiler")
def test_totals():
    """Run every builder in sequence and confirm overall memory returns
    to within a megabyte of the starting baseline."""
    start = snapshot_memory()
    builders = (build_mat, build_view_mat, build_viewh_mat,
                build_view_set_mat, build_arr_mat, build_copy_mat)
    for builder in builders:
        check_leak(builder)
    # Double check totals
    diff = abs(start - snapshot_memory())
    if diff > 1.e6:
        raise MemoryError("\nA function leaked %d bytes of memory!" % diff)
    else:
        print("\nNo leaks detected!")
|
ashutoshvt/psi4
|
tests/pytests/test_np_views.py
|
Python
|
lgpl-3.0
| 2,472
|
[
"Psi4"
] |
56890db706f4e56d875af422362f190e94deece9bc733dade5c5d6d6abee7b6f
|
# Load python packages
import re, sys, math
from collections import OrderedDict
# Load Moose packages
from FactorySystem import MooseObject
from ..slides import RemarkSlide, SlideWarehouse
##
# Base class for markdown slide generation
class RemarkSlideSet(MooseObject):
    ##
    # Defines the available properties for the SlideSet base class
    # @return An InputParameters object describing every supported setting,
    #         including the 'properties' group inherited from RemarkSlide
    @staticmethod
    def validParams():
        params = MooseObject.validParams()
        params.addRequiredParam('type', 'The type of slide set to create')
        params.addParam('title', 'The title of the slide set, if this exists a title slide will be injected')
        params.addParam('active', [], 'A list of ordered slide names to output, if blank all slides are output')
        params.addParam('inactive', [], 'A list of slide names to exclude from output')
        params.addParam('contents', False, 'Include table of contents slide')
        params.addParam('contents_title', 'The table-of-contents heading for this slide set')
        params.addParam('contents_level', 1, 'The heading level to include in the contents')
        params.addParam('contents_items_per_slide', 15, 'The number of contents items to include on a page')
        params.addParam('show_in_contents', True, 'Toggle if slide set content appears in the table-of-contents')
        params.addParam('style', 'The CSS style sheet to utilize for this slide set')
        params.addParam('non_ascii_warn', True, 'Produce warning if non-ascii characters are located')
        # Create the common parameters from RemarkSlide 'properties' group
        slide_params = RemarkSlide.validParams()
        for key in slide_params.groupKeys('properties'):
            params.addParam(key, slide_params.getDescription(key))
        params.addParamsToGroup('properties', slide_params.groupKeys('properties'))
        return params
    ##
    # Constructor
    # @param name The name of the object
    # @param params The InputParameters for the object being created
    # @param kwargs Optional key, value pairings
    #
    # Optional key, value pairs:
    #   slide_type = <str>
    #   The name of the Slide class to build, by default 'RemarkSlide' is used
    def __init__(self, name, params, **kwargs):
        MooseObject.__init__(self, name, params)
        # Set the Slide type
        self.__slide_type = kwargs.pop('slide_type', 'RemarkSlide')
        # Get a reference to the items needed to create objects
        # (these are private params injected by the framework)
        self.__factory = self.getParam('_factory')
        self.__parser = self.getParam('_parser')
        self.__root = self.getParam('_root')
        # Create a storage object for the slides created by this set
        self.__slide_warehouse = SlideWarehouse(set_name = name, \
                                                active = self.getParam('active'), \
                                                inactive = self.getParam('inactive'))
        # Storage for markdown links
        self.__links = []
        # Print a message (Python 2 print statement)
        print ' ', name
    ##
    # The method that creates/retrieves the markdown (virtual)
    # @return The raw markdown for this slide set; subclasses override this
    def read(self):
        return ''
    ##
    # Returns a reference to the SlideWarehouse object
    # holding the slides created by this set
    def warehouse(self):
        return self.__slide_warehouse
    ##
    # Creates the individual RemarkSlide objects
    # @param markdown The raw markdown, obtained from read() method, to
    #                 separate into slides (split on '\n---' separators)
    def build(self, markdown):
        # Extract links ('[name]: target' definitions) into self.__links
        markdown = re.sub(r'\[.*?\]:.*?\n', self.__subLinkStorage, markdown)
        # Separate the slide content
        raw_slides = re.split(r'\n---', markdown)
        # Build the individual slide objects
        for raw in raw_slides:
            if raw:
                slide = self.__createSlide(raw)
                self.warehouse().addObject(slide)
        # Create the title slide and place it first, if a title was given
        if self.isParamValid('title'):
            name = self.name() + '-title'
            raw = '# ' + self.getParam('title') + '\n'
            options = {'show_in_contents':False, 'title':True, 'name':name, 'class':'center,middle'}
            slide = self.__createSlide(raw, **options)
            self.warehouse().insertObject(0, slide)
##
# Return the complete markdown for this slide set
def markdown(self):
# Create a list of all the slide markdown
output = []
# Extract the slide content
for slide in self.warehouse().activeObjects():
output.append(slide.markdown)
# Join the list with slide breaks
output = '\n---\n'.join(output)
# Append the links
for link in self.__links:
output += link + '\n'
return output
    ##
    # Sub method for storing wiki link shortcuts
    # @param match The re match of a '[name]: target' link definition line
    # @return '' so the link line is removed from the markdown
    def __subLinkStorage(self, match):
        # NOTE(review): r'\r\n' is the literal four-character string
        # backslash-r-backslash-n, not a CRLF sequence, so this replace is
        # likely a no-op on real text -- confirm whether '\r\n' was intended.
        self.__links.append(match.group(0).replace(r'\r\n',''))
        return ''
    ##
    # Create the a slide from raw markdown (private)
    # @param raw The raw markdown to build the slide from
    # @param kwargs Optional key, value pairs that override slide parameters
    # @return The constructed slide object
    def __createSlide(self, raw, **kwargs):
        # Get the default input parameters from the slide being created
        params = self.__factory.validParams('RemarkSlide')
        params.applyParams(self.parameters())
        # Apply the common properties from this class
        #for key in params.groupKeys('properties'):
        #  if self.isParamValid(key):
        #    params[key] = self.getParam(key)
        # Add the parent and markdown parameters
        params.addPrivateParam('_parent', self)
        # Over-ride parameters with optional key, value pairs
        for key, value in kwargs.iteritems():
            params[key] = value
        # Build the slide object
        slide = self.__factory.create(self.__slide_type, params)
        # Determine and set the slide name
        raw = slide.parseName(raw)
        # Apply the [./Slides] block from the input file, if this set
        # has a matching node for this slide
        if self.__root:
            node = self.__root.getNode(self.name()).getNode('Slides')
            if node:
                node = node.getNode(slide.name())
                if node:
                    print ' '*6 + 'Apply settings from input file'
                    self.__parser.extractParams('', slide.parameters(), node)
        # Parse the raw markdown and store it in the slide
        self._parseSlide(slide, raw)
        return slide
    ##
    # Method that calls the various parse methods for the slide content (protected)
    # This also applies settings from the input file, this method exists to
    # allow parent classes to modify slide settings
    # @param slide The slide object to populate
    # @param raw The raw markdown for this slide
    # @see INLDjangoWikiSet, INLCoverSet, INLMergeSet
    def _parseSlide(self, slide, raw):
        # Parse the content into Remark format and store the content in the slide
        raw = slide.parse(raw)
        raw = slide.parseImages(raw)
        slide.markdown = raw
##
# A helper that extracts the contents entries from each of the active
# slides (protected)
# @return A list of chunks; each chunk is a list of
#         (heading text, slide name, heading level, slide number) tuples,
#         at most 'contents_items_per_slide' entries per chunk
def _extractContents(self):
    # Markdown headings: one or more '#' at the start of a line.
    # Compiled once here instead of once per slide (loop-invariant).
    pattern = re.compile(r'^\s*(#+)\s+(.*)', re.MULTILINE)
    contents = []
    # Loop through all active slides
    for slide in self.warehouse().activeObjects():
        # Do nothing if the contents for the slides are disabled
        if not slide.getParam('show_in_contents'):
            continue
        # Build a tuple containing the table-of-contents information for this slide
        for m in pattern.finditer(slide.markdown):
            contents.append((m.group(2).strip(), slide.name(), len(m.group(1)), slide.number))
    # Separate contents into chunks based on the allowable size
    n = int(self.getParam('contents_items_per_slide'))
    return [contents[i:i+n] for i in range(0, len(contents), n)]
##
# A helper method that creates the empty contents slides (protected)
# @param n The number of contents slides to create
def _createContentsSlides(self, n):
    # Determine the table of contents header; an explicit 'contents_title'
    # wins, then the set title with ' Contents' appended, then a default.
    if self.isParamValid('contents_title'):
        contents_title = '# ' + self.getParam('contents_title') + '\n'
    elif self.isParamValid('title'):
        contents_title = '# ' + self.getParam('title') + ' Contents\n'
    else:
        contents_title = '# Contents\n'
    # Locate the slide insert location: directly after the title slide
    # when one exists, otherwise at the front of the set.
    if self.warehouse().hasObject(self.name() + '-title'):
        idx = 1
    else:
        idx = 0
    # Add the content(s) slides; only the first carries the header, and
    # none of the contents slides list themselves in the contents.
    for i in range(n):
        name = '-'.join([self.name(), 'contents', str(i)])
        options = {'name' : name, 'show_in_contents' : False}
        if i == 0:
            slide = self.__createSlide(contents_title, **options)
        else:
            slide = self.__createSlide('', **options)
        self.warehouse().insertObject(idx, slide)
        idx += 1
##
# Initialize contents (public)
# Creates and inserts the correct number of empty contents slides
# @see SlideSetWarehouse::__contents
def initContents(self):
    # The 'contents' flag in the input file gates the whole feature.
    if not self.getParam('contents'):
        return
    # One contents slide per chunk of extracted entries.
    self._createContentsSlides(len(self._extractContents()))
##
# Inserts the table-of-contents html into the already existing contents
# slides (public)
# initContents() must run first so the '<name>-contents-<i>' slides exist
# in the warehouse.
# @see SlideSetWarehouse::__contents
def contents(self):
    # Do nothing if the 'contents' flag is not set in the input file
    if not self.getParam('contents'):
        return
    # Extract the contents entries, already chunked per contents slide
    contents = self._extractContents()
    # Only headings at or above (<=) this level appear in the contents
    lvl = int(self.getParam('contents_level'))
    for i in range(len(contents)):
        output = ''
        for item in contents[i]:
            if item[2] <= lvl:
                title = item[0]              # the heading content
                name = item[1]               # slide name
                indent = 25*(item[2]-1)      # heading level indenting (px)
                idx = str(item[3])           # slide index
                height = '12px'
                # Build a link to the slide, by name
                link = '<a href="#' + name + '">'
                # Create the contents entry: heading on the left, slide
                # number floated to the right.
                output += '<p style="line-height:' + height + ';text-align:left;text-indent:' + str(indent) + 'px;">' + link + title + '</a>'
                output += '<span style="float:right;">' + link + idx + '</a>'
                output += '</span></p>\n'
        # Write the accumulated entries to the matching contents slide
        name = '-'.join([self.name(), 'contents', str(i)])
        self.warehouse().getObject(name).markdown += output
|
danielru/moose
|
python/PresentationBuilder/slidesets/RemarkSlideSet.py
|
Python
|
lgpl-2.1
| 10,006
|
[
"MOOSE"
] |
304a116d331c2a54dd782fdcd606d6f1882946800a5f1ec484ae1fb5088a8dd2
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
import inspect
import logging
import os
from django.conf import settings # noqa
from django.conf.urls.defaults import include # noqa
from django.conf.urls.defaults import patterns # noqa
from django.conf.urls.defaults import url # noqa
from django.core.exceptions import ImproperlyConfigured # noqa
from django.core.urlresolvers import reverse # noqa
from django.utils.datastructures import SortedDict # noqa
from django.utils.functional import SimpleLazyObject # noqa
from django.utils.importlib import import_module # noqa
from django.utils.module_loading import module_has_submodule # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import conf
from horizon.decorators import _current_component # noqa
from horizon.decorators import require_auth # noqa
from horizon.decorators import require_perms # noqa
from horizon import loaders
LOG = logging.getLogger(__name__)
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
    """Recursively wrap every view callback in ``urlpatterns``.

    Entries exposing a ``callback`` have it replaced (via ``_callback``)
    with the decorated version; entries exposing ``url_patterns`` (i.e.
    included URLconfs) are descended into.
    """
    for entry in urlpatterns:
        view = getattr(entry, 'callback', None)
        if view:
            entry._callback = decorator(view, *args, **kwargs)
        if getattr(entry, 'url_patterns', []):
            _decorate_urlconf(entry.url_patterns, decorator, *args, **kwargs)
class NotRegistered(Exception):
    """Raised when looking up a dashboard/panel that was never registered."""
    pass
class HorizonComponent(object):
    """Common base for Horizon components; enforces that a slug is set."""
    def __init__(self):
        super(HorizonComponent, self).__init__()
        # Every component (dashboard, panel, site) must define a slug.
        if not self.slug:
            raise ImproperlyConfigured('Every %s must have a slug.'
                                       % self.__class__)
    def __unicode__(self):
        fallback = u"Unnamed %s" % type(self).__name__
        return unicode(getattr(self, 'name', fallback))
    def _get_default_urlpatterns(self):
        """Return this component's URL patterns.

        An explicit ``urls`` attribute wins (relative to the component's
        package, falling back to an absolute dotted path); otherwise a
        ``urls`` submodule of the package is used; otherwise an empty
        pattern list.
        """
        package_string = '.'.join(self.__module__.split('.')[:-1])
        explicit = getattr(self, 'urls', None)
        if explicit:
            try:
                mod = import_module('.%s' % explicit, package_string)
            except ImportError:
                mod = import_module(explicit)
            return mod.urlpatterns
        # Try importing a urls.py from the component's own package.
        if module_has_submodule(import_module(package_string), 'urls'):
            return import_module('.urls', package_string).urlpatterns
        return patterns('')
class Registry(object):
    """Generic registration machinery.

    Subclasses must set ``_registerable_class`` to the class whose
    subclasses may be registered (``Panel`` for ``Dashboard``,
    ``Dashboard`` for ``Site``). The registry maps each registered class
    to a singleton instance of it.
    """
    def __init__(self):
        # Maps registered class -> its singleton instance.
        self._registry = {}
        if not getattr(self, '_registerable_class', None):
            raise ImproperlyConfigured('Subclasses of Registry must set a '
                                       '"_registerable_class" property.')
    def _register(self, cls):
        """Registers the given class.
        If the specified class is already registered then it is ignored.
        """
        if not inspect.isclass(cls):
            raise ValueError('Only classes may be registered.')
        elif not issubclass(cls, self._registerable_class):
            raise ValueError('Only %s classes or subclasses may be registered.'
                             % self._registerable_class.__name__)
        if cls not in self._registry:
            # Instantiate on first registration; the instance is what
            # later lookups return. The class remembers its registry.
            cls._registered_with = self
            self._registry[cls] = cls()
        return self._registry[cls]
    def _unregister(self, cls):
        """Unregisters the given class.
        If the specified class isn't registered, ``NotRegistered`` will
        be raised.
        """
        if not issubclass(cls, self._registerable_class):
            raise ValueError('Only %s classes or subclasses may be '
                             'unregistered.' % self._registerable_class)
        if cls not in self._registry.keys():
            raise NotRegistered('%s is not registered' % cls)
        del self._registry[cls]
        return True
    def _registered(self, cls):
        # Look up by class first; anything else is treated as a slug.
        if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
            found = self._registry.get(cls, None)
            if found:
                return found
        else:
            # Allow for fetching by slugs as well.
            for registered in self._registry.values():
                if registered.slug == cls:
                    return registered
        # Nothing matched: raise an informative NotRegistered, naming the
        # parent registry when this registry is itself registered.
        class_name = self._registerable_class.__name__
        if hasattr(self, "_registered_with"):
            parent = self._registered_with._registerable_class.__name__
            raise NotRegistered('%(type)s with slug "%(slug)s" is not '
                                'registered with %(parent)s "%(name)s".'
                                % {"type": class_name,
                                   "slug": cls,
                                   "parent": parent,
                                   "name": self.slug})
        else:
            slug = getattr(cls, "slug", cls)
            raise NotRegistered('%(type)s with slug "%(slug)s" is not '
                                'registered.' % {"type": class_name,
                                                 "slug": slug})
class Panel(HorizonComponent):
    """ A base class for defining Horizon dashboard panels.
    All Horizon dashboard panels should extend from this class. It provides
    the appropriate hooks for automatically constructing URLconfs, and
    providing permission-based access control.
    .. attribute:: name
        The name of the panel. This will be displayed in the
        auto-generated navigation and various other places.
        Default: ``''``.
    .. attribute:: slug
        A unique "short name" for the panel. The slug is used as
        a component of the URL path for the panel. Default: ``''``.
    .. attribute:: permissions
        A list of permission names, all of which a user must possess in order
        to access any view associated with this panel. This attribute
        is combined cumulatively with any permissions required on the
        ``Dashboard`` class with which it is registered.
    .. attribute:: urls
        Path to a URLconf of views for this panel using dotted Python
        notation. If no value is specified, a file called ``urls.py``
        living in the same package as the ``panel.py`` file is used.
        Default: ``None``.
    .. attribute:: nav
    .. method:: nav(context)
        The ``nav`` attribute can be either boolean value or a callable
        which accepts a ``RequestContext`` object as a single argument
        to control whether or not this panel should appear in
        automatically-generated navigation. Default: ``True``.
    .. attribute:: index_url_name
        The ``name`` argument for the URL pattern which corresponds to
        the index view for this ``Panel``. This is the view that
        :meth:`.Panel.get_absolute_url` will attempt to reverse.
    """
    # Overridable class attributes; see the class docstring.
    name = ''
    slug = ''
    urls = None
    nav = True
    index_url_name = "index"
    def __repr__(self):
        return "<Panel: %s>" % self.slug
    def get_absolute_url(self):
        """ Returns the default URL for this panel.
        The default URL is defined as the URL pattern with ``name="index"`` in
        the URLconf for this panel.
        """
        try:
            return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
                                                 self.slug,
                                                 self.index_url_name))
        except Exception as exc:
            # Logging here since this will often be called in a template
            # where the exception would be hidden.
            LOG.info("Error reversing absolute URL for %s: %s" % (self, exc))
            raise
    @property
    def _decorated_urls(self):
        # Wrap every view in this panel's URLconf with permission checks
        # and component tracking before it is include()d by the dashboard.
        urlpatterns = self._get_default_urlpatterns()
        # Apply access controls to all views in the patterns
        permissions = getattr(self, 'permissions', [])
        _decorate_urlconf(urlpatterns, require_perms, permissions)
        _decorate_urlconf(urlpatterns, _current_component, panel=self)
        # Return the three arguments to django.conf.urls.defaults.include
        return urlpatterns, self.slug, self.slug
class PanelGroup(object):
    """A container for a set of :class:`~horizon.Panel` classes.

    Iterating a ``PanelGroup`` yields each registered ``Panel`` instance
    it contains.

    .. attribute:: slug
        A unique string to identify this panel group. Required.
    .. attribute:: name
        A user-friendly name which will be used as the group heading in
        places such as the navigation. Default: ``None``.
    .. attribute:: panels
        A list of panel module names which should be contained within this
        grouping.
    """
    def __init__(self, dashboard, slug=None, name=None, panels=None):
        self.dashboard = dashboard
        # Fall back to class-level attributes when arguments are omitted.
        self.slug = slug or getattr(self, "slug", "default")
        self.name = name or getattr(self, "name", None)
        # Copy into a plain list so the group can be extended by others.
        self.panels = list(panels or getattr(self, "panels", []))
    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, self.slug)
    def __unicode__(self):
        return self.name
    def __iter__(self):
        instances = []
        for panel_name in self.panels:
            try:
                instances.append(self.dashboard.get_panel(panel_name))
            except NotRegistered as exc:
                # Unregistered panels are skipped, not fatal.
                LOG.debug(exc)
        return iter(instances)
class Dashboard(Registry, HorizonComponent):
    """ A base class for defining Horizon dashboards.
    All Horizon dashboards should extend from this base class. It provides the
    appropriate hooks for automatic discovery of :class:`~horizon.Panel`
    modules, automatically constructing URLconfs, and providing
    permission-based access control.
    .. attribute:: name
        The name of the dashboard. This will be displayed in the
        auto-generated navigation and various other places.
        Default: ``''``.
    .. attribute:: slug
        A unique "short name" for the dashboard. The slug is used as
        a component of the URL path for the dashboard. Default: ``''``.
    .. attribute:: panels
        The ``panels`` attribute can be either a flat list containing the name
        of each panel **module** which should be loaded as part of this
        dashboard, or a list of :class:`~horizon.PanelGroup` classes which
        define groups of panels as in the following example::
            class SystemPanels(horizon.PanelGroup):
                slug = "syspanel"
                name = _("System Panel")
                panels = ('overview', 'instances', ...)
            class Syspanel(horizon.Dashboard):
                panels = (SystemPanels,)
        Automatically generated navigation will use the order of the
        modules in this attribute.
        Default: ``[]``.
        .. warning::
            The values for this attribute should not correspond to the
            :attr:`~.Panel.name` attributes of the ``Panel`` classes.
            They should be the names of the Python modules in which the
            ``panel.py`` files live. This is used for the automatic
            loading and registration of ``Panel`` classes much like
            Django's ``ModelAdmin`` machinery.
            Panel modules must be listed in ``panels`` in order to be
            discovered by the automatic registration mechanism.
    .. attribute:: default_panel
        The name of the panel which should be treated as the default
        panel for the dashboard, i.e. when you visit the root URL
        for this dashboard, that's the panel that is displayed.
        Default: ``None``.
    .. attribute:: permissions
        A list of permission names, all of which a user must possess in order
        to access any panel registered with this dashboard. This attribute
        is combined cumulatively with any permissions required on individual
        :class:`~horizon.Panel` classes.
    .. attribute:: urls
        Optional path to a URLconf of additional views for this dashboard
        which are not connected to specific panels. Default: ``None``.
    .. attribute:: nav
        Optional boolean to control whether or not this dashboard should
        appear in automatically-generated navigation. Default: ``True``.
    .. attribute:: supports_tenants
        Optional boolean that indicates whether or not this dashboard includes
        support for projects/tenants. If set to ``True`` this dashboard's
        navigation will include a UI element that allows the user to select
        project/tenant. Default: ``False``.
    .. attribute:: public
        Boolean value to determine whether this dashboard can be viewed
        without being logged in. Defaults to ``False``.
    """
    # Required by Registry: only Panel subclasses may be registered here.
    _registerable_class = Panel
    name = ''
    slug = ''
    urls = None
    panels = []
    default_panel = None
    nav = True
    supports_tenants = False
    public = False
    def __repr__(self):
        return "<Dashboard: %s>" % self.slug
    def __init__(self, *args, **kwargs):
        super(Dashboard, self).__init__(*args, **kwargs)
        # Populated lazily by _autodiscover().
        self._panel_groups = None
    def get_panel(self, panel):
        """
        Returns the specified :class:`~horizon.Panel` instance registered
        with this dashboard.
        """
        return self._registered(panel)
    def get_panels(self):
        """
        Returns the :class:`~horizon.Panel` instances registered with this
        dashboard in order, without any panel groupings.
        """
        all_panels = []
        panel_groups = self.get_panel_groups()
        for panel_group in panel_groups.values():
            all_panels.extend(panel_group)
        return all_panels
    def get_panel_group(self, slug):
        # Raises KeyError for an unknown group slug.
        return self._panel_groups[slug]
    def get_panel_groups(self):
        """Returns the groups of panels; ungrouped panels land in "Other"."""
        registered = copy.copy(self._registry)
        panel_groups = []
        # Gather our known panels
        if self._panel_groups is not None:
            for panel_group in self._panel_groups.values():
                for panel in panel_group:
                    registered.pop(panel.__class__)
                panel_groups.append((panel_group.slug, panel_group))
        # Deal with leftovers (such as add-on registrations)
        if len(registered):
            slugs = [panel.slug for panel in registered.values()]
            new_group = PanelGroup(self,
                                   slug="other",
                                   name=_("Other"),
                                   panels=slugs)
            panel_groups.append((new_group.slug, new_group))
        return SortedDict(panel_groups)
    def get_absolute_url(self):
        """ Returns the default URL for this dashboard.
        The default URL is defined as the URL pattern with ``name="index"``
        in the URLconf for the :class:`~horizon.Panel` specified by
        :attr:`~horizon.Dashboard.default_panel`.
        """
        try:
            return self._registered(self.default_panel).get_absolute_url()
        except Exception:
            # Logging here since this will often be called in a template
            # where the exception would be hidden.
            LOG.exception("Error reversing absolute URL for %s." % self)
            raise
    @property
    def _decorated_urls(self):
        urlpatterns = self._get_default_urlpatterns()
        default_panel = None
        # Add in each panel's views except for the default view.
        for panel in self._registry.values():
            if panel.slug == self.default_panel:
                default_panel = panel
                continue
            urlpatterns += patterns('',
                url(r'^%s/' % panel.slug, include(panel._decorated_urls)))
        # Now the default view, which should come last
        if not default_panel:
            raise NotRegistered('The default panel "%s" is not registered.'
                                % self.default_panel)
        urlpatterns += patterns('',
            url(r'', include(default_panel._decorated_urls)))
        # Require login if not public.
        if not self.public:
            _decorate_urlconf(urlpatterns, require_auth)
        # Apply access controls to all views in the patterns
        permissions = getattr(self, 'permissions', [])
        _decorate_urlconf(urlpatterns, require_perms, permissions)
        _decorate_urlconf(urlpatterns, _current_component, dashboard=self)
        # Return the three arguments to django.conf.urls.defaults.include
        return urlpatterns, self.slug, self.slug
    def _autodiscover(self):
        """ Discovers panels to register from the current dashboard module. """
        if getattr(self, "_autodiscover_complete", False):
            return
        panels_to_discover = []
        panel_groups = []
        # If we have a flat iterable of panel names, wrap it again so
        # we have a consistent structure for the next step.
        if all([isinstance(i, basestring) for i in self.panels]):
            self.panels = [self.panels]
        # Now iterate our panel sets.
        for panel_set in self.panels:
            # Instantiate PanelGroup classes.
            if not isinstance(panel_set, collections.Iterable) and \
                    issubclass(panel_set, PanelGroup):
                panel_group = panel_set(self)
            # Check for nested tuples, and convert them to PanelGroups
            elif not isinstance(panel_set, PanelGroup):
                panel_group = PanelGroup(self, panels=panel_set)
            # Put our results into their appropriate places
            panels_to_discover.extend(panel_group.panels)
            panel_groups.append((panel_group.slug, panel_group))
        self._panel_groups = SortedDict(panel_groups)
        # Do the actual discovery
        package = '.'.join(self.__module__.split('.')[:-1])
        mod = import_module(package)
        for panel in panels_to_discover:
            try:
                # Snapshot the registry so a failed import leaves it intact.
                before_import_registry = copy.copy(self._registry)
                import_module('.%s.panel' % panel, package)
            except Exception:
                self._registry = before_import_registry
                # Only re-raise when the panel module actually exists;
                # a missing module is silently skipped.
                if module_has_submodule(mod, panel):
                    raise
        self._autodiscover_complete = True
    @classmethod
    def register(cls, panel):
        """ Registers a :class:`~horizon.Panel` with this dashboard. """
        panel_class = Horizon.register_panel(cls, panel)
        # Support template loading from panel template directories.
        panel_mod = import_module(panel.__module__)
        panel_dir = os.path.dirname(panel_mod.__file__)
        template_dir = os.path.join(panel_dir, "templates")
        if os.path.exists(template_dir):
            key = os.path.join(cls.slug, panel.slug)
            loaders.panel_template_dirs[key] = template_dir
        return panel_class
    @classmethod
    def unregister(cls, panel):
        """ Unregisters a :class:`~horizon.Panel` from this dashboard. """
        success = Horizon.unregister_panel(cls, panel)
        if success:
            # Remove the panel's template directory.
            key = os.path.join(cls.slug, panel.slug)
            if key in loaders.panel_template_dirs:
                del loaders.panel_template_dirs[key]
        return success
class Workflow(object):
    """Stub class whose construction always raises ``NotImplementedError``."""
    def __init__(self, *args, **kwargs):
        # Explicit ``self`` added; the behavior (always raising on
        # instantiation) is unchanged.
        raise NotImplementedError()
# ``empty`` is the sentinel SimpleLazyObject uses for "not yet evaluated";
# LazyURLPattern below compares against it.
try:
    from django.utils.functional import empty  # noqa
except ImportError:
    # Django 1.3 fallback: no ``empty`` sentinel exists there, so use None.
    empty = None
class LazyURLPattern(SimpleLazyObject):
    """A SimpleLazyObject that also proxies the sequence protocol.

    Each dunder forces evaluation of the wrapped URL pattern list before
    delegating to it.
    """
    def _load(self):
        # Force evaluation of the wrapped object if it hasn't happened yet.
        if self._wrapped is empty:
            self._setup()
        return self._wrapped
    def __iter__(self):
        return iter(self._load())
    def __reversed__(self):
        return reversed(self._load())
    def __len__(self):
        return len(self._load())
    def __getitem__(self, idx):
        return self._load()[idx]
class Site(Registry, HorizonComponent):
    """ The overarching class which encompasses all dashboards and panels. """
    # Required for registry
    _registerable_class = Dashboard
    name = "Horizon"
    namespace = 'horizon'
    slug = 'horizon'
    urls = 'horizon.site_urls'
    def __repr__(self):
        return u"<Site: %s>" % self.slug
    @property
    def _conf(self):
        # Shortcut to the global Horizon configuration dictionary.
        return conf.HORIZON_CONFIG
    @property
    def dashboards(self):
        # Ordered dashboard slugs from HORIZON_CONFIG (may be empty).
        return self._conf['dashboards']
    @property
    def default_dashboard(self):
        # Slug of the dashboard shown by default (may be None).
        return self._conf['default_dashboard']
    def register(self, dashboard):
        """ Registers a :class:`~horizon.Dashboard` with Horizon."""
        return self._register(dashboard)
    def unregister(self, dashboard):
        """ Unregisters a :class:`~horizon.Dashboard` from Horizon. """
        return self._unregister(dashboard)
    def registered(self, dashboard):
        # Accepts a Dashboard class or a slug; see Registry._registered.
        return self._registered(dashboard)
    def register_panel(self, dashboard, panel):
        # Register *panel* with the already-registered *dashboard*.
        dash_instance = self.registered(dashboard)
        return dash_instance._register(panel)
    def unregister_panel(self, dashboard, panel):
        dash_instance = self.registered(dashboard)
        if not dash_instance:
            raise NotRegistered("The dashboard %s is not registered."
                                % dashboard)
        return dash_instance._unregister(panel)
    def get_dashboard(self, dashboard):
        """ Returns the specified :class:`~horizon.Dashboard` instance. """
        return self._registered(dashboard)
    def get_dashboards(self):
        """ Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
        Orders dashboards according to the ``"dashboards"`` key in
        ``HORIZON_CONFIG`` or else returns all registered dashboards
        in alphabetical order.
        Any remaining :class:`~horizon.Dashboard` classes registered with
        Horizon but not listed in ``HORIZON_CONFIG['dashboards']``
        will be appended to the end of the list alphabetically.
        """
        if self.dashboards:
            registered = copy.copy(self._registry)
            dashboards = []
            for item in self.dashboards:
                dashboard = self._registered(item)
                dashboards.append(dashboard)
                registered.pop(dashboard.__class__)
            # Anything not named in the config goes at the end, sorted.
            if len(registered):
                extra = registered.values()
                extra.sort()
                dashboards.extend(extra)
            return dashboards
        else:
            dashboards = self._registry.values()
            dashboards.sort()
            return dashboards
    def get_default_dashboard(self):
        """ Returns the default :class:`~horizon.Dashboard` instance.
        If ``"default_dashboard"`` is specified in ``HORIZON_CONFIG``
        then that dashboard will be returned. If not, the first dashboard
        returned by :func:`~horizon.get_dashboards` will be returned.
        """
        if self.default_dashboard:
            return self._registered(self.default_dashboard)
        elif len(self._registry):
            return self.get_dashboards()[0]
        else:
            raise NotRegistered("No dashboard modules have been registered.")
    def get_user_home(self, user):
        """ Returns the default URL for a particular user.
        This method can be used to customize where a user is sent when
        they log in, etc. By default it returns the value of
        :meth:`get_absolute_url`.
        An alternative function can be supplied to customize this behavior
        by specifying a either a URL or a function which returns a URL via
        the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these
        would be valid::
            {"user_home": "/home",} # A URL
            {"user_home": "my_module.get_user_home",} # Path to a function
            {"user_home": lambda user: "/" + user.name,} # A function
            {"user_home": None,} # Will always return the default dashboard
        This can be useful if the default dashboard may not be accessible
        to all users. When user_home is missing from HORIZON_CONFIG,
        it will default to the settings.LOGIN_REDIRECT_URL value.
        """
        user_home = self._conf['user_home']
        if user_home:
            if callable(user_home):
                return user_home(user)
            elif isinstance(user_home, basestring):
                # Assume we've got a URL if there's a slash in it
                if user_home.find("/") != -1:
                    return user_home
                else:
                    # Otherwise treat it as a dotted path to a callable.
                    mod, func = user_home.rsplit(".", 1)
                    return getattr(import_module(mod), func)(user)
            # If it's not callable and not a string, it's wrong.
            raise ValueError('The user_home setting must be either a string '
                             'or a callable object (e.g. a function).')
        else:
            return self.get_absolute_url()
    def get_absolute_url(self):
        """ Returns the default URL for Horizon's URLconf.
        The default URL is determined by calling
        :meth:`~horizon.Dashboard.get_absolute_url`
        on the :class:`~horizon.Dashboard` instance returned by
        :meth:`~horizon.get_default_dashboard`.
        """
        return self.get_default_dashboard().get_absolute_url()
    @property
    def _lazy_urls(self):
        """ Lazy loading for URL patterns.
        This method avoids problems associated with attempting to evaluate
        the URLconf before the settings module has been loaded.
        """
        def url_patterns():
            return self._urls()[0]
        return LazyURLPattern(url_patterns), self.namespace, self.slug
    def _urls(self):
        """ Constructs the URLconf for Horizon from registered Dashboards. """
        urlpatterns = self._get_default_urlpatterns()
        self._autodiscover()
        # Discover each dashboard's panels.
        for dash in self._registry.values():
            dash._autodiscover()
        # Allow for override modules
        if self._conf.get("customization_module", None):
            customization_module = self._conf["customization_module"]
            bits = customization_module.split('.')
            mod_name = bits.pop()
            package = '.'.join(bits)
            mod = import_module(package)
            try:
                # Snapshot the registry so a failed import leaves it intact.
                before_import_registry = copy.copy(self._registry)
                import_module('%s.%s' % (package, mod_name))
            except Exception:
                self._registry = before_import_registry
                if module_has_submodule(mod, mod_name):
                    raise
        # Compile the dynamic urlconf.
        for dash in self._registry.values():
            urlpatterns += patterns('',
                url(r'^%s/' % dash.slug, include(dash._decorated_urls)))
        # Return the three arguments to django.conf.urls.defaults.include
        return urlpatterns, self.namespace, self.slug
    def _autodiscover(self):
        """ Discovers modules to register from ``settings.INSTALLED_APPS``.
        This makes sure that the appropriate modules get imported to register
        themselves with Horizon.
        """
        if not getattr(self, '_registerable_class', None):
            raise ImproperlyConfigured('You must set a '
                                       '"_registerable_class" property '
                                       'in order to use autodiscovery.')
        # Discover both dashboards and panels, in that order
        for mod_name in ('dashboard', 'panel'):
            for app in settings.INSTALLED_APPS:
                mod = import_module(app)
                try:
                    before_import_registry = copy.copy(self._registry)
                    import_module('%s.%s' % (app, mod_name))
                except Exception:
                    # Roll back partial registrations; only re-raise when
                    # the submodule actually exists in the app.
                    self._registry = before_import_registry
                    if module_has_submodule(mod, mod_name):
                        raise
class HorizonSite(Site):
    """
    A singleton implementation of Site such that all dealings with horizon
    get the same instance no matter what. There can be only one.
    """
    # Cached singleton instance, shared by all constructions.
    _instance = None
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # NOTE(review): super(Site, cls) skips Site in the MRO; since
            # Site defines no __new__ of its own here, this resolves the
            # same as super(HorizonSite, cls) -- confirm before changing.
            cls._instance = super(Site, cls).__new__(cls, *args, **kwargs)
        return cls._instance
# The one true Horizon: the module-level singleton Site instance that all
# dashboard/panel registration in this file goes through.
Horizon = HorizonSite()
|
neumerance/deploy
|
horizon/base.py
|
Python
|
apache-2.0
| 29,640
|
[
"VisIt"
] |
3a95eb66a173f150958a38979144f69924c15648e752bcb5e63910f47fcee377
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
"""Transformation Pipeline"""
calibration_image_src = 'camera_cal/calibration*.jpg'
FONT_SIZE = 200
def calibrate_camera(calibration_image_src='camera_cal/calibration*.jpg'):
    """Collect chessboard calibration points from a set of images.

    Searches each image matching the glob pattern for a 9x6 interior
    chessboard corner grid.

    @param calibration_image_src Glob pattern for the calibration images.
    @return dict with 'objpoints' (3D real-world grid points) and
            'imgpoints' (2D image-plane corner points); both lists are
            empty when no images match or no corners are found.
    """
    # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (8,5,0):
    # one grid point per interior corner, z fixed at 0.
    objp = np.zeros((6*9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane
    # Step through the list and search for chessboard corners
    for fname in glob.glob(calibration_image_src):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        # If found, add object points, image points
        if ret:
            objpoints.append(objp)     # 3D real-world grid points
            imgpoints.append(corners)  # 2D image-plane corner points
    return {'objpoints': objpoints, 'imgpoints': imgpoints}
def undistort_image(img, pts):
    """Undistort *img* using previously collected calibration points.

    @param img Image array, grayscale (2D) or color (3D).
    @param pts dict from calibrate_camera() with 'objpoints'/'imgpoints'.
    @return The undistorted image.
    """
    objpoints, imgpoints = pts['objpoints'], pts['imgpoints']
    # cv2.calibrateCamera expects the image size as (width, height).
    # The previous img.shape[::2] selected (height, channels) for color
    # images, which is not the image size at all.
    if len(img.shape) == 2:
        _shape = img.shape[::-1]
    else:
        _shape = img.shape[1::-1]
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, _shape, None, None)
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    return dst
def gaussian_blur(img, kernel_size=7):
    """Apply a Gaussian blur with a square kernel_size x kernel_size kernel.

    Sigma is 0, so OpenCV derives it from the kernel size.
    """
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def sobel_thresholding(img, kernel_size=5, threshold=(30,255), dim='x'):
    """One-dimensional Sobel gradient thresholding.

    @param img BGR image.
    @param kernel_size Sobel aperture size.
    @param threshold (thresh, maxval) pair passed to cv2.threshold.
    @param dim 'x' for the horizontal gradient, anything else for vertical.
    @return Binary image of the thresholded, 0-255 scaled absolute gradient.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Fixed: compare strings with ==; 'is' tests object identity and only
    # happened to work because CPython interns short literals.
    x, y = (1, 0) if dim == 'x' else (0, 1)
    sobel = cv2.Sobel(gray, cv2.CV_64F, x, y, ksize=kernel_size)
    sobel = np.absolute(sobel)
    # Scale the gradient magnitude to the full 0-255 range first.
    scaled_sobel = np.uint8(255*sobel/np.max(sobel))
    _, binary = cv2.threshold(scaled_sobel.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
    return binary
def direction_thresholding(img, kernel_size=15, threshold = (0.9, 1.1)):
    """Threshold by angle of the gradient (radians from arctan2).

    NOTE(review): ``absgraddir`` holds radians in [0, pi/2], so the
    astype('uint8') cast truncates every value to 0 or 1 before
    cv2.threshold sees it, and cv2.threshold's third argument is a
    maxval, not an upper bound. The (0.9, 1.1) band is therefore
    probably not applied as intended -- confirm against a pure-numpy
    mask like ((lo <= absgraddir) & (absgraddir <= hi)).
    """
    # Grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Calculate the x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)
    # Take the absolute value of the gradient direction,
    # apply a threshold, and create a binary image result
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    _, binary = cv2.threshold(absgraddir.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
    return binary
# color channel thresholding
def hls_thresholding(img, channel_ix, threshold=(150,255)):
    """Binary-threshold one channel of the HLS representation of *img*.

    @param channel_ix HLS channel index: 0=H, 1=L, 2=S.
    @param threshold (thresh, maxval) pair passed to cv2.threshold.
    """
    # Convert and pick the requested channel. NOTE(review): this assumes
    # RGB channel order, while other helpers here use COLOR_BGR2GRAY --
    # confirm the caller's channel order.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    channel = hls[:,:,channel_ix]
    _, binary = cv2.threshold(channel.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
    return binary
# color channel thresholding
def rgb_thresholding(img, channel_ix, threshold=(170,255)):
    """Binary-threshold a single raw color channel of *img*.

    No color conversion is performed: channel_ix indexes the image as
    stored (BGR order when the image was loaded with cv2.imread).
    @param threshold (thresh, maxval) pair passed to cv2.threshold.
    """
    channel = img[:,:,channel_ix]
    _, binary = cv2.threshold(channel.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
    return binary
# # laplacian threshold
def laplacian_thresholding(img, kernel=15):
    """Binary mask of strongly negative Laplacian responses.

    Keeps pixels whose Laplacian is below 15% of the most negative
    response (np.min is negative, so 0.15 * min is a negative cutoff).
    @return uint8 array of 0s and 1s.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    laplacian = cv2.Laplacian(gray,cv2.CV_32F,ksize= kernel)
    return (laplacian < 0.15 * np.min(laplacian)).astype(np.uint8)
# gray channel threshold
def gray_thresholding(img, threshold=(130,255)):
    """Binary-threshold the grayscale conversion of *img*.

    @param threshold (thresh, maxval) pair passed to cv2.threshold.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, gray_binary = cv2.threshold(gray.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
    return gray_binary
def define_vertices(img):
    """Build the triangular region-of-interest polygon for *img*.

    The triangle spans the bottom corners of the image, with its apex at
    the horizontal center, 3/5 of the way down.

    @return A one-element list holding an int32 array of shape (1, 3, 2),
            the polygon-sequence format cv2.fillPoly expects.
    """
    imshape = img.shape
    vertices = np.array([[(0, imshape[0]),
                          (imshape[1]/2., 3*imshape[0]/5.),
                          (imshape[1], imshape[0])]], dtype=np.int32)
    # cv2.fillPoly wants a sequence of polygons. The previous
    # 'if vertices.shape[1]:' guard was always true (shape[1] is the
    # constant 3), so wrap unconditionally.
    return [vertices]
def region_of_interest(img):
    """
    Applies an image mask.
    Only keeps the region of the image inside the triangle produced by
    define_vertices(); everything else, plus the top 70% of rows, is set
    to black.
    """
    # Defining a blank mask to start with
    vertices = define_vertices(img)
    mask = np.zeros_like(img)
    # Fill color must match the channel count of the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    # Filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    # Returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    # Additional layer of cropping: zero the top 70% of rows, keeping only
    # the bottom 30% where lane lines appear. (A slice replaces the old
    # per-row range() index -- same rows, idiomatic and faster.)
    masked_image[:int(7 * img.shape[0] / 10), :] = 0
    return masked_image
def lane_masking(img, threshold = 25):
    """Build the binary lane-pixel mask for a road image.

    Combines S-channel (HLS), gray and Laplacian thresholds, crops to the
    region of interest, and — when either image half's column-sum total falls
    below `threshold` — ORs in a Sobel x-AND-y mask as a fallback.
    """
    img = gaussian_blur(img)
    # S channel (index 2) of HLS
    s_binary = hls_thresholding(img, 2)
    gray_binary = gray_thresholding(img)
    laplacian_binary = laplacian_thresholding(img)
    # AND following OR gate
    combined_binary = cv2.bitwise_and(laplacian_binary, cv2.bitwise_or(s_binary, gray_binary))
    # Region of interest
    filtered_binary = region_of_interest(combined_binary)
    # If one side is not detected apply x AND y on that side
    # This happens after histogram filtering and region of interest
    _hist = filtered_binary.sum(axis=0)
    middlepoint = filtered_binary.shape[1] // 2
    _left_pixel_sum, _right_pixel_sum = _hist[:middlepoint].sum(), _hist[middlepoint:].sum()
    if _left_pixel_sum < threshold or _right_pixel_sum < threshold:
        print("appending additional binary masking")
        _second_pass_binary = post_lane_masking(img)
        filtered_second_pass = region_of_interest(_second_pass_binary)
        filtered_binary = cv2.bitwise_or(filtered_binary, filtered_second_pass)
    return filtered_binary
def post_lane_masking(img):
    """Fallback mask: require both x- and y-direction Sobel responses."""
    gx = sobel_thresholding(img)
    gy = sobel_thresholding(img, dim='y')
    return cv2.bitwise_and(gx, gy)
def histogram_filter(img, offset = 50):
    """Keep only two vertical bands around the strongest column response in
    each half of a binary image.

    Column sums act as a histogram; the argmax of each half is taken as a
    lane-line center and everything outside [center - offset, center + offset]
    is zeroed.

    Args:
        img: 2-D numpy array (binary or intensity image).
        offset: half-width, in columns, of the band kept around each peak.

    Returns:
        A filtered copy of `img`; the input is not modified.
    """
    filtered = img.copy()
    _hist = filtered.sum(axis=0)
    middlepoint = filtered.shape[1] // 2
    left_max_ix = _hist[:middlepoint].argmax()
    right_max_ix = _hist[middlepoint:].argmax() + middlepoint
    # BUG FIX: clamp lower bounds at 0.  A peak closer than `offset` to the
    # left edge produced a negative slice bound, and `filtered[:, :negative]`
    # wraps around and zeroes almost the whole image — including the peak.
    left_range = (max(left_max_ix - offset, 0), left_max_ix + offset)
    right_range = (max(right_max_ix - offset, 0), right_max_ix + offset)
    filtered[:, :left_range[0]] = 0
    filtered[:, left_range[1]:right_range[0]] = 0
    filtered[:, right_range[1]:] = 0
    return filtered
def fit_lanes(masked_image):
    """Fit one straight line per image half through the lit pixels of a
    binary mask, via per-side linear regression.

    Returns (left_points, right_points): arrays of (x, predicted_y) pairs,
    restricted to y values in the lower half of the image.
    """
    rows = masked_image.shape[0]
    cols = masked_image.shape[1]
    center = cols / 2  # float divide splits the image into left/right halves
    nz_ys, nz_xs = masked_image.nonzero()
    sample_left = np.arange(0, center, 1).reshape(-1, 1)
    sample_right = np.arange(center, cols, 1).reshape(-1, 1)
    pts = [(x, y) for y, x in zip(nz_ys, nz_xs)]
    # Split lit pixels by which half of the image they fall in.
    pts_left = np.array([p for p in pts if p[0] < center])
    pts_right = np.array([p for p in pts if p[0] >= center])
    model_left, model_right = LinearRegression(), LinearRegression()
    model_right.fit(pts_right[:, 0].reshape(-1, 1), pts_right[:, 1].reshape(-1, 1))
    model_left.fit(pts_left[:, 0].reshape(-1, 1), pts_left[:, 1].reshape(-1, 1))
    # Predict y for every sampled x on each side.
    pred_left = model_left.predict(sample_left).reshape(-1,)
    pred_right = model_right.predict(sample_right).reshape(-1,)
    sample_left = sample_left.reshape(-1,)
    sample_right = sample_right.reshape(-1,)
    # Mask Y values: keep only points inside the lower half of the frame.
    half = rows // 2
    lane_left = np.array([p for p in zip(sample_left, pred_left) if half < p[1] < rows])
    lane_right = np.array([p for p in zip(sample_right, pred_right) if half < p[1] < rows])
    return lane_left, lane_right
def retrieve_src_points(left, right, shape):
    """Pick the four corners of the perspective-transform source trapezoid.

    Args:
        left, right: (N, 2) arrays of (x, y) points on the fitted left/right
            lane lines (as returned by fit_lanes).
        shape: image height in pixels.

    Returns:
        np.float32 array [bottom-left, top-left, bottom-right, top-right].
    """
    y_cutoff = 65 * shape // 100
    # Indices of the lane points nearest the cutoff line on each side; these
    # become the top corners of the trapezoid.
    left_cutoff_ix = (left[:, 1] > y_cutoff).nonzero()[0].max()
    right_cutoff_ix = (right[:, 1] > y_cutoff).nonzero()[0].min()
    top_left, top_right = left[left_cutoff_ix], right[right_cutoff_ix]
    # Bottom corners: extreme x paired with the maximum y on each side.
    # We can use the points for all images.
    bottom_left = np.array([int(left[:, 0].min()), int(left[:, 1].max())])
    bottom_right = np.array([int(right[:, 0].max()), int(right[:, 1].max())])
    return np.float32([bottom_left, top_left, bottom_right, top_right])
def setup_transformation_pipeline(offset=10):
    """
    Set up the transformation pipeline

    Encapsulate the camera distortion and
    transformation pipeline that includes warping of the detected lane points

    `offset` is the y coordinate (pixels from the top) that the warped lane
    top edge is mapped to.
    """
    # One-off calibration data from calibrate_camera() — presumably camera
    # matrix / distortion coefficients; confirm at its definition.
    pts = calibrate_camera()
    images = glob.glob("test_images/*")
    # Pick the image with a straight lane to calibrate the camera
    img = cv2.imread(images[0])
    # run the same pipeline
    dst = undistort_image(img, pts)
    masked_img = lane_masking(dst)
    _img = region_of_interest(masked_img)
    # instead of polynomial fit
    # use linear regression to determine the src for perspective transformation
    left, right = fit_lanes(_img)
    src = retrieve_src_points(left, right, masked_img.shape[0])
    # Destination keeps the bottom corners' x, pulling the top corners up to
    # `offset`, yielding a bird's-eye rectangle.
    dst = np.float32([src[0], (src[0][0], offset), src[2], (src[2][0], offset)])
    return TransformationPipeline(pts, src, dst)
class PerspectiveTransformer:
    """Warps images between camera view and bird's-eye view.

    Both `src` and `dst` should be point mappings representative of a
    straight lane; the forward and inverse homographies are precomputed.
    """

    def __init__(self, src, dst):
        self.src = src
        self.dst = dst
        self.M = cv2.getPerspectiveTransform(src, dst)
        self.M_inv = cv2.getPerspectiveTransform(dst, src)

    def transform(self, img):
        """Warp `img` from the src space into the dst (bird's-eye) space."""
        height, width = img.shape[0], img.shape[1]
        return cv2.warpPerspective(img, self.M, (width, height), flags=cv2.INTER_LINEAR)

    def inverse_transform(self, img):
        """Warp `img` from the dst space back into the src space."""
        height, width = img.shape[0], img.shape[1]
        return cv2.warpPerspective(img, self.M_inv, (width, height), flags=cv2.INTER_LINEAR)
class TransformationPipeline():
    """Bundles camera undistortion, lane masking, perspective warping and
    histogram filtering into a single image -> binary warped-lane transform."""

    def __init__(self, camera_calibration, src, dst):
        # camera_calibration: calibration data as produced by calibrate_camera()
        self.camera_calibration = camera_calibration
        self.perspective_transformer = PerspectiveTransformer(src, dst)

    def transform(self, img):
        """Full pipeline: undistort -> mask -> warp -> histogram filter."""
        undistorted = self.undistort_image(img)
        binary_img = self.lane_masking(undistorted)
        warped_image = self.perspective_transform(binary_img)
        filtered_warped_image = self.histogram_filter(warped_image)
        return filtered_warped_image

    def undistort_image(self, img):
        return undistort_image(img, self.camera_calibration)

    def lane_masking(self, img):
        return lane_masking(img)

    def post_lane_masking(self, img, warped=None):
        # BUG FIX: the module-level post_lane_masking() takes a single
        # argument, so forwarding `warped` raised TypeError on every call.
        # `warped` is kept (and ignored) for backward compatibility.
        return post_lane_masking(img)

    def region_of_interest(self, img):
        # Filters the image for the lower trapezoid
        return region_of_interest(img)

    def perspective_transform(self, img):
        return self.perspective_transformer.transform(img)

    def inverse_perspective_transform(self, img):
        return self.perspective_transformer.inverse_transform(img)

    def histogram_filter(self, img):
        return histogram_filter(img)
|
dzorlu/sdc
|
advanced_lane_detection/image_transformation.py
|
Python
|
mit
| 11,694
|
[
"Gaussian"
] |
db28eee97d4588cfed91e06440cfd9ffbb43530599fe9f7dd3f97fb09023b5a4
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to generate inputs/outputs exclusion lists for GradientTape.
To use this script:
bazel run tensorflow/python/eager:gradient_input_output_exclusions -- \
$PWD/tensorflow/python/eager/pywrap_gradient_exclusions.cc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
# License + "machine generated" banner prepended verbatim to the generated
# C++ file.
_GENERATED_FILE_HEADER = """/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Inputs/Outputs exclusion lists for GradientTape.
//
// This file is MACHINE GENERATED! Do not edit.
// Generated by: tensorflow/python/eager/gen_gradient_input_output_exclusions.py
"""
# #include block emitted after the header in the generated file.
_INCLUDES = """
#include "tensorflow/python/eager/pywrap_gradient_exclusions.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
using tensorflow::string;
"""
# Op types skipped entirely when generating the exclusion tables.
_EXCLUDED_OPS = [
    # Composite ops with custom gradient functions.
    "If",
    "StatelessIf",
    "While",
    "StatelessWhile",
    "Case",
    # TF Lite. These ops only appear in OSS.
    # TODO(srbs): Find a better way to filter these out.
    "AudioMicrofrontend",
]
class _SubscriptUseTracker(transformer.Base):
    """Track uses of composite names, excluding certain names when subscripted."""

    def __init__(self, ctx, exclude_when_subscripted):
        super(_SubscriptUseTracker, self).__init__(ctx)
        # Qualified names whose subscript expressions should not be recursed
        # into (e.g. op.inputs, so op.inputs[i] isn't itself recorded as a read).
        self.exclude = exclude_when_subscripted
        # Qualified names read via attribute/subscript access.
        self.reads = set()
        # Names subscripted with something other than a plain index
        # (slices etc.) — callers treat these as "anything may be used".
        self.complex_reads = set()

    def visit_Attribute(self, node):
        """Visits attribute nodes in the AST."""
        if anno.hasanno(node, anno.Basic.QN):
            qn = anno.getanno(node, anno.Basic.QN)
            if isinstance(node.ctx, gast.Load):
                self.reads.add(qn)
        node = self.generic_visit(node)
        return node

    def visit_Subscript(self, node):
        """Visits nodes with subscript in the AST."""
        if anno.hasanno(node, anno.Basic.QN):
            qn = anno.getanno(node, anno.Basic.QN)
            if isinstance(node.ctx, gast.Load):
                self.reads.add(qn)
        # No QN on the subscript itself and the slice is not a simple Index:
        # record the closest resolvable name as a complex (non-index) read.
        elif not isinstance(node.slice, gast.Index):
            if anno.hasanno(node, anno.Basic.QN):
                self.complex_reads.add(anno.getanno(node, anno.Basic.QN))
            elif anno.hasanno(node.value, anno.Basic.QN):
                self.complex_reads.add(anno.getanno(node.value, anno.Basic.QN))
        value_qn = anno.getanno(node.value, anno.Basic.QN, None)
        if value_qn in self.exclude:
            # Don't descend into excluded names' value expressions directly.
            node.value = self.generic_visit(node.value)
        else:
            node.value = self.visit(node.value)
        node.slice = self.visit(node.slice)
        return node
class _FunctionCallsTracker(transformer.Base):
    """Tracks any function calls made with a given first argument name."""

    def __init__(self, ctx, first_argument_name):
        super(_FunctionCallsTracker, self).__init__(ctx)
        self.first_argument_name = first_argument_name
        # Resolved function objects that were called with the tracked name
        # as their first positional argument.
        self.calls = set()

    def visit_Name(self, node):
        node = self.generic_visit(node)
        # Attach the actual runtime object for names resolvable in the
        # function's namespace, so attribute chains can be followed.
        if isinstance(node.ctx, gast.Load) and node.id in self.ctx.info.namespace:
            anno.setanno(node, "static_value", self.ctx.info.namespace[node.id])
        return node

    def visit_Attribute(self, node):
        node = self.generic_visit(node)
        parent_val = anno.getanno(node.value, "static_value", default=None)
        # Propagate the static value through attribute access when possible.
        if parent_val is not None and hasattr(parent_val, node.attr):
            anno.setanno(node, "static_value", getattr(parent_val, node.attr))
        return node

    def visit_Call(self, node):
        node = self.generic_visit(node)
        if node.args:
            first_qn = anno.getanno(node.args[0], anno.Basic.QN, None)
            if first_qn == self.first_argument_name:
                fn_object = anno.getanno(node.func, "static_value", None)
                if fn_object is not None:
                    self.calls.add(fn_object)
        return node
# Sentinel: "indices are used but cannot be statically determined" — callers
# must then assume every input/output tensor is needed.
_ALL = object()
def _live_tensors(f, attr_name="inputs"):
    """Returns the indices of the used inputs.

    Note: This currently only handles direct index accesses e.g. op.inputs[1].
    If the function has slicing or list comprehension on attr_name then returns
    _ALL. This ensure that this is correct even if inefficient.

    Args:
        f: A grad function, taking the op as first argument.
        attr_name: op attr to track. "inputs" or "outputs".

    Returns:
        Either one of:
            * set of integers representing individual indices of inputs used
            * the value _ALL, if indices are used but cannot be determined which
            * empty set, if no inputs are used
    """
    # Parse the gradient function's source and run autograph's static
    # analyses (activity + liveness) over it.
    node, _ = parser.parse_entity(f, ())
    entity_info = transformer.EntityInfo(
        source_code=None,
        source_file=None,
        future_features=(),
        namespace=sys.modules[f.__module__].__dict__)
    ctx = transformer.Context(entity_info)
    graphs = cfg.build(node)
    node = qual_names.resolve(node)
    node = activity.resolve(node, ctx, None)
    node = liveness.resolve(node, ctx, graphs)
    # Qualified name of op.<attr_name> for the function's op parameter.
    op_arg_name = anno.getanno(node.args.args[0], anno.Basic.QN)
    op_inputs_outputs_name = qual_names.QN(op_arg_name, attr=attr_name)
    # Record subscripted reads of op.<attr_name>, without recursing into it.
    special_tracker = _SubscriptUseTracker(ctx, (op_inputs_outputs_name,))
    node = special_tracker.visit(node)
    live_vars_in = anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN)
    inputs_outputs_used_qns = set()
    for v in special_tracker.complex_reads:
        # Complicated patterns like op.inputs[:3]. Could be smarter about them
        # if they matter much.
        if v == op_inputs_outputs_name:
            return _ALL
    for v in live_vars_in:
        if v in special_tracker.reads:
            if (v.has_subscript() and v.parent == op_inputs_outputs_name):
                inputs_outputs_used_qns.add(v)
            elif v == op_inputs_outputs_name:
                # When op.{attr_name} is used directly, assume all tensors are
                # used for now. In that case, no point digging further.
                # TODO(mdan): We can descend into tuple expansions.
                return _ALL
    # Recurse into any function called with the op as its first argument;
    # it may touch further inputs/outputs on our behalf.
    function_calls_tracker = _FunctionCallsTracker(ctx, op_arg_name)
    node = function_calls_tracker.visit(node)
    input_output_indices = set()
    for called_f in function_calls_tracker.calls:
        child_indices = _live_tensors(called_f, attr_name=attr_name)
        if child_indices is _ALL:
            return _ALL
        input_output_indices |= child_indices
    # Extract the literal integer index from each op.<attr_name>[i] access.
    for v in inputs_outputs_used_qns:
        assert v.has_subscript()
        _, subscript = v.qn
        if not subscript.is_simple():
            # Not a number, assuming it can be anything.
            return _ALL
        subscript_val, = subscript.qn
        if not isinstance(subscript_val, qual_names.NumberLiteral):
            # Not a number, assuming it can be anything.
            return _ALL
        input_output_indices.add(subscript_val.value)
    return input_output_indices
def _get_num_inputs_outputs(op_type):
    """Returns (num_inputs, num_outputs).

    Args:
        op_type: String. The type of the Operation. Used to lookup the op in
            the registry.

    Returns:
        (num_inputs, num_outputs); -1 is substituted for either count if it
        cannot be statically inferred from the OpDef alone or if the OpDef
        lookup fails.
    """
    def _arg_count(arg_defs):
        # A number_attr/type_list_attr arg is list-valued, so the total count
        # is variable and unknowable from the OpDef alone.
        for arg in arg_defs:
            if arg.number_attr or arg.type_list_attr:
                return -1
        return len(arg_defs)

    op_def = op_def_registry.get(op_type)
    if not op_def:
        return -1, -1
    return _arg_count(op_def.input_arg), _arg_count(op_def.output_arg)
def get_entries(attr_name):
    """Returns the dict of entries.

    Each entry is of the form {op_name, {true|false, indices}}
    true: All values are unused.
    false: `indices` are the only unused indices.

    Note: ops for which all values are used are not printed.

    Args:
        attr_name: inputs or outputs.

    Returns:
        A dict from op_type to formatted entry in the dict.
    """
    assert attr_name in ["inputs", "outputs"]
    entries = {}
    spaces = " "
    for op_type in ops._gradient_registry.list():  # pylint: disable=protected-access
        if op_type in _EXCLUDED_OPS:
            continue
        # Number of inputs or outputs, -1 when not statically known.
        num_values = _get_num_inputs_outputs(op_type)[0 if attr_name == "inputs" else 1]
        gradient_fn = ops._gradient_registry.lookup(op_type)  # pylint: disable=protected-access
        if gradient_fn is None:
            # NotDifferentiable: every value is unused — but only emit the
            # entry when the count is statically known.
            if num_values != -1:
                entries[op_type] = spaces + "{\"%s\", {true, {}}}," % op_type
            continue
        used_tensors = _live_tensors(gradient_fn, attr_name=attr_name)
        if used_tensors is _ALL:
            # Cannot exclude anything; the op gets no entry at all.
            continue
        elif not used_tensors:
            entries[op_type] = spaces + "{\"%s\", {true, {}}}," % op_type
        else:
            all_tensors = set(range(num_values))
            unused_tensors = all_tensors - used_tensors
            if unused_tensors:
                entries[op_type] = spaces + "{\"%s\", {false, {%s}}}," % (
                    op_type, ", ".join(str(i) for i in sorted(list(unused_tensors))))
    return entries
def get_contents():
    """Returns contents for the generated file."""
    contents = ""
    contents += _GENERATED_FILE_HEADER + _INCLUDES
    # C++ lookup function for op input exclusions: a lazily-built static map
    # from op name to (all_unused, unused_indices).
    contents += """
bool OpGradientDoesntRequireInputIndices(
const string& op_name,
std::pair<bool, tensorflow::gtl::FlatSet<int>>** output) {
static tensorflow::gtl::FlatMap<
string, std::pair<bool, tensorflow::gtl::FlatSet<int>>>* m =
new tensorflow::gtl::FlatMap<
string, std::pair<bool, tensorflow::gtl::FlatSet<int>>>({
"""
    entries = get_entries("inputs")
    contents += "\n".join(entries[op_type] for op_type in sorted(entries))
    # VarHandleOp is appended manually; it is not in the gradient registry.
    contents += "\n {\"VarHandleOp\", {true, {}}},\n"
    contents += """ });
auto it = m->find(op_name);
if (it == m->end()) return false;
*output = &it->second;
return true;
}
"""
    # Same shape of lookup function, for op output exclusions.
    contents += """
bool OpGradientDoesntRequireOutputIndices(
const string& op_name,
std::pair<bool, tensorflow::gtl::FlatSet<int>>** output) {
static tensorflow::gtl::FlatMap<
string, std::pair<bool, tensorflow::gtl::FlatSet<int>>>* m =
new tensorflow::gtl::FlatMap<
string, std::pair<bool, tensorflow::gtl::FlatSet<int>>>({
"""
    entries = get_entries("outputs")
    contents += "\n".join(entries[op_type] for op_type in sorted(entries))
    contents += "\n {\"VarHandleOp\", {true, {}}},\n"
    contents += """ });
auto it = m->find(op_name);
if (it == m->end()) return false;
*output = &it->second;
return true;
}
"""
    return contents
def main(output_file):
    """Generate the exclusion lists and write them to `output_file`."""
    with open(output_file, "w") as out:
        out.write(get_contents())
if __name__ == "__main__":
    # CLI entry point: single positional argument naming the generated .cc file.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("output", metavar="O", type=str, help="Output file.")
    args = arg_parser.parse_args()
    main(args.output)
|
jhseu/tensorflow
|
tensorflow/python/eager/gradient_input_output_exclusions.py
|
Python
|
apache-2.0
| 12,367
|
[
"VisIt"
] |
b415910a5c2b54bff20ee008d12e90b235b94dfb97e380fd9e708a231110a52c
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
# Grid dimensions of the 2-D finite-difference problem.
n0 = 50
n1 = 50
# Set True to render matrices/vectors via El.Display.
display = False
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Stack two 2D finite-difference matrices on top of each other
# and make the last column dense
def StackedFD2D(N0,N1):
    """Build a (2*N0*N1) x (N0*N1) distributed sparse matrix: two 5-point
    finite-difference stencils stacked vertically, plus a dense last column.

    NOTE: Python 2 code — relies on xrange and integer '/' division.
    """
    A = El.DistSparseMatrix()
    height = 2*N0*N1
    width = N0*N1
    A.Resize(height,width)
    localHeight = A.LocalHeight()
    # At most 6 entries are queued per local row (5-point stencil + dense col).
    A.Reserve(6*localHeight)
    for sLoc in xrange(localHeight):
        s = A.GlobalRow(sLoc)
        if s < N0*N1:
            # Upper block: first stencil.
            x0 = s % N0
            x1 = s / N0  # integer division (Python 2)
            A.QueueUpdate( s, s, 11 )
            if x0 > 0:
                A.QueueUpdate( s, s-1, -10 )
            if x0+1 < N0:
                A.QueueUpdate( s, s+1, 20 )
            if x1 > 0:
                A.QueueUpdate( s, s-N0, -30 )
            if x1+1 < N1:
                A.QueueUpdate( s, s+N0, 40 )
        else:
            # Lower block: second stencil, indexed relative to the block start.
            sRel = s-N0*N1
            x0 = sRel % N0
            x1 = sRel / N0  # integer division (Python 2)
            A.QueueUpdate( s, sRel, -20 )
            if x0 > 0:
                A.QueueUpdate( s, sRel-1, -1 )
            if x0+1 < N0:
                A.QueueUpdate( s, sRel+1, -2 )
            if x1 > 0:
                A.QueueUpdate( s, sRel-N0, -3 )
            if x1+1 < N1:
                A.QueueUpdate( s, sRel+N0, 3 )
        # The dense last column
        # NOTE(review): in Python 2, -10/height floor-divides to -1 for any
        # height > 10 — confirm that integer weight is intended.
        A.QueueUpdate( s, width-1, -10/height );
    A.ProcessQueues()
    return A
# Build the operator and a Gaussian right-hand side (Python 2 script).
A = StackedFD2D(n0,n1)
b = El.DistMultiVec()
El.Gaussian( b, 2*n0*n1, 1 )
if display:
    El.Display( A, "A" )
    El.Display( b, "b" )
# Solver controls for the affine LP underlying LAV (least absolute value).
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.qsdCtrl.progress = True
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.outerEquil = True
ctrl.mehrotraCtrl.time = True
# Solve min ||Ax - b||_1 via LAV and time it.
startLAV = El.mpi.Time()
x = El.LAV( A, b, ctrl )
endLAV = El.mpi.Time()
if worldRank == 0:
    print "LAV time:", endLAV-startLAV, "seconds"
if display:
    El.Display( x, "x" )
# Residual norms for the LAV solution.
bTwoNorm = El.Nrm2( b )
bInfNorm = El.MaxNorm( b )
r = El.DistMultiVec()
El.Copy( b, r )
El.Multiply( El.NORMAL, -1., A, x, 1., r )
if display:
    El.Display( r, "r" )
rTwoNorm = El.Nrm2( r )
rOneNorm = El.EntrywiseNorm( r, 1 )
if worldRank == 0:
    print "|| b ||_2 =", bTwoNorm
    print "|| b ||_oo =", bInfNorm
    print "|| A x - b ||_2 =", rTwoNorm
    print "|| A x - b ||_1 =", rOneNorm
# Least-squares solution of the same system, for comparison with LAV.
startLS = El.mpi.Time()
xLS = El.LeastSquares(A,b)
endLS = El.mpi.Time()
if worldRank == 0:
    print "LS time:", endLS-startLS, "seconds"
if display:
    El.Display( xLS, "x_{LS}" )
rLS = El.DistMultiVec()
El.Copy( b, rLS )
El.Multiply( El.NORMAL, -1., A, xLS, 1., rLS )
if display:
    El.Display( rLS, "A x_{LS} - b" )
rLSTwoNorm = El.Nrm2(rLS)
rLSOneNorm = El.EntrywiseNorm(rLS,1)
if worldRank == 0:
    print "|| A x_{LS} - b ||_2 =", rLSTwoNorm
    print "|| A x_{LS} - b ||_1 =", rLSOneNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
    raw_input('Press Enter to exit')
|
birm/Elemental
|
examples/interface/LAV.py
|
Python
|
bsd-3-clause
| 2,954
|
[
"Gaussian"
] |
7c3d6de37955d3338774ce5d23410cf145b1e207a437378b01a2e1b3055bd437
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, re, time, datetime
from os.path import join, exists
from argparse import ArgumentParser
import xml.dom.minidom
import mx
"""
Context manager for a single gate task that can prevent the
task from executing or time and log its execution.
"""
class Task:
# None or a list of strings. If not None, only tasks whose title
# matches at least one of the substrings in this list will return
# a non-None value from __enter__. The body of a 'with Task(...) as t'
# statement should check 't' and exit immediately if it is None.
filters = None
dryRun = False
startAtFilter = None
filtersExclude = False
def __init__(self, title, tasks=None, disableJacoco=False):
self.tasks = tasks
self.title = title
self.skipped = False
if tasks is not None:
for t in tasks:
if t.title == title:
mx.abort('Gate task with title "' + title + '" is already defined')
if Task.startAtFilter:
assert not Task.filters
if Task.startAtFilter in title:
self.skipped = False
Task.startAtFilter = None
else:
self.skipped = True
elif Task.filters:
if Task.filtersExclude:
self.skipped = any([f in title for f in Task.filters])
else:
self.skipped = not any([f in title for f in Task.filters])
if not self.skipped:
self.start = time.time()
self.end = None
self.duration = None
self.disableJacoco = disableJacoco
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: BEGIN: ') + title)
def __enter__(self):
assert self.tasks is not None, "using Task with 'with' statement requires to pass the tasks list in the constructor"
if self.skipped:
return None
if self.disableJacoco:
self.jacacoSave = _jacoco
if Task.dryRun:
return None
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.skipped:
self.tasks.append(self.stop())
if self.disableJacoco:
global _jacoco
_jacoco = self.jacacoSave
@staticmethod
def _human_fmt(num):
for unit in ['', 'K', 'M', 'G']:
if abs(num) < 1024.0:
return "%3.1f%sB" % (num, unit)
num /= 1024.0
return "%.1fTB" % (num)
@staticmethod
def _diskstats():
if hasattr(os, 'statvfs'):
_, f_frsize, f_blocks, _, f_bavail, _, _, _, _, _ = os.statvfs(os.getcwd())
total = f_frsize * f_blocks
free = f_frsize * f_bavail
return ' [disk (free/total): {}/{}]'.format(Task._human_fmt(free), Task._human_fmt(total))
return ''
def stop(self):
self.end = time.time()
self.duration = datetime.timedelta(seconds=self.end - self.start)
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: END: ') + self.title + ' [' + str(self.duration) + ']' + Task._diskstats())
return self
def abort(self, codeOrMessage):
self.end = time.time()
self.duration = datetime.timedelta(seconds=self.end - self.start)
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: ABORT: ') + self.title + ' [' + str(self.duration) + ']' + Task._diskstats())
mx.abort(codeOrMessage)
return self
# (suite, runner) pairs registered via add_gate_runner().
_gate_runners = []
# (args, kwargs) argparse declarations registered via add_gate_argument().
_extra_gate_arguments = []
def add_gate_argument(*args, **kwargs):
    """
    Adds an argument declaration to the ArgumentParser used by the gate method.
    The positional/keyword arguments are forwarded verbatim to add_argument.
    """
    declaration = (args, kwargs)
    _extra_gate_arguments.append(declaration)
def add_gate_runner(suite, runner):
    """
    Registers `runner` to be invoked by the gate for `suite` once the common
    gate tasks have been executed. The runner is called as runner(args, tasks):

    args: the argparse.Namespace object containing result of parsing gate command line
    tasks: list of Task to which extra Tasks should be added
    """
    _gate_runners.append((suite, runner))
def add_omit_clean_args(parser):
    """Registers the standard clean-suppression flags on `parser`.

    Each -j/-n/-e/-d flag stores False into its `clean*` destination so the
    gate's clean step can be selectively skipped; -o skips them all.
    """
    parser.add_argument('-j', '--omit-java-clean', action='store_false', dest='cleanJava', help='omit cleaning Java native code')
    parser.add_argument('-n', '--omit-native-clean', action='store_false', dest='cleanNative', help='omit cleaning and building native code')
    parser.add_argument('-e', '--omit-ide-clean', action='store_false', dest='cleanIDE', help='omit ideclean/ideinit')
    parser.add_argument('-d', '--omit-dist-clean', action='store_false', dest='cleanDist', help='omit cleaning distributions')
    # BUG FIX: check_gate_noclean_arg() clears cleanDist too, so -o is
    # equivalent to all four flags; the help text previously omitted -d.
    parser.add_argument('-o', '--omit-clean', action='store_true', dest='noClean', help='equivalent to -j -n -e -d')
def gate_clean(cleanArgs, tasks, name='Clean'):
    """Run `mx clean` with `cleanArgs` as a gate task titled `name`."""
    with Task(name, tasks) as t:
        if t is not None:
            mx.command_function('clean')(cleanArgs)
def check_gate_noclean_arg(args):
    '''
    Translates the -o (noClean) option into the individual clean* sub-options
    on `args` and returns the matching arguments for the clean command
    (N.B. the IDE flag is currently ignored by clean).
    '''
    if args.noClean:
        # -o implies skipping every flavor of clean.
        args.cleanIDE = False
        args.cleanJava = False
        args.cleanNative = False
        args.cleanDist = False
    flag_by_attr = (('cleanNative', '--no-native'),
                    ('cleanJava', '--no-java'),
                    ('cleanDist', '--no-dist'))
    return [flag for attr, flag in flag_by_attr if not getattr(args, attr)]
def _warn_or_abort(msg, strict_mode):
    """Report `msg` as a hard abort in strict mode, as a warning otherwise."""
    if strict_mode:
        mx.abort(msg)
    else:
        mx.warn(msg)
def gate(args):
    """run the tests used to validate a push

    If this command exits with a 0 exit code, then the gate passed."""
    parser = ArgumentParser(prog='mx gate')
    add_omit_clean_args(parser)
    parser.add_argument('--all-suites', action='store_true', help='run gate tasks for all suites, not just the primary suite')
    parser.add_argument('--dry-run', action='store_true', help='just show the tasks that will be run without running them')
    parser.add_argument('-x', action='store_true', help='makes --task-filter an exclusion instead of inclusion filter')
    parser.add_argument('--jacocout', help='specify the output directory for jacoco report')
    parser.add_argument('--strict-mode', action='store_true', help='abort if a task cannot be executed due to missing tool configuration')
    # --task-filter and --start-at are mutually exclusive ways of restricting
    # which tasks run.
    filtering = parser.add_mutually_exclusive_group()
    filtering.add_argument('-t', '--task-filter', help='comma separated list of substrings to select subset of tasks to be run')
    filtering.add_argument('-s', '--start-at', help='substring to select starting task')
    # Suite-registered extra arguments (see add_gate_argument).
    for a, k in _extra_gate_arguments:
        parser.add_argument(*a, **k)
    args = parser.parse_args(args)
    cleanArgs = check_gate_noclean_arg(args)
    global _jacoco
    # Propagate the filtering options onto the Task class, which applies them.
    if args.dry_run:
        Task.dryRun = True
    if args.start_at:
        Task.startAtFilter = args.start_at
    elif args.task_filter:
        Task.filters = args.task_filter.split(',')
        Task.filtersExclude = args.x
    elif args.x:
        mx.abort('-x option cannot be used without --task-filter option')
    tasks = []
    # `total` times the whole gate; it is deliberately created without the
    # tasks list so it is never subject to filtering.
    total = Task('Gate')
    try:
        with Task('Versions', tasks) as t:
            if t:
                mx.command_function('version')(['--oneline'])
                mx.command_function('sversions')([])
        with Task('JDKReleaseInfo', tasks) as t:
            if t:
                # Log the 'release' file of every configured JDK.
                jdkDirs = os.pathsep.join([mx.get_env('JAVA_HOME', ''), mx.get_env('EXTRA_JAVA_HOMES', '')])
                for jdkDir in jdkDirs.split(os.pathsep):
                    release = join(jdkDir, 'release')
                    if exists(release):
                        mx.log('==== ' + jdkDir + ' ====')
                        with open(release) as fp:
                            mx.log(fp.read().strip())
        with Task('Pylint', tasks) as t:
            if t:
                if mx.command_function('pylint')([]) != 0:
                    _warn_or_abort('Pylint not configured correctly. Cannot execute Pylint task.', args.strict_mode)
        gate_clean(cleanArgs, tasks)
        with Task('Distribution Overlap Check', tasks) as t:
            if t:
                if mx.command_function('checkoverlap')([]) != 0:
                    t.abort('Found overlapping distributions.')
        with Task('Canonicalization Check', tasks) as t:
            if t:
                mx.log(time.strftime('%d %b %Y %H:%M:%S - Ensuring mx/projects files are canonicalized...'))
                if mx.command_function('canonicalizeprojects')([]) != 0:
                    t.abort('Rerun "mx canonicalizeprojects" and check-in the modified mx/suite*.py files.')
        # Optional Eclipse-compiler build, followed by the standard javac build.
        if mx.get_env('JDT'):
            with Task('BuildJavaWithEcj', tasks) as t:
                if t: mx.command_function('build')(['-p', '--no-native', '--warning-as-error'])
            gate_clean(cleanArgs, tasks, name='CleanAfterEcjBuild')
        else:
            _warn_or_abort('JDT environment variable not set. Cannot execute BuildJavaWithEcj task.', args.strict_mode)
        with Task('BuildJavaWithJavac', tasks) as t:
            if t: mx.command_function('build')(['-p', '--warning-as-error', '--no-native', '--force-javac'])
        with Task('IDEConfigCheck', tasks) as t:
            if t:
                if args.cleanIDE:
                    mx.command_function('ideclean')([])
                    mx.command_function('ideinit')([])
        eclipse_exe = mx.get_env('ECLIPSE_EXE')
        if eclipse_exe is not None:
            with Task('CodeFormatCheck', tasks) as t:
                if t and mx.command_function('eclipseformat')(['-e', eclipse_exe]) != 0:
                    t.abort('Formatter modified files - run "mx eclipseformat", check in changes and repush')
        else:
            _warn_or_abort('ECLIPSE_EXE environment variable not set. Cannot execute CodeFormatCheck task.', args.strict_mode)
        with Task('Checkstyle', tasks) as t:
            if t and mx.command_function('checkstyle')([]) != 0:
                t.abort('Checkstyle warnings were found')
        with Task('Checkheaders', tasks) as t:
            if t and mx.command_function('checkheaders')([]) != 0:
                t.abort('Checkheaders warnings were found')
        with Task('FindBugs', tasks) as t:
            if t and mx.command_function('findbugs')([]) != 0:
                t.abort('FindBugs warnings were found')
        # Reset any stale coverage data before the suite runners execute.
        if exists('jacoco.exec'):
            os.unlink('jacoco.exec')
        if args.jacocout is not None:
            _jacoco = 'append'
        else:
            _jacoco = 'off'
        # Suite-specific gate tasks registered via add_gate_runner().
        for suiteRunner in _gate_runners:
            suite, runner = suiteRunner
            if args.all_suites or suite is mx.primary_suite():
                runner(args, tasks)
        if args.jacocout is not None:
            mx.command_function('jacocoreport')([args.jacocout])
            _jacoco = 'off'
    except KeyboardInterrupt:
        total.abort(1)
    except BaseException as e:
        import traceback
        traceback.print_exc()
        total.abort(str(e))
    total.stop()
    # Summary table of per-task durations.
    mx.log('Gate task times:')
    for t in tasks:
        mx.log(' ' + str(t.duration) + '\t' + t.title)
    mx.log(' =======')
    mx.log(' ' + str(total.duration))
    if args.task_filter:
        Task.filters = None
def checkheaders(args):
    """check Java source headers against any required pattern

    For every Java project, reads the RegexpHeader 'header' regex from the
    project's Checkstyle config and verifies each .java file matches it.
    Returns the number of offending files.
    """
    failures = {}
    for p in mx.projects():
        if not p.isJavaProject():
            continue
        csConfig = join(mx.project(p.checkstyleProj).dir, '.checkstyle_checks.xml')
        if not exists(csConfig):
            mx.log('Cannot check headers for ' + p.name + ' - ' + csConfig + ' does not exist')
            continue
        dom = xml.dom.minidom.parse(csConfig)
        # Locate the RegexpHeader module's 'header' property: its value is
        # the required header regex.
        for module in dom.getElementsByTagName('module'):
            if module.getAttribute('name') == 'RegexpHeader':
                for prop in module.getElementsByTagName('property'):
                    if prop.getAttribute('name') == 'header':
                        value = prop.getAttribute('value')
                        matcher = re.compile(value, re.MULTILINE)
                        # package-info.java files are exempt from the check.
                        for sourceDir in p.source_dirs():
                            for root, _, files in os.walk(sourceDir):
                                for name in files:
                                    if name.endswith('.java') and name != 'package-info.java':
                                        f = join(root, name)
                                        with open(f) as fp:
                                            content = fp.read()
                                            if not matcher.match(content):
                                                failures[f] = csConfig
    # Python 2: dict.iteritems.
    for n, v in failures.iteritems():
        mx.log('{0}: header does not match RegexpHeader defined in {1}'.format(n, v))
    return len(failures)
# Current JaCoCo mode: 'off', 'on' or 'append' (see get_jacoco_agent_args).
_jacoco = 'off'
# Extra JaCoCo include patterns registered via add_jacoco_includes().
_jacoco_includes = []
def add_jacoco_includes(patterns):
    """
    Adds to the list of JaCoCo includes.

    :param patterns: an iterable of class-name patterns to include in coverage
    """
    for pattern in patterns:
        _jacoco_includes.append(pattern)
_jacoco_excluded_annotations = ['@Test']
def add_jacoco_excluded_annotations(annotations):
    """
    Adds to the list of annotations which if present denote a class that should
    be excluded from JaCoCo analysis.
    """
    for annotation in annotations:
        _jacoco_excluded_annotations.append(annotation)
def get_jacoco_agent_args():
    '''
    Gets the args to be added to a VM command line for injecting the JaCoCo agent
    if use of JaCoCo has been requested otherwise returns None.
    '''
    if _jacoco not in ('on', 'append'):
        return None
    jacocoagent = mx.library("JACOCOAGENT", True)

    # Start from the globally registered includes and honour the per-project
    # 'jacoco' attribute ('include' adds the project, 'exclude' removes it).
    includes = list(_jacoco_includes)
    baseExcludes = []
    for project in mx.projects():
        setting = getattr(project, 'jacoco', '')
        if setting == 'exclude':
            baseExcludes.append(project.name)
        elif setting == 'include':
            includes.append(project.name + '.*')

    def _not_covered(classes):
        # filter out specific classes which are already covered by a baseExclude package
        return [clazz for clazz in classes
                if not any(clazz.startswith(package) for package in baseExcludes)]

    excludes = []
    for project in mx.projects():
        if not project.isJavaProject():
            continue
        excludes += _not_covered(project.find_classes_with_annotations(None, _jacoco_excluded_annotations, includeInnerClasses=True).keys())
        excludes += _not_covered(project.find_classes_with_matching_source_line(None, lambda line: 'JaCoCo Exclude' in line, includeInnerClasses=True).keys())
    excludes += [package + '.*' for package in baseExcludes]

    agentOptions = {
        'append' : 'true' if _jacoco == 'append' else 'false',
        'bootclasspath' : 'true',
        'includes' : ':'.join(includes),
        'excludes' : ':'.join(excludes),
        'destfile' : 'jacoco.exec'
        }
    return ['-javaagent:' + jacocoagent.get_path(True) + '=' + ','.join(k + '=' + v for k, v in agentOptions.items())]
def jacocoreport(args):
    """create a JaCoCo coverage report
    Creates the report from the 'jacoco.exec' file in the current directory.
    Default output directory is 'coverage', but an alternative can be provided as an argument."""
    jacocoreport = mx.library("JACOCOREPORT", True)
    if len(args) > 1:
        mx.abort('jacocoreport takes only one argument : an output directory')
    out = args[0] if args else 'coverage'

    # Projects participate unless explicitly marked jacoco='exclude';
    # an empty setting counts as 'include'.
    includes = list(_jacoco_includes)
    for project in mx.projects():
        if getattr(project, 'jacoco', '') in ('include', ''):
            includes.append(project.name)

    includedirs = set()
    for project in mx.projects():
        if getattr(project, 'jacoco', '') == 'exclude':
            continue
        # substring match of any include pattern against the project dir
        if any(include in project.dir for include in includes):
            includedirs.add(project.dir)

    for directory in includedirs:
        mx.ensure_dir_exists(directory + '/bin')
    mx.run_java(['-jar', jacocoreport.get_path(True), '--in', 'jacoco.exec', '--out', out] + sorted(includedirs))
|
smarr/mxtool
|
mx_gate.py
|
Python
|
gpl-2.0
| 17,717
|
[
"VisIt"
] |
885c1779d18f87f59c7b9c962b99bda9865b2420fb67d213dff338192dafb129
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Convenience functions and classes to present views to the user"""
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
import logging
logger = logging.getLogger('camelot.view.workspace')
from camelot.admin.action import ApplicationActionGuiContext
from camelot.core.utils import ugettext as _
from camelot.view.model_thread import object_thread, post
from camelot.view.controls.action_widget import ( ActionLabel,
HOVER_ANIMATION_DISTANCE,
NOTIFICATION_ANIMATION_DISTANCE )
from camelot.view.art import Icon
class DesktopBackground(QtGui.QWidget):
    """
    A custom background widget for the desktop. This widget is contained
    by the first tab ('Start' tab) of the desktop workspace.

    It shows a grid of action buttons (rendered from the application's
    actions) above an info widget that displays the name and description
    of the action button currently hovered.
    """

    def __init__( self, gui_context, parent ):
        super(DesktopBackground, self).__init__( parent )
        self.gui_context = gui_context
        mainLayout = QtGui.QVBoxLayout()
        # grid that will hold the action buttons, filled in set_actions()
        actionButtonsLayout = QtGui.QGridLayout()
        actionButtonsLayout.setObjectName('actionButtonsLayout')
        actionButtonsLayout.setContentsMargins(200, 20, 200, 20)
        actionButtonInfoWidget = ActionButtonInfoWidget()
        actionButtonInfoWidget.setObjectName('actionButtonInfoWidget')
        mainLayout.addWidget(actionButtonInfoWidget, 0, Qt.AlignCenter)
        mainLayout.addLayout(actionButtonsLayout)
        self.setLayout(mainLayout)
        # Set a white background color
        palette = self.palette()
        self.setAutoFillBackground(True)
        palette.setBrush(QtGui.QPalette.Window, Qt.white)
        self.setPalette(palette)

    # This method is invoked when the desktop workspace decides or gets told
    # that the actions should be updated due to the presence of to be added
    # actions.
    @QtCore.pyqtSlot(object)
    def set_actions(self, actions):
        """
        Replace the currently displayed action buttons.

        :param actions: a list of EntityActions
        """
        #
        # Remove old actions
        #
        for actionButton in self.findChildren(ActionLabel):
            actionButton.deleteLater()
        # Make sure that the action buttons aren't visually split
        # up in two rows when there are e.g. only 3 of them.
        # So:
        # <= 3 action buttons: 1 row and 1, 2 or 3 columns;
        # >= 4 action buttons: 2 rows and 2, 3, 4 or 5 columns.
        # NOTE(review): '/' and xrange imply Python 2; under Python 3 this
        # division would yield a float and break xrange/range usage.
        actionButtonsLayoutMaxItemsPerRowCount = max((len(actions) + 1) / 2, 3)
        actionButtonsLayout = self.findChild(QtGui.QGridLayout, 'actionButtonsLayout')
        if actionButtonsLayout is not None:
            # first row of buttons
            for position in xrange(0, min( len(actions), actionButtonsLayoutMaxItemsPerRowCount) ):
                action = actions[position]
                actionButton = action.render( self.gui_context, self )
                actionButton.entered.connect(self.onActionButtonEntered)
                actionButton.left.connect(self.onActionButtonLeft)
                actionButton.setInteractive(True)
                actionButtonsLayout.addWidget(ActionButtonContainer(actionButton), 0, position, Qt.AlignCenter)
            # second row takes the remaining buttons
            for position in xrange(actionButtonsLayoutMaxItemsPerRowCount, len(actions)):
                action = actions[position]
                actionButton = action.render( self.gui_context, self )
                actionButton.entered.connect(self.onActionButtonEntered)
                actionButton.left.connect(self.onActionButtonLeft)
                actionButton.setInteractive(True)
                actionButtonsLayout.addWidget(ActionButtonContainer(actionButton), 1, position % actionButtonsLayoutMaxItemsPerRowCount, Qt.AlignCenter)

    @QtCore.pyqtSlot()
    def onActionButtonEntered(self):
        # show name/description of the hovered action in the info widget
        actionButton = self.sender()
        actionButtonInfoWidget = self.findChild(QtGui.QWidget, 'actionButtonInfoWidget')
        if actionButtonInfoWidget is not None:
            # @todo : get state should be called with a model context as first
            # argument
            post( actionButton.action.get_state,
                  actionButtonInfoWidget.setInfoFromState,
                  args = (None,) )

    @QtCore.pyqtSlot()
    def onActionButtonLeft(self):
        # clear the info widget when the pointer leaves an action button
        actionButtonInfoWidget = self.findChild(QtGui.QWidget, 'actionButtonInfoWidget')
        if actionButtonInfoWidget is not None:
            actionButtonInfoWidget.resetInfo()

    # This custom event handler makes sure that the action buttons aren't
    # drawn in the wrong position on this widget after the screen has been
    # e.g. maximized or resized by using the window handles.
    def resizeEvent(self, event):
        for actionButton in self.findChildren(ActionLabel):
            actionButton.resetLayout()
        event.ignore()

    # This slot is called after the navpane's animation has finished. During
    # this sliding animation, all action buttons are linearly moved to the right,
    # giving the user a small window in which he or she may cause visual problems
    # by already hovering the action buttons. This switch assures that the user
    # cannot perform mouse interaction with the action buttons until they're
    # static.
    @QtCore.pyqtSlot()
    def makeInteractive(self, interactive=True):
        for actionButton in self.findChildren(ActionLabel):
            actionButton.setInteractive(interactive)

    def refresh(self):
        # nothing to refresh on the static background
        pass
class ActionButtonContainer(QtGui.QWidget):
    """Thin wrapper widget around a single action button.

    Adds margins around the button so the hover/notification animations,
    which move the button by a few pixels, are not visually clipped, and
    forwards mouse presses to the contained button.
    """

    def __init__(self, actionButton, parent = None):
        super(ActionButtonContainer, self).__init__(parent)
        mainLayout = QtGui.QHBoxLayout()
        # Set some margins to avoid the ActionButton being visually clipped
        # when performing the hoverAnimation.
        mainLayout.setContentsMargins(2*NOTIFICATION_ANIMATION_DISTANCE,
                                      2*HOVER_ANIMATION_DISTANCE,
                                      2*NOTIFICATION_ANIMATION_DISTANCE,
                                      2*HOVER_ANIMATION_DISTANCE)
        mainLayout.addWidget(actionButton)
        self.setLayout(mainLayout)

    def mousePressEvent(self, event):
        # Send this event to the ActionButton that is contained by this widget.
        self.layout().itemAt(0).widget().onContainerMousePressEvent(event)
class ActionButtonInfoWidget(QtGui.QWidget):
    """Widget showing the name and description of the action button that is
    currently hovered on the desktop background.
    """

    def __init__(self, parent = None):
        super(ActionButtonInfoWidget, self).__init__(parent)
        mainLayout = QtGui.QHBoxLayout()
        # larger font for the action name label
        font = self.font()
        font.setPointSize(14)
        actionNameLabel = QtGui.QLabel()
        actionNameLabel.setFont(font)
        actionNameLabel.setFixedSize(250, 50)
        actionNameLabel.setAlignment(Qt.AlignCenter)
        actionNameLabel.setObjectName('actionNameLabel')
        actionDescriptionLabel = QtGui.QLabel()
        actionDescriptionLabel.setFixedSize(250, 200)
        actionDescriptionLabel.setObjectName('actionDescriptionLabel')
        mainLayout.addWidget(actionNameLabel, 0, Qt.AlignVCenter)
        mainLayout.addWidget(actionDescriptionLabel)
        self.setLayout(mainLayout)

    @QtCore.pyqtSlot( object )
    def setInfoFromState(self, state):
        """Fill the labels from an action state.

        :param state: object providing ``verbose_name`` and ``tooltip``
            attributes (the State of the hovered action).
        """
        actionNameLabel = self.findChild(QtGui.QLabel, 'actionNameLabel')
        if actionNameLabel is not None:
            # NOTE: `unicode` implies this module targets Python 2
            actionNameLabel.setText( unicode( state.verbose_name ) )
        actionDescriptionLabel = self.findChild(QtGui.QLabel, 'actionDescriptionLabel')
        if actionDescriptionLabel is not None:
            tooltip = unicode( state.tooltip or '' )
            actionDescriptionLabel.setText(tooltip)
            if tooltip:
                # Do not use show() or hide() in this case, since it will
                # cause the actionButtons to be drawn on the wrong position.
                # Instead, just set the width of the widget to either 0 or 250.
                actionDescriptionLabel.setFixedWidth(250)
            else:
                actionDescriptionLabel.setFixedWidth(0)

    def resetInfo(self):
        """Clear both labels (called when the pointer leaves a button)."""
        actionNameLabel = self.findChild(QtGui.QLabel, 'actionNameLabel')
        if actionNameLabel is not None:
            actionNameLabel.setText('')
        actionDescriptionLabel = self.findChild(QtGui.QLabel, 'actionDescriptionLabel')
        if actionDescriptionLabel is not None:
            actionDescriptionLabel.setText('')
class DesktopTabbar(QtGui.QTabBar):
    """Tab bar that requests a view-mode toggle on double click and enforces
    a minimum tab width of 160 pixels."""

    change_view_mode_signal = QtCore.pyqtSignal()

    def mouseDoubleClickEvent(self, event):
        # double clicking a tab (un)maximizes the workspace
        self.change_view_mode_signal.emit()
        event.accept()

    def tabSizeHint(self, index):
        hint = super(DesktopTabbar, self).tabSizeHint(index)
        return QtCore.QSize(max(160, hint.width()), hint.height())
class DesktopWorkspace(QtGui.QWidget):
    """
    A tab based workspace that can be used by views to display themselves.
    In essence this is a wrapper around QTabWidget to do some initial setup
    and provide it with a background widget.
    This was originallly implemented using the QMdiArea, but the QMdiArea has
    too many drawbacks, like not being able to add close buttons to the tabs
    in a decent way.

    .. attribute:: background

    The widget class to be used as the view for the uncloseable 'Start' tab.
    """

    # emitted with the newly active view, or None when the 'Start' tab is active
    view_activated_signal = QtCore.pyqtSignal(QtGui.QWidget)
    change_view_mode_signal = QtCore.pyqtSignal()
    # NOTE(review): declared but not emitted anywhere in this class - confirm
    # whether an external collaborator emits it
    last_view_closed_signal = QtCore.pyqtSignal()

    def __init__(self, app_admin, parent):
        """
        :param app_admin: the application admin object that provides the
            actions shown on the 'Start' tab
        :param parent: the parent widget
        """
        super(DesktopWorkspace, self).__init__(parent)
        self.gui_context = ApplicationActionGuiContext()
        self.gui_context.admin = app_admin
        self.gui_context.workspace = self
        self._app_admin = app_admin
        layout = QtGui.QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        # Setup the tab widget
        self._tab_widget = QtGui.QTabWidget( self )
        tab_bar = DesktopTabbar(self._tab_widget)
        tab_bar.setToolTip(_('Double click to (un)maximize'))
        tab_bar.change_view_mode_signal.connect(self._change_view_mode)
        self._tab_widget.setTabBar(tab_bar)
        self._tab_widget.setDocumentMode(True)
        self._tab_widget.setTabsClosable(True)
        self._tab_widget.tabCloseRequested.connect(self._tab_close_request)
        self._tab_widget.currentChanged.connect(self._tab_changed)
        layout.addWidget(self._tab_widget)
        # Setup the background widget
        self._background_widget = DesktopBackground( self.gui_context, self )
        self._app_admin.actions_changed_signal.connect(self.reload_background_widget)
        self._tab_widget.addTab(self._background_widget,
                                Icon('tango/16x16/actions/go-home.png').getQIcon(),
                                _('Home'))
        # hide the close button of the 'Start' tab so it cannot be closed;
        # which side the button is on depends on the platform style
        if tab_bar.tabButton(0, QtGui.QTabBar.RightSide):
            tab_bar.tabButton(0, QtGui.QTabBar.RightSide).hide()
        elif tab_bar.tabButton(0, QtGui.QTabBar.LeftSide):
            # mac for example has the close button on the left side by default
            tab_bar.tabButton(0, QtGui.QTabBar.LeftSide).hide()
        self.setLayout(layout)
        self.reload_background_widget()

    @QtCore.pyqtSlot()
    def reload_background_widget(self):
        """Asynchronously fetch the application actions and refresh the
        'Start' tab with them."""
        post(self._app_admin.get_actions, self._background_widget.set_actions)

    @QtCore.pyqtSlot()
    def _change_view_mode(self):
        self.change_view_mode_signal.emit()

    @QtCore.pyqtSlot(int)
    def _tab_close_request(self, index):
        """
        Handle the request for the removal of a tab at index.
        Note that only at-runtime added tabs are being closed, implying
        the immortality of the 'Start' tab.
        """
        if index > 0:
            view = self._tab_widget.widget(index)
            if view:
                # it's not enough to simply remove the tab, because this
                # would keep the underlying view widget alive
                view.deleteLater()
            self._tab_widget.removeTab(index)

    @QtCore.pyqtSlot(int)
    def _tab_changed(self, _index):
        """
        The active tab has changed, emit the view_activated signal.
        """
        self.view_activated_signal.emit(self.active_view())

    def active_view(self):
        """
        :return: The currently active view or None in case of the 'Start' tab.
        """
        i = self._tab_widget.currentIndex()
        if i == 0: # 'Start' tab
            return None
        return self._tab_widget.widget(i)

    @QtCore.pyqtSlot(QtCore.QString)
    def change_title(self, new_title):
        """
        Slot to be called when the tile of a view needs to change.
        Note: the title of the 'Start' tab cannot be overwritten.
        """
        # the request of the sender does not work in older pyqt versions
        # therefore, take the current index, notice this is not correct !!
        #
        # sender = self.sender()
        sender = self.active_view()
        if sender:
            index = self._tab_widget.indexOf(sender)
            if index > 0:
                self._tab_widget.setTabText(index, new_title)

    @QtCore.pyqtSlot(QtGui.QIcon)
    def change_icon(self, new_icon):
        """
        Slot to be called when the icon of a view needs to change.
        Note: the icon of the 'Start' tab cannot be overwritten.
        """
        # the request of the sender does not work in older pyqt versions
        # therefore, take the current index, notice this is not correct !!
        #
        # sender = self.sender()
        sender = self.active_view()
        if sender:
            index = self._tab_widget.indexOf(sender)
            if index > 0:
                self._tab_widget.setTabIcon(index, new_icon)

    def set_view(self, view, icon = None, title = '...'):
        """
        Remove the currently active view and replace it with a new view.
        """
        index = self._tab_widget.currentIndex()
        if index == 0: # 'Start' tab is currently visible.
            self.add_view(view, icon, title)
        else:
            self._tab_close_request(index)
            view.title_changed_signal.connect(self.change_title)
            view.icon_changed_signal.connect(self.change_icon)
            if icon:
                index = self._tab_widget.insertTab(index, view, icon, title)
            else:
                index = self._tab_widget.insertTab(index, view, title)
            self._tab_widget.setCurrentIndex(index)

    def add_view(self, view, icon = None, title = '...'):
        """
        Add a Widget implementing AbstractView to the workspace.
        """
        assert object_thread( self )
        view.title_changed_signal.connect(self.change_title)
        view.icon_changed_signal.connect(self.change_icon)
        if icon:
            index = self._tab_widget.addTab(view, icon, title)
        else:
            index = self._tab_widget.addTab(view, title)
        self._tab_widget.setCurrentIndex(index)

    def refresh(self):
        """Refresh all views on the desktop"""
        for i in range( self._tab_widget.count() ):
            self._tab_widget.widget(i).refresh()

    def close_all_views(self):
        """
        Remove all views, except the 'Start' tab, from the workspace.
        """
        # NOTE: will call removeTab until tab widget is cleared
        # but removeTab does not really delete the page objects
        #self._tab_widget.clear()
        #
        # Iterate from the last valid tab index down to (but excluding) the
        # 'Start' tab at index 0.  The previous implementation started at
        # count(), which is one past the last valid index and emitted a
        # spurious close request for a non-existing tab.
        for index in range(self._tab_widget.count() - 1, 0, -1):
            self._tab_widget.tabCloseRequested.emit(index)
top_level_windows = []
def show_top_level(view, parent):
    """Show a widget as a top level window
    :param view: the widget extend AbstractView
    :param parent: the widget with regard to which the top level
    window will be placed.
    """
    from camelot.view.register import register
    #
    # Register the view with reference to itself. This will keep
    # the Python object alive as long as the Qt object is not
    # destroyed. Hence Python will not trigger the deletion of the
    # view as long as the window is not closed
    #
    register( view, view )
    #
    # set the parent to None to avoid the window being destructed
    # once the parent gets destructed
    #
    view.setParent( None )
    view.setWindowFlags(QtCore.Qt.Window)
    #
    # Make the window title blank to prevent the something
    # like main.py or pythonw being displayed
    #
    view.setWindowTitle( u'' )
    view.title_changed_signal.connect( view.setWindowTitle )
    view.icon_changed_signal.connect( view.setWindowIcon )
    view.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    #
    # position the new window in the center of the same screen
    # as the parent
    #
    # NOTE(review): '/' division implies Python 2 integer arithmetic here;
    # under Python 3 QPoint would receive floats
    screen = QtGui.QApplication.desktop().screenNumber(parent)
    available = QtGui.QApplication.desktop().availableGeometry(screen)
    point = QtCore.QPoint(available.x() + available.width()/2,
                          available.y() + available.height()/2)
    point = QtCore.QPoint(point.x()-view.width()/2,
                          point.y()-view.height()/2)
    view.move( point )
    #view.setWindowModality(QtCore.Qt.WindowModal)
    view.show()
|
jeroendierckx/Camelot
|
camelot/view/workspace.py
|
Python
|
gpl-2.0
| 18,783
|
[
"VisIt"
] |
28a8b804ff34b3c1eb99941018c5d66b32b729879ee466685827885e9edcc50c
|
"""Installers for programming language specific libraries.
"""
import os
from fabric.api import env, cd, settings
from cloudbio import fabutils
from cloudbio.custom import shared
def r_library_installer(config):
    """Install R libraries using CRAN and Bioconductor.

    Generates a temporary R script describing the packages to install
    (see _make_install_script), runs it with Rscript on the target host,
    then removes it.

    :param config: dict describing the packages and repositories; keys used
        downstream include ``cranrepo``, ``biocrepo``, ``cran``, ``bioc``,
        ``cran-after-bioc``, ``github`` and ``update_packages``.
    """
    with shared._make_tmp_dir() as tmp_dir:
        with cd(tmp_dir):
            # Create an Rscript file with install details.
            out_file = os.path.join(tmp_dir, "install_packages.R")
            _make_install_script(out_file, config)
            # run the script and then get rid of it
            rscript = fabutils.find_cmd(env, "Rscript", "--version")
            if rscript:
                env.safe_run("%s %s" % (rscript, out_file))
            else:
                # best effort: warn instead of failing the whole deployment
                env.logger.warn("Rscript not found; skipping install of R libraries.")
            env.safe_run("rm -f %s" % out_file)
def _make_install_script(out_file, config):
    """Build the R install script at ``out_file`` by appending sections.

    The script sets the library path and CRAN repo, defines a
    ``repo.installer`` helper, then installs CRAN packages, Bioconductor
    packages, post-Bioconductor CRAN packages and GitHub packages as
    configured.
    """
    # start from an empty file on the remote host
    if env.safe_exists(out_file):
        env.safe_run("rm -f %s" % out_file)
    env.safe_run("touch %s" % out_file)
    lib_loc = os.path.join(env.system_install, "lib", "R", "site-library")
    env.safe_sudo("mkdir -p %s" % lib_loc)
    with settings(warn_only=True):
        # make the site-library writable by the deploy user; best effort
        env.safe_sudo("chown -R %s %s" % (env.user, lib_loc))
    # repository setup; note config["biocrepo"] is source()d as an R script
    repo_info = """
    .libPaths(c("%s"))
    library(methods)
    cran.repos <- getOption("repos")
    cran.repos["CRAN" ] <- "%s"
    options(repos=cran.repos)
    source("%s")
    """ % (lib_loc, config["cranrepo"], config["biocrepo"])
    env.safe_append(out_file, repo_info)
    # repo.installer returns the maybe.install closure (last evaluated
    # expression of the R function body), which installs a package only if
    # it is not already present
    install_fn = """
    repo.installer <- function(repos, install.fn, pkg_name_fn) {
      %s
      maybe.install <- function(pname) {
        check_name <- ifelse(is.null(pkg_name_fn), pname, pkg_name_fn(pname))
        if (!(is.element(check_name, installed.packages()[,1])))
          install.fn(pname)
      }
    }
    """
    if config.get("update_packages", True):
        update_str = """
        update.packages(lib.loc="%s", repos=repos, ask=FALSE)
        """ % lib_loc
    else:
        update_str = "\n"
    env.safe_append(out_file, install_fn % update_str)
    # plain CRAN packages
    std_install = """
    std.pkgs <- c(%s)
    std.installer = repo.installer(cran.repos, install.packages, NULL)
    lapply(std.pkgs, std.installer)
    """ % (", ".join('"%s"' % p for p in config['cran']))
    env.safe_append(out_file, std_install)
    # Bioconductor packages, if any are configured
    if len(config.get("bioc", [])) > 0:
        bioc_install = """
        bioc.pkgs <- c(%s)
        bioc.installer = repo.installer(biocinstallRepos(), biocLite, NULL)
        lapply(bioc.pkgs, bioc.installer)
        """ % (", ".join('"%s"' % p for p in config['bioc']))
        env.safe_append(out_file, bioc_install)
    # CRAN packages that depend on Bioconductor packages
    if config.get("cran-after-bioc"):
        std2_install = """
        std2.pkgs <- c(%s)
        lapply(std2.pkgs, std.installer)
        """ % (", ".join('"%s"' % p for p in config['cran-after-bioc']))
        env.safe_append(out_file, std2_install)
    # GitHub packages via devtools; entries look like "owner/repo@ref" and
    # get_pkg_name extracts the bare package name for the installed check
    if config.get("github"):
        dev_install = """
        library(devtools)
        github.pkgs <- c(%s)
        get_pkg_name <- function(orig) {
          unlist(strsplit(unlist(strsplit(orig, "/"))[2], "@"))[1]
        }
        github_installer = repo.installer(NULL, install_github, get_pkg_name)
        lapply(github.pkgs, github_installer)
        """ % (", ".join('"%s"' % p for p in config['github']))
        env.safe_append(out_file, dev_install)
|
heuermh/cloudbiolinux
|
cloudbio/libraries.py
|
Python
|
mit
| 3,405
|
[
"Bioconductor"
] |
d7aa0eb6652e7e6f5af97e1753bef49728b04ebbdb810d7596016a900d0031be
|
# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.netcdf._load_aux_factory` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import warnings
from iris.coords import DimCoord
from iris.cube import Cube
from iris.fileformats.netcdf import _load_aux_factory
from iris.tests import mock
class TestAtmosphereHybridSigmaPressureCoordinate(tests.IrisTest):
    """Tests building a HybridPressureFactory from netCDF formula terms."""

    def setUp(self):
        # Fake engine exposing the requires/provides structures that
        # _load_aux_factory reads; individual tests add formula_terms and
        # extra coordinates before calling it.
        standard_name = 'atmosphere_hybrid_sigma_pressure_coordinate'
        self.requires = dict(formula_type=standard_name)
        coordinates = [(mock.sentinel.b, 'b'), (mock.sentinel.ps, 'ps')]
        self.provides = dict(coordinates=coordinates)
        self.engine = mock.Mock(requires=self.requires, provides=self.provides)
        self.cube = mock.create_autospec(Cube, spec_set=True, instance=True)
        # Patch out the check_dependencies functionality.
        func = 'iris.aux_factory.HybridPressureFactory._check_dependencies'
        patcher = mock.patch(func)
        patcher.start()
        self.addCleanup(patcher.stop)

    def test_formula_terms_ap(self):
        # With an explicit 'ap' term the factory uses it directly as delta.
        self.provides['coordinates'].append((mock.sentinel.ap, 'ap'))
        self.requires['formula_terms'] = dict(ap='ap', b='b', ps='ps')
        _load_aux_factory(self.engine, self.cube)
        # Check cube.add_aux_coord method.
        self.assertEqual(self.cube.add_aux_coord.call_count, 0)
        # Check cube.add_aux_factory method.
        self.assertEqual(self.cube.add_aux_factory.call_count, 1)
        args, _ = self.cube.add_aux_factory.call_args
        self.assertEqual(len(args), 1)
        factory = args[0]
        self.assertEqual(factory.delta, mock.sentinel.ap)
        self.assertEqual(factory.sigma, mock.sentinel.b)
        self.assertEqual(factory.surface_air_pressure, mock.sentinel.ps)

    def test_formula_terms_a_p0(self):
        # With 'a' and 'p0' terms, a derived 'vertical pressure' coordinate
        # (a * p0) is built, added to the cube and used as delta.
        coord_a = DimCoord(np.arange(5), units='Pa')
        coord_p0 = DimCoord(10, units='1')
        coord_expected = DimCoord(np.arange(5) * 10, units='Pa',
                                  long_name='vertical pressure', var_name='ap')
        self.provides['coordinates'].extend([(coord_a, 'a'), (coord_p0, 'p0')])
        self.requires['formula_terms'] = dict(a='a', b='b', ps='ps', p0='p0')
        _load_aux_factory(self.engine, self.cube)
        # Check cube.coord_dims method.
        self.assertEqual(self.cube.coord_dims.call_count, 1)
        args, _ = self.cube.coord_dims.call_args
        self.assertEqual(len(args), 1)
        self.assertIs(args[0], coord_a)
        # Check cube.add_aux_coord method.
        self.assertEqual(self.cube.add_aux_coord.call_count, 1)
        args, _ = self.cube.add_aux_coord.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(args[0], coord_expected)
        self.assertIsInstance(args[1], mock.Mock)
        # Check cube.add_aux_factory method.
        self.assertEqual(self.cube.add_aux_factory.call_count, 1)
        args, _ = self.cube.add_aux_factory.call_args
        self.assertEqual(len(args), 1)
        factory = args[0]
        self.assertEqual(factory.delta, coord_expected)
        self.assertEqual(factory.sigma, mock.sentinel.b)
        self.assertEqual(factory.surface_air_pressure, mock.sentinel.ps)

    def test_formula_terms_p0_non_scalar(self):
        # A non-scalar p0 is invalid and must raise.
        coord_p0 = DimCoord(np.arange(5))
        self.provides['coordinates'].append((coord_p0, 'p0'))
        self.requires['formula_terms'] = dict(p0='p0')
        with self.assertRaises(ValueError):
            _load_aux_factory(self.engine, self.cube)

    def test_formula_terms_p0_bounded(self):
        # Bounds on the scalar p0 are ignored, with a warning.
        coord_a = DimCoord(np.arange(5))
        coord_p0 = DimCoord(1, bounds=[0, 2], var_name='p0')
        self.provides['coordinates'].extend([(coord_a, 'a'), (coord_p0, 'p0')])
        self.requires['formula_terms'] = dict(a='a', b='b', ps='ps', p0='p0')
        with warnings.catch_warnings(record=True) as warn:
            warnings.simplefilter('always')
            _load_aux_factory(self.engine, self.cube)
            self.assertEqual(len(warn), 1)
            msg = 'Ignoring atmosphere hybrid sigma pressure scalar ' \
                  'coordinate {!r} bounds.'.format(coord_p0.name())
            self.assertEqual(msg, str(warn[0].message))

    def _check_no_delta(self):
        # Shared assertions for the cases where no delta term can be built.
        # Check cube.add_aux_coord method.
        self.assertEqual(self.cube.add_aux_coord.call_count, 0)
        # Check cube.add_aux_factory method.
        self.assertEqual(self.cube.add_aux_factory.call_count, 1)
        args, _ = self.cube.add_aux_factory.call_args
        self.assertEqual(len(args), 1)
        factory = args[0]
        # Check that the factory has no delta term
        self.assertEqual(factory.delta, None)
        self.assertEqual(factory.sigma, mock.sentinel.b)
        self.assertEqual(factory.surface_air_pressure, mock.sentinel.ps)

    def test_formula_terms_ap_missing_coords(self):
        # An 'ap' term whose coordinate is missing warns and drops delta.
        self.requires['formula_terms'] = dict(ap='ap', b='b', ps='ps')
        with mock.patch('warnings.warn') as warn:
            _load_aux_factory(self.engine, self.cube)
        warn.assert_called_once_with("Unable to find coordinate for variable "
                                     "'ap'")
        self._check_no_delta()

    def test_formula_terms_no_delta_terms(self):
        self.requires['formula_terms'] = dict(b='b', ps='ps')
        _load_aux_factory(self.engine, self.cube)
        self._check_no_delta()

    def test_formula_terms_no_p0_term(self):
        coord_a = DimCoord(np.arange(5), units='Pa')
        self.provides['coordinates'].append((coord_a, 'a'))
        self.requires['formula_terms'] = dict(a='a', b='b', ps='ps')
        _load_aux_factory(self.engine, self.cube)
        self._check_no_delta()

    def test_formula_terms_no_a_term(self):
        coord_p0 = DimCoord(10, units='1')
        self.provides['coordinates'].append((coord_p0, 'p0'))
        self.requires['formula_terms'] = dict(a='p0', b='b', ps='ps')
        _load_aux_factory(self.engine, self.cube)
        self._check_no_delta()
# Allow running this test module directly, outside the full test suite.
if __name__ == '__main__':
    tests.main()
|
LukeC92/iris
|
lib/iris/tests/unit/fileformats/netcdf/test__load_aux_factory.py
|
Python
|
lgpl-3.0
| 6,986
|
[
"NetCDF"
] |
acd1c66885dcd5bfc4942d69d00eda0e4ce0a672bc422ed9a251a5a6905aac34
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwa(Package):
    """Burrow-Wheeler Aligner for pairwise alignment between DNA sequences."""

    homepage = "http://github.com/lh3/bwa"
    url = "https://github.com/lh3/bwa/releases/download/v0.7.15/bwa-0.7.15.tar.bz2"

    # Per-version URLs are explicit because the tarball location differs
    # between releases (release assets vs. archive tarball for 0.7.12).
    version('0.7.17', '82cba7ef695538e6a38b9d4156837381',
            url="https://github.com/lh3/bwa/releases/download/v0.7.17/bwa-0.7.17.tar.bz2")
    version('0.7.16a', 'c5115c9a5ea0406848500e4b23a7708c',
            url="https://github.com/lh3/bwa/releases/download/v0.7.16/bwa-0.7.16a.tar.bz2")
    version('0.7.15', 'fcf470a46a1dbe2f96a1c5b87c530554',
            url="https://github.com/lh3/bwa/releases/download/v0.7.15/bwa-0.7.15.tar.bz2")
    version('0.7.13', 'f094f609438511766c434178a3635ab4',
            url="https://github.com/lh3/bwa/releases/download/v0.7.13/bwa-0.7.13.tar.bz2")
    version('0.7.12', 'e24a587baaad411d5da89516ad7a261a',
            url='https://github.com/lh3/bwa/archive/0.7.12.tar.gz')

    depends_on('zlib')

    def install(self, spec, prefix):
        """Build with make and stage the binary, docs and man page by hand."""
        # point the Makefile at Spack's zlib instead of the system one
        filter_file(r'^INCLUDES=',
                    "INCLUDES=-I%s" % spec['zlib'].prefix.include, 'Makefile')
        filter_file(r'^LIBS=', "LIBS=-L%s " % spec['zlib'].prefix.lib,
                    'Makefile')
        make()
        # stage the files manually rather than via a make install target
        mkdirp(prefix.bin)
        install('bwa', join_path(prefix.bin, 'bwa'))
        set_executable(join_path(prefix.bin, 'bwa'))
        mkdirp(prefix.doc)
        install('README.md', prefix.doc)
        install('NEWS.md', prefix.doc)
        mkdirp(prefix.man.man1)
        install('bwa.1', prefix.man.man1)
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/bwa/package.py
|
Python
|
lgpl-2.1
| 2,823
|
[
"BWA"
] |
428f03021eb543fd94676f2ba61375a571e8c8c0dafb39d5b6cc2e249bef4d32
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
out_path):
import os
from genomicode import parallel
from genomicode import filelib
from genomicode import alignlib
from Betsy import module_utils as mlib
fastq_node, sai_node, orient_node, sample_node, reference_node = \
antecedents
fastq_files = mlib.find_merged_fastq_files(
sample_node.identifier, fastq_node.identifier)
sai_path = sai_node.identifier
assert filelib.dir_exists(sai_path)
orient = mlib.read_orientation(orient_node.identifier)
ref = alignlib.create_reference_genome(reference_node.identifier)
filelib.safe_mkdir(out_path)
metadata = {}
metadata["tool"] = "bwa %s" % alignlib.get_bwa_version()
# Technically, doesn't need the SampleGroupFile, since that's
# already reflected in the sai data. But better, because the
# sai data might not always be generated by BETSY.
# Find the merged fastq files.
# Find the sai files.
sai_filenames = filelib.list_files_in_path(
sai_path, endswith=".sai", case_insensitive=True)
assert sai_filenames, "No .sai files."
bwa = mlib.findbin("bwa")
# bwa samse -f <output.sam> <reference.fa> <input.sai> <input.fq>
# bwa sampe -f <output.sam> <reference.fa> <input_1.sai> <input_2.sai>
# <input_1.fq> <input_2.fq> >
# list of (pair1.fq, pair1.sai, pair2.fq, pair2.sai, output.sam)
# all full paths
jobs = []
for x in fastq_files:
sample, pair1_fq, pair2_fq = x
# The sai file should be in the format:
# <sai_path>/<sample>.sai Single end read
# <sai_path>/<sample>_1.sai Paired end read
# <sai_path>/<sample>_2.sai Paired end read
# Look for pair1_sai and pair2_sai.
pair1_sai = pair2_sai = None
for sai_filename in sai_filenames:
p, s, e = mlib.splitpath(sai_filename)
assert e == ".sai"
if s == sample:
assert not pair1_sai
pair1_sai = sai_filename
elif s == "%s_1" % (sample):
assert not pair1_sai
pair1_sai = sai_filename
elif s == "%s_2" % (sample):
assert not pair2_sai
pair2_sai = sai_filename
assert pair1_sai, "Missing .sai file: %s" % sample
if pair2_fq:
assert pair2_sai, "Missing .sai file 2: %s" % sample
if pair2_sai:
assert pair2_fq, "Missing .fq file 2: %s" % sample
sam_filename = os.path.join(out_path, "%s.sam" % sample)
log_filename = os.path.join(out_path, "%s.log" % sample)
x = sample, pair1_fq, pair1_sai, pair2_fq, pair2_sai, \
sam_filename, log_filename
jobs.append(x)
orientation = orient.orientation
#orientation = sample_node.data.attributes["orientation"]
assert orientation in ["single", "paired_fr", "paired_rf"]
# Make a list of bwa commands.
sq = mlib.sq
commands = []
for x in jobs:
sample, pair1_fq, pair1_sai, pair2_fq, pair2_sai, \
sam_filename, log_filename = x
if orientation == "single":
assert not pair2_fq
assert not pair2_sai
samse = "samse"
if orientation.startswith("paired"):
samse = "sampe"
x = [
sq(bwa),
samse,
"-f", sq(sam_filename),
sq(ref.fasta_file_full),
]
if orientation == "single":
x += [
sq(pair1_sai),
sq(pair1_fq),
]
else:
y = [
sq(pair1_sai),
sq(pair2_sai),
sq(pair1_fq),
sq(pair2_fq),
]
if orientation == "paired_rf":
y = [
sq(pair2_sai),
sq(pair1_sai),
sq(pair2_fq),
sq(pair1_fq),
]
x += y
x += [
">&", sq(log_filename),
]
x = " ".join(x)
commands.append(x)
metadata["commands"] = commands
metadata["num_cores"] = num_cores
parallel.pshell(commands, max_procs=num_cores)
# Make sure the analysis completed successfully.
x = [x[-2] for x in jobs]
filelib.assert_exists_nz_many(x)
return metadata
    def name_outfile(self, antecedents, user_options):
        """Return the default filename for this module's output SAM file."""
        return "bwa.sam"
|
jefftc/changlab
|
Betsy/Betsy/modules/convert_sai_to_sam_folder.py
|
Python
|
mit
| 5,144
|
[
"BWA"
] |
3deaf3c93fb59c4f715af5197a77b85044f012679709f3d6b2894d0e79d5364e
|
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# -*- coding: utf-8 -*-
import os
import json
import urllib2
import subprocess
name = ''
email = ''
options = { 'developer': '', 'android': '', 'ios': '', 'designer': '', 'web' : '',
'sublime': '', 'vim': '', 'zsh': '',
'animations': '', 'showhiddenfiles': '', 'autoupdate': '', }
# Check if Xcode Command Line Tools are installed
if os.system('xcode-select -p') != 0:
print "Installing XCode Tools"
os.system('xcode-select --install')
print "**************************************************************"
print "Install the XCode Command Line Tools and run this script again"
print "**************************************************************"
exit()
# Accept XCode License
# os.system('sudo xcodebuild -license accept')
# Sudo: Spectacle, ZSH, OSX Settings
print "\n\nWelcome... TO THE WORLD OF TOMORROW\n"
# Basic Info
while name == '':
name = raw_input("What's your name?\n").strip()
while email == '' or '@' not in email:
email = raw_input("What's your email?\n").strip()
# Setup Options
while options['designer'] not in ['y', 'n']:
options['designer'] = raw_input("Do you want to install Designer Tools? (%s) " % '|'.join(['y','n']))
while options['developer'] not in ['y', 'n']:
options['developer'] = raw_input("Do you want to install Developer Tools? (%s) " % '|'.join(['y','n']))
if options['developer'] == 'y':
while options['web'] not in ['y', 'n']:
options['web'] = raw_input("Do you want to install Web Developer Tools? (%s) " % '|'.join(['y','n']))
while options['android'] not in ['y', 'n']:
options['android'] = raw_input("Do you want to install Android Tools? (%s) " % '|'.join(['y','n']))
while options['ios'] not in ['y', 'n']:
options['ios'] = raw_input("Do you want to install iOS Tools? (%s) " % '|'.join(['y','n']))
# Other Options
while options['vim'] not in ['y', 'n']:
options['vim'] = raw_input("Do you want to install VIM with Awesome VIM? (%s) " % '|'.join(['y','n']))
while options['zsh'] not in ['y', 'n']:
options['zsh'] = raw_input("Do you want to install Oh My Zsh? (%s) " % '|'.join(['y','n']))
while options['animations'] not in ['y', 'n']:
options['animations'] = raw_input("Do you want to accelerate OSX animations? (%s) " % '|'.join(['y','n']))
while options['showhiddenfiles'] not in ['y', 'n']:
options['showhiddenfiles'] = raw_input("Do you want to show hidden files? (%s) " % '|'.join(['y','n']))
while options['autoupdate'] not in ['y', 'n']:
options['autoupdate'] = raw_input("Do you want to update your computer automatically? (Recommended) (%s) " % '|'.join(['y','n']))
def show_notification(text):
    """Pop up a macOS notification titled "Mac Setup" via AppleScript."""
    script = 'osascript -e \'display notification "' + text + '" with title "Mac Setup"\' > /dev/null'
    os.system(script)
print "Hi %s!" % name
print "You'll be asked for your password at a few points in the process"
print "*************************************"
print "Setting up your Mac..."
print "*************************************"
# Create a Private Key
# Only generated when no public key exists yet, so re-running the script is safe.
if not os.path.isfile(os.path.expanduser("~") + '/.ssh/id_rsa.pub') :
    print "Creating your Private Key"
    os.system('ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -N "" -C "%s"' % email)
# Set computer name & git info (as done via System Preferences → Sharing)
# NOTE(review): name/email are interpolated unquoted into shell strings; a
# name containing a double quote or backtick would break these commands —
# presumably acceptable for a personal setup script, but worth confirming.
os.system('sudo scutil --set ComputerName "%s"' % name)
os.system('sudo scutil --set HostName "%s"' % name)
os.system('sudo scutil --set LocalHostName "%s"' % name.replace(' ', '-')) # Doesn't support spaces
os.system('sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server NetBIOSName -string "%s"' % name)
os.system('git config --global user.name "%s"' % name)
os.system('git config --global user.email "%s"' % email)
# Install Brew & Brew Cask
print "Installing Brew & Brew Cask"
os.system('touch ~/.bash_profile')
os.system('/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"')
os.system('brew tap homebrew/cask-versions')
os.system('brew tap homebrew/cask-fonts')
os.system('brew update && brew upgrade && brew cleanup')
# Install Languages
print "Installing Git+NodeJS+Python+Ruby"
os.system('brew install git node python python3 ruby yarn')
os.system('brew link --overwrite git node python python3 ruby')
os.system('brew unlink python && brew link --overwrite python') # Fixes an issue with pip
os.system('brew install git-flow git-lfs')
os.system('git lfs install')
print "Installing Useful Stuff"
os.system('brew install graphicsmagick curl wget sqlite libpng libxml2 openssl')
os.system('brew install bat tldr tree')
print "Installing Command Line Tools"
os.system('npm install -g yo gulp-cli node-gyp serve ndb')
# OSX Tweaks & Essentials
print "Installing Quicklook Helpers"
os.system('brew install --cask qlcolorcode qlstephen qlmarkdown quicklook-csv quicklook-json webpquicklook suspicious-package qlprettypatch')
# Permissions Fix for macOS Catalina
os.system('xattr -d -r com.apple.quarantine ~/Library/QuickLook')
print "Installing Fonts"
# For some reason most fonts require SVN
os.system('brew install svn')
os.system('brew install --cask font-dosis font-droid-sans-mono-for-powerline font-open-sans font-open-sans-condensed font-roboto font-roboto-mono font-roboto-slab font-consolas-for-powerline font-inconsolata font-inconsolata-for-powerline font-lato font-menlo-for-powerline font-meslo-lg font-meslo-for-powerline font-noto-sans font-noto-serif font-source-sans-pro font-source-serif-pro font-ubuntu font-pt-mono font-pt-sans font-pt-serif font-fira-mono font-fira-mono-for-powerline font-fira-code font-fira-sans font-source-code-pro font-hack font-anka-coder font-jetbrains-mono')
print "Installing Essential Apps"
os.system('brew install --cask iterm2 spectacle the-unarchiver')
os.system('brew install --cask google-chrome firefox sourcetree visual-studio-code dropbox skype spotify slack vlc')
# Appropriate Software
if options['developer'] == 'y':
print "Installing Developer Tools"
os.system('brew install --cask docker ngrok sequel-pro cyberduck postman')
os.system('curl -o- https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash')
if options['android'] == 'y':
print "Installing Android Tools"
os.system('brew fetch --cask java')
show_notification("We need your password")
os.system('brew install --cask java')
os.system('brew install --cask android-studio')
os.system('brew install --cask android-platform-tools')
if options['ios'] == 'y':
print "Installing iOS Tools"
show_notification("We need your password")
os.system('sudo gem install cocoapods')
show_notification("We need your password")
os.system('sudo gem install fastlane --verbose')
if options['web'] == 'y':
print "Installing Web Developer Tools"
os.system('brew install --cask imageoptim imagealpha xnconvert')
if options['designer'] == 'y':
print "Installing Designer Tools"
os.system('brew install --cask invisionsync skala-preview')
os.system('brew install --cask adapter handbrake')
os.system('brew install --cask origami-studio')
if options['vim'] == 'y':
print "Installing VIM + Awesome VIM"
os.system('brew install vim')
os.system('git clone https://github.com/amix/vimrc.git ~/.vim_runtime')
os.system('sh ~/.vim_runtime/install_awesome_vimrc.sh')
# Oh-My-ZSH. Dracula Theme for iTerm2 needs to be installed manually
if options['zsh'] == 'y':
print "Installing Oh-My-Zsh with Dracula Theme"
show_notification("We need your password")
# Setup Adapted from https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh
if os.system('test -d ~/.oh-my-zsh') != 0:
os.system('umask g-w,o-w && git clone --depth=1 https://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh')
if os.system('test -f ~/.zshrc') != 0:
os.system('cp ~/.oh-my-zsh/templates/zshrc.zsh-template ~/.zshrc')
os.system('git clone git://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions')
os.system('git clone git://github.com/zsh-users/zsh-syntax-highlighting ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting')
# If the user has the default .zshrc tune it a bit
if (subprocess.call(['bash', '-c', 'diff <(tail -n +6 ~/.zshrc) <(tail -n +6 ~/.oh-my-zsh/templates/zshrc.zsh-template) > /dev/null']) == 0):
# Agnoster Theme
os.system('sed -i -e \'s/robbyrussell/agnoster/g\' ~/.zshrc &> /dev/null')
# Plugins
os.system('sed -i -e \'s/plugins=(git)/plugins=(git brew sublime node npm docker zsh-autosuggestions zsh-syntax-highlighting colored-man-pages copydir copyfile extract)/g\' ~/.zshrc &> /dev/null')
# Customizations
os.system('echo "alias dog=\'bat\'" >> ~/.zshrc')
# Don't show the user in the prompt
os.system('echo "DEFAULT_USER=\`whoami\`" >> ~/.zshrc')
os.system('echo "export NVM_DIR=\"\$HOME/.nvm\"\n[ -s \"\$NVM_DIR/nvm.sh\" ] && . \"\$NVM_DIR/nvm.sh\" # This loads nvm" >> ~/.zshrc')
# Remove the 'last login' message
os.system('touch ~/.hushlogin')
os.system('git clone https://github.com/dracula/iterm.git ~/Desktop/dracula-theme/')
# Random OSX Settings
print "Tweaking OSX Settings"
if options['showhiddenfiles'] == 'y':
# Finder: show hidden files by default
os.system('defaults write com.apple.finder AppleShowAllFiles -bool true')
# Finder: show all filename extensions
os.system('defaults write NSGlobalDomain AppleShowAllExtensions -bool true')
# Finder: allow text selection in Quick Look
os.system('defaults write com.apple.finder QLEnableTextSelection -bool true')
# Check for software updates daily
os.system('defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1')
# Disable auto-correct
#os.system('defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false')
# Require password immediately after sleep or screen saver begins
os.system('defaults write com.apple.screensaver askForPassword -int 1')
os.system('defaults write com.apple.screensaver askForPasswordDelay -int 0')
# Show the ~/Library folder
os.system('chflags nohidden ~/Library')
# Don’t automatically rearrange Spaces based on most recent use
os.system('defaults write com.apple.dock mru-spaces -bool false')
# Prevent Time Machine from prompting to use new hard drives as backup volume
os.system('defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true')
if options['animations'] == 'y':
print "Tweaking System Animations"
os.system('defaults write NSGlobalDomain NSWindowResizeTime -float 0.1')
os.system('defaults write com.apple.dock expose-animation-duration -float 0.15')
os.system('defaults write com.apple.dock autohide-delay -float 0')
os.system('defaults write com.apple.dock autohide-time-modifier -float 0.3')
os.system('defaults write NSGlobalDomain com.apple.springing.delay -float 0.5')
os.system('killall Dock')
if options['autoupdate'] == 'y':
print "Enabling Automatic Brew Updates & Upgrades"
os.system('brew tap domt4/autoupdate')
os.system('brew autoupdate --start --upgrade')
# Make Google Chrome the default browser
os.system('open -a "Google Chrome" --args --make-default-browser')
# Open Spectacle (Needs to be enabled manually)
os.system('open -a "Spectacle"')
# Open Dropbox
os.system('open -a "Dropbox"')
# Clean Up
os.system('brew cleanup')
# Mute startup sound
show_notification("We need your password")
os.system('sudo nvram SystemAudioVolume=%00')
print ""
print ""
print "*************************************"
print "Enabling FileVault"
os.system('sudo fdesetup enable')
print ""
print "*************************************"
print "Your SSH Public Key Is:"
with open(os.path.expanduser("~") + '/.ssh/id_rsa.pub', 'r') as f:
print f.read()
print ""
if options['zsh'] == 'y':
print "*************************************"
print "Remember to set up iTerm2:"
print "* Go to iTerm2 > Preferences > Profiles > Colors Tab"
print " * Load Presets..."
print " * Import..."
print " * Pick Desktop > dracula-theme > iterm > Dracula.itermcolors"
print "* Go to iTerm2 > Preferences > Profiles > Text Tab"
print " * Regular Font"
print " * 12pt Menlo for Powerline Font"
print ""
# NOTE(review): options['sublime'] is initialised to '' and no prompt above
# ever sets it, so this branch is currently unreachable — confirm whether the
# Sublime Text question was removed intentionally or is simply missing.
if options['sublime'] == 'y':
    print "*************************************"
    print "Please launch Sublime Text to finish setup"
    print "Material Theme needs to be enabled manually"
    print "On User Preferences, add: \"theme\": \"Material-Theme.sublime-theme\""
    print ""
print "*************************************"
print "Remember to restart your Mac"
print "*************************************"
show_notification("All done! Enjoy your new macOS!")
# Change the shell if necessary
if options['zsh'] == 'y':
os.system('chsh -s /bin/zsh &> /dev/null')
|
Aerolab/setup
|
setup.py
|
Python
|
mit
| 12,816
|
[
"GULP"
] |
c45b08c248a51645427caa5f349a22856b042f6994bce6f31b00a47040b3e355
|
"""
This is a module for XPS analysis. It is modelled after the Galore package (https://github.com/SMTG-UCL/galore), but
with some modifications for easier analysis from pymatgen itself. Please cite the following original work if you use
this::
Adam J. Jackson, Alex M. Ganose, Anna Regoutz, Russell G. Egdell, David O. Scanlon (2018). Galore: Broadening and
weighting for simulation of photoelectron spectroscopy. Journal of Open Source Software, 3(26), 773,
doi: 10.21105/joss.00773
You may wish to look at the optional dependency galore for more functionality such as plotting and other cross-sections.
Note that the atomic_subshell_photoionization_cross_sections.csv has been reparsed from the original compilation::
Yeh, J. J.; Lindau, I. Atomic Subshell Photoionization Cross Sections and Asymmetry Parameters: 1 ⩽ Z ⩽ 103.
Atomic Data and Nuclear Data Tables 1985, 32 (1), 1–155. https://doi.org/10.1016/0092-640X(85)90016-6.
This version contains all detailed information for all orbitals.
"""
import collections
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from pymatgen.core.periodic_table import Element
from pymatgen.core.spectrum import Spectrum
from pymatgen.electronic_structure.dos import CompleteDos
def _load_cross_sections(fname):
    """Parse the Yeh/Lindau atomic subshell photoionization cross-section table.

    :param fname: Path to a CSV with ``element``, ``orbital`` and ``weight``
        columns.
    :return: dict of ``{element symbol: {orbital type: cross-section per
        electron}}``. Elements with Z > 92 are skipped (no tabulated data).
    """
    data = pd.read_csv(fname)
    d = collections.defaultdict(dict)
    for row in data.itertuples():
        sym = row.element
        el = Element(sym)
        if el.Z > 92:
            continue
        orb = row.orbital
        shell = int(orb[0])
        orbtype = orb[1]
        # Electron count for this (shell, subshell) from the element's full
        # electronic structure; None when the subshell is unoccupied.
        nelect = next(
            (n for sh, typ, n in el.full_electronic_structure
             if sh == shell and typ == orbtype),
            None,
        )
        if nelect is not None:
            # NOTE: keyed by orbital *type* only, so a later shell of the same
            # type (e.g. 3p after 2p) overwrites the earlier entry — this
            # matches the original behaviour.
            d[sym][orbtype] = row.weight / nelect
    return d
CROSS_SECTIONS = _load_cross_sections(Path(__file__).parent / "atomic_subshell_photoionization_cross_sections.csv")
class XPS(Spectrum):
    """
    Class representing an X-ray photoelectron spectrum.
    """

    XLABEL = "Binding Energy (eV)"
    YLABEL = "Intensity"

    @classmethod
    def from_dos(cls, dos: CompleteDos):
        """
        Weight a projected DOS by atomic subshell photoionization
        cross-sections to simulate an XPS spectrum.

        :param dos: CompleteDos object with projected element-orbital DOS.
            Can be obtained from Vasprun.get_complete_dos.
        :return: XPS, with energies negated (binding-energy convention) and
            intensities normalized to a maximum of 1.
        """
        total = np.zeros(dos.energies.shape)
        for el in dos.structure.composition.keys():
            spd_dos = dos.get_element_spd_dos(el)
            for orb, pdos in spd_dos.items():
                # CROSS_SECTIONS only covers Z <= 92; using .get on the outer
                # dict avoids a KeyError for heavier elements and falls
                # through to the warning below instead.
                weight = CROSS_SECTIONS.get(el.symbol, {}).get(str(orb), None)
                if weight is not None:
                    total += pdos.get_densities() * weight
                else:
                    warnings.warn(f"No cross-section for {el}{orb}")
        # Use cls (not XPS) so subclasses construct instances of themselves.
        return cls(-dos.energies, total / np.max(total))
|
materialsproject/pymatgen
|
pymatgen/analysis/xps.py
|
Python
|
mit
| 2,915
|
[
"Gaussian",
"pymatgen"
] |
2b5b5f9a36b11496f2d9d64aa52a1007ab46506aa3ab10e06518989d44929a8c
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.test.utils import override_settings
from django.utils import translation
from unittest import mock
import pytest
import six
from unittest.mock import Mock, patch
from pyquery import PyQuery as pq
from olympia import amo
from olympia.activity.models import ActivityLog, ActivityLogToken
from olympia.addons.models import (
Addon, AddonApprovalsCounter, AddonReviewerFlags)
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import TestCase, file_factory, version_factory
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import send_mail
from olympia.files.models import File
from olympia.reviewers.models import (
AutoApprovalSummary, ReviewerScore, ViewExtensionPendingQueue)
from olympia.reviewers.utils import (
PENDING_STATUSES, ReviewAddon, ReviewFiles, ReviewHelper,
ViewUnlistedAllListTable, view_table_factory)
from olympia.users.models import UserProfile
pytestmark = pytest.mark.django_db
REVIEW_FILES_STATUSES = (amo.STATUS_APPROVED, amo.STATUS_DISABLED)
class TestViewPendingQueueTable(TestCase):
    """Rendering tests for the extension pending-review queue table.

    Rows are Mock objects, so attributes the renderers read but a test does
    not set are auto-created Mocks rather than missing.
    """
    def setUp(self):
        super(TestViewPendingQueueTable, self).setUp()
        # Build the table over an empty queryset; rows are mocked per-test.
        self.table = view_table_factory(ViewExtensionPendingQueue)([])
    def test_addon_name(self):
        row = Mock()
        page = Mock()
        page.start_index = Mock()
        page.start_index.return_value = 1
        row.addon_name = u'フォクすけといっしょ'
        row.addon_slug = 'test'
        row.latest_version = u'0.12'
        self.table.set_page(page)
        # Rendered cell links to the review page and shows "name version".
        a = pq(self.table.render_addon_name(row))
        assert a.attr('href') == (
            reverse('reviewers.review', args=[six.text_type(row.addon_slug)]))
        assert a.text() == u"フォクすけといっしょ 0.12"
    def test_addon_type_id(self):
        row = Mock()
        row.addon_type_id = amo.ADDON_THEME
        assert six.text_type(self.table.render_addon_type_id(row)) == (
            u'Complete Theme')
    def test_waiting_time_in_days(self):
        # Largest set unit wins: >= 1 day renders as days.
        row = Mock()
        row.waiting_time_days = 10
        row.waiting_time_hours = 10 * 24
        assert self.table.render_waiting_time_min(row) == u'10 days'
    def test_waiting_time_one_day(self):
        # Singular form for exactly one day.
        row = Mock()
        row.waiting_time_days = 1
        row.waiting_time_hours = 24
        row.waiting_time_min = 60 * 24
        assert self.table.render_waiting_time_min(row) == u'1 day'
    def test_waiting_time_in_hours(self):
        row = Mock()
        row.waiting_time_days = 0
        row.waiting_time_hours = 22
        row.waiting_time_min = 60 * 22
        assert self.table.render_waiting_time_min(row) == u'22 hours'
    def test_waiting_time_in_min(self):
        row = Mock()
        row.waiting_time_days = 0
        row.waiting_time_hours = 0
        row.waiting_time_min = 11
        assert self.table.render_waiting_time_min(row) == u'11 minutes'
    def test_waiting_time_in_secs(self):
        # All zero units fall through to the "moments ago" label.
        row = Mock()
        row.waiting_time_days = 0
        row.waiting_time_hours = 0
        row.waiting_time_min = 0
        assert self.table.render_waiting_time_min(row) == u'moments ago'
    def test_flags(self):
        row = Mock()
        row.flags = [('admin-review', 'Admin Review')]
        # Each flag is rendered as a div with an ed-sprite-<flag> class.
        doc = pq(self.table.render_flags(row))
        assert doc('div.ed-sprite-admin-review').length
class TestUnlistedViewAllListTable(TestCase):
    """Rendering tests for the unlisted "view all" reviewer table."""
    def setUp(self):
        super(TestUnlistedViewAllListTable, self).setUp()
        # Table over an empty queryset; rows are mocked per-test.
        self.table = ViewUnlistedAllListTable([])
    def test_addon_name(self):
        row = Mock()
        page = Mock()
        page.start_index = Mock()
        page.start_index.return_value = 1
        row.addon_name = u'フォクすけといっしょ'
        row.addon_slug = 'test'
        row.latest_version = u'0.12'
        self.table.set_page(page)
        # Unlisted queue links include the 'unlisted' channel in the URL.
        a = pq(self.table.render_addon_name(row))
        assert (a.attr('href') == reverse(
            'reviewers.review', args=['unlisted', str(row.addon_slug)]))
        assert a.text() == u'フォクすけといっしょ 0.12'
    def test_last_review(self):
        row = Mock()
        row.review_version_num = u'0.34.3b'
        row.review_date = u'2016-01-01'
        doc = pq(self.table.render_review_date(row))
        assert doc.text() == u'0.34.3b on 2016-01-01'
    def test_no_review(self):
        # No prior review renders a placeholder instead of a version/date.
        row = Mock()
        row.review_version_num = None
        row.review_date = None
        doc = pq(self.table.render_review_date(row))
        assert doc.text() == u'No Reviews'
    def test_authors_few(self):
        # Up to three authors: all shown, each linked to their profile.
        row = Mock()
        row.authors = [(123, 'bob'), (456, 'steve')]
        doc = pq(self.table.render_authors(row))
        assert doc('span').text() == 'bob steve'
        assert doc('span a:eq(0)').attr('href') == UserProfile.create_user_url(
            123)
        assert doc('span a:eq(1)').attr('href') == UserProfile.create_user_url(
            456)
        assert doc('span').attr('title') == 'bob steve'
    def test_authors_four(self):
        # More than three authors: display truncates with '...' but the
        # title attribute still carries the full list.
        row = Mock()
        row.authors = [(123, 'bob'), (456, 'steve'), (789, 'cvan'),
                       (999, 'basta')]
        doc = pq(self.table.render_authors(row))
        assert doc.text() == 'bob steve cvan ...'
        assert doc('span a:eq(0)').attr('href') == UserProfile.create_user_url(
            123)
        assert doc('span a:eq(1)').attr('href') == UserProfile.create_user_url(
            456)
        assert doc('span a:eq(2)').attr('href') == UserProfile.create_user_url(
            789)
        assert doc('span').attr('title') == 'bob steve cvan basta', doc.html()
yesterday = datetime.today() - timedelta(days=1)
# Those tests can call signing when making things public. We want to test that
# it works correctly, so we set ENABLE_ADDON_SIGNING to True and mock the
# actual signing call.
@override_settings(ENABLE_ADDON_SIGNING=True)
@mock.patch('olympia.lib.crypto.signing.call_signing', lambda f: None)
class TestReviewHelper(TestCase):
fixtures = ['base/addon_3615', 'base/users']
preamble = 'Mozilla Add-ons: Delicious Bookmarks 2.1.072'
def setUp(self):
super(TestReviewHelper, self).setUp()
class FakeRequest:
user = UserProfile.objects.get(pk=10482)
self.request = FakeRequest()
self.addon = Addon.objects.get(pk=3615)
self.version = self.addon.versions.all()[0]
self.helper = self.get_helper()
self.file = self.version.files.all()[0]
self.create_paths()
def _check_score(self, reviewed_type, bonus=0):
scores = ReviewerScore.objects.all()
assert len(scores) > 0
assert scores[0].score == amo.REVIEWED_SCORES[reviewed_type] + bonus
assert scores[0].note_key == reviewed_type
def remove_paths(self):
for path in (self.file.file_path, self.file.guarded_file_path):
if not storage.exists(path):
storage.delete(path)
    def create_paths(self):
        """Ensure the version's file paths exist on storage with dummy data."""
        for path in (self.file.file_path, self.file.guarded_file_path):
            if not storage.exists(path):
                with storage.open(path, 'w') as f:
                    f.write('test data\n')
        # Register cleanup so the created files are removed after each test.
        self.addCleanup(self.remove_paths)
def get_data(self):
return {'comments': 'foo', 'addon_files': self.version.files.all(),
'action': 'public', 'operating_systems': 'osx',
'applications': 'Firefox',
'info_request': self.addon.pending_info_request}
def get_helper(self, content_review_only=False):
return ReviewHelper(
request=self.request, addon=self.addon, version=self.version,
content_review_only=content_review_only)
def setup_type(self, status):
self.addon.update(status=status)
return self.get_helper().handler.review_type
def check_log_count(self, id):
return (ActivityLog.objects.for_addons(self.helper.addon)
.filter(action=id).count())
def test_no_request(self):
self.request = None
helper = self.get_helper()
assert helper.content_review_only is False
assert helper.actions == {}
helper = self.get_helper(content_review_only=True)
assert helper.content_review_only is True
assert helper.actions == {}
def test_type_nominated(self):
assert self.setup_type(amo.STATUS_NOMINATED) == 'extension_nominated'
def test_type_pending(self):
assert self.setup_type(amo.STATUS_PENDING) == 'extension_pending'
assert self.setup_type(amo.STATUS_NULL) == 'extension_pending'
assert self.setup_type(amo.STATUS_APPROVED) == 'extension_pending'
assert self.setup_type(amo.STATUS_DISABLED) == 'extension_pending'
def test_no_version(self):
helper = ReviewHelper(
request=self.request, addon=self.addon, version=None)
assert helper.handler.review_type == 'extension_pending'
def test_review_files(self):
version_factory(addon=self.addon,
created=self.version.created - timedelta(days=1),
file_kw={'status': amo.STATUS_APPROVED})
for status in REVIEW_FILES_STATUSES:
self.setup_data(status=status)
assert self.helper.handler.__class__ == ReviewFiles
def test_review_addon(self):
self.setup_data(status=amo.STATUS_NOMINATED)
assert self.helper.handler.__class__ == ReviewAddon
def test_process_action_none(self):
self.helper.set_data({'action': 'foo'})
with self.assertRaises(Exception):
self.helper.process()
def test_process_action_good(self):
self.helper.set_data({'action': 'reply', 'comments': 'foo'})
self.helper.process()
assert len(mail.outbox) == 1
def test_action_details(self):
for status in Addon.STATUS_CHOICES:
self.addon.update(status=status)
helper = self.get_helper()
actions = helper.actions
for k, v in actions.items():
assert six.text_type(
v['details']), "Missing details for: %s" % k
def get_review_actions(
self, addon_status, file_status, content_review_only=False):
self.file.update(status=file_status)
self.addon.update(status=addon_status)
# Need to clear self.version.all_files cache since we updated the file.
if self.version:
del self.version.all_files
return self.get_helper(content_review_only=content_review_only).actions
def test_actions_full_nominated(self):
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_NOMINATED,
file_status=amo.STATUS_AWAITING_REVIEW).keys()) == expected
def test_actions_full_update(self):
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_AWAITING_REVIEW).keys()) == expected
def test_actions_full_nonpending(self):
expected = ['reply', 'super', 'comment']
f_statuses = [amo.STATUS_APPROVED, amo.STATUS_DISABLED]
for file_status in f_statuses:
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=file_status).keys()) == expected
def test_actions_public_post_reviewer(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
expected = ['reject_multiple_versions', 'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_APPROVED).keys()) == expected
# Now make current version auto-approved...
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
expected = ['confirm_auto_approved', 'reject_multiple_versions',
'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_APPROVED).keys()) == expected
def test_actions_content_review(self):
self.grant_permission(self.request.user, 'Addons:ContentReview')
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
expected = ['confirm_auto_approved', 'reject_multiple_versions',
'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_APPROVED,
content_review_only=True).keys()) == expected
def test_actions_public_static_theme(self):
# Having Addons:PostReview and dealing with a public add-on would
# normally be enough to give you access to reject multiple versions
# action, but it should not be available for static themes.
self.grant_permission(self.request.user, 'Addons:PostReview')
self.addon.update(type=amo.ADDON_STATICTHEME)
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_AWAITING_REVIEW).keys()) == expected
def test_actions_no_version(self):
"""Deleted addons and addons with no versions in that channel have no
version set."""
expected = ['comment']
self.version = None
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_APPROVED).keys()) == expected
def test_set_files(self):
self.file.update(datestatuschanged=yesterday)
self.helper.set_data({'addon_files': self.version.files.all()})
self.helper.handler.set_files(amo.STATUS_APPROVED,
self.helper.handler.data['addon_files'])
self.file = self.version.files.all()[0]
assert self.file.status == amo.STATUS_APPROVED
assert self.file.datestatuschanged.date() > yesterday.date()
def test_logs(self):
self.helper.set_data({'comments': 'something'})
self.helper.handler.log_action(amo.LOG.APPROVE_VERSION)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
def test_notify_email(self):
self.helper.set_data(self.get_data())
base_fragment = 'To respond, please reply to this email or visit'
user = self.addon.listed_authors[0]
ActivityLogToken.objects.create(version=self.version, user=user)
uuid = self.version.token.get(user=user).uuid.hex
reply_email = (
'reviewreply+%s@%s' % (uuid, settings.INBOUND_EMAIL_DOMAIN))
templates = (
'extension_nominated_to_approved',
'extension_nominated_to_rejected',
'extension_pending_to_rejected',
'theme_nominated_to_approved',
'theme_nominated_to_rejected',
'theme_pending_to_rejected',)
for template in templates:
mail.outbox = []
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert base_fragment in mail.outbox[0].body
assert mail.outbox[0].reply_to == [reply_email]
mail.outbox = []
# This one does not inherit from base.txt because it's for unlisted
# signing notification, which is not really something that necessitates
# reviewer interaction, so it's simpler.
template = 'unlisted_to_reviewed_auto'
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert base_fragment not in mail.outbox[0].body
assert mail.outbox[0].reply_to == [reply_email]
def test_email_links(self):
expected = {
'extension_nominated_to_approved': 'addon_url',
'extension_nominated_to_rejected': 'dev_versions_url',
'extension_pending_to_approved': 'addon_url',
'extension_pending_to_rejected': 'dev_versions_url',
'theme_nominated_to_approved': 'addon_url',
'theme_nominated_to_rejected': 'dev_versions_url',
'theme_pending_to_approved': 'addon_url',
'theme_pending_to_rejected': 'dev_versions_url',
'unlisted_to_reviewed_auto': 'dev_versions_url',
}
self.helper.set_data(self.get_data())
context_data = self.helper.handler.get_context_data()
for template, context_key in six.iteritems(expected):
mail.outbox = []
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert context_key in context_data
assert context_data.get(context_key) in mail.outbox[0].body
    def setup_data(self, status, delete=None,
                   file_status=amo.STATUS_AWAITING_REVIEW,
                   channel=amo.RELEASE_CHANNEL_LISTED,
                   content_review_only=False, type=amo.ADDON_EXTENSION):
        """Reset mail/logs and put the addon, file and helper in a known state.

        :param status: addon status to set.
        :param delete: keys to drop from the default review data dict.
        :param file_status: status to set on the version's file.
        :param channel: listed/unlisted; unlisted forces a channel switch.
        :param content_review_only: passed through to the helper.
        :param type: addon type to set.
        """
        if delete is None:
            delete = []
        mail.outbox = []
        # Clear activity logs so per-test log-count assertions start at zero.
        ActivityLog.objects.for_addons(self.helper.addon).delete()
        self.addon.update(status=status, type=type)
        self.file.update(status=file_status)
        if channel == amo.RELEASE_CHANNEL_UNLISTED:
            self.make_addon_unlisted(self.addon)
            # Reload so the in-memory objects see the channel change.
            self.version.reload()
            self.file.reload()
        self.helper = self.get_helper(content_review_only=content_review_only)
        data = self.get_data().copy()
        for key in delete:
            del data[key]
        self.helper.set_data(data)
def test_send_reviewer_reply(self):
    # A plain reviewer reply sends exactly one email and logs the reply,
    # without touching the pending-info-request flag.
    assert not self.addon.pending_info_request
    self.setup_data(amo.STATUS_APPROVED, ['addon_files'])
    self.helper.handler.reviewer_reply()
    assert not self.addon.pending_info_request
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == self.preamble
    assert self.check_log_count(amo.LOG.REVIEWER_REPLY_VERSION.id) == 1

def test_request_more_information(self):
    # Setting info_request on a reply schedules the default 7-day deadline.
    self.setup_data(amo.STATUS_APPROVED, ['addon_files'])
    self.helper.handler.data['info_request'] = True
    self.helper.handler.reviewer_reply()
    self.assertCloseToNow(
        self.addon.pending_info_request,
        now=datetime.now() + timedelta(days=7))
    assert len(mail.outbox) == 1
    assert (
        mail.outbox[0].subject ==
        'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
    assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1

def test_request_more_information_custom_deadline(self):
    # A reviewer-supplied deadline (in days) overrides the 7-day default.
    self.setup_data(amo.STATUS_APPROVED, ['addon_files'])
    self.helper.handler.data['info_request'] = True
    self.helper.handler.data['info_request_deadline'] = 42
    self.helper.handler.reviewer_reply()
    self.assertCloseToNow(
        self.addon.pending_info_request,
        now=datetime.now() + timedelta(days=42))
    assert len(mail.outbox) == 1
    assert (
        mail.outbox[0].subject ==
        'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
    assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1

def test_request_more_information_reset_notified_flag(self):
    # Renewing an info request clears the "already notified about expiry"
    # flag and pushes the deadline out again.
    self.setup_data(amo.STATUS_APPROVED, ['addon_files'])
    flags = AddonReviewerFlags.objects.create(
        addon=self.addon,
        pending_info_request=datetime.now() - timedelta(days=1),
        notified_about_expiring_info_request=True)
    self.helper.handler.data['info_request'] = True
    self.helper.handler.reviewer_reply()
    flags.reload()
    self.assertCloseToNow(
        flags.pending_info_request,
        now=datetime.now() + timedelta(days=7))
    assert not flags.notified_about_expiring_info_request
    assert len(mail.outbox) == 1
    assert (
        mail.outbox[0].subject ==
        'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
    assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1

def test_request_more_information_deleted_addon(self):
    # Info requests must still work after the add-on is (soft-)deleted.
    self.addon.delete()
    self.test_request_more_information()
def test_email_no_locale(self):
    # Approval emails must be locale-independent: links should carry no
    # language prefix even while a non-default translation is active.
    self.addon.name = {
        'es': '¿Dónde está la biblioteca?'
    }
    self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
    with translation.override('es'):
        assert translation.get_language() == 'es'
        self.helper.handler.process_public()
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == (
        u'Mozilla Add-ons: Delicious Bookmarks 2.1.072 Approved')
    assert '/en-US/firefox/addon/a3615' not in mail.outbox[0].body
    assert '/es/firefox/addon/a3615' not in mail.outbox[0].body
    assert '/addon/a3615' in mail.outbox[0].body
    assert 'Your add-on, Delicious Bookmarks ' in mail.outbox[0].body

def test_nomination_to_public_no_files(self):
    # Approving without explicit addon_files data still approves the
    # version's files.
    self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
    self.helper.handler.process_public()
    assert self.addon.versions.all()[0].files.all()[0].status == (
        amo.STATUS_APPROVED)

def test_nomination_to_public_and_current_version(self):
    # Approval must recompute current_version when it was missing.
    self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
    self.addon = Addon.objects.get(pk=3615)
    self.addon.update(_current_version=None)
    assert not self.addon.current_version
    self.helper.handler.process_public()
    self.addon = Addon.objects.get(pk=3615)
    assert self.addon.current_version

def test_nomination_to_public_new_addon(self):
    """ Make sure new add-ons can be made public (bug 637959) """
    status = amo.STATUS_NOMINATED
    self.setup_data(status)
    # Make sure we have no public files
    for version in self.addon.versions.all():
        version.files.update(status=amo.STATUS_AWAITING_REVIEW)
    self.helper.handler.process_public()
    # Re-fetch the add-on
    addon = Addon.objects.get(pk=3615)
    assert addon.status == amo.STATUS_APPROVED
    assert addon.versions.all()[0].files.all()[0].status == (
        amo.STATUS_APPROVED)
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == '%s Approved' % self.preamble
    # AddonApprovalsCounter counter is now at 1 for this addon since there
    # was a human review.
    approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    assert approval_counter.counter == 1
    self.assertCloseToNow(approval_counter.last_human_review)
    assert storage.exists(self.file.file_path)
    assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
    self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public(self, sign_mock):
    # Full approval of a nominated add-on: file signed, "Approved" mail
    # sent, approvals counter bumped, activity logged and score awarded.
    # NOTE(review): sign_mock.reset() is not the Mock API — reset_mock()
    # is presumably intended; .reset() merely records a child-mock call.
    sign_mock.reset()
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_public()
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.addon.versions.all()[0].files.all()[0].status == (
        amo.STATUS_APPROVED)
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == (
        '%s Approved' % self.preamble)
    assert 'has been approved' in mail.outbox[0].body
    # AddonApprovalsCounter counter is now at 1 for this addon.
    approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    assert approval_counter.counter == 1
    sign_mock.assert_called_with(self.file)
    assert storage.exists(self.file.file_path)
    assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
    self._check_score(amo.REVIEWED_ADDON_FULL)

@patch('olympia.reviewers.utils.sign_file')
def test_old_nomination_to_public_bonus_score(self, sign_mock):
    # Same as the plain approval, but an old nomination date earns bonus
    # review points.
    sign_mock.reset()
    self.setup_data(amo.STATUS_NOMINATED)
    self.version.update(nomination=self.days_ago(9))
    self.helper.handler.process_public()
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.addon.versions.all()[0].files.all()[0].status == (
        amo.STATUS_APPROVED)
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == (
        '%s Approved' % self.preamble)
    assert 'has been approved' in mail.outbox[0].body
    # AddonApprovalsCounter counter is now at 1 for this addon.
    approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    assert approval_counter.counter == 1
    sign_mock.assert_called_with(self.file)
    assert storage.exists(self.file.file_path)
    assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
    # Score has bonus points added for reviewing an old add-on.
    # 2 days over the limit = 4 points
    self._check_score(amo.REVIEWED_ADDON_FULL, bonus=4)

@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public_no_request(self, sign_mock):
    # With no request there is no reviewing user: the approval counts as
    # automatic, so no human-review timestamp and no reviewer score.
    self.request = None
    sign_mock.reset()
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_public()
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.addon.versions.all()[0].files.all()[0].status == (
        amo.STATUS_APPROVED)
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == (
        '%s Approved' % self.preamble)
    assert 'has been approved' in mail.outbox[0].body
    # AddonApprovalsCounter counter is now at 0 for this addon since there
    # was an automatic approval.
    approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    assert approval_counter.counter == 0
    # Since approval counter did not exist for this add-on before, the last
    # human review field should be empty.
    assert approval_counter.last_human_review is None
    sign_mock.assert_called_with(self.file)
    assert storage.exists(self.file.file_path)
    assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
    # No request, no user, therefore no score.
    assert ReviewerScore.objects.count() == 0
@patch('olympia.reviewers.utils.sign_file')
def test_public_addon_with_version_awaiting_review_to_public(
        self, sign_mock):
    # Approving a new version of an already-public add-on sends the
    # "Updated" mail, bumps the approvals counter and refreshes the
    # last-human-review timestamp.
    sign_mock.reset()
    self.addon.current_version.update(created=self.days_ago(1))
    self.version = version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        version='3.0.42',
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.preamble = 'Mozilla Add-ons: Delicious Bookmarks 3.0.42'
    self.file = self.version.files.all()[0]
    self.setup_data(amo.STATUS_APPROVED)
    self.create_paths()
    AddonApprovalsCounter.objects.create(
        addon=self.addon, counter=1, last_human_review=self.days_ago(42))
    # Safeguards.
    assert isinstance(self.helper.handler, ReviewFiles)
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.status == amo.STATUS_AWAITING_REVIEW
    assert self.addon.current_version.files.all()[0].status == (
        amo.STATUS_APPROVED)
    self.helper.handler.process_public()
    self.addon.reload()
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.reload().status == amo.STATUS_APPROVED
    assert self.addon.current_version.files.all()[0].status == (
        amo.STATUS_APPROVED)
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == (
        '%s Updated' % self.preamble)
    assert 'has been updated' in mail.outbox[0].body
    # AddonApprovalsCounter counter is now at 2 for this addon since there
    # was another human review. The last human review date should have been
    # updated.
    approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    assert approval_counter.counter == 2
    self.assertCloseToNow(approval_counter.last_human_review)
    sign_mock.assert_called_with(self.file)
    assert storage.exists(self.file.file_path)
    assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
    self._check_score(amo.REVIEWED_ADDON_UPDATE)

@patch('olympia.reviewers.utils.sign_file')
def test_public_addon_with_version_awaiting_review_to_sandbox(
        self, sign_mock):
    # Rejecting the pending version disables only its file; the add-on and
    # its current public version stay approved and nothing gets signed.
    sign_mock.reset()
    self.addon.current_version.update(created=self.days_ago(1))
    self.version = version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        version='3.0.42',
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.preamble = 'Mozilla Add-ons: Delicious Bookmarks 3.0.42'
    self.file = self.version.files.all()[0]
    self.setup_data(amo.STATUS_APPROVED)
    self.create_paths()
    AddonApprovalsCounter.objects.create(addon=self.addon, counter=1)
    # Safeguards.
    assert isinstance(self.helper.handler, ReviewFiles)
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.status == amo.STATUS_AWAITING_REVIEW
    assert self.addon.current_version.files.all()[0].status == (
        amo.STATUS_APPROVED)
    self.helper.handler.process_sandbox()
    self.addon.reload()
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.reload().status == amo.STATUS_DISABLED
    assert self.addon.current_version.files.all()[0].status == (
        amo.STATUS_APPROVED)
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == (
        "%s didn't pass review" % self.preamble)
    assert 'reviewed and did not meet the criteria' in mail.outbox[0].body
    # AddonApprovalsCounter counter is still at 1 for this addon.
    approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    assert approval_counter.counter == 1
    assert not sign_mock.called
    assert storage.exists(self.file.guarded_file_path)
    assert not storage.exists(self.file.file_path)
    assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
    self._check_score(amo.REVIEWED_ADDON_UPDATE)
def test_public_addon_confirm_auto_approval(self):
    # Confirming an auto-approval marks the summary confirmed, records a
    # human review, logs CONFIRM_AUTO_APPROVED and awards risk-based points.
    self.grant_permission(self.request.user, 'Addons:PostReview')
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
    summary = AutoApprovalSummary.objects.create(
        version=self.version, verdict=amo.AUTO_APPROVED, weight=151)
    assert summary.confirmed is None
    self.create_paths()
    # Safeguards.
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.status == amo.STATUS_APPROVED
    assert self.addon.current_version.files.all()[0].status == (
        amo.STATUS_APPROVED)
    self.helper.handler.confirm_auto_approved()
    summary.reload()
    assert summary.confirmed is True
    approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    self.assertCloseToNow(approvals_counter.last_human_review)
    assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
    assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
    activity = (ActivityLog.objects.for_addons(self.addon)
                .filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
                .get())
    assert activity.arguments == [self.addon, self.version]
    assert activity.details['comments'] == ''
    # Check points awarded.
    self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)

def test_public_with_unreviewed_version_addon_confirm_auto_approval(self):
    # The confirm action applies to the current (auto-approved) version,
    # not the newer version that is still awaiting review.
    self.grant_permission(self.request.user, 'Addons:PostReview')
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
    self.current_version = self.version
    summary = AutoApprovalSummary.objects.create(
        version=self.version, verdict=amo.AUTO_APPROVED, weight=152)
    self.version = version_factory(
        addon=self.addon, version='3.0',
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.file = self.version.files.all()[0]
    self.helper = self.get_helper()  # To make it pick up the new version.
    self.helper.set_data(self.get_data())
    # Confirm approval action should be available even if the latest
    # version is not public, what we care about is the current_version.
    assert 'confirm_auto_approved' in self.helper.actions
    self.helper.handler.confirm_auto_approved()
    summary.reload()
    assert summary.confirmed is True
    approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    self.assertCloseToNow(approvals_counter.last_human_review)
    assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
    assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
    activity = (ActivityLog.objects.for_addons(self.addon)
                .filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
                .get())
    assert activity.arguments == [self.addon, self.current_version]
    assert activity.details['comments'] == ''
    # Check points awarded.
    self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)

def test_public_with_disabled_version_addon_confirm_auto_approval(self):
    # Same as above but the newer version is disabled rather than awaiting
    # review — confirmation still targets current_version.
    self.grant_permission(self.request.user, 'Addons:PostReview')
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
    self.current_version = self.version
    summary = AutoApprovalSummary.objects.create(
        version=self.version, verdict=amo.AUTO_APPROVED, weight=153)
    self.version = version_factory(
        addon=self.addon, version='3.0',
        file_kw={'status': amo.STATUS_DISABLED})
    self.file = self.version.files.all()[0]
    self.helper = self.get_helper()  # To make it pick up the new version.
    self.helper.set_data(self.get_data())
    # Confirm approval action should be available even if the latest
    # version is not public, what we care about is the current_version.
    assert 'confirm_auto_approved' in self.helper.actions
    self.helper.handler.confirm_auto_approved()
    summary.reload()
    assert summary.confirmed is True
    approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    self.assertCloseToNow(approvals_counter.last_human_review)
    assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
    assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
    activity = (ActivityLog.objects.for_addons(self.addon)
                .filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
                .get())
    assert activity.arguments == [self.addon, self.current_version]
    assert activity.details['comments'] == ''
    # Check points awarded.
    self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)

def test_unlisted_version_addon_confirm_auto_approval(self):
    # Confirming an unlisted version logs the action but leaves the
    # approvals counter untouched.
    self.grant_permission(self.request.user, 'Addons:ReviewUnlisted')
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
    AutoApprovalSummary.objects.create(
        version=self.version, verdict=amo.AUTO_APPROVED)
    self.version = version_factory(
        addon=self.addon, version='3.0',
        channel=amo.RELEASE_CHANNEL_UNLISTED)
    self.file = self.version.files.all()[0]
    self.helper = self.get_helper()  # To make it pick up the new version.
    self.helper.set_data(self.get_data())
    # Confirm approval action should be available since the version
    # we are looking at is unlisted and reviewer has permission.
    assert 'confirm_auto_approved' in self.helper.actions
    self.helper.handler.confirm_auto_approved()
    assert (
        AddonApprovalsCounter.objects.filter(addon=self.addon).count() ==
        0)  # Not incremented since it was unlisted.
    assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
    activity = (ActivityLog.objects.for_addons(self.addon)
                .filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
                .get())
    assert activity.arguments == [self.addon, self.version]
@patch('olympia.reviewers.utils.sign_file')
def test_null_to_public_unlisted(self, sign_mock):
    # Approving an unlisted version signs the file and mails the developer,
    # but does not change add-on status or touch the approvals counter.
    sign_mock.reset()
    self.setup_data(amo.STATUS_NULL,
                    channel=amo.RELEASE_CHANNEL_UNLISTED)
    self.helper.handler.process_public()
    assert self.addon.status == amo.STATUS_NULL
    assert self.addon.versions.all()[0].files.all()[0].status == (
        amo.STATUS_APPROVED)
    # AddonApprovalsCounter was not touched since the version we made
    # public is unlisted.
    assert not AddonApprovalsCounter.objects.filter(
        addon=self.addon).exists()
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == (
        '%s signed and ready to download' % self.preamble)
    assert ('%s is now signed and ready for you to download' %
            self.version.version in mail.outbox[0].body)
    assert 'You received this email because' not in mail.outbox[0].body
    sign_mock.assert_called_with(self.file)
    assert storage.exists(self.file.file_path)
    assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1

@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public_failed_signing(self, sign_mock):
    # A signing failure must abort the approval before any state change,
    # mail or activity log is produced.
    sign_mock.side_effect = Exception
    sign_mock.reset()
    self.setup_data(amo.STATUS_NOMINATED)
    with self.assertRaises(Exception):
        self.helper.handler.process_public()
    # AddonApprovalsCounter was not touched since we failed signing.
    assert not AddonApprovalsCounter.objects.filter(
        addon=self.addon).exists()
    # Status unchanged.
    assert self.addon.status == amo.STATUS_NOMINATED
    assert self.addon.versions.all()[0].files.all()[0].status == (
        amo.STATUS_AWAITING_REVIEW)
    assert len(mail.outbox) == 0
    assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 0

@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_sandbox(self, sign_mock):
    # Rejecting a nomination disables the file, moves it to the guarded
    # path, mails the developer and skips signing entirely.
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_sandbox()
    assert self.addon.status == amo.STATUS_NULL
    assert self.addon.versions.all()[0].files.all()[0].status == (
        amo.STATUS_DISABLED)
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == (
        '%s didn\'t pass review' % self.preamble)
    assert 'did not meet the criteria' in mail.outbox[0].body
    # AddonApprovalsCounter was not touched since we didn't approve.
    assert not AddonApprovalsCounter.objects.filter(
        addon=self.addon).exists()
    assert not sign_mock.called
    assert storage.exists(self.file.guarded_file_path)
    assert not storage.exists(self.file.file_path)
    assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
def test_email_unicode_monster(self):
    # Non-ASCII add-on names must survive into the mail subject.
    self.addon.name = u'TaobaoShopping淘宝网导航按钮'
    self.addon.save()
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_sandbox()
    assert u'TaobaoShopping淘宝网导航按钮' in mail.outbox[0].subject

def test_nomination_to_super_review(self):
    # Escalating a nomination flags the add-on for admin code review.
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_super_review()
    assert self.addon.needs_admin_code_review
    assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1

def test_auto_approved_admin_code_review(self):
    # Escalating an auto-approved extension requests admin *code* review.
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
    AutoApprovalSummary.objects.create(
        version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
    self.helper.handler.process_super_review()
    assert self.addon.needs_admin_code_review
    assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1

def test_auto_approved_admin_content_review(self):
    # In content-review mode the escalation requests admin *content* review.
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED,
                    content_review_only=True)
    AutoApprovalSummary.objects.create(
        version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
    self.helper.handler.process_super_review()
    assert self.addon.needs_admin_content_review
    assert self.check_log_count(
        amo.LOG.REQUEST_ADMIN_REVIEW_CONTENT.id) == 1

def test_auto_approved_admin_theme_review(self):
    # For static themes the escalation requests admin *theme* review.
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED,
                    type=amo.ADDON_STATICTHEME)
    AutoApprovalSummary.objects.create(
        version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
    self.helper.handler.process_super_review()
    assert self.addon.needs_admin_theme_review
    assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_THEME.id) == 1

def test_nomination_to_super_review_and_escalate(self):
    # Escalation also works while the file is still awaiting review.
    self.setup_data(amo.STATUS_NOMINATED)
    self.file.update(status=amo.STATUS_AWAITING_REVIEW)
    self.helper.handler.process_super_review()
    assert self.addon.needs_admin_code_review
    assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1

def test_operating_system_present(self):
    # "Tested on <os> with <app>" appears when both fields are provided.
    self.setup_data(amo.STATUS_APPROVED)
    self.helper.handler.process_sandbox()
    assert 'Tested on osx with Firefox' in mail.outbox[0].body

def test_operating_system_not_present(self):
    # Without an OS, only the application is mentioned.
    self.setup_data(amo.STATUS_APPROVED)
    data = self.get_data().copy()
    data['operating_systems'] = ''
    self.helper.set_data(data)
    self.helper.handler.process_sandbox()
    assert 'Tested with Firefox' in mail.outbox[0].body

def test_application_not_present(self):
    # Without an application, only the OS is mentioned.
    self.setup_data(amo.STATUS_APPROVED)
    data = self.get_data().copy()
    data['applications'] = ''
    self.helper.set_data(data)
    self.helper.handler.process_sandbox()
    assert 'Tested on osx' in mail.outbox[0].body

def test_both_not_present(self):
    # With neither field, the "Tested" sentence is omitted entirely.
    self.setup_data(amo.STATUS_APPROVED)
    data = self.get_data().copy()
    data['applications'] = ''
    data['operating_systems'] = ''
    self.helper.set_data(data)
    self.helper.handler.process_sandbox()
    assert 'Tested' not in mail.outbox[0].body

def test_pending_to_super_review(self):
    # Escalation works from every pending status.
    for status in PENDING_STATUSES:
        self.setup_data(status)
        self.helper.handler.process_super_review()
        assert self.addon.needs_admin_code_review
def test_nominated_review_time_set_version_process_public(self):
    # Approving sets the version's reviewed timestamp.
    self.version.update(reviewed=None)
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_public()
    assert self.version.reload().reviewed

def test_nominated_review_time_set_version_process_sandbox(self):
    # Rejecting also sets the version's reviewed timestamp.
    self.version.update(reviewed=None)
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_sandbox()
    assert self.version.reload().reviewed

def test_nominated_review_time_set_file_process_public(self):
    # Approving sets the file's reviewed timestamp.
    self.file.update(reviewed=None)
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_public()
    assert File.objects.get(pk=self.file.pk).reviewed

def test_nominated_review_time_set_file_process_sandbox(self):
    # Rejecting sets the file's reviewed timestamp too.
    self.file.update(reviewed=None)
    self.setup_data(amo.STATUS_NOMINATED)
    self.helper.handler.process_sandbox()
    assert File.objects.get(pk=self.file.pk).reviewed

def test_review_unlisted_while_a_listed_version_is_awaiting_review(self):
    # Building a helper for an unlisted version must not blow up just
    # because a listed version is simultaneously awaiting review.
    self.make_addon_unlisted(self.addon)
    self.version.reload()
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.addon.update(status=amo.STATUS_NOMINATED)
    assert self.get_helper()
def test_reject_multiple_versions(self):
    # Rejecting every version disables all files, drops current_version,
    # nulls the add-on status and sends the "disabled" mail once.
    old_version = self.version
    self.version = version_factory(addon=self.addon, version='3.0')
    AutoApprovalSummary.objects.create(
        version=self.version, verdict=amo.AUTO_APPROVED, weight=101)
    # An extra file should not change anything.
    file_factory(version=self.version, platform=amo.PLATFORM_LINUX.id)
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
    # Safeguards.
    assert isinstance(self.helper.handler, ReviewFiles)
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.status == amo.STATUS_APPROVED
    assert self.addon.current_version.is_public()
    data = self.get_data().copy()
    data['versions'] = self.addon.versions.all()
    self.helper.set_data(data)
    self.helper.handler.reject_multiple_versions()
    self.addon.reload()
    self.file.reload()
    assert self.addon.status == amo.STATUS_NULL
    assert self.addon.current_version is None
    assert list(self.addon.versions.all()) == [self.version, old_version]
    assert self.file.status == amo.STATUS_DISABLED
    assert len(mail.outbox) == 1
    assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
    assert mail.outbox[0].subject == (
        u'Mozilla Add-ons: Delicious Bookmarks has been disabled on '
        u'addons.mozilla.org')
    assert ('your add-on Delicious Bookmarks has been disabled'
            in mail.outbox[0].body)
    log_token = ActivityLogToken.objects.get()
    assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
    assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 2
    assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 0
    logs = (ActivityLog.objects.for_addons(self.addon)
            .filter(action=amo.LOG.REJECT_VERSION.id))
    assert logs[0].created == logs[1].created
    # Check points awarded.
    self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)

def test_reject_multiple_versions_except_latest(self):
    # Rejecting a subset keeps the latest version public, so the add-on
    # stays approved and the mail lists only the disabled versions.
    old_version = self.version
    extra_version = version_factory(addon=self.addon, version='3.1')
    # Add yet another version we don't want to reject.
    self.version = version_factory(addon=self.addon, version='42.0')
    AutoApprovalSummary.objects.create(
        version=self.version, verdict=amo.AUTO_APPROVED, weight=91)
    self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
    # Safeguards.
    assert isinstance(self.helper.handler, ReviewFiles)
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.status == amo.STATUS_APPROVED
    assert self.addon.current_version.is_public()
    data = self.get_data().copy()
    data['versions'] = self.addon.versions.all().exclude(
        pk=self.version.pk)
    self.helper.set_data(data)
    self.helper.handler.reject_multiple_versions()
    self.addon.reload()
    self.file.reload()
    # latest_version is still public so the add-on is still public.
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.addon.current_version == self.version
    assert list(self.addon.versions.all().order_by('-pk')) == [
        self.version, extra_version, old_version]
    assert self.file.status == amo.STATUS_DISABLED
    assert len(mail.outbox) == 1
    assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
    assert mail.outbox[0].subject == (
        u'Mozilla Add-ons: Versions disabled for Delicious Bookmarks')
    assert ('Version(s) affected and disabled:\n3.1, 2.1.072'
            in mail.outbox[0].body)
    log_token = ActivityLogToken.objects.filter(
        version=self.version).get()
    assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
    assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 2
    assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 0
    # Check points awarded.
    self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)

def test_reject_multiple_versions_content_review(self):
    # In content-review mode rejections are logged as REJECT_CONTENT
    # instead of REJECT_VERSION.
    self.grant_permission(self.request.user, 'Addons:ContentReview')
    old_version = self.version
    self.version = version_factory(addon=self.addon, version='3.0')
    self.setup_data(
        amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED,
        content_review_only=True)
    # Safeguards.
    assert isinstance(self.helper.handler, ReviewFiles)
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.status == amo.STATUS_APPROVED
    assert self.addon.current_version.is_public()
    data = self.get_data().copy()
    data['versions'] = self.addon.versions.all()
    self.helper.set_data(data)
    self.helper.handler.reject_multiple_versions()
    self.addon.reload()
    self.file.reload()
    assert self.addon.status == amo.STATUS_NULL
    assert self.addon.current_version is None
    assert list(self.addon.versions.all()) == [self.version, old_version]
    assert self.file.status == amo.STATUS_DISABLED
    assert len(mail.outbox) == 1
    assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
    assert mail.outbox[0].subject == (
        u'Mozilla Add-ons: Delicious Bookmarks has been disabled on '
        u'addons.mozilla.org')
    assert ('your add-on Delicious Bookmarks has been disabled'
            in mail.outbox[0].body)
    log_token = ActivityLogToken.objects.get()
    assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
    assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 0
    assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 2
def test_confirm_auto_approval_content_review(self):
    # In content-review mode, confirming logs APPROVE_CONTENT, stamps the
    # last content review date, and leaves the auto-approval summary and
    # code-review counters untouched.
    self.grant_permission(self.request.user, 'Addons:ContentReview')
    self.setup_data(
        amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED,
        content_review_only=True)
    summary = AutoApprovalSummary.objects.create(
        version=self.version, verdict=amo.AUTO_APPROVED)
    self.create_paths()
    # Safeguards.
    assert self.addon.status == amo.STATUS_APPROVED
    assert self.file.status == amo.STATUS_APPROVED
    assert self.addon.current_version.files.all()[0].status == (
        amo.STATUS_APPROVED)
    self.helper.handler.confirm_auto_approved()
    summary.reload()
    assert summary.confirmed is None  # unchanged.
    approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
    assert approvals_counter.counter == 0
    assert approvals_counter.last_human_review is None
    self.assertCloseToNow(approvals_counter.last_content_review)
    assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 0
    assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 1
    activity = (ActivityLog.objects.for_addons(self.addon)
                .filter(action=amo.LOG.APPROVE_CONTENT.id)
                .get())
    assert activity.arguments == [self.addon, self.version]
    assert activity.details['comments'] == ''
    # Check points awarded.
    self._check_score(amo.REVIEWED_CONTENT_REVIEW)

def test_dev_versions_url_in_context(self):
    # The dev_versions_url flips from the devhub page to the reverse()d
    # versions URL when the version is unlisted.
    self.helper.set_data(self.get_data())
    context_data = self.helper.handler.get_context_data()
    assert context_data['dev_versions_url'] == absolutify(
        self.addon.get_dev_url('versions'))
    self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
    context_data = self.helper.handler.get_context_data()
    assert context_data['dev_versions_url'] == absolutify(
        reverse('devhub.addons.versions', args=[self.addon.id]))
def test_send_email_autoescape():
    """Reviewer mail must deliver HTML-special characters verbatim."""
    payload = 'woo&&<>\'""'
    mail_kwargs = {
        'recipient_list': ['nobody@mozilla.org'],
        'from_email': 'nobody@mozilla.org',
        'use_deny_list': False,
    }
    send_mail(u'Random subject with %s', payload, **mail_kwargs)
    assert len(mail.outbox) == 1
    assert mail.outbox[0].body == payload
|
kumar303/addons-server
|
src/olympia/reviewers/tests/test_utils.py
|
Python
|
bsd-3-clause
| 53,663
|
[
"VisIt"
] |
173b1dafe2da373783a68d0dae04844be7469ee786c3cbb49efe4e6bda653ff4
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
#
"""
Display filtered data
"""
from gramps.gen.simple import SimpleAccess, SimpleDoc
from gramps.gui.plug.quick import QuickTable
from gramps.gen.utils.file import media_path_full
from gramps.gui.plug.quick import run_quick_report_by_name_direct
from gramps.gen.lib import Person
from gramps.gen.datehandler import get_date
import posixpath
from collections import defaultdict
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
ngettext = glocale.translation.ngettext # else "nearby" comments are ignored

# Maps the raw filter name passed to run() to its translatable display
# string; the "Filtering_on|" prefix is a gettext message context stripped
# by sgettext. Fix: the original literal listed the key 'all families'
# twice (a duplicate dict key — the second silently overwrote the first
# identical entry); the dead duplicate has been removed.
fname_map = {'all': _('Filtering_on|all'),
             'Inverse Person': _('Filtering_on|Inverse Person'),
             'Inverse Family': _('Filtering_on|Inverse Family'),
             'Inverse Event': _('Filtering_on|Inverse Event'),
             'Inverse Place': _('Filtering_on|Inverse Place'),
             'Inverse Source': _('Filtering_on|Inverse Source'),
             'Inverse Repository': _('Filtering_on|Inverse Repository'),
             'Inverse MediaObject': _('Filtering_on|Inverse MediaObject'),
             'Inverse Note': _('Filtering_on|Inverse Note'),
             'all people': _('Filtering_on|all people'),
             'all families': _('Filtering_on|all families'),
             'all events': _('Filtering_on|all events'),
             'all places': _('Filtering_on|all places'),
             'all sources': _('Filtering_on|all sources'),
             'all repositories': _('Filtering_on|all repositories'),
             'all media': _('Filtering_on|all media'),
             'all notes': _('Filtering_on|all notes'),
             'males': _('Filtering_on|males'),
             'females': _('Filtering_on|females'),
             'people with unknown gender':
             _('Filtering_on|people with unknown gender'),
             'incomplete names':
             _('Filtering_on|incomplete names'),
             'people with missing birth dates':
             _('Filtering_on|people with missing birth dates'),
             'disconnected people': _('Filtering_on|disconnected people'),
             'unique surnames': _('Filtering_on|unique surnames'),
             'people with media': _('Filtering_on|people with media'),
             'media references': _('Filtering_on|media references'),
             'unique media': _('Filtering_on|unique media'),
             'missing media': _('Filtering_on|missing media'),
             'media by size': _('Filtering_on|media by size'),
             'list of people': _('Filtering_on|list of people')}
def run(database, document, filter_name, *args, **kwargs):
    """
    Produce the "Filter by name" quick report.

    Writes a QuickTable listing the objects selected by ``filter_name``
    (or summary counts when ``filter_name == 'all'``) into ``document``.
    ``database`` is normally a filtered proxy database; the unfiltered base
    database is reached through ``database.basedb`` / ``database.db`` where
    needed.  For 'list of people', ``kwargs["handles"]`` supplies the person
    handles to list.

    Raises AttributeError if ``filter_name`` is not recognized.
    """
    # NOTE: the original docstring ("Loops through the families that the
    # person is a child in...") was copy-pasted from another quick report
    # and did not describe this function; it has been corrected above.
    # setup the simple access functions
    sdb = SimpleAccess(database)
    sdoc = SimpleDoc(document)
    stab = QuickTable(sdb)
    if (filter_name == 'all'):
        sdoc.title(_("Summary counts of current selection"))
        sdoc.paragraph("")
        sdoc.paragraph(_("Right-click row (or press ENTER) to see selected items."))
        sdoc.paragraph("")
        stab.columns(_("Object"), _("Count/Total"))
        # Each row shows <count in current selection>/<count in full database>.
        stab.row([_("People"), "Filter", "Person"],
                 "%d/%d" % (len(database.get_person_handles()),
                            len(database.basedb.get_person_handles())))
        stab.row([_("Families"), "Filter", "Family"],
                 "%d/%d" % (len(database.get_family_handles()),
                            len(database.basedb.get_family_handles())))
        stab.row([_("Events"), "Filter", "Event"],
                 "%d/%d" % (len(database.get_event_handles()),
                            len(database.basedb.get_event_handles())))
        stab.row([_("Places"), "Filter", "Place"],
                 "%d/%d" % (len(database.get_place_handles()),
                            len(database.basedb.get_place_handles())))
        stab.row([_("Sources"), "Filter", "Source"],
                 "%d/%d" % (len(database.get_source_handles()),
                            len(database.basedb.get_source_handles())))
        stab.row([_("Repositories"), "Filter", "Repository"],
                 "%d/%d" % (len(database.get_repository_handles()),
                            len(database.basedb.get_repository_handles())))
        stab.row([_("Media"), "Filter", "MediaObject"],
                 "%d/%d" % (len(database.get_media_object_handles()),
                            len(database.basedb.get_media_object_handles())))
        stab.row([_("Notes"), "Filter", "Note"],
                 "%d/%d" % (len(database.get_note_handles()),
                            len(database.basedb.get_note_handles())))
        sdoc.paragraph("")
        stab.write(sdoc)
        return
    # display the title
    if filter_name in fname_map:
        sdoc.title(_("Filtering on %s") % fname_map[filter_name]) # listed above
    else:
        sdoc.title(_("Filtering on %s") % _(filter_name))
    sdoc.paragraph("")
    matches = 0
    # The 'Inverse *' filters list objects in the full database that are
    # NOT part of the current (proxy) selection.
    if (filter_name == 'Inverse Person'):
        sdb.dbase = database.db
        stab.columns(_("Person"), _("Gramps ID"), _("Birth Date"))
        proxy_handles = set(database.iter_person_handles())
        for person in database.db.iter_people():
            if person.handle not in proxy_handles:
                stab.row(person, person.gramps_id,
                         sdb.birth_or_fallback(person))
                matches += 1
    elif (filter_name == 'Inverse Family'):
        sdb.dbase = database.db
        stab.columns(_("Family"), _("Gramps ID"))
        proxy_handles = set(database.iter_family_handles())
        for family in database.db.iter_families():
            if family.handle not in proxy_handles:
                stab.row(family, family.gramps_id)
                matches += 1
    elif (filter_name == 'Inverse Event'):
        sdb.dbase = database.db
        stab.columns(_("Event"), _("Gramps ID"))
        proxy_handles = set(database.iter_event_handles())
        for event in database.db.iter_events():
            if event.handle not in proxy_handles:
                stab.row(event, event.gramps_id)
                matches += 1
    elif (filter_name == 'Inverse Place'):
        sdb.dbase = database.db
        stab.columns(_("Place"), _("Gramps ID"))
        proxy_handles = set(database.iter_place_handles())
        for place in database.db.iter_places():
            if place.handle not in proxy_handles:
                stab.row(place, place.gramps_id)
                matches += 1
    elif (filter_name == 'Inverse Source'):
        sdb.dbase = database.db
        stab.columns(_("Source"), _("Gramps ID"))
        proxy_handles = set(database.iter_source_handles())
        for source in database.db.iter_sources():
            if source.handle not in proxy_handles:
                stab.row(source, source.gramps_id)
                matches += 1
    elif (filter_name == 'Inverse Repository'):
        sdb.dbase = database.db
        stab.columns(_("Repository"), _("Gramps ID"))
        proxy_handles = set(database.iter_repository_handles())
        for repository in database.db.iter_repositories():
            if repository.handle not in proxy_handles:
                stab.row(repository, repository.gramps_id)
                matches += 1
    elif (filter_name == 'Inverse MediaObject'):
        sdb.dbase = database.db
        stab.columns(_("Media"), _("Gramps ID"))
        proxy_handles = set(database.iter_media_object_handles())
        for media in database.db.iter_media_objects():
            if media.handle not in proxy_handles:
                stab.row(media, media.gramps_id)
                matches += 1
    elif (filter_name == 'Inverse Note'):
        sdb.dbase = database.db
        stab.columns(_("Note"), _("Gramps ID"))
        proxy_handles = set(database.iter_note_handles())
        for note in database.db.iter_notes():
            if note.handle not in proxy_handles:
                stab.row(note, note.gramps_id)
                matches += 1
    # The 'all <type>' filters simply list the current selection.
    elif (filter_name in ['all people', 'Person']):
        stab.columns(_("Person"), _("Gramps ID"), _("Birth Date"))
        for person in database.iter_people():
            stab.row(person, person.gramps_id, sdb.birth_or_fallback(person))
            matches += 1
    elif (filter_name in ['all families', 'Family']):
        stab.columns(_("Family"), _("Gramps ID"))
        for family in database.iter_families():
            stab.row(family, family.gramps_id)
            matches += 1
    elif (filter_name in ['all events', 'Event']):
        stab.columns(_("Event"), _("Gramps ID"))
        for obj in database.iter_events():
            stab.row(obj, obj.gramps_id)
            matches += 1
    elif (filter_name in ['all places', 'Place']):
        stab.columns(_("Place"), _("Gramps ID"))
        for obj in database.iter_places():
            stab.row(obj, obj.gramps_id)
            matches += 1
    elif (filter_name in ['all sources', 'Source']):
        stab.columns(_("Source"), _("Gramps ID"))
        for obj in database.iter_sources():
            stab.row(obj, obj.gramps_id)
            matches += 1
    elif (filter_name in ['all repositories', 'Repository']):
        stab.columns(_("Repository"), _("Gramps ID"))
        for obj in database.iter_repositories():
            stab.row(obj, obj.gramps_id)
            matches += 1
    elif (filter_name in ['all media', 'MediaObject']):
        stab.columns(_("Media"), _("Gramps ID"))
        for obj in database.iter_media_objects():
            stab.row(obj, obj.gramps_id)
            matches += 1
    elif (filter_name in ['all notes', 'Note']):
        stab.columns(_("Note"), _("Gramps ID"))
        for obj in database.iter_notes():
            stab.row(obj, obj.gramps_id)
            matches += 1
    elif (filter_name == 'males'):
        stab.columns(_("Person"), _("Birth Date"), _("Name type"))
        for person in database.iter_people():
            if person.gender == Person.MALE:
                stab.row(person, sdb.birth_or_fallback(person),
                         str(person.get_primary_name().get_type()))
                matches += 1
    elif (filter_name == 'females'):
        stab.columns(_("Person"), _("Birth Date"), _("Name type"))
        for person in database.iter_people():
            if person.gender == Person.FEMALE:
                stab.row(person, sdb.birth_or_fallback(person),
                         str(person.get_primary_name().get_type()))
                matches += 1
    elif (filter_name == 'people with unknown gender'):
        stab.columns(_("Person"), _("Birth Date"), _("Name type"))
        for person in database.iter_people():
            if person.gender not in [Person.FEMALE, Person.MALE]:
                stab.row(person, sdb.birth_or_fallback(person),
                         str(person.get_primary_name().get_type()))
                matches += 1
    elif (filter_name == 'incomplete names'):
        stab.columns(_("Name"), _("Birth Date"), _("Name type"))
        for person in database.iter_people():
            # Check the primary name and every alternate name for a missing
            # first name or a missing surname.
            for name in [person.get_primary_name()] + person.get_alternate_names():
                if name.get_first_name().strip() == "":
                    stab.row([name.get_name(), "Person", person.handle], sdb.birth_or_fallback(person),
                             str(name.get_type()))
                    matches += 1
                else:
                    if name.get_surname_list():
                        for surname in name.get_surname_list():
                            if surname.get_surname().strip() == "":
                                stab.row([name.get_first_name(), "Person", person.handle], sdb.birth_or_fallback(person),
                                         str(name.get_type()))
                                matches += 1
                    else:
                        # No surname entries at all counts as incomplete.
                        stab.row([name.get_first_name(), "Person", person.handle], sdb.birth_or_fallback(person),
                                 str(name.get_type()))
                        matches += 1
    elif (filter_name == 'people with missing birth dates'):
        stab.columns(_("Person"), _("Type"))
        for person in database.iter_people():
            birth_ref = person.get_birth_ref()
            if birth_ref:
                birth = database.get_event_from_handle(birth_ref.ref)
                if not get_date(birth):
                    stab.row(person, _("birth event but no date"))
                    matches += 1
            else:
                stab.row(person, _("missing birth event"))
                matches += 1
    elif (filter_name == 'disconnected people'):
        stab.columns(_("Person"), _("Birth Date"), _("Name type"))
        for person in database.iter_people():
            # Disconnected: no parents' family and no own family.
            if ((not person.get_main_parents_family_handle()) and
                    (not len(person.get_family_handle_list()))):
                stab.row(person, sdb.birth_or_fallback(person),
                         str(person.get_primary_name().get_type()))
                matches += 1
    elif (filter_name == 'unique surnames'):
        namelist = defaultdict(int)
        for person in database.iter_people():
            names = [person.get_primary_name()] + person.get_alternate_names()
            surnames = list(set([name.get_group_name() for name in names]))
            for surname in surnames:
                namelist[surname] += 1
        stab.columns(_("Surname"), _("Count"))
        for name in sorted(namelist):
            stab.row(name, namelist[name])
            matches += 1
        # Double-clicking a surname drills down into the samesurnames report.
        stab.set_callback("leftdouble",
                          lambda name: run_quick_report_by_name_direct("samesurnames",
                                                                       database,
                                                                       document,
                                                                       name))
    elif (filter_name == 'people with media'):
        stab.columns(_("Person"), _("Media count"))
        for person in database.iter_people():
            length = len(person.get_media_list())
            if length > 0:
                stab.row(person, str(length))
                matches += 1
    elif (filter_name == 'media references'):
        stab.columns(_("Person"), _("Reference"))
        for person in database.iter_people():
            medialist = person.get_media_list()
            for item in medialist:
                stab.row(person, _("media"))
                matches += 1
    elif (filter_name == 'unique media'):
        stab.columns(_("Unique Media"))
        for photo in database.iter_media_objects():
            fullname = media_path_full(database, photo.get_path())
            stab.row(fullname)
            matches += 1
    elif (filter_name == 'missing media'):
        stab.columns(_("Missing Media"))
        for photo in database.iter_media_objects():
            fullname = media_path_full(database, photo.get_path())
            # Fix: was a bare "except:", which also swallowed KeyboardInterrupt
            # and genuine programming errors; getsize raises OSError when the
            # file is missing or unreadable.
            try:
                posixpath.getsize(fullname)
            except OSError:
                stab.row(fullname)
                matches += 1
    elif (filter_name == 'media by size'):
        stab.columns(_("Media"), _("Size in bytes"))
        for photo in database.iter_media_objects():
            fullname = media_path_full(database, photo.get_path())
            # Fix: local was named "bytes", shadowing the builtin; and the
            # bare "except:" is narrowed to OSError (see 'missing media').
            try:
                num_bytes = posixpath.getsize(fullname)
                stab.row(fullname, str(num_bytes))
                matches += 1
            except OSError:
                pass
    elif (filter_name == 'list of people'):
        stab.columns(_("Person"), _("Birth Date"), _("Name type"))
        handles = kwargs["handles"]
        for person_handle in handles:
            person = database.get_person_from_handle(person_handle)
            stab.row(person, sdb.birth_or_fallback(person),
                     str(person.get_primary_name().get_type()))
            matches += 1
    else:
        raise AttributeError("invalid filter name: '%s'" % filter_name)
    # translators: leave all/any {...} untranslated
    sdoc.paragraph(ngettext("Filter matched {number_of} record.",
                            "Filter matched {number_of} records.", matches
                            ).format(number_of=matches) )
    sdoc.paragraph("")
    document.has_data = matches > 0
    if matches > 0:
        stab.write(sdoc)
|
pmghalvorsen/gramps_branch
|
gramps/plugins/quickview/filterbyname.py
|
Python
|
gpl-2.0
| 17,381
|
[
"Brian"
] |
ea2e621a5d48767c86c54d7823cd03258858854342e322a7a54a0f3fab65e3bb
|
#
# This file is part of PyOLab. https://github.com/matsselen/pyolab
# (C) 2017 Mats Selen <mats.selen@gmail.com>
#
# SPDX-License-Identifier: BSD-3-Clause
# (https://opensource.org/licenses/BSD-3-Clause)
#
# system stuff
import os
import sys
import time
from Tkinter import * #This interface allow us to draw windows
# local common code
sys.path.append('../PyOLabCode/')
from analClass import AnalysisClass
from pyolabGlobals import G
from userGlobals import U
from commMethods import *
from setupMethods import *
# local user code
from userMethods import *
"""
This is example code that creates the GUI, launches data
fetching and data analysis threads, and responds to user input.
"""
# start out by defining some functions that are needed by main()
#
# this little method constructs the [sensor, key+value] bytes needed by the setOutputConfig command
# (see the engineering docs for more info about this)
def skv(s, k, v):
    """Return the [sensor, key+value] byte pair for setOutputConfig.

    The key occupies the top 3 bits and the value the bottom 5 bits of the
    second byte (see the engineering docs for the packet layout).
    """
    packed = ((k & 0x7) << 5) | (v & 0x1F)
    return [s, packed]
# this is called whenever the slider controlling the DAC voltage is moved.
# it calls the setOutputConfig method that tells the DAQ to set the output voltage
def dacAction(val):
    """Slider callback: send the new DAC setting to the IOLab device.

    Tkinter passes the slider position as a string; it is converted to an
    int, packed with skv(), and sent via setOutputConfig.
    """
    setting = int(val)
    payload = skv(25, 1, setting) + skv(25, 0, 1)
    setOutputConfig(G.serialPort, payload, 1)
# this is called when the "Run/Pause" button is clicked
def b2Action():
    # Run/Pause button callback.  The button label doubles as the state
    # flag: ' Run ' means data acquisition is currently stopped.
    if U.b2['text'] == ' Run ':
        U.b2['text'] = ' Pause '
        # Only start acquiring if a sensor configuration has been set;
        # otherwise warn the user (Python 2 print statement, as used
        # throughout this module).
        if G.configIsSet:
            startData(G.serialPort)
        else:
            print "You need to set a configuration before acquiring data"
    else:
        U.b2['text'] = ' Run '
        stopData(G.serialPort)
# This is the main code rigth here - pretty exciting
def main():
    # Build the Tk GUI, start the IOLab acquisition threads, run the event
    # loop, and shut everything down when the window closes.
    # ======== START OF GUI SETUP ==============================
    root = Tk()
    root.title('IOLab')
    root.geometry('180x220')
    frame1 = Frame(root)
    frame1.pack()
    # sets up a slider that controls the DAC output voltage
    # NOTE(review): Scale(...).pack() returns None, so U.dac is always None.
    # Harmless as long as U.dac is never read -- confirm before relying on it.
    U.dac = Scale(frame1, from_=0, to=31, resolution=1 , label='DAC setting', orient=HORIZONTAL, command=dacAction).pack()
    # sets up a button to start & stop the data acquisition
    U.b2 = Button(frame1, text=' Run ', command=b2Action)
    U.b2.pack(side=TOP, fill=NONE)
    # leave a space between the button and the voltage displays
    Label(frame1, text=' ').pack()
    # set up and show the voltage displays (the StringVars are updated by
    # the user analysis callbacks)
    U.txtA7 = StringVar(frame1)
    U.txtA8 = StringVar(frame1)
    U.txtA9 = StringVar(frame1)
    labelA7=Label(frame1, textvariable=U.txtA7, font="TkHeadingFont 20").pack(side=TOP)
    labelA8=Label(frame1, textvariable=U.txtA8, font="TkHeadingFont 20").pack(side=TOP)
    labelA9=Label(frame1, textvariable=U.txtA9, font="TkHeadingFont 20").pack(side=TOP)
    # ======== END OF GUI SETUP =================
    # set up IOLab user callback routines (start / end / per-loop analysis hooks)
    analClass = AnalysisClass(analUserStart, analUserEnd, analUserLoop)
    # start up the IOLab data acquisition stuff; bail out if the device
    # cannot be opened
    if not startItUp():
        print "Problems getting things started...bye"
        os._exit(1)
    #-------------------------------------------
    # this is the main GUI event loop (blocks until the window is closed)
    root.mainloop()
    #-------------------------------------------
    # when we get to this point it means we have quit the GUI
    print "Quitting..."
    # shut down the IOLab data acquisition
    shutItDown()
#=====================================================
# run the above main() code
if __name__ == "__main__":
    # Entry point when invoked as a script (not on import).
    main()
|
matsselen/pyolab
|
AnalogExample/userExample.py
|
Python
|
bsd-3-clause
| 3,649
|
[
"exciting"
] |
001712a5ebd29c0f15d49b78eb9bfb91c0f16688dbb5b6b8d59286fa6b95408c
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements Kalman filtering for linear state space models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
# TODO(allenl): support for always-factored covariance matrices
class KalmanFilter(object):
  """Inference on linear state models.

  The model for observations in a given state is:
    observation(t) = observation_model * state(t)
        + Gaussian(0, observation_noise_covariance)

  State updates take the following form:
    state(t) = state_transition * state(t-1)
        + state_noise_transform * Gaussian(0, state_transition_noise_covariance)

  This is a real-valued analog to hidden Markov models, with linear transitions
  and a Gaussian noise model. Given initial conditions, noise, and state
  transition, Kalman filtering recursively estimates states and observations,
  along with their associated uncertainty. When fed observations, future state
  and uncertainty estimates are conditioned on those observations (in a Bayesian
  sense).

  Typically some "given"s mentioned above (noises) will be unknown, and so
  optimizing the Kalman filter's probabilistic predictions with respect to these
  parameters is a good approach. The state transition and observation models are
  usually known a priori as a modeling decision.
  """

  def __init__(self, dtype=dtypes.float32,
               simplified_posterior_covariance_computation=False):
    """Initialize the Kalman filter.

    Args:
      dtype: The data type to use for floating point tensors.
      simplified_posterior_covariance_computation: If True, uses an algebraic
        simplification of the Kalman filtering posterior covariance update,
        which is slightly faster at the cost of numerical stability. The
        simplified update is often stable when using double precision on small
        models or with fixed transition matrices.
    """
    self._simplified_posterior_covariance_computation = (
        simplified_posterior_covariance_computation)
    self.dtype = dtype

  def do_filter(
      self, estimated_state, estimated_state_covariance,
      predicted_observation, predicted_observation_covariance,
      observation, observation_model, observation_noise):
    """Convenience function for scoring predictions.

    Scores a prediction against an observation, and computes the updated
    posterior over states.

    Shapes given below for arguments are for single-model Kalman filtering
    (e.g. KalmanFilter). For ensembles, prior_state and prior_state_var are
    same-length tuples of values corresponding to each model.

    Args:
      estimated_state: A prior mean over states [batch size x state dimension]
      estimated_state_covariance: Covariance of state prior [batch size x D x
          D], with D depending on the Kalman filter implementation (typically
          the state dimension).
      predicted_observation: A prediction for the observed value, such as that
          returned by observed_from_state. A [batch size x num features] Tensor.
      predicted_observation_covariance: A covariance matrix corresponding to
          `predicted_observation`, a [batch size x num features x num features]
          Tensor.
      observation: The observed value corresponding to the predictions
          given [batch size x observation dimension]
      observation_model: The [batch size x observation dimension x model state
          dimension] Tensor indicating how a particular state is mapped to
          (pre-noise) observations for each part of the batch.
      observation_noise: A [batch size x observation dimension x observation
          dimension] Tensor or [observation dimension x observation dimension]
          Tensor with covariance matrices to use for each part of the batch (a
          two-dimensional input will be broadcast).

    Returns:
      posterior_state, posterior_state_var: Posterior mean and
          covariance, updated versions of prior_state and
          prior_state_var.
      log_prediction_prob: Log probability of the observations under
          the priors, suitable for optimization (should be maximized).
    """
    # Symmetrize to guard against small numerical asymmetries before the
    # finiteness check and Cholesky factorization below.
    symmetrized_observation_covariance = 0.5 * (
        predicted_observation_covariance + array_ops.matrix_transpose(
            predicted_observation_covariance))
    instability_message = (
        "This may occur due to numerically unstable filtering when there is "
        "a large difference in posterior variances, or when inferences are "
        "near-deterministic. Considering tuning the "
        "'filtering_maximum_posterior_variance_ratio' or "
        "'filtering_minimum_posterior_variance' parameters in your "
        "StateSpaceModelConfiguration, or tuning the transition matrix.")
    symmetrized_observation_covariance = numerics.verify_tensor_all_finite(
        symmetrized_observation_covariance,
        "Predicted observation covariance was not finite. {}".format(
            instability_message))
    diag = array_ops.matrix_diag_part(symmetrized_observation_covariance)
    min_diag = math_ops.reduce_min(diag)
    # Fail fast with a clear message if the covariance is not positive
    # semi-definite on its diagonal (Cholesky would fail more cryptically).
    non_negative_assert = control_flow_ops.Assert(
        min_diag >= 0.,
        [("The predicted observation covariance "
          "has a negative diagonal entry. {}").format(instability_message),
         min_diag])
    with ops.control_dependencies([non_negative_assert]):
      observation_covariance_cholesky = linalg_ops.cholesky(
          symmetrized_observation_covariance)
    log_prediction_prob = distributions.MultivariateNormalTriL(
        predicted_observation, observation_covariance_cholesky).log_prob(
            observation)
    (posterior_state,
     posterior_state_var) = self.posterior_from_prior_state(
         prior_state=estimated_state,
         prior_state_var=estimated_state_covariance,
         observation=observation,
         observation_model=observation_model,
         predicted_observations=(predicted_observation,
                                 predicted_observation_covariance),
         observation_noise=observation_noise)
    return (posterior_state, posterior_state_var, log_prediction_prob)

  def predict_state_mean(self, prior_state, transition_matrices):
    """Compute state transitions.

    Args:
      prior_state: Current estimated state mean [batch_size x state_dimension]
      transition_matrices: A [batch size, state dimension, state dimension]
        batch of matrices (dtype matching the `dtype` argument to the
        constructor) with the transition matrix raised to the power of the
        number of steps to be taken (not element-wise; use
        math_utils.matrix_to_powers if there is no efficient special case) if
        more than one step is desired.
    Returns:
      State mean advanced based on `transition_matrices` (dimensions matching
      first argument).
    """
    # Matrix-vector product done as matmul with a trailing singleton
    # dimension, then squeezed back to a vector.
    advanced_state = array_ops.squeeze(
        math_ops.matmul(
            transition_matrices,
            prior_state[..., None]),
        axis=[-1])
    return advanced_state

  def predict_state_var(
      self, prior_state_var, transition_matrices, transition_noise_sums):
    r"""Compute variance for state transitions.

    Computes a noise estimate corresponding to the value returned by
    predict_state_mean.

    Args:
      prior_state_var: Covariance matrix specifying uncertainty of current state
          estimate [batch size x state dimension x state dimension]
      transition_matrices: A [batch size, state dimension, state dimension]
        batch of matrices (dtype matching the `dtype` argument to the
        constructor) with the transition matrix raised to the power of the
        number of steps to be taken (not element-wise; use
        math_utils.matrix_to_powers if there is no efficient special case).
      transition_noise_sums: A [batch size, state dimension, state dimension]
        Tensor (dtype matching the `dtype` argument to the constructor) with:

          \sum_{i=0}^{num_steps - 1} (
             state_transition_to_powers_fn(i)
             * state_transition_noise_covariance
             * state_transition_to_powers_fn(i)^T
          )

        for the number of steps to be taken in each part of the batch (this
        should match `transition_matrices`). Use math_utils.power_sums_tensor
        with `tf.gather` if there is no efficient special case.
    Returns:
      State variance advanced based on `transition_matrices` and
      `transition_noise_sums` (dimensions matching first argument).
    """
    # A * P * A^T, the standard covariance propagation through a linear map.
    prior_variance_transitioned = math_ops.matmul(
        math_ops.matmul(transition_matrices, prior_state_var),
        transition_matrices,
        adjoint_b=True)
    return prior_variance_transitioned + transition_noise_sums

  def posterior_from_prior_state(self, prior_state, prior_state_var,
                                 observation, observation_model,
                                 predicted_observations,
                                 observation_noise):
    """Compute a posterior over states given an observation.

    Args:
      prior_state: Prior state mean [batch size x state dimension]
      prior_state_var: Prior state covariance [batch size x state dimension x
          state dimension]
      observation: The observed value corresponding to the predictions given
          [batch size x observation dimension]
      observation_model: The [batch size x observation dimension x model state
          dimension] Tensor indicating how a particular state is mapped to
          (pre-noise) observations for each part of the batch.
      predicted_observations: An (observation mean, observation variance) tuple
          computed based on the current state, usually the output of
          observed_from_state.
      observation_noise: A [batch size x observation dimension x observation
          dimension] or [observation dimension x observation dimension] Tensor
          with covariance matrices to use for each part of the batch (a
          two-dimensional input will be broadcast).

    Returns:
      Posterior mean and covariance (dimensions matching the first two
      arguments).
    """
    observed_mean, observed_var = predicted_observations
    residual = observation - observed_mean
    # TODO(allenl): Can more of this be done using matrix_solve_ls?
    kalman_solve_rhs = math_ops.matmul(
        observation_model, prior_state_var, adjoint_b=True)
    # This matrix_solve adjoint doesn't make a difference symbolically (since
    # observed_var is a covariance matrix, and should be symmetric), but
    # filtering on multivariate series is unstable without it. See
    # test_multivariate_symmetric_covariance_float64 in kalman_filter_test.py
    # for an example of the instability (fails with adjoint=False).
    kalman_gain_transposed = linalg_ops.matrix_solve(
        matrix=observed_var, rhs=kalman_solve_rhs, adjoint=True)
    posterior_state = prior_state + array_ops.squeeze(
        math_ops.matmul(
            kalman_gain_transposed,
            array_ops.expand_dims(residual, -1),
            adjoint_a=True),
        axis=[-1])
    gain_obs = math_ops.matmul(
        kalman_gain_transposed, observation_model, adjoint_a=True)
    identity_extradim = linalg_ops.eye(
        array_ops.shape(gain_obs)[1], dtype=gain_obs.dtype)[None]
    identity_minus_factor = identity_extradim - gain_obs
    if self._simplified_posterior_covariance_computation:
      # posterior covariance =
      #   (I - kalman_gain * observation_model) * prior_state_var
      posterior_state_var = math_ops.matmul(identity_minus_factor,
                                            prior_state_var)
    else:
      observation_noise = ops.convert_to_tensor(observation_noise)
      # A Joseph form update, which provides better numeric stability than the
      # simplified optimal Kalman gain update, at the cost of a few extra
      # operations. Joseph form updates are valid for any gain (not just the
      # optimal Kalman gain), and so are more forgiving of numerical errors in
      # computing the optimal Kalman gain.
      #
      # posterior covariance =
      #   (I - kalman_gain * observation_model) * prior_state_var
      #     * (I - kalman_gain * observation_model)^T
      #   + kalman_gain * observation_noise * kalman_gain^T
      left_multiplied_state_var = math_ops.matmul(identity_minus_factor,
                                                  prior_state_var)
      multiplied_state_var = math_ops.matmul(
          identity_minus_factor, left_multiplied_state_var, adjoint_b=True)
      # Two implementations of the noise term, chosen by the (possibly only
      # dynamically known) rank of observation_noise: batched [B x D x D]
      # noise versus a single broadcast [D x D] matrix.
      def _batch_observation_noise_update():
        return (multiplied_state_var + math_ops.matmul(
            math_ops.matmul(
                kalman_gain_transposed, observation_noise, adjoint_a=True),
            kalman_gain_transposed))
      def _matrix_observation_noise_update():
        return (multiplied_state_var + math_ops.matmul(
            math_utils.batch_times_matrix(
                kalman_gain_transposed, observation_noise, adj_x=True),
            kalman_gain_transposed))
      if observation_noise.get_shape().ndims is None:
        posterior_state_var = control_flow_ops.cond(
            math_ops.equal(array_ops.rank(observation_noise), 2),
            _matrix_observation_noise_update, _batch_observation_noise_update)
      else:
        # If static shape information exists, it gets checked in each cond()
        # branch, so we need a special case to avoid graph-build-time
        # exceptions.
        if observation_noise.get_shape().ndims == 2:
          posterior_state_var = _matrix_observation_noise_update()
        else:
          posterior_state_var = _batch_observation_noise_update()
    return posterior_state, posterior_state_var

  def observed_from_state(self, state_mean, state_var, observation_model,
                          observation_noise):
    """Compute an observation distribution given a state distribution.

    Args:
      state_mean: State mean vector [batch size x state dimension]
      state_var: State covariance [batch size x state dimension x state
          dimension]
      observation_model: The [batch size x observation dimension x model state
          dimension] Tensor indicating how a particular state is mapped to
          (pre-noise) observations for each part of the batch.
      observation_noise: A [batch size x observation dimension x observation
          dimension] Tensor with covariance matrices to use for each part of
          the batch. To remove observation noise, pass a Tensor of zeros (or
          simply 0, which will broadcast).
    Returns:
      observed_mean: Observation mean vector [batch size x observation
          dimension]
      observed_var: Observation covariance [batch size x observation dimension
          x observation dimension]
    """
    # mean: H * x; var: H * P * H^T + R.
    observed_mean = array_ops.squeeze(
        math_ops.matmul(
            array_ops.expand_dims(state_mean, 1),
            observation_model,
            adjoint_b=True),
        axis=[1])
    observed_var = math_ops.matmul(
        math_ops.matmul(observation_model, state_var),
        observation_model,
        adjoint_b=True)
    observed_var += observation_noise
    return observed_mean, observed_var
|
nburn42/tensorflow
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/kalman_filter.py
|
Python
|
apache-2.0
| 16,368
|
[
"Gaussian"
] |
218dca56297392b41c8929a781fff431217b44aa8ab58fa2537361204567474d
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for deploying apps to an app server.
Currently, the application only uploads new appversions. To do this, it first
walks the directory tree rooted at the path the user specifies, adding all the
files it finds to a list. It then uploads the application configuration
(app.yaml) to the server using HTTP, followed by uploading each of the files.
It then commits the transaction with another request.
The bulk of this work is handled by the AppVersionUpload class, which exposes
methods to add to the list of files, fetch a list of modified files, upload
files, and commit or rollback the transaction.
"""
import calendar
import datetime
import getpass
import logging
import mimetypes
import optparse
import os
import re
import sha
import sys
import tempfile
import time
import urllib2
import google
import yaml
from google.appengine.cron import groctimespecification
from google.appengine.api import appinfo
from google.appengine.api import croninfo
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_index
from google.appengine.tools import appengine_rpc
from google.appengine.tools import bulkloader
# Maximum number of files sent per /api/clone{files,blobs} request.
MAX_FILES_TO_CLONE = 100
# Delimiters used to serialize file tuples for the clone APIs
# (see BuildClonePostBody).
LIST_DELIMITER = "\n"
TUPLE_DELIMITER = "|"
# Location of the SDK VERSION file, relative to the 'google' package.
VERSION_FILE = "../VERSION"
# Seconds to wait on the update check before giving up.
UPDATE_CHECK_TIMEOUT = 3
# Name of the file (in the user's home directory) storing nag preferences.
NAG_FILE = ".appcfg_nag"
# Highest supported logging verbosity level.
MAX_LOG_LEVEL = 4
# Module-wide output verbosity; 0 silences StatusUpdate messages.
verbosity = 1
# Restrict app.yaml validation: only the "python" runtime is accepted, and
# only the API versions listed in GOOGLE_TEST_API_VERSIONS (default '1').
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = "python"
_api_versions = os.environ.get('GOOGLE_TEST_API_VERSIONS', '1')
_options = validation.Options(*_api_versions.split(','))
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.API_VERSION] = _options
# Temporaries are not part of the module's public surface.
del _api_versions, _options
def StatusUpdate(msg):
  """Print a status message to stderr.

  The message is suppressed when the module-level 'verbosity' is zero or
  negative.

  Args:
    msg: The string to print.
  """
  if verbosity <= 0:
    return
  print >>sys.stderr, msg
def GetMimeTypeIfStaticFile(config, filename):
  """Looks up the mime type for 'filename'.

  Uses the handlers in 'config' to determine if the file should
  be treated as a static file.

  Args:
    config: The app.yaml object to check the filename against.
    filename: The name of the file.

  Returns:
    The mime type string.  For example, 'text/plain' or 'image/gif'.
    None if this is not a static file.
  """
  # Handlers are checked in declaration order; the first static handler
  # whose pattern matches wins.
  for handler in config.handlers:
    handler_type = handler.GetHandlerType()
    if handler_type in ("static_dir", "static_files"):
      if handler_type == "static_dir":
        # static_dir gives a directory prefix; turn it into a regex that
        # matches anything under that directory.
        regex = os.path.join(re.escape(handler.GetHandler()), ".*")
      else:
        # static_files supplies its own upload regex.
        regex = handler.upload
      if re.match(regex, filename):
        if handler.mime_type is not None:
          # Explicit mime_type in app.yaml overrides guessing.
          return handler.mime_type
        else:
          guess = mimetypes.guess_type(filename)[0]
          if guess is None:
            default = "application/octet-stream"
            # Python 2 print statement, matching the rest of this module.
            print >>sys.stderr, ("Could not guess mimetype for %s. Using %s."
                                 % (filename, default))
            return default
          return guess
  # No static handler matched: the file is not static.
  return None
def BuildClonePostBody(file_tuples):
  """Build the post body for the /api/clone{files,blobs} urls.

  Args:
    file_tuples: A list of tuples.  Each tuple should contain the entries
      appropriate for the endpoint in question; the first entry is always
      the file path.

  Returns:
    A string containing the properly delimited tuples.
  """
  entries = [TUPLE_DELIMITER.join([tup[0]] + list(tup[1:]))
             for tup in file_tuples]
  return LIST_DELIMITER.join(entries)
class NagFile(validation.Validated):
  """A validated YAML class to represent the user's nag preferences.

  Attributes:
    timestamp: The timestamp of the last nag.
    opt_in: True if the user wants to check for updates on dev_appserver
      start.  False if not.  May be None if we have not asked the user yet.
  """

  # Schema enforced by the validation framework when loading/serializing.
  ATTRIBUTES = {
      "timestamp": validation.TYPE_FLOAT,
      "opt_in": validation.Optional(validation.TYPE_BOOL),
  }

  @staticmethod
  def Load(nag_file):
    """Load a single NagFile object where one and only one is expected.

    Args:
      nag_file: A file-like object or string containing the yaml data to parse.

    Returns:
      A NagFile instance.
    """
    return yaml_object.BuildSingleObject(NagFile, nag_file)
def GetVersionObject(isfile=os.path.isfile, open_fn=open):
  """Gets the version of the SDK by parsing the VERSION file.

  Args:
    isfile: Used for testing.
    open_fn: Used for testing.

  Returns:
    A Yaml object or None if the VERSION file does not exist.
  """
  version_path = os.path.join(os.path.dirname(google.__file__),
                              VERSION_FILE)
  if not isfile(version_path):
    logging.error("Could not find version file at %s", version_path)
    return None
  fh = open_fn(version_path, "r")
  try:
    return yaml.safe_load(fh)
  finally:
    fh.close()
def RetryWithBackoff(initial_delay, backoff_factor, max_tries, callable):
  """Calls a function multiple times, backing off more and more each time.

  The callable is invoked until it returns a true value or until max_tries
  delayed retries have been exhausted; as before, one final attempt is made
  after the last delay (so at most max_tries + 1 invocations occur).

  Args:
    initial_delay: Initial delay after first try, in seconds.
    backoff_factor: Delay will be multiplied by this factor after each try.
    max_tries: Maximum number of delayed retries after the first try.
    callable: The method to call, will pass no arguments.  (The name shadows
      the builtin but is kept for backward compatibility with callers that
      pass it by keyword.)

  Returns:
    True if the function succeeded in one of its tries, including the final
    attempt; False if every attempt failed.

  Raises:
    Whatever the function raises--an exception will immediately stop retries.
  """
  delay = initial_delay
  while True:
    if callable():
      # Bug fix: the previous implementation returned `max_tries > 0` after
      # the loop, which reported failure when the *final* attempt succeeded.
      return True
    if max_tries <= 0:
      return False
    StatusUpdate("Will check again in %s seconds." % delay)
    time.sleep(delay)
    delay *= backoff_factor
    max_tries -= 1
class UpdateCheck(object):
  """Determines if the local SDK is the latest version.

  Nags the user when there are updates to the SDK.  As the SDK becomes
  more out of date, the language in the nagging gets stronger.  We
  store a little yaml file in the user's home directory so that we nag
  the user only once a week.

  The yaml file has the following field:
    'timestamp': Last time we nagged the user in seconds since the epoch.

  Attributes:
    server: An AbstractRpcServer instance used to check for the latest SDK.
    config: The app's AppInfoExternal.  Needed to determine which api_version
      the app is using.
  """

  def __init__(self,
               server,
               config,
               isdir=os.path.isdir,
               isfile=os.path.isfile,
               open_fn=open):
    """Create a new UpdateCheck.

    Args:
      server: The AbstractRpcServer to use.
      config: The yaml object that specifies the configuration of this
        application.
      isdir: Replacement for os.path.isdir (for testing).
      isfile: Replacement for os.path.isfile (for testing).
      open_fn: Replacement for the open builtin (for testing).
    """
    self.server = server
    self.config = config
    self.isdir = isdir
    self.isfile = isfile
    self.open = open_fn

  @staticmethod
  def MakeNagFilename():
    """Returns the filename for the nag file for this user."""
    user_homedir = os.path.expanduser("~/")
    if not os.path.isdir(user_homedir):
      # On Windows "~" may not resolve to a real directory; point HOMEDRIVE
      # at the drive holding the Python installation so that the
      # expanduser() call below has something sensible to work with.
      drive, unused_tail = os.path.splitdrive(os.__file__)
      if drive:
        os.environ["HOMEDRIVE"] = drive
    return os.path.expanduser("~/" + NAG_FILE)

  def _ParseVersionFile(self):
    """Parse the local VERSION file.

    Returns:
      A Yaml object or None if the file does not exist.
    """
    return GetVersionObject(isfile=self.isfile, open_fn=self.open)

  def CheckSupportedVersion(self):
    """Determines if the app's api_version is supported by the SDK.

    Uses the api_version field from the AppInfoExternal to determine if
    the SDK supports that api_version.

    Raises:
      SystemExit if the api_version is not supported.
    """
    version = self._ParseVersionFile()
    if version is None:
      logging.error("Could not determine if the SDK supports the api_version "
                    "requested in app.yaml.")
      return
    if self.config.api_version not in version["api_versions"]:
      logging.critical("The api_version specified in app.yaml (%s) is not "
                       "supported by this release of the SDK. The supported "
                       "api_versions are %s.",
                       self.config.api_version, version["api_versions"])
      sys.exit(1)

  def CheckForUpdates(self):
    """Queries the server for updates and nags the user if appropriate.

    Queries the server for the latest SDK version at the same time reporting
    the local SDK version.  The server will respond with a yaml document
    containing the fields:
      "release": The name of the release (e.g. 1.2).
      "timestamp": The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
      "api_versions": A list of api_version strings (e.g. ['1', 'beta']).

    We will nag the user with increasing severity if:
    - There is a new release.
    - There is a new release with a new api_version.
    - There is a new release that does not support the api_version named in
      self.config.
    """
    version = self._ParseVersionFile()
    if version is None:
      logging.info("Skipping update check")
      return
    logging.info("Checking for updates to the SDK.")
    try:
      response = self.server.Send("/api/updatecheck",
                                  timeout=UPDATE_CHECK_TIMEOUT,
                                  release=version["release"],
                                  timestamp=version["timestamp"],
                                  api_versions=version["api_versions"])
    except urllib2.URLError, e:
      # The update check is best-effort; never block the user on it.
      logging.info("Update check failed: %s", e)
      return
    latest = yaml.safe_load(response)
    if latest["release"] == version["release"]:
      logging.info("The SDK is up to date.")
      return
    api_versions = latest["api_versions"]
    if self.config.api_version not in api_versions:
      # The app's api_version is gone entirely from the latest SDK: nag
      # unconditionally (force=True bypasses the once-a-week throttle).
      self._Nag(
          "The api version you are using (%s) is obsolete! You should\n"
          "upgrade your SDK and test that your code works with the new\n"
          "api version." % self.config.api_version,
          latest, version, force=True)
      return
    if self.config.api_version != api_versions[len(api_versions) - 1]:
      # Still supported, but no longer the newest api_version.
      self._Nag(
          "The api version you are using (%s) is deprecated. You should\n"
          "upgrade your SDK to try the new functionality." %
          self.config.api_version, latest, version)
      return
    self._Nag("There is a new release of the SDK available.",
              latest, version)

  def _ParseNagFile(self):
    """Parses the nag file.

    Returns:
      A NagFile if the file was present else None.
    """
    nag_filename = UpdateCheck.MakeNagFilename()
    if self.isfile(nag_filename):
      fh = self.open(nag_filename, "r")
      try:
        nag = NagFile.Load(fh)
      finally:
        fh.close()
      return nag
    return None

  def _WriteNagFile(self, nag):
    """Writes the NagFile to the user's nag file.

    If the destination path does not exist, this method will log an error
    and fail silently.

    Args:
      nag: The NagFile to write.
    """
    nagfilename = UpdateCheck.MakeNagFilename()
    try:
      fh = self.open(nagfilename, "w")
      try:
        fh.write(nag.ToYAML())
      finally:
        fh.close()
    except (OSError, IOError), e:
      # Best-effort persistence: losing the nag file only means the user
      # may be nagged again sooner than intended.
      logging.error("Could not write nag file to %s. Error: %s", nagfilename, e)

  def _Nag(self, msg, latest, version, force=False):
    """Prints a nag message and updates the nag file's timestamp.

    Because we don't want to nag the user everytime, we store a simple
    yaml document in the user's home directory.  If the timestamp in this
    doc is over a week old, we'll nag the user.  And when we nag the user,
    we update the timestamp in this doc.

    Args:
      msg: The formatted message to print to the user.
      latest: The yaml document received from the server.
      version: The local yaml version document.
      force: If True, always nag the user, ignoring the nag file.
    """
    nag = self._ParseNagFile()
    if nag and not force:
      last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
      if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
        logging.debug("Skipping nag message")
        return
    if nag is None:
      nag = NagFile()
    nag.timestamp = time.time()
    self._WriteNagFile(nag)
    print "****************************************************************"
    print msg
    print "-----------"
    print "Latest SDK:"
    print yaml.dump(latest)
    print "-----------"
    print "Your SDK:"
    print yaml.dump(version)
    print "-----------"
    print "Please visit http://code.google.com/appengine for the latest SDK"
    print "****************************************************************"

  def AllowedToCheckForUpdates(self, input_fn=raw_input):
    """Determines if the user wants to check for updates.

    On startup, the dev_appserver wants to check for updates to the SDK.
    Because this action reports usage to Google when the user is not
    otherwise communicating with Google (e.g. pushing a new app version),
    the user must opt in.

    If the user does not have a nag file, we will query the user and
    save the response in the nag file.  Subsequent calls to this function
    will re-use that response.

    Args:
      input_fn: Used to collect user input.  This is for testing only.

    Returns:
      True if the user wants to check for updates.  False otherwise.
    """
    nag = self._ParseNagFile()
    if nag is None:
      nag = NagFile()
      nag.timestamp = time.time()
    if nag.opt_in is None:
      answer = input_fn("Allow dev_appserver to check for updates on startup? "
                        "(Y/n): ")
      answer = answer.strip().lower()
      if answer == "n" or answer == "no":
        print ("dev_appserver will not check for updates on startup. To "
               "change this setting, edit %s" % UpdateCheck.MakeNagFilename())
        nag.opt_in = False
      else:
        # Anything other than an explicit "n"/"no" counts as opting in.
        print ("dev_appserver will check for updates on startup. To change "
               "this setting, edit %s" % UpdateCheck.MakeNagFilename())
        nag.opt_in = True
      self._WriteNagFile(nag)
    return nag.opt_in
class IndexDefinitionUpload(object):
  """Provides facilities to upload index definitions to the hosting service."""

  def __init__(self, server, config, definitions):
    """Creates a new IndexDefinitionUpload.

    Args:
      server: The RPC server to use.  Should be an instance of HttpRpcServer
        or TestRpcServer.
      config: The AppInfoExternal object derived from the app.yaml file.
      definitions: An IndexDefinitions object.
    """
    self.server = server
    self.config = config
    self.definitions = definitions

  def DoUpload(self):
    """Sends the index definitions to the /api/datastore/index/add endpoint."""
    StatusUpdate("Uploading index definitions.")
    payload = self.definitions.ToYAML()
    self.server.Send("/api/datastore/index/add",
                     app_id=self.config.application,
                     version=self.config.version,
                     payload=payload)
class CronEntryUpload(object):
  """Provides facilities to upload cron entries to the hosting service."""

  def __init__(self, server, config, cron):
    """Creates a new CronEntryUpload.

    Args:
      server: The RPC server to use.  Should be an instance of a subclass of
        AbstractRpcServer.
      config: The AppInfoExternal object derived from the app.yaml file.
      cron: The CronInfoExternal object loaded from the cron.yaml file.
    """
    self.server = server
    self.config = config
    self.cron = cron

  def DoUpload(self):
    """Sends the cron entries to the /api/datastore/cron/update endpoint."""
    StatusUpdate("Uploading cron entries.")
    payload = self.cron.ToYAML()
    self.server.Send("/api/datastore/cron/update",
                     app_id=self.config.application,
                     version=self.config.version,
                     payload=payload)
class IndexOperation(object):
  """Provide facilities for writing Index operation commands."""

  def __init__(self, server, config):
    """Creates a new IndexOperation.

    Args:
      server: The RPC server to use.  Should be an instance of HttpRpcServer
        or TestRpcServer.
      config: appinfo.AppInfoExternal configuration object.
    """
    self.server = server
    self.config = config

  def DoDiff(self, definitions):
    """Retrieve diff file from the server.

    Args:
      definitions: datastore_index.IndexDefinitions as loaded from the user's
        index.yaml file.

    Returns:
      A pair of datastore_index.IndexDefinitions objects.  The first record
      is the set of indexes that are present in the index.yaml file but missing
      from the server.  The second record is the set of indexes that are
      present on the server but missing from the index.yaml file (indicating
      that these indexes should probably be vacuumed).
    """
    StatusUpdate("Fetching index definitions diff.")
    diff_response = self.server.Send("/api/datastore/index/diff",
                                     app_id=self.config.application,
                                     payload=definitions.ToYAML())
    return datastore_index.ParseMultipleIndexDefinitions(diff_response)

  def DoDelete(self, definitions):
    """Delete indexes from the server.

    Args:
      definitions: Index definitions to delete from datastore.

    Returns:
      A single datastore_index.IndexDefinitions containing indexes that were
      not deleted, probably because they were already removed.  This may
      be normal behavior as there is a potential race condition between
      fetching the index-diff and sending deletion confirmation through.
    """
    StatusUpdate("Deleting selected index definitions.")
    delete_response = self.server.Send("/api/datastore/index/delete",
                                       app_id=self.config.application,
                                       payload=definitions.ToYAML())
    return datastore_index.ParseIndexDefinitions(delete_response)
class VacuumIndexesOperation(IndexOperation):
  """Provide facilities to request the deletion of datastore indexes."""

  def __init__(self, server, config, force,
               confirmation_fn=raw_input):
    """Creates a new VacuumIndexesOperation.

    Args:
      server: The RPC server to use.  Should be an instance of HttpRpcServer
        or TestRpcServer.
      config: appinfo.AppInfoExternal configuration object.
      force: True to force deletion of indexes, else False.
      confirmation_fn: Function used for getting input form user.
    """
    super(VacuumIndexesOperation, self).__init__(server, config)
    self.force = force
    self.confirmation_fn = confirmation_fn

  def GetConfirmation(self, index):
    """Get confirmation from user to delete an index.

    This method will enter an input loop until the user provides a
    response it is expecting.  Valid input is one of three responses:

      y: Confirm deletion of index.
      n: Do not delete index.
      a: Delete all indexes without asking for further confirmation.

    If the user enters nothing at all, the default action is to skip
    that index and do not delete.

    If the user selects 'a', as a side effect, the 'force' flag is set.

    Args:
      index: Index to confirm.

    Returns:
      True if user enters 'y' or 'a'.  False if user enters 'n'.
    """
    while True:
      print "This index is no longer defined in your index.yaml file."
      print
      print index.ToYAML()
      print
      confirmation = self.confirmation_fn(
          "Are you sure you want to delete this index? (N/y/a): ")
      confirmation = confirmation.strip().lower()
      if confirmation == "y":
        return True
      elif confirmation == "n" or not confirmation:
        # Empty input defaults to "no".
        return False
      elif confirmation == "a":
        # "all": delete this one and stop asking about the rest.
        self.force = True
        return True
      else:
        print "Did not understand your response."

  def DoVacuum(self, definitions):
    """Vacuum indexes in datastore.

    This method will query the server to determine which indexes are not
    being used according to the user's local index.yaml file.  Once it has
    made this determination, it confirms with the user which unused indexes
    should be deleted.  Once confirmation for each index is receives, it
    deletes those indexes.

    Because another user may in theory delete the same indexes at the same
    time as the user, there is a potential race condition.  In this rare cases,
    some of the indexes previously confirmed for deletion will not be found.
    The user is notified which indexes these were.

    Args:
      definitions: datastore_index.IndexDefinitions as loaded from users
        index.yaml file.
    """
    # Only the second half of the diff matters here: indexes present on the
    # server but absent from the local index.yaml.
    unused_new_indexes, notused_indexes = self.DoDiff(definitions)
    deletions = datastore_index.IndexDefinitions(indexes=[])
    if notused_indexes.indexes is not None:
      for index in notused_indexes.indexes:
        if self.force or self.GetConfirmation(index):
          deletions.indexes.append(index)
    if deletions.indexes:
      not_deleted = self.DoDelete(deletions)
      # Warn about indexes that disappeared between the diff and the delete
      # (somebody else may have removed them concurrently).
      if not_deleted.indexes:
        not_deleted_count = len(not_deleted.indexes)
        if not_deleted_count == 1:
          warning_message = ("An index was not deleted. Most likely this is "
                             "because it no longer exists.\n\n")
        else:
          warning_message = ("%d indexes were not deleted. Most likely this "
                             "is because they no longer exist.\n\n"
                             % not_deleted_count)
        for index in not_deleted.indexes:
          warning_message += index.ToYAML()
        logging.warning(warning_message)
class LogsRequester(object):
  """Provide facilities to export request logs."""

  def __init__(self, server, config, output_file,
               num_days, append, severity, now):
    """Constructor.

    Args:
      server: The RPC server to use.  Should be an instance of HttpRpcServer
        or TestRpcServer.
      config: appinfo.AppInfoExternal configuration object.
      output_file: Output file name.
      num_days: Number of days worth of logs to export; 0 for all available.
      append: True if appending to an existing file.
      severity: App log severity to request (0-4); None for no app logs.
      now: POSIX timestamp used for calculating valid dates for num_days.
    """
    self.server = server
    self.config = config
    self.output_file = output_file
    self.append = append
    self.num_days = num_days
    self.severity = severity
    # NOTE(review): presumably the logs endpoint expects "<major>.1" as the
    # version identifier -- confirm against the server API.
    self.version_id = self.config.version + ".1"
    self.sentinel = None
    self.write_mode = "w"
    if self.append:
      # In append mode, stop downloading once we reach the newest line
      # already present in the output file (the sentinel).
      self.sentinel = FindSentinel(self.output_file)
      self.write_mode = "a"
    self.valid_dates = None
    if self.num_days:
      # Precompile a regex matching any acceptable request date; both the
      # numeric-month and abbreviated-month spellings are generated.
      patterns = []
      now = PacificTime(now)
      for i in xrange(self.num_days):
        then = time.gmtime(now - 24*3600 * i)
        patterns.append(re.escape(time.strftime("%d/%m/%Y", then)))
        patterns.append(re.escape(time.strftime("%d/%b/%Y", then)))
      self.valid_dates = re.compile(r"[^[]+\[(" + "|".join(patterns) + r"):")

  def DownloadLogs(self):
    """Download the requested logs.

    This will write the logs to the file designated by
    self.output_file, or to stdout if the filename is '-'.

    Multiple roundtrips to the server may be made.
    """
    StatusUpdate("Downloading request logs for %s %s." %
                 (self.config.application, self.version_id))
    # The server returns logs newest-first; buffer them in a temp file and
    # reverse at the end so the output ends up oldest-first.
    tf = tempfile.TemporaryFile()
    offset = None
    try:
      while True:
        try:
          offset = self.RequestLogLines(tf, offset)
          if not offset:
            break
        except KeyboardInterrupt:
          StatusUpdate("Keyboard interrupt; saving data downloaded so far.")
          break
      StatusUpdate("Copying request logs to %r." % self.output_file)
      if self.output_file == "-":
        of = sys.stdout
      else:
        try:
          of = open(self.output_file, self.write_mode)
        except IOError, err:
          StatusUpdate("Can't write %r: %s." % (self.output_file, err))
          sys.exit(1)
      try:
        line_count = CopyReversedLines(tf, of)
      finally:
        of.flush()
        if of is not sys.stdout:
          of.close()
    finally:
      tf.close()
    StatusUpdate("Copied %d records." % line_count)

  def RequestLogLines(self, tf, offset):
    """Make a single roundtrip to the server.

    Args:
      tf: Writable binary stream to which the log lines returned by
        the server are written, stripped of headers, and excluding
        lines skipped due to self.sentinel or self.valid_dates filtering.
      offset: Offset string for a continued request; None for the first.

    Returns:
      The offset string to be used for the next request, if another
      request should be issued; or None, if not.
    """
    logging.info("Request with offset %r.", offset)
    kwds = {"app_id": self.config.application,
            "version": self.version_id,
            "limit": 100,
            }
    if offset:
      kwds["offset"] = offset
    if self.severity is not None:
      kwds["severity"] = str(self.severity)
    response = self.server.Send("/api/request_logs", payload=None, **kwds)
    # App-log continuation lines arrive as "\r"; convert to "\0" so that
    # CopyReversedLines can later rewrite them as "\n\t" sub-lines.
    response = response.replace("\r", "\0")
    lines = response.splitlines()
    logging.info("Received %d bytes, %d records.", len(response), len(lines))
    offset = None
    # A leading "#" header line may carry the continuation offset.
    if lines and lines[0].startswith("#"):
      match = re.match(r"^#\s*next_offset=(\S+)\s*$", lines[0])
      del lines[0]
      if match:
        offset = match.group(1)
    # A trailing "#" line is a footer; drop it.
    if lines and lines[-1].startswith("#"):
      del lines[-1]
    valid_dates = self.valid_dates
    sentinel = self.sentinel
    len_sentinel = None
    if sentinel:
      len_sentinel = len(sentinel)
    for line in lines:
      # Stop entirely (return None) at the sentinel line already present in
      # the output file, or at the first line outside the date window.
      if ((sentinel and
           line.startswith(sentinel) and
           line[len_sentinel : len_sentinel+1] in ("", "\0")) or
          (valid_dates and not valid_dates.match(line))):
        return None
      tf.write(line + "\n")
    if not lines:
      return None
    return offset
def PacificTime(now):
  """Helper to return the number of seconds between UTC and Pacific time.

  This is needed to compute today's date in Pacific time (more
  specifically: Mountain View local time), which is how request logs
  are reported.  (Google servers always report times in Mountain View
  local time, regardless of where they are physically located.)

  This takes (post-2006) US DST into account.  Pacific time is either
  8 hours or 7 hours west of UTC, depending on whether DST is in
  effect.  Since 2007, US DST starts on the second Sunday in March,
  and ends on the first Sunday in November.  (Reference:
  http://aa.usno.navy.mil/faq/docs/daylight_time.php.)

  Note that the server doesn't report its local time (the HTTP Date
  header uses UTC), and the client's local time is irrelevant.

  Args:
    now: A posix timestamp giving current UTC time.

  Returns:
    A pseudo-posix timestamp giving current Pacific time.  Passing
    this through time.gmtime() will produce a tuple in Pacific local
    time.
  """
  # Start from PST (UTC-8), then add the DST hour back if applicable.
  pacific = now - 8 * 3600
  if IsPacificDST(pacific):
    pacific += 3600
  return pacific
def IsPacificDST(now):
  """Helper for PacificTime to decide whether now is Pacific DST (PDT).

  Args:
    now: A pseudo-posix timestamp giving current time in PST.

  Returns:
    True if now falls within the range of DST, False otherwise.
  """
  day_seconds = 24 * 3600
  sunday = 6
  pst = time.gmtime(now)
  year = pst[0]
  assert year >= 2007
  # DST begins at 2am on the second Sunday in March; March 8 is the
  # earliest date that can be, so advance from there to a Sunday.
  dst_start = calendar.timegm((year, 3, 8, 2, 0, 0, 0, 0, 0))
  while time.gmtime(dst_start).tm_wday != sunday:
    dst_start += day_seconds
  # DST ends at 2am on the first Sunday in November.
  dst_end = calendar.timegm((year, 11, 1, 2, 0, 0, 0, 0, 0))
  while time.gmtime(dst_end).tm_wday != sunday:
    dst_end += day_seconds
  return dst_start <= now < dst_end
def CopyReversedLines(instream, outstream, blocksize=2**16):
  r"""Copy lines from input stream to output stream in reverse order.

  As a special feature, null bytes in the input are turned into
  newlines followed by tabs in the output, but these "sub-lines"
  separated by null bytes are not reversed.  E.g.  If the input is
  "A\0B\nC\0D\n", the output is "C\n\tD\nA\n\tB\n".

  Args:
    instream: A seekable stream open for reading in binary mode.
    outstream: A stream open for writing; doesn't have to be seekable or
      binary.
    blocksize: Optional block size for buffering, for unit testing.

  Returns:
    The number of lines copied.
  """
  line_count = 0
  instream.seek(0, 2)  # Seek to EOF to learn the stream length.
  last_block = instream.tell() // blocksize
  # Partial first line of a block: it is completed by the preceding
  # (earlier-in-file) block, so it is carried over and re-joined there.
  spillover = ""
  # Walk the blocks from the end of the file towards the beginning.
  for iblock in xrange(last_block + 1, -1, -1):
    instream.seek(iblock * blocksize)
    data = instream.read(blocksize)
    lines = data.splitlines(True)
    # Re-split this block's last line joined with the spillover from the
    # previously processed (later) block, in case it was cut mid-line.
    lines[-1:] = "".join(lines[-1:] + [spillover]).splitlines(True)
    if lines and not lines[-1].endswith("\n"):
      # Ensure the file's final line is newline-terminated in the output.
      lines[-1] += "\n"
    lines.reverse()
    if lines and iblock > 0:
      # The first line of this block may continue in the preceding block;
      # hold it back as spillover instead of emitting it now.
      spillover = lines.pop()
    if lines:
      line_count += len(lines)
      # Expand "\0" markers into "\n\t" continuation sub-lines (see above).
      data = "".join(lines).replace("\0", "\n\t")
      outstream.write(data)
  return line_count
def FindSentinel(filename, blocksize=2**16):
  """Return the sentinel line from the output file.

  Args:
    filename: The filename of the output file.  (We'll read this file.)
    blocksize: Optional block size for buffering, for unit testing.

  Returns:
    The contents of the last line in the file that doesn't start with
    a tab, with its trailing newline stripped; or None if the file
    couldn't be opened or no such line could be found by inspecting
    the last 'blocksize' bytes of the file.
  """
  if filename == "-":
    StatusUpdate("Can't combine --append with output to stdout.")
    sys.exit(2)
  try:
    fp = open(filename, "rb")
  except IOError, err:
    StatusUpdate("Append mode disabled: can't read %r: %s." % (filename, err))
    return None
  try:
    # Only inspect the last blocksize bytes of the file.
    fp.seek(0, 2)
    fp.seek(max(0, fp.tell() - blocksize))
    lines = fp.readlines()
    # The first line may have been truncated by the seek; discard it.
    del lines[:1]
    sentinel = None
    for line in lines:
      # App log lines are tab-indented continuations; the sentinel is the
      # last request-log (non-tab) line.
      if not line.startswith("\t"):
        sentinel = line
    if not sentinel:
      StatusUpdate("Append mode disabled: can't find sentinel in %r." %
                   filename)
      return None
    return sentinel.rstrip("\n")
  finally:
    fp.close()
class AppVersionUpload(object):
  """Provides facilities to upload a new appversion to the hosting service.

  Attributes:
    server: The AbstractRpcServer to use for the upload.
    config: The AppInfoExternal object derived from the app.yaml file.
    app_id: The application string from 'config'.
    version: The version string from 'config'.
    files: A dictionary of files to upload to the server, mapping path to
      hash of the file contents.
    in_transaction: True iff a transaction with the server has started.
      An AppVersionUpload can do only one transaction at a time.
    deployed: True iff the Deploy method has been called.
  """

  def __init__(self, server, config):
    """Creates a new AppVersionUpload.

    Args:
      server: The RPC server to use.  Should be an instance of HttpRpcServer
        or TestRpcServer.
      config: An AppInfoExternal object that specifies the configuration for
        this application.
    """
    self.server = server
    self.config = config
    self.app_id = self.config.application
    self.version = self.config.version
    self.files = {}
    self.in_transaction = False
    self.deployed = False

  def _Hash(self, content):
    """Compute the hash of the content.

    Args:
      content: The data to hash as a string.

    Returns:
      The string representation of the hash.
    """
    # SHA-1 hex digest, split into 8-char groups separated by underscores.
    h = sha.new(content).hexdigest()
    return "%s_%s_%s_%s_%s" % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])

  def AddFile(self, path, file_handle):
    """Adds the provided file to the list to be pushed to the server.

    Args:
      path: The path the file should be uploaded as.
      file_handle: A stream containing data to upload.
    """
    assert not self.in_transaction, "Already in a transaction."
    assert file_handle is not None
    reason = appinfo.ValidFilename(path)
    if reason:
      # Invalid filenames are skipped (with an error), not fatal.
      logging.error(reason)
      return
    # Hash the contents without disturbing the caller's stream position.
    pos = file_handle.tell()
    content_hash = self._Hash(file_handle.read())
    file_handle.seek(pos, 0)
    self.files[path] = content_hash

  def Begin(self):
    """Begins the transaction, returning a list of files that need uploading.

    All calls to AddFile must be made before calling Begin().

    Returns:
      A list of pathnames for files that should be uploaded using UploadFile()
      before Commit() can be called.
    """
    assert not self.in_transaction, "Already in a transaction."
    StatusUpdate("Initiating update.")
    self.server.Send("/api/appversion/create", app_id=self.app_id,
                     version=self.version, payload=self.config.ToYAML())
    self.in_transaction = True
    # Ask the server to clone any files it already has (matched by content
    # hash); only the files it reports back still need to be uploaded.
    files_to_clone = []
    blobs_to_clone = []
    for path, content_hash in self.files.iteritems():
      mime_type = GetMimeTypeIfStaticFile(self.config, path)
      if mime_type is not None:
        blobs_to_clone.append((path, content_hash, mime_type))
      else:
        files_to_clone.append((path, content_hash))

    files_to_upload = {}

    def CloneFiles(url, files, file_type):
      """Sends files to the given url.

      Args:
        url: the server URL to use.
        files: a list of files
        file_type: the type of the files
      """
      if not files:
        return
      StatusUpdate("Cloning %d %s file%s." %
                   (len(files), file_type, len(files) != 1 and "s" or ""))
      # Send the clone requests in batches of MAX_FILES_TO_CLONE.
      for i in xrange(0, len(files), MAX_FILES_TO_CLONE):
        if i > 0 and i % MAX_FILES_TO_CLONE == 0:
          StatusUpdate("Cloned %d files." % i)
        chunk = files[i:min(len(files), i + MAX_FILES_TO_CLONE)]
        result = self.server.Send(url,
                                  app_id=self.app_id, version=self.version,
                                  payload=BuildClonePostBody(chunk))
        if result:
          # The server responds with the paths it could NOT clone; those
          # must be uploaded explicitly.
          files_to_upload.update(dict(
              (f, self.files[f]) for f in result.split(LIST_DELIMITER)))

    CloneFiles("/api/appversion/cloneblobs", blobs_to_clone, "static")
    CloneFiles("/api/appversion/clonefiles", files_to_clone, "application")
    logging.info("Files to upload: " + str(files_to_upload))
    self.files = files_to_upload
    return sorted(files_to_upload.iterkeys())

  def UploadFile(self, path, file_handle):
    """Uploads a file to the hosting service.

    Must only be called after Begin().
    The path provided must be one of those that were returned by Begin().

    Args:
      path: The path the file is being uploaded as.
      file_handle: A file-like object containing the data to upload.

    Raises:
      KeyError: The provided file is not amongst those to be uploaded.
    """
    assert self.in_transaction, "Begin() must be called before UploadFile()."
    if path not in self.files:
      raise KeyError("File '%s' is not in the list of files to be uploaded."
                     % path)
    del self.files[path]
    # Static files go to the blob endpoint with an explicit content type;
    # application files go to the regular file endpoint.
    mime_type = GetMimeTypeIfStaticFile(self.config, path)
    if mime_type is not None:
      self.server.Send("/api/appversion/addblob", app_id=self.app_id,
                       version=self.version, path=path, content_type=mime_type,
                       payload=file_handle.read())
    else:
      self.server.Send("/api/appversion/addfile", app_id=self.app_id,
                       version=self.version, path=path,
                       payload=file_handle.read())

  def Commit(self):
    """Commits the transaction, making the new app version available.

    All the files returned by Begin() must have been uploaded with UploadFile()
    before Commit() can be called.

    This tries the new 'deploy' method; if that fails it uses the old 'commit'.

    Raises:
      Exception: Some required files were not uploaded.
    """
    assert self.in_transaction, "Begin() must be called before Commit()."
    if self.files:
      raise Exception("Not all required files have been uploaded.")
    try:
      self.Deploy()
      # Poll with exponential backoff until the new version is serving.
      if not RetryWithBackoff(1, 2, 8, self.IsReady):
        logging.warning("Version still not ready to serve, aborting.")
        raise Exception("Version not ready.")
      self.StartServing()
    except urllib2.HTTPError, e:
      # A 404 means the server doesn't support the deploy/isready/
      # startserving flow; fall back to the old single-step commit.
      if e.code != 404:
        raise
      StatusUpdate("Closing update.")
      self.server.Send("/api/appversion/commit", app_id=self.app_id,
                       version=self.version)
      self.in_transaction = False

  def Deploy(self):
    """Deploys the new app version but does not make it default.

    All the files returned by Begin() must have been uploaded with UploadFile()
    before Deploy() can be called.

    Raises:
      Exception: Some required files were not uploaded.
    """
    assert self.in_transaction, "Begin() must be called before Deploy()."
    if self.files:
      raise Exception("Not all required files have been uploaded.")
    StatusUpdate("Deploying new version.")
    self.server.Send("/api/appversion/deploy", app_id=self.app_id,
                     version=self.version)
    self.deployed = True

  def IsReady(self):
    """Check if the new app version is ready to serve traffic.

    Raises:
      Exception: Deploy has not yet been called.

    Returns:
      True if the server returned the app is ready to serve.
    """
    assert self.deployed, "Deploy() must be called before IsReady()."
    StatusUpdate("Checking if new version is ready to serve.")
    result = self.server.Send("/api/appversion/isready", app_id=self.app_id,
                              version=self.version)
    return result == "1"

  def StartServing(self):
    """Start serving with the newly created version.

    Raises:
      Exception: Deploy has not yet been called.
    """
    # NOTE(review): the assertion message mentions IsReady() but this
    # assertion guards StartServing().
    assert self.deployed, "Deploy() must be called before IsReady()."
    StatusUpdate("Closing update: new version is ready to start serving.")
    self.server.Send("/api/appversion/startserving",
                     app_id=self.app_id, version=self.version)
    self.in_transaction = False

  def Rollback(self):
    """Rolls back the transaction if one is in progress."""
    if not self.in_transaction:
      return
    StatusUpdate("Rolling back the update.")
    self.server.Send("/api/appversion/rollback", app_id=self.app_id,
                     version=self.version)
    self.in_transaction = False
    self.files = {}

  def DoUpload(self, paths, max_size, openfunc):
    """Uploads a new appversion with the given config and files to the server.

    Args:
      paths: An iterator that yields the relative paths of the files to upload.
      max_size: The maximum size file to upload.
      openfunc: A function that takes a path and returns a file-like object.
    """
    logging.info("Reading app configuration.")
    path = ""
    try:
      # Phase 1: scan local files, skipping ignored and oversized ones.
      StatusUpdate("Scanning files on local disk.")
      num_files = 0
      for path in paths:
        file_handle = openfunc(path)
        try:
          if self.config.skip_files.match(path):
            logging.info("Ignoring file '%s': File matches ignore regex.",
                         path)
          else:
            file_length = GetFileLength(file_handle)
            if file_length > max_size:
              logging.error("Ignoring file '%s': Too long "
                            "(max %d bytes, file is %d bytes)",
                            path, max_size, file_length)
            else:
              logging.info("Processing file '%s'", path)
              self.AddFile(path, file_handle)
        finally:
          file_handle.close()
        num_files += 1
        if num_files % 500 == 0:
          StatusUpdate("Scanned %d files." % num_files)
    except KeyboardInterrupt:
      logging.info("User interrupted. Aborting.")
      raise
    except EnvironmentError, e:
      logging.error("An error occurred processing file '%s': %s. Aborting.",
                    path, e)
      raise
    try:
      # Phase 2: start the transaction, upload whatever the server could
      # not clone, then commit.  Any failure rolls the transaction back.
      missing_files = self.Begin()
      if missing_files:
        StatusUpdate("Uploading %d files." % len(missing_files))
        num_files = 0
        for missing_file in missing_files:
          logging.info("Uploading file '%s'" % missing_file)
          file_handle = openfunc(missing_file)
          try:
            self.UploadFile(missing_file, file_handle)
          finally:
            file_handle.close()
          num_files += 1
          if num_files % 500 == 0:
            StatusUpdate("Uploaded %d files." % num_files)
      self.Commit()
    except KeyboardInterrupt:
      logging.info("User interrupted. Aborting.")
      self.Rollback()
      raise
    except:
      logging.exception("An unexpected error occurred. Aborting.")
      self.Rollback()
      raise
    logging.info("Done!")
def FileIterator(base, separator=os.path.sep):
  """Walks a directory tree, returning all the files. Follows symlinks.

  Directories are visited with an explicit LIFO stack (no recursion), and on
  Windows-style platforms the yielded paths are normalized to forward slashes.

  Args:
    base: The base path to search for files under.
    separator: Path separator used by the running system's platform.

  Yields:
    Paths of files found, relative to base.
  """
  pending = [""]
  while pending:
    relative_dir = pending.pop()
    for child in os.listdir(os.path.join(base, relative_dir)):
      relative_path = os.path.join(relative_dir, child)
      absolute_path = os.path.join(base, relative_path)
      if os.path.isfile(absolute_path):
        # Uploads use URL-style separators regardless of platform.
        if separator == "\\":
          relative_path = relative_path.replace("\\", "/")
        yield relative_path
      elif os.path.isdir(absolute_path):
        pending.append(relative_path)
def GetFileLength(fh):
  """Returns the length of the file represented by fh.

  Works on any seekable stream, not just real files (unlike os.fstat): it
  remembers the current position, seeks to the end to read the size, then
  restores the original position.

  Args:
    fh: The stream to get the length of.

  Returns:
    The length of the stream.
  """
  original_position = fh.tell()
  fh.seek(0, 2)  # whence=2: seek relative to end of stream
  size = fh.tell()
  fh.seek(original_position, 0)
  return size
def GetUserAgent(get_version=GetVersionObject,
                 get_platform=appengine_rpc.GetPlatformToken):
  """Determines the value of the 'User-agent' header to use for HTTP requests.

  If the 'APPCFG_SDK_NAME' environment variable is present, that will be
  used as the first product token in the user-agent.

  Args:
    get_version: Used for testing.
    get_platform: Used for testing.

  Returns:
    String containing the 'user-agent' header value, which includes the SDK
    version, the platform information, and the version of Python;
    e.g., "appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2".
  """
  sdk_name = os.environ.get("APPCFG_SDK_NAME")
  if sdk_name:
    # Environment override takes the place of the appcfg_py/<release> token.
    sdk_token = sdk_name
  else:
    version = get_version()
    release = "unknown" if version is None else version["release"]
    sdk_token = "appcfg_py/%s" % release
  python_token = "Python/%s" % ".".join(str(i) for i in sys.version_info)
  return " ".join([sdk_token, get_platform(), python_token])
def GetSourceName(get_version=GetVersionObject):
  """Gets the name of this source version.

  Args:
    get_version: Used for testing.

  Returns:
    A "Google-appcfg-<release>" token; release is "unknown" when no version
    object is available.
  """
  version_info = get_version()
  release = "unknown" if version_info is None else version_info["release"]
  return "Google-appcfg-%s" % (release,)
class AppCfgApp(object):
  """Singleton class to wrap AppCfg tool functionality.

  This class is responsible for parsing the command line and executing
  the desired action on behalf of the user. Processing files and
  communicating with the server is handled by other classes.

  Attributes:
    actions: A dictionary mapping action names to Action objects.
    action: The Action specified on the command line.
    parser: An instance of optparse.OptionParser.
    options: The command line options parsed by 'parser'.
    argv: The original command line as a list.
    args: The positional command line args left over after parsing the options.
    raw_input_fn: Function used for getting raw user input, like email.
    password_input_fn: Function used for getting user password.
    error_fh: Unexpected HTTPErrors are printed to this file handle.

  Attributes for testing:
    parser_class: The class to use for parsing the command line. Because
      OptionsParser will exit the program when there is a parse failure, it
      is nice to subclass OptionsParser and catch the error before exiting.
  """

  def __init__(self, argv, parser_class=optparse.OptionParser,
               rpc_server_class=appengine_rpc.HttpRpcServer,
               raw_input_fn=raw_input,
               password_input_fn=getpass.getpass,
               error_fh=sys.stderr,
               update_check_class=UpdateCheck):
    """Initializer. Parses the cmdline and selects the Action to use.

    Initializes all of the attributes described in the class docstring.
    Prints help or error messages if there is an error parsing the cmdline.

    Args:
      argv: The list of arguments passed to this program.
      parser_class: Options parser to use for this application.
      rpc_server_class: RPC server class to use for this application.
      raw_input_fn: Function used for getting user email.
      password_input_fn: Function used for getting user password.
      error_fh: Unexpected HTTPErrors are printed to this file handle.
      update_check_class: UpdateCheck class (can be replaced for testing).
    """
    self.parser_class = parser_class
    self.argv = argv
    self.rpc_server_class = rpc_server_class
    self.raw_input_fn = raw_input_fn
    self.password_input_fn = password_input_fn
    self.error_fh = error_fh
    self.update_check_class = update_check_class
    self.parser = self._GetOptionParser()
    # Let every action register its extra options so the generic parse below
    # accepts any action's flags.
    for action in self.actions.itervalues():
      action.options(self, self.parser)
    self.options, self.args = self.parser.parse_args(argv[1:])
    if len(self.args) < 1:
      self._PrintHelpAndExit()
    if self.args[0] not in self.actions:
      self.parser.error("Unknown action '%s'\n%s" %
                        (self.args[0], self.parser.get_description()))
    action_name = self.args.pop(0)
    self.action = self.actions[action_name]
    # Re-parse with an action-specific parser so usage/help text and options
    # match the chosen action.
    self.parser, self.options = self._MakeSpecificParser(self.action)
    if self.options.help:
      self._PrintHelpAndExit()
    # verbose: 0 = errors only (-q), 1 = default, 2 = info (-v), 3 = debug.
    if self.options.verbose == 2:
      logging.getLogger().setLevel(logging.INFO)
    elif self.options.verbose == 3:
      logging.getLogger().setLevel(logging.DEBUG)
    # Module-level verbosity is read by other helpers (e.g. PerformUpload).
    global verbosity
    verbosity = self.options.verbose

  def Run(self):
    """Executes the requested action.

    Catches any HTTPErrors raised by the action and prints them to stderr.

    Returns:
      0 on success, 1 if the action raised an HTTP or yaml parsing error.
    """
    try:
      self.action(self)
    except urllib2.HTTPError, e:
      body = e.read()
      print >>self.error_fh, ("Error %d: --- begin server output ---\n"
                              "%s\n--- end server output ---" %
                              (e.code, body.rstrip("\n")))
      return 1
    except yaml_errors.EventListenerError, e:
      print >>self.error_fh, ("Error parsing yaml file:\n%s" % e)
      return 1
    return 0
  def _GetActionDescriptions(self):
    """Returns a formatted string containing the short_descs for all actions."""
    action_names = self.actions.keys()
    action_names.sort()
    desc = ""
    for action_name in action_names:
      desc += " %s: %s\n" % (action_name, self.actions[action_name].short_desc)
    return desc

  def _GetOptionParser(self):
    """Creates an OptionParser with generic usage and description strings.

    Returns:
      An OptionParser instance.
    """

    class Formatter(optparse.IndentedHelpFormatter):
      """Custom help formatter that does not reformat the description."""

      def format_description(self, description):
        """Very simple formatter."""
        return description + "\n"

    desc = self._GetActionDescriptions()
    desc = ("Action must be one of:\n%s"
            "Use 'help <action>' for a detailed description.") % desc
    # conflict_handler="resolve" lets the explicit -h/--help option below
    # replace optparse's built-in help option.
    parser = self.parser_class(usage="%prog [options] <action>",
                               description=desc,
                               formatter=Formatter(),
                               conflict_handler="resolve")
    parser.add_option("-h", "--help", action="store_true",
                      dest="help", help="Show the help message and exit.")
    # verbose levels: 0 (quiet), 1 (default), 2 (-v / info), 3 (--noisy).
    parser.add_option("-q", "--quiet", action="store_const", const=0,
                      dest="verbose", help="Print errors only.")
    parser.add_option("-v", "--verbose", action="store_const", const=2,
                      dest="verbose", default=1,
                      help="Print info level logs.")
    parser.add_option("--noisy", action="store_const", const=3,
                      dest="verbose", help="Print all logs.")
    parser.add_option("-s", "--server", action="store", dest="server",
                      default="appengine.google.com",
                      metavar="SERVER", help="The server to connect to.")
    parser.add_option("--secure", action="store_true", dest="secure",
                      default=False,
                      help="Use SSL when communicating with the server.")
    parser.add_option("-e", "--email", action="store", dest="email",
                      metavar="EMAIL", default=None,
                      help="The username to use. Will prompt if omitted.")
    parser.add_option("-H", "--host", action="store", dest="host",
                      metavar="HOST", default=None,
                      help="Overrides the Host header sent with all RPCs.")
    parser.add_option("--no_cookies", action="store_false",
                      dest="save_cookies", default=True,
                      help="Do not save authentication cookies to local disk.")
    parser.add_option("--passin", action="store_true",
                      dest="passin", default=False,
                      help="Read the login password from stdin.")
    return parser

  def _MakeSpecificParser(self, action):
    """Creates a new parser with documentation specific to 'action'.

    Args:
      action: An Action instance to be used when initializing the new parser.

    Returns:
      A tuple containing:
      parser: An instance of OptionsParser customized to 'action'.
      options: The command line options after re-parsing.
    """
    parser = self._GetOptionParser()
    parser.set_usage(action.usage)
    parser.set_description("%s\n%s" % (action.short_desc, action.long_desc))
    action.options(self, parser)
    options, unused_args = parser.parse_args(self.argv[1:])
    return parser, options

  def _PrintHelpAndExit(self, exit_code=2):
    """Prints the parser's help message and exits the program.

    Args:
      exit_code: The integer code to pass to sys.exit().
    """
    self.parser.print_help()
    sys.exit(exit_code)

  def _GetRpcServer(self):
    """Returns an instance of an AbstractRpcServer.

    Returns:
      A new AbstractRpcServer, on which RPC calls can be made.
    """

    def GetUserCredentials():
      """Prompts the user for a username and password."""
      email = self.options.email
      if email is None:
        email = self.raw_input_fn("Email: ")
      password_prompt = "Password for %s: " % email
      # --passin reads the password from stdin (scriptable); otherwise use a
      # no-echo interactive prompt.
      if self.options.passin:
        password = self.raw_input_fn(password_prompt)
      else:
        password = self.password_input_fn(password_prompt)
      return (email, password)

    # Local development server: skip real authentication entirely and use a
    # canned debug credential.
    if self.options.host and self.options.host == "localhost":
      email = self.options.email
      if email is None:
        email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
      server = self.rpc_server_class(
          self.options.server,
          lambda: (email, "password"),
          GetUserAgent(),
          GetSourceName(),
          host_override=self.options.host,
          save_cookies=self.options.save_cookies)
      server.authenticated = True
      return server
    # With --passin stdin cannot be re-prompted, so allow only one attempt.
    if self.options.passin:
      auth_tries = 1
    else:
      auth_tries = 3
    return self.rpc_server_class(self.options.server, GetUserCredentials,
                                 GetUserAgent(), GetSourceName(),
                                 host_override=self.options.host,
                                 save_cookies=self.options.save_cookies,
                                 auth_tries=auth_tries,
                                 account_type="HOSTED_OR_GOOGLE",
                                 secure=self.options.secure)
def _FindYaml(self, basepath, file_name):
"""Find yaml files in application directory.
Args:
basepath: Base application directory.
file_name: Filename without extension to search for.
Returns:
Path to located yaml file if one exists, else None.
"""
if not os.path.isdir(basepath):
self.parser.error("Not a directory: %s" % basepath)
for yaml_file in (file_name + ".yaml", file_name + ".yml"):
yaml_path = os.path.join(basepath, yaml_file)
if os.path.isfile(yaml_path):
return yaml_path
return None
  def _ParseAppYaml(self, basepath):
    """Parses the app.yaml file.

    Args:
      basepath: the directory of the application.

    Returns:
      An AppInfoExternal object.
    """
    appyaml_filename = self._FindYaml(basepath, "app")
    # app.yaml is mandatory; parser.error() exits the program.
    if appyaml_filename is None:
      self.parser.error("Directory does not contain an app.yaml "
                        "configuration file.")
    fh = open(appyaml_filename, "r")
    try:
      appyaml = appinfo.LoadSingleAppInfo(fh)
    finally:
      fh.close()
    return appyaml

  def _ParseIndexYaml(self, basepath):
    """Parses the index.yaml file.

    Args:
      basepath: the directory of the application.

    Returns:
      A single parsed yaml file or None if the file does not exist.
    """
    file_name = self._FindYaml(basepath, "index")
    if file_name is not None:
      fh = open(file_name, "r")
      try:
        index_defs = datastore_index.ParseIndexDefinitions(fh)
      finally:
        fh.close()
      return index_defs
    return None

  def _ParseCronYaml(self, basepath):
    """Parses the cron.yaml file.

    Args:
      basepath: the directory of the application.

    Returns:
      A CronInfoExternal object.
    """
    file_name = self._FindYaml(basepath, "cron")
    if file_name is not None:
      fh = open(file_name, "r")
      try:
        cron_info = croninfo.LoadSingleCron(fh)
      finally:
        fh.close()
      return cron_info
    return None

  def Help(self):
    """Prints help for a specific action.

    Expects self.args[0] to contain the name of the action in question.
    Exits the program after printing the help message.
    """
    if len(self.args) != 1 or self.args[0] not in self.actions:
      self.parser.error("Expected a single action argument. Must be one of:\n" +
                        self._GetActionDescriptions())
    action = self.actions[self.args[0]]
    # Swap in the action-specific parser so the full usage text is printed.
    self.parser, unused_options = self._MakeSpecificParser(action)
    self._PrintHelpAndExit(exit_code=0)
  def Update(self):
    """Updates and deploys a new appversion."""
    if len(self.args) != 1:
      self.parser.error("Expected a single <directory> argument.")
    basepath = self.args[0]
    appyaml = self._ParseAppYaml(basepath)
    rpc_server = self._GetRpcServer()
    # Check for a newer SDK before doing any real work.
    updatecheck = self.update_check_class(rpc_server, appyaml)
    updatecheck.CheckForUpdates()
    appversion = AppVersionUpload(rpc_server, appyaml)
    appversion.DoUpload(FileIterator(basepath), self.options.max_size,
                        lambda path: open(os.path.join(basepath, path), "rb"))
    # Index upload failures are reported but do not fail the deployment; the
    # user can retry with 'update_indexes'.
    index_defs = self._ParseIndexYaml(basepath)
    if index_defs:
      index_upload = IndexDefinitionUpload(rpc_server, appyaml, index_defs)
      try:
        index_upload.DoUpload()
      except urllib2.HTTPError, e:
        StatusUpdate("Error %d: --- begin server output ---\n"
                     "%s\n--- end server output ---" %
                     (e.code, e.read().rstrip("\n")))
        print >> self.error_fh, (
            "Your app was updated, but there was an error updating your "
            "indexes. Please retry later with appcfg.py update_indexes.")
    cron_entries = self._ParseCronYaml(basepath)
    if cron_entries:
      cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
      cron_upload.DoUpload()

  def _UpdateOptions(self, parser):
    """Adds update-specific options to 'parser'.

    Args:
      parser: An instance of OptionsParser.
    """
    # Default max_size is 10 MB (10485760 bytes).
    parser.add_option("-S", "--max_size", type="int", dest="max_size",
                      default=10485760, metavar="SIZE",
                      help="Maximum size of a file to upload.")

  def VacuumIndexes(self):
    """Deletes unused indexes."""
    if len(self.args) != 1:
      self.parser.error("Expected a single <directory> argument.")
    basepath = self.args[0]
    config = self._ParseAppYaml(basepath)
    # A missing index.yaml is treated as an empty definition set, making
    # every server-side index a deletion candidate.
    index_defs = self._ParseIndexYaml(basepath)
    if index_defs is None:
      index_defs = datastore_index.IndexDefinitions()
    rpc_server = self._GetRpcServer()
    vacuum = VacuumIndexesOperation(rpc_server,
                                    config,
                                    self.options.force_delete)
    vacuum.DoVacuum(index_defs)

  def _VacuumIndexesOptions(self, parser):
    """Adds vacuum_indexes-specific options to 'parser'.

    Args:
      parser: An instance of OptionsParser.
    """
    parser.add_option("-f", "--force", action="store_true", dest="force_delete",
                      default=False,
                      help="Force deletion without being prompted.")

  def UpdateCron(self):
    """Updates any new or changed cron definitions."""
    if len(self.args) != 1:
      self.parser.error("Expected a single <directory> argument.")
    basepath = self.args[0]
    appyaml = self._ParseAppYaml(basepath)
    rpc_server = self._GetRpcServer()
    cron_entries = self._ParseCronYaml(basepath)
    if cron_entries:
      cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
      cron_upload.DoUpload()

  def UpdateIndexes(self):
    """Updates indexes."""
    if len(self.args) != 1:
      self.parser.error("Expected a single <directory> argument.")
    basepath = self.args[0]
    appyaml = self._ParseAppYaml(basepath)
    rpc_server = self._GetRpcServer()
    index_defs = self._ParseIndexYaml(basepath)
    if index_defs:
      index_upload = IndexDefinitionUpload(rpc_server, appyaml, index_defs)
      index_upload.DoUpload()

  def Rollback(self):
    """Does a rollback of any existing transaction for this app version."""
    if len(self.args) != 1:
      self.parser.error("Expected a single <directory> argument.")
    basepath = self.args[0]
    appyaml = self._ParseAppYaml(basepath)
    appversion = AppVersionUpload(self._GetRpcServer(), appyaml)
    # Force the rollback RPC even though no local transaction was started;
    # this clears a transaction left open by a previous failed update.
    appversion.in_transaction = True
    appversion.Rollback()

  def RequestLogs(self):
    """Write request logs to a file."""
    if len(self.args) != 2:
      self.parser.error(
          "Expected a <directory> argument and an <output_file> argument.")
    if (self.options.severity is not None and
        not 0 <= self.options.severity <= MAX_LOG_LEVEL):
      self.parser.error(
          "Severity range is 0 (DEBUG) through %s (CRITICAL)." % MAX_LOG_LEVEL)
    # Default num_days: 1 normally, 0 (all available logs) when appending to
    # an existing file.
    if self.options.num_days is None:
      self.options.num_days = int(not self.options.append)
    basepath = self.args[0]
    appyaml = self._ParseAppYaml(basepath)
    rpc_server = self._GetRpcServer()
    logs_requester = LogsRequester(rpc_server, appyaml, self.args[1],
                                   self.options.num_days,
                                   self.options.append,
                                   self.options.severity,
                                   time.time())
    logs_requester.DownloadLogs()

  def _RequestLogsOptions(self, parser):
    """Adds request_logs-specific options to 'parser'.

    Args:
      parser: An instance of OptionsParser.
    """
    parser.add_option("-n", "--num_days", type="int", dest="num_days",
                      action="store", default=None,
                      help="Number of days worth of log data to get. "
                      "The cut-off point is midnight UTC. "
                      "Use 0 to get all available logs. "
                      "Default is 1, unless --append is also given; "
                      "then the default is 0.")
    parser.add_option("-a", "--append", dest="append",
                      action="store_true", default=False,
                      help="Append to existing file.")
    parser.add_option("--severity", type="int", dest="severity",
                      action="store", default=None,
                      help="Severity of app-level log messages to get. "
                      "The range is 0 (DEBUG) through 4 (CRITICAL). "
                      "If omitted, only request logs are returned.")
  def CronInfo(self, now=None, output=sys.stdout):
    """Displays information about cron definitions.

    Args:
      now: used for testing.
      output: Used for testing.
    """
    if len(self.args) != 1:
      self.parser.error("Expected a single <directory> argument.")
    if now is None:
      now = datetime.datetime.now()
    basepath = self.args[0]
    cron_entries = self._ParseCronYaml(basepath)
    if cron_entries and cron_entries.cron:
      for entry in cron_entries.cron:
        description = entry.description
        if not description:
          description = "<no description>"
        print >>output, "\n%s:\nURL: %s\nSchedule: %s" % (description,
                                                          entry.schedule,
                                                          entry.url)
        # Print the next num_runs scheduled run times for this job.
        schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
        matches = schedule.GetMatches(now, self.options.num_runs)
        for match in matches:
          print >>output, "%s, %s from now" % (
              match.strftime("%Y-%m-%d %H:%M:%S"), match - now)

  def _CronInfoOptions(self, parser):
    """Adds cron_info-specific options to 'parser'.

    Args:
      parser: An instance of OptionsParser.
    """
    parser.add_option("-n", "--num_runs", type="int", dest="num_runs",
                      action="store", default=5,
                      help="Number of runs of each cron job to display"
                      "Default is 5")

  def _CheckRequiredUploadOptions(self):
    """Checks that upload options are present."""
    for option in ["filename", "kind", "config_file"]:
      if getattr(self.options, option) is None:
        self.parser.error("Option '%s' is required." % option)
    if not self.options.url:
      self.parser.error("You must have google.appengine.ext.remote_api.handler "
                        "assigned to an endpoint in app.yaml, or provide "
                        "the url of the handler via the 'url' option.")

  def InferUploadUrl(self, appyaml):
    """Uses app.yaml to determine the remote_api endpoint.

    Args:
      appyaml: A parsed app.yaml file.

    Returns:
      The url of the remote_api endpoint as a string, or None
    """
    handlers = appyaml.handlers
    handler_suffix = "remote_api/handler.py"
    app_id = appyaml.application
    for handler in handlers:
      if hasattr(handler, "script") and handler.script:
        if handler.script.endswith(handler_suffix):
          server = self.options.server
          # Production apps are served from appspot.com; otherwise assume the
          # configured server hosts the app directly.
          if server == "appengine.google.com":
            return "http://%s.appspot.com%s" % (app_id, handler.url)
          else:
            return "http://%s%s" % (server, handler.url)
    return None

  def RunBulkloader(self, **kwargs):
    """Invokes the bulkloader with the given keyword arguments.

    Args:
      kwargs: Keyword arguments to pass to bulkloader.Run().
    """
    try:
      # Availability check only; the bulkloader requires sqlite3.
      import sqlite3
    except ImportError:
      logging.error("upload_data action requires SQLite3 and the python "
                    "sqlite3 module (included in python since 2.5).")
      sys.exit(1)
    sys.exit(bulkloader.Run(kwargs))
  def PerformUpload(self, run_fn=None):
    """Performs a datastore upload via the bulkloader.

    Args:
      run_fn: Function to invoke the bulkloader, used for testing.
    """
    if run_fn is None:
      run_fn = self.RunBulkloader
    if len(self.args) != 1:
      self.parser.error("Expected <directory> argument.")
    basepath = self.args[0]
    appyaml = self._ParseAppYaml(basepath)
    self.options.app_id = appyaml.application
    # If --url was not given, try to derive it from the remote_api handler
    # declared in app.yaml.
    if not self.options.url:
      url = self.InferUploadUrl(appyaml)
      if url is not None:
        self.options.url = url
    self._CheckRequiredUploadOptions()
    if self.options.batch_size < 1:
      self.parser.error("batch_size must be 1 or larger.")
    # Mirror this tool's verbosity into the bulkloader's logging/debug flags.
    if verbosity == 1:
      logging.getLogger().setLevel(logging.INFO)
      self.options.debug = False
    else:
      logging.getLogger().setLevel(logging.DEBUG)
      self.options.debug = True
    StatusUpdate("Uploading data records.")
    run_fn(app_id=self.options.app_id,
           url=self.options.url,
           filename=self.options.filename,
           batch_size=self.options.batch_size,
           kind=self.options.kind,
           num_threads=self.options.num_threads,
           bandwidth_limit=self.options.bandwidth_limit,
           rps_limit=self.options.rps_limit,
           http_limit=self.options.http_limit,
           db_filename=self.options.db_filename,
           config_file=self.options.config_file,
           auth_domain=self.options.auth_domain,
           has_header=self.options.has_header,
           loader_opts=self.options.loader_opts,
           log_file=self.options.log_file,
           passin=self.options.passin,
           email=self.options.email,
           debug=self.options.debug,
           exporter_opts=None,
           download=False,
           result_db_filename=None,
           )

  def _PerformUploadOptions(self, parser):
    """Adds 'upload_data' specific options to the 'parser' passed in.

    Args:
      parser: An instance of OptionsParser.
    """
    parser.add_option("--filename", type="string", dest="filename",
                      action="store",
                      help="The name of the file containing the input data."
                      " (Required)")
    parser.add_option("--config_file", type="string", dest="config_file",
                      action="store",
                      help="Name of the configuration file. (Required)")
    parser.add_option("--kind", type="string", dest="kind",
                      action="store",
                      help="The kind of the entities to store. (Required)")
    parser.add_option("--url", type="string", dest="url",
                      action="store",
                      help="The location of the remote_api endpoint.")
    parser.add_option("--num_threads", type="int", dest="num_threads",
                      action="store", default=10,
                      help="Number of threads to upload records with.")
    parser.add_option("--batch_size", type="int", dest="batch_size",
                      action="store", default=10,
                      help="Number of records to post in each request.")
    parser.add_option("--bandwidth_limit", type="int", dest="bandwidth_limit",
                      action="store", default=250000,
                      help="The maximum bytes/second bandwidth for transfers.")
    parser.add_option("--rps_limit", type="int", dest="rps_limit",
                      action="store", default=20,
                      help="The maximum records/second for transfers.")
    parser.add_option("--http_limit", type="int", dest="http_limit",
                      action="store", default=8,
                      help="The maximum requests/second for transfers.")
    parser.add_option("--db_filename", type="string", dest="db_filename",
                      action="store",
                      help="Name of the progress database file.")
    parser.add_option("--auth_domain", type="string", dest="auth_domain",
                      action="store", default="gmail.com",
                      help="The name of the authorization domain to use.")
    parser.add_option("--has_header", dest="has_header",
                      action="store_true", default=False,
                      help="Whether the first line of the input file should be"
                      " skipped")
    parser.add_option("--loader_opts", type="string", dest="loader_opts",
                      help="A string to pass to the Loader.Initialize method.")
    parser.add_option("--log_file", type="string", dest="log_file",
                      help="File to write bulkloader logs. If not supplied "
                      "then a new log file will be created, named: "
                      "bulkloader-log-TIMESTAMP.")
  class Action(object):
    """Contains information about a command line action.

    Attributes:
      function: The name of a function defined on AppCfg or its subclasses
        that will perform the appropriate action.
      usage: A command line usage string.
      short_desc: A one-line description of the action.
      long_desc: A detailed description of the action. Whitespace and
        formatting will be preserved.
      options: A function that will add extra options to a given OptionParser
        object.
    """

    def __init__(self, function, usage, short_desc, long_desc="",
                 options=lambda obj, parser: None):
      """Initializer for the class attributes."""
      self.function = function
      self.usage = usage
      self.short_desc = short_desc
      self.long_desc = long_desc
      self.options = options

    def __call__(self, appcfg):
      """Invoke this Action on the specified AppCfg.

      This calls the function of the appropriate name on AppCfg, and
      respects polymorphic overrides.
      """
      # Dispatch by name so subclasses of AppCfgApp can override the method.
      method = getattr(appcfg, self.function)
      return method()
actions = {
"help": Action(
function="Help",
usage="%prog help <action>",
short_desc="Print help for a specific action."),
"update": Action(
function="Update",
usage="%prog [options] update <directory>",
options=_UpdateOptions,
short_desc="Create or update an app version.",
long_desc="""
Specify a directory that contains all of the files required by
the app, and appcfg.py will create/update the app version referenced
in the app.yaml file at the top level of that directory. appcfg.py
will follow symlinks and recursively upload all files to the server.
Temporary or source control files (e.g. foo~, .svn/*) will be skipped."""),
"update_cron": Action(
function="UpdateCron",
usage="%prog [options] update_cron <directory>",
short_desc="Update application cron definitions.",
long_desc="""
The 'update_cron' command will update any new, removed or changed cron
definitions from the cron.yaml file."""),
"update_indexes": Action(
function="UpdateIndexes",
usage="%prog [options] update_indexes <directory>",
short_desc="Update application indexes.",
long_desc="""
The 'update_indexes' command will add additional indexes which are not currently
in production as well as restart any indexes that were not completed."""),
"vacuum_indexes": Action(
function="VacuumIndexes",
usage="%prog [options] vacuum_indexes <directory>",
options=_VacuumIndexesOptions,
short_desc="Delete unused indexes from application.",
long_desc="""
The 'vacuum_indexes' command will help clean up indexes which are no longer
in use. It does this by comparing the local index configuration with
indexes that are actually defined on the server. If any indexes on the
server do not exist in the index configuration file, the user is given the
option to delete them."""),
"rollback": Action(
function="Rollback",
usage="%prog [options] rollback <directory>",
short_desc="Rollback an in-progress update.",
long_desc="""
The 'update' command requires a server-side transaction. Use 'rollback'
if you get an error message about another transaction being in progress
and you are sure that there is no such transaction."""),
"request_logs": Action(
function="RequestLogs",
usage="%prog [options] request_logs <directory> <output_file>",
options=_RequestLogsOptions,
short_desc="Write request logs in Apache common log format.",
long_desc="""
The 'request_logs' command exports the request logs from your application
to a file. It will write Apache common log format records ordered
chronologically. If output file is '-' stdout will be written."""),
"cron_info": Action(
function="CronInfo",
usage="%prog [options] cron_info <directory>",
options=_CronInfoOptions,
short_desc="Display information about cron jobs.",
long_desc="""
The 'cron_info' command will display the next 'number' runs (default 5) for
each cron job defined in the cron.yaml file."""),
"upload_data": Action(
function="PerformUpload",
usage="%prog [options] upload_data <directory>",
options=_PerformUploadOptions,
short_desc="Upload CSV records to datastore",
long_desc="""
The 'upload_data' command translates CSV records into datastore entities and
uploads them into your application's datastore."""),
}
def main(argv):
  """Entry point: configure logging, run an AppCfgApp, exit on failure.

  Args:
    argv: Full command line, including the program name.
  """
  logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  try:
    exit_code = AppCfgApp(argv).Run()
    if exit_code:
      sys.exit(exit_code)
  except KeyboardInterrupt:
    # Ctrl-C is a normal way to stop; report it and exit nonzero.
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main(sys.argv)
|
jamslevy/gsoc
|
thirdparty/google_appengine/google/appengine/tools/appcfg.py
|
Python
|
apache-2.0
| 74,994
|
[
"VisIt"
] |
ec121a83a1969477ba256d66da4b9e2ab977e27d7f81075523217e104a89b88a
|
import eva
import eva.tests
import eva.tests.schemas
import eva.adapter
import eva.exceptions
import eva.job
from unittest import mock
import httmock
import datetime
class TestFimexAdapter(eva.tests.BaseTestAdapter):
    # Adapter implementation exercised by this test case.
    adapter_class = eva.adapter.FimexFillFileAdapter

    # Minimal adapter configuration; paths and backends are dummies because
    # all HTTP access is mocked with httmock in the tests below.
    config_ini = \
"""
[adapter]
fimex_fill_file_ncfill_path = /path/to/ncfill
fimex_fill_file_template_directory = /template
input_data_format = foo
input_product = foo
input_service_backend = foo
output_base_url = file:///foo
output_data_format = netcdf
output_filename_pattern = /output/{{reference_time|iso8601_compact}}.nc
output_product = foo
output_service_backend = foo
"""  # NOQA

    def test_with_partial(self):
        """!
        @brief Test that the adapter requires input_partial=NO.
        """
        self.config['adapter']['input_partial'] = 'YES'
        with self.assertRaises(eva.exceptions.InvalidConfigurationException):
            self.create_adapter()

    def test_create_job(self):
        """!
        @brief Test that job creation generates the correct FIMEX command line.
        """
        self.create_adapter()
        resource = mock.MagicMock()
        resource.url = 'file:///foo/bar/baz'
        resource.format.slug = 'netcdf'
        resource.data.productinstance.reference_time = eva.coerce_to_utc(datetime.datetime(2016, 1, 1, 18, 15, 0))
        with httmock.HTTMock(*eva.tests.schemas.SCHEMAS):
            job = self.create_job(resource)
        # The generated job script must contain this exact ncfill invocation.
        check_command = ' '.join([
            "time",
            "/path/to/ncfill",
            "--input '/foo/bar/baz'",
            "--output '/output/20160101T181500Z.nc'",
            "--input_format 'netcdf'",
            "--reference_time '2016-01-01T18:15:00+0000'",
            "--template_directory '/template'"
        ])
        self.assertTrue(check_command in job.command)

    def test_finish_job_and_generate_resources(self):
        """!
        @brief Test that job finish works and doesn't throw any exceptions.
        """
        self.create_adapter()
        resource = mock.MagicMock()
        with httmock.HTTMock(*eva.tests.schemas.SCHEMAS):
            job = self.create_job(resource)
        job.set_status(eva.job.COMPLETE)
        self.adapter.finish_job(job)
        resources = self.generate_resources(job)
        # A fill-file job registers exactly one new datainstance (flagged as
        # partial) and no new productinstance or data resources.
        self.assertEqual(len(resources['productinstance']), 0)
        self.assertEqual(len(resources['data']), 0)
        self.assertEqual(len(resources['datainstance']), 1)
        self.assertEqual(resources['datainstance'][0].servicebackend, self.adapter.output_service_backend)
        self.assertEqual(resources['datainstance'][0].partial, True)
|
metno/EVA
|
eva/tests/adapters/test_fimex_fill_file.py
|
Python
|
gpl-2.0
| 2,649
|
[
"NetCDF"
] |
95e00f801860c0b1a1056974bd97fb0f46cfbc9c9985e6adf698d4d8d7bdf27c
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2014 Raoul Snyman #
# Portions copyright (c) 2008-2014 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Ken Roberts #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
################################################################
# WARNING WARNING WARNING WARNING WARNING WARNING WARNING #
# #
# The 2to3 tool will change the 'db' calls to '.db'. #
# I don't believe that is what should be changed. #
# #
# The other changes are only removing the u'' text to '' text #
# and should be ok.
################################################################
__version__ = '0.0.1'
# Split the dotted version string so each component can be packed into the
# hex form below.
__v = __version__.split('.')
# Encode version as major<<24 | minor<<16 | patch<<8 (one byte each).
__version_hex__ = int(__v[0]) << 24 | \
                  int(__v[1]) << 16 | \
                  int(__v[2]) << 8

__module = 'projectors'

import logging
log = logging.getLogger(__name__)
log.debug(u'XML Parser module loaded')
from os import path
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from xml.dom.minidom import parse, parseString
from openlp.core.projectors.db import get_manufacturer_by_name, add_new_model, list_manufacturers
# Helper functions
def __prettify(s):
    '''Render an ElementTree element as indented ("pretty") XML text.

    :param s: ElementTree element to serialise.
    :returns: Unicode string with one element per line, 4-space indents.
    '''
    raw_xml = ElementTree.tostring(s, 'utf-8')
    document = parseString(raw_xml)
    return document.toprettyxml(indent="    ")
def __check_source_element(src):
    '''Validate a <source> element.

    A valid source element has the tag 'source' and exactly two
    attributes: 'pjlink_code' and 'pjlink_name'.

    :param src: ElementTree element to check.
    :returns: True when the element is a well-formed source entry.
    '''
    # log.debug("__check_source_element(src='%s')" % src)
    if src.tag.lower() != u'source':
        log.error(u'Invalid entity where source should be: %s' % src.tag)
        # BUG FIX: previously only logged and fell through, so an element
        # with the wrong tag could still be reported as valid.
        return False
    return len(src.attrib) == 2 and \
        'pjlink_code' in src.attrib and \
        'pjlink_name' in src.attrib
def __add_manufacturer(mfgr):
    '''Process a <manufacturer> element, adding each child model to the db.

    :param mfgr: ElementTree 'manufacturer' element.
    :returns: Result of adding the manufacturer's last model, or None when
        the element is invalid or has no model children.
    '''
    log.debug(u"__add_manufacturer(mfgr='%s')" % mfgr)
    if mfgr.tag.lower() != u'manufacturer':
        log.error(u'Invalid entity where manufacturer should be: %s' % mfgr.tag)
        # raise invalid element?
        return None
    if u'name' not in mfgr.attrib:
        log.error(u'Manufacturer tag missing name')
        # raise Invalid element?
        return None
    # BUG FIX: 'r' was unassigned when the manufacturer had no model
    # children, raising NameError on the final return. The old
    # "r = None if z is None else z" was also just r = z.
    r = None
    for m in mfgr.getchildren():
        r = __add_model(mfgr.attrib[u'name'], m)
    return r
def __add_model(mfgr, model):
    '''Process a <model> element and add its input sources to the database.

    :param mfgr: Manufacturer name the model belongs to.
    :param model: ElementTree 'model' element whose children are sources.
    :returns: Result of db.add_new_model(), or None when the element is
        invalid.
    '''
    log.debug(u"__add_model(mfgr='%s', model='%s')" % (mfgr, model))
    if model.tag.lower() != u'model':
        log.error(u'Invalid entity where model should be: %s' % model.tag)
        # raise invalid element?
        return None
    if u'name' not in model.attrib:
        log.error(u'Model tag missing name')
        return None
    sources = []
    for s in model.getchildren():
        # Skip malformed source entries rather than aborting the model.
        if not __check_source_element(s):
            log.debug(u"Invalid or missing attributes for source - skipping")
            continue
        sources.append({u'name': u'%s' % s.attrib[u'pjlink_name'],
                        u'code': u'%s' % s.attrib[u'pjlink_code'],
                        u'text': u'%s' % s.text.strip()})
    log.debug(u"Processing %s %s" % (mfgr, model.attrib[u'name']))
    for i in sources:
        # BUG FIX: name and code were swapped in this debug message.
        log.debug(u"Source: name='%s' code='%s' text='%s'" %
                  (i[u'name'], i[u'code'], i[u'text']))
    return add_new_model(mfgr, model.attrib[u'name'], sources)
def __export_mfgr(page, manf):
    '''Append one manufacturer, with its models and sources, to the XML tree.

    :param page: Root element the manufacturer entry is appended under.
    :param manf: Database manufacturer record (.name, .models[.sources]).
    :returns: The manufacturer's name.
    '''
    log.debug(u'__export_mfgr(doc)')
    mfgr_node = SubElement(page, u'manufacturer',
                           name=manf.name)
    for mdl in manf.models:
        mdl_node = SubElement(mfgr_node, u'model',
                              name=mdl.name)
        for src in mdl.sources:
            src_node = SubElement(mdl_node, u'source',
                                  pjlink_name=src.pjlink_name,
                                  pjlink_code=src.pjlink_code)
            src_node.text = src.text
    return manf.name
# Main functions
def export_pjlink(directory, manufacturer):
    '''Export the manufacturer/model/source tables from the database as an XML file.

    If manufacturer is 'all', export all entries in database, otherwise
    only export the entries for manufacturer.

    Input:
        directory: Directory where file will be saved.
        manufacturer: Manufacturer name, or 'all' for every manufacturer.
    Return:
        Filename where xml file was saved or None on error.
    Files:
        <dir>/pjlink_<manufacturer_name>.xml
    '''
    if manufacturer.lower() == u'all':
        log.debug(u"export_mfgr() Exporting all entries in database")
        # list() keeps this working if keys() ever returns a view.
        l = list(list_manufacturers().keys())
        file_name = u'pjlink_multiple.xml'
    else:
        l = [manufacturer, ]
        file_name = u'pjlink_%s.xml' % manufacturer.lower()
    page = Element(u'pjlink', {u'class': u'1'})
    c = 0
    l.sort()  # Sort manufacturers so we get a nice output
    for i in l:
        z = get_manufacturer_by_name(i)
        if z is None:
            # BUG FIX: previously logged 'manufacturer' (which is just
            # 'all' in the multi-export case) instead of the missing entry.
            log.warning(u'export_mfgr() No database entry for %s' % i)
            continue
        log.debug(u"export_mfgr() exporting entries for manufacturer '%s'" % z)
        __export_mfgr(page, z)
        c = c + 1
    if c == 0:
        log.warning(u'export_file(): No manufacturers exported')
        return None
    out_file = path.join(directory, file_name)
    log.debug(u"Writing XML file %s" % out_file)
    try:
        # 'with' guarantees the file handle is closed even on failure;
        # encode explicitly so non-ASCII names cannot raise
        # UnicodeEncodeError on the binary-mode write.
        with open(out_file, 'wb') as f:
            f.write(__prettify(page).encode('utf-8'))
    except IOError as e:
        # BUG FIX: the message had three placeholders but only two
        # arguments (TypeError), and the function fell through to return
        # file_name even after a failed write.
        log.error(u"Error writing XML file %s: code '%s' : '%s'" %
                  (out_file, e.errno, e.strerror))
        return None
    log.debug(u"Wrote file %s" % out_file)
    return file_name
def import_pjlink(filename):
    '''Import XML file and save to manufacturer/model/source tables.

    See docs/projectors_xml_format.txt for format of XML file.

    Inputs:
        filename: Full pathname to file.
    Return:
        List of manufacturer name(s) added to the database, or None.
    Modifies:
        database manufacturer/model/source tables.
    '''
    log.debug(u"import_mfgr(filename='%s')" % filename)
    try:
        doc = ElementTree.parse(filename)
    except IOError as e:
        log.error(u"Can't parse file '%s' code '%s': '%s'" %
                  (filename, e.errno, e.strerror))
        return None
    except ElementTree.ParseError as e:
        # BUG FIX: malformed XML raises ParseError, not IOError, and
        # previously escaped this handler entirely.
        log.error(u"Can't parse file '%s': '%s'" % (filename, e))
        return None
    root = doc.getroot()
    # Test for PJLink document
    if root.tag.lower() != u'pjlink':
        # Raise invalid xml document
        log.error(u'Invalid root - root tag="%s"' % root.tag)
    pjlink_attr = root.attrib
    if u'class' not in pjlink_attr:
        log.debug(u'No pjlink class specified')
        # raise invalid pjlink class?
    elif pjlink_attr[u'class'] != '1':
        log.error(u'Invalid pjlink class specified: class=%s' % pjlink_attr[u'class'])
        # raise invalid pjlink class?
    else:
        log.debug(u'Found pjlink class=1 root element')
    # Retrieve manufacturer entry
    if len(root.getchildren()) < 1:
        log.error(u'Empty pjlink root document')
        return None
    # Process manufacturer entry(ies)
    r = []
    for m in root.getchildren():
        log.debug(u'import_mfgr() adding %s' % m.attrib[u'name'])
        r.append(__add_manufacturer(m))
    log.debug(u"import_mfgr(): Done parsing %s" % filename)
    return r if len(r) >= 1 else None
|
alisonken1/openlp-projector-2.0
|
openlp/projectors/xml_parser.py
|
Python
|
gpl-2.0
| 9,287
|
[
"Brian"
] |
69e2552cc8050460a25fae9d89083d4e443c7c60d66f2bc047201fc4b27387ed
|
# import asyncio
import sys
# VisitPy2 wraps the HTTP verbs (get/post/put) used against the service
# under test.
from verbs import VisitPy2 as Visit

# Base URL of the API server; earlier endpoints kept for reference.
# visit = Visit('http://192.168.59.100:8008/v1/app')
#visit = Visit('http://192.168.59.200:8080/v1')
visit = Visit('http://127.0.0.1:8080/v1')

# Test credentials plus JSON content type, sent with every request.
headers = { 'username':'user1',
    'password':'self.test_account_pw1',
    'content-type': 'application/json'}

# Earlier request payloads (email / doctor / appointment) kept commented
# out for reference.
# data = """{ 'email': {
# 'from':'password1',
# 'to':'user1@email.com' }
# }"""
#
# data = {
# 'email': 'a@b.com',
# 'firstname':'aaa',
# 'lastname':'bbb',
# 'experience':'10'
# }
# data = {
# 'doctorid':'d001',
# 'datetimeslot':'201511201400',
# 'patientid':'p001',
# 'illness': 'illness01',
# }
# #
# Active payload: a patient record.
data = {
    'email': 'a@b.com',
    'firstname':'aaa',
    'lastname':'bbb',
    'birthdate':'20151111'
}
if __name__ == '__main__':
    # Earlier manual test drivers for the doctor / appointment / patient
    # endpoints, kept commented out for reference.
    # if len(sys.argv) == 1:
    #     print(visit.post(suffix_url='doctor',
    #         headers=headers, data=data))
    # elif len(sys.argv) == 2:
    #     uid = sys.argv[1]
    #     print(visit.get(suffix_url='doctor/{}'.format(uid), headers=headers))
    #     data2 = {
    #         'firstname':'ccc',
    #         'lastname':'ddd',
    #         'experience':20
    #     }
    #     print(visit.put(suffix_url='doctor/{}'.format(uid),
    #         headers=headers, data=data2))
    #     print(visit.get(suffix_url='doctor/{}'.format(uid), headers=headers))
    # if len(sys.argv) == 1:
    #     print(visit.post(suffix_url='appointment',
    #         headers=headers, data=data))
    # elif len(sys.argv) == 2:
    #     docidtimepaid = sys.argv[1]
    #     print(visit.get(suffix_url='appointment/{}'.format(docidtimepaid), headers=headers))
    # elif len(sys.argv) == 2:
    #     docidtime = sys.argv[1]
    #     print(visit.get(suffix_url='appointment/{}'.format(docidtime), headers=headers))
    # if len(sys.argv) == 1:
    #     print(visit.post(suffix_url='patient',
    #         headers=headers, data=data))
    # elif len(sys.argv) == 2:
    #     uid = sys.argv[1]
    #     print(visit.get(suffix_url='patient/{}'.format(uid), headers=headers))
    #     data2 = {
    #         'firstname':'ccc',
    #         'lastname':'ddd',
    #         'birthdate':'19999999'
    #     }
    #     print(visit.put(suffix_url='patient/{}'.format(uid),
    #         headers=headers, data=data2))
    #     print(visit.get(suffix_url='patient/{}'.format(uid), headers=headers))
    # Active driver. No arguments: create a patient. One argument (a
    # patient uid): attach an object record to that patient, then read it
    # back by its '<objname>-<datetime>' key.
    if len(sys.argv) == 1:
        print(visit.post(suffix_url='patient',
            headers=headers, data=data))
    elif len(sys.argv) == 2:
        uid = sys.argv[1]
        odata = {
            "objname": 'objectname2',
            "datetime": "201511201300",
            "description": "x-ray"
        }
        print(visit.post(suffix_url='obj/{}'.format(uid), headers=headers, data=odata))
        print(visit.get(suffix_url='obj/{}/{}'.format(uid, odata['objname']+'-'+odata['datetime']),
            headers=headers))
    # print(visit.get(suffix_url='patient/{}'.format(uid), headers=headers))
|
pa2515-group2/server
|
server/verb.py
|
Python
|
mit
| 3,201
|
[
"VisIt"
] |
f0a2a9ff1a7f2eac57974bbf6ed348d34354c58d0a7202e1dd9ca10622915b0e
|
#!coding:utf-8
# Transmageddon
# Copyright (C) 2009 Christian Schaller <uraeus@gnome.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
# NOTE: this module is Python 2 only (print statements, PyGTK/pygst 0.10).
from skin import app_theme
from file_choose_button import FileChooserButton
from dtk.ui.button import Button
from dtk.ui.label import Label
from dtk.ui.menu import Menu
from new_combobox import NewComboBox
from conv_task_gui import ConvTAskGui, MediaItem
from gui import Form
import sys
import os
# Dump GStreamer pipeline graphs to /tmp for debugging.
os.environ["GST_DEBUG_DUMP_DOT_DIR"] = "/tmp"
os.putenv('GST_DEBUG_DUMP_DIR_DIR', '/tmp')
import which
import time
import transcoder_engine
import gobject; gobject.threads_init()
from urlparse import urlparse
import codecfinder
import about
import presets
import utils
import datetime
from locales import _
# from gettext import gettext as _
import gettext
try:
    import pygtk
    pygtk.require("2.0")
    import glib
    import gtk
    import pygst
    pygst.require("0.10")
    import gst
    import gst.pbutils
except Exception, e:
    print "failed to import required modules"
    print e
    sys.exit(1)
# Enforce minimum gstreamer-python / pygobject versions before doing
# anything else.
major, minor, patch = gst.pygst_version
if (major == 0) and (patch < 22):
    print "You need version 0.10.22 or higher of gstreamer-python for Transmageddon"
    sys.exit(1)
major, minor, patch = gobject.pygobject_version
if (major == 2) and (minor < 18):
    print "You need version 2.18.0 or higher of pygobject for Transmageddon"
    sys.exit(1)
# Drag-and-drop target for URI lists dropped onto the main window.
TARGET_TYPE_URI_LIST = 80
dnd_list = [ ( 'text/uri-list', 0, TARGET_TYPE_URI_LIST ) ]
# Maps short container labels shown in the UI to the internal container
# names used by supported_*_container_map below. The trailing comment on
# each entry is its combobox index.
name_to_supported_containers_map = {
    "AVI" : "AVI", #2
    "3GP" : "3GPP", #9
    "MP4" : "MPEG4", #8
    "MPG" : "MPEG PS", #3
    "TS" : "MPEG TS", #4
    "OGG" : "Ogg", #0
    "MKV" : "Matroska", #1
    "M2TS": "AVCHD/BD", #5
    "FLV" : "FLV", #6
    "MOV" : "Quicktime", #7
    "MXF" : "MXF", #10
    "ASF" : "ASF", #11
    "WebM": "WebM", #12
    _("Audio-only") : _("Audio-only")
}
# Container labels in combobox order; the index comments must stay in
# sync with the preset-to-index mapping in provide_presets().
# NOTE(review): the "I can not get this item to show" string appears to be
# a placeholder keeping index positions stable -- confirm before removing.
supported_containers = [
    "AVI", #2
    "3GP", #9
    "MP4", #8
    "MPG", #3
    "TS", #4
    "OGG", #0
    "MKV", #1
    "M2TS", #5
    "FLV", #6
    "MOV", #7
    "MXF", #10
    "ASF", #11
    "I can not get this item to show for some reason",
    "WebM" #12
]
# supported_containers = [
# "AVI", #2
# "3GPP", #9
# "MPEG4", #8
# "MPEG PS", #3
# "MPEG TS", #4
# "Ogg", #0
# "Matroska", #1
# "AVCHD/BD", #5
# "FLV", #6
# "Quicktime", #7
# "MXF", #10
# "ASF", #11
# "I can not get this item to show for some reason",
# "WebM" #12
# ]
# Short codec identifiers used when matching encoders.
supported_audio_codecs = [
    "vorbis",
    "flac",
    "mp3",
    "aac",
    "ac3",
    "speex",
    "celt",
    "amrnb",
    "wma2"
]
supported_video_codecs = [
    "theora",
    "dirac",
    "h264",
    "mpeg2",
    "mpeg4",
    "xvid",
    "h263p",
    "wmv2",
    "vp8"
]
# Maps containers to the codecs they support. The first two elements are
# "special" in that they are the default audio/video selections for that
# container modify code. # 123456
supported_video_container_map = {
    'Ogg': [ 'Theora', 'Dirac', 'On2 vp8' ],
    'MXF': [ 'H264', 'MPEG2', 'MPEG4' ],
    'Matroska': [ 'Dirac', 'Theora', 'H264', 'On2 vp8',
                  'MPEG4', 'MPEG2', 'xvid', 'H263+' ],
    # 'AVI': [ 'H264', 'Dirac', 'MPEG2', 'MPEG4', 'xvid',
    #          'WMV', 'On2 vp8' ], # Windows Media Video 2
    'AVI': [ 'H264', 'MPEG4', 'xvid'],
    'Quicktime': [ 'H264', 'Dirac', 'MPEG2', 'MPEG4', 'On2 vp8' ],
    # 'MPEG4': [ 'H264', 'MPEG2', 'MPEG4' ],
    'MPEG4': [ 'H264', 'MPEG4' ],
    'FLV': [ 'H264'],
    # '3GPP': [ 'H264', 'MPEG2', 'MPEG4', 'H263+' ],
    '3GPP': [ 'MPEG4', 'H263+' ],
    # 'MPEG PS': [ 'MPEG2', 'MPEG1', 'H264', 'MPEG4' ],
    'MPEG PS': [ 'MPEG2', 'H264'],
    'MPEG TS': [ 'MPEG2', 'MPEG1', 'H264', 'MPEG4', 'Dirac' ],
    'AVCHD/BD': [ 'H264' ],
    'ASF': [ 'WMV' ], # Windows Media Video 2
    'WebM': [ 'On2 vp8']
}
supported_audio_container_map = {
    'Ogg': [ 'Vorbis', 'FLAC', 'Speex', 'Celt Ultra' ],
    'MXF': [ 'MP3', 'AAC', 'AC3' ],
    'Matroska': [ 'FLAC', 'AAC', 'AC3', 'Vorbis' ],
    # 'AVI': [ 'mp3', 'AC3', 'WMA' ], # Windows Media Audio 2
    'AVI': [ 'MP3', 'AC3', 'WMA' ],
    'Quicktime': [ 'AAC', 'AC3', 'MP3' ],
    'MPEG4': [ 'AAC', 'MP3' ],
    # '3GPP': [ 'AAC', 'mp3', 'AMR-NB' ],
    '3GPP': [ 'AAC', 'AMR-NB' ],
    # 'MPEG PS': [ 'mp3', 'AC3', 'AAC', 'mp2' ],
    'MPEG PS': [ 'MP3', 'AC3' ],
    'MPEG TS': [ 'MP3', 'AC3', 'AAC', 'MP2' ],
    'AVCHD/BD': [ 'AC3' ],
    'FLV': [ 'MP3' ],
    'ASF': [ 'WMA', 'MP3'], # Windows Media Audio 2
    'WebM': [ 'Vorbis']
    # "No container" is 13th option here (0-12)
    # if adding more containers make sure to update code for 'No container as it is placement tied'
}
class TransmageddonUI:
"""This class loads the GtkBuilder file of the UI"""
    def __init__(self, conv_list=[]):
        """Build the main UI, wire up signals and set transcoding defaults.

        :param conv_list: optional list of filenames queued for conversion
            at startup.
        """
        # NOTE(review): mutable default argument; appears safe because the
        # default list is never mutated here -- confirm callers.
        self.form = Form()
        self.form.show_all()
        self.form.hide_setting()
        # self.form.show_and_hide_task_btn.connect("clicked", self.conv_task_gui_show_and_hide_task_btn_clicked)
        # conv task list.
        self.conv_list = conv_list
        self.conv_dict = {} # save conv state{filename, self.audiodata...}.
        # Transmageddon conv task list init.
        self.conv_task_gui = ConvTAskGui() # task list gui.
        # self.conv_task_gui.start_btn.connect("clicked", self.conv_task_gui_start_btn_clicked)
        self.conv_task_gui.pause_btn.connect("clicked", self.conv_task_gui_pause_btn_clicked)
        self.conv_task_gui.close_btn.connect("clicked", lambda w : self.conv_task_gui.hide_all())
        self.conv_task_gui.list_view.connect("button-press-event", self.show_popup_menu)
        self.conv_task_gui.list_view.connect("single-click-item", self.save_open_selsect_file_name)
        self.conv_task_gui.list_view.connect("delete-select-items", self.delete_task_list)
        self.task_list = []
        self.conv_task_list = []
        self.task_index = 0
        #Set up i18n
        gettext.bindtextdomain("transmageddon","../../share/locale")
        gettext.textdomain("transmageddon")
        #initialize discoverer
        self.discovered = gst.pbutils.Discoverer(5000000000)
        self.discovered.connect('discovered', self.succeed)
        self.discovered.start()
        self.audiorows=[] # set up the lists for holding the codec combobuttons
        self.videorows=[]
        self.audiocodecs=[] # create lists to store the ordered lists of codecs
        self.videocodecs=[]
        # set flag so we remove bogus value from menu only once
        self.bogus=0
        # these dynamic comboboxes allow us to support files with multiple streams eventually
        def dynamic_comboboxes_audio(streams,extra = []):
            # Registers and returns the form's bitrate combobox.
            vbox = gtk.VBox()
            self.audiorows.append(self.form.bit_rate_combo)
            return self.form.bit_rate_combo
        def dynamic_comboboxes_video(streams,extra = []):
            # Registers and returns the form's framerate combobox.
            vbox = gtk.VBox()
            self.videorows.append(self.form.frame_rate_combo)
            return self.form.frame_rate_combo
        #Define functionality of our button and main window
        # self.TopWindow = self.builder.get_object("TopWindow")
        self.TopWindow = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.vbox1 = gtk.VBox()
        # self.FileChooser = gtk.FileChooserButton("choose file")
        # self.FileChooser = FileChooserButton("choose file")
        self.FileChooser = self.form.modify_chooser_btn
        self.videoinformation = gtk.Label("Video height/width:")
        self.audioinformation = gtk.Label("Audio Channels:")
        self.videocodec = gtk.Label("Video codec:")
        self.audiocodec = gtk.Label("Audio codec:")
        self.audiobox = dynamic_comboboxes_audio([gobject.TYPE_PYOBJECT])
        self.videobox = dynamic_comboboxes_video([gobject.TYPE_PYOBJECT])
        self.CodecBox = gtk.Table()
        self.presetsmodel = gtk.ListStore(gobject.TYPE_STRING)
        self.presetchoice = gtk.ComboBox(self.presetsmodel)
        # self.presetchoice = self.form.brand_combo # NewComboBox(110)
        self.cellrenderertext1 = gtk.CellRendererText()
        self.presetchoice.pack_start(self.cellrenderertext1, True)
        self.presetchoice.add_attribute(self.cellrenderertext1, 'text', 0)
        self.containerchoice = self.form.format_combo # NewComboBox(110)
        self.rotationchoice = NewComboBox(110)
        self.hbox_button = gtk.HBox()
        # "start conversion" button comes from the form.
        self.transcodebutton = self.form.start_btn # Button("start") #gtk.Button("transcodebutton")
        self.cancelbutton = Button("停止转换") #gtk.Button("cancelbutton")
        self.ProgressBar = gtk.ProgressBar()
        self.StatusBar = gtk.Statusbar()
        self.audiorows[0].connect("changed", self.on_audiocodec_changed)
        self.videorows[0].connect("changed", self.on_videocodec_changed)
        # TopWindow.
        # self.TopWindow.set_title("video format conversion")
        # self.TopWindow.set_size_request(350, 400)
        # self.TopWindow.connect("destroy", lambda w : gtk.main_quit())
        # FileChooser.
        # self.FileChooser.connect("file-set", self.on_FileChooser_file_set)
        self.FileChooser.connect("changed", self.on_FileChooser_file_set)
        # self.FileChooser.connect("selection-changed", self.on_FileChooser_file_set)
        # NOTE(review): "changed" is connected twice, so the handler fires
        # twice per change -- confirm whether that is intentional.
        self.FileChooser.connect("changed", self.on_FileChooser_file_set)
        # self.FileChooser.dialog.connect("file-set", self.on_FileChooser_file_set)
        # self.FileChooser.dialog.connect("selection-changed", self.on_FileChooser_file_set)
        # presetchoice.
        self.presetchoice.set_active(0)
        self.presetchoice.connect("changed", self.on_presetchoice_changed)
        # containerchoice.
        self.containerchoice.connect("changed", self.on_containerchoice_changed)
        # rotationchoice.
        self.rotationchoice.connect("changed", self.on_rotationchoice_changed)
        #
        # transcodebutton.
        self.transcodebutton.connect("clicked", self.on_transcodebutton_clicked)
        # cancelbutton.
        self.cancelbutton.connect("clicked", self.on_cancelbutton_clicked)
        #
        # add child widgets.
        #
        # self.hbox_button.pack_start(self.cancelbutton, True, True)
        # self.hbox_button.pack_start(self.transcodebutton, True, True)
        # self.vbox1.pack_start(self.FileChooser, False, False)
        # self.vbox1.pack_start(self.videoinformation, True, True) # video height/width
        # self.vbox1.pack_start(self.audioinformation, True, True) # audio channels
        # self.vbox1.pack_start(self.videocodec, False, False) # video info
        # self.vbox1.pack_start(self.audiocodec, False, False) # audio info
        # self.vbox1.pack_start(self.presetchoice, False, False)
        # self.vbox1.pack_start(self.containerchoice, False, False)
        # self.vbox1.pack_start(self.CodecBox, False, False)
        # self.vbox1.pack_start(self.rotationchoice, False, False)
        # self.vbox1.pack_start(self.ProgressBar, False, False)
        # self.vbox1.pack_start(self.hbox_button, False, False)
        # self.vbox1.pack_start(self.StatusBar, False, False)
        # self.TopWindow.add(self.vbox1)
        def get_file_path_from_dnd_dropped_uri(self, uri):
            # get the path to file
            # Strips the file-URI scheme prefix produced by different
            # file managers / platforms.
            path = ""
            if uri.startswith('file:\\\\\\'): # windows
                path = uri[8:] # 8 is len('file:///')
            elif uri.startswith('file://'): # nautilus, rox
                path = uri[7:] # 7 is len('file://')
            elif uri.startswith('file:'): # xffm
                path = uri[5:] # 5 is len('file:')
            return path
        def on_drag_data_received(widget, context, x, y, selection, target_type, \
                timestamp):
            # Drag-and-drop handler for URI lists dropped on the window.
            if target_type == TARGET_TYPE_URI_LIST:
                uri = selection.data.strip('\r\n\x00')
                # self.builder.get_object ("FileChooser").set_uri(uri)
        self.TopWindow.connect('drag_data_received', on_drag_data_received)
        self.TopWindow.drag_dest_set( gtk.DEST_DEFAULT_MOTION |
            gtk.DEST_DEFAULT_HIGHLIGHT | gtk.DEST_DEFAULT_DROP, dnd_list, \
            gtk.gdk.ACTION_COPY)
        self.start_time = False
        self.multipass = False
        self.passcounter = False
        # Set the Videos XDG UserDir as the default directory for the filechooser
        # also make sure directory exists
        if 'get_user_special_dir' in glib.__dict__:
            self.videodirectory = \
                glib.get_user_special_dir(glib.USER_DIRECTORY_VIDEOS)
            self.audiodirectory = \
                glib.get_user_special_dir(glib.USER_DIRECTORY_MUSIC)
        else:
            print "XDG video or audio directory not available"
            self.videodirectory = os.getenv('HOME')
            self.audiodirectory = os.getenv('HOME')
        if self.videodirectory is None:
            print "XDG video or audio directory not available"
            self.videodirectory = os.getenv('HOME')
            self.audiodirectory = os.getenv('HOME')
        CheckDir = os.path.isdir(self.videodirectory)
        if CheckDir == (False):
            os.mkdir(self.videodirectory)
        CheckDir = os.path.isdir(self.audiodirectory)
        if CheckDir == (False):
            os.mkdir(self.audiodirectory)
        self.FileChooser.set_current_folder(self.videodirectory)
        # Setting AppIcon
        FileExist = os.path.isfile("../../share/pixmaps/transmageddon.svg")
        if FileExist:
            self.TopWindow.set_icon_from_file( \
                "../../share/pixmaps/transmageddon.svg")
        else:
            try:
                self.TopWindow.set_icon_from_file("transmageddon.svg")
            except:
                print "failed to find appicon"
        # default all but top box to insensitive by default
        # self.containerchoice.set_sensitive(False)
        self.CodecBox.set_sensitive(False)
        self.transcodebutton.set_sensitive(False)
        self.cancelbutton.set_sensitive(False)
        self.presetchoice.set_sensitive(False)
        self.containerchoice.set_sensitive(False)
        self.rotationchoice.set_sensitive(False)
        # set default values for various variables
        self.AudioCodec = "vorbis"
        self.VideoCodec = "theora"
        self.ProgressBar.set_text("Transcoding Progress")
        self.container = False
        self.vsourcecaps = False
        self.asourcecaps = False
        self.videopasstoggle=False # toggle for passthrough mode chosen
        self.audiopasstoggle=False
        self.videopass=False # toggle for enabling adding of video passthrough on menu
        self.audiopass=False
        self.containertoggle=False # used to not check for encoders with pbutils
        self.discover_done=False # lets us know that discover is finished
        self.missingtoggle=False
        self.interlaced=False
        self.havevideo=False # tracks if input file got video
        self.haveaudio=False
        self.devicename = "nopreset"
        self.nocontaineroptiontoggle=False
        self.outputdirectory=False # directory for holding output directory value
        # create variables to store passthrough options slot in the menu
        self.audiopassmenuno=1
        self.videopassmenuno=1
        self.videonovideomenuno=-2
        # create toggle so I can split codepath depending on if I using a preset
        # or not
        self.usingpreset=False
        self.presetaudiocodec="None"
        self.presetvideocodec="None"
        self.inputvideocaps=None # using this value to store videocodec name to feed uridecodebin to avoid decoding video when not keeping video
        self.nocontainernumber = int(13) # this needs to be set to the number of the no container option in the menu (from 0)
        self.p_duration = gst.CLOCK_TIME_NONE
        self.p_time = gst.FORMAT_TIME
        # Populate the Container format combobox
        for i in supported_containers:
            self.containerchoice.append_text(str(i))
        # add i18n "No container"option
        # self.containerchoice.append_text("No container (Audio-only)")
        self.containerchoice.append_text(_("Audio-only"))
        # Populate the rotatation box
        self.rotationlist = ["No rotation (default)",\
            "Clockwise 90 degrees", \
            "Rotate 180 degrees",
            "Counterclockwise 90 degrees", \
            "Horizontal flip",
            "Vertical flip", \
            "Upper left diagonal flip",
            "Upper right diagnonal flip" ]
        for y in self.rotationlist:
            self.rotationchoice.append_text(y)
        self.rotationchoice.set_active(0)
        self.rotationvalue = int(0)
        # Populate Device Presets combobox
        devicelist = []
        shortname = []
        preset_list = sorted(presets.get().items(),
            key = (lambda x: x[1].make + x[1].model))
        for x, (name, device) in enumerate(preset_list):
            # print "device:", device
            self.presetchoice.append_text(str(device))
            devicelist.append(str(device))
            shortname.append(str(name))
        for (name, device) in (presets.get().items()):
            shortname.append(str(name))
        self.presetchoices = dict(zip(devicelist, shortname))
        self.presetchoice.prepend_text("No Presets")
        # NOTE(review): initialised to the *string* "False", not a boolean;
        # comparisons elsewhere use "== True" -- verify the intended type.
        self.waiting_for_signal="False"
        # self.TopWindow.show_all() # show TopWindow.
        # Queue any files handed to us on construction.
        for conv in self.conv_list:
            self.FileChooser.set_filename(conv)
            self.push_all(conv)
        # for key in self.conv_dict.keys():
        #     print "key:", self.conv_dict[key]
# Get all preset values
def reverse_lookup(self,v):
for k in codecfinder.codecmap:
if codecfinder.codecmap[k] == v:
return k
def provide_presets(self, devicename): #
# print "provide_presets:"
devices = presets.get() # presets.py return _presets
# print "devices:", devices
device = devices[devicename]
# print "device:", device
preset = device.presets["Normal"] # get { container, videc , audec, extension... }
# print "preset:", preset.vcodec.width # test input.
self.usingpreset=True
self.containerchoice.set_active(-1) # resetting to -1 to ensure population of menu triggers
self.presetaudiocodec = preset.acodec.name
# print "self.presetaudiocodec", self.presetaudiocodec
self.presetvideocodec = preset.vcodec.name
# print "self.presetvideocodec:", self.presetvideocodec
if preset.container == "application/ogg":
self.containerchoice.set_active(0)
elif preset.container == "video/x-matroska":
self.containerchoice.set_active(1)
elif preset.container == "video/x-msvideo":
self.containerchoice.set_active(2)
elif preset.container == "video/mpeg,mpegversion=2,systemstream=true":
self.containerchoice.set_active(3)
elif preset.container == "video/mpegts,systemstream=true,packetsize=188":
self.containerchoice.set_active(4)
elif preset.container == "video/mpegts,systemstream=true,packetsize=192":
self.containerchoice.set_active(5)
elif preset.container == "video/x-flv":
self.containerchoice.set_active(6)
elif preset.container == "video/quicktime,variant=apple":
self.containerchoice.set_active(7)
elif preset.container == "video/quicktime,variant=iso":
self.containerchoice.set_active(8)
elif preset.container == "video/quicktime,variant=3gpp":
self.containerchoice.set_active(9)
elif preset.container == "application/mxf":
self.containerchoice.set_active(10)
elif preset.container == "video/x-ms-asf":
self.containerchoice.set_active(11)
elif preset.container == "video/webm":
self.containerchoice.set_active(12)
else:
print "failed to set container format from preset data"
# Check for number of passes
# passes = preset.vcodec.passes
#if passes == "0":
self.multipass = False
#else:
# self.multipass = int(passes)
# self.passcounter = int(0)
# Create query on uridecoder to get values to populate progressbar
# Notes:
# Query interface only available on uridecoder, not decodebin2)
# FORMAT_TIME only value implemented by all plugins used
# a lot of original code from gst-python synchronizer.py example
    def Increment_Progressbar(self):
        """Poll the transcoder pipeline and update the progress bar.

        Invoked repeatedly from a gobject timeout (see ProgressBarUpdate).
        Returns True to keep polling while transcoding is under way,
        False (or None) to stop the timeout.
        """
        # self.conv_task_gui.show_time_label.set_text(_("..."))
        if self.start_time == False:
            # First tick: remember when transcoding started so remaining
            # time can be estimated from elapsed time vs. progress.
            self.start_time = time.time()
        try:
            position, format = \
                self._transcoder.uridecoder.query_position(gst.FORMAT_TIME)
            # print "position is " + str(position)
        except:
            position = gst.CLOCK_TIME_NONE
        try:
            duration, format = \
                self._transcoder.uridecoder.query_duration(gst.FORMAT_TIME)
            # print "duration is " + str(duration)
        except:
            duration = gst.CLOCK_TIME_NONE
        if position != gst.CLOCK_TIME_NONE:
            # Fraction of the clip processed so far.
            value = float(position) / duration
            # print "value is " + str(value)
            if float(value) < (1.0) and float(value) >= 0:
                self.ProgressBar.set_fraction(value)
                percent = (value*100)
                timespent = time.time() - self.start_time
                percent_remain = (100-percent)
                # print "percent remain " + str(percent_remain)
                # print "percentage is " + str(percent)
                if percent != 0:
                    # remaining = (elapsed per percent) * percent left
                    rem = (timespent / percent) * percent_remain
                else:
                    rem = 0.1
                hour = rem / 3600
                min = (rem % 3600) / 60  # NOTE: shadows builtin min()
                sec = rem % 60
                time_rem = "%(hour)02d:%(min)02d:%(sec)02d" % {
                    "hour": hour,
                    "min": min,
                    "sec": sec,
                }
                #
                # Mirror the estimate in the task-list GUI, but only while
                # the pause button still reads "Pause" (i.e. running).
                if self.conv_task_gui.pause_btn.label == _("Pause"):
                    self.conv_task_gui.show_time_label.set_text( "%s : %s" % (_("Time remaining"), time_rem))
                else:
                    self.conv_task_gui.show_time_label.set_text("")
                #
                if percent_remain > 0.5:
                    if self.passcounter == int(0):
                        txt = "Estimated time remaining: %(time)s"
                        self.ProgressBar.set_text(txt % \
                            {'time': str(time_rem)})
                    else:
                        txt = "Pass %(count)d time remaining: %(time)s"
                        self.ProgressBar.set_text(txt % { \
                            'count': self.passcounter, \
                            'time': str(time_rem), })
                    return True
                else:
                    # Essentially finished; reset the bar and stop polling.
                    self.ProgressBar.set_fraction(0.0)
                    return False
        else:
            return False
# Call gobject.timeout_add with a value of 500millisecond to regularly poll
# for position so we can
# use it for the progressbar
    def ProgressBarUpdate(self, source):
        """Start a 500 ms gobject timeout that drives Increment_Progressbar."""
        gobject.timeout_add(500, self.Increment_Progressbar)
    def _on_eos(self, source):
        """Handle end-of-stream from the pipeline.

        On the final (or only) pass: re-enable the UI, mark the current
        task done and start the next queued conversion. On intermediate
        multipass passes: restart transcoding for the next pass.
        """
        context_id = self.StatusBar.get_context_id("EOS")
        if (self.multipass == False) or (self.passcounter == int(0)):
            self.StatusBar.push(context_id, ("File saved to %(dir)s" % \
                {'dir': self.outputdirectory}))
            # Conversion finished: unlock the controls again.
            self.FileChooser.set_sensitive(True)
            self.containerchoice.set_sensitive(True)
            self.CodecBox.set_sensitive(True)
            self.presetchoice.set_sensitive(True)
            self.cancelbutton.set_sensitive(False)
            self.transcodebutton.set_sensitive(True)
            self.rotationchoice.set_sensitive(True)
            self.start_time = False
            self.conv_task_gui.show_time_label.set_text("")
            self.ProgressBar.set_text("Done Transcoding")
            # self.task_index = min(self.task_index + 1, len(self.task_list) - 1) # task
            self.task_list[self.task_index].Pipeline("null") # close Pipeline.
            self.task_list[self.task_index].conv_flags = 1 # conv done.
            try:
                self.conv_task_gui.list_view.items[self.task_index].set_status_icon("success") # set status icon.
            except Exception, e:
                print "_on_eos[error]:", e
            self.task_index += 1
            self.ProgressBar.set_fraction(1.0)
            self.ProgressBar = gtk.ProgressBar() # restart .
            if self.task_index < len(self.task_list):
                # More queued tasks: kick off the next one and reset the
                # per-file transcoding state.
                self.start_conv_function()
                self.start_time = False
                self.multipass = False
                self.passcounter = False
                self.audiopasstoggle=False
                self.videopasstoggle=False
                self.houseclean=False # due to not knowing which APIs to use I need
                    # this toggle to avoid errors when cleaning
                    # the codec comboboxes
        else:
            # Intermediate pass of a multipass encode.
            self.StatusBar.push(context_id, ("Pass %(count)d Complete" % \
                {'count': self.passcounter}))
            self.start_time = False
            self.ProgressBar.set_text("Start next pass")
            if self.passcounter == (self.multipass-1):
                self.passcounter = int(0)
                self._start_transcoding()
            else:
                self.passcounter = self.passcounter+1
                self._start_transcoding()
# Use the pygst extension 'discoverer' to get information about the incoming
# media. Probably need to get codec data in another way.
# this code is probably more complex than it needs to be currently
    def succeed(self, discoverer, info, error):
        """Callback for the gst.pbutils Discoverer 'discovered' signal.

        Extracts audio/video stream information from *info*, stores it in
        self.audiodata / self.videodata, updates the info labels and, when
        a transcode was waiting on discovery, resumes it.
        """
        result=gst.pbutils.DiscovererInfo.get_result(info)
        streaminfo=info.get_stream_info()
        try:
            container = streaminfo.get_caps()
        except Exception, e:
            # No container caps (e.g. elementary stream); show "No Video".
            print "succeed[error]:", e
            # self.form.bit_rate_combo.prepend_text("No A")
            self.form.frame_rate_combo.prepend_text(_("No Video"))
        seekbool = info.get_seekable()
        clipduration=info.get_duration()
        # result=gst.pbutils.DiscovererInfo.get_result(info)
        # streaminfo=info.get_stream_info()
        # container = streaminfo.get_caps()
        # seekbool = info.get_seekable()
        # clipduration=info.get_duration()
        audiostreamcounter=-1
        audiostreams=[]
        audiotags=[]
        audiochannels=[]
        samplerate=[]
        inputaudiocaps=[]
        markupaudioinfo=[]
        videowidth = None
        videoheight = None
        for i in info.get_stream_list():
            if isinstance(i, gst.pbutils.DiscovererAudioInfo):
                # Collect per-audio-stream caps, channel count and rate.
                audiostreamcounter=audiostreamcounter+1
                inputaudiocaps.append(i.get_caps())
                audiostreams.append( \
                    gst.pbutils.get_codec_description(inputaudiocaps[audiostreamcounter]))
                audiotags.append(i.get_tags())
                test=i.get_channels()
                audiochannels.append(i.get_channels())
                samplerate.append(i.get_sample_rate())
                self.haveaudio=True
                self.audiodata = { 'audiochannels' : audiochannels[audiostreamcounter], \
                    'samplerate' : samplerate[audiostreamcounter], 'audiotype' : inputaudiocaps[audiostreamcounter], \
                    'clipduration' : clipduration }
                markupaudioinfo.append((''.join(('<small>', \
                    'Audio channels: ', str(audiochannels[audiostreamcounter]) ,'</small>'))))
                self.containerchoice.set_active(-1) # set this here to ensure it happens even with quick audio-only
                self.containerchoice.set_active(0)
            if self.haveaudio==False:
                self.audioinformation.set_markup(''.join(('<small>', "No Audio", '</small>')))
                self.audiocodec.set_markup(''.join(('<small>', "",'</small>')))
            if isinstance(i, gst.pbutils.DiscovererVideoInfo):
                # Collect video caps, interlacing, geometry and framerate.
                self.inputvideocaps=i.get_caps()
                videotags=i.get_tags()
                interlacedbool = i.is_interlaced()
                if interlacedbool is True:
                    self.interlaced=True
                self.havevideo=True
                self.populate_menu_choices() # run this to ensure video menu gets filled
                videoheight=i.get_height()
                videowidth=i.get_width()
                videodenom=i.get_framerate_denom()
                videonum=i.get_framerate_num()
                self.videodata = { 'videowidth' : videowidth, 'videoheight' : videoheight, 'videotype' : self.inputvideocaps,
                    'fratenum' : videonum, 'frateden' : videodenom }
        self.discover_done=True
        if self.havevideo==False:
            self.videoinformation.set_markup(''.join(('<small>', _("No Video"), '</small>')))
            self.videocodec.set_markup(''.join(('<small>', "",
                '</small>')))
        # NOTE(review): waiting_for_signal is initialised to the string
        # "False" in __init__; this '== True' only matches if it is set to
        # boolean True elsewhere -- confirm.
        if self.waiting_for_signal == True:
            if self.containertoggle == True:
                if self.container != False:
                    self.check_for_passthrough(self.container)
                else:
                    self.check_for_elements()
                if self.missingtoggle==False:
                    self._start_transcoding()
            if self.container != False:
                self.check_for_passthrough(self.container)
        # set markup
        if audiostreamcounter >= 0:
            self.audioinformation.set_markup(''.join(('<small>', \
                'Audio channels: ', str(audiochannels[0]), '</small>')))
            self.audiocodec.set_markup(''.join(('<small>','Audio codec: ', \
                str(gst.pbutils.get_codec_description(inputaudiocaps[audiostreamcounter])), \
                '</small>')))
        if videowidth and videoheight:
            self.videoinformation.set_markup(''.join(('<small>', 'Video width/height: ', str(videowidth),
                "x", str(videoheight), '</small>')))
            self.videocodec.set_markup(''.join(('<small>', 'Video codec: ',
                str(gst.pbutils.get_codec_description(self.inputvideocaps)),
                '</small>')))
def discover(self, path):
    """Kick off asynchronous GStreamer discovery of the media at *path*."""
    file_uri = "file://" + path
    self.discovered.discover_uri_async(file_uri)
def mediacheck(self, FileChosen):
    """Extract the filesystem path from the chosen URI and discover it."""
    parsed = urlparse(FileChosen)
    self.discover(parsed.path)
def check_for_passthrough(self, containerchoice):
    """Decide whether the input audio/video streams can be passed through
    (remuxed without re-encoding) into the chosen container.

    Sets self.videopass / self.audiopass, and caches the intersected caps
    in self.vsourcecaps / self.asourcecaps when passthrough is possible.
    Falls back to check_for_elements() when no muxer element is found.
    """
    # Sentinel string: "no intersection computed yet".
    videointersect = ("EMPTY")
    audiointersect = ("EMPTY")
    if (containerchoice != False or self.usingpreset==False):
        container = codecfinder.containermap[containerchoice]
        containerelement = codecfinder.get_muxer_element(container)
        if containerelement == False:
            # Muxer missing entirely: trigger the missing-plugin flow.
            self.containertoggle = True
            self.check_for_elements()
        else:
            factory = gst.registry_get_default().lookup_feature(containerelement)
            # Walk the muxer's sink pad templates and intersect their caps
            # with the input stream caps to test compatibility.
            for x in factory.get_static_pad_templates():
                if (x.direction == gst.PAD_SINK):
                    sourcecaps = x.get_caps()
                    if self.havevideo == True:
                        if videointersect == ("EMPTY"):
                            # clean accepted caps to 'pure' value without parsing requirements
                            # might be redudant and caused by encodebin bug
                            textdata=gst.Caps.to_string(self.videodata['videotype'])
                            sep= ','
                            minitext = textdata.split(sep, 1)[0]
                            cleaned_videodata=gst.Caps(minitext)
                            videointersect = sourcecaps.intersect(cleaned_videodata)
                            if videointersect != ("EMPTY"):
                                self.vsourcecaps = videointersect
                    if self.haveaudio == True:
                        if audiointersect == ("EMPTY"):
                            audiointersect = sourcecaps.intersect(self.audiodata['audiotype'])
                            if audiointersect != ("EMPTY"):
                                self.asourcecaps = audiointersect
            # NOTE(review): an empty gst.Caps does not compare equal to the
            # string "EMPTY"; presumably intersect() stringification was relied
            # on here — confirm against the gst version in use.
            if videointersect != ("EMPTY"):
                self.videopass=True
            else:
                self.videopass=False
            if audiointersect != ("EMPTY"):
                self.audiopass=True
            else:
                self.audiopass=False
# define the behaviour of the other buttons
def on_FileChooser_file_set(self, widget, filename, uri):
    """Handler for a newly chosen input file: reset per-file state, start
    discovery, and re-enable the container/preset widgets."""
    self.form.path_entry.set_text(filename)  # show the full path in the entry
    self.filename = self.FileChooser.get_filename()
    self.audiodata = {}
    if self.filename is not None:
        self.haveaudio=False #make sure to reset these for each file
        self.havevideo=False #
        self.mediacheck(self.filename)
    self.ProgressBar.set_fraction(0.0)
    self.ProgressBar.set_text("Transcoding Progress")
    # NOTE(review): havevideo is set asynchronously by discovery, so this
    # check may run before discovery completes — confirm intended ordering.
    if (self.havevideo==False and self.nocontaineroptiontoggle==False):
        self.nocontaineroptiontoggle=True
    else:
        self.presetchoice.set_sensitive(True)
        self.presetchoice.set_active(0)
        # removing bogus text from supported_containers
        if self.bogus==0:
            self.containerchoice.remove_text(12)
            self.bogus=1
        self.nocontaineroptiontoggle=False
    self.containerchoice.set_sensitive(True)
def push_all(self, key):
    """Snapshot the current per-file conversion state under *key*."""
    state = (
        self.filename,
        self.audiodata,
        self.haveaudio,
        self.havevideo,
        self.discovered,
        self.nocontaineroptiontoggle,
        self.bogus,
    )
    self.conv_dict[key] = state
def pop_all(self, key):
    """Restore the per-file conversion state previously saved under *key*."""
    (self.filename,
     self.audiodata,
     self.haveaudio,
     self.havevideo,
     self.discovered,
     self.nocontaineroptiontoggle,
     self.bogus) = self.conv_dict[key]
def conv_task_gui_show_and_hide_task_btn_clicked(self, widget):
    """Toggle visibility of the conversion-task window."""
    if self.conv_task_gui.get_visible():
        self.conv_task_gui.hide_all()
    else:
        self.conv_task_gui.show_all()
def conv_task_gui_pause_play(self):
    """Pause the current task's pipeline and show the 'wait' icon."""
    self.task_list[self.task_index].Pipeline("pause")
    self.conv_task_gui.list_view.items[self.task_index].set_status_icon("wait")
def conv_task_gui_staring_play(self):
    """Resume the current task's pipeline and show the 'working' icon."""
    self.task_list[self.task_index].Pipeline("playing")
    self.conv_task_gui.list_view.items[self.task_index].set_status_icon("working")
def start_conv_function(self):
    """Start (play) the task at self.task_index and wire up its progress,
    EOS and error signals to the GUI. Errors are only printed (best-effort)."""
    try:
        self.task_list[self.task_index].Pipeline("playing")
        self.conv_task_gui.show_time_label.set_text(_("Converting"))
        self._transcoder = self.task_list[self.task_index]
        self._transcoder.connect("ready-for-querying", self.ProgressBarUpdate)
        self._transcoder.connect("got-eos", self._on_eos)
        self._transcoder.connect("got-error", self.show_error)
        self.conv_task_gui.list_view.items[self.task_index].set_status_icon("working")
        # The list-view item doubles as the progress bar for this task.
        self.ProgressBar = self.conv_task_gui.list_view.items[self.task_index]
        self.conv_task_gui.queue_draw()
    except Exception, e:
        print "start_conv_function[error]:", e
def conv_task_gui_pause_btn_clicked(self, widget):
    """Toggle pause/continue for the current task; the button label carries
    the toggle state."""
    try:
        if self.task_list[self.task_index].state_label != "null":
            if widget.label == _("Pause"):
                widget.set_label(_("continue"))
                self.conv_task_gui_pause_play()
            else:
                widget.set_label(_("Pause"))
                self.conv_task_gui_staring_play()
    except Exception, e:
        print "conv_task_gui_pause_btn_clicked:", e
def show_popup_menu(self, widget, event):
    """Show the right-click (button 3) context menu at the pointer position."""
    if 3 == event.button:
        self.root_menu = Menu([(None, _("Open a directory"), self.open_conv_file_dir),
                               None,
                               (None, _("Delete"), self.delete_conv_task_file),
                               (None, _("Remove complete tasks"), self.clear_succeed_conv_task_file)
                               ], True)
        self.root_menu.show((int(event.x_root), int(event.y_root)), (0, 0))
def clear_succeed_conv_task_file(self):
    """Remove all completed (conv_flags) tasks from the list view and task
    list, then restart the queue from index 0 if work remains."""
    temp_task_list = []
    task_i = 0
    for task in self.task_list:
        if task.conv_flags:
            # Completed: drop its row; task_i stays pointing at the next row.
            del self.conv_task_gui.list_view.items[task_i]
            temp_task_list.append(task)
        else:
            task_i += 1
            task.Pipeline("pause") # pause task.
    # delete transcoder task.
    for temp_task in temp_task_list:
        self.task_list.remove(temp_task)
    # restart start task.
    if temp_task_list != [] and self.task_list != []:
        self.task_index = 0
        self.conv_task_gui.show_all()
        self.start_conv_function()
        gtk.timeout_add(1200, self.restart_start_btn)
    else:
        try:
            if not self.task_list[self.task_index].conv_flags:
                self.task_list[self.task_index].Pipeline("playing")
        except Exception, e:
            print "clear_succeed_conv_task_file[error]:", e
def delete_conv_task_file(self):
    """Delete the tasks selected in the list view: null their pipelines if
    still running, remove them from the task list, and clear the rows."""
    self.select_rows = self.conv_task_gui.list_view.select_rows
    self.items = self.conv_task_gui.list_view.items
    temp_task_list = []
    for row in self.select_rows:
        if not self.task_list[row].conv_flags:
            self.task_list[row].Pipeline("null") # set pipiline null.
        # NOTE(review): assuming every selected row is removed, not only
        # unfinished ones — confirm original indentation intent.
        temp_task_list.append(self.task_list[row]) # add to temp task list.
    for temp_task in temp_task_list:
        self.task_list.remove(temp_task)
    # delete select.
    self.conv_task_gui.list_view.delete_select_items()
    self.conv_task_gui.show_time_label.set_text("")
def delete_task_list(self, list_view, list_item):
    """Rebuild the task list view from self.task_list after rows were
    deleted, then restart the queue if anything is left."""
    # clear items.
    self.conv_task_gui.list_view.items = []
    # find task index.
    self.task_index = 0
    for task in self.task_list:
        # NOTE(review): reconstructed nesting — appears to advance past the
        # first completed task then stop; confirm against upstream source.
        if task.conv_flags:
            self.task_index += 1
            break
    # restart draw media item list view.
    for transcoder in self.task_list:
        media_item = MediaItem()
        media_item.set_name(transcoder.name)
        media_item.path = transcoder.outputdirectory
        media_item.set_format(transcoder.container)
        # set state icon.
        if transcoder.conv_flags:
            media_item.set_status_icon("success")
        self.conv_task_gui.list_view.add_items([media_item])
    if self.conv_task_gui.list_view.items != []:
        # start run task.
        self.conv_task_gui.show_all()
        self.start_conv_function()
        gtk.timeout_add(1200, self.restart_start_btn)
def open_conv_file_dir(self):
    """Open the directory of the selected task in the desktop file manager.

    Uses xdg-open via subprocess with an argument list instead of the old
    os.system("xdg-open '%s'") call: the shell form broke (and allowed
    command injection) as soon as the path contained a single quote.
    """
    import subprocess
    subprocess.Popen(["xdg-open", self.list_view_select_file_dir])
def save_open_selsect_file_name(self, list_view, list_item, column, offset_x, offset_y):
    """Remember the path of the clicked row for later 'Open a directory'."""
    self.list_view_select_file_dir = list_item.get_path()
def _start_transcoding(self):
    """Create one Transcoder task per file in self.conv_list and start the
    queue.

    Gathers video parameters (or False placeholders for audio-only input),
    audio parameters, the target width/height from the model/ratio combos,
    and the output directory, then builds the output file name as
    <basename>-LD-<timestamp><suffix>. Returns True.
    """
    filechoice = self.FileChooser.get_uri()
    self.filename = self.FileChooser.get_filename()
    if (self.havevideo and (self.VideoCodec != "novid")):
        vheight = self.videodata['videoheight']
        vwidth = self.videodata['videowidth']
        ratenum = self.videodata['fratenum']
        ratednom = self.videodata['frateden']
        if self.videopasstoggle == False:
            videocodec = self.VideoCodec
        else: # this is probably redundant and caused by encodebin
            # Passthrough: keep only the media type part of the caps string.
            textdata=gst.Caps.to_string(self.vsourcecaps)
            sep= ','
            minitext = textdata.split(sep, 1)[0]
            videocodec = minitext
        self.outputdirectory=self.videodirectory
    else:
        # Audio-only: all video parameters become False placeholders.
        self.outputdirectory=self.audiodirectory
        videocodec=False
        vheight=False
        vwidth=False
        ratenum=False
        ratednom=False
    if self.haveaudio:
        achannels = self.audiodata['audiochannels']
        if self.audiopasstoggle == False:
            audiocodec = self.AudioCodec
        else:
            audiocodec = gst.Caps.to_string(self.asourcecaps)
    else:
        audiocodec=False
        achannels=False
    # int(False) == 0 here for audio-only input.
    new_width, new_height = (int(vwidth), int(vheight))
    model_text = self.form.model_combo.get_active_text()
    if model_text != _("No Model"):
        # A device model preset fixes the output dimensions.
        new_width, new_height = self.form.model_dict[model_text]
    else:
        if type(videocodec) != bool:
            # "W x H" combo text -> ["W", "H"] (strings; cast below).
            ratio_text = self.form.ratio_combo.get_active_text().replace(" x ", "-").split("-")
            new_width = ratio_text[0]
            new_height = ratio_text[1]
    import urllib
    # get set output path.
    out_path = self.form.save_path_entry.get_text()
    if not len(out_path):
        out_path = os.path.expanduser("~")
    self.outputdirectory = out_path # output path.
    # add conv task.
    for conv in self.conv_list:
        filechoice = "file://" + urllib.quote(conv)
        self.filename = conv
        name = os.path.splitext(os.path.split(conv)[1])[0]
        name_time = datetime.datetime.now()
        container_fromat = self.ContainerFormatSuffix
        self.outputfilename = name + "-LD-%s%s" % (name_time, container_fromat)
        transcoder = transcoder_engine.Transcoder(
            filechoice, self.filename,
            self.outputdirectory, self.container, audiocodec,
            videocodec, self.devicename,
            vheight, vwidth, ratenum, ratednom, achannels,
            self.multipass, self.passcounter, self.outputfilename,
            self.timestamp, self.rotationvalue, self.audiopasstoggle,
            self.videopasstoggle, self.interlaced, self.inputvideocaps,
            int(new_width), int(new_height))
        transcoder.name = name
        transcoder.outputdirectory = self.outputdirectory
        transcoder.container = self.container
        self.task_list.append(transcoder)
        media_item = MediaItem()
        media_item.set_name(transcoder.name)
        media_item.path = transcoder.outputdirectory
        media_item.set_format(container_fromat[1:])  # suffix without the dot
        self.conv_task_gui.list_view.add_items([media_item])
        self.conv_task_list.append(media_item)
    self.conv_task_gui.show_all()
    self.start_conv_function()
    gtk.timeout_add(1000, self.restart_start_btn)
    return True
def restart_start_btn(self):
    """Timeout callback: (re)start the current task's pipeline."""
    self.task_list[self.task_index].Pipeline("playing")
def donemessage(self, donemessage, null):
if donemessage == gst.pbutils.INSTALL_PLUGINS_SUCCESS:
# print "success " + str(donemessage)
if gst.update_registry():
print "Plugin registry updated, trying again"
else:
print "GStreamer registry update failed"
if self.containertoggle == False:
# print "done installing plugins, starting transcode"
# FIXME - might want some test here to check plugins needed are
# actually installed
# but it is a rather narrow corner case when it fails
self._start_transcoding()
elif donemessage == gst.pbutils.INSTALL_PLUGINS_PARTIAL_SUCCESS:
self.check_for_elements()
elif donemessage == gst.pbutils.INSTALL_PLUGINS_NOT_FOUND:
context_id = self.StatusBar.get_context_id("EOS")
self.StatusBar.push(context_id, \
"Plugins not found, choose different codecs.")
self.FileChooser.set_sensitive(True)
self.containerchoice.set_sensitive(True)
self.CodecBox.set_sensitive(True)
self.cancelbutton.set_sensitive(False)
self.transcodebutton.set_sensitive(True)
elif donemessage == gst.pbutils.INSTALL_PLUGINS_USER_ABORT:
context_id = self.StatusBar.get_context_id("EOS")
self.StatusBar.push(context_id, "Codec installation aborted.")
self.FileChooser.set_sensitive(True)
self.containerchoice.set_sensitive(True)
self.CodecBox.set_sensitive(True)
self.cancelbutton.set_sensitive(False)
self.transcodebutton.set_sensitive(True)
else:
context_id = self.StatusBar.get_context_id("EOS")
self.StatusBar.push(context_id, "Missing plugin installation failed: ") + gst.pbutils.InstallPluginsReturn()
def check_for_elements(self):
    """Verify that the muxer, video encoder and audio encoder elements for
    the current choices exist; if any is missing, set self.missingtoggle and
    launch the gst.pbutils missing-plugin installer (donemessage callback).
    """
    if self.container==False:
        # Container-less (raw audio) output needs no muxer or video encoder.
        containerstatus=True
        videostatus=True
    else:
        containerchoice = name_to_supported_containers_map[self.containerchoice.get_active_text()]
        containerstatus = codecfinder.get_muxer_element(codecfinder.containermap[containerchoice])
        if self.havevideo:
            if self.videopasstoggle != True:
                if self.VideoCodec == "novid":
                    videostatus=True
                else:
                    videostatus = codecfinder.get_video_encoder_element(self.VideoCodec)
            else:
                # Passthrough needs no encoder.
                videostatus=True
    if self.haveaudio:
        if self.audiopasstoggle != True:
            audiostatus = codecfinder.get_audio_encoder_element(self.AudioCodec)
        else:
            audiostatus=True
    else:
        audiostatus=True
    if self.havevideo == False: # this flags help check if input is audio-only file
        videostatus=True
    if not containerstatus or not videostatus or not audiostatus:
        self.missingtoggle=True
        fail_info = []
        if self.containertoggle==True:
            # When the container itself failed, only report the muxer.
            audiostatus=True
            videostatus=True
        if containerstatus == False:
            fail_info.append(gst.caps_from_string(codecfinder.containermap[containerchoice]))
        if audiostatus == False:
            fail_info.append(self.AudioCodec)
        if videostatus == False:
            fail_info.append(self.VideoCodec)
        missing = []
        for x in fail_info:
            missing.append(gst.pbutils.missing_encoder_installer_detail_new(x))
        context = gst.pbutils.InstallPluginsContext ()
        context.set_xid(self.TopWindow.get_window().xid)
        strmissing = str(missing)
        gst.pbutils.install_plugins_async (missing, context, \
                self.donemessage, "NULL")
# The transcodebutton is the one that calls the Transcoder class and thus
# starts the transcoding
def on_transcodebutton_clicked(self, widget):  # "OK" button handler.
    """Start transcoding: build the timestamped output file name, pick the
    container suffix, and either start immediately (element check passes)
    or defer until discovery delivers the missing stream data.

    BUGFIX: waiting_for_signal used to be set to the STRING "True"; the
    consumer compares `self.waiting_for_signal == True`, which is False for
    the string, so the deferred start never fired. Set the bool instead.
    """
    self.containertoggle = False
    self.cancelbutton.set_sensitive(True)
    # create a variable with a timestamp code
    timeget = datetime.datetime.now()
    self.timestamp = str(timeget.strftime("-%H%M%S-%d%m%Y"))
    # Remove suffix from inbound filename so we can reuse it together with suffix to create outbound filename
    self.nosuffix = os.path.splitext(os.path.basename(self.filename))[0]
    # pick output suffix
    container = name_to_supported_containers_map[self.containerchoice.get_active_text()]
    if self.container==False: # deal with container less formats
        self.ContainerFormatSuffix = codecfinder.nocontainersuffixmap[gst.Caps.to_string(self.AudioCodec)]
    else:
        if self.havevideo == False:
            self.ContainerFormatSuffix = codecfinder.audiosuffixmap[container]
        else:
            self.ContainerFormatSuffix = codecfinder.csuffixmap[container]
    self.outputfilename = str(self.nosuffix+self.timestamp+self.ContainerFormatSuffix)
    context_id = self.StatusBar.get_context_id("EOS")
    self.StatusBar.push(context_id, ("Writing %(filename)s" % {'filename': self.outputfilename}))
    if self.multipass == False:
        self.ProgressBar.set_text("Transcoding Progress")
    else:
        self.passcounter=int(1)
        self.ProgressBar.set_text("Pass %(count)d Progress" % {'count': self.passcounter})
    if self.haveaudio:
        if self.audiodata.has_key("samplerate"):
            self.check_for_elements()
            if self.missingtoggle==False:
                self._start_transcoding()
        else:
            self.waiting_for_signal = True  # was "True" (string) — see docstring
    elif self.havevideo:
        if self.videodata.has_key("videoheight"):
            self.check_for_elements()
            if self.missingtoggle==False:
                self._start_transcoding()
        else:
            self.waiting_for_signal = True  # was "True" (string) — see docstring
    # hide format conv form.
    self.form.hide_all()
def on_cancelbutton_clicked(self, widget):  # "Cancel" button handler.
    """Abort the running transcode: null the pipeline, reset the progress
    bar and status bar, and re-enable the input widgets."""
    self.FileChooser.set_sensitive(True)
    self.containerchoice.set_sensitive(True)
    self.CodecBox.set_sensitive(True)
    self.presetchoice.set_sensitive(True)
    self.rotationchoice.set_sensitive(True)
    self.presetchoice.set_active(0)
    self.cancelbutton.set_sensitive(False)
    self.transcodebutton.set_sensitive(True)
    self._cancel_encoding = \
        transcoder_engine.Transcoder.Pipeline(self._transcoder,"null")
    self.ProgressBar.set_fraction(0.0)
    self.ProgressBar.set_text("Transcoding Progress")
    context_id = self.StatusBar.get_context_id("EOS")
    self.StatusBar.pop(context_id)
    self.audiopasstoggle=False
def populate_menu_choices(self):
    """(Re)fill the audio and video codec combo boxes to match the current
    container / preset / stream availability.

    self.audiocodecs / self.videocodecs mirror the combo contents as caps
    objects (or the sentinel "novid"); self.houseclean suppresses change
    events while the menus are being rebuilt.
    """
    # clean up stuff from previous run
    self.houseclean=True # set this to avoid triggering events when cleaning out menus
    self.audiorows[0].clear_items()
    self.audiocodecs =[]
    if self.havevideo==True:
        if self.container != False:
            self.videorows[0].clear_items()
            self.videocodecs=[]
    self.houseclean=False
    # end of housecleaning
    # start filling audio
    if self.haveaudio==True:
        if self.usingpreset==True: # First fill menu based on presetvalue
            self.audiorows[0].append_text(str(gst.pbutils.get_codec_description(self.presetaudiocodec)))
            self.audiorows[0].set_active(0)
            self.audiocodecs.append(self.presetaudiocodec)
        elif self.container==False: # special setup for container less case, looks ugly, but good enough for now
            # NOTE(review): three codecs are appended to self.audiocodecs but
            # only FLAC is shown in the menu — index 0 selects MP3 caps;
            # confirm this mismatch is intentional.
            audio_only_select_code = str(gst.pbutils.get_codec_description("audio/x-flac"))
            self.audiorows[0].append_text(audio_only_select_code)
            self.audiocodecs.append(gst.Caps("audio/mpeg, mpegversion=(int)1, layer=(int)3"))
            self.audiocodecs.append(gst.Caps("audio/mpeg, mpegversion=4, stream-format=adts"))
            self.audiocodecs.append(gst.Caps("audio/x-flac"))
            self.audiorows[0].set_active(0)
            self.audiorows[0].set_sensitive(True)
        else:
            audio_codecs = []
            audio_codecs = supported_audio_container_map[self.container]
            import copy
            # Display names keep "MP3"/"MP2"; the codecmap lookup needs the
            # lowercase keys, hence the parallel translated copy.
            temp_audio_codecs = copy.copy(audio_codecs)
            c_i = 0
            for c in temp_audio_codecs:
                if c == "MP3":
                    temp_audio_codecs[c_i] = "mp3"
                elif c == "MP2":
                    temp_audio_codecs[c_i] = "mp2"
                c_i += 1
            for c in audio_codecs:
                self.audiorows[0].append_text(c)
            for c in temp_audio_codecs:
                self.audiocodecs.append(gst.Caps(codecfinder.codecmap[c]))
            self.audiorows[0].set_sensitive(True)
            self.audiorows[0].set_active(0)
    else:
        self.audiorows[0].set_sensitive(False)
    # fill in with video
    if self.havevideo==True:
        if self.container != False:
            if self.usingpreset==True:
                self.videorows[0].append_text(str(gst.pbutils.get_codec_description(self.presetvideocodec)))
                self.videorows[0].set_active(0)
                self.videocodecs.append(self.presetvideocodec)
            else:
                video_codecs=[]
                video_codecs = supported_video_container_map[self.container]
                self.rotationchoice.set_sensitive(True)
                for c in video_codecs:
                    self.videocodecs.append(gst.Caps(codecfinder.codecmap[c]))
                for c in video_codecs: # I can't update the menu with loop append
                    self.videorows[0].append_text(c)
                self.form.frame_rate_label.set_sensitive(True)
                self.videorows[0].set_sensitive(True)
                self.videorows[0].set_active(0)
                # add a 'No Video' option
                self.videorows[0].append_text(_("No Video"))
                self.videocodecs.append("novid")
                self.videonovideomenuno=(len(self.videocodecs))-1
    else:
        self.form.frame_rate_label.set_sensitive(False)
        self.videorows[0].set_sensitive(False)
        self.videorows[0].prepend_text(_("No Video"))
def on_containerchoice_changed(self, widget, text):
    """Handler for container combo changes: update self.container, re-check
    passthrough if discovery already finished, and repopulate the codec
    menus."""
    self.CodecBox.set_sensitive(True)
    self.ProgressBar.set_fraction(0.0)
    self.ProgressBar.set_text("Transcoding Progress")
    if self.containerchoice.get_active() == self.nocontainernumber:
        # Container-less (audio-only) output.
        self.container = False
        self.videorows[0].set_active(self.videonovideomenuno)
        self.videorows[0].set_sensitive(False)
        self.form.frame_rate_label.set_sensitive(False)
    else:
        if self.containerchoice.get_active()!= -1:
            self.container = name_to_supported_containers_map[self.containerchoice.get_active_text ()]
    if self.discover_done == True:
        self.check_for_passthrough(self.container)
        self.transcodebutton.set_sensitive(True)
    self.populate_menu_choices()
def on_presetchoice_changed(self, widget):
    """Handler for preset combo changes: either clear preset mode ("No
    Presets") and re-enable manual container/codec selection, or load the
    chosen device preset and lock the manual widgets."""
    presetchoice = self.presetchoice.get_active_text ()
    self.ProgressBar.set_fraction(0.0)
    if presetchoice == "No Presets":
        self.usingpreset=False
        self.devicename = "nopreset"
        self.containerchoice.set_sensitive(True)
        self.containerchoice.set_active(0)
        self.start_time = False
        self.multipass = False
        self.passcounter = False
        self.rotationchoice.set_sensitive(True)
        if name_to_supported_containers_map[self.containerchoice.get_active_text()]:
            self.populate_menu_choices()
            self.CodecBox.set_sensitive(True)
            self.transcodebutton.set_sensitive(True)
    else:  # a device preset was chosen
        self.usingpreset=True
        self.ProgressBar.set_fraction(0.0)
        self.devicename = self.presetchoices[presetchoice]
        self.provide_presets(self.devicename)
        self.containerchoice.set_sensitive(False)
        self.CodecBox.set_sensitive(False)
        self.rotationchoice.set_sensitive(False)
        if name_to_supported_containers_map[self.containerchoice.get_active_text()]:
            self.transcodebutton.set_sensitive(True)
def on_rotationchoice_changed(self, widget, text):
    """Remember the selected rotation index for the transcoder."""
    chosen_index = self.rotationchoice.get_active()
    self.rotationvalue = chosen_index
def on_audiocodec_changed(self, widget, text):
    """Handler for audio codec combo changes: map the active row back to
    caps in self.audiocodecs (or take the preset's codec)."""
    if (self.houseclean == False and self.usingpreset==False):
        audio_codec = self.audiorows[0].get_active()
        self.AudioCodec = self.audiocodecs[audio_codec]
        if self.container != False:
            # NOTE(review): self.audiopassmenuno is only assigned in
            # commented-out code in populate_menu_choices — this branch
            # likely raises AttributeError if ever reached; confirm.
            if self.audiorows[0].get_active() == self.audiopassmenuno:
                self.audiopasstoggle=True
    elif self.usingpreset==True:
        self.AudioCodec = gst.Caps(self.presetaudiocodec)
def on_videocodec_changed(self, widget, text):
    """Handler for video codec combo changes: map the active row back to
    caps in self.videocodecs (or take the preset's codec)."""
    if (self.houseclean == False and self.usingpreset==False):
        if self.container != False:
            self.VideoCodec = self.videocodecs[self.videorows[0].get_active()]
        else:
            self.VideoCodec = "novid"
        # NOTE(review): self.videopassmenuno is only assigned in commented-out
        # code in populate_menu_choices — confirm this branch is reachable.
        if self.videorows[0].get_active() == self.videopassmenuno:
            self.videopasstoggle=True
    elif self.usingpreset==True:
        self.VideoCodec = gst.Caps(self.presetvideocodec)
def on_about_dialog_activate(self, widget):
    """Open the application's About dialog."""
    about.AboutDialog()
def show_error(self, NONE, error_string):
    """Transcoder 'got-error' handler.

    The two parser-missing errors disable passthrough, fall back to the
    container's default codecs and re-enable the widgets; any other error
    string is shown on the task GUI, the task is nulled, and the label is
    cleared after 3s. The message always ends up on the status bar.
    """
    if (error_string=="noaudioparser") or (error_string=="novideoparser"):
        self.FileChooser.set_sensitive(True)
        self.containerchoice.set_sensitive(True)
        self.CodecBox.set_sensitive(True)
        self.presetchoice.set_sensitive(True)
        self.rotationchoice.set_sensitive(True)
        self.presetchoice.set_active(0)
        self.cancelbutton.set_sensitive(False)
        self.transcodebutton.set_sensitive(True)
        self.ProgressBar.set_fraction(0.0)
        self.ProgressBar.set_text("show_error")
        if error_string=="noaudioparser":
            error_message = "No audio parser, passthrough not available"
            codecs = supported_container_map[self.container]
            self.AudioCodec = codecs[0]
            self.audiopasstoggle = False
        elif error_string=="novideoparser":
            error_message= "No video parser, passthrough not available"
            codecs = supported_container_map[self.container]
            self.VideoCodec = codecs[1]
            self.videopasstoggle = False
        else:
            error_message="Uknown error"
    else:
        error_message = error_string
        self.conv_task_gui.show_time_label.set_text(_(error_message))
        self.conv_task_gui.list_view.items[self.task_index].set_status_icon("error")
        self.task_list[self.task_index].Pipeline("null")
        self.task_index = 0
        gtk.timeout_add(3000, self.clear_error_label_show)
    context_id = self.StatusBar.get_context_id("EOS")
    self.StatusBar.push(context_id, error_message)
def clear_error_label_show(self):
    """Timeout callback: blank the error label; False removes the timeout."""
    self.conv_task_gui.show_time_label.set_text("")
    return False
def on_debug_activate(self, widget):
    """Dump the running pipeline as a Graphviz dot file, render it to PNG
    with `dot` (if installed), and open the image."""
    dotfile = "/tmp/transmageddon-debug-graph.dot"
    pngfile = "/tmp/transmageddon-pipeline.png"
    if os.access(dotfile, os.F_OK):
        os.remove(dotfile)
    if os.access(pngfile, os.F_OK):
        os.remove(pngfile)
    gst.DEBUG_BIN_TO_DOT_FILE (self._transcoder.pipeline, \
            gst.DEBUG_GRAPH_SHOW_ALL, 'transmageddon-debug-graph')
    # check if graphviz is installed with a simple test
    try:
        dot = which.which("dot")
        os.system(dot + " -Tpng -o " + pngfile + " " + dotfile)
        gtk.show_uri(gtk.gdk.Screen(), "file://"+pngfile, 0)
    except which.WhichError:
        print "The debug feature requires graphviz (dot) to be installed."
        print "Transmageddon can not find the (dot) binary."
# Script entry point: build the UI and enter the GTK main loop.
if __name__ == "__main__":
    hwg = TransmageddonUI()
    gtk.main()
|
linuxdeepin/deepin-media-player
|
src/format_conv/transmageddon.py
|
Python
|
gpl-3.0
| 64,931
|
[
"DIRAC"
] |
88e7ad072fa35f82950612e330d8bbd57796600d59de5d74dd1ddef63ed7ddc8
|
import numpy as np
from numpy.testing import assert_allclose
import hyperspy.api as hs
from hyperspy.models.model1d import Model1D
from hyperspy.misc.test_utils import ignore_warning
class TestPowerLaw:
    """Regression tests for PowerLaw.estimate_parameters on a synthetic
    A=10, r=4 model, in binned/unbinned and current/map variants."""

    def setup_method(self, method):
        # Signal axis: 1024 points from 100.0, step 0.01.
        s = hs.signals.Signal1D(np.zeros(1024))
        s.axes_manager[0].offset = 100
        s.axes_manager[0].scale = 0.01
        m = s.create_model()
        m.append(hs.model.components1D.PowerLaw())
        m[0].A.value = 10
        m[0].r.value = 4
        self.m = m

    def test_estimate_parameters_binned_only_current(self):
        self.m.signal.metadata.Signal.binned = True
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = True
        g = hs.model.components1D.PowerLaw()
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=True)
        # Estimates are close to but not exactly the model values (regression
        # values pinned from a known-good run).
        assert_allclose(g.A.value, 10.084913947965161)
        assert_allclose(g.r.value, 4.0017676988807409)

    def test_estimate_parameters_unbinned_only_current(self):
        self.m.signal.metadata.Signal.binned = False
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = False
        g = hs.model.components1D.PowerLaw()
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=True)
        assert_allclose(g.A.value, 10.064378823244837)
        assert_allclose(g.r.value, 4.0017522876514304)

    def test_estimate_parameters_binned(self):
        self.m.signal.metadata.Signal.binned = True
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = True
        g = hs.model.components1D.PowerLaw()
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=False)
        assert_allclose(g.A.value, 10.084913947965161)
        assert_allclose(g.r.value, 4.0017676988807409)

    def test_estimate_parameters_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = False
        g = hs.model.components1D.PowerLaw()
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=False)
        assert_allclose(g.A.value, 10.064378823244837)
        assert_allclose(g.r.value, 4.0017522876514304)
        # Test that it all works when calling it with a different signal
        s2 = hs.stack((s, s))
        g.estimate_parameters(s2,
                              None,
                              None,
                              only_current=False)
        assert_allclose(g.A.map["values"][1], 10.064378823244837)
        assert_allclose(g.r.map["values"][0], 4.0017522876514304)
class TestOffset:
    """Tests for Offset.estimate_parameters on a constant offset=10 model."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(10))
        s.axes_manager[0].scale = 0.01
        m = s.create_model()
        m.append(hs.model.components1D.Offset())
        m[0].offset.value = 10
        self.m = m

    def test_estimate_parameters_binned(self):
        self.m.signal.metadata.Signal.binned = True
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = True
        g = hs.model.components1D.Offset()
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=True)
        assert_allclose(g.offset.value, 10)

    def test_estimate_parameters_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = False
        g = hs.model.components1D.Offset()
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=True)
        assert_allclose(g.offset.value, 10)
class TestPolynomial:
    """Tests for the order-2 Polynomial component: gradients, parameter
    estimation, and estimation over 2D/3D navigation signals."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(1024))
        s.axes_manager[0].offset = -5
        s.axes_manager[0].scale = 0.01
        m = s.create_model()
        m.append(hs.model.components1D.Polynomial(order=2))
        # Coefficients (highest order first): 0.5*x**2 + 2*x + 3.
        coeff_values = (0.5, 2, 3)
        self.m = m
        s_2d = hs.signals.Signal1D(np.arange(1000).reshape(10, 100))
        self.m_2d = s_2d.create_model()
        self.m_2d.append(hs.model.components1D.Polynomial(order=2))
        s_3d = hs.signals.Signal1D(np.arange(1000).reshape(2, 5, 100))
        self.m_3d = s_3d.create_model()
        self.m_3d.append(hs.model.components1D.Polynomial(order=2))
        # if same component is pased, axes_managers get mixed up, tests
        # sometimes randomly fail
        for _m in [self.m, self.m_2d, self.m_3d]:
            _m[0].coefficients.value = coeff_values

    def test_gradient(self):
        c = self.m[0]
        np.testing.assert_array_almost_equal(c.grad_coefficients(1),
                                             np.array([[6, ], [4.5], [3.5]]))
        # One gradient row per coefficient.
        assert c.grad_coefficients(np.arange(10)).shape == (3, 10)

    def test_estimate_parameters_binned(self):
        self.m.signal.metadata.Signal.binned = True
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = True
        g = hs.model.components1D.Polynomial(order=2)
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=True)
        assert_allclose(g.coefficients.value[0], 0.5)
        assert_allclose(g.coefficients.value[1], 2)
        assert_allclose(g.coefficients.value[2], 3)

    def test_estimate_parameters_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = False
        g = hs.model.components1D.Polynomial(order=2)
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=True)
        assert_allclose(g.coefficients.value[0], 0.5)
        assert_allclose(g.coefficients.value[1], 2)
        assert_allclose(g.coefficients.value[2], 3)

    def test_2d_signal(self):
        # This code should run smoothly, any exceptions should trigger failure
        s = self.m_2d.as_signal(show_progressbar=None, parallel=False)
        model = Model1D(s)
        p = hs.model.components1D.Polynomial(order=2)
        model.append(p)
        p.estimate_parameters(s, 0, 100, only_current=False)
        np.testing.assert_allclose(p.coefficients.map['values'],
                                   np.tile([0.5, 2, 3], (10, 1)))

    def test_3d_signal(self):
        # This code should run smoothly, any exceptions should trigger failure
        s = self.m_3d.as_signal(show_progressbar=None, parallel=False)
        model = Model1D(s)
        p = hs.model.components1D.Polynomial(order=2)
        model.append(p)
        p.estimate_parameters(s, 0, 100, only_current=False)
        np.testing.assert_allclose(p.coefficients.map['values'],
                                   np.tile([0.5, 2, 3], (2, 5, 1)))
class TestGaussian:
    """Tests for Gaussian.estimate_parameters on a sigma=0.5, centre=1,
    A=2 model, binned and unbinned."""

    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(1024))
        s.axes_manager[0].offset = -5
        s.axes_manager[0].scale = 0.01
        m = s.create_model()
        m.append(hs.model.components1D.Gaussian())
        m[0].sigma.value = 0.5
        m[0].centre.value = 1
        m[0].A.value = 2
        self.m = m

    def test_estimate_parameters_binned(self):
        self.m.signal.metadata.Signal.binned = True
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = True
        g = hs.model.components1D.Gaussian()
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=True)
        assert_allclose(g.sigma.value, 0.5)
        assert_allclose(g.A.value, 2)
        assert_allclose(g.centre.value, 1)

    def test_estimate_parameters_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        s = self.m.as_signal(show_progressbar=None, parallel=False)
        s.metadata.Signal.binned = False
        g = hs.model.components1D.Gaussian()
        g.estimate_parameters(s,
                              None,
                              None,
                              only_current=True)
        assert_allclose(g.sigma.value, 0.5)
        assert_allclose(g.A.value, 2)
        assert_allclose(g.centre.value, 1)
class TestExpression:
    """Tests for the sympy-backed Expression component built as a Gaussian
    (height/fwhm/x0 parameterisation): name, position binding, value and
    auto-generated gradients."""

    def setup_method(self, method):
        self.g = hs.model.components1D.Expression(
            expression="height * exp(-(x - x0) ** 2 * 4 * log(2)/ fwhm ** 2)",
            name="Gaussian",
            position="x0",
            height=1,
            fwhm=1,
            x0=0,
            module="numpy")

    def test_name(self):
        assert self.g.name == "Gaussian"

    def test_position(self):
        # position="x0" must bind the component's _position to that parameter.
        assert self.g._position is self.g.x0

    def test_f(self):
        # Peak value at the centre equals height.
        assert self.g.function(0) == 1

    def test_grad_height(self):
        assert_allclose(
            self.g.grad_height(2),
            1.5258789062500007e-05)

    def test_grad_x0(self):
        assert_allclose(
            self.g.grad_x0(2),
            0.00016922538587889289)

    def test_grad_fwhm(self):
        assert_allclose(
            self.g.grad_fwhm(2),
            0.00033845077175778578)
def test_expression_substitution():
    """An expression with substitutions exposes only its free parameters."""
    expression = 'A / B; A = x+2; B = x-c'
    component = hs.model.components1D.Expression(expression, name='testcomp',
                                                 autodoc=True,
                                                 c=2)
    free_parameters = ''.join(p.name for p in component.parameters)
    assert free_parameters == 'c'
    # (1 + 2) / (1 - 2) == -3
    assert component.function(1) == -3
class TestScalableFixedPattern:
    """Tests that the fitted yscale of a ScalableFixedPattern accounts for
    the binned/unbinned state of both the signal and the pattern."""

    def setup_method(self, method):
        signal = hs.signals.Signal1D(np.linspace(0., 100., 10))
        pattern = hs.signals.Signal1D(np.linspace(0., 1., 10))
        signal.axes_manager[0].scale = 0.1
        pattern.axes_manager[0].scale = 0.1
        self.s = signal
        self.pattern = pattern

    def _fit_yscale(self, signal_binned, pattern_binned):
        """Fit the fixed pattern to the signal and return the yscale value."""
        s = self.s
        s1 = self.pattern
        s.metadata.Signal.binned = signal_binned
        s1.metadata.Signal.binned = pattern_binned
        m = s.create_model()
        fp = hs.model.components1D.ScalableFixedPattern(s1)
        m.append(fp)
        with ignore_warning(message="invalid value encountered in sqrt",
                            category=RuntimeWarning):
            m.fit()
        return fp.yscale.value

    def test_both_unbinned(self):
        assert abs(self._fit_yscale(False, False) - 100) <= 0.1

    def test_both_binned(self):
        assert abs(self._fit_yscale(True, True) - 100) <= 0.1

    def test_pattern_unbinned_signal_binned(self):
        assert abs(self._fit_yscale(True, False) - 1000) <= 1

    def test_pattern_binned_signal_unbinned(self):
        assert abs(self._fit_yscale(False, True) - 10) <= .1
class TestHeavisideStep:
    """Tests of the HeavisideStep component and its parameter gradients."""

    def setup_method(self, method):
        self.c = hs.model.components1D.HeavisideStep()

    def test_integer_values(self):
        result = self.c.function([-1, 0, 2])
        np.testing.assert_array_almost_equal(result, [0, 0.5, 1])

    def test_float_values(self):
        result = self.c.function([-0.5, 0.5, 2])
        np.testing.assert_array_almost_equal(result, [0, 1, 1])

    def test_not_sorted(self):
        # Input ordering must not matter.
        result = self.c.function([3, -0.1, 0])
        np.testing.assert_array_almost_equal(result, [1, 0, 0.5])

    def test_gradients(self):
        grad_wrt_A = self.c.A.grad([3, -0.1, 0])
        np.testing.assert_array_almost_equal(grad_wrt_A, [1, 1, 1])
        grad_wrt_n = self.c.n.grad([3, -0.1, 0])
        np.testing.assert_array_almost_equal(grad_wrt_n, [1, 0, 0.5])
|
CodeMonkeyJan/hyperspy
|
hyperspy/tests/model/test_components.py
|
Python
|
gpl-3.0
| 13,325
|
[
"Gaussian"
] |
e83f13e15de69daa9060ba63697b88a7c1a3b56776d864c1717eb46ab8ef6fbf
|
#!/usr/bin/env python
from ase_utils import symbol_number, symnum_to_sym
import numpy as np
import re
import os
import sys
import pickle
from scipy.integrate import trapz
from collections import OrderedDict
# wannier90 keyword names grouped by value type; wannier_input.set uses
# these groups to validate keyword arguments.
int_keys = ("num_bands", "num_wann", 'bands_num_points',
            "fermi_surface_num_points", "num_iter", "num_cg_steps",
            "conv_window", "conv_noise_num", "num_dump_cycles",
            "search_shells", "wannier_plot_supercell")
float_keys = (
    "dis_win_min",
    "dis_win_max",
    "dis_froz_min",
    "dis_froz_max",
    "wannier_plot_radius",
    "hr_cutoff",
    "dist_cutoff",
    "fermi_energy",
    "conv_tol",
    "conv_noise_amp",
    "kmesh_tol", )
bool_keys = (
    "guiding_centres",
    "use_bloch_phases",
    "write_xyz",
    "write_hr_diag",
    "wannier_plot",
    "bands_plot",
    "fermi_surface_plot",
    "hr_plot", )
string_keys = (
    "spin",
    "wannier_plot_format",
    "wannier_plot_mode",
    "bands_plot_format", )
list_keys = ("wannier_plot_list", "bands_plot_project", "mp_grid")
# BUGFIX: int_keys was missing from all_keys.
all_keys = int_keys + float_keys + bool_keys + string_keys + list_keys
# Orbital-name shortcuts.
s_orbs = ['s']
# BUGFIX: the eg/t2g assignments were swapped -- in an octahedral crystal
# field t2g is the (dxy, dxz, dyz) triplet and eg the (dz2, dx2-y2) doublet.
t2g_orbs = ['dxz', 'dyz', 'dxy']
eg_orbs = ['dz2', 'dx2-y2']
d_orbs = ['dz2', 'dxz', 'dyz', 'dx2-y2', 'dxy']
p_orbs = ['pz', 'px', 'py']
# angular momentum dict, which is NOT that used in wannier90
# (maps orbital name -> (m, l)).
orb_dict = {
    's': (0, 0),
    'pz': (0, 1),
    'px': (1, 1),
    'py': (-1, 1),
    'dz2': (0, 2),
    'dxz': (1, 2),
    'dyz': (-1, 2),
    'dxy': (2, 2),
    'dx2-y2': (-2, 2),
    'fz3': (0, 3),
    'fxz2': (1, 3),
    'fyz2': (-1, 3),
    'fxyz': (2, 3),
    'fz(x2-y2)': (-2, 3),
    'fx(x2-3y2)': (3, 3),
    'fx(3x2-y2)': (-3, 3)
}
# Orbital name -> (mr, l) in the wannier90 convention.  mr=None stands for
# "all mr values of this l" (whole p, d or f shell).
w90_orb_dict = {
    's': (1, 0),
    'pz': (1, 1),
    'px': (2, 1),
    'py': (3, 1),
    'dz2': (1, 2),
    'dxz': (2, 2),
    'dyz': (3, 2),
    'dxy': (5, 2),
    'dx2-y2': (4, 2),
    'fz3': (1, 3),
    'fxz2': (2, 3),
    'fyz2': (3, 3),
    'fxyz': (5, 3),
    'fz(x2-y2)': (4, 3),
    'fx(x2-3y2)': (6, 3),
    'fx(3x2-y2)': (7, 3),
    'p': (None, 1),
    'd': (None, 2),
    'f': (None, 3),
}
# (mr, l) -> orbital name, used when pretty-printing the basis.
reversed_w90_orb_dict = dict(zip(w90_orb_dict.values(), w90_orb_dict.keys()))
class wannier_input(object):
    """Generator of wannier90 input (``<seed>.win``) files.

    Collects the structure, wannier90 keywords, initial projections and
    k-point information, and renders them in the wannier90 input format.
    """

    def __init__(self, atoms, seed=None, bands=None, spin=0, **kwargs):
        """
        The wannier.win generator.

        :param atoms: ASE-like atoms object (must provide ``get_cell``).
        :param seed: seed name of the wannier90 files (default 'wannier90').
        :param bands: band data, if any.
        :param spin: spin channel.
        :param kwargs: any valid wannier90 keyword, see :meth:`set`.
        """
        self.unit_cell = None
        self.set_atoms(atoms)
        if seed is None:
            seed = 'wannier90'
        self.seed = seed
        self.bands = bands
        self.spin = spin
        self.projection_dict = None
        self.kpoints = None
        self.kpath = None
        self.mp_grid = None
        # Every known keyword is pre-registered with value None so that
        # ``set`` can reject unknown keywords by name.
        self.float_params = {}
        self.string_params = {}
        self.int_params = {}
        self.bool_params = {}
        self.list_params = {}
        for key in float_keys:
            self.float_params[key] = None
        for key in string_keys:
            self.string_params[key] = None
        for key in int_keys:
            self.int_params[key] = None
        for key in bool_keys:
            self.bool_params[key] = None
        for key in list_keys:
            self.list_params[key] = None
        self.basis = []
        self.set(**kwargs)
        self.projection_dict_by_site = {}
        self.initial_basis = []
        self.axis = {}

    def set_atoms(self, atoms):
        """Attach an atoms object and take its cell as the unit cell."""
        self.atoms = atoms
        self.unit_cell = atoms.get_cell()

    def add_basis(self,
                  atom_name,
                  orb=None,
                  m=None,
                  l=None,
                  r=None,
                  spin=None,
                  axis=None):
        """
        set the initial projections for wannier functions.

        Parameters:
        ---------------------
        atom_name: string
            name of a species or of a single atom. Eg. 'Fe' or 'Fe1'.
        orb: string
            name of the orbital, e.g. 'dxy'; equivalent to giving the
            corresponding (m, l) pair.
        m, l, r: int
            quantum numbers. m and r can be None. m=None means all possible
            m for the given l. r=None means the default 1. Note r is not the
            radial quantum number; it just labels 1, 2, 3... Eg. if there
            are 4p and 5p orbitals, they can be labeled r=1 and r=2.
        spin: string or None
            "up"|"down"|None
        axis: optional axis specification stored per atom and emitted in
            the projection line.

        Results:
        ---------
        [aname, mi, l, r, spin] appended to self.initial_basis (list).

        TODO: Is nwann correct for spin polarized structure? Check this.
        """
        if orb is not None:
            if m is not None or l is not None:
                raise ValueError(
                    "the projection can be either given by name (like 'dxy') or by (m,l) pair (4,2)"
                )
            m, l = w90_orb_dict[orb]
        if m is None:
            # all mr values of this l; wannier90 mr runs from 1 to 2l+1
            mlist = [i + 1 for i in range(l * 2 + 1)]
        else:
            mlist = [m]
        if '0' <= atom_name[-1] <= '9':  # single atom, e.g. 'Fe1'
            sdict = symbol_number(self.atoms)
            for mi in mlist:
                self.initial_basis.append([atom_name, mi, l, r, spin])
            if axis is not None:
                self.axis[atom_name] = axis
                print("Axis added")
        else:  # every atom of this species
            sdict = symbol_number(self.atoms)
            for aname in sdict:
                if symnum_to_sym(aname) == atom_name:
                    for mi in mlist:
                        self.initial_basis.append([aname, mi, l, r, spin])
                    if axis is not None:
                        self.axis[aname] = axis
                        print("Axis added")

    def add_basis_from_dict(self, atom_map=None, conf_dict=None, band='v+c'):
        """
        Add projections for every atom in ``atom_map`` from an electron
        configuration dict.

        Parameters:
        -----------
        atom_map: dict
            maps chemical symbols or symbol+number to labels, e.g.
            {'Bi': 3, 'Fe1': '3_up', 'Fe2': '3_dn', 'O': -2}.  For a
            non-magnetic ion the valence is the usual label; the label does
            not have to be physically meaningful.
        conf_dict: dict
            maps (element, label) tuples to electron configurations, e.g.
            {('Mn', '3_up'): [('dxy', 3, 1, 'up'), ...]}.
        band: string
            'v' (occupied orbitals only) | 'v+c' (also unoccupied ones).
        """
        # Normalise: append spin=None to configurations without a spin entry.
        for key in conf_dict:
            val = conf_dict[key]
            conf_dict[key] = [
                list(v) + [None]
                if not (v[-1] == 'up' or v[-1] == 'dn' or v[-1] is None) else
                list(v) for v in val
            ]
        for s in atom_map:
            confs = conf_dict[(s, atom_map[s])]
            # r is renormalised so the lowest shell of each atom gets r=1.
            min_r = min([o[2] for o in confs])
            for conf in confs:
                if isinstance(conf[0], int) or conf[0] is None:  # (m, l, ...)
                    if len(conf) == 4:
                        m, l, r, occ = conf
                        spin = None
                    elif len(conf) == 5:
                        m, l, r, occ, spin = conf
                    if occ > 0 or band == 'v+c':
                        self.add_basis(
                            atom_name=s, m=m, l=l, r=r - min_r + 1, spin=spin)
                else:  # ('dxy', ...)
                    if len(conf) == 3:
                        orb_name, r, occ = conf
                        spin = None
                    elif len(conf) == 4:
                        orb_name, r, occ, spin = conf
                    print(conf)
                    if occ > 0 or band == 'v+c':
                        self.add_basis(
                            atom_name=s,
                            orb=orb_name,
                            r=r - min_r + 1,
                            spin=spin)

    def get_basis(self, atoms=None):
        """
        get the name of the basis, one ``symnum|orbital|r|spin`` string per
        wannier function.
        """
        if self.atoms is None:
            if atoms is None:
                raise ValueError("atoms should be specified.")
            else:
                self.atoms = atoms
        self.basis = []
        for b in self.initial_basis:
            symnum, m, l, r, spin = b
            bname = '|'.join(
                [symnum, reversed_w90_orb_dict[(m, l)], str(r), str(spin)])
            self.basis.append(bname)
        return self.basis

    def write_basis(self, fname='basis.txt'):
        """
        write basis to a file. default fname is basis.txt.
        Each line is ``<basis name>\\t<1-based index>``.
        """
        self.get_basis()
        with open(fname, 'w') as myfile:
            for i, b in enumerate(self.basis):
                myfile.write(str(b) + '\t' + str(i + 1) + '\n')

    def set(self, **kwargs):
        """
        set the parameters.  Unknown keyword names raise ValueError.
        """
        for key in kwargs:
            if key in self.float_params:
                self.float_params[key] = kwargs[key]
            elif key in self.string_params:
                self.string_params[key] = kwargs[key]
            elif key in self.int_params:
                self.int_params[key] = kwargs[key]
            elif key in self.bool_params:
                self.bool_params[key] = kwargs[key]
            elif key in self.list_params:
                self.list_params[key] = kwargs[key]
            else:
                raise ValueError("%s is not a valid key" % key)

    def set_energy_window(self, win, froz_win, shift_efermi=0):
        """
        set the energy window for disentanglement.

        :param win: the disentangle window. [min, max]
        :param froz_win: the frozen window. [min, max]; must lie inside win.
        :param shift_efermi: shift the energies. eg. if shift_efermi=3,
            3 is added to every bound.
        """
        assert froz_win[0] >= win[0]
        assert froz_win[1] <= win[1]
        self.set(
            dis_win_min=win[0] + shift_efermi,
            dis_win_max=win[1] + shift_efermi,
            dis_froz_min=froz_win[0] + shift_efermi,
            dis_froz_max=froz_win[1] + shift_efermi, )

    def set_kpath(self, kpoints, labels, npoints):
        """
        set the kpoints path to draw band diagram.

        :param kpoints: the special kpoint list.
        :param labels: the names of the kpoints.
        :param npoints: the number of kpoints in between.
        """
        # BUGFIX: materialize the zip so that gen_input can slice
        # self.kpath (zip objects are not subscriptable on python 3).
        self.kpath = list(zip(labels, np.array(kpoints, dtype=float)))
        self.set(bands_num_points=npoints)

    def gen_input(self):
        """Render all settings into the wannier90 input text, cache it on
        ``self.input_text`` and return it."""
        if self.int_params['num_wann'] is None:
            # Default: one wannier function per initial projection.
            self.int_params['num_wann'] = len(self.initial_basis)
        print(self.int_params)
        print(self.float_params)
        input_text = ""
        for key, value in self.float_params.items():
            if value is not None:
                input_text += "{0} = {1}\n".format(key, value)
        for key, value in self.string_params.items():
            if value is not None:
                input_text += "{0} = {1}\n".format(key, value)
        for key, value in self.int_params.items():
            if value is not None:
                input_text += "{0} = {1}\n".format(key, value)
        for key, value in self.bool_params.items():
            if value is not None:
                # BUGFIX: the condition was the constant ``True``, so every
                # set boolean was written as 'true'; test the value itself.
                input_text += "{0} = {1}\n".format(key, 'true'
                                                   if value else 'false')
        for key, value in self.list_params.items():
            if value is not None and key not in ['mp_grid']:
                input_text += "{0} = {1}\n".format(key,
                                                   ','.join(map(str, value)))
        print(input_text)
        # projection block
        if self.projection_dict is not None or self.projection_dict_by_site is not None:
            input_text += '\nbegin projections\n'
            if self.projection_dict is not None:
                for key, value in self.projection_dict.items():
                    if isinstance(value, list):
                        input_text += "{0}: {1}\n".format(key, ','.join(value))
                    elif isinstance(value, dict):
                        for lr, t in value.items():
                            # BUGFIX: join the orbital sub-list ``t``; the
                            # original joined the dict ``value`` itself,
                            # emitting its keys instead of the orbitals.
                            input_text += "{0}:{1}:{2}\n".format(
                                key, ','.join(t), lr)
            for key, value in self.projection_dict_by_site.items():
                for a in key:
                    position = self.atoms.get_positions()[symbol_number(
                        self.atoms)[a]]
                    input_text += "c={0}: {1}\n".format(
                        ','.join(map(str, position)), ','.join(value))
            poses = self.atoms.get_positions()
            sdict = symbol_number(self.atoms)
            for b in self.initial_basis:
                symnum, m, l, r, spin = b
                pos = poses[sdict[symnum]]
                pos_text = ','.join(map(str, pos))
                input_text += "c=%s: l=%s, mr=%s " % (pos_text, l, m)
                if r is not None:
                    input_text += ":r=%s" % (r)
                if spin is not None:
                    input_text += "(%s)" % (spin[0])
                if symnum in self.axis:
                    #input_text += ":z=%s"%(','.join(map(str, self.axis[symnum])))
                    input_text += "%s" % (self.axis[symnum])
                # Trailing comment documents which basis function this is.
                input_text += "\t\t# %s|%s|%s|%s\n" % (
                    symnum, reversed_w90_orb_dict[(m, l)], r, spin)
            input_text += 'end projections\n\n'
        #unit cell block
        if self.unit_cell is not None:
            input_text += 'begin unit_cell_cart\n'
            for vec in self.unit_cell:
                input_text += '\t' + '\t'.join(map(str, vec)) + '\n'
            input_text += 'end unit_cell_cart\n\n'
        # atom cordinates
        if self.atoms is not None:
            input_text += '\nbegin atoms_cart\n'
            for sym, pos in zip(self.atoms.get_chemical_symbols(),
                                self.atoms.get_positions()):
                input_text += '{0}\t{1}\n'.format(sym,
                                                  '\t'.join(map(str, pos)))
            input_text += 'end atoms_cart\n\n'
        # kpoints
        # BUGFIX: 'mp_grid' is always a *key* of list_params (initialised
        # to None), so the old membership test was always true and joining
        # None raised TypeError; test the value instead.
        if self.list_params['mp_grid'] is not None:
            input_text += 'mp_grid = \t{0}\n'.format(
                '\t'.join(map(str, self.list_params['mp_grid'])))
        if self.kpoints is not None:
            input_text += 'begin kpoints\n'
            for kpt in self.kpoints:
                input_text += '\t' + '\t'.join(map(str, kpt)) + '\n'
            input_text += 'end kpoints\n\n'
        # k path
        if self.kpath is not None:
            input_text += 'begin kpoint_path\n'
            for k_from, k_to in zip(self.kpath[:-1], self.kpath[1:]):
                input_text += "{0} {1}\t{2} {3}\n".format(
                    k_from[0], ' '.join([str(x) for x in k_from[1]]), k_to[0],
                    ' '.join([str(x) for x in k_to[1]]))
            input_text += 'end kpoint_path\n\n'
        self.input_text = input_text
        return self.input_text

    def write_input(self,
                    fname="wannier90.win",
                    basis_fname='basis.txt',
                    save_dict=True):
        """
        write wannier input file, the basis file and (optionally) a pickle
        of this generator object.
        """
        #if not self.input_text:
        self.gen_input()
        with open(fname, 'w') as infile:
            infile.write(self.input_text)
        self.write_basis(fname=basis_fname)
        if save_dict:
            # BUGFIX: pickle requires a binary-mode file on python 3.
            with open('%s.pickle' % fname, 'wb') as pfile:
                pickle.dump(self, pfile)

    def get_nwann(self):
        """
        get number of wannier functions
        """
        if self.int_params['num_wann'] is None:
            self.int_params['num_wann'] = len(self.initial_basis)
        return self.int_params['num_wann']
def pdos_band(input_fname, iwann, dos=True, band=True, restart='plot'):
    """
    calculate projected dos and projected band.

    :param input_fname: name of the wannier90 input file.
    :param iwann: wannier-function index written to bands_plot_project /
        dos_project.
    :param dos: if True, run postw90.x to compute the projected DOS.
    :param band: if True, run wannier90.x to compute the projected bands.
    :param restart: value for the 'restart' keyword; a falsy value leaves
        the keyword untouched.
    """
    # Keep a pristine copy of the input so it can be restored after each run.
    os.system('cp %s %s.bak' % (input_fname, input_fname))
    pre = os.path.splitext(input_fname)[0]
    valdict = {}
    if restart:
        valdict['restart'] = restart
    if band:
        # Rewrite the input with the band-projection keys, run wannier90.x,
        # then save the outputs under per-wannier-function names and restore
        # the original input.
        valdict['bands_plot_project'] = iwann
        valdict['bands_plot'] = 'true'
        text = replace_value_file(input_fname, valdict)
        with open(input_fname, 'w') as myfile:
            myfile.write(text)
        os.system('bash -c "wannier90.x %s"' % input_fname)
        os.system('cp %s_band.dat %s_band_%s.dat' % (pre, pre, iwann))
        os.system('cp %s %s.band' % (input_fname, input_fname))
        os.system('cp %s.bak %s' % (input_fname, input_fname))
    if dos:
        # Same pattern for the projected DOS, computed by postw90.x.
        valdict['dos'] = 'true'
        valdict['dos_kmesh'] = 10
        valdict['dos_project'] = iwann
        text = replace_value_file(input_fname, valdict)
        with open(input_fname, 'w') as myfile:
            myfile.write(text)
        os.system('bash -c "postw90.x %s"' % input_fname)
        os.system('cp %s-dos.dat %s_dos_%s.dat' % (pre, pre, iwann))
        os.system('cp %s %s.dos' % (input_fname, input_fname))
        os.system('cp %s.bak %s' % (input_fname, input_fname))
def find_value_line(lines, key):
    """Return the index of the first line that assigns ``key``
    (``key = ...`` or ``key : ...``), or None if there is no such line.

    :param lines: list of text lines.
    :param key: parameter name, matched literally (regex metacharacters in
        the key are escaped).
    """
    # BUGFIX: escape the key (orbital names such as 'fz(x2-y2)' contain
    # regex metacharacters) and use [=:] -- the original class [=|:] also
    # matched a literal '|'.
    pattern = re.compile(r'^\s*%s\s*[=:]\s*' % re.escape(key))
    for i, line in enumerate(lines):
        if pattern.search(line) is not None:
            return i
    return None
def sub_lines(text, key, value):
    """Set ``key = value`` in ``text`` (a list of lines): replace the first
    existing assignment line, or append a new one.  Returns the list."""
    new_line = "%s = %s\n" % (key, value)
    index = find_value_line(text, key)
    if index is None:
        text.append(new_line)
    else:
        text[index] = new_line
    return text
def sub_text(filename, **kwargs):
    """
    Write ``key = value`` settings into a wannier input file in place.

    :param filename: the name of the input file.
    usage: e.g. sub_text('wannier90.win',bands_plot='true')
    """
    with open(filename) as infile:
        lines = infile.readlines()
    for key, value in kwargs.items():
        lines = sub_lines(lines, key, value)
    with open(filename, 'w') as outfile:
        outfile.writelines(lines)
def run_wannier(command=None, spin=None, copy_win=True):
    """
    run wannier90.

    :param command: executable to run (default 'wannier90.x').
    :param spin: None | 'up' | 'dn' | 'down'.  For spin-polarised runs a
        spin-specific input file '<name>.win' is generated.
    :param copy_win: if True (and spin is not None), prepend the spin line
        to a copy of wannier90.win as the spin-specific input file.
    :raises ValueError: if ``spin`` is not one of the accepted values.
    """
    if command is None:
        command = 'wannier90.x'
    if spin is None:
        name = 'wannier90'
    elif spin == 'up':
        name = 'wannier90.up'
        spinline = 'spin = up\n'
    elif spin == 'dn' or spin == 'down':
        name = 'wannier90.dn'
        spinline = 'spin = down\n'
    else:
        # BUGFIX: an unexpected spin value previously fell through and
        # raised UnboundLocalError on ``name``; fail with a clear message.
        raise ValueError(
            "spin must be None, 'up', 'dn' or 'down', got %r" % (spin, ))
    if spin is not None and copy_win:
        # BUGFIX: close the source file as well (it was previously opened
        # without ever being closed).
        with open("%s.win" % name, 'w') as outfile:
            outfile.write(spinline)
            with open('wannier90.win') as infile:
                outfile.write(infile.read())
        #os.system('cp wannier90.win %s.win' % name)
    os.system("%s %s" % (command, name))
    if spin is not None:
        if not os.path.exists(name):
            os.mkdir(name)
def test():
    """Smoke test: build a two-atom MnO cell, add a few projections and
    print the generated wannier90 input."""
    from ase import Atoms
    atoms = Atoms('MnO', positions=[(0, 0, 0), (1, 1, 1)], cell=np.eye(3))
    wa = wannier_input(atoms)
    print(atoms)
    wa.set(num_bands=9, dis_win_min=-3.0, dis_win_max=12.5)
    # The positional second argument of add_basis is ``orb``.
    wa.add_basis('Mn', 'dxy')
    wa.add_basis('Mn', 'p')
    wa.add_basis('O', orb='s', spin='up')
    wa.add_basis('O', orb='p', r=2, spin='up')
    print(wa.gen_input())
    #wa.atoms=atoms
    #wa.unit_cell=np.eye(3)
    #wa.set_kpath([(-1,0,0),[0,0,0.5],[0,0.5,0.5]],['G','L','X'],40)
    #print(wa.gen_input())
    #wa.write_input()
def wannier_default(name='BaTiO3'):
    """Build a default wannier_input for a cubic BaTiO3 primitive cell,
    print the generated input text and return the generator.

    NOTE(review): ``name``, ``orb_db``, ``vals`` and ``nwann`` are computed
    but never used below, and 'BaTiO3' is hard-coded in the gen_primitive
    call -- this looks like unfinished demo code; confirm the intent.
    """
    from ase_utils.cubic_perovskite import gen_primitive
    # Number of valence electrons per species (used only for nwann below).
    z_db = {'Ba': 10, 'Ti': 12, 'O': 6}
    #orb_db is (symbol, label), label can be valence, but also others,like 'high_spin'
    orb_db = {  # (symbol, valence): [(m, l, r, occ),... ]
        ('Ba', 2): [(None, 0, 5, 2), (None, 1, 5, 6), (None, 0, 6, 0)],
        ('Ti', 4):
        [(None, 0, 3, 2), (None, 1, 3, 6), (None, 2, 3, 0), (None, 0, 4, 0)],
        ('O', -2): [(None, 0, 2, 2), (None, 1, 2, 6)],
    }
    atoms = gen_primitive('BaTiO3', latticeconstant=3.946, mag_order='PM')
    vals = {'Ba': 2, 'Ti': 4, 'O': -2}
    wa = wannier_input(atoms)
    syms = atoms.get_chemical_symbols()
    nwann = sum((z_db[s] for s in syms))
    print(wa.gen_input())
    return wa
def wannier_closeshell(atoms, val_dict=None, band='v+c'):
    """Build a wannier_input for ``atoms`` from closed-shell ionic electron
    configurations, print the generated input and return the generator.

    :param atoms: ASE-like atoms object.
    :param val_dict: optional {symbol: valence} overrides merged over the
        built-in defaults.
    :param band: 'v' (occupied only) or 'v+c' (occupied + unoccupied),
        forwarded to add_basis_from_dict.
    :returns: the configured wannier_input instance.
    """
    from data.ONCV_PBEsol_conf import ONCV_PBEsol_conf
    from psp import gen_ion_conf_dict
    econf = ONCV_PBEsol_conf
    # Default valences; entries from val_dict take precedence.
    vals = {
        'Ba': 2,
        'Ti': 4,
        'O': -2,
        'Ca': 2,
        'Sr': 2,
        'Pb': 2,
        'Sn': 2,
        'Zr': 2,
        'Li': 1,
        'Nb': 5
    }
    if val_dict is not None:
        vals.update(val_dict)
    econf = gen_ion_conf_dict(vals, econf)
    wa = wannier_input(atoms)
    wa.add_basis_from_dict(atom_map=vals, conf_dict=econf, band=band)
    # BUGFIX: use the print function (the original py2 print statement is a
    # syntax error on python 3); also dropped the unused gen_primitive
    # import and the unused ``syms`` local.
    print(wa.gen_input())
    #print wa.get_nwann()
    return wa
def replace_value(text, valdict, position='start'):
    """
    Replace every line ``a=b`` with ``a=c`` (if a line ``a=...`` exists,
    otherwise add the line ``a=c``).

    Params:
    -------------
    text: string
        the full input text.
    valdict: dictionary of a:c.  Note that c will be written as str(c).
        If the default str() form is not what you want (e.g. you want
        [1,2,3] written as 1 2 3), pass the string "1 2 3" instead of
        the list [1,2,3].
    position: 'start' | 'end'
        where to add keys that do not already exist in the text.

    :returns: the new text (each line newline-terminated).
    """
    # BUGFIX: work on a copy so the caller's dict is not emptied; anchor
    # the pattern at the start of the line and escape regex metacharacters
    # in the key; replace at most one key per line (the original appended
    # one replacement line per matching key).
    pending = dict(valdict)
    newlines = []
    for line in text.split('\n'):
        matched_key = None
        for key in pending:
            if re.search(r'^\s*%s\s*=' % re.escape(key), line):
                matched_key = key
                break
        if matched_key is None:
            newlines.append(line + '\n')
        else:
            newlines.append('%s = %s\n' % (matched_key,
                                           str(pending.pop(matched_key))))
    # Keys that were not found anywhere get added at the requested position.
    for key in pending:
        entry = '%s = %s\n' % (key, str(pending[key]))
        if position == 'start':
            newlines.insert(0, entry)
        else:
            newlines.append(entry)
    return ''.join(newlines)
def replace_value_file(fname, valdict, position='start'):
    """
    Same as replace_value, but the text is read from the file ``fname``
    instead of being passed in directly.
    """
    with open(fname) as myfile:
        content = myfile.read()
    return replace_value(content, valdict, position=position)
def occupation(fname, efermi):
    """Integrate the DOS file ``fname`` (two columns: energy, dos) over the
    energies below ``efermi`` and return the integral divided by 2."""
    data = np.loadtxt(fname)
    energies = data[:, 0]
    dos = data[:, 1]
    #plt.plot(data[:,0]-efermi,data[:,1])
    below_fermi = energies - efermi < 0
    return trapz(dos[below_fermi], energies[below_fermi]) / 2
def read_basis(fname):
    """Read a basis file (as written by write_basis) and return an
    OrderedDict mapping the basis name (first field) to its line index."""
    bdict = OrderedDict()
    with open(fname) as myfile:
        for index, line in enumerate(myfile.readlines()):
            fields = line.strip().split()
            if fields:
                bdict[fields[0]] = index
    return bdict
#wannier_default()
#wannier_closeshell()
if __name__ == '__main__':
    # NOTE(review): wannier_closeshell() requires an ``atoms`` argument, so
    # running this module directly raises TypeError -- confirm the intended
    # entry point (possibly test() or wannier_default()).
    wannier_closeshell()
|
mailhexu/pyDFTutils
|
resource/phonon_ana/pyFA/wannier.py
|
Python
|
lgpl-3.0
| 22,509
|
[
"ASE",
"Wannier90"
] |
9b898795f10a6abbf47e4883d45dd59fcfb28a34f0f66301cd832a7485d1051e
|
# -*- coding: utf-8 -*-
"""
Models used to implement SAML SSO support in third_party_auth
(inlcuding Shibboleth support)
"""
import json
import logging
import re
from config_models.models import ConfigurationModel, cache
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from organizations.models import Organization
from social_core.backends.base import BaseAuth
from social_core.backends.oauth import OAuthAuth
from social_core.backends.saml import SAMLAuth
from social_core.exceptions import SocialAuthBaseException
from social_core.utils import module_member
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming.helpers import get_current_request
from openedx.core.lib.hash_utils import create_hash256
from .lti import LTI_PARAMS_KEY, LTIAuthBackend
from .saml import STANDARD_SAML_PROVIDER_KEY, get_saml_idp_choices, get_saml_idp_class
log = logging.getLogger(__name__)
# Registration-form fields that must never be pre-filled from provider data
# (see ProviderConfig.get_register_form_data).
REGISTRATION_FORM_FIELD_BLACKLIST = [
    'name',
    'username'
]
# A dictionary of {name: class} entries for each python-social-auth backend available.
# Because this setting can specify arbitrary code to load and execute, it is set via
# normal Django settings only and cannot be changed at runtime:
def _load_backend_classes(base_class=BaseAuth):
    """ Load the list of python-social-auth backend classes from Django settings """
    for class_path in settings.AUTHENTICATION_BACKENDS:
        auth_class = module_member(class_path)
        if issubclass(auth_class, base_class):
            yield auth_class
# Backend name -> class for every configured backend.
_PSA_BACKENDS = {backend_class.name: backend_class for backend_class in _load_backend_classes()}
# Backend names narrowed by protocol family (OAuth, SAML, LTI).
_PSA_OAUTH2_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(OAuthAuth)]
_PSA_SAML_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(SAMLAuth)]
_LTI_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(LTIAuthBackend)]
def clean_json(value, of_type):
    """Parse *value* as JSON of the given container type and re-serialise
    it in a normalised, indented form; blank input becomes an empty
    instance of *of_type*.  Raises ValidationError on bad input."""
    if not value.strip():
        return json.dumps(of_type())
    try:
        parsed = json.loads(value)
    except ValueError as err:
        raise ValidationError(u"Invalid JSON: {}".format(err))
    if isinstance(parsed, of_type):
        return json.dumps(parsed, indent=4)
    raise ValidationError(u"Expected a JSON {}".format(of_type))
def clean_username(username=''):
    """Collapse every run of characters other than word characters or '-'
    into a single underscore and truncate to 30 characters, so the result
    is compatible with our username requirements."""
    sanitized = re.sub(r'[^-\w]+', '_', username)
    return sanitized[:30]
class AuthNotConfigured(SocialAuthBaseException):
    """ Exception when SAMLProviderData or other required info is missing """

    def __init__(self, provider_name):
        super(AuthNotConfigured, self).__init__()
        self.provider_name = provider_name

    def __str__(self):
        message = _('Authentication with {} is currently unavailable.')
        return message.format(self.provider_name)
class ProviderConfig(ConfigurationModel):
    """
    Abstract Base Class for configuring a third_party_auth provider
    .. no_pii:
    """
    # Configuration rows are keyed (and their history kept) per slug.
    KEY_FIELDS = ('slug',)
    # --- Presentation fields -------------------------------------------------
    icon_class = models.CharField(
        max_length=50,
        blank=True,
        default=u'fa-sign-in',
        help_text=(
            u'The Font Awesome (or custom) icon class to use on the login button for this provider. '
            'Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university'
        ),
    )
    # We use a FileField instead of an ImageField here because ImageField
    # doesn't support SVG. This means we don't get any image validation, but
    # that should be fine because only trusted users should be uploading these
    # anyway.
    icon_image = models.FileField(
        blank=True,
        help_text=(
            u'If there is no Font Awesome icon available for this provider, upload a custom image. '
            'SVG images are recommended as they can scale to any size.'
        ),
    )
    name = models.CharField(max_length=50, blank=False, help_text=u"Name of this provider (shown to users)")
    slug = models.SlugField(
        max_length=30, db_index=True, default=u'default',
        help_text=(
            u'A short string uniquely identifying this provider. '
            'Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"'
        ))
    secondary = models.BooleanField(
        default=False,
        help_text=_(
            'Secondary providers are displayed less prominently, '
            'in a separate list of "Institution" login providers.'
        ),
    )
    # --- Relationships -------------------------------------------------------
    organization = models.ForeignKey(
        Organization,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        help_text=_(
            'optional. If this provider is an Organization, this attribute '
            'can be used reference users in that Organization'
        )
    )
    site = models.ForeignKey(
        Site,
        default=settings.SITE_ID,
        related_name='%(class)ss',
        help_text=_(
            'The Site that this provider configuration belongs to.'
        ),
        on_delete=models.CASCADE,
    )
    # --- Login/registration flow options -------------------------------------
    skip_hinted_login_dialog = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is enabled, users that visit a \"TPA hinted\" URL for this provider "
            "(e.g. a URL ending with `?tpa_hint=[provider_name]`) will be forwarded directly to "
            "the login URL of the provider instead of being first prompted with a login dialog."
        ),
    )
    skip_registration_form = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is enabled, users will not be asked to confirm their details "
            "(name, email, etc.) during the registration process. Only select this option "
            "for trusted providers that are known to provide accurate user information."
        ),
    )
    skip_email_verification = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will not be required to confirm their "
            "email, and their account will be activated immediately upon registration."
        ),
    )
    send_welcome_email = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will be sent a welcome email upon registration."
        ),
    )
    visible = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is not selected, users will not be presented with the provider "
            "as an option to authenticate with on the login screen, but manual "
            "authentication using the correct link is still possible."
        ),
    )
    max_session_length = models.PositiveIntegerField(
        null=True,
        blank=True,
        default=None,
        verbose_name=u'Max session length (seconds)',
        help_text=_(
            "If this option is set, then users logging in using this SSO provider will have "
            "their session length limited to no longer than this value. If set to 0 (zero), "
            "the session will expire upon the user closing their browser. If left blank, the "
            "Django platform session default length will be used."
        )
    )
    send_to_registration_first = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will be directed to the registration page "
            "immediately after authenticating with the third party instead of the login page."
        ),
    )
    sync_learner_profile_data = models.BooleanField(
        default=False,
        help_text=_(
            "Synchronize user profile data received from the identity provider with the edX user "
            "account on each SSO login. The user will be notified if the email address associated "
            "with their account is changed as a part of this synchronization."
        )
    )
    enable_sso_id_verification = models.BooleanField(
        default=False,
        help_text=u"Use the presence of a profile from a trusted third party as proof of identity verification.",
    )
    # --- Attributes overridden by concrete subclasses ------------------------
    prefix = None  # used for provider_id. Set to a string value in subclass
    backend_name = None  # Set to a field or fixed value in subclass
    accepts_logins = True  # Whether to display a sign-in button when the provider is enabled
    # "enabled" field is inherited from ConfigurationModel
    class Meta(object):
        app_label = "third_party_auth"
        abstract = True
    def clean(self):
        """ Ensure that either `icon_class` or `icon_image` is set """
        super(ProviderConfig, self).clean()
        # Exactly one of the two icon fields must be provided.
        if bool(self.icon_class) == bool(self.icon_image):
            raise ValidationError('Either an icon class or an icon image must be given (but not both)')
    @property
    def provider_id(self):
        """ Unique string key identifying this provider. Must be URL and css class friendly. """
        assert self.prefix is not None
        return "-".join((self.prefix, ) + tuple(getattr(self, field) for field in self.KEY_FIELDS))
    @property
    def backend_class(self):
        """ Get the python-social-auth backend class used for this provider """
        return _PSA_BACKENDS[self.backend_name]
    @property
    def full_class_name(self):
        """ Get the fully qualified class name of this provider. """
        return '{}.{}'.format(self.__module__, self.__class__.__name__)
    def get_url_params(self):
        """ Get a dict of GET parameters to append to login links for this provider """
        return {}
    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        return self.backend_name == pipeline['backend']
    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        return self.backend_name == social_auth.provider
    def get_remote_id_from_social_auth(self, social_auth):
        """ Given a UserSocialAuth object, return the remote ID used by this provider. """
        # This is generally the same thing as the UID, expect when one backend is used for multiple providers
        assert self.match_social_auth(social_auth)
        return social_auth.uid
    def get_social_auth_uid(self, remote_id):
        """
        Return the uid in social auth.
        This is default implementation. Subclass may override with a different one.
        """
        return remote_id
    @classmethod
    def get_register_form_data(cls, pipeline_kwargs):
        """Gets dict of data to display on the register form.
        register_user uses this to populate
        the new account creation form with values supplied by the user's chosen
        provider, preventing duplicate data entry.
        Args:
            pipeline_kwargs: dict of string -> object. Keyword arguments
                accumulated by the pipeline thus far.
        Returns:
            Dict of string -> string. Keys are names of form fields; values are
            values for that field. Where there is no value, the empty string
            must be used.
        """
        registration_form_data = {}
        # Details about the user sent back from the provider.
        details = pipeline_kwargs.get('details').copy()
        # Set the registration form to use the `fullname` detail for the `name` field.
        registration_form_data['name'] = details.get('fullname', '')
        # Get the username separately to take advantage of the de-duping logic
        # built into the pipeline. The provider cannot de-dupe because it can't
        # check the state of taken usernames in our system. Note that there is
        # technically a data race between the creation of this value and the
        # creation of the user object, so it is still possible for users to get
        # an error on submit.
        registration_form_data['username'] = clean_username(pipeline_kwargs.get('username') or '')
        # Any other values that are present in the details dict should be copied
        # into the registration form details. This may include details that do
        # not map to a value that exists in the registration form. However,
        # because the fields that are actually rendered are not based on this
        # list, only those values that map to a valid registration form field
        # will actually be sent to the form as default values.
        for blacklisted_field in REGISTRATION_FORM_FIELD_BLACKLIST:
            details.pop(blacklisted_field, None)
        registration_form_data.update(details)
        return registration_form_data
    def get_authentication_backend(self):
        """Gets associated Django settings.AUTHENTICATION_BACKEND string."""
        return '{}.{}'.format(self.backend_class.__module__, self.backend_class.__name__)
    @property
    def display_for_login(self):
        """
        Determines whether the provider ought to be shown as an option with
        which to authenticate on the login screen, registration screen, and elsewhere.
        """
        return bool(self.enabled_for_current_site and self.accepts_logins and self.visible)
    @property
    def enabled_for_current_site(self):
        """
        Determines if the provider is able to be used with the current site.
        """
        return self.enabled and self.site == Site.objects.get_current(get_current_request())
class OAuth2ProviderConfig(ProviderConfig):
    """
    Configuration Entry for an OAuth2 based provider.
    Also works for OAuth1 providers.

    .. no_pii:
    """
    # We are keying the provider config by backend_name here as suggested in the python social
    # auth documentation. In order to reuse a backend for a second provider, a subclass can be
    # created with a separate name.
    # example:
    #     class SecondOpenIDProvider(OpenIDAuth):
    #         name = "second-openId-provider"
    KEY_FIELDS = ('backend_name',)
    prefix = 'oa2'
    backend_name = models.CharField(
        max_length=50, blank=False, db_index=True,
        help_text=(
            u"Which python-social-auth OAuth2 provider backend to use. "
            "The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting."
            # To be precise, it's set by AUTHENTICATION_BACKENDS
            # which production.py sets from THIRD_PARTY_AUTH_BACKENDS
        )
    )
    key = models.TextField(blank=True, verbose_name=u"Client ID")
    secret = models.TextField(
        blank=True,
        verbose_name=u"Client Secret",
        help_text=(
            u'For increased security, you can avoid storing this in your database by leaving '
            ' this field blank and setting '
            'SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} '  # pylint: disable=unicode-format-string
            'in your instance\'s Django settings (or lms.auth.json)'
        )
    )
    # Free-form JSON object; normalized by clean() and consulted by
    # get_setting() for any name other than KEY/SECRET.
    other_settings = models.TextField(blank=True, help_text=u"Optional JSON object with advanced settings, if any.")

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"Provider Configuration (OAuth)"
        verbose_name_plural = verbose_name

    def clean(self):
        """ Standardize and validate fields """
        super(OAuth2ProviderConfig, self).clean()
        # Normalize other_settings to canonical JSON (must be a dict/object).
        self.other_settings = clean_json(self.other_settings, dict)

    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError """
        if name == "KEY":
            return self.key
        if name == "SECRET":
            if self.secret:
                return self.secret
            # To allow instances to avoid storing secrets in the DB, the secret can also be set via Django:
            return getattr(settings, 'SOCIAL_AUTH_OAUTH_SECRETS', {}).get(self.backend_name, '')
        # Any other setting name is looked up inside the other_settings JSON blob.
        if self.other_settings:
            other_settings = json.loads(self.other_settings)
            assert isinstance(other_settings, dict), "other_settings should be a JSON object (dictionary)"
            return other_settings[name]
        raise KeyError
class SAMLConfiguration(ConfigurationModel):
    """
    General configuration required for this edX instance to act as a SAML
    Service Provider and allow users to authenticate via third party SAML
    Identity Providers (IdPs)

    .. no_pii:
    """
    # A configuration row is uniquely identified by (site, slug), so multiple
    # SP configurations can coexist (e.g. one per site).
    KEY_FIELDS = ('site_id', 'slug')
    site = models.ForeignKey(
        Site,
        default=settings.SITE_ID,
        related_name='%(class)ss',
        help_text=_(
            'The Site that this SAML configuration belongs to.'
        ),
        on_delete=models.CASCADE,
    )
    slug = models.SlugField(
        max_length=30,
        default=u'default',
        help_text=(
            u'A short string uniquely identifying this configuration. '
            'Cannot contain spaces. Examples: "ubc", "mit-staging"'
        ),
    )
    # SP private key; may be left blank and supplied via Django settings
    # instead (see get_setting("SP_PRIVATE_KEY")).
    private_key = models.TextField(
        help_text=(
            u'To generate a key pair as two files, run '
            '"openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". '
            'Paste the contents of saml.key here. '
            'For increased security, you can avoid storing this in your database by leaving '
            'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting '
            'in your instance\'s Django settings (or lms.auth.json).'
        ),
        blank=True,
    )
    # SP public certificate; same blank-and-use-settings escape hatch as above.
    public_key = models.TextField(
        help_text=(
            u'Public key certificate. '
            'For increased security, you can avoid storing this in your database by leaving '
            'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting '
            'in your instance\'s Django settings (or lms.auth.json).'
        ),
        blank=True,
    )
    entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name=u"Entity ID")
    org_info_str = models.TextField(
        verbose_name=u"Organization Info",
        default=u'{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
        help_text=u"JSON dictionary of 'url', 'displayname', and 'name' for each language",
    )
    other_config_str = models.TextField(
        default=u'{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
        help_text=(
            u"JSON object defining advanced settings that are passed on to python-saml. "
            "Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
        ),
    )

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"SAML Configuration"
        verbose_name_plural = verbose_name

    def __str__(self):
        """
        Return human-readable string representation.
        """
        return u"SAMLConfiguration {site}: {slug} on {date:%Y-%m-%d %H:%M:%S}".format(
            site=self.site.name,
            slug=self.slug,
            date=self.change_date,
        )

    def clean(self):
        """ Standardize and validate fields """
        super(SAMLConfiguration, self).clean()
        self.org_info_str = clean_json(self.org_info_str, dict)
        self.other_config_str = clean_json(self.other_config_str, dict)
        # Strip the PEM armor header/footer lines so only the base64 body is
        # stored; both RSA and PKCS#8 style headers are handled.
        self.private_key = (
            self.private_key
            .replace("-----BEGIN RSA PRIVATE KEY-----", "")
            .replace("-----BEGIN PRIVATE KEY-----", "")
            .replace("-----END RSA PRIVATE KEY-----", "")
            .replace("-----END PRIVATE KEY-----", "")
            .strip()
        )
        self.public_key = (
            self.public_key
            .replace("-----BEGIN CERTIFICATE-----", "")
            .replace("-----END CERTIFICATE-----", "")
            .strip()
        )

    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError """
        default_saml_contact = {
            # Default contact information to put into the SAML metadata that gets generated by python-saml.
            "givenName": _(u"{platform_name} Support").format(
                platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
            ),
            "emailAddress": configuration_helpers.get_value('TECH_SUPPORT_EMAIL', settings.TECH_SUPPORT_EMAIL),
        }
        if name == "ORG_INFO":
            return json.loads(self.org_info_str)
        if name == "SP_ENTITY_ID":
            return self.entity_id
        if name == "SP_PUBLIC_CERT":
            if self.public_key:
                return self.public_key
            # To allow instances to avoid storing keys in the DB, the key pair can also be set via Django:
            # the 'default' slug reads the plain setting; other slugs read a per-slug dict.
            if self.slug == 'default':
                return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
            else:
                public_certs = getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT', {})
                return public_certs.get(self.slug, '')
        if name == "SP_PRIVATE_KEY":
            if self.private_key:
                return self.private_key
            # To allow instances to avoid storing keys in the DB, the private key can also be set via Django:
            if self.slug == 'default':
                return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
            else:
                private_keys = getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT', {})
                return private_keys.get(self.slug, '')
        other_config = {
            # These defaults can be overriden by self.other_config_str
            "GET_ALL_EXTRA_DATA": True,  # Save all attribute values the IdP sends into the UserSocialAuth table
            "TECHNICAL_CONTACT": default_saml_contact,
            "SUPPORT_CONTACT": default_saml_contact,
        }
        other_config.update(json.loads(self.other_config_str))
        return other_config[name]  # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
def active_saml_configurations_filter():
    """
    Build the ``limit_choices_to`` mapping that restricts SAMLProviderConfig's
    SAMLConfiguration choices to the currently-active configuration set.
    """
    current = SAMLConfiguration.objects.current_set()
    active_ids = current.values_list('id', flat=True)
    return {'id__in': active_ids}
class SAMLProviderConfig(ProviderConfig):
    """
    Configuration Entry for a SAML/Shibboleth provider.

    .. no_pii:
    """
    prefix = 'saml'
    backend_name = models.CharField(
        max_length=50, default=u'tpa-saml', blank=False,
        help_text=u"Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.")
    entity_id = models.CharField(
        max_length=255, verbose_name=u"Entity ID", help_text=u"Example: https://idp.testshib.org/idp/shibboleth")
    metadata_source = models.CharField(
        max_length=255,
        help_text=(
            u"URL to this provider's XML metadata. Should be an HTTPS URL. "
            "Example: https://www.testshib.org/metadata/testshib-providers.xml"
        ))
    # The attr_* fields hold SAML attribute URNs; the matching default_*
    # fields hold fallback values used when the attribute is absent from the
    # SAML response (see get_config()'s attr_defaults mapping below).
    attr_user_permanent_id = models.CharField(
        max_length=128, blank=True, verbose_name=u"User ID Attribute",
        help_text=(
            u"URN of the SAML attribute that we can use as a unique, "
            "persistent user ID. Leave blank for default."
        ))
    attr_full_name = models.CharField(
        max_length=128, blank=True, verbose_name=u"Full Name Attribute",
        help_text=u"URN of SAML attribute containing the user's full name. Leave blank for default.")
    default_full_name = models.CharField(
        max_length=255, blank=True, verbose_name=u"Default Value for Full Name",
        help_text=u"Default value for full name to be used if not present in SAML response.")
    attr_first_name = models.CharField(
        max_length=128, blank=True, verbose_name=u"First Name Attribute",
        help_text=u"URN of SAML attribute containing the user's first name. Leave blank for default.")
    default_first_name = models.CharField(
        max_length=255, blank=True, verbose_name=u"Default Value for First Name",
        help_text=u"Default value for first name to be used if not present in SAML response.")
    attr_last_name = models.CharField(
        max_length=128, blank=True, verbose_name=u"Last Name Attribute",
        help_text=u"URN of SAML attribute containing the user's last name. Leave blank for default.")
    default_last_name = models.CharField(
        max_length=255, blank=True, verbose_name=u"Default Value for Last Name",
        help_text=u"Default value for last name to be used if not present in SAML response.")
    attr_username = models.CharField(
        max_length=128, blank=True, verbose_name=u"Username Hint Attribute",
        help_text=u"URN of SAML attribute to use as a suggested username for this user. Leave blank for default.")
    default_username = models.CharField(
        max_length=255, blank=True, verbose_name=u"Default Value for Username",
        help_text=u"Default value for username to be used if not present in SAML response.")
    attr_email = models.CharField(
        max_length=128, blank=True, verbose_name=u"Email Attribute",
        help_text=u"URN of SAML attribute containing the user's email address[es]. Leave blank for default.")
    default_email = models.CharField(
        max_length=255, blank=True, verbose_name=u"Default Value for Email",
        help_text=u"Default value for email to be used if not present in SAML response.")
    automatic_refresh_enabled = models.BooleanField(
        default=True, verbose_name=u"Enable automatic metadata refresh",
        help_text=u"When checked, the SAML provider's metadata will be included "
                  "in the automatic refresh job, if configured."
    )
    identity_provider_type = models.CharField(
        max_length=128, blank=False, verbose_name=u"Identity Provider Type", default=STANDARD_SAML_PROVIDER_KEY,
        choices=get_saml_idp_choices(), help_text=(
            u"Some SAML providers require special behavior. For example, SAP SuccessFactors SAML providers require an "
            "additional API call to retrieve user metadata not provided in the SAML response. Select the provider type "
            "which best matches your use case. If in doubt, choose the Standard SAML Provider type."
        )
    )
    debug_mode = models.BooleanField(
        default=False, verbose_name=u"Debug Mode",
        help_text=(
            u"In debug mode, all SAML XML requests and responses will be logged. "
            "This is helpful for testing/setup but should always be disabled before users start using this provider."
        ),
    )
    # NOTE(review): typo "addtional" in the help_text below; fixing it changes
    # a runtime string (and triggers a migration), so it is left as-is here.
    other_settings = models.TextField(
        verbose_name=u"Advanced settings", blank=True,
        help_text=(
            u'For advanced use cases, enter a JSON object with addtional configuration. '
            'The tpa-saml backend supports {"requiredEntitlements": ["urn:..."]}, '  # pylint: disable=unicode-format-string
            'which can be used to require the presence of a specific eduPersonEntitlement, '
            'and {"extra_field_definitions": [{"name": "...", "urn": "..."},...]}, which can be '  # pylint: disable=unicode-format-string
            'used to define registration form fields and the URNs that can be used to retrieve '
            'the relevant values from the SAML response. Custom provider types, as selected '
            'in the "Identity Provider Type" field, may make use of the information stored '
            'in this field for additional configuration.'
        ))
    archived = models.BooleanField(default=False)
    # Optional link to a specific SAMLConfiguration; when null, get_config()
    # falls back to the current 'default' configuration for this site.
    saml_configuration = models.ForeignKey(
        SAMLConfiguration,
        on_delete=models.SET_NULL,
        limit_choices_to=active_saml_configurations_filter,
        null=True,
        blank=True,
    )

    def clean(self):
        """ Standardize and validate fields """
        super(SAMLProviderConfig, self).clean()
        self.other_settings = clean_json(self.other_settings, dict)

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"Provider Configuration (SAML IdP)"
        verbose_name_plural = "Provider Configuration (SAML IdPs)"

    def get_url_params(self):
        """ Get a dict of GET parameters to append to login links for this provider """
        return {'idp': self.slug}

    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        return self.backend_name == pipeline['backend'] and self.slug == pipeline['kwargs']['response']['idp_name']

    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        # SAML UIDs are stored as "<idp slug>:<remote id>".
        prefix = self.slug + ":"
        return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)

    def get_remote_id_from_social_auth(self, social_auth):
        """ Given a UserSocialAuth object, return the remote ID used by this provider. """
        assert self.match_social_auth(social_auth)
        # Remove the prefix from the UID
        return social_auth.uid[len(self.slug) + 1:]

    def get_social_auth_uid(self, remote_id):
        """ Get social auth uid from remote id by prepending idp_slug to the remote id """
        return '{}:{}'.format(self.slug, remote_id)

    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError """
        if self.other_settings:
            other_settings = json.loads(self.other_settings)
            return other_settings[name]
        raise KeyError

    def get_config(self):
        """
        Return a SAMLIdentityProvider instance for use by SAMLAuthBackend.

        Essentially this just returns the values of this object and its
        associated 'SAMLProviderData' entry.
        """
        if self.other_settings:
            conf = json.loads(self.other_settings)
        else:
            conf = {}
        attrs = (
            'attr_user_permanent_id', 'attr_full_name', 'attr_first_name',
            'attr_last_name', 'attr_username', 'attr_email', 'entity_id')
        # Maps each attr_* field to the model field holding its fallback value.
        attr_defaults = {
            'attr_full_name': 'default_full_name',
            'attr_first_name': 'default_first_name',
            'attr_last_name': 'default_last_name',
            'attr_username': 'default_username',
            'attr_email': 'default_email',
        }
        # Defaults for missing attributes in SAML Response
        conf['attr_defaults'] = {}
        for field in attrs:
            field_name = attr_defaults.get(field)
            val = getattr(self, field)
            if val:
                conf[field] = val
            # Default values for SAML attributes
            default = getattr(self, field_name) if field_name else None
            conf['attr_defaults'][field] = default
        # Now get the data fetched automatically from the metadata.xml:
        data = SAMLProviderData.current(self.entity_id)
        if not data or not data.is_valid():
            log.error(
                'No SAMLProviderData found for provider "%s" with entity id "%s" and IdP slug "%s". '  # pylint: disable=unicode-format-string
                'Run "manage.py saml pull" to fix or debug.',
                self.name, self.entity_id, self.slug
            )
            raise AuthNotConfigured(provider_name=self.name)
        conf['x509cert'] = data.public_key
        conf['url'] = data.sso_url
        # Add SAMLConfiguration appropriate for this IdP
        conf['saml_sp_configuration'] = (
            self.saml_configuration or
            SAMLConfiguration.current(self.site.id, 'default')
        )
        idp_class = get_saml_idp_class(self.identity_provider_type)
        return idp_class(self.slug, **conf)
class SAMLProviderData(models.Model):
    """
    Data about a SAML IdP that is fetched automatically by 'manage.py saml pull'

    This data is only required during the actual authentication process.

    .. no_pii:
    """
    # Seconds that current() caches the latest entry per entity_id.
    cache_timeout = 600
    # When this metadata snapshot was fetched.
    fetched_at = models.DateTimeField(db_index=True, null=False)
    # When this snapshot stops being valid; null means no known expiry.
    expires_at = models.DateTimeField(db_index=True, null=True)
    entity_id = models.CharField(max_length=255, db_index=True)  # This is the key for lookups in this table
    sso_url = models.URLField(verbose_name=u"SSO URL")
    public_key = models.TextField()

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"SAML Provider Data"
        verbose_name_plural = verbose_name
        # Newest snapshot first, so current() can take index [0].
        ordering = ('-fetched_at', )

    def is_valid(self):
        """ Is this data valid? """
        if self.expires_at and timezone.now() > self.expires_at:
            return False
        return bool(self.entity_id and self.sso_url and self.public_key)
    # Render as a boolean icon in the Django admin changelist.
    is_valid.boolean = True

    @classmethod
    def cache_key_name(cls, entity_id):
        """ Return the name of the key to use to cache the current data """
        return 'configuration/{}/current/{}'.format(cls.__name__, entity_id)

    @classmethod
    def current(cls, entity_id):
        """
        Return the active data entry, if any, otherwise None
        """
        cached = cache.get(cls.cache_key_name(entity_id))
        if cached is not None:
            return cached
        try:
            # Rows are ordered newest-first; [0] is the latest snapshot.
            current = cls.objects.filter(entity_id=entity_id).order_by('-fetched_at')[0]
        except IndexError:
            current = None
        # NOTE: a None result is not cached (cache.get(None) is indistinguishable
        # from a miss), so missing entries hit the DB on every call.
        cache.set(cls.cache_key_name(entity_id), current, cls.cache_timeout)
        return current
class LTIProviderConfig(ProviderConfig):
    """
    Configuration required for this edX instance to act as a LTI
    Tool Provider and allow users to authenticate and be enrolled in a
    course via third party LTI Tool Consumers.

    .. no_pii:
    """
    prefix = 'lti'
    backend_name = 'lti'
    # This provider is not visible to users
    icon_class = None
    icon_image = None
    secondary = False
    # LTI login cannot be initiated by the tool provider
    accepts_logins = False
    KEY_FIELDS = ('lti_consumer_key', )
    lti_consumer_key = models.CharField(
        max_length=255,
        help_text=(
            u'The name that the LTI Tool Consumer will use to identify itself'
        )
    )
    lti_hostname = models.CharField(
        default=u'localhost',
        max_length=255,
        help_text=(
            u'The domain that will be acting as the LTI consumer.'
        ),
        db_index=True
    )
    # NOTE(review): typo "setttigs" in the help_text below; fixing it changes a
    # runtime string (and triggers a migration), so it is left as-is here.
    lti_consumer_secret = models.CharField(
        default=create_hash256,
        max_length=255,
        help_text=(
            u'The shared secret that the LTI Tool Consumer will use to '
            'authenticate requests. Only this edX instance and this '
            'tool consumer instance should know this value. '
            'For increased security, you can avoid storing this in '
            'your database by leaving this field blank and setting '
            'SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} '  # pylint: disable=unicode-format-string
            'in your instance\'s Django setttigs (or lms.auth.json)'
        ),
        blank=True,
    )
    lti_max_timestamp_age = models.IntegerField(
        default=10,
        help_text=(
            u'The maximum age of oauth_timestamp values, in seconds.'
        )
    )

    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        # LTI UIDs are stored as "<consumer key>:<remote id>".
        prefix = self.lti_consumer_key + ":"
        return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)

    def get_remote_id_from_social_auth(self, social_auth):
        """ Given a UserSocialAuth object, return the remote ID used by this provider. """
        assert self.match_social_auth(social_auth)
        # Remove the prefix from the UID
        return social_auth.uid[len(self.lti_consumer_key) + 1:]

    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        try:
            return (
                self.backend_name == pipeline['backend'] and
                self.lti_consumer_key == pipeline['kwargs']['response'][LTI_PARAMS_KEY]['oauth_consumer_key']
            )
        except KeyError:
            # Pipelines from other backends may not carry LTI params at all.
            return False

    def get_lti_consumer_secret(self):
        """ If the LTI consumer secret is not stored in the database, check Django settings instead """
        if self.lti_consumer_secret:
            return self.lti_consumer_secret
        return getattr(settings, 'SOCIAL_AUTH_LTI_CONSUMER_SECRETS', {}).get(self.lti_consumer_key, '')

    class Meta(object):
        app_label = "third_party_auth"
        verbose_name = u"Provider Configuration (LTI)"
        verbose_name_plural = verbose_name
|
cpennington/edx-platform
|
common/djangoapps/third_party_auth/models.py
|
Python
|
agpl-3.0
| 37,165
|
[
"VisIt"
] |
4f54395286e130fd1b0b390d75f76ca583aeb07c70e8a0be1ce1e5f9becfdc8a
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Driver for Microsoft Azure Virtual Machines service.
http://azure.microsoft.com/en-us/services/virtual-machines/
"""
import re
import time
import collections
import random
import sys
import copy
import base64
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
from libcloud.utils.py3 import ET
from libcloud.common.azure import AzureServiceManagementConnection
from libcloud.common.azure import AzureRedirectException
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume
from libcloud.compute.types import NodeState
from libcloud.common.types import LibcloudError
from libcloud.utils.py3 import _real_unicode
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import ensure_string
from libcloud.utils.py3 import urlquote as url_quote
from libcloud.utils.misc import ReprMixin
# Module-level alias so callers get the right HTTPSConnection on py2 and py3.
HTTPSConnection = httplib.HTTPSConnection
if sys.version_info < (3,):
    # Python 2: the text type is ``unicode``; _str() encodes text to UTF-8
    # bytes for use in URLs/requests.
    _unicode_type = unicode
    def _str(value):
        if isinstance(value, unicode):
            return value.encode('utf-8')
        return str(value)
else:
    # Python 3: ``str`` is already unicode, so both aliases are just str.
    _str = str
    _unicode_type = str
# Azure Service Management ("classic") API host and API version header value.
AZURE_SERVICE_MANAGEMENT_HOST = 'management.core.windows.net'
X_MS_VERSION = '2013-08-01'
# Pattern of product names; presumably used to detect Windows-based images
# elsewhere in the driver — usage is outside this chunk, so confirm.
WINDOWS_SERVER_REGEX = re.compile(
    r'Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk'
)
"""
Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx
Prices are for Linux instances in East US data center. To see what pricing will
actually be, visit:
http://azure.microsoft.com/en-gb/pricing/details/virtual-machines/
"""
AZURE_COMPUTE_INSTANCE_TYPES = {
'A0': {
'id': 'ExtraSmall',
'name': 'Extra Small Instance',
'ram': 768,
'disk': 127,
'bandwidth': None,
'price': '0.0211',
'max_data_disks': 1,
'cores': 'Shared'
},
'A1': {
'id': 'Small',
'name': 'Small Instance',
'ram': 1792,
'disk': 127,
'bandwidth': None,
'price': '0.0633',
'max_data_disks': 2,
'cores': 1
},
'A2': {
'id': 'Medium',
'name': 'Medium Instance',
'ram': 3584,
'disk': 127,
'bandwidth': None,
'price': '0.1266',
'max_data_disks': 4,
'cores': 2
},
'A3': {
'id': 'Large',
'name': 'Large Instance',
'ram': 7168,
'disk': 127,
'bandwidth': None,
'price': '0.2531',
'max_data_disks': 8,
'cores': 4
},
'A4': {
'id': 'ExtraLarge',
'name': 'Extra Large Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.5062',
'max_data_disks': 16,
'cores': 8
},
'A5': {
'id': 'A5',
'name': 'Memory Intensive Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.2637',
'max_data_disks': 4,
'cores': 2
},
'A6': {
'id': 'A6',
'name': 'A6 Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.5273',
'max_data_disks': 8,
'cores': 4
},
'A7': {
'id': 'A7',
'name': 'A7 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '1.0545',
'max_data_disks': 16,
'cores': 8
},
'A8': {
'id': 'A8',
'name': 'A8 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '2.0774',
'max_data_disks': 16,
'cores': 8
},
'A9': {
'id': 'A9',
'name': 'A9 Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '4.7137',
'max_data_disks': 16,
'cores': 16
},
'A10': {
'id': 'A10',
'name': 'A10 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '1.2233',
'max_data_disks': 16,
'cores': 8
},
'A11': {
'id': 'A11',
'name': 'A11 Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '2.1934',
'max_data_disks': 16,
'cores': 16
},
'D1': {
'id': 'Standard_D1',
'name': 'D1 Faster Compute Instance',
'ram': 3584,
'disk': 127,
'bandwidth': None,
'price': '0.0992',
'max_data_disks': 2,
'cores': 1
},
'D2': {
'id': 'Standard_D2',
'name': 'D2 Faster Compute Instance',
'ram': 7168,
'disk': 127,
'bandwidth': None,
'price': '0.1983',
'max_data_disks': 4,
'cores': 2
},
'D3': {
'id': 'Standard_D3',
'name': 'D3 Faster Compute Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.3965',
'max_data_disks': 8,
'cores': 4
},
'D4': {
'id': 'Standard_D4',
'name': 'D4 Faster Compute Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.793',
'max_data_disks': 16,
'cores': 8
},
'D11': {
'id': 'Standard_D11',
'name': 'D11 Faster Compute Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.251',
'max_data_disks': 4,
'cores': 2
},
'D12': {
'id': 'Standard_D12',
'name': 'D12 Faster Compute Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.502',
'max_data_disks': 8,
'cores': 4
},
'D13': {
'id': 'Standard_D13',
'name': 'D13 Faster Compute Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '0.9038',
'max_data_disks': 16,
'cores': 8
},
'D14': {
'id': 'Standard_D14',
'name': 'D14 Faster Compute Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '1.6261',
'max_data_disks': 32,
'cores': 16
}
}
# Maps python-style attribute names to the exact element/header names Azure
# expects, for cases where a mechanical snake_case -> CamelCase transform
# would produce the wrong casing (e.g. acronyms like OS, FQDN, MD5).
# NOTE(review): consumed by serialization code outside this chunk — confirm.
_KNOWN_SERIALIZATION_XFORMS = {
    'include_apis': 'IncludeAPIs',
    'message_id': 'MessageId',
    'content_md5': 'Content-MD5',
    'last_modified': 'Last-Modified',
    'cache_control': 'Cache-Control',
    'account_admin_live_email_id': 'AccountAdminLiveEmailId',
    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
    'subscription_id': 'SubscriptionID',
    'fqdn': 'FQDN',
    'private_id': 'PrivateID',
    'os_virtual_hard_disk': 'OSVirtualHardDisk',
    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
    'logical_size_in_gb': 'LogicalSizeInGB',
    'os': 'OS',
    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
    'copy_id': 'CopyId',
    'os_disk_configuration': 'OSDiskConfiguration',
    'is_dns_programmed': 'IsDnsProgrammed'
}
class AzureNodeDriver(NodeDriver):
    """libcloud compute driver for the Azure Service Management (classic) VM API."""
    connectionCls = AzureServiceManagementConnection
    name = 'Azure Virtual machines'
    website = 'http://azure.microsoft.com/en-us/services/virtual-machines/'
    type = Provider.AZURE
    # Hardcoded size table (see AZURE_COMPUTE_INSTANCE_TYPES above).
    _instance_types = AZURE_COMPUTE_INSTANCE_TYPES
    # Suffix of Azure blob storage hostnames.
    _blob_url = ".blob.core.windows.net"
    features = {'create_node': ['password']}
    # Lightweight record pairing whether a location is an affinity group with
    # the location value itself.
    service_location = collections.namedtuple(
        'service_location',
        ['is_affinity_group', 'service_location']
    )
    # Translates Azure role-instance status strings to libcloud NodeState.
    NODE_STATE_MAP = {
        'RoleStateUnknown': NodeState.UNKNOWN,
        'CreatingVM': NodeState.PENDING,
        'StartingVM': NodeState.PENDING,
        'Provisioning': NodeState.PENDING,
        'CreatingRole': NodeState.PENDING,
        'StartingRole': NodeState.PENDING,
        'ReadyRole': NodeState.RUNNING,
        'BusyRole': NodeState.PENDING,
        'StoppingRole': NodeState.PENDING,
        'StoppingVM': NodeState.PENDING,
        'DeletingVM': NodeState.PENDING,
        'StoppedVM': NodeState.STOPPED,
        'RestartingRole': NodeState.REBOOTING,
        'CyclingRole': NodeState.TERMINATED,
        'FailedStartingRole': NodeState.TERMINATED,
        'FailedStartingVM': NodeState.TERMINATED,
        'UnresponsiveRole': NodeState.TERMINATED,
        'StoppedDeallocated': NodeState.TERMINATED,
    }
def __init__(self, subscription_id=None, key_file=None, **kwargs):
    """
    :param subscription_id: Azure subscription id, a GUID string.
    :param key_file: Path/contents of the Azure X509 certificate (.pem form).
    """
    self.subscription_id = subscription_id
    self.key_file = key_file
    # Read (not pop) so 'follow_redirects' is also forwarded to the base class.
    self.follow_redirects = kwargs.get('follow_redirects', True)
    super(AzureNodeDriver, self).__init__(
        subscription_id,
        key_file,
        secure=True,
        **kwargs
    )
def list_sizes(self):
    """
    List all sizes from the hardcoded instance-type table.

    :rtype: ``list`` of :class:`NodeSize`
    """
    # Deep-copy each spec so _to_node_size can't mutate the shared table.
    return [
        self._to_node_size(copy.deepcopy(spec))
        for spec in self._instance_types.values()
    ]
def list_images(self, location=None):
    """
    List platform images plus captured VM images.

    :param location: optional location name; when given, only images whose
                     extra["location"] contains it are returned.
    :rtype: ``list`` of :class:`NodeImage`
    """
    platform_data = self._perform_get(self._get_image_path(), Images)
    vm_data = self._perform_get(self._get_vmimage_path(), VMImages)
    images = [self._to_image(item) for item in platform_data]
    for vm_item in vm_data:
        images.append(self._vm_to_image(vm_item))
    if location is None:
        return images
    return [
        image for image in images
        if location in image.extra["location"]
    ]
def list_locations(self):
    """
    List the locations available to this subscription.

    :rtype: ``list`` of :class:`NodeLocation`
    """
    locations_path = '/' + self.subscription_id + '/locations'
    data = self._perform_get(locations_path, Locations)
    return [self._to_location(entry) for entry in data]
def list_nodes(self, ex_cloud_service_name):
    """
    List all nodes in one Cloud Service.

    ``ex_cloud_service_name`` is required because Azure nodes always live
    inside a Cloud Service and cannot be shared between Cloud Services.

    :param ex_cloud_service_name: Cloud Service name
    :type ex_cloud_service_name: ``str``

    :rtype: ``list`` of :class:`Node`
    """
    service_path = self._get_hosted_service_path(ex_cloud_service_name)
    response = self._perform_get(service_path + '?embed-detail=True', None)
    self.raise_for_response(response, 200)
    data = self._parse_response(response, HostedService)
    # Collect the deployment's virtual IPs (if any) to attach to each node.
    vips = None
    if (len(data.deployments) > 0 and
            data.deployments[0].virtual_ips is not None):
        vips = [vip.address for vip in data.deployments[0].virtual_ips]
    try:
        return [
            self._to_node(instance, ex_cloud_service_name, vips)
            for instance in data.deployments[0].role_instance_list
        ]
    except IndexError:
        # No deployments in this cloud service -> no nodes.
        return []
def reboot_node(self, node, ex_cloud_service_name=None,
                ex_deployment_slot=None):
    """
    Reboot a node.

    ``ex_cloud_service_name`` scopes the request to one Cloud Service; if
    omitted, it is read from ``node.extra``.

    :param ex_cloud_service_name: Cloud Service name
    :type ex_cloud_service_name: ``str``

    :param ex_deployment_slot: "Production" (default) or "Staging". (Optional)
    :type ex_deployment_slot: ``str``

    :rtype: ``bool``
    """
    if ex_cloud_service_name is None and node.extra is not None:
        ex_cloud_service_name = node.extra.get('ex_cloud_service_name')
    if not ex_cloud_service_name:
        raise ValueError("ex_cloud_service_name is required.")
    ex_deployment_slot = ex_deployment_slot or "Production"
    deployment_name = self._get_deployment(
        service_name=ex_cloud_service_name,
        deployment_slot=ex_deployment_slot
    ).name
    try:
        reboot_path = self._get_deployment_path_using_name(
            ex_cloud_service_name,
            deployment_name
        ) + '/roleinstances/' + _str(node.id) + '?comp=reboot'
        response = self._perform_post(reboot_path, '')
        self.raise_for_response(response, 202)
        return bool(self._parse_response_for_async_op(response))
    except Exception:
        # Best-effort contract: any failure is reported as False.
        return False
def list_volumes(self, node=None):
    """
    List the disks in the subscription's image repository.

    Pass a ``node`` to scope the listing to a single instance.

    :rtype: ``list`` of :class:`StorageVolume`
    """
    disks = self._perform_get(self._get_disk_path(), Disks)
    return [self._to_volume(volume=disk, node=node) for disk in disks]
def create_node(self, name, size, image, ex_cloud_service_name,
                ex_storage_service_name=None, ex_new_deployment=False,
                ex_deployment_slot="Production", ex_deployment_name=None,
                ex_admin_user_id="azureuser", ex_custom_data=None,
                ex_virtual_network_name=None, ex_network_config=None,
                auth=None, **kwargs):
    """
    Create Azure Virtual Machine

    Reference: http://bit.ly/1fIsCb7
    [www.windowsazure.com/en-us/documentation/]

    We default to:

    + 3389/TCP - RDP - 1st Microsoft instance.
    + RANDOM/TCP - RDP - All succeeding Microsoft instances.

    + 22/TCP - SSH - 1st Linux instance
    + RANDOM/TCP - SSH - All succeeding Linux instances.

    The above replicates the standard behavior of the Azure UI.
    You can retrieve the assigned ports to each instance by
    using the following private function:

    _get_endpoint_ports(service_name)
    Returns public,private port key pair.

    @inherits: :class:`NodeDriver.create_node`

    :keyword image: The image to use when creating this node
    :type image: `NodeImage`

    :keyword size: The size of the instance to create
    :type size: `NodeSize`

    :keyword ex_cloud_service_name: Required.
                                    Name of the Azure Cloud Service.
    :type ex_cloud_service_name: ``str``

    :keyword ex_storage_service_name: Optional:
                                      Name of the Azure Storage Service.
    :type ex_storage_service_name: ``str``

    :keyword ex_new_deployment: Optional. Tells azure to create a
                                new deployment rather than add to an
                                existing one.
    :type ex_new_deployment: ``boolean``

    :keyword ex_deployment_slot: Optional: Valid values: production|
                                 staging. Defaults to production.
    :type ex_deployment_slot: ``str``

    :keyword ex_deployment_name: Optional. The name of the deployment.
                                 If this is not passed in we default
                                 to using the Cloud Service name.
    :type ex_deployment_name: ``str``

    :keyword ex_custom_data: Optional script or other data which is
                             injected into the VM when it's beginning
                             provisioned.
    :type ex_custom_data: ``str``

    :keyword ex_admin_user_id: Optional. Defaults to 'azureuser'.
    :type ex_admin_user_id: ``str``

    :keyword ex_virtual_network_name: Optional. If this is not passed
                                      in no virtual network is used.
    :type ex_virtual_network_name: ``str``

    :keyword ex_network_config: Optional. The ConfigurationSet to use
                                for network configuration
    :type ex_network_config: `ConfigurationSet`
    """
    # TODO: Refactor this method to make it more readable, split it into
    # multiple smaller methods
    auth = self._get_and_check_auth(auth)
    password = auth.password

    if not isinstance(size, NodeSize):
        raise ValueError('Size must be an instance of NodeSize')

    if not isinstance(image, NodeImage):
        raise ValueError(
            "Image must be an instance of NodeImage, "
            "produced by list_images()"
        )

    # Retrieve a list of currently available nodes for the provided cloud
    # service
    node_list = self.list_nodes(
        ex_cloud_service_name=ex_cloud_service_name
    )

    if ex_network_config is None:
        network_config = ConfigurationSet()
    else:
        network_config = ex_network_config
    network_config.configuration_set_type = 'NetworkConfiguration'

    # Base64 encode custom data if provided
    if ex_custom_data:
        ex_custom_data = self._encode_base64(data=ex_custom_data)

    # We do this because we need to pass a Configuration to the
    # method. This will be either Linux or Windows.
    #
    # Bug fix: WINDOWS_SERVER_REGEX is a *compiled* pattern, so the
    # second positional argument of .search() is a start position, not
    # a flags value. Passing re.I (== 2) silently skipped the first two
    # characters of image.id; case handling belongs in the compiled
    # pattern itself.
    if WINDOWS_SERVER_REGEX.search(image.id):
        machine_config = WindowsConfigurationSet(
            computer_name=name,
            admin_password=password,
            admin_user_name=ex_admin_user_id
        )
        machine_config.domain_join = None

        if not node_list or ex_new_deployment:
            # First Windows instance: use the standard RDP port.
            port = "3389"
        else:
            # Subsequent instances: pick a random public port that is
            # not already in use by an existing endpoint.
            port = random.randint(41952, 65535)
            endpoints = self._get_deployment(
                service_name=ex_cloud_service_name,
                deployment_slot=ex_deployment_slot
            )

            for instances in endpoints.role_instance_list:
                ports = [ep.public_port for ep in
                         instances.instance_endpoints]

                while port in ports:
                    port = random.randint(41952, 65535)

        endpoint = ConfigurationSetInputEndpoint(
            name='Remote Desktop',
            protocol='tcp',
            port=port,
            local_port='3389',
            load_balanced_endpoint_set_name=None,
            enable_direct_server_return=False
        )
    else:
        if not node_list or ex_new_deployment:
            # First Linux instance: use the standard SSH port.
            port = "22"
        else:
            port = random.randint(41952, 65535)
            endpoints = self._get_deployment(
                service_name=ex_cloud_service_name,
                deployment_slot=ex_deployment_slot
            )

            for instances in endpoints.role_instance_list:
                ports = []
                if instances.instance_endpoints is not None:
                    for ep in instances.instance_endpoints:
                        ports += [ep.public_port]

                while port in ports:
                    port = random.randint(41952, 65535)

        endpoint = ConfigurationSetInputEndpoint(
            name='SSH',
            protocol='tcp',
            port=port,
            local_port='22',
            load_balanced_endpoint_set_name=None,
            enable_direct_server_return=False
        )
        machine_config = LinuxConfigurationSet(
            name,
            ex_admin_user_id,
            password,
            False,
            ex_custom_data
        )

    network_config.input_endpoints.items.append(endpoint)

    _storage_location = self._get_cloud_service_location(
        service_name=ex_cloud_service_name
    )

    if ex_storage_service_name is None:
        # Derive a storage account name from the cloud service name;
        # storage account names only allow lowercase alphanumerics.
        ex_storage_service_name = ex_cloud_service_name
        ex_storage_service_name = re.sub(
            r'[\W_-]+',
            '',
            ex_storage_service_name.lower(),
            flags=re.UNICODE
        )

    if self._is_storage_service_unique(
            service_name=ex_storage_service_name):
        self._create_storage_account(
            service_name=ex_storage_service_name,
            location=_storage_location.service_location,
            is_affinity_group=_storage_location.is_affinity_group
        )

    # OK, bit annoying here. You must create a deployment before
    # you can create an instance; however, the deployment function
    # creates the first instance, but all subsequent instances
    # must be created using the add_role function.
    #
    # So, yeah, annoying.
    if not node_list or ex_new_deployment:
        # This is the first node in this cloud service.

        if not ex_deployment_name:
            ex_deployment_name = ex_cloud_service_name

        vm_image_id = None
        disk_config = None

        if image.extra.get('vm_image', False):
            vm_image_id = image.id
            #  network_config = None
        else:
            blob_url = "http://%s.blob.core.windows.net" % (
                ex_storage_service_name)

            # Azure's pattern in the UI.
            disk_name = "%s-%s-%s.vhd" % (
                ex_cloud_service_name,
                name,
                time.strftime("%Y-%m-%d")
            )

            media_link = "%s/vhds/%s" % (blob_url, disk_name)

            disk_config = OSVirtualHardDisk(image.id, media_link)

        response = self._perform_post(
            self._get_deployment_path_using_name(ex_cloud_service_name),
            AzureXmlSerializer.virtual_machine_deployment_to_xml(
                ex_deployment_name,
                ex_deployment_slot,
                name,
                name,
                machine_config,
                disk_config,
                'PersistentVMRole',
                network_config,
                None,
                None,
                size.id,
                ex_virtual_network_name,
                vm_image_id
            )
        )
        self.raise_for_response(response, 202)
        self._ex_complete_async_azure_operation(response)
    else:
        _deployment_name = self._get_deployment(
            service_name=ex_cloud_service_name,
            deployment_slot=ex_deployment_slot
        ).name

        vm_image_id = None
        disk_config = None

        if image.extra.get('vm_image', False):
            vm_image_id = image.id
            #  network_config = None
        else:
            blob_url = "http://%s.blob.core.windows.net" % (
                ex_storage_service_name
            )
            disk_name = "%s-%s-%s.vhd" % (
                ex_cloud_service_name,
                name,
                time.strftime("%Y-%m-%d")
            )
            media_link = "%s/vhds/%s" % (blob_url, disk_name)
            disk_config = OSVirtualHardDisk(image.id, media_link)

        path = self._get_role_path(ex_cloud_service_name,
                                   _deployment_name)
        body = AzureXmlSerializer.add_role_to_xml(
            name,  # role_name
            machine_config,  # system_config
            disk_config,  # os_virtual_hard_disk
            'PersistentVMRole',  # role_type
            network_config,  # network_config
            None,  # availability_set_name
            None,  # data_virtual_hard_disks
            vm_image_id,  # vm_image
            size.id  # role_size
        )

        response = self._perform_post(path, body)
        self.raise_for_response(response, 202)
        self._ex_complete_async_azure_operation(response)

    return Node(
        id=name,
        name=name,
        state=NodeState.PENDING,
        public_ips=[],
        private_ips=[],
        driver=self.connection.driver,
        extra={
            'ex_cloud_service_name': ex_cloud_service_name
        }
    )
def destroy_node(self, node, ex_cloud_service_name=None,
                 ex_deployment_slot="Production"):
    """
    Remove an Azure Virtual Machine.

    Only the instance is removed; the disk is left behind (use
    destroy_volume for that). Azure sometimes has an issue where it
    will hold onto a blob lease for an extended amount of time.

    :keyword ex_cloud_service_name: Required. Name of the Azure Cloud
                                    Service.
    :type ex_cloud_service_name: ``str``

    :keyword ex_deployment_slot: Optional: deployment slot name;
                                 defaults to production.
    :type ex_deployment_slot: ``str``
    """
    if not isinstance(node, Node):
        raise ValueError("A libcloud Node object is required.")

    if ex_cloud_service_name is None and node.extra is not None:
        ex_cloud_service_name = node.extra.get('ex_cloud_service_name')

    if not ex_cloud_service_name:
        raise ValueError("Unable to get ex_cloud_service_name from Node.")

    deployment = self._get_deployment(
        service_name=ex_cloud_service_name,
        deployment_slot=ex_deployment_slot
    )
    instance_count = len(deployment.role_instance_list)

    if instance_count > 1:
        # Other roles remain in the deployment: delete just this role.
        path = self._get_role_path(
            ex_cloud_service_name,
            deployment.name,
            node.id
        )
    else:
        # Last instance: delete the whole deployment and its media.
        path = self._get_deployment_path_using_name(
            ex_cloud_service_name,
            deployment.name
        ) + '?comp=media'

    self._perform_delete(path)
    return True
def ex_list_cloud_services(self):
    """Return all cloud services in the subscription."""
    path = self._get_hosted_service_path()
    return self._perform_get(path, HostedServices)
def ex_create_cloud_service(self, name, location, description=None,
                            extended_properties=None):
    """
    Create an azure cloud service.

    :param name: Name of the service to create
    :type name: ``str``

    :param location: Standard azure location string
    :type location: ``str``

    :param description: Optional description
    :type description: ``str``

    :param extended_properties: Optional extended_properties
    :type extended_properties: ``dict``

    :rtype: ``bool``
    """
    body = AzureXmlSerializer.create_hosted_service_to_xml(
        name,
        self._encode_base64(name),
        description,
        location,
        None,
        extended_properties
    )
    response = self._perform_cloud_service_create(
        self._get_hosted_service_path(),
        body
    )
    self.raise_for_response(response, 201)
    return True
def ex_destroy_cloud_service(self, name):
    """
    Delete an azure cloud service.

    :param name: Name of the cloud service to destroy.
    :type name: ``str``

    :rtype: ``bool``
    """
    path = self._get_hosted_service_path(name)
    response = self._perform_cloud_service_delete(path)
    self.raise_for_response(response, 200)
    return True
def ex_add_instance_endpoints(self, node, endpoints,
                              ex_deployment_slot="Production"):
    """Append *endpoints* to the node's existing instance endpoints."""
    all_endpoints = []
    for endpoint in node.extra['instance_endpoints']:
        all_endpoints.append({
            "name": endpoint.name,
            "protocol": endpoint.protocol,
            "port": endpoint.public_port,
            "local_port": endpoint.local_port,
        })
    all_endpoints.extend(endpoints)

    return self.ex_set_instance_endpoints(node, all_endpoints,
                                          ex_deployment_slot)
def ex_set_instance_endpoints(self, node, endpoints,
                              ex_deployment_slot="Production"):
    """
    Replace the node's input endpoints with *endpoints*.

    Each endpoint is a dict of ConfigurationSetInputEndpoint keyword
    arguments, for example::

        {
            'name': 'SSH',
            'protocol': 'tcp',
            'port': port,
            'local_port': '22'
        }
    """
    ex_cloud_service_name = node.extra['ex_cloud_service_name']
    vm_role_name = node.name

    network_config = ConfigurationSet()
    network_config.configuration_set_type = 'NetworkConfiguration'
    for endpoint_kwargs in endpoints:
        network_config.input_endpoints.items.append(
            ConfigurationSetInputEndpoint(**endpoint_kwargs)
        )

    deployment_name = self._get_deployment(
        service_name=ex_cloud_service_name,
        deployment_slot=ex_deployment_slot
    ).name

    # Only role_type and network_config are supplied; every other
    # field of the role is left untouched.
    body = AzureXmlSerializer.add_role_to_xml(
        None,  # role_name
        None,  # system_config
        None,  # os_virtual_hard_disk
        'PersistentVMRole',  # role_type
        network_config,  # network_config
        None,  # availability_set_name
        None,  # data_virtual_hard_disks
        None,  # vm_image
        None  # role_size
    )
    response = self._perform_put(
        self._get_role_path(
            ex_cloud_service_name,
            deployment_name,
            vm_role_name
        ),
        body
    )
    self.raise_for_response(response, 202)
def ex_create_storage_service(self, name, location,
                              description=None, affinity_group=None,
                              extended_properties=None):
    """
    Create an azure storage service.

    :param name: Name of the service to create
    :type name: ``str``

    :param location: Standard azure location string
    :type location: ``str``

    :param description: (Optional) Description of storage service.
    :type description: ``str``

    :param affinity_group: (Optional) Azure affinity group.
    :type affinity_group: ``str``

    :param extended_properties: (Optional) Additional configuration
                                options support by Azure.
    :type extended_properties: ``dict``

    :rtype: ``bool``
    """
    body = AzureXmlSerializer.create_storage_service_to_xml(
        service_name=name,
        label=self._encode_base64(name),
        description=description,
        location=location,
        affinity_group=affinity_group,
        extended_properties=extended_properties
    )
    response = self._perform_storage_service_create(
        self._get_storage_service_path(),
        body
    )
    self.raise_for_response(response, 202)
    return True
def ex_destroy_storage_service(self, name):
    """
    Destroy storage service. Storage service must not have any active
    blobs. Sometimes Azure likes to hold onto volumes after they are
    deleted for an inordinate amount of time, so sleep before calling
    this method after volume deletion.

    :param name: Name of storage service.
    :type name: ``str``

    :rtype: ``bool``
    """
    path = self._get_storage_service_path(name)
    response = self._perform_storage_service_delete(path)
    self.raise_for_response(response, 200)
    return True
"""
Functions not implemented
"""
def create_volume_snapshot(self):
    """Not supported by the Azure driver; always raises."""
    raise NotImplementedError(
        'You cannot create snapshots of Azure VMs at this time.'
    )
def attach_volume(self):
    """Not supported by the Azure driver; always raises."""
    raise NotImplementedError(
        'attach_volume is not supported at this time.'
    )
def create_volume(self):
    """Not supported by the Azure driver; always raises."""
    raise NotImplementedError(
        'create_volume is not supported at this time.'
    )
def detach_volume(self):
    """Not supported by the Azure driver; always raises."""
    raise NotImplementedError(
        'detach_volume is not supported at this time.'
    )
def destroy_volume(self):
    """Not supported by the Azure driver; always raises."""
    raise NotImplementedError(
        'destroy_volume is not supported at this time.'
    )
"""
Private Functions
"""
def _perform_cloud_service_create(self, path, data):
    """POST *data* to the cloud-service *path*; return the raw response."""
    req = AzureHTTPRequest()
    req.method = 'POST'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.body = data
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    return self._perform_request(req)
def _perform_cloud_service_delete(self, path):
    """DELETE the cloud-service *path*; return the raw response."""
    req = AzureHTTPRequest()
    req.method = 'DELETE'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    return self._perform_request(req)
def _perform_storage_service_create(self, path, data):
    """POST *data* to the storage-service *path*; return the raw response."""
    req = AzureHTTPRequest()
    req.method = 'POST'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.body = data
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    return self._perform_request(req)
def _perform_storage_service_delete(self, path):
    """DELETE the storage-service *path*; return the raw response."""
    req = AzureHTTPRequest()
    req.method = 'DELETE'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    return self._perform_request(req)
def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None):
    """
    Convert the data from a Azure response object into a Node
    """
    remote_desktop_port = ''
    ssh_port = ''
    public_ips = virtual_ips or []

    if data.instance_endpoints is not None:
        if len(data.instance_endpoints) >= 1:
            # Prefer the VIP reported on the first endpoint.
            public_ips = [data.instance_endpoints[0].vip]

        for ep in data.instance_endpoints:
            if ep.name == 'Remote Desktop':
                remote_desktop_port = ep.public_port
            elif ep.name == "SSH":
                ssh_port = ep.public_port

    extra = {
        'instance_endpoints': data.instance_endpoints,
        'remote_desktop_port': remote_desktop_port,
        'ssh_port': ssh_port,
        'power_state': data.power_state,
        'instance_size': data.instance_size,
        'ex_cloud_service_name': ex_cloud_service_name
    }
    return Node(
        id=data.role_name,
        name=data.role_name,
        state=self.NODE_STATE_MAP.get(
            data.instance_status,
            NodeState.UNKNOWN
        ),
        public_ips=public_ips,
        private_ips=[data.ip_address],
        driver=self.connection.driver,
        extra=extra
    )
def _to_location(self, data):
    """
    Convert the data from a Azure response object into a location
    """
    country = data.display_name
    # Collapse the display name to a coarse region label; the checks
    # run in order and the last match wins, exactly like the original
    # sequential ifs.
    for region in ("Asia", "Europe", "US", "Japan", "Brazil"):
        if region in data.display_name:
            country = region

    vm_role_sizes = data.compute_capabilities.virtual_machines_role_sizes

    return AzureNodeLocation(
        id=data.name,
        name=data.display_name,
        country=country,
        driver=self.connection.driver,
        available_services=data.available_services,
        virtual_machine_role_sizes=vm_role_sizes
    )
def _to_node_size(self, data):
    """
    Convert the AZURE_COMPUTE_INSTANCE_TYPES into NodeSize
    """
    extra = {
        'max_data_disks': data["max_data_disks"],
        'cores': data["cores"]
    }
    return NodeSize(
        id=data["id"],
        name=data["name"],
        ram=data["ram"],
        disk=data["disk"],
        bandwidth=data["bandwidth"],
        price=data["price"],
        driver=self.connection.driver,
        extra=extra
    )
def _to_image(self, data):
    """Convert an OS image response entry into a NodeImage."""
    extra = {
        'os': data.os,
        'category': data.category,
        'description': data.description,
        'location': data.location,
        'affinity_group': data.affinity_group,
        'media_link': data.media_link,
        'vm_image': False
    }
    return NodeImage(
        id=data.name,
        name=data.label,
        driver=self.connection.driver,
        extra=extra
    )
def _vm_to_image(self, data):
    """Convert a VM image response entry into a NodeImage."""
    extra = {
        'os': data.os_disk_configuration.os,
        'category': data.category,
        'location': data.location,
        'media_link': data.os_disk_configuration.media_link,
        'affinity_group': data.affinity_group,
        'deployment_name': data.deployment_name,
        'vm_image': True
    }
    return NodeImage(
        id=data.name,
        name=data.label,
        driver=self.connection.driver,
        extra=extra
    )
def _to_volume(self, volume, node):
    """
    Convert an Azure disk entry into a StorageVolume.

    When *node* is given, only a disk attached to that node is
    converted; any other disk yields ``None``.
    """
    extra = {
        'affinity_group': volume.affinity_group,
        'os': volume.os,
        'location': volume.location,
        'media_link': volume.media_link,
        'source_image_name': volume.source_image_name
    }

    attached = volume.attached_to
    role_name = getattr(attached, 'role_name', None)
    hosted_service_name = getattr(attached, 'hosted_service_name', None)
    deployment_name = getattr(attached, 'deployment_name', None)

    if role_name is not None:
        extra['role_name'] = role_name
    if hosted_service_name is not None:
        extra['hosted_service_name'] = hosted_service_name
    if deployment_name is not None:
        extra['deployment_name'] = deployment_name

    if node and not (role_name is not None and role_name == node.id):
        # Scoped to a node this disk is not attached to.
        return None

    return StorageVolume(
        id=volume.name,
        name=volume.name,
        size=int(volume.logical_disk_size_in_gb),
        driver=self.connection.driver,
        extra=extra
    )
def _get_deployment(self, **kwargs):
    """
    Fetch the Deployment object for kwargs['service_name'] in the
    slot kwargs['deployment_slot'].
    """
    service_name = kwargs['service_name']
    deployment_slot = kwargs['deployment_slot']

    path = self._get_deployment_path_using_slot(
        service_name,
        deployment_slot
    )
    response = self._perform_get(path, None)
    self.raise_for_response(response, 200)
    return self._parse_response(response, Deployment)
def _get_cloud_service_location(self, service_name=None):
    """
    Look up where a cloud service lives.

    Returns a ``service_location`` value whose first field indicates
    whether the location is an affinity group, or ``None`` when the
    service reports neither an affinity group nor a location.

    :raises ValueError: when service_name is missing.
    """
    if not service_name:
        raise ValueError("service_name is required.")

    res = self._perform_get(
        '%s?embed-detail=False' % (
            self._get_hosted_service_path(service_name)
        ),
        HostedService
    )

    _affinity_group = res.hosted_service_properties.affinity_group
    _cloud_service_location = res.hosted_service_properties.location

    # Bug fix: the original used "is not ''" -- an *identity* test
    # against a literal, which is implementation-dependent and a
    # SyntaxWarning on Python 3.8+. Use a value comparison.
    if _affinity_group is not None and _affinity_group != '':
        return self.service_location(True, _affinity_group)
    elif _cloud_service_location is not None:
        return self.service_location(False, _cloud_service_location)
    else:
        return None
def _is_storage_service_unique(self, service_name=None):
    """Return True when *service_name* is still available in Azure."""
    if not service_name:
        raise ValueError("service_name is required.")

    path = '%s/operations/isavailable/%s%s' % (
        self._get_storage_service_path(),
        _str(service_name),
        ''
    )
    availability = self._perform_get(path, AvailabilityResponse)
    self.raise_for_response(availability, 200)
    return availability.result
def _create_storage_account(self, **kwargs):
    """
    Create a storage account and wait for the async operation.

    Expected kwargs: ``service_name`` (str), ``location`` (str -- a
    location name, or an affinity-group name when
    ``is_affinity_group`` is True) and ``is_affinity_group`` (bool),
    which selects which positional slot of
    ``create_storage_service_input_to_xml`` receives ``location``.
    """
    if kwargs['is_affinity_group'] is True:
        # 'location' actually carries an affinity-group name here, so
        # it goes into the affinity-group position and the location
        # position is left empty.
        response = self._perform_post(
            self._get_storage_service_path(),
            AzureXmlSerializer.create_storage_service_input_to_xml(
                kwargs['service_name'],
                kwargs['service_name'],
                self._encode_base64(kwargs['service_name']),
                kwargs['location'],  # Affinity Group
                None,  # Location
                True,  # geo_replication_enabled
                None  # extended_properties
            )
        )
        self.raise_for_response(response, 202)
    else:
        response = self._perform_post(
            self._get_storage_service_path(),
            AzureXmlSerializer.create_storage_service_input_to_xml(
                kwargs['service_name'],
                kwargs['service_name'],
                self._encode_base64(kwargs['service_name']),
                None,  # Affinity Group
                kwargs['location'],  # Location
                True,  # geo_replication_enabled
                None  # extended_properties
            )
        )
        self.raise_for_response(response, 202)

    # We need to wait for this to be created before we can
    # create the storage container and the instance.
    self._ex_complete_async_azure_operation(
        response,
        "create_storage_account"
    )
def _get_operation_status(self, request_id):
    """Fetch the Operation status record for *request_id*."""
    path = '/' + self.subscription_id + '/operations/' + _str(request_id)
    return self._perform_get(path, Operation)
def _perform_get(self, path, response_type):
    """GET *path*; parse into *response_type* unless it is None."""
    req = AzureHTTPRequest()
    req.method = 'GET'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    response = self._perform_request(req)

    if response_type is None:
        return response
    return self._parse_response(response, response_type)
def _perform_post(self, path, body, response_type=None):
    """POST *body* (normalized to a string form) to *path*."""
    req = AzureHTTPRequest()
    req.method = 'POST'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.body = ensure_string(self._get_request_body(body))
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    return self._perform_request(req)
def _perform_put(self, path, body, response_type=None):
    """PUT *body* (normalized to a string form) to *path*."""
    req = AzureHTTPRequest()
    req.method = 'PUT'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.body = ensure_string(self._get_request_body(body))
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    return self._perform_request(req)
def _perform_delete(self, path):
    """DELETE *path*; Azure must answer 202 Accepted."""
    req = AzureHTTPRequest()
    req.method = 'DELETE'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    response = self._perform_request(req)
    self.raise_for_response(response, 202)
def _perform_request(self, request):
    """
    Dispatch an AzureHTTPRequest through the libcloud connection,
    transparently following Azure management redirects by retrying
    against the redirect target host.
    """
    try:
        return self.connection.request(
            action=request.path,
            data=request.body,
            headers=request.headers,
            method=request.method
        )
    except AzureRedirectException:
        e = sys.exc_info()[1]
        parsed_url = urlparse.urlparse(e.location)
        request.host = parsed_url.netloc
        return self._perform_request(request)
    except Exception:
        # Bug fix: the original did "except Exception as e: raise e",
        # which re-raises with a reset traceback; a bare raise keeps
        # the original traceback intact.
        raise
def _update_request_uri_query(self, request):
    """
    pulls the query string out of the URI and moves it into
    the query portion of the request object. If there are already
    query parameters on the request the parameters in the URI will
    appear after the existing parameters

    :return: tuple of the rebuilt (request.path, request.query).
    """
    if '?' in request.path:
        request.path, _, query_string = request.path.partition('?')
        if query_string:
            query_params = query_string.split('&')
            for query in query_params:
                if '=' in query:
                    name, _, value = query.partition('=')
                    request.query.append((name, value))

    request.path = url_quote(request.path, '/()$=\',')

    # add encoded queries to request.path.
    if request.query:
        request.path += '?'
        for name, value in request.query:
            if value is not None:
                # Each pair is appended with a trailing '&' ...
                request.path += '%s=%s%s' % (
                    name,
                    url_quote(value, '/()$=\','),
                    '&'
                )
        # ... and the final dangling separator is stripped here.
        request.path = request.path[:-1]

    return request.path, request.query
def _update_management_header(self, request):
"""
Add additional headers for management.
"""
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers['Content-Length'] = str(len(request.body))
# append additional headers base on the service
# request.headers.append(('x-ms-version', X_MS_VERSION))
# if it is not GET or HEAD request, must set content-type.
if request.method not in ['GET', 'HEAD']:
for key in request.headers:
if 'content-type' == key.lower():
break
else:
request.headers['Content-Type'] = 'application/xml'
return request.headers
def _parse_response(self, response, return_type):
    """
    Parse the HTTPResponse's body and fill all the data into a class
    of return_type.
    """
    return self._parse_response_body_from_xml_text(response, return_type)
def _parse_response_body_from_xml_text(self, response, return_type):
    """
    Parse the XML response body and hydrate a return_type instance.
    """
    doc = minidom.parseString(response.body)
    return_obj = return_type()
    for child in self._get_child_nodes(doc, return_type.__name__):
        self._fill_data_to_return_object(child, return_obj)
    # Note: We always explicitly assign status code to the custom
    # return type object
    return_obj.status = response.status
    return return_obj
def _get_child_nodes(self, node, tag_name):
return [childNode for childNode in node.getElementsByTagName(tag_name)
if childNode.parentNode == node]
def _fill_data_to_return_object(self, node, return_obj):
    """
    Populate *return_obj* in place from the DOM *node*.

    Each attribute of return_obj acts as a type marker that selects
    how the matching XML child is deserialized: _ListOf, ScalarListOf
    and _DictOf sentinels, nested WindowsAzureData instances, plain
    dicts, _Base64String fields, and plain scalars.
    """
    members = dict(vars(return_obj))
    for name, value in members.items():
        if isinstance(value, _ListOf):
            # Typed list: one hydrated object per XML element.
            setattr(
                return_obj,
                name,
                self._fill_list_of(
                    node,
                    value.list_type,
                    value.xml_element_name
                )
            )
        elif isinstance(value, ScalarListOf):
            # List of scalar values nested under a parent element.
            setattr(
                return_obj,
                name,
                self._fill_scalar_list_of(
                    node,
                    value.list_type,
                    self._get_serialization_name(name),
                    value.xml_element_name
                )
            )
        elif isinstance(value, _DictOf):
            # Key/value pairs expressed as <pair><key/><value/></pair>.
            setattr(
                return_obj,
                name,
                self._fill_dict_of(
                    node,
                    self._get_serialization_name(name),
                    value.pair_xml_element_name,
                    value.key_xml_element_name,
                    value.value_xml_element_name
                )
            )
        elif isinstance(value, WindowsAzureData):
            # Nested complex object: recurse into the child element.
            setattr(
                return_obj,
                name,
                self._fill_instance_child(node, name, value.__class__)
            )
        elif isinstance(value, dict):
            setattr(
                return_obj,
                name,
                self._fill_dict(
                    node,
                    self._get_serialization_name(name)
                )
            )
        elif isinstance(value, _Base64String):
            value = self._fill_data_minidom(node, name, '')
            if value is not None:
                value = self._decode_base64_to_text(value)
            # always set the attribute,
            # so we don't end up returning an object
            # with type _Base64String
            setattr(return_obj, name, value)
        else:
            # Plain scalar: coerce to the type of the existing value.
            value = self._fill_data_minidom(node, name, value)
            if value is not None:
                setattr(return_obj, name, value)
def _fill_list_of(self, xmldoc, element_type, xml_element_name):
    """Hydrate one element_type instance per matching child element."""
    result = []
    for child in self._get_child_nodes(xmldoc, xml_element_name):
        result.append(
            self._parse_response_body_from_xml_node(child, element_type)
        )
    return result
def _parse_response_body_from_xml_node(self, node, return_type):
    """
    parse the xml and fill all the data into a class of return_type
    """
    obj = return_type()
    self._fill_data_to_return_object(node, obj)
    return obj
def _fill_scalar_list_of(self,
                         xmldoc,
                         element_type,
                         parent_xml_element_name,
                         xml_element_name):
    """
    Extract a list of scalar values nested under a parent element;
    implicitly None when the parent element is absent.
    """
    parents = self._get_child_nodes(xmldoc, parent_xml_element_name)
    if parents:
        children = self._get_child_nodes(parents[0], xml_element_name)
        return [
            self._get_node_value(child, element_type)
            for child in children
        ]
def _get_node_value(self, xmlelement, data_type):
value = xmlelement.firstChild.nodeValue
if data_type is datetime:
return self._to_datetime(value)
elif data_type is bool:
return value.lower() != 'false'
else:
return data_type(value)
def _get_serialization_name(self, element_name):
    """
    Converts a Python name into a serializable name.
    """
    known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
    if known is not None:
        return known

    if element_name.startswith('x_ms_'):
        return element_name.replace('_', '-')
    if element_name.endswith('_id'):
        element_name = element_name.replace('_id', 'ID')
    for prefix in ('content_', 'last_modified', 'if_', 'cache_control'):
        if element_name.startswith(prefix):
            element_name = element_name.replace('_', '-_')

    parts = element_name.split('_')
    return ''.join(part.capitalize() for part in parts)
def _fill_dict_of(self, xmldoc, parent_xml_element_name,
                  pair_xml_element_name, key_xml_element_name,
                  value_xml_element_name):
    """Build a dict from <pair><key/><value/></pair>-style XML."""
    result = {}
    parents = self._get_child_nodes(xmldoc, parent_xml_element_name)
    if parents:
        pairs = self._get_child_nodes(parents[0], pair_xml_element_name)
        for pair in pairs:
            keys = self._get_child_nodes(pair, key_xml_element_name)
            values = self._get_child_nodes(pair, value_xml_element_name)
            if keys and values:
                key_text = keys[0].firstChild.nodeValue
                result[key_text] = values[0].firstChild.nodeValue
    return result
def _fill_instance_child(self, xmldoc, element_name, return_type):
    """
    Converts a child of the current dom element to the specified type.
    """
    matches = self._get_child_nodes(
        xmldoc,
        self._get_serialization_name(element_name)
    )
    if not matches:
        return None
    instance = return_type()
    self._fill_data_to_return_object(matches[0], instance)
    return instance
def _fill_dict(self, xmldoc, element_name):
    """Map an element's children to {tag: text}; None when absent."""
    matches = self._get_child_nodes(xmldoc, element_name)
    if not matches:
        return None
    result = {}
    for child in matches[0].childNodes:
        if child.firstChild:
            result[child.nodeName] = child.firstChild.nodeValue
    return result
def _encode_base64(self, data):
    """Base64-encode *data* (text or bytes) and return it as text."""
    raw = data.encode('utf-8') if isinstance(data, _unicode_type) else data
    return base64.b64encode(raw).decode('utf-8')
def _decode_base64_to_bytes(self, data):
    """Decode base64 *data* (text or bytes) to raw bytes."""
    raw = data.encode('utf-8') if isinstance(data, _unicode_type) else data
    return base64.b64decode(raw)
def _decode_base64_to_text(self, data):
    """Decode base64 *data* and return it as UTF-8 text."""
    return self._decode_base64_to_bytes(data).decode('utf-8')
def _fill_data_minidom(self, xmldoc, element_name, data_member):
    """
    Read the child element matching *element_name* and coerce its
    text to the type of *data_member* (None means raw text).
    """
    matches = self._get_child_nodes(
        xmldoc,
        self._get_serialization_name(element_name)
    )
    if not matches or not matches[0].childNodes:
        return None

    value = matches[0].firstChild.nodeValue
    if data_member is None:
        return value
    if isinstance(data_member, datetime):
        return self._to_datetime(value)
    if type(data_member) is bool:
        return value.lower() != 'false'
    if type(data_member) is str:
        return _real_unicode(value)
    return type(data_member)(value)
def _to_datetime(self, strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
def _get_request_body(self, request_body):
if request_body is None:
return b''
if isinstance(request_body, WindowsAzureData):
request_body = self._convert_class_to_xml(request_body)
if isinstance(request_body, bytes):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body
def _convert_class_to_xml(self, source, xml_prefix=True):
    """
    Serialize *source* (a WindowsAzureData tree) to an XML string.

    NOTE(review): ET.Element() with no tag argument raises TypeError
    on modern xml.etree implementations, so this path looks broken /
    vestigial -- confirm before relying on it.
    """
    root = ET.Element()
    doc = self._construct_element_tree(source, root)
    result = ensure_string(ET.tostring(doc, encoding='utf-8',
                                       method='xml'))
    return result
def _construct_element_tree(self, source, etree):
    """
    Recursively append XML elements for *source* onto *etree* and
    return *etree*.

    NOTE(review): for WindowsAzureData instances this appends an
    ET.Element(class_name) both before and after the field elements
    rather than nesting them inside it, and the no-arg ET.Element()
    call in the None branch raises TypeError on modern ElementTree.
    This helper looks vestigial; confirm before relying on it.
    """
    if source is None:
        return ET.Element()

    if isinstance(source, list):
        for value in source:
            etree.append(self._construct_element_tree(value, etree))

    elif isinstance(source, WindowsAzureData):
        class_name = source.__class__.__name__
        etree.append(ET.Element(class_name))

        for name, value in vars(source).items():
            if value is not None:
                if (isinstance(value, list) or
                        isinstance(value, WindowsAzureData)):
                    etree.append(
                        self._construct_element_tree(value, etree)
                    )
                else:
                    ele = ET.Element(self._get_serialization_name(name))
                    ele.text = xml_escape(str(value))
                    etree.append(ele)

        etree.append(ET.Element(class_name))
    return etree
def _parse_response_for_async_op(self, response):
if response is None:
return None
result = AsynchronousOperationResult()
if response.headers:
for name, value in response.headers.items():
if name.lower() == 'x-ms-request-id':
result.request_id = value
return result
def _get_deployment_path_using_name(self, service_name,
                                    deployment_name=None):
    """Path for deployments under a hosted service (optionally one)."""
    resource = (
        'services/hostedservices/' + _str(service_name) + '/deployments'
    )
    return self._get_path(resource, deployment_name)
def _get_path(self, resource, name):
    """Build '/<subscription>/<resource>[/<name>]'."""
    parts = ['/', self.subscription_id, '/', resource]
    if name is not None:
        parts.extend(['/', _str(name)])
    return ''.join(parts)
def _get_image_path(self, image_name=None):
    """Management API path for OS images."""
    resource = 'services/images'
    return self._get_path(resource, image_name)
def _get_vmimage_path(self, image_name=None):
return self._get_path('services/vmimages', image_name)
def _get_hosted_service_path(self, service_name=None):
return self._get_path('services/hostedservices', service_name)
def _get_deployment_path_using_slot(self, service_name, slot=None):
    """
    Path for a service's deployment slots, optionally one specific slot
    (e.g. ``production`` or ``staging``).
    """
    resource = 'services/hostedservices/%s/deploymentslots' % (
        _str(service_name),)
    return self._get_path(resource, slot)
def _get_disk_path(self, disk_name=None):
return self._get_path('services/disks', disk_name)
def _get_role_path(self, service_name, deployment_name, role_name=None):
    """
    Path for the roles of a deployment, optionally narrowed to one
    named role.
    """
    resource = 'services/hostedservices/%s/deployments/%s/roles' % (
        _str(service_name),
        deployment_name,
    )
    return self._get_path(resource, role_name)
def _get_storage_service_path(self, service_name=None):
return self._get_path('services/storageservices', service_name)
def _ex_complete_async_azure_operation(self, response=None,
                                       operation_type='create_node'):
    """
    Poll an asynchronous Azure operation until it leaves the
    ``InProgress`` state or a five-minute timeout elapses.

    :param response: HTTP response of the request that started the
        asynchronous operation; its ``x-ms-request-id`` header identifies
        the operation to poll.
    :param operation_type: Human-readable operation name, used only in
        the error message on failure.
    :raises LibcloudError: if Azure reports the operation ``Failed``.
    """
    async_result = self._parse_response_for_async_op(response)
    operation_status = self._get_operation_status(async_result.request_id)
    timeout = 60 * 5
    waittime = 0
    interval = 5
    while operation_status.status == "InProgress" and waittime < timeout:
        # BUG FIX: previously the AsynchronousOperationResult object
        # itself was passed here instead of its request-id string, so
        # every re-poll queried a bogus operation id.
        operation_status = self._get_operation_status(
            async_result.request_id)
        if operation_status.status == "Succeeded":
            break
        waittime += interval
        time.sleep(interval)
    if operation_status.status == 'Failed':
        raise LibcloudError(
            'Message: Async request for operation %s has failed' %
            operation_type,
            driver=self.connection.driver
        )
def raise_for_response(self, response, valid_response):
    """
    Raise LibcloudError unless *response* carries the expected HTTP
    status code *valid_response*; return None on success.
    """
    if response.status == valid_response:
        return
    message = 'Message: %s, Body: %s, Status code: %s' % (
        response.error, response.body, response.status)
    raise LibcloudError(message, driver=self)
"""
XML Serializer
Borrowed from the Azure SDK for Python which is licensed under Apache 2.0.
https://github.com/Azure/azure-sdk-for-python
"""
def _lower(text):
return text.lower()
class AzureXmlSerializer(object):
@staticmethod
def create_storage_service_input_to_xml(service_name,
description,
label,
affinity_group,
location,
geo_replication_enabled,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'CreateStorageServiceInput',
[
('ServiceName', service_name),
('Description', description),
('Label', label),
('AffinityGroup', affinity_group),
('Location', location),
('GeoReplicationEnabled', geo_replication_enabled, _lower)
],
extended_properties
)
@staticmethod
def update_storage_service_input_to_xml(description,
label,
geo_replication_enabled,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'UpdateStorageServiceInput',
[
('Description', description),
('Label', label, AzureNodeDriver._encode_base64),
('GeoReplicationEnabled', geo_replication_enabled, _lower)
],
extended_properties
)
@staticmethod
def regenerate_keys_to_xml(key_type):
return AzureXmlSerializer.doc_from_data(
'RegenerateKeys',
[('KeyType', key_type)]
)
@staticmethod
def update_hosted_service_to_xml(label, description, extended_properties):
return AzureXmlSerializer.doc_from_data(
'UpdateHostedService',
[
('Label', label, AzureNodeDriver._encode_base64),
('Description', description)
],
extended_properties
)
@staticmethod
def create_hosted_service_to_xml(service_name,
                                 label,
                                 description,
                                 location,
                                 affinity_group=None,
                                 extended_properties=None):
    """
    Serialize a CreateHostedService request body.

    A hosted service is placed either in an affinity group or in an
    explicit location — exactly one of the two is emitted, with the
    affinity group taking precedence when truthy.
    """
    fields = [
        ('ServiceName', service_name),
        ('Label', label),
        ('Description', description),
    ]
    if affinity_group:
        fields.append(('AffinityGroup', affinity_group))
    else:
        fields.append(('Location', location))
    return AzureXmlSerializer.doc_from_data(
        'CreateHostedService',
        fields,
        extended_properties
    )
@staticmethod
def create_storage_service_to_xml(service_name,
label,
description,
location,
affinity_group,
extended_properties=None):
return AzureXmlSerializer.doc_from_data(
'CreateStorageServiceInput',
[
('ServiceName', service_name),
('Label', label),
('Description', description),
('Location', location),
('AffinityGroup', affinity_group)
],
extended_properties
)
@staticmethod
def create_deployment_to_xml(name,
package_url,
label,
configuration,
start_deployment,
treat_warnings_as_error,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'CreateDeployment',
[
('Name', name),
('PackageUrl', package_url),
('Label', label, AzureNodeDriver._encode_base64),
('Configuration', configuration),
('StartDeployment', start_deployment, _lower),
('TreatWarningsAsError', treat_warnings_as_error, _lower)
],
extended_properties
)
@staticmethod
def swap_deployment_to_xml(production, source_deployment):
return AzureXmlSerializer.doc_from_data(
'Swap',
[
('Production', production),
('SourceDeployment', source_deployment)
]
)
@staticmethod
def update_deployment_status_to_xml(status):
return AzureXmlSerializer.doc_from_data(
'UpdateDeploymentStatus',
[('Status', status)]
)
@staticmethod
def change_deployment_to_xml(configuration,
treat_warnings_as_error,
mode,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'ChangeConfiguration',
[
('Configuration', configuration),
('TreatWarningsAsError', treat_warnings_as_error, _lower),
('Mode', mode)
],
extended_properties
)
@staticmethod
def upgrade_deployment_to_xml(mode,
package_url,
configuration,
label,
role_to_upgrade,
force,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'UpgradeDeployment',
[
('Mode', mode),
('PackageUrl', package_url),
('Configuration', configuration),
('Label', label, AzureNodeDriver._encode_base64),
('RoleToUpgrade', role_to_upgrade),
('Force', force, _lower)
],
extended_properties
)
@staticmethod
def rollback_upgrade_to_xml(mode, force):
return AzureXmlSerializer.doc_from_data(
'RollbackUpdateOrUpgrade',
[
('Mode', mode),
('Force', force, _lower)
]
)
@staticmethod
def walk_upgrade_domain_to_xml(upgrade_domain):
return AzureXmlSerializer.doc_from_data(
'WalkUpgradeDomain',
[('UpgradeDomain', upgrade_domain)]
)
@staticmethod
def certificate_file_to_xml(data, certificate_format, password):
return AzureXmlSerializer.doc_from_data(
'CertificateFile',
[
('Data', data),
('CertificateFormat', certificate_format),
('Password', password)
]
)
@staticmethod
def create_affinity_group_to_xml(name, label, description, location):
return AzureXmlSerializer.doc_from_data(
'CreateAffinityGroup',
[
('Name', name),
('Label', label, AzureNodeDriver._encode_base64),
('Description', description),
('Location', location)
]
)
@staticmethod
def update_affinity_group_to_xml(label, description):
return AzureXmlSerializer.doc_from_data(
'UpdateAffinityGroup',
[
('Label', label, AzureNodeDriver._encode_base64),
('Description', description)
]
)
@staticmethod
def subscription_certificate_to_xml(public_key, thumbprint, data):
return AzureXmlSerializer.doc_from_data(
'SubscriptionCertificate',
[
('SubscriptionCertificatePublicKey', public_key),
('SubscriptionCertificateThumbprint', thumbprint),
('SubscriptionCertificateData', data)
]
)
@staticmethod
def os_image_to_xml(label, media_link, name, os):
return AzureXmlSerializer.doc_from_data(
'OSImage',
[
('Label', label),
('MediaLink', media_link),
('Name', name),
('OS', os)
]
)
@staticmethod
def data_virtual_hard_disk_to_xml(host_caching,
disk_label,
disk_name,
lun,
logical_disk_size_in_gb,
media_link,
source_media_link):
return AzureXmlSerializer.doc_from_data(
'DataVirtualHardDisk',
[
('HostCaching', host_caching),
('DiskLabel', disk_label),
('DiskName', disk_name),
('Lun', lun),
('LogicalDiskSizeInGB', logical_disk_size_in_gb),
('MediaLink', media_link),
('SourceMediaLink', source_media_link)
]
)
@staticmethod
def disk_to_xml(has_operating_system, label, media_link, name, os):
return AzureXmlSerializer.doc_from_data(
'Disk',
[
('HasOperatingSystem', has_operating_system, _lower),
('Label', label),
('MediaLink', media_link),
('Name', name),
('OS', os)
]
)
@staticmethod
def restart_role_operation_to_xml():
    """Serialize a RestartRoleOperation request body to an XML string."""
    op = ET.Element("OperationType")
    op.text = "RestartRoleOperation"
    doc = AzureXmlSerializer.doc_from_xml('RestartRoleOperation', op)
    return ensure_string(ET.tostring(doc, encoding='utf-8'))
@staticmethod
def shutdown_role_operation_to_xml():
    """Serialize a ShutdownRoleOperation request body to an XML string."""
    op = ET.Element("OperationType")
    op.text = "ShutdownRoleOperation"
    doc = AzureXmlSerializer.doc_from_xml('ShutdownRoleOperation', op)
    return ensure_string(ET.tostring(doc, encoding='utf-8'))
@staticmethod
def start_role_operation_to_xml():
    """Serialize a StartRoleOperation request body to an XML string."""
    op = ET.Element("OperationType")
    op.text = "StartRoleOperation"
    doc = AzureXmlSerializer.doc_from_xml('StartRoleOperation', op)
    return ensure_string(ET.tostring(doc, encoding='utf-8'))
@staticmethod
def windows_configuration_to_xml(configuration, xml):
    """
    Append the WindowsProvisioningConfiguration elements describing
    *configuration* onto the *xml* element and return *xml*.

    Bug fixes vs. the previous revision, both in the DomainJoin branch:
    ``ET.xml(...)`` (not an ElementTree element constructor — raises at
    runtime) replaced with ``ET.Element(...)``, and the misspelled
    ``appemnd`` replaced with ``append``.
    """
    AzureXmlSerializer.data_to_xml(
        [('ConfigurationSetType', configuration.configuration_set_type)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('ComputerName', configuration.computer_name)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('AdminPassword', configuration.admin_password)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [
            (
                'ResetPasswordOnFirstLogon',
                configuration.reset_password_on_first_logon,
                _lower
            )
        ],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [
            (
                'EnableAutomaticUpdates',
                configuration.enable_automatic_updates,
                _lower
            )
        ],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('TimeZone', configuration.time_zone)],
        xml
    )
    if configuration.domain_join is not None:
        # FIX: was ET.xml(...) / domain.appemnd(creds) — both invalid.
        domain = ET.Element("DomainJoin")
        creds = ET.Element("Credentials")
        domain.append(creds)
        xml.append(domain)
        AzureXmlSerializer.data_to_xml(
            [('Domain', configuration.domain_join.credentials.domain)],
            creds
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'Username',
                    configuration.domain_join.credentials.username
                )
            ],
            creds
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'Password',
                    configuration.domain_join.credentials.password
                )
            ],
            creds
        )
        AzureXmlSerializer.data_to_xml(
            [('JoinDomain', configuration.domain_join.join_domain)],
            domain
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'MachineObjectOU',
                    configuration.domain_join.machine_object_ou
                )
            ],
            domain
        )
    if configuration.stored_certificate_settings is not None:
        cert_settings = ET.Element("StoredCertificateSettings")
        xml.append(cert_settings)
        for cert in configuration.stored_certificate_settings:
            cert_setting = ET.Element("CertificateSetting")
            cert_settings.append(cert_setting)
            # data_to_xml with no target returns the new element.
            cert_setting.append(AzureXmlSerializer.data_to_xml(
                [('StoreLocation', cert.store_location)])
            )
            AzureXmlSerializer.data_to_xml(
                [('StoreName', cert.store_name)],
                cert_setting
            )
            AzureXmlSerializer.data_to_xml(
                [('Thumbprint', cert.thumbprint)],
                cert_setting
            )
    AzureXmlSerializer.data_to_xml(
        [('AdminUsername', configuration.admin_user_name)],
        xml
    )
    return xml
@staticmethod
def linux_configuration_to_xml(configuration, xml):
AzureXmlSerializer.data_to_xml(
[('ConfigurationSetType', configuration.configuration_set_type)],
xml
)
AzureXmlSerializer.data_to_xml(
[('HostName', configuration.host_name)],
xml
)
AzureXmlSerializer.data_to_xml(
[('UserName', configuration.user_name)],
xml
)
AzureXmlSerializer.data_to_xml(
[('UserPassword', configuration.user_password)],
xml
)
AzureXmlSerializer.data_to_xml(
[
(
'DisableSshPasswordAuthentication',
configuration.disable_ssh_password_authentication,
_lower
)
],
xml
)
if configuration.ssh is not None:
ssh = ET.Element("SSH")
pkeys = ET.Element("PublicKeys")
kpairs = ET.Element("KeyPairs")
ssh.append(pkeys)
ssh.append(kpairs)
xml.append(ssh)
for key in configuration.ssh.public_keys:
pkey = ET.Element("PublicKey")
pkeys.append(pkey)
AzureXmlSerializer.data_to_xml(
[('Fingerprint', key.fingerprint)],
pkey
)
AzureXmlSerializer.data_to_xml([('Path', key.path)], pkey)
for key in configuration.ssh.key_pairs:
kpair = ET.Element("KeyPair")
kpairs.append(kpair)
AzureXmlSerializer.data_to_xml(
[('Fingerprint', key.fingerprint)],
kpair
)
AzureXmlSerializer.data_to_xml([('Path', key.path)], kpair)
if configuration.custom_data is not None:
AzureXmlSerializer.data_to_xml(
[('CustomData', configuration.custom_data)],
xml
)
return xml
@staticmethod
def network_configuration_to_xml(configuration, xml):
AzureXmlSerializer.data_to_xml(
[('ConfigurationSetType', configuration.configuration_set_type)],
xml
)
input_endpoints = ET.Element("InputEndpoints")
xml.append(input_endpoints)
for endpoint in configuration.input_endpoints:
input_endpoint = ET.Element("InputEndpoint")
input_endpoints.append(input_endpoint)
AzureXmlSerializer.data_to_xml(
[
(
'LoadBalancedEndpointSetName',
endpoint.load_balanced_endpoint_set_name
)
],
input_endpoint
)
AzureXmlSerializer.data_to_xml(
[('LocalPort', endpoint.local_port)],
input_endpoint
)
AzureXmlSerializer.data_to_xml(
[('Name', endpoint.name)],
input_endpoint
)
AzureXmlSerializer.data_to_xml(
[('Port', endpoint.port)],
input_endpoint
)
if (endpoint.load_balancer_probe.path or
endpoint.load_balancer_probe.port or
endpoint.load_balancer_probe.protocol):
load_balancer_probe = ET.Element("LoadBalancerProbe")
input_endpoint.append(load_balancer_probe)
AzureXmlSerializer.data_to_xml(
[('Path', endpoint.load_balancer_probe.path)],
load_balancer_probe
)
AzureXmlSerializer.data_to_xml(
[('Port', endpoint.load_balancer_probe.port)],
load_balancer_probe
)
AzureXmlSerializer.data_to_xml(
[('Protocol', endpoint.load_balancer_probe.protocol)],
load_balancer_probe
)
AzureXmlSerializer.data_to_xml(
[('Protocol', endpoint.protocol)],
input_endpoint
)
AzureXmlSerializer.data_to_xml(
[
(
'EnableDirectServerReturn',
endpoint.enable_direct_server_return,
_lower
)
],
input_endpoint
)
subnet_names = ET.Element("SubnetNames")
xml.append(subnet_names)
for name in configuration.subnet_names:
AzureXmlSerializer.data_to_xml(
[('SubnetName', name)],
subnet_names
)
return xml
@staticmethod
def role_to_xml(availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
system_configuration_set,
xml):
AzureXmlSerializer.data_to_xml([('RoleName', role_name)], xml)
AzureXmlSerializer.data_to_xml([('RoleType', role_type)], xml)
config_sets = ET.Element("ConfigurationSets")
xml.append(config_sets)
if system_configuration_set is not None:
config_set = ET.Element("ConfigurationSet")
config_sets.append(config_set)
if isinstance(system_configuration_set, WindowsConfigurationSet):
AzureXmlSerializer.windows_configuration_to_xml(
system_configuration_set,
config_set
)
elif isinstance(system_configuration_set, LinuxConfigurationSet):
AzureXmlSerializer.linux_configuration_to_xml(
system_configuration_set,
config_set
)
if network_configuration_set is not None:
config_set = ET.Element("ConfigurationSet")
config_sets.append(config_set)
AzureXmlSerializer.network_configuration_to_xml(
network_configuration_set,
config_set
)
if availability_set_name is not None:
AzureXmlSerializer.data_to_xml(
[('AvailabilitySetName', availability_set_name)],
xml
)
if data_virtual_hard_disks is not None:
vhds = ET.Element("DataVirtualHardDisks")
xml.append(vhds)
for hd in data_virtual_hard_disks:
vhd = ET.Element("DataVirtualHardDisk")
vhds.append(vhd)
AzureXmlSerializer.data_to_xml(
[('HostCaching', hd.host_caching)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('DiskLabel', hd.disk_label)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('DiskName', hd.disk_name)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('Lun', hd.lun)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('MediaLink', hd.media_link)],
vhd
)
if os_virtual_hard_disk is not None:
hd = ET.Element("OSVirtualHardDisk")
xml.append(hd)
AzureXmlSerializer.data_to_xml(
[('HostCaching', os_virtual_hard_disk.host_caching)],
hd
)
AzureXmlSerializer.data_to_xml(
[('DiskLabel', os_virtual_hard_disk.disk_label)],
hd
)
AzureXmlSerializer.data_to_xml(
[('DiskName', os_virtual_hard_disk.disk_name)],
hd
)
AzureXmlSerializer.data_to_xml(
[('MediaLink', os_virtual_hard_disk.media_link)],
hd
)
AzureXmlSerializer.data_to_xml(
[('SourceImageName', os_virtual_hard_disk.source_image_name)],
hd
)
if vm_image_name is not None:
AzureXmlSerializer.data_to_xml(
[('VMImageName', vm_image_name)],
xml
)
if role_size is not None:
AzureXmlSerializer.data_to_xml([('RoleSize', role_size)], xml)
return xml
@staticmethod
def add_role_to_xml(role_name,
system_configuration_set,
os_virtual_hard_disk,
role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks,
vm_image_name,
role_size):
doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
xml = AzureXmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
system_configuration_set,
doc
)
result = ensure_string(ET.tostring(xml, encoding='utf-8'))
return result
@staticmethod
def update_role_to_xml(role_name,
os_virtual_hard_disk,
role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks,
vm_image_name,
role_size):
doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
AzureXmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
None,
doc
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def capture_role_to_xml(post_capture_action,
target_image_name,
target_image_label,
provisioning_configuration):
xml = AzureXmlSerializer.data_to_xml(
[('OperationType', 'CaptureRoleOperation')]
)
AzureXmlSerializer.data_to_xml(
[('PostCaptureAction', post_capture_action)],
xml
)
if provisioning_configuration is not None:
provisioning_config = ET.Element("ProvisioningConfiguration")
xml.append(provisioning_config)
if isinstance(provisioning_configuration, WindowsConfigurationSet):
AzureXmlSerializer.windows_configuration_to_xml(
provisioning_configuration,
provisioning_config
)
elif isinstance(provisioning_configuration, LinuxConfigurationSet):
AzureXmlSerializer.linux_configuration_to_xml(
provisioning_configuration,
provisioning_config
)
AzureXmlSerializer.data_to_xml(
[('TargetImageLabel', target_image_label)],
xml
)
AzureXmlSerializer.data_to_xml(
[('TargetImageName', target_image_name)],
xml
)
doc = AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def virtual_machine_deployment_to_xml(deployment_name,
deployment_slot,
label,
role_name,
system_configuration_set,
os_virtual_hard_disk,
role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks,
role_size,
virtual_network_name,
vm_image_name):
doc = AzureXmlSerializer.doc_from_xml('Deployment')
AzureXmlSerializer.data_to_xml([('Name', deployment_name)], doc)
AzureXmlSerializer.data_to_xml(
[('DeploymentSlot', deployment_slot)],
doc
)
AzureXmlSerializer.data_to_xml([('Label', label)], doc)
role_list = ET.Element("RoleList")
role = ET.Element("Role")
role_list.append(role)
doc.append(role_list)
AzureXmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
system_configuration_set,
role
)
if virtual_network_name is not None:
doc.append(
AzureXmlSerializer.data_to_xml(
[('VirtualNetworkName', virtual_network_name)]
)
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def data_to_xml(data, xml=None):
"""
Creates an xml fragment from the specified data.
data: Array of tuples, where first: xml element name
second: xml element text
third: conversion function
"""
for element in data:
name = element[0]
val = element[1]
if len(element) > 2:
converter = element[2]
else:
converter = None
if val is not None:
if converter is not None:
text = _str(converter(_str(val)))
else:
text = _str(val)
entry = ET.Element(name)
entry.text = text
if xml is not None:
xml.append(entry)
else:
return entry
return xml
@staticmethod
def doc_from_xml(document_element_name, inner_xml=None):
"""
Wraps the specified xml in an xml root element with default azure
namespaces
"""
# Note: Namespaces don't work consistency in Python 2 and 3.
"""
nsmap = {
None: "http://www.w3.org/2001/XMLSchema-instance",
"i": "http://www.w3.org/2001/XMLSchema-instance"
}
xml.attrib["xmlns:i"] = "http://www.w3.org/2001/XMLSchema-instance"
xml.attrib["xmlns"] = "http://schemas.microsoft.com/windowsazure"
"""
xml = ET.Element(document_element_name)
xml.set("xmlns", "http://schemas.microsoft.com/windowsazure")
if inner_xml is not None:
xml.append(inner_xml)
return xml
@staticmethod
def doc_from_data(document_element_name, data, extended_properties=None):
    """
    Build a complete request document from ``(name, value[, converter])``
    tuples and serialize it to an XML string.
    """
    doc = AzureXmlSerializer.doc_from_xml(document_element_name)
    AzureXmlSerializer.data_to_xml(data, doc)
    if extended_properties is not None:
        fragment = \
            AzureXmlSerializer.extended_properties_dict_to_xml_fragment(
                extended_properties)
        doc.append(fragment)
    return ensure_string(ET.tostring(doc, encoding='utf-8'))
@staticmethod
def extended_properties_dict_to_xml_fragment(extended_properties):
if extended_properties is not None and len(extended_properties) > 0:
xml = ET.Element("ExtendedProperties")
for key, val in extended_properties.items():
extended_property = ET.Element("ExtendedProperty")
name = ET.Element("Name")
name.text = _str(key)
value = ET.Element("Value")
value.text = _str(val)
extended_property.append(name)
extended_property.append(value)
xml.append(extended_property)
return xml
"""
Data Classes
Borrowed from the Azure SDK for Python.
"""
class WindowsAzureData(object):
    """
    Marker base class for Azure data objects.

    Carries no behaviour of its own; isinstance() checks against it are
    what drive the XML serialization code.
    """
    pass
class WindowsAzureDataTypedList(WindowsAzureData):
    """
    Base class for Azure data objects that wrap a homogeneous list.

    Subclasses set ``list_type`` (the element class) and optionally
    ``xml_element_name`` (the per-element XML tag; when None, _ListOf
    falls back to the element class name).  Iteration, length and
    indexing are delegated to the underlying ``items`` list.
    """

    # Element class stored in this list; set by subclasses.
    list_type = None
    # Per-element XML tag; None means "use list_type.__name__".
    xml_element_name = None

    def __init__(self):
        self.items = _ListOf(self.list_type, self.xml_element_name)

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items[index]
class OSVirtualHardDisk(WindowsAzureData):
def __init__(self, source_image_name=None, media_link=None,
host_caching=None, disk_label=None, disk_name=None):
self.source_image_name = source_image_name
self.media_link = media_link
self.host_caching = host_caching
self.disk_label = disk_label
self.disk_name = disk_name
self.os = '' # undocumented, not used when adding a role
class LinuxConfigurationSet(WindowsAzureData):
def __init__(self,
host_name=None,
user_name=None,
user_password=None,
disable_ssh_password_authentication=None,
custom_data=None):
self.configuration_set_type = 'LinuxProvisioningConfiguration'
self.host_name = host_name
self.user_name = user_name
self.user_password = user_password
self.disable_ssh_password_authentication = \
disable_ssh_password_authentication
self.ssh = SSH()
self.custom_data = custom_data
class WindowsConfigurationSet(WindowsAzureData):
def __init__(self,
computer_name=None,
admin_password=None,
reset_password_on_first_logon=None,
enable_automatic_updates=None,
time_zone=None,
admin_user_name=None):
self.configuration_set_type = 'WindowsProvisioningConfiguration'
self.computer_name = computer_name
self.admin_password = admin_password
self.reset_password_on_first_logon = reset_password_on_first_logon
self.enable_automatic_updates = enable_automatic_updates
self.time_zone = time_zone
self.admin_user_name = admin_user_name
self.domain_join = DomainJoin()
self.stored_certificate_settings = StoredCertificateSettings()
class DomainJoin(WindowsAzureData):
def __init__(self):
self.credentials = Credentials()
self.join_domain = ''
self.machine_object_ou = ''
class Credentials(WindowsAzureData):
def __init__(self):
self.domain = ''
self.username = ''
self.password = ''
class CertificateSetting(WindowsAzureData):
"""
Initializes a certificate setting.
thumbprint:
Specifies the thumbprint of the certificate to be provisioned. The
thumbprint must specify an existing service certificate.
store_name:
Specifies the name of the certificate store from which retrieve
certificate.
store_location:
Specifies the target certificate store location on the virtual machine
The only supported value is LocalMachine.
"""
def __init__(self, thumbprint='', store_name='', store_location=''):
self.thumbprint = thumbprint
self.store_name = store_name
self.store_location = store_location
class StoredCertificateSettings(WindowsAzureDataTypedList):
list_type = CertificateSetting
_repr_attributes = [
'items'
]
class SSH(WindowsAzureData):
def __init__(self):
self.public_keys = PublicKeys()
self.key_pairs = KeyPairs()
class PublicKey(WindowsAzureData):
def __init__(self, fingerprint='', path=''):
self.fingerprint = fingerprint
self.path = path
class PublicKeys(WindowsAzureDataTypedList):
list_type = PublicKey
_repr_attributes = [
'items'
]
class AzureKeyPair(WindowsAzureData):
def __init__(self, fingerprint='', path=''):
self.fingerprint = fingerprint
self.path = path
class KeyPairs(WindowsAzureDataTypedList):
list_type = AzureKeyPair
_repr_attributes = [
'items'
]
class LoadBalancerProbe(WindowsAzureData):
def __init__(self):
self.path = ''
self.port = ''
self.protocol = ''
class ConfigurationSet(WindowsAzureData):
def __init__(self):
self.configuration_set_type = ''
self.role_type = ''
self.input_endpoints = ConfigurationSetInputEndpoints()
self.subnet_names = ScalarListOf(str, 'SubnetName')
class ConfigurationSets(WindowsAzureDataTypedList):
list_type = ConfigurationSet
_repr_attributes = [
'items'
]
class ConfigurationSetInputEndpoint(WindowsAzureData):
def __init__(self,
name='',
protocol='',
port='',
local_port='',
load_balanced_endpoint_set_name='',
enable_direct_server_return=False):
self.enable_direct_server_return = enable_direct_server_return
self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name
self.local_port = local_port
self.name = name
self.port = port
self.load_balancer_probe = LoadBalancerProbe()
self.protocol = protocol
class ConfigurationSetInputEndpoints(WindowsAzureDataTypedList):
list_type = ConfigurationSetInputEndpoint
xml_element_name = 'InputEndpoint'
_repr_attributes = [
'items'
]
class Location(WindowsAzureData):
def __init__(self):
self.name = ''
self.display_name = ''
self.available_services = ScalarListOf(str, 'AvailableService')
self.compute_capabilities = ComputeCapability()
class Locations(WindowsAzureDataTypedList):
list_type = Location
_repr_attributes = [
'items'
]
class ComputeCapability(WindowsAzureData):
def __init__(self):
self.virtual_machines_role_sizes = ScalarListOf(str, 'RoleSize')
class VirtualMachinesRoleSizes(WindowsAzureData):
def __init__(self):
self.role_size = ScalarListOf(str, 'RoleSize')
class OSImage(WindowsAzureData):
def __init__(self):
self.affinity_group = ''
self.category = ''
self.location = ''
self.logical_size_in_gb = 0
self.label = ''
self.media_link = ''
self.name = ''
self.os = ''
self.eula = ''
self.description = ''
class Images(WindowsAzureDataTypedList):
list_type = OSImage
_repr_attributes = [
'items'
]
class VMImage(WindowsAzureData):
def __init__(self):
self.name = ''
self.label = ''
self.category = ''
self.os_disk_configuration = OSDiskConfiguration()
self.service_name = ''
self.deployment_name = ''
self.role_name = ''
self.location = ''
self.affinity_group = ''
class VMImages(WindowsAzureDataTypedList):
list_type = VMImage
_repr_attributes = [
'items'
]
class VirtualIP(WindowsAzureData):
def __init__(self):
self.address = ''
self.is_dns_programmed = ''
self.name = ''
class VirtualIPs(WindowsAzureDataTypedList):
list_type = VirtualIP
_repr_attributes = [
'items'
]
class HostedService(WindowsAzureData, ReprMixin):
_repr_attributes = [
'service_name',
'url'
]
def __init__(self):
self.url = ''
self.service_name = ''
self.hosted_service_properties = HostedServiceProperties()
self.deployments = Deployments()
class HostedServices(WindowsAzureDataTypedList, ReprMixin):
list_type = HostedService
_repr_attributes = [
'items'
]
class HostedServiceProperties(WindowsAzureData):
def __init__(self):
self.description = ''
self.location = ''
self.affinity_group = ''
self.label = _Base64String()
self.status = ''
self.date_created = ''
self.date_last_modified = ''
self.extended_properties = _DictOf(
'ExtendedProperty',
'Name',
'Value'
)
class Deployment(WindowsAzureData):
def __init__(self):
self.name = ''
self.deployment_slot = ''
self.private_id = ''
self.status = ''
self.label = _Base64String()
self.url = ''
self.configuration = _Base64String()
self.role_instance_list = RoleInstanceList()
self.upgrade_status = UpgradeStatus()
self.upgrade_domain_count = ''
self.role_list = RoleList()
self.sdk_version = ''
self.input_endpoint_list = InputEndpoints()
self.locked = False
self.rollback_allowed = False
self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()
self.created_time = ''
self.last_modified_time = ''
self.extended_properties = _DictOf(
'ExtendedProperty',
'Name',
'Value'
)
self.virtual_ips = VirtualIPs()
class Deployments(WindowsAzureDataTypedList):
list_type = Deployment
_repr_attributes = [
'items'
]
class UpgradeStatus(WindowsAzureData):
def __init__(self):
self.upgrade_type = ''
self.current_upgrade_domain_state = ''
self.current_upgrade_domain = ''
class RoleInstance(WindowsAzureData):
def __init__(self):
self.role_name = ''
self.instance_name = ''
self.instance_status = ''
self.instance_upgrade_domain = 0
self.instance_fault_domain = 0
self.instance_size = ''
self.instance_state_details = ''
self.instance_error_code = ''
self.ip_address = ''
self.instance_endpoints = InstanceEndpoints()
self.power_state = ''
self.fqdn = ''
self.host_name = ''
class RoleInstanceList(WindowsAzureDataTypedList):
list_type = RoleInstance
_repr_attributes = [
'items'
]
class InstanceEndpoint(WindowsAzureData):
def __init__(self):
self.name = ''
self.vip = ''
self.public_port = ''
self.local_port = ''
self.protocol = ''
class InstanceEndpoints(WindowsAzureDataTypedList):
list_type = InstanceEndpoint
_repr_attributes = [
'items'
]
class InputEndpoint(WindowsAzureData):
def __init__(self):
self.role_name = ''
self.vip = ''
self.port = ''
class InputEndpoints(WindowsAzureDataTypedList):
list_type = InputEndpoint
_repr_attributes = [
'items'
]
class Role(WindowsAzureData):
def __init__(self):
self.role_name = ''
self.os_version = ''
class RoleList(WindowsAzureDataTypedList):
list_type = Role
_repr_attributes = [
'items'
]
class PersistentVMDowntimeInfo(WindowsAzureData):
def __init__(self):
self.start_time = ''
self.end_time = ''
self.status = ''
class AsynchronousOperationResult(WindowsAzureData):
    """
    Holds the ``x-ms-request-id`` of an asynchronous management
    operation, used to poll the operation's status.
    """

    def __init__(self, request_id=None):
        self.request_id = request_id
class Disk(WindowsAzureData):
def __init__(self):
self.affinity_group = ''
self.attached_to = AttachedTo()
self.has_operating_system = ''
self.is_corrupted = ''
self.location = ''
self.logical_disk_size_in_gb = 0
self.label = ''
self.media_link = ''
self.name = ''
self.os = ''
self.source_image_name = ''
class Disks(WindowsAzureDataTypedList):
list_type = Disk
_repr_attributes = [
'items'
]
class AttachedTo(WindowsAzureData):
def __init__(self):
self.hosted_service_name = ''
self.deployment_name = ''
self.role_name = ''
class OperationError(WindowsAzureData):
def __init__(self):
self.code = ''
self.message = ''
class Operation(WindowsAzureData):
def __init__(self):
self.id = ''
self.status = ''
self.http_status_code = ''
self.error = OperationError()
class OperatingSystem(WindowsAzureData):
def __init__(self):
self.version = ''
self.label = _Base64String()
self.is_default = True
self.is_active = True
self.family = 0
self.family_label = _Base64String()
class OSDiskConfiguration(WindowsAzureData):
def __init__(self):
self.name = ''
self.host_caching = ''
self.os_state = ''
self.os = ''
self.media_link = ''
self.logical_disk_size_in_gb = 0
class OperatingSystems(WindowsAzureDataTypedList):
list_type = OperatingSystem
_repr_attributes = [
'items'
]
class OperatingSystemFamily(WindowsAzureData):
    # A family (e.g. a Windows Server generation) grouping OS versions.
    def __init__(self):
        self.name = ''
        self.label = _Base64String()
        self.operating_systems = OperatingSystems()
class OperatingSystemFamilies(WindowsAzureDataTypedList):
    # Typed list whose elements are OperatingSystemFamily instances.
    list_type = OperatingSystemFamily
    _repr_attributes = [
        'items'
    ]
class Subscription(WindowsAzureData):
    # Subscription-level account information and resource quotas.
    def __init__(self):
        self.subscription_id = ''
        self.subscription_name = ''
        self.subscription_status = ''
        self.account_admin_live_email_id = ''
        self.service_admin_live_email_id = ''
        self.max_core_count = 0
        self.max_storage_accounts = 0
        self.max_hosted_services = 0
        self.current_core_count = 0
        self.current_hosted_services = 0
        self.current_storage_accounts = 0
        self.max_virtual_network_sites = 0
        self.max_local_network_sites = 0
        self.max_dns_servers = 0
class AvailabilityResponse(WindowsAzureData):
    # Boolean answer to a name-availability query.
    def __init__(self):
        self.result = False
class SubscriptionCertificate(WindowsAzureData):
    # One management certificate registered on the subscription.
    def __init__(self):
        self.subscription_certificate_public_key = ''
        self.subscription_certificate_thumbprint = ''
        self.subscription_certificate_data = ''
        self.created = ''
class SubscriptionCertificates(WindowsAzureDataTypedList):
    # Typed list whose elements are SubscriptionCertificate instances.
    list_type = SubscriptionCertificate
    _repr_attributes = [
        'items'
    ]
class AzureHTTPRequest(object):
    # Mutable container describing an outgoing management-API HTTP request.
    def __init__(self):
        self.host = ''
        self.method = ''
        self.path = ''
        self.query = []      # list of (name, value)
        self.headers = {}    # dict mapping header name -> header value
        self.body = ''
        # When set, overrides the scheme (http/https) chosen by the driver.
        self.protocol_override = None
class AzureHTTPResponse(object):
    # Immutable snapshot of an HTTP response (status, message, headers, body).
    def __init__(self, status, message, headers, body):
        self.status = status
        self.message = message
        self.headers = headers
        self.body = body
"""
Helper classes and functions.
"""
class _Base64String(str):
    # Marker subclass: values of this type are base64-encoded on the wire.
    pass
class _ListOf(list):
    """
    A list which carries with it the type that's expected to go in it.
    Used for deserialization and construction of the lists
    """
    def __init__(self, list_type, xml_element_name=None):
        # Class each deserialized element is instantiated as.
        self.list_type = list_type
        # XML tag wrapping each element; defaults to the element class name.
        if xml_element_name is None:
            self.xml_element_name = list_type.__name__
        else:
            self.xml_element_name = xml_element_name
        super(_ListOf, self).__init__()
class ScalarListOf(list):
    """
    A list of scalar types which carries with it the type that's
    expected to go in it along with its xml element name.
    Used for deserialization and construction of the lists
    """
    def __init__(self, list_type, xml_element_name):
        self.list_type = list_type
        self.xml_element_name = xml_element_name
        super(ScalarListOf, self).__init__()
class _DictOf(dict):
    """
    A dict which carries with it the xml element names for key,val.
    Used for deserialization and construction of the lists
    """
    def __init__(self,
                 pair_xml_element_name,
                 key_xml_element_name,
                 value_xml_element_name):
        # Tag of each (key, value) pair element, and of the key/value leaves.
        self.pair_xml_element_name = pair_xml_element_name
        self.key_xml_element_name = key_xml_element_name
        self.value_xml_element_name = value_xml_element_name
        super(_DictOf, self).__init__()
class AzureNodeLocation(NodeLocation):
    """Node location enriched with Azure-specific metadata.

    Azure additionally exposes, per location, the services that may be
    deployed there and the virtual-machine role sizes that can be
    provisioned — extras to the base NodeLocation API.
    """

    def __init__(self, id, name, country, driver, available_services,
                 virtual_machine_role_sizes):
        super(AzureNodeLocation, self).__init__(id, name, country, driver)
        self.available_services = available_services
        self.virtual_machine_role_sizes = virtual_machine_role_sizes

    def __repr__(self):
        template = ('<AzureNodeLocation: id=%s, name=%s, country=%s, '
                    'driver=%s services=%s virtualMachineRoleSizes=%s >')
        return template % (
            self.id,
            self.name,
            self.country,
            self.driver.name,
            ','.join(self.available_services),
            ','.join(self.virtual_machine_role_sizes),
        )
|
Scalr/libcloud
|
libcloud/compute/drivers/azure.py
|
Python
|
apache-2.0
| 113,059
|
[
"VisIt"
] |
35dd7dc1f12e283d11056e8abe9d1db1addd364f0a6356ea77ccb5d13921db4c
|
#!/usr/local/www/vamps/software/python/bin/python
# -*- coding: utf-8 -*-
#
# See README file for details. Send your questions / remarks to "A. Murat Eren", <meren / mbl.edu>

import sys

try:
    import Levenshtein
except ImportError:
    # Catch only ImportError: a bare 'except:' would also swallow unrelated
    # errors (KeyboardInterrupt, SystemExit, bugs inside the module) and
    # misreport them as a missing dependency.
    # Parenthesized print works identically under Python 2 and 3.
    print('''
    You need Levenshtein module installed to run this software.
    Here is a fast implementation of Levenshtein distance for Python:
    http://code.google.com/p/pylevenshtein/
''')
    sys.exit(-1)
class Settings:
    """
    Settings class with templates for different regions of 16S rRNA gene (see __main__ for client side usage).

    * reversed: if the sequence is reverse sequenced.
    * start   : nucleotide position to start the search (if reversed is true, it is -(start),
                means not from the beginning but from the end of the sequence)
    * freedom : the length of the search space from both directions.
    * length  : expected length of the anchor sequence
    """
    def __init__(self, region = None):
        # Per-region anchor-search templates; keys are the valid --region values.
        self.general_settings = {
            'v6v4-361': {'reversed': True, 'start' : 361, 'freedom' : 50, 'length' : 13},
            # previously determiend anchor consensus: G[T,G]AG.[A,G]GT[A,G][A,G]AAT
            'v6v4-4xx': {'reversed': True, 'start' : 480, 'freedom' : 60, 'length' : 11},
            # previously determiend anchor consensus: G[T,G]AG.[A,G]GT[A,G][A,G]AAT
            'v3v5-440': {'reversed': False, 'start' : 440, 'freedom' : 30, 'length' : 13},
            # previously determined anchor consensus: GGATTAGA[T,G]ACCC
            'v3v5-370': {'reversed': False, 'start' : 370, 'freedom' : 50, 'length' : 12},
            # previously determined anchor consensus: [A,T,C][A,T,G]GCGAA[A,G]GC[A,G][A,C,G]
        }
        # region_settings is only created when a region was requested;
        # callers using Settings() just for available_regions() never touch it.
        if region:
            self.region_settings = DictDotNotationWrapper(self.general_settings[region])
    def available_regions(self):
        # Returns the supported region identifiers (dict keys).
        return self.general_settings.keys()
class DictDotNotationWrapper(object):
    """
    Thin wrapper exposing the items of a plain dictionary through attribute
    (dot) access, purely for syntactic convenience — you can ignore this
    class if you're trying to understand what this program does.

    Reading an attribute falls back to the wrapped dictionary and yields
    None for unknown keys; assigning an attribute stores a regular instance
    attribute (it does NOT write into the wrapped dictionary).
    """
    def __init__(self, dictionary):
        self.dictionary = dictionary
    def __getattr__(self, key):
        # Invoked only when normal attribute lookup fails.
        return self.dictionary.get(key, None)
    def __setattr__(self, key, val):
        # Delegate straight to object's implementation to avoid recursing
        # through this method. The original spelled this as
        # 'super.__setattr__(self, key, val)' (the super *type*, unbound),
        # which only worked by accident; this form is explicit.
        object.__setattr__(self, key, val)
def generate_tuples(start, freedom, length, direction = -1, step = 0, list_of_tuples = None, reversed_read = False):
    """
    This recursive function creates a list of tuples that are expanding from 'start' in both
    directions until they reach the borders of 'freedom' that sent as a parameter. 'length' is
    the distance from the start. if you call generate_tuples(10, 5, 3), you would get this:

    [(10, 13), (11, 14), (9, 12), (12, 15), (8, 11), (13, 16), (7, 10), (14, 17), (6, 9), (15, 18), (5, 8)]

    maybe this would give a better idea:

                           (10, 13)
                   (9, 12)          (11, 14)
               (8, 11)                  (12, 15)
           (7, 10)                          (13, 16)
        (6, 9)                                  (14, 17)
     (5, 8)                                         (15, 18)

    so, if you know where it is likely to find the pattern, you can start from there, and expand search to both
    directions step by step in a somewhat optimized way. When 'reversed_read' is set, the tuples are negative
    slice indices counted from the end of the sequence.
    """
    # Fresh accumulator per top-level call: the original used a mutable
    # default argument ([]), which is shared across calls and keeps growing.
    if list_of_tuples is None:
        list_of_tuples = []
    if reversed_read:
        list_of_tuples.append((-(start), -(start - length)))
    else:
        list_of_tuples.append((start, start + length))
    # Stop once the search space has been expanded 'freedom' steps in each direction.
    if step == freedom * 2:
        return list_of_tuples
    # Alternate the direction and widen the stride each recursion.
    direction *= -1
    step += 1
    return generate_tuples(start + direction * step, freedom, length, direction, step, list_of_tuples, reversed_read = reversed_read)
def trim_sequence(sequence, location, s):
    """Cut *sequence* at the anchor *location* according to settings *s*.

    For reverse-sequenced reads the anchor and everything after it is kept;
    otherwise everything up to and including the anchor is kept. In both
    cases the anchor itself remains in the trimmed sequence.
    """
    begin, end = location[0], location[1]
    if s.reversed:
        # keep the tail, anchor included
        return sequence[begin:]
    # keep the head, anchor included
    return sequence[:end]
def find_best_distance(sequence, valid_anchor_sequences, max_divergence, list_of_tuples):
    """
    Scan 'sequence' at every candidate window in 'list_of_tuples' and return
    the (anchor, location) pair best matching one of 'valid_anchor_sequences'
    (Levenshtein ratio >= 'max_divergence'), or (None, None) when no window
    is close enough. A perfect match short-circuits the search immediately.
    """
    best_loc_for_every_anchor = []
    for anchor in valid_anchor_sequences:
        non_perfect_matches = []
        for tpl in list_of_tuples:
            r = Levenshtein.ratio(sequence[tpl[0]:tpl[1]], anchor)
            if r == 1:
                # r is 1, means that we found the perfect match to one of the valid anchors.
                # we'll return it imediately.
                return (anchor, tpl)
            elif r >= max_divergence:
                # r is not 1, but it is larger than the 'max_divergence' accepted.
                # we'll put it in a list and keep testing.
                non_perfect_matches.append((r, tpl))
        if len(non_perfect_matches):
            best_loc_for_every_anchor.append((anchor, sorted(non_perfect_matches, reverse=True)[0]))
    # potential FIXME: OK. at this point, 'best_loc_for_every_anchor' is such a list that every member of this
    # list is an anchor from 'valid_anchor_sequences' and the location within the 'sequence' where the sequence
    # is most similar to this anchor. The data structure for every item in this list looks like this:
    #
    # ('GGAGCGGTGGAAT', (0.7692307692307693, (-344, -331)))
    #
    # it reads "the best distance of GGAGCGGTGGAAT to every oligonucleotide in the given sequence was at sequence[-344:-331]"
    #
    # but it is important to remember that this 'best' is coming from a sorted list. so, there may be equally good ones that left out
    # and maybe some of them were better candidates. this issue becomes even more of a challenge when we pick 'best_anchor' in the next
    # line by sorting this list. just by chance, from two equaly good candidates, the one that could be more prefferable in terms of the
    # trimming location in the sequence might be beaten by another one during the sorting. at some point we might want to plug
    # in a probabilistic logic here to pick competing locations (maybe equally distant options could be ranked based on a
    # pre-computed mixture of gaussian curves for a given region to pick the most appealing location to use as an anchor):
    best_loc_for_every_anchor_sorted = sorted(best_loc_for_every_anchor, key = lambda k: k[1][0], reverse=True)
    if len(best_loc_for_every_anchor_sorted):
        best_anchor = best_loc_for_every_anchor_sorted[0]
        # (anchor string, (begin, end) tuple) of the overall best match.
        return (best_anchor[0], best_anchor[1][1])
    else:
        return (None, None)
def colorize(sequence, location):
    """Return *sequence* with the anchor span highlighted (black on green ANSI)."""
    begin, end = location[0], location[1]
    highlighted = '\033[30m\033[42m' + sequence[begin:end] + '' + '\033[0m'
    return sequence[:begin] + highlighted + sequence[end:]
def main(s):
    """Trim every sequence in s.input_fasta at its best anchor location.

    's' is a DictDotNotationWrapper around the region settings, augmented by
    the __main__ block with runtime attributes (input_fasta, output, failed,
    screen, valid_anchor_sequences, max_divergence).
    """
    # define the search space
    list_of_tuples = generate_tuples(s.start, s.freedom, s.length, reversed_read = s.reversed)
    input = s.input_fasta  # NOTE(review): shadows the builtin 'input'
    while input.next():
        anchor, location = find_best_distance(input.seq, s.valid_anchor_sequences, s.max_divergence, list_of_tuples)
        if anchor and location:
            # lets get that anchor at the beginning of the list..
            # this step will make the algorithm get faster as it carries frequently
            # used anchors at the beginning of the list
            if s.valid_anchor_sequences.index(anchor) != 0:
                s.valid_anchor_sequences.insert(0, s.valid_anchor_sequences.pop(s.valid_anchor_sequences.index(anchor)))
            # busines time.
            trimmed = trim_sequence(input.seq, location, s)
            if s.screen:
                # Screen mode: show the untrimmed read with the anchor highlighted.
                s.output.write('>' + input.id + '\n')
                s.output.write(colorize(input.seq, location) + '\n')
            else:
                s.output.write('>' + input.id + '\n')
                s.output.write(trimmed + '\n')
            # Progress report on stderr every 100 reads.
            if input.pos % 100 == 0:
                sys.stderr.write('\rTRIMMING: %.2d%% -- pos: %d' % (input.pos * 100 / input.total_seq, input.pos))
                sys.stderr.flush()
        else:
            # No acceptable anchor: route the read to the 'failed' stream.
            if s.screen:
                s.failed.write('>' + input.id + '\n')
                s.failed.write(input.seq + '\n')
                if input.pos % 100 == 0:
                    sys.stderr.write('\rTRIMMING: %.2d%% -- pos: %d' % (input.pos * 100 / input.total_seq, input.pos))
                    sys.stderr.flush()
            else:
                s.failed.write('>' + input.id + '\n')
                s.failed.write(input.seq + '\n')
    print
if __name__ == '__main__':
    import argparse
    import fastalib as u
    # Command-line interface.
    # NOTE(review): the help texts contain typos ("Levenshtine", "standart");
    # they are user-facing strings and are left unchanged here.
    parser = argparse.ArgumentParser(description='Fuzzy anchor trimming for 454 Sequences')
    parser.add_argument('-i', '--input-fasta', required=True, metavar = 'FASTA_FILE',
                        help = 'Sequences file to be trimmed in FASTA format')
    parser.add_argument('-r', '--region', required=True, metavar = 'REGION',
                        help = 'Region in the 16S rRNA gene. Available options: %s' %
                        ', '.join(Settings().available_regions()), choices = Settings().available_regions())
    parser.add_argument('-a', '--anchor-sequences', required=True, metavar = 'ANCHORS_FILE',
                        help = 'Input file that contains the list of valid anchor sequences')
    parser.add_argument('-o', '--output', help = 'Where trimmed sequences will be written (default: standart output)')
    parser.add_argument('-d', '--max-divergence', type=float, default=0.9,
                        help = 'Maximum Levenshtine distance allowed candidate trimming site from one of the valid\
                        anchor sequence (default: 0.90). Please see README file in order to get more\
                        information on maximum divergence.')
    args = parser.parse_args()
    # All runtime state is hung off the dot-notation settings wrapper.
    s = Settings(args.region).region_settings
    s.input_fasta = u.SequenceSource(args.input_fasta)
    s.valid_anchor_sequences = [sequence.strip() for sequence in open(args.anchor_sequences).readlines()]
    s.max_divergence = args.max_divergence
    if args.output:
        s.screen = False
        s.output = open(args.output, 'w')
        s.failed = open(args.output + '-FAILED', 'w')
    else:
        # No output file given: colorized results to stdout, failures to stderr.
        s.screen = True
        s.output = sys.stdout
        s.failed = sys.stderr
    sys.exit(main(s))
|
avoorhis/mbl_sequencing_pipeline
|
pipeline/anchortrimming_mbl.py
|
Python
|
gpl-2.0
| 10,632
|
[
"Gaussian"
] |
f871adf8c38c0e10a76c9ea24b41afebffbd1d11efdcd179e8b4f0253fbebd7d
|
from __future__ import print_function
import warnings
from copy import copy
import numpy as np
import netCDF4 as nc
from .constants import Ha2eV
from .util import create_directory, formatted_array_lines
from .qptanalyzer import QptAnalyzer
from .mpi import MPI, comm, size, rank, master_only, mpi_watch, i_am_master
# =========================================================================== #
__all__ = ['EpcAnalyzer']
class EpcAnalyzer(object):
    """
    Main class for analyzing electron-phonon coupling related quantities.
    It is intended to analyze the files produced by ABINIT
    in a phonon response-function calculation, with one q-point per dataset,
    the first q-point being Gamma.
    For documentation, see `ElectronPhononCoupling.compute`
    """
    # Class-level defaults, overridden per instance in __init__ / setters.
    # NOTE(review): the list-valued defaults below (temperatures, omegase,
    # my_iqpts) are mutable class attributes shared across instances until
    # reassigned — safe only because __init__ always reassigns them.
    verbose = False
    wtq = None
    broadening = None
    temperatures = []
    omegase = []
    smearing = None
    mu = None
    qred = None
    omega = None
    # Results, filled in by the various compute_* methods.
    zero_point_renormalization = None
    zero_point_broadening = None
    temperature_dependent_renormalization = None
    temperature_dependent_broadening = None
    zero_point_renormalization_modes = None
    split_fan_ddw = False
    renormalization_is_dynamical = False
    broadening_is_dynamical = False
    self_energy = None
    spectral_function = None
    self_energy_T = None
    spectral_function_T = None
    # q-point indices assigned to this MPI worker.
    my_iqpts = [0]
    def __init__(self,
                 # Options
                 asr=True,
                 verbose=False,
                 # Parameters
                 nqpt=1,
                 wtq=[1.0],
                 temp_range=[0,0,1],
                 omega_range=[0,0,1],
                 smearing=0.00367,
                 fermi_level = None,
                 # File names
                 rootname='epc.out',
                 eigk_fname='',
                 eigq_fnames=list(),
                 ddb_fnames=list(),
                 eigr2d_fnames=list(),
                 eigi2d_fnames=list(),
                 fan_fnames=list(),
                 gkk_fnames=list(),
                 # Double grid
                 nqpt_fine=1,
                 wtq_fine=[1.0],
                 eigq_fine_fnames=list(),
                 gkk_fine_fnames=list(),
                 ddb_fine_fnames=list(),
                 **kwargs):
        """
        Validate inputs, store file names and weights, read the q=0 files,
        distribute q-points among MPI workers and set the Fermi level.
        See `ElectronPhononCoupling.compute` for argument documentation.
        Raises Exception when a mandatory file list is missing or when the
        number of weights does not match nqpt.
        """
        # Check that the minimum number of files is present
        if not eigk_fname:
            raise Exception('Must provide a file for eigk_fname')
        if not eigr2d_fnames:
            raise Exception('Must provide at least one file for eigr2d_fnames')
        if not ddb_fnames:
            raise Exception('Must provide at least one file for ddb_fnames')
        if len(wtq) != nqpt:
            raise Exception("Must provide nqpt weights in the 'wtq' list.")
        # Set basic quantities
        self.nqpt = nqpt
        self.set_weights(wtq)
        self.nqpt_fine = nqpt_fine
        self.set_weights_fine(wtq_fine)
        # Set file names
        self.eig0_fname = eigk_fname
        self.eigq_fnames = eigq_fnames
        self.ddb_fnames = ddb_fnames
        self.eigr2d_fnames = eigr2d_fnames
        self.eigi2d_fnames = eigi2d_fnames
        self.fan_fnames = fan_fnames
        self.gkk_fnames = gkk_fnames
        self.eigq_fine_fnames = eigq_fine_fnames
        self.ddb_fine_fnames = ddb_fine_fnames
        self.gkk_fine_fnames = gkk_fine_fnames
        # Initialize a single QptAnalyzer for q=0
        # (coarse-grid files take precedence over fine-grid ones).
        if self.gkk_fnames:
            gkk0 = self.gkk_fnames[0]
        elif self.gkk_fine_fnames:
            gkk0 = self.gkk_fine_fnames[0]
        else:
            gkk0 = None
        if self.fan_fnames:
            fan0 = self.fan_fnames[0]
        else:
            fan0 = None
        self.qptanalyzer = QptAnalyzer(
            wtq=self.wtq[0],
            eigk_fname=self.eig0_fname,
            ddb_fname=self.ddb_fnames[0],
            eigr2d0_fname=self.eigr2d_fnames[0],
            fan0_fname=fan0,
            gkk0_fname=gkk0,
            asr=asr,
            )
        # Read the first DDB and check that it is Gamma
        self.check_gamma()
        # Read other files at q=0 and broadcast the data
        self.read_zero_files()
        # Get arrays dimensions
        self.nkpt = self.qptanalyzer.eigr2d0.nkpt
        self.nband = self.qptanalyzer.eigr2d0.nband
        self.natom = self.qptanalyzer.eigr2d0.natom
        self.kpts = self.qptanalyzer.eigr2d0.kpt[:,:]
        # Set parameters
        self.set_temp_range(temp_range)
        self.set_omega_range(omega_range)
        self.set_smearing(smearing)
        self.set_output(rootname)
        # Split the workload between workers
        # (needed to find the fermi level)
        self.distribute_workload()
        # Set the fermi level
        if fermi_level is None:
            self.find_fermi_level()
        else:
            self.set_fermi_level(fermi_level)
        self.verbose = verbose
    @property
    def nc_output(self):
        # Path of the netCDF results file.
        return str(self.output) + '_EP.nc'
    @property
    def ren_dat(self):
        # Path of the plain-text renormalization output file.
        return str(self.output) + '_REN.dat'
    @property
    def BRD_dat(self):
        # Path of the plain-text broadening output file.
        return str(self.output) + '_BRD.dat'
    @master_only
    def check_gamma(self):
        """Read the first q-point's files and fail unless that q-point is Gamma."""
        self.qptanalyzer.read_nonzero_files()
        if not self.qptanalyzer.is_gamma:
            raise Exception('The first Q-point is not Gamma.')
    @mpi_watch
    def read_zero_files(self):
        """Read the q=0 files and broadcast to all mpi workers."""
        # Master reads the files
        if i_am_master:
            self.qptanalyzer.read_zero_files()
        # Broadcast
        self.qptanalyzer.broadcast_zero_files()
    def set_temp_range(self, temp_range=(0, 0, 1)):
        """Set the minimum, maximum and step temperature."""
        args = list(temp_range)
        assert len(args) == 3
        minimum, maximum, step = args
        # For all-integer input whose span divides evenly by the step, bump
        # the maximum so the endpoint itself is included in the mesh.
        if all([isinstance(i, int) for i in args]):
            if (maximum - minimum) % step == 0:
                maximum += 1
        self.temperatures = np.arange(minimum, maximum, step, dtype=float)
        self.ntemp = len(self.temperatures)
        # Keep the per-q-point analyzer in sync.
        self.qptanalyzer.temperatures = self.temperatures
    def check_temperatures(self):
        # Warn (do not fail) when no temperature mesh has been defined.
        if not len(self.temperatures):
            warnings.warn('Temperatures were not set. '
                          'Please specify it with the "temp_range" '
                          'keyword argument ')
    def set_omega_range(self, omega_range=(0, 0, 1)):
        """Set the minimum, maximum and step frequency for the self-energy."""
        self.omegase = np.arange(*omega_range, dtype=float)
        self.nomegase = len(self.omegase)
        self.qptanalyzer.omegase = self.omegase
    def set_smearing(self, smearing_Ha):
        """Set the smearing, in Hartree."""
        self.smearing = smearing_Ha
        self.qptanalyzer.smearing = smearing_Ha
    def set_output(self, root):
        """Set the root for output names."""
        self.output = root
    def set_weights(self, wtq, normalize=True):
        """Set the coarse-grid q-point weights, normalized to 1 by default."""
        if normalize:
            self.wtq = np.array(wtq) / sum(wtq)
        else:
            self.wtq = np.array(wtq)
    def set_weights_fine(self, wtq, normalize=True):
        """Set the fine-grid q-point weights, normalized to 1 by default."""
        if normalize:
            self.wtq_fine = np.array(wtq) / sum(wtq)
        else:
            self.wtq_fine = np.array(wtq)
    def set_fermi_level(self, mu):
        """Set the Fermi level, in Hartree."""
        self.mu = mu
        self.qptanalyzer.mu = mu
    @mpi_watch
    def find_fermi_level(self):
        """
        Find the Fermi level by gathering information from all workers
        and broadcast the result.
        """
        all_max_val = self.gather_qpt_function('get_max_val')
        all_min_cond = self.gather_qpt_function('get_min_cond')
        if i_am_master:
            # Mid-gap value between the valence-band max and conduction-band min.
            max_val = np.max(all_max_val)
            min_cond = np.min(all_min_cond)
            mu = (max_val + min_cond) / 2.0
            mu = np.array(mu, dtype=np.float64)
        else:
            # NOTE(review): master wraps mu in a 0-d array while the other
            # ranks allocate a length-1 array; Bcast fills it in place, so
            # the value stored by set_fermi_level is an ndarray, not a float.
            mu = np.empty(1, dtype=np.float64)
        comm.Bcast([mu, MPI.DOUBLE])
        self.set_fermi_level(mu)
    def set_iqpt(self, iqpt, fine=False):
        """
        Give the qptanalyzer the weight and files corresponding
        to one particular qpoint and read the files.
        """
        if fine:
            self.set_iqpt_fine(iqpt)
        else:
            self.set_iqpt_coarse(iqpt)
    def set_iqpt_coarse(self, iqpt):
        """
        Give the qptanalyzer the weight and files corresponding
        to one particular coarse-grid qpoint and read the files.
        """
        self.qptanalyzer.wtq = self.wtq[iqpt]
        self.qptanalyzer.ddb.fname = self.ddb_fnames[iqpt]
        # Optional per-q-point files are only rewired when they were provided.
        if self.eigr2d_fnames:
            self.qptanalyzer.eigr2d.fname = self.eigr2d_fnames[iqpt]
        if self.eigq_fnames:
            self.qptanalyzer.eigq.fname = self.eigq_fnames[iqpt]
        if self.fan_fnames:
            self.qptanalyzer.fan.fname = self.fan_fnames[iqpt]
        if self.gkk_fnames:
            self.qptanalyzer.gkk.fname = self.gkk_fnames[iqpt]
        if self.eigi2d_fnames:
            self.qptanalyzer.eigi2d.fname = self.eigi2d_fnames[iqpt]
        self.qptanalyzer.read_nonzero_files()
    def set_iqpt_fine(self, iqpt):
        """
        Give the qptanalyzer the weight and files corresponding
        to one particular fine-grid qpoint and read the files.
        """
        self.qptanalyzer.wtq = self.wtq_fine[iqpt]
        self.qptanalyzer.ddb.fname = self.ddb_fine_fnames[iqpt]
        self.qptanalyzer.eigq.fname = self.eigq_fine_fnames[iqpt]
        self.qptanalyzer.gkk.fname = self.gkk_fine_fnames[iqpt]
        self.qptanalyzer.read_nonzero_files()
    def set_ddb(self, iqpt):
        """
        Give the qptanalyzer the weight and ddb file corresponding
        to one particular qpoint, then read and diagonalize the dynamical matrix.
        """
        self.qptanalyzer.wtq = self.wtq[iqpt]
        self.qptanalyzer.ddb.fname = self.ddb_fnames[iqpt]
        self.qptanalyzer.read_ddb()
    @mpi_watch
    def distribute_workload(self, fine=False):
        """Distribute the q-point indices to be treated by each worker."""
        if fine:
            self.my_iqpts = self.get_iqpts(self.nqpt_fine)
        else:
            self.my_iqpts = self.get_iqpts(self.nqpt)
def get_iqpts(self, nqpt):
"""Distribute the q-points indicies to be treated by each worker."""
max_nqpt_per_worker = (
nqpt // size + min(nqpt % size, 1))
n_active_workers = (
nqpt // max_nqpt_per_worker
+ min(nqpt % max_nqpt_per_worker, 1))
my_iqpts = list()
for i in range(max_nqpt_per_worker):
iqpt = rank * max_nqpt_per_worker + i
if iqpt >= nqpt:
break
my_iqpts.append(iqpt)
return my_iqpts
    @property
    def active_worker(self):
        # True when this MPI rank was assigned at least one q-point.
        return bool(self.my_iqpts)
    def get_active_ranks(self):
        """Get the ranks of all active workers."""
        # Ceiling divisions: chunk size per worker, then number of workers
        # actually holding at least one q-point.
        max_nqpt_per_worker = (self.nqpt // size
                               + min(self.nqpt % size, 1))
        n_active_workers = (self.nqpt // max_nqpt_per_worker
                            + min(self.nqpt % max_nqpt_per_worker, 1))
        return np.arange(n_active_workers)
    @mpi_watch
    def sum_qpt_function(self, func_name, fine=False, *args, **kwargs):
        """Call a certain function for each q-point and sum the result.

        Only the master rank returns the total; every other rank sends its
        partial sum to master and returns None.
        """
        partial_sum = self.sum_qpt_function_me(func_name, fine=fine,
                                               *args, **kwargs)
        if i_am_master:
            total = partial_sum
            active_ranks = self.get_active_ranks()
            if len(active_ranks) > 1:
                for irank in active_ranks[1:]:
                    partial_sum = comm.recv(source=irank, tag=irank)
                    total += partial_sum
        elif self.active_worker:
            comm.send(partial_sum, dest=0, tag=rank)
            return
        else:
            return
        # Now I could broadcast the total result to all workers
        # but right now there is no need to.
        return total
    def sum_qpt_function_me(self, func_name, fine=False, *args, **kwargs):
        """
        Call a certain function for each q-point of this worker
        and sum the result. Returns None on inactive workers.
        """
        if not self.active_worker:
            return None
        iqpt = self.my_iqpts[0]
        self.set_iqpt(iqpt, fine=fine)
        if self.verbose:
            print("Q-point: {} with wtq = {} and reduced coord. {}".format(
                  iqpt, self.qptanalyzer.wtq, self.qptanalyzer.qred))
        q0 = getattr(self.qptanalyzer, func_name)(*args, **kwargs)
        # Copy so in-place accumulation below cannot mutate the analyzer's result.
        total = copy(q0)
        if len(self.my_iqpts) == 1:
            return total
        for iqpt in self.my_iqpts[1:]:
            self.set_iqpt(iqpt, fine=fine)
            if self.verbose:
                print("Q-point: {} with wtq = {} and reduced coord. {}".format(
                      iqpt, self.qptanalyzer.wtq, self.qptanalyzer.qred))
            qpt = getattr(self.qptanalyzer, func_name)(*args, **kwargs)
            total += qpt
        return total
    @mpi_watch
    def gather_qpt_function(self, func_name, *args, **kwargs):
        """Call a certain function for each q-point and gather all results.

        Only master returns the gathered (nqpt, ...) array; other ranks
        send their partial arrays to master and return None.
        """
        partial = self.gather_qpt_function_me(func_name, *args, **kwargs)
        if i_am_master:
            # Construct an array with the shape of partial,
            # adding a dimension of length nqpt.
            total = np.zeros([self.nqpt] + list(partial.shape[1:]),
                             dtype=partial.dtype)
            for i, arr in enumerate(partial):
                total[i,...] = arr[...]
            active_ranks = self.get_active_ranks()
            if len(active_ranks) > 1:
                for irank in active_ranks[1:]:
                    partial = comm.recv(source=irank, tag=irank)
                    # Continue filling 'total' where the previous rank stopped.
                    for arr in partial:
                        i += 1
                        total[i,...] = arr[...]
        elif self.active_worker:
            comm.send(partial, dest=0, tag=rank)
            return
        else:
            return
        # Now I could broadcast the total result to all workers
        # but right now there is no need to.
        return total
    def gather_qpt_function_me(self, func_name, *args, **kwargs):
        """
        Call a certain function for each q-point of this worker
        and gather all results. Returns None on inactive workers.
        """
        if not self.active_worker:
            return None
        nqpt_me = len(self.my_iqpts)
        iqpt = self.my_iqpts[0]
        self.set_iqpt(iqpt)
        if self.verbose:
            print("Q-point: {} with wtq = {} and reduced coord. {}".format(
                  iqpt, self.qptanalyzer.wtq, self.qptanalyzer.qred))
        # First q-point fixes the shape and dtype of the gathered array.
        q0 = np.array(getattr(self.qptanalyzer, func_name)(*args, **kwargs))
        total = np.zeros([nqpt_me] + list(q0.shape), dtype=q0.dtype)
        total[0,...] = q0[...]
        if len(self.my_iqpts) == 1:
            return total
        for i, iqpt in enumerate(self.my_iqpts[1:]):
            self.set_iqpt(iqpt)
            if self.verbose:
                print("Q-point: {} with wtq = {} and reduced coord. {}".format(
                      iqpt, self.qptanalyzer.wtq, self.qptanalyzer.qred))
            qpt = getattr(self.qptanalyzer, func_name)(*args, **kwargs)
            total[i+1,...] = qpt[...]
        return total
@mpi_watch
def gather_qpt_info(self):
"""Gather qpt reduced coordinates and mode frequencies."""
partial = self.gather_qpt_info_me()
if i_am_master:
qred_all = np.zeros((self.nqpt, 3), dtype=np.float)
omega_all = np.zeros((self.nqpt, 3 * self.natom), dtype=np.float)
qred_p, omega_p = partial
for i, (qred, omega) in enumerate(zip(qred_p, omega_p)):
qred_all[i,...] = qred[...]
omega_all[i,...] = omega[...]
active_ranks = self.get_active_ranks()
if len(active_ranks) > 1:
for irank in active_ranks[1:]:
partial = comm.recv(source=irank, tag=10000+irank)
qred_p, omega_p = partial
for qred, omega in zip(qred_p, omega_p):
i += 1
qred_all[i,...] = qred[...]
omega_all[i,...] = omega[...]
elif self.active_worker:
comm.send(partial, dest=0, tag=10000+rank)
return
else:
return
self.qred = qred_all
self.omega = omega_all
return self.qred, self.omega
def gather_qpt_info_me(self):
"""Gather qpt reduced coordinates and mode frequencies."""
if not self.active_worker:
return None
nqpt_me = len(self.my_iqpts)
qred = np.zeros((nqpt_me, 3), dtype=np.float)
omega = np.zeros((nqpt_me, 3 * self.natom), dtype=np.float)
for i, iqpt in enumerate(self.my_iqpts):
self.set_ddb(iqpt)
qred[i,:] = self.qptanalyzer.qred[:]
omega[i,:] = np.real(self.qptanalyzer.omega[:])
return qred, omega
    @mpi_watch
    def sum_qpt_functions_double_grid(self, func_coarse, func_fine,
                                      *args, **kwargs):
        """
        Sum a certain function on the coarse grid,
        and another one on the fine grid.
        Only master sums the result; other ranks return None.
        """
        self.distribute_workload(fine=False)
        sum_coarse = self.sum_qpt_function(func_coarse, fine=False)
        # Redistribute for the fine grid and re-broadcast the q=0 data.
        self.distribute_workload(fine=True)
        self.read_zero_files()
        sum_fine = self.sum_qpt_function(func_fine, fine=True)
        if i_am_master:
            result = sum_coarse + sum_fine
        else:
            result = None
        return result
    def compute_static_zp_renormalization_nosplit(self):
        """Compute the zero-point renormalization in a static scheme."""
        self.distribute_workload()
        self.zero_point_renormalization = self.sum_qpt_function('get_zpr_static_sternheimer')
        self.renormalization_is_dynamical = False
    def compute_static_td_renormalization_nosplit(self):
        """
        Compute the temperature-dependent renormalization in a static scheme.
        """
        self.check_temperatures()
        self.distribute_workload()
        self.temperature_dependent_renormalization = self.sum_qpt_function(
            'get_tdr_static_nosplit')
        self.renormalization_is_dynamical = False
    def compute_dynamical_td_renormalization(self):
        """
        Compute the temperature-dependent renormalization
        in a dynamical scheme.
        """
        self.check_temperatures()
        self.distribute_workload()
        self.temperature_dependent_renormalization = self.sum_qpt_function(
            'get_tdr_dynamical')
        self.renormalization_is_dynamical = True
    def compute_dynamical_td_renormalization_double_grid(self):
        """
        Compute the temperature-dependent renormalization
        in a dynamical scheme, using the coarse/fine double grid.
        """
        self.check_temperatures()
        self.temperature_dependent_renormalization = (
            self.sum_qpt_functions_double_grid('get_tdr_static_nosplit',
                                               'get_tdr_dynamical_active'))
        self.renormalization_is_dynamical = True
    def compute_dynamical_zp_renormalization_double_grid(self):
        """
        Compute the zero-point renormalization
        in a dynamical scheme, using the coarse/fine double grid.
        """
        self.check_temperatures()
        self.zero_point_renormalization = (
            self.sum_qpt_functions_double_grid('get_zpr_static_sternheimer',
                                               'get_zpr_dynamical_active'))
        self.renormalization_is_dynamical = True
    def compute_dynamical_zp_renormalization(self):
        """Compute the zero-point renormalization in a dynamical scheme."""
        self.distribute_workload()
        self.zero_point_renormalization = (
            self.sum_qpt_function('get_zpr_dynamical'))
        self.renormalization_is_dynamical = True
    def compute_static_td_renormalization(self):
        """
        Compute the temperature-dependent renormalization in a static scheme
        with the transitions split between active and sternheimer.
        """
        self.check_temperatures()
        self.distribute_workload()
        self.temperature_dependent_renormalization = (
            self.sum_qpt_function('get_tdr_static'))
        self.renormalization_is_dynamical = False
def compute_static_zp_renormalization(self):
"""
Compute the zero-point renormalization in a static scheme
with the transitions split between active and sternheimer.
"""
self.zero_point_renormalization = (
self.sum_qpt_function('get_zpr_static'))
self.renormalization_is_dynamical = False
    def compute_dynamical_td_broadening(self):
        """
        Compute the temperature-dependent broadening in a dynamical scheme
        from the GKK files.
        """
        self.check_temperatures()
        self.distribute_workload()
        self.temperature_dependent_broadening = (
            self.sum_qpt_function('get_tdb_dynamical'))
        self.broadening_is_dynamical = True
    def compute_dynamical_zp_broadening(self):
        """
        Compute the zero-point broadening in a dynamical scheme
        from the GKK files.
        """
        self.distribute_workload()
        self.zero_point_broadening = (
            self.sum_qpt_function('get_zpb_dynamical'))
        self.broadening_is_dynamical = True
    def compute_static_td_broadening(self):
        """
        Compute the temperature-dependent broadening in a static scheme
        from the GKK files.
        """
        self.check_temperatures()
        self.distribute_workload()
        self.temperature_dependent_broadening = (
            self.sum_qpt_function('get_tdb_static'))
        self.broadening_is_dynamical = False
    def compute_static_zp_broadening(self):
        """
        Compute the zero-point broadening in a static scheme
        from the GKK files.
        """
        # NOTE(review): a second method with this exact name is defined
        # further down in the class and silently overrides this one.
        self.distribute_workload()
        self.zero_point_broadening = (
            self.sum_qpt_function('get_zpb_static'))
        self.broadening_is_dynamical = False
    def compute_static_td_broadening_nosplit(self):
        """
        Compute the temperature-dependent broadening in a static scheme
        from the EIGI2D files.
        """
        self.check_temperatures()
        self.distribute_workload()
        self.temperature_dependent_broadening = (
            self.sum_qpt_function('get_tdb_static_nosplit'))
        self.broadening_is_dynamical = False
    def compute_static_zp_broadening_nosplit(self):
        """
        Compute the zero-point broadening in a static scheme
        from the EIGI2D files.
        """
        self.distribute_workload()
        self.zero_point_broadening = (
            self.sum_qpt_function('get_zpb_static_nosplit'))
        self.broadening_is_dynamical = False
def compute_ddw_active_zpr(self):
"""
Compute the zero-point renormalization in a static scheme
with the transitions split between active and sternheimer.
"""
self.zero_point_renormalization = (
self.sum_qpt_function('get_zpr_ddw_active'))
self.renormalization_is_dynamical = False
    def compute_static_zp_broadening(self):
        """
        Compute the zero-point broadening in a static scheme
        from the FAN files.
        """
        # NOTE(review): duplicate definition — this overrides the earlier
        # method of the same name. Both sum 'get_zpb_static', so behavior is
        # unchanged, but one of the two should be removed upstream.
        self.distribute_workload()
        self.zero_point_broadening = self.sum_qpt_function('get_zpb_static')
        self.broadening_is_dynamical = False
    def compute_static_zp_renormalization_modes(self):
        """
        Compute the zero-point renormalization in a static scheme
        with the transitions split between active and sternheimer.
        Retain the mode decomposition of the zpr.
        """
        self.distribute_workload()
        self.zero_point_renormalization_modes = (
            self.sum_qpt_function('get_zpr_static_modes'))
        self.renormalization_is_dynamical = False
    def compute_zp_self_energy(self):
        """
        Compute the zp frequency-dependent self-energy from one q-point.
        The self-energy is evaluated on a frequency mesh 'omegase'
        that is shifted by the bare energies, such that, what is returned is
        Sigma'_kn(omega) = Sigma_kn(omega + E^0_kn)
        """
        self.distribute_workload()
        self.self_energy = self.sum_qpt_function('get_zp_self_energy')
    def compute_td_self_energy(self):
        """
        Compute the td frequency-dependent self-energy from one q-point.
        The self-energy is evaluated on a frequency mesh 'omegase'
        that is shifted by the bare energies, such that, what is returned is
        Sigma'_kn(omega,T) = Sigma_kn(omega + E^0_kn,T)
        """
        self.distribute_workload()
        self.self_energy_T = self.sum_qpt_function('get_td_self_energy')
@master_only
def compute_zp_spectral_function(self):
"""
Compute the spectral function of all quasiparticles in the
semi-static approximation, that is, the 'upper bands' contribution
to the self-energy is evaluated at the bare energy.
The spectral function is evaluated on a frequency mesh 'omegase'
that is shifted by the bare energies, such that, what is retured is
A'_kn(omega) = A_kn(omega + E^0_kn)
"""
nomegase = self.nomegase
nkpt = self.nkpt
nband = self.nband
self.spectral_function = np.zeros((nomegase, nkpt, nband), dtype=float)
omega = np.einsum('kn,l->knl', np.ones((nkpt, nband)), self.omegase)
self.spectral_function = (
(1 / np.pi) * np.abs(self.self_energy.imag) /
((omega - self.self_energy.real) ** 2 + self.self_energy.imag ** 2)
)
@master_only
def compute_td_spectral_function(self):
"""
Compute the spectral function of all quasiparticles in the
semi-static approximation, that is, the 'upper bands' contribution
to the self-energy is evaluated at the bare energy.
The spectral function is evaluated on a frequency mesh 'omegase'
that is shifted by the bare energies, such that, what is retured is
A'_kn(omega) = A_kn(omega + E^0_kn)
"""
nomegase = self.nomegase
nkpt = self.nkpt
nband = self.nband
ntemp = self.ntemp
self.spectral_function_T = np.zeros((nomegase, ntemp, nkpt, nband),
dtype=float)
omega = np.einsum('ijt,l->ijlt',
np.ones((nkpt, nband, ntemp)), self.omegase)
self.spectral_function_T = (
(1 / np.pi) * np.abs(self.self_energy_T.imag) /
((omega - self.self_energy_T.real) ** 2
+ self.self_energy_T.imag ** 2)
)
def compute_zp_self_energy_double_grid(self):
"""
Compute the temperature-dependent renormalization
in a dynamical scheme.
"""
self.check_temperatures()
self.self_energy = (
self.sum_qpt_functions_double_grid('get_zp_self_energy_sternheimer',
'get_zp_self_energy_active'))
def compute_td_self_energy_double_grid(self):
"""
Compute the temperature-dependent renormalization
in a dynamical scheme.
"""
self.check_temperatures()
self.self_energy_T = (
self.sum_qpt_functions_double_grid('get_td_self_energy_sternheimer',
'get_td_self_energy_active'))
@master_only
def write_netcdf(self):
"""Write all data to a netCDF file."""
create_directory(self.nc_output)
# Write on a NC files with etsf-io name convention
with nc.Dataset(self.nc_output, 'w') as ds:
# Read dim from first EIGR2D file
dim = nc.Dataset(self.eigr2d_fnames[0], 'r')
# Determine nsppol from reading occ
nsppol = len(dim.variables['occupations'][:,0,0])
if nsppol > 1:
warnings.warn("nsppol > 1 has not been tested.")
mband = len(dim.dimensions['product_mband_nsppol']) / nsppol
# Create dimension
ds.createDimension('number_of_atoms',
len(dim.dimensions['number_of_atoms']))
ds.createDimension('number_of_kpoints',
len(dim.dimensions['number_of_kpoints']))
ds.createDimension('product_mband_nsppol',
len(dim.dimensions['product_mband_nsppol']))
ds.createDimension('cartesian', 3)
ds.createDimension('cplex', 2)
ds.createDimension('number_of_qpoints', self.nqpt)
ds.createDimension('number_of_spins',
len(dim.dimensions['number_of_spins']))
ds.createDimension('max_number_of_states',mband)
ds.createDimension('number_of_modes',
3*len(dim.dimensions['number_of_atoms']))
ds.createDimension('number_of_temperature', len(self.temperatures))
ds.createDimension('number_of_frequencies', len(self.omegase))
# Write data on the eigenvalues
data = ds.createVariable('reduced_coordinates_of_kpoints', 'd',
('number_of_kpoints','cartesian'))
data[:,:] = dim.variables['reduced_coordinates_of_kpoints'][:,:]
data = ds.createVariable(
'eigenvalues','d',
('number_of_spins','number_of_kpoints','max_number_of_states'))
data[:,:,:] = dim.variables['eigenvalues'][:,:,:]
data = ds.createVariable(
'occupations','i',
('number_of_spins','number_of_kpoints','max_number_of_states'))
data[:,:,:] = dim.variables['occupations'][:,:,:]
data = ds.createVariable(
'primitive_vectors', 'd',
('cartesian','cartesian'))
data[:,:] = dim.variables['primitive_vectors'][:,:]
dim.close()
# Write epc data
data = ds.createVariable('renormalization_is_dynamical', 'i1')
data[:] = self.renormalization_is_dynamical
data = ds.createVariable('broadening_is_dynamical', 'i1')
data[:] = self.broadening_is_dynamical
data = ds.createVariable('temperatures','d',
('number_of_temperature'))
data[:] = self.temperatures[:]
data = ds.createVariable('smearing', 'd')
data[:] = self.smearing
data = ds.createVariable('omegase', 'd',
('number_of_frequencies'))
data[:] = self.omegase[:]
# qpt
data = ds.createVariable(
'reduced_coordinates_of_qpoints','d',
('number_of_qpoints', 'cartesian'))
if self.qred is not None:
data[...] = self.qred[...]
# omega
data = ds.createVariable(
'phonon_mode_frequencies','d',
('number_of_qpoints', 'number_of_modes'))
if self.omega is not None:
data[...] = self.omega[...]
# ZPR
zpr = ds.createVariable(
'zero_point_renormalization','d',
('number_of_spins', 'number_of_kpoints',
'max_number_of_states'))
#fan = ds.createVariable(
# 'fan_zero_point_renormalization','d',
# ('number_of_spins', 'number_of_kpoints',
# 'max_number_of_states'))
#ddw = ds.createVariable(
# 'ddw_zero_point_renormalization','d',
# ('number_of_spins', 'number_of_kpoints',
# 'max_number_of_states'))
if self.zero_point_renormalization is not None:
# FIXME number of spin
zpr[0,:,:] = self.zero_point_renormalization[:,:].real
#fan[0,:,:] = self.fan_zero_point_renormalization[:,:].real
#ddw[0,:,:] = self.ddw_zero_point_renormalization[:,:].real
# TDR
data = ds.createVariable(
'temperature_dependent_renormalization','d',
('number_of_spins','number_of_kpoints',
'max_number_of_states','number_of_temperature'))
if self.temperature_dependent_renormalization is not None:
# FIXME number of spin
data[0,:,:,:] = (
self.temperature_dependent_renormalization[:,:,:].real)
# ZPR
data = ds.createVariable(
'zero_point_broadening','d',
('number_of_spins', 'number_of_kpoints',
'max_number_of_states'))
if self.zero_point_broadening is not None:
# FIXME number of spin
data[0,:,:] = self.zero_point_broadening[:,:].real
zpr_modes = ds.createVariable(
'zero_point_renormalization_by_modes','d',
('number_of_modes', 'number_of_spins', 'number_of_kpoints',
'max_number_of_states'))
if self.zero_point_renormalization_modes is not None:
zpr_modes[:,0,:,:] = (
self.zero_point_renormalization_modes[:,:,:])
# TDB
data = ds.createVariable(
'temperature_dependent_broadening','d',
('number_of_spins','number_of_kpoints',
'max_number_of_states','number_of_temperature'))
if self.temperature_dependent_broadening is not None:
# FIXME number of spin
data[0,:,:,:] = (
self.temperature_dependent_broadening[:,:,:].real)
# ZSE
self_energy = ds.createVariable('self_energy','d',
('number_of_spins', 'number_of_kpoints',
'max_number_of_states', 'number_of_frequencies', 'cplex'))
if self.self_energy is not None:
# FIXME number of spin
self_energy[0,:,:,:,0] = self.self_energy[:,:,:].real
self_energy[0,:,:,:,1] = self.self_energy[:,:,:].imag
# TSE
self_energy_T = ds.createVariable(
'self_energy_temperature_dependent','d',
('number_of_spins', 'number_of_kpoints',
'max_number_of_states', 'number_of_frequencies',
'number_of_temperature', 'cplex'))
if self.self_energy_T is not None:
# FIXME number of spin
self_energy_T[0,:,:,:,:,0] = self.self_energy_T[:,:,:,:].real
self_energy_T[0,:,:,:,:,1] = self.self_energy_T[:,:,:,:].imag
# ZSF
spectral_function = ds.createVariable(
'spectral_function','d',
('number_of_spins', 'number_of_kpoints',
'max_number_of_states', 'number_of_frequencies'))
if self.spectral_function is not None:
# FIXME number of spin
spectral_function[0,:,:,:] = self.spectral_function[:,:,:]
spectral_function_T = ds.createVariable(
'spectral_function_temperature_dependent','d',
('number_of_spins', 'number_of_kpoints',
'max_number_of_states', 'number_of_frequencies',
'number_of_temperature'))
# TSF
if self.spectral_function_T is not None:
# FIXME number of spin
spectral_function_T[0,:,:,:,:] = (
self.spectral_function_T[:,:,:,:])
return
@master_only
def write_renormalization(self):
"""Write the computed renormalization in a text file."""
create_directory(self.ren_dat)
with open(self.ren_dat, "w") as f:
if self.zero_point_renormalization is not None:
f.write("Total zero point renormalization (eV) for "
"{} Q points\n".format(self.nqpt))
for ikpt, kpt in enumerate(self.kpts):
f.write('Kpt: {0[0]} {0[1]} {0[2]}\n'.format(kpt))
for line in formatted_array_lines(
self.zero_point_renormalization[ikpt,:].real*Ha2eV):
f.write(line)
if self.temperature_dependent_renormalization is not None:
f.write("Temperature dependence at Gamma (eV)\n")
for iband in range(self.nband):
f.write('Band: {}\n'.format(iband))
for tt, T in enumerate(self.temperatures):
ren = (
self.temperature_dependent_renormalization[0,iband,tt]
.real * Ha2eV)
f.write("{:>8.1f} {:>12.8f}\n".format(T, ren))
@master_only
def write_broadening(self):
"""Write the computed broadening in a text file."""
create_directory(self.ren_dat)
with open(self.ren_dat, "w") as f:
if self.zero_point_broadening is not None:
f.write("Total zero point broadening (eV) for "
"{} Q points\n".format(self.nqpt))
for ikpt, kpt in enumerate(self.kpts):
f.write('Kpt: {0[0]} {0[1]} {0[2]}\n'.format(kpt))
for line in formatted_array_lines(
self.zero_point_broadening[ikpt,:].real*Ha2eV):
f.write(line)
if self.temperature_dependent_broadening is not None:
f.write("Temperature dependence at Gamma\n")
for iband in range(self.nband):
f.write('Band: {}\n'.format(iband))
for tt, T in enumerate(self.temperatures):
brd = (self.temperature_dependent_broadening[0,iband,tt]
.real * Ha2eV)
f.write("{:>8.1f} {:>12.8f}\n".format(T, brd))
|
jmbeuken/abinit
|
scripts/post_processing/ElectronPhononCoupling/ElectronPhononCoupling/core/epcanalyzer.py
|
Python
|
gpl-3.0
| 38,739
|
[
"ABINIT",
"NetCDF"
] |
ec0a1f700f7281a43ad9d77ad979c9d00647c9e83928aa453e0c8e01387d8e25
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2012 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Database Repair/Check and Repair Database"""
# pylint: disable=too-many-statements,too-many-locals,too-many-branches
# pylint: disable=wrong-import-position,too-many-public-methods,no-self-use
# pylint: disable=too-many-arguments
# -------------------------------------------------------------------------
#
# python modules
#
# -------------------------------------------------------------------------
import os
from io import StringIO
from collections import defaultdict
import time
# ------------------------------------------------------------------------
#
# Set up logging
#
# ------------------------------------------------------------------------
import logging
# -------------------------------------------------------------------------
#
# gtk modules
#
# -------------------------------------------------------------------------
from gi.repository import Gtk
# -------------------------------------------------------------------------
#
# Gramps modules
#
# -------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
ngettext = glocale.translation.ngettext # else "nearby" comments are ignored
from gramps.gen.lib import (Citation, Event, EventType, Family, Media,
Name, Note, Person, Place, Repository, Source,
StyledText, Tag)
from gramps.gen.db import DbTxn, CLASS_TO_KEY_MAP
from gramps.gen.config import config
from gramps.gen.utils.id import create_id
from gramps.gen.utils.db import family_name
from gramps.gen.utils.unknown import make_unknown
from gramps.gen.utils.file import (media_path_full, find_file)
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.utils.file import create_checksum
from gramps.gui.plug import tool
from gramps.gui.dialog import OkDialog, MissingMediaDialog
from gramps.gen.display.name import displayer as _nd
from gramps.gui.glade import Glade
from gramps.gen.errors import HandleError
# table for handling control chars in notes.
# All except 09, 0A, 0D are replaced with space.
# Used with str.translate(): keys are code points (ints), the value is the
# replacement string. TAB (0x09), LF (0x0A) and CR (0x0D) are kept.
strip_dict = dict.fromkeys(list(range(9)) + list(range(11, 13)) +
                           list(range(14, 32)), " ")
class ProgressMeter:
    """No-op stand-in for the GUI progress meter.

    Used when there is no user interface; replaced by the real
    gramps.gui.utils.ProgressMeter when a uistate is available.
    """

    def __init__(self, *args, **kwargs):
        """Accept and ignore any constructor arguments."""

    def set_pass(self, *args):
        """Ignore the pass configuration."""

    def step(self):
        """Ignore the progress step."""

    def close(self):
        """Ignore the close request."""
# -------------------------------------------------------------------------
#
# Low Level repair
#
# -------------------------------------------------------------------------
def cross_table_duplicates(db, uistate):
    """
    Function to find the presence of identical handles that occur in different
    database tables.
    Assumes there are no intable duplicates, see low_level function.
    :param db: the database to check
    :type db: :class:`gen.db.read.DbBsddbRead`
    :returns: the presence of cross table duplicate handles
    :rtype: bool
    """
    parent = uistate.window if uistate else None
    progress = ProgressMeter(_('Checking Database'), '', parent=parent)
    progress.set_pass(_('Looking for cross table duplicates'), 9)
    logging.info('Looking for cross table duplicates')
    handle_getters = (db.get_person_handles,
                      db.get_family_handles,
                      db.get_event_handles,
                      db.get_place_handles,
                      db.get_source_handles,
                      db.get_citation_handles,
                      db.get_media_handles,
                      db.get_repository_handles,
                      db.get_note_handles)
    total_nr_handles = 0
    all_handles = set()
    for getter in handle_getters:
        handles = getter()
        total_nr_handles += len(handles)
        all_handles.update(handles)
        progress.step()
    progress.close()
    num_errors = total_nr_handles - len(all_handles)
    if num_errors:
        logging.warning('    FAIL: Found %d cross table duplicates',
                        num_errors)
    else:
        logging.info('    OK: No cross table duplicates')
    return total_nr_handles > len(all_handles)
# -------------------------------------------------------------------------
#
# runTool
#
# -------------------------------------------------------------------------
class Check(tool.BatchTool):
    """Batch tool that runs every integrity check and repair pass.

    All work happens in __init__: the low-level cross-table check runs
    first (outside any transaction), then every CheckIntegrity pass is
    executed inside a single batch transaction.
    """

    def __init__(self, dbstate, user, options_class, name, callback=None):
        uistate = user.uistate
        tool.BatchTool.__init__(self, dbstate, user, options_class, name)
        if self.fail:
            return
        # cli is True when running without a GUI (no uistate).
        cli = uistate is None
        if uistate:
            # Swap the module-level no-op ProgressMeter for the real GUI one.
            from gramps.gui.utils import ProgressMeter as PM
            global ProgressMeter
            ProgressMeter = PM
        if self.db.readonly:
            # TODO: split plugin in a check and repair part to support
            # checking of a read only database
            return
        # The low-level repair is bypassing the transaction mechanism.
        # As such, we run it before starting the transaction.
        # We only do this for the dbdir backend.
        if self.db.__class__.__name__ == 'DbBsddb':
            if cross_table_duplicates(self.db, uistate):
                CheckReport(uistate, _(
                    "Your Family Tree contains cross table duplicate handles."
                    "\n "
                    "This is bad and can be fixed by making a backup of your\n"
                    "Family Tree and importing that backup in an empty family"
                    "\n"
                    "tree. The rest of the checking is skipped, the Check and"
                    "\n"
                    "Repair tool should be run anew on this new Family Tree."),
                            cli)
                return
        with DbTxn(_("Check Integrity"), self.db, batch=True) as trans:
            self.db.disable_signals()
            checker = CheckIntegrity(dbstate, uistate, trans)
            # start with empty objects, broken links can be corrected below
            # then. This is done before fixing encoding and missing photos,
            # since otherwise we will be trying to fix empty records which are
            # then going to be deleted.
            checker.cleanup_empty_objects()
            checker.fix_encoding()
            checker.fix_alt_place_names()
            checker.fix_ctrlchars_in_notes()
            checker.cleanup_missing_photos(cli)
            checker.cleanup_deleted_name_formats()
            # Repeat the family-link passes until the error count stops
            # changing: one repair can expose further problems.
            prev_total = -1
            total = 0
            while prev_total != total:
                prev_total = total
                checker.check_for_broken_family_links()
                checker.check_parent_relationships()
                checker.cleanup_empty_families(cli)
                checker.cleanup_duplicate_spouses()
                total = checker.family_errors()
            checker.fix_duplicated_grampsid()
            checker.check_events()
            checker.check_person_references()
            checker.check_family_references()
            checker.check_place_references()
            checker.check_source_references()
            checker.check_citation_references()
            checker.check_media_references()
            checker.check_repo_references()
            checker.check_note_references()
            checker.check_tag_references()
            checker.check_checksum()
            checker.check_media_sourceref()
        # for bsddb the check_backlinks doesn't work in 'batch' mode because
        # the table used for backlinks is closed.
        with DbTxn(_("Check Backlink Integrity"), self.db,
                   batch=False) as checker.trans:
            checker.check_backlinks()
        # rebuilding reference maps needs to be done outside of a transaction
        # to avoid nesting transactions.
        if checker.bad_backlinks:
            checker.progress.set_pass(_('Rebuilding reference maps...'), 6)
            logging.info('Rebuilding reference maps...')
            self.db.reindex_reference_map(checker.callback)
        else:
            logging.info('    OK: no backlink problems found')
        self.db.enable_signals()
        self.db.request_rebuild()
        errs = checker.build_report(uistate)
        if errs:
            CheckReport(uistate, checker.text.getvalue(), cli)
# -------------------------------------------------------------------------
#
#
#
# -------------------------------------------------------------------------
class CheckIntegrity:
    def __init__(self, dbstate, uistate, trans):
        """Set up all error accumulators and helpers for the checks.

        :param dbstate: database state; the database to check is dbstate.db
        :param uistate: GUI state, or None when running from the CLI
        :param trans: the open transaction that repairs are committed to
        """
        self.uistate = uistate
        if self.uistate:
            self.parent_window = self.uistate.window
        else:
            self.parent_window = None
        self.db = dbstate.db
        self.trans = trans
        # Accumulators for media-file problems.
        self.bad_photo = []
        self.replaced_photo = []
        self.removed_photo = []
        # Accumulators for family-link problems.
        self.empty_family = []
        self.broken_links = []
        self.duplicate_links = []
        self.broken_parent_links = []
        self.fam_rel = []
        # Handles of objects holding dangling references, per target type.
        self.invalid_events = set()
        self.invalid_birth_events = set()
        self.invalid_death_events = set()
        self.invalid_person_references = set()
        self.invalid_family_references = set()
        self.invalid_place_references = set()
        self.invalid_source_references = set()
        self.invalid_citation_references = set()
        self.invalid_repo_references = set()
        self.invalid_media_references = set()
        self.invalid_note_references = set()
        self.invalid_tag_references = set()
        self.invalid_dates = []
        self.removed_name_format = []
        self.empty_objects = defaultdict(list)
        self.replaced_sourceref = []
        self.place_errors = 0
        self.duplicated_gramps_ids = 0
        self.bad_backlinks = 0
        # The textual report is accumulated here and shown at the end.
        self.text = StringIO()
        self.last_img_dir = config.get('behavior.addmedia-image-dir')
        self.progress = ProgressMeter(_('Checking Database'), '',
                                      parent=self.parent_window)
        # Note attached to objects created as replacements for missing
        # references.
        self.explanation = Note(_(
            'Objects referenced by this note were referenced but '
            'missing so that is why they have been created '
            'when you ran Check and Repair on %s.') %
            time.strftime('%x %X', time.localtime()))
        self.explanation.set_handle(create_id())
def family_errors(self):
return (len(self.broken_parent_links) +
len(self.broken_links) +
len(self.empty_family) +
len(self.duplicate_links))
def cleanup_deleted_name_formats(self):
"""
Permanently remove deleted name formats from db.
When user deletes custom name format those are not removed only marked
as "inactive". This method does the cleanup of the name format table,
as well as fixes the display_as, sort_as values for each Name in the
db.
"""
self.progress.set_pass(_('Looking for invalid name format references'),
self.db.get_number_of_people())
logging.info('Looking for invalid name format references')
deleted_name_formats = [number for (number, name, dummy, act)
in self.db.name_formats if not act]
# remove the invalid references from all Name objects
for person_handle in self.db.get_person_handles():
person = self.db.get_person_from_handle(person_handle)
p_changed = False
name = person.get_primary_name()
if name.get_sort_as() in deleted_name_formats:
name.set_sort_as(Name.DEF)
p_changed = True
if name.get_display_as() in deleted_name_formats:
name.set_display_as(Name.DEF)
p_changed = True
if p_changed:
person.set_primary_name(name)
a_changed = False
name_list = []
for name in person.get_alternate_names():
if name.get_sort_as() in deleted_name_formats:
name.set_sort_as(Name.DEF)
a_changed = True
if name.get_display_as() in deleted_name_formats:
name.set_display_as(Name.DEF)
a_changed = True
name_list.append(name)
if a_changed:
person.set_alternate_names(name_list)
if p_changed or a_changed:
self.db.commit_person(person, self.trans)
self.removed_name_format.append(person_handle)
self.progress.step()
# update the custom name name format table
for number in deleted_name_formats:
_nd.del_name_format(number)
self.db.name_formats = _nd.get_name_format(only_custom=True,
only_active=False)
if len(self.removed_name_format) == 0:
logging.info(' OK: no invalid name formats found found')
def cleanup_duplicate_spouses(self):
self.progress.set_pass(_('Looking for duplicate spouses'),
self.db.get_number_of_people())
logging.info('Looking for duplicate spouses')
previous_errors = len(self.duplicate_links)
for handle in self.db.get_person_handles():
pers = self.db.get_person_from_handle(handle)
splist = pers.get_family_handle_list()
if len(splist) != len(set(splist)):
new_list = []
for value in splist:
if value not in new_list:
new_list.append(value)
self.duplicate_links.append((handle, value))
pers.set_family_handle_list(new_list)
self.db.commit_person(pers, self.trans)
self.progress.step()
if previous_errors == len(self.duplicate_links):
logging.info(' OK: no duplicate spouses found')
def fix_encoding(self):
self.progress.set_pass(_('Looking for character encoding errors'),
self.db.get_number_of_media())
logging.info('Looking for character encoding errors')
error_count = 0
for handle in self.db.get_media_handles():
data = self.db.get_raw_media_data(handle)
if not isinstance(data[2], str) or not isinstance(data[4], str):
obj = self.db.get_media_from_handle(handle)
if not isinstance(data[2], str):
obj.path = obj.path.decode('utf-8')
logging.warning(' FAIL: encoding error on media object '
'"%(gid)s" path "%(path)s"',
{'gid': obj.gramps_id, 'path': obj.path})
if not isinstance(data[4], str):
obj.desc = obj.desc.decode('utf-8')
logging.warning(' FAIL: encoding error on media object '
'"%(gid)s" description "%(desc)s"',
{'gid': obj.gramps_id, 'desc': obj.desc})
self.db.commit_media(obj, self.trans)
error_count += 1
# Once we are here, fix the mime string if not str
if not isinstance(data[3], str):
obj = self.db.get_media_from_handle(handle)
try:
if data[3] == str(data[3]):
obj.mime = str(data[3])
else:
obj.mime = ""
except:
obj.mime = ""
self.db.commit_media(obj, self.trans)
logging.warning(' FAIL: encoding error on media object '
'"%(desc)s" mime "%(mime)s"',
{'desc': obj.desc, 'mime': obj.mime})
error_count += 1
self.progress.step()
if error_count == 0:
logging.info(' OK: no encoding errors found')
def fix_ctrlchars_in_notes(self):
self.progress.set_pass(_('Looking for ctrl characters in notes'),
self.db.get_number_of_notes())
logging.info('Looking for ctrl characters in notes')
error_count = 0
for handle in self.db.get_note_handles():
note = self.db.get_note_from_handle(handle)
stext = note.get_styledtext()
old_text = str(stext)
new_text = old_text.translate(strip_dict)
if old_text != new_text:
logging.warning(' FAIL: control characters found in note'
' "%s"', note.get_gramps_id())
error_count += 1
# Commit only if ctrl char found.
note.set_styledtext(StyledText(text=new_text,
tags=stext.get_tags()))
self.db.commit_note(note, self.trans)
self.progress.step()
if error_count == 0:
logging.info(' OK: no ctrl characters in notes found')
def fix_alt_place_names(self):
"""
This scans all places and cleans up alternative names. It removes
Blank names, names that are duplicates of the primary name, and
duplicates in the alt_names list.
"""
self.progress.set_pass(_('Looking for bad alternate place names'),
self.db.get_number_of_places())
logging.info('Looking for bad alternate place names')
for handle in self.db.get_place_handles():
place = self.db.get_place_from_handle(handle)
fixed_alt_names = []
fixup = False
for name in place.get_alternative_names():
if not name.value or \
name == place.name or \
name in fixed_alt_names:
fixup = True
continue
fixed_alt_names.append(name)
if fixup:
place.set_alternative_names(fixed_alt_names)
self.db.commit_place(place, self.trans)
self.place_errors += 1
self.progress.step()
if self.place_errors == 0:
logging.info(' OK: no bad alternate places found')
else:
logging.info(' %d bad alternate places found and fixed',
self.place_errors)
    def check_for_broken_family_links(self):
        """Repair broken person<->family links in both directions.

        First pass: for every family, drop father/mother/child handles that
        point to non-existent persons, add back-references that persons are
        missing, and remove impossible or duplicate child references.
        Second pass: for every person, drop references to non-existent
        families and references to families of which the person is not
        actually a member. All problems are appended to
        self.broken_parent_links / self.broken_links.
        """
        # Check persons referenced by the family objects
        fhandle_list = self.db.get_family_handles()
        self.progress.set_pass(_('Looking for broken family links'),
                               len(fhandle_list) +
                               self.db.get_number_of_people())
        logging.info('Looking for broken family links')
        previous_errors = len(self.broken_parent_links + self.broken_links)
        for family_handle in fhandle_list:
            family = self.db.get_family_from_handle(family_handle)
            father_handle = family.get_father_handle()
            mother_handle = family.get_mother_handle()
            if father_handle:
                try:
                    father = self.db.get_person_from_handle(father_handle)
                except HandleError:
                    # The person referenced by the father handle does not exist
                    # in the database
                    # This is tested by TestcaseGenerator where the mother is
                    # "Broken6"
                    family.set_father_handle(None)
                    self.db.commit_family(family, self.trans)
                    self.broken_parent_links.append((father_handle,
                                                     family_handle))
                    logging.warning("    FAIL: family '%(fam_gid)s' "
                                    "father handle '%(hand)s' does not exist",
                                    {'fam_gid': family.gramps_id,
                                     'hand': father_handle})
                    father_handle = None
            if mother_handle:
                try:
                    mother = self.db.get_person_from_handle(mother_handle)
                except HandleError:
                    # The person referenced by the mother handle does not exist
                    # in the database
                    # This is tested by TestcaseGenerator where the mother is
                    # "Broken7"
                    family.set_mother_handle(None)
                    self.db.commit_family(family, self.trans)
                    self.broken_parent_links.append((mother_handle,
                                                     family_handle))
                    logging.warning("    FAIL: family '%(fam_gid)s' "
                                    "mother handle '%(hand)s' does not exist",
                                    {'fam_gid': family.gramps_id,
                                     'hand': mother_handle})
                    mother_handle = None
            if father_handle and father and \
                    family_handle not in father.get_family_handle_list():
                # The referenced father has no reference back to the family
                # This is tested by TestcaseGenerator where the father is
                # "Broken1"
                self.broken_parent_links.append((father_handle, family_handle))
                father.add_family_handle(family_handle)
                self.db.commit_person(father, self.trans)
                logging.warning("    FAIL: family '%(fam_gid)s' father "
                                "'%(hand)s' does not refer back to the family",
                                {'fam_gid': family.gramps_id,
                                 'hand': father_handle})
            if mother_handle and mother and \
                    family_handle not in mother.get_family_handle_list():
                # The referenced mother has no reference back to the family.
                # This is tested by TestcaseGenerator where the father is
                # "Broken4"
                self.broken_parent_links.append((mother_handle, family_handle))
                mother.add_family_handle(family_handle)
                self.db.commit_person(mother, self.trans)
                logging.warning("    FAIL: family '%(fam_gid)s' mother "
                                "'%(hand)s' does not refer back to the family",
                                {'fam_gid': family.gramps_id,
                                 'hand': mother_handle})
            for child_ref in family.get_child_ref_list():
                child_handle = child_ref.ref
                try:
                    child = self.db.get_person_from_handle(child_handle)
                except HandleError:
                    # The person referenced by the child handle
                    # does not exist in the database
                    # This is tested by TestcaseGenerator where the father
                    # is "Broken20"
                    logging.warning("    FAIL: family '%(fam_gid)s' child "
                                    "'%(hand)s' does not exist in the "
                                    "database",
                                    {'fam_gid': family.gramps_id,
                                     'hand': child_handle})
                    family.remove_child_ref(child_ref)
                    self.db.commit_family(family, self.trans)
                    self.broken_links.append((child_handle, family_handle))
                else:
                    if child_handle in [father_handle, mother_handle]:
                        # The child is one of the parents: impossible Remove
                        # such child from the family
                        # This is tested by TestcaseGenerator where the father
                        # is "Broken19"
                        logging.warning("    FAIL: family '%(fam_gid)s' "
                                        "child '%(child_gid)s' is one of the "
                                        "parents",
                                        {'fam_gid': family.gramps_id,
                                         'child_gid': child.gramps_id})
                        family.remove_child_ref(child_ref)
                        self.db.commit_family(family, self.trans)
                        self.broken_links.append((child_handle, family_handle))
                        continue
                    if family_handle == child.get_main_parents_family_handle():
                        continue
                    if family_handle not in \
                            child.get_parent_family_handle_list():
                        # The referenced child has no reference to the family
                        # This is tested by TestcaseGenerator where the father
                        # is "Broken8"
                        logging.warning(
                            "    FAIL: family '%(fam_gid)s' "
                            "child '%(child_gid)s' has no reference"
                            " to the family. Reference added",
                            {'fam_gid': family.gramps_id,
                             'child_gid': child.gramps_id})
                        child.add_parent_family_handle(family_handle)
                        self.db.commit_person(child, self.trans)
            # Drop duplicate child references within the same family.
            new_ref_list = []
            new_ref_handles = []
            replace = False
            for child_ref in family.get_child_ref_list():
                child_handle = child_ref.ref
                if child_handle in new_ref_handles:
                    replace = True
                else:
                    new_ref_list.append(child_ref)
                    new_ref_handles.append(child_handle)
            if replace:
                family.set_child_ref_list(new_ref_list)
                self.db.commit_family(family, self.trans)
            self.progress.step()
        # Check persons membership in referenced families
        for person_handle in self.db.get_person_handles():
            person = self.db.get_person_from_handle(person_handle)
            # Deduplicate the parent-family list first.
            phandle_list = person.get_parent_family_handle_list()
            new_list = list(set(phandle_list))
            if len(phandle_list) != len(new_list):
                person.set_parent_family_handle_list(new_list)
                self.db.commit_person(person, self.trans)
            for par_family_handle in person.get_parent_family_handle_list():
                try:
                    family = self.db.get_family_from_handle(par_family_handle)
                except HandleError:
                    person.remove_parent_family_handle(par_family_handle)
                    self.db.commit_person(person, self.trans)
                    continue
                for child_handle in [child_ref.ref for child_ref
                                     in family.get_child_ref_list()]:
                    if child_handle == person_handle:
                        break
                else:
                    # Person is not a child in the referenced parent family
                    # This is tested by TestcaseGenerator where the father
                    # is "Broken9"
                    logging.warning("    FAIL: family '%(fam_gid)s' person "
                                    "'%(pers_gid)s' is not a child in the "
                                    "referenced parent family",
                                    {'fam_gid': family.gramps_id,
                                     'pers_gid': person.gramps_id})
                    person.remove_parent_family_handle(par_family_handle)
                    self.db.commit_person(person, self.trans)
                    # NOTE(review): 'family_handle' here is the loop variable
                    # left over from the first pass, not 'par_family_handle'
                    # -- looks suspicious; confirm which handle should be
                    # recorded.
                    self.broken_links.append((person_handle, family_handle))
            for family_handle in person.get_family_handle_list():
                try:
                    family = self.db.get_family_from_handle(family_handle)
                except HandleError:
                    # The referenced family does not exist in database
                    # This is tested by TestcaseGenerator where the father
                    # is "Broken20"
                    logging.warning("    FAIL: person '%(pers_gid)s' refers "
                                    "to family '%(hand)s' which is not in the "
                                    "database",
                                    {'pers_gid': person.gramps_id,
                                     'hand': family_handle})
                    person.remove_family_handle(family_handle)
                    self.db.commit_person(person, self.trans)
                    self.broken_links.append((person_handle, family_handle))
                    continue
                if family.get_father_handle() == person_handle:
                    continue
                if family.get_mother_handle() == person_handle:
                    continue
                # The person is not a member of the referenced family
                # This is tested by TestcaseGenerator where the father is
                # "Broken2" and the family misses the link to the father, and
                # where the mother is "Broken3" and the family misses the link
                # to the mother
                logging.warning("    FAIL: family '%(fam_gid)s' person "
                                "'%(pers_gid)s' is not member of the "
                                "referenced family",
                                {'fam_gid': family.gramps_id,
                                 'pers_gid': person.gramps_id})
                person.remove_family_handle(family_handle)
                self.db.commit_person(person, self.trans)
                self.broken_links.append((person_handle, family_handle))
            self.progress.step()
        if previous_errors == len(self.broken_parent_links +
                                  self.broken_links):
            logging.info('    OK: no broken family links found')
def cleanup_missing_photos(self, cli=0):
    """Find media objects whose files no longer exist on disk.

    :param cli: when non-zero, run non-interactively: missing files are
                only logged and recorded in self.bad_photo. When zero, a
                MissingMediaDialog asks the user to remove, keep, or
                replace each missing media object; the chosen action can
                become the default for all later files.
    """
    self.progress.set_pass(_('Looking for unused objects'),
                           len(self.db.get_media_handles()))
    logging.info('Looking for missing photos')
    # Default action applied to subsequent missing files:
    # 0 = ask the user, 1 = remove, 2 = keep reference, 3 = select new file.
    # Updated from the dialog's default_action after the first prompt.
    missmedia_action = 0
    # ---------------------------------------------------------------------
    # The three closures below read `objectid` from the enclosing loop at
    # the bottom of this method (late binding: they act on the media object
    # currently being processed).
    def remove_clicked():
        # File is lost => remove all references and the object itself
        for handle in self.db.get_person_handles(sort_handles=False):
            person = self.db.get_person_from_handle(handle)
            if person.has_media_reference(objectid):
                person.remove_media_references([objectid])
                self.db.commit_person(person, self.trans)
        for handle in self.db.get_family_handles():
            family = self.db.get_family_from_handle(handle)
            if family.has_media_reference(objectid):
                family.remove_media_references([objectid])
                self.db.commit_family(family, self.trans)
        for handle in self.db.get_event_handles():
            event = self.db.get_event_from_handle(handle)
            if event.has_media_reference(objectid):
                event.remove_media_references([objectid])
                self.db.commit_event(event, self.trans)
        for handle in self.db.get_source_handles():
            source = self.db.get_source_from_handle(handle)
            if source.has_media_reference(objectid):
                source.remove_media_references([objectid])
                self.db.commit_source(source, self.trans)
        for handle in self.db.get_citation_handles():
            citation = self.db.get_citation_from_handle(handle)
            if citation.has_media_reference(objectid):
                citation.remove_media_references([objectid])
                self.db.commit_citation(citation, self.trans)
        for handle in self.db.get_place_handles():
            place = self.db.get_place_from_handle(handle)
            if place.has_media_reference(objectid):
                place.remove_media_references([objectid])
                self.db.commit_place(place, self.trans)
        self.removed_photo.append(objectid)
        self.db.remove_media(objectid, self.trans)
        logging.warning(' FAIL: media object and all references to '
                        'it removed')
    def leave_clicked():
        # Keep the (broken) reference; just record it.
        self.bad_photo.append(objectid)
        logging.warning(' FAIL: references to missing file kept')
    def select_clicked():
        # File is lost => select a file to replace the lost one
        def fs_close_window(dummy):
            self.bad_photo.append(objectid)
            logging.warning(' FAIL: references to missing file '
                            'kept')
        def fs_ok_clicked(obj):
            name = fs_top.get_filename()
            if os.path.isfile(name):
                obj = self.db.get_media_from_handle(objectid)
                obj.set_path(name)
                self.db.commit_media(obj, self.trans)
                self.replaced_photo.append(objectid)
                # Remember the directory for the next file chooser.
                self.last_img_dir = os.path.dirname(name)
                logging.warning(' FAIL: media object reselected to '
                                '"%s"', name)
            else:
                self.bad_photo.append(objectid)
                logging.warning(' FAIL: references to missing file '
                                'kept')
        fs_top = Gtk.FileChooserDialog(
            title="%s - Gramps" % _("Select file"),
            transient_for=self.parent_window)
        fs_top.add_buttons(_('_Cancel'), Gtk.ResponseType.CANCEL,
                           _('_OK'), Gtk.ResponseType.OK)
        fs_top.set_current_folder(self.last_img_dir)
        response = fs_top.run()
        if response == Gtk.ResponseType.OK:
            fs_ok_clicked(fs_top)
        elif response == Gtk.ResponseType.CANCEL:
            fs_close_window(fs_top)
        fs_top.destroy()
    # --------------------------------------------------------------------
    for objectid in self.db.get_media_handles():
        obj = self.db.get_media_from_handle(objectid)
        photo_name = media_path_full(self.db, obj.get_path())
        photo_desc = obj.get_description()
        if photo_name is not None and photo_name != "" \
                and not find_file(photo_name):
            if cli:
                logging.warning(" FAIL: media file %s was not found.",
                                photo_name)
                self.bad_photo.append(objectid)
            else:
                if missmedia_action == 0:
                    logging.warning(' FAIL: media object "%(desc)s" '
                                    'reference to missing file "%(name)s" '
                                    'found',
                                    {'desc': photo_desc,
                                     'name': photo_name})
                    mmd = MissingMediaDialog(
                        _("Media object could not be found"),
                        _("The file:\n%(file_name)s\nis referenced in "
                          "the database, but no longer exists.\n"
                          "The file may have been deleted or moved to "
                          "a different location.\n"
                          "You may choose to either remove the "
                          "reference from the database,\n"
                          "keep the reference to the missing file, "
                          "or select a new file.")
                        % {'file_name': '<b>%s</b>' % photo_name},
                        remove_clicked, leave_clicked, select_clicked,
                        parent=self.uistate.window)
                    missmedia_action = mmd.default_action
                elif missmedia_action == 1:
                    logging.warning(' FAIL: media object "%(desc)s" '
                                    'reference to missing file "%(name)s" '
                                    'found',
                                    {'desc': photo_desc,
                                     'name': photo_name})
                    remove_clicked()
                elif missmedia_action == 2:
                    logging.warning(' FAIL: media object "%(desc)s" '
                                    'reference to missing file "%(name)s" '
                                    'found',
                                    {'desc': photo_desc,
                                     'name': photo_name})
                    leave_clicked()
                elif missmedia_action == 3:
                    logging.warning(' FAIL: media object "%(desc)s" '
                                    'reference to missing file "%(name)s" '
                                    'found',
                                    {'desc': photo_desc,
                                     'name': photo_name})
                    select_clicked()
        self.progress.step()
    if len(self.bad_photo + self.removed_photo) == 0:
        logging.info(' OK: no missing photos found')
def cleanup_empty_objects(self):
    """Find and remove primary objects that contain no data.

    An object counts as empty when its serialized form matches that of a
    freshly constructed object of the same type, ignoring the handle,
    gramps_id and change timestamp (the comparison is done by
    self._check_empty). Empty handles are collected while a cursor is
    open and removed afterwards, since deleting during cursor iteration
    would invalidate the cursor.
    """
    # the position of the change column in the primary objects
    CHANGE_PERSON = 17
    CHANGE_FAMILY = 12
    CHANGE_EVENT = 10
    CHANGE_SOURCE = 8
    CHANGE_CITATION = 9
    CHANGE_PLACE = 11
    CHANGE_MEDIA = 8
    CHANGE_REPOS = 7
    CHANGE_NOTE = 5
    # Reference serializations of brand-new (hence empty) objects.
    empty_person_data = Person().serialize()
    empty_family_data = Family().serialize()
    empty_event_data = Event().serialize()
    empty_source_data = Source().serialize()
    empty_citation_data = Citation().serialize()
    empty_place_data = Place().serialize()
    empty_media_data = Media().serialize()
    empty_repos_data = Repository().serialize()
    empty_note_data = Note().serialize()
    _db = self.db
    def _empty(empty, flag):
        ''' Closure for dispatch table, below '''
        def _fx(value):
            return self._check_empty(value, empty, flag)
        return _fx
    table = (
        # Dispatch table for cleaning up empty objects. Each entry is
        # a tuple containing:
        #    0. Type of object being cleaned up
        #    1. function to read the object from the database
        #    2. function returning cursor over the object type
        #    3. function returning number of objects of this type
        #    4. text identifying the object being cleaned up
        #    5. function to check if the data is empty
        #    6. function to remove the object, if empty
        ('persons',
         _db.get_person_from_handle,
         _db.get_person_cursor,
         _db.get_number_of_people,
         _('Looking for empty people records'),
         _empty(empty_person_data, CHANGE_PERSON),
         _db.remove_person),
        ('families',
         _db.get_family_from_handle,
         _db.get_family_cursor,
         _db.get_number_of_families,
         _('Looking for empty family records'),
         _empty(empty_family_data, CHANGE_FAMILY),
         _db.remove_family),
        ('events',
         _db.get_event_from_handle,
         _db.get_event_cursor,
         _db.get_number_of_events,
         _('Looking for empty event records'),
         _empty(empty_event_data, CHANGE_EVENT),
         _db.remove_event),
        ('sources',
         _db.get_source_from_handle,
         _db.get_source_cursor,
         _db.get_number_of_sources,
         _('Looking for empty source records'),
         _empty(empty_source_data, CHANGE_SOURCE),
         _db.remove_source),
        ('citations',
         _db.get_citation_from_handle,
         _db.get_citation_cursor,
         _db.get_number_of_citations,
         _('Looking for empty citation records'),
         _empty(empty_citation_data, CHANGE_CITATION),
         _db.remove_citation),
        ('places',
         _db.get_place_from_handle,
         _db.get_place_cursor,
         _db.get_number_of_places,
         _('Looking for empty place records'),
         _empty(empty_place_data, CHANGE_PLACE),
         _db.remove_place),
        ('media',
         _db.get_media_from_handle,
         _db.get_media_cursor,
         _db.get_number_of_media,
         _('Looking for empty media records'),
         _empty(empty_media_data, CHANGE_MEDIA),
         _db.remove_media),
        ('repos',
         _db.get_repository_from_handle,
         _db.get_repository_cursor,
         _db.get_number_of_repositories,
         _('Looking for empty repository records'),
         _empty(empty_repos_data, CHANGE_REPOS),
         _db.remove_repository),
        ('notes',
         _db.get_note_from_handle,
         _db.get_note_cursor,
         _db.get_number_of_notes,
         _('Looking for empty note records'),
         _empty(empty_note_data, CHANGE_NOTE),
         _db.remove_note),
    )
    # Now, iterate over the table, dispatching the functions
    for (the_type, dummy, cursor_func, total_func,
         text, check_func, remove_func) in table:
        with cursor_func() as cursor:
            total = total_func()
            self.progress.set_pass(text, total)
            logging.info(text)
            for handle, data in cursor:
                self.progress.step()
                if check_func(data):
                    # we cannot remove here as that would destroy cursor
                    # so save the handles for later removal
                    logging.warning(' FAIL: empty %(type)s record with '
                                    'handle "%(hand)s" was found',
                                    {'type': the_type, 'hand': handle})
                    self.empty_objects[the_type].append(handle)
        # now remove
        for handle in self.empty_objects[the_type]:
            remove_func(handle, self.trans)
        if len(self.empty_objects[the_type]) == 0:
            logging.info(' OK: no empty %s found', the_type)
def _check_empty(self, data, empty_data, changepos):
"""compare the data with the data of an empty object
change, handle and gramps_id are not compared """
if changepos is not None:
return (data[2:changepos] == empty_data[2:changepos] and
data[changepos + 1:] == empty_data[changepos + 1:])
else:
return data[2:] == empty_data[2:]
def cleanup_empty_families(self, dummy):
    """Delete families that have no father, no mother and no children.

    Deleted families' gramps_ids are recorded in self.empty_family.
    """
    handles = self.db.get_family_handles()
    self.progress.set_pass(_('Looking for empty families'),
                           len(handles))
    logging.info('Looking for empty families')
    errors_before = len(self.empty_family)
    for handle in handles:
        self.progress.step()
        family = self.db.get_family_from_handle(handle)
        gramps_id = family.get_gramps_id()
        father = family.get_father_handle()
        mother = family.get_mother_handle()
        if father or mother or family.get_child_ref_list():
            continue
        # Nobody left in this family: record it and remove it.
        self.empty_family.append(gramps_id)
        self.delete_empty_family(handle)
    if errors_before == len(self.empty_family):
        logging.info(' OK: no empty families found')
def delete_empty_family(self, family_handle):
    """Detach every person from *family_handle*, then delete the family."""
    for person_handle in self.db.get_person_handles(sort_handles=False):
        person = self.db.get_person_from_handle(person_handle)
        removed_parent = person.remove_parent_family_handle(family_handle)
        removed_family = person.remove_family_handle(family_handle)
        # Only write the person back when a link was actually removed.
        if removed_parent or removed_family:
            self.db.commit_person(person, self.trans)
    self.db.remove_family(family_handle, self.trans)
def check_parent_relationships(self):
    """Repair father=female or mother=male in hetero families
    """
    handles = self.db.get_family_handles()
    self.progress.set_pass(_('Looking for broken parent relationships'),
                           len(handles))
    logging.info('Looking for broken parent relationships')
    errors_before = len(self.fam_rel)

    def gender_of(handle):
        # Returns None when the family has no parent on that side.
        if not handle:
            return None
        return self.db.get_person_from_handle(handle).get_gender()

    for family_handle in handles:
        self.progress.step()
        family = self.db.get_family_from_handle(family_handle)
        father_handle = family.get_father_handle()
        mother_handle = family.get_mother_handle()
        fgender = gender_of(father_handle)
        mgender = gender_of(mother_handle)
        misplaced = fgender == Person.FEMALE or mgender == Person.MALE
        if misplaced and fgender != mgender:
            # swap. note: (at most) one handle may be None
            logging.warning(' FAIL: the family "%s" has a father=female'
                            ' or mother=male in a different sex family',
                            family.gramps_id)
            family.set_father_handle(mother_handle)
            family.set_mother_handle(father_handle)
            self.db.commit_family(family, self.trans)
            self.fam_rel.append(family_handle)
    if errors_before == len(self.fam_rel):
        logging.info(' OK: no broken parent relationships found')
def check_events(self):
    '''Looking for event problems'''
    # For each person: validate the birth ref, the death ref, and the
    # general event ref list. For each family: validate the event ref
    # list. Dangling handles are replaced by 'Unknown' events via
    # make_unknown(); empty (None) handles get a freshly created id
    # first (the none_handle flag forces a re-commit of the owner).
    self.progress.set_pass(_('Looking for event problems'),
                           self.db.get_number_of_people() +
                           self.db.get_number_of_families())
    logging.info('Looking for event problems')
    for key in self.db.get_person_handles(sort_handles=False):
        self.progress.step()
        person = self.db.get_person_from_handle(key)
        birth_ref = person.get_birth_ref()
        none_handle = False
        if birth_ref:
            newref = birth_ref
            if not birth_ref.ref:
                # Ref object exists but has no handle: invent one so a
                # placeholder event can be attached below.
                none_handle = True
                birth_ref.ref = create_id()
            birth_handle = birth_ref.ref
            try:
                birth = self.db.get_event_from_handle(birth_handle)
            except HandleError:
                # The birth event referenced by the birth handle
                # does not exist in the database
                # This is tested by TestcaseGenerator person "Broken11"
                make_unknown(birth_handle, self.explanation.handle,
                             self.class_event, self.commit_event,
                             self.trans, type=EventType.BIRTH)
                logging.warning(' FAIL: the person "%(gid)s" refers to '
                                'a birth event "%(hand)s" which does not '
                                'exist in the database',
                                {'gid': person.gramps_id,
                                 'hand': birth_handle})
                self.invalid_events.add(key)
            else:
                if int(birth.get_type()) != EventType.BIRTH:
                    # Birth event was not of the type "Birth"
                    # This is tested by TestcaseGenerator person "Broken14"
                    logging.warning(' FAIL: the person "%(gid)s" refers'
                                    ' to a birth event which is of type '
                                    '"%(type)s" instead of Birth',
                                    {'gid': person.gramps_id,
                                     'type': int(birth.get_type())})
                    birth.set_type(EventType(EventType.BIRTH))
                    self.db.commit_event(birth, self.trans)
                    self.invalid_birth_events.add(key)
        if none_handle:
            # The ref's handle was rewritten above; persist the person.
            person.set_birth_ref(newref)
            self.db.commit_person(person, self.trans)
        none_handle = False
        death_ref = person.get_death_ref()
        if death_ref:
            newref = death_ref
            if not death_ref.ref:
                none_handle = True
                death_ref.ref = create_id()
            death_handle = death_ref.ref
            try:
                death = self.db.get_event_from_handle(death_handle)
            except HandleError:
                # The death event referenced by the death handle
                # does not exist in the database
                # This is tested by TestcaseGenerator person "Broken12"
                logging.warning(' FAIL: the person "%(gid)s" refers to '
                                'a death event "%(hand)s" which does not '
                                'exist in the database',
                                {'gid': person.gramps_id,
                                 'hand': death_handle})
                make_unknown(death_handle, self.explanation.handle,
                             self.class_event, self.commit_event,
                             self.trans, type=EventType.DEATH)
                self.invalid_events.add(key)
            else:
                if int(death.get_type()) != EventType.DEATH:
                    # Death event was not of the type "Death"
                    # This is tested by TestcaseGenerator person "Broken15"
                    logging.warning(
                        ' FAIL: the person "%(gid)s" refers to a death '
                        'event which is of type "%(type)s" instead of '
                        'Death',
                        {'gid': person.gramps_id,
                         'type': int(death.get_type())})
                    death.set_type(EventType(EventType.DEATH))
                    self.db.commit_event(death, self.trans)
                    self.invalid_death_events.add(key)
        if none_handle:
            person.set_death_ref(newref)
            self.db.commit_person(person, self.trans)
        none_handle = False
        newlist = []
        if person.get_event_ref_list():
            for event_ref in person.get_event_ref_list():
                newlist.append(event_ref)
                if not event_ref.ref:
                    none_handle = True
                    event_ref.ref = create_id()
                event_handle = event_ref.ref
                try:
                    self.db.get_event_from_handle(event_handle)
                except HandleError:
                    # The event referenced by the person
                    # does not exist in the database
                    # TODO: There is no better way?
                    # This is tested by TestcaseGenerator person "Broken11"
                    # This is tested by TestcaseGenerator person "Broken12"
                    # This is tested by TestcaseGenerator person "Broken13"
                    logging.warning(
                        ' FAIL: the person "%(gid)s" refers to an event'
                        ' "%(hand)s" which does not exist in the database',
                        {'gid': person.gramps_id,
                         'hand': event_handle})
                    make_unknown(event_handle, self.explanation.handle,
                                 self.class_event,
                                 self.commit_event, self.trans)
                    self.invalid_events.add(key)
            if none_handle:
                person.set_event_ref_list(newlist)
                self.db.commit_person(person, self.trans)
        elif not isinstance(person.get_event_ref_list(), list):
            # event_list is None or other garbage
            logging.warning(' FAIL: the person "%s" has an event ref '
                            'list which is invalid', (person.gramps_id))
            person.set_event_ref_list([])
            self.db.commit_person(person, self.trans)
            self.invalid_events.add(key)
    for key in self.db.get_family_handles():
        self.progress.step()
        family = self.db.get_family_from_handle(key)
        if family.get_event_ref_list():
            none_handle = False
            newlist = []
            for event_ref in family.get_event_ref_list():
                newlist.append(event_ref)
                if not event_ref.ref:
                    none_handle = True
                    event_ref.ref = create_id()
                event_handle = event_ref.ref
                try:
                    self.db.get_event_from_handle(event_handle)
                except HandleError:
                    # The event referenced by the family
                    # does not exist in the database
                    logging.warning(' FAIL: the family "%(gid)s" refers'
                                    ' to an event "%(hand)s" which does '
                                    'not exist in the database',
                                    {'gid': family.gramps_id,
                                     'hand': event_handle})
                    make_unknown(event_handle, self.explanation.handle,
                                 self.class_event, self.commit_event,
                                 self.trans)
                    self.invalid_events.add(key)
            if none_handle:
                family.set_event_ref_list(newlist)
                self.db.commit_family(family, self.trans)
        elif not isinstance(family.get_event_ref_list(), list):
            # event_list is None or other garbage
            logging.warning(' FAIL: the family "%s" has an event ref '
                            'list which is invalid', (family.gramps_id))
            family.set_event_ref_list([])
            self.db.commit_family(family, self.trans)
            self.invalid_events.add(key)
    if len(self.invalid_birth_events) + len(self.invalid_death_events) + \
            len(self.invalid_events) == 0:
        logging.info(' OK: no event problems found')
def check_backlinks(self):
    '''Looking for backlink reference problems'''
    # Three passes:
    #  (1) build our own forward-reference table (my_blinks) by walking
    #      every object, and capture the db's backlink table (db_blinks);
    #  (2) every real reference must have a matching db backlink;
    #  (3) every db backlink must point at an existing object that
    #      really references it.
    # Mismatches only increment self.bad_backlinks and are logged; the
    # actual repair happens elsewhere.
    total = self.db.get_total()
    self.progress.set_pass(_('Looking for backlink reference problems') +
                           ' (1)', total)
    logging.info('Looking for backlink reference problems')
    # dict of object handles indexed by forward link created here
    my_blinks = defaultdict(list)
    my_items = 0  # count of my backlinks for progress meter
    # dict of object handles indexed by forward link from db
    db_blinks = {}
    db_items = 0  # count of db backlinks for progress meter
    # first we assemble our own backlinks table, and while we have the
    # handle, gather up a second table with the db's backlinks
    for obj_class in CLASS_TO_KEY_MAP.keys():
        for handle in self.db.method("iter_%s_handles", obj_class)():
            self.progress.step()
            blinks = list(self.db.find_backlink_handles(handle))
            db_blinks[(obj_class, handle)] = blinks
            db_items += len(blinks)
            pri_obj = self.db.method('get_%s_from_handle',
                                     obj_class)(handle)
            handle_list = pri_obj.get_referenced_handles_recursively()
            my_items += len(handle_list)
            for item in handle_list:
                my_blinks[item].append((obj_class, handle))
    # Now we go through our backlinks and the dbs table comparing them
    # check that each real reference has a backlink in the db table
    self.progress.set_pass(_('Looking for backlink reference problems') +
                           ' (2)', my_items)
    for key, blinks in my_blinks.items():
        for item in blinks:
            self.progress.step()
            if key not in db_blinks:
                # object has reference to something not in db;
                # should have been found in previous checks
                logging.warning(' Fail: reference to an object %(obj)s'
                                ' not in the db by %(ref)s!',
                                {'obj': key, 'ref': item})
                continue
            if item not in db_blinks[key]:
                # Object has reference with no cooresponding backlink
                self.bad_backlinks += 1
                pri_obj = self.db.method('get_%s_from_handle',
                                         key[0])(key[1])
                logging.warning(' FAIL: the "%(cls)s" [%(gid)s] '
                                'has a "%(cls2)s" reference'
                                ' with no corresponding backlink.',
                                {'gid': pri_obj.gramps_id,
                                 'cls': key[0], 'cls2': item[0]})
    # Now we go through the db table and make checks against ours
    # Check for db backlinks that don't have a reference object at all
    self.progress.set_pass(_('Looking for backlink reference problems') +
                           ' (3)', db_items)
    for key, blinks in db_blinks.items():
        for item in blinks:
            self.progress.step()
            if item not in db_blinks:
                # backlink to object entirely missing
                self.bad_backlinks += 1
                pri_obj = self.db.method('get_%s_from_handle',
                                         key[0])(key[1])
                logging.warning(' FAIL: the "%(cls)s" [%(gid)s] '
                                'has a backlink to a missing'
                                ' "%(cls2)s" object.',
                                {'gid': pri_obj.gramps_id,
                                 'cls': key[0], 'cls2': item[0]})
                continue
            # Check if the object has a reference to the backlinked one
            if key not in my_blinks or item not in my_blinks[key]:
                # backlink to object which doesn't have reference
                self.bad_backlinks += 1
                pri_obj = self.db.method('get_%s_from_handle',
                                         key[0])(key[1])
                logging.warning(' FAIL: the "%(cls)s" [%(gid)s] '
                                'has a backlink to a "%(cls2)s"'
                                ' with no corresponding reference.',
                                {'gid': pri_obj.gramps_id,
                                 'cls': key[0], 'cls2': item[0]})
def callback(self, *args):
    """Generic progress callback: advance the meter one step.

    Extra positional arguments from callers are accepted and ignored.
    """
    self.progress.step()
def check_person_references(self):
    '''Looking for person reference problems'''
    # Validates each person's PersonRef list (person-to-person
    # associations). Empty refs get a new handle; dangling refs are
    # backed by an 'Unknown' person created via make_unknown().
    plist = self.db.get_person_handles()
    self.progress.set_pass(_('Looking for person reference problems'),
                           len(plist))
    logging.info('Looking for person reference problems')
    for key in plist:
        self.progress.step()
        none_handle = False
        newlist = []
        person = self.db.get_person_from_handle(key)
        for pref in person.get_person_ref_list():
            newlist.append(pref)
            if not pref.ref:
                # Ref without a handle: invent one so make_unknown()
                # below can attach a placeholder person.
                none_handle = True
                pref.ref = create_id()
            try:
                self.db.get_person_from_handle(pref.ref)
            except HandleError:
                # The referenced person does not exist in the database
                make_unknown(pref.ref, self.explanation.handle,
                             self.class_person, self.commit_person,
                             self.trans)
                # Log the failure, consistent with the sibling checks.
                logging.warning(' FAIL: the person "%(gid)s" refers to '
                                'another person "%(hand)s" which does '
                                'not exist in the database',
                                {'gid': person.gramps_id,
                                 'hand': pref.ref})
                self.invalid_person_references.add(key)
        if none_handle:
            person.set_person_ref_list(newlist)
            self.db.commit_person(person, self.trans)
    if len(self.invalid_person_references) == 0:
        # Fixed copy-paste bug: this used to say 'no event problems
        # found' (copied from check_events).
        logging.info(' OK: no person reference problems found')
def check_family_references(self):
    '''Looking for family reference problems'''
    # Validates the family handle stored in each person's LDS
    # ordinances; dangling handles are replaced by an 'Unknown' family.
    plist = self.db.get_person_handles()
    self.progress.set_pass(_('Looking for family reference problems'),
                           len(plist))
    logging.info('Looking for family reference problems')
    for key in plist:
        self.progress.step()
        person = self.db.get_person_from_handle(key)
        for ordinance in person.get_lds_ord_list():
            family_handle = ordinance.get_family_handle()
            if family_handle:
                try:
                    self.db.get_family_from_handle(family_handle)
                except HandleError:
                    # The referenced family does not exist in the database
                    make_unknown(family_handle, self.explanation.handle,
                                 self.class_family, self.commit_family,
                                 self.trans, db=self.db)
                    # Log the failure, consistent with the sibling checks.
                    logging.warning(' FAIL: the person "%(gid)s" refers '
                                    'to an LdsOrd family "%(hand)s" which'
                                    ' does not exist in the database',
                                    {'gid': person.gramps_id,
                                     'hand': family_handle})
                    self.invalid_family_references.add(key)
    if len(self.invalid_family_references) == 0:
        # Fixed copy-paste bug: this used to say 'no event problems
        # found' (copied from check_events).
        logging.info(' OK: no family reference problems found')
def check_repo_references(self):
    """Validate every source's repository references.

    References with an empty handle get a newly created one, and
    handles that do not resolve are backed by an 'Unknown' repository
    via make_unknown().
    """
    source_handles = self.db.get_source_handles()
    self.progress.set_pass(_('Looking for repository reference problems'),
                           len(source_handles))
    logging.info('Looking for repository reference problems')
    for source_handle in source_handles:
        self.progress.step()
        source = self.db.get_source_from_handle(source_handle)
        fixed_refs = []
        found_empty_ref = False
        for reporef in source.get_reporef_list():
            fixed_refs.append(reporef)
            if not reporef.ref:
                # Ref without a handle: invent one so the repair below
                # can attach a placeholder repository.
                found_empty_ref = True
                reporef.ref = create_id()
            try:
                self.db.get_repository_from_handle(reporef.ref)
            except HandleError:
                # The referenced repository does not exist in the database
                make_unknown(reporef.ref, self.explanation.handle,
                             self.class_repo, self.commit_repo, self.trans)
                self.invalid_repo_references.add(source_handle)
        if found_empty_ref:
            source.set_reporef_list(fixed_refs)
            self.db.commit_source(source, self.trans)
    if len(self.invalid_repo_references) == 0:
        logging.info(' OK: no repository reference problems found')
def check_place_references(self):
    '''Looking for place reference problems'''
    # Place references occur in four spots: parent links between
    # places, the LDS ordinances of persons and of families, and
    # events. Dangling references are backed by an 'Unknown' place.
    plist = self.db.get_person_handles()
    flist = self.db.get_family_handles()
    elist = self.db.get_event_handles()
    llist = self.db.get_place_handles()
    self.progress.set_pass(
        _('Looking for place reference problems'),
        len(elist) + len(plist) + len(flist) + len(llist))
    logging.info('Looking for place reference problems')
    # check parent place links
    for key in llist:
        self.progress.step()
        none_handle = False
        newlist = []
        place = self.db.get_place_from_handle(key)
        for placeref in place.get_placeref_list():
            newlist.append(placeref)
            if not placeref.ref:
                # Ref without a handle: invent one so make_unknown()
                # below can attach a placeholder place.
                none_handle = True
                placeref.ref = create_id()
            try:
                self.db.get_place_from_handle(placeref.ref)
            except HandleError:
                # The referenced place does not exist in the database
                make_unknown(placeref.ref, self.explanation.handle,
                             self.class_place, self.commit_place,
                             self.trans)
                logging.warning(' FAIL: the place "%(gid)s" refers '
                                'to a parent place "%(hand)s" which '
                                'does not exist in the database',
                                {'gid': place.gramps_id,
                                 'hand': placeref.ref})
                self.invalid_place_references.add(key)
        if none_handle:
            place.set_placeref_list(newlist)
            self.db.commit_place(place, self.trans)
    # check persons -> the LdsOrd references a place
    for key in plist:
        self.progress.step()
        person = self.db.get_person_from_handle(key)
        for ordinance in person.lds_ord_list:
            place_handle = ordinance.get_place_handle()
            if place_handle:
                try:
                    place = self.db.get_place_from_handle(place_handle)
                except HandleError:
                    # The referenced place does not exist in the database
                    # This is tested by TestcaseGenerator person "Broken17"
                    # This is tested by TestcaseGenerator person "Broken18"
                    make_unknown(place_handle, self.explanation.handle,
                                 self.class_place, self.commit_place,
                                 self.trans)
                    logging.warning(' FAIL: the person "%(gid)s" refers'
                                    ' to an LdsOrd place "%(hand)s" which '
                                    'does not exist in the database',
                                    {'gid': person.gramps_id,
                                     'hand': place_handle})
                    self.invalid_place_references.add(key)
    # check families -> the LdsOrd references a place
    for key in flist:
        self.progress.step()
        family = self.db.get_family_from_handle(key)
        for ordinance in family.lds_ord_list:
            place_handle = ordinance.get_place_handle()
            if place_handle:
                try:
                    place = self.db.get_place_from_handle(place_handle)
                except HandleError:
                    # The referenced place does not exist in the database
                    make_unknown(place_handle, self.explanation.handle,
                                 self.class_place, self.commit_place,
                                 self.trans)
                    logging.warning(' FAIL: the family "%(gid)s" refers'
                                    ' to an LdsOrd place "%(hand)s" which '
                                    'does not exist in the database',
                                    {'gid': family.gramps_id,
                                     'hand': place_handle})
                    self.invalid_place_references.add(key)
    # check events
    for key in elist:
        self.progress.step()
        event = self.db.get_event_from_handle(key)
        place_handle = event.get_place_handle()
        if place_handle:
            try:
                place = self.db.get_place_from_handle(place_handle)
            except HandleError:
                # The referenced place does not exist in the database
                make_unknown(place_handle, self.explanation.handle,
                             self.class_place, self.commit_place,
                             self.trans)
                # Fixed copy-paste wording: events reference a plain
                # place, not an "LdsOrd place" (copied from the
                # person/family loops above).
                logging.warning(' FAIL: the event "%(gid)s" refers '
                                'to a place "%(hand)s" which '
                                'does not exist in the database',
                                {'gid': event.gramps_id,
                                 'hand': place_handle})
                self.invalid_place_references.add(key)
    if len(self.invalid_place_references) == 0:
        logging.info(' OK: no place reference problems found')
def check_citation_references(self):
    '''Looking for citation reference problems'''
    # Walks every object type that can carry citations (person, family,
    # place, citation, repository, media, event). Empty citation
    # handles are replaced with a fresh id; that id (or any handle not
    # in the database) is collected in invalid_citation_references and
    # afterwards backed by an 'Unknown' citation + source.
    known_handles = self.db.get_citation_handles()
    total = (
        self.db.get_number_of_people() +
        self.db.get_number_of_families() +
        self.db.get_number_of_events() +
        self.db.get_number_of_places() +
        self.db.get_number_of_citations() +
        self.db.get_number_of_sources() +
        self.db.get_number_of_media() +
        self.db.get_number_of_repositories()
    )
    self.progress.set_pass(_('Looking for citation reference problems'),
                           total)
    logging.info('Looking for citation reference problems')
    for handle in self.db.get_person_handles():
        self.progress.step()
        person = self.db.get_person_from_handle(handle)
        handle_list = person.get_referenced_handles_recursively()
        for item in handle_list:
            if item[0] == 'Citation':
                if not item[1]:
                    new_handle = create_id()
                    person.replace_citation_references(None, new_handle)
                    self.db.commit_person(person, self.trans)
                    self.invalid_citation_references.add(new_handle)
                elif item[1] not in known_handles:
                    self.invalid_citation_references.add(item[1])
    for handle in self.db.get_family_handles():
        self.progress.step()
        family = self.db.get_family_from_handle(handle)
        handle_list = family.get_referenced_handles_recursively()
        for item in handle_list:
            if item[0] == 'Citation':
                if not item[1]:
                    new_handle = create_id()
                    family.replace_citation_references(None, new_handle)
                    self.db.commit_family(family, self.trans)
                    self.invalid_citation_references.add(new_handle)
                elif item[1] not in known_handles:
                    self.invalid_citation_references.add(item[1])
    for handle in self.db.get_place_handles():
        self.progress.step()
        place = self.db.get_place_from_handle(handle)
        handle_list = place.get_referenced_handles_recursively()
        for item in handle_list:
            if item[0] == 'Citation':
                if not item[1]:
                    new_handle = create_id()
                    place.replace_citation_references(None, new_handle)
                    self.db.commit_place(place, self.trans)
                    self.invalid_citation_references.add(new_handle)
                elif item[1] not in known_handles:
                    self.invalid_citation_references.add(item[1])
    for handle in self.db.get_citation_handles():
        self.progress.step()
        citation = self.db.get_citation_from_handle(handle)
        handle_list = citation.get_referenced_handles_recursively()
        for item in handle_list:
            if item[0] == 'Citation':
                if not item[1]:
                    new_handle = create_id()
                    citation.replace_citation_references(None, new_handle)
                    self.db.commit_citation(citation, self.trans)
                    self.invalid_citation_references.add(new_handle)
                elif item[1] not in known_handles:
                    self.invalid_citation_references.add(item[1])
    for handle in self.db.get_repository_handles():
        self.progress.step()
        repository = self.db.get_repository_from_handle(handle)
        handle_list = repository.get_referenced_handles_recursively()
        for item in handle_list:
            if item[0] == 'Citation':
                if not item[1]:
                    new_handle = create_id()
                    repository.replace_citation_references(None,
                                                           new_handle)
                    self.db.commit_repository(repository, self.trans)
                    self.invalid_citation_references.add(new_handle)
                elif item[1] not in known_handles:
                    self.invalid_citation_references.add(item[1])
    for handle in self.db.get_media_handles():
        self.progress.step()
        obj = self.db.get_media_from_handle(handle)
        handle_list = obj.get_referenced_handles_recursively()
        for item in handle_list:
            if item[0] == 'Citation':
                if not item[1]:
                    new_handle = create_id()
                    obj.replace_citation_references(None, new_handle)
                    self.db.commit_media(obj, self.trans)
                    self.invalid_citation_references.add(new_handle)
                elif item[1] not in known_handles:
                    self.invalid_citation_references.add(item[1])
    for handle in self.db.get_event_handles():
        self.progress.step()
        event = self.db.get_event_from_handle(handle)
        handle_list = event.get_referenced_handles_recursively()
        for item in handle_list:
            if item[0] == 'Citation':
                if not item[1]:
                    new_handle = create_id()
                    event.replace_citation_references(None, new_handle)
                    self.db.commit_event(event, self.trans)
                    self.invalid_citation_references.add(new_handle)
                elif item[1] not in known_handles:
                    self.invalid_citation_references.add(item[1])
    # Attach 'Unknown' citation/source pairs to every bad handle found
    # above; the created sources are tracked for later reporting.
    for bad_handle in self.invalid_citation_references:
        created = make_unknown(bad_handle, self.explanation.handle,
                               self.class_citation, self.commit_citation,
                               self.trans,
                               source_class_func=self.class_source,
                               source_commit_func=self.commit_source,
                               source_class_arg=create_id())
        self.invalid_source_references.add(created[0].handle)
    if len(self.invalid_citation_references) == 0:
        logging.info(' OK: no citation reference problems found')
def check_source_references(self):
    """Ensure every citation points at an existing source.

    Citations without a source handle get a freshly created one; any
    handle that does not resolve is backed by an 'Unknown' source via
    make_unknown().
    """
    citation_handles = self.db.get_citation_handles()
    self.progress.set_pass(_('Looking for source reference problems'),
                           len(citation_handles))
    logging.info('Looking for source reference problems')
    for citation_handle in citation_handles:
        self.progress.step()
        citation = self.db.get_citation_from_handle(citation_handle)
        source_handle = citation.get_reference_handle()
        if not source_handle:
            # Citation with no source at all: invent a handle so the
            # dangling-reference repair below attaches an Unknown source.
            source_handle = create_id()
            citation.set_reference_handle(source_handle)
            self.db.commit_citation(citation, self.trans)
        if not source_handle:
            continue
        try:
            self.db.get_source_from_handle(source_handle)
        except HandleError:
            # The referenced source does not exist in the database
            make_unknown(source_handle, self.explanation.handle,
                         self.class_source, self.commit_source,
                         self.trans)
            logging.warning(' FAIL: the citation "%(gid)s" refers '
                            'to source "%(hand)s" which does not exist'
                            ' in the database',
                            {'gid': citation.gramps_id,
                             'hand': source_handle})
            self.invalid_source_references.add(citation_handle)
    if len(self.invalid_source_references) == 0:
        logging.info(' OK: no source reference problems found')
    def check_media_references(self):
        """Repair media references that are empty or point at no media object.

        Every primary object type that can carry media references (person,
        family, place, event, citation, source) is scanned.  An empty
        reference is replaced by a fresh handle; that handle — and any
        handle not present in the database — is collected in
        self.invalid_media_references, for which placeholder "Unknown"
        media objects are created at the end.
        """
        # Handles of all media objects actually present in the database.
        known_handles = self.db.get_media_handles(False)
        total = (
            self.db.get_number_of_people() +
            self.db.get_number_of_families() +
            self.db.get_number_of_events() +
            self.db.get_number_of_places() +
            self.db.get_number_of_citations() +
            self.db.get_number_of_sources()
        )
        self.progress.set_pass(_('Looking for media object reference '
                                 'problems'), total)
        logging.info('Looking for media object reference problems')
        # The six loops below are identical except for the object type:
        # an empty handle is replaced by a fresh one (and the object is
        # committed), a dangling handle is merely recorded.
        for handle in self.db.get_person_handles():
            self.progress.step()
            person = self.db.get_person_from_handle(handle)
            handle_list = person.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Media':
                    if not item[1]:
                        new_handle = create_id()
                        person.replace_media_references(None, new_handle)
                        self.db.commit_person(person, self.trans)
                        self.invalid_media_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_media_references.add(item[1])
        for handle in self.db.get_family_handles():
            self.progress.step()
            family = self.db.get_family_from_handle(handle)
            handle_list = family.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Media':
                    if not item[1]:
                        new_handle = create_id()
                        family.replace_media_references(None, new_handle)
                        self.db.commit_family(family, self.trans)
                        self.invalid_media_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_media_references.add(item[1])
        for handle in self.db.get_place_handles():
            self.progress.step()
            place = self.db.get_place_from_handle(handle)
            handle_list = place.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Media':
                    if not item[1]:
                        new_handle = create_id()
                        place.replace_media_references(None, new_handle)
                        self.db.commit_place(place, self.trans)
                        self.invalid_media_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_media_references.add(item[1])
        for handle in self.db.get_event_handles():
            self.progress.step()
            event = self.db.get_event_from_handle(handle)
            handle_list = event.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Media':
                    if not item[1]:
                        new_handle = create_id()
                        event.replace_media_references(None, new_handle)
                        self.db.commit_event(event, self.trans)
                        self.invalid_media_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_media_references.add(item[1])
        for handle in self.db.get_citation_handles():
            self.progress.step()
            citation = self.db.get_citation_from_handle(handle)
            handle_list = citation.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Media':
                    if not item[1]:
                        new_handle = create_id()
                        citation.replace_media_references(None, new_handle)
                        self.db.commit_citation(citation, self.trans)
                        self.invalid_media_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_media_references.add(item[1])
        for handle in self.db.get_source_handles():
            self.progress.step()
            source = self.db.get_source_from_handle(handle)
            handle_list = source.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Media':
                    if not item[1]:
                        new_handle = create_id()
                        source.replace_media_references(None, new_handle)
                        self.db.commit_source(source, self.trans)
                        self.invalid_media_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_media_references.add(item[1])
        # Create placeholder "Unknown" media objects for every bad handle.
        for bad_handle in self.invalid_media_references:
            make_unknown(bad_handle, self.explanation.handle, self.class_media,
                         self.commit_media, self.trans)
        if len(self.invalid_media_references) == 0:
            logging.info(' OK: no media reference problems found')
    def check_note_references(self):
        """Repair note references that are empty or point at no note.

        All primary object types that can reference notes are scanned;
        bad references are collected in self.invalid_note_references and
        placeholder "Unknown" notes are created for them.

        The explanation note (self.explanation) is added to the database
        exactly once: up front if any of the earlier reference checks
        already found problems, or at the very end if only note problems
        were found here.
        """
        # Here I assume check note_references runs after all the next checks.
        missing_references = (len(self.invalid_person_references) +
                              len(self.invalid_family_references) +
                              len(self.invalid_birth_events) +
                              len(self.invalid_death_events) +
                              len(self.invalid_events) +
                              len(self.invalid_place_references) +
                              len(self.invalid_citation_references) +
                              len(self.invalid_source_references) +
                              len(self.invalid_repo_references) +
                              len(self.invalid_media_references))
        if missing_references:
            # Earlier checks found problems, so the explanation note is
            # definitely needed; add it before scanning note handles so it
            # counts as a known handle.
            self.db.add_note(self.explanation, self.trans, set_gid=True)
        known_handles = self.db.get_note_handles()
        total = (self.db.get_number_of_people() +
                 self.db.get_number_of_families() +
                 self.db.get_number_of_events() +
                 self.db.get_number_of_places() +
                 self.db.get_number_of_media() +
                 self.db.get_number_of_citations() +
                 self.db.get_number_of_sources() +
                 self.db.get_number_of_repositories())
        self.progress.set_pass(_('Looking for note reference problems'),
                               total)
        logging.info('Looking for note reference problems')
        # One loop per primary object type: an empty handle is replaced by
        # a fresh one (and the object committed), a dangling handle is
        # merely recorded.
        for handle in self.db.get_person_handles():
            self.progress.step()
            person = self.db.get_person_from_handle(handle)
            handle_list = person.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Note':
                    if not item[1]:
                        new_handle = create_id()
                        person.replace_note_references(None, new_handle)
                        self.db.commit_person(person, self.trans)
                        self.invalid_note_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_note_references.add(item[1])
        for handle in self.db.get_family_handles():
            self.progress.step()
            family = self.db.get_family_from_handle(handle)
            handle_list = family.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Note':
                    if not item[1]:
                        new_handle = create_id()
                        family.replace_note_references(None, new_handle)
                        self.db.commit_family(family, self.trans)
                        self.invalid_note_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_note_references.add(item[1])
        for handle in self.db.get_place_handles():
            self.progress.step()
            place = self.db.get_place_from_handle(handle)
            handle_list = place.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Note':
                    if not item[1]:
                        new_handle = create_id()
                        place.replace_note_references(None, new_handle)
                        self.db.commit_place(place, self.trans)
                        self.invalid_note_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_note_references.add(item[1])
        for handle in self.db.get_citation_handles():
            self.progress.step()
            citation = self.db.get_citation_from_handle(handle)
            handle_list = citation.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Note':
                    if not item[1]:
                        new_handle = create_id()
                        citation.replace_note_references(None, new_handle)
                        self.db.commit_citation(citation, self.trans)
                        self.invalid_note_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_note_references.add(item[1])
        for handle in self.db.get_source_handles():
            self.progress.step()
            source = self.db.get_source_from_handle(handle)
            handle_list = source.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Note':
                    if not item[1]:
                        new_handle = create_id()
                        source.replace_note_references(None, new_handle)
                        self.db.commit_source(source, self.trans)
                        self.invalid_note_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_note_references.add(item[1])
        for handle in self.db.get_media_handles():
            self.progress.step()
            obj = self.db.get_media_from_handle(handle)
            handle_list = obj.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Note':
                    if not item[1]:
                        new_handle = create_id()
                        obj.replace_note_references(None, new_handle)
                        self.db.commit_media(obj, self.trans)
                        self.invalid_note_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_note_references.add(item[1])
        for handle in self.db.get_event_handles():
            self.progress.step()
            event = self.db.get_event_from_handle(handle)
            handle_list = event.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Note':
                    if not item[1]:
                        new_handle = create_id()
                        event.replace_note_references(None, new_handle)
                        self.db.commit_event(event, self.trans)
                        self.invalid_note_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_note_references.add(item[1])
        for handle in self.db.get_repository_handles():
            self.progress.step()
            repo = self.db.get_repository_from_handle(handle)
            handle_list = repo.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Note':
                    if not item[1]:
                        new_handle = create_id()
                        repo.replace_note_references(None, new_handle)
                        self.db.commit_repository(repo, self.trans)
                        self.invalid_note_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_note_references.add(item[1])
        for bad_handle in self.invalid_note_references:
            make_unknown(bad_handle, self.explanation.handle,
                         self.class_note, self.commit_note, self.trans)
        if len(self.invalid_note_references) == 0:
            logging.info(' OK: no note reference problems found')
        else:
            # Note problems were found but no earlier check added the
            # explanation note yet — add it now (exactly once overall).
            if not missing_references:
                self.db.add_note(self.explanation, self.trans, set_gid=True)
def check_checksum(self):
''' fix media checksums '''
self.progress.set_pass(_('Updating checksums on media'),
len(self.db.get_media_handles()))
for objectid in self.db.get_media_handles():
self.progress.step()
obj = self.db.get_media_from_handle(objectid)
full_path = media_path_full(self.db, obj.get_path())
new_checksum = create_checksum(full_path)
if new_checksum != obj.checksum:
logging.info('checksum: updating ' + obj.gramps_id)
obj.checksum = new_checksum
self.db.commit_media(obj, self.trans)
    def check_tag_references(self):
        """Repair tag references that are empty or point at no tag.

        Every primary object type that can carry tags is scanned; bad
        references are collected in self.invalid_tag_references and
        placeholder "Unknown" tags are created for them (tags have no
        associated explanation note, hence the None below).
        """
        known_handles = self.db.get_tag_handles()
        total = (self.db.get_number_of_people() +
                 self.db.get_number_of_families() +
                 self.db.get_number_of_media() +
                 self.db.get_number_of_notes() +
                 self.db.get_number_of_events() +
                 self.db.get_number_of_citations() +
                 self.db.get_number_of_sources() +
                 self.db.get_number_of_places() +
                 self.db.get_number_of_repositories())
        self.progress.set_pass(_('Looking for tag reference problems'),
                               total)
        logging.info('Looking for tag reference problems')
        # One loop per primary object type: an empty handle is replaced by
        # a fresh one (and the object committed), a dangling handle is
        # merely recorded.
        for handle in self.db.get_person_handles():
            self.progress.step()
            person = self.db.get_person_from_handle(handle)
            handle_list = person.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        person.replace_tag_references(None, new_handle)
                        self.db.commit_person(person, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        for handle in self.db.get_family_handles():
            self.progress.step()
            family = self.db.get_family_from_handle(handle)
            handle_list = family.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        family.replace_tag_references(None, new_handle)
                        self.db.commit_family(family, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        for handle in self.db.get_media_handles():
            self.progress.step()
            obj = self.db.get_media_from_handle(handle)
            handle_list = obj.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        obj.replace_tag_references(None, new_handle)
                        self.db.commit_media(obj, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        for handle in self.db.get_note_handles():
            self.progress.step()
            note = self.db.get_note_from_handle(handle)
            handle_list = note.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        note.replace_tag_references(None, new_handle)
                        self.db.commit_note(note, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        for handle in self.db.get_event_handles():
            self.progress.step()
            event = self.db.get_event_from_handle(handle)
            handle_list = event.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        event.replace_tag_references(None, new_handle)
                        self.db.commit_event(event, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        for handle in self.db.get_citation_handles():
            self.progress.step()
            citation = self.db.get_citation_from_handle(handle)
            handle_list = citation.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        citation.replace_tag_references(None, new_handle)
                        self.db.commit_citation(citation, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        for handle in self.db.get_source_handles():
            self.progress.step()
            source = self.db.get_source_from_handle(handle)
            handle_list = source.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        source.replace_tag_references(None, new_handle)
                        self.db.commit_source(source, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        for handle in self.db.get_place_handles():
            self.progress.step()
            place = self.db.get_place_from_handle(handle)
            handle_list = place.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        place.replace_tag_references(None, new_handle)
                        self.db.commit_place(place, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        for handle in self.db.get_repository_handles():
            self.progress.step()
            repository = self.db.get_repository_from_handle(handle)
            handle_list = repository.get_referenced_handles_recursively()
            for item in handle_list:
                if item[0] == 'Tag':
                    if not item[1]:
                        new_handle = create_id()
                        repository.replace_tag_references(None, new_handle)
                        self.db.commit_repository(repository, self.trans)
                        self.invalid_tag_references.add(new_handle)
                    elif item[1] not in known_handles:
                        self.invalid_tag_references.add(item[1])
        # Tags carry no explanation note, so no explanation handle is passed.
        for bad_handle in self.invalid_tag_references:
            make_unknown(bad_handle, None, self.class_tag,
                         self.commit_tag, self.trans)
        if len(self.invalid_tag_references) == 0:
            logging.info(' OK: no tag reference problems found')
def check_media_sourceref(self):
"""
This repairs a problem with database upgrade from database schema
version 15 to 16. Mediarefs on source primary objects can contain
sourcerefs, and these were not converted to citations.
"""
total = (self.db.get_number_of_sources())
self.progress.set_pass(_('Looking for media source reference '
'problems'), total)
logging.info('Looking for media source reference problems')
for handle in self.db.get_source_handles():
self.progress.step()
source = self.db.get_source_from_handle(handle)
new_media_ref_list = []
citation_changed = False
for media_ref in source.get_media_list():
citation_list = media_ref.get_citation_list()
new_citation_list = []
for citation_handle in citation_list:
# Either citation_handle is a handle, in which case it has
# been converted, or it is a 6-tuple, in which case it now
# needs to be converted.
if len(citation_handle) == 6:
if len(citation_handle) == 6:
sourceref = citation_handle
else:
sourceref = eval(citation_handle)
new_citation = Citation()
new_citation.set_date_object(sourceref[0])
new_citation.set_privacy(sourceref[1])
new_citation.set_note_list(sourceref[2])
new_citation.set_confidence_level(sourceref[3])
new_citation.set_reference_handle(sourceref[4])
new_citation.set_page(sourceref[5])
citation_handle = create_id()
new_citation.set_handle(citation_handle)
self.replaced_sourceref.append(handle)
citation_changed = True
logging.warning(' FAIL: the source "%s" has a media'
' reference with a source citation '
'which is invalid', (source.gramps_id))
self.db.add_citation(new_citation, self.trans)
new_citation_list.append(citation_handle)
media_ref.set_citation_list(new_citation_list)
new_media_ref_list.append(media_ref)
if citation_changed:
source.set_media_list(new_media_ref_list)
self.db.commit_source(source, self.trans)
if len(self.replaced_sourceref) > 0:
logging.info(' OK: no broken source citations on mediarefs '
'found')
def fix_duplicated_grampsid(self):
"""
This searches for duplicated Gramps ID within each of the major
classes. It does not check across classes. If duplicates are
found, a new Gramps ID is assigned.
"""
total = (
self.db.get_number_of_citations() +
self.db.get_number_of_events() +
self.db.get_number_of_families() +
self.db.get_number_of_media() +
self.db.get_number_of_notes() +
self.db.get_number_of_people() +
self.db.get_number_of_places() +
self.db.get_number_of_repositories() +
self.db.get_number_of_sources()
)
self.progress.set_pass(_('Looking for Duplicated Gramps ID '
'problems'), total)
logging.info('Looking for Duplicated Gramps ID problems')
gid_list = []
for citation in self.db.iter_citations():
self.progress.step()
ogid = gid = citation.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_citation_gramps_id()
citation.set_gramps_id(gid)
self.db.commit_citation(citation, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
gid_list = []
for event in self.db.iter_events():
self.progress.step()
ogid = gid = event.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_event_gramps_id()
event.set_gramps_id(gid)
self.db.commit_event(event, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
gid_list = []
for family in self.db.iter_families():
self.progress.step()
ogid = gid = family.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_family_gramps_id()
family.set_gramps_id(gid)
self.db.commit_family(family, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
gid_list = []
for media in self.db.iter_media():
self.progress.step()
ogid = gid = media.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_media_gramps_id()
media.set_gramps_id(gid)
self.db.commit_media(media, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
gid_list = []
for note in self.db.iter_notes():
ogid = gid = note.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_note_gramps_id()
note.set_gramps_id(gid)
self.db.commit_note(note, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
gid_list = []
for person in self.db.iter_people():
self.progress.step()
ogid = gid = person.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_person_gramps_id()
person.set_gramps_id(gid)
self.db.commit_person(person, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
gid_list = []
for place in self.db.iter_places():
self.progress.step()
ogid = gid = place.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_place_gramps_id()
place.set_gramps_id(gid)
self.db.commit_place(place, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
gid_list = []
for repository in self.db.iter_repositories():
self.progress.step()
ogid = gid = repository.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_repository_gramps_id()
repository.set_gramps_id(gid)
self.db.commit_repository(repository, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
gid_list = []
for source in self.db.iter_sources():
self.progress.step()
ogid = gid = source.get_gramps_id()
if gid in gid_list:
gid = self.db.find_next_source_gramps_id()
source.set_gramps_id(gid)
self.db.commit_source(source, self.trans)
logging.warning(' FAIL: Duplicated Gramps ID found, '
'Original: "%s" changed to: "%s"', ogid, gid)
self.duplicated_gramps_ids += 1
gid_list.append(gid)
    # ----------------------------------------------------------------
    # Factory (class_*) and commit (commit_*) callbacks handed to
    # make_unknown(): each class_* builds an empty primary object with a
    # given handle; each commit_* adds it to the database.  The unused
    # 'dummy' parameter matches the callback signature make_unknown
    # expects.
    # ----------------------------------------------------------------
    def class_person(self, handle):
        """Return a new empty Person carrying *handle*."""
        person = Person()
        person.set_handle(handle)
        return person
    def commit_person(self, person, trans, dummy):
        """Add *person* to the database, assigning a Gramps ID."""
        self.db.add_person(person, trans, set_gid=True)
    def class_family(self, handle):
        """Return a new empty Family carrying *handle*."""
        family = Family()
        family.set_handle(handle)
        return family
    def commit_family(self, family, trans, dummy):
        """Add *family* to the database, assigning a Gramps ID."""
        self.db.add_family(family, trans, set_gid=True)
    def class_event(self, handle):
        """Return a new empty Event carrying *handle*."""
        event = Event()
        event.set_handle(handle)
        return event
    def commit_event(self, event, trans, dummy):
        """Add *event* to the database, assigning a Gramps ID."""
        self.db.add_event(event, trans, set_gid=True)
    def class_place(self, handle):
        """Return a new empty Place carrying *handle*."""
        place = Place()
        place.set_handle(handle)
        return place
    def commit_place(self, place, trans, dummy):
        """Add *place* to the database, assigning a Gramps ID."""
        self.db.add_place(place, trans, set_gid=True)
    def class_source(self, handle):
        """Return a new empty Source carrying *handle*."""
        source = Source()
        source.set_handle(handle)
        return source
    def commit_source(self, source, trans, dummy):
        """Add *source* to the database, assigning a Gramps ID."""
        self.db.add_source(source, trans, set_gid=True)
    def class_citation(self, handle):
        """Return a new empty Citation carrying *handle*."""
        citation = Citation()
        citation.set_handle(handle)
        return citation
    def commit_citation(self, citation, trans, dummy):
        """Add *citation* to the database, assigning a Gramps ID."""
        self.db.add_citation(citation, trans, set_gid=True)
    def class_repo(self, handle):
        """Return a new empty Repository carrying *handle*."""
        repo = Repository()
        repo.set_handle(handle)
        return repo
    def commit_repo(self, repo, trans, dummy):
        """Add *repo* to the database, assigning a Gramps ID."""
        self.db.add_repository(repo, trans, set_gid=True)
    def class_media(self, handle):
        """Return a new empty Media object carrying *handle*."""
        obj = Media()
        obj.set_handle(handle)
        return obj
    def commit_media(self, obj, trans, dummy):
        """Add media *obj* to the database, assigning a Gramps ID."""
        self.db.add_media(obj, trans, set_gid=True)
    def class_note(self, handle):
        """Return a new empty Note carrying *handle*."""
        note = Note()
        note.set_handle(handle)
        return note
    def commit_note(self, note, trans, dummy):
        """Add *note* to the database, assigning a Gramps ID."""
        self.db.add_note(note, trans, set_gid=True)
    def class_tag(self, handle):
        """Return a new empty Tag carrying *handle*."""
        tag = Tag()
        tag.set_handle(handle)
        return tag
    def commit_tag(self, tag, trans, dummy):
        """Add *tag* to the database (tags carry no Gramps ID)."""
        self.db.add_tag(tag, trans)
def build_report(self, uistate=None):
''' build the report from various counters'''
self.progress.close()
bad_photos = len(self.bad_photo)
replaced_photos = len(self.replaced_photo)
removed_photos = len(self.removed_photo)
photos = bad_photos + replaced_photos + removed_photos
efam = len(self.empty_family)
blink = len(self.broken_links)
plink = len(self.broken_parent_links)
slink = len(self.duplicate_links)
rel = len(self.fam_rel)
event_invalid = len(self.invalid_events)
birth_invalid = len(self.invalid_birth_events)
death_invalid = len(self.invalid_death_events)
person = birth_invalid + death_invalid
person_references = len(self.invalid_person_references)
family_references = len(self.invalid_family_references)
invalid_dates = len(self.invalid_dates)
place_references = len(self.invalid_place_references)
citation_references = len(self.invalid_citation_references)
source_references = len(self.invalid_source_references)
repo_references = len(self.invalid_repo_references)
media_references = len(self.invalid_media_references)
note_references = len(self.invalid_note_references)
tag_references = len(self.invalid_tag_references)
name_format = len(self.removed_name_format)
replaced_sourcerefs = len(self.replaced_sourceref)
dup_gramps_ids = self.duplicated_gramps_ids
empty_objs = sum(len(obj) for obj in self.empty_objects.values())
errors = (photos + efam + blink + plink + slink + rel +
event_invalid + person + self.place_errors +
person_references + family_references + place_references +
citation_references + repo_references + media_references +
note_references + tag_references + name_format + empty_objs +
invalid_dates + source_references + dup_gramps_ids +
self.bad_backlinks)
if errors == 0:
if uistate:
OkDialog(_("No errors were found"),
_('The database has passed internal checks'),
parent=uistate.window)
else:
print(_("No errors were found: the database has passed "
"internal checks."))
return 0
if blink > 0:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} broken child/family link was fixed\n",
"{quantity} broken child/family links were fixed\n",
blink).format(quantity=blink)
)
for (person_handle, family_handle) in self.broken_links:
try:
person = self.db.get_person_from_handle(person_handle)
except HandleError:
cname = _("Non existing child")
else:
cname = person.get_primary_name().get_name()
try:
family = self.db.get_family_from_handle(family_handle)
except HandleError:
pname = _("Unknown")
else:
pname = family_name(family, self.db)
self.text.write('\t')
self.text.write(
_("%(person)s was removed from the family of %(family)s\n")
% {'person': cname, 'family': pname}
)
if plink > 0:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} broken spouse/family link was fixed\n",
"{quantity} broken spouse/family links were fixed\n",
plink).format(quantity=plink)
)
for (person_handle, family_handle) in self.broken_parent_links:
try:
person = self.db.get_person_from_handle(person_handle)
except HandleError:
cname = _("Non existing person")
else:
cname = person.get_primary_name().get_name()
try:
family = self.db.get_family_from_handle(family_handle)
except HandleError:
pname = _("Unknown")
else:
pname = family_name(family, self.db)
self.text.write('\t')
self.text.write(
_("%(person)s was restored to the family of %(family)s\n")
% {'person': cname, 'family': pname}
)
if slink > 0:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} duplicate "
"spouse/family link was found\n",
"{quantity} duplicate "
"spouse/family links were found\n",
slink).format(quantity=slink)
)
for (person_handle, family_handle) in self.broken_parent_links:
try:
person = self.db.get_person_from_handle(person_handle)
except HandleError:
cname = _("Non existing person")
else:
cname = person.get_primary_name().get_name()
try:
family = self.db.get_family_from_handle(family_handle)
except HandleError:
pname = _("None")
else:
pname = family_name(family, self.db)
self.text.write('\t')
self.text.write(
_("%(person)s was restored to the family of %(family)s\n")
% {'person': cname, 'family': pname}
)
if efam:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} family "
"with no parents or children found, removed.\n",
"{quantity} families "
"with no parents or children found, removed.\n",
efam).format(quantity=efam)
)
if efam == 1:
self.text.write("\t%s\n" % self.empty_family[0])
if rel:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} corrupted family relationship fixed\n",
"{quantity} corrupted family relationships fixed\n",
rel).format(quantity=rel)
)
if self.place_errors:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} place alternate name fixed\n",
"{quantity} place alternate names fixed\n",
self.place_errors).format(quantity=self.place_errors)
)
if person_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext(
"{quantity} person was referenced but not found\n",
"{quantity} persons were referenced, but not found\n",
person_references).format(quantity=person_references)
)
if family_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} family was "
"referenced but not found\n",
"{quantity} families were "
"referenced, but not found\n",
family_references).format(quantity=family_references)
)
if invalid_dates:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} date was corrected\n",
"{quantity} dates were corrected\n",
invalid_dates).format(quantity=invalid_dates)
)
if repo_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext(
"{quantity} repository was "
"referenced but not found\n",
"{quantity} repositories were "
"referenced, but not found\n",
repo_references).format(quantity=repo_references)
)
if photos:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} media object was "
"referenced but not found\n",
"{quantity} media objects were "
"referenced, but not found\n",
photos).format(quantity=photos)
)
if bad_photos:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext(
"Reference to {quantity} missing media object was kept\n",
"References to {quantity} media objects were kept\n",
bad_photos).format(quantity=bad_photos)
)
if replaced_photos:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} missing media object was replaced\n",
"{quantity} missing media objects were replaced\n",
replaced_photos).format(quantity=replaced_photos)
)
if removed_photos:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} missing media object was removed\n",
"{quantity} missing media objects were removed\n",
removed_photos).format(quantity=removed_photos)
)
if event_invalid:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} event was referenced but not found\n",
"{quantity} events were referenced, but not found\n",
event_invalid).format(quantity=event_invalid)
)
if birth_invalid:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} invalid birth event name was fixed\n",
"{quantity} invalid birth event names were fixed\n",
birth_invalid).format(quantity=birth_invalid)
)
if death_invalid:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} invalid death event name was fixed\n",
"{quantity} invalid death event names were fixed\n",
death_invalid).format(quantity=death_invalid)
)
if place_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} place was referenced but not found\n",
"{quantity} places were referenced, but not found\n",
place_references).format(quantity=place_references)
)
if citation_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext(
"{quantity} citation was referenced but not found\n",
"{quantity} citations were referenced, but not found\n",
citation_references
).format(quantity=citation_references)
)
if source_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext(
"{quantity} source was referenced but not found\n",
"{quantity} sources were referenced, but not found\n",
source_references).format(quantity=source_references)
)
if media_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext(
"{quantity} media object was referenced but not found\n",
"{quantity} media objects were referenced,"
" but not found\n",
media_references).format(quantity=media_references)
)
if note_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} note object was "
"referenced but not found\n",
"{quantity} note objects were "
"referenced, but not found\n",
note_references).format(quantity=note_references)
)
if tag_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} tag object was "
"referenced but not found\n",
"{quantity} tag objects were "
"referenced, but not found\n",
tag_references).format(quantity=tag_references)
)
if tag_references:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} tag object was "
"referenced but not found\n",
"{quantity} tag objects were "
"referenced, but not found\n",
tag_references).format(quantity=tag_references)
)
if name_format:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} invalid name format "
"reference was removed\n",
"{quantity} invalid name format "
"references were removed\n",
name_format).format(quantity=name_format)
)
if replaced_sourcerefs:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext(
"{quantity} invalid source citation was fixed\n",
"{quantity} invalid source citations were fixed\n",
replaced_sourcerefs
).format(quantity=replaced_sourcerefs)
)
if dup_gramps_ids > 0:
self.text.write(
# translators: leave all/any {...} untranslated
ngettext("{quantity} Duplicated Gramps ID fixed\n",
"{quantity} Duplicated Gramps IDs fixed\n",
dup_gramps_ids).format(quantity=dup_gramps_ids)
)
if empty_objs > 0:
self.text.write(_(
"%(empty_obj)d empty objects removed:\n"
" %(person)d person objects\n"
" %(family)d family objects\n"
" %(event)d event objects\n"
" %(source)d source objects\n"
" %(media)d media objects\n"
" %(place)d place objects\n"
" %(repo)d repository objects\n"
" %(note)d note objects\n") % {
'empty_obj': empty_objs,
'person': len(self.empty_objects['persons']),
'family': len(self.empty_objects['families']),
'event': len(self.empty_objects['events']),
'source': len(self.empty_objects['sources']),
'media': len(self.empty_objects['media']),
'place': len(self.empty_objects['places']),
'repo': len(self.empty_objects['repos']),
'note': len(self.empty_objects['notes'])
}
)
if self.bad_backlinks:
self.text.write(_("%d bad backlinks were fixed;\n")
% self.bad_backlinks +
_("All reference maps have been rebuilt.") + '\n')
return errors
# -------------------------------------------------------------------------
#
# Display the results
#
# -------------------------------------------------------------------------
class CheckReport(ManagedWindow):
    """Present the integrity-check results to the user.

    When run from the command line the report text is printed to
    stdout; when a UI state is available the text is shown in a
    managed dialog window instead (or additionally).
    """
    def __init__(self, uistate, text, cli=0):
        if cli:
            print(text)
        if not uistate:
            return
        ManagedWindow.__init__(self, uistate, [], self)
        glade_xml = Glade()
        glade_xml.get_object("close").connect('clicked', self.close)
        # Fill the scrolled text area with the report body.
        glade_xml.get_object("textwindow").get_buffer().set_text(text)
        self.set_window(glade_xml.toplevel,
                        glade_xml.get_object("title"),
                        _("Integrity Check Results"))
        self.setup_configs('interface.checkreport', 450, 400)
        self.show()

    def build_menu_names(self, obj):
        """Return the (menu label, submenu) pair for the windows menu."""
        return (_('Check and Repair'), None)
# ------------------------------------------------------------------------
#
#
#
# ------------------------------------------------------------------------
class CheckOptions(tool.ToolOptions):
    """Option definitions and handling interface for the Check tool."""

    def __init__(self, name, person_id=None):
        # No tool-specific options; defer entirely to the base class.
        super().__init__(name, person_id)
|
sam-m888/gramps
|
gramps/plugins/tool/check.py
|
Python
|
gpl-2.0
| 129,173
|
[
"Brian"
] |
0925c50b059bba12a3aba9425d0e150286d37fa1b41573fbdb175d185449163d
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hover controller parameters."""
import copy
from makani.config import mconfig
from makani.config.m600.control import hover_controllers
from makani.config.m600.control.experiments import hover as hover_experiments
from makani.control import system_types as m
from makani.lib.python import dict_util
import numpy as np
@mconfig.Config(deps={
    'crosswind': mconfig.WING_MODEL + '.control.crosswind',
    'flight_plan': 'common.flight_plan',
    'ground_frame': 'base_station.ground_frame',
    'ground_station': 'base_station.ground_station',
    'gs_model': 'base_station.gs_model',
    'levelwind': 'base_station.levelwind',
    'perch': 'base_station.perch',
    'phys': 'common.physical_constants',
    'rotors': mconfig.WING_MODEL + '.rotors',
    'system': mconfig.WING_MODEL + '.system_params',
    'tether': mconfig.WING_MODEL + '.tether',
    'winch_sys': 'base_station.winch',
    'wing': mconfig.WING_MODEL + '.wing',
    'wing_serial': 'common.wing_serial',
})
def MakeParams(params):
  """Build the hover controller parameters for the configured wing serial."""
  gains = hover_controllers.GetHoverControllers(params['wing_serial'])
  # The context manager asserts that every controller entry is consumed,
  # catching stale or misspelled gain keys at config-generation time.
  with dict_util.MustConsumeAllDictEntries(gains) as consumed_gains:
    return MakeParamsHelper(params, consumed_gains)
def MakeParamsHelper(params, controllers):
"""Return hover controller parameters."""
gs02_drum_radius = params['ground_station']['gs02']['drum_radius']
using_gs02 = (params['gs_model'] == m.kGroundStationModelGSv2)
if using_gs02:
perched_wing_pos_p = params['ground_station']['gs02']['perched_wing_pos_p']
else:
perched_wing_pos_p = params['perch']['perched_wing_pos_p']
tether_mass = params['tether']['length'] * params['tether']['linear_density']
gps_error_tolerance = 1.0
# Set offset in ground coordinates [m] to ascend off perch.
ascend_offset_g_z = -0.7 - gps_error_tolerance
# This is obtained from flight data (HH-01/02 and CW-01/02).
target_above_perch_tether_elevation = np.deg2rad(-0.0)
if using_gs02:
perched_wing_pos_anchor = copy.copy(perched_wing_pos_p)
perched_wing_pos_anchor[2] -= gs02_drum_radius
perched_tether_length = np.linalg.norm(perched_wing_pos_anchor)
else:
perched_tether_length = np.linalg.norm(perched_wing_pos_p)
launch_perch_elevation_max = max(
np.deg2rad(8.0),
target_above_perch_tether_elevation + gps_error_tolerance /
perched_tether_length)
launch_perch_elevation_min = min(
np.deg2rad(0.0),
target_above_perch_tether_elevation - gps_error_tolerance /
perched_tether_length)
# Confirm that the ascend offset is negative because it is in ground
# coordinates.
assert ascend_offset_g_z < 0.0
altitude = {
# Maximum thrust-to-weight ratio [#] that the pilot can command
# during pilot hover. The weight is defined as the wing weight
# plus the payed tether weight.
'max_pilot_thrust_to_weight_ratio': 1.6,
# Proportional, integral, and derivative loop gains ([N/m],
# [N/m-s], and [N/(m/s)] respectively) and the integrated
# thrust saturation limits [N].
#
# The integrator saturations are set to 50% of the maximum
# nominal thrust.
'low_altitude_pid': {
'kp': controllers['low_altitude']['kp'],
'ki': controllers['low_altitude']['ki'],
'kd': controllers['low_altitude']['kd'],
'int_output_min': -((params['wing']['m'] + tether_mass)
* params['phys']['g'] * 0.5),
'int_output_max': ((params['wing']['m'] + tether_mass)
* params['phys']['g'] * 0.5)
},
'high_altitude_pid': {
'kp': controllers['high_altitude']['kp'],
'ki': controllers['high_altitude']['ki'],
'kd': controllers['high_altitude']['kd'],
'int_output_min': -((params['wing']['m'] + tether_mass)
* params['phys']['g'] * 0.5),
'int_output_max': ((params['wing']['m'] + tether_mass)
* params['phys']['g'] * 0.5)
},
# Crossover frequency [Hz] for the boost integrator. During
# HoverAscend we expand the PID controller to include a (1/s^2)
# double-integrator term such that the controller transfer
# function is
#
# [(boost_fc * 2 pi)/s + 1] * (ki/s + kp + kd*s)
#
# where boost_fc is the crossover frequency of the contribution
# of the boost term compared to the existing PID.
'boost_fc': 0.1,
# Minimum and maximum thrust [N] contributions from the boost
# stage.
'boost_output_min': -5e3,
'boost_output_max': 5e3,
# Whether the boost feature is enabled [bool].
'boost_enabled': True,
# Altitudes [m] (above the ground station origin) below and
# above which the low altitude gains and the high altitude gains
# are used.
'low_altitude': 50.0,
'high_altitude': 100.0,
# Maximum combined thrust [N] command from all the motors. This
# is the thrust commanded during acceleration and also the final
# saturation to the thrust command. It is set conservatively
# high right now for the Rev3 propellers, assuming that the
# constrained least squares solver will also handle the
# saturation.
'max_thrust': 28000.0,
# Rate limit for applying max thrust [N/s]. This rate limit
# prevents discontinuities in the rotor speed commands which
# the rotor speed controllers do not like.
'max_thrust_rate': 20000.0,
# Flight mode times [sec] to start and end crossfading the
# thrust feedforward term from 0.0 to 1.0 during HoverTransOut.
'transout_thrust_fade_start': 1.0,
'transout_thrust_fade_end': 2.0
}
angles = {
# Pitch moment [N-m] to apply when we are in contact with the
# perch. This increases the stability of the constrained pitch
# system, and also accounts for the fact that the pitch
# integrator is turned off during this time.
'perch_contact_extra_pitch_moment_min': 3057.8,
'perch_contact_extra_pitch_moment_max': 10057.8,
'perch_contact_total_pitch_moment_min': -7000.0,
'perch_contact_total_pitch_moment_max': 7000.0,
# Pitch angle errors [rad] where the perch contact pitch moment fades
# from full moment (angle_min) to zero (angle_max).
'perch_contact_extra_pitch_moment_fade_angle_min': -0.15,
'perch_contact_extra_pitch_moment_fade_angle_max': -0.05,
# Ratio [#] of the cross-coupling gain between roll rate and yaw
# moment over the normal gain between yaw rate and yaw moment.
# This uses a yaw moment to damp roll oscillations through the
# bridle coupling. LQR typically chooses values between -0.1
# and -0.3 for this term, depending on wind speed and tether
# weight.
#
# TODO(b/25647658): On the 2015-11-10 flight test, there was
# significant coupling of roll vibrations to the yaw command, so
# we have zeroed this term. We should revisit this issue.
'bridle_roll_damping_factor': 0.0,
# Proportional, integral, and derivative loop gains ([N-m/rad],
# [N-m/rad-s], and [N-m/(rad/s)] respectively) and the
# integrated moment saturation limits [N-m] for the roll, pitch
# and yaw angle control loops.
'roll_pid': {
'kp': controllers['roll']['kp'],
'ki': controllers['roll']['ki'],
'kd': controllers['roll']['kd'],
'int_output_min': -500.0,
'int_output_max': 500.0
},
# Gain on roll rate to blown flaps moment request [N-m/(rad/s)]
# to the inner ailerons. See delta_blown_aileron_per_roll_moment.
'blown_flaps_roll_rate_gain': 0.0,
# The integrated error limits are set to 5000 N-m, which roughly
# corresponds to the center-of-mass being off by 30 cm.
'low_thrust_pitch_pid': {
'kp': controllers['low_thrust_pitch']['kp'],
'ki': controllers['low_thrust_pitch']['ki'],
'kd': controllers['low_thrust_pitch']['kd'],
'int_output_min': -5000.0,
'int_output_max': 5000.0
},
'pitch_pid': {
'kp': controllers['pitch']['kp'],
'ki': controllers['pitch']['ki'],
'kd': controllers['pitch']['kd'],
'int_output_min': -5000.0,
'int_output_max': 5000.0
},
# The integrated error limits correspond to 5000 N-m, which
# roughly corresponds to the center-of-mass being off by 35 cm.
'yaw_pid': {
'kp': controllers['yaw']['kp'],
'ki': controllers['yaw']['ki'],
'kd': controllers['yaw']['kd'],
'int_output_min': -5000.0,
'int_output_max': 5000.0
},
# Minimum and maximum commanded moments [N-m]. It is necessary
# to saturate these because a large moment command can cause a
# limit cycle due to the rate limit on the motor speed command
# imposed by the stacking controller (for an example see
# 20160107-180836-motor_hitl_pilot_hover.h5).
'min_moment': [-5e3, -15e3, -25e3],
'max_moment': [5e3, 10e3, 25e3],
# Minimum and maximum commanded moments [N-m] during
# hover-accel. The pitch moment is opened from the normal
# limits to fight propwash over the elevator causing too quick
# of a pitch forward. See b/31313922.
'min_accel_moment': [-5e3, -10e3, -25e3],
'max_accel_moment': [5e3, 20e3, 25e3],
# Nominal pitch moment [N-m] to request from the elevator in
# order to unload the top propellers for the high-tail. Note
# that to evenly distribute the forces between the top and
# bottom propellers, this should be even lower; however the
# elevator is a very unreliable and unsteady source of pitch
# moment so we keep the nominal request small.
'nominal_elevator_pitch_moment': 0.0,
# Proportional, integral, and derivative loop gains
# ([N-m/N-m-s], [N-s/N-m-s^2], [N-m/N-m]) and the saturations on
# the output [N-m] for the integrated pitch error loop. We use
# the elevator deflection to reduce the integrated pitch moment
# from the motors.
'int_pitch_pid': {
'kp': controllers['int_pitch']['kp'],
'ki': controllers['int_pitch']['ki'],
'kd': controllers['int_pitch']['kd'],
'int_output_min': -2000.0,
'int_output_max': 2000.0
},
# Proportional, integral, and derivative loop gains
# ([N-m/N-m-s], [N-s/N-m-s^2], [N-m/N-m]) and the saturations on
# the output [N-m] for the integrated yaw error loop. We use
# the rudder deflection to reduce the integrated yaw moment from
# the motors.
'int_yaw_pid': {
'kp': controllers['int_yaw']['kp'],
'ki': controllers['int_yaw']['ki'],
'kd': controllers['int_yaw']['kd'],
'int_output_min': -2000.0,
'int_output_max': 2000.0
}
}
# To confirm system response, we inject step inputs in position and
# angle commands during autonomous hover-in-place.
inject = {
'use_signal_injection': False,
# Amplitude [m] and start and stop times [s] of a rectangle
# function along each axis of position in hover coordinates,
# i.e. positive x is up, positive y is toward the starboard
# wing, and positive z is toward the ground station.
'position_amplitude': [2.0, 0.0, 0.0],
'position_start_time': [240.0, 0.0, 0.0],
'position_stop_time': [270.0, 0.0, 0.0],
# Amplitude [rad] and start and stop times [s] of a rectangle
# function along each of the hover Euler angles: roll, pitch,
# and yaw.
'angles_amplitude': [0.2, 0.05, 0.05],
'angles_start_time': [60.0, 120.0, 180.0],
'angles_stop_time': [90.0, 150.0, 210.0],
# Amplitude [rad] and start and stop times [s] of a triangle
# function differential input to the center flaps. This uses
# the full triangle wave, so the signal will have positive and
# negative sections.
'blown_flaps_amplitude': 0.2,
'blown_flaps_period': 30.0,
'blown_flaps_start_time': 300.0,
'blown_flaps_stop_time': 450.0,
# Start and stop times [s], period [s], and up and down
      # positions [rad] to place the outer ailerons feathered to, and
# normal to, the wind (respectively).
'drag_flaps_start_time': 540.0,
'drag_flaps_stop_time': 660.0,
'drag_flaps_period': 30.0,
'drag_flaps_low_drag_pos': np.deg2rad(-80.0),
'drag_flaps_high_drag_pos': 0.0,
# Amplitude [rad] and start and stop times [s] of a triangle
# function input to the elevator. This only uses a half-cycle
# of a triangle wave, so the entire signal will have the same
# sign as the amplitude.
'elevator_amplitude': 0.0,
'elevator_start_time': 0.0,
'elevator_stop_time': 0.0,
# Amplitude [rad] and start and stop times [s] of a triangle
# function input to the rudder. This uses the full triangle
# wave, so the signal will have positive and negative sections.
'rudder_amplitude': 0.3,
'rudder_start_time': 480.0,
'rudder_stop_time': 540.0
}
# Leave the altitude gate (in platform frame) to ascent-complete gate.
max_z_for_pay_out = perched_wing_pos_p[2]
mode = {
# Angle [rad] from the perch/platform azimuth to the wind direction for
# which the ground station is considered aligned for HoverAscend.
'aligned_perch_azi_to_wind_angle_for_ascend': (
np.pi / 2.0 if params['gs_model'] == m.kGroundStationModelGSv2
else np.pi),
# Maximum angle [rad] between the perch azimuth and the
# downwind direction allowed before ascend.
'max_perch_wind_misalignment_for_ascend': np.deg2rad(10.0),
# Maximum angle [rad] to allow between the kite azimuth and the ideal
# platform azimuth prior to descent onto GS02. This corresponds to
# ~20 cm misalignment on the panels.
'max_platform_misalignment_for_descend': np.deg2rad(1.4),
# Maximum z position [m], i.e. minimum altitude, before starting
# payout.
'max_z_for_pay_out': max_z_for_pay_out,
# Maximum yaw error [rad] and yaw rate [rad/s] allowed before we
# go to payout from ascend. This prevents dramatic position
# movements near the perch for low bandwidth controllers.
'max_yaw_angle_error_for_pay_out': 0.05,
'max_yaw_rate_for_pay_out': 0.025,
# Minimum winch position [m] at which to enter HoverFullLength.
'min_winch_pos_for_transform_gs_up':
np.deg2rad(-360.0) * gs02_drum_radius,
# Maximum azimuth error [rad] required before initiating the
# ground station transform.
'max_azimuth_error_for_transform': np.deg2rad(1.0),
# Maximum z position error [m] required before initiating the
# ground station transform.
'max_z_error_for_transform': 1.0,
# Maximum roll angle error [rad] allowed before acceleration.
'max_roll_angle_error_for_accel': 0.4,
# Maximum yaw angle error [rad] allowed before acceleration.
'max_yaw_angle_error_for_accel': 0.15,
# Maximum yaw rate [rad/s] allowed before acceleration.
'max_yaw_rate_for_accel': 0.1,
# Maximum total angular rate [rad/s] allowed before acceleration.
'max_angular_rate_for_accel': 0.2,
# Maximum azimuth error [rad] from the acceleration start
# location allowed before acceleration.
'max_azimuth_error_for_accel': np.deg2rad(10.0),
# Maximum z position [m] error allowed before acceleration.
'max_z_error_for_accel': 4.0,
# Maximum wing speed [m/s] allowed before acceleration.
'max_speed_for_accel': 3.0,
# Maximum y velocity [m/s] in body coordinates allowed
# before acceleration.
'max_body_y_vel_for_accel': 2.0,
# Minimum tension [N] before ascend. See b/113601392.
'min_tension_for_ascend': 1500.0,
# Minimum tension [N] allowed before acceleration.
'min_tension_for_accel': 4000.0,
# Minimum time [s] the controller is allowed to spend in
# transition-out. This is a safe guard against the pilot
# unintentionally commanding reel-in immediately following
# transition-out.
'min_time_in_trans_out': 12.0,
# Maximum tether elevation error [rad] that must be sustained,
# and which must be attained instantaneously, for the ground station
# transform.
'max_tether_elevation_error_for_gs_transform_staging': np.deg2rad(1.5),
'max_tether_elevation_error_for_gs_transform_kickoff': np.deg2rad(0.5),
# The duration [s] to sustain the tether elevation before
# initiating a ground station transform.
'min_gs_transform_staging_time': 10.0,
}
# Propwash velocity [m/s] at the elevator x position assuming zero
# apparent wind speed. The positive z component accounts for the
# vectoring of the propwash about the main wing and is determined
# empirically.
propwash_b = [-24.0, 0.0, 5.0]
# Apparent wind speed [m/s] at which the middle of the propwash
# impinges on the tail.
center_propwash_wind_speed = -4.0
output = {
# Weights [#] for the relative importance of total thrust and
# moments when saturations are applied. In general, meeting
# pitch and yaw requests is favored over meeting thrust
# requests.
'weights': {
'thrust': 1.0,
'moment': [3.0, 20.0, 1.0]
},
# Time [s] to slowly ramp the motor thrusts and moments up after
# the throttle has been brought above the software e-stop
# threshold (if the throttle was held low for long enough to
# latch the software e-stop), and for ramp down after perching.
# This is set to be conservatively long to start testing, and
# for gentle perching.
'gain_ramp_time': 2.5,
'propwash_b': propwash_b,
# Apparent wind speeds [m/s], along the negative body z-axis, at
# which there is no interaction of the propwash with the
# elevator, full interaction, and in the center of the full
# interaction.
'zero_propwash_wind_speed': center_propwash_wind_speed - 8.0,
'full_propwash_wind_speed': center_propwash_wind_speed - 3.0,
'center_propwash_wind_speed': center_propwash_wind_speed,
# Additional elevator deflection [rad] applied during
# transition-out.
'delta_ele_trans_out': np.deg2rad(-30.0),
# Conversion [rad/N-m] between the elevator pitch moment and the
# elevator deflection, assuming that the airspeed is the nominal
# propwash speed.
#
# This assumes a dCL/dalpha of 4.8, a horizontal tail area of
# 3.5 m^2, and a lever arm of 6.25 m.
'delta_elevator_per_pitch_moment': (
-1.0 / (0.5 * params['phys']['rho'] * 4.8 * 3.5 * 6.25
* np.linalg.norm(propwash_b)**2.0)),
# Minimum and maximum elevator feedback deflections [rad]. This
# assumes that the elevator feed-forward term places the
# elevator at zero lift. The minimum delta is intended to keep
# the elevator from stalling. The maximum delta has a larger
# range because there is significant uncertainty in the angle of
# the propwash. We think it is more important to ensure that
# the elevator applies some forward pitching moment, rather than
# protect against stall, so we increase the limit on this side.
'min_delta_elevator_fb': np.deg2rad(-9.0),
'max_delta_elevator_fb': np.deg2rad(15.0),
# Low-pass filter cutoff frequency [Hz]. We filter the
# feed-forward component of the elevator command to reduce servo
# wear and to reduce a feedback path between the elevator motion
# through the gyros back to the elevator.
'elevator_cutoff_freq': 1.0,
# Forward velocities [m/s] of the wing used to schedule flap
# gains and cutoff frequencies.
'no_aileron_rudder_speed': 4.0,
'full_aileron_rudder_speed': 10.0,
# Roll and yaw moment control derivatives [#/rad] for the
# aileron and rudder.
#
# TODO: Calculate these from the aero database.
'cl_da': np.rad2deg(-0.0071),
'cn_dr': np.rad2deg(-0.0012),
# Inner aileron deflections per roll moment [rad/N-m] assuming
# they are in the propwash. This number should be determined
# empirically, and hence we do not use configuration parameters
# here. For now we estimate it as:
#
# 1 / (0.5 * rho * propwash^2 * cl_da * (lever arm ratio) * (area ratio)
# * (wing area) * (wing span))
#
# assuming:
#
# cl_da [1/rad] : -0.0071 * 180 / pi
# propwash [m/s] : 24
# lever arm ratio [#] : 0.5 / 3.5
# area ratio [#] : 1 / 2
#
# TODO: Estimate from flight data.
'delta_blown_aileron_per_roll_moment': -1.18e-4,
# Forward speeds [m/s] at which to fade the blown flaps roll
# actuation to zero. The intent is to only use blown flaps when
# the airflow over the flaps is dominated by propwash, not
# freestream, such as may occur during trans-out when the kite
# has significant forward velocity.
'zero_blown_flaps_forward_speed': 5.0,
'full_blown_flaps_forward_speed': 2.5,
# Flap offsets and lower and upper limits [rad] in the standard
# order: port ailerons A1 and A2, center flaps A4 and A5,
# starboard ailerons A7 and A8, elevator, rudder. The aileron
# offsets are slightly up so there is still some room left for
# control during acceleration as the 0 degree flap position
# corresponds to maximum lift.
'flap_offsets': [
np.deg2rad(angle)
for angle in [-11.5, -11.5, -11.5, -11.5, -11.5, -11.5, 0.0, 0.0]
],
'lower_flap_limits': [
np.deg2rad(angle)
for angle in [-80.0, -80.0, -35.0, -35.0, -80.0, -80.0, -89.0, -22.0]
],
'upper_flap_limits': [
np.deg2rad(angle)
for angle in [0.0, 0.0, 15.0, 15.0, 0.0, 0.0, 15.0, 22.0]
],
# Deadzone values for the gs02 azimuth [rad] while perched and
# during all other flight modes.
'gs02_deadzone_while_perched': 0.0,
'gs02_deadzone_during_flight': np.deg2rad(0.5),
# Flap angle [rad] when ailerons are spoiled.
'spoiled_aileron_angle': -np.deg2rad(75.0),
}
# Expected heeling angle [rad] of the vessel away from the kite.
# This angle describes how much the vessel is tilted towards the kite,
# away from being upright.
# It is measured as a rotational angle over the axis defined by
# z_hat_g X kite_hat_g.
vessel_heel_ff = np.deg2rad(-0.5) if params['system']['offshore'] else 0.0
# The target tether elevation angle [rad] in ground frame.
tether_elevation_target_g = np.deg2rad(6.0)
tether_elevation_target_p = tether_elevation_target_g - vessel_heel_ff
path = {
# Maximum accelerations [m/s^2] allowed in the path.
'max_acceleration_g': [1.0, 1.0, 1.0],
# Perched position of the wing [m] in the perch frame (GSv1) or the
# platform frame (GSv2).
'perched_wing_pos_p': perched_wing_pos_p,
# Maximum speeds [m/s] that the path will ever move by in the
# radial, tangential, and z directions in autonomous hover
# during various phases of flight.
'max_normal_radial_speed': 5.0,
# During the RPX and CW programs, robust operation with tangential speeds
# of 5.0 m/s was demonstrated. Following the loss-of-vehicle in FCW-01
# this was reduced to 2.0 m/s due to concerns about the possible coupling
# of large hover sideslips to roll moments.
#
# TODO(b/143181116): Revisit once roll moments in hover are better
# understood.
'max_normal_tangential_speed': 2.0,
'max_ascend_perch_z_speed': 0.45,
'max_ascend_near_perch_z_speed': 0.5,
'max_ascend_normal_z_speed': 2.0,
'max_descend_perch_z_speed': 0.3,
'max_descend_near_perch_z_speed': 0.5 if using_gs02 else 1.0,
'max_descend_normal_z_speed': 2.0,
'max_accel_z_speed': 45.0,
'gps_error_tolerance': gps_error_tolerance,
# Ascend / Descend.
# Vertical offset [m] in ground coordinates from the perched
# position that the wing ascends to before paying out.
'ascend_offset_g_z': ascend_offset_g_z,
# Vertical offset [m] in ground coordinates from the perched
# position that the wing attempts to descend to after reel-in.
# (This should be positive to be below the perched position).
# This is conservatively large in case there is GPS drift.
'descend_offset_g_z': 10.0,
# Cutoff frequency [Hz] and damping ratio [#] for the
# low-pass-filter that smooths the velocity command in
# SmoothRawPositionCommand.
'velocity_cutoff_freq': 10.0,
'velocity_damping_ratio': 1.0 / np.sqrt(2.0),
# Pay-out / Reel-in.
# The angle [rad] at the levelwind that we attempt to achieve
# with the tether based on the position of the wing and a
# horizontal tension in the tether (negative means down).
# The target tether elevation [rad] for reel-in and payout.
'target_reel_tether_elevation': tether_elevation_target_p,
# The allowed range of tether elevation in GS02v2 during transform
# modes is 0 - 17 degrees (relative to the GS). The GS is expected to tilt
# 1 degree from hover tension in a steady hover state. So the center of
# the tether elevation window, relative to ground, is 7.5 degrees.
# We further reduce it by 1 degree. See b/131630581 for more details.
'target_transform_tether_elevation': tether_elevation_target_p,
# Target tether elevation above the perch at the end of ascend
# or at the beginning of descend.
'target_above_perch_tether_elevation':
target_above_perch_tether_elevation,
# Expected heeling angle [rad] of the vessel away from the kite.
'vessel_heel_ff': vessel_heel_ff,
# Proportional gains [rad/rad], integral gains [rad/rad-s],
# derivative gains [rad/(rad/s)], and integrated angle saturations
# [rad] for the PID controlling tether elevation in PayOut and
# ReelIn. Note that a separate controller is used in the
# PrepGsTransform modes.
'reel_tether_elevation_pid': {
'kp': controllers['reel_tether_elevation']['kp'],
'ki': controllers['reel_tether_elevation']['ki'],
'kd': controllers['reel_tether_elevation']['kd'],
'int_output_min': -0.4,
'int_output_max': 0.4
},
      # Maximum payout [m] within which to prepare for ascending/descending.
'max_payout_for_perching_prep': 7.0,
# Cutoff frequency [Hz] and damping ratio [#] for filter applied
# to tether elevation error. This is part of the total open loop
# gain of the tether elevation controller and supplies
# additional roll-off to avoid exciting tether modes above the
# controller's unity gain frequency.
'tether_elevation_error_fc': 0.05,
'tether_elevation_error_zeta': 1.0,
# Proportional, integral, and derivative loop gains ([m/rad],
# [(m/rad)/s], and [m/(rad/s)] respectively) and the integrated
# tether elevation limits [m] for the PrepGsTransform flight modes.
'transform_tether_elevation_pid': {
'kp': controllers['transform_tether_elevation']['kp'],
'ki': controllers['transform_tether_elevation']['ki'],
'kd': controllers['transform_tether_elevation']['kd'],
# TODO(b/116036191): Add protection from unreasonable altitude
# offsets.
'int_output_min': -100.0,
'int_output_max': 100.0,
},
# Azimuth offset [rad] from downwind at which the transform should be
# completed.
'transform_azimuth_offset': 0.0,
# The elevation angle [rad] limits of the launch/perch trajectory when
# the wing is near the perch.
# It is also necessary to make sure the window is compatible to
# near perch tether elevation control and the GPS error tolerance.
'launch_perch_elevation_max': launch_perch_elevation_max,
'launch_perch_elevation_min': launch_perch_elevation_min,
      # Tether length thresholds [m] used to cross-fade between the
      # short-tether and long-tether reel behaviors below.
'reel_short_tether_length': 0.1 * params['tether']['length'],
'reel_long_tether_length': 0.8 * params['tether']['length'],
# Azimuth offset [rad] used in pay-out and reel-in at short
# tether lengths, formerly used to bias perching to one side.
# This is cross-faded to transform_azimuth_offset at long tether lengths.
'reel_azimuth_offset': 0.0,
# Minimum and maximum elevations [rad] commanded by the hover
# controller at long tether lengths. These limits are
# cross-faded with the launch_perch_elevation at short tether
# lengths.
'reel_elevation_min': 0.0,
'reel_elevation_max': 0.5,
# Full length.
      # Elevation above the horizon (positive is up) where the wing begins its
# acceleration into crosswind.
'accel_start_elevation': 0.3,
# Reel-in before engage.
# Duration [s] of the crossfade from the estimated inertial velocity to
# the nominal velocity command in trans-out.
# TODO: This can probably be tightened up.
'transout_vg_cmd_crossfade_duration': 6.0,
# Multiplier to velocity command during TransOut to
# reduce the jump in thrust and power at the transition to TransOut
# The lower this gain, the smaller the jump in thrust, the higher
# the kite deceleration and the lower the final hover altitude. Values
# too high would violate the min thrust threshold (T > 0 N) and
# create tether motion.
'transout_vel_cmd_multiplier': 0.8,
# Minimum altitude (negative Xg.z position) commanded in HoverTransOut.
'transout_min_altitude': 150.0,
# Maximum altitude for safe horizontal translation.
'max_altitude_error_for_translation': 5.0
}
position = {
# Feed-forward Euler angles [rad] in XYZ order that result in no
# position movement. The roll angle is due to the asymmetric
# bridling. This is positive when the nominal tether direction
# has a negative y component in the body axes.
#
# The pitch angle accounts for the rotors being tilted forward by 3
# degrees, in addition to a substantial "blown lift" effect experienced
# by the propwash in interaction with the main wing.
#
# There are several effects that determine the yaw set-point:
# - Because the wing is rolled from the roll bridling and pitched
# back, it is necessary to add a positive yaw to point the
# thrust vector up.
# - Because the pylons are cambered and develop lift in the -y
# direction, it is necessary to cancel this lift by yawing in the
# positive direction.
# - If the propellers are all rotating in the same direction (not
# currently the case), then it is necessary to cancel the reaction
# moment of the propellers with a yaw offset.
#
# The yaw set-point was determined empirically; see b/72126270.
'eulers_ff': [np.arctan2(params['wing']['bridle_y_offset'],
params['wing']['bridle_rad'] +
params['wing']['bridle_pos'][0][2]),
np.arctan2(params['rotors'][0]['axis'][2],
params['rotors'][0]['axis'][0]) - 0.30,
np.deg2rad(4.0)],
# Tether lengths [m] that we use to schedule the radial and
# tangential controller gains.
'short_tether': 100.0,
'long_tether': 200.0,
# Altitudes [m] above ground-station origin (positive is up)
# that are used to schedule the long tether tangential gains.
# The low altitude is chosen to approximately correspond to the
# normal altitude at the short tether length. The high value is
# chosen to be approximately when the tether no longer touches
# the ground.
'low_altitude': 15.0,
'high_altitude': 30.0,
# Proportional gains [rad/m], integral gains [rad/m-s],
# derivative gains [rad/(m/s)], and integrated angle saturations
# [rad] for the radial and tangential position PID controllers.
'short_tether_radial_pid': {
'kp': 0.0,
'ki': 0.0,
'kd': 0.0,
'int_output_min': 0.0,
'int_output_max': 0.0
},
'long_tether_radial_pid': {
'kp': 0.0,
'ki': 0.0,
'kd': controllers['radial']['kd'],
'int_output_min': 0.0,
'int_output_max': 0.0
},
# The integrator saturations are chosen so the yaw command from
# the integrator will be less than 6 degrees.
'short_tether_tangential_pid': {
'kp': controllers['tangential_short_tether']['kp'],
'ki': controllers['tangential_short_tether']['ki'],
'kd': controllers['tangential_short_tether']['kd'],
'int_output_min': -0.3,
'int_output_max': 0.3
},
'low_altitude_long_tether_tangential_pid': {
'kp': controllers['tangential_low_altitude_long_tether']['kp'],
'ki': controllers['tangential_low_altitude_long_tether']['ki'],
'kd': controllers['tangential_low_altitude_long_tether']['kd'],
'int_output_min': -0.3,
'int_output_max': 0.3
},
'high_altitude_long_tether_tangential_pid': {
'kp': controllers['tangential_high_altitude_long_tether']['kp'],
'ki': controllers['tangential_high_altitude_long_tether']['ki'],
'kd': controllers['tangential_high_altitude_long_tether']['kd'],
'int_output_min': -0.3,
'int_output_max': 0.3
},
# Maximum angles [rad] that the proportional and derivative
# feedback is allowed to command.
'max_pos_angle_fb': 0.1,
'max_vel_angle_fb': 0.1,
# Gains on joysticks [rad/#] in pilot hover mode.
'k_pilot': [0.8, 0.3, 0.3],
# Starting and ending times [s] for crossfading between the current
# attitude and the nominal attitude command during the first few seconds
# of trans-out from crosswind. Yaw is especially prone to saturating
# motors and can rob a substantial amount of thrust which is why it is
# crossfaded more slowly.
'transout_angles_cmd_crossfade_start_times': [0.0, 0.0, 1.0],
'transout_angles_cmd_crossfade_end_times': [1.0, 1.0, 4.0],
# Constant hover pitch command [rad] for the kite during HoverTransOut
'transout_low_wind_pitch_cmd': np.deg2rad(4.0),
'transout_high_wind_pitch_cmd': np.deg2rad(-5.0),
# Wind speeds [m/s] across which to crossfade the transout pitch commands.
'transout_pitch_low_wind_speed': 3.0,
'transout_pitch_high_wind_speed': 11.0,
# Ending time [s] for crossfading between the current body angular rates
# and the nominal body angular rates.
# TODO: Examine making this consistent with the angle
# command crossfade.
'transout_pqr_cmd_crossfade_duration': [1.0, 1.0, 4.0],
# Minimum and maximum command angles [rad] that are passed to
# the hover angle controller.
'min_angles': [-0.4, -0.3, -0.4],
'max_angles': [0.4, 0.332, 0.4],
# Time [sec] to crossfade from the constant pitch angle command in
# HoverTransOut to regular tension regulation in HoverPrepTransformGsDown
'transformdown_pitch_cmd_crossfade_time': 3.0,
}
tension = {
# Minimum allowed tension [N] on the tether during hover. The
# M600 specification requires a minimum of 2.5 kN, so we stay a
# safe margin above this.
'tension_min_set_point': 8000.0,
# Proportional gains [rad/N] and integral gains [rad/N-s], and
# integrated pitch saturation [rad] for the tension loop. Note
# that the derivative term is effectively handled by the radial
# position loop. See generator_hover_controllers.m.
# TODO: Decrease int_output_min to allow high hover in
# higher winds.
'tension_hard_pid': {
'kp': controllers['tension_hard']['kp'],
'ki': controllers['tension_hard']['ki'],
'kd': controllers['tension_hard']['kd'],
'int_output_min': np.deg2rad(-7.0),
'int_output_max': np.deg2rad(12.0)
},
'tension_soft_pid': {
'kp': controllers['tension_soft']['kp'],
'ki': controllers['tension_soft']['ki'],
'kd': controllers['tension_soft']['kd'],
'int_output_min': np.deg2rad(-7.0),
'int_output_max': np.deg2rad(12.0)
},
# The hard and soft tension controllers are calculated based on
# spring constants. However, we schedule these controllers as a
# function of payout. The equation for the catenary spring
# constant as a function of payout is:
#
# mu g cosh(r / 2a)
# k_catenary = ---- * --------------------------------------
# 2 (r / 2a) cosh(r / 2a) - sinh(r / 2a)
#
# a = t0 / (mu * g)
#
# mu: Linear mass density [kg/m].
# g: Acceleration from gravity [m/s^2].
# r: Horizontal distance of tether [m].
# t0: Horizontal tension [N/m].
#
# From this equation, the 10 kN/m and 1 kN/m spring constants,
# using a horizontal tension of about 3400 N, occur around 80 m
# and 170 m of payout respectively. To be conservative, we use
# significantly lower values of payout for now.
#
# Payouts [m] below and above which we use the hard-spring
# tensions gains and the soft-spring tension gains. Between
# these values linearly crossfade the controllers.
'hard_spring_payout': 10.0,
'soft_spring_payout': 50.0,
# Rate limit [rad/s] on pitch command adjustments applied based
# on current flight mode. This avoids sudden changes in pitch
# request which can lead to large pitching moments and a
# potential reduction in overall thrust.
'additional_pitch_cmd_rate_limit': 0.1,
# Use payout [m] to schedule minimum and maximum pitch angles
# [rad]. Near the perch, we hold pitch constant to preserve
# perching geometry and avoid dramatic movements while perching.
#
# TODO: Move this pitch table to the attitude or position
# controllers, which control absolute pitch; the tension controller
# references pitch as an offset from the neutral attitude eulers_ff.
'payout_table': [0.0, 1.5, 5.0, 77.0],
'min_pitch_table': (np.deg2rad([16.0, 16.0, 3.0, -20.0]) + 0.30).tolist(),
'max_pitch_table': (np.deg2rad([16.0, 16.0, 11.5, 17.0]) + 0.30).tolist(),
# The coefficient of drag [#] during hover. From the way this
# is used in the code, it should be the drag coefficient of the
# wing at the nominal Euler angles, but referenced to the full
# wing area.
'hover_drag_coeff': 2.2,
# Maximum change in tension [N] that may be commanded by the
# pilot in autonomous modes using the pitch stick.
'max_pilot_extra_tension': 10000.0,
# The rate limit for the horizontal tension command [N/s].
# TODO(b/116036824): Check that this rate limit doesn't interfere with
# the pilot's ability to manually command extra tension.
'horizontal_tension_cmd_rate_limit': 500.0,
# Threshold value [#] that the joystick roll must exceed (positive or
# negative) to increment the horizontal tension command.
'horizontal_tension_joystick_roll_threshold': 0.8,
# Number of cycles [#] that the joystick roll must exceed its threshold
# value to increment the horizontal tension command.
'horizontal_tension_num_cycles_for_increment': 10,
# Horizontal tension increment [N] that may be applied by the pilot using
# the roll stick.
'horizontal_tension_pilot_increment': 500.0,
# Filter parameters for the pilot horizontal tension offset.
'horizontal_tension_pilot_offset_fc': 0.1, # [Hz]
'horizontal_tension_pilot_offset_zeta': 0.7, # [#]
# Maximum horizontal tension offset [N] that may be applied by the pilot.
'horizontal_tension_max_pilot_offset': 10e3,
}
# Zone [m] where wing-perch contact is possible.
contact_zone = 2.2
# Final winch speeds [m/s].
final_winch_speed = 0.1
if params['flight_plan'] == m.kFlightPlanDisengageEngage:
# For the first GS02 transform HITL, we will reel out slowly from slightly
# below the transform angle. After transforming up to high-tension, then
# back down to reel, we will reel in 1.5 m and stop.
upper_limit = -1.90 * np.pi * gs02_drum_radius
lower_limit = upper_limit - 1.5
winch_position_pay_out_table = np.linspace(
lower_limit, upper_limit, 6).tolist()
winch_speed_pay_out_table = [
final_winch_speed,
final_winch_speed,
final_winch_speed,
final_winch_speed,
final_winch_speed,
final_winch_speed,
]
winch_position_reel_in_table = np.linspace(
lower_limit, upper_limit, 5).tolist()
winch_speed_reel_in_table = [
0.0,
final_winch_speed,
final_winch_speed,
final_winch_speed,
final_winch_speed
]
else:
# Low, high, and high pitch groove winch speeds [m/s]. The high
# winch speed is decreased for the launch-perch flight plan
# because this flight plan is typically used when we are under
# constraints and have a limited range of movement. The high pitch
# groove winch speed is set to ensure the levelwind shuttle can keep
# up with the position of the tether on the drum in the last 1/2
# turn before starting the transform when paying out.
low_winch_speed = 0.5
high_pitch_groove_winch_speed = 0.15
if params['flight_plan'] in (m.kFlightPlanLaunchPerch,
m.kFlightPlanHoverInPlace):
high_winch_speed = low_winch_speed
else:
high_winch_speed = 2.0
# Reel-in is slowed down in the "perch approach zone." This zone
# [m] prevents a slightly shortened tether from causing excessive
# perch approach speed (a "shortened tether" could be caused by low
# tension during reel-in).
perch_approach_zone = 0.04 * params['tether']['length']
# Distance [m] over which we distribute acceleration and
# deceleration.
acc_dec_zone = 8.0 * high_winch_speed
# Transform slow zone [m].
transform_zone = 1.5 * 2.0 * np.pi * params['winch_sys']['r_drum']
# The predicted winch position [m] when the wing is perched includes
# the tether length and a 180 degree tether wrap around the drum
# during the perch transformation. We also add a little extra
# margin (1.0 m) for pay-out and reel-in.
winch_position_perched = (-params['tether']['length']
- np.pi * params['winch_sys']['r_drum'] + 1.0)
# Table of winch positions [m] that are used to schedule pay-out
# velocities.
winch_position_pay_out_table = [
winch_position_perched,
winch_position_perched + contact_zone + acc_dec_zone,
-acc_dec_zone - transform_zone,
-transform_zone,
-0.1,
0.0
]
# Table of winch speeds [m/s] (sign does not matter) at
# different points during pay-out.
winch_speed_pay_out_table = [
high_winch_speed,
high_winch_speed,
high_winch_speed,
high_pitch_groove_winch_speed,
high_pitch_groove_winch_speed,
high_pitch_groove_winch_speed
]
# Table of winch positions [m] that are used to schedule reel-in
# velocities.
winch_position_reel_in_table = [
winch_position_perched + perch_approach_zone,
winch_position_perched + perch_approach_zone + acc_dec_zone,
-acc_dec_zone - transform_zone,
-transform_zone,
0.0
]
# Table of winch speeds [m/s] (sign does not matter) at
# different points during reel-in.
winch_speed_reel_in_table = [
low_winch_speed,
high_winch_speed,
high_winch_speed,
high_pitch_groove_winch_speed,
high_pitch_groove_winch_speed,
]
gs02 = params['ground_station']['gs02']
winch = {
'winch_position_pay_out_table': winch_position_pay_out_table,
'winch_speed_pay_out_table': winch_speed_pay_out_table,
'winch_position_reel_in_table': winch_position_reel_in_table,
'winch_speed_reel_in_table': winch_speed_reel_in_table,
# Payout [m] below which we could contact the perch. The winch
# slows to its contact speed here.
'contact_payout': contact_zone,
# Final speed [m/s] when the winch is contacting the perch.
'contact_winch_speed': final_winch_speed,
# Maximum allowable winch speed [m/s] command. This may be
# higher than the winch speeds in the payout and reel-in tables
# because the pilot may choose to increase the winch speed.
'max_winch_speed': 3.0,
# Maximum linear acceleration [m/s^2] of the winch drum.
'max_winch_accel': gs02['max_drum_accel_in_reel'] * gs02['drum_radius'],
# Tension [N] at which the winch stops reel-in (presumably
# because it is trying to reel the wing in while it is perched).
# This is set to the maximum hover tension from the
# specification.
'max_tension': 55e3,
}
assert mconfig.IsStrictlyIncreasing(winch['winch_position_pay_out_table'])
assert mconfig.IsStrictlyIncreasing(winch['winch_position_reel_in_table'])
experiments = hover_experiments.GetExperiments()
return {
'altitude': altitude,
'angles': angles,
'inject': inject,
'mode': mode,
'output': output,
'path': path,
'position': position,
'tension': tension,
'winch': winch,
'experiments': experiments
}
|
google/makani
|
config/m600/control/hover.py
|
Python
|
apache-2.0
| 47,054
|
[
"exciting"
] |
1b1d966b6aad6cafeb987af72053123b28ee16f627b0b89723633d8aa3043eba
|
#!/usr/bin/env python3
# Script by JK
# Extracts targeted contigs from a multi-FASTA file eg. draft genome assembly
import subprocess
import os
import sys
import shutil
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Seq import UnknownSeq
import csv
# Usage
# Command-line interface.  One or more FASTA assemblies are collected as
# positional arguments; --db is the only required option.
import argparse
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='Extracts contigs containing a target sequence (eg. gene) from several multi-FASTA assemblies \n and aligns the contigs at the target sequence',
    usage='\n %(prog)s --db DBFILE --out OUTFILE --anno ANNOFILE [OPTIONS] FASTA-1 FASTA-2 ... FASTA-N')
# Target sequence; a local BLAST nucleotide database is built from this file.
parser.add_argument('--db', metavar='DBFILE', required=True, help='target sequence (FASTA) to search for (REQUIRED to create BLAST database)')
# Passed straight to blastn's -perc_identity (kept as a string on purpose).
parser.add_argument('--id', metavar='ID', default='100', help='percentage identity cutoff (default=100)')
parser.add_argument('--out', metavar='OUTFILE', default='contigs.gbk', help='output file (default=contigs.gbk)')
# If given, each extracted contig is annotated with Prokka using this
# reference protein set; otherwise plain FASTA is written.
parser.add_argument('--anno', metavar='ANNOFILE', help='reference proteins.faa file to annotate from (optional | requires Prokka to be installed)')
parser.add_argument('--cpus', metavar='CPUS', default='8', help='number of cpus to use (default=8)')
#parser.add_argument('--log', metavar='LOGFILE', default='tempdb/temp.log', help='Log file')
parser.add_argument('assembly', metavar='FASTA', nargs='+', help='FASTA assemblies to search')
parser.add_argument('--version', action='version', version='v2.0')
args = parser.parse_args()
# Functions
def msg(*args, **kwargs):
    """Write a progress/status message to stderr, forwarding print() arguments.

    Stdout is deliberately left free for data output.
    """
    kwargs["file"] = sys.stderr
    print(*args, **kwargs)
def err(*args, **kwargs):
    """Print an error message to stderr, clean up and abort.

    Removes the temporary BLAST database directory ('tempdb') if it exists,
    then exits the program with status 1.
    """
    msg(*args, **kwargs)
    if os.path.isdir('tempdb'):
        shutil.rmtree('tempdb')
    # Dropped the stray trailing semicolon of the original.
    sys.exit(1)
def check_file(f):
    """Abort via err() if *f* is not an existing regular file."""
    # 'not os.path.isfile(...)' replaces the unidiomatic '== False' test.
    # The old message referred to "CFML output files" -- a copy-paste leftover
    # from another tool; this script validates input FASTA assemblies.
    if not os.path.isfile(f):
        err('ERROR: Cannot find "{}". Check that the input file exists.'.format(f))
def progexit():
    """Remove the temporary working directory, report success and exit 0.

    Relies on the module-level ``outfile`` variable being set before the
    first call.
    """
    shutil.rmtree('./tempdb')
    banner()
    msg('Done. Output written to {}.'.format(outfile))
    banner()
    sys.exit(0)
def banner():
    """Print a horizontal separator line to stderr."""
    # The original ended with 'return()', which returned an empty tuple that
    # no caller ever used; an implicit None return is clearer.
    msg('--------------------------')
# Check output file
outfile = args.out
# Intermediate multi-FASTA collected here before the optional annotation step.
tmpout = 'tempdb/tmpout'
if (os.path.isfile(outfile)):
    msg('ERROR: {} already exists. Please specify another output file.'.format(outfile))
    sys.exit(1)
# Create local BLAST database
print('Creating BLAST database ...')
# Refuse to clobber an existing working directory from a previous/aborted run.
if os.path.isdir('tempdb'):
    msg('ERROR: "tempdb" already exists. Please remove/rename this directory first.')
    sys.exit(1)
# NOTE(review): os.mkdir('tempdb') would avoid spawning a subprocess here.
subprocess.call(['mkdir', 'tempdb'])
subprocess.call(['makeblastdb', '-in', args.db, '-parse_seqids', '-dbtype', 'nucl', '-out', 'tempdb/db'])
banner()
# Run BLAST and find lengths of contigs and orientation/position of target gene
seqlist = []    # all assembly file names searched
seqplus = []    # hit tables for assemblies with a plus-strand hit
seqminus = []   # hit tables for assemblies with a minus-strand hit
contigs = []    # names of contigs containing a hit
genestart = []  # bases preceding the target on each (oriented) contig
geneend = []    # bases following the target on each (oriented) contig
for f in args.assembly:
    check_file(f)
    seqlist.append(f)
    # Search each strand separately so the orientation of the hit is known.
    blastn_for = NcbiblastnCommandline(query=f, db='tempdb/db', word_size=32, strand='plus', dust='no', perc_identity=args.id, evalue=1E-99, outfmt=6, out='tempdb/bplus.txt')
    blastn_for()
    blastn_rev = NcbiblastnCommandline(query=f, db='tempdb/db', word_size=32, strand='minus', dust='no', perc_identity=args.id, evalue=1E-99, outfmt=6, out='tempdb/bminus.txt')
    blastn_rev()
    if os.stat('tempdb/bplus.txt').st_size > 0:
        msg('Searching {} ...'.format(f))
        # Tabular (outfmt 6) columns: qseqid sseqid pident length mismatch
        # gapopen qstart qend sstart send evalue bitscore.  Only the top hit
        # (first row) is used below.
        bplus = list(csv.reader(open('tempdb/bplus.txt', 'r'), delimiter='\t'))
        bplus.insert(0, f)  # prepend the file name so row[0] identifies the assembly
        seqplus.append(bplus)
        cont = str(bplus[1][0])   # contig (query) name of the top hit
        contigs.append(cont)
        start = int(bplus[1][6])  # 1-based query start of the hit
        end = int(bplus[1][7])    # 1-based query end of the hit
        for s in SeqIO.parse(f, 'fasta'):
            if s.id == cont:
                msg("Found '{}' (+) ...".format(cont))
                genestart.append(start - 1)   # bases before the target
                geneend.append(len(s) - end)  # bases after the target
    if os.stat('tempdb/bminus.txt').st_size > 0:
        msg('Searching {} ...'.format(f))
        bminus = list(csv.reader(open('tempdb/bminus.txt', 'r'), delimiter='\t'))
        bminus.insert(0, f)
        seqminus.append(bminus)
        cont = str(bminus[1][0])
        contigs.append(cont)
        start = int(bminus[1][6])
        end = int(bminus[1][7])
        for s in SeqIO.parse(f, 'fasta'):
            if s.id == cont:
                msg("Found '{}' (-) ...".format(cont))
                # Minus-strand hit: the contig will be reverse-complemented on
                # output, so the leading/trailing flank lengths swap roles.
                genestart.append(len(s) - end)
                geneend.append(start - 1)
if not genestart or not geneend:
    err('ERROR: No BLAST hits found. Try adjusting --id')
else:
    # Longest flank on either side across all assemblies; every output contig
    # is padded with Ns to these lengths so the target aligns at one offset.
    maxstart = max(genestart)
    maxend = max(geneend)
banner()
# Write target contigs to FASTA file
msg('Writing forward sequence matches ... ')
for row in seqplus:
    seqname = row[0]            # assembly file the contig came from
    cont = row[1][0]            # contig name of the top hit
    begin = int(row[1][6])
    finish = int(row[1][7])
    for s in SeqIO.parse(seqname, 'fasta'):
        if s.id == cont:
            # Pad both ends with Ns so the target starts at the same
            # position (maxstart) in every output record.
            b1len = maxstart - (begin - 1) # Determine start buffer
            b2len = maxend - (len(s) - finish) # Determine end buffer
            buff1 = UnknownSeq(b1len, character = 'N')
            buff2 = UnknownSeq(b2len, character = 'N')
            newseq = str(buff1 + s.seq + buff2) # Buffer sandwich
            # Records are renamed after the source file, not the contig.
            record = SeqRecord(Seq(newseq), id=seqname, description='')
            with open(tmpout, 'a') as f:
                SeqIO.write(record, f, 'fasta') # Write new sequence to file
msg('Writing reverse sequence matches (reverse complement) ...')
for row in seqminus:
    seqname = row[0]
    cont = row[1][0]
    begin = int(row[1][6])
    finish = int(row[1][7])
    for s in SeqIO.parse(seqname, 'fasta'):
        if s.id == cont:
            # Flanks are measured on the reverse complement, hence swapped.
            b1len = maxstart - (len(s) - finish) # Determine start buffer
            b2len = maxend - (begin - 1) # Determine end buffer
            buff1 = UnknownSeq(b1len, character = 'N')
            buff2 = UnknownSeq(b2len, character = 'N')
            rc = s.reverse_complement()
            newseq = str(buff1 + rc.seq + buff2) # Reverse complement
            record = SeqRecord(Seq(newseq), id=seqname, description='')
            with open(tmpout, 'a') as f:
                SeqIO.write(record, f, 'fasta') # Write new sequence to file
# Optional annotation
if args.anno == None:
    # No annotation requested: the padded FASTA is the final output.
    shutil.move(tmpout, outfile)
    progexit()
else:
    # Annotate each extracted contig separately with Prokka, then merge the
    # resulting GenBank files into one output file.
    for seq in SeqIO.parse(tmpout, 'fasta'):
        # Record ids are file paths (see above); reduce to the bare base name.
        seq.id = os.path.splitext(os.path.basename(seq.id))[0]
        strname = str(seq.id)
        # NOTE(review): this is the literal path 'tempdb/strname', reused for
        # every record; presumably 'tempdb/' + strname was intended.  It still
        # works because each iteration overwrites the file -- confirm.
        splitfile = 'tempdb/strname'
        banner()
        msg('Annotating {} ...'.format(strname))
        banner()
        SeqIO.write(seq, splitfile, 'fasta')
        subprocess.call(['prokka', '--outdir', 'tempdb/split', '--force', '--prefix', strname, '--locustag', strname, '--compliant', '--proteins', args.anno, '--cpus', args.cpus, splitfile])
    import glob
    # Concatenate all per-contig GenBank outputs into the final file.
    read_files = glob.glob('tempdb/split/*.gbk')
    with open(outfile, 'w') as write_file:
        for f in read_files:
            with open(f, 'r') as infile:
                write_file.write(infile.read())
    progexit()
|
kwongj/contig-puller
|
contig-puller.py
|
Python
|
gpl-2.0
| 6,533
|
[
"BLAST"
] |
768ec0db444aa8c20a672f3327835ab2a4390ef1738ed259de9528e03f86b5d3
|
"""
Created on May 17, 2012
@author: nmvdewie
"""
import unittest
import rmgpy.qm.qmtp as qm
import rmgpy.qm.parsers as pars
import os
import rmgpy.molecule as mol
import rmgpy.qm.calculator as calc
from rmgpy.thermo import ThermoData
import re
class Test(unittest.TestCase):
    """Tests for the QM output-file parsers (MOPAC PM3 and the cclib wrapper).

    Both tests parse pre-computed quantum-chemistry output files shipped under
    data/QMfiles next to this module; no QM jobs are actually run.
    """

    def testMOPAC_PM3_Parser(self):
        # Parse a pre-computed MOPAC PM3 output file (named by its
        # InChIKey-like identifier) and check that thermo data comes back.
        driver = qm.QMTP('mopac')
        name = 'GRWFGVWFFZKLTI-UHFFFAOYAF'
        InChIaug = 'InChI=1/C10H16/c1-7-4-5-8-6-9(7)10(8,2)3/h4,8-9H,5-6H2,1-3H3'
        molecule = mol.Molecule().fromInChI(InChIaug)
        directory = os.path.join(os.path.dirname(__file__),'data','QMfiles','MOPAC')
        mf = qm.molFile(molecule, name, directory)
        parser = pars.MOPACPM3Parser(mf, driver)
        result = parser.parse()
        assert isinstance(result, ThermoData)

    def testCCLibParser(self):
        #Tests to check whether the CCLibParser wrapper around CCLib works fine
        name = 'AAAOFKFEDKWQNN-UHFFFAOYAY'
        InChIaug = 'InChI=1/C9H14O2/c1-6(2)9-5-8(11-10)4-7(9)3/h4-6,8,10H,1-3H3'
        molecule = mol.Molecule().fromInChI(InChIaug)
        inputFileExtension = '.log'
        driver = qm.QMTP('gaussian03', 'pm3')
        directory = os.path.join(os.path.dirname(__file__),'data','QMfiles','G03')
        parsingTool = pars.CCLibParser(os.path.join(directory, name+inputFileExtension), driver)
        data = parsingTool.parse(molecule)
        # Cross-check the cclib-parsed quantities for internal consistency
        # (atom counts must agree across coordinates, atomic numbers, etc.).
        self.assertEqual(data.groundStateDegeneracy, 1)
        self.assertEqual(data.cclib_data.natom, 25)
        self.assertEqual(data.cclib_data.molmass, 154.09938)
        self.assertEqual(len(data.cclib_data.atomcoords[-1]), data.cclib_data.natom)
        self.assertEqual(len(data.atomCoords), data.numberOfAtoms)
        self.assertEqual(len(data.cclib_data.rotcons[-1]), 3)
        self.assertEqual(len(data.cclib_data.atomnos), data.cclib_data.natom)
        # Python 2 print statement -- this module targets Python 2.
        print 'Z-coord Atom coord of 1st atom: '+str(data.cclib_data.atomcoords[-1][0][2])
# Run this module's tests directly with a verbose runner.
if __name__ == "__main__":
    unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
faribas/RMG-Py
|
unittest/qm/parserTest.py
|
Python
|
mit
| 2,113
|
[
"MOPAC",
"cclib"
] |
cdbe2995b79c8dbde955733aed8bfcb0c3bf8ebfcb8b232db2e291a2d5a7b035
|
#! /usr/bin/env python
'''Test of text objects
'''
from OpenGLContext import testingcontext
BaseContext = testingcontext.getInteractive()
from OpenGL.GL import *
from OpenGLContext.arrays import *
import string, time, sys, os
import logging
log = logging.getLogger( __name__ )
from OpenGLContext.scenegraph.basenodes import *
from OpenGLContext.scenegraph.text import fontprovider
class TestContext( BaseContext ):
    """Interactive demo that renders VRML97 Text nodes and cycles font styles.

    Keyboard handlers let the user step through font styles/families and set
    the default font; two Text nodes show the current default and the
    currently displayed style.
    """
    # Index into self.styles of the style currently displayed; -1 = none yet.
    currentStyle = -1

    def Render( self, mode = 0):
        """Render all shapes in the scene via the rendering-mode visitor."""
        BaseContext.Render( self, mode )
        # NOTE(review): relies on self.shapes being provided by the base
        # context / scenegraph machinery -- it is never set in this file.
        for shape in self.shapes:
            mode.visit( shape )

    def setupFontProviders( self ):
        """Load font providers for the context

        See the OpenGLContext.scenegraph.text package for the
        available font providers.
        """
        from OpenGLContext.scenegraph.text import fontprovider
        # Each provider import is best-effort: a missing backend only costs
        # the corresponding font support, it does not abort the demo.
        try:
            from OpenGLContext.scenegraph.text import toolsfont
            registry = self.getTTFFiles()
        except ImportError, err:
            log.warn( """Unable to import fonttools-based TTF-file registry, no TTF font support!""" )
        else:
            fontprovider.setTTFRegistry(
                registry,
            )
            # Sorted family names drive the <f> key's family cycling.
            self.families = registry.DEFAULT_FAMILY_SETS.keys()
            self.families.sort()
            self.family = self.families[0]
        try:
            from OpenGLContext.scenegraph.text import pygamefont
        except ImportError:
            log.warn( """Unable to import PyGame TTF-font renderer, no PyGame anti-aliased font support!""" )
        try:
            from OpenGLContext.scenegraph.text import glutfont
        except ImportError:
            log.warn( """Unable to import GLUT-based TTF-file registry, no GLUT bitmap font support!""" )

    def OnInit( self ):
        """Build the scenegraph and register the keyboard handlers."""
        print """You should see a 3D-rendered text message"""
        print ' <p> previous fontstyle'
        print ' <n> next fontstyle'
        print ' <f> next font-family'
        print ' <d> set current font as default font for family'
        self.addEventHandler( "keypress", name="n", function = self.OnNextStyle)
        self.addEventHandler( "keypress", name="p", function = self.OnPreviousStyle)
        self.addEventHandler( "keypress", name="d", function = self.OnSetDefault)
        self.addEventHandler( "keypress", name="f", function = self.OnNextFamily)
        # FontStyle shown by the "Current Default" text node.
        self.currentDefault = FontStyle(
            justify = "MIDDLE",
            family = [self.family],
        )
        # FontStyle of the main display text; its family is swapped in
        # setStyle() as the user cycles styles.
        self.currentDisplay = FontStyle(
            justify = "MIDDLE",
        )
        self.defaultText = Text(
            string=["Current Default"],
            fontStyle = self.currentDefault,
        )
        self.displayText = Text(
            string=["Hello World!", "VRML97 Text"],
            fontStyle = self.currentDisplay,
        )
        # Scene: the default-font sample raised above the main display text.
        self.sg = sceneGraph(
            children = [
                Transform(
                    translation = (0,3,0),
                    children = [
                        Shape(
                            geometry = self.defaultText,
                            appearance = Appearance(
                                material = Material(
                                    diffuseColor = (.3,.2,.8),
                                    shininess = .1,
                                ),
                            ),
                        ),
                    ],
                ),
                Shape(
                    geometry = self.displayText,
                    appearance = Appearance(
                        material = Material(
                            diffuseColor = (.3,.2,.8),
                            shininess = .1,
                        ),
                    ),
                ),
            ]
        )
        self.buildStyles()

    def buildStyles( self ):
        """Build the set of styles from which to choose"""
        try:
            self.styles = self.getTTFFiles().familyMembers( self.family )
        except ImportError:
            # No TTF registry available: fall back on the generic VRML97
            # style names.
            self.styles = ['SERIF','SANS','TYPEWRITER']
        if self.styles:
            self.currentDefaultName = (
                self.getDefaultTTFFont( self.family ) or
                self.styles[0]
            )
            self.defaultDisplayText()
            # Start the display on the current default where possible.
            if self.currentDefaultName in self.styles:
                index = self.styles.index( self.currentDefaultName )
            else:
                index = 0
            self.setStyle( index )

    def setStyle (self, index = 0):
        """Set the current font style"""
        if self.styles:
            # Wrap the index so repeated <n>/<p> presses cycle endlessly.
            self.currentStyle = index%(len(self.styles))
            name = self.styles[self.currentStyle]
            self.currentDisplay.family = [name, ]
            #self.sg.children[1].geometry.fontStyle = self.currentDisplay
            #print 'node', repr(self.sg.children[1].geometry)
            assert not self.cache.getData( self.sg.children[1].geometry ), """Cache wasn't cleared for the text node"""
            self.mainDisplayText()

    def OnPreviousStyle( self, event = None):
        """Step back to the previous font style"""
        self.currentStyle -= 1
        # Every 20 styles, flush the font providers' caches to bound the
        # number of compiled fonts held in memory.
        if not self.currentStyle % 20:
            providers = fontprovider.FontProvider.getProviders( self.currentStyle )
            for provider in providers:
                provider.clear()
        self.setStyle( self.currentStyle )
        self.triggerRedraw( 1 )

    def OnNextStyle( self, event = None):
        """Advance to the next font style"""
        self.currentStyle += 1
        # See OnPreviousStyle: periodic provider-cache flush.
        if not self.currentStyle % 20:
            providers = fontprovider.FontProvider.getProviders( self.currentStyle )
            for provider in providers:
                provider.clear()
        self.setStyle( self.currentStyle )
        self.triggerRedraw( 1 )

    def OnSetDefault( self, event = None ):
        """Set default font for OpenGLContext to currently displayed"""
        if self.styles:
            name = self.currentDisplay.family[0]
            self.setDefaultTTFFont( name, type=self.family )
            self.currentDefaultName = name
            self.defaultDisplayText()
            self.triggerRedraw(1)

    def OnNextFamily( self, event=None ):
        """Choose the next font-family to display"""
        index = self.families.index( self.family )
        index += 1
        index = index %len(self.families)
        self.family = self.families[index]
        # Rebuilding the style list also resets the displayed style.
        self.buildStyles()
        self.triggerRedraw(1)

    def mainDisplayText( self ):
        """Decide on what text to display in the main text area"""
        base = []
        if self.styles:
            base.append( "Hello World!" )
            base.append( self.styles[self.currentStyle] )
            base.append( '(%s)'%( self.family ))
        else:
            base.extend( ['No', repr(self.family), 'fonts available'])
        self.displayText.string = base
        return base

    def defaultDisplayText( self ):
        """Refresh the 'Current Default' sample to show the default font."""
        self.currentDefault.family = [
            self.currentDefaultName
        ]
        self.defaultText.string = [
            'Current Default',
            self.currentDefaultName
        ]
# Start the interactive demonstration when run as a script.
if __name__ == "__main__":
    TestContext.ContextMainLoop()
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGLContext/bin/choosefonts.py
|
Python
|
lgpl-3.0
| 7,200
|
[
"VisIt"
] |
e2ef9ec646cbcb3395444cbc2e88a5138a92e6de485080e5d677e0fbea2d17eb
|
import bpy
class MeshProperty(bpy.types.PropertyGroup):
    """Property group for mesh information

    Holds all the properties for the meshes used in the animation.
    Used by the Paths-panel and the Spikes-panel"""
    # Name of the mesh used in the animation.
    mesh = bpy.props.StringProperty(name="Mesh")
    # Name of the object representing a neuron.
    neuron_object = bpy.props.StringProperty(name="Neuron Object")
    # Toggles for the two animation features.
    animPaths = bpy.props.BoolProperty(name="Animate paths", default=True)
    animSpikes = bpy.props.BoolProperty(name="Animate spikes", default = False)
    # Initial spike size; spikes shrink over 'spikeFadeout' frames.
    spikeScale = bpy.props.FloatProperty(name = "Spike scaling", description = "How big the spikes should be at the beginning of the animation", default = 3.0)
    spikeFadeout = bpy.props.IntProperty(name = "Spike fadeout", description = "How fast the spikes should scale down, in frames", default = 15)
    # Either take the spike color from the layer or use the explicit RGBA
    # value below.
    spikeUseLayerColor = bpy.props.BoolProperty(name = "Use layer color", default = False)
    spikeColor = bpy.props.FloatVectorProperty(name = "Spike color", default = (1.0, 0.0, 0.0, 1.0), subtype = 'COLOR', size = 4, min = 0.0, max = 1.0)
    # How neuron objects are oriented along their animation paths.
    orientationType = bpy.props.EnumProperty(
        name="materialOption",
        items=(
            ('NONE', 'None', 'Neuron orientation is not influenced'),
            ('OBJECT', 'Object', 'The neurons are tracking a specific object, e.g. a camera'),
            ('FOLLOW', 'Follow Curve', 'The neuron orientation is following the curve')
        ),
        default='FOLLOW'
    )
    # Target object name, used when orientationType == 'OBJECT'.
    orientationObject = bpy.props.StringProperty(name="Orientation object")
    # Bevel resolution of generated path curves (0 = no bevel smoothing).
    path_bevel_resolution = bpy.props.IntProperty(name = "Path resolution", min = 0, default = 8)
def register():
    """Registers the mesh properties"""
    bpy.utils.register_class(MeshProperty)
    # Expose the group as Scene.pam_anim_mesh so every scene carries one
    # instance of these settings.
    bpy.types.Scene.pam_anim_mesh = bpy.props.PointerProperty(type=MeshProperty)
def unregister():
    """Unregisters the mesh properties"""
    # Remove the scene pointer first, then unregister the class itself
    # (reverse order of register()).
    del bpy.types.Scene.pam_anim_mesh
    bpy.utils.unregister_class(MeshProperty)
|
MartinPyka/Parametric-Anatomical-Modeling
|
pam/pam_anim/tools/meshTools.py
|
Python
|
gpl-2.0
| 1,930
|
[
"NEURON"
] |
cdebfeb3fc1941ee559dac2c952f35ecbb77af13efdc45302e58c523e0651770
|
# Copyright (c) 2003-2010 Sylvain Thenault (thenault@gmail.com).
# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" %prog [options] module_or_package
Check that a module satisfy a coding standard (and more !).
%prog --help
Display this help message and exit.
%prog --help-msg <msg-id>[,<msg-id>]
Display help messages about given message identifiers and exit.
"""
# import this first to avoid builtin namespace pollution
from pylint.checkers import utils
import sys
import os
import re
import tokenize
from logilab.common.configuration import UnsupportedAction, OptionsManagerMixIn
from logilab.common.optik_ext import check_csv
from logilab.common.modutils import load_module_from_name
from logilab.common.interface import implements
from logilab.common.textutils import splitstrip
from logilab.common.fileutils import norm_open
from logilab.common.ureports import Table, Text
from logilab.common.__pkginfo__ import version as common_version
from logilab.astng import MANAGER, nodes
from logilab.astng.__pkginfo__ import version as astng_version
from pylint.utils import UnknownMessage, MessagesHandlerMixIn, \
ReportsHandlerMixIn, MSG_TYPES, sort_checkers, expand_modules
from pylint.interfaces import ILinter, IRawChecker, IASTNGChecker
from pylint.checkers import BaseRawChecker, EmptyReport, \
table_lines_from_stats
from pylint.reporters.text import TextReporter, ParseableTextReporter, \
VSTextReporter, ColorizedTextReporter
from pylint.reporters.html import HTMLReporter
from pylint import config
from pylint.__pkginfo__ import version
OPTION_RGX = re.compile('\s*#*\s*pylint:(.*)')
# Maps each value of the --output-format option to the reporter class that
# implements it.
REPORTER_OPT_MAP = {'text': TextReporter,
                    'parseable': ParseableTextReporter,
                    'msvs': VSTextReporter,
                    'colorized': ColorizedTextReporter,
                    'html': HTMLReporter,}
# Python Linter class #########################################################
MSGS = {
'F0001': ('%s',
'Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).'),
'F0002': ('%s: %s',
'Used when an unexpected error occurred while building the ASTNG \
representation. This is usually accompanied by a traceback. \
Please report such errors !'),
'F0003': ('ignored builtin module %s',
'Used to indicate that the user asked to analyze a builtin module\
which has been skipped.'),
'F0004': ('unexpected inferred value %s',
'Used to indicate that some value of an unexpected type has been \
inferred.'),
'I0001': ('Unable to run raw checkers on built-in module %s',
'Used to inform that a built-in module has not been checked \
using the raw checkers.'),
'I0010': ('Unable to consider inline option %r',
'Used when an inline option is either badly formatted or can\'t \
be used inside modules.'),
'I0011': ('Locally disabling %s',
'Used when an inline option disables a message or a messages \
category.'),
'I0012': ('Locally enabling %s',
'Used when an inline option enables a message or a messages \
category.'),
'I0013': ('Ignoring entire file',
'Used to inform that the file will not be checked'),
'E0001': ('%s',
'Used when a syntax error is raised for a module.'),
'E0011': ('Unrecognized file option %r',
'Used when an unknown inline option is encountered.'),
'E0012': ('Bad option value %r',
'Used when a bad value for an inline option is encountered.'),
}
class PyLinter(OptionsManagerMixIn, MessagesHandlerMixIn, ReportsHandlerMixIn,
BaseRawChecker):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astng checker in order
to:
* handle message activation / deactivation at the module level
* handle some basic but necessary stats'data (number of classes, methods...)
"""
__implements__ = (ILinter, IRawChecker, IASTNGChecker)
name = 'master'
priority = 0
msgs = MSGS
may_be_disabled = False
options = (('ignore',
{'type' : 'csv', 'metavar' : '<file>',
'dest' : 'black_list', 'default' : ('CVS',),
'help' : 'Add <file or directory> to the black list. It \
should be a base name, not a path. You may set this option multiple times.'}),
('enable-checker',
{'type' : 'csv', 'metavar': '<checker ids>',
'group': 'Messages control',
'help' : 'Enable only checker(s) with the given id(s).\
This option conflicts with the disable-checker option'}),
('disable-checker',
{'type' : 'csv', 'metavar': '<checker ids>',
'group': 'Messages control',
'help' : 'Enable all checker(s) except those with the \
given id(s).\
This option conflicts with the enable-checker option'}),
('persistent',
{'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : 'Pickle collected data for later comparisons.'}),
('load-plugins',
{'type' : 'csv', 'metavar' : '<modules>', 'default' : (),
'help' : 'List of plugins (as comma separated values of \
python modules names) to load, usually to register additional checkers.'}),
('output-format',
{'default': 'text', 'type': 'choice', 'metavar' : '<format>',
'choices': ('text', 'parseable', 'msvs', 'colorized', 'html'),
'short': 'f',
'group': 'Reports',
'help' : 'Set the output format. Available formats are text,\
parseable, colorized, msvs (visual studio) and html'}),
('include-ids',
{'type' : 'yn', 'metavar' : '<y_or_n>', 'default' : 0,
'short': 'i',
'group': 'Reports',
'help' : 'Include message\'s id in output'}),
('files-output',
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'group': 'Reports',
'help' : 'Put messages in a separate file for each module / \
package specified on the command line instead of printing them on stdout. \
Reports (if any) will be written in a file name "pylint_global.[txt|html]".'}),
('reports',
{'default': 1, 'type' : 'yn', 'metavar' : '<y_or_n>',
'short': 'r',
'group': 'Reports',
'help' : 'Tells whether to display a full report or only the\
messages'}),
('evaluation',
{'type' : 'string', 'metavar' : '<python_expression>',
'group': 'Reports',
'default': '10.0 - ((float(5 * error + warning + refactor + \
convention) / statement) * 10)',
'help' : 'Python expression which should return a note less \
than 10 (10 is the highest note). You have access to the variables errors \
warning, statement which respectively contain the number of errors / warnings\
messages and the total number of statements analyzed. This is used by the \
global evaluation report (R0004).'}),
('comment',
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'group': 'Reports',
'help' : 'Add a comment according to your evaluation note. \
This is used by the global evaluation report (R0004).'}),
('enable-report',
{'type' : 'csv', 'metavar': '<rpt ids>',
'group': 'Reports',
'help' : 'Enable the report(s) with the given id(s).'}),
('disable-report',
{'type' : 'csv', 'metavar': '<rpt ids>',
'group': 'Reports',
'help' : 'Disable the report(s) with the given id(s).'}),
('enable-msg-cat',
{'type' : 'string', 'metavar': '<msg cats>',
'group': 'Messages control',
'help' : 'Enable all messages in the listed categories (IRCWEF).'}),
('disable-msg-cat',
{'type' : 'string', 'metavar': '<msg cats>', 'default': 'I',
'group': 'Messages control',
'help' : 'Disable all messages in the listed categories (IRCWEF).'}),
('enable-msg',
{'type' : 'csv', 'metavar': '<msg ids>',
'group': 'Messages control',
'help' : 'Enable the message(s) with the given id(s).'}),
('disable-msg',
{'type' : 'csv', 'metavar': '<msg ids>',
'group': 'Messages control',
'help' : 'Disable the message(s) with the given id(s).'}),
)
option_groups = (
('Messages control', 'Options controling analysis messages'),
('Reports', 'Options related to output formating and reporting'),
)
def __init__(self, options=(), reporter=None, option_groups=(),
pylintrc=None):
# some stuff has to be done before ancestors initialization...
#
# checkers / reporter / astng manager
self.reporter = None
self._checkers = {}
self._ignore_file = False
# visit variables
self.base_name = None
self.base_file = None
self.current_name = None
self.current_file = None
self.stats = None
# init options
self.options = options + PyLinter.options
self.option_groups = option_groups + PyLinter.option_groups
self._options_methods = {
'enable-report': self.enable_report,
'disable-report': self.disable_report,
'enable-msg': self.enable_message,
'disable-msg': self.disable_message,
'enable-msg-cat': self.enable_message_category,
'disable-msg-cat': self.disable_message_category}
full_version = '%%prog %s, \nastng %s, common %s\nPython %s' % (
version, astng_version, common_version, sys.version)
OptionsManagerMixIn.__init__(self, usage=__doc__,
version=full_version,
config_file=pylintrc or config.PYLINTRC)
MessagesHandlerMixIn.__init__(self)
ReportsHandlerMixIn.__init__(self)
BaseRawChecker.__init__(self)
# provided reports
self.reports = (('R0001', 'Messages by category',
report_total_messages_stats),
('R0002', '% errors / warnings by module',
report_messages_by_module_stats),
('R0003', 'Messages',
report_messages_stats),
('R0004', 'Global evaluation',
self.report_evaluation),
)
self.register_checker(self)
self._dynamic_plugins = []
self.load_provider_defaults()
self.set_reporter(reporter or TextReporter(sys.stdout))
def load_plugin_modules(self, modnames):
    """Load and register a list of pylint plugin modules.

    Each module name is imported via load_module_from_name and its
    ``register(linter)`` entry point is invoked.  Modules already seen
    are skipped so a plugin is only registered once.
    """
    for modname in modnames:
        if modname in self._dynamic_plugins:
            # already loaded: never register a plugin twice
            continue
        self._dynamic_plugins.append(modname)
        module = load_module_from_name(modname)
        module.register(self)
def set_reporter(self, reporter):
    """Set the reporter used to display messages and reports.

    The reporter gets a back-reference to this linter so it can consult
    configuration (e.g. whether to include message ids) when formatting.
    """
    self.reporter = reporter
    # give the reporter a handle back onto the linter
    reporter.linter = self
def set_option(self, opt_name, value, action=None, opt_dict=None):
    """Overridden from configuration.OptionsProviderMixin to handle some
    special options.

    Message / report toggles are dispatched to the matching
    ``self._options_methods`` callback; ``output-format`` swaps the
    reporter; checker enable/disable options are expanded from CSV.
    Everything else falls through to the base implementation.
    """
    if opt_name in self._options_methods:
        if value:
            meth = self._options_methods[opt_name]
            # value may be a CSV string; normalize to a sequence
            value = check_csv(None, opt_name, value)
            if isinstance(value, (list, tuple)):
                for _id in value:
                    meth(_id)
            else:
                meth(value)
    elif opt_name == 'output-format':
        self.set_reporter(REPORTER_OPT_MAP[value.lower()]())
    elif opt_name in ('enable-checker', 'disable-checker'):
        if not value:
            return
        checkerids = [v.lower() for v in check_csv(None, opt_name, value)]
        self.enable_checkers(checkerids, opt_name == 'enable-checker')
    try:
        BaseRawChecker.set_option(self, opt_name, value, action, opt_dict)
    except UnsupportedAction:
        # works on both Python 2 and 3, unlike the former `print >>`
        sys.stderr.write("option %s can't be read from config file\n"
                         % opt_name)
# checkers manipulation methods ############################################
def register_checker(self, checker):
    """Register a new checker.

    ``checker`` is an object implementing IRawChecker and/or
    IASTNGChecker.  Its reports, options and messages (when present)
    are registered alongside it, then its option defaults are loaded.
    """
    # priority <= 0 by convention: the master checker itself runs first
    assert checker.priority <= 0, 'checker priority can\'t be >= 0'
    self._checkers[checker.name] = checker
    if hasattr(checker, 'reports'):
        for r_id, r_title, r_cb in checker.reports:
            self.register_report(r_id, r_title, r_cb, checker)
    self.register_options_provider(checker)
    if hasattr(checker, 'msgs'):
        self.register_messages(checker)
    checker.load_defaults()
def enable_checkers(self, listed, enabled):
    """Enable or disable exactly the checkers named in ``listed``.

    When enabling, every checker that may be disabled is first turned
    off so only the listed ones end up active.  An unknown checker id
    raises an Exception.
    """
    if enabled:
        # activating an explicit set: deactivate everything else first
        for checker in self._checkers.values():
            if not checker.may_be_disabled:
                continue
            checker.enable(not enabled)
    for checkerid in listed:
        try:
            checker = self._checkers[checkerid]
        except KeyError:
            raise Exception('no checker named %s' % checkerid)
        checker.enable(enabled)
def disable_noerror_checkers(self):
    """Disable all checkers without error ('E') messages, plus the
    'miscellaneous' checker, which can be safely deactivated in debug
    mode.
    """
    for checker in self._checkers.values():
        if checker.name == 'miscellaneous':
            checker.enable(False)
            continue
        # if checker is already explicitly disabled (e.g. rpython),
        # don't re-enable it
        if checker.enabled:
            for msgid in getattr(checker, 'msgs', {}).keys():
                if msgid[0] == 'E':
                    checker.enable(True)
                    break
            else:
                # no error message found: switch the checker off
                checker.enable(False)
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
def process_tokens(self, tokens):
"""process tokens from the current module to search for module/block
level options
"""
comment = tokenize.COMMENT
newline = tokenize.NEWLINE
#line_num = 0
for (tok_type, _, start, _, line) in tokens:
if tok_type not in (comment, newline):
continue
#if start[0] == line_num:
# continue
match = OPTION_RGX.search(line)
if match is None:
continue
if match.group(1).strip() == "disable-all":
self.add_message('I0013', line=start[0])
self._ignore_file = True
return
try:
opt, value = match.group(1).split('=', 1)
except ValueError:
self.add_message('I0010', args=match.group(1).strip(),
line=start[0])
continue
opt = opt.strip()
#line_num = start[0]
if opt in self._options_methods and not opt.endswith('-report'):
meth = self._options_methods[opt]
for msgid in splitstrip(value):
try:
meth(msgid, 'module', start[0])
except UnknownMessage:
self.add_message('E0012', args=msgid, line=start[0])
else:
self.add_message('E0011', args=opt, line=start[0])
def collect_block_lines(self, node, msg_state):
"""walk ast to collect block level options line numbers"""
# recurse on children (depth first)
for child in node.get_children():
self.collect_block_lines(child, msg_state)
first = node.fromlineno
last = node.tolineno
# first child line number used to distinguish between disable-msg
# which are the first child of scoped node with those defined later.
# For instance in the code below:
#
# 1. def meth8(self):
# 2. """test late disabling"""
# 3. # pylint: disable-msg=E1102
# 4. print self.blip
# 5. # pylint: disable-msg=E1101
# 6. print self.bla
#
# E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
#
# this is necessary to disable locally messages applying to class /
# function using their fromlineno
if isinstance(node, (nodes.Module, nodes.Class, nodes.Function)) and node.body:
firstchildlineno = node.body[0].fromlineno
else:
firstchildlineno = last
for msgid, lines in msg_state.iteritems():
for lineno, state in lines.items():
if first <= lineno <= last:
if lineno > firstchildlineno:
state = True
# set state for all lines for this block
first, last = node.block_range(lineno)
for line in xrange(first, last+1):
# do not override existing entries
if not line in self._module_msgs_state.get(msgid, ()):
if line in lines: # state change in the same block
state = lines[line]
try:
self._module_msgs_state[msgid][line] = state
except KeyError:
self._module_msgs_state[msgid] = {line: state}
del lines[lineno]
# code checking methods ###################################################
def check(self, files_or_modules):
"""main checking entry: check a list of files or modules from their
name.
"""
self.reporter.include_ids = self.config.include_ids
if not isinstance(files_or_modules, (list, tuple)):
files_or_modules = (files_or_modules,)
checkers = sort_checkers(self._checkers.values())
# notify global begin
for checker in checkers:
checker.open()
# build ast and check modules or packages
for descr in self.expand_files(files_or_modules):
modname, filepath = descr['name'], descr['path']
self.set_current_module(modname, filepath)
# get the module representation
astng = self.get_astng(filepath, modname)
if astng is None:
continue
self.base_name = descr['basename']
self.base_file = descr['basepath']
if self.config.files_output:
reportfile = 'pylint_%s.%s' % (modname, self.reporter.extension)
self.reporter.set_output(open(reportfile, 'w'))
self._ignore_file = False
# fix the current file (if the source file was not available or
# if it's actually a c extension)
self.current_file = astng.file
self.check_astng_module(astng, checkers)
# notify global end
self.set_current_module('')
checkers.reverse()
for checker in checkers:
checker.close()
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = expand_modules(modules, self.config.black_list)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "F0001":
message = str(error["ex"]).replace(os.getcwd() + os.sep, '')
self.add_message(key, args=message)
return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.current_name = modname
self.current_file = filepath or modname
self.stats['by_module'][modname] = {}
self.stats['by_module'][modname]['statement'] = 0
for msg_cat in MSG_TYPES.values():
self.stats['by_module'][modname][msg_cat] = 0
# XXX hack, to be correct we need to keep module_msgs_state
# for every analyzed module (the problem stands with localized
# messages which are only detected in the .close step)
if modname:
self._module_msgs_state = {}
self._module_msg_cats_state = {}
def get_astng(self, filepath, modname):
"""return a astng representation for a module"""
try:
return MANAGER.astng_from_file(filepath, modname)
except SyntaxError, ex:
self.add_message('E0001', line=ex.lineno, args=ex.msg)
except KeyboardInterrupt:
raise
except Exception, ex:
#if __debug__:
# import traceback
# traceback.print_exc()
self.add_message('F0002', args=(ex.__class__, ex))
def check_astng_module(self, astng, checkers):
"""check a module from its astng representation, real work"""
# call raw checkers if possible
if not astng.pure_python:
self.add_message('I0001', args=astng.name)
else:
#assert astng.file.endswith('.py')
stream = norm_open(astng.file)
# invoke IRawChecker interface on self to fetch module/block
# level options
self.process_module(stream)
if self._ignore_file:
return False
# walk ast to collect line numbers
orig_state = self._module_msgs_state.copy()
self._module_msgs_state = {}
self.collect_block_lines(astng, orig_state)
for checker in checkers:
if implements(checker, IRawChecker) and checker is not self:
stream.seek(0)
checker.process_module(stream)
# generate events to astng checkers
self.astng_events(astng, [checker for checker in checkers
if implements(checker, IASTNGChecker)])
return True
def astng_events(self, astng, checkers, _reversed_checkers=None):
"""generate event to astng checkers according to the current astng
node and recurse on its children
"""
if _reversed_checkers is None:
_reversed_checkers = checkers[:]
_reversed_checkers.reverse()
if astng.is_statement:
self.stats['statement'] += 1
# generate events for this node on each checker
for checker in checkers:
checker.visit(astng)
# recurse on children
for child in astng.get_children():
self.astng_events(child, checkers, _reversed_checkers)
for checker in _reversed_checkers:
checker.leave(astng)
# IASTNGChecker interface #################################################
def open(self):
    """Initialize counters before a check run.

    Resets the global stats dict: per-module stats, per-message-id
    counts, total statements, and one zeroed counter per message
    category (from the project-level MSG_TYPES mapping).
    """
    self.stats = {'by_module': {},
                  'by_msg': {},
                  'statement': 0,
                  }
    for msg_cat in MSG_TYPES.values():
        self.stats[msg_cat] = 0
def close(self):
"""close the whole package /module, it's time to make reports !
if persistent run, pickle results for later comparison
"""
if self.base_name is not None:
# load old results if any
old_stats = config.load_results(self.base_name)
if self.config.reports:
self.make_reports(self.stats, old_stats)
# save results if persistent run
if self.config.persistent:
config.save_results(self.stats, self.base_name)
# specific reports ########################################################
def report_evaluation(self, sect, stats, old_stats):
"""make the global evaluation report"""
# check with at least check 1 statements (usually 0 when there is a
# syntax error preventing pylint from further processing)
if stats['statement'] == 0:
raise EmptyReport()
# get a global note for the code
evaluation = self.config.evaluation
try:
note = eval(evaluation, {}, self.stats)
except Exception, ex:
msg = 'An exception occurred while rating: %s' % ex
else:
stats['global_note'] = note
msg = 'Your code has been rated at %.2f/10' % note
if old_stats.has_key('global_note'):
msg += ' (previous run: %.2f/10)' % old_stats['global_note']
if self.config.comment:
msg = '%s\n%s' % (msg, config.get_note_message(note))
sect.append(Text(msg))
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, old_stats):
    """Make the total errors / warnings report (R0001).

    Builds a 4-column table (type, number, previous, difference)
    comparing the current run against the previous pickled stats.
    """
    lines = ['type', 'number', 'previous', 'difference']
    lines += table_lines_from_stats(stats, old_stats,
                                    ('convention', 'refactor',
                                     'warning', 'error'))
    sect.append(Table(children=lines, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
    """Make the messages-by-type report (R0003).

    Raises EmptyReport when no message was emitted.  Informational
    ('I') messages are excluded; remaining ids are listed by
    decreasing occurrence count.
    """
    if not stats['by_msg']:
        # don't print this report when no message was detected
        raise EmptyReport()
    in_order = [(value, msg_id)
                for msg_id, value in stats['by_msg'].items()
                if not msg_id.startswith('I')]
    in_order.sort()
    in_order.reverse()
    lines = ('message id', 'occurrences')
    for value, msg_id in in_order:
        lines += (msg_id, str(value))
    sect.append(Table(children=lines, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
    """Make the errors / warnings by module report (R0002).

    Computes, per module, the percentage of each message category it
    contributed, and tabulates modules in decreasing error/warning
    order.  Raises EmptyReport for single-module runs or when no
    module has errors or warnings.
    """
    if len(stats['by_module']) == 1:
        # don't print this report when analysing a single module
        raise EmptyReport()
    by_mod = {}
    for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'):
        total = stats[m_type]
        for module in stats['by_module'].keys():
            mod_total = stats['by_module'][module][m_type]
            if total == 0:
                percent = 0
            else:
                percent = float(mod_total * 100) / total
            by_mod.setdefault(module, {})[m_type] = percent
    sorted_result = []
    for module, mod_info in by_mod.items():
        sorted_result.append((mod_info['error'],
                              mod_info['warning'],
                              mod_info['refactor'],
                              mod_info['convention'],
                              module))
    sorted_result.sort()
    sorted_result.reverse()
    lines = ['module', 'error', 'warning', 'refactor', 'convention']
    for line in sorted_result:
        # rows are sorted by decreasing (error, warning): once both hit
        # zero, every remaining row is all-zero too
        if line[0] == 0 and line[1] == 0:
            break
        lines.append(line[-1])
        for val in line[:-1]:
            lines.append('%.2f' % val)
    if len(lines) == 5:
        # only the header row survived: nothing to report
        raise EmptyReport()
    sect.append(Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
# this may help to import modules using gettext
try:
__builtins__._ = str
except AttributeError:
__builtins__['_'] = str
def preprocess_options(args, search_for):
    """Scan ``args`` for options that must be processed before the others.

    ``search_for`` maps option names (without the leading ``--``) to
    ``(callback, takearg)`` pairs.  Matching options (and their argument
    when ``takearg`` is true) are removed from ``args`` in place and the
    callback is invoked as ``callback(option, value)``.  Unrecognized
    arguments are left untouched.

    Note: a matched take-arg option at the very end of ``args`` raises
    IndexError (preserved historical behaviour).
    """
    i = 0
    while i < len(args):
        arg = args[i]
        if arg.startswith('--'):
            # accept both '--opt=value' and '--opt value' forms
            try:
                option, val = arg[2:].split('=', 1)
            except ValueError:
                option, val = arg[2:], None
            try:
                cb, takearg = search_for[option]
                del args[i]
                if takearg and val is None:
                    val = args[i]
                    del args[i]
                cb(option, val)
            except KeyError:
                # not one of ours: leave it in place and move on
                i += 1
        else:
            i += 1
class Run:
"""helper class to use as main for pylint :
run(*sys.argv[1:])
"""
LinterClass = PyLinter
option_groups = (
('Commands', 'Options which are actually commands. Options in this \
group are mutually exclusive.'),
)
def __init__(self, args, reporter=None):
self._rcfile = None
self._plugins = []
preprocess_options(args, {
# option: (callback, takearg)
'rcfile': (self.cb_set_rcfile, True),
'load-plugins': (self.cb_add_plugins, True),
})
self.linter = linter = self.LinterClass((
('rcfile',
{'action' : 'callback', 'callback' : lambda *args: 1,
'type': 'string', 'metavar': '<file>',
'help' : 'Specify a configuration file.'}),
('init-hook',
{'action' : 'callback', 'type' : 'string', 'metavar': '<code>',
'callback' : cb_init_hook,
'help' : 'Python code to execute, usually for sys.path \
manipulation such as pygtk.require().'}),
('help-msg',
{'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>',
'callback' : self.cb_help_message,
'group': 'Commands',
'help' : '''Display a help message for the given message id and \
exit. The value may be a comma separated list of message ids.'''}),
('list-msgs',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_list_messages,
'group': 'Commands',
'help' : "Generate pylint's messages."}),
('full-documentation',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_full_documentation,
'group': 'Commands',
'help' : "Generate pylint's full documentation."}),
('generate-rcfile',
{'action' : 'callback', 'callback' : self.cb_generate_config,
'group': 'Commands',
'help' : '''Generate a sample configuration file according to \
the current configuration. You can put other options before this one to get \
them in the generated configuration.'''}),
('generate-man',
{'action' : 'callback', 'callback' : self.cb_generate_manpage,
'group': 'Commands',
'help' : "Generate pylint's man page.",'hide': 'True'}),
('errors-only',
{'action' : 'callback', 'callback' : self.cb_error_mode,
'short': 'e',
'help' : '''In error mode, checkers without error messages are \
disabled and for others, only the ERROR messages are displayed, and no reports \
are done by default'''}),
('profile',
{'type' : 'yn', 'metavar' : '<y_or_n>',
'default': False,
'help' : 'Profiled execution.'}),
), option_groups=self.option_groups,
reporter=reporter, pylintrc=self._rcfile)
# register standard checkers
from pylint import checkers
checkers.initialize(linter)
# load command line plugins
linter.load_plugin_modules(self._plugins)
# add some help section
linter.add_help_section('Environment variables', config.ENV_HELP)
linter.add_help_section('Output', '''
Using the default text output, the message format is :
MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE
There are 5 kind of message types :
* (C) convention, for programming standard violation
* (R) refactor, for bad code smell
* (W) warning, for python specific problems
* (E) error, for probable bugs in the code
* (F) fatal, if an error occurred which prevented pylint from doing further
processing.
''')
linter.add_help_section('Output status code', '''
Pylint should leave with following status code:
* 0 if everything went fine
* 1 if a fatal message was issued
* 2 if an error message was issued
* 4 if a warning message was issued
* 8 if a refactor message was issued
* 16 if a convention message was issued
* 32 on usage error
status 1 to 16 will be bit-ORed so you can know which different categories has
been issued by analysing pylint output status code
''')
# read configuration
linter.disable_message('W0704')
linter.read_config_file()
# is there some additional plugins in the file configuration, in
config_parser = linter._config_parser
if config_parser.has_option('MASTER', 'load-plugins'):
plugins = splitstrip(config_parser.get('MASTER', 'load-plugins'))
linter.load_plugin_modules(plugins)
# now we can load file config and command line, plugins (which can
# provide options) have been registered
linter.load_config_file()
if reporter:
# if a custom reporter is provided as argument, it may be overridden
# by file parameters, so re-set it here, but before command line
# parsing so it's still overrideable by command line option
linter.set_reporter(reporter)
args = linter.load_command_line_configuration(args)
if not args:
print linter.help()
sys.exit(32)
# insert current working directory to the python path to have a correct
# behaviour
sys.path.insert(0, os.getcwd())
if self.linter.config.profile:
print >> sys.stderr, '** profiled run'
from hotshot import Profile, stats
prof = Profile('stones.prof')
prof.runcall(linter.check, args)
prof.close()
data = stats.load('stones.prof')
data.strip_dirs()
data.sort_stats('time', 'calls')
data.print_stats(30)
else:
linter.check(args)
sys.path.pop(0)
sys.exit(self.linter.msg_status)
def cb_set_rcfile(self, name, value):
"""callback for option preprocessing (i.e. before optik parsing)"""
self._rcfile = value
def cb_add_plugins(self, name, value):
"""callback for option preprocessing (i.e. before optik parsing)"""
self._plugins.extend(splitstrip(value))
def cb_error_mode(self, *args, **kwargs):
"""error mode:
* checkers without error messages are disabled
* for others, only the ERROR messages are displayed
* disable reports
* do not save execution information
"""
self.linter.disable_noerror_checkers()
self.linter.set_option('disable-msg-cat', 'WCRI')
self.linter.set_option('reports', False)
self.linter.set_option('persistent', False)
def cb_generate_config(self, *args, **kwargs):
"""optik callback for sample config file generation"""
self.linter.generate_config(skipsections=('COMMANDS',))
sys.exit(0)
def cb_generate_manpage(self, *args, **kwargs):
"""optik callback for sample config file generation"""
from pylint import __pkginfo__
self.linter.generate_manpage(__pkginfo__)
sys.exit(0)
def cb_help_message(self, option, opt_name, value, parser):
"""optik callback for printing some help about a particular message"""
self.linter.help_message(splitstrip(value))
sys.exit(0)
def cb_full_documentation(self, option, opt_name, value, parser):
"""optik callback for printing full documentation"""
self.linter.print_full_documentation()
sys.exit(0)
def cb_list_messages(self, option, opt_name, value, parser): # FIXME
"""optik callback for printing available messages"""
self.linter.list_sorted_messages()
sys.exit(0)
def cb_init_hook(option, opt_name, value, parser):
    """Exec arbitrary code, e.g. to set sys.path before analysis.

    SECURITY NOTE: ``value`` comes from the command line / config file
    and is executed verbatim -- only ever pass trusted input.
    """
    # parenthesized form is valid on both Python 2 and Python 3
    exec(value)
if __name__ == '__main__':
Run(sys.argv[1:])
|
dbbhattacharya/kitsune
|
vendor/packages/pylint/lint.py
|
Python
|
bsd-3-clause
| 39,287
|
[
"VisIt"
] |
9cd9ff4d7a59f9fd33365cd0d8e140cd8d1ef24d2e00228386024adcb4d46c3b
|
#-------------------------------------------------------------------------------
# Name: utils.execute
# Purpose: Submodule containing utility functions for execution of
# computational chemistry software packages for Open Anharmonic
#
# Author: Brian Skinn
# bskinn@alum.mit.edu
#
# Created: 5 Oct 2015
# Copyright: (c) Brian Skinn 2016
# License: The MIT License; see "license.txt" for full license terms
# and contributor agreement.
#
# This file is part of opan (Open Anharmonic), a system for automated
# computation of anharmonic properties of molecular systems via wrapper
# calls to computational/quantum chemical software packages.
#
# http://www.github.com/bskinn/opan
#
#-------------------------------------------------------------------------------
""" Functions to enable Open Anharmonic to execute external software packages.
**Functions**
"""
# Module-level imports
from ..const import DEF as _DEF, EnumSoftware as _E_SW, EnumFileType as _E_FT
# Functions
def execute_orca(inp_tp, work_dir, exec_cmd, subs=None, subs_delims=("<", ">"),
                 sim_name="orcarun",
                 inp_ext=_DEF.FILE_EXTS[_E_SW.ORCA][_E_FT.INPUTFILE],
                 out_ext=_DEF.FILE_EXTS[_E_SW.ORCA][_E_FT.OUTPUT],
                 wait_to_complete=True,
                 bohrs=False):
    """Executes |orca| on a dynamically constructed input file.

    .. warning:: Function is still under active development! Execution
       with `wait_to_complete` == |True| should be robust, however.

    An |orca| input file is generated from `inp_tp` by tag substitution
    (see :func:`~opan.utils.base.template_subst`), written into
    `work_dir`, and |orca| is invoked there via `exec_cmd`.  Any required
    resource files (.gbw, .xyz, etc.) MUST already be present in
    `work_dir`; files with the same base name are overwritten.  |orca|
    MUST be called through a wrapper script; no stream redirection is
    performed here.

    In addition to the substitutions in `subs`, the special tags
    **INP**, **OUT** and **NAME**, enclosed in `subs_delims`, are
    replaced with the input filename, output filename and `sim_name`,
    respectively, in both the template and every element of `exec_cmd`.
    When `inp_ext` (resp. `out_ext`) is |None| the tag is replaced by the
    bare `sim_name`.  `inp_ext` and `out_ext` must differ.

    Parameters
    ----------
    inp_tp
        |str| -- Template text for the input file to be generated.
    work_dir
        |str| -- Path to an existing working directory holding any
        required resource files.
    exec_cmd
        |list| of |str| -- |orca| execution call in the syntax of the
        :class:`subprocess.Popen` `args` argument.
    subs
        |dict| of |str|, optional -- Substitutions to be performed in
        the template.
    subs_delims
        2-|tuple| of |str|, optional -- Tag delimiters; defaults to
        ``('<', '>')``.
    sim_name
        |str|, optional -- Basename for input/output/working files
        (default "orcarun").
    inp_ext / out_ext
        |str|, optional -- Extensions for the generated input/output
        files; must not be identical.
    wait_to_complete
        |bool|, optional -- Whether to block until execution completes
        (default |True|).  |False| **IS NOT YET IMPLEMENTED**.
    bohrs
        |bool|, optional -- Units (Bohrs vs Angstroms) of coordinates in
        .xyz/.trj files.

    Returns
    -------
    [varies]
        |tuple| of ``(OrcaOutput, OpanXYZ, OrcaEngrad, OrcaHess)``
        result objects, with |None| for any product file not generated.

    Raises
    ------
    ~exceptions.ValueError
        If `inp_ext` and `out_ext` are identical.
    ~exceptions.KeyError
        If special tags **INP**, **OUT**, or **NAME** are defined in
        `subs`.
    """

    # Imports (function-local, following the module's existing pattern)
    import os, subprocess as sp
    from ..output import OrcaOutput
    from ..xyz import OpanXYZ
    from ..grad import OrcaEngrad
    from ..hess import OrcaHess
    from ..utils import template_subst

    # Special key constants
    INPKEY = "INP"
    OUTKEY = "OUT"
    NAMEKEY = "NAME"

    # BUGFIX: `subs` defaults to None but was used directly with
    # .isdisjoint() and .copy(); normalize to an empty dict.
    if subs is None:
        subs = {}

    # Validate before touching the filesystem, so a bad call does not
    # leave the process chdir'ed into work_dir.
    if inp_ext == out_ext:
        raise ValueError("'inp_ext' and 'out_ext' cannot be identical.")

    # Build the input and output file names
    inp_fname = sim_name + '.' + inp_ext if inp_ext else sim_name
    out_fname = sim_name + '.' + out_ext if out_ext else sim_name

    SPECIALSUBS = {INPKEY: inp_fname, OUTKEY: out_fname, NAMEKEY: sim_name}

    # Complain if special tags are redefined in subs
    if not set(SPECIALSUBS.keys()).isdisjoint(subs):
        raise KeyError("Redefinition of special tag(s) is forbidden: {0}"
                       .format(list(set(SPECIALSUBS.keys())
                               .intersection(subs))))

    # Replace INP/OUT/NAME tags in each element of the execution command
    exec_cmd_subs = [
        s.replace(subs_delims[0] + INPKEY + subs_delims[1], inp_fname)
         .replace(subs_delims[0] + OUTKEY + subs_delims[1], out_fname)
         .replace(subs_delims[0] + NAMEKEY + subs_delims[1], sim_name)
        for s in exec_cmd]

    # Merge user and special substitutions for the template.
    # BUGFIX: the original bound `subs.copy().update(SPECIALSUBS)`,
    # which is always None because dict.update returns None -- no
    # substitutions ever reached the template.
    augsubs = subs.copy()
    augsubs.update(SPECIALSUBS)
    input_text = template_subst(inp_tp, augsubs, delims=subs_delims)

    # Store old dir; switch to new.  Default exception is fine for an
    # invalid dir.  BUGFIX: the old directory is now restored even when
    # an exception occurs (the original leaked the chdir on error).
    olddir = os.getcwd()
    os.chdir(work_dir)
    try:
        # Create and write the input file
        with open(inp_fname, 'w') as input_file:
            input_file.write(input_text)

        #!TODO: execute_orca: Implement non-waiting return
        if wait_to_complete:
            # Run ORCA and block until the wrapper script returns
            sp.call(exec_cmd_subs, cwd=os.getcwd())

            # Bind result objects; any missing product file yields None.
            try:
                o_out = OrcaOutput(out_fname, 'file')
            except IOError:
                o_out = None
            try:
                o_xyz = OpanXYZ(path=os.path.join(work_dir,
                                                  sim_name + ".xyz"),
                                bohrs=bohrs)
            except IOError:
                o_xyz = None
            try:
                o_engrad = OrcaEngrad(os.path.join(work_dir,
                                                   sim_name + ".engrad"))
            except IOError:
                o_engrad = None
            try:
                o_hess = OrcaHess(os.path.join(work_dir,
                                               sim_name + ".hess"))
            except IOError:
                o_hess = None
        else:
            raise NotImplementedError("Background execution not yet "
                                      "implemented.")
    finally:
        # Always return to the prior working directory
        os.chdir(olddir)

    # NOTE(review): the original also parsed sim_name + ".trj" into an
    # OpanXYZ but never returned it; that dead parse has been dropped
    # rather than silently widening the return tuple.
    #TODO: execute_orca: Must refine this, esp for the different exec modes
    return o_out, o_xyz, o_engrad, o_hess
## end def execute_orca
# Guard against direct execution: this module only defines library code.
if __name__ == '__main__':  # pragma: no cover
    print("Module not executable.")
|
bskinn/opan
|
opan/utils/execute.py
|
Python
|
mit
| 10,503
|
[
"Brian",
"ORCA"
] |
81e4dbf345313eefc50306e7a32a5bc211917285685731e50bf63e0f7404a945
|
from past.types import basestring
from builtins import * # noqa: F403, F401
import os
import shutil
import pprint
import requests
import time
from io import BytesIO
from xml.etree import ElementTree
from zipfile import ZipFile, is_zipfile
from requests.auth import HTTPBasicAuth
import geoserver
from geoserver.catalog import Catalog as GeoServerCatalog
from geoserver.support import JDBCVirtualTable, JDBCVirtualTableGeometry, JDBCVirtualTableParam
from geoserver.util import shapefile_and_friends
from ..utilities import ConvertDictToXml, ConvertXmlToDict
from ..base import SpatialDatasetEngine
class GeoServerSpatialDatasetEngine(SpatialDatasetEngine):
"""
Definition for GeoServer Dataset Engine objects.
"""
@property
def type(self):
    """
    GeoServer Spatial Dataset Type.

    Returns:
        str: the constant engine-type identifier ``'GEOSERVER'``.
    """
    return 'GEOSERVER'
@property
def gwc_endpoint(self):
    """GeoWebCache REST endpoint URL (derived from ``endpoint`` in ``__init__``)."""
    return self._gwc_endpoint
def __init__(self, endpoint, apikey=None, username=None, password=None):
    """
    Default constructor for Dataset Engines.

    Args:
        endpoint (string): URL of the dataset service API endpoint (e.g.: www.host.com/geoserver/rest)
        apikey (string, optional): API key that will be used to authenticate with the dataset service.
        username (string, optional): Username that will be used to authenticate with the dataset service.
        password (string, optional): Password that will be used to authenticate with the dataset service.
    """
    # Derive the GeoWebCache endpoint from the REST endpoint:
    # /geoserver/rest/ -> /geoserver/gwc/rest/ (result always has a trailing slash).
    # NOTE(review): str.replace swaps the FIRST occurrence of 'rest' — assumes
    # 'rest' does not appear earlier in the URL (e.g. in the hostname); confirm.
    if '/' == endpoint[-1]:
        self._gwc_endpoint = endpoint.replace('rest', 'gwc/rest')
    else:
        self._gwc_endpoint = endpoint.replace('rest', 'gwc/rest/')

    super(GeoServerSpatialDatasetEngine, self).__init__(
        endpoint=endpoint,
        apikey=apikey,
        username=username,
        password=password
    )
def _apply_changes_to_gs_object(self, attributes_dict, gs_object):
    """
    Copy the values in *attributes_dict* onto the matching attributes of *gs_object*.

    Values under 'styles' and 'default_style' are style names (optionally
    'workspace:name') and are resolved to gsconfig style objects through the
    catalog before being assigned; every other attribute is set verbatim.
    Attributes the object does not already have are silently skipped.

    Returns:
        the (mutated) *gs_object*.
    """
    catalog = self._get_geoserver_catalog_object()

    def _resolve_style(style_name):
        # 'workspace:name' -> look up in that workspace; otherwise by name only.
        if ':' in style_name:
            parts = style_name.split(':')
            return catalog.get_style(name=parts[1], workspace=parts[0])
        return catalog.get_style(name=style_name)

    for attribute, value in attributes_dict.items():
        if not hasattr(gs_object, attribute):
            continue
        if attribute == 'styles':
            gs_object.styles = [_resolve_style(s) for s in attributes_dict['styles']]
        elif attribute == 'default_style':
            gs_object.default_style = _resolve_style(attributes_dict['default_style'])
        else:
            setattr(gs_object, attribute, value)

    return gs_object
def _assemble_url(self, *args):
    """Join the engine endpoint (without its trailing slash) and *args* with '/'."""
    base = self.endpoint
    # Drop a single trailing slash so the join does not double it.
    if base.endswith('/'):
        base = base[:-1]
    return '/'.join((base,) + args)
def _get_non_rest_endpoint(self):
    """Return the endpoint with any trailing slash and trailing '/rest' removed."""
    base = self.endpoint
    if base.endswith('/'):
        base = base[:-1]
    if base.endswith('/rest'):
        base = base[:-5]
    return base
def _get_geoserver_catalog_object(self):
    """
    Internal method used to get the connection object to GeoServer.

    Returns:
        a fresh gsconfig ``Catalog`` bound to this engine's endpoint and
        credentials (a new object is created on every call).
    """
    return GeoServerCatalog(self.endpoint, username=self.username, password=self.password)
def _get_wms_url(self, layer_id, style='', srs='EPSG:4326', bbox='-180,-90,180,90', version='1.1.0',
                 width='512', height='512', output_format='image/png', tiled=False, transparent=True):
    """
    Assemble a WMS GetMap URL for the given layer.

    *tiled* and *transparent* booleans are rendered as the literal strings
    GeoServer expects ('yes'/'no' and 'true'/'false', respectively).
    """
    endpoint = self._get_non_rest_endpoint()
    tiled_option = 'yes' if tiled else 'no'
    transparent_option = 'true' if transparent else 'false'

    query = (
        'service=WMS&version={0}&request=GetMap&'
        'layers={1}&styles={2}&'
        'transparent={3}&tiled={4}&'
        'srs={5}&bbox={6}&'
        'width={7}&height={8}&'
        'format={9}'
    ).format(version, layer_id, style, transparent_option, tiled_option,
             srs, bbox, width, height, output_format)

    return '{0}/wms?{1}'.format(endpoint, query)
def _get_wcs_url(self, resource_id, srs='EPSG:4326', bbox='-180,-90,180,90', output_format='png', namespace=None,
                 width='512', height='512'):
    """
    Assemble a WCS 1.1.0 GetCoverage URL for the given coverage resource.

    A '&namespace=...' parameter is appended only when *namespace* is a
    non-empty string.
    """
    endpoint = self._get_non_rest_endpoint()
    url = ('{0}/wcs?service=WCS&version=1.1.0&request=GetCoverage&'
           'identifier={1}&'
           'srs={2}&BoundingBox={3}&'
           'width={4}&height={5}&'
           'format={6}').format(endpoint, resource_id, srs, bbox, width, height, output_format)
    if namespace and isinstance(namespace, str):
        url += '&namespace={0}'.format(namespace)
    return url
def _get_wfs_url(self, resource_id, output_format='GML3'):
    """
    Assemble a WFS GetFeature URL for the given feature type.

    GML3 (the default) and any custom *output_format* go through WFS 2.0.0;
    GML2 is requested via a WFS 1.0.0 URL.
    """
    endpoint = self._get_non_rest_endpoint()
    base = '{0}/wfs?service=WFS'.format(endpoint)

    if output_format == 'GML3':
        return '{0}&version=2.0.0&request=GetFeature&typeNames={1}'.format(base, resource_id)
    if output_format == 'GML2':
        # NOTE(review): the WFS 1.0.0 spec names this parameter 'typeName'
        # (singular); 'typeNames' is kept here to preserve existing behavior —
        # confirm against the target GeoServer before changing.
        return '{0}&version=1.0.0&request=GetFeature&typeNames={1}&outputFormat=GML2'.format(base, resource_id)
    return '{0}&version=2.0.0&request=GetFeature&typeNames={1}&outputFormat={2}'.format(
        base, resource_id, output_format)
@staticmethod
def _handle_debug(return_object, debug):
    """Pretty-print *return_object* to the console when *debug* is truthy."""
    if not debug:
        return
    pprint.pprint(return_object)
def _handle_delete(self, identifier, gs_object, purge, recurse, debug):
    """
    Common delete workflow: remove *gs_object* from the catalog and build a
    response dictionary describing the outcome.

    Args:
        identifier: name used in the error message when the object is missing.
        gs_object: gsconfig object to delete (falsy means "does not exist").
        purge: forwarded to ``catalog.delete``.
        recurse: forwarded to ``catalog.delete``.
        debug (bool): pretty-print the response dictionary when True.
    """
    catalog = self._get_geoserver_catalog_object()

    if not gs_object:
        response_dict = {
            'success': False,
            'error': 'GeoServer object does not exist: "{0}".'.format(identifier),
        }
    else:
        try:
            catalog.delete(config_object=gs_object, purge=purge, recurse=recurse)
            response_dict = {'success': True, 'result': None}
        except geoserver.catalog.FailedRequestError as e:
            response_dict = {'success': False, 'error': str(e)}

    self._handle_debug(response_dict, debug)
    return response_dict
def _handle_list(self, gs_objects, with_properties, debug):
    """
    Common list workflow: build a success response containing either the
    objects' names (default) or their full property dictionaries.
    """
    if with_properties:
        result = self._transcribe_geoserver_objects(gs_objects)
    else:
        result = [gs_object.name for gs_object in gs_objects]

    response_dict = {'success': True, 'result': result}
    self._handle_debug(response_dict, debug)
    return response_dict
def _process_identifier(self, identifier):
    """
    Split a 'workspace:name' identifier into its (workspace, name) parts.

    A bare name (no colon) yields (None, name).
    """
    if ':' not in identifier:
        return None, identifier
    workspace, name = identifier.split(':')
    return workspace, name
def _transcribe_geoserver_objects(self, gs_object_list):
    """Convert each geoserver object in *gs_object_list* to a Python dictionary."""
    return [self._transcribe_geoserver_object(obj) for obj in gs_object_list]
def _transcribe_geoserver_object(self, gs_object):
    """
    Convert a geoserver (gsconfig) object to a plain Python dictionary.

    Named sub-objects (store, workspace, resource, default_style) are reduced
    to their names (workspace-qualified where applicable), callables and
    internal bookkeeping attributes are skipped, and ready-made WFS/WCS/WMS
    URL dictionaries are injected based on the object's ``resource_type``.

    Fixes vs. previous revision:
      * layerGroup WMS dict key 'geptiff' corrected to 'geotiff'.
      * the coverage branch's aspect-ratio-corrected width/height are now
        passed to the WCS URLs (they were computed but never used).
    """
    # Constants
    NAMED_OBJECTS = ('store', 'workspace')
    NAMED_OBJECTS_WITH_WORKSPACE = ('resource', 'default_style')
    OMIT_ATTRIBUTES = ('writers', 'attribution_object', 'dirty', 'dom', 'save_method')

    # Load into a dictionary
    object_dictionary = {}
    resource_object = None

    # Get the non-private attributes
    attributes = [a for a in dir(gs_object) if not a.startswith('__') and not a.startswith('_')]

    for attribute in attributes:
        if not callable(getattr(gs_object, attribute)):
            # Handle special cases upfront
            if attribute in NAMED_OBJECTS:
                sub_object = getattr(gs_object, attribute)
                if not sub_object or isinstance(sub_object, str):
                    object_dictionary[attribute] = sub_object
                else:
                    object_dictionary[attribute] = sub_object.name

            elif attribute in NAMED_OBJECTS_WITH_WORKSPACE:
                # Append workspace if applicable
                sub_object = getattr(gs_object, attribute)

                # Stash resource for later use (native bbox / projection below)
                if attribute == 'resource':
                    resource_object = sub_object

                if sub_object and not isinstance(sub_object, str):
                    if sub_object.workspace:
                        try:
                            object_dictionary[attribute] = '{0}:{1}'.format(sub_object.workspace.name,
                                                                            sub_object.name)
                        except AttributeError:
                            # workspace may already be a plain name string
                            object_dictionary[attribute] = '{0}:{1}'.format(sub_object.workspace,
                                                                            sub_object.name)
                    else:
                        object_dictionary[attribute] = sub_object.name
                elif isinstance(sub_object, str):
                    object_dictionary[attribute] = getattr(gs_object, attribute)

            elif attribute in OMIT_ATTRIBUTES:
                # Omit these attributes
                pass

            elif attribute == 'catalog':
                # Store URL in place of catalog
                catalog_object = getattr(gs_object, 'catalog')
                object_dictionary[attribute] = catalog_object.gs_base_url

            elif attribute == 'styles':
                styles = getattr(gs_object, attribute)
                styles_names = []
                for style in styles:
                    if style is not None:
                        if not isinstance(style, basestring):
                            if style.workspace:
                                styles_names.append('{0}:{1}'.format(style.workspace, style.name))
                            else:
                                styles_names.append(style.name)
                    else:
                        # A None entry means the raw attribute value is kept as-is
                        styles_names = getattr(gs_object, attribute)
                object_dictionary[attribute] = styles_names

            # Store attribute properties as is
            else:
                object_dictionary[attribute] = getattr(gs_object, attribute)

    # Inject appropriate WFS and WMS URLs
    if 'resource_type' in object_dictionary:
        # Feature Types Get WFS
        if object_dictionary['resource_type'] == 'featureType':
            if object_dictionary['workspace']:
                resource_id = '{0}:{1}'.format(object_dictionary['workspace'], object_dictionary['name'])
            else:
                resource_id = object_dictionary['name']

            object_dictionary['wfs'] = {
                'gml3': self._get_wfs_url(resource_id, 'GML3'),
                'gml2': self._get_wfs_url(resource_id, 'GML2'),
                'shapefile': self._get_wfs_url(resource_id, 'shape-zip'),
                'geojson': self._get_wfs_url(resource_id, 'application/json'),
                'geojsonp': self._get_wfs_url(resource_id, 'text/javascript'),
                'csv': self._get_wfs_url(resource_id, 'csv')
            }

        # Coverage Types Get WCS
        elif object_dictionary['resource_type'] == 'coverage':
            workspace = None
            name = object_dictionary['name']
            bbox = '-180,-90,180,90'
            srs = 'EPSG:4326'
            width = '512'
            height = '512'

            if object_dictionary['workspace']:
                workspace = object_dictionary['workspace']

            if resource_object and resource_object.native_bbox:
                # Find the native bounding box
                nbbox = resource_object.native_bbox
                minx = nbbox[0]
                maxx = nbbox[1]
                miny = nbbox[2]
                maxy = nbbox[3]
                srs = resource_object.projection
                bbox = '{0},{1},{2},{3}'.format(minx, miny, maxx, maxy)
                # Resize the width to be proportionate to the image aspect ratio
                aspect_ratio = (float(maxx) - float(minx)) / (float(maxy) - float(miny))
                width = str(int(aspect_ratio * float(height)))

            # Fix: pass the computed width/height through to the WCS URLs
            # (previously computed but never used).
            object_dictionary['wcs'] = {
                'png': self._get_wcs_url(name, output_format='png', namespace=workspace, srs=srs, bbox=bbox,
                                         width=width, height=height),
                'gif': self._get_wcs_url(name, output_format='gif', namespace=workspace, srs=srs, bbox=bbox,
                                         width=width, height=height),
                'jpeg': self._get_wcs_url(name, output_format='jpeg', namespace=workspace, srs=srs, bbox=bbox,
                                          width=width, height=height),
                'tiff': self._get_wcs_url(name, output_format='tif', namespace=workspace, srs=srs, bbox=bbox,
                                          width=width, height=height),
                'bmp': self._get_wcs_url(name, output_format='bmp', namespace=workspace, srs=srs, bbox=bbox,
                                         width=width, height=height),
                'geotiff': self._get_wcs_url(name, output_format='geotiff', namespace=workspace, srs=srs,
                                             bbox=bbox, width=width, height=height),
                'gtopo30': self._get_wcs_url(name, output_format='gtopo30', namespace=workspace, srs=srs,
                                             bbox=bbox, width=width, height=height),
                'arcgrid': self._get_wcs_url(name, output_format='ArcGrid', namespace=workspace, srs=srs,
                                             bbox=bbox, width=width, height=height),
                'arcgrid_gz': self._get_wcs_url(name, output_format='ArcGrid-GZIP', namespace=workspace, srs=srs,
                                                bbox=bbox, width=width, height=height),
            }

        elif object_dictionary['resource_type'] == 'layer':
            # Defaults
            bbox = '-180,-90,180,90'
            srs = 'EPSG:4326'
            width = '512'
            height = '512'
            style = ''

            # Layer and style
            layer = object_dictionary['name']
            if 'default_style' in object_dictionary:
                style = object_dictionary['default_style']

            # Try to extract the bounding box from the resource which was saved earlier
            if resource_object and resource_object.native_bbox:
                # Find the native bounding box
                nbbox = resource_object.native_bbox
                minx = nbbox[0]
                maxx = nbbox[1]
                miny = nbbox[2]
                maxy = nbbox[3]
                srs = resource_object.projection
                bbox = '{0},{1},{2},{3}'.format(minx, miny, maxx, maxy)
                # Resize the width to be proportionate to the image aspect ratio
                aspect_ratio = (float(maxx) - float(minx)) / (float(maxy) - float(miny))
                width = str(int(aspect_ratio * float(height)))

            object_dictionary['wms'] = {
                'png': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='image/png'),
                'png8': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                          output_format='image/png8'),
                'jpeg': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                          output_format='image/jpeg'),
                'gif': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='image/gif'),
                'tiff': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                          output_format='image/tiff'),
                'tiff8': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                           output_format='image/tiff8'),
                'geotiff': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                             output_format='image/geotiff'),
                'geotiff8': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                              output_format='image/geotiff8'),
                'svg': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='image/svg'),
                'pdf': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='application/pdf'),
                'georss': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                            output_format='rss'),
                'kml': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='kml'),
                'kmz': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='kmz'),
                'openlayers': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                                output_format='application/openlayers')
            }

        elif object_dictionary['resource_type'] == 'layerGroup':
            # Defaults
            bbox = '-180,-90,180,90'
            srs = 'EPSG:4326'
            width = '512'
            height = '512'
            style = ''

            # Layer and style
            layer = object_dictionary['name']
            if 'default_style' in object_dictionary:
                style = object_dictionary['default_style']

            # Try to extract the bounding box from the layer group bounds
            if 'bounds' in object_dictionary and object_dictionary['bounds']:
                # Find the native bounding box (minx, maxx, miny, maxy, srs)
                nbbox = object_dictionary['bounds']
                minx = nbbox[0]
                maxx = nbbox[1]
                miny = nbbox[2]
                maxy = nbbox[3]
                srs = nbbox[4]
                bbox = '{0},{1},{2},{3}'.format(minx, miny, maxx, maxy)
                # Resize the width to be proportionate to the image aspect ratio
                aspect_ratio = (float(maxx) - float(minx)) / (float(maxy) - float(miny))
                width = str(int(aspect_ratio * float(height)))

            object_dictionary['wms'] = {
                'png': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='image/png'),
                'png8': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                          output_format='image/png8'),
                'jpeg': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                          output_format='image/jpeg'),
                'gif': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='image/gif'),
                'tiff': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                          output_format='image/tiff'),
                'tiff8': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                           output_format='image/tiff8'),
                # Fix: key was previously misspelled 'geptiff'.
                'geotiff': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                             output_format='image/geotiff'),
                'geotiff8': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                              output_format='image/geotiff8'),
                'svg': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='image/svg'),
                'pdf': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='application/pdf'),
                'georss': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                            output_format='rss'),
                'kml': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='kml'),
                'kmz': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                         output_format='kmz'),
                'openlayers': self._get_wms_url(layer, style, bbox=bbox, srs=srs, width=width, height=height,
                                                output_format='application/openlayers')
            }

    return object_dictionary
def list_resources(self, with_properties=False, store=None, workspace=None, debug=False):
    """
    List the names of all resources available from the spatial dataset service.

    Args:
        with_properties (bool, optional): Return a list of resource dictionaries instead of resource names.
        store (string, optional): Return only resources belonging to a certain store.
        workspace (string, optional): Return only resources belonging to a certain workspace.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.list_resources()

        response = engine.list_resources(store="example_store")

        response = engine.list_resources(with_properties=True, workspace="example_workspace")
    """
    catalog = self._get_geoserver_catalog_object()
    try:
        resource_objects = catalog.get_resources(store=store, workspace=workspace)
        return self._handle_list(resource_objects, with_properties, debug)
    except geoserver.catalog.AmbiguousRequestError as e:
        response_object = {'success': False, 'error': str(e)}
    except TypeError:
        # gsconfig raises TypeError when several stores share the given name.
        response_object = {'success': False,
                           'error': 'Multiple stores found named "{0}".'.format(store)}
    self._handle_debug(response_object, debug)
    return response_object
def list_layers(self, with_properties=False, debug=False):
    """
    List names of all layers available from the spatial dataset service.

    Args:
        with_properties (bool, optional): Return a list of layer dictionaries instead of layer names.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.list_layers()

        response = engine.list_layers(with_properties=True)
    """
    catalog = self._get_geoserver_catalog_object()
    return self._handle_list(catalog.get_layers(), with_properties, debug)
def list_layer_groups(self, with_properties=False, debug=False):
    """
    List the names of all layer groups available from the spatial dataset service.

    Args:
        with_properties (bool, optional): Return a list of layer group dictionaries instead of layer group names.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.list_layer_groups()

        response = engine.list_layer_groups(with_properties=True)
    """
    catalog = self._get_geoserver_catalog_object()
    return self._handle_list(catalog.get_layergroups(), with_properties, debug)
def list_workspaces(self, with_properties=False, debug=False):
    """
    List the names of all workspaces available from the spatial dataset service.

    Args:
        with_properties (bool, optional): Return a list of workspace dictionaries instead of workspace names.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.list_workspaces()

        response = engine.list_workspaces(with_properties=True)
    """
    catalog = self._get_geoserver_catalog_object()
    return self._handle_list(catalog.get_workspaces(), with_properties, debug)
def list_stores(self, workspace=None, with_properties=False, debug=False):
    """
    List the names of all stores available from the spatial dataset service.

    Args:
        workspace (string, optional): List only stores belonging to this workspace.
        with_properties (bool, optional): Return a list of store dictionaries instead of store names.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.list_stores()

        response = engine.list_stores(workspace='example_workspace', with_properties=True)
    """
    catalog = self._get_geoserver_catalog_object()
    try:
        stores = catalog.get_stores(workspace=workspace)
        return self._handle_list(stores, with_properties, debug)
    except AttributeError:
        # gsconfig raises AttributeError when the workspace cannot be resolved.
        response_dict = {'success': False,
                         'error': 'Invalid workspace "{0}".'.format(workspace)}
        self._handle_debug(response_dict, debug)
        return response_dict
def list_styles(self, workspace=None, with_properties=False, debug=False):
    """
    List the names of all styles available from the spatial dataset service.

    Args:
        workspace (string): Return only styles belonging to a certain workspace.
        with_properties (bool, optional): Return a list of style dictionaries instead of style names.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.list_styles()

        response = engine.list_styles(with_properties=True)
    """
    catalog = self._get_geoserver_catalog_object()
    return self._handle_list(catalog.get_styles(workspace=workspace), with_properties, debug)
def get_resource(self, resource_id, store_id=None, debug=False):
    """
    Retrieve a resource object.

    Args:
        resource_id (string): Identifier of the resource to retrieve. Can be a name or a workspace-name combination (e.g.: "name" or "workspace:name").
        store_id (string, optional): Get the resource from this store.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.get_resource('example_workspace:resource_name')

        response = engine.get_resource('resource_name', store_id='example_store')
    """
    catalog = self._get_geoserver_catalog_object()

    # Fall back to the default workspace when the identifier has none.
    workspace, name = self._process_identifier(resource_id)
    if not workspace:
        workspace = catalog.get_default_workspace().name

    try:
        resource = catalog.get_resource(name=name, store=store_id, workspace=workspace)
        if resource:
            response_dict = {'success': True,
                             'result': self._transcribe_geoserver_object(resource)}
        else:
            response_dict = {'success': False,
                             'error': 'Resource "{0}" not found.'.format(resource_id)}
    except geoserver.catalog.FailedRequestError as e:
        response_dict = {'success': False, 'error': str(e)}

    self._handle_debug(response_dict, debug)
    return response_dict
def get_layer(self, layer_id, store_id=None, debug=False):
    """
    Retrieve a layer object.

    Args:
        layer_id (string): Identifier of the layer to retrieve. Can be a name or a workspace-name combination (e.g.: "name" or "workspace:name").
        store_id (string, optional): If given, assigned to the returned layer's ``store`` attribute before transcription.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.get_layer('layer_name')

        response = engine.get_layer('workspace_name:layer_name')
    """
    # Get a GeoServer catalog object
    catalog = self._get_geoserver_catalog_object()
    try:
        # Get layer
        layer = catalog.get_layer(name=layer_id)
        if layer and store_id:
            layer.store = store_id
        if not layer:
            response_dict = {'success': False,
                             'error': 'Layer "{0}" not found.'.format(layer_id)}
        else:
            layer_dict = self._transcribe_geoserver_object(layer)
            # Get layer caching properties via the GeoWebCache REST API
            # (gsconfig doesn't support this).
            gwc_url = '{0}layers/{1}.xml'.format(self.gwc_endpoint, layer_id)
            auth = (self.username, self.password)
            r = requests.get(gwc_url, auth=auth)
            # Non-200 responses are silently ignored: the result simply has no
            # 'tile_caching' key in that case.
            if r.status_code == 200:
                root = ElementTree.XML(r.text)
                tile_caching_dict = ConvertXmlToDict(root)
                layer_dict['tile_caching'] = tile_caching_dict['GeoServerLayer']
            # Assemble Response
            response_dict = {'success': True,
                             'result': layer_dict}
    except geoserver.catalog.FailedRequestError as e:
        response_dict = {'success': False,
                         'error': str(e)}
    # Handle the debug and return
    self._handle_debug(response_dict, debug)
    return response_dict
def get_layer_group(self, layer_group_id, debug=False):
    """
    Retrieve a layer group object.

    Args:
        layer_group_id (string): Identifier of the layer group to retrieve. Can be a name or a workspace-name combination (e.g.: "name" or "workspace:name").
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.get_layer_group('layer_group_name')

        response = engine.get_layer_group('workspace_name:layer_group_name')
    """
    catalog = self._get_geoserver_catalog_object()
    workspace, name = self._process_identifier(layer_group_id)

    try:
        layer_group = catalog.get_layergroup(name=name, workspace=workspace)
        if layer_group:
            response_dict = {'success': True,
                             'result': self._transcribe_geoserver_object(layer_group)}
        else:
            response_dict = {'success': False,
                             'error': 'Layer Group "{0}" not found.'.format(layer_group_id)}
    except geoserver.catalog.FailedRequestError as e:
        response_dict = {'success': False, 'error': str(e)}

    self._handle_debug(response_dict, debug)
    return response_dict
def get_store(self, store_id, debug=False):
    """
    Retrieve a store object.

    Args:
        store_id (string): Identifier of the store to retrieve. Can be a name or a workspace-name combination (e.g.: "name" or "workspace:name").
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.get_store('store_name')

        response = engine.get_store('workspace_name:store_name')
    """
    catalog = self._get_geoserver_catalog_object()

    # Fall back to the default workspace when the identifier has none.
    workspace, name = self._process_identifier(store_id)
    if not workspace:
        workspace = catalog.get_default_workspace().name

    try:
        store = catalog.get_store(name=name, workspace=workspace)
        if store:
            response_dict = {'success': True,
                             'result': self._transcribe_geoserver_object(store)}
        else:
            response_dict = {'success': False,
                             'error': 'Store "{0}" not found.'.format(store_id)}
    except geoserver.catalog.FailedRequestError as e:
        response_dict = {'success': False, 'error': str(e)}

    self._handle_debug(response_dict, debug)
    return response_dict
def get_workspace(self, workspace_id, debug=False):
    """
    Retrieve a workspace object.

    Args:
        workspace_id (string): Identifier of the workspace to retrieve.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.get_workspace('workspace_name')
    """
    catalog = self._get_geoserver_catalog_object()

    try:
        workspace = catalog.get_workspace(name=workspace_id)
        if workspace:
            response_dict = {'success': True,
                             'result': self._transcribe_geoserver_object(workspace)}
        else:
            response_dict = {'success': False,
                             'error': 'Workspace "{0}" not found.'.format(workspace_id)}
    except geoserver.catalog.FailedRequestError as e:
        response_dict = {'success': False, 'error': str(e)}

    self._handle_debug(response_dict, debug)
    return response_dict
def get_style(self, style_id, debug=False):
    """
    Retrieve a style object.

    Args:
        style_id (string): Identifier of the style to retrieve. Can be a name or a workspace-name combination (e.g.: "name" or "workspace:name").
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:

        response = engine.get_style('style_name')
    """
    # Get a GeoServer catalog object
    catalog = self._get_geoserver_catalog_object()
    # Process identifier
    workspace, name = self._process_identifier(style_id)
    # Get default work space if none is given
    if not workspace:
        workspace = catalog.get_default_workspace().name
    try:
        # Get style
        style = catalog.get_style(name=name, workspace=workspace)
        if not style:
            # Fix: the error message previously said 'Workspace ... not found'
            # (copy-paste from get_workspace); it now correctly names the style.
            response_dict = {'success': False,
                             'error': 'Style "{0}" not found.'.format(style_id)}
        else:
            style_dict = self._transcribe_geoserver_object(style)
            # Assemble Response
            response_dict = {'success': True,
                             'result': style_dict}
    except geoserver.catalog.FailedRequestError as e:
        response_dict = {'success': False,
                         'error': str(e)}
    # Handle the debug and return
    self._handle_debug(response_dict, debug)
    return response_dict
def link_sqlalchemy_db_to_geoserver(self, store_id, sqlalchemy_engine, docker=False, debug=False,
                                    docker_ip_address='172.17.0.1'):
    """
    Helper function to simplify linking postgis databases to geoservers using the sqlalchemy engine object.

    Args:
        store_id (string): Identifier for the store to add the resource to. Can be a store name or a workspace name combination (e.g.: "name" or "workspace:name"). Note that the workspace must be an existing workspace. If no workspace is given, the default workspace will be assigned.  # noqa: E501
        sqlalchemy_engine (sqlalchemy_engine): An SQLAlchemy engine object.
        docker (bool, optional): Set to True if the database and geoserver are running in a Docker container. Defaults to False. When True, *docker_ip_address* is used as the database host instead of the engine URL's host.  # noqa: E501
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
        docker_ip_address (str, optional): Override the docker network ip address. Defaults to '172.17.0.1'.

    Returns:
        (dict): Response dictionary
    """
    # Pull host/port/database/credentials out of the SQLAlchemy engine URL.
    connection_dict = sqlalchemy_engine.url.translate_connect_args()
    response = self.create_postgis_feature_resource(
        store_id=store_id,
        host=docker_ip_address if docker else connection_dict['host'],
        port=connection_dict['port'],
        database=connection_dict['database'],
        user=connection_dict['username'],
        password=connection_dict['password'],
        debug=debug
    )
    return response
def create_postgis_feature_resource(self, store_id, host, port, database, user, password, table=None, debug=False):
    """
    Use this method to link an existing PostGIS database to GeoServer as a feature store. Note that this method only works for data in vector formats.  # noqa: E501

    Args:
        store_id (string): Identifier for the store to add the resource to. Can be a store name or a workspace name combination (e.g.: "name" or "workspace:name"). Note that the workspace must be an existing workspace. If no workspace is given, the default workspace will be assigned.  # noqa: E501
        host (string): Host of the PostGIS database (e.g.: 'www.example.com').
        port (string): Port of the PostGIS database (e.g.: '5432')
        database (string): Name of the database.
        user (string): Database user that has access to the database.
        password (string): Password of database user.
        table (string, optional): Name of existing table to add as a feature resource to the newly created feature store. A layer will automatically be created for the feature resource as well. Both the layer and the resource will share the same name as the table.  # noqa: E501
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:
        # With Table
        response = engine.create_postgis_feature_resource(store_id='workspace:store_name', table='table_name', host='localhost', port='5432', database='database_name', user='user', password='pass')  # noqa: E501

        # Without table
        response = engine.create_postgis_feature_resource(store_id='workspace:store_name', host='localhost', port='5432', database='database_name', user='user', password='pass')  # noqa: E501
    """
    # Get a GeoServer catalog object and query for list of layer groups
    catalog = self._get_geoserver_catalog_object()
    # Process identifier
    workspace, name = self._process_identifier(store_id)
    # Get default work space if none is given
    if not workspace:
        workspace = catalog.get_default_workspace().name
    # Determine if store exists
    store = catalog.get_store(name=name, workspace=workspace)
    store_exists = store is not None
    # Create the store if it doesn't exist already
    if not store_exists:
        # Connection parameters payload for the new PostGIS datastore
        xml = """
        <dataStore>
            <name>{0}</name>
            <connectionParameters>
                <host>{1}</host>
                <port>{2}</port>
                <database>{3}</database>
                <user>{4}</user>
                <passwd>{5}</passwd>
                <dbtype>postgis</dbtype>
            </connectionParameters>
        </dataStore>
        """.format(name, host, port, database, user, password)
        # Prepare headers
        headers = {
            "Content-type": "text/xml",
            "Accept": "application/xml"
        }
        # Prepare URL to create store
        url = self._assemble_url('workspaces', workspace, 'datastores')
        # Execute: POST /workspaces/<ws>/datastores
        response = requests.post(url=url,
                                 data=xml,
                                 headers=headers,
                                 auth=HTTPBasicAuth(username=self.username, password=self.password))
        # Return with error if this doesn't work (201 Created expected)
        if response.status_code != 201:
            response_dict = {'success': False,
                             'error': '{1}({0}): {2}'.format(response.status_code, response.reason, response.text)}
            self._handle_debug(response_dict, debug)
            return response_dict
    if not table:
        # Wrap up successfully with new store created.
        # NOTE(review): the freshly created store may not be immediately visible to
        # the catalog, so poll up to MAX_ATTEMPTS times with a 1 second pause.
        MAX_ATTEMPTS = 5
        attempts = 0
        resource_dict = {}
        while attempts < MAX_ATTEMPTS:
            attempts += 1
            try:
                new_store = catalog.get_store(name=name, workspace=workspace)
                if not new_store:
                    # Treat a None result the same as a failed request so we retry
                    raise geoserver.catalog.FailedRequestError()
                resource_dict = self._transcribe_geoserver_object(new_store)
                break
            except geoserver.catalog.FailedRequestError:
                time.sleep(1)
        response_dict = {'success': True,
                         'result': resource_dict}
        self._handle_debug(response_dict, debug)
        return response_dict
    # Throw error if resource already exists
    try:
        resource = catalog.get_resource(name=table, store=name, workspace=workspace)
        if resource:
            message = "There is already a resource named " + table
            if workspace:
                message += " in " + workspace
            response_dict = {'success': False,
                             'error': message}
            self._handle_debug(response_dict, debug)
            return response_dict
    except geoserver.catalog.FailedRequestError:
        # No such resource: safe to proceed with creation
        pass
    # Prepare file for adding the table
    xml = """
    <featureType>
        <name>{0}</name>
    </featureType>
    """.format(table)
    # Prepare headers
    headers = {
        "Content-type": "text/xml",
        "Accept": "application/xml"
    }
    # Prepare URL
    url = self._assemble_url('workspaces', workspace, 'datastores', name, 'featuretypes')
    # Execute: POST /workspaces/<ws>/datastores/<ds>/featuretypes
    response = requests.post(url=url,
                             data=xml,
                             headers=headers,
                             auth=HTTPBasicAuth(username=self.username, password=self.password))
    # Handle failure (201 Created expected)
    if response.status_code != 201:
        response_dict = {'success': False,
                         'error': '{1}({0}): {2}'.format(response.status_code, response.reason, response.text)}
        self._handle_debug(response_dict, debug)
        return response_dict
    # Wrap up successfully
    new_resource = catalog.get_resource(name=table, store=name, workspace=workspace)
    resource_dict = self._transcribe_geoserver_object(new_resource)
    response_dict = {'success': True,
                     'result': resource_dict}
    self._handle_debug(response_dict, debug)
    return response_dict
def add_table_to_postgis_store(self, store_id, table, debug=False):
    """
    Add an existing postgis table as a feature resource to a postgis store that already exists.

    Args:
        store_id (string): Identifier for the store to add the resource to. Can be a store name or a workspace name combination (e.g.: "name" or "workspace:name"). Note that the workspace must be an existing workspace. If no workspace is given, the default workspace will be assigned.  # noqa: E501
        table (string): Name of existing table to add as a feature resource. A layer will automatically be created for this resource. Both the resource and the layer will share the same name as the table.  # noqa: E501
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:
        response = engine.add_table_to_postgis_store(store_id='workspace:store_name', table='table_name')
    """
    # Get a GeoServer catalog object and query for list of layer groups
    catalog = self._get_geoserver_catalog_object()
    # Process identifier
    workspace, name = self._process_identifier(store_id)
    # Get default work space if none is given
    if not workspace:
        workspace = catalog.get_default_workspace().name
    # Return an error response if the store does not exist.
    # NOTE(review): only FailedRequestError is handled here; if get_store returned
    # None instead of raising, execution would continue — confirm gsconfig behavior.
    try:
        catalog.get_store(name=name, workspace=workspace)
    except geoserver.catalog.FailedRequestError:
        message = "There is no store named " + name
        if workspace:
            message += " in " + workspace
        response_dict = {'success': False,
                         'error': message}
        self._handle_debug(response_dict, debug)
        return response_dict
    # Prepare file
    xml = """
    <featureType>
        <name>{0}</name>
    </featureType>
    """.format(table)
    # Prepare headers
    headers = {
        "Content-type": "text/xml",
        "Accept": "application/xml"
    }
    # Prepare URL
    url = self._assemble_url('workspaces', workspace, 'datastores', name, 'featuretypes')
    # Execute: POST /workspaces/<ws>/datastores/<ds>/featuretypes
    response = requests.post(url=url,
                             data=xml,
                             headers=headers,
                             auth=HTTPBasicAuth(username=self.username, password=self.password))
    # 201 Created expected on success
    if response.status_code != 201:
        response_dict = {'success': False,
                         'error': '{1}({0}): {2}'.format(response.status_code, response.reason, response.text)}
        self._handle_debug(response_dict, debug)
        return response_dict
    # Wrap up successfully (return the store, refreshed after the addition)
    new_store = catalog.get_store(name=name, workspace=workspace)
    resource_dict = self._transcribe_geoserver_object(new_store)
    response_dict = {'success': True,
                     'result': resource_dict}
    self._handle_debug(response_dict, debug)
    return response_dict
def create_sql_view(self, feature_type_name, postgis_store_id, sql, geometry_column, geometry_type,
                    geometry_srid=4326, default_style_id=None, key_column=None, parameters=None, debug=False):
    """
    Create a new feature type configured as an SQL view.

    Args:
        feature_type_name (string): Name of the feature type and layer to be created.
        postgis_store_id (string): Identifier of existing postgis store with tables that will be queried by the sql view. Can be a store name or a workspace-name combination (e.g.: "name" or "workspace:name").
        sql (string): SQL that will be used to construct the sql view / virtual table.
        geometry_column (string): Name of the geometry column.
        geometry_type (string): Type of the geometry column (e.g. "Point", "LineString", "Polygon").
        geometry_srid (string, optional): EPSG spatial reference id of the geometry column. Defaults to 4326.
        default_style_id (string, optional): Identifier of a style to assign as the default style. Can be a style name or a workspace-name combination (e.g.: "name" or "workspace:name").
        key_column (string, optional): The name of the key column.
        parameters (iterable, optional): A list/tuple of tuple-triplets representing parameters in the form (name, default, regex_validation), (e.g.: (('variable', 'pressure', '^[\w]+$'), ('simtime', '0:00:00', '^[\w\:]+$'))
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Example:
        ::
            sql = "SELECT name, value, geometry FROM pipes"

            response = engine.create_sql_view(
                feature_type_name='my_feature_type',
                postgis_store_id='my_workspace:my_postgis_store',
                sql=sql,
                geometry_column='geometry',
                geometry_type='LineString',
                geometry_srid=32144,
                default_style_id='my_workspace:pipes',
                debug=True
            )
    """  # noqa: E501, W605
    # Connect to the GeoServer catalog and locate the backing PostGIS store
    gs_catalog = self._get_geoserver_catalog_object()
    store_workspace, store_name = self._process_identifier(postgis_store_id)
    postgis_store = gs_catalog.get_store(store_name, workspace=store_workspace)

    # Build the virtual table (SQL view) definition
    srs = 'EPSG:{0}'.format(geometry_srid)
    view_geometry = JDBCVirtualTableGeometry(geometry_column, geometry_type, str(geometry_srid))
    if parameters is not None:
        # Each triplet (name, default, regex_validation) becomes a JDBC view parameter
        parameters = [JDBCVirtualTableParam(*param_args) for param_args in parameters]
    virtual_table = JDBCVirtualTable(feature_type_name, sql, 'false', view_geometry, key_column, parameters)

    # Publish the feature type backed by the virtual table
    gs_catalog.publish_featuretype(feature_type_name, postgis_store, srs, jdbc_virtual_table=virtual_table)

    # Fetch the resulting layer; optionally attach the requested default style
    published_layer = gs_catalog.get_layer(feature_type_name)
    if default_style_id is not None:
        style_workspace, style_name = self._process_identifier(default_style_id)
        published_layer.default_style = gs_catalog.get_style(style_name, workspace=style_workspace)
        gs_catalog.save(published_layer)

    # Assemble the response
    response_dict = {'success': True,
                     'result': self._transcribe_geoserver_object(published_layer)}
    self._handle_debug(response_dict, debug)
    return response_dict
def create_shapefile_resource(self, store_id, shapefile_base=None, shapefile_zip=None, shapefile_upload=None,
                              overwrite=False, charset=None, debug=False):
    """
    Use this method to add shapefile resources to GeoServer.

    This method will result in the creation of three items: a feature type store, a feature type resource, and a layer. If store_id references a store that does not exist, it will be created. The feature type resource and the subsequent layer will be created with the same name as the feature type store. Provide shapefile with either shapefile_base, shapefile_zip, or shapefile_upload arguments.  # noqa: E501

    Args:
        store_id (string): Identifier for the store to add the resource to. Can be a store name or a workspace name combination (e.g.: "name" or "workspace:name"). Note that the workspace must be an existing workspace. If no workspace is given, the default workspace will be assigned.  # noqa: E501
        shapefile_base (string, optional): Path to shapefile base name (e.g.: "/path/base" for shapefile at "/path/base.shp")
        shapefile_zip (string, optional): Path to a zip file containing the shapefile and side cars.
        shapefile_upload (FileUpload list, optional): A list of Django FileUpload objects containing a shapefile and side cars that have been uploaded via multipart/form-data form.  # noqa: E501
        overwrite (bool, optional): Overwrite the file if it already exists.
        charset (string, optional): Specify the character encoding of the file being uploaded (e.g.: ISO-8559-1)
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:
        # For example.shp (path to file but omit the .shp extension)
        shapefile_base = "/path/to/shapefile/example"
        response = engine.create_shapefile_resource(store_id='workspace:store_name', shapefile_base=shapefile_base)

        # Using zip
        shapefile_zip = "/path/to/shapefile/example.zip"
        response = engine.create_shapefile_resource(store_id='workspace:store_name', shapefile_zip=shapefile_zip)

        # Using upload
        file_list = request.FILES.getlist('files')
        response = engine.create_shapefile_resource(store_id='workspace:store_name', shapefile_upload=file_list)
    """
    # Validate shapefile arguments: exactly one of the three input forms must be given
    arg_value_error_msg = 'Exactly one of the "shapefile_base", "shapefile_zip", ' \
                          'or "shapefile_upload" arguments must be specified. '
    if not shapefile_base and not shapefile_zip and not shapefile_upload:
        raise ValueError(arg_value_error_msg + 'None given.')
    elif shapefile_zip and shapefile_upload and shapefile_base:
        raise ValueError(arg_value_error_msg + '"shapefile_base", "shapefile_zip", and '
                                               '"shapefile_upload" given.')
    elif shapefile_base and shapefile_zip:
        raise ValueError(arg_value_error_msg + '"shapefile_base" and "shapefile_zip" given.')
    elif shapefile_base and shapefile_upload:
        raise ValueError(arg_value_error_msg + '"shapefile_base" and "shapefile_upload" given.')
    elif shapefile_zip and shapefile_upload:
        raise ValueError(arg_value_error_msg + '"shapefile_zip" and "shapefile_upload" given.')
    # Get a GeoServer catalog object and query for list of layer groups
    catalog = self._get_geoserver_catalog_object()
    # Process identifier
    workspace, name = self._process_identifier(store_id)
    # Get default work space if none is given
    if not workspace:
        workspace = catalog.get_default_workspace().name
    # Throw error if overwrite is not true and store already exists
    if not overwrite:
        try:
            catalog.get_store(name=name, workspace=workspace)
            message = "There is already a store named " + name
            if workspace:
                message += " in " + workspace
            response_dict = {'success': False,
                             'error': message}
            self._handle_debug(response_dict, debug)
            return response_dict
        except geoserver.catalog.FailedRequestError:
            # Store does not exist yet: safe to proceed
            pass
    # Prepare files
    temp_archive = None
    zip_file_in_memory = None
    # Shapefile Base Case: zip the .shp and its sidecar files into a temp archive
    if shapefile_base:
        shapefile_plus_sidecars = shapefile_and_friends(shapefile_base)
        temp_archive = '{0}.zip'.format(os.path.join(os.path.split(shapefile_base)[0], name))
        with ZipFile(temp_archive, 'w') as zfile:
            for extension, filepath in shapefile_plus_sidecars.items():
                # Entries in the archive are renamed to match the store name
                filename = '{0}.{1}'.format(name, extension)
                zfile.write(filename=filepath, arcname=filename)
        files = {'file': open(temp_archive, 'rb')}
    # Shapefile Zip Case: the caller already provides a zip archive
    elif shapefile_zip:
        if is_zipfile(shapefile_zip):
            files = {'file': open(shapefile_zip, 'rb')}
        else:
            raise TypeError('"{0}" is not a zip archive.'.format(shapefile_zip))
    # Shapefile Upload Case: zip the uploaded file objects entirely in memory
    elif shapefile_upload:
        # Write files in memory to zipfile in memory
        zip_file_in_memory = BytesIO()
        with ZipFile(zip_file_in_memory, 'w') as zfile:
            for file in shapefile_upload:
                extension = os.path.splitext(file.name)[1]
                filename = '{0}{1}'.format(name, extension)
                zfile.writestr(filename, file.read())
        files = {'file': zip_file_in_memory.getvalue()}
    # Prepare headers
    headers = {
        "Content-type": "application/zip",
        "Accept": "application/xml"
    }
    # Prepare URL
    url = self._assemble_url('workspaces', workspace, 'datastores', name, 'file.shp')
    # Set params
    params = {}
    if charset:
        params['charset'] = charset
    if overwrite:
        params['update'] = 'overwrite'
    # Execute: PUT /workspaces/<ws>/datastores/<ds>/file.shp
    response = requests.put(url=url,
                            files=files,
                            headers=headers,
                            params=params,
                            auth=HTTPBasicAuth(username=self.username, password=self.password))
    # Clean up file stuff (close handles before removing the temp archive)
    if shapefile_base or shapefile_zip:
        files['file'].close()
        if temp_archive:
            os.remove(temp_archive)
    if zip_file_in_memory:
        zip_file_in_memory.close()
    # Wrap up with failure (201 Created expected)
    if response.status_code != 201:
        response_dict = {'success': False,
                         'error': '{1}({0}): {2}'.format(response.status_code, response.reason, response.text)}
        self._handle_debug(response_dict, debug)
        return response_dict
    if shapefile_base:
        # This case uses the store name as the Resource ID.
        resource_id = name
    elif shapefile_zip:
        # This case uses the filename as the Resource ID.
        resource_id = os.path.splitext(os.path.basename(shapefile_zip))[0]
    elif shapefile_upload:
        # This case uses the store name as the Resource ID.
        resource_id = name
    # Wrap up successfully
    new_resource = catalog.get_resource(name=resource_id, store=name, workspace=workspace)
    resource_dict = self._transcribe_geoserver_object(new_resource)
    response_dict = {'success': True,
                     'result': resource_dict}
    self._handle_debug(response_dict, debug)
    return response_dict
def create_coverage_resource(self, store_id, coverage_type, coverage_file=None,
                             coverage_upload=None, coverage_name=None,
                             overwrite=False, query_after_success=True, debug=False):
    """
    Use this method to add coverage resources to GeoServer.

    This method will result in the creation of three items: a coverage store, a coverage resource, and a layer. If store_id references a store that does not exist, it will be created. Unless coverage_name is specified, the coverage resource and the subsequent layer will be created with the same name as the image file that is uploaded.  # noqa: E501

    Args:
        store_id (string): Identifier for the store to add the image to or to be created. Can be a name or a workspace name combination (e.g.: "name" or "workspace:name"). Note that the workspace must be an existing workspace. If no workspace is given, the default workspace will be assigned.  # noqa: E501
        coverage_type (string): Type of coverage that is being created. Valid values include: 'geotiff', 'worldimage', 'imagemosaic', 'imagepyramid', 'gtopo30', 'arcgrid', 'grassgrid', 'erdasimg', 'aig', 'gif', 'png', 'jpeg', 'tiff', 'dted', 'rpftoc', 'rst', 'nitf', 'envihdr', 'mrsid', 'ehdr', 'ecw', 'netcdf', 'erdasimg', 'jp2mrsid'.  # noqa: E501
        coverage_file (string, optional): Path to the coverage image or zip archive. Most files will require a .prj file with the Well Known Text definition of the projection. Zip this file up with the image and send the archive.  # noqa: E501
        coverage_upload (FileUpload list, optional): A list of Django FileUpload objects containing a coverage file and .prj file or archive that have been uploaded via multipart/form-data form.  # noqa: E501
        coverage_name (string): Name of the coverage resource and subsequent layer that are created. If unspecified, these will match the name of the image file that is uploaded.  # noqa: E501
        overwrite (bool, optional): Overwrite the file if it already exists.
        query_after_success (bool, optional): Query geoserver for resource objects after successful upload. Defaults to True.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Note:
        If the type of coverage being uploaded includes multiple files (e.g.: image, world file, projection file), they must be uploaded as a zip archive. Otherwise upload the single file.  # noqa: E501

    Returns:
        (dict): Response dictionary

    Examples:
        coverage_file = '/path/to/geotiff/example.zip'
        response = engine.create_coverage_resource(store_id='workspace:store_name', coverage_file=coverage_file, coverage_type='geotiff')  # noqa: E501
    """
    # Coverage types accepted by GeoServer's "file.<extension>" upload endpoint
    VALID_COVERAGE_TYPES = ('jp2mrsid',
                            'geotiff',
                            'nitf',
                            'netcdf',
                            'aig',
                            'dted',
                            'imagepyramid',
                            'ehdr',
                            'mrsid',
                            'erdasimg',
                            'ecw',
                            'rpftoc',
                            'rst',
                            'gtopo30',
                            'arcgrid',
                            'imagemosaic',
                            'envihdr',
                            'worldimage',
                            'grassgrid')
    # Validate coverage type
    if coverage_type not in VALID_COVERAGE_TYPES:
        raise ValueError('"{0}" is not a valid coverage_type. Use either {1}'.format(
            coverage_type, ', '.join(VALID_COVERAGE_TYPES)))
    # Get a GeoServer catalog object and query for list of layer groups
    catalog = self._get_geoserver_catalog_object()
    # Process identifier
    workspace, store_name = self._process_identifier(store_id)
    # Get default work space if none is given
    if not workspace:
        workspace = catalog.get_default_workspace().name
    # Throw error if overwrite is not true and store already exists
    if not overwrite:
        try:
            catalog.get_store(name=store_name, workspace=workspace)
            message = "There is already a store named " + store_name
            if workspace:
                message += " in " + workspace
            response_dict = {'success': False,
                             'error': message}
            self._handle_debug(response_dict, debug)
            return response_dict
        except geoserver.catalog.FailedRequestError:
            pass
    # Prepare files
    working_dir = None
    # GRASS grids are not supported directly by GeoServer: unzip, rewrite the
    # GRASS header as an Arc Grid header, and rezip before upload.
    if coverage_type == 'grassgrid':
        # Validation
        if coverage_file is None:
            raise ValueError('The coverage_file parameter is required for coverage_type "grassgrid".')
        if not is_zipfile(coverage_file):
            raise ValueError('The coverage_file parameter must be a path to a valid zip archive for '
                             'coverage_type "grassgrid".')
        working_dir = os.path.join(os.path.dirname(coverage_file), '.gstmp')
        original_coverage_filename = os.path.basename(coverage_file)
        # When the test stops in the middle it leaves the temp folder and next time the test fails due to the
        # existing folder. So, we need to make sure to delete the folder before extract the contents.
        if os.path.exists(working_dir):
            shutil.rmtree(working_dir)
        # Unzip
        zip_file = ZipFile(coverage_file)
        zip_file.extractall(working_dir)
        # Change Header
        valid_grass_file = False
        for f in os.listdir(working_dir):
            # Skip the projection (.prj) sidecar; only rewrite the grid file
            if 'prj' not in f:
                # Defaults used when the header omits a field
                north = 90.0
                south = -90.0
                east = -180.0
                rows = 360
                cols = 720
                corrupt_file = False
                # NOTE(review): "f" is rebound here from the filename string to the
                # open file object; "f.name" used below therefore resolves to the
                # file's full path — confirm this is intentional.
                with open(os.path.join(working_dir, f), 'r') as f:
                    contents = f.readlines()
                    # Parse the 6-line GRASS header (north/south/east/west/rows/cols)
                    for line in contents[0:6]:
                        if 'north' in line:
                            north = float(line.split(':')[1].strip())
                        elif 'south' in line:
                            south = float(line.split(':')[1].strip())
                        elif 'east' in line:
                            east = float(line.split(':')[1].strip())
                        elif 'west' in line:
                            # west is present in GRASS headers but not used below
                            pass
                        elif 'rows' in line:
                            rows = int(line.split(':')[1].strip())
                        elif 'cols' in line:
                            cols = int(line.split(':')[1].strip())
                        else:
                            corrupt_file = True
                # Abort on an unrecognized header line; valid_grass_file stays False
                if corrupt_file:
                    break
                # Calcuate new header
                # NOTE(review): xllcorner is taken from "east" even though GRASS
                # "west" would normally be the lower-left x — verify correctness.
                xllcorner = east
                yllcorner = south
                cellsize = (north - south) / rows
                header = ['ncols {0}\n'.format(cols),
                          'nrows {0}\n'.format(rows),
                          'xllcorner {0}\n'.format(xllcorner),
                          'yllcorner {0}\n'.format(yllcorner),
                          'cellsize {0}\n'.format(cellsize)]
                # Strip off old header and add new one
                for _ in range(0, 6):
                    contents.pop(0)
                contents = header + contents
                with open(os.path.join(working_dir, f.name), 'w') as o:
                    for line in contents:
                        o.write(line)
                valid_grass_file = True
        if not valid_grass_file:
            # Clean up
            for f in os.listdir(working_dir):
                os.remove(os.path.join(working_dir, f))
            os.rmdir(working_dir)
            raise IOError('GRASS file could not be processed, check to ensure the GRASS grid is correctly '
                          'formatted or included.')
        # New coverage zip file (rezip)
        coverage_file = os.path.join(working_dir, original_coverage_filename)
        with ZipFile(coverage_file, 'w') as zf:
            for f in os.listdir(working_dir):
                if f != original_coverage_filename:
                    zf.write(os.path.join(working_dir, f), f)
    # Prepare file(s) for upload: "files" for multipart zip, "data" for raw body.
    # NOTE(review): if both coverage_file and coverage_upload are None,
    # content_type is never assigned and the headers dict below would raise
    # NameError — confirm callers always supply one of the two.
    files = None
    data = None
    if coverage_file is not None:
        if is_zipfile(coverage_file):
            files = {'file': open(coverage_file, 'rb')}
            content_type = 'application/zip'
        else:
            content_type = 'image/{0}'.format(coverage_type)
            data = open(coverage_file, 'rb')
            if not coverage_name:
                coverage_filename = os.path.basename(coverage_file)
                coverage_name = coverage_filename.split('.')[0]
    elif coverage_upload is not None:
        content_type = 'application/zip'
        # Check if zip archive
        try:
            if coverage_upload.name.endswith('.zip'):
                files = {'file': coverage_upload}
            else:
                content_type = 'image/{0}'.format(coverage_type)
                data = coverage_upload
                if not coverage_name:
                    coverage_filename = os.path.basename(coverage_upload.name)
                    coverage_name = coverage_filename.split('.')[0]
        except AttributeError:
            # coverage_upload has no .name: treat it as a list of file objects below
            pass
        if files is None and data is None:
            # Write files in memory to zipfile in memory
            zip_file_in_memory = BytesIO()
            in_memory_coverage_name = None
            with ZipFile(zip_file_in_memory, 'w') as zfile:
                for f in coverage_upload:
                    zfile.writestr(os.path.basename(f.name), f.read())
                    # The non-.prj file determines the default coverage name
                    if 'prj' not in f.name:
                        in_memory_coverage_name = os.path.basename(f.name).split('.')[0]
            files = {'file': zip_file_in_memory.getvalue()}
            if not coverage_name and in_memory_coverage_name:
                coverage_name = in_memory_coverage_name
    # Prepare headers: grassgrid inputs were converted to Arc Grid format above
    extension = coverage_type
    if coverage_type == 'grassgrid':
        extension = 'arcgrid'
    headers = {
        "Content-type": content_type,
        "Accept": "application/xml"
    }
    # Prepare URL
    url = self._assemble_url('workspaces', workspace, 'coveragestores', store_name, 'file.{0}'.format(extension))
    # Set params
    params = {}
    if coverage_name:
        params['coverageName'] = coverage_name
    if overwrite:
        params['update'] = 'overwrite'
    # Execute: PUT /workspaces/<ws>/coveragestores/<cs>/file.<extension>
    response = requests.put(url=url,
                            files=files,
                            data=data,
                            headers=headers,
                            params=params,
                            auth=(self.username, self.password))
    # Clean up: close open handles, then remove the temporary working directory
    if coverage_file:
        if is_zipfile(coverage_file):
            files['file'].close()
        else:
            data.close()
    if working_dir:
        for f in os.listdir(working_dir):
            os.remove(os.path.join(working_dir, f))
        os.rmdir(working_dir)
    # 201 Created expected on success
    if response.status_code != 201:
        response_dict = {'success': False,
                         'error': '{1}({0}): {2}'.format(response.status_code, response.reason, response.text)}
        self._handle_debug(response_dict, debug)
        return response_dict
    # Wrap up successfully
    # NOTE: On success response returns xml representation of object, which we don't handle currently
    # So we use gsconfg to get the resource object
    if query_after_success:
        new_resource = catalog.get_resource(name=coverage_name, store=store_name, workspace=workspace)
        resource_dict = self._transcribe_geoserver_object(new_resource)
    else:
        resource_dict = None
    response_dict = {'success': True,
                     'result': resource_dict}
    self._handle_debug(response_dict, debug)
    return response_dict
def create_layer_group(self, layer_group_id, layers, styles, bounds=None, debug=False):
    """
    Create a layer group. The number of layers and the number of styles must be the same.

    Args:
        layer_group_id (string): Identifier of the layer group to create.
        layers (iterable): A list of layer names to be added to the group. Must be the same length as the styles list.
        styles (iterable): A list of style names to associate with each layer in the group. Must be the same length as the layers list.  # noqa: #501
        bounds (iterable): A tuple representing the bounding box of the layer group (e.g.: ('-74.02722', '-73.907005', '40.684221', '40.878178', 'EPSG:4326') )  # noqa: #501
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:
        layers = ('layer1', 'layer2')
        styles = ('style1', 'style2')
        bounds = ('-74.02722', '-73.907005', '40.684221', '40.878178', 'EPSG:4326')
        response = engine.create_layer_group(layer_group_id='layer_group_name', layers=layers, styles=styles, bounds=bounds)  # noqa: E501
    """
    # Get a GeoServer catalog object and query for list of layer groups
    catalog = self._get_geoserver_catalog_object()
    workspace, name = self._process_identifier(layer_group_id)
    # Response dictionary
    response_dict = {'success': False}
    # Create layer group
    try:
        layer_group = catalog.create_layergroup(name, layers, styles, bounds, workspace=workspace)
        catalog.save(layer_group)
        layer_group_dict = self._transcribe_geoserver_object(layer_group)
        response_dict['success'] = True
        response_dict['result'] = layer_group_dict
    # Both failure modes produce the same error response, so handle them together
    # (previously two byte-identical except clauses).
    except (geoserver.catalog.ConflictingDataError, geoserver.catalog.FailedRequestError) as e:
        response_dict['success'] = False
        response_dict['error'] = str(e)
    # Handle the debug and return
    self._handle_debug(response_dict, debug)
    return response_dict
def create_workspace(self, workspace_id, uri, debug=False):
    """
    Create a new workspace.

    Args:
        workspace_id (string): Identifier of the workspace to create. Must be unique.
        uri (string): URI associated with your project. Does not need to be a real web URL, just a unique identifier. One suggestion is to append the URL of your project with the name of the workspace (e.g.: http:www.example.com/workspace-name).  # noqa: E501
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:
        response = engine.create_workspace(workspace_id='workspace_name', uri='www.example.com/workspace_name')
    """
    # Connect to the GeoServer catalog
    gs_catalog = self._get_geoserver_catalog_object()

    try:
        # Attempt the create; gsconfig signals invalid input with AssertionError
        new_workspace = gs_catalog.create_workspace(workspace_id, uri)
        response_dict = {
            'success': True,
            'result': self._transcribe_geoserver_object(new_workspace),
        }
    except AssertionError as validation_error:
        response_dict = {
            'success': False,
            'error': str(validation_error),
        }

    self._handle_debug(response_dict, debug)
    return response_dict
def create_style(self, style_id, sld, overwrite=False, debug=False):
    """
    Create a new SLD style object.

    Args:
        style_id (string): Identifier of the style to create.
        sld (string): Styled Layer Descriptor string
        overwrite (bool, optional): Overwrite if style already exists. Defaults to False.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.

    Returns:
        (dict): Response dictionary

    Examples:
        sld = '/path/to/style.sld'
        sld_file = open(sld, 'r')
        response = engine.create_style(style_id='fred', sld=sld_file.read(), debug=True)
        sld_file.close()
    """
    # Get a GeoServer catalog object
    catalog = self._get_geoserver_catalog_object()
    # Process identifier
    workspace, name = self._process_identifier(style_id)
    # Maximum number of upload attempts before giving up
    MAX_ATTEMPTS = 5
    try:
        # Do create: retry uploads because GeoServer occasionally rejects a style
        # upload transiently. Previously this retried in a busy loop; pause briefly
        # between attempts, consistent with the store-lookup retry used elsewhere
        # in this engine.
        num_attempts = 0
        while True:
            try:
                catalog.create_style(name=name,
                                     data=sld,
                                     workspace=workspace,
                                     overwrite=overwrite)
                break
            except geoserver.catalog.UploadError as e:
                num_attempts += 1
                if num_attempts >= MAX_ATTEMPTS:
                    # Out of retries: propagate the original upload error
                    raise e
                time.sleep(1)
        style = catalog.get_style(name=name, workspace=workspace)
        style_dict = self._transcribe_geoserver_object(style)
        response_dict = {'success': True,
                         'result': style_dict}
    # Both failure modes produce the same error response (previously two
    # byte-identical except clauses).
    except (AssertionError, geoserver.catalog.ConflictingDataError) as e:
        response_dict = {'success': False,
                         'error': str(e)}
    self._handle_debug(response_dict, debug)
    return response_dict
def update_resource(self, resource_id, store=None, debug=False, **kwargs):
    """
    Update an existing resource.

    Args:
        resource_id (string): Identifier of the resource to update. Can be a name or a workspace-name combination (e.g.: "name" or "workspace:name").  # noqa: E501
        store (string, optional): Update a resource in this store.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
        **kwargs (kwargs, optional): Key value pairs representing the attributes and values to change.

    Returns:
        (dict): Response dictionary

    Examples:
        response = engine.update_resource(resource_id='workspace:resource_name', enabled=False, title='New Title')
    """
    # Connect to the GeoServer catalog
    gs_catalog = self._get_geoserver_catalog_object()

    # Split "workspace:name" style identifiers; fall back to the default workspace
    workspace, resource_name = self._process_identifier(resource_id)
    if not workspace:
        workspace = gs_catalog.get_default_workspace().name

    try:
        # Look up the resource, apply the requested attribute changes, and persist
        target_resource = gs_catalog.get_resource(name=resource_name, store=store, workspace=workspace)
        modified_resource = self._apply_changes_to_gs_object(kwargs, target_resource)
        gs_catalog.save(modified_resource)
        response_dict = {
            'success': True,
            'result': self._transcribe_geoserver_object(modified_resource),
        }
    except geoserver.catalog.FailedRequestError as request_error:
        response_dict = {
            'success': False,
            'error': str(request_error),
        }

    self._handle_debug(response_dict, debug)
    return response_dict
def update_layer(self, layer_id, debug=False, **kwargs):
    """
    Update an existing layer.
    Args:
        layer_id (string): Identifier of the layer to update. Can be a name or a workspace-name combination (e.g.: "name" or "workspace:name").
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
        **kwargs (kwargs, optional): Key value pairs representing the attributes and values to change.
    Returns:
        (dict): Response dictionary
    Examples:
        updated_layer = engine.update_layer(layer_id='workspace:layer_name', default_style='style1', styles=['style1', 'style2'])
    """
    # Pop tile caching properties to handle separately: gsconfig cannot
    # manage GeoWebCache settings, so they go through the GWC REST API below.
    tile_caching = kwargs.pop('tile_caching', None)
    # Get a GeoServer catalog object and query for list of layer groups
    catalog = self._get_geoserver_catalog_object()
    try:
        # Get resource
        layer = catalog.get_layer(name=layer_id)
        # Apply changes from kwargs
        updated_layer = self._apply_changes_to_gs_object(kwargs, layer)
        # Save the changes
        catalog.save(updated_layer)
        # Return the updated resource dictionary
        layer_dict = self._transcribe_geoserver_object(updated_layer)
        # Assemble Response
        response_dict = {'success': True,
                         'result': layer_dict}
        # Handle tile caching properties (gsconfig doesn't support this)
        if tile_caching is not None:
            gwc_url = '{0}layers/{1}.xml'.format(self.gwc_endpoint, layer_id)
            auth = (self.username, self.password)
            # Wrap the caching settings in the XML envelope GWC expects.
            xml = ConvertDictToXml({'GeoServerLayer': tile_caching})
            r = requests.post(
                gwc_url,
                auth=auth,
                headers={'Content-Type': 'text/xml'},
                data=ElementTree.tostring(xml)
            )
            # A 200 means GWC accepted the new caching configuration; any
            # other status overrides the earlier success response.
            if r.status_code == 200:
                layer_dict['tile_caching'] = tile_caching
                response_dict = {'success': True,
                                 'result': layer_dict}
            else:
                response_dict = {'success': False,
                                 'error': r.text}
    except geoserver.catalog.FailedRequestError as e:
        response_dict = {'success': False,
                         'error': str(e)}
    self._handle_debug(response_dict, debug)
    return response_dict
def update_layer_group(self, layer_group_id, debug=False, **kwargs):
    """
    Update an existing layer group. When modifying the layers, make sure the
    number of layers and the number of styles are the same.
    Args:
        layer_group_id (string): Identifier of the layer group to update.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
        **kwargs (kwargs, optional): Key value pairs representing the attributes and values to change
    Returns:
        (dict): Response dictionary
    Examples:
        updated_layer_group = engine.update_layer_group(layer_group_id='layer_group_name', layers=['layer1', 'layer2'], styles=['style1', 'style2'])
    """
    catalog = self._get_geoserver_catalog_object()
    workspace, name = self._process_identifier(layer_group_id)

    try:
        group = catalog.get_layergroup(name=name, workspace=workspace)

        # Copy over only the attributes the layer group actually exposes.
        for attr_name, new_value in kwargs.items():
            if hasattr(group, attr_name):
                setattr(group, attr_name, new_value)

        catalog.save(group)
        response_dict = {
            'success': True,
            'result': self._transcribe_geoserver_object(group),
        }
    except geoserver.catalog.FailedRequestError as e:
        response_dict = {'success': False,
                         'error': str(e)}

    self._handle_debug(response_dict, debug)
    return response_dict
def delete_resource(self, resource_id, store_id, purge=False, recurse=False, debug=False):
    """
    Delete a resource.
    Args:
        resource_id (string): Identifier of the resource to delete. Either a bare name or a workspace-name pair ("name" or "workspace:name").
        store_id (string): Only consider resources belonging to this store.
        purge (bool, optional): Purge if True.
        recurse (bool, optional): Delete recursively any dependencies if True (i.e.: layers or layer groups it belongs to).
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
    Returns:
        (dict): Response dictionary
    Examples:
        response = engine.delete_resource('workspace:resource_name')
    """
    catalog = self._get_geoserver_catalog_object()
    workspace, name = self._process_identifier(resource_id)

    # Fall back to the catalog's default workspace when none was supplied.
    if not workspace:
        workspace = catalog.get_default_workspace().name

    gs_resource = catalog.get_resource(name=name, store=store_id, workspace=workspace)

    # Delegate the actual deletion and response assembly.
    return self._handle_delete(identifier=name, gs_object=gs_resource,
                               purge=purge, recurse=recurse, debug=debug)
def delete_layer(self, layer_id, store_id=None, purge=False, recurse=False, debug=False):
    """
    Delete a layer.
    Args:
        layer_id (string): Identifier of the layer to delete. Either a bare name or a workspace-name pair ("name" or "workspace:name").
        store_id (string, optional): Only consider resources belonging to this store.
        purge (bool, optional): Purge if True.
        recurse (bool, optional): Delete recursively if True (i.e: delete layer groups it belongs to).
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
    Returns:
        (dict): Response dictionary
    Examples:
        response = engine.delete_layer('workspace:layer_name')
    """
    catalog = self._get_geoserver_catalog_object()
    gs_layer = catalog.get_layer(name=layer_id)

    # Pin the layer to the requested store before deleting, when one is given.
    if gs_layer and store_id:
        gs_layer.store = store_id

    return self._handle_delete(identifier=layer_id, gs_object=gs_layer,
                               purge=purge, recurse=recurse, debug=debug)
def delete_layer_group(self, layer_group_id, purge=False, recurse=False, debug=False):
    """
    Delete a layer group.
    Args:
        layer_group_id (string): Identifier of the layer group to delete.
        purge (bool, optional): Purge if True.
        recurse (bool, optional): Delete recursively if True.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
    Returns:
        (dict): Response dictionary
    Examples:
        response = engine.delete_layer_group('layer_group_name')
    """
    catalog = self._get_geoserver_catalog_object()
    workspace, name = self._process_identifier(layer_group_id)

    # Look up the layer group, then delegate deletion and response assembly.
    group = catalog.get_layergroup(name=name, workspace=workspace)
    return self._handle_delete(identifier=layer_group_id, gs_object=group,
                               purge=purge, recurse=recurse, debug=debug)
def delete_workspace(self, workspace_id, purge=False, recurse=False, debug=False):
    """
    Delete a workspace.
    Args:
        workspace_id (string): Identifier of the workspace to delete.
        purge (bool, optional): Purge if True.
        recurse (bool, optional): Delete recursively if True.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
    Returns:
        (dict): Response dictionary
    Examples:
        response = engine.delete_workspace('workspace_name')
    """
    catalog = self._get_geoserver_catalog_object()

    # Look up the workspace, then delegate deletion and response assembly.
    gs_workspace = catalog.get_workspace(workspace_id)
    return self._handle_delete(identifier=workspace_id, gs_object=gs_workspace,
                               purge=purge, recurse=recurse, debug=debug)
def delete_store(self, store_id, purge=False, recurse=False, debug=False):
    """
    Delete a store.
    Args:
        store_id (string): Identifier of the store to delete.
        purge (bool, optional): Purge if True.
        recurse (bool, optional): Delete recursively if True.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
    Returns:
        (dict): Response dictionary
    Examples:
        response = engine.delete_store('workspace:store_name')
    """
    catalog = self._get_geoserver_catalog_object()
    workspace, name = self._process_identifier(store_id)

    # Fall back to the catalog's default workspace when none was supplied.
    if not workspace:
        workspace = catalog.get_default_workspace().name

    try:
        gs_store = catalog.get_store(name=name, workspace=workspace)
        return self._handle_delete(identifier=store_id, gs_object=gs_store,
                                   purge=purge, recurse=recurse, debug=debug)
    except geoserver.catalog.FailedRequestError as e:
        # The store could not be fetched; report the failure directly.
        response_dict = {'success': False,
                         'error': str(e)}
        self._handle_debug(response_dict, debug)
        return response_dict
def delete_style(self, style_id, purge=False, recurse=False, debug=False):
    """
    Delete a style.
    Args:
        style_id (string): Identifier of the style to delete.
        purge (bool, optional): Purge if True.
        recurse (bool, optional): Delete recursively if True.
        debug (bool, optional): Pretty print the response dictionary to the console for debugging. Defaults to False.
    Returns:
        (dict): Response dictionary
    Examples:
        response = engine.delete_style('style_name')
    """
    catalog = self._get_geoserver_catalog_object()
    workspace, name = self._process_identifier(style_id)

    try:
        gs_style = catalog.get_style(name=name, workspace=workspace)
        return self._handle_delete(identifier=style_id, gs_object=gs_style,
                                   purge=purge, recurse=recurse, debug=debug)
    except geoserver.catalog.FailedRequestError as e:
        # The style could not be fetched; report the failure directly.
        response_dict = {'success': False,
                         'error': str(e)}
        self._handle_debug(response_dict, debug)
        return response_dict
def validate(self):
    """
    Validate the GeoServer spatial dataset engine. Will throw an error if not valid.

    Raises:
        AssertionError: if the endpoint URL is malformed, the credentials are
            rejected, or the endpoint does not respond like a GeoServer REST API.
    """
    try:
        r = requests.get(self.endpoint, auth=(self.username, self.password))
    except requests.exceptions.MissingSchema:
        raise AssertionError('The URL "{0}" provided for the GeoServer spatial dataset service endpoint is '
                             'invalid.'.format(self.endpoint))
    # NOTE(review): other request failures (e.g. ConnectionError for an
    # unreachable host) are not caught here and will propagate -- confirm
    # that is intended.
    if r.status_code == 401:
        raise AssertionError('The username and password of the GeoServer spatial dataset service engine are '
                             'not valid.')
    if r.status_code != 200:
        raise AssertionError('The URL "{0}" is not a valid GeoServer spatial dataset service '
                             'endpoint.'.format(self.endpoint))
    # The landing page of a GeoServer REST endpoint contains this marker text.
    if 'Geoserver Configuration API' not in r.text:
        raise AssertionError('The URL "{0}" is not a valid GeoServer spatial dataset service '
                             'endpoint.'.format(self.endpoint))
|
tethysplatform/tethys_dataset_services
|
tethys_dataset_services/engines/geoserver_engine.py
|
Python
|
bsd-2-clause
| 99,537
|
[
"NetCDF"
] |
da7a0c7ba56814667359c803ea8c0698e5f8d701fae5ed066436153ebd6fdb1d
|
########################################################################
# Common functions used for solving projectEuler.net. problems.
# Copyright (C) 2011 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://wiki.san-ss.com.ar
########################################################################
from math import sqrt
def factorial(n):
    """Return n! (the product of the integers 2..n); factorial(0) == 1."""
    product = 1
    for factor in range(n, 1, -1):
        product *= factor
    return product
def rho(set):
    """Return the power set of *set* as a list of lists (empty set included).

    Bug fix: the copies of the existing subsets were previously produced by
    a generator expression; the inner ``for`` loop exhausted it, so
    ``resultSet += tmpResultSet`` appended nothing and the function always
    returned ``[[]]``.  Materializing the copies as a list fixes that.
    """
    resultSet = [[]]
    for elem in set:
        # Copy every subset found so far, then extend each copy with elem.
        tmpResultSet = [list(x) for x in resultSet]
        for newSet in tmpResultSet:
            newSet.append(elem)
        resultSet += tmpResultSet
    return resultSet
def sum_of_consecutive_squares(base, top):
    """Return base**2 + (base+1)**2 + ... + top**2 via the closed-form formula.

    NOTE(review): when base == 0 the result is one more than the plain sum of
    squares (0**2 contributes nothing); this extra +1 is preserved from the
    original -- confirm it is intentional for the problem it serves.
    """
    def pyramid(k):
        # Closed form for 1**2 + 2**2 + ... + k**2 (0 for k <= 0).
        return (k * (k + 1) * (2 * k + 1)) // 6

    extra = 1 if base == 0 else 0
    return extra + pyramid(top) - pyramid(base - 1)
def is_palindrome(x):
    """Return True if the string form of *x* reads the same forwards and backwards."""
    text = str(x)
    return text == text[::-1]
def gcd(a, b):
    """Return the greatest common divisor of a and b (always non-negative)."""
    a, b = abs(a), abs(b)
    # Euclid's algorithm; terminates with the gcd in a (0 if both inputs are 0).
    while b:
        a, b = b, a % b
    return a
def mcm(a, b):
    """Return the least common multiple of a and b (via the gcd identity)."""
    divisor = gcd(a, b)
    return a * b // divisor
def is_square(n):
    """Return True if *n* is a perfect square.

    Bug fixes: the float-based ``int(n ** 0.5)`` misclassifies large
    integers whose square root cannot be represented exactly, and raised
    TypeError for negative input (``(-k) ** 0.5`` is complex).  Exact
    integer arithmetic with math.isqrt handles both; negatives are simply
    not squares.
    """
    from math import isqrt  # local import keeps the module's imports unchanged
    if n < 0:
        return False
    return isqrt(n) ** 2 == n
def is_prime(number):
    """Trial-division primality test.

    Bug fix: the original returned True for 1 (the even/odd check alone let
    it through) and raised TypeError for negative input.  Any number below
    2 is now reported as not prime.
    """
    if number < 2:
        return False
    if number == 2:
        return True
    # Even numbers greater than 2 are composite.
    if number & 1 == 0:
        return False
    # Test odd divisors up to the square root.
    i = 3
    limit = int(number ** (0.5))
    while i <= limit:
        if (number % i) == 0:
            return False
        i += 2
    return True
def find_primes_less_than(n):
    """Sieve of Eratosthenes: return all primes p with 2 <= p < n, ascending."""
    if n <= 2:
        return []
    composite = bytearray(n)  # flat flag array instead of a set of non-primes
    primes = []
    for candidate in range(2, n):
        if composite[candidate]:
            continue
        primes.append(candidate)
        for multiple in range(candidate * 2, n, candidate):
            composite[multiple] = 1
    return primes
def phi(n):
    """Euler's totient: count of integers in 1..n that are coprime to n.

    Uses the product formula over n's prime factorization.
    """
    result = n
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            # divisor is a prime factor: scale result by (1 - 1/divisor).
            result -= result // divisor
            while n % divisor == 0:
                n //= divisor
        divisor += 1
    # Any leftover n > 1 is a final prime factor.
    if n > 1:
        result -= result // n
    return result
def is_pandigital(x):
    """True if *x* has exactly 9 digits using each of 1-9 exactly once."""
    digits = str(x)
    return len(digits) == 9 and set(digits) == set('123456789')
def is_permutation(x, y):
    """True if the decimal digits of *x* are a rearrangement of those of *y*."""
    return sorted(str(x)) == sorted(str(y))
def factors(n):
    """Return the set of positive divisors of *n* (all ints).

    Bug fix: the complementary divisor was added with true division
    (``n / x``), which produces floats under Python 3 (e.g. 12.0 instead
    of 12); floor division keeps every divisor integral.  The redundant
    re-add of the square root (already covered by the loop when x == sq)
    is dropped.
    """
    result = set()
    sq = int(n ** (0.5))
    for x in range(1, sq + 1):
        if n % x == 0:
            result.add(x)
            result.add(n // x)
    return result
def solve_cuadratic(a, b, c):
    """Real roots of a*x**2 + b*x + c == 0 as (smaller, larger), or None.

    Returns None when the discriminant is negative (no real roots).
    """
    discriminant = b ** 2 - 4 * a * c
    if discriminant < 0:
        return None
    root = sqrt(discriminant)
    denominator = 2 * a
    return ((-b - root) / denominator, (-b + root) / denominator)
def mod_pow(base, exp, mod):
    """Compute (base ** exp) % mod by binary (square-and-multiply) exponentiation.

    Note: exp <= 0 yields 1, matching the loop's behaviour for a zero exponent.
    """
    result = 1
    square = base
    while exp > 0:
        exp, bit = exp >> 1, exp & 1
        if bit:
            result = (result * square) % mod
        square = (square * square) % mod
    return result
|
sanSS/programming-contests
|
project-euler/CommonFunctions.py
|
Python
|
gpl-3.0
| 3,805
|
[
"VisIt"
] |
e9efab91ba3c0561cb1969d84d71c23a193cc217786f37fa926619a366939e92
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import espressomd
import numpy as np
class NSquare(ut.TestCase):
    """Tests for the N-square cell system's particle bookkeeping."""

    # Single simulation box shared by all tests in this case.
    S = espressomd.System(box_l=[1.0, 1.0, 1.0])

    def setUp(self):
        # Start each test from an empty system using the N-square cell
        # scheme without Verlet lists.
        self.S.part.clear()
        self.S.cell_system.set_n_square(use_verlet_lists=False)

    def test_load_balancing(self):
        # Add particles at random positions and check they are spread
        # evenly over the MPI nodes after resorting.
        n_part = 235
        n_nodes = self.S.cell_system.get_state()['n_nodes']
        n_part_avg = n_part // n_nodes
        for i in range(n_part):
            self.S.part.add(id=i, pos=np.random.random(3), type=1)
        part_dist = self.S.cell_system.resort()
        # Check that we did not lose particles
        self.assertEqual(sum(part_dist), n_part)
        # Check that the particles are evenly distributed
        for node_parts in part_dist:
            self.assertAlmostEqual(node_parts, n_part_avg, delta=2)
        # Check that we can still access all the particles
        # This basically checks if part_node and local_particles
        # are still in a valid state after the particle exchange
        self.assertEqual(sum(self.S.part[:].type), n_part)
# Allow running this test module directly, outside a pytest/CTest collection.
if __name__ == "__main__":
    ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/nsquare.py
|
Python
|
gpl-3.0
| 1,824
|
[
"ESPResSo"
] |
ba33c31b48d7be033e20671a7125bb33b96a920a0721079de5daa3a9411bf67f
|
# Count down from 5 to 1, then announce lift-off.
# Fix: the original used Python 2 print statements ("print n"), which are a
# SyntaxError under Python 3; the single-argument call form below behaves
# identically on both Python 2 and Python 3.
n = 5
while n > 0:
    print(n)
    n = n - 1
print("Blast Off!!!")
|
ashgang/RandomAlgos
|
Python/countDown.py
|
Python
|
apache-2.0
| 60
|
[
"BLAST"
] |
26b06ccee4848a532f019fd43540a4b951246c2f8a855584cd751fac6b1c4de1
|
from Bio import Alphabet
# dict_type flags for FreqTable.__init__: the input dict holds raw letter
# counts (COUNT) or ready-made relative frequencies (FREQ).
COUNT = 1
FREQ = 2
##################################################################
# A class to handle frequency tables
# Copyright Iddo Friedberg idoerg@cc.huji.ac.il
# Biopython (http://biopython.org) license applies
# Methods to read a letter frequency or a letter count file:
# Example files for a DNA alphabet:
#
# A count file (whitespace seperated):
#
# A 50
# C 37
# G 23
# T 58
#
# The same info as a frequency file:
#
# A 0.2976
# C 0.2202
# G 0.1369
# T 0.3452
#
# Functions:
# read_count(f): read a count file from stream f. Then convert to
# frequencies
# read_freq(f): read a frequency data file from stream f. Of course, we then
# don't have the counts, but it is usually the letter frquencies which are
# interesting.
#
# Methods:
# (all internal)
# Attributes:
# alphabet: The IUPAC alphabet set (or any other) whose letters you are
# using. Common sets are: IUPAC.protein (20-letter protein),
# IUPAC.unambiguous_dna (4-letter DNA). See Bio/alphabet for more.
# data: frequency dictionary.
# count: count dictionary. Empty if no counts are provided.
#
# Example of use:
# >>> from SubsMat import FreqTable
# >>> ftab = FreqTable.FreqTable(my_frequency_dictionary,FreqTable.FREQ)
# >>> ftab = FreqTable.FreqTable(my_count_dictionary,FreqTable.COUNT)
# >>> ftab = FreqTable.read_count(open('myDNACountFile'))
#
#
##################################################################
class FreqTable(dict):
    """A letter-frequency table: maps each letter to its relative frequency.

    Built from either a letter-count dictionary (dict_type == COUNT, counts
    are normalized into frequencies) or a frequency dictionary
    (dict_type == FREQ, values are used as-is).  When no alphabet is given,
    one is derived from the sorted keys of the input.

    Attributes:
        alphabet: the alphabet whose letters the table covers.
        count: the raw count dictionary (empty when built from frequencies).
    """

    def _freq_from_count(self):
        # Normalize raw counts into frequencies that sum to 1.0.
        # Bug fix: dict.iteritems() was removed in Python 3; items() works
        # on both Python 2 and Python 3.
        total = float(sum(self.count.values()))
        for letter, occurrences in self.count.items():
            self[letter] = occurrences / total

    def _alphabet_from_input(self):
        # Derive the alphabet string from the table's own keys, sorted.
        return ''.join(sorted(self))

    def __init__(self, in_dict, dict_type, alphabet=None):
        self.alphabet = alphabet
        if dict_type == COUNT:
            self.count = in_dict
            self._freq_from_count()
        elif dict_type == FREQ:
            self.count = {}
            self.update(in_dict)
        else:
            raise ValueError("bad dict_type")
        if not alphabet:
            self.alphabet = Alphabet.Alphabet()
            self.alphabet.letters = self._alphabet_from_input()
def read_count(f):
    """Read a whitespace-separated letter/count file from stream *f*.

    Each line is "<letter> <integer count>"; returns a FreqTable built from
    the counts (frequencies are derived automatically).
    """
    counts = {}
    for line in f:
        letter, amount = line.strip().split()
        counts[letter] = int(amount)
    return FreqTable(counts, COUNT)
def read_freq(f):
    """Read a whitespace-separated letter/frequency file from stream *f*.

    Each line is "<letter> <float frequency>"; returns a FreqTable with no
    count information.
    """
    frequencies = {}
    for line in f:
        letter, amount = line.strip().split()
        frequencies[letter] = float(amount)
    return FreqTable(frequencies, FREQ)
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/SubsMat/FreqTable.py
|
Python
|
gpl-2.0
| 2,610
|
[
"Biopython"
] |
e211d30ea08fd17d4583c4f409500a1ec8fc0f0b407061df7126e32fe56e8cc6
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains tools for maintaining parser context.
Maintaining parser context explicitly is significant overhead and can
be difficult in the face of changing AST structures. Certain parser
productions require various nesting or unnesting of previously parsed
nodes. Both of these operations can result in the parser context not
being correctly updated.
The tools in this module attempt to automatically maintain parser
context based on the context information found in lexer tokens. The
general approach is to infer context information by propagating known
contexts through the AST structure.
"""
from __future__ import annotations
import re
import bisect
from edb import _edgeql_rust
from edb.common import ast
from edb.common import markup
from edb.common import typeutils
NEW_LINE = re.compile(br'\r\n?|\n')
class ParserContext(markup.MarkupExceptionContext):
    """A source location: a [start, end) byte-offset span inside a named buffer.

    Line/column information is computed lazily from the offsets via
    _edgeql_rust.SourcePoint and cached in ``_points``.
    """
    title = 'Source Context'

    def __init__(self, name, buffer, start: int, end: int, document=None, *,
                 filename=None, context_lines=1):
        self.name = name
        self.buffer = buffer
        self.start = start
        self.end = end
        self.document = document
        self.filename = filename
        # How many surrounding lines to show on each side in as_markup().
        self.context_lines = context_lines
        # Lazily computed (start, end) SourcePoints; see _calc_points().
        self._points = None
        assert start is not None
        assert end is not None

    def __getstate__(self):
        # Drop the cached points when pickling; they are recomputed on demand.
        dic = self.__dict__.copy()
        dic['_points'] = None
        return dic

    def _calc_points(self):
        # start/end are byte offsets into the UTF-8 encoding of the buffer.
        self._points = _edgeql_rust.SourcePoint.from_offsets(
            self.buffer.encode('utf-8'),
            [self.start, self.end]
        )

    @property
    def start_point(self):
        # SourcePoint for the start offset (computed on first access).
        if self._points is None:
            self._calc_points()
        return self._points[0]

    @property
    def end_point(self):
        # SourcePoint for the end offset (computed on first access).
        if self._points is None:
            self._calc_points()
        return self._points[1]

    @classmethod
    @markup.serializer.no_ref_detect
    def as_markup(cls, self, *, ctx):
        """Render this context as a traceback-style markup snippet with
        the span's line plus surrounding context lines."""
        me = markup.elements
        body = []
        lines = []
        line_numbers = []
        start = self.start_point
        # Pre-compute the byte offset at which every line begins.
        buf_bytes = self.buffer.encode('utf-8')
        offset = 0
        buf_lines = []
        line_offsets = [0]
        for match in NEW_LINE.finditer(buf_bytes):
            buf_lines.append(buf_bytes[offset:match.start()].decode('utf-8'))
            offset = match.end()
            line_offsets.append(offset)
        # NOTE(review): buf_lines is populated but never used below.
        # Collect preceding context lines (skipping any before buffer start).
        # NOTE(review): with context_lines=1 this requests offset -2 rather
        # than -1 -- confirm the intended off-by-one behaviour.
        for i in range(self.context_lines + 1, 1, -1):
            try:
                ctx_line, _ = self._get_line_snippet(
                    start, offset=-i, line_offsets=line_offsets)
            except ValueError:
                pass
            else:
                lines.append(ctx_line)
                line_numbers.append(start.line - i)
        # The line containing the span itself.
        snippet, _ = self._get_line_snippet(start, line_offsets=line_offsets)
        lines.append(snippet)
        line_numbers.append(start.line)
        # Collect following context lines (skipping any past buffer end).
        for i in range(1, self.context_lines + 1):
            try:
                ctx_line, _ = self._get_line_snippet(
                    start, offset=i, line_offsets=line_offsets)
            except ValueError:
                pass
            else:
                lines.append(ctx_line)
                line_numbers.append(start.line + i)
        tbp = me.lang.TracebackPoint(
            name=self.name, filename=self.name, lineno=start.line,
            colno=start.column, lines=lines, line_numbers=line_numbers,
            context=True)
        body.append(tbp)
        return me.lang.ExceptionContext(title=self.title, body=body)

    def _find_line(self, point, offset=0, *, line_offsets):
        """Return (start, end) buffer indices of the line *offset* lines away
        from the line containing *point*.  Raises ValueError when the buffer
        has no such line."""
        len_buffer = len(self.buffer)
        if point.line == 0:
            if offset < 0:
                raise ValueError('not enough lines in buffer')
            else:
                # A zero line number means the whole buffer is one line.
                return 0, len_buffer
        # Index of the target line in line_offsets.
        line_no = bisect.bisect_right(line_offsets, point.offset) - 1 + offset
        if line_no >= len(line_offsets):
            raise ValueError('not enough lines in buffer')
        # start and end cannot be less than 0 and greater than the
        # buffer length
        try:
            linestart = min(len_buffer, max(0, line_offsets[line_no]))
        except IndexError:
            if line_no < 0:
                # Can't be negative
                linestart = 0
            else:
                # Can't be beyond the buffer's length
                linestart = len_buffer
        try:
            lineend = min(len_buffer, max(0, line_offsets[line_no + 1] - 1))
        except IndexError:
            if line_no + 1 < 0:
                # Can't be negative
                lineend = 0
            else:
                # Can't be beyond the buffer's length
                lineend = len_buffer
        return linestart, lineend

    def _get_line_snippet(
            self, point, max_length=120, *, offset=0, line_offsets):
        """Return (snippet, columns_before_point) for the line *offset* lines
        away from *point*, truncated to at most *max_length* characters
        centered around the point."""
        line_start, line_end = self._find_line(
            point, offset=offset, line_offsets=line_offsets)
        line_len = line_end - line_start
        if line_len > max_length:
            # Truncate: keep up to half the budget before the point.
            before = min(max_length // 2, point.offset - line_start)
            after = max_length - before
        else:
            before = point.offset - line_start
            after = line_len - before
        start = point.offset - before
        end = point.offset + after
        return self.buffer[start:end], before
def _get_context(items, *, reverse=False):
ctx = None
items = reversed(items) if reverse else items
# find non-empty start and end
#
for item in items:
if isinstance(item, (list, tuple)):
ctx = _get_context(item, reverse=reverse)
if ctx:
return ctx
else:
ctx = getattr(item, 'context', None)
if ctx:
return ctx
return None
def empty_context():
    """Return a dummy context that points to an empty string."""
    # Zero-length span inside an empty, placeholder-named buffer.
    return ParserContext(
        name='<empty>',
        buffer='',
        start=0,
        end=0,
    )
def get_context(*kids):
    """Build a context spanning from the first to the last child context.

    Children are searched recursively; returns None when no child carries a
    context at all.
    """
    start_ctx = _get_context(kids)
    end_ctx = _get_context(kids, reverse=True)
    if not start_ctx:
        return None
    # The start and end contexts are assumed to share one name and buffer.
    return ParserContext(
        name=start_ctx.name,
        buffer=start_ctx.buffer,
        start=start_ctx.start,
        end=end_ctx.end,
    )
def merge_context(ctxlist):
    """Merge a non-empty list of contexts into one spanning all of them.

    NOTE: sorts *ctxlist* in place as a side effect.
    """
    ctxlist.sort(key=lambda x: (x.start, x.end))
    # assume same name and buffer apply to all
    #
    return ParserContext(
        name=ctxlist[0].name,
        buffer=ctxlist[0].buffer,
        start=ctxlist[0].start,
        end=ctxlist[-1].end,
    )
def force_context(node, context):
    """Set *context* on *node*, after filling in any missing contexts in its
    descendants with the same value."""
    if hasattr(node, 'context'):
        # Propagate first so descendants without a context default to this one.
        ContextPropagator.run(node, default=context)
        node.context = context
def has_context(func):
    """Provide automatic context for Nonterm production rules.

    Decorates a production callback so that after it runs, the produced
    nonterminal (first positional argument) and its AST value carry a
    context derived from the production's children.
    """
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        # Split the nonterminal being reduced from the children of the rule.
        obj, *args = args
        if len(args) == 1:
            # apparently it's a production rule that just returns its
            # only arg, so don't need to change the context
            #
            arg = args[0]
            if getattr(arg, 'val', None) is obj.val:
                if hasattr(arg, 'context'):
                    obj.context = arg.context
                if hasattr(obj.val, 'context'):
                    obj.val.context = obj.context
                return result
        # Avoid mangling any existing context.
        if getattr(obj, 'context', None) is None:
            obj.context = get_context(*args)
        # we have the context for the nonterminal, but now we need to
        # enforce context in the obj.val, recursively, in case it was
        # a complex production with nested AST nodes
        #
        force_context(obj.val, obj.context)
        return result
    return wrapper
class ContextVisitor(ast.NodeVisitor):
    """Common base for visitors that inspect or adjust node contexts."""
    pass
class ContextPropagator(ContextVisitor):
    """Propagate context from children to root.

    It is assumed that if a node has a context, all of its children
    also have correct context. For a node that has no context, its
    context is derived as a superset of all of the contexts of its
    descendants.
    """
    def __init__(self, default=None):
        super().__init__()
        # Context assigned to nodes that have no context-bearing descendants.
        self._default = default

    def container_visit(self, node):
        # Visit every AST node (or nested container) in *node*, collecting
        # the contexts they report and flattening nested result lists.
        ctxlist = []
        for el in node:
            if isinstance(el, ast.AST) or typeutils.is_container(el):
                ctx = self.visit(el)
                if isinstance(ctx, list):
                    ctxlist.extend(ctx)
                else:
                    ctxlist.append(ctx)
        return ctxlist

    def generic_visit(self, node):
        # base case: we already have context
        #
        if getattr(node, 'context', None) is not None:
            return node.context
        # we need to derive context based on the children
        #
        ctxlist = self.container_visit(v[1] for v in ast.iter_fields(node))
        # now that we have all of the children contexts, let's merge
        # them into one
        #
        if ctxlist:
            node.context = merge_context(ctxlist)
        else:
            node.context = self._default
        return node.context
class ContextValidator(ContextVisitor):
    """Assert that every node in the visited tree carries a context."""
    def generic_visit(self, node):
        if getattr(node, 'context', None) is None:
            raise RuntimeError('node {} has no context'.format(node))
        # Continue checking the node's children.
        super().generic_visit(node)
|
edgedb/edgedb
|
edb/common/context.py
|
Python
|
apache-2.0
| 10,229
|
[
"VisIt"
] |
e0d9b625337cad8fa3f6cafea643afb5670b2d18b3d012d9c670d970de62ae88
|
import pytest
from .utils import *
import psi4
import numpy as np
pytestmark = pytest.mark.skip
ref = {
"conv": {
"ene": {
"d": -100.01941126909895,
"t": -100.05801143109962,
"q": -100.06768524807164,
"5": -100.0704303543983,
"6": -100.07073832974459,
"7": -100.07079540040381,
},
"grd": {
"d": np.array([[0, 0, -1.97897507e-02], [0, 0, 1.97897507e-02]]),
"t": np.array([[0, 0, -2.44719827e-02], [0, 0, 2.44719827e-02]]),
"q": np.array([[0, 0, -2.56632476e-02], [0, 0, 2.56632476e-02]]),
"5": np.array([[0, 0, -2.55690860e-02], [0, 0, 2.55690860e-02]]),
"6": np.array([[0, 0, -2.55202901e-02], [0, 0, 2.55202901e-02]]),
"7": np.array([[0, 0, -2.54893228e-02], [0, 0, 2.54893228e-02]]),
},
"hss": {
"d": np.array(
[
[1.14201580e-02, 2.88636997e-16, 3.97687335e-16, -1.14201580e-02, -2.78228656e-16, -3.97687335e-16],
[2.88636997e-16, 1.14201580e-02, -5.22281113e-16, -3.80143660e-16, -1.14201580e-02, 5.22281113e-16],
[3.97687335e-16, -5.22281113e-16, 6.37133401e-01, -3.97687335e-16, 5.22281113e-16, -6.37133401e-01],
[-1.14201580e-02, -3.80143660e-16, -3.97687335e-16, 1.14201580e-02, 3.58459616e-16, 3.97687335e-16],
[-2.78228656e-16, -1.14201580e-02, 5.22281113e-16, 3.58459616e-16, 1.14201580e-02, -5.22281113e-16],
[-3.97687335e-16, 5.22281113e-16, -6.37133401e-01, 3.97687335e-16, -5.22281113e-16, 6.37133401e-01],
]
),
"t": np.array(
[
[1.41221543e-02, 3.43562420e-15, -5.56983751e-16, -1.41221543e-02, -8.82111243e-16, 5.56983751e-16],
[3.43562420e-15, 1.41221543e-02, -5.72652355e-16, -4.26135257e-15, -1.41221543e-02, 5.72652355e-16],
[-5.56983751e-16, -5.72652355e-16, 6.33543175e-01, 5.56983751e-16, 5.72652355e-16, -6.33543175e-01],
[-1.41221543e-02, -4.26135257e-15, 5.56983751e-16, 1.41221543e-02, 1.60375621e-15, -5.56983751e-16],
[-8.82111243e-16, -1.41221543e-02, 5.72652355e-16, 1.60375621e-15, 1.41221543e-02, -5.72652355e-16],
[5.56983751e-16, 5.72652355e-16, -6.33543175e-01, -5.56983751e-16, -5.72652355e-16, 6.33543175e-01],
]
),
"q": np.array(
[
[1.48096027e-02, -5.90539645e-15, 9.84881270e-16, -1.48096027e-02, 4.75007061e-15, -9.84881270e-16],
[-5.90539645e-15, 1.48096027e-02, 9.45937039e-17, 1.76222627e-15, -1.48096027e-02, -9.45937039e-17],
[9.84881270e-16, 9.45937039e-17, 6.27190954e-01, -9.84881270e-16, -9.45937039e-17, -6.27190954e-01],
[-1.48096027e-02, 1.76222627e-15, -9.84881270e-16, 1.48096027e-02, -7.22703890e-15, 9.84881270e-16],
[4.75007061e-15, -1.48096027e-02, -9.45937039e-17, -7.22703890e-15, 1.48096027e-02, 9.45937039e-17],
[-9.84881270e-16, -9.45937039e-17, -6.27190954e-01, 9.84881270e-16, 9.45937039e-17, 6.27190954e-01],
]
),
"5": np.array(
[
[1.47552645e-02, 2.85244420e-13, 4.57266232e-16, -1.47552645e-02, -4.56798164e-13, -4.57266234e-16],
[2.85244420e-13, 1.47552645e-02, 4.63692795e-16, -2.69798442e-13, -1.47552645e-02, -4.63692795e-16],
[4.57266232e-16, 4.63692795e-16, 6.26827336e-01, -4.57266233e-16, -4.63692795e-16, -6.26827336e-01],
[
-1.47552645e-02,
-2.69798442e-13,
-4.57266233e-16,
1.47552645e-02,
-1.26964042e-13,
4.57266233e-16,
],
[
-4.56798164e-13,
-1.47552645e-02,
-4.63692795e-16,
-1.26964042e-13,
1.47552645e-02,
4.63692795e-16,
],
[-4.57266234e-16, -4.63692795e-16, -6.26827336e-01, 4.57266233e-16, 4.63692795e-16, 6.26827336e-01],
]
),
"6": np.array(
[
[
1.47271054e-02,
-3.46663981e-13,
-6.58652686e-17,
-1.47271054e-02,
-4.04281331e-13,
6.58652675e-17,
],
[-3.46663981e-13, 1.47271054e-02, 9.55744800e-16, 1.12365491e-14, -1.47271054e-02, -9.55744797e-16],
[-6.58652686e-17, 9.55744800e-16, 6.26683611e-01, 6.58652679e-17, -9.55744800e-16, -6.26683610e-01],
[-1.47271054e-02, 1.12365491e-14, 6.58652679e-17, 1.47271054e-02, 5.85830351e-14, -6.58652754e-17],
[-4.04281331e-13, -1.47271054e-02, -9.55744800e-16, 5.85830351e-14, 1.47271054e-02, 9.55744798e-16],
[6.58652675e-17, -9.55744797e-16, -6.26683610e-01, -6.58652754e-17, 9.55744798e-16, 6.26683610e-01],
]
),
# "7": np.zeros(36).reshape((6, 6)),
},
},
"df": {
"ene": {
"d": -100.01940060570712,
"t": -100.05800433992121,
"q": -100.06768368288756,
"5": -100.0704283042375,
"6/5": -100.07073665835942,
"6": -100.07073587640389,
"7/6": -100.07079295732615,
# "7": 0.0,
},
"grd": {
"d": np.array([[0, 0, -1.97887695e-02], [0, 0, 1.97887695e-02]]),
"t": np.array([[0, 0, -2.44674251e-02], [0, 0, 2.44674251e-02]]),
"q": np.array([[0, 0, -2.56623539e-02], [0, 0, 2.56623539e-02]]),
"5": np.array([[0, 0, -2.55665605e-02], [0, 0, 2.55665605e-02]]),
"6/5": np.array([[0, 0, -2.55170642e-02], [0, 0, 2.55170642e-02]]),
"6": np.array([[0, 0, -2.55207994e-02], [0, 0, 2.55207994e-02]]),
"7/6": np.array([[0, 0, -2.54898158e-02], [0, 0, 2.54898158e-02]]),
# "7": np.zeros(6).reshape(2, 3),
},
"hss": {
"d": np.array(
[
[1.14195918e-02, -8.20455290e-15, 8.48376579e-16, -1.14195918e-02, 1.01425643e-14, -8.48376580e-16],
[-8.20455290e-15, 1.14195918e-02, 7.87137296e-16, 8.63378854e-15, -1.14195918e-02, -7.87137294e-16],
[8.48376579e-16, 7.87137296e-16, 6.37098987e-01, -8.48376579e-16, -7.87137296e-16, -6.37098987e-01],
[-1.14195918e-02, 8.63378854e-15, -8.48376579e-16, 1.14195918e-02, -8.52027257e-15, 8.48376579e-16],
[1.01425643e-14, -1.14195918e-02, -7.87137296e-16, -8.52027257e-15, 1.14195918e-02, 7.87137295e-16],
[-8.48376580e-16, -7.87137294e-16, -6.37098987e-01, 8.48376579e-16, 7.87137295e-16, 6.37098987e-01],
]
),
"t": np.array(
[
[1.41195242e-02, -1.44279825e-13, -2.62663252e-16, -1.41195242e-02, 2.52178758e-13, 2.62663246e-16],
[-1.44279825e-13, 1.41195242e-02, 7.25775059e-16, 1.88123226e-13, -1.41195242e-02, -7.25775063e-16],
[-2.62663252e-16, 7.25775059e-16, 6.33561555e-01, 2.62663253e-16, -7.25775060e-16, -6.33561555e-01],
[-1.41195242e-02, 1.88123226e-13, 2.62663253e-16, 1.41195242e-02, -3.52532511e-13, -2.62663248e-16],
[2.52178758e-13, -1.41195242e-02, -7.25775060e-16, -3.52532511e-13, 1.41195242e-02, 7.25775063e-16],
[2.62663246e-16, -7.25775063e-16, -6.33561555e-01, -2.62663248e-16, 7.25775063e-16, 6.33561555e-01],
]
),
"q": np.array(
[
[1.48090869e-02, 2.05969460e-14, -1.49618888e-15, -1.48090869e-02, 3.81831832e-14, 1.49618913e-15],
[2.05969460e-14, 1.48090870e-02, -9.64983006e-16, 1.26240566e-13, -1.48090869e-02, 9.64982950e-16],
[-1.49618888e-15, -9.64983006e-16, 6.27200845e-01, 1.49618893e-15, 9.64982952e-16, -6.27200845e-01],
[-1.48090869e-02, 1.26240566e-13, 1.49618893e-15, 1.48090869e-02, -8.38158432e-14, -1.49618905e-15],
[3.81831832e-14, -1.48090869e-02, 9.64982952e-16, -8.38158432e-14, 1.48090869e-02, -9.64982956e-16],
[1.49618913e-15, 9.64982950e-16, -6.27200845e-01, -1.49618905e-15, -9.64982956e-16, 6.27200845e-01],
]
),
"5": np.array(
[
[1.47538071e-02, 5.47383498e-13, -1.15209254e-15, -1.47538071e-02, -2.32833029e-13, 1.15209256e-15],
[5.47383498e-13, 1.47538071e-02, 4.09315798e-16, -5.13303988e-13, -1.47538071e-02, -4.09315765e-16],
[-1.15209254e-15, 4.09315798e-16, 6.26827395e-01, 1.15209262e-15, -4.09316413e-16, -6.26827395e-01],
[-1.47538071e-02, -5.13303988e-13, 1.15209262e-15, 1.47538071e-02, 4.11866032e-13, -1.15209258e-15],
[-2.32833029e-13, -1.47538071e-02, -4.09316413e-16, 4.11866032e-13, 1.47538071e-02, 4.09315929e-16],
[1.15209256e-15, -4.09315765e-16, -6.26827395e-01, -1.15209258e-15, 4.09315929e-16, 6.26827395e-01],
]
),
"6/5": np.array(
[
[1.47252438e-02, 5.92051339e-14, -1.14461969e-15, -1.47252438e-02, 5.68396132e-14, 1.14461967e-15],
[5.92051339e-14, 1.47252438e-02, 6.43834765e-16, -9.29323460e-14, -1.47252437e-02, -6.43834148e-16],
[-1.14461969e-15, 6.43834765e-16, 6.26683723e-01, 1.14461973e-15, -6.43834668e-16, -6.26683723e-01],
[-1.47252438e-02, -9.29323460e-14, 1.14461973e-15, 1.47252438e-02, 3.73113650e-14, -1.14461967e-15],
[5.68396132e-14, -1.47252437e-02, -6.43834668e-16, 3.73113650e-14, 1.47252438e-02, 6.43834161e-16],
[1.14461967e-15, -6.43834148e-16, -6.26683723e-01, -1.14461967e-15, 6.43834161e-16, 6.26683723e-01],
]
),
# "6": np.zeros(36).reshape((6, 6)),
# "7/6": np.zeros(36).reshape((6, 6)),
# "7": np.zeros(36).reshape((6, 6)),
},
},
}
# hand-adjust zetas -- what to pass/xfail/fail is NOT read from Libint2 config
# * leaving at dtq5 so running pytest from source doesn't catch hours-long 7z
# * @pytest.mark.parametrize("zeta", ["d", "t", "q", "5", "6/5", "6", "7/6", "7"])
@pytest.mark.parametrize("zeta", ["d", "t", "q", "5"])
@pytest.mark.parametrize("scftype", ["conv", "df"])
@pytest.mark.parametrize("der", ["ene", "grd", "hss"])
def test_zeta(scftype, zeta, der, request):
    """Compare HF results for hydrogen fluoride against the module-level
    ``ref`` table across basis zeta levels, SCF types, and derivative levels.

    Parametrization (stacked decorators):
      scftype -- "conv" (direct SCF) or "df" (density-fitted SCF)
      zeta    -- basis cardinality label; "6/5" pairs a 6-zeta orbital basis
                 with a 5-zeta-quality auxiliary basis
      der     -- derivative level: "ene", "grd" (gradient), "hss" (Hessian)
    """
    # Skip combinations that have no stored reference value in ``ref``.
    if zeta not in ref[scftype][der]:
        pytest.skip()
    # Hydrogen fluoride at a fixed geometry (bohr), C1 symmetry.
    hf = psi4.geometry(
        """
        H 0. 0. -1.64558411
        F 0. 0. 0.08729475
        symmetry c1
        units bohr
        """
    )
    # Map the zeta label to an orbital basis set name.  Note ``zeta in
    # "dtq56"`` is a substring test, valid only for single-char labels.
    if zeta in "dtq56":
        basis = f"cc-pv{zeta}z"
    elif zeta == "6/5":
        basis = "cc-pv6z"
    elif zeta in ["7/6", "7"]:
        basis = "7zapa-nr"
    psi4.set_options(
        {
            # "e_convergence": 10,
            # "d_convergence": 9,
            "scf_type": {
                "df": "df",
                "conv": "direct",
            }[scftype],
            "basis": basis,
        }
    )
    # Select an auxiliary (JK-fit/RI) basis where one is required/overridden.
    df_basis_scf = None
    if scftype == "df":
        if zeta in "dtq5":
            df_basis_scf = f"cc-pv{zeta}z-jkfit"
        elif zeta == "6/5":
            df_basis_scf = "cc-pv5z-jkfit"
        elif zeta in ["6", "7/6"]:
            df_basis_scf = "cc-pv6z-ri"
    elif scftype == "conv":
        # * only used for preiterations
        # * 7zapa gets aug-cc-pv6z-ri by default
        if zeta == "6":
            df_basis_scf = "cc-pv6z-ri"
    if df_basis_scf:
        psi4.set_options({"df_basis_scf": df_basis_scf})
    # Hessian runs are handed the stored reference gradient via the
    # ``ref_gradient`` kwarg; other derivative levels take no extras.
    if der == "hss":
        kwargs = {"ref_gradient": psi4.core.Matrix.from_array(ref[scftype]["grd"][zeta])}
    else:
        kwargs = {}
    ans, wfn = {"ene": psi4.energy, "grd": psi4.gradient, "hss": psi4.hessian,}[
        der
    ]("hf", return_wfn=True, **kwargs)
    if isinstance(ans, float):
        print(ans)
    else:
        print(ans.np)
    assert compare_values(ref[scftype][der][zeta], ans, 6, f"Hartree--Fock {scftype} {der} {zeta}-zeta")
|
lothian/psi4
|
tests/pytests/test_zeta.py
|
Python
|
lgpl-3.0
| 12,752
|
[
"Psi4"
] |
fe3c95912a55ed1cac489ad134e0991a2c66339f0b5a045cbdca5cd6a7881bb7
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module define the various drones used to assimilate data.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 18, 2012"
import abc
import os
import re
import glob
import logging
import fnmatch
import json
import six
from six.moves import zip
from monty.io import zopen
from pymatgen.io.vasp.inputs import Incar, Potcar, Poscar
from pymatgen.io.vasp.outputs import Vasprun, Oszicar, Dynmat
from pymatgen.io.gaussian import GaussianOutput
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from monty.json import MSONable
logger = logging.getLogger(__name__)
class AbstractDrone(six.with_metaclass(abc.ABCMeta, MSONable)):
    """
    Abstract drone class that defines the various methods that must be
    implemented by drones. Because of the quirky nature of Python's
    multiprocessing, the intermediate data representations have to be in the
    form of python primitives. So all objects that drones work with must be
    MSONable. All drones must also implement the standard MSONable as_dict()
    and from_dict API.
    """

    @abc.abstractmethod
    def assimilate(self, path):
        """
        Assimilate data in a directory path into a pymatgen object. Because of
        the quirky nature of Python's multiprocessing, the object must support
        pymatgen's as_dict() for parallel processing.

        Args:
            path: directory path

        Returns:
            An assimilated object
        """
        return

    @abc.abstractmethod
    def get_valid_paths(self, path):
        """
        Checks if path contains valid data for assimilation, and then returns
        the valid paths. The paths returned can be a list of directory or file
        paths, depending on what kind of data you are assimilating. For
        example, if you are assimilating VASP runs, you are only interested in
        directories containing vasprun.xml files. On the other hand, if you are
        interested converting all POSCARs in a directory tree to cifs for
        example, you will want the file paths.

        Args:
            path: input path as a tuple generated from os.walk, i.e.,
                (parent, subdirs, files).

        Returns:
            List of valid dir/file paths for assimilation
        """
        return
class VaspToComputedEntryDrone(AbstractDrone):
    """
    VaspToEntryDrone assimilates directories containing vasp output to
    ComputedEntry/ComputedStructureEntry objects. There are some restrictions
    on the valid directory structures:

    1. There can be only one vasp run in each directory.
    2. Directories designated "relax1", "relax2" are considered to be 2 parts
       of an aflow style run, and only "relax2" is parsed.
    3. The drone parses only the vasprun.xml file.

    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries.
        parameters (list): Input parameters to include. It has to be one of
            the properties supported by the Vasprun object. See
            :class:`pymatgen.io.vasp.Vasprun`. If parameters is None,
            a default set of parameters that are necessary for typical
            post-processing will be set.
        data (list): Output data to include. Has to be one of the properties
            supported by the Vasprun object.
    """

    def __init__(self, inc_structure=False, parameters=None, data=None):
        self._inc_structure = inc_structure
        # These parameters are always recorded; user-supplied ones are added
        # on top of this baseline set.
        self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
                            "potcar_symbols", "run_type"}
        if parameters:
            self._parameters.update(parameters)
        self._data = data if data else []

    def assimilate(self, path):
        """
        Parse the vasprun.xml under ``path`` into a ComputedEntry (or
        ComputedStructureEntry if inc_structure was set). Returns None when
        no file can be parsed.
        """
        files = os.listdir(path)
        if "relax1" in files and "relax2" in files:
            # aflow-style double relaxation: only relax2 holds final results.
            filepath = glob.glob(os.path.join(path, "relax2",
                                              "vasprun.xml*"))[0]
        else:
            vasprun_files = glob.glob(os.path.join(path, "vasprun.xml*"))
            filepath = None
            if len(vasprun_files) == 1:
                filepath = vasprun_files[0]
            elif len(vasprun_files) > 1:
                # Selection when several candidates exist: scan in order and
                # take the first file that is plainly named
                # vasprun.xml(.gz|.bz2) or whose path mentions "relax2";
                # otherwise the last candidate scanned wins.
                for fname in vasprun_files:
                    if os.path.basename(fname) in ["vasprun.xml",
                                                   "vasprun.xml.gz",
                                                   "vasprun.xml.bz2"]:
                        filepath = fname
                        break
                    if re.search("relax2", fname):
                        filepath = fname
                        break
                    filepath = fname
        try:
            vasprun = Vasprun(filepath)
        except Exception as ex:
            # Best-effort: an unparseable run is skipped, not fatal.
            logger.debug("error in {}: {}".format(filepath, ex))
            return None
        entry = vasprun.get_computed_entry(self._inc_structure,
                                           parameters=self._parameters,
                                           data=self._data)
        # Attach transformation history if a transformations.json* exists.
        entry.parameters["history"] = _get_transformation_history(path)
        return entry

    def get_valid_paths(self, path):
        """
        A directory is valid if it holds an aflow-style relax1/relax2 pair,
        or directly contains a vasprun.xml* (or a POSCAR* + OSZICAR* pair)
        while not itself being a relax1/relax2 subdirectory.
        """
        (parent, subdirs, files) = path
        if "relax1" in subdirs and "relax2" in subdirs:
            return [parent]
        if (not parent.endswith("/relax1")) and \
           (not parent.endswith("/relax2")) and (
               len(glob.glob(os.path.join(parent, "vasprun.xml*"))) > 0 or (
                   len(glob.glob(os.path.join(parent, "POSCAR*"))) > 0 and
                   len(glob.glob(os.path.join(parent, "OSZICAR*"))) > 0)
           ):
            return [parent]
        return []

    def __str__(self):
        # NOTE(review): the leading space looks unintentional --
        # SimpleVaspToComputedEntryDrone.__str__ has none. Kept as-is.
        return " VaspToComputedEntryDrone"

    def as_dict(self):
        return {"init_args": {"inc_structure": self._inc_structure,
                              "parameters": self._parameters,
                              "data": self._data},
                "version": __version__,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        # Alternate constructor: round-trips the as_dict() representation.
        return cls(**d["init_args"])
class SimpleVaspToComputedEntryDrone(VaspToComputedEntryDrone):
    """
    A simpler VaspToComputedEntryDrone. Instead of parsing vasprun.xml, it
    parses only the INCAR, POTCAR, OSZICAR and KPOINTS files, which are much
    smaller and faster to parse. However, much fewer properties are available
    compared to the standard VaspToComputedEntryDrone.

    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries. Structure will be parsed from the CONTCAR.
    """

    def __init__(self, inc_structure=False):
        self._inc_structure = inc_structure
        # Restricted parameter set: only what INCAR/POTCAR can provide.
        self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
                            "run_type"}

    def assimilate(self, path):
        """
        Assimilate a directory of VASP files into a ComputedEntry (or
        ComputedStructureEntry if inc_structure was set). Returns None on
        any parsing failure.
        """
        files = os.listdir(path)
        try:
            files_to_parse = {}
            if "relax1" in files and "relax2" in files:
                # Custodian/aflow double relaxation: inputs come from relax1,
                # final outputs from relax2.
                for filename in ("INCAR", "POTCAR", "POSCAR"):
                    search_str = os.path.join(path, "relax1", filename + "*")
                    files_to_parse[filename] = glob.glob(search_str)[0]
                for filename in ("CONTCAR", "OSZICAR"):
                    search_str = os.path.join(path, "relax2", filename + "*")
                    files_to_parse[filename] = glob.glob(search_str)[-1]
            else:
                for filename in (
                    "INCAR", "POTCAR", "CONTCAR", "OSZICAR", "POSCAR", "DYNMAT"
                ):
                    # BUGFIX: renamed from `files`, which shadowed the outer
                    # os.listdir result.
                    found = glob.glob(os.path.join(path, filename + "*"))
                    if len(found) < 1:
                        continue
                    if len(found) == 1 or filename == "INCAR" or \
                       filename == "POTCAR" or filename == "DYNMAT":
                        files_to_parse[filename] = found[-1] \
                            if filename == "POTCAR" else found[0]
                    else:
                        # Multiple candidates (multi-step runs). Prefer, in
                        # scan order: a plainly named file (optionally .gz or
                        # .bz2), then a relax1 POSCAR / relax2 CONTCAR or
                        # OSZICAR from a custodian double relaxation, else
                        # fall back to the last file scanned.
                        for fname in found:
                            # BUGFIX: the original passed the regex-style
                            # pattern "{}(\.gz|\.bz2)*" to fnmatch, which
                            # only understands glob wildcards, so it never
                            # matched. Check the expected names explicitly.
                            if os.path.basename(fname) in (
                                    filename,
                                    filename + ".gz",
                                    filename + ".bz2"):
                                files_to_parse[filename] = fname
                                break
                            # BUGFIX: the original compared the full path
                            # (fname) against bare names like "POSCAR",
                            # which could never be equal; the file *type*
                            # (filename) is what must be compared.
                            if filename == "POSCAR" and \
                                    re.search("relax1", fname):
                                files_to_parse[filename] = fname
                                break
                            if (filename in ("CONTCAR", "OSZICAR") and
                                    re.search("relax2", fname)):
                                files_to_parse[filename] = fname
                                break
                            files_to_parse[filename] = fname

            poscar, contcar, incar, potcar, oszicar, dynmat = [None] * 6
            if 'POSCAR' in files_to_parse:
                poscar = Poscar.from_file(files_to_parse["POSCAR"])
            if 'CONTCAR' in files_to_parse:
                contcar = Poscar.from_file(files_to_parse["CONTCAR"])
            if 'INCAR' in files_to_parse:
                incar = Incar.from_file(files_to_parse["INCAR"])
            if 'POTCAR' in files_to_parse:
                potcar = Potcar.from_file(files_to_parse["POTCAR"])
            if 'OSZICAR' in files_to_parse:
                oszicar = Oszicar(files_to_parse["OSZICAR"])
            if 'DYNMAT' in files_to_parse:
                dynmat = Dynmat(files_to_parse["DYNMAT"])

            # Derive Hubbard-U bookkeeping from INCAR/POSCAR when present.
            param = {"hubbards": {}}
            if poscar is not None and incar is not None and "LDAUU" in incar:
                param["hubbards"] = dict(zip(poscar.site_symbols,
                                             incar["LDAUU"]))
            param["is_hubbard"] = (
                incar.get("LDAU", False) and sum(param["hubbards"].values()) > 0
            ) if incar is not None else False
            param["run_type"] = None
            if incar is not None:
                param["run_type"] = "GGA+U" if param["is_hubbard"] else "GGA"
            param["history"] = _get_transformation_history(path)
            param["potcar_spec"] = potcar.spec if potcar is not None else None

            # Sentinel energy 1e10 flags a missing/unparseable OSZICAR.
            energy = oszicar.final_energy if oszicar is not None else 1e10
            structure = contcar.structure if contcar is not None \
                else poscar.structure
            initial_vol = poscar.structure.volume if poscar is not None else \
                None
            final_vol = contcar.structure.volume if contcar is not None else \
                None
            delta_volume = None
            if initial_vol is not None and final_vol is not None:
                delta_volume = (final_vol / initial_vol - 1)
            data = {"filename": path, "delta_volume": delta_volume}
            if dynmat is not None:
                data['phonon_frequencies'] = dynmat.get_phonon_frequencies()
            if self._inc_structure:
                entry = ComputedStructureEntry(
                    structure, energy, parameters=param, data=data
                )
            else:
                entry = ComputedEntry(
                    structure.composition, energy, parameters=param, data=data
                )
            return entry
        except Exception as ex:
            # Best-effort: log and skip directories that cannot be parsed.
            logger.debug("error in {}: {}".format(path, ex))
            return None

    def __str__(self):
        return "SimpleVaspToComputedEntryDrone"

    def as_dict(self):
        return {"init_args": {"inc_structure": self._inc_structure},
                "version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        return cls(**d["init_args"])
class GaussianToComputedEntryDrone(AbstractDrone):
    """
    Assimilates directories containing Gaussian output into
    ComputedEntry/ComputedStructureEntry objects. By default, files with a
    ".log" extension are treated as Gaussian output.

    Args:
        inc_structure (bool): If True, ComputedStructureEntries are produced
            instead of ComputedEntries.
        parameters (list): Extra input parameters to record. Each must be a
            property of the GaussianOutput object (see
            :class:`pymatgen.io.gaussianio GaussianOutput`) and a python
            primitive (list, dict of strings and integers). A default set is
            always included.
        data (list): Extra output data to record, under the same constraints
            as ``parameters``. A default set is always included.
        file_extensions (list):
            File extensions to be considered as Gaussian output files.
            Defaults to just the typical "log" extension.

    .. note::

        Like the GaussianOutput class, this is still in early beta.
    """

    def __init__(self, inc_structure=False, parameters=None, data=None,
                 file_extensions=(".log",)):
        self._inc_structure = inc_structure
        self._parameters = {"functional", "basis_set", "charge", "spin_mult",
                            "route"}
        self._parameters.update(parameters or ())
        self._data = {"stationary_type", "properly_terminated"}
        self._data.update(data or ())
        self._file_extensions = file_extensions

    def assimilate(self, path):
        """Parse one Gaussian output file into an entry; None on failure."""
        try:
            gaurun = GaussianOutput(path)
        except Exception as ex:
            logger.debug("error in {}: {}".format(path, ex))
            return None
        # Collect the requested attributes straight off the parsed run.
        param = {p: getattr(gaurun, p) for p in self._parameters}
        data = {d: getattr(gaurun, d) for d in self._data}
        if self._inc_structure:
            return ComputedStructureEntry(gaurun.final_structure,
                                          gaurun.final_energy,
                                          parameters=param,
                                          data=data)
        return ComputedEntry(gaurun.final_structure.composition,
                             gaurun.final_energy, parameters=param,
                             data=data)

    def get_valid_paths(self, path):
        """Return file paths in ``path`` with a recognized extension."""
        parent, subdirs, files = path
        valid = []
        for fname in files:
            if os.path.splitext(fname)[1] in self._file_extensions:
                valid.append(os.path.join(parent, fname))
        return valid

    def __str__(self):
        return " GaussianToComputedEntryDrone"

    def as_dict(self):
        d = {"init_args": {"inc_structure": self._inc_structure,
                           "parameters": self._parameters,
                           "data": self._data,
                           "file_extensions": self._file_extensions}}
        d["version"] = __version__
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    @classmethod
    def from_dict(cls, d):
        return cls(**d["init_args"])
def _get_transformation_history(path):
"""
Checks for a transformations.json* file and returns the history.
"""
trans_json = glob.glob(os.path.join(path, "transformations.json*"))
if trans_json:
try:
with zopen(trans_json[0]) as f:
return json.load(f)["history"]
except:
return None
return None
|
aykol/pymatgen
|
pymatgen/apps/borg/hive.py
|
Python
|
mit
| 17,214
|
[
"Gaussian",
"VASP",
"pymatgen"
] |
ebe45df5561ac6519718f372041d7b1fcd4f7e97040815de6cf828aea10106f8
|
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of The Regents of the University of California
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE
# UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
# ----------------------------------------------------------------------
# Filename: Phy.py
# Version: 0.1
# Description: Defines the methods available to a single physical interface (PHY),
# more specifically a type of memory (i.e. DDR, QDR, etc)
# Author: Dustin Richmond
# TODOs:
# - OCT Pins - A Phy's OCT pin is in the board file, and should be used by the TCL Scripts
# - Naming
# - One oct per memory system (I think)
# - Macros to enable OCT pins (Sometimes OCT pins can be used by multiple IPs)
# - QSys/TCL - Naming changed because types are now upper case
import xml.etree.ElementTree as ET
import Tinker, Memory
import abc, sys
from IP import parse_list, parse_string, parse_int, parse_id, parse_float, parse_macros, IP
class Phy(Memory.Memory):
    """
    A single physical memory interface (PHY), i.e. one instance of a type of
    memory (DDR, QDR, etc). Handles parsing and validation of the board
    description, configuration of user-settable values (width, ratio, burst,
    role), and generation of the XML interface/pin elements.
    """
    # Legal burst lengths and the default burst (overridden by subclasses).
    _C_BURST_WIDTHS = []
    _C_BURST_DEFAULT = 0
    _C_MAX_DATA_BUS_WIDTH = 2048
    # Inclusive (min, max) ranges enforced by the check_* classmethods below.
    _C_FPHY_MHZ_RANGE = (1, 2000)
    _C_FPGA_MHZ_RANGE = (1, 300)
    _C_FREF_MHZ_RANGE = (1, 500)
    _C_DQ_PIN_RANGE = (0, 128)
    _C_POW2_DQ_PIN_RANGE = (0, 128)
    _C_BANDWIDTH_BS_RANGE = (0, 1 << 36)
    _C_SIZE_RANGE = (0, 1 << 32)
    # NOTE(review): _C_RATE, _C_CLOCK_RATIOS, and _C_ROLES are referenced
    # below but not defined here -- presumably inherited from Memory.Memory
    # or a subclass; confirm against those definitions.

    def __init__(self, e):
        super(Phy, self).__init__(e)

    @classmethod
    def validate(cls, d):
        """
        Validate the parameters that describe the intrinsic settings of
        this IP

        Arguments:

          d -- A Description object, containing the parsed user description
               of a custom board
        """
        super(Phy, cls).validate(d)
        cls.check_size(d)
        cls.check_fphy_frequency(d)
        cls.check_fref_frequency(d)
        cls.check_dq_pins(d)
        cls.check_pow_2_dq_pins(d)
        cls.check_bandwidth_bs(d)
        return

    @classmethod
    def parse(cls, e):
        """Parse a PHY description from XML element ``e`` into a dict."""
        d = super(Phy, cls).parse(e)
        i = parse_id(e)
        fref_mhz = parse_float(e, "fref_mhz")
        fphy_mhz = parse_float(e, "fphy_mhz")
        dq_pins = parse_int(e, "dq_pins")
        macros = parse_macros(e)
        d["id"] = i
        d["fphy_mhz"] = fphy_mhz
        d["fref_mhz"] = fref_mhz
        d["dq_pins"] = dq_pins
        d["macros"] = macros
        group = cls.parse_grouping(e)
        group.remove(i)  # a PHY is not grouped with itself
        octs = cls.parse_oct_pins(e)
        d["group"] = group
        d["oct_pins"] = octs
        # Round the DQ pin count up to a power of two; bandwidth in bytes/s
        # is fphy (Hz) * rate * pins / 8.
        pow2_dq_pins = int(2 ** Tinker.clog2(dq_pins))
        d["pow2_dq_pins"] = pow2_dq_pins
        d["bandwidth_bs"] = int((d["fphy_mhz"] * 10**6 * cls._C_RATE * pow2_dq_pins) / 8)
        return d

    def configure(self, d):
        """
        Configure this object according to a high level description,
        fill in any missing defaults, and verify that the description
        can be implemented

        Arguments:

          d -- A Description object, containing the parsed user description
               of a custom board
        """
        super(Phy, self).configure(d)
        # Invariant (see check_width): width == pow2_dq_pins * rate * ratio.
        # All branches below must agree with that identity.
        if ("ratio" not in d and "width" not in d):
            self["ratio"] = self.get_default_ratio()
            self["width"] = int(self["pow2_dq_pins"] * ratio2int(self["ratio"])) * self._C_RATE
        elif ("ratio" not in d and "width" in d):
            # BUGFIX: the derived ratio must divide out the _C_RATE factor as
            # well (the original omitted it, yielding ratio*rate). Also store
            # the user-supplied width, which the original never did, so that
            # verify()/check_width() can see it.
            self["ratio"] = int2ratio(d["width"] // (self["pow2_dq_pins"] * self._C_RATE))
            self["width"] = d["width"]
        elif ("ratio" in d and "width" not in d):
            # BUGFIX: was a bitwise AND (&) instead of multiplication (*);
            # compare with the identical derivation in the first branch.
            self["width"] = int(self["pow2_dq_pins"] * ratio2int(d["ratio"])) * self._C_RATE
            self["ratio"] = d["ratio"]
        else:
            self["width"] = d["width"]
            self["ratio"] = d["ratio"]
        self["fpga_mhz"] = self["fphy_mhz"] / float(ratio2int(self["ratio"]))
        if ("burst" not in d):
            self["burst"] = self.get_default_burst()
        else:
            self["burst"] = d["burst"]
        if ("role" not in d):
            Tinker.key_error("role", Tinker.tostr_dict(d))
        else:
            self["role"] = d["role"]
        # Non-primary PHYs share resources with a master PHY and must name it.
        if (d["role"] != "primary"):
            if ("master" not in d):
                Tinker.key_error("master", Tinker.tostr_dict(d))
            else:
                self["master"] = d["master"]

    def verify(self):
        """
        Verify that this object can implement the high level description
        it was configured with; raises (via Tinker error helpers) on any
        inconsistency.
        """
        super(Phy, self).verify()
        self.check_base_address(self)
        #self.check_oct_pin(self) # TODO:
        self.check_role(self)
        self.check_ratio(self)
        self.check_width(self)
        self.check_burst(self)
        self.check_fphy_frequency(self)
        self.check_fref_frequency(self)
        self.check_fpga_frequency(self)
        if (self["role"] != "primary"):
            self.check_master(self)

    def __get_name(self):
        # e.g. "ddr3_a": the type and id come from the board description.
        return self["type"] + "_" + self["id"]

    def __get_phy_interface(self, sid, version, verbose):
        # TODO: Change naming in TCL files
        n = self.__get_name()
        e = ET.Element("interface", attrib={"name": n, "internal": "tinker." + n,
                                            "type": "conduit", "dir": "end"})
        return e

    def __get_pll_interface(self, sid, version, verbose):
        n = self.__get_name()
        e = ET.Element("interface", attrib={"name": n + "_mem_pll_ref",
                                            "internal": "tinker." + n + "_pll_ref",
                                            "type": "conduit", "dir": "end"})
        return e

    def __get_oct_interface(self, sid, version, verbose):
        # TODO: Change naming in TCL files
        # TODO: OCT Pin macros
        n = self.__get_name()
        e = ET.Element("interface", attrib={"name": n + "_mem_oct",  # TODO: Oct pin name
                                            "internal": "tinker." + n + "_oct",
                                            "type": "conduit", "dir": "end"})
        return e

    def get_pin_elements(self, sid, version, verbose):
        """Return the XML pin-interface elements appropriate for this role."""
        # TODO: Change naming in TCL files
        l = []
        l.append(self.__get_phy_interface(sid, version, verbose))
        r = self["role"]
        # Primary/independent PHYs own their PLL reference; only the primary
        # owns the OCT pin (secondaries share both -- see the verbose
        # "shared" attribute below).
        if (r == "primary" or r == "independent"):
            l.append(self.__get_pll_interface(sid, version, verbose))
        if (r == "primary"):
            l.append(self.__get_oct_interface(sid, version, verbose))
        return l

    def __get_interface_element(self, sid, version, verbose):
        e = ET.Element("interface")
        e.set("name", "tinker")
        e.set("type", "slave")
        e.set("address", str(hex(self["base_address"])))
        e.set("width", str(self["width"]))
        e.set("maxburst", str(self["burst"]))
        e.set("latency", str(self["latency"]))
        return e

    def get_interface_element(self, sid, version, verbose):
        """Verify this PHY, then emit its XML interface element."""
        self.verify()
        e = self.__get_interface_element(sid, version, verbose)
        if (verbose):
            self.__set_verbose_interface_element(e)
        return e

    def __set_verbose_interface_element(self, e):
        # Annotate the element with role/ratio/frequency details and which
        # resources (pll/dll/oct) are shared with a master PHY.
        e.set("id", self["id"])
        e.set("ratio", self["ratio"])
        e.set("role", self["role"])
        e.set("mem_frequency_mhz", str(int(self["fphy_mhz"])))
        e.set("ref_frequency_mhz", str(int(self["fref_mhz"])))
        if (self["role"] == "secondary"):
            e.set("shared", "pll,dll,oct")
            e.set("primary", self["master"])
        elif (self["role"] == "independent"):
            #e.set("shared",self["oct_pin"])
            e.set("shared", "oct")
            e.set("primary", self["master"])
        else:
            e.set("shared", "")

    @classmethod
    def get_default_burst(cls):
        return cls._C_BURST_DEFAULT

    @classmethod
    def get_default_ratio(cls):
        # Prefer the slowest fabric clock ratio this PHY type supports.
        if ("Quarter" in cls._C_CLOCK_RATIOS):
            return "Quarter"
        if ("Half" in cls._C_CLOCK_RATIOS):
            return "Half"
        return "Full"

    @classmethod
    def check_ratio(cls, d):
        ratio = d.get("ratio")
        if (ratio is None):
            Tinker.key_error("ratio", Tinker.tostr_dict(d))
        if (ratio not in cls._C_CLOCK_RATIOS):
            Tinker.value_error_xml("ratio", ratio, str(list(cls._C_CLOCK_RATIOS)),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_role(cls, d):
        role = d.get("role")
        roles = d.get("roles")
        if (role is None):
            Tinker.key_error("role", Tinker.tostr_dict(d))
        if (roles is None):
            Tinker.key_error("roles", Tinker.tostr_dict(d))
        if (role not in cls._C_ROLES):
            Tinker.value_error_xml("role", role, str(list(cls._C_ROLES)),
                                   Tinker.tostr_dict(d))
        if (role not in roles):
            Tinker.value_error_map("role", role, str(roles),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_oct_pin(cls, d):
        # NOTE: this check is currently disabled in verify() (see TODO there).
        oct_pin = d.get("oct_pin")
        oct_pins = d.get("oct_pins")
        if (oct_pin is None):
            Tinker.key_error("oct_pin", Tinker.tostr_dict(d))
        # BUGFIX: the original re-tested oct_pin here instead of oct_pins.
        if (oct_pins is None):
            Tinker.key_error("oct_pins", Tinker.tostr_dict(d))
        # BUGFIX: the original referenced `self["oct_pins"]` and `role`,
        # neither of which exists in this classmethod.
        if (oct_pin not in oct_pins):
            Tinker.value_error_map("oct_pin", oct_pin, str(oct_pins),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_base_address(cls, d):
        cls.check_size(d)
        sz = d.get("size")
        base = d.get("base_address")
        if (base is None):
            Tinker.key_error("base_address", Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(base, 0, (2 ** 64) - sz)):
            Tinker.value_error_map("base_address", str(base),
                                   "Range(0x%x, 0x%x)" % (0, (2**64) - sz),
                                   Tinker.tostr_dict(d))
        # Base addresses must be naturally aligned to the memory size.
        if ((base % sz) != 0):
            Tinker.value_error_map("base_address", str(base),
                                   "Multiples of 0x%x (Size)" % sz,
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_width(cls, d):
        cls.check_pow_2_dq_pins(d)
        cls.check_ratio(d)
        p2dqp = d["pow2_dq_pins"]
        r = d["ratio"]
        # The only legal width given the pin count, rate, and clock ratio.
        rw = int(p2dqp * cls._C_RATE * ratio2int(r))
        width = d.get("width")
        if (width is None):
            Tinker.key_error("width", Tinker.tostr_dict(d))
        if (not Tinker.is_pow_2(width)):
            Tinker.value_error_map("width", str(width),
                                   "Integer powers of 2",
                                   Tinker.tostr_dict(d))
        if (width % p2dqp != 0):
            Tinker.value_error_map("width", str(width),
                                   "Multiple of 0x%x (Pow2 DQ Width)" % p2dqp,
                                   Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(width, 0, cls._C_MAX_DATA_BUS_WIDTH)):
            Tinker.value_error_map("width", str(width),
                                   str(list(cls._C_MAX_DATA_BUS_WIDTH)),
                                   Tinker.tostr_dict(d))
        if (rw != width):
            # BUGFIX: the original referenced an undefined name `w` here.
            Tinker.value_error_map("width", str(width), str(rw),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_burst(cls, d):
        burst = d.get("burst")
        if (burst is None):
            Tinker.key_error("burst", Tinker.tostr_dict(d))
        if (not Tinker.is_pow_2(burst)):
            Tinker.value_error_map("burst", str(burst),
                                   "Integer powers of 2",
                                   Tinker.tostr_dict(d))
        if (burst not in cls._C_BURST_WIDTHS):
            Tinker.value_error_map("burst", str(burst),
                                   str(list(cls._C_BURST_WIDTHS)),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_fpga_frequency(cls, d):
        fpga = d.get("fpga_mhz")
        fpga_min = cls._C_FPGA_MHZ_RANGE[0]
        fpga_max = cls._C_FPGA_MHZ_RANGE[1]
        if (fpga is None):
            Tinker.key_error("fpga_mhz", Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(fpga, fpga_min, fpga_max)):
            Tinker.value_error_map("fpga_mhz", str(fpga),
                                   "Range(0x%x, 0x%x)" % (fpga_min, fpga_max),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_fphy_frequency(cls, d):
        fphy = d.get("fphy_mhz")
        fphy_min = cls._C_FPHY_MHZ_RANGE[0]
        fphy_max = cls._C_FPHY_MHZ_RANGE[1]
        if (fphy is None):
            Tinker.key_error("fphy_mhz", Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(fphy, fphy_min, fphy_max)):
            Tinker.value_error_map("fphy_mhz", str(fphy),
                                   "Range(0x%x, 0x%x)" % (fphy_min, fphy_max),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_fref_frequency(cls, d):
        fref = d.get("fref_mhz")
        fref_min = cls._C_FREF_MHZ_RANGE[0]
        fref_max = cls._C_FREF_MHZ_RANGE[1]
        if (fref is None):
            Tinker.key_error("fref_mhz", Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(fref, fref_min, fref_max)):
            Tinker.value_error_map("fref_mhz", str(fref),
                                   "Range(0x%x, 0x%x)" % (fref_min, fref_max),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_dq_pins(cls, d):
        p = d.get("dq_pins")
        dq_min = cls._C_DQ_PIN_RANGE[0]
        dq_max = cls._C_DQ_PIN_RANGE[1]
        if (p is None):
            Tinker.key_error("dq_pins", Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(p, dq_min, dq_max)):
            Tinker.value_error_map("dq_pins", str(p),
                                   "Range(0x%x, 0x%x)" % (dq_min, dq_max),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_pow_2_dq_pins(cls, d):
        p = d.get("pow2_dq_pins")
        pdq_min = cls._C_POW2_DQ_PIN_RANGE[0]
        pdq_max = cls._C_POW2_DQ_PIN_RANGE[1]
        if (p is None):
            Tinker.key_error("pow2_dq_pins", Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(p, pdq_min, pdq_max)):
            Tinker.value_error_map("pow2_dq_pins", str(p),
                                   "Range(0x%x, 0x%x)" % (pdq_min, pdq_max),
                                   Tinker.tostr_dict(d))
        if (not Tinker.is_pow_2(p)):
            Tinker.value_error_map("pow2_dq_pins", str(p),
                                   "Integer powers of 2",
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_bandwidth_bs(cls, d):
        bw = d.get("bandwidth_bs")
        bw_min = cls._C_BANDWIDTH_BS_RANGE[0]
        bw_max = cls._C_BANDWIDTH_BS_RANGE[1]
        if (bw is None):
            Tinker.key_error("bandwidth_bs", Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(bw, bw_min, bw_max)):
            Tinker.value_error_map("bandwidth_bs", str(hex(bw)),
                                   "Range(0x%x, 0x%x)" % (bw_min, bw_max),
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_size(cls, d):
        sz = d.get("size")
        sz_min = cls._C_SIZE_RANGE[0]
        sz_max = cls._C_SIZE_RANGE[1]
        if (sz is None):
            Tinker.key_error("size", Tinker.tostr_dict(d))
        if (not Tinker.is_in_range(sz, sz_min, sz_max)):
            Tinker.value_error_map("size", str(hex(sz)),
                                   "Range(0x%x, 0x%x)" % (sz_min, sz_max),
                                   Tinker.tostr_dict(d))
        if (not Tinker.is_pow_2(sz)):
            # BUGFIX: the error was mislabeled "pow2_dq_pins"; this check
            # validates "size".
            Tinker.value_error_map("size", str(sz),
                                   "Integer powers of 2",
                                   Tinker.tostr_dict(d))

    @classmethod
    def check_master(cls, d):
        # BUGFIX: the original error path referenced the undefined names
        # `e`/`ET.tostring(e)` and shadowed the builtin `id`. Report via the
        # description dict like every other check, and guard against a
        # missing key before calling .isalpha().
        master = d.get("master")
        if (master is None):
            Tinker.key_error("master", Tinker.tostr_dict(d))
        if (not master.isalpha()):
            Tinker.value_error_map("master", master, "Alphabetic strings",
                                   Tinker.tostr_dict(d))

    def set_burst(self, b):
        self["burst"] = b
        self.check_burst(self)

    def set_ratio(self, r):
        self["ratio"] = r
        self.check_ratio(self)

    def set_width(self, w):
        self["width"] = w
        self.check_width(self)

    def set_base_address(self, b):
        self["base_address"] = b
        self.check_base_address(self)

    def get_macros(self):
        return self["macros"]

    @classmethod
    def parse_oct_pins(cls, e):
        pins = parse_list(e, "oct_pins")
        for p in pins:
            if (not Tinker.is_valid_verilog_name(p)):
                Tinker.value_error_xml("oct_pins", p, "Valid Verilog Names",
                                       ET.tostring(e))
        return pins

    @classmethod
    def parse_grouping(cls, e):
        ids = parse_list(e, "grouping")
        for i in ids:
            if (not Tinker.is_id(i)):
                Tinker.value_error_xml("grouping", i, "Alphabetic Characters",
                                       ET.tostring(e))
        return ids
def int2ratio(i):
    """Map a clock-ratio divisor (1, 2, 4) to its name; "Invalid" otherwise."""
    return {1: "Full", 2: "Half", 4: "Quarter"}.get(i, "Invalid")
def ratio2int(ratio):
    """Map a clock-ratio name to its divisor; 0 for unrecognized names."""
    return {"Full": 1, "Half": 2, "Quarter": 4}.get(ratio, 0)
|
drichmond/tinker
|
python/Phy.py
|
Python
|
bsd-3-clause
| 19,038
|
[
"TINKER"
] |
4a995e399f643c0f5d211d455ed42ff9384168e680cc10d4d65d8f4510f1aa95
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-04 09:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the clients app.

    Creates Person (base table), Client and Dependent (multi-table
    inheritance from Person), Visit, plus Historical* audit copies of
    Client, Dependent and Visit.

    NOTE(review): the Historical* tables (history_id / history_date /
    history_type / history_user columns) match the schema produced by
    django-simple-history -- presumably auto-generated; avoid hand-editing
    this migration.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Audit copy of Client: 'id' is a plain indexed IntegerField (not a
        # PK); each history row gets its own history_id primary key.
        migrations.CreateModel(
            name='HistoricalClient',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('first_name', models.CharField(max_length=64)),
                ('last_name', models.CharField(max_length=64)),
                ('dob', models.DateField(blank=True, help_text='Date of Birth', null=True)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True)),
                ('address', models.CharField(blank=True, help_text='Street Address', max_length=128, null=True)),
                ('city', models.CharField(blank=True, max_length=64, null=True)),
                ('status', models.CharField(choices=[('NEW', 'New client'), ('TMP', 'Temp ID issued'), ('APP', 'Application submitted'), ('REG', 'Registered')], max_length=64, null=True)),
                ('newId', models.CharField(blank=True, max_length=32, null=True)),
                ('oldId', models.CharField(blank=True, max_length=32, null=True)),
                ('tempId', models.CharField(blank=True, max_length=32, null=True)),
                # The live model uses an ImageField; the audit copy stores
                # the file path as text.
                ('photo', models.TextField(max_length=100, null=True)),
                ('reg_notes', models.TextField(blank=True, default='', help_text='Registration notes', null=True)),
                ('notes', models.TextField(blank=True, default='', help_text='General notes', null=True)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical client',
                'get_latest_by': 'history_date',
            },
        ),
        # Audit copy of Dependent.
        migrations.CreateModel(
            name='HistoricalDependent',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('first_name', models.CharField(max_length=64)),
                ('last_name', models.CharField(max_length=64)),
                ('dob', models.DateField(blank=True, help_text='Date of Birth', null=True)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical dependent',
                'get_latest_by': 'history_date',
            },
        ),
        # Audit copy of Visit.
        migrations.CreateModel(
            name='HistoricalVisit',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('visited_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical visit',
                'get_latest_by': 'history_date',
            },
        ),
        # Base table shared by Client and Dependent via multi-table
        # inheritance.
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=64)),
                ('last_name', models.CharField(max_length=64)),
                ('dob', models.DateField(blank=True, help_text='Date of Birth', null=True)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Visit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('visited_at', models.DateTimeField(default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ['-visited_at', 'client'],
            },
        ),
        # Client subclasses Person: person_ptr is both the PK and the
        # parent link.
        migrations.CreateModel(
            name='Client',
            fields=[
                ('person_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='clients.Person')),
                ('address', models.CharField(blank=True, help_text='Street Address', max_length=128, null=True)),
                ('city', models.CharField(blank=True, max_length=64, null=True)),
                ('status', models.CharField(choices=[('NEW', 'New client'), ('TMP', 'Temp ID issued'), ('APP', 'Application submitted'), ('REG', 'Registered')], max_length=64, null=True)),
                ('newId', models.CharField(blank=True, max_length=32, null=True)),
                ('oldId', models.CharField(blank=True, max_length=32, null=True)),
                ('tempId', models.CharField(blank=True, max_length=32, null=True)),
                ('photo', models.ImageField(null=True, upload_to='')),
                ('reg_notes', models.TextField(blank=True, default='', help_text='Registration notes', null=True)),
                ('notes', models.TextField(blank=True, default='', help_text='General notes', null=True)),
            ],
            bases=('clients.person',),
        ),
        # Dependent also subclasses Person and belongs to a Client.
        migrations.CreateModel(
            name='Dependent',
            fields=[
                ('person_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='clients.Person')),
                ('dependent_on', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='dependents', to='clients.Client')),
            ],
            bases=('clients.person',),
        ),
        # Foreign keys added after the target models exist.  The historical
        # FKs use DO_NOTHING and db_constraint=False so audit rows survive
        # deletion of the referenced rows.
        migrations.AddField(
            model_name='historicaldependent',
            name='person_ptr',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='clients.Person'),
        ),
        migrations.AddField(
            model_name='historicalclient',
            name='person_ptr',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='clients.Person'),
        ),
        migrations.AddField(
            model_name='visit',
            name='client',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='visits', to='clients.Client'),
        ),
        migrations.AddField(
            model_name='visit',
            name='picked_up_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pickups', to='clients.Client'),
        ),
        migrations.AddField(
            model_name='historicalvisit',
            name='client',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='clients.Client'),
        ),
        migrations.AddField(
            model_name='historicalvisit',
            name='picked_up_by',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='clients.Client'),
        ),
        migrations.AddField(
            model_name='historicaldependent',
            name='dependent_on',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='clients.Client'),
        ),
    ]
|
tomp/food_pantry
|
clients/migrations/0001_initial.py
|
Python
|
mit
| 9,562
|
[
"VisIt"
] |
5d8b78f46b5cdb9e7824a4297f9c0cb29e7a52407b43dffd1fd7dfc67ad78ff6
|
import urllib,urllib2,re,cookielib,string, urlparse,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin,urlresolver
from t0mm0.common.net import Net as net
from t0mm0.common.addon import Addon
from resources.libs import main
from decimal import Decimal
import time
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
from resources.universal import playbackengine, watchhistory
# Addon bootstrap: module-level handles shared by every function below.
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art                      # base path for menu artwork
datapath = addon.get_profile()      # per-user profile dir (subtitles cache)
wh = watchhistory.WatchHistory('plugin.video.movie25')
def MAIN():
    """Build the top-level SominalTv menu of category directories."""
    main.GA("Plugin","SominalTv")
    main.addDir('Search','xoxe',624,art+'/search.png')
    base = 'http://www.sominaltvfilms.com/category/'
    # (label, category slug, icon name) for each language/category entry.
    entries = (
        ('Hindi', 'hindi-movies', 'hindi'),
        ('Telugu', 'telugu', 'telugu'),
        ('Tamil', 'tamil', 'tamil'),
        ('Malayalam', 'malayalam', 'malayalam'),
        ('Punjabi', 'punjabi', 'punjabi'),
        ('BluRay', 'bluray', 'bluray'),
        ('All English Subtitled Movies', 'english-subtitled', 'subtitled'),
        ('All Hindi Dubbed Movies', 'hindi-dubbed', 'dubbed'),
    )
    for label, slug, icon in entries:
        main.addDir(label, base + slug, 620, art + '/' + icon + '.png')
def AtoZ(url):
    """List alphabetical index entries ('0-9', then 'A'..'Z') for *url*."""
    base = 'http://www.sominaltvfilms.com/search/label/'
    suffix = '?&max-results=15'
    main.addDir('0-9', base + '%23' + url + suffix, 620, art + '/09.png')
    for letter in string.ascii_uppercase:
        main.addDir(letter, base + letter + url + suffix, 620,
                    art + '/' + letter.lower() + '.png')
    main.GA("Watchseries","A-Z")
    main.VIEWSB()
def SEARCH():
    """Prompt the user for a query and list matching movies from the site."""
    keyb = xbmc.Keyboard('', 'Search Movies')
    keyb.doModal()
    if (keyb.isConfirmed()):
        search = keyb.getText()
        encode=urllib.quote(search)
        surl='http://www.sominaltvfilms.com/?s='+encode
        link=main.OPENURL(surl)
        # Flatten the page to one line so the single regex below can span
        # tags.  NOTE(review): the last replace target looks like it was
        # '&nbsp;' before this file was copied -- confirm against upstream.
        link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
        # One match per result: (thumbnail, detail URL, title, synopsis).
        match=re.compile('<img width=".+?" height=".+?" src="(.+?)" class=".+?" alt=".+?".+?<h1 class=".+?"><a class=".+?" href="(.+?)" title=".+?">(.+?)</a></h1>.+?<div class="excerpt-wrapper"><div class="excerpt"><p>(.+?)</p>').findall(link)
        for thumb,url,name,desc in match:
            # Strip the inline styling markup the site wraps around the
            # synopsis text.
            desc=desc.replace('</div><div class="separator" style="clear: both; text-align: left;">','').replace('<span class="Apple-style-span" style="background-color: white; color: #333333; font-family: Verdana, Arial, sans-serif; font-size: 13px; line-height: 18px;">','').replace('</div><div class="separator" style="clear: both; text-align: justify;">','').replace('</div><div class="separator" style="clear: both; text-align: center;">','').replace('</span>','').replace('<span>','').replace('</div><div class="separator" style="clear: both; text-align: justify;"><span class="Apple-style-span" style="background-color: white; color: #333333; font-family: Verdana, Arial, sans-serif; font-size: 13px; line-height: 18px;">','')
            desc=desc.replace('<br>','').replace('</br>','').replace('</div>','').replace('<div>','')
            main.addDirM(name,url,621,thumb,desc,thumb,'','','')
def LIST(mname,murl):
    """List the movies of category page *murl*, with a progress dialog.

    For some top-level categories a couple of related sub-categories are
    prepended before the scraped movie entries.
    """
    main.GA("SominalTv","List")
    if mname=='Hindi':
        main.addDir('Hindi English Subtitled','http://www.sominaltvfilms.com/category/hindi-movies-english-subtitles',620,art+'/subtitled.png')
        main.addDir('Hindi BluRay','http://www.sominaltvfilms.com/category/hindi-blurays',620,art+'/bluray.png')
    elif mname=='Telugu':
        main.addDir('Telugu English Subtitled','http://www.sominaltvfilms.com/category/telugu-movies-english-subtitles',620,art+'/subtitled.png')
        main.addDir('Telugu BluRay','http://www.sominaltvfilms.com/category/telugu-blurays',620,art+'/bluray.png')
    elif mname=='Tamil':
        main.addDir('Tamil English Subtitled','http://www.sominaltvfilms.com/category/tamil-movies-english-subtitles',620,art+'/subtitled.png')
        main.addDir('Tamil BluRay','http://www.sominaltvfilms.com/category/tamil-blurays',620,art+'/bluray.png')
    elif mname=='Malayalam':
        main.addDir('Malayalam English Subtitled','http://www.sominaltvfilms.com/category/malayalam-movies-english-subtitles',620,art+'/subtitled.png')
    elif mname=='Punjabi':
        main.addDir('Punjabi English Subtitled','http://www.sominaltvfilms.com/category/punjabi-movies-english-subtitles',620,art+'/subtitled.png')
    elif mname=='All Hindi Dubbed Movies':
        main.addDir('Dubbed BluRay','http://www.sominaltvfilms.com/category/hindi-dubbed-blurays',620,art+'/bluray.png')
    link=main.OPENURL(murl)
    # Flatten the page so the regex can span tags.  NOTE(review): the last
    # replace target was probably '&nbsp;' before this file was copied.
    link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
    # One match per movie: (detail URL, thumbnail, title, synopsis).
    match=re.compile("""<div class='inner'><figure><a href="([^<]+)"><img src="(.+?)" alt="(.+?)"/>.+?<div class='description'><div class='date'>.+?<p>(.+?)</p>""").findall(link)
    dialogWait = xbmcgui.DialogProgress()
    ret = dialogWait.create('Please wait until Show list is cached.')
    totalLinks = len(match)
    loadedLinks = 0
    remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
    dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display)
    for url,thumb,name,desc in match:
        desc=desc.replace(' ','')
        name=main.unescapes(name)
        main.addDirM(name,url,621,thumb,desc,thumb,'','','')
        loadedLinks = loadedLinks + 1
        percent = (loadedLinks * 100)/totalLinks
        remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
        dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
        # Abort listing (and return False) if the user cancels the dialog.
        if (dialogWait.iscanceled()):
            return False
    dialogWait.close()
    del dialogWait
    # Follow-up page link, if the category is paginated.
    paginate = re.compile("""<a class="nextpostslink" href="(.+?)">.+?</a>""").findall(link)
    if len(paginate)>0:
        main.addDir('Next',paginate[0],620,art+'/next2.png')
    main.VIEWS()
def LINK(mname,murl,thumb,fan,desc):
    """List the playable host links found on the movie page *murl*.

    Each adf.ly-wrapped link becomes a playable item; if more than one
    part is found a "Play All" entry carrying the stringified parts list
    is appended (consumed by LINK2).
    """
    parts=[]
    link=main.OPENURL(murl)
    link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
    # Primary layout: plain adf.ly anchor links.
    match= re.compile('<a href="http://adf.ly/377117/(.+?)".+?target="_blank.+?>(.+?)</a>').findall(link)
    if len(match)==0:
        # Alternate layout: styled button links.
        match= re.compile('<a class="btn btn-custom btn-medium btn-red btn-red " target=".+?" href="http://adf.ly/377117/(.+?)"><span style=".+?"><strong>(.+?)</strong>').findall(link)
    b=1
    for url,name in match:
        # Strip the inline span/style markup around the link label.
        name=name.replace('</b>','').replace('<b>','').replace('<span style="font-size: x-large;">','').replace('<span id="goog_1857978069"></span><span id="goog_1857978070"></span>','').replace('<span style="font-family: Verdana, sans-serif; font-size: x-large;">','').replace('<span style="font-family: Verdana, sans-serif; font-size: large;">','').replace('<span>','').replace('</span>','')
        # Ensure the target has a scheme.
        http= re.compile('http://').findall(url)
        if len(http)==0:
            url='http://'+url
        # Prefix the movie title when the label is just "Part N".
        if re.findall('part',name[0:4],re.I):
            name=mname+' '+name
        main.addPlayc(name,url,622,thumb,desc,fan,'','','')
        parts.append(('Part '+str(b),url))
        b=b+1
    if parts and len(parts)>1:
        main.addPlayc(mname+' [COLOR blue]Play All[/COLOR]',str(parts),622,thumb,desc,fan,'','','')
def unescapes(text):
    """Undo common HTML entities and percent-escapes in *text*.

    Returns *text* unchanged when it is empty or None.

    NOTE(review): the replacement table was partially entity-decoded when
    this file was copied (a bare triple-quote even broke the syntax); the
    HTML entities below ('&amp;', '&lt;', '&quot;', ...) are restored from
    context -- confirm against the upstream plugin source.
    """
    if text:
        # Applied in insertion order; earlier entries run first.
        rep = {"\u003d": "=", "\u0026": "&", "u003d": "=", "u0026": "&",
               "%26": "&", "&amp;": "&", "&#38;": "&",
               "\n": "", "\t": "", "\r": "", "%5B": "[", "%5D": "]",
               "%3a": ":", "%3A": ":", "%2f": "/", "%2F": "/",
               "%3f": "?", "%3F": "?", "%3d": "=", "%3D": "=",
               "%2C": ",", "%2c": ",", "%3C": "<", "%20": " ",
               "%22": '"', "%3E": ">", "%3B": ",", "%27": "'",
               "%0D": "", "%0A": "", "%92": "'",
               "&lt;": "<", "&gt;": ">", "&quot;": '"',
               "&#8217;": "'", "&#180;": "'"}
        for s, r in rep.items():
            text = text.replace(s, r)
    return text
def getvideo2(murl, answer='', mname='', thumb=''):
    """Resolve the movie page at *murl* to a direct stream URL.

    Tries, in order: a dekode()-obfuscated jwplayer 'file:' entry, an
    embedded Google-Docs style iframe (builds a quality menu and converts
    any timed-text subtitles to an SRT file in the profile dir), and a
    YouTube embed fallback that plays immediately.

    answer -- 'x11g' selects the first quality without prompting (used by
              LINK2 when queueing playlists); anything else shows a dialog.
    mname, thumb -- title/thumbnail for the YouTube fallback.  BUG FIX:
              these were previously undefined free names in that branch
              (NameError); optional defaults keep call sites compatible.

    NOTE(review): indentation was lost when this file was copied; the
    nesting below is reconstructed -- confirm against upstream.
    """
    link2 = main.OPENURL(murl)
    linkx = dekode(link2)
    stream_url2 = re.compile('file: "(.+?)"').findall(linkx)
    if stream_url2:
        # Simple case: an obfuscated jwplayer 'file:' entry.
        return stream_url2[0]
    namelist = []
    urllist = []
    SRT = os.path.join(datapath, 'Sub.srt')
    # Flatten the page; drop the Facebook iframe so it cannot shadow the
    # video iframe in the search below.
    link2 = link2.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace('iframe src="//www.facebook.com','')
    docUrl = re.compile('iframe src="(.+?)"').findall(link2)
    if len(docUrl) == 0:
        link3 = dekode(link2)
        try:
            docUrl = re.compile('iframe src="(.+?)"').findall(link3)
        except:
            # dekode() returned None: fall back to a YouTube embed.
            youtube = re.compile('<iframe width=".+?" height=".+?" src="http://www.youtube.com/embed/(.+?)" scrolling=".+?"').findall(link2)
            stream_url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid="+youtube[0]+"&hd=1"
            # play with bookmark
            player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
            #WatchHistory
            if selfAddon.getSetting("whistory") == "true":
                wh.add_item(mname+' '+'[COLOR green]SominalFilms[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
            player.KeepAlive()
            # BUG FIX: the original returned the undefined name `ok`.
            return True
    if docUrl:
        xbmc.executebuiltin("XBMC.Notification(Please Wait!,Collecting Links,3000)")
        link2 = main.OPENURL(docUrl[0])
        link2 = link2.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace('\/','/').replace('\\','')
        link2 = unescapes(link2)
        # Google-Docs style stream map with per-quality URLs.
        match = re.compile('url_encoded_fmt_stream_map":"(.+?),"').findall(link2)[0]
        if match:
            subtitle_url_start = re.compile("\"ttsurl\":\"(.+?)\"").findall(link2)
            print(unescapes(str(subtitle_url_start[0])))
            v_add = re.compile("id=(.+?)&").findall(subtitle_url_start[0])
            if v_add:
                print(v_add)
                subtitle_url_start = subtitle_url_start[0] + '&v=' + v_add[0]
                subtitle_url_start = subtitle_url_start + '&name&lang=en&hl=en&format=1&type=track&kind'
                print("Subtitle File=" + str(subtitle_url_start))
                # Convert the timed-text XML into an SRT file saved in the
                # addon profile folder.
                try:
                    link = main.OPENURL(subtitle_url_start)
                    link = link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace('<text start="0">','')
                except:
                    link = ''
                submatch = re.compile('<text start="(.+?)" dur="(.+?)">(.+?)</text>').findall(link)
                if submatch:
                    i = 1
                    for start, dur, text in submatch:
                        # Strip entities/markup from the caption text.
                        # NOTE(review): several patterns below appear to have
                        # been HTML-entity-decoded when this file was copied;
                        # '&#39;' is restored (the bare ''' broke the syntax)
                        # -- confirm the rest against the upstream source.
                        text = text.replace('&#39;', "'").replace('"','"').replace('&',"&").replace("'","'").replace('<i>','').replace("#8211;","-").replace('</i>','').replace("’","'").replace('&quot;','"').replace('×','').replace('&','').replace('‘','').replace('–','').replace('“','').replace('”','').replace('—','')
                        # SRT end time = start + duration.
                        dur = Decimal(start) + Decimal(dur)
                        dur = str(dur)
                        # Split fractional seconds for HH:MM:SS,MS formatting.
                        if (float(start) % 1 != 0):
                            start1 = start.split('.')[0]
                            start2 = start.split('.')[1]
                        else:
                            start1 = start
                            start2 = 0
                        start = time.strftime('%H:%M:%S', time.gmtime(float(start1)))
                        if (float(dur) % 1 != 0):
                            dur1 = dur.split('.')[0]
                            dur2 = dur.split('.')[1]
                        else:
                            dur1 = dur
                            dur2 = 0
                        dur = time.strftime('%H:%M:%S', time.gmtime(float(dur1)))
                        # Append this cue to the SRT file.
                        open(SRT, 'a').write("""
""" + str(i) + """
""" + str(start) + "," + str(start2) + " --> " + str(dur) + "," + str(dur2) + """
""" + text + """
""")
                        i = i + 1
            # Build the quality menu from the stream map.
            streams_map = str(match)
            print(streams_map)
            stream = re.compile('url=(.+?)&type=.+?&quality=(.+?)[,\"]{1}').findall(streams_map)
            for stream_url, stream_quality in stream:
                stream_url = main.unescapes(stream_url)
                urllist.append(stream_url)
                stream_qlty = stream_quality.upper()
                if (stream_qlty == 'HD720'):
                    stream_qlty = 'HD-720p'
                elif (stream_qlty == 'LARGE'):
                    stream_qlty = 'SD-480p'
                elif (stream_qlty == 'MEDIUM'):
                    stream_qlty = 'SD-360p'
                namelist.append(stream_qlty)
            dialog = xbmcgui.Dialog()
            if answer == 'x11g':
                # Playlist mode: take the first quality without prompting.
                answer = '0'
            else:
                answer = dialog.select("Quality Select", namelist)
            return urllist[int(answer)]
def LINK2(mname,murl,thumb,desc):
    """Play the link(s) in *murl*, with metadata and optional subtitles.

    *murl* is either a single page URL or a stringified list of
    (part-name, url) tuples produced by LINK()'s "Play All" entry.

    NOTE(review): indentation was lost when this file was copied; the
    nesting below is reconstructed -- confirm against upstream.
    """
    # Remove any subtitle file left over from a previous playback.
    SRT=os.path.join(datapath,'Sub.srt')
    if os.path.exists(SRT):
        os.remove(SRT)
    ok=True
    infoLabels =main.GETMETAT(mname,'','',thumb)
    video_type='movie'
    season=''
    episode=''
    img=infoLabels['cover_url']
    fanart =infoLabels['backdrop_url']
    imdb_id=infoLabels['imdb_id']
    infolabels = { 'supports_meta' : 'true', 'video_type':video_type, 'name':str(infoLabels['title']), 'imdb_id':str(infoLabels['imdb_id']), 'season':str(season), 'episode':str(episode), 'year':str(infoLabels['year']) }
    main.GA("SominalTv","Watched")
    if murl:
        if "'," in murl:
            # Multi-part movie: murl is a stringified list of parts.
            print murl
            mname=main.removeColoredText(mname)
            pl=xbmc.PlayList(1);pl.clear()
            # NOTE(review): eval() on scraped text -- only acceptable here
            # because murl is produced by LINK() above; never feed external
            # data into this path.
            playlist = sorted(list(set(eval(murl))), key=lambda playlist: playlist[0])
            for xname,link in playlist:
                pl.add(getvideo2(link,answer='x11g'),xbmcgui.ListItem(mname+' '+xname,thumbnailImage=img))
            xbmc.Player().play(pl)
            xbmc.Player().setSubtitles(SRT)
            # Block until playback finishes, then record watch history.
            while xbmc.Player().isPlaying():
                xbmc.sleep(2500)
            if selfAddon.getSetting("whistory") == "true":
                wh.add_item(mname+' '+'[COLOR green]SominalFilms[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
        else:
            # Single link: resolve it, then hand off to the playback engine.
            stream_url2=getvideo2(murl)
            xbmc.executebuiltin("XBMC.Notification(Please Wait!,Opening Link,3000)")
            infoL={'Title': infoLabels['title'], 'Plot': infoLabels['plot'], 'Genre': infoLabels['genre']}
            # play with bookmark
            player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url2, addon_id=addon_id, video_type=video_type, title=str(infoLabels['title']),season=str(season), episode=str(episode), year=str(infoLabels['year']),img=img,infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id=imdb_id)
            player.setSubtitles(SRT)#inserts Srt file from profile folder
            #WatchHistory
            if selfAddon.getSetting("whistory") == "true":
                wh.add_item(mname+' '+'[COLOR green]SominalFilms[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
            player.KeepAlive()
            return ok
    else:
        xbmc.executebuiltin("XBMC.Notification(Sorry!,Protected Link,5000)")
def _enk_dec_num(kode, enc):
if re.search('fromCharCode', enc):
x = ''
for nbr in kode.split():
x += chr(int(nbr) - 3)
return x
else:
return None
def _enk_dec_swap(kode, enc):
if re.search('charAt', enc) and not re.search('@', enc):
x = ''
i = 0
while i < (len(kode) - 1):
x += (kode[i + 1] + kode[i])
i += 2
return (x + (kode[len(kode) - 1] if i < len(kode) else ''))
else:
return None
def _enk_dec_skip(kode, enc):
if re.search('charAt', enc) and re.search('@', enc):
x = ''
i = 0
while i < len(kode):
if(kode[i] == '|' and kode[i + 1] == '|'):
x += '@'
else:
x += kode[i]
i += 2
return x
else:
return None
def _enk_dec_reverse(kode, enc):
if re.search('reverse', enc):
return kode[::-1]
else:
return None
# Candidate decoders tried in order by dekode(); each returns None when the
# encoder snippet does not match its variant.
ENK_DEC_FUNC = [_enk_dec_num, _enk_dec_skip, _enk_dec_swap, _enk_dec_reverse]
def dekode(html):
    """Decode the 'kode' obfuscation wrapper embedded in *html*.

    Extracts the payload and its encoder snippet, applies whichever
    ENK_DEC_FUNC decoder matches, and repeats while another
    'kode="...";...' layer is found.  Returns None when *html* carries no
    kode wrapper at all.
    """
    kodeParts = re.compile('var kode\="kode\=\\\\"(.+?)\\\\";(.+?);"').findall(html)
    if len(kodeParts) == 0:
        return None
    kode = None
    while len(kodeParts) == 1:
        # Un-protect the escaped quotes/backslashes inside the payload.
        kode = kodeParts[0][0].replace('BY_PASS_D', '"').replace('BY_PASS_S', '\'').replace('\\\\', '\\')
        enc = kodeParts[0][1].replace('BY_PASS_D', '"').replace('BY_PASS_S', '\'').replace('\\\\', '\\')
        for dec_func in ENK_DEC_FUNC:
            x = dec_func(kode, enc)
            if x is not None:
                kode = x
        # NOTE(review): indentation was lost when this file was copied; this
        # re-scan is placed at loop level (after trying all decoders), which
        # matches the usual shape of this decoder -- confirm against upstream.
        kodeParts = re.compile('kode\="(.+?)";(.*)').findall(kode.replace('\\"', 'BY_PASS_D').replace('\\\'', 'BY_PASS_S'))
    dekoded = kode.replace('\\"', '"').replace('\\\'', '\'').replace('\\\\', '\\')
    return dekoded
|
marduk191/plugin.video.movie25
|
resources/libs/plugins/sominaltvfilms.py
|
Python
|
gpl-3.0
| 20,517
|
[
"ADF"
] |
7aaa9bbdec05fe823548d20359d52fba95626942071979962a16e4f5097e32b5
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds an expanded text ad that uses upgraded URLS.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
from googleads import errors
# Placeholder: replace with the ID of the ad group to attach the new ad to.
ADGROUP_ID = 'INSERT_ADGROUP_ID_HERE'
def main(client, adgroup_id):
  """Create a PAUSED expanded text ad with upgraded URL fields.

  Args:
    client: an initialized AdWordsClient.
    adgroup_id: ID of the ad group the new ad is added to.

  Raises:
    errors.GoogleAdsError: if the mutate response contains no created ads.
  """
  # Initialize appropriate service.
  service = client.GetService('AdGroupAdService', version='v201809')

  # Build the expanded text ad in a single literal.  The final URL /
  # tracking / custom-parameter fields may be specified at ad, criterion,
  # and feed item levels; finalUrls cannot be combined with the legacy
  # URL field.
  ad = {
      'xsi_type': 'ExpandedTextAd',
      'headlinePart1': 'Luxury Cruise to Mars',
      'headlinePart2': 'Visit the Red Planet in style.',
      'description': 'Low-gravity fun for everyone!',
      'finalUrls': [
          'http://www.example.com/cruise/space/',
          'http://www.example.com/locations/mars/'
      ],
      'finalMobileUrls': [
          'http://mobile.example.com/cruise/space/',
          'http://mobile.example.com/locations/mars/'
      ],
      # Tracking URL for a 3rd-party tracking provider; its two custom
      # parameters are supplied via urlCustomParameters below.
      'trackingUrlTemplate': (
          'http://tracker.example.com/?season={_season}&promocode={_promocode}'
          '&u={lpurl}'
      ),
      'urlCustomParameters': {
          'parameters': [
              {'key': 'season', 'value': 'christmas'},
              {'key': 'promocode', 'value': 'NYC123'}
          ]
      }
  }

  operations = [{
      'operator': 'ADD',
      'operand': {
          'adGroupId': adgroup_id,
          'ad': ad,
          # Optional: set the status.
          'status': 'PAUSED'
      }
  }]

  response = service.mutate(operations)

  if 'value' not in response:
    raise errors.GoogleAdsError('Failed to create AdGroupAd.')
  for created in response['value']:
    print('AdGroupAd with ID "%s" was added.' % created['ad']['id'])
    print('Upgraded URL properties:')
    print('Final Urls: %s' % created['ad']['finalUrls'])
    print('Final Mobile URLs: %s' % created['ad']['finalMobileUrls'])
    print('Tracking URL template: %s'
          % created['ad']['trackingUrlTemplate'])
    print('Custom parameters: %s' % created['ad']['urlCustomParameters'])
if __name__ == '__main__':
  # Initialize client object; credentials are read from "googleads.yaml"
  # (see the module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client, ADGROUP_ID)
|
googleads/googleads-python-lib
|
examples/adwords/v201809/advanced_operations/add_expanded_text_ad_with_upgraded_urls.py
|
Python
|
apache-2.0
| 3,843
|
[
"VisIt"
] |
3d3316c507b55dbb799477ab780b99ec74f38601cbb94ad91f36a91d4e885b2f
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 7 14:38:46 2011
Plot pca training error as a function of percentage of samples used
@author: -
"""
# Computes the gaussian gradients on a boxm_alpha_scene
import os;
import optparse;
import time;
import sys;
import plot_pca_functions;
import numpy as np
import matplotlib.pyplot as plt
import math
if __name__=="__main__":
    #Parse inputs
    # parser = optparse.OptionParser(description='Compute PCA basis');
    #
    # parser.add_option('--pca_dir', action="store", dest="pca_dir");
    # options, args = parser.parse_args();
    #
    # pca_dir = options.pca_dir;
    # Hard-coded experiment directory (the option parsing above is disabled).
    pca_dir = '/Users/isa/Experiments/PCA/CapitolBOXM_6_4_4';
    print (pca_dir)
    if not os.path.isdir( pca_dir + '/'):
        sys.exit(-1);
    fig = plt.figure(10);
    # One error curve per sampling fraction: 10%, 20%, ..., 50% of samples,
    # each read from <pca_dir>/<percent>/normalized_training_error.txt.
    for frac in range(1,6):
        this_pca_dir = pca_dir + '/' +str(int(frac*10));
        if not os.path.isdir( this_pca_dir + '/'):
            sys.exit(-1);
        error_file = this_pca_dir + "/normalized_training_error.txt";
        error= plot_pca_functions.read_vector(error_file);
        plt.title('Training Error ');
        x = np.arange(0, len(error), 1);
        y = plt.plot(x, error, label=(str(int(frac*10)) +'% of total samples'));
        # NOTE(review): plt.hold() was removed in matplotlib >= 3.0; modern
        # axes accumulate lines by default, so this line must be dropped
        # when upgrading matplotlib.
        plt.hold(True);
    plt.xlabel('Number of components used for reconstruction');
    plt.ylabel('Average error per feature vector');
    a = plt.gca()
    a.set_xlim([1,125])
    plt.legend();
    plt.show();
|
mirestrepo/voxels-at-lems
|
bvpl/bvpl_octree/plot_training_error.py
|
Python
|
bsd-2-clause
| 1,422
|
[
"Gaussian"
] |
393d8536479a0fc4c61004fe8b635446e53c12d247df4202c8f625425207b594
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Views tests for the OSF."""
from __future__ import absolute_import
import datetime as dt
import httplib as http
import json
import math
import time
import unittest
import urllib
import datetime
import mock
from nose.tools import * # noqa PEP8 asserts
from modularodm import Q
from modularodm.exceptions import ValidationError
from framework import auth
from framework.auth import User, Auth
from framework.auth.exceptions import InvalidTokenError
from framework.auth.utils import impute_names_model
from framework.celery_tasks import handlers
from framework.exceptions import HTTPError
from tests.base import (
assert_is_redirect,
capture_signals,
fake,
get_default_metaschema,
OsfTestCase,
)
from tests.factories import (
ApiOAuth2ApplicationFactory, ApiOAuth2PersonalTokenFactory, AuthUserFactory,
BookmarkCollectionFactory, CollectionFactory, MockAddonNodeSettings, NodeFactory,
NodeLogFactory, PrivateLinkFactory, ProjectWithAddonFactory, ProjectFactory,
RegistrationFactory, UnconfirmedUserFactory, UnregUserFactory, UserFactory, WatchConfigFactory,
InstitutionFactory,
)
from tests.test_features import requires_search
from website import mailchimp_utils
from website import mails, settings
from website.addons.github.tests.factories import GitHubAccountFactory
from website.models import Node, NodeLog, Pointer
from website.profile.utils import add_contributor_json, serialize_unregistered
from website.profile.views import fmt_date_or_none, update_osf_help_mails_subscription
from website.project.decorators import check_can_access
from website.project.model import has_anonymous_link
from website.project.signals import contributor_added
from website.project.views.contributor import (
deserialize_contributors,
notify_added_contributor,
send_claim_email,
send_claim_registered_email,
)
from website.project.views.node import _should_show_wiki_widget, _view_project, abbrev_authors
from website.util import api_url_for, web_url_for
from website.util import permissions, rubeus
class Addon(MockAddonNodeSettings):
    """Mock addon node settings whose archive check always reports an error."""

    @property
    def complete(self):
        # Always reports itself as fully configured.
        return True

    def archive_errors(self):
        # Always reports an archive error for tests exercising that path.
        return 'Error'
class Addon2(MockAddonNodeSettings):
    """Second mock addon settings class, behaviorally identical to Addon.

    NOTE(review): presumably exists so tests can attach two distinct addon
    types to one node -- confirm with the test cases that use it.
    """

    @property
    def complete(self):
        return True

    def archive_errors(self):
        return 'Error'
class TestViewingProjectWithPrivateLink(OsfTestCase):
    """View tests for accessing a private project through a view-only
    ("private") link, including anonymous links and link name editing.
    """
    def setUp(self):
        super(TestViewingProjectWithPrivateLink, self).setUp()
        self.user = AuthUserFactory()  # Is NOT a contributor
        self.project = ProjectFactory(is_public=False)
        self.link = PrivateLinkFactory()
        self.link.nodes.append(self.project)
        self.link.save()
        self.project_url = self.project.web_url_for('view_project')
    def test_edit_private_link_empty(self):
        """Renaming a private link to an empty string is rejected with 400."""
        node = ProjectFactory(creator=self.user)
        link = PrivateLinkFactory()
        link.nodes.append(node)
        link.save()
        url = node.api_url_for("project_private_link_edit")
        res = self.app.put_json(url, {'pk': link._id, 'value': ''}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_in('Title cannot be blank', res.body)
    def test_edit_private_link_invalid(self):
        """Renaming a private link to markup is rejected with 400."""
        node = ProjectFactory(creator=self.user)
        link = PrivateLinkFactory()
        link.nodes.append(node)
        link.save()
        url = node.api_url_for("project_private_link_edit")
        res = self.app.put_json(url, {'pk': link._id, 'value': '<a></a>'}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_in('Invalid link name.', res.body)
    @mock.patch('framework.auth.core.Auth.private_link')
    def test_can_be_anonymous_for_public_project(self, mock_property):
        """An anonymous view-only link stays anonymous on a public project."""
        # FIX: assign return_value instead of calling it (the original
        # `mock_property.return_value(mock.MagicMock())` was a no-op call
        # that left the patched property unconfigured).
        mock_property.return_value = mock.MagicMock()
        mock_property.anonymous = True
        anonymous_link = PrivateLinkFactory(anonymous=True)
        anonymous_link.nodes.append(self.project)
        anonymous_link.save()
        self.project.set_privacy('public')
        self.project.save()
        self.project.reload()
        auth = Auth(user=self.user, private_key=anonymous_link.key)
        assert_true(has_anonymous_link(self.project, auth))
    def test_has_private_link_key(self):
        """A valid view-only key grants read access to the private project."""
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_equal(res.status_code, 200)
    def test_not_logged_in_no_key(self):
        """Without auth or a key the user is redirected to the login page."""
        res = self.app.get(self.project_url, {'view_only': None})
        assert_is_redirect(res)
        res = res.follow(expect_errors=True)
        assert_equal(res.status_code, 301)
        assert_equal(
            res.request.path,
            '/login'
        )
    def test_logged_in_no_private_key(self):
        """A logged-in non-contributor without a key gets 403."""
        res = self.app.get(self.project_url, {'view_only': None}, auth=self.user.auth,
                           expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)
    def test_logged_in_has_key(self):
        """A logged-in non-contributor with a valid key gets 200."""
        res = self.app.get(
            self.project_url, {'view_only': self.link.key}, auth=self.user.auth)
        assert_equal(res.status_code, 200)
    @unittest.skip('Skipping for now until we find a way to mock/set the referrer')
    def test_prepare_private_key(self):
        res = self.app.get(self.project_url, {'key': self.link.key})
        res = res.click('Registrations')
        assert_is_redirect(res)
        res = res.follow()
        assert_equal(res.status_code, 200)
        assert_equal(res.request.GET['key'], self.link.key)
    def test_cannot_access_registrations_or_forks_with_anon_key(self):
        """Anonymous keys must NOT expose registrations or forks (401)."""
        anonymous_link = PrivateLinkFactory(anonymous=True)
        anonymous_link.nodes.append(self.project)
        anonymous_link.save()
        self.project.is_public = False
        self.project.save()
        url = self.project_url + 'registrations/?view_only={}'.format(anonymous_link.key)
        res = self.app.get(url, expect_errors=True)
        assert_equal(res.status_code, 401)
        url = self.project_url + 'forks/?view_only={}'.format(anonymous_link.key)
        res = self.app.get(url, expect_errors=True)
        assert_equal(res.status_code, 401)
    def test_can_access_registrations_and_forks_with_not_anon_key(self):
        """Non-anonymous keys DO expose registrations and forks (200)."""
        link = PrivateLinkFactory(anonymous=False)
        link.nodes.append(self.project)
        link.save()
        self.project.is_public = False
        self.project.save()
        # FIX: query with the freshly created non-anonymous link's key
        # (the original used self.link.key and left `link` unused).
        url = self.project_url + 'registrations/?view_only={}'.format(link.key)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        url = self.project_url + 'forks/?view_only={}'.format(link.key)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
    def test_check_can_access_valid(self):
        """check_can_access returns True for a contributor."""
        contributor = AuthUserFactory()
        self.project.add_contributor(contributor, auth=Auth(self.project.creator))
        self.project.save()
        assert_true(check_can_access(self.project, contributor))
    def test_check_user_access_invalid(self):
        """check_can_access raises HTTPError for a non-contributor."""
        noncontrib = AuthUserFactory()
        with assert_raises(HTTPError):
            check_can_access(self.project, noncontrib)
    def test_check_user_access_if_user_is_None(self):
        """check_can_access returns False (not an error) for user=None."""
        assert_false(check_can_access(self.project, None))
class TestProjectViews(OsfTestCase):
    """View tests for core project operations: contributor management,
    permissions, tags, logs (pagination and privacy filtering), private
    links, node removal, watching, forking, and registrations.

    Fixtures: two users; ``self.project`` ("Ham") and ``self.project2``
    ("Tofu") are both created by user1 with user2 as a contributor.
    """
    # Stub addon configs registered by the OsfTestCase harness for
    # node-settings-related views.
    ADDONS_UNDER_TEST = {
        'addon1': {
            'node_settings': Addon,
        },
        'addon2': {
            'node_settings': Addon2,
        },
    }
    def setUp(self):
        super(TestProjectViews, self).setUp()
        self.user1 = AuthUserFactory()
        self.user1.save()
        self.consolidate_auth1 = Auth(user=self.user1)
        self.auth = self.user1.auth
        self.user2 = AuthUserFactory()
        self.auth2 = self.user2.auth
        # A project has 2 contributors
        self.project = ProjectFactory(
            title="Ham",
            description='Honey-baked',
            creator=self.user1
        )
        self.project.add_contributor(self.user2, auth=Auth(self.user1))
        self.project.save()
        self.project2 = ProjectFactory(
            title="Tofu",
            description='Glazed',
            creator=self.user1
        )
        self.project2.add_contributor(self.user2, auth=Auth(self.user1))
        self.project2.save()
    def test_node_setting_with_multiple_matched_institution_email_domains(self):
        # User has alternate emails matching more than one institution's email domains
        inst1 = InstitutionFactory(email_domains=['foo.bar'])
        inst2 = InstitutionFactory(email_domains=['baz.qux'])
        user = AuthUserFactory()
        user.emails.append('queen@foo.bar')
        user.emails.append('brian@baz.qux')
        user.save()
        project = ProjectFactory(creator=user)
        # node settings page loads without error
        url = project.web_url_for('node_setting')
        res = self.app.get(url, auth=user.auth)
        assert_equal(res.status_code, 200)
        # user is automatically affiliated with institutions
        # that matched email domains
        user.reload()
        assert_in(inst1, user.affiliated_institutions)
        assert_in(inst2, user.affiliated_institutions)
    def test_edit_title_empty(self):
        node = ProjectFactory(creator=self.user1)
        url = node.api_url_for("edit_node")
        res = self.app.post_json(url, {'name': 'title', 'value': ''}, auth=self.user1.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_in('Title cannot be blank', res.body)
    def test_edit_title_invalid(self):
        node = ProjectFactory(creator=self.user1)
        url = node.api_url_for("edit_node")
        res = self.app.post_json(url, {'name': 'title', 'value': '<a></a>'}, auth=self.user1.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_in('Invalid title.', res.body)
    def test_cannot_remove_only_visible_contributor(self):
        # Make user2 the only bibliographic (visible) contributor.
        self.project.visible_contributor_ids.remove(self.user1._id)
        self.project.save()
        url = self.project.api_url_for('project_remove_contributor')
        res = self.app.post_json(
            url, {'contributorID': self.user2._id,
                  'nodeIDs': [self.project._id]}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor')
        assert_true(self.project.is_contributor(self.user2))
    def test_remove_only_visible_contributor_return_false(self):
        self.project.visible_contributor_ids.remove(self.user1._id)
        self.project.save()
        ret = self.project.remove_contributor(contributor=self.user2, auth=self.consolidate_auth1)
        assert_false(ret)
        self.project.reload()
        assert_true(self.project.is_contributor(self.user2))
    def test_can_view_nested_project_as_admin(self):
        self.parent_project = NodeFactory(
            title='parent project',
            category='project',
            parent=self.project,
            is_public=False
        )
        self.parent_project.save()
        self.child_project = NodeFactory(
            title='child project',
            category='project',
            parent=self.parent_project,
            is_public=False
        )
        self.child_project.save()
        url = self.child_project.web_url_for('view_project')
        res = self.app.get(url, auth=self.auth)
        assert_not_in('Private Project', res.body)
        assert_in('parent project', res.body)
    def test_edit_description(self):
        url = "/api/v1/project/{0}/edit/".format(self.project._id)
        self.app.post_json(url,
                           {"name": "description", "value": "Deep-fried"},
                           auth=self.auth)
        self.project.reload()
        assert_equal(self.project.description, "Deep-fried")
    def test_project_api_url(self):
        url = self.project.api_url
        res = self.app.get(url, auth=self.auth)
        data = res.json
        assert_equal(data['node']['category'], 'Project')
        assert_equal(data['node']['node_type'], 'project')
        assert_equal(data['node']['title'], self.project.title)
        assert_equal(data['node']['is_public'], self.project.is_public)
        assert_equal(data['node']['is_registration'], False)
        assert_equal(data['node']['id'], self.project._primary_key)
        assert_equal(data['node']['watched_count'], 0)
        assert_true(data['user']['is_contributor'])
        assert_equal(data['node']['description'], self.project.description)
        assert_equal(data['node']['url'], self.project.url)
        assert_equal(data['node']['tags'], [t._primary_key for t in self.project.tags])
        assert_in('forked_date', data['node'])
        assert_in('watched_count', data['node'])
        assert_in('registered_from_url', data['node'])
        # TODO: Test "parent" and "user" output
    def test_add_contributor_post(self):
        # Two users are added as a contributor via a POST request
        project = ProjectFactory(creator=self.user1, is_public=True)
        user2 = UserFactory()
        user3 = UserFactory()
        url = "/api/v1/project/{0}/contributors/".format(project._id)
        dict2 = add_contributor_json(user2)
        dict3 = add_contributor_json(user3)
        dict2.update({
            'permission': 'admin',
            'visible': True,
        })
        dict3.update({
            'permission': 'write',
            'visible': False,
        })
        self.app.post_json(
            url,
            {
                'users': [dict2, dict3],
                'node_ids': [project._id],
            },
            content_type="application/json",
            auth=self.auth,
        ).maybe_follow()
        project.reload()
        assert_in(user2._id, project.contributors)
        # A log event was added
        assert_equal(project.logs[-1].action, "contributor_added")
        assert_equal(len(project.contributors), 3)
        assert_in(user2._id, project.permissions)
        assert_in(user3._id, project.permissions)
        assert_equal(project.permissions[user2._id], ['read', 'write', 'admin'])
        assert_equal(project.permissions[user3._id], ['read', 'write'])
    def test_manage_permissions(self):
        url = self.project.api_url + 'contributors/manage/'
        self.app.post_json(
            url,
            {
                'contributors': [
                    {'id': self.project.creator._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                    {'id': self.user1._id, 'permission': 'read',
                        'registered': True, 'visible': True},
                    {'id': self.user2._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                ]
            },
            auth=self.auth,
        )
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user1), ['read'])
        assert_equal(self.project.get_permissions(self.user2), ['read', 'write', 'admin'])
    def test_manage_permissions_again(self):
        # Grant admin to both users, then downgrade user2 to read-only.
        url = self.project.api_url + 'contributors/manage/'
        self.app.post_json(
            url,
            {
                'contributors': [
                    {'id': self.user1._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                    {'id': self.user2._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                ]
            },
            auth=self.auth,
        )
        self.project.reload()
        self.app.post_json(
            url,
            {
                'contributors': [
                    {'id': self.user1._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                    {'id': self.user2._id, 'permission': 'read',
                        'registered': True, 'visible': True},
                ]
            },
            auth=self.auth,
        )
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user2), ['read'])
        assert_equal(self.project.get_permissions(self.user1), ['read', 'write', 'admin'])
    def test_contributor_manage_reorder(self):
        # Two registered users are added as contributors via the model API;
        # the manage endpoint is then used to reorder them.
        project = ProjectFactory(creator=self.user1, is_public=True)
        reg_user1, reg_user2 = UserFactory(), UserFactory()
        project.add_contributors(
            [
                {'user': reg_user1, 'permissions': [
                    'read', 'write', 'admin'], 'visible': True},
                {'user': reg_user2, 'permissions': [
                    'read', 'write', 'admin'], 'visible': False},
            ]
        )
        # Add a non-registered user
        unregistered_user = project.add_unregistered_contributor(
            fullname=fake.name(), email=fake.email(),
            auth=self.consolidate_auth1,
            save=True,
        )
        url = project.api_url + 'contributors/manage/'
        self.app.post_json(
            url,
            {
                'contributors': [
                    {'id': reg_user2._id, 'permission': 'admin',
                        'registered': True, 'visible': False},
                    {'id': project.creator._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                    {'id': unregistered_user._id, 'permission': 'admin',
                        'registered': False, 'visible': True},
                    {'id': reg_user1._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                ]
            },
            auth=self.auth,
        )
        project.reload()
        assert_equal(
            # Note: Cast ForeignList to list for comparison
            list(project.contributors),
            [reg_user2, project.creator, unregistered_user, reg_user1]
        )
        assert_equal(
            project.visible_contributors,
            [project.creator, unregistered_user, reg_user1]
        )
    def test_project_remove_contributor(self):
        url = self.project.api_url_for('project_remove_contributor')
        # User 1 removes user2
        payload = {"contributorID": self.user2._id,
                   "nodeIDs": [self.project._id]}
        self.app.post(url, json.dumps(payload),
                      content_type="application/json",
                      auth=self.auth).maybe_follow()
        self.project.reload()
        assert_not_in(self.user2._id, self.project.contributors)
        # A log event was added
        assert_equal(self.project.logs[-1].action, "contributor_removed")
    def test_multiple_project_remove_contributor(self):
        url = self.project.api_url_for('project_remove_contributor')
        # User 1 removes user2 from both projects in one request
        payload = {"contributorID": self.user2._id,
                   "nodeIDs": [self.project._id, self.project2._id]}
        res = self.app.post(url, json.dumps(payload),
                            content_type="application/json",
                            auth=self.auth).maybe_follow()
        self.project.reload()
        self.project2.reload()
        assert_not_in(self.user2._id, self.project.contributors)
        assert_not_in('/dashboard/', res.json)
        assert_not_in(self.user2._id, self.project2.contributors)
        # A log event was added
        assert_equal(self.project.logs[-1].action, "contributor_removed")
    def test_private_project_remove_self_not_admin(self):
        url = self.project.api_url_for('project_remove_contributor')
        # user2 removes self
        payload = {"contributorID": self.user2._id,
                   "nodeIDs": [self.project._id]}
        res = self.app.post(url, json.dumps(payload),
                            content_type="application/json",
                            auth=self.auth2).maybe_follow()
        self.project.reload()
        assert_equal(res.status_code, 200)
        assert_equal(res.json['redirectUrl'], '/dashboard/')
        assert_not_in(self.user2._id, self.project.contributors)
    def test_public_project_remove_self_not_admin(self):
        url = self.project.api_url_for('project_remove_contributor')
        # user2 removes self
        self.public_project = ProjectFactory(creator=self.user1, is_public=True)
        self.public_project.add_contributor(self.user2, auth=Auth(self.user1))
        self.public_project.save()
        payload = {"contributorID": self.user2._id,
                   "nodeIDs": [self.public_project._id]}
        res = self.app.post(url, json.dumps(payload),
                            content_type="application/json",
                            auth=self.auth2).maybe_follow()
        self.public_project.reload()
        assert_equal(res.status_code, 200)
        assert_equal(res.json['redirectUrl'], '/' + self.public_project._id + '/')
        assert_not_in(self.user2._id, self.public_project.contributors)
    def test_project_remove_other_not_admin(self):
        url = self.project.api_url_for('project_remove_contributor')
        # user2 (non-admin) attempts to remove user1 and is forbidden
        payload = {"contributorID": self.user1._id,
                   "nodeIDs": [self.project._id]}
        res = self.app.post(url, json.dumps(payload),
                            content_type="application/json",
                            expect_errors=True,
                            auth=self.auth2).maybe_follow()
        self.project.reload()
        assert_equal(res.status_code, 403)
        assert_equal(res.json['message_long'],
                     'You do not have permission to perform this action. '
                     'If this should not have occurred and the issue persists, '
                     'please report it to <a href="mailto:support@osf.io">support@osf.io</a>.'
                     )
        assert_in(self.user1._id, self.project.contributors)
    def test_project_remove_fake_contributor(self):
        url = self.project.api_url_for('project_remove_contributor')
        # User 1 attempts to remove a nonexistent contributor id
        payload = {"contributorID": 'badid',
                   "nodeIDs": [self.project._id]}
        res = self.app.post(url, json.dumps(payload),
                            content_type="application/json",
                            expect_errors=True,
                            auth=self.auth).maybe_follow()
        self.project.reload()
        # Assert the contributor id was invalid
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'], 'Contributor not found.')
        assert_not_in('badid', self.project.contributors)
    def test_project_remove_self_only_admin(self):
        url = self.project.api_url_for('project_remove_contributor')
        # User 1 (the only admin) attempts to remove self and is rejected
        payload = {"contributorID": self.user1._id,
                   "nodeIDs": [self.project._id]}
        res = self.app.post(url, json.dumps(payload),
                            content_type="application/json",
                            expect_errors=True,
                            auth=self.auth).maybe_follow()
        self.project.reload()
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'], 'Could not remove contributor.')
        assert_in(self.user1._id, self.project.contributors)
    def test_get_contributors_abbrev(self):
        # create a project with 3 registered contributors
        project = ProjectFactory(creator=self.user1, is_public=True)
        reg_user1, reg_user2 = UserFactory(), UserFactory()
        project.add_contributors(
            [
                {'user': reg_user1, 'permissions': [
                    'read', 'write', 'admin'], 'visible': True},
                {'user': reg_user2, 'permissions': [
                    'read', 'write', 'admin'], 'visible': True},
            ]
        )
        # add an unregistered contributor
        project.add_unregistered_contributor(
            fullname=fake.name(), email=fake.email(),
            auth=self.consolidate_auth1,
            save=True,
        )
        url = project.api_url_for('get_node_contributors_abbrev')
        res = self.app.get(url, auth=self.auth)
        assert_equal(len(project.contributors), 4)
        assert_equal(len(res.json['contributors']), 3)
        assert_equal(len(res.json['others_count']), 1)
        assert_equal(res.json['contributors'][0]['separator'], ',')
        assert_equal(res.json['contributors'][1]['separator'], ',')
        assert_equal(res.json['contributors'][2]['separator'], ' &')
    def test_edit_node_title(self):
        url = "/api/v1/project/{0}/edit/".format(self.project._id)
        # The title is changed though posting form data
        self.app.post_json(url, {"name": "title", "value": "Bacon"},
                           auth=self.auth).maybe_follow()
        self.project.reload()
        # The title was changed
        assert_equal(self.project.title, "Bacon")
        # A log event was saved
        assert_equal(self.project.logs[-1].action, "edit_title")
    def test_make_public(self):
        self.project.is_public = False
        self.project.save()
        url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.project.reload()
        assert_true(self.project.is_public)
        assert_equal(res.json['status'], 'success')
    def test_make_private(self):
        self.project.is_public = True
        self.project.save()
        url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.project.reload()
        assert_false(self.project.is_public)
        assert_equal(res.json['status'], 'success')
    def test_cant_make_public_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.project.add_contributor(non_admin, permissions=['read', 'write'])
        self.project.is_public = False
        self.project.save()
        url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
        res = self.app.post_json(
            url, {}, auth=non_admin.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_false(self.project.is_public)
    def test_cant_make_private_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.project.add_contributor(non_admin, permissions=['read', 'write'])
        self.project.is_public = True
        self.project.save()
        url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
        res = self.app.post_json(
            url, {}, auth=non_admin.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_true(self.project.is_public)
    def test_add_tag(self):
        # Tag text deliberately includes special characters.
        url = self.project.api_url_for('project_add_tag')
        self.app.post_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth)
        self.project.reload()
        assert_in("foo'ta#@%#%^&g?", self.project.tags)
        assert_equal("foo'ta#@%#%^&g?", self.project.logs[-1].params['tag'])
    def test_remove_tag(self):
        self.project.add_tag("foo'ta#@%#%^&g?", auth=self.consolidate_auth1, save=True)
        assert_in("foo'ta#@%#%^&g?", self.project.tags)
        url = self.project.api_url_for("project_remove_tag")
        self.app.delete_json(url, {"tag": "foo'ta#@%#%^&g?"}, auth=self.auth)
        self.project.reload()
        assert_not_in("foo'ta#@%#%^&g?", self.project.tags)
        assert_equal("tag_removed", self.project.logs[-1].action)
        assert_equal("foo'ta#@%#%^&g?", self.project.logs[-1].params['tag'])
    # Regression test for #OSF-5257
    def test_removal_empty_tag_throws_error(self):
        url = self.project.api_url_for('project_remove_tag')
        res= self.app.delete_json(url, {'tag': ''}, auth=self.auth, expect_errors=True)
        assert_equal(res.status_code, http.BAD_REQUEST)
    # Regression test for #OSF-5257
    def test_removal_unknown_tag_throws_error(self):
        self.project.add_tag('narf', auth=self.consolidate_auth1, save=True)
        url = self.project.api_url_for('project_remove_tag')
        res= self.app.delete_json(url, {'tag': 'troz'}, auth=self.auth, expect_errors=True)
        assert_equal(res.status_code, http.CONFLICT)
    # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1478
    @mock.patch('website.archiver.tasks.archive')
    def test_registered_projects_contributions(self, mock_archive):
        # register a project
        self.project.register_node(get_default_metaschema(), Auth(user=self.project.creator), '', None)
        # get the first registered project of a project
        url = self.project.api_url_for('get_registrations')
        res = self.app.get(url, auth=self.auth)
        data = res.json
        pid = data['nodes'][0]['id']
        url2 = api_url_for('get_summary', pid=pid)
        # count contributions
        res2 = self.app.get(url2, auth=self.auth)
        data = res2.json
        assert_is_not_none(data['summary']['nlogs'])
    def test_forks_contributions(self):
        # fork a project
        self.project.fork_node(Auth(user=self.project.creator))
        # get the first forked project of a project
        url = self.project.api_url_for('get_forks')
        res = self.app.get(url, auth=self.auth)
        data = res.json
        pid = data['nodes'][0]['id']
        url2 = api_url_for('get_summary', pid=pid)
        # count contributions
        res2 = self.app.get(url2, auth=self.auth)
        data = res2.json
        assert_is_not_none(data['summary']['nlogs'])
    # Mock transaction commands to verify get_logs does not open a transaction.
    @mock.patch('framework.transactions.commands.begin')
    @mock.patch('framework.transactions.commands.rollback')
    @mock.patch('framework.transactions.commands.commit')
    def test_get_logs(self, *mock_commands):
        # Add some logs
        for _ in range(5):
            self.project.add_log('file_added', params={'node': self.project._id}, auth=self.consolidate_auth1)
        self.project.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url, auth=self.auth)
        for mock_command in mock_commands:
            assert_false(mock_command.called)
        self.project.reload()
        data = res.json
        assert_equal(len(data['logs']), len(self.project.logs))
        assert_equal(data['total'], len(self.project.logs))
        assert_equal(data['page'], 0)
        assert_equal(data['pages'], 1)
        most_recent = data['logs'][0]
        assert_equal(most_recent['action'], 'file_added')
    def test_get_logs_invalid_page_input(self):
        url = self.project.api_url_for('get_logs')
        invalid_input = 'invalid page'
        res = self.app.get(
            url, {'page': invalid_input}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )
    def test_get_logs_negative_page_num(self):
        url = self.project.api_url_for('get_logs')
        invalid_input = -1
        res = self.app.get(
            url, {'page': invalid_input}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )
    def test_get_logs_page_num_beyond_limit(self):
        url = self.project.api_url_for('get_logs')
        size = 10
        page_num = math.ceil(len(self.project.logs) / float(size))
        res = self.app.get(
            url, {'page': page_num}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )
    def test_get_logs_with_count_param(self):
        # Add some logs
        for _ in range(5):
            self.project.add_log('file_added', params={'node': self.project._id}, auth=self.consolidate_auth1)
        self.project.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url, {'count': 3}, auth=self.auth)
        assert_equal(len(res.json['logs']), 3)
        # 1 project create log, 1 add contributor log, then 5 generated logs
        assert_equal(res.json['total'], 5 + 2)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 3)
    def test_get_logs_defaults_to_ten(self):
        # Add some logs
        for _ in range(12):
            self.project.add_log('file_added', params={'node': self.project._id}, auth=self.consolidate_auth1)
        self.project.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url, auth=self.auth)
        assert_equal(len(res.json['logs']), 10)
        # 1 project create log, 1 add contributor log, then 12 generated logs
        assert_equal(res.json['total'], 12 + 2)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 2)
    def test_get_more_logs(self):
        # Add some logs
        for _ in range(12):
            self.project.add_log('file_added', params={'node': self.project._id}, auth=self.consolidate_auth1)
        self.project.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url, {"page": 1}, auth=self.auth)
        assert_equal(len(res.json['logs']), 4)
        # 1 project create log, 1 add contributor log, then 12 generated logs
        assert_equal(res.json['total'], 12 + 2)
        assert_equal(res.json['page'], 1)
        assert_equal(res.json['pages'], 2)
    def test_logs_private(self):
        """Add logs to a public project, then to its private component. Get
        the ten most recent logs; assert that ten logs are returned and that
        all belong to the project and not its component.
        """
        # Add some logs
        for _ in range(15):
            self.project.add_log(
                auth=self.consolidate_auth1,
                action='file_added',
                params={'node': self.project._id}
            )
        self.project.is_public = True
        self.project.save()
        child = NodeFactory(parent=self.project)
        for _ in range(5):
            child.add_log(
                auth=self.consolidate_auth1,
                action='file_added',
                params={'node': child._id}
            )
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url).maybe_follow()
        assert_equal(len(res.json['logs']), 10)
        # 1 project create log, 1 add contributor log, then 15 generated logs
        assert_equal(res.json['total'], 15 + 2)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 2)
        assert_equal(
            [self.project._id] * 10,
            [
                log['params']['node']
                for log in res.json['logs']
            ]
        )
    def test_can_view_public_log_from_private_project(self):
        project = ProjectFactory(is_public=True)
        fork = project.fork_node(auth=self.consolidate_auth1)
        url = fork.api_url_for('get_logs')
        res = self.app.get(url, auth=self.auth)
        assert_equal(
            [each['action'] for each in res.json['logs']],
            ['node_forked', 'project_created'],
        )
        project.is_public = False
        project.save()
        # Logs remain visible on the fork after the source goes private.
        res = self.app.get(url, auth=self.auth)
        assert_equal(
            [each['action'] for each in res.json['logs']],
            ['node_forked', 'project_created'],
        )
    def test_for_private_component_log(self):
        for _ in range(5):
            self.project.add_log(
                auth=self.consolidate_auth1,
                action='file_added',
                params={'node': self.project._id}
            )
        self.project.is_public = True
        self.project.save()
        child = NodeFactory(parent=self.project)
        child.is_public = False
        child.set_title("foo", auth=self.consolidate_auth1)
        child.set_title("bar", auth=self.consolidate_auth1)
        child.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url).maybe_follow()
        assert_equal(len(res.json['logs']), 7)
        # Logs of the private child must not leak to anonymous viewers.
        assert_not_in(
            child._id,
            [
                log['params']['node']
                for log in res.json['logs']
            ]
        )
    def test_remove_project(self):
        url = self.project.api_url
        res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
        self.project.reload()
        assert_equal(self.project.is_deleted, True)
        assert_in('url', res.json)
        assert_equal(res.json['url'], '/dashboard/')
    def test_suspended_project(self):
        # 451: Unavailable For Legal Reasons.
        node = NodeFactory(parent=self.project, creator=self.user1)
        node.remove_node(Auth(self.user1))
        node.suspended = True
        node.save()
        url = node.api_url
        res = self.app.get(url, auth=Auth(self.user1), expect_errors=True)
        assert_equal(res.status_code, 451)
    def test_private_link_edit_name(self):
        link = PrivateLinkFactory()
        link.nodes.append(self.project)
        link.save()
        assert_equal(link.name, "link")
        url = self.project.api_url + 'private_link/edit/'
        self.app.put_json(
            url,
            {'pk': link._id, "value": "new name"},
            auth=self.auth,
        ).maybe_follow()
        self.project.reload()
        link.reload()
        assert_equal(link.name, "new name")
    def test_remove_private_link(self):
        link = PrivateLinkFactory()
        link.nodes.append(self.project)
        link.save()
        url = self.project.api_url_for('remove_private_link')
        self.app.delete_json(
            url,
            {'private_link_id': link._id},
            auth=self.auth,
        ).maybe_follow()
        self.project.reload()
        link.reload()
        assert_true(link.is_deleted)
    def test_remove_component(self):
        node = NodeFactory(parent=self.project, creator=self.user1)
        url = node.api_url
        res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
        node.reload()
        assert_equal(node.is_deleted, True)
        assert_in('url', res.json)
        assert_equal(res.json['url'], self.project.url)
    def test_cant_remove_component_if_not_admin(self):
        node = NodeFactory(parent=self.project, creator=self.user1)
        non_admin = AuthUserFactory()
        node.add_contributor(
            non_admin,
            permissions=['read', 'write'],
            save=True,
        )
        url = node.api_url
        res = self.app.delete_json(
            url, {}, auth=non_admin.auth,
            expect_errors=True,
        ).maybe_follow()
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_false(node.is_deleted)
    def test_watch_and_unwatch(self):
        # Toggling twice returns the watch count to zero.
        url = self.project.api_url_for('togglewatch_post')
        self.app.post_json(url, {}, auth=self.auth)
        res = self.app.get(self.project.api_url, auth=self.auth)
        assert_equal(res.json['node']['watched_count'], 1)
        self.app.post_json(url, {}, auth=self.auth)
        res = self.app.get(self.project.api_url, auth=self.auth)
        assert_equal(res.json['node']['watched_count'], 0)
    def test_view_project_returns_whether_to_show_wiki_widget(self):
        user = AuthUserFactory()
        project = ProjectFactory.build(creator=user, is_public=True)
        project.add_contributor(user)
        project.save()
        url = project.api_url_for('view_project')
        res = self.app.get(url, auth=user.auth)
        assert_equal(res.status_code, http.OK)
        assert_in('show_wiki_widget', res.json['user'])
    def test_fork_count_does_not_include_deleted_forks(self):
        user = AuthUserFactory()
        project = ProjectFactory(creator=user)
        auth = Auth(project.creator)
        fork = project.fork_node(auth)
        project.save()
        fork.remove_node(auth)
        fork.save()
        url = project.api_url_for('view_project')
        res = self.app.get(url, auth=user.auth)
        assert_in('fork_count', res.json['node'])
        assert_equal(0, res.json['node']['fork_count'])
    def test_statistic_page_redirect(self):
        url = self.project.web_url_for('project_statistics_redirect')
        res = self.app.get(url, auth=self.auth)
        assert_equal(res.status_code, 302)
        assert_in(self.project.web_url_for('project_statistics', _guid=True), res.location)
    def test_registration_retraction_redirect(self):
        url = self.project.web_url_for('node_registration_retraction_redirect')
        res = self.app.get(url, auth=self.auth)
        assert_equal(res.status_code, 302)
        assert_in(self.project.web_url_for('node_registration_retraction_get', _guid=True), res.location)
    def test_update_node(self):
        url = self.project.api_url_for('update_node')
        res = self.app.put_json(url, {'title': 'newtitle'}, auth=self.auth)
        assert_equal(res.status_code, 200)
        self.project.reload()
        assert_equal(self.project.title, 'newtitle')
    # Regression test
    def test_update_node_with_tags(self):
        self.project.add_tag('cheezebørger', auth=Auth(self.project.creator), save=True)
        url = self.project.api_url_for('update_node')
        res = self.app.put_json(url, {'title': 'newtitle'}, auth=self.auth)
        assert_equal(res.status_code, 200)
        self.project.reload()
        assert_equal(self.project.title, 'newtitle')
    # Regression test
    def test_get_registrations_sorted_by_registered_date_descending(self):
        # register a project several times, with various registered_dates
        registrations = []
        for days_ago in (21, 3, 2, 8, 13, 5, 1):
            registration = RegistrationFactory(project=self.project)
            reg_date = registration.registered_date - dt.timedelta(days_ago)
            registration.registered_date = reg_date
            registration.save()
            registrations.append(registration)
        registrations.sort(key=lambda r: r.registered_date, reverse=True)
        expected = [ r._id for r in registrations ]
        registrations_url = self.project.api_url_for('get_registrations')
        res = self.app.get(registrations_url, auth=self.auth)
        data = res.json
        actual = [ n['id'] for n in data['nodes'] ]
        assert_equal(actual, expected)
class TestEditableChildrenViews(OsfTestCase):
    """Tests for ``get_editable_children`` over a five-level node chain
    whose privacy alternates public/private at each depth.
    """
    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()
        # Build the chain: project -> child -> grandchild -> ... (depth 4).
        self.project = ProjectFactory(creator=self.user, is_public=False)
        self.child = ProjectFactory(parent=self.project, creator=self.user, is_public=True)
        self.grandchild = ProjectFactory(parent=self.child, creator=self.user, is_public=False)
        self.great_grandchild = ProjectFactory(parent=self.grandchild, creator=self.user, is_public=True)
        self.great_great_grandchild = ProjectFactory(parent=self.great_grandchild, creator=self.user, is_public=False)
        url = self.project.api_url_for('get_editable_children')
        self.project_results = self.app.get(url, auth=self.user.auth).json
    def test_get_editable_children(self):
        # All four descendants are editable by their common creator.
        assert_equal(len(self.project_results['children']), 4)
        assert_equal(self.project_results['node']['id'], self.project._id)
    def test_editable_children_order(self):
        expected_ids = [
            self.child._id,
            self.grandchild._id,
            self.great_grandchild._id,
            self.great_great_grandchild._id,
        ]
        for position, node_id in enumerate(expected_ids):
            assert_equal(self.project_results['children'][position]['id'], node_id)
    def test_editable_children_indents(self):
        # Indent level equals depth below the root project.
        for depth in range(4):
            assert_equal(self.project_results['children'][depth]['indent'], depth)
    def test_editable_children_parents(self):
        expected_parents = [
            self.project._id,
            self.child._id,
            self.grandchild._id,
            self.great_grandchild._id,
        ]
        for position, parent_id in enumerate(expected_parents):
            assert_equal(self.project_results['children'][position]['parent_id'], parent_id)
    def test_editable_children_privacy(self):
        # Privacy alternates down the chain, starting private at the root.
        assert_false(self.project_results['node']['is_public'])
        checks = (assert_true, assert_false, assert_true, assert_false)
        for check, entry in zip(checks, self.project_results['children']):
            check(entry['is_public'])
    def test_editable_children_titles(self):
        assert_equal(self.project_results['node']['title'], self.project.title)
        descendants = [
            self.child,
            self.grandchild,
            self.great_grandchild,
            self.great_great_grandchild,
        ]
        for entry, node in zip(self.project_results['children'], descendants):
            assert_equal(entry['title'], node.title)
class TestChildrenViews(OsfTestCase):
    """Tests for the ``get_children`` view: direct children, pointers,
    and permission-based filtering.
    """

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()

    def test_get_children(self):
        project = ProjectFactory(creator=self.user)
        child = NodeFactory(parent=project, creator=self.user)

        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)

        nodes = res.json['nodes']
        assert_equal(len(nodes), 1)
        assert_equal(nodes[0]['id'], child._primary_key)

    def test_get_children_includes_pointers(self):
        project = ProjectFactory(creator=self.user)
        pointed = ProjectFactory()
        project.add_pointer(pointed, Auth(self.user))
        project.save()

        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)

        nodes = res.json['nodes']
        assert_equal(len(nodes), 1)
        assert_equal(nodes[0]['title'], pointed.title)
        # The serialized id is the Pointer's key, not the pointed node's.
        pointer = Pointer.find_one(Q('node', 'eq', pointed))
        assert_equal(nodes[0]['id'], pointer._primary_key)

    def test_get_children_filter_for_permissions(self):
        # self.user has admin access to this project
        project = ProjectFactory(creator=self.user)

        # self.user only has read access to this project, which project points
        # to
        read_only_pointed = ProjectFactory()
        read_only_creator = read_only_pointed.creator
        read_only_pointed.add_contributor(self.user, auth=Auth(read_only_creator), permissions=['read'])
        read_only_pointed.save()

        # self.user only has read access to this project, which is a subproject
        # of project
        read_only = ProjectFactory()
        # BUG FIX: previously this line added self.user to read_only_pointed a
        # second time instead of to read_only, so the fixture never granted
        # the read access on the subproject that the comment above describes.
        read_only.add_contributor(self.user, auth=Auth(read_only.creator), permissions=['read'])
        project.nodes.append(read_only)

        # self.user adds a pointer to read_only
        project.add_pointer(read_only_pointed, Auth(self.user))
        project.save()

        # Without a permissions filter, both the subproject and the pointer
        # are returned.
        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(len(res.json['nodes']), 2)

        # Filtering for 'write' excludes both read-only nodes.
        url = project.api_url_for('get_children', permissions='write')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(len(res.json['nodes']), 0)

    def test_get_children_render_nodes_receives_auth(self):
        project = ProjectFactory(creator=self.user)
        NodeFactory(parent=project, creator=self.user)

        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)

        # Creator of the child should be serialized with admin permission.
        perm = res.json['nodes'][0]['permissions']
        assert_equal(perm, 'admin')
class TestGetNodeTree(OsfTestCase):
    """Tests for the ``get_node_tree`` view."""

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()
        self.user2 = AuthUserFactory()

    def _fetch_tree(self, project, auth, **kwargs):
        # Fetch the serialized node tree rooted at ``project``.
        url = project.api_url_for('get_node_tree')
        return self.app.get(url, auth=auth, **kwargs)

    def test_get_single_node(self):
        project = ProjectFactory(creator=self.user)
        res = self._fetch_tree(project, self.user.auth)
        assert_equal(res.json[0]['node']['id'], project._primary_key)

    def test_get_node_with_children(self):
        project = ProjectFactory(creator=self.user)
        children = [
            NodeFactory(parent=project, creator=self.user),
            NodeFactory(parent=project, creator=self.user2),
            NodeFactory(parent=project, creator=self.user),
        ]
        res = self._fetch_tree(project, self.user.auth)
        tree = res.json[0]
        assert_equal(tree['node']['id'], project._primary_key)
        # Children are serialized in creation order.
        for serialized, child in zip(tree['children'], children):
            assert_equal(serialized['node']['id'], child._primary_key)

    def test_get_node_not_parent_owner(self):
        project = ProjectFactory(creator=self.user2)
        NodeFactory(parent=project, creator=self.user2)
        res = self._fetch_tree(project, self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_equal(res.json, [])

    # Parent node should show because of user2 read access, the children should not
    def test_get_node_parent_not_admin(self):
        project = ProjectFactory(creator=self.user)
        project.add_contributor(self.user2, auth=Auth(self.user))
        project.save()
        for _ in range(3):
            NodeFactory(parent=project, creator=self.user)
        res = self._fetch_tree(project, self.user2.auth)
        tree = res.json[0]
        assert_equal(tree['node']['id'], project._primary_key)
        assert_equal(tree['children'], [])
class TestUserProfile(OsfTestCase):
    """Tests for user profile API views: profile editing and sanitization,
    social links, employment/education history, gravatars, and user settings
    (timezone, locale, emails, mailing lists).
    """

    def setUp(self):
        super(TestUserProfile, self).setUp()
        self.user = AuthUserFactory()

    def test_sanitization_of_edit_profile(self):
        # HTML tags and surrounding whitespace should be stripped from names.
        url = api_url_for('edit_profile', uid=self.user._id)
        post_data = {'name': 'fullname', 'value': 'new<b> name</b> '}
        request = self.app.post(url, post_data, auth=self.user.auth)
        assert_equal('new name', request.json['name'])

    def test_fmt_date_or_none(self):
        with assert_raises(HTTPError) as cm:
            # enter a date before 1900
            fmt_date_or_none(dt.datetime(1890, 10, 31, 18, 23, 29, 227))
        # error should be raised because date is before 1900
        assert_equal(cm.exception.code, http.BAD_REQUEST)

    def test_unserialize_social(self):
        url = api_url_for('unserialize_social')
        payload = {
            'profileWebsites': ['http://frozen.pizza.com/reviews'],
            'twitter': 'howtopizza',
            'github': 'frozenpizzacode',
        }
        self.app.put_json(
            url,
            payload,
            auth=self.user.auth,
        )
        self.user.reload()
        # Every submitted field is persisted verbatim on user.social.
        for key, value in payload.iteritems():
            assert_equal(self.user.social[key], value)
        # Fields not in the payload stay unset.
        assert_true(self.user.social['researcherId'] is None)

    # Regression test for help-desk ticket
    def test_making_email_primary_is_not_case_sensitive(self):
        user = AuthUserFactory(username='fred@queen.test')
        # make confirmed email have different casing
        user.emails[0] = user.emails[0].capitalize()
        user.save()
        url = api_url_for('update_user')
        res = self.app.put_json(
            url,
            {'id': user._id, 'emails': [{'address': 'fred@queen.test', 'primary': True, 'confirmed': True}]},
            auth=user.auth
        )
        assert_equal(res.status_code, 200)

    def test_unserialize_social_validation_failure(self):
        url = api_url_for('unserialize_social')
        # profileWebsites URL is invalid
        payload = {
            'profileWebsites': ['http://goodurl.com', 'http://invalidurl'],
            'twitter': 'howtopizza',
            'github': 'frozenpizzacode',
        }
        res = self.app.put_json(
            url,
            payload,
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'], 'Invalid personal URL.')

    def test_serialize_social_editable(self):
        # Viewing one's own social links: all fields plus editable=True.
        self.user.social['twitter'] = 'howtopizza'
        self.user.social['profileWebsites'] = ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com']
        self.user.save()
        url = api_url_for('serialize_social')
        res = self.app.get(
            url,
            auth=self.user.auth,
        )
        assert_equal(res.json.get('twitter'), 'howtopizza')
        assert_equal(res.json.get('profileWebsites'), ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com'])
        assert_true(res.json.get('github') is None)
        assert_true(res.json['editable'])

    def test_serialize_social_not_editable(self):
        # Viewing another user's social links: same data, editable=False.
        user2 = AuthUserFactory()
        self.user.social['twitter'] = 'howtopizza'
        self.user.social['profileWebsites'] = ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com']
        self.user.save()
        url = api_url_for('serialize_social', uid=self.user._id)
        res = self.app.get(
            url,
            auth=user2.auth,
        )
        assert_equal(res.json.get('twitter'), 'howtopizza')
        assert_equal(res.json.get('profileWebsites'), ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com'])
        assert_true(res.json.get('github') is None)
        assert_false(res.json['editable'])

    def test_serialize_social_addons_editable(self):
        self.user.add_addon('github')
        oauth_settings = GitHubAccountFactory()
        oauth_settings.save()
        self.user.external_accounts.append(oauth_settings)
        self.user.save()
        url = api_url_for('serialize_social')
        res = self.app.get(
            url,
            auth=self.user.auth,
        )
        # 'abc' — presumably the display name set by GitHubAccountFactory;
        # TODO confirm against the factory definition.
        assert_equal(
            res.json['addons']['github'],
            'abc'
        )

    def test_serialize_social_addons_not_editable(self):
        # Addon accounts are private: hidden from other users entirely.
        user2 = AuthUserFactory()
        self.user.add_addon('github')
        oauth_settings = GitHubAccountFactory()
        oauth_settings.save()
        self.user.external_accounts.append(oauth_settings)
        self.user.save()
        url = api_url_for('serialize_social', uid=self.user._id)
        res = self.app.get(
            url,
            auth=user2.auth,
        )
        assert_not_in('addons', res.json)

    def test_unserialize_and_serialize_jobs(self):
        # Round-trip: PUT jobs, then GET them back unchanged.
        jobs = [{
            'institution': 'an institution',
            'department': 'a department',
            'title': 'a title',
            'startMonth': 'January',
            'startYear': '2001',
            'endMonth': 'March',
            'endYear': '2001',
            'ongoing': False,
        }, {
            'institution': 'another institution',
            'department': None,
            'title': None,
            'startMonth': 'May',
            'startYear': '2001',
            'endMonth': None,
            'endYear': None,
            'ongoing': True,
        }]
        payload = {'contents': jobs}
        url = api_url_for('unserialize_jobs')
        self.app.put_json(url, payload, auth=self.user.auth)
        self.user.reload()
        assert_equal(len(self.user.jobs), 2)
        url = api_url_for('serialize_jobs')
        res = self.app.get(
            url,
            auth=self.user.auth,
        )
        for i, job in enumerate(jobs):
            assert_equal(job, res.json['contents'][i])

    def test_unserialize_and_serialize_schools(self):
        # Round-trip: PUT schools, then GET them back unchanged.
        schools = [{
            'institution': 'an institution',
            'department': 'a department',
            'degree': 'a degree',
            'startMonth': 1,
            'startYear': '2001',
            'endMonth': 5,
            'endYear': '2001',
            'ongoing': False,
        }, {
            'institution': 'another institution',
            'department': None,
            'degree': None,
            'startMonth': 5,
            'startYear': '2001',
            'endMonth': None,
            'endYear': None,
            'ongoing': True,
        }]
        payload = {'contents': schools}
        url = api_url_for('unserialize_schools')
        self.app.put_json(url, payload, auth=self.user.auth)
        self.user.reload()
        assert_equal(len(self.user.schools), 2)
        url = api_url_for('serialize_schools')
        res = self.app.get(
            url,
            auth=self.user.auth,
        )
        for i, job in enumerate(schools):
            assert_equal(job, res.json['contents'][i])

    def test_unserialize_jobs(self):
        jobs = [
            {
                'institution': fake.company(),
                'department': fake.catch_phrase(),
                'title': fake.bs(),
                'startMonth': 5,
                'startYear': '2013',
                'endMonth': 3,
                'endYear': '2014',
                'ongoing': False,
            }
        ]
        payload = {'contents': jobs}
        url = api_url_for('unserialize_jobs')
        res = self.app.put_json(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        self.user.reload()
        # jobs field is updated
        assert_equal(self.user.jobs, jobs)

    def test_unserialize_names(self):
        # Full name should be stripped of surrounding whitespace on save.
        fake_fullname_w_spaces = '    {}    '.format(fake.name())
        names = {
            'full': fake_fullname_w_spaces,
            'given': 'Tea',
            'middle': 'Gray',
            'family': 'Pot',
            'suffix': 'Ms.',
        }
        url = api_url_for('unserialize_names')
        res = self.app.put_json(url, names, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        self.user.reload()
        # user is updated
        assert_equal(self.user.fullname, fake_fullname_w_spaces.strip())
        assert_equal(self.user.given_name, names['given'])
        assert_equal(self.user.middle_names, names['middle'])
        assert_equal(self.user.family_name, names['family'])
        assert_equal(self.user.suffix, names['suffix'])

    def test_unserialize_schools(self):
        schools = [
            {
                'institution': fake.company(),
                'department': fake.catch_phrase(),
                'degree': fake.bs(),
                'startMonth': 5,
                'startYear': '2013',
                'endMonth': 3,
                'endYear': '2014',
                'ongoing': False,
            }
        ]
        payload = {'contents': schools}
        url = api_url_for('unserialize_schools')
        res = self.app.put_json(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        self.user.reload()
        # schools field is updated
        assert_equal(self.user.schools, schools)

    def test_unserialize_jobs_valid(self):
        jobs = [
            {
                'institution': fake.company(),
                'department': fake.catch_phrase(),
                'title': fake.bs(),
                'startMonth': 5,
                'startYear': '2013',
                'endMonth': 3,
                'endYear': '2014',
                'ongoing': False,
            }
        ]
        payload = {'contents': jobs}
        url = api_url_for('unserialize_jobs')
        res = self.app.put_json(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)

    def test_get_current_user_gravatar_default_size(self):
        # current_user_gravatar and get_gravatar(uid=self) should agree.
        url = api_url_for('current_user_gravatar')
        res = self.app.get(url, auth=self.user.auth)
        current_user_gravatar = res.json['gravatar_url']
        assert_true(current_user_gravatar is not None)
        url = api_url_for('get_gravatar', uid=self.user._id)
        res = self.app.get(url, auth=self.user.auth)
        my_user_gravatar = res.json['gravatar_url']
        assert_equal(current_user_gravatar, my_user_gravatar)

    def test_get_other_user_gravatar_default_size(self):
        user2 = AuthUserFactory()
        url = api_url_for('current_user_gravatar')
        res = self.app.get(url, auth=self.user.auth)
        current_user_gravatar = res.json['gravatar_url']
        url = api_url_for('get_gravatar', uid=user2._id)
        res = self.app.get(url, auth=self.user.auth)
        user2_gravatar = res.json['gravatar_url']
        assert_true(user2_gravatar is not None)
        # Different users hash to different gravatar URLs.
        assert_not_equal(current_user_gravatar, user2_gravatar)

    def test_get_current_user_gravatar_specific_size(self):
        url = api_url_for('current_user_gravatar')
        res = self.app.get(url, auth=self.user.auth)
        current_user_default_gravatar = res.json['gravatar_url']
        # Requesting an explicit size yields a different URL.
        url = api_url_for('current_user_gravatar', size=11)
        res = self.app.get(url, auth=self.user.auth)
        current_user_small_gravatar = res.json['gravatar_url']
        assert_true(current_user_small_gravatar is not None)
        assert_not_equal(current_user_default_gravatar, current_user_small_gravatar)

    def test_get_other_user_gravatar_specific_size(self):
        user2 = AuthUserFactory()
        url = api_url_for('get_gravatar', uid=user2._id)
        res = self.app.get(url, auth=self.user.auth)
        gravatar_default_size = res.json['gravatar_url']
        url = api_url_for('get_gravatar', uid=user2._id, size=11)
        res = self.app.get(url, auth=self.user.auth)
        gravatar_small = res.json['gravatar_url']
        assert_true(gravatar_small is not None)
        assert_not_equal(gravatar_default_size, gravatar_small)

    def test_update_user_timezone(self):
        assert_equal(self.user.timezone, 'Etc/UTC')
        payload = {'timezone': 'America/New_York', 'id': self.user._id}
        url = api_url_for('update_user', uid=self.user._id)
        self.app.put_json(url, payload, auth=self.user.auth)
        self.user.reload()
        assert_equal(self.user.timezone, 'America/New_York')

    def test_update_user_locale(self):
        assert_equal(self.user.locale, 'en_US')
        payload = {'locale': 'de_DE', 'id': self.user._id}
        url = api_url_for('update_user', uid=self.user._id)
        self.app.put_json(url, payload, auth=self.user.auth)
        self.user.reload()
        assert_equal(self.user.locale, 'de_DE')

    def test_update_user_locale_none(self):
        # A null locale is ignored; the existing locale is kept.
        assert_equal(self.user.locale, 'en_US')
        payload = {'locale': None, 'id': self.user._id}
        url = api_url_for('update_user', uid=self.user._id)
        self.app.put_json(url, payload, auth=self.user.auth)
        self.user.reload()
        assert_equal(self.user.locale, 'en_US')

    def test_update_user_locale_empty_string(self):
        # An empty-string locale is ignored; the existing locale is kept.
        assert_equal(self.user.locale, 'en_US')
        payload = {'locale': '', 'id': self.user._id}
        url = api_url_for('update_user', uid=self.user._id)
        self.app.put_json(url, payload, auth=self.user.auth)
        self.user.reload()
        assert_equal(self.user.locale, 'en_US')

    def test_cannot_update_user_without_user_id(self):
        user1 = AuthUserFactory()
        url = api_url_for('update_user')
        header = {'emails': [{'address': user1.username}]}
        res = self.app.put_json(url, header, auth=user1.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'], '"id" is required')

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_add_emails_return_emails(self, send_mail):
        user1 = AuthUserFactory()
        url = api_url_for('update_user')
        email = 'test@cos.io'
        header = {'id': user1._id,
                  'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
                             {'address': email, 'primary': False, 'confirmed': False}
                             ]}
        res = self.app.put_json(url, header, auth=user1.auth)
        assert_equal(res.status_code, 200)
        # Response echoes back both the primary and the newly added email.
        assert_in('emails', res.json['profile'])
        assert_equal(len(res.json['profile']['emails']), 2)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_resend_confirmation_return_emails(self, send_mail):
        user1 = AuthUserFactory()
        url = api_url_for('resend_confirmation')
        email = 'test@cos.io'
        header = {'id': user1._id,
                  'email': {'address': email, 'primary': False, 'confirmed': False}
                  }
        res = self.app.put_json(url, header, auth=user1.auth)
        assert_equal(res.status_code, 200)
        assert_in('emails', res.json['profile'])
        assert_equal(len(res.json['profile']['emails']), 2)

    @mock.patch('framework.auth.views.mails.send_mail')
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_update_user_mailing_lists(self, mock_get_mailchimp_api, send_mail):
        """Changing the primary email of a subscribed user should unsubscribe
        the old address and subscribe the new one on mailchimp.
        """
        email = fake.email()
        self.user.emails.append(email)
        list_name = 'foo'
        self.user.mailchimp_mailing_lists[list_name] = True
        self.user.save()

        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
        list_id = mailchimp_utils.get_list_id_from_name(list_name)

        url = api_url_for('update_user', uid=self.user._id)
        # Swap which address is primary.
        emails = [
            {'address': self.user.username, 'primary': False, 'confirmed': True},
            {'address': email, 'primary': True, 'confirmed': True}]
        payload = {'locale': '', 'id': self.user._id, 'emails': emails}
        self.app.put_json(url, payload, auth=self.user.auth)

        mock_client.lists.unsubscribe.assert_called_with(
            id=list_id,
            email={'email': self.user.username},
            send_goodbye=True
        )
        mock_client.lists.subscribe.assert_called_with(
            id=list_id,
            email={'email': email},
            merge_vars={
                'fname': self.user.given_name,
                'lname': self.user.family_name,
            },
            double_optin=False,
            update_existing=True
        )
        # Mailchimp calls run in celery tasks; flush the request queue so the
        # mocked calls above actually execute before the assertions.
        handlers.celery_teardown_request()

    @mock.patch('framework.auth.views.mails.send_mail')
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_unsubscribe_mailchimp_not_called_if_user_not_subscribed(self, mock_get_mailchimp_api, send_mail):
        email = fake.email()
        self.user.emails.append(email)
        list_name = 'foo'
        # User is NOT subscribed, so no mailchimp calls should be made.
        self.user.mailchimp_mailing_lists[list_name] = False
        self.user.save()

        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}

        url = api_url_for('update_user', uid=self.user._id)
        emails = [
            {'address': self.user.username, 'primary': False, 'confirmed': True},
            {'address': email, 'primary': True, 'confirmed': True}]
        payload = {'locale': '', 'id': self.user._id, 'emails': emails}
        self.app.put_json(url, payload, auth=self.user.auth)

        assert_equal(mock_client.lists.unsubscribe.call_count, 0)
        assert_equal(mock_client.lists.subscribe.call_count, 0)
        # Flush queued celery tasks (see test_update_user_mailing_lists).
        handlers.celery_teardown_request()

    # TODO: Uncomment once outstanding issues with this feature are addressed
    # def test_twitter_redirect_success(self):
    #     self.user.social['twitter'] = fake.last_name()
    #     self.user.save()

    #     res = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter']))
    #     assert_equals(res.status_code, http.FOUND)
    #     assert_in(self.user.url, res.location)

    # def test_twitter_redirect_is_case_insensitive(self):
    #     self.user.social['twitter'] = fake.last_name()
    #     self.user.save()

    #     res1 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter']))
    #     res2 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'].lower()))
    #     assert_equal(res1.location, res2.location)

    # def test_twitter_redirect_unassociated_twitter_handle_returns_404(self):
    #     unassociated_handle = fake.last_name()
    #     expected_error = 'There is no active user associated with the Twitter handle: {0}.'.format(unassociated_handle)

    #     res = self.app.get(
    #         web_url_for('redirect_to_twitter', twitter_handle=unassociated_handle),
    #         expect_errors=True
    #     )
    #     assert_equal(res.status_code, http.NOT_FOUND)
    #     assert_true(expected_error in res.body)

    # def test_twitter_redirect_handle_with_multiple_associated_accounts_redirects_to_selection_page(self):
    #     self.user.social['twitter'] = fake.last_name()
    #     self.user.save()
    #     user2 = AuthUserFactory()
    #     user2.social['twitter'] = self.user.social['twitter']
    #     user2.save()

    #     expected_error = 'There are multiple OSF accounts associated with the Twitter handle: <strong>{0}</strong>.'.format(self.user.social['twitter'])
    #     res = self.app.get(
    #         web_url_for(
    #             'redirect_to_twitter',
    #             twitter_handle=self.user.social['twitter'],
    #             expect_error=True
    #         )
    #     )
    #     assert_equal(res.status_code, http.MULTIPLE_CHOICES)
    #     assert_true(expected_error in res.body)
    #     assert_true(self.user.url in res.body)
    #     assert_true(user2.url in res.body)
class TestUserProfileApplicationsPage(OsfTestCase):
    """Tests for the OAuth2 application detail page on the user profile."""

    def setUp(self):
        super(TestUserProfileApplicationsPage, self).setUp()
        self.user = AuthUserFactory()
        self.user2 = AuthUserFactory()
        self.platform_app = ApiOAuth2ApplicationFactory(owner=self.user)
        self.detail_url = web_url_for(
            'oauth_application_detail', client_id=self.platform_app.client_id)

    def _get_detail(self, viewer, url=None):
        # Fetch the detail page as ``viewer``; errors are expected here.
        return self.app.get(url or self.detail_url, auth=viewer.auth, expect_errors=True)

    def test_non_owner_cant_access_detail_page(self):
        res = self._get_detail(self.user2)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_owner_cant_access_deleted_application(self):
        self.platform_app.is_active = False
        self.platform_app.save()
        res = self._get_detail(self.user)
        assert_equal(res.status_code, http.GONE)

    def test_owner_cant_access_nonexistent_application(self):
        missing_url = web_url_for('oauth_application_detail', client_id='nonexistent')
        res = self._get_detail(self.user, url=missing_url)
        assert_equal(res.status_code, http.NOT_FOUND)

    def test_url_has_not_broken(self):
        # The model's computed URL must match the routed detail URL.
        assert_equal(self.platform_app.url, self.detail_url)
class TestUserProfileTokensPage(OsfTestCase):
    """Tests for the personal access token detail page."""

    def setUp(self):
        super(TestUserProfileTokensPage, self).setUp()
        self.user = AuthUserFactory()
        self.token = ApiOAuth2PersonalTokenFactory()
        self.detail_url = web_url_for(
            'personal_access_token_detail', _id=self.token._id)

    def test_url_has_not_broken(self):
        # The model's computed URL must match the routed detail URL.
        assert_equal(self.detail_url, self.token.url)
class TestUserAccount(OsfTestCase):
    """Tests for password changes and account export/deactivation throttling."""

    def setUp(self):
        super(TestUserAccount, self).setUp()
        self.user = AuthUserFactory()
        self.user.set_password('password')
        self.user.save()

    @mock.patch('website.profile.views.push_status_message')
    def test_password_change_valid(self, mock_push_status_message):
        old_password = 'password'
        new_password = 'Pa$$w0rd'
        confirm_password = new_password
        url = web_url_for('user_account_password')
        post_data = {
            'old_password': old_password,
            'new_password': new_password,
            'confirm_password': confirm_password,
        }
        res = self.app.post(url, post_data, auth=(self.user.username, old_password))
        # BUG FIX: assert_true(302, res.status_code) always passed because its
        # first argument (302) is truthy and the status code was treated as
        # the failure message. assert_equal actually checks the redirect.
        assert_equal(res.status_code, 302)
        res = res.follow(auth=(self.user.username, new_password))
        assert_equal(res.status_code, 200)
        self.user.reload()
        assert_true(self.user.check_password(new_password))
        assert_true(mock_push_status_message.called)
        assert_in('Password updated successfully', mock_push_status_message.mock_calls[0][1][0])

    @mock.patch('website.profile.views.push_status_message')
    def test_password_change_invalid(self, mock_push_status_message, old_password='', new_password='',
                                     confirm_password='', error_message='Old password is invalid'):
        """Shared driver for the invalid-password cases below: posts the given
        passwords and asserts the change is rejected with ``error_message``.
        """
        url = web_url_for('user_account_password')
        post_data = {
            'old_password': old_password,
            'new_password': new_password,
            'confirm_password': confirm_password,
        }
        res = self.app.post(url, post_data, auth=self.user.auth)
        # BUG FIX: same assert_true(302, ...) no-op as above; see
        # test_password_change_valid.
        assert_equal(res.status_code, 302)
        res = res.follow(auth=self.user.auth)
        assert_equal(res.status_code, 200)
        self.user.reload()
        # Password must NOT have been changed.
        assert_false(self.user.check_password(new_password))
        assert_true(mock_push_status_message.called)
        error_strings = [e[1][0] for e in mock_push_status_message.mock_calls]
        assert_in(error_message, error_strings)

    def test_password_change_invalid_old_password(self):
        self.test_password_change_invalid(
            old_password='invalid old password',
            new_password='new password',
            confirm_password='new password',
            error_message='Old password is invalid',
        )

    def test_password_change_invalid_confirm_password(self):
        self.test_password_change_invalid(
            old_password='password',
            new_password='new password',
            confirm_password='invalid confirm password',
            error_message='Password does not match the confirmation',
        )

    def test_password_change_invalid_new_password_length(self):
        self.test_password_change_invalid(
            old_password='password',
            new_password='12345',
            confirm_password='12345',
            error_message='Password should be at least six characters',
        )

    def test_password_change_invalid_blank_password(self, old_password='', new_password='', confirm_password=''):
        self.test_password_change_invalid(
            old_password=old_password,
            new_password=new_password,
            confirm_password=confirm_password,
            error_message='Passwords cannot be blank',
        )

    def test_password_change_invalid_blank_new_password(self):
        # Both empty and whitespace-only new passwords are rejected.
        for password in ('', '      '):
            self.test_password_change_invalid_blank_password('password', password, 'new password')

    def test_password_change_invalid_blank_confirm_password(self):
        for password in ('', '      '):
            self.test_password_change_invalid_blank_password('password', 'new password', password)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_user_cannot_request_account_export_before_throttle_expires(self, send_mail):
        url = api_url_for('request_export')
        self.app.post(url, auth=self.user.auth)
        assert_true(send_mail.called)
        # A second request within the throttle window is rejected and no
        # additional mail is sent.
        res = self.app.post(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(send_mail.call_count, 1)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_user_cannot_request_account_deactivation_before_throttle_expires(self, send_mail):
        url = api_url_for('request_deactivation')
        self.app.post(url, auth=self.user.auth)
        assert_true(send_mail.called)
        res = self.app.post(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(send_mail.call_count, 1)
class TestAddingContributorViews(OsfTestCase):
def setUp(self):
super(TestAddingContributorViews, self).setUp()
self.creator = AuthUserFactory()
self.project = ProjectFactory(creator=self.creator)
self.auth = Auth(self.project.creator)
# Authenticate all requests
self.app.authenticate(*self.creator.auth)
contributor_added.connect(notify_added_contributor)
def test_serialize_unregistered_without_record(self):
name, email = fake.name(), fake.email()
res = serialize_unregistered(fullname=name, email=email)
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
assert_equal(res['id'], None)
assert_false(res['registered'])
assert_true(res['gravatar'])
assert_false(res['active'])
def test_deserialize_contributors(self):
contrib = UserFactory()
unreg = UnregUserFactory()
name, email = fake.name(), fake.email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [
add_contributor_json(contrib),
serialize_unregistered(fake.name(), unreg.username),
unreg_no_record
]
contrib_data[0]['permission'] = 'admin'
contrib_data[1]['permission'] = 'write'
contrib_data[2]['permission'] = 'read'
contrib_data[0]['visible'] = True
contrib_data[1]['visible'] = True
contrib_data[2]['visible'] = True
res = deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator))
assert_equal(len(res), len(contrib_data))
assert_true(res[0]['user'].is_registered)
assert_false(res[1]['user'].is_registered)
assert_true(res[1]['user']._id)
assert_false(res[2]['user'].is_registered)
assert_true(res[2]['user']._id)
def test_deserialize_contributors_validates_fullname(self):
name = "<img src=1 onerror=console.log(1)>"
email = fake.email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = 'admin'
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
def test_deserialize_contributors_validates_email(self):
name = fake.name()
email = "!@#$%%^&*"
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = 'admin'
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_deserialize_contributors_sends_unreg_contributor_added_signal(self, _):
unreg = UnregUserFactory()
from website.project.signals import unreg_contributor_added
serialized = [serialize_unregistered(fake.name(), unreg.username)]
serialized[0]['visible'] = True
with capture_signals() as mock_signals:
deserialize_contributors(self.project, serialized,
auth=Auth(self.creator))
assert_equal(mock_signals.signals_sent(), set([unreg_contributor_added]))
def test_serialize_unregistered_with_record(self):
name, email = fake.name(), fake.email()
user = self.project.add_unregistered_contributor(fullname=name,
email=email, auth=Auth(self.project.creator))
self.project.save()
res = serialize_unregistered(
fullname=name,
email=email
)
assert_false(res['active'])
assert_false(res['registered'])
assert_equal(res['id'], user._primary_key)
assert_true(res['gravatar_url'])
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
def test_add_contributor_with_unreg_contribs_and_reg_contribs(self):
n_contributors_pre = len(self.project.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.contributors),
n_contributors_pre + len(payload['users']))
new_unreg = auth.get_user(email=email)
assert_false(new_unreg.is_registered)
# unclaimed record was added
new_unreg.reload()
assert_in(self.project._primary_key, new_unreg.unclaimed_records)
rec = new_unreg.get_unclaimed_record(self.project._primary_key)
assert_equal(rec['name'], name)
assert_equal(rec['email'], email)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_add_contributors_post_only_sends_one_email_to_unreg_user(
self, mock_send_claim_email):
# Project has components
comp1, comp2 = NodeFactory(
creator=self.creator), NodeFactory(creator=self.creator)
self.project.nodes.append(comp1)
self.project.nodes.append(comp2)
self.project.save()
# An unreg user is added to the project AND its components
unreg_user = { # dict because user has not previous unreg record
'id': None,
'registered': False,
'fullname': fake.name(),
'email': fake.email(),
'permission': 'admin',
'visible': True,
}
payload = {
'users': [unreg_user],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert_true(self.project.can_edit(user=self.creator))
self.app.post_json(url, payload, auth=self.creator.auth)
# finalize_invitation should only have been called once
assert_equal(mock_send_claim_email.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_only_sends_one_email_to_registered_user(self, mock_send_mail):
    """A registered user added to a project AND two of its components
    gets one notification email, not one per node.
    """
    # Project has components
    comp1 = NodeFactory(creator=self.creator, parent=self.project)
    comp2 = NodeFactory(creator=self.creator, parent=self.project)
    # A registered user is added to the project AND its components
    user = UserFactory()
    user_dict = {
        'id': user._id,
        'fullname': user.fullname,
        'email': user.username,
        'permission': 'write',
        'visible': True}
    payload = {
        'users': [user_dict],
        'node_ids': [comp1._primary_key, comp2._primary_key]
    }
    # send request
    url = self.project.api_url_for('project_contributors_post')
    # Sanity check; use assert_true for consistency with the rest of the
    # suite (a bare ``assert`` is stripped under ``python -O``).
    assert_true(self.project.can_edit(user=self.creator))
    self.app.post_json(url, payload, auth=self.creator.auth)
    # send_mail should only have been called once
    assert_equal(mock_send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_sends_email_if_user_not_contributor_on_parent_node(self, mock_send_mail):
    """A user added to the project and a sub-component (but NOT the
    intermediate component) is emailed once per node they joined.
    """
    # Project has a component with a sub-component
    component = NodeFactory(creator=self.creator, parent=self.project)
    sub_component = NodeFactory(creator=self.creator, parent=component)
    # A registered user is added to the project and the sub-component, but NOT the component
    user = UserFactory()
    user_dict = {
        'id': user._id,
        'fullname': user.fullname,
        'email': user.username,
        'permission': 'write',
        'visible': True}
    payload = {
        'users': [user_dict],
        'node_ids': [sub_component._primary_key]
    }
    # send request
    url = self.project.api_url_for('project_contributors_post')
    # Sanity check; use assert_true for consistency with the rest of the
    # suite (a bare ``assert`` is stripped under ``python -O``).
    assert_true(self.project.can_edit(user=self.creator))
    self.app.post_json(url, payload, auth=self.creator.auth)
    # send_mail is called for both the project and the sub-component
    assert_equal(mock_send_mail.call_count, 2)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_email_sent_when_unreg_user_is_added(self, send_mail):
    """Adding an unregistered contributor triggers a claim email to the
    address the referrer supplied.
    """
    name, email = fake.name(), fake.email()
    pseudouser = {
        'id': None,
        'registered': False,
        'fullname': name,
        'email': email,
        'permission': 'admin',
        'visible': True,
    }
    payload = {
        'users': [pseudouser],
        'node_ids': []
    }
    url = self.project.api_url_for('project_contributors_post')
    self.app.post_json(url, payload).maybe_follow()
    assert_true(send_mail.called)
    # BUG FIX: the original asserted ``send_mail.called_with(email=email)``,
    # which is not a Mock assertion method -- it auto-creates a child mock
    # and is always truthy.  Inspect the recorded call instead.
    args, kwargs = send_mail.call_args
    assert_in(email, list(args) + list(kwargs.values()))
@mock.patch('website.mails.send_mail')
def test_email_sent_when_reg_user_is_added(self, send_mail):
    """Adding a registered contributor sends the CONTRIBUTOR_ADDED email
    and records the send time for throttling.
    """
    contributor = UserFactory()
    contributors = [{
        'user': contributor,
        'visible': True,
        'permissions': ['read', 'write']
    }]
    project = ProjectFactory()
    project.add_contributors(contributors, auth=self.auth)
    project.save()
    assert_true(send_mail.called)
    send_mail.assert_called_with(
        contributor.username,
        mails.CONTRIBUTOR_ADDED,
        user=contributor,
        node=project,
        referrer_name=self.auth.user.fullname)
    # The throttle timestamp should be (approximately) "now".
    assert_almost_equal(contributor.contributor_added_email_records[project._id]['last_sent'], int(time.time()), delta=1)
@mock.patch('website.mails.send_mail')
def test_contributor_added_email_not_sent_to_unreg_user(self, send_mail):
    """Unregistered contributors get a claim email elsewhere, not the
    CONTRIBUTOR_ADDED notification.
    """
    unreg_user = UnregUserFactory()
    contributors = [{
        'user': unreg_user,
        'visible': True,
        'permissions': ['read', 'write']
    }]
    project = ProjectFactory()
    project.add_contributors(contributors, auth=Auth(self.project.creator))
    project.save()
    assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_forking_project_does_not_send_contributor_added_email(self, send_mail):
    """Forking copies contributors silently -- no "contributor added" email."""
    original = ProjectFactory()
    original.fork_node(auth=Auth(original.creator))
    assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_templating_project_does_not_send_contributor_added_email(self, send_mail):
    """Using a project as a template must not email its contributors."""
    template_source = ProjectFactory()
    template_source.use_as_template(auth=Auth(template_source.creator))
    assert_false(send_mail.called)
@mock.patch('website.archiver.tasks.archive')
@mock.patch('website.mails.send_mail')
def test_registering_project_does_not_send_contributor_added_email(self, send_mail, mock_archive):
    """Registration copies contributors without emailing them.

    The archiver task is mocked out so registration does not enqueue work.
    """
    project = ProjectFactory()
    project.register_node(get_default_metaschema(), Auth(user=project.creator), '', None)
    assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_does_not_send_before_throttle_expires(self, send_mail):
    """A second notification within the throttle window is suppressed."""
    contributor = UserFactory()
    project = ProjectFactory()
    auth = Auth(project.creator)
    notify_added_contributor(project, contributor, auth)
    assert_true(send_mail.called)
    # 2nd call does not send email because throttle period has not expired
    notify_added_contributor(project, contributor, auth)
    assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_sends_after_throttle_expires(self, send_mail):
    """Once the throttle window has passed, a second notification is sent.

    Uses a real ``time.sleep`` -- this test intentionally trades ~1s of
    wall-clock time for exercising the real throttle comparison.
    """
    throttle = 0.5  # seconds
    contributor = UserFactory()
    project = ProjectFactory()
    auth = Auth(project.creator)
    notify_added_contributor(project, contributor, auth, throttle=throttle)
    assert_true(send_mail.called)
    time.sleep(1)  # throttle period expires
    notify_added_contributor(project, contributor, auth, throttle=throttle)
    assert_equal(send_mail.call_count, 2)
def test_add_multiple_contributors_only_adds_one_log(self):
    """Adding several contributors in one request creates one log entry."""
    n_logs_pre = len(self.project.logs)
    reg_user = UserFactory()
    name = fake.name()
    pseudouser = {
        'id': None,
        'registered': False,
        'fullname': name,
        'email': fake.email(),
        'permission': 'write',
        'visible': True,
    }
    reg_dict = add_contributor_json(reg_user)
    reg_dict['permission'] = 'admin'
    reg_dict['visible'] = True
    payload = {
        'users': [reg_dict, pseudouser],
        'node_ids': []
    }
    url = self.project.api_url_for('project_contributors_post')
    self.app.post_json(url, payload).maybe_follow()
    self.project.reload()
    assert_equal(len(self.project.logs), n_logs_pre + 1)
def test_add_contribs_to_multiple_nodes(self):
    """Contributors listed in ``node_ids`` are added to each of those nodes."""
    child = NodeFactory(parent=self.project, creator=self.creator)
    n_contributors_pre = len(child.contributors)
    reg_user = UserFactory()
    name, email = fake.name(), fake.email()
    pseudouser = {
        'id': None,
        'registered': False,
        'fullname': name,
        'email': email,
        'permission': 'admin',
        'visible': True,
    }
    reg_dict = add_contributor_json(reg_user)
    reg_dict['permission'] = 'admin'
    reg_dict['visible'] = True
    payload = {
        'users': [reg_dict, pseudouser],
        'node_ids': [self.project._primary_key, child._primary_key]
    }
    url = "/api/v1/project/{0}/contributors/".format(self.project._id)
    self.app.post_json(url, payload).maybe_follow()
    child.reload()
    assert_equal(len(child.contributors),
                 n_contributors_pre + len(payload['users']))
def tearDown(self):
    """Disconnect the contributor_added signal handler hooked up for these tests."""
    super(TestAddingContributorViews, self).tearDown()
    contributor_added.disconnect(notify_added_contributor)
class TestUserInviteViews(OsfTestCase):
    """Views for inviting (unregistered) contributors to a project."""

    def setUp(self):
        super(TestUserInviteViews, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.invite_url = '/api/v1/project/{0}/invite_contributor/'.format(
            self.project._primary_key)

    def test_invite_contributor_post_if_not_in_db(self):
        """Inviting a brand-new email returns an unregistered contributor stub."""
        name, email = fake.name(), fake.email()
        res = self.app.post_json(
            self.invite_url,
            {'fullname': name, 'email': email},
            auth=self.user.auth,
        )
        contrib = res.json['contributor']
        assert_true(contrib['id'] is None)
        assert_equal(contrib['fullname'], name)
        assert_equal(contrib['email'], email)

    def test_invite_contributor_post_if_unreg_already_in_db(self):
        """An unreg user known from another project is reused for the invite."""
        # An unreg user is added to a different project
        name, email = fake.name(), fake.email()
        project2 = ProjectFactory()
        unreg_user = project2.add_unregistered_contributor(fullname=name, email=email,
                                                           auth=Auth(project2.creator))
        project2.save()
        res = self.app.post_json(self.invite_url,
                                 {'fullname': name, 'email': email}, auth=self.user.auth)
        expected = add_contributor_json(unreg_user)
        expected['fullname'] = name
        expected['email'] = email
        assert_equal(res.json['contributor'], expected)

    def test_invite_contributor_post_if_emaiL_already_registered(self):
        """Inviting an email that belongs to a registered user is a 400.

        NOTE(review): the "emaiL" typo is in the original method name; it is
        kept to avoid renaming a public test method.
        """
        reg_user = UserFactory()
        # Tries to invite a user that is already registered
        res = self.app.post_json(self.invite_url,
                                 {'fullname': fake.name(), 'email': reg_user.username},
                                 auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_invite_contributor_post_if_user_is_already_contributor(self):
        """Inviting an unreg user who is already a contributor is a 400."""
        unreg_user = self.project.add_unregistered_contributor(
            fullname=fake.name(), email=fake.email(),
            auth=Auth(self.project.creator)
        )
        self.project.save()
        # Tries to invite unreg user that is already a contributor
        res = self.app.post_json(self.invite_url,
                                 {'fullname': fake.name(), 'email': unreg_user.username},
                                 auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_invite_contributor_with_no_email(self):
        """An invite without an email still succeeds and returns the stub."""
        name = fake.name()
        res = self.app.post_json(self.invite_url,
                                 {'fullname': name, 'email': None}, auth=self.user.auth)
        assert_equal(res.status_code, http.OK)
        data = res.json
        assert_equal(data['status'], 'success')
        assert_equal(data['contributor']['fullname'], name)
        assert_true(data['contributor']['email'] is None)
        assert_false(data['contributor']['registered'])

    def test_invite_contributor_requires_fullname(self):
        """An invite with an empty fullname is rejected with a 400."""
        res = self.app.post_json(self.invite_url,
                                 {'email': 'brian@queen.com', 'fullname': ''}, auth=self.user.auth,
                                 expect_errors=True)
        assert_equal(res.status_code, http.BAD_REQUEST)

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_send_claim_email_to_given_email(self, send_mail):
        """When the claim email matches the given email, an INVITE mail is
        sent to that address.
        """
        project = ProjectFactory()
        given_email = fake.email()
        unreg_user = project.add_unregistered_contributor(
            fullname=fake.name(),
            email=given_email,
            auth=Auth(project.creator),
        )
        project.save()
        send_claim_email(email=given_email, user=unreg_user, node=project)
        assert_true(send_mail.called)
        # BUG FIX: the original asserted ``send_mail.called_with(...)``, which
        # is not a Mock assertion -- it creates a child mock and is always
        # truthy.  Check the recorded call's arguments instead.
        args, kwargs = send_mail.call_args
        assert_in(given_email, list(args) + list(kwargs.values()))
        assert_in(mails.INVITE, list(args) + list(kwargs.values()))

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_send_claim_email_to_referrer(self, send_mail):
        """When the claim email differs from the given one, the referrer is
        asked to forward the invite.
        """
        project = ProjectFactory()
        referrer = project.creator
        given_email, real_email = fake.email(), fake.email()
        unreg_user = project.add_unregistered_contributor(fullname=fake.name(),
                                                          email=given_email, auth=Auth(
                                                              referrer)
                                                          )
        project.save()
        send_claim_email(email=real_email, user=unreg_user, node=project)
        assert_true(send_mail.called)
        # email was sent to referrer
        send_mail.assert_called_with(
            referrer.username,
            mails.FORWARD_INVITE,
            user=unreg_user,
            referrer=referrer,
            claim_url=unreg_user.get_claim_url(project._id, external=True),
            email=real_email.lower().strip(),
            fullname=unreg_user.get_unclaimed_record(project._id)['name'],
            node=project
        )

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_send_claim_email_before_throttle_expires(self, send_mail):
        """A second claim email inside the throttle window raises HTTPError."""
        project = ProjectFactory()
        given_email = fake.email()
        unreg_user = project.add_unregistered_contributor(
            fullname=fake.name(),
            email=given_email,
            auth=Auth(project.creator),
        )
        project.save()
        send_claim_email(email=fake.email(), user=unreg_user, node=project)
        send_mail.reset_mock()
        # 2nd call raises error because throttle hasn't expired
        with assert_raises(HTTPError):
            send_claim_email(email=fake.email(), user=unreg_user, node=project)
        assert_false(send_mail.called)
class TestClaimViews(OsfTestCase):
    """Claiming of unregistered contributor accounts."""

    def setUp(self):
        super(TestClaimViews, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)
        self.given_name = fake.name()
        self.given_email = fake.email()
        # Unregistered contributor whose record the tests claim.
        self.user = self.project.add_unregistered_contributor(
            fullname=self.given_name,
            email=self.given_email,
            auth=Auth(user=self.referrer)
        )
        self.project.save()

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_claim_user_post_with_registered_user_id(self, send_mail):
        """Claiming via a registered user's id emails referrer and claimer."""
        # registered user who is attempting to claim the unclaimed contributor
        reg_user = UserFactory()
        payload = {
            # pk of unreg user record
            'pk': self.user._primary_key,
            'claimerId': reg_user._primary_key
        }
        url = '/api/v1/user/{uid}/{pid}/claim/email/'.format(
            uid=self.user._primary_key,
            pid=self.project._primary_key,
        )
        res = self.app.post_json(url, payload)
        # mail was sent
        assert_equal(send_mail.call_count, 2)
        # ... to the correct address
        referrer_call = send_mail.call_args_list[0]
        claimer_call = send_mail.call_args_list[1]
        args, _ = referrer_call
        assert_equal(args[0], self.referrer.username)
        args, _ = claimer_call
        assert_equal(args[0], reg_user.username)
        # view returns the correct JSON
        assert_equal(res.json, {
            'status': 'success',
            'email': reg_user.username,
            'fullname': self.given_name,
        })

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_send_claim_registered_email(self, mock_send_mail):
        """send_claim_registered_email mails the referrer first, then the claimer."""
        reg_user = UserFactory()
        send_claim_registered_email(
            claimer=reg_user,
            unreg_user=self.user,
            node=self.project
        )
        assert_equal(mock_send_mail.call_count, 2)
        first_call_args = mock_send_mail.call_args_list[0][0]
        assert_equal(first_call_args[0], self.referrer.username)
        second_call_args = mock_send_mail.call_args_list[1][0]
        assert_equal(second_call_args[0], reg_user.username)

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_send_claim_registered_email_before_throttle_expires(self, mock_send_mail):
        """A second registered-claim email inside the throttle window raises."""
        reg_user = UserFactory()
        send_claim_registered_email(
            claimer=reg_user,
            unreg_user=self.user,
            node=self.project,
        )
        mock_send_mail.reset_mock()
        # second call raises error because it was called before throttle period
        with assert_raises(HTTPError):
            send_claim_registered_email(
                claimer=reg_user,
                unreg_user=self.user,
                node=self.project,
            )
        assert_false(mock_send_mail.called)

    @mock.patch('website.project.views.contributor.send_claim_registered_email')
    def test_claim_user_post_with_email_already_registered_sends_correct_email(
            self, send_claim_registered_email):
        """Claiming with an already-registered email goes through the
        registered-claim email path."""
        reg_user = UserFactory()
        payload = {
            'value': reg_user.username,
            'pk': self.user._primary_key
        }
        url = self.project.api_url_for('claim_user_post', uid=self.user._id)
        self.app.post_json(url, payload)
        assert_true(send_claim_registered_email.called)

    def test_user_with_removed_unclaimed_url_claiming(self):
        """Tests that when an unclaimed user is removed from a project, the
        unregistered user object does not retain the token.
        """
        self.project.remove_contributor(self.user, Auth(user=self.referrer))
        assert_not_in(
            self.project._primary_key,
            self.user.unclaimed_records.keys()
        )

    def test_user_with_claim_url_cannot_claim_twice(self):
        """Tests that when an unclaimed user is replaced on a project with a
        claimed user, the unregistered user object does not retain the token.
        """
        reg_user = AuthUserFactory()
        self.project.replace_contributor(self.user, reg_user)
        assert_not_in(
            self.project._primary_key,
            self.user.unclaimed_records.keys()
        )

    def test_claim_user_form_redirects_to_password_confirm_page_if_user_is_logged_in(self):
        """A logged-in user visiting a claim URL is redirected to the
        password-confirmation page for registered claimers."""
        reg_user = AuthUserFactory()
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.get(url, auth=reg_user.auth)
        assert_equal(res.status_code, 302)
        res = res.follow(auth=reg_user.auth)
        token = self.user.get_unclaimed_record(self.project._primary_key)['token']
        expected = self.project.web_url_for(
            'claim_user_registered',
            uid=self.user._id,
            token=token,
        )
        assert_equal(res.request.path, expected)

    def test_get_valid_form(self):
        """A valid claim URL renders the claim form with a 200."""
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.get(url).maybe_follow()
        assert_equal(res.status_code, 200)

    def test_invalid_claim_form_redirects_to_register_page(self):
        """A claim URL with a bad token redirects to the login page."""
        uid = self.user._primary_key
        pid = self.project._primary_key
        url = '/user/{uid}/{pid}/claim/?token=badtoken'.format(**locals())
        res = self.app.get(url, expect_errors=True).maybe_follow()
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, web_url_for('auth_login'))

    def test_posting_to_claim_form_with_valid_data(self):
        """Posting valid credentials registers the user and clears the record."""
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.post(url, {
            'username': self.user.username,
            'password': 'killerqueen',
            'password2': 'killerqueen'
        }).maybe_follow()
        assert_equal(res.status_code, 200)
        self.user.reload()
        assert_true(self.user.is_registered)
        assert_true(self.user.is_active)
        assert_not_in(self.project._primary_key, self.user.unclaimed_records)

    def test_posting_to_claim_form_removes_all_unclaimed_data(self):
        """Claiming one record clears ALL of the user's unclaimed records."""
        # user has multiple unclaimed records
        p2 = ProjectFactory(creator=self.referrer)
        self.user.add_unclaimed_record(node=p2, referrer=self.referrer,
                                       given_name=fake.name())
        self.user.save()
        assert_true(len(self.user.unclaimed_records.keys()) > 1)  # sanity check
        url = self.user.get_claim_url(self.project._primary_key)
        self.app.post(url, {
            'username': self.given_email,
            'password': 'bohemianrhap',
            'password2': 'bohemianrhap'
        })
        self.user.reload()
        assert_equal(self.user.unclaimed_records, {})

    def test_posting_to_claim_form_sets_fullname_to_given_name(self):
        """Claiming adopts the name the referrer gave, including CSL parts."""
        # User is created with a full name
        original_name = fake.name()
        unreg = UnregUserFactory(fullname=original_name)
        # User invited with a different name
        different_name = fake.name()
        new_user = self.project.add_unregistered_contributor(
            email=unreg.username,
            fullname=different_name,
            auth=Auth(self.project.creator),
        )
        self.project.save()
        # Goes to claim url
        claim_url = new_user.get_claim_url(self.project._id)
        self.app.post(claim_url, {
            'username': unreg.username,
            'password': 'killerqueen', 'password2': 'killerqueen'
        })
        unreg.reload()
        # Full name was set correctly
        assert_equal(unreg.fullname, different_name)
        # CSL names were set correctly
        parsed_name = impute_names_model(different_name)
        assert_equal(unreg.given_name, parsed_name['given_name'])
        assert_equal(unreg.family_name, parsed_name['family_name'])

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_claim_user_post_returns_fullname(self, send_mail):
        """The claim-email endpoint returns the unclaimed fullname and mails
        the given address."""
        url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
                                                         self.project._primary_key)
        res = self.app.post_json(url,
                                 {'value': self.given_email,
                                  'pk': self.user._primary_key},
                                 auth=self.referrer.auth)
        assert_equal(res.json['fullname'], self.given_name)
        assert_true(send_mail.called)
        # BUG FIX: ``send_mail.called_with(...)`` is always truthy (it just
        # creates a child mock); inspect the recorded call instead.
        args, kwargs = send_mail.call_args
        assert_in(self.given_email, list(args) + list(kwargs.values()))

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_claim_user_post_if_email_is_different_from_given_email(self, send_mail):
        """If the claimer supplies a different email, both that address and
        the referrer are notified."""
        email = fake.email()  # email that is different from the one the referrer gave
        url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
                                                         self.project._primary_key)
        self.app.post_json(url,
                           {'value': email, 'pk': self.user._primary_key}
                           )
        assert_true(send_mail.called)
        assert_equal(send_mail.call_count, 2)
        # BUG FIX: ``mock_calls[i].called_with(...)`` is not an assertion and
        # always evaluates truthy; check each recorded call's arguments.
        invited_args, invited_kwargs = send_mail.call_args_list[0]
        assert_in(email, list(invited_args) + list(invited_kwargs.values()))
        referrer_args, referrer_kwargs = send_mail.call_args_list[1]
        assert_in(self.given_email,
                  list(referrer_args) + list(referrer_kwargs.values()))

    def test_claim_url_with_bad_token_returns_400(self):
        """A bad token on the registered-claim page yields a 400."""
        url = self.project.web_url_for(
            'claim_user_registered',
            uid=self.user._id,
            token='badtoken',
        )
        # BUG FIX: ``expect_errors`` is a boolean flag; the original passed
        # ``400`` (truthy, so it only worked by accident).
        res = self.app.get(url, auth=self.referrer.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_cannot_claim_user_with_user_who_is_already_contributor(self):
        """An existing contributor cannot claim another contributor's record."""
        # user who is already a contributor to the project
        contrib = AuthUserFactory()
        self.project.add_contributor(contrib, auth=Auth(self.project.creator))
        self.project.save()
        # Claiming user goes to claim url, but contrib is already logged in
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.get(
            url,
            auth=contrib.auth,
        ).follow(
            auth=contrib.auth,
            expect_errors=True,
        )
        # Response is a 400
        assert_equal(res.status_code, 400)
class TestWatchViews(OsfTestCase):
    """Watch/unwatch/togglewatch endpoints and the watched-logs feed."""

    def setUp(self):
        super(TestWatchViews, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.auth = self.user.auth  # used for requests auth
        # A public project
        self.project = ProjectFactory(is_public=True)
        self.project.save()
        # Manually reset log date to 100 days ago so it won't show up in feed
        self.project.logs[0].date = dt.datetime.utcnow() - dt.timedelta(days=100)
        self.project.logs[0].save()
        # A log added now
        self.last_log = self.project.add_log(
            NodeLog.TAG_ADDED,
            params={'node': self.project._primary_key},
            auth=self.consolidate_auth,
            log_date=dt.datetime.utcnow(),
            save=True,
        )
        # Clear watched list
        self.user.watched = []
        self.user.save()

    def test_watching_a_project_appends_to_users_watched_list(self):
        """POST /watch/ adds a watch config (with digest flag) to the user."""
        n_watched_then = len(self.user.watched)
        url = '/api/v1/project/{0}/watch/'.format(self.project._id)
        res = self.app.post_json(url,
                                 params={"digest": True},
                                 auth=self.auth)
        assert_equal(res.json['watchCount'], 1)
        self.user.reload()
        n_watched_now = len(self.user.watched)
        assert_equal(res.status_code, 200)
        assert_equal(n_watched_now, n_watched_then + 1)
        assert_true(self.user.watched[-1].digest)

    def test_watching_project_twice_returns_400(self):
        """Watching an already-watched node is rejected with a 400."""
        url = "/api/v1/project/{0}/watch/".format(self.project._id)
        res = self.app.post_json(url,
                                 params={},
                                 auth=self.auth)
        assert_equal(res.status_code, 200)
        # User tries to watch a node she's already watching
        res2 = self.app.post_json(url,
                                  params={},
                                  auth=self.auth,
                                  expect_errors=True)
        assert_equal(res2.status_code, http.BAD_REQUEST)

    def test_unwatching_a_project_removes_from_watched_list(self):
        """POST /unwatch/ removes the node from the user's watched list."""
        # The user has already watched a project
        watch_config = WatchConfigFactory(node=self.project)
        self.user.watch(watch_config)
        self.user.save()
        n_watched_then = len(self.user.watched)
        url = '/api/v1/project/{0}/unwatch/'.format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.user.reload()
        n_watched_now = len(self.user.watched)
        assert_equal(res.status_code, 200)
        assert_equal(n_watched_now, n_watched_then - 1)
        assert_false(self.user.is_watching(self.project))

    def test_toggle_watch(self):
        """Toggling an unwatched project turns the watch on."""
        # The user is not watching project
        assert_false(self.user.is_watching(self.project))
        url = "/api/v1/project/{0}/togglewatch/".format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        # The response json has a watchcount and watched property
        assert_equal(res.json['watchCount'], 1)
        assert_true(res.json['watched'])
        assert_equal(res.status_code, 200)
        self.user.reload()
        # The user is now watching the project
        assert_true(res.json['watched'])
        assert_true(self.user.is_watching(self.project))

    def test_toggle_watch_node(self):
        """Toggling works on a sub-node URL as well."""
        # The project has a public sub-node
        node = NodeFactory(creator=self.user, parent=self.project, is_public=True)
        url = "/api/v1/project/{}/node/{}/togglewatch/".format(self.project._id,
                                                               node._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        assert_equal(res.status_code, 200)
        self.user.reload()
        # The user is now watching the sub-node
        assert_true(res.json['watched'])
        assert_true(self.user.is_watching(node))

    def test_get_watched_logs(self):
        """The watched-logs feed paginates: first page holds 10 of 13 logs."""
        project = ProjectFactory()
        # Add some logs
        for _ in range(12):
            project.add_log('file_added', params={'node': project._id}, auth=self.consolidate_auth)
        project.save()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for("watched_logs_get")
        res = self.app.get(url, auth=self.auth)
        assert_equal(len(res.json['logs']), 10)
        # 1 project create log then 12 generated logs
        assert_equal(res.json['total'], 12 + 1)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 2)
        assert_equal(res.json['logs'][0]['action'], 'file_added')

    def test_get_more_watched_logs(self):
        """Page 1 of the feed holds the remaining 3 logs."""
        project = ProjectFactory()
        # Add some logs
        for _ in range(12):
            project.add_log('file_added', params={'node': project._id}, auth=self.consolidate_auth)
        project.save()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for("watched_logs_get")
        page = 1
        res = self.app.get(url, {'page': page}, auth=self.auth)
        assert_equal(len(res.json['logs']), 3)
        # 1 project create log then 12 generated logs
        assert_equal(res.json['total'], 12 + 1)
        assert_equal(res.json['page'], page)
        assert_equal(res.json['pages'], 2)
        assert_equal(res.json['logs'][0]['action'], 'file_added')

    def test_get_more_watched_logs_invalid_page(self):
        """A non-integer ``page`` parameter is rejected with a 400."""
        project = ProjectFactory()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for("watched_logs_get")
        invalid_page = 'invalid page'
        res = self.app.get(
            url, {'page': invalid_page}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )

    def test_get_more_watched_logs_invalid_size(self):
        """A non-integer ``size`` parameter is rejected with a 400."""
        project = ProjectFactory()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for("watched_logs_get")
        invalid_size = 'invalid size'
        res = self.app.get(
            url, {'size': invalid_size}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "size".'
        )
class TestPointerViews(OsfTestCase):
def setUp(self):
    """Create a user and a project owned by that user."""
    super(TestPointerViews, self).setUp()
    self.user = AuthUserFactory()
    self.consolidate_auth = Auth(user=self.user)
    self.project = ProjectFactory(creator=self.user)
def _make_pointer_only_user_can_see(self, user, project, save=False):
    """Add to ``project`` a pointer to a private node only ``user`` can see."""
    node = ProjectFactory(creator=user)
    project.add_pointer(node, auth=Auth(user=user), save=save)
def test_pointer_list_write_contributor_can_remove_private_component_entry(self):
    """Ensure that write contributors see the button to delete a pointer,
    even if they cannot see what it is pointing at"""
    url = web_url_for('view_project', pid=self.project._id)
    user2 = AuthUserFactory()
    self.project.add_contributor(user2,
                                 auth=Auth(self.project.creator),
                                 permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS)
    self._make_pointer_only_user_can_see(user2, self.project)
    self.project.save()
    res = self.app.get(url, auth=self.user.auth).maybe_follow()
    assert_equal(res.status_code, 200)
    # "Private Link" entries should still carry the remove-pointer icon.
    has_controls = res.lxml.xpath('//li[@node_reference]/p[starts-with(normalize-space(text()), "Private Link")]//i[contains(@class, "remove-pointer")]')
    assert_true(has_controls)
def test_pointer_list_write_contributor_can_remove_public_component_entry(self):
    """Write contributors see a remove icon on every pointer they can view."""
    url = web_url_for('view_project', pid=self.project._id)
    for i in xrange(3):
        self.project.add_pointer(ProjectFactory(creator=self.user),
                                 auth=Auth(user=self.user))
    self.project.save()
    res = self.app.get(url, auth=self.user.auth).maybe_follow()
    assert_equal(res.status_code, 200)
    # One remove-pointer icon per pointer added above.
    has_controls = res.lxml.xpath(
        '//li[@node_reference]//i[contains(@class, "remove-pointer")]')
    assert_equal(len(has_controls), 3)
def test_pointer_list_read_contributor_cannot_remove_private_component_entry(self):
    """Read-only contributors see the private pointer but no remove icon."""
    url = web_url_for('view_project', pid=self.project._id)
    user2 = AuthUserFactory()
    self.project.add_contributor(user2,
                                 auth=Auth(self.project.creator),
                                 permissions=[permissions.READ])
    self._make_pointer_only_user_can_see(user2, self.project)
    self.project.save()
    res = self.app.get(url, auth=user2.auth).maybe_follow()
    assert_equal(res.status_code, 200)
    pointer_nodes = res.lxml.xpath('//li[@node_reference]')
    has_controls = res.lxml.xpath('//li[@node_reference]/p[starts-with(normalize-space(text()), "Private Link")]//i[contains(@class, "remove-pointer")]')
    assert_equal(len(pointer_nodes), 1)
    assert_false(has_controls)
def test_pointer_list_read_contributor_cannot_remove_public_component_entry(self):
    """Read-only contributors see public pointers without remove icons."""
    url = web_url_for('view_project', pid=self.project._id)
    self.project.add_pointer(ProjectFactory(creator=self.user),
                             auth=Auth(user=self.user))
    user2 = AuthUserFactory()
    self.project.add_contributor(user2,
                                 auth=Auth(self.project.creator),
                                 permissions=[permissions.READ])
    self.project.save()
    res = self.app.get(url, auth=user2.auth).maybe_follow()
    assert_equal(res.status_code, 200)
    pointer_nodes = res.lxml.xpath('//li[@node_reference]')
    has_controls = res.lxml.xpath(
        '//li[@node_reference]//i[contains(@class, "remove-pointer")]')
    assert_equal(len(pointer_nodes), 1)
    assert_equal(len(has_controls), 0)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109
def test_get_pointed_excludes_folders(self):
    """get_pointed lists projects that point here, omitting organizer collections."""
    pointer_project = ProjectFactory(is_public=True)  # project that points to another project
    pointed_project = ProjectFactory(creator=self.user)  # project that other project points to
    pointer_project.add_pointer(pointed_project, Auth(pointer_project.creator), save=True)
    # Project is in an organizer collection
    collection = CollectionFactory(creator=pointed_project.creator)
    collection.add_pointer(pointed_project, Auth(pointed_project.creator), save=True)
    url = pointed_project.api_url_for('get_pointed')
    res = self.app.get(url, auth=self.user.auth)
    assert_equal(res.status_code, 200)
    # pointer_project's id is included in response, but folder's id is not
    pointer_ids = [each['id'] for each in res.json['pointed']]
    assert_in(pointer_project._id, pointer_ids)
    assert_not_in(collection._id, pointer_ids)
def test_add_pointers(self):
    """POSTing several nodeIds adds one pointer per node."""
    url = self.project.api_url + 'pointer/'
    node_ids = [
        NodeFactory()._id
        for _ in range(5)
    ]
    self.app.post_json(
        url,
        {'nodeIds': node_ids},
        auth=self.user.auth,
    ).maybe_follow()
    self.project.reload()
    assert_equal(
        len(self.project.nodes),
        5
    )
def test_add_the_same_pointer_more_than_once(self):
    """Adding a duplicate pointer is rejected with a 400."""
    url = self.project.api_url + 'pointer/'
    double_node = NodeFactory()
    self.app.post_json(
        url,
        {'nodeIds': [double_node._id]},
        auth=self.user.auth,
    )
    res = self.app.post_json(
        url,
        {'nodeIds': [double_node._id]},
        auth=self.user.auth,
        expect_errors=True
    )
    assert_equal(res.status_code, 400)
def test_add_pointers_no_user_logg_in(self):
    """Adding pointers without authentication is a 401.

    NOTE(review): "logg" typo is in the original method name; kept to avoid
    renaming a public test method.
    """
    url = self.project.api_url_for('add_pointers')
    node_ids = [
        NodeFactory()._id
        for _ in range(5)
    ]
    res = self.app.post_json(
        url,
        {'nodeIds': node_ids},
        auth=None,
        expect_errors=True
    )
    assert_equal(res.status_code, 401)
def test_add_pointers_public_non_contributor(self):
    """The project owner can add a pointer to a public project they do not
    contribute to."""
    project2 = ProjectFactory()
    project2.set_privacy('public')
    project2.save()
    url = self.project.api_url_for('add_pointers')
    self.app.post_json(
        url,
        {'nodeIds': [project2._id]},
        auth=self.user.auth,
    ).maybe_follow()
    self.project.reload()
    assert_equal(
        len(self.project.nodes),
        1
    )
def test_add_pointers_contributor(self):
    """A (write) contributor can add pointers to the project."""
    user2 = AuthUserFactory()
    self.project.add_contributor(user2)
    self.project.save()
    url = self.project.api_url_for('add_pointers')
    node_ids = [
        NodeFactory()._id
        for _ in range(5)
    ]
    self.app.post_json(
        url,
        {'nodeIds': node_ids},
        auth=user2.auth,
    ).maybe_follow()
    self.project.reload()
    assert_equal(
        len(self.project.nodes),
        5
    )
def test_add_pointers_not_provided(self):
    """A POST body without ``nodeIds`` is rejected with a 400."""
    endpoint = self.project.api_url + 'pointer/'
    response = self.app.post_json(
        endpoint,
        {},
        auth=self.user.auth,
        expect_errors=True,
    )
    assert_equal(response.status_code, 400)
def test_move_pointers(self):
    """move_pointers removes a pointer from one node and adds it to another."""
    project_two = ProjectFactory(creator=self.user)
    url = api_url_for('move_pointers')
    node = NodeFactory()
    pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
    assert_equal(len(self.project.nodes), 1)
    assert_equal(len(project_two.nodes), 0)
    user_auth = self.user.auth
    move_request = \
        {
            'fromNodeId': self.project._id,
            'toNodeId': project_two._id,
            'pointerIds': [pointer.node._id],
        }
    self.app.post_json(
        url,
        move_request,
        auth=user_auth,
    ).maybe_follow()
    self.project.reload()
    project_two.reload()
    assert_equal(len(self.project.nodes), 0)
    assert_equal(len(project_two.nodes), 1)
def test_remove_pointer(self):
    """DELETEing a pointerId removes the pointer from the project."""
    url = self.project.api_url + 'pointer/'
    node = NodeFactory()
    pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
    self.app.delete_json(
        url,
        {'pointerId': pointer._id},
        auth=self.user.auth,
    )
    self.project.reload()
    assert_equal(
        len(self.project.nodes),
        0
    )
def test_remove_pointer_not_provided(self):
    """A DELETE body without ``pointerId`` is rejected with a 400."""
    endpoint = self.project.api_url + 'pointer/'
    response = self.app.delete_json(
        endpoint,
        {},
        auth=self.user.auth,
        expect_errors=True,
    )
    assert_equal(response.status_code, 400)
def test_remove_pointer_not_found(self):
    """A null pointerId is rejected with a 400."""
    url = self.project.api_url + 'pointer/'
    res = self.app.delete_json(
        url,
        {'pointerId': None},
        auth=self.user.auth,
        expect_errors=True
    )
    assert_equal(res.status_code, 400)
def test_remove_pointer_not_in_nodes(self):
    """Deleting a pointer that was never added to the project is a 400."""
    url = self.project.api_url + 'pointer/'
    node = NodeFactory()
    # Pointer exists but is not attached to self.project.
    pointer = Pointer(node=node)
    res = self.app.delete_json(
        url,
        {'pointerId': pointer._id},
        auth=self.user.auth,
        expect_errors=True
    )
    assert_equal(res.status_code, 400)
def test_fork_pointer(self):
    """Forking a pointer to a node the user owns succeeds.

    NOTE(review): no assertion is made on the response; this only verifies
    that the request does not error out.
    """
    url = self.project.api_url + 'pointer/fork/'
    node = NodeFactory(creator=self.user)
    pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
    self.app.post_json(
        url,
        {'pointerId': pointer._id},
        auth=self.user.auth
    )
def test_fork_pointer_not_provided(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(url, {}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_found(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(
url,
{'pointerId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/fork/'
node = NodeFactory()
pointer = Pointer(node=node)
res = self.app.post_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_before_register_with_pointer(self):
"Assert that link warning appears in before register callback."
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 1)
def test_before_fork_with_pointer(self):
"""Assert that link warning appears in before fork callback."""
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your registration' in prompt
]
assert_equal(len(prompts), 1)
def test_before_register_no_pointer(self):
"""Assert that link warning does not appear in before register callback."""
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 0)
def test_before_fork_no_pointer(self):
"""Assert that link warning does not appear in before fork callback."""
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your registration' in prompt
]
assert_equal(len(prompts), 0)
def test_get_pointed(self):
pointing_node = ProjectFactory(creator=self.user)
pointing_node.add_pointer(self.project, auth=Auth(self.user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], pointing_node.url)
assert_equal(pointed[0]['title'], pointing_node.title)
assert_equal(pointed[0]['authorShort'], abbrev_authors(pointing_node))
def test_get_pointed_private(self):
secret_user = UserFactory()
pointing_node = ProjectFactory(creator=secret_user)
pointing_node.add_pointer(self.project, auth=Auth(secret_user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], None)
assert_equal(pointed[0]['title'], 'Private Component')
assert_equal(pointed[0]['authorShort'], 'Private Author(s)')
class TestPublicViews(OsfTestCase):
    """Smoke tests for pages that require no authentication."""

    def test_explore(self):
        response = self.app.get('/explore/').maybe_follow()
        assert_equal(response.status_code, 200)

    def test_forgot_password_get(self):
        response = self.app.get(web_url_for('forgot_password_get'))
        assert_equal(response.status_code, 200)
        assert_in('Forgot Password', response.body)
class TestAuthViews(OsfTestCase):
    """Tests for registration, email confirmation, resend, and merge views.

    Fixes applied in review:
    - ``assert_true(mock.called_with(...))`` replaced with a real inspection of
      ``mock.call_args`` (``called_with`` is not an assertion method -- Mock
      auto-creates the attribute and returns a truthy child mock, so the old
      checks could never fail).
    - Stray third ``str.format`` argument dropped where the URL template has
      only two placeholders (it was silently ignored).
    """

    def setUp(self):
        super(TestAuthViews, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth

    def test_merge_user(self):
        # Merging a duplicate account marks the duplicate as merged.
        dupe = UserFactory(
            username="copy@cat.com",
            emails=['copy@cat.com']
        )
        dupe.set_password("copycat")
        dupe.save()
        url = "/api/v1/user/merge/"
        self.app.post_json(
            url,
            {
                "merged_username": "copy@cat.com",
                "merged_password": "copycat"
            },
            auth=self.auth,
        )
        self.user.reload()
        dupe.reload()
        assert_true(dupe.is_merged)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_sends_confirm_email(self, send_mail):
        url = '/register/'
        self.app.post(url, {
            'register-fullname': 'Freddie Mercury',
            'register-username': 'fred@queen.com',
            'register-password': 'killerqueen',
            'register-username2': 'fred@queen.com',
            'register-password2': 'killerqueen',
        })
        assert_true(send_mail.called)
        # Fix: verify the recorded call actually targeted the new address
        # (works whether the address was passed positionally or by keyword).
        call_args, call_kwargs = send_mail.call_args
        assert_in('fred@queen.com', list(call_args) + list(call_kwargs.values()))

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_ok(self, _):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': email,
                'password': password,
            }
        )
        user = User.find_one(Q('username', 'eq', email))
        assert_equal(user.fullname, name)

    # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2902
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_email_case_insensitive(self, _):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': str(email).upper(),
                'password': password,
            }
        )
        user = User.find_one(Q('username', 'eq', email))
        assert_equal(user.fullname, name)

    @mock.patch('framework.auth.views.send_confirm_email')
    def test_register_scrubs_username(self, _):
        # HTML tags are stripped from the full name; text content is kept.
        url = api_url_for('register_user')
        name = "<i>Eunice</i> O' \"Cornwallis\"<script type='text/javascript' src='http://www.cornify.com/js/cornify.js'></script><script type='text/javascript'>cornify_add()</script>"
        email, password = fake.email(), 'underpressure'
        res = self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': email,
                'password': password,
            }
        )
        expected_scrub_username = "Eunice O' \"Cornwallis\"cornify_add()"
        user = User.find_one(Q('username', 'eq', email))
        assert_equal(res.status_code, http.OK)
        assert_equal(user.fullname, expected_scrub_username)

    def test_register_email_mismatch(self):
        # Mismatched confirmation email must not create a user.
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        res = self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': email + 'lol',
                'password': password,
            },
            expect_errors=True,
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        users = User.find(Q('username', 'eq', email))
        assert_equal(users.count(), 0)

    def test_register_after_being_invited_as_unreg_contributor(self):
        # Regression test for:
        # https://github.com/CenterForOpenScience/openscienceframework.org/issues/861
        # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1021
        # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1026
        # A user is invited as an unregistered contributor
        project = ProjectFactory()
        name, email = fake.name(), fake.email()
        project.add_unregistered_contributor(fullname=name, email=email, auth=Auth(project.creator))
        project.save()
        # The new, unregistered user
        new_user = User.find_one(Q('username', 'eq', email))
        # Instead of following the invitation link, they register at the regular
        # registration page.  They use a different name when they register,
        # but the same email.
        real_name = fake.name()
        password = 'myprecious'
        url = api_url_for('register_user')
        payload = {
            'fullName': real_name,
            'email1': email,
            'email2': email,
            'password': password,
        }
        # Send registration request
        self.app.post_json(url, payload)
        new_user.reload()
        # New user confirms by following confirmation link
        confirm_url = new_user.get_confirmation_url(email, external=False)
        self.app.get(confirm_url)
        new_user.reload()
        # Password and fullname should be updated
        assert_true(new_user.is_confirmed)
        assert_true(new_user.check_password(password))
        assert_equal(new_user.fullname, real_name)

    @mock.patch('framework.auth.views.send_confirm_email')
    def test_register_sends_user_registered_signal(self, mock_send_confirm_email):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        with capture_signals() as mock_signals:
            self.app.post_json(
                url,
                {
                    'fullName': name,
                    'email1': email,
                    'email2': email,
                    'password': password,
                }
            )
        assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered,
                                                       auth.signals.unconfirmed_user_created]))
        assert_true(mock_send_confirm_email.called)

    @mock.patch('framework.auth.views.send_confirm_email')
    def test_register_post_sends_user_registered_signal(self, mock_send_confirm_email):
        url = web_url_for('auth_register_post')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        with capture_signals() as mock_signals:
            self.app.post(url, {
                'register-fullname': name,
                'register-username': email,
                'register-password': password,
                'register-username2': email,
                'register-password2': password
            })
        assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered,
                                                       auth.signals.unconfirmed_user_created]))
        assert_true(mock_send_confirm_email.called)

    def test_resend_confirmation_get(self):
        res = self.app.get('/resend/')
        assert_equal(res.status_code, 200)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_resend_confirmation(self, send_mail):
        email = 'test@example.com'
        token = self.user.add_unconfirmed_email(email)
        self.user.save()
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': False}
        self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth)
        assert_true(send_mail.called)
        # Fix: inspect the recorded call rather than the always-true
        # ``called_with`` attribute (see class docstring).
        call_args, call_kwargs = send_mail.call_args
        assert_in(email, list(call_args) + list(call_kwargs.values()))
        self.user.reload()
        # Resending must rotate the token and invalidate the old one.
        assert_not_equal(token, self.user.get_confirmation_token(email))
        with assert_raises(InvalidTokenError):
            self.user.get_unconfirmed_email_for_token(token)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_click_confirmation_email(self, send_mail):
        email = 'test@example.com'
        token = self.user.add_unconfirmed_email(email)
        self.user.save()
        self.user.reload()
        assert_equal(self.user.email_verifications[token]['confirmed'], False)
        # Fix: dropped a stray third format() argument -- the template has
        # only two placeholders, so ``self.user.username`` was ignored.
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
        res = self.app.get(url)
        self.user.reload()
        assert_equal(self.user.email_verifications[token]['confirmed'], True)
        assert_equal(res.status_code, 302)
        login_url = 'login?service'
        assert_in(login_url, res.body)

    def test_get_email_to_add_no_email(self):
        email_verifications = self.user.unconfirmed_email_info
        assert_equal(email_verifications, [])

    def test_get_unconfirmed_email(self):
        # An added-but-unclicked email does not appear in unconfirmed_email_info.
        email = 'test@example.com'
        self.user.add_unconfirmed_email(email)
        self.user.save()
        self.user.reload()
        email_verifications = self.user.unconfirmed_email_info
        assert_equal(email_verifications, [])

    def test_get_email_to_add(self):
        email = 'test@example.com'
        token = self.user.add_unconfirmed_email(email)
        self.user.save()
        self.user.reload()
        assert_equal(self.user.email_verifications[token]['confirmed'], False)
        # Fix: dropped a stray third format() argument (two placeholders only).
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
        self.app.get(url)
        self.user.reload()
        assert_equal(self.user.email_verifications[token]['confirmed'], True)
        email_verifications = self.user.unconfirmed_email_info
        assert_equal(email_verifications[0]['address'], 'test@example.com')

    def test_add_email(self):
        email = 'test@example.com'
        token = self.user.add_unconfirmed_email(email)
        self.user.save()
        self.user.reload()
        assert_equal(self.user.email_verifications[token]['confirmed'], False)
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
        self.app.get(url)
        self.user.reload()
        email_verifications = self.user.unconfirmed_email_info
        put_email_url = api_url_for('unconfirmed_email_add')
        res = self.app.put_json(put_email_url, email_verifications[0], auth=self.user.auth)
        self.user.reload()
        assert_equal(res.json_body['status'], 'success')
        assert_equal(self.user.emails[1], 'test@example.com')

    def test_remove_email(self):
        email = 'test@example.com'
        token = self.user.add_unconfirmed_email(email)
        self.user.save()
        self.user.reload()
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
        self.app.get(url)
        self.user.reload()
        email_verifications = self.user.unconfirmed_email_info
        remove_email_url = api_url_for('unconfirmed_email_remove')
        remove_res = self.app.delete_json(remove_email_url, email_verifications[0], auth=self.user.auth)
        self.user.reload()
        assert_equal(remove_res.json_body['status'], 'success')
        assert_equal(self.user.unconfirmed_email_info, [])

    def test_add_expired_email(self):
        # Do not return expired token and removes it from user.email_verifications
        email = 'test@example.com'
        token = self.user.add_unconfirmed_email(email)
        self.user.email_verifications[token]['expiration'] = dt.datetime.utcnow() - dt.timedelta(days=100)
        self.user.save()
        self.user.reload()
        assert_equal(self.user.email_verifications[token]['email'], email)
        self.user.clean_email_verifications(given_token=token)
        unconfirmed_emails = self.user.unconfirmed_email_info
        assert_equal(unconfirmed_emails, [])
        assert_equal(self.user.email_verifications, {})

    def test_clean_email_verifications(self):
        # Do not return bad token and removes it from user.email_verifications
        email = 'test@example.com'
        token = 'blahblahblah'
        self.user.email_verifications[token] = {'expiration': dt.datetime.utcnow() + dt.timedelta(days=1),
                                                'email': email,
                                                'confirmed': False}
        self.user.save()
        self.user.reload()
        assert_equal(self.user.email_verifications[token]['email'], email)
        self.user.clean_email_verifications(given_token=token)
        unconfirmed_emails = self.user.unconfirmed_email_info
        assert_equal(unconfirmed_emails, [])
        assert_equal(self.user.email_verifications, {})

    def test_add_invalid_email(self):
        # Adding an address containing control/non-characters must fail validation.
        email = u'\u0000\u0008\u000b\u000c\u000e\u001f\ufffe\uffffHello@yourmom.com'
        # illegal_str = u'\u0000\u0008\u000b\u000c\u000e\u001f\ufffe\uffffHello'
        # illegal_str += unichr(0xd800) + unichr(0xdbff) + ' World'
        # email = 'test@example.com'
        with assert_raises(ValidationError):
            self.user.add_unconfirmed_email(email)

    def test_add_email_merge(self):
        # Confirming an email owned by another account merges the accounts.
        email = "copy@cat.com"
        dupe = UserFactory(
            username=email,
            emails=[email]
        )
        dupe.save()
        token = self.user.add_unconfirmed_email(email)
        self.user.save()
        self.user.reload()
        assert_equal(self.user.email_verifications[token]['confirmed'], False)
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
        self.app.get(url)
        self.user.reload()
        email_verifications = self.user.unconfirmed_email_info
        put_email_url = api_url_for('unconfirmed_email_add')
        res = self.app.put_json(put_email_url, email_verifications[0], auth=self.user.auth)
        self.user.reload()
        assert_equal(res.json_body['status'], 'success')
        assert_equal(self.user.emails[1], 'copy@cat.com')

    def test_resend_confirmation_without_user_id(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': False}
        res = self.app.put_json(url, {'email': header}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'], '"id" is required')

    def test_resend_confirmation_without_email(self):
        url = api_url_for('resend_confirmation')
        res = self.app.put_json(url, {'id': self.user._id}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_resend_confirmation_not_work_for_primary_email(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': True, 'confirmed': False}
        res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        # 'Cannnot' (sic) matches the server-side message verbatim.
        assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')

    def test_resend_confirmation_not_work_for_confirmed_email(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': True}
        res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        # 'Cannnot' (sic) matches the server-side message verbatim.
        assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_resend_confirmation_does_not_send_before_throttle_expires(self, send_mail):
        email = 'test@example.com'
        self.user.save()
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': False}
        self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth)
        assert_true(send_mail.called)
        # 2nd call does not send email because throttle period has not expired
        res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_confirm_email_clears_unclaimed_records_and_revokes_token(self):
        unclaimed_user = UnconfirmedUserFactory()
        # unclaimed user has been invited to a project.
        referrer = UserFactory()
        project = ProjectFactory(creator=referrer)
        unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
        unclaimed_user.save()
        # sanity check
        assert_equal(len(unclaimed_user.email_verifications.keys()), 1)
        # user goes to email confirmation link
        token = unclaimed_user.get_confirmation_token(unclaimed_user.username)
        url = web_url_for('confirm_email_get', uid=unclaimed_user._id, token=token)
        res = self.app.get(url)
        assert_equal(res.status_code, 302)
        # unclaimed records and token are cleared
        unclaimed_user.reload()
        assert_equal(unclaimed_user.unclaimed_records, {})
        assert_equal(len(unclaimed_user.email_verifications.keys()), 0)

    def test_confirmation_link_registers_user(self):
        user = User.create_unconfirmed('brian@queen.com', 'bicycle123', 'Brian May')
        assert_false(user.is_registered)  # sanity check
        user.save()
        confirmation_url = user.get_confirmation_url('brian@queen.com', external=False)
        res = self.app.get(confirmation_url)
        assert_equal(res.status_code, 302, 'redirects to settings page')
        res = res.follow()
        user.reload()
        assert_true(user.is_registered)
# TODO: Use mock add-on
class TestAddonUserViews(OsfTestCase):
    """User add-on settings endpoint tests."""

    def setUp(self):
        super(TestAddonUserViews, self).setUp()
        self.user = AuthUserFactory()

    def _post_addon_settings(self, payload):
        # Shared helper: POST the user's add-on settings and follow redirects.
        return self.app.post_json(
            '/api/v1/settings/addons/',
            payload,
            auth=self.user.auth,
        ).maybe_follow()

    def test_choose_addons_add(self):
        """Add add-ons; assert that add-ons are attached to project.
        """
        self._post_addon_settings({'github': True})
        self.user.reload()
        assert_true(self.user.get_addon('github'))

    def test_choose_addons_remove(self):
        # Add, then delete, add-ons; assert that add-ons are not attached to
        # project.
        self._post_addon_settings({'github': True})
        self._post_addon_settings({'github': False})
        self.user.reload()
        assert_false(self.user.get_addon('github'))
class TestConfigureMailingListViews(OsfTestCase):
    """Mailing-list subscription endpoints (OSF help list + MailChimp webhooks)."""
    @classmethod
    def setUpClass(cls):
        # Force subscriptions on for the whole class; the original value is
        # restored in tearDownClass below.
        super(TestConfigureMailingListViews, cls).setUpClass()
        cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = True
    def test_user_unsubscribe_and_subscribe_help_mailing_list(self):
        # Round-trip: unsubscribe then resubscribe via the same endpoint.
        user = AuthUserFactory()
        url = api_url_for('user_choose_mailing_lists')
        payload = {settings.OSF_HELP_LIST: False}
        res = self.app.post_json(url, payload, auth=user.auth)
        user.reload()
        assert_false(user.osf_mailing_lists[settings.OSF_HELP_LIST])
        payload = {settings.OSF_HELP_LIST: True}
        res = self.app.post_json(url, payload, auth=user.auth)
        user.reload()
        assert_true(user.osf_mailing_lists[settings.OSF_HELP_LIST])
    def test_get_notifications(self):
        user = AuthUserFactory()
        # Python 2 idiom: dict.items() returns lists, so '+' concatenates them.
        mailing_lists = dict(user.osf_mailing_lists.items() + user.mailchimp_mailing_lists.items())
        url = api_url_for('user_notifications')
        res = self.app.get(url, auth=user.auth)
        assert_equal(mailing_lists, res.json['mailing_lists'])
    def test_osf_help_mails_subscribe(self):
        user = UserFactory()
        user.osf_mailing_lists[settings.OSF_HELP_LIST] = False
        user.save()
        update_osf_help_mails_subscription(user, True)
        assert_true(user.osf_mailing_lists[settings.OSF_HELP_LIST])
    def test_osf_help_mails_unsubscribe(self):
        user = UserFactory()
        user.osf_mailing_lists[settings.OSF_HELP_LIST] = True
        user.save()
        update_osf_help_mails_subscription(user, False)
        assert_false(user.osf_mailing_lists[settings.OSF_HELP_LIST])
    @unittest.skipIf(settings.USE_CELERY, 'Subscription must happen synchronously for this test')
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_user_choose_mailing_lists_updates_user_dict(self, mock_get_mailchimp_api):
        user = AuthUserFactory()
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        # NOTE(review): this stub uses key 'list_name' while the webhook tests
        # below use 'name' -- confirm which key mailchimp_utils expects.
        mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
        list_id = mailchimp_utils.get_list_id_from_name(list_name)
        payload = {settings.MAILCHIMP_GENERAL_LIST: True}
        url = api_url_for('user_choose_mailing_lists')
        res = self.app.post_json(url, payload, auth=user.auth)
        user.reload()
        # check user.mailing_lists is updated
        assert_true(user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST])
        assert_equal(
            user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST],
            payload[settings.MAILCHIMP_GENERAL_LIST]
        )
        # check that user is subscribed
        mock_client.lists.subscribe.assert_called_with(id=list_id,
                                                       email={'email': user.username},
                                                       merge_vars={
                                                           'fname': user.given_name,
                                                           'lname': user.family_name,
                                                       },
                                                       double_optin=False,
                                                       update_existing=True)
    def test_get_mailchimp_get_endpoint_returns_200(self):
        url = api_url_for('mailchimp_get_endpoint')
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_mailchimp_webhook_subscribe_action_does_not_change_user(self, mock_get_mailchimp_api):
        """ Test that 'subscribe' actions sent to the OSF via mailchimp
        webhooks update the OSF database.
        """
        # NOTE(review): the method name says 'does_not_change_user' but the
        # docstring and final assertion show the user IS updated -- the name
        # looks copy-pasted from the 'profile' test below.
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
        # user is not subscribed to a list
        user = AuthUserFactory()
        user.mailchimp_mailing_lists = {'OSF General': False}
        user.save()
        # user subscribes and webhook sends request to OSF
        data = {
            'type': 'subscribe',
            'data[list_id]': list_id,
            'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        res = self.app.post(url,
                            data,
                            content_type="application/x-www-form-urlencoded",
                            auth=user.auth)
        # user field is updated on the OSF
        user.reload()
        assert_true(user.mailchimp_mailing_lists[list_name])
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_mailchimp_webhook_profile_action_does_not_change_user(self, mock_get_mailchimp_api):
        """ Test that 'profile' actions sent to the OSF via mailchimp
        webhooks do not cause any database changes.
        """
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
        # user is subscribed to a list
        user = AuthUserFactory()
        user.mailchimp_mailing_lists = {'OSF General': True}
        user.save()
        # user hits subscribe again, which will update the user's existing info on mailchimp
        # webhook sends request (when configured to update on changes made through the API)
        data = {
            'type': 'profile',
            'data[list_id]': list_id,
            'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        res = self.app.post(url,
                            data,
                            content_type="application/x-www-form-urlencoded",
                            auth=user.auth)
        # user field does not change
        user.reload()
        assert_true(user.mailchimp_mailing_lists[list_name])
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_sync_data_from_mailchimp_unsubscribes_user(self, mock_get_mailchimp_api):
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
        # user is subscribed to a list
        user = AuthUserFactory()
        user.mailchimp_mailing_lists = {'OSF General': True}
        user.save()
        # user unsubscribes through mailchimp and webhook sends request
        data = {
            'type': 'unsubscribe',
            'data[list_id]': list_id,
            'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        res = self.app.post(url,
                            data,
                            content_type="application/x-www-form-urlencoded",
                            auth=user.auth)
        # user field is updated on the OSF
        user.reload()
        assert_false(user.mailchimp_mailing_lists[list_name])
    def test_sync_data_from_mailchimp_fails_without_secret_key(self):
        # Missing ?key=... query parameter must yield 401.
        user = AuthUserFactory()
        payload = {'values': {'type': 'unsubscribe',
                              'data': {'list_id': '12345',
                                       'email': 'freddie@cos.io'}}}
        url = api_url_for('sync_data_from_mailchimp')
        res = self.app.post_json(url, payload, auth=user.auth, expect_errors=True)
        assert_equal(res.status_code, http.UNAUTHORIZED)
    @classmethod
    def tearDownClass(cls):
        # Restore the setting captured in setUpClass.
        super(TestConfigureMailingListViews, cls).tearDownClass()
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
# TODO: Move to OSF Storage
class TestFileViews(OsfTestCase):
    """File-tree and grid-data endpoint tests."""

    def setUp(self):
        super(TestFileViews, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory.build(creator=self.user, is_public=True)
        self.project.add_contributor(self.user)
        self.project.save()

    def test_files_get(self):
        """The file-tree endpoint returns the project view plus tree assets."""
        res = self.app.get(
            self.project.api_url_for('collect_file_trees'),
            auth=self.user.auth,
        )
        expected = _view_project(self.project, auth=Auth(user=self.user))
        assert_equal(res.status_code, http.OK)
        assert_equal(res.json['node'], expected['node'])
        assert_in('tree_js', res.json)
        assert_in('tree_css', res.json)

    def test_grid_data(self):
        """Grid data matches the rubeus hgrid serialization of the project."""
        res = self.app.get(
            self.project.api_url_for('grid_data'),
            auth=self.user.auth,
        ).maybe_follow()
        assert_equal(res.status_code, http.OK)
        expected = rubeus.to_hgrid(self.project, auth=Auth(self.user))
        assert_equal(len(res.json['data']), len(expected))
class TestTagViews(OsfTestCase):
    """Project tag page tests (currently skipped)."""

    def setUp(self):
        super(TestTagViews, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)

    @unittest.skip('Tags endpoint disabled for now.')
    def test_tag_get_returns_200(self):
        response = self.app.get(web_url_for('project_tag', tag='foo'))
        assert_equal(response.status_code, 200)
@requires_search
class TestSearchViews(OsfTestCase):
    """Contributor/project search endpoint tests (requires a search backend).

    setUp seeds one project creator ('Robbie Williams'), one contributor
    ('Brian May'), and 12 'Freddie MercuryN' users; the pagination tests
    below depend on these counts.
    """
    def setUp(self):
        super(TestSearchViews, self).setUp()
        import website.search.search as search
        search.delete_all()
        self.project = ProjectFactory(creator=UserFactory(fullname='Robbie Williams'))
        self.contrib = UserFactory(fullname='Brian May')
        # 12 users match the query 'fr' -> 3 pages at the default size of 5.
        for i in range(0, 12):
            UserFactory(fullname='Freddie Mercury{}'.format(i))
    def tearDown(self):
        super(TestSearchViews, self).tearDown()
        import website.search.search as search
        search.delete_all()
    def test_search_contributor(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': self.contrib.fullname})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        assert_equal(len(result), 1)
        brian = result[0]
        assert_equal(brian['fullname'], self.contrib.fullname)
        assert_in('gravatar_url', brian)
        assert_equal(brian['registered'], self.contrib.is_registered)
        assert_equal(brian['active'], self.contrib.is_active)
    def test_search_pagination_default(self):
        # 12 matches at the default page size of 5 -> pages of 5, 5, and 2.
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr'})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(pages, 3)
        assert_equal(page, 0)
    def test_search_pagination_default_page_1(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr', 'page': 1})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(page, 1)
    def test_search_pagination_default_page_2(self):
        # The last page holds the remaining 2 of the 12 matches.
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr', 'page': 2})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        page = res.json['page']
        assert_equal(len(result), 2)
        assert_equal(page, 2)
    def test_search_pagination_smaller_pages(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr', 'size': 5})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(page, 0)
        assert_equal(pages, 3)
    def test_search_pagination_smaller_pages_page_2(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, })
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 2)
        assert_equal(page, 2)
        assert_equal(pages, 3)
    def test_search_projects(self):
        url = '/search/'
        res = self.app.get(url, {'q': self.project.title})
        assert_equal(res.status_code, 200)
class TestODMTitleSearch(OsfTestCase):
    """ Docs from original method:
    :arg term: The substring of the title.
    :arg category: Category of the node.
    :arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg includePublic: yes or no. Whether the projects listed should include public projects.
    :arg includeContributed: yes or no. Whether the search should include projects the current user has
        contributed to.
    :arg ignoreNode: a list of nodes that should not be included in the search.
    :return: a list of dictionaries of projects
    """

    def setUp(self):
        super(TestODMTitleSearch, self).setUp()
        self.user = AuthUserFactory()
        self.user_two = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user, title="foo")
        self.project_two = ProjectFactory(creator=self.user_two, title="bar")
        self.public_project = ProjectFactory(creator=self.user_two, is_public=True, title="baz")
        self.registration_project = RegistrationFactory(creator=self.user, title="qux")
        self.folder = CollectionFactory(creator=self.user, title="quux")
        self.dashboard = BookmarkCollectionFactory(creator=self.user, title="Dashboard")
        self.url = api_url_for('search_projects_by_title')

    def _search(self, params, status=200):
        # Helper: issue a title search as self.user; tolerate errors only
        # when a non-200 status is expected.
        return self.app.get(
            self.url,
            params,
            auth=self.user.auth,
            expect_errors=(status != 200),
        )

    def test_search_projects_by_title(self):
        """Exercise the include/exclude qualifiers against the fixtures."""
        # Bare term: own private project only.
        res = self._search({'term': self.project.title})
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # Public-only search finds the public project.
        res = self._search({
            'term': self.public_project.title,
            'includePublic': 'yes',
            'includeContributed': 'no',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # Contributed-only search finds the private project.
        res = self._search({
            'term': self.project.title,
            'includePublic': 'no',
            'includeContributed': 'yes',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # Excluding registrations still finds the plain project.
        res = self._search({
            'term': self.project.title,
            'includePublic': 'no',
            'includeContributed': 'yes',
            'isRegistration': 'no',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self._search({
            'term': self.project.title,
            'includePublic': 'yes',
            'includeContributed': 'yes',
            'isRegistration': 'either',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self._search({
            'term': self.public_project.title,
            'includePublic': 'yes',
            'includeContributed': 'yes',
            'isRegistration': 'either',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # 'either' matches the registration and its source project.
        res = self._search({
            'term': self.registration_project.title,
            'includePublic': 'yes',
            'includeContributed': 'yes',
            'isRegistration': 'either',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 2)
        res = self._search({
            'term': self.registration_project.title,
            'includePublic': 'yes',
            'includeContributed': 'yes',
            'isRegistration': 'no',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # Asking for folders is a 404; excluding them hides folder titles.
        res = self._search({
            'term': self.folder.title,
            'includePublic': 'yes',
            'includeContributed': 'yes',
            'isFolder': 'yes',
        }, status=404)
        assert_equal(res.status_code, 404)
        res = self._search({
            'term': self.folder.title,
            'includePublic': 'yes',
            'includeContributed': 'yes',
            'isFolder': 'no',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 0)
        res = self._search({
            'term': self.dashboard.title,
            'includePublic': 'yes',
            'includeContributed': 'yes',
            'isFolder': 'no',
        })
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 0)
        res = self._search({
            'term': self.dashboard.title,
            'includePublic': 'yes',
            'includeContributed': 'yes',
            'isFolder': 'yes',
        }, status=404)
        assert_equal(res.status_code, 404)
class TestReorderComponents(OsfTestCase):
    """Regression test for reordering child components when one of them
    is private (GitHub issue 489, linked below)."""

    def setUp(self):
        super(TestReorderComponents, self).setUp()
        self.creator = AuthUserFactory()
        self.contrib = AuthUserFactory()
        # Project is public
        self.project = ProjectFactory.create(creator=self.creator, is_public=True)
        self.project.add_contributor(self.contrib, auth=Auth(self.creator))
        # subcomponent that only creator can see
        self.public_component = NodeFactory(creator=self.creator, is_public=True)
        self.private_component = NodeFactory(creator=self.creator, is_public=False)
        self.project.nodes.append(self.public_component)
        self.project.nodes.append(self.private_component)
        self.project.save()

    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
    def test_reorder_components_with_private_component(self):
        # contrib tries to reorder components; the request must succeed (200)
        # even though the private component is not visible to him.
        payload = {
            'new_list': [
                '{0}:node'.format(self.private_component._primary_key),
                '{0}:node'.format(self.public_component._primary_key),
            ]
        }
        url = self.project.api_url_for('project_reorder_components')
        res = self.app.post_json(url, payload, auth=self.contrib.auth)
        assert_equal(res.status_code, 200)
class TestWikiWidgetViews(OsfTestCase):
    """Visibility of the wiki widget for projects with no wiki page or
    with an empty home wiki page."""

    def setUp(self):
        super(TestWikiWidgetViews, self).setUp()
        # project with no home wiki page
        self.project = ProjectFactory()
        self.read_only_contrib = AuthUserFactory()
        self.project.add_contributor(self.read_only_contrib, permissions='read')
        self.noncontributor = AuthUserFactory()
        # project with a home wiki page but no content
        self.project2 = ProjectFactory(creator=self.project.creator)
        self.project2.add_contributor(self.read_only_contrib, permissions='read')
        self.project2.update_node_wiki(name='home', content='', auth=Auth(self.project.creator))

    def test_show_wiki_for_contributors_when_no_wiki_or_content(self):
        # Write contributors see the widget so they can add content.
        assert_true(_should_show_wiki_widget(self.project, self.project.creator))
        assert_true(_should_show_wiki_widget(self.project2, self.project.creator))

    def test_show_wiki_is_false_for_read_contributors_when_no_wiki_or_content(self):
        assert_false(_should_show_wiki_widget(self.project, self.read_only_contrib))
        assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib))

    def test_show_wiki_is_false_for_noncontributors_when_no_wiki_or_content(self):
        assert_false(_should_show_wiki_widget(self.project, self.noncontributor))
        # BUG FIX: this check previously passed `self.read_only_contrib`,
        # duplicating the read-contributor test above instead of exercising
        # a non-contributor against project2.
        assert_false(_should_show_wiki_widget(self.project2, self.noncontributor))
class TestForkViews(OsfTestCase):
    """Fork endpoints: permission checks and the fork listing."""

    def setUp(self):
        super(TestForkViews, self).setUp()
        self.user = AuthUserFactory()
        # .build() does not persist; both objects are saved explicitly below.
        self.project = ProjectFactory.build(creator=self.user, is_public=True)
        self.consolidated_auth = Auth(user=self.project.creator)
        self.user.save()
        self.project.save()

    def test_fork_private_project_non_contributor(self):
        # Non-contributors must not be able to fork a private project.
        self.project.set_privacy("private")
        self.project.save()
        url = self.project.api_url_for('node_fork_page')
        non_contributor = AuthUserFactory()
        res = self.app.post_json(url,
                                 auth=non_contributor.auth,
                                 expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_fork_public_project_non_contributor(self):
        # Any authenticated user may fork a public project.
        url = self.project.api_url_for('node_fork_page')
        non_contributor = AuthUserFactory()
        res = self.app.post_json(url, auth=non_contributor.auth)
        assert_equal(res.status_code, 200)

    def test_fork_project_contributor(self):
        # Contributors may fork even after the project goes private.
        contributor = AuthUserFactory()
        self.project.set_privacy("private")
        self.project.add_contributor(contributor)
        self.project.save()
        url = self.project.api_url_for('node_fork_page')
        res = self.app.post_json(url, auth=contributor.auth)
        assert_equal(res.status_code, 200)

    def test_registered_forks_dont_show_in_fork_list(self):
        # Registering a fork must not add an extra entry to get_forks;
        # only the live fork itself is listed.
        fork = self.project.fork_node(self.consolidated_auth)
        RegistrationFactory(project=fork)
        url = self.project.api_url_for('get_forks')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(len(res.json['nodes']), 1)
        assert_equal(res.json['nodes'][0]['id'], fork._id)
class TestProjectCreation(OsfTestCase):
    """Project/component creation endpoints: validation, HTML sanitizing,
    and contributor inheritance for new components."""

    def setUp(self):
        super(TestProjectCreation, self).setUp()
        self.creator = AuthUserFactory()
        self.url = api_url_for('project_new_post')
        self.user1 = AuthUserFactory()
        self.user2 = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user1)
        self.project.add_contributor(self.user2, auth=Auth(self.user1))
        self.project.save()

    def tearDown(self):
        super(TestProjectCreation, self).tearDown()

    def test_needs_title(self):
        # A payload without a title is rejected.
        res = self.app.post_json(self.url, {}, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_create_component_strips_html(self):
        user = AuthUserFactory()
        project = ProjectFactory(creator=user)
        url = web_url_for('project_new_node', pid=project._id)
        post_data = {'title': '<b>New <blink>Component</blink> Title</b>', 'category': ''}
        # The response is not inspected; follow() just completes the redirect.
        self.app.post(url, post_data, auth=user.auth).follow()
        project.reload()
        child = project.nodes[0]
        # HTML has been stripped
        assert_equal(child.title, 'New Component Title')

    def test_strip_html_from_title(self):
        payload = {
            'title': 'no html <b>here</b>'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        node = Node.load(res.json['projectUrl'].replace('/', ''))
        assert_true(node)
        assert_equal('no html here', node.title)

    def test_only_needs_title(self):
        payload = {
            'title': 'Im a real title'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)

    def test_title_must_be_one_long(self):
        payload = {
            'title': ''
        }
        res = self.app.post_json(
            self.url, payload, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_title_must_be_less_than_200(self):
        # 250 concatenated numbers exceed the 200-character title limit.
        payload = {
            'title': ''.join([str(x) for x in xrange(0, 250)])
        }
        res = self.app.post_json(
            self.url, payload, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_fails_to_create_project_with_whitespace_title(self):
        payload = {
            'title': ' '
        }
        res = self.app.post_json(
            self.url, payload, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_creates_a_project(self):
        payload = {
            'title': 'Im a real title'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)
        node = Node.load(res.json['projectUrl'].replace('/', ''))
        assert_true(node)
        # BUG FIX: was `assert_true(node.title, 'Im a real title')` -- the
        # second argument of assert_true is a failure *message*, so the
        # title was never actually compared.
        assert_equal(node.title, 'Im a real title')

    def test_create_component_add_contributors_admin(self):
        url = web_url_for('project_new_node', pid=self.project._id)
        post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
        res = self.app.post(url, post_data, auth=self.user1.auth)
        self.project.reload()
        child = self.project.nodes[0]
        assert_equal(child.title, 'New Component With Contributors Title')
        assert_in(self.user1, child.contributors)
        assert_in(self.user2, child.contributors)
        # check redirect url
        assert_in('/contributors/', res.location)

    def test_create_component_with_contributors_read_write(self):
        url = web_url_for('project_new_node', pid=self.project._id)
        non_admin = AuthUserFactory()
        self.project.add_contributor(non_admin, permissions=['read', 'write'])
        self.project.save()
        post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
        res = self.app.post(url, post_data, auth=non_admin.auth)
        self.project.reload()
        child = self.project.nodes[0]
        assert_equal(child.title, 'New Component With Contributors Title')
        assert_in(non_admin, child.contributors)
        assert_in(self.user1, child.contributors)
        assert_in(self.user2, child.contributors)
        # The creating contributor gets admin on the new component.
        assert_equal(child.get_permissions(non_admin), ['read', 'write', 'admin'])
        # check redirect url
        assert_in('/contributors/', res.location)

    def test_create_component_with_contributors_read(self):
        # Read-only contributors may not create components.
        url = web_url_for('project_new_node', pid=self.project._id)
        non_admin = AuthUserFactory()
        self.project.add_contributor(non_admin, permissions=['read'])
        self.project.save()
        post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
        res = self.app.post(url, post_data, auth=non_admin.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_create_component_add_no_contributors(self):
        url = web_url_for('project_new_node', pid=self.project._id)
        post_data = {'title': 'New Component With Contributors Title', 'category': ''}
        res = self.app.post(url, post_data, auth=self.user1.auth)
        self.project.reload()
        child = self.project.nodes[0]
        assert_equal(child.title, 'New Component With Contributors Title')
        assert_in(self.user1, child.contributors)
        assert_not_in(self.user2, child.contributors)
        # check redirect url
        assert_not_in('/contributors/', res.location)

    def test_new_project_returns_serialized_node_data(self):
        payload = {
            'title': 'Im a real title'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)
        node = res.json['newNode']
        assert_true(node)
        assert_equal(node['title'], 'Im a real title')

    def test_description_works(self):
        payload = {
            'title': 'Im a real title',
            'description': 'I describe things!'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)
        node = Node.load(res.json['projectUrl'].replace('/', ''))
        assert_true(node)
        # BUG FIX: was a two-argument assert_true (message, not comparison).
        assert_equal(node.description, 'I describe things!')

    def test_can_template(self):
        other_node = ProjectFactory(creator=self.creator)
        payload = {
            'title': 'Im a real title',
            'template': other_node._id
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)
        node = Node.load(res.json['projectUrl'].replace('/', ''))
        assert_true(node)
        # BUG FIX: was a two-argument assert_true; compare ids to avoid
        # relying on Node equality semantics.
        assert_equal(node.template_node._id, other_node._id)

    def test_project_before_template_no_addons(self):
        project = ProjectFactory()
        res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
        assert_equal(res.json['prompts'], [])

    def test_project_before_template_with_addons(self):
        project = ProjectWithAddonFactory(addon='github')
        res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
        assert_in('GitHub', res.json['prompts'])

    def test_project_new_from_template_non_user(self):
        # Anonymous users are bounced to the login page.
        project = ProjectFactory()
        url = api_url_for('project_new_from_template', nid=project._id)
        res = self.app.post(url, auth=None)
        assert_equal(res.status_code, 302)
        res2 = res.follow(expect_errors=True)
        assert_equal(res2.status_code, 301)
        assert_equal(res2.request.path, '/login')

    def test_project_new_from_template_public_non_contributor(self):
        non_contributor = AuthUserFactory()
        project = ProjectFactory(is_public=True)
        url = api_url_for('project_new_from_template', nid=project._id)
        res = self.app.post(url, auth=non_contributor.auth)
        assert_equal(res.status_code, 201)

    def test_project_new_from_template_contributor(self):
        contributor = AuthUserFactory()
        project = ProjectFactory(is_public=False)
        project.add_contributor(contributor)
        project.save()
        url = api_url_for('project_new_from_template', nid=project._id)
        res = self.app.post(url, auth=contributor.auth)
        assert_equal(res.status_code, 201)
class TestUnconfirmedUserViews(OsfTestCase):
    """Views that must remain reachable for not-yet-confirmed accounts."""

    def test_can_view_profile(self):
        # An unconfirmed account still gets a public profile page.
        unconfirmed = UnconfirmedUserFactory()
        profile_url = web_url_for('profile_view_id', uid=unconfirmed._id)
        response = self.app.get(profile_url)
        assert_equal(response.status_code, 200)
class TestProfileNodeList(OsfTestCase):
    """Public project/component listings shown on a user profile."""

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()
        self.public = ProjectFactory(is_public=True)
        self.public_component = NodeFactory(parent=self.public, is_public=True)
        self.private = ProjectFactory(is_public=False)
        self.deleted = ProjectFactory(is_public=True, is_deleted=True)
        # The user contributes to every node, visible or not.
        fixtures = (self.public, self.public_component, self.private, self.deleted)
        for each in fixtures:
            each.add_contributor(self.user, auth=Auth(each.creator))
            each.save()

    def _listed_ids(self, view_name):
        # Fetch the given profile listing and return the node ids it contains.
        response = self.app.get(api_url_for(view_name, uid=self.user._id))
        return [entry['id'] for entry in response.json['nodes']]

    def test_get_public_projects(self):
        node_ids = self._listed_ids('get_public_projects')
        assert_in(self.public._id, node_ids)
        assert_not_in(self.private._id, node_ids)
        assert_not_in(self.deleted._id, node_ids)
        assert_not_in(self.public_component._id, node_ids)

    def test_get_public_components(self):
        node_ids = self._listed_ids('get_public_components')
        assert_in(self.public_component._id, node_ids)
        assert_not_in(self.public._id, node_ids)
        assert_not_in(self.private._id, node_ids)
        assert_not_in(self.deleted._id, node_ids)
class TestStaticFileViews(OsfTestCase):
    """Static assets and simple redirect endpoints."""

    def test_robots_dot_txt(self):
        response = self.app.get('/robots.txt')
        assert_equal(response.status_code, 200)
        assert_in('User-agent', response)
        assert_in('text/plain', response.headers['Content-Type'])

    def test_favicon(self):
        response = self.app.get('/favicon.ico')
        assert_equal(response.status_code, 200)
        assert_in('image/vnd.microsoft.icon', response.headers['Content-Type'])

    def test_getting_started_page(self):
        # The old getting-started page redirects to the external help site.
        response = self.app.get('/getting-started/')
        assert_equal(response.status_code, 302)
        assert_equal(response.location, 'http://help.osf.io/')
class TestUserConfirmSignal(OsfTestCase):
    """auth.signals.user_confirmed must fire on both confirmation paths:
    claiming an invited (unclaimed) account and confirming an email."""

    def test_confirm_user_signal_called_when_user_claims_account(self):
        unclaimed_user = UnconfirmedUserFactory()
        # unclaimed user has been invited to a project.
        referrer = UserFactory()
        project = ProjectFactory(creator=referrer)
        unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
        unclaimed_user.save()
        token = unclaimed_user.get_unclaimed_record(project._primary_key)['token']
        with capture_signals() as mock_signals:
            url = web_url_for('claim_user_form', pid=project._id, uid=unclaimed_user._id, token=token)
            payload = {'username': unclaimed_user.username,
                       'password': 'password',
                       'password2': 'password'}
            res = self.app.post(url, payload)
            # A successful claim redirects (302) and emits exactly the
            # user_confirmed signal.
            assert_equal(res.status_code, 302)
            assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))

    def test_confirm_user_signal_called_when_user_confirms_email(self):
        unconfirmed_user = UnconfirmedUserFactory()
        unconfirmed_user.save()
        # user goes to email confirmation link
        token = unconfirmed_user.get_confirmation_token(unconfirmed_user.username)
        with capture_signals() as mock_signals:
            url = web_url_for('confirm_email_get', uid=unconfirmed_user._id, token=token)
            res = self.app.get(url)
            assert_equal(res.status_code, 302)
            assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
zamattiac/osf.io
|
tests/test_views.py
|
Python
|
apache-2.0
| 184,669
|
[
"Brian"
] |
d5574e53201eaaaa538cc900a615372d3bd226d34b2fbd7ed91f667450f9a73c
|
# -*- coding: UTF-8 -*-
"""
Name: concatenate.py
Porpose: A simple concat demuxer UI
Compatibility: Python3, wxPython Phoenix
Author: Gianluca Pernigotto <jeanlucperni@gmail.com>
Copyright: (c) 2018/2021 Gianluca Pernigotto <jeanlucperni@gmail.com>
license: GPL3
Rev: Sep.29.2021
Code checker:
- pycodestyle
- flake8: --ignore F821
########################################################
This file is part of Videomass.
Videomass is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Videomass is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Videomass. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import wx
import wx.lib.agw.hyperlink as hpl
from videomass.vdms_io.checkup import check_files
from videomass.vdms_dialogs.epilogue import Formula
def compare_media_param(data):
    """
    Compare video codec types, audio codec types with sample_rate,
    dimensions (width and height) across the probed files.
    Returns True if differences are found between them (or if a stream
    category has a single entry), otherwise it returns False.
    """
    video_codecs = []    # video codec names
    audio_codecs = []    # audio codec names
    sample_rates = []    # audio sample rates
    frame_sizes = []     # "WIDTHxHEIGHT" strings for video streams
    for probe in data:
        for stream in probe.get('streams'):
            kind = stream.get('codec_type')
            if kind == 'video':
                video_codecs.append(stream.get('codec_name'))
                frame_sizes.append(f"{stream.get('width')}x{stream.get('height')}")
            if kind == 'audio':
                audio_codecs.append(stream.get('codec_name'))
                sample_rates.append(stream.get('sample_rate'))
    for group in (video_codecs, audio_codecs, sample_rates, frame_sizes):
        if len(group) == 1:
            # a lone stream has nothing to be concatenated with
            return True
        if not all(entry == group[0] for entry in group):
            return True
    return False
# -------------------------------------------------------------------------
class Conc_Demuxer(wx.Panel):
    """
    A simple concat demuxer UI to set media files concatenation using
    concat demuxer, see <https://ffmpeg.org/ffmpeg-formats.html#concat>
    """
    # Informative text shown at the top of the panel (translatable).
    MSG_1 = _("NOTE:\n\n- The concatenation function is performed only with "
              "Audio files or only with Video files."
              "\n\n- The order of concatenation depends on the order in "
              "which the files were added."
              "\n\n- The output file name will have the same name as the "
              "first file added (also depends\n"
              " on the settings made in the preferences dialog)."
              "\n\n- Video files must have exactly same streams, same "
              "codecs and same\n width/height, but can be wrapped in "
              "different container formats."
              "\n\n- Audio files must have exactly the same formats, "
              "same codecs and same sample rate.")
    # ----------------------------------------------------------------#

    def __init__(self, parent):
        """
        Build the static informational layout of the panel.

        self.command stays an empty string here; it is passed through
        unchanged to the processing pipeline in concat_demuxer().
        """
        get = wx.GetApp()
        appdata = get.appset
        self.parent = parent  # parent is the MainFrame
        self.command = ''
        wx.Panel.__init__(self, parent=parent, style=wx.BORDER_THEME)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add((50, 50))
        line0 = wx.StaticLine(self, wx.ID_ANY, pos=wx.DefaultPosition,
                              size=wx.DefaultSize, style=wx.LI_HORIZONTAL,
                              name=wx.StaticLineNameStr
                              )
        sizer.Add(line0, 0, wx.ALL | wx.EXPAND, 5)
        self.lbl_msg1 = wx.StaticText(self, wx.ID_ANY,
                                      label=Conc_Demuxer.MSG_1
                                      )
        sizer.Add(self.lbl_msg1, 0, wx.ALL | wx.EXPAND, 5)
        sizer_link2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(sizer_link2)
        self.lbl_msg3 = wx.StaticText(self, wx.ID_ANY,
                                      label=_("For more details, see the "
                                              "Videomass User Guide:")
                                      )
        # Link the localized user guide when the UI language is supported,
        # otherwise fall back to the English PDF.
        if appdata['GETLANG'] in appdata['SUPP_LANGs']:
            lang = appdata['GETLANG'].split('_')[0]
            page = (f"https://jeanslack.github.io/Videomass/"
                    f"Pages/User-guide-languages/{lang}/1-User_"
                    f"Interface_Overview_{lang}.pdf")
        else:
            page = ("https://jeanslack.github.io/Videomass/"
                    "Pages/User-guide-languages/en/1-User_"
                    "Interface_Overview_en.pdf"
                    )
        link2 = hpl.HyperLinkCtrl(self, -1, _("1.4 Concatenate media files "
                                              "(demuxer)"), URL=page)
        sizer_link2.Add(self.lbl_msg3, 0, wx.ALL | wx.EXPAND, 5)
        sizer_link2.Add(link2)
        sizer_link1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(sizer_link1)
        self.lbl_msg2 = wx.StaticText(self, wx.ID_ANY,
                                      label=_("For more information, "
                                              "visit the official FFmpeg "
                                              "documentation:")
                                      )
        link1 = hpl.HyperLinkCtrl(self, -1, "3.4 concat",
                                  URL="https://ffmpeg.org/ffmpeg-formats."
                                      "html#concat"
                                  )
        sizer_link1.Add(self.lbl_msg2, 0, wx.ALL | wx.EXPAND, 5)
        sizer_link1.Add(link1)
        line1 = wx.StaticLine(self, wx.ID_ANY, pos=wx.DefaultPosition,
                              size=wx.DefaultSize, style=wx.LI_HORIZONTAL,
                              name=wx.StaticLineNameStr
                              )
        sizer.Add(line1, 0, wx.ALL | wx.EXPAND, 5)
        # NOTE(review): sizFormat is added but never populated -- looks
        # vestigial; confirm before removing.
        sizFormat = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(sizFormat)
        self.SetSizer(sizer)
        # Platform-dependent font sizing (macOS needs larger point sizes).
        if appdata['ostype'] == 'Darwin':
            self.lbl_msg1.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
            self.lbl_msg2.SetFont(wx.Font(11, wx.SWISS, wx.NORMAL, wx.NORMAL))
            self.lbl_msg3.SetFont(wx.Font(11, wx.SWISS, wx.NORMAL, wx.NORMAL))
        else:
            self.lbl_msg1.SetFont(wx.Font(9, wx.SWISS, wx.NORMAL, wx.BOLD))
            self.lbl_msg2.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL))
            self.lbl_msg3.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL))
    # ---------------------------------------------------------

    def on_start(self):
        """
        Validate the loaded files (count, comparable streams, checked
        destination) and start the concatenation, or show an error box
        and return early.
        """
        logname = 'concatenate_demuxer.log'
        fsource = self.parent.file_src
        # output name is derived from the first file added
        fname = os.path.splitext(os.path.basename(fsource[0]))[0]
        if len(fsource) < 2:
            wx.MessageBox(_('At least two files are required to perform '
                            'concatenation.'), _('ERROR'),
                          wx.ICON_ERROR, self
                          )
            return
        ext = os.path.splitext(self.parent.file_src[0])[1].split('.')[1]
        diff = compare_media_param(self.parent.data_files)
        if diff is True:
            wx.MessageBox(_('The files do not have the same "codec_types", '
                            'same "sample_rate" or same "width" or "height". '
                            'Unable to proceed.'),
                          _('ERROR'), wx.ICON_ERROR, self
                          )
            return
        checking = check_files((fsource[0],),
                               self.parent.outpath_ffmpeg,
                               self.parent.same_destin,
                               self.parent.suffix,
                               ext
                               )
        if not checking[0]:  # User changing idea or not such files exist
            return
        f_src, destin, countmax = checking
        newfile = f'{fname}{self.parent.suffix}.{ext}'
        self.concat_demuxer(self.parent.file_src, newfile,
                            destin[0], ext, logname)
    # -----------------------------------------------------------

    def concat_demuxer(self, filesrc, newfile, destdir, outext, logname):
        """
        Parameters redirection: show the confirmation (epilogue) dialog
        and, on OK, hand everything to the processing pipeline.
        """
        valupdate = self.update_dict(newfile, destdir, outext)
        ending = Formula(self, valupdate[0], valupdate[1], _('Starts'))
        if ending.ShowModal() == wx.ID_OK:
            self.parent.switch_to_processing('concat_demuxer',
                                             filesrc,
                                             outext,
                                             destdir,
                                             self.command,
                                             None,
                                             '',
                                             None,
                                             logname,
                                             1,
                                             )
    # -----------------------------------------------------------

    def update_dict(self, newfile, destdir, ext):
        """
        Update information before send to epilogue.
        Returns a (labels, values) pair of newline-separated strings
        consumed by the Formula dialog.
        """
        lenfile = len(self.parent.file_src)
        formula = (_("SUMMARY\n\nFile to concatenate\nOutput filename\
\nDestination\nOutput Format\nTime Period"))
        dictions = (f"\n\n{lenfile}\n{newfile}\n{destdir}\n{ext}\n"
                    f"Not applicable")
        return formula, dictions
|
jeanslack/Videomass
|
videomass/vdms_panels/concatenate.py
|
Python
|
gpl-3.0
| 10,087
|
[
"VisIt"
] |
799392c25102aeb6c9369633ede0a45fef82e11d8bf5153b81d3d93a2e387ce1
|
import numpy as np
from ase.units import Ha
from ase.dft.kpoints import monkhorst_pack
from ase.parallel import paropen
from ase.lattice import bulk
from gpaw import GPAW, FermiDirac
from gpaw.wavefunctions.pw import PW
from gpaw.mpi import size, serial_comm
from gpaw.xc.rpa import RPACorrelation
from gpaw.test import equal

# Gamma-shifted 4x4x4 Monkhorst-Pack k-point grid.
kpts = monkhorst_pack((4, 4, 4))
kpts += np.array([1/8., 1/8., 1/8.])

# FIX: the Atoms object used to be bound to the name `bulk`, shadowing the
# imported ase.lattice.bulk factory for the rest of the script; renamed.
atoms = bulk('Na', 'bcc', a=4.23)

# Ground-state plane-wave calculation with a few occupied bands.
ecut = 350
calc = GPAW(mode=PW(ecut), dtype=complex, basis='dzp', kpts=kpts,
            parallel={'domain': 1}, txt='gs_occ_pw.txt', nbands=4,
            occupations=FermiDirac(0.01),
            setups={'Na': '1'},
            )
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('gs_occ_pw.gpw')

# Diagonalize the full Hamiltonian to obtain many unoccupied bands,
# as required for the RPA response.
calc = GPAW('gs_occ_pw.gpw', txt='gs_pw.txt', parallel={'band': 1})
calc.diagonalize_full_hamiltonian(nbands=520)
calc.write('gs_pw.gpw', 'all')

# RPA correlation energy at a reduced response cutoff; compare against
# the stored reference value.
ecut = 120
calc = GPAW('gs_pw.gpw', communicator=serial_comm, txt=None)
rpa = RPACorrelation(calc, txt='rpa_%s.txt' % (ecut))
E = rpa.calculate(ecut=[ecut])
equal(E, -1.106, 0.005)
|
robwarm/gpaw-symm
|
gpaw/test/rpa_energy_Na.py
|
Python
|
gpl-3.0
| 1,072
|
[
"ASE",
"GPAW"
] |
3235e83fefc5080f3c50a8a569cf0e753f0f86af465e73713607cf966c18fd89
|
#
# @file TestSpeciesReference.py
# @brief SpeciesReference unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSpeciesReference.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestSpeciesReference(unittest.TestCase):
    # NOTE: this file is machine-generated from the C tests (see file
    # header); comments below were added for review only -- executable
    # code is unchanged.

    # Module-global mirror of the C fixture pointer; the instance the
    # tests actually use is the `self.SR` attribute set in setUp().
    global SR
    SR = None

    def setUp(self):
        # Fresh SBML Level 2 Version 4 SpeciesReference for every test.
        self.SR = libsbml.SpeciesReference(2,4)
        if (self.SR == None):
            pass
        pass

    def tearDown(self):
        # Emulates the C-style free of the fixture object.
        _dummyList = [ self.SR ]; _dummyList[:] = []; del _dummyList
        pass

    def test_SpeciesReference_create(self):
        # Defaults of a newly created SpeciesReference.
        self.assert_( self.SR.getTypeCode() == libsbml.SBML_SPECIES_REFERENCE )
        self.assert_( self.SR.getMetaId() == "" )
        self.assert_( self.SR.getNotes() == None )
        self.assert_( self.SR.getAnnotation() == None )
        self.assert_( self.SR.getSpecies() == "" )
        self.assert_( self.SR.getStoichiometry() == 1 )
        self.assert_( self.SR.getStoichiometryMath() == None )
        self.assert_( self.SR.getDenominator() == 1 )
        self.assertEqual( False, self.SR.isSetSpecies() )
        self.assertEqual( False, self.SR.isSetStoichiometryMath() )
        pass

    def test_SpeciesReference_createWithNS(self):
        # Construction with explicit SBML namespaces (L2V1 plus a custom one).
        xmlns = libsbml.XMLNamespaces()
        xmlns.add( "http://www.sbml.org", "testsbml")
        sbmlns = libsbml.SBMLNamespaces(2,1)
        sbmlns.addNamespaces(xmlns)
        object = libsbml.SpeciesReference(sbmlns)
        self.assert_( object.getTypeCode() == libsbml.SBML_SPECIES_REFERENCE )
        self.assert_( object.getMetaId() == "" )
        self.assert_( object.getNotes() == None )
        self.assert_( object.getAnnotation() == None )
        self.assert_( object.getLevel() == 2 )
        self.assert_( object.getVersion() == 1 )
        self.assert_( object.getNamespaces() != None )
        self.assert_( object.getNamespaces().getLength() == 2 )
        _dummyList = [ object ]; _dummyList[:] = []; del _dummyList
        pass

    def test_SpeciesReference_free_NULL(self):
        # Freeing a NULL pointer must be a no-op (C API semantics).
        _dummyList = [ None ]; _dummyList[:] = []; del _dummyList
        pass

    def test_SpeciesReference_setId(self):
        species = "X0";
        self.SR.setId(species)
        self.assert_(( species == self.SR.getId() ))
        self.assertEqual( True, self.SR.isSetId() )
        if (self.SR.getId() == species):
            pass
        # Re-setting an attribute to its own value must be safe.
        self.SR.setId(self.SR.getId())
        self.assert_(( species == self.SR.getId() ))
        # Setting the empty string unsets the attribute.
        self.SR.setId("")
        self.assertEqual( False, self.SR.isSetId() )
        if (self.SR.getId() != None):
            pass
        pass

    def test_SpeciesReference_setSpecies(self):
        species = "X0";
        self.SR.setSpecies(species)
        self.assert_(( species == self.SR.getSpecies() ))
        self.assertEqual( True, self.SR.isSetSpecies() )
        if (self.SR.getSpecies() == species):
            pass
        # Re-setting an attribute to its own value must be safe.
        self.SR.setSpecies(self.SR.getSpecies())
        self.assert_(( species == self.SR.getSpecies() ))
        # Setting the empty string unsets the attribute.
        self.SR.setSpecies("")
        self.assertEqual( False, self.SR.isSetSpecies() )
        if (self.SR.getSpecies() != None):
            pass
        pass

    def test_SpeciesReference_setStoichiometryMath(self):
        # Attach a StoichiometryMath built from "k3 / k2" and read it back.
        math = libsbml.parseFormula("k3 / k2")
        stoich = libsbml.StoichiometryMath(2,4)
        stoich.setMath(math)
        self.SR.setStoichiometryMath(stoich)
        math1 = self.SR.getStoichiometryMath()
        self.assert_( math1 != None )
        formula = libsbml.formulaToString(math1.getMath())
        self.assert_( formula != None )
        self.assert_(( "k3 / k2" == formula ))
        self.assertEqual( True, self.SR.isSetStoichiometryMath() )
        pass
def suite():
    """Build the unittest suite for this module."""
    # Use a distinct local name rather than shadowing the function itself.
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(TestSpeciesReference))
    return tests
# Exit with a conventional status code so an outer harness can chain runs.
if __name__ == "__main__":
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
        sys.exit(0)
    else:
        sys.exit(1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestSpeciesReference.py
|
Python
|
bsd-3-clause
| 4,905
|
[
"VisIt"
] |
952c20d035feaaf9eac9cf934fe070da5138c5655d31d5b5efaa5c2f28c4bd05
|
import sys
sys.path.insert(1, "../../")
import h2o
def offset_1388(ip, port):
print "Loading datasets..."
pros_hex = h2o.import_file(h2o.locate("smalldata/prostate/prostate.csv"))
pros_hex[1] = pros_hex[1].asfactor()
pros_hex[3] = pros_hex[3].asfactor()
pros_hex[4] = pros_hex[4].asfactor()
pros_hex[5] = pros_hex[5].asfactor()
pros_hex[8] = pros_hex[8].asfactor()
cars_hex = h2o.import_file(h2o.locate("smalldata/junit/cars.csv"))
cars_hex[0] = cars_hex[0].asfactor()
cars_hex[2] = cars_hex[2].asfactor()
print "Running Binomial Comparison..."
glm_bin_h2o = h2o.glm(x=pros_hex[2:9], y=pros_hex[1], training_frame=pros_hex, family="binomial", standardize=False,
offset_column="AGE", Lambda=[0], max_iterations=100)
print "binomial"
print "R:"
print "deviance: {0}".format(1464.9565781185)
print "null deviance: {0}".format(2014.93087862689)
print "aic: {0}".format(1494.9565781185)
print "H2O:"
print "deviance {0}".format(glm_bin_h2o.residual_deviance())
print "null deviance {0}".format(glm_bin_h2o.null_deviance())
print "aic {0}".format(glm_bin_h2o.aic())
assert abs(1464.9565781185 - glm_bin_h2o.residual_deviance()) < 0.1
assert abs(2014.93087862689 - glm_bin_h2o.null_deviance()) < 0.1
assert abs(1494.9565781185 - glm_bin_h2o.aic()) < 0.1
print "Running Regression Comparisons..."
glm_h2o = h2o.glm(x=cars_hex[2:8], y=cars_hex[1], training_frame=cars_hex, family="gaussian", standardize=False,
offset_column="year", Lambda = [0], max_iterations = 100)
print "gaussian"
print "R:"
print "deviance: {0}".format(4204.68399275449)
print "null deviance: {0}".format(16072.0955102041)
print "aic: {0}".format(2062.54330117177)
print "H2O:"
print "deviance {0}".format(glm_h2o.residual_deviance())
print "null deviance {0}".format(glm_h2o.null_deviance())
print "aic {0}".format(glm_h2o.aic())
assert abs(4204.68399275449 - glm_h2o.residual_deviance()) < 0.1
assert abs(16072.0955102041 - glm_h2o.null_deviance()) < 0.1
assert abs(2062.54330117177 - glm_h2o.aic()) < 0.1
glm_h2o = h2o.glm(x=cars_hex[2:8], y=cars_hex[1], training_frame=cars_hex, family="poisson", standardize=False,
offset_column="year", Lambda = [0], max_iterations = 100)
print "poisson"
print "R:"
print "deviance: {0}".format(54039.1725227918)
print "null deviance: {0}".format(59381.5624028358)
print "aic: {0}".format("Inf")
print "H2O:"
print "deviance {0}".format(glm_h2o.residual_deviance())
print "null deviance {0}".format(glm_h2o.null_deviance())
print "aic {0}".format(glm_h2o.aic())
assert abs(54039.1725227918 - glm_h2o.residual_deviance()) < 0.1
assert abs(59381.5624028358 - glm_h2o.null_deviance()) < 0.1
assert abs(float('inf') - glm_h2o.aic()) < 0.1
# Standalone entry point used by the pyunit runner.
if __name__ == "__main__":
    h2o.run_test(sys.argv, offset_1388)
|
weaver-viii/h2o-3
|
h2o-py/tests/testdir_jira/pyunit_NOPASS_pubdev_1388_offset_comparisons.py
|
Python
|
apache-2.0
| 2,998
|
[
"Gaussian"
] |
c103b871f4ddb4ed2e62288bf8efb67092217f3fad49267f112d8685c0b9aa74
|
# -*- coding: utf-8 -*-
"""This module tests various ways how to set up the provisioning using the provisioning dialog."""
import re
from datetime import datetime, timedelta
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.common.provider import cleanup_vm
from cfme.infrastructure.virtual_machines import Vm
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.provisioning import provisioning_form
from cfme.services import requests
from cfme.web_ui import InfoBlock, fill, flash
from utils import testgen
from utils.appliance.implementations.ui import navigate_to
from utils.blockers import BZ
from utils.generators import random_vm_name
from utils.log import logger
from utils.version import current_version
from utils.wait import wait_for, TimedOutError
# Module-wide pytest configuration for all provisioning-dialog tests.
pytestmark = [
    pytest.mark.meta(server_roles="+automate"),
    pytest.mark.usefixtures('uses_infra_providers'),
    pytest.mark.long_running,
    test_requirements.provision,
    # Known RHEV-M blocker; only RHEVMProvider runs stay blocked.
    pytest.mark.meta(blockers=[
        BZ(
            1265466,
            unblock=lambda provider: not provider.one_of(RHEVMProvider))
    ]),
    pytest.mark.tier(3)
]
# Parametrize over infra providers whose YAML defines template/host/datastore.
pytest_generate_tests = testgen.generate([InfraProvider], required_fields=[
    ['provisioning', 'template'],
    ['provisioning', 'host'],
    ['provisioning', 'datastore']
], scope="module")
@pytest.fixture(scope="function")
def vm_name():
    """Return a fresh, randomized VM name for each test invocation."""
    return random_vm_name('provd')
@pytest.fixture(scope="function")
def prov_data(provisioning, provider):
    """Build the baseline provisioning-form data shared by all tests here."""
    form_values = {
        "first_name": fauxfactory.gen_alphanumeric(),
        "last_name": fauxfactory.gen_alphanumeric(),
        "email": "{}@{}.test".format(
            fauxfactory.gen_alphanumeric(), fauxfactory.gen_alphanumeric()),
        "manager_name": "{} {}".format(
            fauxfactory.gen_alphanumeric(), fauxfactory.gen_alphanumeric()),
        "vlan": provisioning.get("vlan", None),
        # "datastore_create": False,
        "datastore_name": {"name": provisioning["datastore"]},
        "host_name": {"name": provisioning["host"]},
        # "catalog_name": provisioning["catalog_item_type"],
    }
    # Pick the provider-native clone type where one exists; for any other
    # provider type the form's default provision type is left alone.
    native_clone_types = {'rhevm': 'Native Clone', 'virtualcenter': 'VMware'}
    clone_type = native_clone_types.get(provider.type)
    if clone_type is not None:
        form_values['provision_type'] = clone_type
    return form_values
@pytest.fixture(scope="function")
def provisioner(request, setup_provider, provider, vm_name):
    """Return a callable that drives the provisioning dialog end to end.

    The returned ``_provisioner(template, provisioning_data, delayed=None)``
    fills and submits the provision form, registers VM cleanup, optionally
    verifies that a *scheduled* provision does NOT complete before ``delayed``
    (a UTC datetime), then waits for the VM to appear on the provider and for
    the CFME request to finish successfully. Returns the ``Vm`` object.
    """
    def _provisioner(template, provisioning_data, delayed=None):
        vm = Vm(name=vm_name, provider=provider, template_name=template)
        navigate_to(vm, 'ProvisionVM')

        fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
        flash.assert_no_errors()
        # Make sure the VM is removed from the provider even if the test fails.
        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        if delayed is not None:
            # Poll the request list only until the scheduled start time; if
            # the request shows up finished before then, scheduling was
            # ignored, which is a failure. Timing out here is the good case.
            total_seconds = (delayed - datetime.utcnow()).total_seconds()
            row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
            cells = {'Description': row_description}
            try:
                row, __ = wait_for(requests.wait_for_request, [cells],
                                   fail_func=requests.reload, num_sec=total_seconds, delay=5)
                pytest.fail("The provisioning was not postponed")
            except TimedOutError:
                pass
        logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
        wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)

        # nav to requests page happens on successful provision
        logger.info('Waiting for cfme provision request for vm %s', vm_name)
        row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
        cells = {'Description': row_description}
        row, __ = wait_for(requests.wait_for_request, [cells],
                           fail_func=requests.reload, num_sec=900, delay=20)
        assert 'Successfully' in row.last_message.text and row.status.text != 'Error'

        return vm

    return _provisioner
def test_change_cpu_ram(provisioner, soft_assert, provider, prov_data, vm_name):
    """ Tests change RAM and CPU in provisioning dialog.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set number of CPUs and amount of RAM.
        * Submit the provisioning request and wait for it to finish.
        * Visit the page of the provisioned VM. The summary should state correct values for CPU&RAM.

    Metadata:
        test_flag: provision
    """
    prov_data["vm_name"] = vm_name
    # SCVMM on CFME 5.6 exposes a single CPU-count field; everywhere else CPUs
    # are expressed as sockets x cores-per-socket.
    if provider.type == "scvmm" and current_version() == "5.6":
        prov_data["num_cpus"] = "4"
    else:
        prov_data["num_sockets"] = "4"
        prov_data["cores_per_socket"] = "1" if provider.type != "scvmm" else None
    prov_data["memory"] = "4096"
    template_name = provider.data['provisioning']['template']

    vm = provisioner(template_name, prov_data)

    # Go to the VM info
    data = vm.get_detail(properties=("Properties", "Container")).strip()
    # The Container summary string varies across versions/providers, so try a
    # series of patterns until one matches; each captures (cpu count, memory).
    # No longer possible to use version pick because of cherrypicking?
    regexes = map(re.compile, [
        r"^[^(]*(\d+) CPUs?.*, ([^)]+)[^)]*$",
        r"^[^(]*\((\d+) CPUs?, ([^)]+)\)[^)]*$",
        r"^.*?(\d+) CPUs? .*?(\d+ MB)$"])
    for regex in regexes:
        match = regex.match(data)
        if match is not None:
            num_cpus, memory = match.groups()
            break
    else:
        # None of the known formats matched -- surface the raw string.
        raise ValueError("Could not parse string {}".format(repr(data)))
    soft_assert(num_cpus == "4", "num_cpus should be {}, is {}".format("4", num_cpus))
    soft_assert(memory == "4096 MB", "memory should be {}, is {}".format("4096 MB", memory))
# Special parametrization in testgen above
@pytest.mark.meta(blockers=[1209847, 1380782])
@pytest.mark.parametrize("disk_format", ["thin", "thick", "preallocated"])
@pytest.mark.uncollectif(lambda provider, disk_format:
    # Only collect format/provider combinations that the backends support.
    (provider.type == "rhevm" and disk_format == "thick") or
    (provider.type != "rhevm" and disk_format == "preallocated") or
    # Temporarily, our storage domain cannot handle preallocated disks
    (provider.type == "rhevm" and disk_format == "preallocated") or
    (provider.type == "scvmm") or
    (provider.key == "vsphere55" and disk_format == "thick"))
def test_disk_format_select(provisioner, disk_format, provider, prov_data, vm_name):
    """ Tests disk format selection in provisioning dialog.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set the disk format to be thick or thin.
        * Submit the provisioning request and wait for it to finish.
        * Visit the page of the provisioned VM.
        * The ``Thin Provisioning Used`` field should state true of false according to the selection

    Metadata:
        test_flag: provision
    """
    prov_data["vm_name"] = vm_name
    prov_data["disk_format"] = disk_format
    template_name = provider.data['provisioning']['template']

    vm = provisioner(template_name, prov_data)

    # Go to the VM info
    vm.load_details(refresh=True)
    # The info block reports "true"/"false" as text; normalize before compare.
    thin = InfoBlock.text(
        "Datastore Allocation Summary", "Thin Provisioning Used").strip().lower() == "true"
    if disk_format == "thin":
        assert thin, "The disk format should be Thin"
    else:
        assert not thin, "The disk format should not be Thin"
@pytest.mark.parametrize("started", [True, False])
def test_power_on_or_off_after_provision(provisioner, prov_data, provider, started, vm_name):
    """ Tests setting the desired power state after provisioning.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set whether you want or not the VM to be
            powered on after provisioning.
        * Submit the provisioning request and wait for it to finish.
        * The VM should become steady in the desired VM power state.

    Metadata:
        test_flag: provision
    """
    prov_data["vm_name"] = vm_name
    prov_data["power_on"] = started
    provisioner(provider.data['provisioning']['template'], prov_data)

    # Pick the provider-side check matching the requested power state.
    if started:
        state_check = provider.mgmt.is_vm_running
    else:
        state_check = provider.mgmt.is_vm_stopped

    def _vm_in_desired_state():
        return provider.mgmt.does_vm_exist(vm_name) and state_check(vm_name)

    wait_for(_vm_in_desired_state, num_sec=240, delay=5)
def test_tag(provisioner, prov_data, provider, vm_name):
    """ Tests tagging VMs using provisioning dialogs.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, pick a tag.
        * Submit the provisioning request and wait for it to finish.
        * Visit th page of VM, it should display the selected tags

    Metadata:
        test_flag: provision
    """
    prov_data["vm_name"] = vm_name
    prov_data["apply_tags"] = [(["Service Level *", "Gold"], True)]

    vm = provisioner(provider.data['provisioning']['template'], prov_data)

    applied_tags = vm.get_tags()
    # The VM must carry the exact tag selected in the dialog.
    gold_matches = [
        tag for tag in applied_tags
        if tag.category.display_name == "Service Level" and tag.display_name == "Gold"]
    assert gold_matches, "Service Level: Gold not in tags ({})".format(str(applied_tags))
@pytest.mark.meta(blockers=[1204115])
def test_provisioning_schedule(provisioner, provider, prov_data, vm_name):
    """ Tests provision scheduling.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set a scheduled provision and pick a time.
        * Submit the provisioning request, it should not start before the scheduled time.

    Metadata:
        test_flag: provision
    """
    now = datetime.utcnow()
    prov_data["vm_name"] = vm_name
    prov_data["schedule_type"] = "schedule"
    prov_data["provision_date"] = now.strftime("%m/%d/%Y")
    # Round up to the next 5-minute boundary; if that lands too close (<= 3
    # minutes away) push one more step so automation has time to submit.
    step = 5
    gap_minutes = step - now.minute % step
    if gap_minutes <= 3:
        gap_minutes += 5
    provision_time = now + timedelta(minutes=gap_minutes)
    prov_data["provision_start_hour"] = str(provision_time.hour)
    prov_data["provision_start_min"] = str(provision_time.minute)

    provisioner(provider.data['provisioning']['template'], prov_data, delayed=provision_time)
|
rlbabyuk/integration_tests
|
cfme/tests/infrastructure/test_provisioning_dialog.py
|
Python
|
gpl-2.0
| 10,935
|
[
"VisIt"
] |
3beaa4fe4d473f11284a4d36d52e7cdae32d8d4069b1e4182458948405935597
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.