| text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (1 class) | license (15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
Testopus = "./core/octopus/layouts/octopusLayout.json"
| TheGentlemanOctopus/thegentlemanoctopus | octopus_code/core/tests/utils.py | Python | gpl-3.0 | 54 | ["Octopus"] | 16813871987f3526968b68760de26564be10bf564895f0c0e2f7e96c3bc57e3d |
# import necessary python packages
import numpy as np
import pandas as pd
import datetime
import os
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from collections import Counter
from matplotlib import pyplot as plt
from pymongo import MongoClient
from astropy.io import fits
from astropy import wcs
from scipy import stats
from scipy import optimize
from scipy.linalg import LinAlgError
#from astropy import coordinates as coord
#from astropy.io import votable
#------------------------------------------------------------------------------------------------------------
# Setup path locations
plot_dir = '../plots'
if not os.path.isdir(plot_dir):
os.mkdir(plot_dir)
csv_dir = '../csv'
if not os.path.isdir(csv_dir):
    os.mkdir(csv_dir)
ann_dir = '../annfiles'
if not os.path.isdir(ann_dir):
os.mkdir(ann_dir)
dat_dir = '../datfiles'
if not os.path.isdir(dat_dir):
os.mkdir(dat_dir)
# Set constants
beta_release_date = datetime.datetime(2013, 10, 20, 12, 0, 0, 0) # date of beta release (YYYY,MM,DD,HH,MM,SS,MS)
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0)
IMG_HEIGHT = 424.0 # number of pixels in the JPG image along the y axis
IMG_WIDTH = 424.0 # number of pixels in the JPG image along the x axis
FITS_HEIGHT = 301.0 # number of pixels in the FITS image along the y axis
FITS_WIDTH = 301.0 # number of pixels in the FITS image along the x axis
PIXEL_SIZE = 0.00016667 # size of each FITS pixel in degrees (0.6 arcsec)
xmin = 1.
xmax = IMG_HEIGHT
ymin = 1.
ymax = IMG_WIDTH
xjpg2fits = float(IMG_WIDTH/FITS_WIDTH) # map the JPG pixels to the FITS pixels in x
yjpg2fits = float(IMG_HEIGHT/FITS_HEIGHT) # map the JPG pixels to the FITS pixels in y
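# Hedged helper (not part of the original pipeline): a minimal sketch of how
# the scale factors above map JPG pixel coordinates onto the FITS grid.
def jpg_to_fits_pixels(xjpg, yjpg):
    # e.g. jpg_to_fits_pixels(424., 424.) -> (301., 301.)
    return xjpg / xjpg2fits, yjpg / yjpg2fits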
def getWCSObj(subject):
# Determine the WCS object based on RGZ subject
src = subject["metadata"]["source"]
path = "./IMGS/%s.fits" % src
hdulist = fits.open(path)
w = wcs.WCS(hdulist[0].header)
return w
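# Hedged usage sketch: converting a FITS pixel position to sky coordinates
# with the WCS object returned above; wcs_pix2world takes an (N, 2) array of
# pixel coordinates and an origin convention (1 = FITS standard).
def pixel_to_sky(w, xpix, ypix):
    ra, dec = w.wcs_pix2world(np.array([[xpix, ypix]]), 1)[0]
    return ra, dec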
def plot_npeaks():
# Read in data
with open('%s/npeaks_ir.csv' % csv_dir,'rb') as f:
npeaks = [int(line.rstrip()) for line in f]
# Plot the distribution of the total number of IR sources per image
fig = plt.figure(figsize=(8,7))
ax1 = fig.add_subplot(111)
h = ax1.hist(npeaks, bins=np.arange(np.max(npeaks)+1))
ax1.set_title('RGZ source distribution')
ax1.set_xlabel('Number of IR peaks per image')
ax1.set_ylabel('Count')
fig.show()
fig.tight_layout()
# Save hard copy of the figure
fig.savefig('%s/ir_peaks_histogram.png' % plot_dir)
return None
def powerlaw_fit(xdata,ydata,epsilon=1e-3,pinit=[3.0,-1.0]):
logx = np.log10(xdata+1)
logy = np.log10(ydata)
logyerr = 1./np.sqrt(logy+epsilon)
# Line fitting function
fitfunc = lambda p,x: p[0] + p[1]*x
errfunc = lambda p,x,y,err: (y - fitfunc(p,x)) / err
out = optimize.leastsq(errfunc,pinit,args=(logx,logy,logyerr),full_output=1)
pfinal,covar = out[0],out[1]
amp,index = 10.0**pfinal[0],pfinal[1]
if covar is not None:
# covar diagonal: [0][0] is the variance of the log-amplitude, [1][1] that of the index
amperr,indexerr = np.sqrt(covar[0][0])*amp,np.sqrt(covar[1][1])
else:
amperr,indexerr = 0.,0.
return amp,amperr,index,indexerr
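# Illustrative usage (comments only, not from the original script): fitting
# synthetic power-law data. The fit is done in log10 space and xdata is offset
# by +1 before logging, so the recovered parameters are approximate.
#   x = np.arange(1., 100.)
#   amp, amperr, index, indexerr = powerlaw_fit(x, 5. * x**-2.)
#   # expect amp ~ 5 and index ~ -2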
def plot_empirical_distribution_function(dfc):
# Plot the empirical distribution function (e.g., how many users contribute to the total amount of work)
# for the RGZ data
fig = plt.figure(figsize=(8,7))
ax1 = fig.add_subplot(111)
volunteers = pd.value_counts(dfc.user_name)
# Calculate number of anonymous users and include in data
anonymous_count = dfc._id.count() - dfc.user_name.count()
volunteers = volunteers.set_value("anonymous", anonymous_count)
volunteers.sort(ascending=False)
vnorm = volunteers/volunteers.sum()
# Cumulative fraction of classifications, ordered by most active volunteer
cdf = np.cumsum(vnorm.values)
ax1.plot(np.arange(len(volunteers))+1,cdf)
#ax1.set_title('Empirical distribution of work in RGZ')
ax1.set_xlabel('Number of volunteers',fontsize=18)
ax1.set_ylabel('Fraction of total classifications',fontsize=18)
ax1.set_xscale('log')
ax1.set_ylim(0,1)
varr = (100,1000)
lsarr = ('--','-.')
for v,ls in zip(varr,lsarr):
ax1.plot([1,v],[cdf[v]]*2,'k'+ls)
ax1.plot([v]*2,[0,cdf[v]],'k'+ls)
ax1.text(1.3,cdf[0],'Anonymous users',ha='left',fontsize=12)
#ax1.text(100,cdf[100]*1.1,'Anon. + 100',ha='right',va='baseline',fontsize=8)
#ax1.text(1000,cdf[1000]*1.1,'Anon. + 1000',ha='right',va='bottom',fontsize=8)
'''
ax1.text(0.95,0.30,'Anonymous users have done %2i%% of the total work.' % (cdf[0]*100.),ha='right',fontsize=12,transform=ax1.transAxes)
ax1.text(0.95,0.25,'The top 100 logged-in users have done %2i%% of the total work.' % ((cdf[100] - cdf[0])*100.),ha='right',fontsize=12,transform=ax1.transAxes)
ax1.text(0.95,0.20,'The top 1000 logged-in users have done %2i%% of the total work.' % ((cdf[1000] - cdf[0])*100.),ha='right',fontsize=12,transform=ax1.transAxes)
'''
print('Anonymous users have done %2i%% of the total work.' % (cdf[0]*100.))
print('The top 100 logged-in users have done %2i%% of the total work.' % ((cdf[100] - cdf[0])*100.))
print('The top 1000 logged-in users have done %2i%% of the total work.' % ((cdf[1000] - cdf[0])*100.))
fig.show()
fig.set_tight_layout(True)
# Save hard copy of the figure
fig.savefig('%s/distribution_of_work.png' % plot_dir)
fig.savefig('/Users/willettk/Dropbox/RGZ/fig4.eps')
return None
def plot_zipf(dfc):
# This can (and should) absolutely be re-factored to use the example in zipf.py. Way too slow
# Plotting user classifications in a more specific way as requested by Heinz Andernach,
# to see if it corresponds to Zipf's Law or Lotka's Law
fig = plt.figure(figsize=(8,8))
ax1 = fig.add_subplot(111)
# Note: does not include anonymous users
volunteers = pd.value_counts(dfc.user_name)
volunteers.sort(ascending=False)
xpoints = pd.Series(volunteers.values.ravel()).unique()
ypoints = [(volunteers >= x).sum() for x in xpoints]
ypoints = np.array(ypoints)
ax1.loglog(xpoints,ypoints,'ro')
# Fitting results to broken power law
brk = -50
xdata1 = xpoints[brk:]
ydata1 = ypoints[brk:]
amp1,amperr1,index1,indexerr1 = powerlaw_fit(xdata1,ydata1)
xdata2 = xpoints[:brk]
ydata2 = ypoints[:brk]
amp2,amperr2,index2,indexerr2 = powerlaw_fit(xdata2,ydata2)
print 'Fit 1: index = %5.2f, amp = %5.2f' % (index1,amp1)
print 'Fit 2: index = %5.2f, amp = %5.2f' % (index2,amp2)
# Overplot the fits
xplot = np.arange(xpoints.max() - 1)+1
ax1.plot(xplot,amp1 * (xplot**index1),'k--')
ax1.plot(xplot,amp2 * (xplot**index2),'k--')
ax1.text(0.98,0.9,r'$\alpha_1 =$ %4.1f $\pm$ %3.1f' % (index1,indexerr1),ha='right',fontsize=12,transform=ax1.transAxes)
ax1.text(0.98,0.8,r'$\alpha_2 =$ %4.1f $\pm$ %3.1f' % (index2,indexerr2),ha='right',fontsize=12,transform=ax1.transAxes)
ax1.set_title("Zipf's Law in Radio Galaxy Zoo?")
ax1.set_xlabel('Number of classifications')
ax1.set_ylabel('Number of volunteers with '+r'$\geq N$'+' classifications')
fig.show()
fig.set_tight_layout(True)
# Save hard copy of the figure
fig.savefig('%s/zipf_plot.png' % plot_dir)
return None
def plot_user_counts(dfc):
# Plot the total number of classifications per volunteer in the data
fig = plt.figure(figsize=(8,8))
ax1 = fig.add_subplot(211)
volunteers = pd.value_counts(dfc.user_name)
# Calculate number of anonymous users and include in data
anonymous_count = dfc._id.count() - dfc.user_name.count()
volunteers = volunteers.set_value("anonymous", anonymous_count)
volunteers.sort(ascending=False)
vcplot = volunteers.plot(ax=ax1,use_index=True,marker='.',color='red')
# Fitting results to broken power law
brk = 1000
xdata1 = np.arange(brk)
ydata1 = volunteers[:brk]
amp1,amperr1,index1,indexerr1 = powerlaw_fit(xdata1,ydata1)
xdata2 = np.arange(len(volunteers)-brk) + brk
ydata2 = volunteers[brk:]
amp2,amperr2,index2,indexerr2 = powerlaw_fit(xdata2,ydata2)
# Overplot the fits
xplot = np.arange(len(volunteers))
ax1.plot(xplot,amp1 * (xplot**index1),'k--')
ax1.plot(xplot,amp2 * (xplot**index2),'k--')
ax1.text(0.98,0.9,r'$\alpha_1 =$ %4.1f $\pm$ %3.1f' % (index1,indexerr1),ha='right',fontsize=12,transform=ax1.transAxes)
ax1.text(0.98,0.8,r'$\alpha_2 =$ %4.1f $\pm$ %3.1f' % (index2,indexerr2),ha='right',fontsize=12,transform=ax1.transAxes)
vcplot.set_title('RGZ volunteer distribution')
vcplot.set_xlabel('Volunteer')
vcplot.set_ylabel('Number of classifications')
vcplot.set_ylim((1,1e5))
vcplot.set_xscale('log')
vcplot.set_yscale('log')
ax2 = fig.add_subplot(212)
vchist = volunteers[1:].hist(ax=ax2,bins=50,bottom=0.1)
vchist.set_ylabel('Classifications per volunteer')
vchist.set_xlabel('Number of classifications')
vchist.set_yscale('log')
ax2.text(0.95,0.9,'Also %i anonymous classifications' % volunteers[0],ha='right',fontsize=12,transform=ax2.transAxes)
fig.show()
fig.set_tight_layout(True)
# Save hard copy of the figure
fig.savefig('%s/classifications_per_user.png' % plot_dir)
return None
def plot_classification_counts(dfs):
# Plot the total number of classifications per subject in the data
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(111)
# Eliminate N=0 counts and tutorial image
dfs_good = dfs[(dfs.classification_count < 50) & (dfs.classification_count > 0)]
h = dfs_good.classification_count.hist(ax=ax1,bins=50,grid=False)
h.set_xlabel('Classifications per subject')
h.set_ylabel('Number of classifications')
n_nonzero = (dfs.classification_count > 0).sum()
xlim = h.get_xlim()
ylim = h.get_ylim()
h.text(0.7*xlim[1],0.9*ylim[1],r'$N_{non-zero} = %i$' % n_nonzero,fontsize=20)
fig.show()
fig.tight_layout()
# Save hard copy of the figure
fig.savefig('%s/classifications_per_subject.png' % plot_dir)
return None
def find_ir_peak(x,y,srcid):
# Perform a kernel density estimate on the data:
X, Y = np.mgrid[xmin:xmax, ymin:ymax]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
# Find the number of peaks
# http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
#neighborhood = generate_binary_structure(2,2)
neighborhood = np.ones((10,10))
local_max = maximum_filter(Z, footprint=neighborhood)==Z
background = (Z==0)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
detected_peaks = local_max & ~eroded_background # keep local maxima that are not part of the flat background
npeaks = detected_peaks.sum()
return X,Y,Z,npeaks
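# Illustrative check of the footprint-based peak test above, on a simplified
# variant with two isolated spikes (comments only):
#   Z = np.zeros((50, 50)); Z[10, 10] = 1.; Z[40, 30] = 2.
#   local_max = maximum_filter(Z, footprint=np.ones((10, 10))) == Z
#   (local_max & ~(Z == 0)).sum()  # -> 2 detected peaks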
def plot_image(x,y,srcid,zid,X,Y,Z,npeaks,all_radio,radio_unique):
# Find the peak
xpeak = X[Z==Z.max()][0]
ypeak = Y[Z==Z.max()][0]
# Plot the infrared results
fig = plt.figure()
ax = fig.add_subplot(111)
# Plot the KDE map
ax.imshow(np.rot90(Z), cmap=plt.cm.hot_r,extent=[xmin, xmax, ymin, ymax])
# Plot the individual sources
ax.plot(x, y, 'go', markersize=4)
ax.text(270,40,r'IR peak: $(%i,%i)$'%(xpeak,ypeak),color='k',fontsize=14)
ax.text(270,70,r'$N_{peaks}$ = %i' % npeaks,color='k',fontsize=14)
ax.text(270,100,r'$N_{IR}$ = %i' % len(x),color='k',fontsize=14)
ax.plot([xpeak],[ypeak],'c*',markersize=12)
# Plot the radio counts
radio_flattened = [item for sublist in all_radio for item in sublist]
uniques = set(radio_flattened)
d = dict(zip(uniques,np.arange(len(uniques))))
c = Counter(all_radio)
for idx,ckeys in enumerate(c.keys()):
if len(ckeys) > 1:
t = ' and R'.join([str(d[x]) for x in ckeys])
else:
t = d[ckeys[0]]
singular = 's' if c[ckeys] != 1 else ''
ax.text(150,400-idx*20,'%3i vote%s: R%s' % (c[ckeys],singular,t))
# Rectangle showing the radio box size
radio_ir_scaling_factor = 435./132
box_counts = Counter(radio_flattened)
for ru in radio_unique:
x0,x1,y0,y1 = [float(ru_) * radio_ir_scaling_factor for ru_ in ru]
# Assume xmax matching is still good
xmax_index = '%.6f' % float(ru[1])
component_number = d[xmax_index]
number_votes = box_counts[xmax_index]
rectangle = plt.Rectangle((x0,y0), x1-x0, y1-y0, fill=False, linewidth=number_votes/5., edgecolor = 'c')
ax.add_patch(rectangle)
ax.text(x0-15,y0-15,'R%s' % component_number)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymax, ymin])
ax.set_title('%s\n%s' % (zid,srcid))
#fig.show()
# Save hard copy of the figure
fig.savefig('%s/ir_peaks/%s_ir_peak.png' % (plot_dir,srcid))
# Close figure after it's done; otherwise mpl complains about having thousands of stuff open
plt.close()
return None
def find_consensus(sub,classifications,verbose=False,completed_only=False):
Nclass = sub["classification_count"] # number of classifications made per image
srcid = sub["metadata"]["source"] # determine the image source id
zid = sub["zooniverse_id"] # determine the Zooniverse ID of the subject
'''
if completed_only:
dat_dir = '../datfiles/completed_20'
'''
classfile2 = open('%s/RGZBETA2-%s-classifications.txt' % (dat_dir,srcid), 'w')
imgid = sub["_id"] # grab the ObjectId corresponding for this image
# locate all the classifications of this image by user
user_classifications = classifications.find({"subject_ids": imgid, "updated_at": {"$gt": main_release_date}})
# count the number of users who classified this object
Nusers = classifications.find({"subject_ids": imgid, "updated_at": {"$gt": main_release_date}}).count()
# loop over the number of classifications
if Nclass == Nusers: # the number of classifications should equal the number of users who classified
# initialise coordinate variables
radio_ra = []
radio_dec = []
radio_x = []
radio_y = []
radio_w = []
radio_h = []
ir_ra = []
ir_dec = []
ir_radius = []
ir_x = []
ir_y = []
radio_comp = []
ir_comp = []
all_radio = []
all_radio_markings = []
Nuser_id = 0 # User id number
#---------------------------------------------------------------------------------------------------------------------
#---START: loop through the users who classified the image
for classification in list(user_classifications):
compid = 0 # Component id per image
rclass = classification["annotations"] # For now, analyze only the first set of continuous regions selected.
# Note that last two fields in annotations are timestamp and user_agent
Nuser_id += 1 # Increase the number of users who classified by 1.
#-------------------------------------------------------------------------------------------------------------------
#---START: loop through the keys in the annotation array, making sure that a classification has been made
for ann in rclass:
if ann.has_key('started_at') or ann.has_key('finished_at') or ann.has_key('user_agent') or ann.has_key('lang'):
continue
Nradio = 0 # counter for the number of radio components per classification
Nir = 0 # counter for the number of IR components per classification
if (ann.has_key('radio') and ann['radio'] != 'No Contours'): # get the radio annotations
radio = ann["radio"]
Nradio = len(radio) # count the number of radio components per classification
'''
print 'RADIO:'
print radio
'''
compid += 1 # we have a radio source - all components will be id with this number
list_radio = []
#---------------------------------------------------------------------------------------------------------------
#---START: loop through number of radio components in user classification
for rr in radio:
radio_marking = radio[rr]
# Find the location and size of the radio box in pixels
list_radio.append('%.6f' % float(radio_marking['xmax']))
all_radio_markings.append(radio_marking)
print >> classfile2, Nuser_id, compid,'RADIO', radio_marking['xmin'], radio_marking['xmax'], radio_marking['ymin'], radio_marking['ymax']
all_radio.append(tuple(sorted(list_radio)))
#---END: loop through number of radio components in user classification
#---------------------------------------------------------------------------------------------------------------
# get IR counterpart
irkey = ann.has_key('ir')
ir_nosources = True if (irkey and ann['ir'] == 'No Sources') else False
if (irkey and not ir_nosources): # get the infrared annotation for the radio classification.
ir = ann["ir"]
Nir = 1 #len(ir) # number of IR counterparts.
'''
print 'IR:'
print ir
'''
#exit()
#jj = 0
for ii in ir:
ir_marking = ir[ii]
# write to annotation file
print >> classfile2, Nuser_id, compid, 'IR', float(ir_marking['x']), float(ir_marking['y'])
ir_x.append(float(ir_marking['x']))
ir_y.append(float(ir_marking['y']))
else: # user did not classify an infrared source
Nir = 0
xir = -99.
yir = -99.
radiusir = -99.
print >> classfile2, Nuser_id, compid, 'IR', xir, yir
else: # user did not classify a radio source
Nradio = 0
Nir = 0
# there should always be a radio source, bug in program if we reach this part.
if not ann.has_key('radio'):
print >> classfile2,'%i No radio source - error in processing on image %s' % (Nuser_id, srcid)
elif ann['radio'] == 'No Contours':
print >> classfile2,'%i No radio source labeled by user for image %s' % (Nuser_id,srcid)
else:
print >> classfile2,'Unknown error processing radio source'
radio_comp.append( Nradio ) # add the number of radio components per user source to array.
ir_comp.append( Nir ) # add the number of IR counterparts per user source to array.
#---END: loop through the users who classified the image
#---------------------------------------------------------------------------------------------------------------------
    else: # Nclass != Nusers; the counts are inconsistent, so skip this subject
        print 'Number of users who classified subject (%i) does not equal classification count (%i).' % (Nusers,Nclass)
        classfile2.close()
        return 0
# Process the radio markings into unique components
rlist = [(rr['xmin'],rr['xmax'],rr['ymin'],rr['ymax']) for rr in all_radio_markings]
if len(all_radio_markings) > 0:
radio_unique = [rlist[0]]
for rr in rlist[1:]:
if rr not in radio_unique:
radio_unique.append(rr)
# Use a 2-D Gaussian kernel to find the center of the IR sources and plot the analysis images
if len(ir_x) > 2:
try:
X,Y,Z,npeaks = find_ir_peak(ir_x,ir_y,srcid)
plot_image(ir_x,ir_y,srcid,zid,X,Y,Z,npeaks,all_radio,radio_unique)
except LinAlgError:
npeaks = len(ir_x)
print 'LinAlgError - only %i non-unique IR peaks labeled for %s' % (npeaks,srcid)
else:
npeaks = len(ir_x)
print 'Only %i IR peaks labeled for %s' % (npeaks,srcid)
# calculate the median number of components for both IR and radio for each object in image.
radio_med = np.median(radio_comp) # median number of radio components
Ncomp_radio = np.size(np.where(np.array(radio_comp) == radio_med)) # number of classifications = median number
ir_med = np.median(ir_comp) # median number of infrared components
Ncomp_ir = np.size(np.where(np.array(ir_comp) == ir_med)) # number of classifications = median number
if verbose:
print ' '
print 'Source.....................................................................................: %s' % srcid
print 'Number of users who classified the object..................................................: %d' % len(radio_comp)
print '................'
print 'Number of users who classified the radio source with the median value of radio components..: %d' % Ncomp_radio
print 'Median number of radio components per user.................................................: %f' % radio_med
print 'Number of users who classified the IR source with the median value of IR components........: %d' % Ncomp_ir
print 'Median number of IR components per user....................................................: %f' % ir_med
print ' '
classfile2.close()
return npeaks
def load_rgz_data():
# Connect to Mongo database
# Make sure to run mongorestore /path/to/database to restore the updated files
# mongod client must be running locally
client = MongoClient('localhost', 27017)
db = client['radio']
subjects = db['radio_subjects'] # subjects = images
classifications = db['radio_classifications'] # classifications = classifications of each subject per user
return subjects,classifications
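# Hedged usage sketch (assumes a local mongod serving the restored 'radio'
# database, as described above):
#   subjects, classifications = load_rgz_data()
#   sub = subjects.find_one({'state': 'complete'})
#   find_consensus(sub, classifications, verbose=True)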
def load_catalog():
# Connect to Mongo database
# Make sure to run mongorestore /path/to/database to restore the updated files
# mongod client must be running locally
client = MongoClient('localhost', 27017)
db = client['radio']
catalog = db['catalog']
return catalog
def overall_stats(subjects,classifications,verbose=True):
# Retrieve RGZ data, convert into data frames
batch_classifications = classifications.find({"updated_at": {"$gt": main_release_date}})
batch_subjects = subjects.find()
dfc = pd.DataFrame( list(batch_classifications) )
dfs = pd.DataFrame( list(batch_subjects) )
# Get some quick statistics on the dataset so far
n_subjects = subjects.count() # determine the number of images in the data set
n_classifications = classifications.find({"updated_at": {"$gt": main_release_date}}).count() # total number of classifications
users = classifications.distinct('user_name')
n_users = len(users)
# Find the most recent classification in this data dump
mrc = classifications.find().sort([("updated_at", -1)]).limit(1)
most_recent_date = [x for x in mrc][0]['updated_at']
# Find number of anonymous classifications
total_count = dfc._id.count()
loggedin_count = dfc.user_name.count()
anonymous_count = total_count - loggedin_count
anonymous_percent = float(anonymous_count)/total_count * 100
if verbose:
print ' '
print 'RGZ data as of %s' % most_recent_date.strftime("%H:%M:%S%Z %b %d, %Y")
print '---------------------------------'
print 'Total classifications : %i' % n_classifications
print 'Total distinct subjects : %i' % n_subjects
print 'Total distinct users : %i' % n_users
print ' '
print 'Percent of classifications by anonymous users: %.1f (%i,%i)' % (anonymous_percent,anonymous_count,loggedin_count)
print ' '
# Make some plots
plot_user_counts(dfc)
plot_classification_counts(dfs)
return None
def run_sample(subjects,classifications,n_subjects=1000,completed=False):
N = 0
if completed:
suffix = '_completed'
class_lim = {'state':'complete'}
else:
suffix = ''
class_lim = {'classification_count':{'$gt':0}}
# Look at just the newly retired ones (single-contour, 5 classifications)
# suffix = '_radio1'
# class_lim = {'state':'complete','metadata.contour_count':1,'classification_count':5}
with open('%s/npeaks_ir%s.csv' % (csv_dir,suffix),'wb') as f:
for sub in list(subjects.find(class_lim).limit(n_subjects)):
Nclass = sub["classification_count"] # number of classifications made per image
if Nclass > 0: # if no classifications move to next image (shouldn't happen)
npeak = find_consensus(sub,classifications,completed_only=completed)
print >> f, npeak
N += 1
# Check progress by printing to screen every 100 classifications
if not N % 100:
print N, datetime.datetime.now().strftime('%H:%M:%S.%f')
return None
def onemillion(classifications,users):
# DEPRECATED
# Does not work with new sanitized RGZ dumps (starting Feb 2016)
'''
Discrepancy between the API count and the number of classifications in MongoDB.
For example, on 14 Jan 2015, the counts were:
API = 997,395
MongoDB = 1,036,501
Consulting with Ivy and Chris S., we decided to go with the count on the API. So the correct classification for the
1 millionth ID for RGZ will be the 1,000,000 + (Mongo - API) = 1,039,106th entry sorted by date in MongoDB.
First data dump that got to this was 15 Jan 2015, which had 1,040,566 documents in radio_classifications.
'''
# Limit the number of records to pull from this data dump.
ntot = classifications.count()
onemillionth = 1039106
diff1M = ntot - onemillionth
# Return the classifications surrounding 1 million
classifications_sorted = classifications.find().sort([("updated_at",-1)]).limit(diff1M)
lc = list(classifications_sorted)
lc.reverse()
names = set()
nu = 0
for idx,c in enumerate(lc):
idx1M = idx + 1000000
try:
username = c['user_name']
if username not in names:
names.add(username)
usr = users.find_one({'name':username})
email = usr['email']
# How many classifications have they done? Are these our "power" users?
nclass = classifications.find({'user_name':username}).count()
print 'Classification: %7i, Prize order: %2i, Date: %s, N_class = %5i, Username: %20s, Email: %s ' % (idx1M, nu+1, c['updated_at'], nclass, username, email)
nu += 1
except KeyError:
username = "Anonymous"
if nu >= 10:
break
return None
# If program is called from the command line, process the full dataset
if __name__ == '__main__':
subjects,classifications = load_rgz_data()
run_sample(subjects,classifications)
plot_npeaks()
| willettk/rgz-analysis | python/rgz.py | Python | mit | 28,157 | ["Galaxy", "Gaussian"] | 82e029fb514a2ad85f2f0d8d55ed0148a244b2e64d0eb3a778b14d0456852364 |
#!/usr/bin/env python
## Copyright (C) 2011-2012, 2014 The PISM Authors
## script to generate figure: results from SeaRISE experiments
## usage: if UAFX_G_D3_C?_??.nc are result NetCDF files then do
## $ slr_show.py -m UAFX
# try different netCDF modules
import sys
try:
    from netCDF4 import Dataset as CDF
except ImportError:
    print "netCDF4 is not installed!"
    sys.exit(1)
from numpy import zeros
import pylab as plt
from optparse import OptionParser
parser = OptionParser()
parser.usage = "usage: %prog [options]"
parser.description = "A script for PISM output files to show time series plots using pylab."
parser.add_option("-a",dest="t_a",type="int",
help="start year, in years since 2004, default = 0",default=0)
parser.add_option("-e",dest="t_e",type="int",
help="end year, in years since 2004, default = 500",default=500)
parser.add_option("-m", "--model",dest="model",
help="choose experiment, default UAF1",default="UAF1")
(options, args) = parser.parse_args()
model = options.model
t_a = options.t_a
t_e = options.t_e
# first name in this list is CONTROL
NCNAMES = [model + "_G_D3_C1_E0.nc", model + "_G_D3_C2_E0.nc", model + "_G_D3_C3_E0.nc",
           model + "_G_D3_C4_E0.nc", model + "_G_D3_C1_S1.nc", model + "_G_D3_C1_S2.nc",
           model + "_G_D3_C1_S3.nc", model + "_G_D3_C1_M1.nc", model + "_G_D3_C1_M2.nc",
           model + "_G_D3_C1_M3.nc", model + "_G_D3_C1_T1.nc"]
# labels
labels = ["AR4 A1B","AR4 A1B 1.5x","AR4 A1B 2x","2x basal sliding","2.5x basal sliding","3x basal sliding","2 m/a bmr","20 m/a bmr","200 m/a bmr","AR4 A1B + 2x sliding"]
# line colors
colors = ['#984EA3', # violet
'#984EA3', # violet
'#984EA3', # violet
'#FF7F00', # orange
'#FF7F00', # orange
'#FF7F00', # orange
'#377EB8', # light blue
'#377EB8', # light blue
'#377EB8', # light blue
'#4DAF4A'] # green
dashes = ['-','--','-.','-','--','-.','-','--','-.','-']
print "control run name is " + NCNAMES[0]
n = len(NCNAMES)
nc0 = CDF(NCNAMES[0], 'r')
try:
t_units = nc0.variables['tseries'].units
t = nc0.variables['tseries'][t_a:t_e]
except KeyError:
t_units = nc0.variables['time'].units
t = nc0.variables['time'][t_a:t_e]
nc0.close()
# convert to years if t is in seconds
if t_units.split()[0] in ('seconds', 's'):
t /= 3.15569259747e7
ivol = zeros((len(t),n))
ivolshift = zeros((len(t),n-1))
for j in range(n):
nc = CDF(NCNAMES[j], 'r')
ivol[:,j] = nc.variables['ivol'][t_a:t_e]
nc.close()
for j in range(n-1):
ivolshift[:,j] = ivol[:,j+1] - ivol[:,0]
# "2,850,000 km3 of ice were to melt, global sea levels would rise 7.2 m"
scale = 7.2 / 2.850e6
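# Worked example of the scaling (comments only): one km^3 of melted ice adds
# 7.2 / 2.85e6 ~ 2.5e-6 m of sea level, so a 1000 km^3 drop in ivol (stored in
# m^3, hence the /1.0e9 below) corresponds to ~2.5 mm of sea-level rise.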
# screen plot with high contrast
fig = plt.figure()
ax = fig.add_subplot(111,axisbg='0.15')
for j in range(n-1):
ax.plot(t,-(ivolshift[:,j]/1.0e9)*scale,dashes[j],color=colors[j],linewidth=3)
ax.set_xlabel('years from 2004')
ax.set_ylabel('sea level rise relative to control (m)')
ax.legend(labels,loc='upper left')
ax.grid(True,color='w')
plt.show()
# line colors
colors = ['#984EA3', # violet
'#984EA3', # violet
'#984EA3', # violet
'#FF7F00', # orange
'#FF7F00', # orange
'#FF7F00', # orange
'#084594', # dark blue
'#084594', # dark blue
'#084594', # dark blue
'#4DAF4A'] # green
# print plot with white background
fig = plt.figure()
ax = fig.add_subplot(111)
for j in range(n-1):
ax.plot(t,-(ivolshift[:,j]/1.0e9)*scale,dashes[j],color=colors[j],linewidth=2)
ax.set_xlabel('years from 2004')
ax.set_ylabel('sea level rise relative to control (m)')
ax.legend(labels,loc='upper left')
ax.grid(True)
plt.savefig(model + '_slr.pdf')
| talbrecht/pism_pik06 | util/slr_show.py | Python | gpl-3.0 | 3,713 | ["NetCDF"] | 924e6b2c9db2d617cbf0c28e8b267bd34694a5687b22948192de3550ae1d9f78 |
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for regularization models via the addition of noise.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.engine import Layer
class GaussianNoise(Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
Gaussian Noise (GN) is a natural choice as corruption process
for real valued inputs.
As it is a regularization layer, it is only active at training time.
Arguments:
stddev: float, standard deviation of the noise distribution.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, stddev, **kwargs):
super(GaussianNoise, self).__init__(**kwargs)
self.supports_masking = True
self.stddev = stddev
def call(self, inputs, training=None):
def noised():
return inputs + K.random_normal(
shape=K.shape(inputs), mean=0., stddev=self.stddev)
return K.in_train_phase(noised, inputs, training=training)
def get_config(self):
config = {'stddev': self.stddev}
base_config = super(GaussianNoise, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
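# Hedged usage sketch (hypothetical model; standard Keras pattern, not part of
# this module):
#   model = Sequential()
#   model.add(GaussianNoise(0.1, input_shape=(32,)))
# The noised branch only fires when K.in_train_phase resolves to training.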
class GaussianDropout(Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Arguments:
rate: float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting
Srivastava, Hinton, et al.
2014](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
"""
def __init__(self, rate, **kwargs):
super(GaussianDropout, self).__init__(**kwargs)
self.supports_masking = True
self.rate = rate
def call(self, inputs, training=None):
if 0 < self.rate < 1:
def noised():
stddev = np.sqrt(self.rate / (1.0 - self.rate))
return inputs * K.random_normal(
shape=K.shape(inputs), mean=1.0, stddev=stddev)
return K.in_train_phase(noised, inputs, training=training)
return inputs
def get_config(self):
config = {'rate': self.rate}
base_config = super(GaussianDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
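# Illustrative check of the stddev formula above (comments only): with
# rate = 0.5 the multiplicative noise has stddev sqrt(0.5 / (1 - 0.5)) = 1.0,
# i.e. activations are scaled by N(1, 1) samples at training time.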
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
to their original values, in order to ensure the self-normalizing property
even after this dropout.
Alpha Dropout fits well to Scaled Exponential Linear Units
by randomly setting activations to the negative saturation value.
Arguments:
rate: float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: A Python integer to use as random seed.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(AlphaDropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
return self.noise_shape if self.noise_shape else K.shape(inputs)
def call(self, inputs, training=None):
if 0. < self.rate < 1.:
noise_shape = self._get_noise_shape(inputs)
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed):
alpha_p = -alpha * scale
kept_idx = K.greater_equal(K.random_uniform(noise_shape, seed=seed),
rate)
kept_idx = K.cast(kept_idx, K.floatx())
a = ((1 - rate) * (1 + rate * alpha_p ** 2)) ** -0.5
b = -a * alpha_p * rate
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
return a * x + b
return K.in_train_phase(dropped_inputs, inputs, training=training)
return inputs
def get_config(self):
config = {'rate': self.rate}
base_config = super(AlphaDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
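# Sketch of the affine correction above (comments only): with rate = 0.1 and
# alpha_p = -alpha * scale ~ -1.7581,
#   a = ((1 - 0.1) * (1 + 0.1 * alpha_p**2))**-0.5 ~ 0.921
#   b = -a * alpha_p * 0.1 ~ 0.162
# which rescales the output so that mean and variance are preserved.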
| alistairlow/tensorflow | tensorflow/python/keras/_impl/keras/layers/noise.py | Python | apache-2.0 | 5,871 | ["Gaussian"] | 93a6319c4203f92ab9e53a8974ee152eab2681f553a10ccfe3bbdadbe79b72a8 |
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, date
from lxml import etree
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.base_status.base_stage import base_stage
from openerp.addons.resource.faces import task as Task
_TASK_STATE = [('draft', 'New'),('open', 'In Progress'),('pending', 'Pending'), ('done', 'Done'), ('cancelled', 'Cancelled')]
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, size=64, translate=True),
'description': fields.text('Description'),
'sequence': fields.integer('Sequence'),
'case_default': fields.boolean('Default for New Projects',
help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
'state': fields.selection(_TASK_STATE, 'Related Status', required=True,
help="The status of your document is automatically changed regarding the selected stage. " \
"For example, if a stage is related to the status 'Close', when your document reaches this stage, it is automatically closed."),
'fold': fields.boolean('Folded by Default',
help="This stage is not visible, for example in status bar or kanban view, when there are no records in that stage to display."),
}
def _get_default_project_id(self, cr, uid, ctx={}):
proj = ctx.get('default_project_id', False)
if type(proj) is int:
return [proj]
return proj
_defaults = {
'sequence': 1,
'state': 'open',
'fold': False,
'case_default': False,
'project_ids': _get_default_project_id
}
_order = 'sequence'
def short_name(name):
"""Keep first word(s) of name to make it small enough
but distinctive"""
if not name: return name
# keep 7 chars + end of the last word
keep_words = name[:7].strip().split()
return ' '.join(name.split()[:len(keep_words)])
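# Illustrative examples of the truncation above (comments only):
#   short_name('Research and Development')  # -> 'Research'
#   short_name('New Sales Campaign 2013')   # -> 'New Sales'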
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if user == 1:
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
if context and context.get('user_preference'):
            # pass the user id as a query parameter rather than interpolating it into the SQL
            cr.execute("""SELECT project.id FROM project_project project
                          LEFT JOIN account_analytic_account account ON account.id = project.analytic_account_id
                          LEFT JOIN project_user_rel rel ON rel.project_id = project.id
                          WHERE (account.user_id = %s or rel.uid = %s)""", (user, user))
return [(r[0]) for r in cr.fetchall()]
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
def _complete_name(self, cr, uid, ids, name, args, context=None):
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = (m.parent_id and (m.parent_id.name + '/') or '') + m.name
return res
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
if not part:
return {'value':{}}
val = {}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def _get_projects_from_tasks(self, cr, uid, task_ids, context=None):
tasks = self.pool.get('project.task').browse(cr, uid, task_ids, context=context)
project_ids = [task.project_id.id for task in tasks if task.project_id]
return self.pool.get('project.project')._get_project_and_parents(cr, uid, project_ids, context)
def _get_project_and_parents(self, cr, uid, ids, context=None):
""" return the project ids and all their parent projects """
res = set(ids)
while ids:
cr.execute("""
SELECT DISTINCT parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND project.id IN %s
""", (tuple(ids),))
ids = [t[0] for t in cr.fetchall()]
res.update(ids)
return list(res)
def _get_project_and_children(self, cr, uid, ids, context=None):
""" retrieve all children projects of project ids;
return a dictionary mapping each project to its parent project (or None)
"""
res = dict.fromkeys(ids, None)
while ids:
cr.execute("""
SELECT project.id, parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND parent.id IN %s
""", (tuple(ids),))
dic = dict(cr.fetchall())
res.update(dic)
ids = dic.keys()
return res
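    # Note (added commentary): both recursive walks above join
    # project -> analytic account -> parent account one level per loop
    # iteration, upward in _get_project_and_parents and downward here,
    # stopping once the cursor returns no more rows.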
def _progress_rate(self, cr, uid, ids, names, arg, context=None):
child_parent = self._get_project_and_children(cr, uid, ids, context)
# compute planned_hours, total_hours, effective_hours specific to each project
cr.execute("""
SELECT project_id, COALESCE(SUM(planned_hours), 0.0),
COALESCE(SUM(total_hours), 0.0), COALESCE(SUM(effective_hours), 0.0)
FROM project_task WHERE project_id IN %s AND state <> 'cancelled'
GROUP BY project_id
""", (tuple(child_parent.keys()),))
# aggregate results into res
res = dict([(id, {'planned_hours':0.0,'total_hours':0.0,'effective_hours':0.0}) for id in ids])
for id, planned, total, effective in cr.fetchall():
# add the values specific to id to all parent projects of id in the result
while id:
if id in ids:
res[id]['planned_hours'] += planned
res[id]['total_hours'] += total
res[id]['effective_hours'] += effective
id = child_parent[id]
# compute progress rates
for id in ids:
if res[id]['total_hours']:
res[id]['progress_rate'] = round(100.0 * res[id]['effective_hours'] / res[id]['total_hours'], 2)
else:
res[id]['progress_rate'] = 0.0
return res
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise osv.except_osv(_('Invalid Action!'),
_('You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
elif proj.alias_id:
alias_ids.append(proj.alias_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.project'), ('res_id', '=', id)], context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)], context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
if context is None:
context = {}
res = dict.fromkeys(ids, 0)
ctx = context.copy()
ctx['active_test'] = False
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)], context=ctx)
for task in self.pool.get('project.task').browse(cr, uid, task_ids, context):
res[task.project_id.id] += 1
return res
def _get_alias_models(self, cr, uid, context=None):
"""Overriden in project_issue to offer more options"""
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('public', 'All Users'),
('employees', 'Employees Only'),
('followers', 'Followers Only')]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)
]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'tree,form',
'view_type': 'form',
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'complete_name': fields.function(_complete_name, string="Project Name", type='char', size=250),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the project without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
'analytic_account_id': fields.many2one('account.analytic.account', 'Contract/Analytic', help="Link this project to an analytic account if you need financial management on projects. It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.", ondelete="cascade", required=True),
'priority': fields.integer('Sequence', help="Gives the sequence order when displaying the list of projects"),
'members': fields.many2many('res.users', 'project_user_rel', 'project_id', 'uid', 'Project Members',
help="Project's members are users who can have an access to the tasks related to this project.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'tasks': fields.one2many('project.task', 'project_id', "Task Activities"),
'planned_hours': fields.function(_progress_rate, multi="progress", string='Planned Time', help="Sum of planned hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'state'], 20),
}),
'effective_hours': fields.function(_progress_rate, multi="progress", string='Time Spent', help="Sum of spent hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'state'], 20),
}),
'total_hours': fields.function(_progress_rate, multi="progress", string='Total Time', help="Sum of total hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'state'], 20),
}),
'progress_rate': fields.function(_progress_rate, multi="progress", string='Progress', type='float', group_operator="avg", help="Percent of tasks closed according to the total of tasks todo.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'state'], 20),
}),
'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time', help="Timetable working hours to adjust the gantt diagram report", states={'close':[('readonly',True)]} ),
'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', 'Tasks Stages', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'task_count': fields.function(_task_count, type='integer', string="Open Tasks"),
'color': fields.integer('Color Index'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized"
"with Tasks (or optionally Issues if the Issue Tracker module is installed)."),
'alias_model': fields.selection(_alias_models, "Alias Model", select=True, required=True,
help="The kind of document created when an email is received on this project's email alias"),
'privacy_visibility': fields.selection(_visibility_selection, 'Privacy / Visibility', required=True),
'state': fields.selection([('template', 'Template'),('draft','New'),('open','In Progress'), ('cancelled', 'Cancelled'),('pending','Pending'),('close','Closed')], 'Status', required=True,),
'doc_count':fields.function(_get_attached_docs, string="Number of documents attached", type='integer')
}
def _get_type_common(self, cr, uid, context):
ids = self.pool.get('project.task.type').search(cr, uid, [('case_default','=',1)], context=context)
return ids
_order = "sequence, id"
_defaults = {
'active': True,
'type': 'contract',
'state': 'open',
'priority': 1,
'sequence': 10,
'type_ids': _get_type_common,
'alias_model': 'project.task',
'privacy_visibility': 'employees',
'alias_domain': False, # always hide alias during creation
}
# TODO: why not use an SQL constraint?
def _check_dates(self, cr, uid, ids, context=None):
for leave in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
if leave['date_start'] and leave['date']:
if leave['date_start'] > leave['date']:
return False
return True
_constraints = [
(_check_dates, 'Error! Project start-date must be lower than project end-date.', ['date_start', 'date'])
]
def set_template(self, cr, uid, ids, context=None):
res = self.setActive(cr, uid, ids, value=False, context=context)
return res
def set_done(self, cr, uid, ids, context=None):
task_obj = self.pool.get('project.task')
task_ids = task_obj.search(cr, uid, [('project_id', 'in', ids), ('state', 'not in', ('cancelled', 'done'))])
task_obj.case_close(cr, uid, task_ids, context=context)
return self.write(cr, uid, ids, {'state':'close'}, context=context)
def set_cancel(self, cr, uid, ids, context=None):
task_obj = self.pool.get('project.task')
task_ids = task_obj.search(cr, uid, [('project_id', 'in', ids), ('state', '!=', 'done')])
task_obj.case_cancel(cr, uid, task_ids, context=context)
return self.write(cr, uid, ids, {'state':'cancelled'}, context=context)
def set_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state':'pending'}, context=context)
def set_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state':'open'}, context=context)
def reset_project(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=True, context=context)
def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
""" copy and map tasks from old to new project """
if context is None:
context = {}
map_task_id = {}
task_obj = self.pool.get('project.task')
proj = self.browse(cr, uid, old_project_id, context=context)
for task in proj.tasks:
map_task_id[task.id] = task_obj.copy(cr, uid, task.id, {}, context=context)
self.write(cr, uid, [new_project_id], {'tasks':[(6,0, map_task_id.values())]})
task_obj.duplicate_task(cr, uid, map_task_id, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context = {}
if default is None:
default = {}
context['active_test'] = False
default['state'] = 'open'
default['line_ids'] = []
default['tasks'] = []
default.pop('alias_name', None)
default.pop('alias_id', None)
proj = self.browse(cr, uid, id, context=context)
if not default.get('name', False):
default.update(name=_("%s (copy)") % (proj.name))
res = super(project, self).copy(cr, uid, id, default, context)
self.map_tasks(cr,uid,id,res,context)
return res
def duplicate_template(self, cr, uid, ids, context=None):
if context is None:
context = {}
data_obj = self.pool.get('ir.model.data')
result = []
for proj in self.browse(cr, uid, ids, context=context):
parent_id = context.get('parent_id', False)
context.update({'analytic_project_copy': True})
new_date_start = time.strftime('%Y-%m-%d')
new_date_end = False
if proj.date_start and proj.date:
start_date = date(*time.strptime(proj.date_start,'%Y-%m-%d')[:3])
end_date = date(*time.strptime(proj.date,'%Y-%m-%d')[:3])
new_date_end = (datetime(*time.strptime(new_date_start,'%Y-%m-%d')[:3])+(end_date-start_date)).strftime('%Y-%m-%d')
context.update({'copy':True})
new_id = self.copy(cr, uid, proj.id, default = {
'name':_("%s (copy)") % (proj.name),
'state':'open',
'date_start':new_date_start,
'date':new_date_end,
'parent_id':parent_id}, context=context)
result.append(new_id)
child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)], context=context)
parent_id = self.read(cr, uid, new_id, ['analytic_account_id'])['analytic_account_id'][0]
if child_ids:
self.duplicate_template(cr, uid, child_ids, context={'parent_id': parent_id})
if result and len(result):
res_id = result[0]
form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
return {
'name': _('Projects'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.project',
'view_id': False,
'res_id': res_id,
'views': [(form_view['res_id'],'form'),(tree_view['res_id'],'tree')],
'type': 'ir.actions.act_window',
'search_view_id': search_view['res_id'],
'nodestroy': True
}
# set active value for a project, its sub projects and its tasks
def setActive(self, cr, uid, ids, value=True, context=None):
task_obj = self.pool.get('project.task')
for proj in self.browse(cr, uid, ids, context=None):
self.write(cr, uid, [proj.id], {'state': value and 'open' or 'template'}, context)
cr.execute('select id from project_task where project_id=%s', (proj.id,))
tasks_id = [x[0] for x in cr.fetchall()]
if tasks_id:
task_obj.write(cr, uid, tasks_id, {'active': value}, context=context)
child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)])
if child_ids:
self.setActive(cr, uid, child_ids, value, context=None)
return True
def _schedule_header(self, cr, uid, ids, force_members=True, context=None):
context = context or {}
if type(ids) in (long, int,):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
for project in projects:
if (not project.members) and force_members:
raise osv.except_osv(_('Warning!'),_("You must assign members on the project '%s'!") % (project.name,))
resource_pool = self.pool.get('resource.resource')
result = "from openerp.addons.resource.faces import *\n"
result += "import datetime\n"
for project in self.browse(cr, uid, ids, context=context):
u_ids = [i.id for i in project.members]
if project.user_id and (project.user_id.id not in u_ids):
u_ids.append(project.user_id.id)
for task in project.tasks:
if task.state in ('done','cancelled'):
continue
if task.user_id and (task.user_id.id not in u_ids):
u_ids.append(task.user_id.id)
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
resource_objs = resource_pool.generate_resources(cr, uid, u_ids, calendar_id, context=context)
for key, vals in resource_objs.items():
result +='''
class User_%s(Resource):
efficiency = %s
''' % (key, vals.get('efficiency', False))
result += '''
def Project():
'''
return result
def _schedule_project(self, cr, uid, project, context=None):
resource_pool = self.pool.get('resource.resource')
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
working_days = resource_pool.compute_working_calendar(cr, uid, calendar_id, context=context)
# TODO: check if we need working_..., default values are ok.
puids = [x.id for x in project.members]
if project.user_id:
puids.append(project.user_id.id)
result = """
def Project_%d():
start = \'%s\'
working_days = %s
resource = %s
""" % (
project.id,
project.date_start or time.strftime('%Y-%m-%d'), working_days,
'|'.join(['User_'+str(x) for x in puids]) or 'None'
)
vacation = calendar_id and tuple(resource_pool.compute_vacation(cr, uid, calendar_id, context=context)) or False
if vacation:
result+= """
vacation = %s
""" % ( vacation, )
return result
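    # Hedged illustration (comments only): for a hypothetical project id 7
    # starting 2014-01-01 with members 2 and 3, the string built above
    # evaluates to roughly
    #   def Project_7():
    #       start = '2014-01-01'
    #       working_days = [...]
    #       resource = User_2 | User_3
    # which schedule_tasks() below consumes via exec, together with the
    # header generated by _schedule_header.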
#TODO: DO Resource allocation and compute availability
    def compute_allocation(self, cr, uid, ids, start_date, end_date, context=None):
        if context is None:
            context = {}
allocation = {}
return allocation
def schedule_tasks(self, cr, uid, ids, context=None):
context = context or {}
if type(ids) in (long, int,):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
result = self._schedule_header(cr, uid, ids, False, context=context)
for project in projects:
result += self._schedule_project(cr, uid, project, context=context)
result += self.pool.get('project.task')._generate_task(cr, uid, project.tasks, ident=4, context=context)
local_dict = {}
exec result in local_dict
projects_gantt = Task.BalancedProject(local_dict['Project'])
for project in projects:
project_gantt = getattr(projects_gantt, 'Project_%d' % (project.id,))
for task in project.tasks:
if task.state in ('done','cancelled'):
continue
p = getattr(project_gantt, 'Task_%d' % (task.id,))
self.pool.get('project.task').write(cr, uid, [task.id], {
'date_start': p.start.strftime('%Y-%m-%d %H:%M:%S'),
'date_end': p.end.strftime('%Y-%m-%d %H:%M:%S')
}, context=context)
if (not task.user_id) and (p.booked_resource):
self.pool.get('project.task').write(cr, uid, [task.id], {
'user_id': int(p.booked_resource[0].name[5:]),
}, context=context)
return True
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def create(self, cr, uid, vals, context=None):
if context is None: context = {}
# Prevent double project creation when 'use_tasks' is checked!
context = dict(context, project_creation_in_progress=True)
mail_alias = self.pool.get('mail.alias')
if not vals.get('alias_id') and vals.get('name', False):
vals.pop('alias_name', None) # prevent errors during copy()
alias_id = mail_alias.create_unique_alias(cr, uid,
# Using '+' allows using subaddressing for those who don't
# have a catchall domain setup.
{'alias_name': "project+"+short_name(vals['name'])},
model_name=vals.get('alias_model', 'project.task'),
context=context)
vals['alias_id'] = alias_id
if vals.get('type', False) not in ('template','contract'):
vals['type'] = 'contract'
project_id = super(project, self).create(cr, uid, vals, context)
mail_alias.write(cr, uid, [vals['alias_id']], {'alias_defaults': {'project_id': project_id} }, context)
return project_id
def write(self, cr, uid, ids, vals, context=None):
# if alias_model has been changed, update alias_model_id accordingly
if vals.get('alias_model'):
model_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', vals.get('alias_model', 'project.task'))])
vals.update(alias_model_id=model_ids[0])
return super(project, self).write(cr, uid, ids, vals, context=context)
class task(base_stage, osv.osv):
_name = "project.task"
_description = "Task"
_date_name = "date_start"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'project.mt_task_new': lambda self, cr, uid, obj, ctx=None: obj['state'] in ['new', 'draft'],
'project.mt_task_started': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open',
'project.mt_task_closed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
},
'stage_id': {
'project.mt_task_stage': lambda self, cr, uid, obj, ctx=None: obj['state'] not in ['new', 'draft', 'done', 'open'],
},
'kanban_state': { # kanban state: tracked, but only block subtype
'project.mt_task_blocked': lambda self, cr, uid, obj, ctx=None: obj['kanban_state'] == 'blocked',
},
}
def _get_default_partner(self, cr, uid, context=None):
""" Override of base_stage to add project specific behavior """
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return super(task, self)._get_default_partner(cr, uid, context=context)
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return (self._resolve_project_id_from_context(cr, uid, context=context) or False)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('state', '=', 'draft')], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context['default_project_id']
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return project_ids[0][0]
return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _read_group_user_id(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
res_users = self.pool.get('res.users')
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
access_rights_uid = access_rights_uid or uid
if project_id:
ids += self.pool.get('project.project').read(cr, access_rights_uid, project_id, ['members'], context=context)['members']
order = res_users._order
# lame way to allow reverting search, should just work in the trivial case
if read_group_order == 'user_id desc':
order = '%s desc' % order
# de-duplicate and apply search order
ids = res_users._search(cr, uid, [('id','in',ids)], order=order, access_rights_uid=access_rights_uid, context=context)
result = res_users.name_get(cr, access_rights_uid, ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(ids.index(x[0]), ids.index(y[0])))
return result, {}
_group_by_full = {
'stage_id': _read_group_stage_ids,
'user_id': _read_group_user_id,
}
def _str_get(self, task, level=0, border='***', context=None):
return border+' '+(task.user_id and task.user_id.name.upper() or '')+(level and (': L'+str(level)) or '')+(' - %.1fh / %.1fh'%(task.effective_hours or 0.0,task.planned_hours))+' '+border+'\n'+ \
border[0]+' '+(task.name or '')+'\n'+ \
(task.description or '')+'\n\n'
# Compute: effective_hours, total_hours, progress
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
res = {}
cr.execute("SELECT task_id, COALESCE(SUM(hours),0) FROM project_task_work WHERE task_id IN %s GROUP BY task_id",(tuple(ids),))
hours = dict(cr.fetchall())
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = {'effective_hours': hours.get(task.id, 0.0), 'total_hours': (task.remaining_hours or 0.0) + hours.get(task.id, 0.0)}
res[task.id]['delay_hours'] = res[task.id]['total_hours'] - task.planned_hours
res[task.id]['progress'] = 0.0
if (task.remaining_hours + hours.get(task.id, 0.0)):
res[task.id]['progress'] = round(min(100.0 * hours.get(task.id, 0.0) / res[task.id]['total_hours'], 99.99),2)
if task.state in ('done','cancelled'):
res[task.id]['progress'] = 100.0
return res
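# Worked example (illustrative): a task with planned_hours=10.0, work lines
# summing to 4.0h and remaining_hours=6.0 gives effective_hours=4.0,
# total_hours=6.0+4.0=10.0, delay_hours=10.0-10.0=0.0 and
# progress=round(min(100.0*4.0/10.0, 99.99), 2)=40.0.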
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned=0.0):
if remaining and not planned:
return {'value':{'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned=0.0, effective=0.0):
return {'value':{'remaining_hours': planned - effective}}
def onchange_project(self, cr, uid, id, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def duplicate_task(self, cr, uid, map_ids, context=None):
for new in map_ids.values():
task = self.browse(cr, uid, new, context)
child_ids = [ ch.id for ch in task.child_ids]
if task.child_ids:
for child in task.child_ids:
if child.id in map_ids.keys():
child_ids.remove(child.id)
child_ids.append(map_ids[child.id])
parent_ids = [ ch.id for ch in task.parent_ids]
if task.parent_ids:
for parent in task.parent_ids:
if parent.id in map_ids.keys():
parent_ids.remove(parent.id)
parent_ids.append(map_ids[parent.id])
#FIXME: why are both the copy and the original already present here?
self.write(cr, uid, new, {'parent_ids':[(6,0,set(parent_ids))], 'child_ids':[(6,0, set(child_ids))]})
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update({'work_ids':[], 'date_start': False, 'date_end': False, 'date_deadline': False})
if not default.get('remaining_hours', False):
default['remaining_hours'] = float(self.read(cr, uid, id, ['planned_hours'])['planned_hours'])
default['active'] = True
if not default.get('name', False):
default['name'] = self.browse(cr, uid, id, context=context).name or ''
if not context.get('copy',False):
new_name = _("%s (copy)") % (default.get('name', ''))
default.update({'name':new_name})
return super(task, self).copy_data(cr, uid, id, default, context)
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context = {}
if default is None:
default = {}
stage = self._get_default_stage_id(cr, uid, context=context)
if stage:
default['stage_id'] = stage
return super(task, self).copy(cr, uid, id, default, context)
def _is_template(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = True
if task.project_id:
if task.project_id.active == False or task.project_id.state == 'template':
res[task.id] = False
return res
def _get_task(self, cr, uid, ids, context=None):
result = {}
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id: result[work.task_id.id] = True
return result.keys()
_columns = {
'active': fields.function(_is_template, store=True, string='Not a Template Task', type='boolean', help="This field is computed automatically and has the same behavior as the boolean 'active' field: if the task is linked to a template or inactive project, it will be hidden unless specifically requested."),
'name': fields.char('Task Summary', size=128, required=True, select=True),
'description': fields.text('Description'),
'priority': fields.selection([('4','Very Low'), ('3','Low'), ('2','Medium'), ('1','Important'), ('0','Very important')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange',
domain="['&', ('fold', '=', False), ('project_ids', '=', project_id)]"),
'state': fields.related('stage_id', 'state', type="selection", store=True,
selection=_TASK_STATE, string="Status", readonly=True,
help='The status is set to \'Draft\', when a case is created.\
If the case is in progress the status is set to \'Open\'.\
When the case is over, the status is set to \'Done\'.\
If the case needs to be reviewed then the status is \
set to \'Pending\'.'),
'categ_ids': fields.many2many('project.category', string='Tags'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
readonly=True, required=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True), #not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date',select=True),
'date_end': fields.datetime('Ending Date',select=True),
'date_deadline': fields.date('Deadline',select=True),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select="1", track_visibility='onchange'),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'effective_hours': fields.function(_hours_get, string='Hours Spent', multi='hours', help="Computed using the sum of the task work done.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'total_hours': fields.function(_hours_get, string='Total', multi='hours', help="Computed as: Time Spent + Remaining Time.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="If the task has a progress of 99.99% you should close the task if it's finished or reevaluate the time",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'delay_hours': fields.function(_hours_get, string='Delay Hours', multi='hours', help="Computed as difference between planned hours by the project manager and the total hours of the task.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'user_id': fields.many2one('res.users', 'Assigned to', track_visibility='onchange'),
'delegated_user_id': fields.related('child_ids', 'user_id', type='many2one', relation='res.users', string='Delegated To'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'work_ids': fields.one2many('project.task.work', 'task_id', 'Work done'),
'manager_id': fields.related('project_id', 'analytic_account_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': _get_default_project_id,
'kanban_state': 'normal',
'priority': '2',
'progress': 0,
'sequence': 10,
'active': True,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
}
_order = "priority, sequence, date_start, name, id"
def set_high_priority(self, cr, uid, ids, *args):
"""Set task priority to high
"""
return self.write(cr, uid, ids, {'priority' : '0'})
def set_normal_priority(self, cr, uid, ids, *args):
"""Set task priority to normal
"""
return self.write(cr, uid, ids, {'priority' : '2'})
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
if id in visited_branch: #Cycle
return False
if id in visited_node: #Already tested; no need to check this branch again
return True
visited_branch.add(id)
visited_node.add(id)
#visit child using DFS
task = self.browse(cr, uid, id, context=context)
for child in task.child_ids:
res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
if not res:
return False
visited_branch.remove(id)
return True
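# Illustrative trace: if task 1 delegates to task 2, task 2 to task 3 and
# task 3 back to task 1, the DFS reaches id 1 again while it is still in
# visited_branch, so _check_cycle returns False and the '_check_recursion'
# constraint rejects the write.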
def _check_dates(self, cr, uid, ids, context=None):
if context is None:
context = {}
obj_task = self.browse(cr, uid, ids[0], context=context)
start = obj_task.date_start or False
end = obj_task.date_end or False
if start and end :
if start > end:
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
(_check_dates, 'Error ! Task end-date must be later than task start-date', ['date_start','date_end'])
]
# Override view according to the company definition
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
users_obj = self.pool.get('res.users')
if context is None: context = {}
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
tm = obj_tm and obj_tm.name or 'Hours'
res = super(task, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu=submenu)
if tm in ['Hours','Hour']:
return res
eview = etree.fromstring(res['arch'])
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
for f in res['fields']:
if 'Hours' in res['fields'][f]['string']:
res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours',tm)
return res
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
search_domain = []
if section_ids:
search_domain = [('|')] * (len(section_ids)-1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
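# Illustrative only: with section_ids [4, 5] and domain
# [('state', '=', 'draft')], the prefix-notation domain built above is
# ['|', ('project_ids', '=', 4), ('project_ids', '=', 5), ('state', '=', 'draft')],
# i.e. (stage of project 4 OR project 5) AND state = draft.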
def _check_child_task(self, cr, uid, ids, context=None):
if context is None:
context = {}
tasks = self.browse(cr, uid, ids, context=context)
for task in tasks:
if task.child_ids:
for child in task.child_ids:
if child.state in ['draft', 'open', 'pending']:
raise osv.except_osv(_("Warning!"), _("Child task still open.\nPlease cancel or complete child task first."))
return True
def action_close(self, cr, uid, ids, context=None):
""" This action closes the task
"""
task_id = len(ids) and ids[0] or False
self._check_child_task(cr, uid, ids, context=context)
if not task_id: return False
return self.do_close(cr, uid, [task_id], context=context)
def do_close(self, cr, uid, ids, context=None):
""" Compatibility when changing to case_close. """
return self.case_close(cr, uid, ids, context=context)
def case_close(self, cr, uid, ids, context=None):
""" Closes Task """
if not isinstance(ids, list): ids = [ids]
for task in self.browse(cr, uid, ids, context=context):
vals = {}
project = task.project_id
for parent_id in task.parent_ids:
if parent_id.state in ('pending','draft'):
reopen = True
for child in parent_id.child_ids:
if child.id != task.id and child.state not in ('done','cancelled'):
reopen = False
if reopen:
self.do_reopen(cr, uid, [parent_id.id], context=context)
# close task
vals['remaining_hours'] = 0.0
if not task.date_end:
vals['date_end'] = fields.datetime.now()
self.case_set(cr, uid, [task.id], 'done', vals, context=context)
return True
def do_reopen(self, cr, uid, ids, context=None):
for task in self.browse(cr, uid, ids, context=context):
project = task.project_id
self.case_set(cr, uid, [task.id], 'open', {}, context=context)
return True
def do_cancel(self, cr, uid, ids, context=None):
""" Compatibility when changing to case_cancel. """
return self.case_cancel(cr, uid, ids, context=context)
def case_cancel(self, cr, uid, ids, context=None):
tasks = self.browse(cr, uid, ids, context=context)
self._check_child_task(cr, uid, ids, context=context)
for task in tasks:
self.case_set(cr, uid, [task.id], 'cancelled', {'remaining_hours': 0.0}, context=context)
return True
def do_open(self, cr, uid, ids, context=None):
""" Compatibility when changing to case_open. """
return self.case_open(cr, uid, ids, context=context)
def case_open(self, cr, uid, ids, context=None):
if not isinstance(ids,list): ids = [ids]
return self.case_set(cr, uid, ids, 'open', {'date_start': fields.datetime.now()}, context=context)
def do_draft(self, cr, uid, ids, context=None):
""" Compatibility when changing to case_draft. """
return self.case_draft(cr, uid, ids, context=context)
def case_draft(self, cr, uid, ids, context=None):
return self.case_set(cr, uid, ids, 'draft', {}, context=context)
def do_pending(self, cr, uid, ids, context=None):
""" Compatibility when changing to case_pending. """
return self.case_pending(cr, uid, ids, context=context)
def case_pending(self, cr, uid, ids, context=None):
return self.case_set(cr, uid, ids, 'pending', {}, context=context)
def _delegate_task_attachments(self, cr, uid, task_id, delegated_task_id, context=None):
attachment = self.pool.get('ir.attachment')
attachment_ids = attachment.search(cr, uid, [('res_model', '=', self._name), ('res_id', '=', task_id)], context=context)
new_attachment_ids = []
for attachment_id in attachment_ids:
new_attachment_ids.append(attachment.copy(cr, uid, attachment_id, default={'res_id': delegated_task_id}, context=context))
return new_attachment_ids
def do_delegate(self, cr, uid, ids, delegate_data=None, context=None):
"""
Delegate task to other users.
"""
if delegate_data is None:
delegate_data = {}
assert delegate_data['user_id'], _("Delegated User should be specified")
delegated_tasks = {}
for task in self.browse(cr, uid, ids, context=context):
delegated_task_id = self.copy(cr, uid, task.id, {
'name': delegate_data['name'],
'project_id': delegate_data['project_id'] and delegate_data['project_id'][0] or False,
'user_id': delegate_data['user_id'] and delegate_data['user_id'][0] or False,
'planned_hours': delegate_data['planned_hours'] or 0.0,
'parent_ids': [(6, 0, [task.id])],
'description': delegate_data['new_task_description'] or '',
'child_ids': [],
'work_ids': []
}, context=context)
self._delegate_task_attachments(cr, uid, task.id, delegated_task_id, context=context)
newname = delegate_data['prefix'] or ''
task.write({
'remaining_hours': delegate_data['planned_hours_me'],
'planned_hours': delegate_data['planned_hours_me'] + (task.effective_hours or 0.0),
'name': newname,
}, context=context)
if delegate_data['state'] == 'pending':
self.do_pending(cr, uid, [task.id], context=context)
elif delegate_data['state'] == 'done':
self.do_close(cr, uid, [task.id], context=context)
delegated_tasks[task.id] = delegated_task_id
return delegated_tasks
def set_remaining_time(self, cr, uid, ids, remaining_time=1.0, context=None):
for task in self.browse(cr, uid, ids, context=context):
if (task.state=='draft') or (task.planned_hours==0.0):
self.write(cr, uid, [task.id], {'planned_hours': remaining_time}, context=context)
self.write(cr, uid, ids, {'remaining_hours': remaining_time}, context=context)
return True
def set_remaining_time_1(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 1.0, context)
def set_remaining_time_2(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 2.0, context)
def set_remaining_time_5(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 5.0, context)
def set_remaining_time_10(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 10.0, context)
def set_kanban_state_blocked(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'kanban_state': 'blocked'}, context=context)
def set_kanban_state_normal(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'kanban_state': 'normal'}, context=context)
def set_kanban_state_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'kanban_state': 'done'}, context=context)
return False
def _store_history(self, cr, uid, ids, context=None):
for task in self.browse(cr, uid, ids, context=context):
self.pool.get('project.task.history').create(cr, uid, {
'task_id': task.id,
'remaining_hours': task.remaining_hours,
'planned_hours': task.planned_hours,
'kanban_state': task.kanban_state,
'type_id': task.stage_id.id,
'state': task.state,
'user_id': task.user_id.id
}, context=context)
return True
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
task_id = super(task, self).create(cr, uid, vals, context=create_context)
self._store_history(cr, uid, [task_id], context=context)
return task_id
# Overridden to reset the kanban_state to normal whenever
# the stage (stage_id) of the task changes.
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if vals and not 'kanban_state' in vals and 'stage_id' in vals:
new_stage = vals.get('stage_id')
vals_reset_kstate = dict(vals, kanban_state='normal')
for t in self.browse(cr, uid, ids, context=context):
#TO FIX:Kanban view doesn't raise warning
#stages = [stage.id for stage in t.project_id.type_ids]
#if new_stage not in stages:
#raise osv.except_osv(_('Warning!'), _('Stage is not defined in the project.'))
write_vals = vals_reset_kstate if t.stage_id != new_stage else vals
super(task, self).write(cr, uid, [t.id], write_vals, context=context)
result = True
else:
result = super(task, self).write(cr, uid, ids, vals, context=context)
if ('stage_id' in vals) or ('remaining_hours' in vals) or ('user_id' in vals) or ('state' in vals) or ('kanban_state' in vals):
self._store_history(cr, uid, ids, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
self._check_child_task(cr, uid, ids, context=context)
res = super(task, self).unlink(cr, uid, ids, context)
return res
def _generate_task(self, cr, uid, tasks, ident=4, context=None):
context = context or {}
result = ""
ident = ' '*ident
for task in tasks:
if task.state in ('done','cancelled'):
continue
result += '''
%sdef Task_%s():
%s todo = \"%.2fH\"
%s effort = \"%.2fH\"''' % (ident,task.id, ident,task.remaining_hours, ident,task.total_hours)
start = []
for t2 in task.parent_ids:
start.append("up.Task_%s.end" % (t2.id,))
if start:
result += '''
%s start = max(%s)
''' % (ident,','.join(start))
if task.user_id:
result += '''
%s resource = %s
''' % (ident, 'User_'+str(task.user_id.id))
result += "\n"
return result
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
return [task.project_id.message_get_reply_to()[0] if task.project_id else False
for task in self.browse(cr, uid, ids, context=context)]
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Override to updates the document according to the email. """
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject'),
'planned_hours': 0.0,
}
defaults.update(custom_values)
return super(task, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Override to update the task according to the email. """
if update_vals is None:
update_vals = {}
maps = {
'cost': 'planned_hours',
}
for line in msg['body'].split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res:
match = res.group(1).lower()
field = maps.get(match)
if field:
try:
update_vals[field] = float(res.group(2).lower())
except (ValueError, TypeError):
pass
return super(task, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
def project_task_reevaluate(self, cr, uid, ids, context=None):
if self.pool.get('res.users').has_group(cr, uid, 'project.group_time_work_estimation_tasks'):
return {
'view_type': 'form',
"view_mode": 'form',
'res_model': 'project.task.reevaluate',
'type': 'ir.actions.act_window',
'target': 'new',
}
return self.do_reopen(cr, uid, ids, context=context)
class project_work(osv.osv):
_name = "project.task.work"
_description = "Project Task Work"
_columns = {
'name': fields.char('Work summary', size=128),
'date': fields.datetime('Date', select="1"),
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select="1"),
'hours': fields.float('Time Spent'),
'user_id': fields.many2one('res.users', 'Done by', required=True, select="1"),
'company_id': fields.related('task_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
_defaults = {
'user_id': lambda obj, cr, uid, context: uid,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S')
}
_order = "date desc"
def create(self, cr, uid, vals, *args, **kwargs):
if 'hours' in vals and (not vals['hours']):
vals['hours'] = 0.00
if 'task_id' in vals:
cr.execute('update project_task set remaining_hours=remaining_hours - %s where id=%s', (vals.get('hours',0.0), vals['task_id']))
return super(project_work,self).create(cr, uid, vals, *args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if 'hours' in vals and (not vals['hours']):
vals['hours'] = 0.00
if 'hours' in vals:
for work in self.browse(cr, uid, ids, context=context):
cr.execute('update project_task set remaining_hours=remaining_hours - %s + (%s) where id=%s', (vals.get('hours',0.0), work.hours, work.task_id.id))
return super(project_work,self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, *args, **kwargs):
for work in self.browse(cr, uid, ids):
cr.execute('update project_task set remaining_hours=remaining_hours + %s where id=%s', (work.hours, work.task_id.id))
return super(project_work,self).unlink(cr, uid, ids,*args, **kwargs)
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_tasks': fields.boolean('Tasks',help="If checked, this contract will be available in the project menu and you will be able to manage tasks or track issues"),
'company_uom_id': fields.related('company_id', 'project_time_mode_id', type='many2one', relation='product.uom'),
}
def on_change_template(self, cr, uid, ids, template_id, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_tasks'] = template.use_tasks
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
'''
This function decides whether a project should be created automatically when an analytic account is created. It returns True if so, False otherwise.
'''
if context is None: context = {}
return vals.get('use_tasks') and not 'project_creation_in_progress' in context
def project_create(self, cr, uid, analytic_account_id, vals, context=None):
'''
This function is called at analytic account creation time and creates a project automatically linked to the account when the conditions are met.
'''
project_pool = self.pool.get('project.project')
project_id = project_pool.search(cr, uid, [('analytic_account_id','=', analytic_account_id)])
if not project_id and self._trigger_project_creation(cr, uid, vals, context=context):
project_values = {
'name': vals.get('name'),
'analytic_account_id': analytic_account_id,
'type': vals.get('type','contract'),
}
return project_pool.create(cr, uid, project_values, context=context)
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
vals['child_ids'] = []
analytic_account_id = super(account_analytic_account, self).create(cr, uid, vals, context=context)
self.project_create(cr, uid, analytic_account_id, vals, context=context)
return analytic_account_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
vals_for_project = vals.copy()
for account in self.browse(cr, uid, ids, context=context):
if not vals.get('name'):
vals_for_project['name'] = account.name
if not vals.get('type'):
vals_for_project['type'] = account.type
self.project_create(cr, uid, account.id, vals_for_project, context=context)
return super(account_analytic_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
project_obj = self.pool.get('project.project')
analytic_ids = project_obj.search(cr, uid, [('analytic_account_id','in',ids)])
if analytic_ids:
raise osv.except_osv(_('Warning!'), _('Please delete the project linked with this account first.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, *args, **kwargs)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if context is None:
context={}
if context.get('current_model') == 'project.project':
project_ids = self.search(cr, uid, args + [('name', operator, name)], limit=limit, context=context)
return self.name_get(cr, uid, project_ids, context=context)
return super(account_analytic_account, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)
class project_project(osv.osv):
_inherit = 'project.project'
_defaults = {
'use_tasks': True
}
class project_task_history(osv.osv):
"""
Tasks History, used for cumulative flow charts (Lean/Agile)
"""
_name = 'project.task.history'
_description = 'History of Tasks'
_rec_name = 'task_id'
_log_access = False
def _get_date(self, cr, uid, ids, name, arg, context=None):
result = {}
for history in self.browse(cr, uid, ids, context=context):
if history.state in ('done','cancelled'):
result[history.id] = history.date
continue
cr.execute('''select
date
from
project_task_history
where
task_id=%s and
id>%s
order by id limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
result[history.id] = res and res[0] or False
return result
def _get_related_date(self, cr, uid, ids, context=None):
result = []
for history in self.browse(cr, uid, ids, context=context):
cr.execute('''select
id
from
project_task_history
where
task_id=%s and
id<%s
order by id desc limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
if res:
result.append(res[0])
return result
_columns = {
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
'type_id': fields.many2one('project.task.type', 'Stage'),
'state': fields.selection([('draft', 'New'), ('cancelled', 'Cancelled'),('open', 'In Progress'),('pending', 'Pending'), ('done', 'Done')], 'Status'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State', required=False),
'date': fields.date('Date', select=True),
'end_date': fields.function(_get_date, string='End Date', type="date", store={
'project.task.history': (_get_related_date, None, 20)
}),
'remaining_hours': fields.float('Remaining Time', digits=(16,2)),
'planned_hours': fields.float('Planned Time', digits=(16,2)),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'date': fields.date.context_today,
}
class project_task_history_cumulative(osv.osv):
_name = 'project.task.history.cumulative'
_table = 'project_task_history_cumulative'
_inherit = 'project.task.history'
_auto = False
_columns = {
'end_date': fields.date('End Date'),
'project_id': fields.many2one('project.project', 'Project'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_task_history_cumulative')
cr.execute(""" CREATE VIEW project_task_history_cumulative AS (
SELECT
history.date::varchar||'-'||history.history_id::varchar AS id,
history.date AS end_date,
*
FROM (
SELECT
h.id AS history_id,
h.date+generate_series(0, CAST((coalesce(h.end_date, DATE 'tomorrow')::date - h.date) AS integer)-1) AS date,
h.task_id, h.type_id, h.user_id, h.kanban_state, h.state,
greatest(h.remaining_hours, 1) AS remaining_hours, greatest(h.planned_hours, 1) AS planned_hours,
t.project_id
FROM
project_task_history AS h
JOIN project_task AS t ON (h.task_id = t.id)
) AS history
)
""")
class project_category(osv.osv):
""" Category of project's task (or issue) """
_name = "project.category"
_description = "Category of project's task, issue, ..."
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
chjw8016/GreenOdoo7-haibao
|
openerp/addons/project/project.py
|
Python
|
mit
| 74,113
|
[
"VisIt"
] |
664fa61a0fe787aa69284274d449d231dcc72dd01d19b5e10065119f8b12930b
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 14 14:05:09 2016
@author: cjs14
functions based on nearest neighbour calculations
"""
import math
import pandas as pd
import numpy as np
from scipy.spatial import cKDTree
from collections import Counter
from IPython.core.display import clear_output
import matplotlib.patches as mpatches
from .. import shared
from ..atom_manipulation import Atom_Manipulation
from ..plotting import Plotter
def _createTreeFromEdges(edges):
"""
e.g. _createTreeFromEdges([[1,2],[0,1],[2,3],[8,9],[0,3]])
-> {0: [1], 1: [2, 0], 2: [1, 3], 3: [2,0], 8: [9], 9: [8]}
"""
tree = {}
for v1, v2 in edges:
tree.setdefault(v1, []).append(v2)
tree.setdefault(v2, []).append(v1)
return tree
def _longest_path(start,tree,lastnode=None):
"""a recursive function to compute the maximum unbroken chain given a tree
e.g. start=0, tree={0: [1], 1: [2, 0], 2: [1, 3], 3: [2,0], 8: [9], 9: [8]}
-> [0, 1, 2, 3, 0]
"""
if not start in tree:
return []
new_tree = tree.copy()
#nodes = new_tree.pop(start) # can use if don't want to complete loops
nodes = new_tree[start]
new_tree[start] = []
path = []
for node in nodes:
if node==lastnode:
continue # can't go back to lastnode, e.g. 1->2->1
new_path = _longest_path(node,new_tree,start)
if len(new_path) > len(path):
path = new_path
path.append(start)
return path
def guess_bonds(atoms_df, covalent_radii=None, threshold=0.1, max_length=5.,
radius=0.1,transparency=1.,color=None):
""" guess bonds between atoms, based on approximate covalent radii
Parameters
----------
atoms_df : pandas.Dataframe
all atoms, requires columns ['x','y','z','type', 'color']
covalent_radii : dict or None
a dict of covalent radii for each atom type, if None then taken from ipymd.shared.atom_data
threshold : float
include bonds with distance +/- threshold of guessed bond length (Angstrom)
max_length : float
maximum bond length to include (Angstrom)
radius : float
radius of displayed bond cylinder (Angstrom)
transparency : float
transparency of displayed bond cylinder
color : str or tuple
color of displayed bond cylinder, if None then colored by atom color
Returns
-------
bonds_df : pandas.Dataframe
a dataframe with start/end indexes relating to atoms in atoms_df
"""
if atoms_df.index.tolist() != [_ for _ in range(atoms_df.shape[0])]:
raise ValueError('the index for atoms_df must be in order, i.e. [0,1,2,...]')
if covalent_radii is None:
df = shared.atom_data()
covalent_radii = df.RCov.to_dict()
r_array = atoms_df[['x','y','z']].values
ck = cKDTree(r_array)
pairs = ck.query_pairs(max_length)
bonds = []
for i,j in pairs:
a, b = covalent_radii[atoms_df.iloc[i].type], covalent_radii[atoms_df.iloc[j].type]
rval = a + b
thr_a = rval - threshold
thr_b = rval + threshold
#thr_a2 = thr_a * thr_a
thr_b2 = thr_b * thr_b
dr2 = ((r_array[i] - r_array[j])**2).sum()
# print(dr2)
if dr2 < thr_b2:
if color is None:
bonds.append((i, j,math.sqrt(dr2),radius,
atoms_df.iloc[i].color,atoms_df.iloc[j].color,transparency))
else:
bonds.append((i, j,math.sqrt(dr2),radius,
color,color,transparency))
return pd.DataFrame(bonds, columns=['start','end','length','radius','color_start','color_end','transparency'])
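# Example usage (a minimal sketch; the H2-like coordinates and the 0.37
# covalent radius are illustrative values, not data from this package):
#
# atoms = pd.DataFrame([['H', 0., 0., 0., 'grey'], ['H', 0., 0., 0.74, 'grey']],
# columns=['type', 'x', 'y', 'z', 'color'])
# bonds = guess_bonds(atoms, covalent_radii={'H': 0.37}, threshold=0.1)
# # -> a single bond row with start=0, end=1, length=0.74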
def bond_lengths(atoms_df, coord_type, lattice_type, max_dist=4, max_coord=16,
repeat_meta=None, rounded=2, min_dist=0.01, leafsize=100):
""" calculate the unique bond lengths atoms in coords_atoms, w.r.t lattice_atoms
atoms_df : pandas.Dataframe
all atoms
coord_type : string
atoms to calcualte coordination of
lattice_type : string
atoms to act as lattice for coordination
max_dist : float
maximum distance for coordination consideration
max_coord : float
maximum possible coordination number
repeat_meta : pandas.Series
include consideration of repeating boundary defined by a,b,c in the meta data
min_dist : float
lattice points within this distance of the atom will be ignored (assumed self-interaction)
leafsize : int
points at which the algorithm switches to brute-force (kdtree specific)
Returns
-------
distances : list
sorted list of unique distances
"""
if not coord_type in atoms_df.type.values or not lattice_type in atoms_df.type.values:
return set([])
coord_df = Atom_Manipulation(atoms_df,repeat_meta)
coord_df.filter_variables(coord_type)
lattice_df = Atom_Manipulation(atoms_df,repeat_meta)
lattice_df.filter_variables(lattice_type)
if repeat_meta is not None:
lattice_df.repeat_cell((-1,1),(-1,1),(-1,1))
lattice_tree = cKDTree(lattice_df.df[['x','y','z']].values, leafsize=leafsize)
all_dists,all_ids = lattice_tree.query(coord_df.df[['x','y','z']].values, k=max_coord, distance_upper_bound=max_dist)
distances = []
for dists in all_dists:
for d in dists:
if d > min_dist and not np.isinf(d):
distances.append(round(d,rounded))
return sorted(set(distances))
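# Example usage (illustrative; assumes atoms_df contains 'Na' and 'Cl' type
# atoms in a rock-salt-like arrangement):
#
# bond_lengths(atoms_df, 'Na', 'Cl', max_dist=4.)
# # -> e.g. [2.82] (the unique Na-Cl first-neighbour distance, rounded)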
def coordination(coord_atoms_df, lattice_atoms_df, max_dist=4, max_coord=16,
repeat_meta=None, min_dist=0.01, leafsize=100):
""" calculate the coordination number of each atom in coords_atoms, w.r.t lattice_atoms
coords_atoms_df : pandas.Dataframe
atoms to calcualte coordination of
lattice_atoms_df : pandas.Dataframe
atoms to act as lattice for coordination
max_dist : float
maximum distance for coordination consideration
max_coord : float
maximum possible coordination number
repeat_meta : pandas.Series
include consideration of repeating boundary defined by a,b,c in the meta data
min_dist : float
lattice points within this distance of the atom will be ignored (assumed self-interaction)
leafsize : int
points at which the algorithm switches to brute-force (kdtree specific)
Returns
-------
coords : list
list of coordination numbers
"""
lattice_df = Atom_Manipulation(lattice_atoms_df,repeat_meta)
if repeat_meta is not None:
lattice_df.repeat_cell((-1,1),(-1,1),(-1,1))
lattice_tree = cKDTree(lattice_df.df[['x','y','z']].values, leafsize=leafsize)
all_dists,all_ids = lattice_tree.query(coord_atoms_df[['x','y','z']].values, k=max_coord, distance_upper_bound=max_dist)
coords = []
for dists in all_dists:
coords.append(np.count_nonzero(np.logical_and(dists>min_dist, dists<np.inf)))
return coords
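# Example usage (a minimal sketch): the centre of a unit cube sees all eight
# corners within max_dist, so its coordination number is 8:
#
# corners = pd.DataFrame(np.array(np.meshgrid([0., 1.], [0., 1.], [0., 1.])).reshape(3, -1).T,
# columns=['x', 'y', 'z'])
# centre = pd.DataFrame([[.5, .5, .5]], columns=['x', 'y', 'z'])
# coordination(centre, corners, max_dist=2.) # -> [8]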
def coordination_bytype(atoms_df, coord_type, lattice_type, max_dist=4, max_coord=16,
repeat_meta=None, min_dist=0.01, leafsize=100):
""" returns dataframe with additional column for the coordination number of
each atom of coord_type, w.r.t. lattice_type atoms
effectively an extension of coordination()
Parameters
----------
atoms_df : pandas.Dataframe
all atoms
coord_type : string
atoms to calculate coordination of
lattice_type : string
atoms to act as lattice for coordination
max_dist : float
maximum distance for coordination consideration
max_coord : float
maximum possible coordination number
repeat_meta : pandas.Series
include consideration of repeating boundary defined by a,b,c in the meta data
min_dist : float
lattice points within this distance of the atom will be ignored (assumed self-interaction)
leafsize : int
points at which the algorithm switches to brute-force (kdtree specific)
Returns
-------
df : pandas.Dataframe
copy of atoms_df with new column named coord_{coord_type}_{lattice_type}
"""
df = atoms_df.copy()
df['coord_{0}_{1}'.format(coord_type, lattice_type)] = np.nan
if not coord_type in df.type.values or not lattice_type in df.type.values:
return df
coord_df = Atom_Manipulation(df)
coord_df.filter_variables(coord_type)
lattice_df = Atom_Manipulation(df)
lattice_df.filter_variables(lattice_type)
coords = coordination(coord_df.df,lattice_df.df,max_dist, max_coord,
repeat_meta, min_dist, leafsize)
df.loc[df['type']==coord_type,'coord_{0}_{1}'.format(coord_type, lattice_type)] = coords
return df
def compare_to_lattice(atoms_df, lattice_atoms_df, max_dist=10,leafsize=100):
""" calculate the minimum distance of each atom in atoms_df from a lattice point in lattice_atoms_df
Parameters
----------
atoms_df : pandas.Dataframe
atoms to calculate for
lattice_atoms_df : pandas.Dataframe
atoms to act as lattice points
max_dist : float
maximum distance for consideration in computation
leafsize : int
points at which the algorithm switches to brute-force (kdtree specific)
Returns
-------
distances : list
list of distances to nearest atom in lattice
"""
lattice_tree = cKDTree(lattice_atoms_df[['x','y','z']].values, leafsize=leafsize)
dists,idnums = lattice_tree.query(atoms_df[['x','y','z']].values, k=1, distance_upper_bound=max_dist)
return dists
def vacancy_identification(atoms_df, res=0.2, nn_dist=2., repeat_meta=None, remove_dups=True,
color='red',transparency=1.,radius=1, type_name='Vac', leafsize=100,
n_jobs=1, ipython_progress=False, ):
""" identify vacancies
Parameters
----------
atoms_df : pandas.Dataframe
atoms to calculate for
res : float
resolution of vacancy identification, i.e. spacing of reference lattice
nn_dist : float
maximum nearest-neighbour distance considered as a vacancy
repeat_meta : pandas.Series
include consideration of repeating boundary defined by a,b,c in the meta data
remove_dups : bool
only keep one vacancy site within the nearest-neighbour distance
leafsize : int
points at which the algorithm switches to brute-force (kdtree specific)
n_jobs : int, optional
Number of jobs to schedule for parallel processing. If -1 is given all processors are used.
ipython_progress : bool
print progress to IPython Notebook
Returns
-------
vac_df : pandas.DataFrame
new atom dataframe of vacancy sites as atoms
"""
xmin, xmax = atoms_df.x.min(),atoms_df.x.max()
ymin, ymax = atoms_df.y.min(),atoms_df.y.max()
zmin, zmax = atoms_df.z.min(),atoms_df.z.max()
xyz = np.mgrid[xmin:xmax:res, ymin:ymax:res, zmin:zmax:res].reshape(3,-1).T
if repeat_meta is not None:
repeat = Atom_Manipulation(atoms_df,repeat_meta)
repeat.repeat_cell((-1,1),(-1,1),(-1,1),original_first=True)
lattice_df = repeat.df
else:
lattice_df = atoms_df
if ipython_progress:
clear_output()
print('creating nearest neighbour tree')
lattice_tree = cKDTree(lattice_df[['x','y','z']].values, leafsize=leafsize)
if ipython_progress:
clear_output()
print('assessing nearest neighbours')
dists,idnums = lattice_tree.query(xyz, k=1, distance_upper_bound=nn_dist,n_jobs=n_jobs)
vac_list = []
for atom,dist in zip(xyz,dists):
if np.isinf(dist):
x,y,z = atom
vac_list.append([type_name,x,y,z,radius,color,transparency])
df = pd.DataFrame(vac_list,columns=['type','x','y','z','radius','color','transparency'])
if remove_dups and df.shape[0]>0:
vac_tree = cKDTree(df[['x','y','z']].values)
pairs = np.asarray(list(vac_tree.query_pairs(nn_dist)))
#drop first atom of each pair
if pairs.shape[0] > 0:
df.drop(pairs[:,0],inplace=True)
if ipython_progress:
clear_output()
return df
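# Example usage (illustrative): starting from a perfect-lattice dataframe and
# dropping one atom, the returned frame should hold a single 'Vac' pseudo-atom
# near the removed site:
#
# vac_df = vacancy_identification(defective_df, res=0.2, nn_dist=2.)
# # vac_df[['type', 'x', 'y', 'z']] -> one 'Vac' row at the vacancy position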
#TODO group atoms into specified molecules e.g. S2 or CaCO3
# http://chemwiki.ucdavis.edu/Textbook_Maps/Inorganic_Chemistry_Textbook_Maps/Map%3A_Inorganic_Chemistry_(Wikibook)/Chapter_08%3A_Ionic_and_Covalent_Solids_-_Structures/8.2%3A_Structures_related_to_NaCl_and_NiAs
# maybe supply central atom type(s) and 'other' atoms type(s), filter df by required atom types,
# then find nearest neighbours of central (removing molecule each time)
# create molecule x,y,z from average of central atoms
#http://www.ovito.org/manual/particles.modifiers.common_neighbor_analysis.html
#https://www.quora.com/Given-a-set-of-atomic-types-and-coordinates-from-an-MD-simulation-is-there-a-good-algorithm-for-determining-its-likely-crystal-structure?__filter__=all&__nsrc__=2&__snid3__=179254150
# http://iopscience.iop.org/article/10.1088/0965-0393/20/4/045021/pdf
def common_neighbour_analysis(atoms_df, upper_bound=4, max_neighbours=24,
repeat_meta=None, leafsize=100, ipython_progress=False):
""" compute atomic environment of each atom in atoms_df
Based on Faken, Daniel and Jónsson, Hannes,
'Systematic analysis of local atomic structure combined with 3D computer graphics',
March 1994, DOI: 10.1016/0927-0256(94)90109-0
ideally:
- FCC = 12 x 4,2,1
- HCP = 6 x 4,2,1 & 6 x 4,2,2
- BCC = 8 x 6,6,6 & 6 x 4,4,4
- icosahedral = 12 x 5,5,5
Parameters
----------
repeat_meta : pandas.Series
include consideration of repeating boundary defined by a,b,c in the meta data
ipython_progress : bool
print progress to IPython Notebook
Returns
-------
df : pandas.Dataframe
copy of atoms_df with new column named cna
"""
df = atoms_df.copy()
max_id = df.shape[0] - 1 # starts at 0
if repeat_meta is not None:
repeat = Atom_Manipulation(df,repeat_meta)
repeat.repeat_cell((-1,1),(-1,1),(-1,1),original_first=True)
lattice_df = repeat.df
else:
lattice_df = df
if ipython_progress:
print('creating nearest neighbours dictionary')
# create nearest neighbours dictionary
lattice_tree = cKDTree(lattice_df[['x','y','z']].values, leafsize=leafsize)
all_dists,all_ids = lattice_tree.query(lattice_df[['x','y','z']].values, k=max_neighbours+1, distance_upper_bound=upper_bound)
nn_ids = {}
#nn_dists = {}
for dists,ids in zip(all_dists,all_ids):
mask = np.logical_and(dists>0.01, dists<np.inf)
# assume first id is of that atom, i.e. dists[0]==0
assert dists[0]==0, dists
nn_ids[ids[0]] = ids[mask]
#nn_dists[ids[0]] = dists[mask]
jkls = {}
for lid, nns in nn_ids.iteritems():
if lid > max_id:
continue
if ipython_progress:
clear_output()
print('assessing nearest neighbours: {0} of {1}'.format(lid,max_id))
jkls[lid] = []
for nn in nns:
# j is number of shared nearest neighbours
common_nns = set(nn_ids[nn]).intersection(nns)
j = len(common_nns)
# k is number of bonds between nearest neighbours
nn_bonds = []
for common_nn in common_nns:
for nn_bond in set(nn_ids[common_nn]).intersection(common_nns):
if sorted((common_nn, nn_bond)) not in nn_bonds:
nn_bonds.append(sorted((common_nn, nn_bond)))
k = len(nn_bonds)
# l is longest chain of nearest neighbour bonds
tree = _createTreeFromEdges(nn_bonds)
chain_lengths = [0]
for node in tree.iterkeys():
chain_lengths.append(len(_longest_path(node, tree))-1)
l = max(chain_lengths)
jkls[lid].append('{0},{1},{2}'.format(j,k,l))
jkls[lid] = Counter(jkls[lid])
df['cna'] = [jkls[key] for key in sorted(jkls)]
if ipython_progress:
clear_output()
return df
def _equala(i, j, accuracy):
return j*accuracy <= i <= j+j*(1-accuracy)
def cna_categories(atoms_df, accuracy=1., upper_bound=4, max_neighbours=24,
repeat_meta=None, leafsize=100, ipython_progress=False):
""" compute summed atomic environments of each atom in atoms_df
Based on Faken, Daniel and Jónsson, Hannes,
'Systematic analysis of local atomic structure combined with 3D computer graphics',
March 1994, DOI: 10.1016/0927-0256(94)90109-0
signatures:
- FCC = 12 x 4,2,1
- HCP = 6 x 4,2,1 & 6 x 4,2,2
- BCC = 8 x 6,6,6 & 6 x 4,4,4
- Diamond = 12 x 5,4,3 & 4 x 6,6,3
- Icosahedral = 12 x 5,5,5
Parameters
----------
accuracy : float
0 to 1; how closely the neighbour counts must match the ideal signature (1 = exact match)
repeat_meta : pandas.Series
include consideration of repeating boundary defined by a,b,c in the meta data
ipython_progress : bool
print progress to IPython Notebook
Returns
-------
df : pandas.Dataframe
copy of atoms_df with new column named cna
"""
df = common_neighbour_analysis(atoms_df, upper_bound, max_neighbours,
repeat_meta, leafsize=leafsize,
ipython_progress=ipython_progress)
cnas = df.cna.values
atype = []
for counter in cnas:
if _equala(counter['4,2,1'],6,accuracy) and _equala(counter['4,2,2'],6,accuracy):
atype.append('HCP')
elif _equala(counter['4,2,1'],12,accuracy):
atype.append('FCC')
elif _equala(counter['6,6,6'],8,accuracy) and _equala(counter['4,4,4'],6,accuracy):
atype.append('BCC')
elif _equala(counter['5,4,3'],12,accuracy) and _equala(counter['6,6,3'],4,accuracy):
atype.append('Diamond')
elif _equala(counter['5,5,5'],12,accuracy):
atype.append('Icosahedral')
else:
atype.append('Other')
df.cna = atype
return df
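# Example usage (illustrative; assumes atoms_df holds a bulk FCC crystal whose
# nearest-neighbour distance lies below upper_bound):
#
# df = cna_categories(atoms_df, accuracy=0.9, upper_bound=3.)
# df.cna.value_counts() # interior atoms report 'FCC', surface atoms 'Other'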
def cna_sum(atoms_df, upper_bound=4, max_neighbours=24,
repeat_meta=None, leafsize=100, ipython_progress=False):
""" compute summed atomic environments of each atom in atoms_df
Based on Faken, Daniel and Jónsson, Hannes,
'Systematic analysis of local atomic structure combined with 3D computer graphics',
March 1994, DOI: 10.1016/0927-0256(94)90109-0
common signatures:
- FCC = 12 x 4,2,1
- HCP = 6 x 4,2,1 & 6 x 4,2,2
- BCC = 8 x 6,6,6 & 6 x 4,4,4
- Diamond = 12 x 5,4,3 & 4 x 6,6,3
- Icosahedral = 12 x 5,5,5
Parameters
----------
repeat_meta : pandas.Series
include consideration of repeating boundary defined by a,b,c in the meta data
ipython_progress : bool
print progress to IPython Notebook
Returns
-------
counter : Counter
a counter of cna signatures
"""
df = common_neighbour_analysis(atoms_df, upper_bound, max_neighbours,
repeat_meta, leafsize=leafsize,
ipython_progress=ipython_progress)
cnas = df.cna.values
return sum(cnas,Counter())
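# Example usage (illustrative): for a perfect FCC crystal with periodic
# boundaries given by repeat_meta, the 4,2,1 signature dominates the counter:
#
# cna_sum(atoms_df, upper_bound=3., repeat_meta=meta)
# # -> Counter({'4,2,1': ~12 per atom, ...})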
#TODO move plotting to plotting module
def cna_plot(atoms_df, upper_bound=4, max_neighbours=24,
repeat_meta=None, leafsize=100,
barwidth=1, ipython_progress=False):
""" compute summed atomic environments of each atom in atoms_df
Based on Faken, Daniel and Jónsson, Hannes,
'Systematic analysis of local atomic structure combined with 3D computer graphics',
March 1994, DOI: 10.1016/0927-0256(94)90109-0
common signatures:
- FCC = 12 x 4,2,1
- HCP = 6 x 4,2,1 & 6 x 4,2,2
- BCC = 8 x 6,6,6 & 6 x 4,4,4
- Diamond = 12 x 5,4,3 & 4 x 6,6,3
- Icosahedral = 12 x 5,5,5
Parameters
----------
repeat_meta : pandas.Series
include consideration of repeating boundary defined by a,b,c in the meta data
ipython_progress : bool
print progress to IPython Notebook
Returns
-------
plot : ipymd.plotting.Plotter
a bar chart of the counted signatures
"""
df = common_neighbour_analysis(atoms_df, upper_bound, max_neighbours,
repeat_meta, leafsize=leafsize,
ipython_progress=ipython_progress)
cnas = df.cna.values
counter = sum(cnas,Counter())
labels, values = zip(*counter.items())
indexes = np.arange(len(labels))
colors = []
patches = []
d = {'4,2,1':['orange','FCC or HCP (1 of 2)'],
'4,2,2':['red','HCP (2 of 2)'],
'6,6,6':['green','BCC (1 of 2)'],
'4,4,4':['green','BCC (2 of 2)'],
'5,5,5':['purple','Icosahedral'],
'5,4,3':['grey','Diamond (1 of 2)'],
'6,6,3':['grey','Diamond (2 of 2)']}
for label in labels:
if label in d:
colors.append(d[label][0])
patches.append(mpatches.Patch(color=d[label][0], label=d[label][1]))
else:
colors.append('blue')
plot = Plotter()
plot.axes.barh(indexes, values, barwidth, color=colors)
plot.axes.set_yticks(indexes + barwidth * 0.5)
plot.axes.set_yticklabels(labels)
plot.axes.grid(True)
if patches:
plot.axes.legend(handles=patches)
plot.axes.set_ylabel('i,j,k')
return plot
#TODO _group_molecules needs work
def _group_molecules(atom_df,moltypes,maxdist=3,repeat_meta=None,
mean_xyz=True,remove_atoms=True,
color='red',transparency=1.,radius=1.,
leafsize=100):
molname = ''.join(['{}_{}'.format(k,v)
for k,v in Counter(moltypes).iteritems()])
search_df = atom_df[atom_df.type.isin([moltypes[0]])].copy()
#old_index = search_df.index
search_df.reset_index(inplace=True)
if repeat_meta is not None:
manip = Atom_Manipulation(search_df,repeat_meta)
manip.repeat_cell((-1,1), (-1,1), (-1, 1),original_first=True)
lattice_df = manip.df
lattice_df.reset_index(inplace=True,drop=True)
rep_map = dict(zip(range(lattice_df.shape[0]),list(search_df.index)*27))
else:
lattice_df = search_df.copy()
rep_map = dict(zip(range(search_df.shape[0]),list(search_df.index)))
lattice_tree = cKDTree(lattice_df[['x','y','z']].values, leafsize=leafsize)
dists,idnums = lattice_tree.query(search_df[['x','y','z']].values, k=len(moltypes), distance_upper_bound=maxdist)
mol_data = []
used_repeat = []
for i,dist,idnum in zip(search_df.index, dists,idnums):
#print i, old_index[i], dist, [rep_map[m] for m in idnum]
if i in used_repeat:
continue
mol = [i]
for j, d in zip(idnum[1:],dist[1:]):
if j not in mol and not np.isinf(d) and rep_map.get(j) not in used_repeat:
#used_repeat.append(rep_map[j])
mol.append(j)
else:
print('warning incomplete molecule')#, idnum, dist, j, rep_map.get(j)
repeat_mol = [rep_map[m] for m in mol]
if len(mol) == len(moltypes):
used_repeat.extend(repeat_mol)
if mean_xyz:
x,y,z = search_df.loc[repeat_mol,['x','y','z']].mean().values
else:
x,y,z = search_df.loc[i,['x','y','z']].values
mol_data.append([molname,x,y,z,radius,color,transparency])
#print i, repeat_mol
df = atom_df.copy()
if remove_atoms:
df.drop(used_repeat, inplace=True)
if mol_data:
moldf = pd.DataFrame(mol_data,columns=['type','x','y','z','radius','color','transparency'])
df = pd.concat([df,moldf])
return df
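# Hedged usage sketch (illustrative only; this helper is still marked as
# needing work above): collapse N-N pairs into single 'N_2' pseudo-atoms.
# Note that the search only considers atoms of the first type in `moltypes`,
# so this suits homonuclear molecules. The 'N' label and the 1.2 cutoff
# (coordinates assumed to be in Angstroms) are assumptions about the input.
def _group_n2_example(atom_df):
    """Group N-N pairs within 1.2 distance units into 'N_2' pseudo-atoms."""
    return _group_molecules(atom_df, moltypes=['N', 'N'], maxdist=1.2,
                            mean_xyz=True, remove_atoms=True, color='blue')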
|
chrisjsewell/ipymd
|
ipymd/atom_analysis/nearest_neighbour.py
|
Python
|
gpl-3.0
| 24,578
|
[
"CRYSTAL",
"OVITO"
] |
304acd63687aa6b197f1601885726b7b8f4f2ea139f244cc51eedbee9f473654
|
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes regression examples
"""
MPL_AVAILABLE = True
try:
import matplotlib.pyplot as plt
except ImportError:
MPL_AVAILABLE = False
import numpy as np
import GPy
def olympic_marathon_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Olympic marathon data."""
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.olympic_marathon_men()
# create simple GP Model
m = GPy.models.GPRegression(data["X"], data["Y"])
# set the lengthscale to be something sensible (defaults to 1)
m.kern.lengthscale = 10.0
if optimize:
m.optimize("bfgs", max_iters=200)
if MPL_AVAILABLE and plot:
m.plot(plot_limits=(1850, 2050))
return m
def coregionalization_toy(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions.
"""
# build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
# build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.0
m = GPy.models.GPCoregionalizedRegression(X_list=[X1, X2], Y_list=[Y1, Y2])
if optimize:
m.optimize("bfgs", max_iters=100)
if MPL_AVAILABLE and plot:
slices = GPy.util.multioutput.get_slices([X1, X2])
m.plot(
fixed_inputs=[(1, 0)],
which_data_rows=slices[0],
Y_metadata={"output_index": 0},
)
m.plot(
fixed_inputs=[(1, 1)],
which_data_rows=slices[1],
Y_metadata={"output_index": 1},
ax=plt.gca(),
)
return m
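# Hedged sketch (assumes the coregionalized GPy API used above): predictions
# need the same augmented input format as training, i.e. a trailing column of
# output indices plus matching Y_metadata. The helper name is illustrative.
def _predict_first_output(m, xnew):
    """Predict output 0 of a coregionalized model at 1D inputs xnew (N, 1)."""
    X_aug = np.hstack([xnew, np.zeros_like(xnew)])     # output-index column
    meta = {"output_index": X_aug[:, 1:].astype(int)}  # selects the noise model
    return m.predict(X_aug, Y_metadata=meta)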
def coregionalization_sparse(optimize=True, plot=True):
"""A simple demonstration of coregionalization on two sinusoidal
functions using sparse approximations. """
# build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
# build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.0
m = GPy.models.SparseGPCoregionalizedRegression(X_list=[X1, X2], Y_list=[Y1, Y2])
if optimize:
m.optimize("bfgs", max_iters=100)
if MPL_AVAILABLE and plot:
slices = GPy.util.multioutput.get_slices([X1, X2])
m.plot(
fixed_inputs=[(1, 0)],
which_data_rows=slices[0],
Y_metadata={"output_index": 0},
)
m.plot(
fixed_inputs=[(1, 1)],
which_data_rows=slices[1],
Y_metadata={"output_index": 1},
ax=plt.gca(),
)
plt.ylim(-3,)
return m
def epomeo_gpx(max_iters=200, optimize=True, plot=True):
"""
Perform Gaussian process regression on the latitude and longitude data
from the Mount Epomeo runs. Requires gpxpy to be installed on your system
to load in the data.
"""
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.epomeo_gpx()
num_data_list = []
for Xpart in data["X"]:
num_data_list.append(Xpart.shape[0])
num_data_array = np.array(num_data_list)
num_data = num_data_array.sum()
Y = np.zeros((num_data, 2))
t = np.zeros((num_data, 2))
start = 0
for Xpart, index in zip(data["X"], range(len(data["X"]))):
end = start + Xpart.shape[0]
t[start:end, :] = np.hstack(
(Xpart[:, 0:1], index * np.ones((Xpart.shape[0], 1)))
)
Y[start:end, :] = Xpart[:, 1:3]
num_inducing = 200
Z = np.hstack(
(
np.linspace(t[:, 0].min(), t[:, 0].max(), num_inducing)[:, None],
np.random.randint(0, 4, num_inducing)[:, None],
)
)
k1 = GPy.kern.RBF(1)
k2 = GPy.kern.Coregionalize(1, output_dim=5, rank=5)
k = k1 ** k2
m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
m.constrain_fixed(".*variance", 1.0)
m.inducing_inputs.constrain_fixed()
m.Gaussian_noise.variance.constrain_bounded(1e-3, 1e-1)
m.optimize(max_iters=max_iters, messages=True)
return m
def multiple_optima(
gene_number=937,
resolution=80,
model_restarts=10,
seed=10000,
max_iters=300,
optimize=True,
plot=True,
):
"""
Show an example of a multimodal error surface for Gaussian process
regression. Gene 939 has bimodal behaviour where the noisy mode is
higher.
"""
# Contour over a range of length scales and signal/noise ratios.
length_scales = np.linspace(0.1, 60.0, resolution)
log_SNRs = np.linspace(-3.0, 4.0, resolution)
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.della_gatta_TRP63_gene_expression(
data_set="della_gatta", gene_number=gene_number
)
# data['Y'] = data['Y'][0::2, :]
# data['X'] = data['X'][0::2, :]
data["Y"] = data["Y"] - np.mean(data["Y"])
lls = GPy.examples.regression._contour_data(
data, length_scales, log_SNRs, GPy.kern.RBF
)
if MPL_AVAILABLE and plot:
plt.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=plt.cm.jet)
ax = plt.gca()
plt.xlabel("length scale")
plt.ylabel("log_10 SNR")
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Now run a few optimizations
models = []
optim_point_x = np.empty(2)
optim_point_y = np.empty(2)
np.random.seed(seed=seed)
for i in range(0, model_restarts):
# kern = GPy.kern.RBF(
# 1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.)
# )
kern = GPy.kern.RBF(
1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50)
)
m = GPy.models.GPRegression(data["X"], data["Y"], kernel=kern)
m.likelihood.variance = np.random.uniform(1e-3, 1)
optim_point_x[0] = m.rbf.lengthscale
optim_point_y[0] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance)
# optimize
if optimize:
m.optimize("scg", xtol=1e-6, ftol=1e-6, max_iters=max_iters)
optim_point_x[1] = m.rbf.lengthscale
optim_point_y[1] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance)
if MPL_AVAILABLE and plot:
plt.arrow(
optim_point_x[0],
optim_point_y[0],
optim_point_x[1] - optim_point_x[0],
optim_point_y[1] - optim_point_y[0],
label=str(i),
head_length=1,
head_width=0.5,
fc="k",
ec="k",
)
models.append(m)
if MPL_AVAILABLE and plot:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return m # (models, lls)
def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.RBF):
"""
Evaluate the GP objective function for a given data set for a range of
signal to noise ratios and a range of lengthscales.
:data_set: A data set from the utils.datasets directory.
:length_scales: a list of length scales to explore for the contour plot.
:log_SNRs: a list of base 10 logarithm signal to noise ratios to explore for the contour plot.
:kernel_call: a kernel constructor to use for the 'signal' portion of the data.
"""
lls = []
total_var = np.var(data["Y"])
kernel = kernel_call(1, variance=1.0, lengthscale=1.0)
model = GPy.models.GPRegression(data["X"], data["Y"], kernel=kernel)
for log_SNR in log_SNRs:
SNR = 10.0 ** log_SNR
noise_var = total_var / (1.0 + SNR)
signal_var = total_var - noise_var
model.kern[".*variance"] = signal_var
model.likelihood.variance = noise_var
length_scale_lls = []
for length_scale in length_scales:
model[".*lengthscale"] = length_scale
length_scale_lls.append(model.log_likelihood())
lls.append(length_scale_lls)
return np.array(lls)
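# Worked example of the variance split above (hedged, illustrative): with
# total_var = 2.0 and log_SNR = 1.0 the SNR is 10, giving
# noise_var = 2.0 / 11 ~= 0.182 and signal_var ~= 1.818; the two components
# always sum back to the empirical variance of Y.
def _snr_split(total_var, log_SNR):
    """Return (signal_var, noise_var) under the parameterisation used above."""
    SNR = 10.0 ** log_SNR
    noise_var = total_var / (1.0 + SNR)
    return total_var - noise_var, noise_var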
def olympic_100m_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.olympic_100m_men()
# create simple GP Model
m = GPy.models.GPRegression(data["X"], data["Y"])
# set the lengthscale to be something sensible (defaults to 1)
m.rbf.lengthscale = 10
if optimize:
m.optimize("bfgs", max_iters=200)
if MPL_AVAILABLE and plot:
m.plot(plot_limits=(1850, 2050))
return m
def toy_rbf_1d(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.toy_rbf_1d()
# create simple GP Model
m = GPy.models.GPRegression(data["X"], data["Y"])
if optimize:
m.optimize("bfgs")
if MPL_AVAILABLE and plot:
m.plot()
return m
def toy_rbf_1d_50(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting
it to data sampled from an RBF covariance."""
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.toy_rbf_1d_50()
# create simple GP Model
m = GPy.models.GPRegression(data["X"], data["Y"])
if optimize:
m.optimize("bfgs")
if MPL_AVAILABLE and plot:
m.plot()
return m
def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
optimizer = "scg"
x_len = 100
X = np.linspace(0, 10, x_len)[:, None]
f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.RBF(1).K(X))
Y = np.array([np.random.poisson(np.exp(f)) for f in f_true])[:, None]
kern = GPy.kern.RBF(1)
poisson_lik = GPy.likelihoods.Poisson()
laplace_inf = GPy.inference.latent_function_inference.Laplace()
# create simple GP Model
m = GPy.core.GP(
X, Y, kernel=kern, likelihood=poisson_lik, inference_method=laplace_inf
)
if optimize:
m.optimize(optimizer)
if MPL_AVAILABLE and plot:
m.plot()
# plot the real underlying rate function
plt.plot(X, np.exp(f_true), "--k", linewidth=2)
return m
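# Hedged note on the construction above: the latent GP f enters through an
# exponential link, so counts are Poisson with rate exp(f); e.g. f = 0 implies
# an expected count of 1 and f = 2 implies exp(2) ~= 7.39.
def _poisson_rate(f):
    """Mean count implied by a latent function value under the exp link."""
    return np.exp(f)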
def toy_ARD(
max_iters=1000, kernel_type="linear", num_samples=300, D=4, optimize=True, plot=True
):
# Create an artificial dataset where the values in the targets (Y)
# only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asarray(2 * X[:, 0] + 3).reshape(-1, 1)
Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0])).reshape(-1, 1)
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2, D))
Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
Y -= Y.mean()
Y /= Y.std()
if kernel_type == "linear":
kernel = GPy.kern.Linear(X.shape[1], ARD=1)
elif kernel_type == "rbf_inv":
kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
else:
kernel = GPy.kern.RBF(X.shape[1], ARD=1)
kernel += GPy.kern.White(X.shape[1]) + GPy.kern.Bias(X.shape[1])
m = GPy.models.GPRegression(X, Y, kernel)
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize:
m.optimize(optimizer="scg", max_iters=max_iters)
if MPL_AVAILABLE and plot:
m.kern.plot_ARD()
return m
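# Hedged follow-up sketch: with ARD=1 each input dimension gets its own
# lengthscale, and (for an RBF) a very large lengthscale marks an input as
# irrelevant. Printing the kernel is a version-safe way to read these off.
def _show_ard_parameters(m):
    """Print the kernel so the per-dimension ARD lengthscales can be inspected."""
    print(m.kern)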
def toy_ARD_sparse(
max_iters=1000, kernel_type="linear", num_samples=300, D=4, optimize=True, plot=True
):
# Create an artificial dataset where the values in the targets (Y)
# only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asarray(2 * X[:, 0] + 3)[:, None]
Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0]))[:, None]
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2, D))
Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
Y -= Y.mean()
Y /= Y.std()
if kernel_type == "linear":
kernel = GPy.kern.Linear(X.shape[1], ARD=1)
elif kernel_type == "rbf_inv":
kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
else:
kernel = GPy.kern.RBF(X.shape[1], ARD=1)
# kernel += GPy.kern.Bias(X.shape[1])
X_variance = np.ones(X.shape) * 0.5
m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize:
m.optimize(optimizer="scg", max_iters=max_iters)
if MPL_AVAILABLE and plot:
m.kern.plot_ARD()
return m
def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
"""Predict the location of a robot given wirelss signal strength readings."""
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.robot_wireless()
# create simple GP Model
m = GPy.models.GPRegression(data["Y"], data["X"], kernel=kernel)
# optimize
if optimize:
m.optimize(max_iters=max_iters)
Xpredict = m.predict(data["Ytest"])[0]
if MPL_AVAILABLE and plot:
plt.plot(data["Xtest"][:, 0], data["Xtest"][:, 1], "r-")
plt.plot(Xpredict[:, 0], Xpredict[:, 1], "b-")
plt.axis("equal")
plt.title("WiFi Localization with Gaussian Processes")
plt.legend(("True Location", "Predicted Location"))
sse = ((data["Xtest"] - Xpredict) ** 2).sum()
print(("Sum of squares error on test data: " + str(sse)))
return m
def silhouette(max_iters=100, optimize=True, plot=True):
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
try:
import pods
except ImportError:
print("pods unavailable, see https://github.com/sods/ods for example datasets")
return
data = pods.datasets.silhouette()
# create simple GP Model
m = GPy.models.GPRegression(data["X"], data["Y"])
# optimize
if optimize:
m.optimize(messages=True, max_iters=max_iters)
print(m)
return m
def sparse_GP_regression_1D(
num_samples=400,
num_inducing=5,
max_iters=100,
optimize=True,
plot=True,
checkgrad=False,
):
"""Run a 1D example of a sparse GP regression."""
# sample inputs and outputs
X = np.random.uniform(-3.0, 3.0, (num_samples, 1))
Y = np.sin(X) + np.random.randn(num_samples, 1) * 0.05
# construct kernel
rbf = GPy.kern.RBF(1)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
if checkgrad:
m.checkgrad()
if optimize:
m.optimize("tnc", max_iters=max_iters)
if MPL_AVAILABLE and plot:
m.plot()
return m
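# Hedged sketch: the sparse model exposes its inducing input locations (the
# same attribute that epomeo_gpx constrains above); after optimization they
# show where the approximation concentrates its capacity along the input range.
def _show_inducing_inputs(m):
    """Print the (optimized) inducing input locations of a sparse GP."""
    print(m.inducing_inputs)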
def sparse_GP_regression_2D(
num_samples=400, num_inducing=50, max_iters=100, optimize=True, plot=True, nan=False
):
"""Run a 2D example of a sparse GP regression."""
np.random.seed(1234)
X = np.random.uniform(-3.0, 3.0, (num_samples, 2))
Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(num_samples, 1) * 0.05
if nan:
inan = np.random.binomial(1, 0.2, size=Y.shape)
Y[inan] = np.nan
# construct kernel
rbf = GPy.kern.RBF(2)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
# initialize the lengthscales
m[".*len"] = 2.0
m.checkgrad()
# optimize
if optimize:
m.optimize("tnc", messages=1, max_iters=max_iters)
# plot
if MPL_AVAILABLE and plot:
m.plot()
print(m)
return m
def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
"""Run a 1D example of a sparse GP regression with uncertain inputs."""
if MPL_AVAILABLE and plot:
fig, axes = plt.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True)
# sample inputs and outputs
S = np.ones((20, 1))
X = np.random.uniform(-3.0, 3.0, (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
# likelihood = GPy.likelihoods.Gaussian(Y)
Z = np.random.uniform(-3.0, 3.0, (7, 1))
k = GPy.kern.RBF(1)
# create simple GP Model - no input uncertainty on this one
m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
if optimize:
m.optimize("scg", messages=1, max_iters=max_iters)
if MPL_AVAILABLE and plot:
m.plot(ax=axes[0])
axes[0].set_title("no input uncertainty")
print(m)
# the same Model with uncertainty
m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z, X_variance=S)
if optimize:
m.optimize("scg", messages=1, max_iters=max_iters)
if MPL_AVAILABLE and plot:
m.plot(ax=axes[1])
axes[1].set_title("with input uncertainty")
fig.canvas.draw()
print(m)
return m
def simple_mean_function(max_iters=100, optimize=True, plot=True):
"""
The simplest possible mean function. No parameters, just a simple Sinusoid.
"""
# create simple mean function
mf = GPy.core.Mapping(1, 1)
mf.f = np.sin
mf.update_gradients = lambda a, b: None
X = np.linspace(0, 10, 50).reshape(-1, 1)
Y = np.sin(X) + 0.5 * np.cos(3 * X) + 0.1 * np.random.randn(*X.shape)
k = GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
if optimize:
m.optimize(max_iters=max_iters)
if MPL_AVAILABLE and plot:
m.plot(plot_limits=(-10, 15))
return m
def parametric_mean_function(max_iters=100, optimize=True, plot=True):
"""
A linear mean function with parameters that we'll learn alongside the kernel
"""
# create simple mean function
mf = GPy.core.Mapping(1, 1)
mf.f = np.sin
X = np.linspace(0, 10, 50).reshape(-1, 1)
Y = np.sin(X) + 0.5 * np.cos(3 * X) + 0.1 * np.random.randn(*X.shape) + 3 * X
mf = GPy.mappings.Linear(1, 1)
k = GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
if optimize:
m.optimize(max_iters=max_iters)
if MPL_AVAILABLE and plot:
m.plot()
return m
def warped_gp_cubic_sine(max_iters=100, plot=True):
"""
A test replicating the cubic sine regression problem from
Snelson's paper.
"""
X = (2 * np.pi) * np.random.random(151) - np.pi
Y = np.sin(X) + np.random.normal(0, 0.2, 151)
Y = np.array([np.power(abs(y), float(1) / 3) * (1, -1)[y < 0] for y in Y])
X = X[:, None]
Y = Y[:, None]
warp_k = GPy.kern.RBF(1)
warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2)
warp_m = GPy.models.WarpedGP(X, Y, kernel=warp_k, warping_function=warp_f)
warp_m[".*\\.d"].constrain_fixed(1.0)
m = GPy.models.GPRegression(X, Y)
m.optimize_restarts(
parallel=False, robust=True, num_restarts=5, max_iters=max_iters
)
warp_m.optimize_restarts(
parallel=False, robust=True, num_restarts=5, max_iters=max_iters
)
# m.optimize(max_iters=max_iters)
# warp_m.optimize(max_iters=max_iters)
print(warp_m)
print(warp_m[".*warp.*"])
if MPL_AVAILABLE and plot:
warp_m.predict_in_warped_space = False
warp_m.plot(title="Warped GP - Latent space")
warp_m.predict_in_warped_space = True
warp_m.plot(title="Warped GP - Warped space")
m.plot(title="Standard GP")
warp_m.plot_warping()
plt.show()
return warp_m
def multioutput_gp_with_derivative_observations(plot=True):
f = lambda x: np.sin(x) + 0.1 * (x - 2.0) ** 2 - 0.005 * x ** 3
fd = lambda x: np.cos(x) + 0.2 * (x - 2.0) - 0.015 * x ** 2
N = 10 # Number of observations
M = 10 # Number of derivative observations
Npred = 100 # Number of prediction points
sigma = 0.05 # Noise of observations
sigma_der = 0.05 # Noise of derivative observations
x = np.array([np.linspace(1, 10, N)]).T
y = f(x) + np.array(sigma * np.random.normal(0, 1, (N, 1)))
xd = np.array([np.linspace(2, 8, M)]).T
yd = fd(xd) + np.array(sigma_der * np.random.normal(0, 1, (M, 1)))
xpred = np.array([np.linspace(0, 11, Npred)]).T
ypred_true = f(xpred)
ydpred_true = fd(xpred)
# squared exponential kernel:
se = GPy.kern.RBF(input_dim=1, lengthscale=1.5, variance=0.2)
# We need to generate a separate kernel for the derivative observations and give the created kernel as an input:
se_der = GPy.kern.DiffKern(se, 0)
# Then define a Gaussian likelihood for each output:
gauss = GPy.likelihoods.Gaussian(variance=sigma ** 2)
gauss_der = GPy.likelihoods.Gaussian(variance=sigma_der ** 2)
# Then create the model; everything is given in lists, and the order of the inputs indicates the order of the outputs.
# Here the regular observations come first and the derivative observations second, so the kernels and
# the likelihoods must follow the same order. Cross-covariances are taken care of automatically.
m = GPy.models.MultioutputGP(
X_list=[x, xd],
Y_list=[y, yd],
kernel_list=[se, se_der],
likelihood_list=[gauss, gauss_der],
)
# Optimize the model
m.optimize(messages=0, ipython_notebook=False)
if MPL_AVAILABLE and plot:
def plot_gp_vs_real(
m, x, yreal, size_inputs, title, fixed_input=1, xlim=[0, 11], ylim=[-1.5, 3]
):
fig, ax = plt.subplots()
ax.set_title(title)
plt.plot(x, yreal, "r", label="Real function")
rows = (
slice(0, size_inputs[0])
if fixed_input == 0
else slice(size_inputs[0], size_inputs[0] + size_inputs[1])
)
m.plot(
fixed_inputs=[(1, fixed_input)],
which_data_rows=rows,
xlim=xlim,
ylim=ylim,
ax=ax,
)
# Plot the model, the syntax is same as for multioutput models:
plot_gp_vs_real(
m,
xpred,
ydpred_true,
[x.shape[0], xd.shape[0]],
title="Latent function derivatives",
fixed_input=1,
xlim=[0, 11],
ylim=[-1.5, 3],
)
plot_gp_vs_real(
m,
xpred,
ypred_true,
[x.shape[0], xd.shape[0]],
title="Latent function",
fixed_input=0,
xlim=[0, 11],
ylim=[-1.5, 3],
)
# making predictions for the values:
mu, var = m.predict_noiseless(Xnew=[xpred, np.empty((0, 1))])
return m
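# Hedged sketch mirroring the predict_noiseless call above: the list slots
# select the output, so passing the points in the second slot (and an empty
# array in the first) queries the derivative output instead of the values.
def _predict_derivatives(m, xpred):
    """Predict the derivative output of the multioutput GP at xpred."""
    return m.predict_noiseless(Xnew=[np.empty((0, 1)), xpred])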
|
SheffieldML/GPy
|
GPy/examples/regression.py
|
Python
|
bsd-3-clause
| 24,334
|
[
"Gaussian"
] |
dd576bc35c83b53dd4d49812b5252fbf78e4022c264ca7efd9941544eb89f893
|
from cellprofiler.gui.help import USING_METADATA_HELP_REF, USING_METADATA_GROUPING_HELP_REF, LOADING_IMAGE_SEQ_HELP_REF
TM_OVERLAP = 'Overlap'
TM_DISTANCE = 'Distance'
TM_MEASUREMENTS = 'Measurements'
TM_LAP = "LAP"
TM_ALL = [TM_OVERLAP, TM_DISTANCE, TM_MEASUREMENTS,TM_LAP]
LT_NONE = 0
LT_PHASE_1 = 1
LT_SPLIT = 2
LT_MITOSIS = 3
LT_GAP = 4
KM_VEL = 1
KM_NO_VEL = 0
KM_NONE = -1
'''Random motion model, for instance Brownian motion'''
M_RANDOM = "Random"
'''Velocity motion model, object position depends on prior velocity'''
M_VELOCITY = "Velocity"
'''Random and velocity models'''
M_BOTH = "Both"
RADIUS_STD_SETTING_TEXT = 'Number of standard deviations for search radius'
RADIUS_LIMIT_SETTING_TEXT = 'Search radius limit, in pixel units (Min,Max)'
ONLY_IF_2ND_PHASE_LAP_TEXT = '''<i>(Used only if the %(TM_LAP)s tracking method is applied and the second phase is run)</i>'''%globals()
import cellprofiler.icons
from cellprofiler.gui.help import PROTIP_RECOMEND_ICON, PROTIP_AVOID_ICON, TECH_NOTE_ICON
__doc__ = """
<b>Track Objects</b> allows tracking objects throughout sequential
frames of a series of images, so that from frame to frame
each object maintains a unique identity in the output measurements.
<hr>
This module must be placed downstream of a module that identifies objects
(e.g., <b>IdentifyPrimaryObjects</b>). <b>TrackObjects</b> will associate each
object with the same object in the frames before and after. This allows the study
of objects' lineages and the timing and characteristics of dynamic events in
movies.
<p>Images in CellProfiler are processed sequentially by frame (whether loaded as a
series of images or a movie file). To process a collection of images/movies,
you will need to do the following:
<ul>
<li>Define each individual movie using metadata
either contained within the image file itself or as part of the image file naming
or folder structure. %(USING_METADATA_HELP_REF)s.</li>
<li>Group the movies to make sure
that each image sequence is handled individually. %(USING_METADATA_GROUPING_HELP_REF)s.
</li>
</ul>
For complete details, see <i>%(LOADING_IMAGE_SEQ_HELP_REF)s</i>.</p>
<p>For an example pipeline using TrackObjects, see the CellProfiler
<a href="http://www.cellprofiler.org/examples.shtml#Tracking">Examples</a> webpage.</p>
<h4>Available measurements</h4>
<b>Object measurements</b>
<ul>
<li><i>Label:</i> Each tracked object is assigned a unique identifier (label).
Child objects resulting from a split or merge are assigned the label of the ancestor.</li>
<li><i>ParentImageNumber, ParentObjectNumber:</i> The <i>ImageNumber</i> and
<i>ObjectNumber</i> of the parent object in the prior frame. For a split, each
child object will have the label of the object it split from. For a merge,
the child will have the label of the closest parent.</li>
<li><i>TrajectoryX, TrajectoryY:</i> The direction of motion (in x and y coordinates) of the
object from the previous frame to the current frame.</li>
<li><i>DistanceTraveled:</i> The distance traveled by the object from the
previous frame to the current frame (calculated as the magnitude of
the trajectory vectors).</li>
<li><i>Displacement:</i> The shortest distance traveled by the object from its
initial starting position to the position in the current frame. That is, it is
the straight-line path between the two points.</li>
<li><i>IntegratedDistance:</i> The total distance traveled by the object during
the lifetime of the object.</li>
<li><i>Linearity:</i> A measure of how linear the object trajectory is during the
object lifetime. Calculated as (displacement from initial to final
location)/(integrated object distance). Value is in range of [0,1].</li>
<li><i>Lifetime:</i> The number of frames an object has existed. The lifetime starts
at 1 at the frame when an object appears, and is incremented with each frame that the
object persists. At the final frame of the image set/movie, the
lifetimes of all remaining objects are output.</li>
<li><i>FinalAge:</i> Similar to <i>Lifetime</i> but is only output at the final
frame of the object's life (or the movie ends, whichever comes first). At this point,
the final age of the object is output; no values are stored for earlier frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> This value
is useful if you want to plot a histogram of the object lifetimes; all but the final age
can be ignored or filtered out.</dd>
</dl></li>
</ul>
The following object measurements are specific to the %(TM_LAP)s tracking method:
<ul>
<li><i>LinkType:</i> The linking method used to link the object to its parent.
Possible values are
<ul>
<li><b>%(LT_NONE)d</b>: The object was not linked to a parent.</li>
<li><b>%(LT_PHASE_1)d</b>: The object was linked to a parent in the previous frame.</li>
<li><b>%(LT_SPLIT)d</b>: The object is linked as the start of a split path.</li>
<li><b>%(LT_MITOSIS)s</b>: The object was linked to its parent as a daughter of
a mitotic pair.</li>
<li><b>%(LT_GAP)d</b>: The object was linked to a parent in a frame prior to the
previous frame (a gap).</li>
</ul>
Under some circumstances, multiple linking methods may apply to a given object, e.g., an
object may be both the beginning of a split path and not have a parent. However, only
one linking method is assigned.</li>
<li><i>MovementModel:</i> The movement model used to track the object.
<ul>
<li><b>%(KM_NO_VEL)d</b>: The <i>%(M_RANDOM)s</i> model was used.</li>
<li><b>%(KM_VEL)d</b>: The <i>%(M_VELOCITY)s</i> model was used.</li>
<li><b>-1</b>: Neither model was used. This can occur under two circumstances:
<ul>
<li>At the beginning of a trajectory, when there is no data to determine the model as
yet.</li>
<li>At the beginning of a closed gap, since a model was not actually applied to make
the link in the first phase.</li>
</ul></li>
</ul>
</li>
<li><i>LinkingDistance:</i> The difference between the propagated position of an
object and the object to which it is matched.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> A slowly decaying histogram of
these distances indicates that the search radius is large enough. A cut-off histogram
is a sign that the search radius is too small.</dd>
</dl></li>
<li><i>StandardDeviation:</i> The Kalman filter maintains a running estimate
of the variance of the error in estimated position for each model.
This measurement records the linking distance divided by the standard deviation
of the error when linking the object with its parent.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> This value is multiplied by
the <i>"%(RADIUS_STD_SETTING_TEXT)s"</i> setting to constrain the search distance.
A histogram of this value can help determine if the <i>"%(RADIUS_LIMIT_SETTING_TEXT)s"</i>
setting is appropriate.</dd>
</dl>
</li>
<li><i>GapLength:</i> The number of frames between an object and its parent.
For instance, an object in frame 3 with a parent in frame 1 has a gap length of
2.</li>
<li><i>GapScore:</i> If an object is linked to its parent by bridging a gap,
this value is the score for the gap.</li>
<li><i>SplitScore:</i> If an object linked to its parent via a split, this
value is the score for the split.</li>
<li><i>MergeScore:</i> If an object linked to a child via a merge, this value is
the score for the merge.</li>
<li><i>MitosisScore:</i> If an object linked to two children via a mitosis,
this value is the score for the mitosis.</li>
</ul>
<b>Image measurements</b>
<ul>
<li><i>LostObjectCount:</i> Number of objects that appear in the previous frame
but have no identifiable child in the current frame.</li>
<li><i>NewObjectCount:</i> Number of objects that appear in the current frame but
have no identifiable parent in the previous frame. </li>
<li><i>SplitObjectCount:</i> Number of objects in the current frame that
resulted from a split from a parent object in the previous frame.</li>
<li><i>MergedObjectCount:</i> Number of objects in the current frame that
resulted from the merging of child objects in the previous frame.</li>
</ul>
See also: Any of the <b>Measure</b> modules, <b>IdentifyPrimaryObjects</b>, <b>Groups</b>.
"""%globals()
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import logging
logger = logging.getLogger(__name__)
import numpy as np
import numpy.ma
from scipy.ndimage import distance_transform_edt
import scipy.ndimage
import scipy.sparse
import cellprofiler.cpmodule as cpm
import cellprofiler.cpimage as cpi
import cellprofiler.pipeline as cpp
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.measurements as cpmeas
import cellprofiler.preferences as cpprefs
from cellprofiler.cpmath.lapjv import lapjv
import cellprofiler.cpmath.filter as cpfilter
from cellprofiler.cpmath.cpmorphology import fixup_scipy_ndimage_result as fix
from cellprofiler.cpmath.cpmorphology import centers_of_labels
from cellprofiler.cpmath.cpmorphology import associate_by_distance
from cellprofiler.cpmath.cpmorphology import all_connected_components
from cellprofiler.cpmath.index import Indexes
from identify import M_LOCATION_CENTER_X, M_LOCATION_CENTER_Y
from cellprofiler.gui.help import HELP_ON_MEASURING_DISTANCES
DT_COLOR_AND_NUMBER = 'Color and Number'
DT_COLOR_ONLY = 'Color Only'
DT_ALL = [DT_COLOR_AND_NUMBER, DT_COLOR_ONLY]
R_PARENT = "Parent"
F_PREFIX = "TrackObjects"
F_LABEL = "Label"
F_PARENT_OBJECT_NUMBER = "ParentObjectNumber"
F_PARENT_IMAGE_NUMBER = "ParentImageNumber"
F_TRAJECTORY_X = "TrajectoryX"
F_TRAJECTORY_Y = "TrajectoryY"
F_DISTANCE_TRAVELED = "DistanceTraveled"
F_DISPLACEMENT = "Displacement"
F_INTEGRATED_DISTANCE = "IntegratedDistance"
F_LINEARITY = "Linearity"
F_LIFETIME = "Lifetime"
F_FINAL_AGE = "FinalAge"
F_MOVEMENT_MODEL = "MovementModel"
F_LINK_TYPE = "LinkType"
F_LINKING_DISTANCE = "LinkingDistance"
F_STANDARD_DEVIATION = "StandardDeviation"
F_GAP_LENGTH = "GapLength"
F_GAP_SCORE = "GapScore"
F_MERGE_SCORE = "MergeScore"
F_SPLIT_SCORE = "SplitScore"
F_MITOSIS_SCORE = "MitosisScore"
F_KALMAN = "Kalman"
F_STATE = "State"
F_COV = "COV"
F_NOISE = "Noise"
F_VELOCITY_MODEL = "Vel"
F_STATIC_MODEL = "NoVel"
F_X = "X"
F_Y = "Y"
F_VX = "VX"
F_VY = "VY"
F_EXPT_ORIG_NUMTRACKS = "%s_OriginalNumberOfTracks"%F_PREFIX
F_EXPT_FILT_NUMTRACKS = "%s_FilteredNumberOfTracks"%F_PREFIX
def kalman_feature(model, matrix_or_vector, i, j=None):
'''Return the feature name for a Kalman feature
model - model used for Kalman feature: velocity or static
matrix_or_vector - the part of the Kalman state to save, vec, COV or noise
i - the name for the first (or only for vec and noise) index into the vector
j - the name of the second index into the matrix
'''
pieces = [F_KALMAN, model, matrix_or_vector, i]
if j is not None:
pieces.append(j)
return "_".join(pieces)
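# Example of the naming scheme implemented above (illustrative self-check):
# the X/Y covariance entry of the velocity model is stored as
# "Kalman_Vel_COV_X_Y".
assert kalman_feature(F_VELOCITY_MODEL, F_COV, F_X, F_Y) == "Kalman_Vel_COV_X_Y"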
'''# of objects in the current frame without parents in the previous frame'''
F_NEW_OBJECT_COUNT = "NewObjectCount"
'''# of objects in the previous frame without parents in the new frame'''
F_LOST_OBJECT_COUNT = "LostObjectCount"
'''# of parents that split into more than one child'''
F_SPLIT_COUNT = "SplitObjectCount"
'''# of children that are merged from more than one parent'''
F_MERGE_COUNT = "MergedObjectCount"
'''Object area measurement for LAP method
The final part of the LAP method needs the object area measurement
which is stored using this name.'''
F_AREA = "Area"
F_ALL_COLTYPE_ALL = [(F_LABEL, cpmeas.COLTYPE_INTEGER),
(F_PARENT_OBJECT_NUMBER, cpmeas.COLTYPE_INTEGER),
(F_PARENT_IMAGE_NUMBER, cpmeas.COLTYPE_INTEGER),
(F_TRAJECTORY_X, cpmeas.COLTYPE_INTEGER),
(F_TRAJECTORY_Y, cpmeas.COLTYPE_INTEGER),
(F_DISTANCE_TRAVELED, cpmeas.COLTYPE_FLOAT),
(F_DISPLACEMENT, cpmeas.COLTYPE_FLOAT),
(F_INTEGRATED_DISTANCE, cpmeas.COLTYPE_FLOAT),
(F_LINEARITY, cpmeas.COLTYPE_FLOAT),
(F_LIFETIME, cpmeas.COLTYPE_INTEGER),
(F_FINAL_AGE, cpmeas.COLTYPE_INTEGER)]
F_IMAGE_COLTYPE_ALL = [(F_NEW_OBJECT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_LOST_OBJECT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_SPLIT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_MERGE_COUNT, cpmeas.COLTYPE_INTEGER)]
F_ALL = [feature for feature, coltype in F_ALL_COLTYPE_ALL]
F_IMAGE_ALL = [feature for feature, coltype in F_IMAGE_COLTYPE_ALL]
class TrackObjects(cpm.CPModule):
module_name = 'TrackObjects'
category = "Object Processing"
variable_revision_number = 6
def create_settings(self):
self.tracking_method = cps.Choice(
'Choose a tracking method',
TM_ALL, doc="""
When trying to track an object in an image,
<b>TrackObjects</b> will search within a maximum
specified distance (see the <i>distance within which to search</i> setting)
of the object's location in the previous image, looking for a "match".
Objects that match are assigned the same number, or label, throughout the
entire movie.
There are several options for the method used to find a match. Choose
among these options based on which is most consistent from frame
to frame of your movie.
<ul>
<li><i>%(TM_OVERLAP)s:</i> Compares the amount of spatial overlap between identified objects in
the previous frame with those in the current frame. The object with the
greatest amount of spatial overlap will be assigned the same number (label).
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Recommended when there is a high degree of overlap of an object from one frame to the next,
which is the case for movies with high frame rates relative to object motion.</dd>
</dl></li>
<li><i>%(TM_DISTANCE)s:</i> Compares the distance between each identified
object in the previous frame with that of the current frame. The
closest objects to each other will be assigned the same number (label).
Distances are measured from the perimeter of each object.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Recommended for cases where the objects are not very crowded but where
<i>%(TM_OVERLAP)s</i> does not work sufficiently well, which is the case
for movies with low frame rates relative to object motion.</dd>
</dl></li>
<li><i>%(TM_MEASUREMENTS)s:</i> Compares each object in the
current frame with objects in the previous frame based on a particular
feature you have measured for the objects (for example, a particular intensity
or shape measurement that can distinguish nearby objects). The object
with the closest-matching measurement will be selected as a match and will be
assigned the same number (label). This selection requires that you run the
specified <b>Measure</b> module previous to this module in the pipeline so
that the measurement values can be used to track the objects.</li>
<li><i>%(TM_LAP)s:</i> Uses the linear assignment problem (LAP) framework. The
linear assignment problem (LAP) algorithm (<i>Jaqaman et al., 2008</i>)
addresses the challenges of high object density, motion heterogeneity,
temporary disappearances, and object merging and splitting.
The algorithm first links objects between consecutive frames and then links
the resulting partial trajectories into complete trajectories. Both steps are formulated
as global combinatorial optimization problems whose solution identifies the overall
most likely set of object trajectories throughout a movie.
<p>Tracks are constructed from an image sequence by detecting objects in each
frame and linking objects between consecutive frames as a first step. This step alone
may result in incompletely tracked objects due to the appearance and disappearance
of objects, either in reality or apparently because of noise and imaging limitations.
To correct this, you may apply an optional second step which closes temporal gaps
between tracked objects and captures merging and splitting events. This step takes
place at the end of the analysis run.</p>
<p><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Some recommendations on optimizing
the LAP settings<br>
<ul>
<li><i>Work with a minimal subset of your data:</i> Attempting to optimize these settings
by examining a dataset containing many objects may be complicated and frustrating.
Therefore, it is a good idea to work with a smaller portion of the data containing the
behavior of interest.
<ul>
<li>For example, if splits characterize your data, try narrowing down to just
one cell that undergoes a split and examine a few frames before and after the event.</li>
<li>You can insert the <b>Crop</b> module to zoom in on a region of interest, optimize the
settings and then either remove or disable the module when done.</li>
<li>You can also use the <b>Input</b> modules to limit yourself to a few frames under
consideration. For example, use the filtering settings in the <b>Images</b> module to
use only certain files from the movie in the pipeline.</li>
</ul></li>
<li><i>Begin by optimizing the settings for the first phase of the LAP:</i> The 2nd phase of
the LAP method depends on the results of the first phase. Therefore, it is a good idea to
optimize the first phase settings as the initial step.
<ul>
<li>You can disable 2nd phase calculation by selecting <i>%(NO)s</i> for "Run the second
phase of the LAP algorithm?"</li>
<li>By maximizing the number of correct frame-to-frame links in the first phase, the
2nd phase will have fewer candidates to consider for linking and a better chance of
closing gaps correctly. </li>
<li>If tracks are not being linked in the first phase, you may need to adjust the number
of standard deviations for the search radius and/or the radius limits (most likely
the maximum limit). See the help for these settings for details.</li>
</ul></li>
<li><i>Use any visualization tools at your disposal:</i> Visualizing the data often allows for
easier decision making as opposed to sorting through tabular data alone.
<ul>
<li>The <a href="http://cran.r-project.org/">R</a> open-source software package has
analysis and visualization tools that can query a database. See <a href=
"http://www.broadinstitute.org/~leek/rtracking.html">here</a> for a use case by our
lead software engineer.</li>
<li><a href="http://cellprofiler.org/tracer/">CellProfiler Tracer</a> is a version of
CellProfiler Analyst that contains tools for visualizing time-lapse data that has been exported
using the <b>ExportToDatabase</b> module.</li>
</ul></li>
</ul>
</p>
<p><b>References</b>
<ul>
<li>Jaqaman K, Loerke D, Mettlen M, Kuwata H, Grinstein S, Schmid SL, Danuser G. (2008)
"Robust single-particle tracking in live-cell time-lapse sequences."
<i>Nature Methods</i> 5(8),695-702.
<a href="http://dx.doi.org/10.1038/nmeth.1237">(link)</a></li>
<li>Jaqaman K, Danuser G. (2009) "Computational image analysis of cellular dynamics:
a case study based on particle tracking." Cold Spring Harb Protoc. 2009(12):pdb.top65.
<a href="http://dx.doi.org/10.1101/pdb.top65">(link)</a></li>
</ul></p>
</li>
</ul>"""%globals())
self.object_name = cps.ObjectNameSubscriber(
'Select the objects to track',cps.NONE, doc="""
Select the objects to be tracked by this module.""")
self.measurement = cps.Measurement(
'Select object measurement to use for tracking',
lambda : self.object_name.value, doc="""
<i>(Used only if Measurements is the tracking method)</i><br>
Select which type of measurement (category) and which specific feature from the
<b>Measure</b> module will be used for tracking. Select the feature name from
the popup box or see each <b>Measure</b> module's help for the list of
the features measured by that module. If necessary, you will also be asked
to specify additional details such as the
image from which the measurements originated or the measurement scale.""")
self.pixel_radius = cps.Integer(
'Maximum pixel distance to consider matches',50,minval=1,doc="""
Objects in the subsequent frame will be considered potential matches if
they are within this distance. To determine a suitable pixel distance, you can look
at the axis increments on each image (shown in pixel units) or
use the distance measurement tool. %(HELP_ON_MEASURING_DISTANCES)s"""%globals())
self.model = cps.Choice(
"Select the movement model",[M_RANDOM, M_VELOCITY, M_BOTH], value=M_BOTH,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
This setting controls how to predict an object's position in
the next frame, assuming that each object moves randomly with
a frame-to-frame variance in position that follows a Gaussian
distribution.<br>
<ul>
<li><i>%(M_RANDOM)s:</i> A model in which objects move due to
Brownian Motion or a similar process where the variance in position
differs between objects.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Use this model if the objects move with some
random jitter around a stationary location.</dd>
</dl></li>
<li><i>%(M_VELOCITY)s:</i> A model in which the object moves with
a velocity. Both velocity and position (after correcting for
velocity) vary following a Gaussian distribution.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Use this model if
the objects move along a spatial trajectory in some direction over time.</dd>
</dl></li>
<li><i>%(M_BOTH)s:</i> <b>TrackObjects</b> will predict each
object's position using both models and use the model with the
lowest penalty to join an object in one frame with one in another.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Use this
option if both models above are applicable over time.</dd>
</dl></li>
</ul>""" % globals())
self.radius_std = cps.Float(
RADIUS_STD_SETTING_TEXT, 3, minval=1,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i>
<br>
<b>TrackObjects</b> derives a search radius from an error
estimation based on (a) the standard deviation of the movement and
(b) the diameter of the object. The standard deviation is a measure of
the error between the observed and predicted positions of an object for
each movement model. The module will constrain the search for matching
objects from one frame to the next to the standard deviation
of the error times the number of standard
deviations that you enter here.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>If the standard deviation is quite small, but the object makes a
large spatial jump, this value may need to be set higher in order
to increase the search area and thereby make the frame-to-frame
linkage.</li>
</ul></dd>
</dl>"""%globals())
self.radius_limit = cps.FloatRange(
RADIUS_LIMIT_SETTING_TEXT, (2, 10), minval = 0,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
<b>TrackObjects</b> derives a search radius from an error
estimation based on (a) the standard deviation of the movement and
(b) the diameter of the object. Potentially, the module can make an erroneous assignment
with a large error, leading to a large estimated error for
the object in the next frame. Conversely, the module can arrive
at a small estimated error by chance, leading to a maximum radius
that does not track the object in a subsequent frame. The radius
limit constrains the search radius to reasonable values.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Special care must be taken to adjust the upper limit appropriately
for the data.</li>
<li>The lower limit should be set to a radius (in pixels) that is a
reasonable displacement for any object from one frame to the next.
<ul>
<li>If you notice that a frame-to-frame linkage is not being made for a
steadily-moving object, it may be that this value needs to be <i>decreased</i>
such that the displacement falls above the lower limit.</li>
<li>Alternately, if you notice that a frame-to-frame linkage is not
being made for a roughly stationary object, this value may need to be
<i>increased</i> so that the small displacement error is offset by the
object diameter.</li>
</ul></li>
<li>The upper limit should be set to the maximum reasonable
displacement (in pixels) under any circumstances. Hence, if you notice that
a frame-to-frame linkage is not being made in the case of a unusually
large displacement, this value may need to be increased.</li>
</ul></dd>
</dl>"""%globals())
self.wants_second_phase = cps.Binary(
"Run the second phase of the LAP algorithm?", True, doc="""
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
Select <i>%(YES)s</i> to run the second phase of the LAP algorithm
after processing all images. Select <i>%(NO)s</i> to omit the
second phase (or to perform it later by running the module
as a data tool).
<p>Since object tracks may start and end not only because of the true appearance
and disappearance of objects, but also because of apparent disappearances due
to noise and limitations in imaging, you may want to run the second phase
which attempts to close temporal gaps between tracked objects and tries to
capture merging and splitting events.</p>
<p>For additional details on optimizing the LAP settings, see the help for each
the settings.</p>"""%globals())
self.gap_cost = cps.Integer(
'Gap closing cost', 40, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting assigns a cost to keeping a gap caused
when an object is missing from one of the frames of a track (the
alternative to keeping the gap is to bridge it by connecting
the tracks on either side of the missing frames).
The cost of bridging a gap is the distance, in pixels, of the
displacement of the object between frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the gap closing cost higher if tracks from objects in previous
frames are being erroneously joined, across a gap, to tracks from
objects in subsequent frames. </li>
<li>Set the gap closing cost lower if tracks
are not properly joined due to gaps caused by mis-segmentation.</li>
</ul></dd>
</dl></p>'''%globals())
self.split_cost = cps.Integer(
'Split alternative cost', 40, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of keeping two tracks distinct
when the alternative is to make them into one track that
splits. A split occurs when an object in one frame is assigned
to the same track as two objects in a subsequent frame.
The split cost takes two components into account:
<ul>
<li>The area of the split object relative to the area of
the resulting objects.</li>
<li>The displacement of the resulting
objects relative to the position of the original object.</li>
</ul>
The split cost is roughly measured in pixels. The split alternative cost is
(conceptually) subtracted from the cost of making the split.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The split cost should be set lower if objects are being split
that should not be split. </li>
<li>The split cost should be set higher if objects
that should be split are not.</li>
<li>If you are confident that there should be no splits present in the data,
the cost can be set to 1 (the minimum value possible)</li>
</ul></dd>
</dl>'''%globals())
self.merge_cost = cps.Integer(
'Merge alternative cost', 40, minval=1,doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of keeping two tracks
distinct when the alternative is to merge them into one.
A merge occurs when two objects in one frame are assigned to
the same track as a single object in a subsequent frame.
The merge score takes two components into account:
<ul>
<li>The area of the two objects
to be merged relative to the area of the resulting objects.</li>
<li>The displacement of the original objects relative to the final
object. </li>
</ul>
The merge cost is measured in pixels. The merge
alternative cost is (conceptually) subtracted from the
cost of making the merge.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the merge alternative cost lower if objects are being
merged when they should otherwise be kept separate. </li>
<li>Set the merge alternative cost
higher if objects that are not merged should be merged.</li>
<li>If you are confident that there should be no merges present in the data,
the cost can be set to 1 (the minimum value possible)</li>
</ul></dd>
</dl>'''%globals())
self.mitosis_cost = cps.Integer(
'Mitosis alternative cost', 80, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of not linking a parent and two daughters
via the mitosis model. The %(TM_LAP)s tracking method weighs this
cost against the score of a potential mitosis. The model expects
the daughters to be equidistant from the parent after mitosis,
so the parent location is expected to be midway between the daughters.
In addition, the model expects the daughters' areas to be equal
to the parent's area. The mitosis score is the distance error
of the parent times the area inequality ratio of the parent and
daughters (the larger of Area(daughters) / Area(parent) and
Area(parent) / Area(daughters)).<br>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>An accepted mitosis closes two gaps, so all things being equal,
the mitosis alternative cost should be approximately double the
gap closing cost.</li>
<li>Increase the mitosis alternative cost to favor more mitoses
and decrease it to prevent more mitoses candidates from being
accepted.</li>
</ul></dd>
</dl>'''%globals())
self.mitosis_max_distance = cps.Integer(
'Maximum mitosis distance, in pixel units', 40, minval=1, doc= '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the maximum allowed distance in pixels of either
of the daughter candidate centroids after mitosis from the parent candidate.
'''%globals())
self.max_gap_score = cps.Integer(
'Maximum gap displacement, in pixel units', 5, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large
displacements during the second phase.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The maximum gap displacement should be set to roughly
the maximum displacement of an object's center from frame to frame. An object that makes large
frame-to-frame jumps should have a higher value for this setting than one that only moves slightly.</li>
<li>Be aware that the LAP algorithm will run more slowly with a higher maximum gap displacement
value, since the higher this value, the more objects that must be compared at each step.</li>
<li>Objects that would have been tracked between successive frames for a lower maximum displacement
may not be tracked if the value is set higher.</li>
<li>This setting may be the culprit if an object is not tracked frame-to-frame despite optimizing
the LAP first-pass settings.</li>
</ul></dd>
</dl>'''%globals())
self.max_merge_score = cps.Integer(
'Maximum merge score', 50, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large
merge scores. The merge score has two components:
<ul>
<li>The area of the resulting merged object relative to the area of the
two objects to be merged.</li>
<li>The distances between the objects to be merged and the resulting object. </li>
</ul>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The LAP algorithm will run more slowly with a higher maximum merge score value. </li>
<li>Objects that would have been merged at a lower maximum merge score will not be considered for merging.</li>
</ul></dd>
</dl>'''%globals())
self.max_split_score = cps.Integer(
'Maximum split score', 50, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large split scores. The split score has two components:
<ul>
<li>The area of the initial object relative to the area of the
two objects resulting from the split.</li>
<li>The distances between the original and resulting objects. </li>
</ul>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The LAP algorithm will run more slowly with a higher maximum split score value. </li>
<li>Objects that would have been split at a lower maximum split score will not be considered for splitting.</li>
</ul></dd>
</dl>'''%globals())
self.max_frame_distance = cps.Integer(
'Maximum temporal gap, in frames', 5, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
<b>Care must be taken to adjust this setting appropriately for the data.</b><br>
This setting controls the maximum number of frames that can
be skipped when bridging a temporal gap caused by an unsegmented object.
These gaps occur when an image is mis-segmented and identification
fails to find an object in one or more frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the maximum gap higher in order to have more chance of correctly recapturing an object after
erroneously losing the original for a few frames.</li>
<li>Set the maximum gap lower to reduce the chance of erroneously connecting to the wrong object after
correctly losing the original object (e.g., if the cell dies or moves off-screen).</li>
</ul></dd>
</dl>'''%globals())
self.wants_lifetime_filtering = cps.Binary(
'Filter objects by lifetime?', False, doc = '''
Select <i>%(YES)s</i> if you want objects to be filtered by their
lifetime, i.e., total duration in frames. This is useful for
marking objects which transiently appear and disappear, such
as the results of a mis-segmentation. <br>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>This operation does not actually delete the filtered object,
but merely removes its label from the tracked object list;
the filtered object's per-object measurements are retained.</li>
<li>An object can be filtered only if it is tracked as a unique object.
Splits continue the lifetime count from their parents, so the minimum
lifetime value does not apply to them.</li>
</ul></dd>
</dl>'''%globals())
self.wants_minimum_lifetime = cps.Binary(
'Filter using a minimum lifetime?', True, doc = '''
<i>(Used only if objects are filtered by lifetime)</i><br>
Select <i>%(YES)s</i> to filter the object on the basis of a minimum number of frames.'''%globals())
self.min_lifetime = cps.Integer(
'Minimum lifetime', 1, minval=1,doc="""
Enter the minimum number of frames an object is permitted to persist. Objects
which last this number of frames or fewer are filtered out.""")
self.wants_maximum_lifetime = cps.Binary(
'Filter using a maximum lifetime?', False, doc = '''
<i>(Used only if objects are filtered by lifetime)</i><br>
Select <i>%(YES)s</i> to filter the object on the basis of a maximum number of frames.'''%globals())
self.max_lifetime = cps.Integer(
'Maximum lifetime', 100, doc="""
Enter the maximum number of frames an object is permitted to persist. Objects
which last this number of frames or more are filtered out.""")
self.display_type = cps.Choice(
'Select display option', DT_ALL, doc="""
The output image can be saved as:
<ul>
<li><i>%(DT_COLOR_ONLY)s:</i> A color-labeled image, with each tracked
object assigned a unique color</li>
<li><i>%(DT_COLOR_AND_NUMBER)s:</i> Same as above but with the tracked object
number superimposed.</li>
</ul>"""%globals())
self.wants_image = cps.Binary(
"Save color-coded image?", False, doc="""
Select <i>%(YES)s</i> to retain the image showing the tracked objects
for later use in the pipeline. For example, a common use is to save
the image with the <b>SaveImages</b> module for quality control purposes.
<p>Please note that if you are using the second phase of the %(TM_LAP)s method,
the final labels are not assigned until <i>after</i> the pipeline has
completed the analysis run. That means that saving the color-coded image
will only show the penultimate result and not the final product.</p>"""%globals())
self.image_name = cps.ImageNameProvider(
"Name the output image", "TrackedCells", doc = '''
<i>(Used only if saving the color-coded image)</i><br>
Enter a name to give the color-coded image of tracked labels.''')
def settings(self):
return [self.tracking_method, self.object_name, self.measurement,
self.pixel_radius, self.display_type, self.wants_image,
self.image_name, self.model,
self.radius_std, self.radius_limit,
self.wants_second_phase,
self.gap_cost, self.split_cost, self.merge_cost,
self.max_gap_score, self.max_split_score,
self.max_merge_score, self.max_frame_distance,
self.wants_lifetime_filtering, self.wants_minimum_lifetime,
self.min_lifetime, self.wants_maximum_lifetime,
self.max_lifetime, self.mitosis_cost, self.mitosis_max_distance]
def validate_module(self, pipeline):
'''Make sure that the user has selected some limits when filtering'''
if (self.tracking_method == TM_LAP and
self.wants_lifetime_filtering.value and
(not self.wants_minimum_lifetime.value and not self.wants_maximum_lifetime.value) ):
raise cps.ValidationError(
'Please enter a minimum and/or maximum lifetime limit',
self.wants_lifetime_filtering)
def visible_settings(self):
result = [self.tracking_method, self.object_name]
if self.tracking_method == TM_MEASUREMENTS:
result += [ self.measurement]
if self.tracking_method == TM_LAP:
result += [self.model, self.radius_std, self.radius_limit]
result += [self.wants_second_phase]
if self.wants_second_phase:
result += [
self.gap_cost, self.split_cost, self.merge_cost,
self.mitosis_cost,
self.max_gap_score, self.max_split_score,
self.max_merge_score, self.max_frame_distance,
self.mitosis_max_distance]
else:
result += [self.pixel_radius]
result += [ self.wants_lifetime_filtering]
if self.wants_lifetime_filtering:
result += [ self.wants_minimum_lifetime ]
if self.wants_minimum_lifetime:
result += [ self.min_lifetime ]
result += [ self.wants_maximum_lifetime ]
if self.wants_maximum_lifetime:
result += [ self.max_lifetime ]
result +=[ self.display_type, self.wants_image]
if self.wants_image.value:
result += [self.image_name]
return result
@property
def static_model(self):
return self.model in (M_RANDOM, M_BOTH)
@property
def velocity_model(self):
return self.model in (M_VELOCITY, M_BOTH)
def get_ws_dictionary(self, workspace):
return self.get_dictionary(workspace.image_set_list)
def __get(self, field, workspace, default):
if self.get_ws_dictionary(workspace).has_key(field):
return self.get_ws_dictionary(workspace)[field]
return default
def __set(self, field, workspace, value):
self.get_ws_dictionary(workspace)[field] = value
def get_group_image_numbers(self, workspace):
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
d = self.get_ws_dictionary(workspace)
group_number = m.get_group_number()
if not d.has_key("group_number") or d["group_number"] != group_number:
d["group_number"] = group_number
group_indexes = np.array([
(m.get_measurement(cpmeas.IMAGE, cpmeas.GROUP_INDEX, i), i)
for i in m.get_image_numbers()
if m.get_measurement(cpmeas.IMAGE, cpmeas.GROUP_NUMBER, i) ==
group_number], int)
order = np.lexsort([group_indexes[:, 0]])
d["group_image_numbers"] = group_indexes[order, 1]
return d["group_image_numbers"]
def get_saved_measurements(self, workspace):
return self.__get("measurements", workspace, np.array([], float))
def set_saved_measurements(self, workspace, value):
self.__set("measurements", workspace, value)
def get_saved_coordinates(self, workspace):
return self.__get("coordinates", workspace, np.zeros((2,0), int))
def set_saved_coordinates(self, workspace, value):
self.__set("coordinates", workspace, value)
def get_orig_coordinates(self, workspace):
'''The coordinates of the first occurrence of an object's ancestor'''
return self.__get("orig coordinates", workspace, np.zeros((2,0), int))
def set_orig_coordinates(self, workspace, value):
self.__set("orig coordinates", workspace, value)
def get_saved_labels(self, workspace):
return self.__get("labels", workspace, None)
def set_saved_labels(self, workspace, value):
self.__set("labels", workspace, value)
def get_saved_object_numbers(self, workspace):
return self.__get("object_numbers", workspace, np.array([], int))
def set_saved_object_numbers(self, workspace, value):
return self.__set("object_numbers", workspace, value)
def get_saved_ages(self, workspace):
return self.__get("ages", workspace, np.array([], int))
def set_saved_ages(self, workspace, values):
self.__set("ages", workspace, values)
def get_saved_distances(self, workspace):
return self.__get("distances", workspace, np.zeros((0,)))
def set_saved_distances(self, workspace, values):
self.__set("distances", workspace, values)
def get_max_object_number(self, workspace):
return self.__get("max_object_number", workspace, 0)
def set_max_object_number(self, workspace, value):
self.__set("max_object_number", workspace, value)
def get_kalman_states(self, workspace):
return self.__get("kalman_states", workspace, None)
def set_kalman_states(self, workspace, value):
self.__set("kalman_states", workspace, value)
def prepare_group(self, workspace, grouping, image_numbers):
'''Erase any tracking information at the start of a run'''
d = self.get_dictionary(workspace.image_set_list)
d.clear()
return True
def measurement_name(self, feature):
'''Return a measurement name for the given feature'''
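# For example (assuming F_PREFIX == "TrackObjects"), the feature
# "Label" becomes "TrackObjects_Label" under LAP tracking and
# "TrackObjects_Label_50" for a non-LAP method with pixel radius 50.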
if self.tracking_method == TM_LAP:
return "%s_%s" % (F_PREFIX, feature)
return "%s_%s_%s" % (F_PREFIX, feature, str(self.pixel_radius.value))
def image_measurement_name(self, feature):
'''Return a measurement name for an image measurement'''
if self.tracking_method == TM_LAP:
return "%s_%s_%s" % (F_PREFIX, feature, self.object_name.value)
return "%s_%s_%s_%s" % (F_PREFIX, feature, self.object_name.value,
str(self.pixel_radius.value))
def add_measurement(self, workspace, feature, values):
'''Add a measurement to the workspace's measurements
workspace - current image set's workspace
feature - name of feature being measured
values - one value per object
'''
workspace.measurements.add_measurement(
self.object_name.value,
self.measurement_name(feature),
values)
def add_image_measurement(self, workspace, feature, value):
measurement_name = self.image_measurement_name(feature)
workspace.measurements.add_image_measurement(measurement_name, value)
def run(self, workspace):
objects = workspace.object_set.get_objects(self.object_name.value)
if self.tracking_method == TM_DISTANCE:
self.run_distance(workspace, objects)
elif self.tracking_method == TM_OVERLAP:
self.run_overlap(workspace, objects)
elif self.tracking_method == TM_MEASUREMENTS:
self.run_measurements(workspace, objects)
elif self.tracking_method == TM_LAP:
self.run_lapdistance(workspace, objects)
else:
raise NotImplementedError("Unimplemented tracking method: %s" %
self.tracking_method.value)
if self.wants_image.value:
import matplotlib.figure
import matplotlib.axes
import matplotlib.backends.backend_agg
import matplotlib.transforms
from cellprofiler.gui.cpfigure_tools import figure_to_image, only_display_image
figure = matplotlib.figure.Figure()
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
ax = figure.add_subplot(1,1,1)
self.draw(objects.segmented, ax,
self.get_saved_object_numbers(workspace))
#
# This is the recipe for just showing the axis
#
only_display_image(figure, objects.segmented.shape)
image_pixels = figure_to_image(figure, dpi=figure.dpi)
image = cpi.Image(image_pixels)
workspace.image_set.add(self.image_name.value, image)
if self.show_window:
workspace.display_data.labels = objects.segmented
workspace.display_data.object_numbers = \
self.get_saved_object_numbers(workspace)
def display(self, workspace, figure):
if hasattr(workspace.display_data, "labels"):
figure.set_subplots((1, 1))
subfigure = figure.figure
subfigure.clf()
ax = subfigure.add_subplot(1,1,1)
self.draw(workspace.display_data.labels, ax,
workspace.display_data.object_numbers)
else:
# We get here after running as a data tool
figure.figure.text(.5, .5, "Analysis complete",
ha="center", va="center")
def draw(self, labels, ax, object_numbers):
import matplotlib
indexer = np.zeros(len(object_numbers)+1,int)
indexer[1:] = object_numbers
#
# We want to keep the colors stable, but we also want the
# largest possible separation between adjacent colors. So, here
# we reverse the significance of the bits in the indices so
# that adjacent numbers (e.g., 0 and 1) differ by 128, roughly
#
pow_of_2 = 2**np.mgrid[0:8,0:len(indexer)][0]
bits = (indexer & pow_of_2).astype(bool)
indexer = np.sum(bits.transpose() * (2 ** np.arange(7,-1,-1)), 1)
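# For example, label 1 (00000001b) becomes 128 (10000000b) and label 2
# becomes 64, so consecutive object numbers map to widely separated
# colormap entries.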
recolored_labels = indexer[labels]
cm = matplotlib.cm.get_cmap(cpprefs.get_default_colormap())
cm.set_bad((0,0,0))
norm = matplotlib.colors.BoundaryNorm(range(256), 256)
img = ax.imshow(np.ma.array(recolored_labels, mask=(labels==0)),
cmap=cm, norm=norm)
if self.display_type == DT_COLOR_AND_NUMBER:
i,j = centers_of_labels(labels)
for n, x, y in zip(object_numbers, j, i):
if np.isnan(x) or np.isnan(y):
# This happens if there are missing labels
continue
ax.annotate(str(n), xy=(x,y),color='white',
arrowprops=dict(visible=False))
def run_distance(self, workspace, objects):
'''Track objects based on distance'''
old_i, old_j = self.get_saved_coordinates(workspace)
if len(old_i):
distances, (i,j) = distance_transform_edt(objects.segmented == 0,
return_indices=True)
#
# Look up the coordinates of the nearest new object (given by
# the transform i,j), then look up the label at that coordinate
# (objects.segmented[#,#])
#
new_object_numbers = objects.segmented[i[old_i, old_j],
j[old_i, old_j]]
#
# Mask out any objects at too great of a distance
#
new_object_numbers[distances[old_i, old_j] >
self.pixel_radius.value] = 0
#
# Do the same with the new centers and old objects
#
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
old_labels = self.get_saved_labels(workspace)
distances, (old_i,old_j) = distance_transform_edt(
old_labels == 0,
return_indices=True)
old_object_numbers = old_labels[old_i[i, j],
old_j[i, j]]
old_object_numbers[distances[i, j] > self.pixel_radius.value] = 0
self.map_objects(workspace,
new_object_numbers,
old_object_numbers,
i,j)
else:
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
self.set_saved_labels(workspace, objects.segmented)
def run_lapdistance(self, workspace, objects):
'''Track objects based on distance'''
m = workspace.measurements
old_i, old_j = self.get_saved_coordinates(workspace)
n_old = len(old_i)
#
# Automatically set the cost of birth and death above
# that of the largest allowable cost.
#
costBorn = costDie = self.radius_limit.max * 1.10
kalman_states = self.get_kalman_states(workspace)
if kalman_states is None:
if self.static_model:
kalman_states = [ cpfilter.static_kalman_model()]
else:
kalman_states = []
if self.velocity_model:
kalman_states.append(cpfilter.velocity_kalman_model())
areas = fix(scipy.ndimage.sum(
np.ones(objects.segmented.shape), objects.segmented,
np.arange(1, np.max(objects.segmented) + 1,dtype=np.int32)))
areas = areas.astype(int)
model_types = np.array(
[m for m, s in ((KM_NO_VEL, self.static_model),
(KM_VEL, self.velocity_model)) if s], int)
if n_old > 0:
new_i, new_j = centers_of_labels(objects.segmented)
n_new = len(new_i)
i,j = np.mgrid[0:n_old, 0:n_new]
##############################
#
# Kalman filter prediction
#
#
# We take the lowest cost among all possible models
#
minDist = np.ones((n_old, n_new)) * self.radius_limit.max
d = np.ones((n_old, n_new)) * np.inf
sd = np.zeros((n_old, n_new))
# The index of the Kalman filter used: -1 means not used
kalman_used = -np.ones((n_old, n_new), int)
for nkalman, kalman_state in enumerate(kalman_states):
assert isinstance(kalman_state, cpfilter.KalmanState)
obs = kalman_state.predicted_obs_vec
dk = np.sqrt((obs[i,0] - new_i[j])**2 +
(obs[i,1] - new_j[j])**2)
noise_sd = np.sqrt(np.sum(kalman_state.noise_var[:,0:2], 1))
radius = np.maximum(np.minimum(noise_sd * self.radius_std.value,
self.radius_limit.max),
self.radius_limit.min)
is_best = ((dk < d) & (dk < radius[:, np.newaxis]))
d[is_best] = dk[is_best]
minDist[is_best] = radius[i][is_best]
kalman_used[is_best] = nkalman
minDist = np.maximum(np.minimum(minDist, self.radius_limit.max),
self.radius_limit.min)
#
#############################
#
# Linear assignment setup
#
n = len(old_i)+len(new_i)
kk = np.zeros((n+10)*(n+10), np.int32)
first = np.zeros(n+10, np.int32)
cc = np.zeros((n+10)*(n+10), np.float)
t = np.argwhere((d < minDist))
x = np.sqrt((old_i[t[0:t.size, 0]]-new_i[t[0:t.size, 1]])**2 + (old_j[t[0:t.size, 0]]-new_j[t[0:t.size, 1]])**2)
t = t+1
t = np.column_stack((t, x))
a = np.arange(len(old_i))+2
x = np.searchsorted(t[0:(t.size/2),0], a)
a = np.arange(len(old_i))+1
b = np.arange(len(old_i))+len(new_i)+1
c = np.zeros(len(old_i))+costDie
b = np.column_stack((a, b, c))
t = np.insert(t, x, b, 0)
i,j = np.mgrid[0:len(new_i),0:len(old_i)+1]
i = i+len(old_i)+1
j = j+len(new_i)
j[0:len(new_i)+1,0] = i[0:len(new_i)+1,0]-len(old_i)
x = np.zeros((len(new_i),len(old_i)+1))
x[0:len(new_i)+1,0] = costBorn
i = i.flatten()
j = j.flatten()
x = x.flatten()
x = np.column_stack((i, j, x))
t = np.vstack((t, x))
# Tack 0 <-> 0 at the start because object #s start at 1
i = np.hstack([0,t[:,0].astype(int)])
j = np.hstack([0,t[:,1].astype(int)])
c = np.hstack([0,t[:,2]])
x, y = lapjv(i, j, c)
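# lapjv returns the assignment in both directions: x[k] is the column
# matched to row k and y[k] the row matched to column k. Rows and
# columns that land in the birth/death padding are zeroed out below
# so they read as "no match".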
a = np.argwhere(x > len(new_i))
b = np.argwhere(y >len(old_i))
x[a[0:len(a)]] = 0
y[b[0:len(b)]] = 0
a = np.arange(len(old_i))+1
b = np.arange(len(new_i))+1
new_object_numbers = x[a[0:len(a)]].astype(int)
old_object_numbers = y[b[0:len(b)]].astype(int)
###############################
#
# Kalman filter update
#
model_idx = np.zeros(len(old_object_numbers), int)
linking_distance = np.ones(len(old_object_numbers)) * np.NaN
standard_deviation = np.ones(len(old_object_numbers)) * np.NaN
model_type = np.ones(len(old_object_numbers), int) * KM_NONE
link_type = np.ones(len(old_object_numbers), int) * LT_NONE
mask = old_object_numbers > 0
old_idx = old_object_numbers - 1
model_idx[mask] =\
kalman_used[old_idx[mask], mask]
linking_distance[mask] = d[old_idx[mask], mask]
standard_deviation[mask] = \
linking_distance[mask] / noise_sd[old_idx[mask]]
model_type[mask] = model_types[model_idx[mask]]
link_type[mask] = LT_PHASE_1
#
# The measurement covariance is the square of the
# standard deviation of the measurement error. Assume
# that the measurement error comes from not knowing where
# the center is within the cell; the error is then
# proportional to the radius, and its square to the area.
#
measurement_variance = areas.astype(float) / np.pi
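# e.g., an object of area A is treated as a circle of radius
# sqrt(A / pi), so its measurement variance (squared radius) is A / pi.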
#
# Broadcast the measurement error into a diagonal matrix
#
r = (measurement_variance[:, np.newaxis, np.newaxis] *
np.eye(2)[np.newaxis,:,:])
new_kalman_states = []
for kalman_state in kalman_states:
#
# The process noise covariance is a diagonal of the
# state noise variance.
#
state_len = kalman_state.state_len
q = np.zeros((len(old_idx), state_len, state_len))
if np.any(mask):
#
# Broadcast into the diagonal
#
new_idx = np.arange(len(old_idx))[mask]
matching_idx = old_idx[new_idx]
i,j = np.mgrid[0:len(matching_idx),0:state_len]
q[new_idx[i], j, j] = \
kalman_state.noise_var[matching_idx[i],j]
new_kalman_state = cpfilter.kalman_filter(
kalman_state,
old_idx,
np.column_stack((new_i, new_j)),
q,r)
new_kalman_states.append(new_kalman_state)
self.set_kalman_states(workspace, new_kalman_states)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
self.map_objects(workspace,
new_object_numbers,
old_object_numbers,
i,j)
else:
i,j = centers_of_labels(objects.segmented)
count = len(i)
link_type = np.ones(count, int) * LT_NONE
model_type = np.ones(count, int) * KM_NONE
linking_distance = np.ones(count) * np.NaN
standard_deviation = np.ones(count) * np.NaN
#
# Initialize the kalman_state with the new objects
#
new_kalman_states = []
r = np.zeros((count, 2, 2))
for kalman_state in kalman_states:
q = np.zeros((count, kalman_state.state_len, kalman_state.state_len))
new_kalman_state = cpfilter.kalman_filter(
kalman_state, -np.ones(count),
np.column_stack((i,j)), q, r)
new_kalman_states.append(new_kalman_state)
self.set_kalman_states(workspace, new_kalman_states)
i = (i+.5).astype(int)
j = (j+.5).astype(int)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
m.add_measurement(self.object_name.value,
self.measurement_name(F_AREA),
areas)
m[self.object_name.value,
self.measurement_name(F_LINKING_DISTANCE)] = linking_distance
m[self.object_name.value,
self.measurement_name(F_STANDARD_DEVIATION)] = standard_deviation
m[self.object_name.value,
self.measurement_name(F_MOVEMENT_MODEL)] = model_type
m[self.object_name.value,
self.measurement_name(F_LINK_TYPE)] = link_type
self.save_kalman_measurements(workspace)
self.set_saved_labels(workspace, objects.segmented)
def get_kalman_models(self):
'''Return tuples of model and names of the vector elements'''
if self.static_model:
models = [ (F_STATIC_MODEL, (F_Y, F_X))]
else:
models = []
if self.velocity_model:
models.append((F_VELOCITY_MODEL, (F_Y, F_X, F_VY, F_VX)))
return models
def save_kalman_measurements(self, workspace):
'''Save the first-pass state_vec, state_cov and state_noise'''
m = workspace.measurements
object_name = self.object_name.value
for (model, elements), kalman_state in zip(
self.get_kalman_models(), self.get_kalman_states(workspace)):
assert isinstance(kalman_state, cpfilter.KalmanState)
nobjs = len(kalman_state.state_vec)
if nobjs > 0:
#
# Get the last state_noise entry for each object
#
# scipy.ndimage.maximum probably should return NaN if
# no index exists, but, in 0.8.0, returns 0. So stack
# a bunch of -1 values so every object will have a "-1"
# index.
last_idx = scipy.ndimage.maximum(
np.hstack((
-np.ones(nobjs),
np.arange(len(kalman_state.state_noise_idx)))),
np.hstack((
np.arange(nobjs), kalman_state.state_noise_idx)),
np.arange(nobjs))
last_idx = last_idx.astype(int)
for i, element in enumerate(elements):
#
# state_vec
#
mname = self.measurement_name(
kalman_feature(model, F_STATE, element))
values = np.zeros(0) if nobjs == 0 else kalman_state.state_vec[:,i]
m.add_measurement(object_name, mname, values)
#
# state_noise
#
mname = self.measurement_name(
kalman_feature(model, F_NOISE, element))
values = np.zeros(nobjs)
if nobjs > 0:
values[last_idx == -1] = np.NaN
values[last_idx > -1] = kalman_state.state_noise[last_idx[last_idx > -1], i]
m.add_measurement(object_name, mname, values)
#
# state_cov
#
for j, el2 in enumerate(elements):
mname = self.measurement_name(
kalman_feature(model, F_COV, element, el2))
values = kalman_state.state_cov[:, i, j]
m.add_measurement(object_name, mname, values)
def run_overlap(self, workspace, objects):
'''Track objects by maximum # of overlapping pixels'''
current_labels = objects.segmented
old_labels = self.get_saved_labels(workspace)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
if old_labels is None:
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
else:
mask = ((current_labels > 0) & (old_labels > 0))
cur_count = np.max(current_labels)
old_count = np.max(old_labels)
count = np.sum(mask)
if count == 0:
# There's no overlap.
self.map_objects(workspace,
np.zeros(old_count, int),
np.zeros(cur_count,int),
i,j)
else:
cur = current_labels[mask]
old = old_labels[mask]
histogram = scipy.sparse.coo_matrix(
(np.ones(count),(cur, old)),
shape=(cur_count+1,old_count+1)).toarray()
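# histogram[c, o] counts the pixels where current label c overlaps
# old label o; the row-wise and column-wise argmax below then pick
# the dominant overlap partner in each direction.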
old_of_new = np.argmax(histogram, 1)[1:]
new_of_old = np.argmax(histogram, 0)[1:]
#
# The cast here seems to be needed to make scipy.ndimage.sum
# work. See http://projects.scipy.org/numpy/ticket/1012
#
old_of_new = np.array(old_of_new, np.int16)
old_of_new = np.array(old_of_new, np.int32)
new_of_old = np.array(new_of_old, np.int16)
new_of_old = np.array(new_of_old, np.int32)
self.map_objects(workspace,
new_of_old,
old_of_new,
i,j)
self.set_saved_labels(workspace, current_labels)
def run_measurements(self, workspace, objects):
current_labels = objects.segmented
new_measurements = workspace.measurements.get_current_measurement(
self.object_name.value,
self.measurement.value)
old_measurements = self.get_saved_measurements(workspace)
old_labels = self.get_saved_labels(workspace)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
if old_labels is None:
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
else:
associations = associate_by_distance(old_labels, current_labels,
self.pixel_radius.value)
best_child = np.zeros(len(old_measurements), int)
best_parent = np.zeros(len(new_measurements), int)
best_child_measurement = (np.ones(len(old_measurements), int) *
np.finfo(float).max)
best_parent_measurement = (np.ones(len(new_measurements), int) *
np.finfo(float).max)
for old, new in associations:
diff = abs(old_measurements[old-1] - new_measurements[new-1])
if diff < best_child_measurement[old-1]:
best_child[old-1] = new
best_child_measurement[old-1] = diff
if diff < best_parent_measurement[new-1]:
best_parent[new-1] = old
best_parent_measurement[new-1] = diff
self.map_objects(workspace, best_child, best_parent, i,j)
self.set_saved_labels(workspace,current_labels)
self.set_saved_measurements(workspace, new_measurements)
def run_as_data_tool(self, workspace):
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
group_numbers = {}
for i in m.get_image_numbers():
group_number = m.get_measurement(cpmeas.IMAGE,
cpmeas.GROUP_NUMBER, i)
group_index = m.get_measurement(cpmeas.IMAGE,
cpmeas.GROUP_INDEX, i)
if ((not group_numbers.has_key(group_number)) or
(group_numbers[group_number][1] > group_index)):
group_numbers[group_number] = (i, group_index)
for group_number in sorted(group_numbers.keys()):
m.image_set_number = group_numbers[group_number][0]
self.post_group(workspace, {})
def flood(self, i, at, a, b, c, d, z):
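'''Recursively propagate the component label `at` through the
linkage arrays a, b, c and d, marking visited entries in z'''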
z[i] = at
if(a[i] != -1 and z[a[i]] == 0):
z = self.flood(a[i], at, a, b, c, d, z)
if(b[i] != -1 and z[b[i]] == 0):
z = self.flood(b[i], at, a, b, c, d, z)
if(c[i] != -1 and z[c[i]] == 0):
z = self.flood(c[i], at, a, b, c, d, z)
if(d[i] != -1 and z[d[i]] == 0):
z = self.flood(d[i], at, a, b, c, d, z)
return z
def is_aggregation_module(self):
'''We connect objects across imagesets within a group = aggregation'''
return True
def post_group(self, workspace, grouping):
# If any tracking method other than LAP, recalculate measurements
# (Really, only the final age needs to be re-done)
image_numbers = self.get_group_image_numbers(workspace)
if self.tracking_method != TM_LAP:
m = workspace.measurements
assert(isinstance(m, cpmeas.Measurements))
self.recalculate_group(workspace, image_numbers)
return
self.recalculate_kalman_filters(workspace, image_numbers)
if (not self.wants_second_phase):
return
gap_cost = float(self.gap_cost.value)
split_alternative_cost = float(self.split_cost.value) / 2
merge_alternative_cost = float(self.merge_cost.value)
mitosis_alternative_cost = float(self.mitosis_cost.value)
max_gap_score = self.max_gap_score.value
max_merge_score = self.max_merge_score.value
max_split_score = self.max_split_score.value / 2 # to match legacy
max_frame_difference = self.max_frame_distance.value
m = workspace.measurements
assert(isinstance(m, cpmeas.Measurements))
image_numbers = self.get_group_image_numbers(workspace)
object_name = self.object_name.value
label, object_numbers, a, b, Area, \
parent_object_numbers, parent_image_numbers = [
[m.get_measurement(object_name, feature, i).astype(mtype)
for i in image_numbers]
for feature, mtype in (
(self.measurement_name(F_LABEL), int),
(cpmeas.OBJECT_NUMBER, int),
(M_LOCATION_CENTER_X, float),
(M_LOCATION_CENTER_Y, float),
(self.measurement_name(F_AREA), float),
(self.measurement_name(F_PARENT_OBJECT_NUMBER), int),
(self.measurement_name(F_PARENT_IMAGE_NUMBER), int)
)]
group_indices, new_object_count, lost_object_count, merge_count, \
split_count = [
np.array([m.get_measurement(cpmeas.IMAGE, feature, i)
for i in image_numbers], int)
for feature in (cpmeas.GROUP_INDEX,
self.image_measurement_name(F_NEW_OBJECT_COUNT),
self.image_measurement_name(F_LOST_OBJECT_COUNT),
self.image_measurement_name(F_MERGE_COUNT),
self.image_measurement_name(F_SPLIT_COUNT))]
#
# Map image number to group index and vice versa
#
image_number_group_index = np.zeros(np.max(image_numbers) + 1, int)
image_number_group_index[image_numbers] = np.array(group_indices, int)
group_index_image_number = np.zeros(np.max(group_indices) + 1, int)
group_index_image_number[group_indices] = image_numbers
if all([len(lll) == 0 for lll in label]):
return # Nothing to do
# Sets up the arrays F, L, and P
#F is an array of all the cells that are the starts of segments
# F[:, :2] are the coordinates
# F[:, 2] is the image index
# F[:, 3] is the object index
# F[:, 4] is the object number
# F[:, 5] is the label
# F[:, 6] is the area
# F[:, 7] is the index into P
#L is the ends
#P includes all cells
X = 0
Y = 1
IIDX = 2
OIIDX = 3
ONIDX = 4
LIDX = 5
AIDX = 6
PIDX = 7
P = np.vstack([
np.column_stack((x, y, np.ones(len(x)) * i, np.arange(len(x)),
o, l, area, np.zeros(len(x))))
for i, (x, y, o, l, area)
in enumerate(zip(a, b, object_numbers, label, Area))])
count_per_label = np.bincount(P[:, LIDX].astype(int))
idx = np.hstack([0, np.cumsum(count_per_label)])
unique_label = np.unique(P[:, LIDX].astype(int))
order = np.lexsort((P[:, OIIDX], P[:, IIDX], P[:, LIDX]))
P = P[order, :]
P[:, PIDX] = np.arange(len(P))
F = P[idx[unique_label], :]
L = P[idx[unique_label + 1] - 1, :]
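# After the lexsort, idx[k] is the offset of the first point with
# label k, so idx[unique_label] picks each track's first point (F)
# and idx[unique_label + 1] - 1 its last (L).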
# Creates P1 and P2, which is P without the starts and ends
# of segments respectively, representing possible
# points of merges and splits respectively
P1 = np.delete(P, idx[:-1], 0)
P2 = np.delete(P, idx[1:] - 1, 0)
##################################################
#
# Addresses of supplementary nodes:
#
# The LAP array is composed of the following ranges
#
# Count | node type
# ------------------
# T | segment starts and ends
# T | gaps
# OB | split starts
# OB | merge ends
# M | mitoses
#
# T = # tracks
# OB = # of objects that can serve as merge or split points
# M = # of mitoses
#
# The graph:
#
# Gap Alternatives (in other words, do nothing)
# ----------------------------------------------
# End[i] <----> Gap alternative[i]
# Gap alternative[i] <----> Start[i]
# Split[i] <----> Split[i]
# Merge[j] <----> Merge[j]
# Mitosis[i] <----> Mitosis[i]
#
#
# Bridge gaps:
# -----------------------------------------------
#
# End[i] <---> Start[j]
# Gap alternative[i] <----> Gap alternative[j]
#
# Splits
# -----------------------------------------------
#
# Split[i] <----> Start[j]
# Gap alternative[j] <----> Split[i]
#
# Merges
# -----------------------------------------------
# End[i] <----> Merge[j]
# Merge[j] <----> Gap alternative[i]
#
# Mitoses
# -----------------------------------------------
# The mitosis model is somewhat imperfect. The mitosis
# caps the parent and makes it unavailable as a candidate
# for a gap closing. In the best case, there is only one
# mitosis candidate for the left and right child and
# the left and right child are connected to gap alternatives,
# but there may be competing splits, gap closings or
# other mitoses.
#
# We take a greedy approach, ordering the mitoses by their
# scores and fulfilling them. After processing the mitoses,
# we run LAP again, keeping only the parent nodes of untaken
# mitoses and child nodes connected to gap alternatives
#
# End[i] <----> Mitosis[j]
#
##################################################
end_nodes = []
start_nodes = []
scores = []
#
# The offsets and lengths of the start/end node ranges
#
start_end_off = 0
start_end_len = len(L)
gap_off = start_end_end = start_end_len
gap_end = gap_off + start_end_len
#-------------------------------------------
#
# Null model (do nothing)
#
#-------------------------------------------
for first, second in ((end_nodes, start_nodes),
(start_nodes, end_nodes)):
first.append(np.arange(start_end_len))
second.append(np.arange(start_end_len) + gap_off)
scores.append(np.ones(start_end_len) * gap_cost/2)
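# Net effect: leaving a track end and a track start both unmatched
# costs gap_cost in total (gap_cost/2 on each side), which is the
# price every competing hypothesis must beat.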
#------------------------------------------
#
# Gap-closing model
#
#------------------------------------------
#
# Create the edges between ends and starts.
# The edge weight is the gap pair cost.
#
a, gap_scores = self.get_gap_pair_scores(F, L, max_frame_difference)
# filter by max gap score
mask = gap_scores <= max_gap_score
if np.sum(mask) > 0:
a, gap_scores = a[mask], gap_scores[mask]
end_nodes.append(a[:, 0])
start_nodes.append(a[:, 1])
scores.append(gap_scores)
#
# Hook the gap alternative ends of the starts to
# the gap alternative starts of the ends
#
end_nodes.append(a[:, 1] + gap_off)
start_nodes.append(a[:, 0] + gap_off)
scores.append(np.zeros(len(gap_scores)))
#---------------------------------------------------
#
# Merge model
#
#---------------------------------------------------
#
# The first column of z is the index of the track that ends. The second
# is the index into P2 of the object to be merged into
#
merge_off = gap_end
if len(P1) > 0:
# Do the initial winnowing in chunks of 10 million pairs
lchunk_size = 10000000 / len(P1)
chunks = []
for lstart in range(0, len(L), lchunk_size):
lend = min(len(L), lstart+lchunk_size)
merge_p1idx, merge_lidx = \
[_.flatten() for _ in np.mgrid[0:len(P1), lstart:lend]]
z = (P1[merge_p1idx, IIDX] - L[merge_lidx, IIDX]).astype(np.int32)
mask = (z <= max_frame_difference) & (z > 0)
if np.sum(mask) > 0:
chunks.append([_[mask] for _ in merge_p1idx, merge_lidx, z])
if len(chunks) > 0:
merge_p1idx, merge_lidx, z = [
np.hstack([_[i] for _ in chunks]) for i in range(3)]
else:
merge_p1idx = merge_lidx = z = np.zeros(0, np.int32)
else:
merge_p1idx = merge_lidx = z = np.zeros(0, np.int32)
if len(z) > 0:
# Calculate penalty = distance * area penalty
AreaLast = L[merge_lidx, AIDX]
AreaBeforeMerge = P[P1[merge_p1idx, PIDX].astype(int) - 1, AIDX]
AreaAtMerge = P1[merge_p1idx, AIDX]
rho = self.calculate_area_penalty(
AreaLast + AreaBeforeMerge, AreaAtMerge)
d = np.sqrt(np.sum((L[merge_lidx, :2]-P1[merge_p1idx, :2])**2, 1))
merge_scores = d * rho
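# The merge score is the end-to-merge-point distance scaled by the
# area penalty: cheap when the merged area is close to the sum of
# the two contributing areas.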
mask = merge_scores <= max_merge_score
merge_p1idx, merge_lidx, merge_scores = [
_[mask] for _ in merge_p1idx, merge_lidx, merge_scores]
merge_len = np.sum(mask)
if merge_len > 0:
#
# The end nodes are the ends being merged to the intermediates
# The start nodes are the intermediates and have node #s
# that start at merge_off
#
end_nodes.append(merge_lidx)
start_nodes.append(merge_off + np.arange(merge_len))
scores.append(merge_scores)
#
# Hook the gap alternative starts for the ends to
# the merge nodes
#
end_nodes.append(merge_off + np.arange(merge_len))
start_nodes.append(merge_lidx + gap_off)
scores.append(np.ones(merge_len) * gap_cost / 2)
#
# The alternative hypothesis is represented by merges hooked
# to merges
#
end_nodes.append(merge_off + np.arange(merge_len))
start_nodes.append(merge_off + np.arange(merge_len))
scores.append(np.ones(merge_len) * merge_alternative_cost)
else:
merge_len = 0
merge_end = merge_off+merge_len
#------------------------------------------------------
#
# Split model
#
#------------------------------------------------------
split_off = merge_end
if len(P2) > 0:
lchunk_size = 10000000 / len(P2)
chunks = []
for fstart in range(0, len(L), lchunk_size):
fend = min(len(L), fstart+lchunk_size)
split_p2idx, split_fidx = \
[_.flatten() for _ in np.mgrid[0:len(P2), fstart:fend]]
z = (F[split_fidx, IIDX] - P2[split_p2idx, IIDX]).astype(np.int32)
mask = (z <= max_frame_difference) & (z > 0)
if np.sum(mask) > 0:
chunks.append(
[_[mask] for _ in split_p2idx, split_fidx, z])
if len(chunks) > 0:
split_p2idx, split_fidx, z = [
np.hstack([_[i] for _ in chunks]) for i in range(3)]
else:
split_p2idx = split_fidx = z = np.zeros(0, np.int32)
else:
split_p2idx = split_fidx = z = np.zeros(0, int)
if len(z) > 0:
AreaFirst = F[split_fidx, AIDX]
AreaAfterSplit = P[ P2[split_p2idx, PIDX].astype(int) + 1, AIDX]
AreaAtSplit = P2[split_p2idx, AIDX]
d = np.sqrt(np.sum((F[split_fidx, :2] - P2[split_p2idx, :2])**2, 1))
rho = self.calculate_area_penalty(
AreaFirst + AreaAfterSplit, AreaAtSplit)
split_scores = d * rho
mask = (split_scores <= max_split_score)
split_p2idx, split_fidx, split_scores = \
[_[mask] for _ in split_p2idx, split_fidx, split_scores]
split_len = np.sum(mask)
if split_len > 0:
#
# The end nodes are the intermediates (starting at split_off)
# The start nodes are the F
#
end_nodes.append(np.arange(split_len) + split_off)
start_nodes.append(split_fidx)
scores.append(split_scores)
#
# Hook the alternate ends to the split starts
#
end_nodes.append(split_fidx + gap_off)
start_nodes.append(np.arange(split_len) + split_off)
scores.append(np.ones(split_len) * gap_cost/2)
#
# The alternate hypothesis is split nodes hooked to themselves
#
end_nodes.append(np.arange(split_len) + split_off)
start_nodes.append(np.arange(split_len) + split_off)
scores.append(np.ones(split_len) * split_alternative_cost)
else:
split_len = 0
split_end = split_off + split_len
#----------------------------------------------------------
#
# Mitosis model
#
#----------------------------------------------------------
mitoses, mitosis_scores = self.get_mitotic_triple_scores(F, L)
n_mitoses = len(mitosis_scores)
if n_mitoses > 0:
order = np.argsort(mitosis_scores)
mitoses, mitosis_scores = mitoses[order], mitosis_scores[order]
MDLIDX = 0 # index of left daughter
MDRIDX = 1 # index of right daughter
MPIDX = 2 # index of parent
mitoses_parent_lidx = mitoses[:, MPIDX]
mitoses_left_child_findx = mitoses[:, MDLIDX]
mitoses_right_child_findx = mitoses[:, MDRIDX]
#
# Create the ranges for mitoses
#
mitosis_off = split_end
mitosis_len = n_mitoses
mitosis_end = mitosis_off + mitosis_len
if n_mitoses > 0:
#
# Taking the mitosis score will cost us the parent gap at least.
#
end_nodes.append(mitoses_parent_lidx)
start_nodes.append(np.arange(n_mitoses) + mitosis_off)
scores.append(mitosis_scores)
#
# Balance the mitosis against the gap alternative.
#
end_nodes.append(np.arange(n_mitoses) + mitosis_off)
start_nodes.append(mitoses_parent_lidx + gap_off)
scores.append(np.ones(n_mitoses) * gap_cost / 2)
#
# The alternative hypothesis links mitosis to mitosis
# We charge the alternative hypothesis the mitosis_alternative
# cost.
#
end_nodes.append(np.arange(n_mitoses) + mitosis_off)
start_nodes.append(np.arange(n_mitoses) + mitosis_off)
scores.append(np.ones(n_mitoses) * mitosis_alternative_cost)
i = np.hstack(end_nodes)
j = np.hstack(start_nodes)
c = scores = np.hstack(scores)
#-------------------------------------------------------
#
# LAP Processing # 1
#
x, y = lapjv(i, j, c)
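# x[k] is the start-side node that end-side node k was assigned to,
# and y[k] the reverse; a supplementary node assigned to itself means
# that hypothesis (merge, split or mitosis) was not taken.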
score_matrix = scipy.sparse.coo.coo_matrix((c, (i, j))).tocsr()
#---------------------------
#
# Useful debugging diagnostics
#
def desc(node):
'''Describe a node for graphviz'''
fl = F
if node < start_end_end:
fmt = "N%d:%d"
idx = node
elif node < gap_end:
fmt = "G%d:%d"
idx = node - gap_off
elif node < merge_end:
fmt = "M%d:%d"
idx = merge_p1idx[node - merge_off]
fl = P1
elif node < split_end:
fmt = "S%d:%d"
idx = split_p2idx[node - split_off]
fl = P2
else:
mitosis = mitoses[node - mitosis_off]
(lin, lon), (rin, ron), (pin, pon) = [
(image_numbers[fl[idx, IIDX]], fl[idx, ONIDX])
for idx, fl in zip(mitosis, (F, F, L))]
return "n%d[label=\"MIT%d:%d->%d:%d+%d:%d\"]" % (
node, pin, pon, lin, lon, rin, ron)
return "n%d[label=\"%s\"]" % (
node, fmt % (image_numbers[int(fl[idx, IIDX])],
int(fl[idx, ONIDX])))
def write_graph(path, x, y):
'''Write a graphviz DOT file'''
with open(path, "w") as fd:
fd.write("digraph trackobjects {\n")
graph_idx = np.where(
(x != np.arange(len(x))) & (y != np.arange(len(y))))[0]
for idx in graph_idx:
fd.write(desc(idx)+";\n")
for idx in graph_idx:
fd.write("n%d -> n%d [label=%0.2f];\n" %
(idx, x[idx], score_matrix[idx, x[idx]]))
fd.write("}\n")
#
#--------------------------------------------------------
#
# Mitosis fixup.
#
good_mitoses = np.zeros(len(mitoses), bool)
for midx, (lidx, ridx, pidx) in enumerate(mitoses):
#
# If the parent was not accepted or either of the children
# have been assigned to a mitosis, skip
#
if x[pidx] == midx + mitosis_off and not \
any([y[idx] >= mitosis_off and y[idx] < mitosis_end
for idx in lidx, ridx]):
alt_score = sum([score_matrix[y[idx], idx] for idx in lidx, ridx])
#
# Taking the alt score would cost us a mitosis alternative
# cost, but would remove half of a gap alternative.
#
alt_score += mitosis_alternative_cost - gap_cost / 2
#
# Alternatively, taking the mitosis score would cost us
# the gap alternatives of the left and right.
#
if alt_score > mitosis_scores[midx] + gap_cost:
for idx in lidx, ridx:
old_y = y[idx]
if old_y < start_end_end:
x[old_y] = old_y + gap_off
else:
x[old_y] = old_y
y[lidx] = midx + mitosis_off
y[ridx] = midx + mitosis_off
good_mitoses[midx] = True
continue
x[pidx] = pidx + gap_off
y[pidx+gap_off] = pidx
x[midx+mitosis_off] = midx+mitosis_off
y[midx+mitosis_off] = midx+mitosis_off
if np.sum(good_mitoses) == 0:
good_mitoses = np.zeros((0, 3), int)
good_mitosis_scores = np.zeros(0)
else:
good_mitoses, good_mitosis_scores = \
mitoses[good_mitoses], mitosis_scores[good_mitoses]
#
#-------------------------------------
#
# Rerun to see if reverted mitoses could close gaps.
#
if np.any(x[mitoses[:, MPIDX]] != np.arange(len(mitoses)) + mitosis_off):
rerun_end = np.ones(mitosis_end, bool)
rerun_start = np.ones(mitosis_end, bool)
rerun_end[:start_end_end] = x[:start_end_end] < mitosis_off
rerun_end[mitosis_off:] = False
rerun_start[:start_end_end] = y[:start_end_end] < mitosis_off
rerun_start[mitosis_off:] = False
mask = rerun_end[i] & rerun_start[j]
i, j, c = i[mask], j[mask], c[mask]
i = np.hstack((i,
good_mitoses[:, MPIDX],
good_mitoses[:, MDLIDX] + gap_off,
good_mitoses[:, MDRIDX] + gap_off))
j = np.hstack((j,
good_mitoses[:, MPIDX] + gap_off,
good_mitoses[:, MDLIDX],
good_mitoses[:, MDRIDX]))
c = np.hstack((c, np.zeros(len(good_mitoses) *3)))
x, y = lapjv(i, j, c)
#
# Fixups to measurements
#
# fixup[N] gets the fixup dictionary for image set, N
#
# fixup[N][FEATURE] gets a tuple of a list of object numbers and
# values.
#
fixups = {}
def add_fixup(feature, image_number, object_number, value):
if image_number not in fixups:
fixups[image_number] = { feature: ([object_number], [value])}
else:
fid = fixups[image_number]
if feature not in fid:
fid[feature] = ([object_number], [value])
else:
object_numbers, values = fid[feature]
object_numbers.append(object_number)
values.append(value)
# Attaches segments to one another if they are matched through the LAP
a = -np.ones(len(F)+1, dtype="int32")
b = -np.ones(len(F)+1, dtype="int32")
c = -np.ones(len(F)+1, dtype="int32")
d = -np.ones(len(F)+1, dtype="int32")
z = np.zeros(len(F)+1, dtype="int32")
# relationships is a list of parent-child relationships. Each element
# is a two-tuple of parent and child and each parent/child is a
# two-tuple of image index and object number:
#
# [((<parent-image-index>, <parent-object-number>),
# (<child-image-index>, <child-object-number>))...]
#
relationships = []
#
# Starts can be linked to the following:
# ends (start_end_off <= j < start_end_off+start_end_len)
# gap alternatives (gap_off <= j < merge_off+merge_len)
# splits (split_off <= j < split_off+split_len)
# mitosis left (mitosis_left_child_off <= j < ....)
# mitosis right (mitosis_right_child_off <= j < ....)
#
# Discard starts linked to self = "do nothing"
#
start_idxs = np.where(
y[:start_end_end] != np.arange(gap_off, gap_end))[0]
for i in start_idxs:
my_image_index = int(F[i, IIDX])
my_image_number = image_numbers[my_image_index]
my_object_index = int(F[i, OIIDX])
my_object_number = int(F[i, ONIDX])
yi = y[i]
if yi < gap_end:
#-------------------------------
#
# GAP
#
# y[i] gives index of last hooked to first
#
b[i+1] = yi+1
c[yi+1] = i+1
#
# Hook our parent image/object number to found parent
#
parent_image_index = int(L[yi, IIDX])
parent_object_number = int(L[yi, ONIDX])
parent_image_number = image_numbers[parent_image_index]
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_GAP)
add_fixup(F_GAP_LENGTH, my_image_number, my_object_number,
my_image_index - parent_image_index)
add_fixup(F_GAP_SCORE, my_image_number, my_object_number,
scores[yi])
#
# One less new object
#
new_object_count[my_image_index] -= 1
#
# One less lost object (the lost object is recorded in
# the image set after the parent)
#
lost_object_count[parent_image_index + 1] -= 1
logger.debug("Gap closing: %d:%d to %d:%d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[my_image_index],
object_numbers[my_image_index][my_object_index],
score_matrix[yi, i]))
elif yi >= split_off and yi < split_end:
#------------------------------------
#
# SPLIT
#
p2_idx = split_p2idx[yi - split_off]
parent_image_index = int(P2[p2_idx, IIDX])
parent_image_number = image_numbers[parent_image_index]
parent_object_number = int(P2[p2_idx, ONIDX])
b[i+1] = P2[p2_idx, LIDX]
c[b[i+1]] = i+1
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_SPLIT)
add_fixup(F_SPLIT_SCORE, my_image_number, my_object_number,
split_scores[yi - split_off])
#
# one less new object
#
new_object_count[my_image_index] -= 1
#
# one more split object
#
split_count[my_image_index] += 1
logger.debug("split: %d:%d to %d:%d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[my_image_index],
object_numbers[my_image_index][my_object_index],
split_scores[y[i] - split_off]))
#---------------------
#
# Process ends (parents)
#
end_idxs = np.where(
x[:start_end_end] != np.arange(gap_off, gap_end))[0]
for i in end_idxs:
if(x[i] < start_end_end):
a[i+1] = x[i]+1
d[a[i+1]] = i+1
elif(x[i] >= merge_off and x[i] < merge_end):
#-------------------
#
# MERGE
#
# Handle merged objects. A merge hooks the end (L) of
# a segment (the parent) to a gap alternative in P1 (the child)
#
p1_idx = merge_p1idx[x[i]-merge_off]
a[i+1] = P1[p1_idx, LIDX]
d[a[i+1]] = i+1
parent_image_index = int(L[i, IIDX])
parent_object_number = int(L[i, ONIDX])
parent_image_number = image_numbers[parent_image_index]
child_image_index = int(P1[p1_idx, IIDX])
child_object_number = int(P1[p1_idx, ONIDX])
relationships.append(
((parent_image_index, parent_object_number),
(child_image_index, child_object_number)))
add_fixup(F_MERGE_SCORE, parent_image_number,
parent_object_number,
merge_scores[x[i] - merge_off])
lost_object_count[parent_image_index+1] -= 1
merge_count[child_image_index] += 1
logger.debug("Merge: %d:%d to %d:%d, score=%f" %
(image_numbers[parent_image_index]
, parent_object_number,
image_numbers[child_image_index],
child_object_number,
merge_scores[x[i] - merge_off]))
for (mlidx, mridx, mpidx), score in\
zip(good_mitoses, good_mitosis_scores):
#
# The parent is attached, one less lost object
#
lost_object_count[int(L[mpidx, IIDX])+1] -= 1
a[mpidx+1] = F[mlidx, LIDX]
d[a[mpidx+1]] = mpidx+1
parent_image_index = int(L[mpidx, IIDX])
parent_image_number = image_numbers[parent_image_index]
parent_object_number = int(L[mpidx, ONIDX])
split_count[int(F[mlidx, IIDX])] += 1
for idx in mlidx, mridx:
#--------------------------------------
#
# MITOSIS child
#
my_image_index = int(F[idx, IIDX])
my_image_number = image_numbers[my_image_index]
my_object_index = int(F[idx, OIIDX])
my_object_number = int(F[idx, ONIDX])
b[idx+1] = int(L[mpidx, LIDX])
c[b[idx+1]] = idx+1
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_MITOSIS)
add_fixup(F_MITOSIS_SCORE, my_image_number, my_object_number,
score)
new_object_count[my_image_index] -= 1
logger.debug("Mitosis: %d:%d to %d:%d and %d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[F[mlidx, IIDX]],
F[mlidx, ONIDX],
F[mridx, ONIDX],
score))
#
# At this point a gives the label # of the track that connects
# to the end of the indexed track. b gives the label # of the
# track that connects to the start of the indexed track.
# We convert these into edges.
#
# aa and bb are the vertices of an edge list and aa[i],bb[i]
# make up an edge
#
connect_mask = (a != -1)
aa = a[connect_mask]
bb = np.argwhere(connect_mask).flatten()
connect_mask = (b != -1)
aa = np.hstack((aa, b[connect_mask]))
bb = np.hstack((bb, np.argwhere(connect_mask).flatten()))
#
# Connect self to self for indices that do not connect
#
disconnect_mask = (a == -1) & (b == -1)
aa = np.hstack((aa, np.argwhere(disconnect_mask).flatten()))
bb = np.hstack((bb, np.argwhere(disconnect_mask).flatten()))
z = all_connected_components(aa, bb)
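# z maps every original track label to its connected-component label,
# so all segments joined by gaps, splits, merges or mitoses now share
# a single track label.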
newlabel = [z[label[i]] for i in range(len(label))]
#
# Replace the labels for the image sets in the group
# inside the list retrieved from the measurements
#
m_link_type = self.measurement_name(F_LINK_TYPE)
for i, image_number in enumerate(image_numbers):
n_objects = len(newlabel[i])
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_LOST_OBJECT_COUNT),
lost_object_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_NEW_OBJECT_COUNT),
new_object_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_MERGE_COUNT),
merge_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_SPLIT_COUNT),
split_count[i], True, image_number)
if n_objects == 0:
continue
m.add_measurement(object_name,
self.measurement_name(F_LABEL),
newlabel[i], can_overwrite = True,
image_set_number = image_number)
m.add_measurement(object_name,
self.measurement_name(F_PARENT_IMAGE_NUMBER),
parent_image_numbers[i],
can_overwrite = True,
image_set_number = image_number)
m.add_measurement(object_name,
self.measurement_name(F_PARENT_OBJECT_NUMBER),
parent_object_numbers[i],
can_overwrite = True,
image_set_number = image_number)
is_fixups = fixups.get(image_number, None)
if (is_fixups is not None) and (F_LINK_TYPE in is_fixups):
link_types = m[object_name, m_link_type, image_number]
object_numbers, values = [
np.array(_) for _ in is_fixups[F_LINK_TYPE]]
link_types[object_numbers-1] = values
m[object_name, m_link_type, image_number] = link_types
for feature, data_type in (
(F_GAP_LENGTH, np.int32),
(F_GAP_SCORE, np.float32),
(F_MERGE_SCORE, np.float32),
(F_SPLIT_SCORE, np.float32),
(F_MITOSIS_SCORE, np.float32)):
if data_type == np.int32:
values = np.zeros(n_objects, data_type)
else:
values = np.ones(n_objects, data_type) * np.NaN
if (is_fixups is not None) and (feature in is_fixups):
object_numbers, fixup_values = [
np.array(_) for _ in is_fixups[feature]]
values[object_numbers-1] = fixup_values.astype(data_type)
m[object_name, self.measurement_name(feature), image_number] =\
values
#
# Write the relationships.
#
if len(relationships) > 0:
relationships = np.array(relationships)
parent_image_numbers = image_numbers[relationships[:, 0, 0]]
child_image_numbers = image_numbers[relationships[:, 1, 0]]
parent_object_numbers = relationships[:, 0, 1]
child_object_numbers = relationships[:, 1, 1]
m.add_relate_measurement(
self.module_num, R_PARENT, object_name, object_name,
parent_image_numbers, parent_object_numbers,
child_image_numbers, child_object_numbers)
self.recalculate_group(workspace, image_numbers)
def calculate_area_penalty(self, a1, a2):
'''Calculate a penalty for areas that don't match
Ideally, area should be conserved while tracking. We divide the larger
of the two by the smaller of the two to get the area penalty
which is then multiplied by the distance.
Note that this differs from Jaqaman eqn 5 which has an asymmetric
penalty (sqrt((a1 + a2) / b) for a1+a2 > b and b / (a1 + a2) for
a1+a2 < b). I can't think of a good reason why they should be
asymmetric.
'''
result = a1 / a2
result[result < 1] = 1/result[result < 1]
result[np.isnan(result)] = np.inf
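# e.g., areas 100 and 50 give a penalty of 2.0 in either order, while
# perfectly conserved area gives 1.0 and leaves the distance unscaled.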
return result
def get_gap_pair_scores(self, F, L, max_gap):
'''Compute scores for matching last frame with first to close gaps
F - an N x 3 (or more) array giving X, Y and frame # of the first object
in each track
L - an N x 3 (or more) array giving X, Y and frame # of the last object
in each track
max_gap - the maximum allowed # of frames between the last and first
Returns: an M x 2 array of M pairs where the first element of the array
is the index of the track whose last frame is to be joined to
the track whose index is the second element of the array.
an M-element vector of scores.
'''
#
# There have to be at least two things to match
#
nothing = (np.zeros((0, 2), int), np.zeros(0))
if F.shape[0] <= 1:
return nothing
X = 0
Y = 1
IIDX = 2
AIDX = 6
#
# Create an indexing ordered by the last frame index and by the first
#
i = np.arange(len(F))
j = np.arange(len(F))
f_iidx = F[:, IIDX].astype(int)
l_iidx = L[:, IIDX].astype(int)
i_lorder = np.lexsort((i, l_iidx))
j_forder = np.lexsort((j, f_iidx))
i = i[i_lorder]
j = j[j_forder]
i_counts = np.bincount(l_iidx)
j_counts = np.bincount(f_iidx)
i_indexes = Indexes([i_counts])
j_indexes = Indexes([j_counts])
#
# The lowest possible F for each L is 1+L
#
j_self = np.minimum(np.arange(len(i_counts)),
len(j_counts) - 1)
j_first_idx = j_indexes.fwd_idx[j_self] + j_counts[j_self]
#
# The highest possible F for each L is L + max_gap. j_end is the
# first illegal value... just past that.
#
j_last = np.minimum(np.arange(len(i_counts)) + max_gap,
len(j_counts)-1)
j_end_idx = j_indexes.fwd_idx[j_last] + j_counts[j_last]
#
# Structure the i and j block ranges
#
ij_counts = j_end_idx - j_first_idx
ij_indexes = Indexes([i_counts, ij_counts])
if ij_indexes.length == 0:
return nothing
#
# The index into L of the first element of the pair
#
ai = i[i_indexes.fwd_idx[ij_indexes.rev_idx] + ij_indexes.idx[0]]
#
# The index into F of the second element of the pair
#
aj = j[j_first_idx[ij_indexes.rev_idx] + ij_indexes.idx[1]]
#
# The distances
#
d = np.sqrt((L[ai, X] - F[aj, X]) ** 2 +
(L[ai, Y] - F[aj, Y]) ** 2)
#
# Rho... the area penalty
#
rho = self.calculate_area_penalty(L[ai, AIDX], F[aj, AIDX])
return np.column_stack((ai, aj)), d * rho
def get_mitotic_triple_scores(self, F, L):
'''Compute scores for matching a parent to two daughters
F - an N x 3 (or more) array giving X, Y and frame # of the first object
in each track
L - an N x 3 (or more) array giving X, Y and frame # of the last object
in each track
Returns: an M x 3 array of M triples where the first two columns are
the indices of the daughters in the F array and the third
column is the index of the parent cell in the L array
an M-element vector of scores: the distance of the parent
from the daughters' midpoint, scaled by the area penalty
'''
X = 0
Y = 1
IIDX = 2
AIDX = 6
if len(F) <= 1:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
max_distance = self.mitosis_max_distance.value
# Find all daughter pairs within same frame
i, j = np.where(F[:, np.newaxis, IIDX] == F[np.newaxis, :, IIDX])
i, j = i[i < j], j[i < j] # get rid of duplicates and self-compares
#
# Calculate the maximum allowed distance before one or the other
# daughter is farther away than the maximum allowed from the center
#
# That's the max_distance * 2 minus the distance
#
dmax = max_distance * 2 - np.sqrt(np.sum((F[i, :2] - F[j, :2]) ** 2, 1))
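# e.g., with mitosis_max_distance = 30 and daughters 40 px apart,
# dmax = 2 * 30 - 40 = 20, so the pair is kept below only if the
# parent lies within 20 px of the daughters' midpoint.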
mask = dmax >= 0
i, j = i[mask], j[mask]
if len(i) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
center_x = (F[i, X] + F[j, X]) / 2
center_y = (F[i, Y] + F[j, Y]) / 2
frame = F[i, IIDX]
# Find all parent-daughter pairs where the parent
# is in the frame previous to the daughters
ij, k = [_.flatten() for _ in np.mgrid[0:len(i), 0:len(L)]]
mask = F[i[ij], IIDX] == L[k, IIDX]+1
ij, k = ij[mask], k[mask]
if len(ij) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
d = np.sqrt((center_x[ij] - L[k, X]) ** 2 +
(center_y[ij] - L[k, Y]) ** 2)
mask = d <= dmax[ij]
ij, k, d = ij[mask], k[mask], d[mask]
if len(ij) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
rho = self.calculate_area_penalty(
F[i[ij], AIDX] + F[j[ij], AIDX], L[k, AIDX])
return np.column_stack((i[ij], j[ij], k)), d * rho
def recalculate_group(self, workspace, image_numbers):
'''Recalculate all measurements once post_group has run
workspace - the workspace being operated on
image_numbers - the image numbers of the group's image sets' measurements
'''
m = workspace.measurements
object_name = self.object_name.value
assert isinstance(m, cpmeas.Measurements)
image_index = np.zeros(np.max(image_numbers)+1, int)
image_index[image_numbers] = np.arange(len(image_numbers))
image_index[0] = -1
index_to_imgnum = np.array(image_numbers)
parent_image_numbers, parent_object_numbers = [
[ m.get_measurement(
object_name, self.measurement_name(feature), image_number)
for image_number in image_numbers]
for feature in (F_PARENT_IMAGE_NUMBER, F_PARENT_OBJECT_NUMBER)]
#
# Do all_connected_components on the graph of parents to find groups
# that share the same ancestor
#
count = np.array([len(x) for x in parent_image_numbers])
idx = Indexes(count)
if idx.length == 0:
# Nothing to do
return
parent_image_numbers = np.hstack(parent_image_numbers).astype(int)
parent_object_numbers = np.hstack(parent_object_numbers).astype(int)
parent_image_indexes = image_index[parent_image_numbers]
parent_object_indexes = parent_object_numbers - 1
i = np.arange(idx.length)
i = i[parent_image_numbers != 0]
j = idx.fwd_idx[parent_image_indexes[i]] + parent_object_indexes[i]
# Link self to self too
i = np.hstack((i, np.arange(idx.length)))
j = np.hstack((j, np.arange(idx.length)))
labels = all_connected_components(i, j)
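# Objects whose parent chains meet share a connected-component label,
# so every object in a lineage (including split and mitosis siblings)
# is grouped under one label for the ancestor lookups below.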
nlabels = np.max(labels) + 1
#
# Set the ancestral index for each label
#
ancestral_index = np.zeros(nlabels, int)
ancestral_index[labels[parent_image_numbers == 0]] =\
np.argwhere(parent_image_numbers == 0).flatten().astype(int)
ancestral_image_index = idx.rev_idx[ancestral_index]
ancestral_object_index = \
ancestral_index - idx.fwd_idx[ancestral_image_index]
#
# Blow these up to one per object for convenience
#
ancestral_index = ancestral_index[labels]
ancestral_image_index = ancestral_image_index[labels]
ancestral_object_index = ancestral_object_index[labels]
def start(image_index):
'''Return the start index in the array for the given image index'''
return idx.fwd_idx[image_index]
def end(image_index):
'''Return the end index in the array for the given image index'''
return start(image_index) + idx.counts[0][image_index]
def slyce(image_index):
return slice(start(image_index), end(image_index))
class wrapped(object):
'''make an indexable version of a measurement, with parent and ancestor fetching'''
def __init__(self, feature_name):
self.feature_name = feature_name
self.backing_store = np.hstack([
m.get_measurement(object_name, feature_name, i)
for i in image_numbers])
def __getitem__(self, index):
return self.backing_store[slyce(index)]
def __setitem__(self, index, val):
self.backing_store[slyce(index)] = val
m.add_measurement(object_name, self.feature_name, val,
image_set_number = image_numbers[index],
can_overwrite=True)
def get_parent(self, index, no_parent=None):
result = np.zeros(idx.counts[0][index],
self.backing_store.dtype)
my_slice = slyce(index)
mask = parent_image_numbers[my_slice] != 0
if not np.all(mask):
if np.isscalar(no_parent) or (no_parent is None):
result[~mask] = no_parent
else:
result[~mask] = no_parent[~mask]
if np.any(mask):
result[mask] = self.backing_store[
idx.fwd_idx[parent_image_indexes[my_slice][mask]] +
parent_object_indexes[my_slice][mask]]
return result
def get_ancestor(self, index):
return self.backing_store[ancestral_index[slyce(index)]]
#
# Recalculate the trajectories
#
x = wrapped(M_LOCATION_CENTER_X)
y = wrapped(M_LOCATION_CENTER_Y)
trajectory_x = wrapped(self.measurement_name(F_TRAJECTORY_X))
trajectory_y = wrapped(self.measurement_name(F_TRAJECTORY_Y))
integrated = wrapped(self.measurement_name(F_INTEGRATED_DISTANCE))
dists = wrapped(self.measurement_name(F_DISTANCE_TRAVELED))
displ = wrapped(self.measurement_name(F_DISPLACEMENT))
linearity = wrapped(self.measurement_name(F_LINEARITY))
lifetimes = wrapped(self.measurement_name(F_LIFETIME))
label = wrapped(self.measurement_name(F_LABEL))
final_age = wrapped(self.measurement_name(F_FINAL_AGE))
age = {} # Dictionary of per-label ages
if self.wants_lifetime_filtering.value:
minimum_lifetime = self.min_lifetime.value if self.wants_minimum_lifetime.value else -np.Inf
maximum_lifetime = self.max_lifetime.value if self.wants_maximum_lifetime.value else np.Inf
for image_number in image_numbers:
index = image_index[image_number]
this_x = x[index]
if len(this_x) == 0:
continue
this_y = y[index]
last_x = x.get_parent(index, no_parent=this_x)
last_y = y.get_parent(index, no_parent=this_y)
x_diff = this_x - last_x
y_diff = this_y - last_y
#
# TrajectoryX,Y = X,Y distances traveled from step to step
#
trajectory_x[index] = x_diff
trajectory_y[index] = y_diff
#
# DistanceTraveled = Distance traveled from step to step
#
dists[index] = np.sqrt(x_diff * x_diff + y_diff * y_diff)
#
# Integrated distance = accumulated distance for lineage
#
integrated[index] = integrated.get_parent(index, no_parent=0) + dists[index]
#
# Displacement = crow-fly distance from initial ancestor
#
x_tot_diff = this_x - x.get_ancestor(index)
y_tot_diff = this_y - y.get_ancestor(index)
tot_distance = np.sqrt(x_tot_diff * x_tot_diff +
y_tot_diff * y_tot_diff)
displ[index] = tot_distance
#
# Linearity = ratio of displacement and integrated
# distance. NaN for new cells is ok.
#
linearity[index] = tot_distance / integrated[index]
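# Worked example (hypothetical track): a cell stepping
# (0,0) -> (3,0) -> (3,4) has per-step distances 3 and 4, so the
# integrated distance at the last step is 7, the displacement from the
# ancestor is sqrt(3**2 + 4**2) == 5, and linearity == 5/7 ~= 0.71.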
#
# Lifetime = parent's lifetime + 1 (objects with no parent start at 1)
#
lifetimes[index] = lifetimes.get_parent(index, no_parent=0) + 1
#
# Age = overall lifetime of each label
#
for this_label, this_lifetime in zip(label[index],lifetimes[index]):
age[this_label] = this_lifetime
all_labels = age.keys()
all_ages = age.values()
if self.wants_lifetime_filtering.value:
labels_to_filter = [k for k, v in age.iteritems() if v <= minimum_lifetime or v >= maximum_lifetime]
for image_number in image_numbers:
index = image_index[image_number]
# Fill in final object ages
this_label = label[index]
this_lifetime = lifetimes[index]
this_age = final_age[index]
ind = np.array(all_labels).searchsorted(this_label)
i = np.array(all_ages)[ind] == this_lifetime
this_age[i] = this_lifetime[i]
final_age[index] = this_age
# Filter object ages below the minimum
if self.wants_lifetime_filtering.value:
if len(labels_to_filter) > 0:
this_label = label[index].astype(float)
this_label[np.in1d(this_label,np.array(labels_to_filter))] = np.NaN
label[index] = this_label
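# Filtering sketch (hypothetical ages): with minimum_lifetime == 2 and
# age == {1: 5, 2: 1}, labels_to_filter == [2], so every occurrence of
# label 2 is replaced by NaN in the label measurement above.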
m.add_experiment_measurement(F_EXPT_ORIG_NUMTRACKS, nlabels)
if self.wants_lifetime_filtering.value:
m.add_experiment_measurement(F_EXPT_FILT_NUMTRACKS, nlabels-len(labels_to_filter))
def map_objects(self, workspace, new_of_old, old_of_new, i, j):
'''Record the mapping of old to new objects and vice-versa
workspace - workspace for current image set
new_of_old - an array of the new labels for every old label
old_of_new - an array of the old labels for every new label
i, j - the coordinates for each new object.
'''
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
image_number = m.get_current_image_measurement(cpp.IMAGE_NUMBER)
new_of_old = new_of_old.astype(int)
old_of_new = old_of_new.astype(int)
old_object_numbers = self.get_saved_object_numbers(workspace).astype(int)
max_object_number = self.get_max_object_number(workspace)
old_count = len(new_of_old)
new_count = len(old_of_new)
#
# Record the new objects' parents
#
parents = old_of_new.copy()
parents[parents != 0] =\
old_object_numbers[(old_of_new[parents!=0]-1)].astype(parents.dtype)
self.add_measurement(workspace, F_PARENT_OBJECT_NUMBER, old_of_new)
parent_image_numbers = np.zeros(len(old_of_new))
parent_image_numbers[parents != 0] = image_number - 1
self.add_measurement(workspace, F_PARENT_IMAGE_NUMBER,
parent_image_numbers)
#
# Assign object IDs to the new objects
#
mapping = np.zeros(new_count, int)
if old_count > 0 and new_count > 0:
mapping[old_of_new != 0] = \
old_object_numbers[old_of_new[old_of_new != 0] - 1]
miss_count = np.sum(old_of_new == 0)
lost_object_count = np.sum(new_of_old == 0)
else:
miss_count = new_count
lost_object_count = old_count
nunmapped = np.sum(mapping==0)
new_max_object_number = max_object_number + nunmapped
mapping[mapping == 0] = np.arange(max_object_number+1,
new_max_object_number + 1)
self.set_max_object_number(workspace, new_max_object_number)
self.add_measurement(workspace, F_LABEL, mapping)
self.set_saved_object_numbers(workspace, mapping)
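# Mapping sketch (hypothetical numbers): with saved track numbers
# old_object_numbers == [5, 7], old_of_new == [2, 0, 1] and
# max_object_number == 7, the code above yields
#     mapping == [7, 8, 5]
# i.e. new objects 1 and 3 inherit track numbers 7 and 5 from their
# parents, and new object 2 gets the fresh track number 8.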
#
# Compute distances and trajectories
#
diff_i = np.zeros(new_count)
diff_j = np.zeros(new_count)
distance = np.zeros(new_count)
integrated_distance = np.zeros(new_count)
displacement = np.zeros(new_count)
linearity = np.ones(new_count)
orig_i = i.copy()
orig_j = j.copy()
old_i, old_j = self.get_saved_coordinates(workspace)
old_distance = self.get_saved_distances(workspace)
old_orig_i, old_orig_j = self.get_orig_coordinates(workspace)
has_old = (old_of_new != 0)
if np.any(has_old):
old_indexes = old_of_new[has_old]-1
orig_i[has_old] = old_orig_i[old_indexes]
orig_j[has_old] = old_orig_j[old_indexes]
diff_i[has_old] = i[has_old] - old_i[old_indexes]
diff_j[has_old] = j[has_old] - old_j[old_indexes]
distance[has_old] = np.sqrt(diff_i[has_old]**2 + diff_j[has_old]**2)
integrated_distance[has_old] = (old_distance[old_indexes] + distance[has_old])
displacement[has_old] = np.sqrt((i[has_old]-orig_i[has_old])**2 + (j[has_old]-orig_j[has_old])**2)
linearity[has_old] = displacement[has_old] / integrated_distance[has_old]
self.add_measurement(workspace, F_TRAJECTORY_X, diff_j)
self.add_measurement(workspace, F_TRAJECTORY_Y, diff_i)
self.add_measurement(workspace, F_DISTANCE_TRAVELED, distance)
self.add_measurement(workspace, F_DISPLACEMENT, displacement)
self.add_measurement(workspace, F_INTEGRATED_DISTANCE, integrated_distance)
self.add_measurement(workspace, F_LINEARITY, linearity)
self.set_saved_distances(workspace, integrated_distance)
self.set_orig_coordinates(workspace, (orig_i, orig_j))
self.set_saved_coordinates(workspace, (i,j))
#
# Update the ages
#
age = np.ones(new_count, int)
if np.any(has_old):
old_age = self.get_saved_ages(workspace)
age[has_old] = old_age[old_of_new[has_old]-1]+1
self.add_measurement(workspace, F_LIFETIME, age)
final_age = np.NaN*np.ones(new_count, float) # Initialize to NaN; will re-calc later
self.add_measurement(workspace, F_FINAL_AGE, final_age)
self.set_saved_ages(workspace, age)
self.set_saved_object_numbers(workspace, mapping)
#
# Add image measurements
#
self.add_image_measurement(workspace, F_NEW_OBJECT_COUNT,
np.sum(parents==0))
self.add_image_measurement(workspace, F_LOST_OBJECT_COUNT,
lost_object_count)
#
# Find parents with more than one child. These are the progenitors
# for daughter cells.
#
if np.any(parents != 0):
h = np.bincount(parents[parents != 0])
split_count = np.sum(h > 1)
else:
split_count = 0
self.add_image_measurement(workspace, F_SPLIT_COUNT, split_count)
#
# Find children with more than one parent. These are the merges
#
if np.any(new_of_old != 0):
h = np.bincount(new_of_old[new_of_old != 0])
merge_count = np.sum(h > 1)
else:
merge_count = 0
self.add_image_measurement(workspace, F_MERGE_COUNT, merge_count)
#########################################
#
# Compile the relationships between children and parents
#
#########################################
last_object_numbers = np.arange(1, len(new_of_old) + 1)
new_object_numbers = np.arange(1, len(old_of_new)+1)
r_parent_object_numbers = np.hstack((
old_of_new[old_of_new != 0],
last_object_numbers[new_of_old != 0]))
r_child_object_numbers = np.hstack((
new_object_numbers[parents != 0], new_of_old[new_of_old != 0]))
if len(r_child_object_numbers) > 0:
#
# Find unique pairs
#
order = np.lexsort((r_child_object_numbers, r_parent_object_numbers))
r_child_object_numbers = r_child_object_numbers[order]
r_parent_object_numbers = r_parent_object_numbers[order]
to_keep = np.hstack((
[True],
(r_parent_object_numbers[1:] != r_parent_object_numbers[:-1]) |
(r_child_object_numbers[1:] != r_child_object_numbers[:-1])))
r_child_object_numbers = r_child_object_numbers[to_keep]
r_parent_object_numbers = r_parent_object_numbers[to_keep]
r_image_numbers = np.ones(
r_parent_object_numbers.shape[0],
r_parent_object_numbers.dtype) * image_number
if len(r_child_object_numbers) > 0:
m.add_relate_measurement(
self.module_num, R_PARENT,
self.object_name.value, self.object_name.value,
r_image_numbers - 1, r_parent_object_numbers,
r_image_numbers, r_child_object_numbers)
def recalculate_kalman_filters(self, workspace, image_numbers):
'''Rerun the kalman filters to improve the motion models'''
m = workspace.measurements
object_name = self.object_name.value
object_number = m[object_name, cpmeas.OBJECT_NUMBER, image_numbers]
# ########################
#
# Create an indexer that lets you do the following
#
# parent_x = x[idx.fwd_idx[image_number - fi] + object_number - 1]
# parent_y = y[idx.fwd_idx[image_number - fi] + object_number - 1]
#
# #######################
x = m[object_name, M_LOCATION_CENTER_X, image_numbers]
fi = np.min(image_numbers)
max_image = np.max(image_numbers) + 1
counts = np.zeros(max_image - fi, int)
counts[image_numbers - fi] = np.array([len(xx) for xx in x])
idx = Indexes(counts)
x = np.hstack(x)
y = np.hstack(m[object_name, M_LOCATION_CENTER_Y, image_numbers])
area = np.hstack(
m[object_name,
self.measurement_name(F_AREA),
image_numbers])
parent_image_number = np.hstack(
m[object_name,
self.measurement_name(F_PARENT_IMAGE_NUMBER),
image_numbers])
parent_object_number = np.hstack(
m[object_name,
self.measurement_name(F_PARENT_OBJECT_NUMBER),
image_numbers])
link_type = np.hstack(
m[object_name,
self.measurement_name(F_LINK_TYPE),
image_numbers])
link_distance = np.hstack(
m[object_name,
self.measurement_name(F_LINKING_DISTANCE),
image_numbers])
movement_model = np.hstack(
m[object_name,
self.measurement_name(F_MOVEMENT_MODEL),
image_numbers])
models = self.get_kalman_models()
kalman_models = [
cpfilter.static_kalman_model() if model == F_STATIC_MODEL
else cpfilter.velocity_kalman_model()
for model, elements in models]
kalman_states = [
cpfilter.KalmanState(kalman_model.observation_matrix,
kalman_model.translation_matrix)
for kalman_model in kalman_models]
#
# Initialize the last image set's states using no information
#
# TO_DO - use the kalman state information in the measurements
# to construct the kalman models that will best predict
# the penultimate image set.
#
n_objects = counts[-1]
if n_objects > 0:
this_slice = slice(idx.fwd_idx[-1], idx.fwd_idx[-1] + n_objects)
ii = y[this_slice]
jj = x[this_slice]
new_kalman_states = []
r = np.column_stack(
(area[this_slice].astype(float) / np.pi, np.zeros(n_objects),
np.zeros(n_objects), area[this_slice].astype(float)))\
.reshape(n_objects, 2, 2)
for kalman_state in kalman_states:
new_kalman_states.append(cpfilter.kalman_filter(
kalman_state, -np.ones(n_objects, int),
np.column_stack((ii, jj)),
np.zeros(n_objects), r))
kalman_states = new_kalman_states
else:
this_slice = slice(idx.fwd_idx[-1], idx.fwd_idx[-1])
#
# Update the kalman states and take any new linkage distances
# and movement models that are better
#
for image_number in reversed(sorted(image_numbers)[:-1]):
i = image_number - fi
n_objects = counts[i]
child_object_number = np.zeros(n_objects, int)
next_slice = this_slice
this_slice = slice(idx.fwd_idx[i], idx.fwd_idx[i] + counts[i])
next_links = link_type[next_slice]
next_has_link = (next_links == LT_PHASE_1)
if any(next_has_link):
next_parents = parent_object_number[next_slice]
next_object_number = np.arange(counts[i+1]) + 1
child_object_number[next_parents[next_has_link]-1] = \
next_object_number[next_has_link]
has_child = child_object_number != 0
if np.any(has_child):
kid_idx = child_object_number[has_child] - 1
ii = y[this_slice]
jj = x[this_slice]
r = np.column_stack(
(area[this_slice].astype(float) / np.pi, np.zeros(n_objects),
np.zeros(n_objects), area[this_slice].astype(float)))\
.reshape(n_objects, 2, 2)
new_kalman_states = []
errors = link_distance[next_slice]
model_used = movement_model[next_slice]
for (model, elements), kalman_state in zip(models, kalman_states):
assert isinstance(kalman_state, cpfilter.KalmanState)
n_elements = len(elements)
q = np.zeros((n_objects, n_elements, n_elements))
if np.any(has_child):
obs = kalman_state.predicted_obs_vec
dk = np.sqrt((obs[kid_idx, 0] - ii[has_child])**2 +
(obs[kid_idx, 1] - jj[has_child])**2)
this_model = np.where(dk < errors[kid_idx])[0]
if len(this_model) > 0:
km_model = KM_NO_VEL if model == F_STATIC_MODEL \
else KM_VEL
model_used[kid_idx[this_model]] = km_model
errors[kid_idx[this_model]] = dk[this_model]
for j in range(n_elements):
q[has_child, j, j] = kalman_state.noise_var[kid_idx, j]
updated_state = cpfilter.kalman_filter(
kalman_state, child_object_number - 1,
np.column_stack((ii, jj)), q, r)
new_kalman_states.append(updated_state)
if np.any(has_child):
# fix child linking distances and models
mname = self.measurement_name(F_LINKING_DISTANCE)
m[object_name, mname, image_number+1] = errors
mname = self.measurement_name(F_MOVEMENT_MODEL)
m[object_name, mname, image_number+1] = model_used
kalman_states = new_kalman_states
def get_kalman_feature_names(self):
if self.tracking_method != TM_LAP:
return []
return sum(
[sum(
[[ kalman_feature(model, F_STATE, element),
kalman_feature(model, F_NOISE, element)] +
[ kalman_feature(model, F_COV, element, e2)
for e2 in elements]
for element in elements],[])
for model, elements in self.get_kalman_models()], [])
def get_measurement_columns(self, pipeline):
result = [(self.object_name.value,
self.measurement_name(feature),
coltype)
for feature, coltype in F_ALL_COLTYPE_ALL]
result += [(cpmeas.IMAGE, self.image_measurement_name(feature), coltype)
for feature, coltype in F_IMAGE_COLTYPE_ALL]
attributes = { cpmeas.MCA_AVAILABLE_POST_GROUP: True }
if self.tracking_method == TM_LAP:
result += [( self.object_name.value,
self.measurement_name(name),
coltype) for name, coltype in (
(F_AREA, cpmeas.COLTYPE_INTEGER),
(F_LINK_TYPE, cpmeas.COLTYPE_INTEGER),
(F_LINKING_DISTANCE, cpmeas.COLTYPE_FLOAT),
(F_STANDARD_DEVIATION, cpmeas.COLTYPE_FLOAT),
(F_MOVEMENT_MODEL, cpmeas.COLTYPE_INTEGER))]
result += [( self.object_name.value,
self.measurement_name(name),
cpmeas.COLTYPE_FLOAT) for name in
list(self.get_kalman_feature_names())]
if self.wants_second_phase:
result += [
(self.object_name.value, self.measurement_name(name), coltype)
for name, coltype in (
(F_GAP_LENGTH, cpmeas.COLTYPE_INTEGER),
(F_GAP_SCORE, cpmeas.COLTYPE_FLOAT),
(F_MERGE_SCORE, cpmeas.COLTYPE_FLOAT),
(F_SPLIT_SCORE, cpmeas.COLTYPE_FLOAT),
(F_MITOSIS_SCORE, cpmeas.COLTYPE_FLOAT))]
# Add the post-group attribute to all measurements
result = [ ( c[0], c[1], c[2], attributes) for c in result]
else:
pg_meas = [
self.measurement_name(feature)
for feature in (F_LINKING_DISTANCE, F_MOVEMENT_MODEL)]
result = [
c if c[1] not in pg_meas else (c[0], c[1], c[2], attributes)
for c in result]
return result
def get_object_relationships(self, pipeline):
'''Return the object relationships produced by this module'''
object_name = self.object_name.value
if self.wants_second_phase and self.tracking_method == TM_LAP:
when = cpmeas.MCA_AVAILABLE_POST_GROUP
else:
when = cpmeas.MCA_AVAILABLE_EACH_CYCLE
return [(R_PARENT, object_name, object_name, when)]
def get_categories(self, pipeline, object_name):
if object_name in (self.object_name.value, cpmeas.IMAGE):
return [F_PREFIX]
elif object_name == cpmeas.EXPERIMENT:
return [F_PREFIX]
else:
return []
def get_measurements(self, pipeline, object_name, category):
if object_name == self.object_name.value and category == F_PREFIX:
result = list(F_ALL)
if self.tracking_method == TM_LAP:
result += [F_AREA, F_LINKING_DISTANCE, F_STANDARD_DEVIATION,
F_LINK_TYPE, F_MOVEMENT_MODEL]
if self.wants_second_phase:
result += [F_GAP_LENGTH, F_GAP_SCORE, F_MERGE_SCORE,
F_SPLIT_SCORE, F_MITOSIS_SCORE]
result += self.get_kalman_feature_names()
return result
if object_name == cpmeas.IMAGE:
result = F_IMAGE_ALL
return result
if object_name == cpmeas.EXPERIMENT and category == F_PREFIX:
return [F_EXPT_ORIG_NUMTRACKS, F_EXPT_FILT_NUMTRACKS]
return []
def get_measurement_objects(self, pipeline, object_name, category,
measurement):
if (object_name == cpmeas.IMAGE and category == F_PREFIX and
measurement in F_IMAGE_ALL):
return [ self.object_name.value]
return []
def get_measurement_scales(self, pipeline, object_name, category, feature, image_name):
if self.tracking_method == TM_LAP:
return []
if feature in self.get_measurements(pipeline, object_name, category):
return [str(self.pixel_radius.value)]
return []
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
if from_matlab and variable_revision_number == 3:
wants_image = setting_values[10] != cps.DO_NOT_USE
measurement = '_'.join(setting_values[2:6])
setting_values = [ setting_values[0], # tracking method
setting_values[1], # object name
measurement,
setting_values[6], # pixel_radius
setting_values[7], # display_type
wants_image,
setting_values[10]]
variable_revision_number = 1
from_matlab = False
if (not from_matlab) and variable_revision_number == 1:
setting_values = setting_values + ["100","100"]
variable_revision_number = 2
if (not from_matlab) and variable_revision_number == 2:
# Added phase 2 parameters
setting_values = setting_values + [
"40","40","40","50","50","50","5"]
variable_revision_number = 3
if (not from_matlab) and variable_revision_number == 3:
# Added Kalman choices:
# Model
# radius std
# radius limit
setting_values = (setting_values[:7] +
[ M_BOTH, "3", "2,10"] +
setting_values[9:])
variable_revision_number = 4
if (not from_matlab) and variable_revision_number == 4:
# Added lifetime filtering: Wants filtering + min/max allowed lifetime
setting_values = setting_values + [cps.NO, cps.YES, "1", cps.NO, "100"]
variable_revision_number = 5
if (not from_matlab) and variable_revision_number == 5:
# Added mitosis alternative score + mitosis_max_distance
setting_values = setting_values + ["80", "40"]
variable_revision_number = 6
return setting_values, variable_revision_number, from_matlab
|
LeeKamentsky/CellProfiler
|
cellprofiler/modules/trackobjects.py
|
Python
|
gpl-2.0
| 145,080
|
[
"Gaussian"
] |
adf441f00e52c63777acb655e40ba763e344741ffe2d38921bb3f6a327fa2bd4
|
"""
Acceptance Tests for Course Information
"""
from common.test.acceptance.pages.studio.course_info import CourseUpdatesPage
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.index import DashboardPage
class UsersCanAddUpdatesTest(StudioCourseTest):
"""
Series of Bok Choy Tests to test the Course Updates page
"""
def _create_and_verify_update(self, message):
"""
Helper method to create and verify an update based on the message.
Arguments:
message (str): Message to add to the update.
"""
self.course_updates_page.visit()
self.assertTrue(self.course_updates_page.is_new_update_button_present())
self.course_updates_page.click_new_update_button()
self.course_updates_page.submit_update(message)
self.assertTrue(self.course_updates_page.is_first_update_message(message))
def setUp(self, is_staff=False, test_xss=True):
super(UsersCanAddUpdatesTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.course_updates_page = CourseUpdatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def test_course_updates_page_exists(self):
"""
Scenario: User can access Course Updates Page
Given I have opened a new course in Studio
And I go to the course updates page
When I visit the page
Then I should see any course updates
And I should see the new update button
"""
self.course_updates_page.visit()
self.course_updates_page.wait_for_page()
self.assertTrue(self.course_updates_page.is_new_update_button_present())
def test_new_course_update_is_present(self):
"""
Scenario: Users can add updates
Given I have opened a new course in Studio
And I go to the course updates page
When I add a new update with the text "Hello"
Then I should see the update "Hello"
And I see a "saving" notification
"""
self._create_and_verify_update('Hello')
def test_new_course_update_can_be_edited(self):
"""
Scenario: Users can edit updates
Given I have opened a new course in Studio
And I go to the course updates page
When I add a new update with the text "Hello"
And I modify the text to "Goodbye"
Then I should see the update "Goodbye"
"""
self._create_and_verify_update('Hello')
self.assertTrue(self.course_updates_page.is_edit_button_present())
self.course_updates_page.click_edit_update_button()
self.course_updates_page.submit_update('Goodbye')
self.assertFalse(self.course_updates_page.is_first_update_message('Hello'))
self.assertTrue(self.course_updates_page.is_first_update_message('Goodbye'))
def test_delete_course_update(self):
"""
Scenario: Users can delete updates
Given I have opened a new course in Studio
And I go to the course updates page
And I add a new update with the text "Hello"
And I delete the update
And I confirm the prompt
Then I should not see the update "Hello"
"""
self._create_and_verify_update('Hello')
self.course_updates_page.click_delete_update_button()
self.assertTrue(self.course_updates_page.is_course_update_list_empty())
def test_user_edit_date(self):
"""
Scenario: Users can edit update dates
Given I have opened a new course in Studio
And I go to the course updates page
And I add a new update with the text "Hello"
When I edit the date to "06/01/13"
Then I should see the date "June 1, 2013"
"""
self._create_and_verify_update('Hello')
self.course_updates_page.click_edit_update_button()
self.course_updates_page.set_date('06/01/2013')
self.course_updates_page.click_new_update_save_button()
self.assertTrue(self.course_updates_page.is_first_update_date('June 1, 2013'))
def test_outside_tag_preserved(self):
"""
Scenario: Text outside of tags is preserved
Given I have opened a new course in Studio
And I go to the course updates page
When I add a new update with the text "before <strong>middle</strong> after"
Then I should see the update "before <strong>middle</strong> after"
And when I reload the page
Then I should see the update "before <strong>middle</strong> after"
"""
self._create_and_verify_update('before <strong>middle</strong> after')
self.course_updates_page.visit()
self.assertTrue(self.course_updates_page.is_first_update_message('before <strong>middle</strong> after'))
def test_asset_change_in_updates(self):
"""
Scenario: Static links are rewritten when previewing a course update
Given I have opened a new course in Studio
And I go to the course updates page
When I add a new update with the text "<img src='/static/my_img.jpg'/>"
# Can only do partial text matches because of the quotes within quotes (and regexp step matching).
Then I should see the asset update to "my_img.jpg"
And I change the update from "/static/my_img.jpg" to "<img src='/static/modified.jpg'/>"
Then I should see the asset update to "modified.jpg"
And when I reload the page
Then I should see the asset update to "modified.jpg"
"""
self.course_updates_page.visit()
self.assertTrue(self.course_updates_page.is_new_update_button_present())
self.course_updates_page.click_new_update_button()
self.course_updates_page.submit_update("<img src='/static/my_img.jpg'/>")
self.assertTrue(self.course_updates_page.first_update_contains_html("my_img.jpg"))
self.course_updates_page.click_edit_update_button()
self.course_updates_page.submit_update("<img src='/static/modified.jpg'/>")
self.assertFalse(self.course_updates_page.first_update_contains_html("my_img.jpg"))
self.assertTrue(self.course_updates_page.first_update_contains_html("modified.jpg"))
self.course_updates_page.visit()
self.assertTrue(self.course_updates_page.first_update_contains_html("modified.jpg"))
|
TheMOOCAgency/edx-platform
|
common/test/acceptance/tests/studio/test_studio_course_info.py
|
Python
|
agpl-3.0
| 6,771
|
[
"VisIt"
] |
4b1b5bd062f4b9dd18dc779e5846669a89a9bc08d37b68a85e3e069066d10ba4
|
import os
# remove any visible GPU, force tensorflow to run only on CPU
os.environ['CUDA_VISIBLE_DEVICES'] = ""
import tensorflow as tf
from multiprocessing import cpu_count
from six.moves import cPickle
from numbers import Number
from collections import defaultdict, OrderedDict
from .visualization import *
_path = os.path.dirname(os.path.realpath(__file__))
_SESSION = tf.Session(config=tf.ConfigProto(**{
'intra_op_parallelism_threads': cpu_count() - 1,
'allow_soft_placement': True,
'log_device_placement': False,
}))
def get_value(x):
"""evaluate any tensorflow variable to get it real value"""
if isinstance(x, (tuple, list)):
return _SESSION.run(x)
return x.eval(session=_SESSION)
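# Usage sketch (hypothetical tensor, evaluated on the module-level session):
#
# >>> get_value(tf.constant([1., 2.]))
# array([1., 2.], dtype=float32)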
def load_data():
"""
Load preprocessed data from pickle files.
Format of the data: "Sky1" -> [array_of_galaxies, halos_position]
For example: to get all galaxies in "Sky1"
>>> train['Sky1'][0]
To get all halos in "Sky1"
>>> train['Sky1'][1]
Note
----
Each galaxy position contains: [x, y, e1, e2]
Each halo position contains: [nb_halo, refX, refY, x1, y1, x2, y2, x3, y3]
Return
------
train_data, test_data
"""
train_path = os.path.join(_path, "train.dat")
test_path = os.path.join(_path, "test.dat")
if not os.path.exists(train_path):
raise Exception("Cannot find train data at path:" + train_path)
if not os.path.exists(test_path):
raise Exception("Cannot find test data at path:" + test_path)
train = cPickle.load(open(train_path, 'rb'))
test = cPickle.load(open(test_path, 'rb'))
return train, test
def freqcount(x, key=None, count=1, normalize=False, sort=False):
""" x: list, iterable
Parameters
----------
key: callable
extract the key from each item in the list
count: callable, int
extract the count from each item in the list
normalize: bool
if normalize, all the values are normalized from 0. to 1. (
which sum up to 1. in total).
sort: boolean
if True, the list will be sorted in ascent order.
Return
------
dict: x(obj) -> freq(int)
"""
freq = defaultdict(int)
if key is None:
key = lambda x: x
if count is None:
count = 1
if isinstance(count, Number):
_ = int(count)
count = lambda x: _
for i in x:
c = count(i)
i = key(i)
freq[i] += c
# always return the same order
s = float(sum(v for v in freq.values()))
freq = OrderedDict([(k, freq[k] / s if normalize else freq[k])
for k in sorted(freq.keys())])
if sort:
freq = OrderedDict(sorted(freq.items(), key=lambda x: x[1]))
return freq
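# Usage sketch (hypothetical inputs, not part of the original module;
# normalized values shown approximately):
#
# >>> freqcount(['a', 'b', 'a', 'c', 'a'])
# OrderedDict([('a', 3), ('b', 1), ('c', 1)])
# >>> freqcount(['a', 'b', 'a'], normalize=True)   # values sum to 1.
# OrderedDict([('a', 0.666...), ('b', 0.333...)])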
|
trungnt13/BAY2-uef17
|
data/__init__.py
|
Python
|
gpl-3.0
| 2,750
|
[
"Galaxy"
] |
8c6e75085ab2921a72786b7d585a2e30b27f48c523c839be77d7b23ae8d6372f
|
# -*- coding: utf-8 -*-
from collections import namedtuple
Grant = namedtuple('Grant', 'url, grantee, location, title, type, total_support, '
'year, description, break_down, urls')
GRANTS = [
Grant(
u'dream-yard',
u'DreamYard Project',
u'United States',
u'Hive Fashion DreamYard Summer Intensive',
u'learning-webmaking',
u'$8,250',
2012,
u'<p> Mozilla provided a grant to <a href="http://www.dreamyard.com/">'
u'DreamYard Arts Center</a> in the Bronx, NY, in conjunction with '
u'<a href="http://explorecreateshare.org/2012/07/20/'
u'next-seasons-hottest-trend-hive-fashion/">Hive Fashion</a>, '
u'to support a DIY Fashion intensive for teens in August 2012.</p>',
u'',
u'',
),
Grant(
u'compumentor',
u'Compumentor',
u'United States',
u'2007 TechSoup Netsquared Conference',
u'free-culture-community',
u'$2,000',
2007,
u'<p>Mozilla contributed to the 2007 TechSoup <a href="http://www.netsquared.org">'
u'Netsquared Conference</a> Innovation Fund to support innovative software applications '
u'created by and for non-profit organizations.</p>',
u'',
u'',
),
Grant(
u'codethink',
u'Codethink Ltd.',
u'United Kingdom',
u'Accessibility Research',
u'open-source-technology',
u'$4,427',
2007,
u'<p>Mozilla made a grant to <a href="http://www.codethink.co.uk/">Codethink Ltd.</a> '
u'to do a feasibility study for migrating the AT-SPI accessibility '
u'interface to use D-Bus.</p>',
u'',
u'',
),
Grant(
u'charles-chen',
u'Charles Chen',
u'United States',
u'Fire Vox',
u'open-source-technology',
u'$11,976',
2007,
u'<p>Mozilla supported the work of Charles Chen to implement ARIA widgets in the '
u'<a href="http://www.accessfirefox.org/Fire_Vox.php">Fire Vox</a> open source '
u'screen reader extension for Firefox.</p>',
u'',
u'',
),
Grant(
u'ariel-rios',
u'Ariel Rios',
u'United States',
u'GNOME Accessibility',
u'open-source-technology',
u'$12,471',
2007,
u'<p>Mozilla supported the work of Ariel Rios to implement the AT-SPI Collection '
u'interface for better Firefox accessibility on Linux.</p>',
u'',
u'',
),
Grant(
u'aapd',
u'American Association of People with Disabilities',
u'United States',
u'AAPD',
u'free-culture-community',
u'$1,000',
2007,
u'<p>Mozilla sponsored the <a href="http://www.aapd.com/">AAPD</a> Leadership Gala '
u'and related activities.</p>',
u'',
u'',
),
Grant(
u'peoples-production-house',
u'People’s Production House',
u'United States',
u'World’s Fair 2.0 Design Intensive Incubator',
u'learning-webmaking',
u'$14,500',
2012,
u'<p>This grant to the <a href="http://peoplesproductionhouse.org/">'
u'People’s Production House</a> supported the implementation of three '
u'design workshops for youth in conjunction with World’s Fair 2.0, a '
u'cell-phone based journalism scavenger hunt that investigates the borough '
u'of Queens’ history - past and present. The final Design Intensive '
u'took place during Maker Faire, and involved youth in the installation of '
u'their work at the New York Hall of Science.</p>',
u'',
u'',
),
Grant(
u'participatory-culture-foundation',
u'Participatory Culture Foundation',
u'United States',
u'NewsHour Open Election 2012',
u'free-culture-community',
u'$266,530.42',
2012,
u'<p>As part of the NewsHour Open Election 2012 project, supported by the '
u'Corporation for Public Broadcasting, and in partnership with PBS NewsHour and '
u'Mozilla, the <a href="http://pculture.org/">Participatory Culture Foundation</a> '
u'has received support to develop crowd-sourcing technologies to enable citizen '
u'volunteers to translate and caption 2012 election coverage into dozens of languages, '
u'as well as for the deaf and hard-of-hearing. These technologies will make election '
u'news, speeches and debates more accessible for diverse audiences, helping to increase '
u'their understanding of, and engagement in, the political process.</p>',
u'',
u'',
),
Grant(
u'global-kids-inc',
u'Global Kids Inc.',
u'United States',
u'PenPal News',
u'learning-webmaking',
u'$15,000',
2012,
u'<p> Mozilla provided a grant to <a href="http://www.globalkids.org/">'
u'Global Kids Inc.</a>, in conjunction with Hive NYC, for the development of '
u'PenPal News software. PenPal News is a web app that uses news as a '
u'conversation-starter to connect middle and high school-aged youth '
u'domestically and internationally.</p>',
u'',
u'',
),
Grant(
u'public_knowledge',
u'Public Knowledge',
u'United States',
u'Public Knowledge',
u'user-sovereignty',
u'$5,000',
2012,
u'<p><a href="http://www.publicknowledge.org/">Public Knowledge</a> preserves the '
u'openness of the Internet and the public’s access to knowledge, promotes creativity '
u'through balanced copyright, and upholds and protects the rights of consumers to use '
u'innovative technology lawfully.</p>',
u'',
u'',
),
Grant(
u'institute_of_play',
u'Institute of Play',
u'United States',
u'Hive Activity Delivery Mechanism',
u'learning-webmaking',
u'$12,604',
2012,
u'<p>This grant to the <a href="http://www.instituteofplay.org/">Institute of Play</a> '
u'provided support for the Hive Activity Delivery Mechanism Project, which seeks to '
u'develop a sharing model for Hive-developed learning activities that represents the '
u'collaboration, experimentation and youth-focus that typifies the '
u'Hive and its members.</p>',
u'',
u'',
),
Grant(
u'cbc',
u'CBC Radio Canada',
u'Canada',
u'Marshall McLuhan Project',
u'free-culture-community',
u'$10,000',
2011,
u'<p>This grant was given to the <a href="http://www.cbc.ca">'
u'Canadian Broadcasting Corporation</a> to support the creation of on-line '
u'content to engage Canadians in the celebration of the 100th anniversary of '
u'the birth of Marshall McLuhan.</p>',
u'',
u'',
),
Grant(
u'big-blue-button',
u'Blindside Networks',
u'Canada',
u'BigBlueButton',
u'open-source-technology',
u'$11,000',
2011,
u'<p><a href="http://www.blindsidenetworks.com/">Blindside Networks</a> '
u'is a company dedicated to helping universities, colleges, and commercial '
u'companies deliver a high-quality learning experience to remote students. '
u'The goal of the BigBlueButton open source project is to enable remote students '
u'to have a high-quality learning experience. This grant supported converting '
u'BigBlueButton 0.8-beta to use popcorn.js, the HTML5 media framework designed '
u'for synchronized playback of media.</p>',
u'',
u'',
),
Grant(
u'depaul-university',
u'DePaul University',
u'United States',
u'Digital Youth Mentor',
u'learning-webmaking',
u'$25,000',
2011,
u'<p>This grant was made to <a href="http://www.depaul.edu">DePaul University</a> '
u'to support the employment of a Digital Youth Mentor.</p>',
u'',
u'',
),
Grant(
u'new-youth-city',
u'New Youth City Learning Network',
u'United States',
u'Hackasaurus',
u'learning-webmaking',
u'$25,000',
2011,
u'<p>This grant to the <a href="http://dmlcentral.net/projects/3658">'
u'New Youth City Learning Network</a> at the Social Science Research Centre '
u'supported the development of Hackasaurus. Hackasaurus is a set of tools that '
u'are under development to help teenagers closely review, remix and redesign '
u'the Web. Hackasaurus was prototyped with youth over the course of several '
u'workshops and jam days in New York and Chicago.</p>',
u'',
u'',
),
Grant(
u'henrik-moltke',
u'Henrik Moltke',
u'Germany',
u'Hyperaudio',
u'free-culture-community',
u'$10,000',
2011,
u'<p>This grant supported the development of a compelling concept and implementation '
u'plan for the <a href="http://www.hyperaudio.org/">Hyperaudio</a> project.</p>',
u'',
u'',
),
Grant(
u'bay-area-video-coalition',
u'Bay Area Video Coalition',
u'United States',
u'Zero Divide/Mozilla Youth Media Project',
u'open-source-technology',
u'$88,500',
2012,
u'<p>The <a href="http://www.bavc.org/">Bay Area Video Coalition (BAVC)</a> '
u'was an implementation partner in the Mozilla Foundation/Zero Divide youth '
u'media project in 2011. They worked together to test software prototypes for '
u'Butter, a user interface for WebMadeMovies; to instruct and lead youth '
u'participants to create 3-4 web-native productions with these tools; and to '
u'create a modular, openly-licensed curriculum to make it easier for people to '
u'create HTML5/open video projects of their own.</p><p>In 2012, Mozilla provided '
u'a grant to BAVC to support the <a href="http://bavc.org/creative_code">'
u'Open Source track at BAVC’s Digital Pathways</a>, as part of a broader partnership '
u'between BAVC and Mozilla to encourage next-generation integrated '
u'learning and career skills.</p>',
{
u'2011': ['Amount: $73,500'],
u'2012': ['Amount: $15,000']
},
u'',
),
Grant(
u'universal-subtitles',
u'Universal Subtitles',
u'United States',
u'Universal Subtitles',
u'free-culture-community',
u'$100,000',
2011,
u'<p>In 2011, Mozilla provided a grant to support the development of '
u'<a href="http://www.universalsubtitles.org">Universal Subtitles</a> '
u'(now known as Amara). Amara gives individuals, communities, and larger '
u'organizations the power to overcome accessibility and language barriers '
u'for online video. The tools are free and open source and make the work of '
u'subtitling and translating video simpler, more appealing, and, most of all, '
u'more collaborative.</p>',
u'',
u'',
),
Grant(
u'adaptive-technology-resource-centre',
u'Adaptive Technology Resource Centre',
u'Canada',
u'Adaptive Technology Resource Centre',
u'open-source-technology',
u'$10,000',
2006,
u'<p>This grant was made to the Adaptive Technology Resource Centre at '
u'the University of Toronto (now the <a href="http://idrc.ocad.ca/">'
u'Inclusive Design Research Centre</a> at the Ontario College of Art and Design). '
u'It enabled the development of an accessible Thunderbird user interface as well as '
u'its adoption through evangelism, mentoring, community-building, and technical '
u'leadership, with a focus on working with the jQuery community to implement ARIA '
u'support in this popular toolkit.</p>',
u'',
u'',
),
Grant(
u'benetech',
u'Benetech',
u'United States',
u'Benetech DAISY Reader for Firefox',
u'free-culture-community',
u'$50,000',
2009,
u'<p>Mozilla provided funding over two years to <a href="http://www.benetech.org/">'
u'Benetech</a>, a corporation dedicated to leveraging technology innovation and '
u'business expertise to solve unmet social needs. This funding supports the development '
u'of an open source, browser-based DAISY reader that enables people with print '
u'disabilities to read accessible text using Firefox.</p>',
{
u'2008': ['Amount: $25,000'],
u'2009': ['Amount: $25,000']
},
u'',
),
Grant(
u'nvda',
u'NV Access',
u'Australia',
u'NVDA Screen Reader',
u'open-source-technology',
u'$135,000',
2010,
u'<p>Mozilla made grants to <a href="http://www.nvaccess.org/">NV Access</a> '
u'from 2007 to 2010 to support the development of '
u'<a href="http://www.nvda-project.org/">NonVisual Desktop Access (NVDA)</a>, '
u'a free and open source screen reader for the Microsoft Windows operating system. '
u'Providing feedback via synthetic speech and Braille, it enables blind or vision '
u'impaired people to access computers running Windows for no more '
u'cost than a sighted person.</p>',
{
u'2007': ['Initial Support: $10,000', 'Support for full time work of James Teh: $80,000'],
u'2009': ['Expanding work: $25,000'],
u'2010': ['Growing influence: $20,000']
},
[
u'http://www.nvda-project.org/blog/'
u'Mozilla_Foundation_grant_allows_for_employment_of_NVDA_full-time_developer',
u'http://www.nvda-project.org/blog/First_Work_on_Web_Access_Grant',
u'http://www.nvda-project.org/blog/NewMozillaGrantFurthersNVDA',
u'http://www.nvda-project.org/blog/NVDAPresentationAtCSUN2009'
]
),
Grant(
u'firebug-accessibility',
u'University of Illinois Urbana-Champaign & The Paciello Group ',
u'United States',
u'Firebug Accessibility',
u'open-source-technology',
u'$120,009',
2010,
u'<p>This grant provided funds to the <a href="http://illinois.edu/">'
u'University of Illinois Urbana-Champaign</a> and '
u'<a href="http://www.paciellogroup.com/">The Paciello Group</a> in 2009 '
u'and 2010 for their joint work on Firebug accessibility. The goal was to '
u'mainstream accessibility for web applications by building accessibility '
u'testing functions and associated test cases into '
u'<a href="http://getfirebug.com/">Firebug</a>, a popular tool used by many '
u'web developers.</p>',
{
u'2009': ['Phase One: $25,000', 'Phase Two: $25,000', 'Phase Three: $25,000'],
u'2010': ['Phase Four: $25,000', 'Phase Five: $20,009']
},
u'',
),
Grant(
u'vquence',
u'Vquence',
u'Australia',
u'Vquence',
u'open-source-technology',
u'$75,000',
2010,
u'<p>In the spring of 2008 Mozilla became concerned about the lack of '
u'support for deaf and blind Firefox users. Mozilla identified '
u'<a href="http://www.gingertech.net/">Dr. Silvia Pfeiffer</a> and her '
u'company Vquence as the best resource for creating a plan for open '
u'video accessibility. By providing grants in 2008, 2009 and 2010, '
u'Mozilla supported the technology that implemented Firefox video '
u'accessibility features, such as text subtitles for the hearing-impaired '
u'and audio descriptions for blind users.</p>',
{
u'2008': ['Amount: $25,000'],
u'2009': ['Amount: $25,000'],
u'2010': ['Amount: $25,000']
},
[
u'http://frankhecker.com/2009/06/30/new-mozilla-accessibility-projects/',
]
),
Grant(
u'web4all',
u'World Wide Web Consortium',
u'UK',
u'Web4All Conference',
u'free-culture-community',
u'$4,000',
2010,
u'<p>Mozilla has sponsored the <a href="http://www.w4a.info/">Web4All Conference</a> '
u'for several years, and has also sponsored several speakers to be able to attend. '
u'The Web4All Conference is an annual cross-disciplinary gathering focused on '
u'Scientific Enquiry, Research, Development and Engineering. Views bridge academia, '
u'commerce and industry, and arguments encompassing a range of beliefs across the '
u'design-accessibility spectrum are presented.</p>',
{
u'2007': ['Amount: $1,000'],
u'2008': ['Amount: $1,000'],
u'2009': ['Amount: $1,000'],
u'2010': ['Amount: $1,000'],
},
u'',
),
Grant(
u'creative-commons',
u'Creative Commons',
u'United States',
u'Creative Commons Pledge',
u'free-culture-community',
u'$300,000',
2010,
u'<p>In December 2007, Mozilla decided to participate in '
u'<a href="http://creativecommons.org/">Creative Commons</a> "5x5 Challenge." '
u'Beginning in 2008, Mozilla pledged $100,000 per year for five years to support '
u'open licensing on the web, developing hybrid organizations, and maturing the '
u'concept of the web as an ecology of shared ideas.</p>',
{
u'2008': ['Amount: $100,000'],
u'2009': ['Amount: $100,000'],
u'2010': ['Amount: $100,000'],
},
u'',
),
Grant(
u'foms',
u'Annodex Association',
u'Australia',
u'Foundations of Open Media Software Workshop',
u'free-culture-community',
u'$15,000',
2009,
u'<p>These grants provided sponsorship for the 2007, 2008 and 2009 '
u'<a href="http://www.foms-workshop.org">Foundations of Open Media Software (FOMS)</a> '
u'workshop in Hobart, Australia. The bulk of these funds were used to cover the travel '
u'expenses of key participants who otherwise would have been unable to attend. '
u'This meeting hosts important discussions on open codecs, HTML specifications, '
u'browsers and hands-on work towards specifications for video in browsers.</p>',
{
u'2007': ['Amount: $5,000'],
u'2008': ['Amount: $5,000'],
u'2009': ['Amount: $5,000']
},
u'',
),
Grant(
u'free-culture-conference',
u'Berkeley Center for Law and Technology',
u'United States',
u'Free Culture Conference',
u'free-culture-community',
u'$5,000',
2008,
u'<p>This grant provided sponsorship for the Free Culture Conference put '
u'on by the <a href="http://www.law.berkeley.edu/bclt.htm">'
u'Berkeley Center for Law and Technology</a>, held October 11 and 12, 2008 '
u'in Berkeley, California. The Free Culture Conference is a yearly touchstone '
u'event for the advancement of free cultures, where members are free to '
u'participate without artificial limits.</p>',
u'',
u'',
),
Grant(
u'fscons',
u'FFKP',
u'Sweden',
u'Free Society Conference and Nordic Summit',
u'free-culture-community',
u'$1,300',
2009,
u'<p>This grant provided sponsorship for the third '
u'<a href="https://fscons.org/2009/">Free Society Conference and '
u'Nordic Summit (FSCONS)</a> held November 13-15, 2009, in Goteborg, Sweden. '
u'FSCONS is jointly organized by Free Software Foundation Europe, '
u'Creative Commons and Wikipedia Sverige.</p>',
u'',
u'',
),
Grant(
u'free-software-foundation',
u'Free Software Foundation',
u'United States',
u'LinuxBIOS Support',
u'free-culture-community',
u'$10,000',
2007,
u'<p>In 2007, Mozilla provided $10,000 to support the LinuxBIOS-related '
u'activities of the <a href="http://www.fsf.org/">Free Software Foundation</a>. '
u'This grant went toward software development, infrastructure and communications. '
u'The Free Software Foundation ported coreboot to the alix.2c3 board, a board '
u'useful in building routers, firewalls, and wifi access points.</p>',
u'',
u'',
),
Grant(
u'gnome',
u'GNOME',
u'United States',
u'GNOME Accessibility',
u'open-source-technology',
u'$48,000',
2010,
u'<p>Mozilla offered grants in support of '
u'<a href="http://projects.gnome.org/outreach/a11y/">GNOME’s Outreach '
u'Program for Accessibility</a>. The <a href="http://www.gnome.org/">'
u'GNOME Foundation</a> sponsors the GNOME project to provide a free desktop '
u'environment for Linux systems. Mozilla and GNOME have been longtime '
u'collaborators on open source and accessibility issues.</p><p>See the '
u'<a href="reports/gnome-haeger-report/">grant final report</a> for more details.</p>',
{
u'2007': ['General Accessibility Support: $10,000'],
u'2008': ['Orca rich document browsing extension: $8,000'],
u'2009': ['GNOME Outreach Program: Accessibility: $10,000', 'CSUN Accessibility Conference: $10,000'],
u'2010': ['General Accessibility Support: $10,000']
},
[
u'https://blog.mozilla.org/blog/2010/02/04/mozilla-gnome-accessibility/',
]
),
Grant(
u'ifosslr',
u'International Free and Open Source Software Law Review (IFOSSLR)',
u'Europe',
u'IFOSSLR Launch',
u'user-sovereignty',
u'$10,000',
2009,
u'<p>This grant funded the launch of the <a href="http://www.ifosslr.org/">'
u'International Free and Open Source Software Law Review (IFOSSLR)</a>, a '
u'collaborative legal publication aiming to increase knowledge and understanding '
u'among lawyers about Free and Open Source Software issues. Topics included copyright, '
u'licence implementation, licence interpretation, software patents, open standards, '
u'case law and statutory changes.</p>',
u'',
u'',
),
Grant(
u'mozdev',
u'MozDev',
u'United States',
u'MozDev Support',
u'open-source-technology',
u'$90,000',
2008,
u'<p>Mozilla supported the <a href="http://www.mozdev.org/about.html">'
u'MozDev Community Organization</a> by providing general funds to support '
u'MozDev’s operations. MozDev is a software development community dedicated '
u'to making quality applications and extensions freely available to all computer '
u'users. Its goal is to help establish Mozilla as a viable application development '
u'platform. Since 2006, Mozilla grants have funded the majority of MozDev’s budget. '
u'This support gives back to the community that contributes so much to establishing '
u'Mozilla as a viable application development platform and the community that builds '
u'quality applications and extensions.</p>',
{
u'2006': ['Amount: $30,000'],
u'2007': ['Amount: $30,000'],
u'2008': ['Amount: $30,000']
},
u'',
),
Grant(
u'nonprofit-software-development-summit',
u'Aspiration',
u'United States',
u'Nonprofit Software Development Summit',
u'free-culture-community',
u'$5,000',
2009,
u'<p>This grant supported the <a href="http://www.aspirationtech.org/events/devsummit09">'
u'Nonprofit Software Development Summit</a>, held November 18-20, 2009 in Oakland. '
u'This was the third annual convening of people and organizations developing software '
u'tools, web applications and other technology to support nonprofits and social '
u'justice causes. <a href="http://www.aspirationtech.org/">Aspiration</a>, '
u'the conference organizer, is a non-profit organization that connects nonprofits '
u'with software solutions that help them better carry out their work.</p>',
u'',
u'',
),
Grant(
u'open-source-software-institute',
u'Open Source Software Institute',
u'United States',
u'OCSP Stapling',
u'open-source-technology',
u'$30,000',
2007,
u'<p>This grant to the <a href="http://www.oss-institute.org/">'
u'Open Source Software Institute</a>, in cooperation with the NSS '
u'development team and Mozilla developers, investigated the problem of '
u'providing OCSP stapling support for Apache and other open source '
u'SSL/TLS-enabled server software incorporating the OpenSSL library. '
u'The Open Source Software Institute (OSSI) was identified as having '
u'extensive experience with OpenSSL, and was the lead organization '
u'responsible for getting US government FIPS 140-2 validation of OpenSSL.</p>',
u'',
u'',
),
Grant(
u'open-video-alliance',
u'Open Video Alliance',
u'United States',
u'Open Video Alliance',
u'free-culture-community',
u'$30,000',
2009,
u'<p>Mozilla offered support to <a href="http://openvideoalliance.org/">'
u'Open Video Alliance</a> activities in support of the open video movement. '
u'Open Video Alliance is a coalition of organizations and individuals committed '
u'to the idea that the power of the moving image should belong to everyone. '
u'This grant funded various efforts in the open video movement, such as the '
u'operations of openvideoalliance.org, the branding of open video products, '
u'outreach to the public media, fundraising and video production.</p>',
u'',
u'',
),
Grant(
u'perl-foundation',
u'Perl Foundation',
u'United States',
u'Perl6 Support',
u'open-source-technology',
u'$10,000',
2007,
u'<p>Mozilla provided a grant to the <a href="http://www.perlfoundation.org/">'
u'Perl Foundation</a>, a non-profit dedicated to the advancement of the Perl '
u'programming language through open discussion, collaboration, design and code. '
u'This grant supported the development of Perl 6.</p>',
u'',
u'',
),
Grant(
u'personal-democracy-forum',
u'Personal Democracy Forum',
u'United States',
u'Personal Democracy Forum',
u'user-sovereignty',
u'$15,000',
2009,
u'<p>For two years Mozilla sponsored the <a href="http://personaldemocracy.com/'
u'pdf-conference/personal-democracy-forum-conference">Personal Democracy Forum</a>, '
u'a forum for discussion on how politics and technology intersect. Each year top '
u'opinion-makers, political practitioners, technologists and journalists come '
u'together to network, exchange ideas and explore how technology and the internet '
u'are changing politics, democracy and society.</p>',
{
u'2008': ['Amount: $10,000'],
u'2009': ['Amount: $5,000']
},
u'',
),
Grant(
u'software-freedom-conservancy',
u'Software Freedom Conservancy',
u'United States',
u'Software Freedom Conservancy',
u'free-culture-community',
u'$30,000',
2012,
u'<p>Mozilla provided funding to help the '
u'<a href="http://conservancy.softwarefreedom.org/">Software Freedom Conservancy</a> '
u'serve additional open source projects and work more closely with peer projects. '
u'Since 2008, Mozilla\'s funding has helped the Conservancy to provide administrative, '
u'financial management, coordination and logistical services to twenty FLOSS '
u'(Free, Libre and Open Source Software) projects including Foresight Linux, '
u'Sugar Labs, jQuery, Amarok, Darcs, OpenInkpot, and K-3D.</p>',
{
u'2008': ['Amount: $10,000'],
u'2009': ['Amount: $10,000'],
u'2012': ['Amount: $10,000']
},
u'',
),
Grant(
u'seneca',
u'Seneca College',
u'Canada',
u'Seneca College',
u'learning-webmaking',
u'$327,860',
2011,
u'<p>Since 2005, <a href="http://www.senecac.on.ca/">Seneca College</a> '
u'in Toronto has worked closely with the Mozilla community to create a set '
u'of Mozilla-specific courses, engage hundreds of students directly in Mozilla '
u'development projects, and host and record dozens of Mozilla events and talks. '
u'Seneca’s faculty and students are key contributors to the Mozilla project, '
u'and have gained significant experience bootstrapping new contributors into the '
u'Mozilla technology and culture. Seneca College of Applied Arts and Technology is a '
u'community college for applied arts and technology in Toronto, Ontario. </p>',
{
u'2006': ['Amount: $50,000'],
u'2007': ['Amount: $100,000'],
u'2009': ['Amount: $80,910'],
u'2011': ['Amount: $96,950']
},
u'',
),
Grant(
u'leigh-school',
u'Leigh School',
u'New Zealand',
u'Leigh School',
u'learning-webmaking',
u'$2,500',
2009,
u'<p>This grant is supporting ICT components for courses and the purchase of '
u'equipment and software to support the ICT components of courses at '
u'<a href="http://www.leigh.school.nz/">Leigh School</a>, a primary school in '
u'New Zealand dedicated to a broad curriculum that includes computers and technology.</p>',
u'',
u'',
),
Grant(
u'peer2peer-university',
u'Phillip Schmidt (P2PU)',
u'United States',
u'Peer2Peer University',
u'learning-webmaking',
u'$25,500',
2011,
u'<p>Mozilla issued a grant to Phillip Schmidt in 2009 '
u'(<a href="http://www.p2pu.org/">P2PU</a>) to enable the creation of '
u'an online course called <a href="https://wiki.mozilla.org/Education/EduCourse">'
u'Open|Web|Content|Education</a>, where educators learned about open content licensing, '
u'open web technologies and open teaching methods. In 2011, Mozilla provided a '
u'grant to P2PU to support <a href="https://p2pu.org/en/schools/school-of-webcraft/sets/'
u'webmaking-101/">Webmaking 101</a> and the <a href="https://p2pu.org/en/groups/schools/'
u'school-of-webcraft/">School of Webcraft</a> community coordination.</p><p>P2PU combines '
u'open educational resources, structured courses, and recognition of knowledge and '
u'learning to offer high-quality low-cost education opportunities. It is run and '
u'governed by volunteers.</p>',
{
u'2009': ['Open|Web|Content|Education: $2,500'],
u'2011': ['Webmaking 101 - Project Management & School of Webcraft - Community Coordination: $23,000']
},
u'',
),
Grant(
u'ushaidi-chile',
u'Ushahidi',
u'United States and Chile',
u'Ushahidi Chile',
u'free-culture-community',
u'$10,000',
2010,
u'<p>In a crisis environment, maintaining lines of communication is critically important. '
u'<a href="http://www.ushahidi.com/">Ushahidi</a> developed an open source platform that '
u'enables citizen reporting in crisis situations. A deadly earthquake struck Chile on '
u'February 27, 2010, cutting off many vulnerable people from traditional sources of '
u'information. Mozilla awarded a grant to enable Ushahidi volunteers to train Chilean '
u'civilians and government officials to utilize the Ushahidi platform during the relief '
u'effort.</p><p>See the <a href="reports/ushahidi-chile-report/">final grant report</a> '
u'for more details.</p>',
u'',
[
u'http://blog.ushahidi.com/index.php/2010/03/15/mozilla-foundation-supports-ushahidi-chile/',
]
),
Grant(
u'atlan',
u'Atlan Laboratories',
u'United States',
u'FIPS 140-2 Validation',
u'open-source-technology',
u'$25,000',
2008,
u'<p>This grant to Atlan Labs, along with funding from Red Hat and Sun Microsystems, '
u'supported FIPS 140-2 validation for the latest version of Network Security Services '
u'(NSS). Federal Information Processing Standards Publications (FIPS PUBS) '
u'140-1 and 140-2 are US government standards for implementations of cryptographic '
u'modules - that is, hardware or software that encrypts and decrypts data or '
u'performs other cryptographic operations. Atlan Labs was a cybersecurity '
u'product testing firm based in McLean, Virginia that provided Federal Information '
u'Processing Standard (FIPS) 140-2 and 201 validations. Atlan was acquired by '
u'<a href="http://www.saic.com/infosec/testing-accreditation/">SAIC</a> in July 2009.</p>',
u'',
u'',
),
Grant(
u'automated-calendar-testing',
u'Merike Sell',
u'Estonia',
u'Calendar Automated Testing',
u'open-source-technology',
u'$4,500',
2009,
u'<p>This grant is funding the development of calendar automated testing for the '
u'Mozilla calendar code. This was originally an idea presented at the 2009 '
u'Google Summer of Code, and Mozilla Calendar developers became interested in '
u'funding technology that would enable automated testing. Merike Sell is an active '
u'member of the Mozilla developer and localization communites who live in Estonia.</p>',
u'',
u'',
),
Grant(
u'w3c-validator',
u'World Wide Web Consortium',
u'International',
u'W3C Validator',
u'open-source-technology',
u'$15,000',
2009,
u'<p>The Mozilla Foundation is a member of the <a href="http://www.w3.org/">'
u'World Wide Web Consortium</a>, and various Mozilla people represent Mozilla in '
u'W3C working groups and other W3C contexts. This grant was issued beyond Mozilla’s '
u'existing W3C membership dues, and funded work on '
u'<a href="http://jigsaw.w3.org/css-validator/">W3C CSS Validator</a> by giving to '
u'ERCIM, the W3C’s donation program.</p>',
u'',
u'',
),
Grant(
u'jambu',
u'Jambu',
u'United States',
u'Jambu',
u'open-source-technology',
u'$25,000',
2007,
u'<p><a href="www.oatsoft.org/Software/jambu">Jambu</a> is a pointer and switch '
u'project that improves accessibility for people with physical disabilities. '
u'This grant supported the improvement of switch access to Firefox on Windows, '
u'with the greater goal of providing transparent alternative input access to computers. '
u'Users served by this project may include adults who have experienced a debilitating '
u'accident or stroke, people with congenital physical disabilities, children with '
u'multiple disabilities, and those with learning difficulties or limited education '
u'who often need to learn to use a switch through specialist educational programs.</p>',
{
u'2006': ['Phase 1: $15,000'],
u'2007': ['Phase 2: $10,000'],
},
u'',
),
Grant(
u'nu',
u'Northeastern University',
u'United States',
u'Graduate-level work of PhD students at Northeastern University',
u'open-source-technology',
u'$283,085',
2010,
u'<p>Since 2009 Mozilla has supported the graduate-level work of PhD students at '
u'<a href="http://www.ccs.neu.edu/">Northeastern University</a>, developing new tools '
u'for the standardization, streamlining, and testing of JavaScript. In 2009 Mozilla '
u'contributed $99,115 to the research efforts of '
u'<a href="http://www.ccs.neu.edu/home/samth/">Sam Tobin-Hochstadt</a>. In 2010 '
u'Mozilla made two gifts: one of $107,596 to further support Mr. Tobin-Hochstadt’s '
u'research and another gift of $76,374 to <a href="http://www.ccs.neu.edu/home/dimvar/">'
u'Demetrios Vardoulakis</a>.</p>',
{
u'2009': ['PhD Research of Sam Tobin-Hochstadt: $99,115'],
u'2010': ['PhD research of Sam Tobin-Hochstadt and Demetrios Vardoulakis: $107,596 and $76,374']
},
u'',
),
Grant(
u'owasp',
u'OWASP',
u'United States',
u'The Open Web Application Security Project',
u'open-source-technology',
u'$15,000',
2010,
u'<p>This grant supports the <a href="http://www.owasp.org/index.php/Main_Page">'
u'Open Web Application Security Project</a>, which focuses on improving the security '
u'of application software. OWASP\'s mission is to make application security visible, '
u'so that people and organizations can make informed decisions about true '
u'application security risks.</p>',
u'',
u'',
),
Grant(
u'webaim',
u'WebAIM',
u'United States',
u'WebAIM',
u'open-source-technology',
u'$15,000',
2006,
u'<p>In 2006, Mozilla provided a grant to <a href="http://webaim.org/">WebAIM</a>, '
u'an accessibility organization based at Utah State University, to develop XUL '
u'accessibility guidelines and an accompanying evaluation tool. WebAIM has provided '
u'comprehensive web accessibility solutions since 1999. These years of experience '
u'have made WebAIM one of the leading providers of web accessibility expertise '
u'internationally. WebAIM is a non-profit organization within the Center for '
u'Persons with Disabilities at Utah State University.</p>',
u'',
u'',
),
]
|
glogiotatidis/bedrock
|
bedrock/grants/grants_db.py
|
Python
|
mpl-2.0
| 39,061
|
[
"ORCA"
] |
4f23c622e5ec5c7b8a7afa3176da1c16385a946b13ee18b2778273d72762d0a8
|
#!/usr/bin/env python
from optparse import OptionParser, OptionGroup
import re
import tempfile
from bs_align import output
from bs_align.bs_pair_end import *
from bs_align.bs_single_end import *
from bs_align.bs_rrbs import *
import os
#import re
#from bs_utils.utils import *
if __name__ == '__main__':
parser = OptionParser(usage="Usage: %prog {-i <single> | -1 <mate1> -2 <mate2>} -g <genome.fa> [options]")
# option group 1
opt_group = OptionGroup(parser, "For single end reads")
opt_group.add_option("-i", "--input", type="string", dest="infilename",help="Input read file (FORMAT: sequences, qseq, fasta, fastq). Ex: read.fa or read.fa.gz", metavar="INFILE")
parser.add_option_group(opt_group)
# option group 2
opt_group = OptionGroup(parser, "For pair end reads")
opt_group.add_option("-1", "--input_1", type="string", dest="infilename_1",help="Input read file, mate 1 (FORMAT: sequences, qseq, fasta, fastq)", metavar="FILE")
opt_group.add_option("-2", "--input_2", type="string", dest="infilename_2",help="Input read file, mate 2 (FORMAT: sequences, qseq, fasta, fastq)", metavar="FILE")
opt_group.add_option("-I", "--minins",type = "int",dest = "min_insert_size", help="The minimum insert size for valid paired-end alignments [Default: %default]", default = 0)
opt_group.add_option("-X", "--maxins",type = "int",dest = "max_insert_size", help="The maximum insert size for valid paired-end alignments [Default: %default]", default = 500)
parser.add_option_group(opt_group)
# option group 3
opt_group = OptionGroup(parser, "Reduced Representation Bisulfite Sequencing Options")
opt_group.add_option("-r", "--rrbs", action="store_true", dest="rrbs", default = False, help = 'Map reads to the Reduced Representation genome')
opt_group.add_option("-c", "--cut-site", type="string",dest="cut_format", help="Cutting sites of restriction enzyme. Ex: MspI(C-CGG), Mael:(C-TAG), double-enzyme MspI&Mael:(C-CGG,C-TAG). [Default: %default]", metavar="pattern", default = "C-CGG")
opt_group.add_option("-L", "--low", type = "int", dest="rrbs_low_bound", help="Lower bound of fragment length (excluding C-CGG ends) [Default: %default]", default = 20)
opt_group.add_option("-U", "--up", type = "int", dest="rrbs_up_bound", help="Upper bound of fragment length (excluding C-CGG ends) [Default: %default]", default = 500)
parser.add_option_group(opt_group)
# option group 4
opt_group = OptionGroup(parser, "General options")
opt_group.add_option("-t", "--tag", type="string", dest="taginfo",help="[Y]es for undirectional lib, [N]o for directional [Default: %default]", metavar="TAG", default = 'N')
opt_group.add_option("-s","--start_base",type = "int",dest = "cutnumber1", help="The first cycle of the read to be mapped [Default: %default]", default = 1)
opt_group.add_option("-e","--end_base",type = "int",dest = "cutnumber2", help="The last cycle of the read to be mapped [Default: %default]", default = 200)
opt_group.add_option("-a", "--adapter", type="string", dest="adapter_file",help="Input text file of your adaptor sequences (to be trimmed from the 3'end of the reads, ). "
"Input one seq for dir. lib., twon seqs for undir. lib. One line per sequence. "
"Only the first 10bp will be used", metavar="FILE", default = '')
opt_group.add_option("--am",type = "int",dest = "adapter_mismatch", help="Number of mismatches allowed in adapter [Default: %default]", default = 0)
opt_group.add_option("-g", "--genome", type="string", dest="genome",help="Name of the reference genome (should be the same as \"-f\" in bs_seeker2-build.py ) [ex. chr21_hg18.fa]")
opt_group.add_option("-m", "--mismatches",type = "float", dest="no_mismatches",help="Number(>=1)/Percentage([0, 1)) of mismatches in one read. Ex: 4 (allow 4 mismatches) or 0.04 (allow 4% mismatches) [Default: %default]", default = 4)
opt_group.add_option("--aligner", dest="aligner",help="Aligner program for short reads mapping: " + ', '.join(supported_aligners) + " [Default: %default]", metavar="ALIGNER", default = BOWTIE)
opt_group.add_option("-p", "--path", dest="aligner_path", help="Path to the aligner program. Detected: " +' '*70+ '\t'.join(('%s: %s '+' '*70) % (al, aligner_path[al]) for al in sorted(supported_aligners)),
metavar="PATH"
)
opt_group.add_option("-d", "--db", type="string", dest="dbpath",help="Path to the reference genome library (generated in preprocessing genome) [Default: %default]" , metavar="DBPATH", default = reference_genome_path)
opt_group.add_option("-l", "--split_line",type = "int", dest="no_split",help="Number of lines per split (the read file will be split into small files for mapping. The result will be merged. [Default: %default]", default = 4000000, metavar="INT")
opt_group.add_option("-o", "--output", type="string", dest="outfilename",help="The name of output file [INFILE.bs(se|pe|rrbs)]", metavar="OUTFILE")
opt_group.add_option("-f", "--output-format", type="string", dest="output_format",help="Output format: "+', '.join(output.formats)+" [Default: %default]", metavar="FORMAT", default = output.BAM)
opt_group.add_option("--no-header", action="store_true", dest="no_SAM_header",help="Suppress SAM header lines [Default: %default]", default = False)
try:
opt_group.add_option("--temp_dir", type="string", dest="temp_dir",help="The path to your temporary directory [Detected: %default]", metavar="PATH", default = os.environ["TMPDIR"])
except KeyError:
opt_group.add_option("--temp_dir", type="string", dest="temp_dir",help="The path to your temporary directory [Detected: %default]", metavar="PATH", default = tempfile.gettempdir())
opt_group.add_option("--XS",type = "string", dest="XS_filter",help="Filter definition for tag XS, format X,Y. X=0.8 and y=5 indicate that for one read, if #(mCH sites)/#(all CH sites)>0.8 and #(mCH sites)>5, then tag XS=1; or else tag XS=0. [Default: %default]", default = "0.5,5") # added by weilong
opt_group.add_option("-M", "--multiple-hit", metavar="FileName", type="string", dest="Output_multiple_hit", default = None, help = 'File to store reads with multiple-hits')
opt_group.add_option("-u", "--unmapped", metavar="FileName", type="string", dest="Output_unmapped_hit", default = None, help = 'File to store unmapped reads')
opt_group.add_option("-v", "--version", action="store_true", dest="version",help="show version of BS-Seeker2", metavar="version", default = False)
parser.add_option_group(opt_group)
# option group 5
opt_group = OptionGroup(parser, "Aligner Options",
"You may specify any additional options for the aligner. You just have to prefix them with " +
', '.join('%s for %s' % (aligner_options_prefixes[aligner], aligner) for aligner in supported_aligners)+
', and BS-Seeker2 will pass them on. For example: --bt-p 4 will increase the number of threads for bowtie to 4, '
'--bt--tryhard will instruct bowtie to try as hard as possible to find valid alignments when they exist, and so on. ')
parser.add_option_group(opt_group)
#----------------------------------------------------------------
# separate aligner options from BS Seeker options
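# e.g. for bowtie, "--bt-p 4" on the command line becomes aligner_options['-p'] = ['4'],
# and a value-less flag such as "--bt--tryhard" becomes aligner_options['--tryhard'] = True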
aligner_options = {}
bs_seeker_options = []
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
m = re.match(r'^%s' % '|'.join('(%s)'% aligner_options_prefixes[al] for al in supported_aligners), arg)
if m:
a_opt = arg.replace(m.group(0),'-',1)
aligner_options[a_opt] = []
while i + 1 < len(sys.argv) and sys.argv[i+1][0] != '-':
aligner_options[a_opt].append(sys.argv[i+1])
i += 1
if len(aligner_options[a_opt]) == 0: # if it is a key-only option
aligner_options[a_opt] = True
else:
bs_seeker_options.append(arg)
i += 1
(options, args) = parser.parse_args(args = bs_seeker_options)
# if no options were given by the user, print help and exit
if len(sys.argv) == 1:
parser.print_help()
exit(0)
if options.version :
show_version()
exit (-1)
else :
show_version()
# check parameters
# input read files
if options.infilename and (options.infilename_1 or options.infilename_2):
error('-i and [-1|-2] options are exclusive. You should use only one of them.')
if not (options.infilename or (options.infilename_1 and options.infilename_2)):
error('You should set either -i or -1 and -2 options.')
# Calculate the length of read
if options.infilename :
read_file = options.infilename
elif options.infilename_1 :
read_file = options.infilename_1
else :
error('You should at least specify -i or -1 options.')
try :
if read_file.endswith(".gz") : # support input file ending with ".gz"
read_inf = gzip.open(read_file, "rb")
else :
read_inf=open(read_file,"r")
except IOError :
print "[Error] Cannot open input file : %s" % read_file
exit(-1)
oneline = read_inf.readline()
oneline = read_inf.readline() # get the second line
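# the second line holds the read sequence for fasta/fastq input; use its length as a proxy for the read length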
read_len = min(len(oneline), (options.cutnumber2-options.cutnumber1))
read_inf.close()
# mismatches allowed: bowtie 1, built-in parameter '-m'; bowtie 2, post-filter parameter
# the number of mismatches should be no greater than the read length
no_mismatches = float(options.no_mismatches)
if (no_mismatches < 1) :
int_no_mismatches=int(no_mismatches * read_len)
else :
int_no_mismatches=int(no_mismatches)
str_no_mismatches=str(options.no_mismatches) # pass to specific mode
# -t, directional / un-directional library
asktag=str(options.taginfo).upper()
if asktag not in 'YN':
error('-t option should be either Y or N, not %s' % asktag)
# --aligner
if options.aligner not in supported_aligners:
error('--aligner option should be one of: %s' % ', '.join(supported_aligners) + '.')
# path for aligner
aligner_exec = os.path.expanduser( os.path.join(options.aligner_path or aligner_path[options.aligner], options.aligner) )
# -g
if options.genome is None:
error('-g is a required option')
genome = os.path.split(options.genome)[1]
genome_subdir = genome
# try to guess the location of the reference genome for RRBS
if options.rrbs:
if options.rrbs_low_bound and options.rrbs_up_bound:
if options.cut_format == "C-CGG" :
genome_subdir += '_rrbs_%d_%d' % (options.rrbs_low_bound, options.rrbs_up_bound)
else :
genome_subdir += '_rrbs_%s_%d_%d' % ( re.sub(",","-",re.sub("-", "", options.cut_format)), options.rrbs_low_bound, options.rrbs_up_bound)
else:
possible_refs = filter(lambda dir: dir.startswith(genome+'_rrbs_'), os.listdir(options.dbpath))
if len(possible_refs) == 1:
genome_subdir = possible_refs[0]
else:
error('Cannot localize unambiguously the reference genome for RRBS. '
'Please, specify the options \"--low\" and \"--up\" that you used at the index-building step.\n'
'Possible choices are:\n' + '\n'.join([pr.split('_rrbs_')[-1].replace('_',', ') for pr in possible_refs]))
db_path = os.path.expanduser(os.path.join(options.dbpath, genome_subdir + '_' + options.aligner))
if not os.path.isdir(db_path):
error('Index DIR \"' + genome_subdir + '..\" cannot be found in ' + options.dbpath +'.\n\tPlease run the bs_seeker2-build.py '
'to create it with the correct parameters for -g, -r, --low, --up and --aligner.')
# default aligner options
aligner_options_defaults = {
BOWTIE : { '-e' : 40*int_no_mismatches,
'--nomaqround' : True,
'--norc' : True,
#'-k' : 2,
# -k=2; report two best hits, and filter by error rates
'--quiet' : True,
'--best' : True,
# '--suppress' : '2,5,6',
'--sam' : True,
'--sam-nohead' : True,
'-p' : 2
},
BOWTIE2 : {
#'-M' : 5,
'--norc' : True,
'--quiet' : True,
'-p' : 2,
'--sam-nohead' : True,
# run bowtie2 in local mode by default
'--local' : '--end-to-end' not in aligner_options,
#'--mm' : True,
#'-k' : 2
},
SOAP : { '-v' : int_no_mismatches,
'-p' : 2,
'-r' : 2,
'-M' : 4
},
RMAP : { '-M' : 2
# to do # control for only mapping on + strand
}
}
if '--end-to-end' not in aligner_options:
aligner_options_defaults[BOWTIE2].update({'-D' : 50})
#aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-R': 3, '-N': 0, '-L': 15, '-i' : 'S,1,0.50'})
else:
aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-L': 15, '--score-min': 'L,-0.6,-0.6' })
aligner_options = dict(aligner_options_defaults[options.aligner], **aligner_options)
aligner_options_string = lambda : ' %s ' % (' '.join(opt_key +
(' ' + ' '.join(map(str,opt_val)) # join all values if the value is an array
if type(opt_val) is list else
('' if type(opt_val) is bool and opt_val # output an empty string if it is a key-only option
else ' ' +str(opt_val)) # output the value if it is a single value
)
for opt_key, opt_val in aligner_options.iteritems() if opt_val not in [None, False]))
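# e.g. {'-p': 2, '--quiet': True} renders as ' -p 2 --quiet ' (dict order is not guaranteed)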
# tmp_path = (options.outfilename or options.infilename or options.infilename_1) +'-'+ options.aligner+ '-TMP'
# clear_dir(tmp_path)
options.output_format = options.output_format.lower()
if options.output_format not in output.formats:
error('Output format should be one of: ' + ', '.join(output.formats))
if options.outfilename:
outfilename = options.outfilename
logfilename = outfilename
elif options.infilename is not None:
logfilename = options.infilename+'_'+ ('rr' if options.rrbs else '') + 'bsse'
outfilename = logfilename + '.' + options.output_format
else:
logfilename = options.infilename_1+'_'+ ('rr' if options.rrbs else '') + 'bspe'
outfilename = logfilename + '.' + options.output_format
outfilename = os.path.expanduser(outfilename)
logfilename = os.path.expanduser(logfilename)
outfile = output.outfile(outfilename, options.output_format, deserialize(os.path.join(db_path, 'refname')), ' '.join(sys.argv), options.no_SAM_header)
open_log(logfilename+'.bs_seeker2_log')
aligner_title = options.aligner
if options.aligner == BOWTIE2 :
if '--end-to-end' in aligner_options :
aligner_title = aligner_title + "-e2e"
else:
aligner_title = aligner_title + "-local"
if options.aligner == BOWTIE :
logm("Mode: Bowtie")
elif options.aligner == BOWTIE2 :
if '--end-to-end' not in aligner_options :
logm("Mode: Bowtie2, local alignment")
else :
logm("Mode: Bowtie2, end-to-end alignment")
tmp_path = tempfile.mkdtemp(prefix='bs_seeker2_%s_-%s-TMP-' % (os.path.split(outfilename)[1], aligner_title ), dir = options.temp_dir)
(XS_x, XS_y) = options.XS_filter.split(",")
XS_pct = float(XS_x)
XS_count = int(XS_y)
logm('Filter for tag XS: #(mCH)/#(all CH)>%.2f%% and #(mCH)>%d' % (XS_pct*100, XS_count))
logm('Temporary directory: %s' % tmp_path)
logm('Reduced Representation Bisulfite Sequencing: %s' % str(options.rrbs))
if options.infilename is not None:
logm('Single end')
aligner_command = aligner_exec + aligner_options_string() + \
{ BOWTIE : ' -k 2 %(reference_genome)s -f %(input_file)s %(output_file)s',
BOWTIE2 : ' -k 2 -x %(reference_genome)s -f -U %(input_file)s -S %(output_file)s',
SOAP : ' -D %(reference_genome)s.fa.index -o %(output_file)s -a %(input_file)s',
RMAP : ' -c %(reference_genome)s.fa -o %(output_file)s %(input_file)s'
}[options.aligner]
logm ('Aligner command: %s' % aligner_command)
# single end reads
if options.rrbs: # RRBS scan
bs_rrbs(options.infilename,
asktag,
options.adapter_file,
int(options.cutnumber1),
int(options.cutnumber2),
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit,
options.cut_format
)
else: # Normal single end scan
bs_single_end( options.infilename,
asktag,
options.adapter_file,
int(options.cutnumber1),
int(options.cutnumber2),
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit
)
else:
logm('Pair end')
# pair end specific default options
aligner_options = dict({BOWTIE: {'--fr' : True,
'-X' : options.max_insert_size,
'-I' : options.min_insert_size if options.min_insert_size > 0 else None,
'-a' : True # "-k 2" in bowtie would not report the best two
},
BOWTIE2 : {
'--fr' : True,
'-X' : options.max_insert_size,
'-I' : options.min_insert_size if options.min_insert_size > 0 else None,
'--no-discordant' : True,
'--no-mixed' : True,
'-k' : 2
},
SOAP: {
'-x' : options.max_insert_size,
'-m' : options.min_insert_size if options.min_insert_size > 0 else 100
}}[options.aligner],
# integrating 'rmappe' is different from others
**aligner_options)
aligner_command = aligner_exec + aligner_options_string() + \
{ BOWTIE : ' %(reference_genome)s -f -1 %(input_file_1)s -2 %(input_file_2)s %(output_file)s',
BOWTIE2 : ' -x %(reference_genome)s -f -1 %(input_file_1)s -2 %(input_file_2)s -S %(output_file)s',
SOAP : ' -D %(reference_genome)s.fa.index -o %(output_file)s -a %(input_file_1)s -b %(input_file_2)s -2 %(output_file)s.unpaired' #,
# RMAP : # rmappe, also paste two inputs into one file.
}[options.aligner]
logm('Aligner command: %s' % aligner_command)
if '--end-to-end' not in aligner_options:
aligner_options_defaults[BOWTIE2].update({'-D' : 50})
else:
aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-L': 15, '--score-min': 'L,-0.6,-0.6' })
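# note: aligner_options was already merged and aligner_command already built above,
# so these updates to the defaults have no further effect in the pair-end branch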
bs_pair_end(options.infilename_1,
options.infilename_2,
asktag,
options.adapter_file,
int(options.cutnumber1),
int(options.cutnumber2),
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit
)
outfile.close()
|
BioInfoTools/BSVF
|
bin/BSseeker2/bs_seeker2-align.py
|
Python
|
lgpl-3.0
| 22,609
|
[
"Bowtie"
] |
b63351d00892fd69582836c51a21330f1155f069b5dde1a17ceef3475952bdbf
|
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import time
class CookiesTest:
def test_create_and_access_a_cookie(self):
"""Should be able to create and access a cookie"""
browser = self.get_new_browser()
browser.visit(self.EXAMPLE_APP)
browser.cookies.add({"sha": "zam"})
assert "zam" == browser.cookies["sha"]
browser.quit()
def test_create_many_cookies_at_once_as_dict(self):
"""Should be able to create many cookies at once as dict"""
browser = self.get_new_browser()
browser.visit(self.EXAMPLE_APP)
cookies = {"sha": "zam", "foo": "bar"}
browser.cookies.add(cookies)
assert "zam" == browser.cookies["sha"]
assert "bar" == browser.cookies["foo"]
browser.quit()
def test_create_some_cookies_and_delete_them_all(self):
"""Should be able to delete all cookies"""
browser = self.get_new_browser()
browser.visit(self.EXAMPLE_APP)
browser.cookies.add({"whatever": "and ever"})
browser.cookies.add({"anothercookie": "im bored"})
browser.cookies.delete()
assert {} == browser.cookies
browser.quit()
def test_create_and_delete_a_cookie(self):
"""Should be able to create and destroy a cookie"""
browser = self.get_new_browser()
browser.visit(self.EXAMPLE_APP)
browser.cookies.delete()
browser.cookies.add({"cookie": "with milk"})
browser.cookies.delete("cookie")
assert {} == browser.cookies
browser.quit()
def test_create_and_delete_many_cookies(self):
"""Should be able to create and destroy many cookies"""
browser = self.get_new_browser()
browser.visit(self.EXAMPLE_APP)
browser.cookies.delete()
browser.cookies.add({"acookie": "cooked"})
browser.cookies.add({"anothercookie": "uncooked"})
browser.cookies.add({"notacookie": "halfcooked"})
browser.cookies.delete("acookie", "notacookie")
assert "uncooked" == browser.cookies["anothercookie"]
browser.quit()
def test_try_to_destroy_an_absent_cookie_and_nothing_happens(self):
browser = self.get_new_browser()
browser.visit(self.EXAMPLE_APP)
browser.cookies.delete()
browser.cookies.add({"foo": "bar"})
browser.cookies.delete("mwahahahaha")
{"foo": "bar"} == browser.cookies
browser.quit()
def test_create_and_get_all_cookies(self):
"""Should be able to create some cookies and retrieve them all"""
browser = self.get_new_browser()
browser.visit(self.EXAMPLE_APP)
browser.cookies.delete()
browser.cookies.add({"taco": "shrimp"})
browser.cookies.add({"lavar": "burton"})
assert 2 == len(browser.cookies.all())
browser.cookies.delete()
assert {} == browser.cookies.all()
browser.quit()
def test_create_and_use_contains(self):
"""Should be able to create many cookies at once as dict"""
browser = self.get_new_browser()
browser.visit(self.EXAMPLE_APP)
cookies = {"sha": "zam"}
browser.cookies.add(cookies)
assert "sha" in browser.cookies
assert "foo" not in browser.cookies
browser.quit()
def test_cookies_extra_parameters(self):
"""Cookie can be created with extra parameters."""
timestamp = int(time.time() + 120)
self.browser.cookies.add({'sha': 'zam'}, expiry=timestamp)
cookie = self.browser.driver.get_cookie('sha')
assert timestamp == cookie["expiry"]
|
cobrateam/splinter
|
tests/cookies.py
|
Python
|
bsd-3-clause
| 3,771
|
[
"VisIt"
] |
258664378e01a1b346e03c721e75a37761b3592a8642e60108dc4b2098e9817d
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# sphinxext.py - Kaa specific extensions for Sphinx
# -----------------------------------------------------------------------------
# Copyright 2009-2012 Dirk Meyer, Jason Tackaberry
#
# Please see the file AUTHORS for a complete list of authors.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version
# 2.1 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# -----------------------------------------------------------------------------
"""
Defines the following new directives:
.. kaaclass:: kaa.SomeClassName
Top-most directive for all other custom kaa directives. There are no
options.
A synopsis is automatically included, which provides (in this order)
- The class hierarchy
- Any class attributes explicitly provided via the classattrs
directive.
- Methods via the automethod directive
- Properties via the autoproperties directive
- Signals via the autosignals directive
Any of following directives can be nested inside a kaaclass directive.
Arguments following the directive are ignored (the class name is taken
from the enclosing kaaclass directive):
.. classattrs::
.. attribute:: SOME_CONSTANT
Description of class variable SOME_CONSTANT.
.. attribute:: [...]
Any number of attribute directives may be nested under a classattrs
directive. They will all be included in the Class Attributes
synopsis table in the order specified here.
.. automethods::
Automatically insert all methods defined in the class specified in the
outer kaaclass directive. Additional methods may be defined like so:
.. method:: additional_method(arg1, arg2)
A brief, one-line description of additional_method()
:param arg1: don't forget to document any arguments.
Takes the following options:
:inherit:
Includes all members from parent classes.
:add: meth1[, meth2[, meth3[, ...]]]
Includes the methods specified from parent classes.
:remove: meth1[, meth2[, meth3[, ...]]]
Prevents the specified methods from appearing where they would
normally be auto-included.
:order: meth1[, meth2[, meth3[, ...]]]
Overrides the order for which the methods are listed. Not all
methods need to be specified here: methods that are specified
will be listed first and in the given order. All other methods
will follow in the canonical order.
.. autoproperties::
Automatically insert all properties defined in the class specified in the
outer kaaclass directive. Additional properties (or attributes that
aren't necessarily implemented as properties) may be defined like so:
.. attribute:: some_other_prop
A brief, one-line description of some_other_prop.
More detailed description if desired.
Options are the same as the automethods directive.
.. autosignals::
Automatically insert all signals defined in the class specified in the
outer kaaclass directive. Additional signals may be defined like so:
.. attribute:: signals.some_other_signal
A brief, one-line description of some_other_signal.
.. describe:: def callback(arg1, arg2, ...)
:param arg1: don't forget to document callback arguments.
A more detailed description of signal, if desired.
Note that the signal name following the attribute directive is
prefixed with 'signals.' This is important. The 'signals.' part
is stripped for display purposes.
Options are the same as the automethods directive.
Example usage:
.. kaaclass:: kaa.SomeClass
.. classattrs::
.. attribute:: SOME_CONST
Definition of SOME_CONST.
Can of course contain :attr:`references`.
.. attribute:: some_other_class_variable
.. automethods::
:add: superclass_method_foo
:remove: deprecated_method
:order: superclass_method_foo, read, write, custom_method, close
.. method:: custom_method(arg1, arg2)
Short description of custom_method.
:param arg1: and of course the argument descriptions.
:param arg2: same here.
Additional info about custom_method which won't show up in the
synopsis table.
.. autoproperties::
:inherit:
:remove: stupid_super_class_method
.. autosignals::
"""
# Python imports
import re
import collections
# Sphinx imports
from sphinx.util.compat import make_admonition
from sphinx.ext.autodoc import prepare_docstring
import sphinx.addnodes
# Docutils imports
from docutils.parsers.rst import directives
from docutils import nodes
from docutils.statemachine import ViewList, StringList
# Kaa imports
from kaa.core import Object
# Custom nodes
class synopsis(nodes.paragraph):
@staticmethod
def visit(document, node):
document.body.append('<div class="heading">%s</div>' % node['title'])
if node['title'] != 'Class Hierarchy':
if node['has_members']:
document.body.append('\n<table>\n')
else:
document.body.append('<div class="nomembers">This class has no %s.</div>' % node['title'].lower())
@staticmethod
def depart(document, node):
if node['title'] != 'Class Hierarchy':
if node['has_members']:
document.body.append('</table>\n')
class hierarchy_row(nodes.paragraph):
@staticmethod
def visit(document, node):
prefix = '%s%s' % (' ' * 5 * (node.level-1), ('', '└─ ')[node.level != 0])
document.body.append(prefix)
if node.level == node.depth:
document.body.append('<tt class="xref docutils literal current">')
@staticmethod
def depart(document, node):
if node.level == node.depth:
document.body.append('</tt>')
document.body.append('<br />')
class td(nodes.paragraph):
@staticmethod
def visit(document, node):
if node.attributes.get('heading'):
document.body.append('<th>')
else:
document.body.append(document.starttag(node, 'td', ''))
@staticmethod
def depart(document, node):
if node.attributes.get('heading'):
document.body.append('</th>')
else:
document.body.append('</td>')
class subsection(nodes.paragraph):
@staticmethod
def visit(document, node):
document.body.append('<h4>%s</h4>' % node['title'])
if node['title'] == 'Synopsis':
document.body.append('<div class="kaa synopsis">\n')
@staticmethod
def depart(document, node):
if node['title'] == 'Synopsis':
document.body.append('\n</div>\n')
def get_signals(cls, inherit, add, remove):
if inherit:
signals = Object._get_all_signals(cls)
else:
signals = getattr(cls, '__kaasignals__', {}).copy()
if add:
all = Object._get_all_signals(cls)
for key in add:
signals[key] = all[key]
for key in remove:
del signals[key]
for key, val in signals.items():
yield key, val
def get_members(cls, inherit, add, remove, pre_filter, post_filter):
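# generic member enumeration: pre_filter screens candidate names before the
# add/remove adjustments, post_filter screens the resolved attributes afterwards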
if inherit:
keys = dir(cls)
else:
keys = cls.__dict__.keys()
keys = set([ name for name in keys if pre_filter(name, getattr(cls, name)) ])
keys.update(set(add))
keys = keys.difference(set(remove))
keys = [ name for name in keys if post_filter(name, getattr(cls, name)) ]
for name in sorted(keys):
yield name, getattr(cls, name)
def get_methods(cls, inherit=False, add=[], remove=[]):
return get_members(cls, inherit, add, remove,
lambda name, attr: not name.startswith('_'),
lambda name, attr: isinstance(attr, collections.Callable))
def get_properties(cls, inherit=False, add=[], remove=[]):
return get_members(cls, inherit, add, remove,
lambda name, attr: not name.startswith('_'),
lambda name, attr: isinstance(attr, property))
def get_class(fullname):
mod, clsname = fullname.rsplit('.', 1)
cls = getattr(__import__(mod, None, None, ['']), clsname)
return cls
def normalize_class_name(mod, name):
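# try the shortest importable module prefix first, e.g. ('kaa.base.core', 'Object')
# normalizes to 'kaa.Object' if that import succeeds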
for i in reversed(range(mod.count('.')+1)):
fullname = '%s.%s' % (mod.rsplit('.', i)[0], name)
try:
get_class(fullname)
break
except (ImportError, AttributeError):
pass
else:
fullname = '%s.%s' % (mod, name)
# Special exception for kaa.base: rename 'base' to 'kaa'
for prefix in ('base.', 'kaa.base.'):
if fullname.startswith(prefix):
fullname = 'kaa.' + fullname[len(prefix):]
return fullname
def append_class_hierarchy(node, state, cls, level=0, clstree=None):
if clstree is None:
clstree = []
name = normalize_class_name(cls.__module__, cls.__name__)
clstree.append((level, name))
for c in cls.__bases__:
if c != object:
append_class_hierarchy(node, state, c, level+1, clstree)
if level == 0:
clstree = sorted(set(clstree), key=lambda x: -x[0])
depth = max(clstree, key=lambda x: x[0])[0]
for level, name in [ (abs(level-depth), cls) for level, cls in clstree ]:
row = hierarchy_row()
row.level, row.depth = level, depth
if level != depth:
name = ':class:`%s`' % name
list = ViewList()
list.append(name, '')
state.nested_parse(list, 0, row)
node.append(row)
def auto_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
inherit = 'inherit' in options
add = options.get('add', [])
remove = options.get('remove', [])
cls = env._kaa_current_class
clsname = env._kaa_current_class_name
env._kaa_class_info.append(name)
list = ViewList()
section = subsection()
section['title'] = name[4:].title()
if name == 'automethods':
for attrname, method in get_methods(cls, inherit, add, remove):
list.append('.. automethod:: %s.%s' % (clsname, attrname), '')
elif name == 'autoproperties':
for attrname, prop in get_properties(cls, inherit, add, remove):
list.append('.. autoattribute:: %s.%s' % (clsname, attrname), '')
elif name == 'autosignals':
for attrname, docstr in get_signals(cls, inherit, add, remove):
list.append('.. attribute:: signals.%s' % attrname, '')
list.append('', '')
for line in docstr.split('\n'):
list.append(line, '')
list.append('', '')
if not len(list) and not content:
return []
state.nested_parse(list, 0, section)
state.nested_parse(content, 0, section)
if name == 'autosignals':
# We're using signals.foo for signals attribute names. We don't
# want to output 'signals.' for the displayable signal name, so
# we need to strip that out.
for child in section.children:
if not isinstance(child, sphinx.addnodes.desc) or not child.children:
continue
# Change displayed signal name from signals.foo to foo.
desc_sig = child.children[0]
name_prefix = str(desc_sig[0].children[0])
if name_prefix != 'signals.':
# Signal names can have dashes (-) but if they do, sphinx
# considers this an invalid attribute name (because we're
# using '.. attribute') and so generates
# <desc_name>signals.foo-bar</descname>
# and the desc_signature has no ids attribute, which we
# need to set to make it linkable.
desc_sig[0].children[0] = nodes.Text(name_prefix[8:])
sig_id = '%s.%s' % (clsname, name_prefix)
else:
# Removes <descaddname>signals.</descaddname>
desc_sig.remove(desc_sig[0])
sig_id = '%s.signals.%s' % (clsname, desc_sig[0].children[0])
# Add this signal to Sphinx's descref dict so references
# to this signal are properly resolved.
desc_sig['ids'] = [sig_id]
if hasattr(env, 'domains'):
# Sphinx 1.0
env.domains['py'].data['objects'][sig_id] = (env.docname, 'attribute')
elif hasattr(env, 'descrefs'):
# Sphinx 0.6
env.descrefs[sig_id] = (env.docname, 'attribute')
if 'order' in options:
def keyfunc(member):
try:
return options['order'].index(member[0])
except ValueError:
return 100000
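# members not named in :order: get a large sort key, so they follow the explicitly ordered ones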
sorted = section.copy()
members = [] # (name, [child1, child2, ...])
for node in section.children:
if isinstance(node, sphinx.addnodes.index):
name = node['entries'][0][1].split()[0].rstrip('()')
members.append((name, []))
members[-1][1].append(node)
members.sort(key=keyfunc)
for name, children in members:
sorted.extend(children)
section = sorted
return [section]
def members_option(arg):
if arg is None:
return ['__all__']
return [ x.strip() for x in arg.split(',') ]
def classattrs_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
section = subsection()
section['title'] = 'Class Attributes'
state.nested_parse(content, 0, section)
return [section]
def kaaclass_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
env._kaa_class_info = []
cls = get_class(arguments[0])
env._kaa_current_class = cls
env._kaa_current_class_name = clsname = arguments[0]
list = ViewList()
list.append('.. autoclass:: %s' % arguments[0], '')
list.append('', '')
if 'synopsis' in options:
list.append(' .. autosynopsis::', '')
list.append('', '')
for line in content:
list.append(' ' + line, '')
para = nodes.paragraph()
state.nested_parse(list, 0, para)
return [para]
def synopsis_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
cls = env._kaa_current_class
clsname = env._kaa_current_class_name
env.currmodule, env.currclass = clsname.rsplit('.', 1)
para = nodes.paragraph()
section_synopsis = subsection(title='Synopsis')
para.append(section_synopsis)
state.nested_parse(content, 0, para)
syn = synopsis(title='Class Hierarchy')
syn_para = nodes.paragraph(classes=['hierarchy'])
section_synopsis.append(syn)
append_class_hierarchy(syn_para, state, cls)
syn.append(syn_para)
ci = env._kaa_class_info
append_synopsis_section(state, section_synopsis, para, 'Class Attributes', 'attr', 'classattrs' not in ci)
append_synopsis_section(state, section_synopsis, para, 'Methods', 'meth', 'automethods' not in ci)
append_synopsis_section(state, section_synopsis, para, 'Properties', 'attr', 'autoproperties' not in ci)
append_synopsis_section(state, section_synopsis, para, 'Signals', 'attr', 'autosignals' not in ci)
return [para]
def find_subsection_node(search_node, title):
for node in search_node.traverse(subsection):
if node['title'] == title:
return node
def append_synopsis_section(state, section_synopsis, search_node, title, role, optional=False):
env = state.document.settings.env
clsname = env._kaa_current_class_name
cls = env._kaa_current_class
# Crawl through the nodes for section titled the given title ('Methods',
# 'Properties', etc) and look for all the <desc> nodes, which contain
# methods or attributes. Construct a list called members whose first
# element contains the name of the member, and whose last element contains
# the first paragraph node of the description.
members = []
subsection_node = find_subsection_node(search_node, title)
if subsection_node and subsection_node.children:
desc_nodes = subsection_node.children[0].traverse(sphinx.addnodes.desc, descend=0, siblings=1)
else:
desc_nodes = []
for node in desc_nodes:
sig = node.first_child_matching_class(sphinx.addnodes.desc_signature)
content = node.first_child_matching_class(sphinx.addnodes.desc_content)
pidx = node.children[content].first_child_matching_class(nodes.paragraph)
name = node.children[sig]['ids'][0].split('.')[-1]
desc = nodes.Text('')
if pidx is not None:
desc = node.children[content].children[pidx].deepcopy()
if subsection_node['title'] == 'Properties':
prop = getattr(cls, name.split('.')[-1], None)
perm = 'unknown'
if prop:
if prop.fset and not prop.fget:
perm = 'write-only'
elif prop.fget and not prop.fset:
perm = 'read-only'
else:
perm = 'read/write'
members.append((name, nodes.Text(perm), desc))
else:
members.append((name, desc))
# If no members found and this section is optional (Class Attributes),
# we're done.
if not members and optional:
return
# Create a new synopsis section with the given title.
syn = synopsis(title=title, has_members=bool(members))
section_synopsis.append(syn)
# Loop through all members and add rows to the synopsis section table.
for info in members:
row = nodes.row()
syn.append(row)
# First columns is a <th> with the member name, cross referenced
# to the actual member on this page.
name = info[0]
col = td(heading=True)
row.append(col)
list = ViewList()
if title == 'Signals':
name = 'signals.' + name
list.append(':%s:`~%s`' % (role, clsname + '.' + name), '')
state.nested_parse(list, 0, col)
# Add remaining columns from member info.
for col_info in info[1:]:
col = td()
col.append(col_info)
row.append(col)
# Last column has 'desc' class (disables nowrap).
col['classes'] = ['desc']
def setup(app):
auto_options = {
'inherit': directives.flag,
'add': members_option,
'remove': members_option,
'order': members_option,
}
app.add_node(subsection, html=(subsection.visit, subsection.depart))
app.add_node(synopsis, html=(synopsis.visit, synopsis.depart))
app.add_node(td, html=(td.visit, td.depart))
app.add_node(hierarchy_row, html=(hierarchy_row.visit, hierarchy_row.depart))
app.add_directive('kaaclass', kaaclass_directive, 1, (0, 1, 1), synopsis=directives.flag)
app.add_directive('autosynopsis', synopsis_directive, 1, (0, 1, 1))
app.add_directive('autoproperties', auto_directive, 1, (0, 1, 1), **auto_options)
app.add_directive('automethods', auto_directive, 1, (0, 1, 1), **auto_options)
app.add_directive('autosignals', auto_directive, 1, (0, 1, 1), **auto_options)
app.add_directive('classattrs', classattrs_directive, 1, (0, 1, 0))
|
freevo/kaa-base
|
src/distribution/sphinxext.py
|
Python
|
lgpl-2.1
| 20,688
|
[
"VisIt"
] |
a314d42ab4223ab3496434ae8197baeb1b02fcbc57a89bd737fc3426fe6e2a1c
|
import numpy as np
from scipy import ndimage as ndi
from scipy import stats
from ..util import img_as_float, pad
from ..feature import peak_local_max
from ..feature.util import _prepare_grayscale_input_2D
from ..feature.corner_cy import _corner_fast
from ._hessian_det_appx import _hessian_matrix_det
from ..transform import integral_image
from .._shared.utils import safe_as_int
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndi.sobel(image, axis=0, mode=mode, cval=cval)
imx = ndi.sobel(image, axis=1, mode=mode, cval=cval)
return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
"""Compute structure tensor using sum of squared differences.
The structure tensor A is defined as::
A = [Axx Axy]
[Axy Ayy]
which is approximated by the weighted sum of squared differences in a local
window around each pixel in the image.
Parameters
----------
image : ndarray
Input image.
sigma : float
Standard deviation used for the Gaussian kernel, which is used as a
weighting function for the local summation of squared differences.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
Axx : ndarray
Element of the structure tensor for each pixel in the input image.
Axy : ndarray
Element of the structure tensor for each pixel in the input image.
Ayy : ndarray
Element of the structure tensor for each pixel in the input image.
Examples
--------
>>> from skimage.feature import structure_tensor
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
>>> Axx
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 0., 1., 0.],
[ 0., 4., 0., 4., 0.],
[ 0., 1., 0., 1., 0.],
[ 0., 0., 0., 0., 0.]])
"""
image = _prepare_grayscale_input_2D(image)
imx, imy = _compute_derivatives(image, mode=mode, cval=cval)
# structure tensor
Axx = ndi.gaussian_filter(imx * imx, sigma, mode=mode, cval=cval)
Axy = ndi.gaussian_filter(imx * imy, sigma, mode=mode, cval=cval)
Ayy = ndi.gaussian_filter(imy * imy, sigma, mode=mode, cval=cval)
return Axx, Axy, Ayy
def hessian_matrix(image, sigma=1, mode='constant', cval=0):
"""Compute Hessian matrix.
The Hessian matrix is defined as::
H = [Hxx Hxy]
[Hxy Hyy]
which is computed by convolving the image with the second derivatives
of the Gaussian kernel in the respective x- and y-directions.
Parameters
----------
image : ndarray
Input image.
sigma : float
Standard deviation of the Gaussian kernel whose second derivatives
are convolved with the image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
Hxx : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hxy : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hyy : ndarray
Element of the Hessian matrix for each pixel in the input image.
Examples
--------
>>> from skimage.feature import hessian_matrix
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
>>> Hxx
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
"""
image = _prepare_grayscale_input_2D(image)
# window extent to the left and right, which covers > 99% of the normal
# distribution
window_ext = max(1, np.ceil(3 * sigma))
ky, kx = np.mgrid[-window_ext:window_ext + 1, -window_ext:window_ext + 1]
# second derivative Gaussian kernels
gaussian_exp = np.exp(-(kx ** 2 + ky ** 2) / (2 * sigma ** 2))
kernel_xx = 1 / (2 * np.pi * sigma ** 4) * (kx ** 2 / sigma ** 2 - 1)
kernel_xx *= gaussian_exp
kernel_xx /= kernel_xx.sum()
kernel_xy = 1 / (2 * np.pi * sigma ** 6) * (kx * ky)
kernel_xy *= gaussian_exp
kernel_xy /= kernel_xx.sum()
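# note: kernel_xx.sum() is 1 after the normalization above, so this line is
# effectively a no-op (kernel_xy itself sums to zero by symmetry)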
kernel_yy = kernel_xx.transpose()
Hxx = ndi.convolve(image, kernel_xx, mode=mode, cval=cval)
Hxy = ndi.convolve(image, kernel_xy, mode=mode, cval=cval)
Hyy = ndi.convolve(image, kernel_yy, mode=mode, cval=cval)
return Hxx, Hxy, Hyy
def hessian_matrix_det(image, sigma):
"""Computes the approximate Hessian Determinant over an image.
This method uses box filters over integral images to compute the
approximate Hessian Determinant as described in [1]_.
Parameters
----------
image : array
The image over which to compute Hessian Determinant.
sigma : float
Standard deviation used for the Gaussian kernel, used for the Hessian
matrix.
Returns
-------
out : array
The array of the Determinant of Hessians.
References
----------
.. [1] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
"SURF: Speeded Up Robust Features"
ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
Notes
-----
The running time of this method only depends on size of the image. It is
independent of `sigma` as one would expect. The downside is that the
result for `sigma` less than `3` is not accurate, i.e., not similar to
the result obtained if someone computed the Hessian and took its
determinant.
"""
image = img_as_float(image)
image = integral_image(image)
return np.array(_hessian_matrix_det(image, sigma))
def _image_orthogonal_matrix22_eigvals(M00, M01, M11):
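# closed-form eigenvalues of the symmetric 2x2 matrix [[M00, M01], [M01, M11]],
# i.e. trace/2 +/- sqrt((M00 - M11)**2 + 4*M01**2)/2, evaluated per pixel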
l1 = (M00 + M11) / 2 + np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
l2 = (M00 + M11) / 2 - np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
return l1, l2
def structure_tensor_eigvals(Axx, Axy, Ayy):
"""Compute Eigen values of structure tensor.
Parameters
----------
Axx : ndarray
Element of the structure tensor for each pixel in the input image.
Axy : ndarray
Element of the structure tensor for each pixel in the input image.
Ayy : ndarray
Element of the structure tensor for each pixel in the input image.
Returns
-------
l1 : ndarray
Larger eigen value for each input matrix.
l2 : ndarray
Smaller eigen value for each input matrix.
Examples
--------
>>> from skimage.feature import structure_tensor, structure_tensor_eigvals
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
>>> structure_tensor_eigvals(Axx, Axy, Ayy)[0]
array([[ 0., 0., 0., 0., 0.],
[ 0., 2., 4., 2., 0.],
[ 0., 4., 0., 4., 0.],
[ 0., 2., 4., 2., 0.],
[ 0., 0., 0., 0., 0.]])
"""
return _image_orthogonal_matrix22_eigvals(Axx, Axy, Ayy)
def hessian_matrix_eigvals(Hxx, Hxy, Hyy):
"""Compute Eigen values of Hessian matrix.
Parameters
----------
Hxx : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hxy : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hyy : ndarray
Element of the Hessian matrix for each pixel in the input image.
Returns
-------
l1 : ndarray
Larger eigen value for each input matrix.
l2 : ndarray
Smaller eigen value for each input matrix.
Examples
--------
>>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
>>> hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0]
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
"""
return _image_orthogonal_matrix22_eigvals(Hxx, Hxy, Hyy)
def corner_kitchen_rosenfeld(image, mode='constant', cval=0):
"""Compute Kitchen and Rosenfeld corner measure response image.
The corner measure is calculated as follows::
(imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
/ (imx**2 + imy**2)
Where imx and imy are the first and imxx, imxy, imyy the second
derivatives.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
response : ndarray
Kitchen and Rosenfeld response image.
"""
imx, imy = _compute_derivatives(image, mode=mode, cval=cval)
imxx, imxy = _compute_derivatives(imx, mode=mode, cval=cval)
imyx, imyy = _compute_derivatives(imy, mode=mode, cval=cval)
numerator = (imxx * imy ** 2 + imyy * imx ** 2 - 2 * imxy * imx * imy)
denominator = (imx ** 2 + imy ** 2)
response = np.zeros_like(image, dtype=np.double)
mask = denominator != 0
response[mask] = numerator[mask] / denominator[mask]
return response
def corner_harris(image, method='k', k=0.05, eps=1e-6, sigma=1):
"""Compute Harris corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are first derivatives, averaged with a gaussian filter.
The corner measure is then defined as::
det(A) - k * trace(A)**2
or::
2 * det(A) / (trace(A) + eps)
Parameters
----------
image : ndarray
Input image.
method : {'k', 'eps'}, optional
Method to compute the response image from the auto-correlation matrix.
k : float, optional
Sensitivity factor to separate corners from edges, typically in range
`[0, 0.2]`. Small values of k result in detection of sharp corners.
eps : float, optional
Normalisation factor (Noble's corner measure).
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
response : ndarray
Harris response image.
References
----------
.. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_harris, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square.astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> corner_peaks(corner_harris(square), min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = structure_tensor(image, sigma)
# determinant
detA = Axx * Ayy - Axy ** 2
# trace
traceA = Axx + Ayy
if method == 'k':
response = detA - k * traceA ** 2
else:
response = 2 * detA / (traceA + eps)
return response
def corner_shi_tomasi(image, sigma=1):
"""Compute Shi-Tomasi (Kanade-Tomasi) corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are first derivatives, averaged with a gaussian filter.
The corner measure is then defined as the smaller eigenvalue of A::
((Axx + Ayy) - sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
response : ndarray
Shi-Tomasi response image.
References
----------
.. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_shi_tomasi, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square.astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> corner_peaks(corner_shi_tomasi(square), min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = structure_tensor(image, sigma)
# minimum eigenvalue of A
response = ((Axx + Ayy) - np.sqrt((Axx - Ayy) ** 2 + 4 * Axy ** 2)) / 2
return response
def corner_foerstner(image, sigma=1):
"""Compute Foerstner corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are first derivatives, averaged with a gaussian filter.
The corner measure is then defined as::
w = det(A) / trace(A) (size of error ellipse)
q = 4 * det(A) / trace(A)**2 (roundness of error ellipse)
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
w : ndarray
Error ellipse sizes.
q : ndarray
Roundness of error ellipse.
References
----------
.. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/foerstner87.fast.pdf
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_foerstner, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square.astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> w, q = corner_foerstner(square)
>>> accuracy_thresh = 0.5
>>> roundness_thresh = 0.3
>>> foerstner = (q > roundness_thresh) * (w > accuracy_thresh) * w
>>> corner_peaks(foerstner, min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = structure_tensor(image, sigma)
# determinant
detA = Axx * Ayy - Axy ** 2
# trace
traceA = Axx + Ayy
w = np.zeros_like(image, dtype=np.double)
q = np.zeros_like(image, dtype=np.double)
mask = traceA != 0
w[mask] = detA[mask] / traceA[mask]
q[mask] = 4 * detA[mask] / traceA[mask] ** 2
return w, q
def corner_fast(image, n=12, threshold=0.15):
"""Extract FAST corners for a given image.
Parameters
----------
image : 2D ndarray
Input image.
n : int
Minimum number of consecutive pixels out of 16 pixels on the circle
that should all be either brighter or darker w.r.t. the test pixel.
A point c on the circle is darker w.r.t test pixel p if
`Ic < Ip - threshold` and brighter if `Ic > Ip + threshold`. Also
stands for the n in `FAST-n` corner detector.
threshold : float
Threshold used in deciding whether the pixels on the circle are
brighter, darker or similar w.r.t. the test pixel. Decrease the
threshold when more corners are desired and vice-versa.
Returns
-------
response : ndarray
FAST corner response image.
References
----------
.. [1] Edward Rosten and Tom Drummond
"Machine Learning for high-speed corner detection",
http://www.edwardrosten.com/work/rosten_2006_machine.pdf
.. [2] Wikipedia, "Features from accelerated segment test",
https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test
Examples
--------
>>> from skimage.feature import corner_fast, corner_peaks
>>> square = np.zeros((12, 12))
>>> square[3:9, 3:9] = 1
>>> square.astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> corner_peaks(corner_fast(square, 9), min_distance=1)
array([[3, 3],
[3, 8],
[8, 3],
[8, 8]])
"""
image = _prepare_grayscale_input_2D(image)
image = np.ascontiguousarray(image)
response = _corner_fast(image, n, threshold)
return response
def corner_subpix(image, corners, window_size=11, alpha=0.99):
"""Determine subpixel position of corners.
A statistical test decides whether the corner is defined as the
intersection of two edges or a single peak. Depending on the classification
result, the subpixel corner location is determined based on the local
covariance of the grey-values. If the significance level for either
statistical test is not sufficient, the corner cannot be classified, and
the output subpixel position is set to NaN.
Parameters
----------
image : ndarray
Input image.
corners : (N, 2) ndarray
Corner coordinates `(row, col)`.
window_size : int, optional
Search window size for subpixel estimation.
alpha : float, optional
Significance level for corner classification.
Returns
-------
positions : (N, 2) ndarray
Subpixel corner positions. NaN for "not classified" corners.
References
----------
.. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/\
foerstner87.fast.pdf
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_harris, corner_peaks, corner_subpix
>>> img = np.zeros((10, 10))
>>> img[:5, :5] = 1
>>> img[5:, 5:] = 1
>>> img.astype(int)
array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
>>> coords = corner_peaks(corner_harris(img), min_distance=2)
>>> coords_subpix = corner_subpix(img, coords, window_size=7)
>>> coords_subpix
array([[ 4.5, 4.5]])
"""
# window extent in one direction
wext = (window_size - 1) // 2
image = pad(image, pad_width=wext, mode='constant', constant_values=0)
# add pad width, make sure to not modify the input values in-place
corners = safe_as_int(corners + wext)
# normal equation arrays
N_dot = np.zeros((2, 2), dtype=np.double)
N_edge = np.zeros((2, 2), dtype=np.double)
b_dot = np.zeros((2, ), dtype=np.double)
b_edge = np.zeros((2, ), dtype=np.double)
# critical statistical test values
redundancy = window_size ** 2 - 2
t_crit_dot = stats.f.isf(1 - alpha, redundancy, redundancy)
t_crit_edge = stats.f.isf(alpha, redundancy, redundancy)
# coordinates of pixels within window
y, x = np.mgrid[- wext:wext + 1, - wext:wext + 1]
corners_subpix = np.zeros_like(corners, dtype=np.double)
for i, (y0, x0) in enumerate(corners):
# crop window around corner + border for sobel operator
miny = y0 - wext - 1
maxy = y0 + wext + 2
minx = x0 - wext - 1
maxx = x0 + wext + 2
window = image[miny:maxy, minx:maxx]
winx, winy = _compute_derivatives(window, mode='constant', cval=0)
        # compute gradient squares and remove border
winx_winx = (winx * winx)[1:-1, 1:-1]
winx_winy = (winx * winy)[1:-1, 1:-1]
winy_winy = (winy * winy)[1:-1, 1:-1]
# sum of squared differences (mean instead of gaussian filter)
Axx = np.sum(winx_winx)
Axy = np.sum(winx_winy)
Ayy = np.sum(winy_winy)
# sum of squared differences weighted with coordinates
# (mean instead of gaussian filter)
bxx_x = np.sum(winx_winx * x)
bxx_y = np.sum(winx_winx * y)
bxy_x = np.sum(winx_winy * x)
bxy_y = np.sum(winx_winy * y)
byy_x = np.sum(winy_winy * x)
byy_y = np.sum(winy_winy * y)
# normal equations for subpixel position
N_dot[0, 0] = Axx
N_dot[0, 1] = N_dot[1, 0] = - Axy
N_dot[1, 1] = Ayy
N_edge[0, 0] = Ayy
N_edge[0, 1] = N_edge[1, 0] = Axy
N_edge[1, 1] = Axx
b_dot[:] = bxx_y - bxy_x, byy_x - bxy_y
b_edge[:] = byy_y + bxy_x, bxx_x + bxy_y
# estimated positions
try:
est_dot = np.linalg.solve(N_dot, b_dot)
est_edge = np.linalg.solve(N_edge, b_edge)
except np.linalg.LinAlgError:
# if image is constant the system is singular
corners_subpix[i, :] = np.nan, np.nan
continue
# residuals
ry_dot = y - est_dot[0]
rx_dot = x - est_dot[1]
ry_edge = y - est_edge[0]
rx_edge = x - est_edge[1]
# squared residuals
rxx_dot = rx_dot * rx_dot
rxy_dot = rx_dot * ry_dot
ryy_dot = ry_dot * ry_dot
rxx_edge = rx_edge * rx_edge
rxy_edge = rx_edge * ry_edge
ryy_edge = ry_edge * ry_edge
# determine corner class (dot or edge)
# variance for different models
var_dot = np.sum(winx_winx * ryy_dot - 2 * winx_winy * rxy_dot
+ winy_winy * rxx_dot)
var_edge = np.sum(winy_winy * ryy_edge + 2 * winx_winy * rxy_edge
+ winx_winx * rxx_edge)
# test value (F-distributed)
if var_dot < np.spacing(1) and var_edge < np.spacing(1):
t = np.nan
elif var_dot == 0:
t = np.inf
else:
t = var_edge / var_dot
# 1 for edge, -1 for dot, 0 for "not classified"
corner_class = int(t < t_crit_edge) - int(t > t_crit_dot)
if corner_class == -1:
corners_subpix[i, :] = y0 + est_dot[0], x0 + est_dot[1]
elif corner_class == 0:
corners_subpix[i, :] = np.nan, np.nan
elif corner_class == 1:
corners_subpix[i, :] = y0 + est_edge[0], x0 + est_edge[1]
# subtract pad width
corners_subpix -= wext
return corners_subpix
def corner_peaks(image, min_distance=10, threshold_abs=0, threshold_rel=0.1,
exclude_border=True, indices=True, num_peaks=np.inf,
footprint=None, labels=None):
"""Find corners in corner measure response image.
This differs from `skimage.feature.peak_local_max` in that it suppresses
multiple connected peaks with the same accumulator value.
Parameters
----------
* : *
See :py:meth:`skimage.feature.peak_local_max`.
Examples
--------
>>> from skimage.feature import peak_local_max
>>> response = np.zeros((5, 5))
>>> response[2:4, 2:4] = 1
>>> response
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 0.],
[ 0., 0., 1., 1., 0.],
[ 0., 0., 0., 0., 0.]])
>>> peak_local_max(response, exclude_border=False)
array([[2, 2],
[2, 3],
[3, 2],
[3, 3]])
>>> corner_peaks(response, exclude_border=False)
array([[2, 2]])
>>> corner_peaks(response, exclude_border=False, min_distance=0)
array([[2, 2],
[2, 3],
[3, 2],
[3, 3]])
"""
peaks = peak_local_max(image, min_distance=min_distance,
threshold_abs=threshold_abs,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
indices=False, num_peaks=num_peaks,
footprint=footprint, labels=labels)
if min_distance > 0:
coords = np.transpose(peaks.nonzero())
for r, c in coords:
if peaks[r, c]:
peaks[r - min_distance:r + min_distance + 1,
c - min_distance:c + min_distance + 1] = False
peaks[r, c] = True
if indices is True:
return np.transpose(peaks.nonzero())
else:
return peaks
|
jwiggins/scikit-image
|
skimage/feature/corner.py
|
Python
|
bsd-3-clause
| 27,165
|
[
"Gaussian"
] |
2f98dff357d7d2eb15bb607829cfa1a857ad5103fba3596e8eed4c35959d13c4
|
import argparse
from rdkit import Chem
from rdkit.Chem.rdmolfiles import SmilesWriter
parser = argparse.ArgumentParser()
parser.add_argument('inputfile', help="sdf filename for convert to smiles")
args = parser.parse_args()
sdf = Chem.SDMolSupplier( args.inputfile )
writer = SmilesWriter("converted.smi")
for mol in sdf:
    if mol is None:  # SDMolSupplier yields None for unparseable records
        continue
    writer.write(mol)
writer.close()
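# Example invocation (hypothetical input filename):
#   python sdf2smi.py ligands.sdf
# Every record RDKit can parse from ligands.sdf becomes one line of
# converted.smi.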
|
ptosco/rdkit
|
Contrib/Fastcluster/testdata/sdf2smi.py
|
Python
|
bsd-3-clause
| 364
|
[
"RDKit"
] |
b9ab22ecd834e679fbdf9b39df67fb74407abab62b9eca250982cce068c7e6d9
|
#!/usr/bin/env python
##
from wrfpy.config import config
import csv
import os
import astral
from netCDF4 import Dataset
from netCDF4 import date2num
import numpy as np
import bisect
from datetime import datetime
import glob
from pathos.multiprocessing import ProcessPool as Pool
class readObsTemperature(config):
def __init__(self, dtobj, nstationtypes=None, dstationtypes=None):
config.__init__(self)
# optional define station types to be used
self.nstationtypes = nstationtypes # stationtypes at night
self.dstationtypes = dstationtypes # stationtypes during daytime
# define datestr
datestr = datetime.strftime(dtobj, '%Y-%m-%d_%H:%M:%S')
# define name of csv file
self.wrf_rundir = self.config['filesystem']['work_dir']
fname = 'obs_stations_' + datestr + '.csv'
self.csvfile = os.path.join(self.wrf_rundir, fname)
try:
# try to read an existing csv file
self.read_csv(datestr)
except IOError:
if self.config['options_urbantemps']['urban_stations']:
# reading existing csv file failed, start from scratch
self.urbStations = self.config['options_urbantemps']['urban_stations']
self.verify_input()
self.obs_temp_p(dtobj)
self.write_csv(datestr)
else:
raise
def verify_input(self):
'''
verify input and create list of files
'''
try:
f = Dataset(self.urbStations, 'r')
f.close()
self.filelist = [self.urbStations]
except IOError:
# file is not a netcdf file, assuming a txt file containing a
# list of netcdf files
if os.path.isdir(self.urbStations):
# path is actually a directory, not a file
self.filelist = glob.glob(os.path.join(self.urbStations, '*nc'))
else:
# re-raise error
raise
def obs_temp_p(self, dtobj):
'''
get observed temperature in amsterdam parallel
'''
self.dtobjP = dtobj
pool = Pool()
obs = pool.map(self.obs_temp, self.filelist)
self.obs = [ob for ob in obs if ob is not None]
def obs_temp(self, f):
'''
get observed temperature in amsterdam per station
'''
try:
obs = Dataset(f, 'r')
obs_lon = obs.variables['longitude'][0]
obs_lat = obs.variables['latitude'][0]
elevation = 0
try:
stationtype = obs.stationtype
except AttributeError:
stationtype = None
stobs = (obs_lat, obs_lon, elevation, stationtype)
use_station = self.filter_stationtype(stobs, self.dtobjP)
if use_station:
dt = obs.variables['time']
# convert datetime object to dt.units units
dtobj_num = date2num(self.dtobjP, units=dt.units,
calendar=dt.calendar)
# make use of the property that the array is already
# sorted to find the closest date
try:
ind = bisect.bisect_left(dt[:], dtobj_num)
except RuntimeError:
return
if ((ind == 0) or (ind == len(dt))):
return None
else:
am = np.argmin([abs(dt[ind]-dtobj_num),
abs(dt[ind-1]-dtobj_num)])
if (am == 0):
idx = ind
else:
idx = ind - 1
if abs((dt[:]-dtobj_num)[idx]) > 900:
# ignore observation if time difference
# between model and observation is > 15 minutes
return None
temp = obs.variables['temperature'][idx]
sname = f[:] # stationname
obs.close()
# append results to lists
obs_temp = temp
obs_stype = stationtype
                obs_sname = sname
except IOError:
return None
except AttributeError:
return None
try:
return (obs_lat, obs_lon, obs_temp, obs_stype, obs_sname)
except UnboundLocalError:
return None
def filter_stationtype(self, stobs, dtobj):
'''
check if it is day or night based on the solar angle
construct location
'''
lat = stobs[0]
lon = stobs[1]
elevation = 0 # placeholder
loc = astral.Location(info=('name', 'region', lat, lon, 'UTC',
elevation))
solar_elevation = loc.solar_elevation(dtobj)
# set stime according to day/night based on solar angle
if (solar_elevation > 0):
stime = 'day'
else:
stime = 'night'
if ((stime == 'day') and self.dstationtypes):
try:
mask = any([x.lower() in stobs[3].lower() for
x in self.dstationtypes])
except AttributeError:
mask = False
elif ((stime == 'night') and self.nstationtypes):
try:
mask = any([x.lower() in stobs[3].lower() for
x in self.nstationtypes])
except AttributeError:
mask = False
else:
mask = True
return mask
def write_csv(self, datestr):
'''
write output of stations used to csv file
'''
        with open(self.csvfile, 'w', newline='') as out:
csv_out = csv.writer(out)
csv_out.writerow(['lat', 'lon', 'temperature', 'stationtype',
'stationname'])
for row in self.obs:
csv_out.writerow(row)
def read_csv(self, datestr):
'''
read station temperatures from csv file
'''
# initialize variables in csv file
obs_lat = []
obs_lon = []
obs_temp = []
obs_stype = []
obs_sname = []
# start reading csv file
with open(self.csvfile, 'r') as inp:
reader = csv.reader(inp)
next(reader) # skip header
for row in reader:
# append variables
obs_lat.append(float(row[0]))
obs_lon.append(float(row[1]))
obs_temp.append(float(row[2]))
obs_stype.append(str(row[3]))
obs_sname.append(str(row[4]))
# zip variables
self.obs = zip(obs_lat, obs_lon, obs_temp, obs_stype, obs_sname)
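# Sketch (hypothetical values) of the nearest-time lookup used in obs_temp
# above: bisect_left gives the insertion point into the sorted time axis and
# the closer of its two neighbours is kept.
#   times = [0.0, 600.0, 1200.0]
#   target = 700.0
#   ind = bisect.bisect_left(times, target)            # -> 2
#   # abs(times[2] - target) = 500 > abs(times[1] - target) = 100 -> idx = 1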
|
rvanharen/wrfpy
|
wrfpy/readObsTemperature.py
|
Python
|
apache-2.0
| 6,814
|
[
"NetCDF"
] |
66bf4060b938a9c896b91ccdbe3478c163c1c524c1abc1017b30a3976b0e539b
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import copy
from datetime import datetime
import os
import pickle
import re
import sys
import traceback
import argparse
import dateutil.parser
from toast import Weather
import toast
from toast.mpi import MPI, finalize
import healpy as hp
import numpy as np
import toast.map as tm
import toast.qarray as qa
import toast.timing as timing
import toast.tod as tt
import toast.todmap as ttm
if tt.tidas_available:
from toast.tod.tidas import OpTidasExport, TODTidas
if tt.spt3g_available:
from toast.tod.spt3g import Op3GExport, TOD3G
if 'TOAST_STARTUP_DELAY' in os.environ:
import numpy as np
import time
delay = np.float(os.environ['TOAST_STARTUP_DELAY'])
wait = np.random.rand() * delay
# print('Sleeping for {} seconds before importing TOAST'.format(wait),
# flush=True)
time.sleep(wait)
# import warnings
# warnings.filterwarnings('error')
# warnings.simplefilter('ignore', ImportWarning)
# warnings.simplefilter('ignore', ResourceWarning)
# warnings.simplefilter('ignore', DeprecationWarning)
# warnings.filterwarnings("ignore", message="numpy.dtype size changed")
# warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
XAXIS, YAXIS, ZAXIS = np.eye(3)
def parse_arguments(comm):
parser = argparse.ArgumentParser(
description="Simulate ground-based boresight pointing. Simulate "
"atmosphere and make maps for some number of noise Monte Carlos.",
fromfile_prefix_chars='@')
parser.add_argument('--groupsize',
required=False, type=np.int,
help='Size of a process group assigned to a CES')
parser.add_argument('--timezone', required=False, type=np.int, default=0,
help='Offset to apply to MJD to separate days [hours]')
parser.add_argument('--coord', required=False, default='C',
help='Sky coordinate system [C,E,G]')
parser.add_argument('--schedule', required=True,
help='Comma-separated list CES schedule files '
'(from toast_ground_schedule.py)')
parser.add_argument('--weather',
required=False,
help='Comma-separated list of TOAST weather files for '
'every schedule. Repeat the same file if the '
'schedules share observing site.')
parser.add_argument('--samplerate',
required=False, default=100.0, type=np.float,
help='Detector sample rate (Hz)')
parser.add_argument('--scanrate',
required=False, default=1.0, type=np.float,
help='Scanning rate [deg / s]')
parser.add_argument('--scan_accel',
required=False, default=1.0, type=np.float,
help='Scanning rate change [deg / s^2]')
parser.add_argument('--sun_angle_min',
required=False, default=30.0, type=np.float,
help='Minimum azimuthal distance between the Sun and '
'the bore sight [deg]')
parser.add_argument('--conserve_memory', dest='conserve_memory',
required=False, action='store_true',
help='Conserve memory')
parser.add_argument('--no_conserve_memory', dest='conserve_memory',
required=False, action='store_false',
help='Do not conserve memory')
parser.set_defaults(conserve_memory=True)
parser.add_argument('--polyorder',
required=False, type=np.int,
help='Polynomial order for the polyfilter')
parser.add_argument('--wbin_ground',
required=False, type=np.float,
help='Ground template bin width [degrees]')
parser.add_argument('--gain_sigma',
required=False, type=np.float,
help='Gain error distribution')
parser.add_argument('--hwprpm',
required=False, default=0.0, type=np.float,
help='The rate (in RPM) of the HWP rotation')
parser.add_argument('--hwpstep', required=False, default=None,
help='For stepped HWP, the angle in degrees '
'of each step')
parser.add_argument('--hwpsteptime',
required=False, default=0.0, type=np.float,
help='For stepped HWP, the the time in seconds '
'between steps')
parser.add_argument('--input_map', required=False,
help='Input map for signal')
parser.add_argument('--input_pysm_model', required=False,
help='Comma separated models for on-the-fly PySM '
'simulation, e.g. s3,d6,f1,a2"')
parser.add_argument('--apply_beam', required=False, action='store_true',
help='Apply beam convolution to input map with '
'gaussian beam parameters defined in focalplane')
parser.add_argument('--skip_atmosphere',
required=False, default=False, action='store_true',
help='Disable simulating the atmosphere.')
parser.add_argument('--skip_noise',
required=False, default=False, action='store_true',
help='Disable simulating detector noise.')
parser.add_argument('--skip_bin',
required=False, default=False, action='store_true',
help='Disable binning the map.')
parser.add_argument('--skip_hits',
required=False, default=False, action='store_true',
help='Do not save the 3x3 matrices and hitmaps')
parser.add_argument('--skip_destripe',
required=False, default=False, action='store_true',
help='Do not destripe the data')
parser.add_argument('--skip_daymaps',
required=False, default=False, action='store_true',
help='Do not bin daily maps')
parser.add_argument('--atm_lmin_center',
required=False, default=0.01, type=np.float,
help='Kolmogorov turbulence dissipation scale center')
parser.add_argument('--atm_lmin_sigma',
required=False, default=0.001, type=np.float,
help='Kolmogorov turbulence dissipation scale sigma')
parser.add_argument('--atm_lmax_center',
required=False, default=10.0, type=np.float,
help='Kolmogorov turbulence injection scale center')
parser.add_argument('--atm_lmax_sigma',
required=False, default=10.0, type=np.float,
help='Kolmogorov turbulence injection scale sigma')
parser.add_argument('--atm_gain',
required=False, default=1e-4, type=np.float,
help='Atmospheric gain factor.')
parser.add_argument('--atm_zatm',
required=False, default=40000.0, type=np.float,
help='atmosphere extent for temperature profile')
parser.add_argument('--atm_zmax',
required=False, default=200.0, type=np.float,
help='atmosphere extent for water vapor integration')
parser.add_argument('--atm_xstep',
required=False, default=10.0, type=np.float,
help='size of volume elements in X direction')
parser.add_argument('--atm_ystep',
required=False, default=10.0, type=np.float,
help='size of volume elements in Y direction')
parser.add_argument('--atm_zstep',
required=False, default=10.0, type=np.float,
help='size of volume elements in Z direction')
parser.add_argument('--atm_nelem_sim_max',
required=False, default=1000, type=np.int,
help='controls the size of the simulation slices')
parser.add_argument('--atm_gangsize',
required=False, default=1, type=np.int,
help='size of the gangs that create slices')
parser.add_argument('--atm_wind_time',
required=False, default=36000.0, type=np.float,
help='Maximum time to simulate without discontinuity')
parser.add_argument('--atm_z0_center',
required=False, default=2000.0, type=np.float,
help='central value of the water vapor distribution')
parser.add_argument('--atm_z0_sigma',
required=False, default=0.0, type=np.float,
help='sigma of the water vapor distribution')
parser.add_argument('--atm_T0_center',
required=False, default=280.0, type=np.float,
help='central value of the temperature distribution')
parser.add_argument('--atm_T0_sigma',
required=False, default=10.0, type=np.float,
help='sigma of the temperature distribution')
parser.add_argument('--atm_cache',
required=False, default='atm_cache',
help='Atmosphere cache directory')
parser.add_argument('--outdir',
required=False, default='out',
help='Output directory')
parser.add_argument('--zip',
required=False, default=False, action='store_true',
help='Compress the output fits files')
parser.add_argument('--debug',
required=False, default=False, action='store_true',
help='Write diagnostics')
parser.add_argument('--flush',
required=False, default=False, action='store_true',
help='Flush every print statement.')
parser.add_argument('--nside',
required=False, default=512, type=np.int,
help='Healpix NSIDE')
parser.add_argument('--madam_prefix',
required=False, default='toast',
help='Output map prefix')
parser.add_argument('--madam_iter_max',
required=False, default=1000, type=np.int,
help='Maximum number of CG iterations in Madam')
parser.add_argument('--madam_baseline_length',
required=False, default=10000.0, type=np.float,
help='Destriping baseline length (seconds)')
parser.add_argument('--madam_baseline_order',
required=False, default=0, type=np.int,
help='Destriping baseline polynomial order')
parser.add_argument('--madam_precond_width',
required=False, default=1, type=np.int,
help='Madam preconditioner width')
parser.add_argument('--madam_noisefilter',
required=False, default=False, action='store_true',
help='Destripe with the noise filter enabled')
parser.add_argument('--madampar',
required=False, default=None,
help='Madam parameter file')
parser.add_argument('--no_madam_allreduce',
required=False, default=False, action='store_true',
help='Do not use allreduce communication in Madam')
parser.add_argument('--common_flag_mask',
required=False, default=1, type=np.uint8,
help='Common flag mask')
parser.add_argument('--MC_start',
required=False, default=0, type=np.int,
help='First Monte Carlo noise realization')
parser.add_argument('--MC_count',
required=False, default=1, type=np.int,
help='Number of Monte Carlo noise realizations')
parser.add_argument('--fp',
required=False, default=None,
help='Pickle file containing a dictionary of detector '
'properties. The keys of this dict are the detector '
'names, and each value is also a dictionary with keys '
'"quat" (4 element ndarray), "fwhm" (float, arcmin), '
'"fknee" (float, Hz), "alpha" (float), and '
'"NET" (float).')
parser.add_argument('--focalplane_radius',
required=False, type=np.float,
help='Override focal plane radius [deg]')
parser.add_argument('--freq',
required=True,
help='Comma-separated list of frequencies with '
'identical focal planes')
parser.add_argument('--tidas',
required=False, default=None,
help='Output TIDAS export path')
parser.add_argument('--spt3g',
required=False, default=None,
help='Output SPT3G export path')
args = timing.add_arguments_and_parse(parser, timing.FILE(noquotes=True))
if len(args.freq.split(',')) != 1:
# Multi frequency run. We don't support multiple copies of
# scanned signal.
if args.input_map:
raise RuntimeError('Multiple frequencies are not supported when '
'scanning from a map')
if not args.skip_atmosphere and args.weather is None:
raise RuntimeError('Cannot simulate atmosphere without a TOAST '
'weather file')
if args.tidas is not None:
if not tt.tidas_available:
raise RuntimeError("TIDAS not found- cannot export")
if args.spt3g is not None:
if not tt.spt3g_available:
raise RuntimeError("SPT3G not found- cannot export")
if comm.comm_world.rank == 0:
print('\nAll parameters:')
print(args, flush=args.flush)
print('')
if args.groupsize:
comm = toast.Comm(groupsize=args.groupsize)
if comm.comm_world.rank == 0:
if not os.path.isdir(args.outdir):
try:
os.makedirs(args.outdir)
except FileExistsError:
pass
return args, comm
def name2id(name, maxval=2 ** 16):
""" Map a name into an index.
"""
value = 0
for c in name:
value += ord(c)
return value % maxval
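# Worked example (name assumed, not from a real schedule): name2id('LAT')
# sums the character codes 76 + 65 + 84 = 225, well below 2 ** 16, so the
# resulting id is simply 225.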
def load_weather(args, comm, schedules):
""" Load TOAST weather file(s) and attach them to the schedules.
"""
if args.weather is None:
return
start = MPI.Wtime()
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
weathers = []
weatherdict = {}
for fname in args.weather.split(','):
if fname not in weatherdict:
if not os.path.isfile(fname):
raise RuntimeError('No such weather file: {}'.format(fname))
start1 = MPI.Wtime()
weatherdict[fname] = Weather(fname)
stop1 = MPI.Wtime()
print('Load {}: {:.2f} seconds'.format(fname, stop1 - start1),
flush=args.flush)
weathers.append(weatherdict[fname])
else:
weathers = None
weathers = comm.comm_world.bcast(weathers)
if len(weathers) == 1 and len(schedules) > 1:
weathers *= len(schedules)
if len(weathers) != len(schedules):
raise RuntimeError(
'Number of weathers must equal number of schedules or be 1.')
for schedule, weather in zip(schedules, weathers):
schedule.append(weather)
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Loading weather {:.3f} s'.format(stop - start), flush=args.flush)
del autotimer
return
def load_schedule(args, comm):
""" Load the observing schedule(s).
"""
start = MPI.Wtime()
autotimer = timing.auto_timer()
schedules = []
if comm.comm_world.rank == 0:
for fn in args.schedule.split(','):
if not os.path.isfile(fn):
raise RuntimeError('No such schedule file: {}'.format(fn))
start1 = MPI.Wtime()
with open(fn, 'r') as f:
while True:
line = f.readline()
if line.startswith('#'):
continue
(site_name, telescope, site_lat, site_lon,
site_alt) = line.split()
site_alt = float(site_alt)
site = (site_name, telescope, site_lat, site_lon, site_alt)
break
all_ces = []
for line in f:
if line.startswith('#'):
continue
(start_date, start_time, stop_date, stop_time, mjdstart,
mjdstop, name, azmin, azmax, el, rs, sun_el1, sun_az1,
sun_el2, sun_az2, moon_el1, moon_az1, moon_el2, moon_az2,
moon_phase, scan, subscan) = line.split()
start_time = start_date + ' ' + start_time
stop_time = stop_date + ' ' + stop_time
# Define season as a calendar year. This can be
# changed later and could even be in the schedule file.
season = int(start_date.split('-')[0])
try:
start_time = dateutil.parser.parse(start_time
+ ' +0000')
stop_time = dateutil.parser.parse(stop_time + ' +0000')
except Exception:
start_time = dateutil.parser.parse(start_time)
stop_time = dateutil.parser.parse(stop_time)
start_timestamp = start_time.timestamp()
stop_timestamp = stop_time.timestamp()
all_ces.append([
start_timestamp, stop_timestamp, name, float(mjdstart),
int(scan), int(subscan), float(azmin), float(azmax),
float(el), season, start_date])
schedules.append([site, all_ces])
stop1 = MPI.Wtime()
print('Load {}: {:.2f} seconds'.format(fn, stop1 - start1),
flush=args.flush)
schedules = comm.comm_world.bcast(schedules)
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Loading schedule {:.3f} s'.format(stop - start),
flush=args.flush)
del autotimer
return schedules
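# Illustrative (hypothetical) schedule file layout expected by the parser in
# load_schedule: one header line such as
#   ATACAMA LAT -22.96 -67.79 5200.0
# giving site name, telescope, latitude, longitude and altitude, followed by
# one line per CES with start/stop date-times, start/stop MJDs, the patch
# name, azimuth range and elevation, a rising/setting flag, Sun and Moon
# positions at the scan boundaries, the Moon phase, and scan/subscan indices.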
def get_focalplane_radius(args, focalplane, rmin=1.0):
""" Find the furthest angular distance from the boresight
"""
if args.focalplane_radius:
return args.focalplane_radius
autotimer = timing.auto_timer()
cosangs = []
for det in focalplane:
quat = focalplane[det]['quat']
vec = qa.rotate(quat, ZAXIS)
cosangs.append(np.dot(ZAXIS, vec))
mincos = np.amin(cosangs)
maxdist = max(np.degrees(np.arccos(mincos)), rmin)
del autotimer
return maxdist * 1.001
def load_focalplanes(args, comm, schedules):
""" Attach a focalplane to each of the schedules.
"""
start = MPI.Wtime()
autotimer = timing.auto_timer()
# Load focalplane information
focalplanes = []
if comm.comm_world.rank == 0:
for fpfile in args.fp.split(','):
start1 = MPI.Wtime()
with open(fpfile, 'rb') as picklefile:
focalplane = pickle.load(picklefile)
stop1 = MPI.Wtime()
print('Load {}: {:.2f} seconds'.format(fpfile, stop1 - start1),
flush=args.flush)
focalplanes.append(focalplane)
start1 = stop1
focalplanes = comm.comm_world.bcast(focalplanes)
if len(focalplanes) == 1 and len(schedules) > 1:
focalplanes *= len(schedules)
if len(focalplanes) != len(schedules):
raise RuntimeError(
'Number of focalplanes must equal number of schedules or be 1.')
detweights = {}
for schedule, focalplane in zip(schedules, focalplanes):
schedule.append(focalplane)
for detname, det in focalplane.items():
net = det['NET']
detweight = 1.0 / (args.samplerate * net * net)
if detname in detweights and detweights[detname] != detweight:
raise RuntimeError(
'Detector weight for {} changes'.format(detname))
detweights[detname] = detweight
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Load focalplane(s): {:.2f} seconds'.format(stop - start),
flush=args.flush)
del autotimer
return detweights
def get_analytic_noise(args, focalplane):
""" Create a TOAST noise object.
Create a noise object from the 1/f noise parameters contained in the
focalplane database.
"""
autotimer = timing.auto_timer()
detectors = sorted(focalplane.keys())
fmin = {}
fknee = {}
alpha = {}
NET = {}
rates = {}
for d in detectors:
rates[d] = args.samplerate
fmin[d] = focalplane[d]['fmin']
fknee[d] = focalplane[d]['fknee']
alpha[d] = focalplane[d]['alpha']
NET[d] = focalplane[d]['NET']
del autotimer
return tt.AnalyticNoise(rate=rates, fmin=fmin, detectors=detectors,
fknee=fknee, alpha=alpha, NET=NET)
def get_breaks(comm, all_ces, nces, args):
""" List operational day limits in the list of CES:s.
"""
autotimer = timing.auto_timer()
breaks = []
if args.skip_daymaps:
return breaks
do_break = False
for i in range(nces - 1):
# If current and next CES are on different days, insert a break
tz = args.timezone / 24
start1 = all_ces[i][3] # MJD start
start2 = all_ces[i + 1][3] # MJD start
scan1 = all_ces[i][4]
scan2 = all_ces[i + 1][4]
if scan1 != scan2 and do_break:
            breaks.append(i + 1)
do_break = False
continue
day1 = int(start1 + tz)
day2 = int(start2 + tz)
if day1 != day2:
if scan1 == scan2:
# We want an entire CES, even if it crosses the day bound.
# Wait until the scan number changes.
do_break = True
else:
                breaks.append(i + 1)
nbreak = len(breaks)
if nbreak < comm.ngroups - 1:
if comm.comm_world.rank == 0:
print('WARNING: there are more process groups than observing days. '
'Will try distributing by observation.', flush=True)
breaks = []
for i in range(nces - 1):
scan1 = all_ces[i][4]
scan2 = all_ces[i + 1][4]
if scan1 != scan2:
                breaks.append(i + 1)
nbreak = len(breaks)
if nbreak != comm.ngroups - 1:
raise RuntimeError(
'Number of observing days ({}) does not match number of process '
'groups ({}).'.format(nbreak + 1, comm.ngroups))
del autotimer
return breaks
def create_observation(args, comm, all_ces_tot, ices, noise):
""" Create a TOAST observation.
Create an observation for the CES scan defined by all_ces_tot[ices].
"""
autotimer = timing.auto_timer()
ces, site, fp, fpradius, detquats, weather = all_ces_tot[ices]
(CES_start, CES_stop, CES_name, mjdstart, scan, subscan,
azmin, azmax, el, season, date) = ces
_, _, site_lat, site_lon, site_alt = site
totsamples = int((CES_stop - CES_start) * args.samplerate)
# create the TOD for this observation
try:
tod = tt.TODGround(
comm.comm_group, detquats, totsamples,
detranks=comm.comm_group.size, firsttime=CES_start,
rate=args.samplerate, site_lon=site_lon, site_lat=site_lat,
site_alt=site_alt, azmin=azmin, azmax=azmax, el=el,
scanrate=args.scanrate, scan_accel=args.scan_accel,
CES_start=None, CES_stop=None, sun_angle_min=args.sun_angle_min,
coord=args.coord, sampsizes=None)
except RuntimeError as e:
raise RuntimeError('Failed to create TOD for {}-{}-{}: "{}"'
''.format(CES_name, scan, subscan, e))
# Create the observation
site_name = site[0]
telescope_name = site[1]
site_id = name2id(site_name)
telescope_id = name2id(telescope_name)
obs = {}
obs['name'] = 'CES-{}-{}-{}-{}-{}'.format(site_name, telescope_name,
CES_name, scan, subscan)
obs['tod'] = tod
obs['baselines'] = None
obs['noise'] = noise
obs['id'] = int(mjdstart * 10000)
obs['intervals'] = tod.subscans
obs['site'] = site_name
obs['telescope'] = telescope_name
obs['site_id'] = site_id
obs['telescope_id'] = telescope_id
obs['fpradius'] = fpradius
obs['weather'] = weather
obs['start_time'] = CES_start
obs['altitude'] = site_alt
obs['season'] = season
obs['date'] = date
obs['MJD'] = mjdstart
obs['focalplane'] = fp
del autotimer
return obs
def create_observations(args, comm, schedules, mem_counter):
""" Create and distribute TOAST observations for every CES in schedules.
"""
start = MPI.Wtime()
autotimer = timing.auto_timer()
data = toast.Data(comm)
# Loop over the schedules, distributing each schedule evenly across
# the process groups. For now, we'll assume that each schedule has
# the same number of operational days and the number of process groups
# matches the number of operational days. Relaxing these constraints
# will cause the season break to occur on different process groups
# for different schedules and prevent splitting the communicator.
for schedule in schedules:
if args.weather is None:
site, all_ces, focalplane = schedule
weather = None
else:
site, all_ces, weather, focalplane = schedule
fpradius = get_focalplane_radius(args, focalplane)
# Focalplane information for this schedule
detectors = sorted(focalplane.keys())
detquats = {}
for d in detectors:
detquats[d] = focalplane[d]['quat']
# Noise model for this schedule
noise = get_analytic_noise(args, focalplane)
all_ces_tot = []
nces = len(all_ces)
for ces in all_ces:
all_ces_tot.append((ces, site, focalplane, fpradius,
detquats, weather))
breaks = get_breaks(comm, all_ces, nces, args)
groupdist = toast.distribute_uniform(nces, comm.ngroups, breaks=breaks)
group_firstobs = groupdist[comm.group][0]
group_numobs = groupdist[comm.group][1]
for ices in range(group_firstobs, group_firstobs + group_numobs):
obs = create_observation(args, comm, all_ces_tot, ices, noise)
data.obs.append(obs)
if args.skip_atmosphere:
for ob in data.obs:
tod = ob['tod']
tod.free_azel_quats()
if comm.comm_group.rank == 0:
print('Group # {:4} has {} observations.'.format(
comm.group, len(data.obs)), flush=args.flush)
if len(data.obs) == 0:
raise RuntimeError('Too many tasks. Every MPI task must '
'be assigned to at least one observation.')
mem_counter.exec(data)
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Simulated scans in {:.2f} seconds'
''.format(stop - start), flush=args.flush)
# Split the data object for each telescope for separate mapmaking.
# We could also split by site.
if len(schedules) > 1:
telescope_data = data.split('telescope')
if len(telescope_data) == 1:
# Only one telescope available
telescope_data = []
else:
telescope_data = []
telescope_data.insert(0, ('all', data))
del autotimer
return data, telescope_data
def expand_pointing(args, comm, data, mem_counter):
""" Expand boresight pointing to every detector.
"""
start = MPI.Wtime()
autotimer = timing.auto_timer()
hwprpm = args.hwprpm
hwpstep = None
if args.hwpstep is not None:
hwpstep = float(args.hwpstep)
hwpsteptime = args.hwpsteptime
if comm.comm_world.rank == 0:
print('Expanding pointing', flush=args.flush)
pointing = tt.OpPointingHpix(
nside=args.nside, nest=True, mode='IQU',
hwprpm=hwprpm, hwpstep=hwpstep, hwpsteptime=hwpsteptime)
pointing.exec(data)
# Only purge the pointing if we are NOT going to export the
# data to a TIDAS volume
if (args.tidas is None) and (args.spt3g is None):
for ob in data.obs:
tod = ob['tod']
tod.free_radec_quats()
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Pointing generation took {:.3f} s'.format(stop - start),
flush=args.flush)
mem_counter.exec(data)
del autotimer
return
def get_submaps(args, comm, data):
""" Get a list of locally hit pixels and submaps on every process.
"""
if not args.skip_bin or args.input_map:
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
print('Scanning local pixels', flush=args.flush)
start = MPI.Wtime()
# Prepare for using distpixels objects
nside = args.nside
subnside = 16
if subnside > nside:
subnside = nside
subnpix = 12 * subnside * subnside
# get locally hit pixels
lc = tm.OpLocalPixels()
localpix = lc.exec(data)
if localpix is None:
raise RuntimeError(
'Process {} has no hit pixels. Perhaps there are fewer '
'detectors than processes in the group?'.format(
comm.comm_world.rank))
# find the locally hit submaps.
localsm = np.unique(np.floor_divide(localpix, subnpix))
comm.comm_world.barrier()
stop = MPI.Wtime()
elapsed = stop - start
if comm.comm_world.rank == 0:
print('Local submaps identified in {:.3f} s'.format(elapsed),
flush=args.flush)
else:
localpix, localsm = None, None
del autotimer
return localpix, localsm, subnpix
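# Worked example (assumed default arguments): with nside = 512 the submap
# resolution stays at subnside = 16, so subnpix = 12 * 16 ** 2 = 3072 pixels
# per submap and the full 12 * 512 ** 2 pixel map splits into
# (512 // 16) ** 2 = 1024 submaps, found by floor-dividing each locally hit
# nested pixel index by subnpix.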
def add_sky_signal(data, totalname_freq, signalname):
""" Add previously simulated sky signal to the atmospheric noise.
"""
if signalname is not None:
autotimer = timing.auto_timer()
for obs in data.obs:
tod = obs['tod']
for det in tod.local_dets:
cachename_in = '{}_{}'.format(signalname, det)
cachename_out = '{}_{}'.format(totalname_freq, det)
ref_in = tod.cache.reference(cachename_in)
if tod.cache.exists(cachename_out):
ref_out = tod.cache.reference(cachename_out)
ref_out += ref_in
else:
ref_out = tod.cache.put(cachename_out, ref_in)
del ref_in, ref_out
del autotimer
return
def simulate_sky_signal(args, comm, data, mem_counter, schedules, subnpix,
localsm):
""" Use PySM to simulate smoothed sky signal.
"""
autotimer = timing.auto_timer()
# Convolve a signal TOD from PySM
start = MPI.Wtime()
signalname = 'signal'
op_sim_pysm = ttm.OpSimPySM(
comm=comm.comm_rank, out=signalname, pysm_model=args.input_pysm_model,
focalplanes=[s[3] for s in schedules], nside=args.nside,
subnpix=subnpix, localsm=localsm, apply_beam=args.apply_beam,
coord=args.coord)
op_sim_pysm.exec(data)
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('PySM took {:.2f} seconds'.format(stop - start),
flush=args.flush)
mem_counter.exec(data)
del autotimer
return signalname
def scan_sky_signal(args, comm, data, mem_counter, localsm, subnpix):
""" Scan sky signal from a map.
"""
signalname = None
if args.input_map:
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
print('Scanning input map', flush=args.flush)
start = MPI.Wtime()
npix = 12 * args.nside ** 2
# Scan the sky signal
if comm.comm_world.rank == 0 and not os.path.isfile(args.input_map):
raise RuntimeError(
'Input map does not exist: {}'.format(args.input_map))
distmap = tm.DistPixels(
comm=comm.comm_world, size=npix, nnz=3,
dtype=np.float32, submap=subnpix, local=localsm)
mem_counter._objects.append(distmap)
distmap.read_healpix_fits(args.input_map)
scansim = tt.OpSimScan(distmap=distmap, out='signal')
scansim.exec(data)
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Read and sampled input map: {:.2f} seconds'
''.format(stop - start), flush=args.flush)
signalname = 'signal'
mem_counter.exec(data)
del autotimer
return signalname
def setup_sigcopy(args):
""" Determine if an extra copy of the atmospheric signal is needed.
When we simulate multichroic focal planes, the frequency-independent
part of the atmospheric noise is simulated first and then the
frequency scaling is applied to a copy of the atmospheric noise.
"""
if len(args.freq.split(',')) == 1:
totalname = 'total'
totalname_freq = 'total'
else:
totalname = 'total'
totalname_freq = 'total_freq'
return totalname, totalname_freq
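# Sketch of the copy flow implied above (flags assumed, not from an actual
# run): with --freq 90,150 the atmosphere is simulated once into the 'total'
# cache and re-copied to 'total_freq' before each per-frequency scaling, so
# the 150 GHz pass starts from unscaled atmosphere rather than from the
# 90 GHz-scaled copy.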
def setup_madam(args):
""" Create a Madam parameter dictionary.
Initialize the Madam parameters from the command line arguments.
"""
autotimer = timing.auto_timer()
pars = {}
cross = args.nside // 2
submap = 16
if submap > args.nside:
submap = args.nside
pars['temperature_only'] = False
pars['force_pol'] = True
pars['kfirst'] = not args.skip_destripe
pars['write_map'] = not args.skip_destripe
pars['write_binmap'] = not args.skip_bin
pars['write_matrix'] = not args.skip_hits
pars['write_wcov'] = not args.skip_hits
pars['write_hits'] = not args.skip_hits
pars['nside_cross'] = cross
pars['nside_submap'] = submap
if args.no_madam_allreduce:
pars['allreduce'] = False
else:
pars['allreduce'] = True
pars['reassign_submaps'] = True
pars['pixlim_cross'] = 1e-3
pars['pixmode_cross'] = 2
pars['pixlim_map'] = 1e-2
pars['pixmode_map'] = 2
# Instead of fixed detector weights, we'll want to use scaled noise
# PSD:s that include the atmospheric noise
pars['radiometers'] = True
pars['noise_weights_from_psd'] = True
if args.madampar is not None:
pat = re.compile(r'\s*(\S+)\s*=\s*(\S+(\s+\S+)*)\s*')
comment = re.compile(r'^#.*')
with open(args.madampar, 'r') as f:
for line in f:
if comment.match(line) is None:
result = pat.match(line)
if result is not None:
key, value = result.group(1), result.group(2)
pars[key] = value
pars['base_first'] = args.madam_baseline_length
pars['basis_order'] = args.madam_baseline_order
pars['nside_map'] = args.nside
if args.madam_noisefilter:
if args.madam_baseline_order != 0:
            raise RuntimeError('Madam cannot build a noise filter when '
                               'baseline order is higher than zero.')
pars['kfilter'] = True
else:
pars['kfilter'] = False
pars['precond_width'] = args.madam_precond_width
pars['fsample'] = args.samplerate
pars['iter_max'] = args.madam_iter_max
pars['file_root'] = args.madam_prefix
del autotimer
return pars
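# Illustrative (hypothetical) Madam parameter file accepted by the regex
# parser in setup_madam:
#   # lines starting with '#' are skipped
#   pixlim_cross = 1e-4
#   info = 2
# Each 'key = value' line overrides the defaults built above, although a few
# keys (base_first, basis_order, nside_map, kfilter, precond_width, fsample,
# iter_max, file_root) are reset from the command line afterwards.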
def scale_atmosphere_by_frequency(args, comm, data, freq, totalname_freq, mc):
""" Scale atmospheric fluctuations by frequency.
Assume that cached signal under totalname_freq is pure atmosphere
and scale the absorption coefficient according to the frequency.
If the focalplane is included in the observation and defines
bandpasses for the detectors, the scaling is computed for each
detector separately.
"""
if args.skip_atmosphere:
return
autotimer = timing.auto_timer()
start = MPI.Wtime()
for obs in data.obs:
tod = obs['tod']
todcomm = tod.mpicomm
site_id = obs['site_id']
weather = obs['weather']
if 'focalplane' in obs:
focalplane = obs['focalplane']
else:
focalplane = None
start_time = obs['start_time']
weather.set(site_id, mc, start_time)
altitude = obs['altitude']
air_temperature = weather.air_temperature
surface_pressure = weather.surface_pressure
pwv = weather.pwv
# Use the entire processing group to sample the absorption
# coefficient as a function of frequency
freqmin = 0
freqmax = 2 * freq
nfreq = 1001
freqstep = (freqmax - freqmin) / (nfreq - 1)
nfreq_task = int(nfreq // todcomm.size) + 1
my_ifreq_min = nfreq_task * todcomm.rank
my_ifreq_max = min(nfreq, nfreq_task * (todcomm.rank + 1))
my_nfreq = my_ifreq_max - my_ifreq_min
if my_nfreq > 0:
my_freqs = freqmin + np.arange(my_ifreq_min,
my_ifreq_max) * freqstep
my_absorption = np.zeros(my_nfreq)
err = toast.ctoast.atm_get_absorption_coefficient_vec(
altitude, air_temperature, surface_pressure, pwv,
my_freqs[0], my_freqs[-1], my_nfreq, my_absorption)
if err != 0:
raise RuntimeError(
'Failed to get absorption coefficient vector')
else:
my_freqs = np.array([])
my_absorption = np.array([])
freqs = np.hstack(todcomm.allgather(my_freqs))
absorption = np.hstack(todcomm.allgather(my_absorption))
# loading = toast.ctoast.atm_get_atmospheric_loading(
# altitude, pwv, freq)
for det in tod.local_dets:
try:
# Use detector bandpass from the focalplane
center = focalplane[det]['bandcenter_ghz']
width = focalplane[det]['bandwidth_ghz']
except Exception:
# Use default values for the entire focalplane
center = freq
width = .2 * freq
nstep = 101
# Interpolate the absorption coefficient to do a top hat
# integral across the bandpass
det_freqs = np.linspace(center - width / 2, center + width / 2,
nstep)
absorption_det = np.mean(np.interp(det_freqs, freqs, absorption))
cachename = '{}_{}'.format(totalname_freq, det)
ref = tod.cache.reference(cachename)
ref *= absorption_det
del ref
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Atmosphere scaling took {:.3f} s'.format(stop - start),
flush=args.flush)
del autotimer
return
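# Worked example (assumed communicator size) of the frequency-grid split in
# scale_atmosphere_by_frequency: with nfreq = 1001 and todcomm.size = 4,
# nfreq_task = 251, so ranks 0..3 sample index ranges [0, 251), [251, 502),
# [502, 753) and [753, 1001); the allgather/hstack calls then stitch the
# full absorption table back together on every rank.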
def update_atmospheric_noise_weights(args, comm, data, freq, mc):
""" Update atmospheric noise weights.
Estimate the atmospheric noise level from weather parameters and
encode it as a noise_scale in the observation. Madam will apply
the noise_scale to the detector weights. This approach assumes
that the atmospheric noise dominates over detector noise. To be
more precise, we would have to add the squared noise weights but
we do not have their relative calibration.
"""
if args.weather and not args.skip_atmosphere:
autotimer = timing.auto_timer()
start = MPI.Wtime()
for obs in data.obs:
site_id = obs['site_id']
weather = obs['weather']
start_time = obs['start_time']
weather.set(site_id, mc, start_time)
altitude = obs['altitude']
absorption = toast.ctoast.atm_get_absorption_coefficient(
altitude, weather.air_temperature, weather.surface_pressure,
weather.pwv, freq)
obs['noise_scale'] = absorption * weather.air_temperature
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Atmosphere weighting took {:.3f} s'.format(stop - start),
flush=args.flush)
del autotimer
else:
for obs in data.obs:
obs['noise_scale'] = 1.
return
def simulate_atmosphere(args, comm, data, mc, mem_counter,
totalname):
if not args.skip_atmosphere:
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
print('Simulating atmosphere', flush=args.flush)
if args.atm_cache and not os.path.isdir(args.atm_cache):
try:
os.makedirs(args.atm_cache)
except FileExistsError:
pass
start = MPI.Wtime()
# Simulate the atmosphere signal
atm = tt.OpSimAtmosphere(
out=totalname, realization=mc,
lmin_center=args.atm_lmin_center,
lmin_sigma=args.atm_lmin_sigma,
lmax_center=args.atm_lmax_center, gain=args.atm_gain,
lmax_sigma=args.atm_lmax_sigma, zatm=args.atm_zatm,
zmax=args.atm_zmax, xstep=args.atm_xstep,
ystep=args.atm_ystep, zstep=args.atm_zstep,
nelem_sim_max=args.atm_nelem_sim_max,
verbosity=int(args.debug), gangsize=args.atm_gangsize,
z0_center=args.atm_z0_center, z0_sigma=args.atm_z0_sigma,
apply_flags=False, common_flag_mask=args.common_flag_mask,
cachedir=args.atm_cache, flush=args.flush,
wind_time=args.atm_wind_time)
atm.exec(data)
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Atmosphere simulation took {:.3f} s'.format(stop - start),
flush=args.flush)
mem_counter.exec(data)
del autotimer
return
def copy_atmosphere(args, comm, data, mem_counter, totalname, totalname_freq):
""" Copy the atmospheric signal.
Make a copy of the atmosphere so we can scramble the gains and apply
frequency-dependent scaling.
"""
if totalname != totalname_freq:
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
print('Copying atmosphere from {} to {}'.format(
totalname, totalname_freq), flush=args.flush)
cachecopy = tt.OpCacheCopy(totalname, totalname_freq, force=True)
cachecopy.exec(data)
mem_counter.exec(data)
del autotimer
return
def simulate_noise(args, comm, data, mc, mem_counter, totalname_freq):
if not args.skip_noise:
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
print('Simulating noise', flush=args.flush)
start = MPI.Wtime()
nse = tt.OpSimNoise(out=totalname_freq, realization=mc)
nse.exec(data)
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Noise simulation took {:.3f} s'.format(stop - start),
flush=args.flush)
mem_counter.exec(data)
del autotimer
return
def scramble_gains(args, comm, data, mc, mem_counter, totalname_freq):
if args.gain_sigma:
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
print('Scrambling gains', flush=args.flush)
start = MPI.Wtime()
scrambler = tt.OpGainScrambler(
sigma=args.gain_sigma, name=totalname_freq, realization=mc)
scrambler.exec(data)
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Gain scrambling took {:.3f} s'.format(stop - start),
flush=args.flush)
mem_counter.exec(data)
del autotimer
return
def setup_output(args, comm, mc, freq):
outpath = '{}/{:08}/{:03}'.format(args.outdir, mc, int(freq))
if comm.comm_world.rank == 0:
if not os.path.isdir(outpath):
try:
os.makedirs(outpath)
except FileExistsError:
pass
return outpath
def apply_polyfilter(args, comm, data, mem_counter, totalname_freq):
if args.polyorder:
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
print('Polyfiltering signal', flush=args.flush)
start = MPI.Wtime()
polyfilter = tt.OpPolyFilter(
order=args.polyorder, name=totalname_freq,
common_flag_mask=args.common_flag_mask)
polyfilter.exec(data)
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Polynomial filtering took {:.3f} s'.format(stop - start),
flush=args.flush)
mem_counter.exec(data)
del autotimer
return
def apply_groundfilter(args, comm, data, mem_counter, totalname_freq):
if args.wbin_ground:
autotimer = timing.auto_timer()
if comm.comm_world.rank == 0:
print('Ground filtering signal', flush=args.flush)
start = MPI.Wtime()
groundfilter = tt.OpGroundFilter(
wbin=args.wbin_ground, name=totalname_freq,
common_flag_mask=args.common_flag_mask)
groundfilter.exec(data)
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Ground filtering took {:.3f} s'.format(stop - start),
flush=args.flush)
mem_counter.exec(data)
del autotimer
return
def output_tidas(args, comm, data, totalname):
if args.tidas is None:
return
autotimer = timing.auto_timer()
tidas_path = os.path.abspath(args.tidas)
comm.comm_world.Barrier()
if comm.comm_world.rank == 0:
print('Exporting data to a TIDAS volume at {}'.format(tidas_path),
flush=args.flush)
start = MPI.Wtime()
export = OpTidasExport(tidas_path, TODTidas, backend="hdf5",
use_intervals=True,
create_opts={"group_dets":"sim"},
ctor_opts={"group_dets":"sim"},
cache_name=totalname)
export.exec(data)
comm.comm_world.Barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Wrote simulated data to {}:{} in {:.2f} s'
''.format(tidas_path, "total",
stop - start), flush=args.flush)
del autotimer
return
def output_spt3g(args, comm, data, totalname):
if args.spt3g is None:
return
autotimer = timing.auto_timer()
spt3g_path = os.path.abspath(args.spt3g)
comm.comm_world.Barrier()
if comm.comm_world.rank == 0:
print('Exporting data to SPT3G directory tree at {}'.format(spt3g_path),
flush=args.flush)
start = MPI.Wtime()
export = Op3GExport(spt3g_path, TOD3G, use_intervals=True,
export_opts={"prefix" : "sim"},
cache_name=totalname)
export.exec(data)
comm.comm_world.Barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Wrote simulated data to {}:{} in {:.2f} s'
''.format(spt3g_path, "total",
stop - start), flush=args.flush)
del autotimer
return
def get_time_communicators(comm, data):
""" Split the world communicator by time.
"""
autotimer = timing.auto_timer()
time_comms = [('all', comm.comm_world)]
# A process will only have data for one season and one day. If more
# than one season is observed, we split the communicator to make
# season maps.
my_season = data.obs[0]['season']
seasons = np.array(comm.comm_world.allgather(my_season))
do_seasons = np.any(seasons != my_season)
if do_seasons:
season_comm = comm.comm_world.Split(my_season, comm.comm_world.rank)
time_comms.append((str(my_season), season_comm))
# Split the communicator to make daily maps. We could easily split
# by month as well
my_day = int(data.obs[0]['MJD'])
my_date = data.obs[0]['date']
days = np.array(comm.comm_world.allgather(my_day))
do_days = np.any(days != my_day)
if do_days:
day_comm = comm.comm_world.Split(my_day, comm.comm_world.rank)
time_comms.append((my_date, day_comm))
del autotimer
return time_comms
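# Sketch (dates assumed) of the result: time_comms may end up as
#   [('all', comm.comm_world), ('2020', season_comm), ('2020-01-15', day_comm)]
# apply_madam below recognises the daily entries by their two dashes, i.e.
# len(time_name.split('-')) == 3.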
def apply_madam(args, comm, time_comms, data, telescope_data, freq, madampars,
mem_counter, mc, firstmc, outpath, detweights, totalname_madam,
first_call=True, extra_prefix=None):
""" Use libmadam to bin and optionally destripe data.
Bin and optionally destripe all conceivable subsets of the data.
"""
if comm.comm_world.rank == 0:
print('Making maps', flush=args.flush)
start = MPI.Wtime()
autotimer = timing.auto_timer()
pars = copy.deepcopy(madampars)
pars['path_output'] = outpath
file_root = pars['file_root']
if len(file_root) > 0 and not file_root.endswith('_'):
file_root += '_'
if extra_prefix is not None:
file_root += '{}_'.format(extra_prefix)
file_root += '{:03}'.format(int(freq))
if first_call:
if mc != firstmc:
pars['write_matrix'] = False
pars['write_wcov'] = False
pars['write_hits'] = False
else:
pars['kfirst'] = False
pars['write_map'] = False
pars['write_binmap'] = True
pars['write_matrix'] = False
pars['write_wcov'] = False
pars['write_hits'] = False
outputs = [pars['write_map'], pars['write_binmap'], pars['write_hits'],
pars['write_wcov'], pars['write_matrix']]
if not np.any(outputs):
if comm.comm_world.rank == 0:
print('No Madam outputs requested. Skipping.', flush=args.flush)
return
if args.madam_noisefilter or not pars['kfirst']:
madam_intervals = None
else:
madam_intervals = 'intervals'
madam = tm.OpMadam(
params=pars, detweights=detweights, name=totalname_madam,
common_flag_mask=args.common_flag_mask, purge_tod=False,
intervals=madam_intervals, conserve_memory=args.conserve_memory)
if 'info' in madam.params:
info = madam.params['info']
else:
info = 3
for time_name, time_comm in time_comms:
for tele_name, tele_data in telescope_data:
if len(time_name.split('-')) == 3:
# Special rules for daily maps
if args.skip_daymaps:
continue
if ((len(telescope_data) > 1) and (tele_name == 'all')):
# Skip daily maps over multiple telescopes
continue
if first_call:
# Do not destripe daily maps
kfirst_save = pars['kfirst']
write_map_save = pars['write_map']
write_binmap_save = pars['write_binmap']
pars['kfirst'] = False
pars['write_map'] = False
pars['write_binmap'] = True
start1 = MPI.Wtime()
madam.params['file_root'] = '{}_telescope_{}_time_{}'.format(
file_root, tele_name, time_name)
if time_comm == comm.comm_world:
madam.params['info'] = info
else:
# Cannot have verbose output from concurrent mapmaking
madam.params['info'] = 0
if time_comm.rank == 0:
print('Mapping {}'.format(madam.params['file_root']),
flush=args.flush)
madam.exec(tele_data, time_comm)
time_comm.barrier()
stop1 = MPI.Wtime()
if time_comm.rank == 0:
print('Mapping {} took {:.3f} s'.format(
madam.params['file_root'], stop1 - start1),
flush=args.flush)
if len(time_name.split('-')) == 3 and first_call:
# Restore destriping parameters
pars['kfirst'] = kfirst_save
pars['write_map'] = write_map_save
pars['write_binmap'] = write_binmap_save
comm.comm_world.barrier()
stop = MPI.Wtime()
if comm.comm_world.rank == 0:
print('Madam took {:.3f} s'.format(stop - start), flush=args.flush)
mem_counter.exec(data)
del autotimer
return
def main():
# This is the 2-level toast communicator. By default,
# there is just one group which spans MPI_COMM_WORLD.
comm = toast.Comm()
if comm.comm_world.rank == 0:
print('Running with {} processes at {}'.format(
comm.comm_world.size, str(datetime.now())), flush=True)
global_timer = timing.simple_timer("Total time")
global_timer.start()
args, comm = parse_arguments(comm)
autotimer = timing.auto_timer("@{}".format(timing.FILE()))
# Initialize madam parameters
madampars = setup_madam(args)
# Load and broadcast the schedule file
schedules = load_schedule(args, comm)
# Load the weather and append to schedules
load_weather(args, comm, schedules)
# load or simulate the focalplane
detweights = load_focalplanes(args, comm, schedules)
# Create the TOAST data object to match the schedule. This will
# include simulating the boresight pointing.
mem_counter = tt.OpMemoryCounter()
data, telescope_data = create_observations(args, comm, schedules,
mem_counter)
# Split the communicator for day and season mapmaking
time_comms = get_time_communicators(comm, data)
# Expand boresight quaternions into detector pointing weights and
# pixel numbers
expand_pointing(args, comm, data, mem_counter)
# Prepare auxiliary information for distributed map objects
_, localsm, subnpix = get_submaps(args, comm, data)
if args.input_pysm_model:
signalname = simulate_sky_signal(args, comm, data, mem_counter,
schedules, subnpix, localsm)
else:
signalname = scan_sky_signal(args, comm, data, mem_counter, localsm,
subnpix)
# Set up objects to take copies of the TOD at appropriate times
totalname, totalname_freq = setup_sigcopy(args)
# Loop over Monte Carlos
firstmc = int(args.MC_start)
nmc = int(args.MC_count)
freqs = [float(freq) for freq in args.freq.split(',')]
nfreq = len(freqs)
for mc in range(firstmc, firstmc + nmc):
simulate_atmosphere(args, comm, data, mc, mem_counter, totalname)
# Loop over frequencies with identical focal planes and identical
# atmospheric noise.
for ifreq, freq in enumerate(freqs):
if comm.comm_world.rank == 0:
print('Processing frequency {}GHz {} / {}, MC = {}'
''.format(freq, ifreq + 1, nfreq, mc), flush=args.flush)
copy_atmosphere(args, comm, data, mem_counter, totalname,
totalname_freq)
scale_atmosphere_by_frequency(args, comm, data, freq,
totalname_freq, mc)
update_atmospheric_noise_weights(args, comm, data, freq, mc)
add_sky_signal(data, totalname_freq, signalname)
mcoffset = ifreq * 1000000
simulate_noise(args, comm, data, mc + mcoffset, mem_counter,
totalname_freq)
scramble_gains(args, comm, data, mc + mcoffset, mem_counter,
totalname_freq)
if (mc == firstmc) and (ifreq == 0):
# For the first realization and frequency, optionally
# export the timestream data.
output_tidas(args, comm, data, totalname)
output_spt3g(args, comm, data, totalname)
outpath = setup_output(args, comm, mc, freq)
# Bin and destripe maps
apply_madam(args, comm, time_comms, data, telescope_data, freq,
madampars, mem_counter, mc + mcoffset, firstmc, outpath,
detweights, totalname_freq,
first_call=True)
if args.polyorder or args.wbin_ground:
# Filter signal
apply_polyfilter(args, comm, data, mem_counter, totalname_freq)
apply_groundfilter(args, comm, data, mem_counter,
totalname_freq)
# Bin maps
apply_madam(args, comm, time_comms, data, telescope_data, freq,
madampars, mem_counter, mc + mcoffset, firstmc,
outpath, detweights, totalname_freq,
first_call=False, extra_prefix='filtered')
mem_counter.exec(data)
comm.comm_world.barrier()
global_timer.stop()
if comm.comm_world.rank == 0:
global_timer.report()
del autotimer
return
if __name__ == '__main__':
try:
main()
tman = timing.timing_manager()
tman.report()
except Exception as e:
print('Exception occurred: "{}"'.format(e), flush=True)
if MPI.COMM_WORLD.size == 1:
raise
exc_type, exc_value, exc_traceback = sys.exc_info()
print('*** print_tb:')
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print('*** print_exception:')
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=5, file=sys.stdout)
print('*** print_exc:')
traceback.print_exc()
print('*** format_exc, first and last line:')
formatted_lines = traceback.format_exc().splitlines()
print(formatted_lines[0])
print(formatted_lines[-1])
print('*** format_exception:')
print(repr(traceback.format_exception(exc_type, exc_value,
exc_traceback)))
print('*** extract_tb:')
print(repr(traceback.extract_tb(exc_traceback)))
print('*** format_tb:')
print(repr(traceback.format_tb(exc_traceback)))
print('*** tb_lineno:', exc_traceback.tb_lineno, flush=True)
toast.raise_error(6) # typical error code for SIGABRT
MPI.COMM_WORLD.Abort(6)
finalize()
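# Illustration (sketch only, not part of the pipeline above) of the MC
# indexing used in main(): `mcoffset = ifreq * 1000000` gives every
# frequency its own block of realization indices, so `mc + mcoffset` is
# unique per (ifreq, mc) pair as long as fewer than 10**6 Monte Carlo
# realizations are requested; e.g. ifreq=2, mc=7 maps to noise
# realization 2000007.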
|
tskisner/pytoast
|
pipelines/toast_ground_sim.py
|
Python
|
bsd-2-clause
| 59,891
|
[
"Gaussian"
] |
c35b95d58cc03a8cc3bc330691e52345c82337c615479dae36f271999685d7e7
|
#!/usr/bin/env python
# Script to convert Tinker PDB atom naming to Amber.
# Note this won't change coordinates/atom ordering. To
# get atom ordering consistent with an Amber prmtop, the
# resulting PDB should be read into Leap & re-saved.
# Usage: tinker_pdb_to_amber.py input_pdb.pdb output_pdb.pdb
import mdtraj as md
import sys
inpdb = sys.argv[1]
outpdb = sys.argv[2]
p = md.load(inpdb)
# Rename water H atoms
for r in p.top.residues:
    if r.name == 'HOH':
r.atom(1).name = 'H1'
r.atom(2).name = 'H2'
# Rename N-term H atoms
for c in p.top.chains:
for a in c.residue(0).atoms:
if a.name == 'H':
a.name = 'H1'
# Rename His to Hid/Hie/Hip
for r in p.top.residues:
    if r.name == 'HIS':
try:
r.atom('HD1')
r.atom('HE2')
r.name = 'HIP'
continue
except KeyError:
pass
try:
r.atom('HD1')
r.name = 'HID'
continue
except KeyError:
pass
try:
r.atom('HE2')
r.name = 'HIE'
continue
except KeyError:
print "Residue %s should be a histidine but doesn't have HD1 or HE2" % r
p.save(outpdb)
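# Example invocation from the shell (file names are hypothetical):
#     python tinker_pdb_to_amber.py tinker_model.pdb amber_model.pdb
# A quick mdtraj sanity check of the histidine renaming (sketch only,
# assuming the output file from above):
#     out = md.load('amber_model.pdb')
#     print(sorted({r.name for r in out.top.residues
#                   if r.name.startswith('HI')}))  # e.g. ['HID', 'HIE']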
|
rtb1c13/scripts
|
General/tinker_pdb_to_amber.py
|
Python
|
gpl-2.0
| 1,237
|
[
"Amber",
"MDTraj",
"TINKER"
] |
8b07ebf39d7f9ca48bdb2de7ff2fcb75e09675fd12b9b6ae160d9d039b339f5d
|
"""This module defines an ASE interface to ABINIT.
http://www.abinit.org/
"""
import os
from glob import glob
from os.path import join, isfile, islink
import numpy as np
from ase.data import atomic_numbers
from ase.units import Bohr, Hartree, fs
from ase.data import chemical_symbols
from ase.io.abinit import read_abinit
from ase.calculators.calculator import FileIOCalculator, Parameters, kpts2mp, \
ReadError
keys_with_units = {
'toldfe': 'eV',
'tsmear': 'eV',
'paoenergyshift': 'eV',
'zmunitslength': 'Bohr',
'zmunitsangle': 'rad',
'zmforcetollength': 'eV/Ang',
'zmforcetolangle': 'eV/rad',
'zmmaxdispllength': 'Ang',
'zmmaxdisplangle': 'rad',
'ecut': 'eV',
'pawecutdg': 'eV',
'dmenergytolerance': 'eV',
'electronictemperature': 'eV',
'oneta': 'eV',
'onetaalpha': 'eV',
'onetabeta': 'eV',
'onrclwf': 'Ang',
'onchemicalpotentialrc': 'Ang',
'onchemicalpotentialtemperature': 'eV',
'mdmaxcgdispl': 'Ang',
'mdmaxforcetol': 'eV/Ang',
'mdmaxstresstol': 'eV/Ang**3',
'mdlengthtimestep': 'fs',
'mdinitialtemperature': 'eV',
'mdtargettemperature': 'eV',
'mdtargetpressure': 'eV/Ang**3',
'mdnosemass': 'eV*fs**2',
'mdparrinellorahmanmass': 'eV*fs**2',
'mdtaurelax': 'fs',
'mdbulkmodulus': 'eV/Ang**3',
'mdfcdispl': 'Ang',
'warningminimumatomicdistance': 'Ang',
'rcspatial': 'Ang',
'kgridcutoff': 'Ang',
'latticeconstant': 'Ang'}
class Abinit(FileIOCalculator):
"""Class for doing ABINIT calculations.
The default parameters are very close to those that the ABINIT
Fortran code would use. These are the exceptions::
calc = Abinit(label='abinit', xc='LDA', ecut=400, toldfe=1e-5)
"""
implemented_properties = ['energy', 'forces', 'stress', 'magmom']
command = 'abinis < PREFIX.files > PREFIX.log'
default_parameters = dict(
xc='LDA',
smearing=None,
kpts=None,
charge=0.0,
raw=None,
pps='fhi')
def __init__(self, restart=None, ignore_bad_restart_file=False,
label='abinit', atoms=None, scratch=None, **kwargs):
"""Construct ABINIT-calculator object.
Parameters
==========
label: str
Prefix to use for filenames (label.in, label.txt, ...).
Default is 'abinit'.
Examples
========
Use default values:
>>> h = Atoms('H', calculator=Abinit(ecut=200, toldfe=0.001))
>>> h.center(vacuum=3.0)
>>> e = h.get_potential_energy()
"""
self.scratch = scratch
self.species = None
self.ppp_list = None
FileIOCalculator.__init__(self, restart, ignore_bad_restart_file,
label, atoms, **kwargs)
def check_state(self, atoms):
system_changes = FileIOCalculator.check_state(self, atoms)
# Ignore boundary conditions:
if 'pbc' in system_changes:
system_changes.remove('pbc')
return system_changes
def set(self, **kwargs):
changed_parameters = FileIOCalculator.set(self, **kwargs)
if changed_parameters:
self.reset()
def write_input(self, atoms, properties=None, system_changes=None):
"""Write input parameters to files-file."""
FileIOCalculator.write_input(self, atoms, properties, system_changes)
if ('numbers' in system_changes or
'initial_magmoms' in system_changes):
self.initialize(atoms)
fh = open(self.label + '.files', 'w')
fh.write('%s\n' % (self.prefix + '.in')) # input
fh.write('%s\n' % (self.prefix + '.txt')) # output
fh.write('%s\n' % (self.prefix + 'i')) # input
fh.write('%s\n' % (self.prefix + 'o')) # output
# XXX:
# scratch files
#scratch = self.scratch
#if scratch is None:
# scratch = dir
#if not os.path.exists(scratch):
# os.makedirs(scratch)
#fh.write('%s\n' % (os.path.join(scratch, prefix + '.abinit')))
fh.write('%s\n' % (self.prefix + '.abinit'))
# Provide the psp files
for ppp in self.ppp_list:
fh.write('%s\n' % (ppp)) # psp file path
fh.close()
# Abinit will write to label.txtA if label.txt already exists,
# so we remove it if it's there:
filename = self.label + '.txt'
if os.path.isfile(filename):
os.remove(filename)
param = self.parameters
param.write(self.label + '.ase')
fh = open(self.label + '.in', 'w')
inp = {}
inp.update(param)
for key in ['xc', 'smearing', 'kpts', 'pps', 'raw']:
del inp[key]
smearing = param.get('smearing')
if 'tsmear' in param or 'occopt' in param:
assert smearing is None
if smearing is not None:
inp['occopt'] = {'fermi-dirac': 3,
'gaussian': 7}[smearing[0].lower()]
inp['tsmear'] = smearing[1]
inp['natom'] = len(atoms)
if 'nbands' in param:
inp['nband'] = param.nbands
del inp['nbands']
if 'ixc' not in param:
inp['ixc'] = {'LDA': 7,
'PBE': 11,
'revPBE': 14,
'RPBE': 15,
'WC': 23}[param.xc]
magmoms = atoms.get_initial_magnetic_moments()
if magmoms.any():
inp['nsppol'] = 2
fh.write('spinat\n')
for n, M in enumerate(magmoms):
fh.write('%.14f %.14f %.14f\n' % (0, 0, M))
else:
inp['nsppol'] = 1
for key in sorted(inp.keys()):
value = inp[key]
unit = keys_with_units.get(key)
if unit is None:
fh.write('%s %s\n' % (key, value))
else:
if 'fs**2' in unit:
value /= fs**2
elif 'fs' in unit:
value /= fs
fh.write('%s %e %s\n' % (key, value, unit))
if param.raw is not None:
for line in param.raw:
if isinstance(line, tuple):
fh.write(' '.join(['%s' % x for x in line]) + '\n')
else:
fh.write('%s\n' % line)
fh.write('#Definition of the unit cell\n')
fh.write('acell\n')
fh.write('%.14f %.14f %.14f Angstrom\n' % (1.0, 1.0, 1.0))
fh.write('rprim\n')
for v in atoms.cell:
fh.write('%.14f %.14f %.14f\n' % tuple(v))
fh.write('chkprim 0 # Allow non-primitive cells\n')
fh.write('#Definition of the atom types\n')
fh.write('ntypat %d\n' % (len(self.species)))
fh.write('znucl')
for n, Z in enumerate(self.species):
fh.write(' %d' % (Z))
fh.write('\n')
fh.write('#Enumerate different atomic species\n')
fh.write('typat')
fh.write('\n')
self.types = []
for Z in atoms.numbers:
for n, Zs in enumerate(self.species):
if Z == Zs:
self.types.append(n + 1)
n_entries_int = 20 # integer entries per line
for n, type in enumerate(self.types):
fh.write(' %d' % (type))
if n > 1 and ((n % n_entries_int) == 1):
fh.write('\n')
fh.write('\n')
fh.write('#Definition of the atoms\n')
fh.write('xangst\n')
for pos in atoms.positions:
fh.write('%.14f %.14f %.14f\n' % tuple(pos))
if 'kptopt' not in param:
mp = kpts2mp(atoms, param.kpts)
fh.write('kptopt 1\n')
fh.write('ngkpt %d %d %d\n' % tuple(mp))
fh.write('nshiftk 1\n')
fh.write('shiftk\n')
fh.write('%.1f %.1f %.1f\n' % tuple((np.array(mp) + 1) % 2 * 0.5))
fh.write('chkexit 1 # abinit.exit file in the running directory terminates after the current SCF\n')
fh.close()
def read(self, label):
"""Read results from ABINIT's text-output file."""
FileIOCalculator.read(self, label)
filename = self.label + '.txt'
if not os.path.isfile(filename):
raise ReadError
self.atoms = read_abinit(self.label + '.in')
self.parameters = Parameters.read(self.label + '.ase')
self.initialize(self.atoms)
self.read_results()
def read_results(self):
filename = self.label + '.txt'
text = open(filename).read().lower()
if ('error' in text or
'was not enough scf cycles to converge' in text):
raise ReadError
for line in iter(text.split('\n')):
if line.rfind('natom ') > -1:
natoms = int(line.split()[-1])
lines = iter(text.split('\n'))
# Stress:
# Printed in the output in the following format [Hartree/Bohr^3]:
# sigma(1 1)= 4.02063464E-04 sigma(3 2)= 0.00000000E+00
# sigma(2 2)= 4.02063464E-04 sigma(3 1)= 0.00000000E+00
# sigma(3 3)= 4.02063464E-04 sigma(2 1)= 0.00000000E+00
for line in lines:
if line.rfind(
'cartesian components of stress tensor (hartree/bohr^3)') > -1:
stress = np.empty(6)
for i in range(3):
                    entries = next(lines).split()
stress[i] = float(entries[2])
stress[i + 3] = float(entries[5])
self.results['stress'] = stress * Hartree / Bohr**3
break
else:
raise RuntimeError
# Energy [Hartree]:
# Warning: Etotal could mean both electronic energy and free energy!
etotal = None
efree = None
if 'PAW method is used'.lower() in text: # read DC energy according to M. Torrent
for line in iter(text.split('\n')):
if line.rfind('>>>>> internal e=') > -1:
                    etotal = float(line.split('=')[-1])*Hartree # second occurrence!
for line in iter(text.split('\n')):
if line.rfind('>>>> etotal (dc)=') > -1:
efree = float(line.split('=')[-1])*Hartree
else:
for line in iter(text.split('\n')):
if line.rfind('>>>>> internal e=') > -1:
                    etotal = float(line.split('=')[-1])*Hartree # first occurrence!
break
for line in iter(text.split('\n')):
if line.rfind('>>>>>>>>> etotal=') > -1:
efree = float(line.split('=')[-1])*Hartree
if efree is None:
raise RuntimeError('Total energy not found')
if etotal is None:
etotal = efree
# Energy extrapolated to zero Kelvin:
self.results['energy'] = (etotal + efree) / 2
self.results['free_energy'] = efree
# Forces:
for line in lines:
if line.rfind('cartesian forces (ev/angstrom) at end:') > -1:
forces = []
for i in range(natoms):
forces.append(np.array(
                        [float(f) for f in next(lines).split()[1:]]))
self.results['forces'] = np.array(forces)
break
else:
raise RuntimeError
#
self.width = self.read_electronic_temperature()
self.nband = self.read_number_of_bands()
self.niter = self.read_number_of_iterations()
self.nelect = self.read_number_of_electrons()
self.results['magmom'] = self.read_magnetic_moment()
def initialize(self, atoms):
numbers = atoms.get_atomic_numbers().copy()
self.species = []
for a, Z in enumerate(numbers):
if Z not in self.species:
self.species.append(Z)
self.spinpol = atoms.get_initial_magnetic_moments().any()
if 'ABINIT_PP_PATH' in os.environ:
pppaths = os.environ['ABINIT_PP_PATH'].split(':')
else:
pppaths = []
self.ppp_list = []
if self.parameters.xc != 'LDA':
xcname = 'GGA'
else:
xcname = 'LDA'
pps = self.parameters.pps
if pps not in ['fhi', 'hgh', 'hgh.sc', 'hgh.k', 'tm', 'paw']:
raise ValueError('Unexpected PP identifier %s' % pps)
for Z in self.species:
symbol = chemical_symbols[abs(Z)]
number = atomic_numbers[symbol]
if pps == 'fhi':
name = '%02d-%s.%s.fhi' % (number, symbol, xcname)
elif pps in ['paw']:
hghtemplate = '%s-%s-%s.paw' # E.g. "H-GGA-hard-uspp.paw"
name = hghtemplate % (symbol, xcname, '*')
elif pps in ['hgh.k']:
hghtemplate = '%s-q%s.hgh.k' # E.g. "Co-q17.hgh.k"
name = hghtemplate % (symbol, '*')
elif pps in ['tm']:
hghtemplate = '%d%s%s.pspnc' # E.g. "44ru.pspnc"
name = hghtemplate % (number, symbol.lower(), '*')
elif pps in ['hgh', 'hgh.sc']:
hghtemplate = '%d%s.%s.hgh' # E.g. "42mo.6.hgh"
# There might be multiple files with different valence
# electron counts, so we must choose between
# the ordinary and the semicore versions for some elements.
#
# Therefore we first use glob to get all relevant files,
# then pick the correct one afterwards.
name = hghtemplate % (number, symbol.lower(), '*')
found = False
for path in pppaths:
if (pps.startswith('paw') or
pps.startswith('hgh') or
pps.startswith('tm')):
filenames = glob(join(path, name))
if not filenames:
continue
assert len(filenames) in [0, 1, 2]
if pps == 'paw':
selector = max # Semicore or hard
# warning: see download.sh in
# abinit-pseudopotentials*tar.gz for additional
# information!
S = selector(
[str(os.path.split(name)[1].split('-')[2][:-4])
for name in filenames])
name = hghtemplate % (symbol, xcname, S)
elif pps == 'hgh':
selector = min # Lowest valence electron count
Z = selector([int(os.path.split(name)[1].split('.')[1])
for name in filenames])
name = hghtemplate % (number, symbol.lower(), str(Z))
elif pps == 'hgh.k':
selector = max # Semicore - highest electron count
Z = selector(
[int(os.path.split(name)[1].split('-')[1][:-6][1:])
for name in filenames])
name = hghtemplate % (symbol, Z)
elif pps == 'tm':
selector = max # Semicore - highest electron count
# currently only one version of psp per atom
name = hghtemplate % (number, symbol.lower(), '')
else:
assert pps == 'hgh.sc'
selector = max # Semicore - highest electron count
Z = selector([int(os.path.split(name)[1].split('.')[1])
for name in filenames])
name = hghtemplate % (number, symbol.lower(), str(Z))
filename = join(path, name)
if isfile(filename) or islink(filename):
found = True
self.ppp_list.append(filename)
break
if not found:
raise RuntimeError('No pseudopotential for %s!' % symbol)
def get_number_of_iterations(self):
return self.niter
def read_number_of_iterations(self):
niter = None
for line in open(self.label + '.txt'):
if line.find(' At SCF step') != -1: # find the last iteration number
niter = int(line.split(',')[0].split()[-1].strip())
return niter
def get_electronic_temperature(self):
return self.width * Hartree
def read_electronic_temperature(self):
width = None
# only in log file!
for line in open(self.label + '.log'): # find last one
if line.find('tsmear') != -1:
width = float(line.split()[1].strip())
return width
def get_number_of_electrons(self):
return self.nelect
def read_number_of_electrons(self):
nelect = None
# only in log file!
for line in open(self.label + '.log'): # find last one
if line.find('with nelect') != -1:
nelect = float(line.split('=')[1].strip())
return nelect
def get_number_of_bands(self):
return self.nband
def read_number_of_bands(self):
nband = None
for line in open(self.label + '.txt'): # find last one
if line.find(' nband') != -1: # nband, or nband1, nband*
nband = int(line.split()[-1].strip())
return nband
def get_kpts_info(self, kpt=0, spin=0, mode='eigenvalues'):
return self.read_kpts_info(kpt, spin, mode)
def get_k_point_weights(self):
return self.get_kpts_info(kpt=0, spin=0, mode='k_point_weights')
def get_bz_k_points(self):
raise NotImplementedError
def get_ibz_k_points(self):
return self.get_kpts_info(kpt=0, spin=0, mode='ibz_k_points')
def get_spin_polarized(self):
return self.spinpol
def get_number_of_spins(self):
return 1 + int(self.spinpol)
def read_magnetic_moment(self):
magmom = None
if not self.get_spin_polarized():
magmom = 0.0
else: # only for spinpolarized system Magnetisation is printed
for line in open(self.label + '.txt'):
if line.find('Magnetisation') != -1: # last one
magmom = float(line.split('=')[-1].strip())
return magmom
def get_fermi_level(self):
return self.read_fermi()
def get_eigenvalues(self, kpt=0, spin=0):
return self.get_kpts_info(kpt, spin, 'eigenvalues')
def get_occupations(self, kpt=0, spin=0):
return self.get_kpts_info(kpt, spin, 'occupations')
def read_fermi(self):
"""Method that reads Fermi energy in Hartree from the output file
and returns it in eV"""
E_f=None
filename = self.label + '.txt'
text = open(filename).read().lower()
assert 'error' not in text
for line in iter(text.split('\n')):
if line.rfind('fermi (or homo) energy (hartree) =') > -1:
E_f = float(line.split('=')[1].strip().split()[0])
return E_f*Hartree
def read_kpts_info(self, kpt=0, spin=0, mode='eigenvalues'):
""" Returns list of last eigenvalues, occupations, kpts weights, or
kpts coordinates for given kpt and spin.
        Due to the way the output is read, the spins are exchanged in the spin-polarized case. """
# output may look like this (or without occupation entries); 8 entries per line:
#
# Eigenvalues (hartree) for nkpt= 20 k points:
# kpt# 1, nband= 3, wtk= 0.01563, kpt= 0.0625 0.0625 0.0625 (reduced coord)
# -0.09911 0.15393 0.15393
# occupation numbers for kpt# 1
# 2.00000 0.00000 0.00000
# kpt# 2, nband= 3, wtk= 0.04688, kpt= 0.1875 0.0625 0.0625 (reduced coord)
# ...
#
assert mode in ['eigenvalues', 'occupations', 'ibz_k_points',
'k_point_weights'], mode
if self.get_spin_polarized():
spin = {0: 1, 1: 0}[spin]
if spin == 0:
spinname = ''
else:
spinname = 'SPIN UP'.lower()
# number of lines of eigenvalues/occupations for a kpt
nband = self.get_number_of_bands()
n_entries_float = 8 # float entries per line
n_entry_lines = max(1, int((nband - 0.1) / n_entries_float) + 1)
filename = self.label + '.txt'
text = open(filename).read().lower()
assert 'error' not in text
lines = text.split('\n')
text_list = []
        # find the beginning line of last eigenvalues
contains_eigenvalues = 0
for n, line in enumerate(lines):
if spin == 0:
if line.rfind('eigenvalues (hartree) for nkpt') > -1:
#if line.rfind('eigenvalues ( ev ) for nkpt') > -1: #MDTMP
contains_eigenvalues = n
else:
if (line.rfind('eigenvalues (hartree) for nkpt') > -1 and
line.rfind(spinname) > -1): # find the last 'SPIN UP'
contains_eigenvalues = n
# find the end line of eigenvalues starting from contains_eigenvalues
text_list = [lines[contains_eigenvalues]]
for line in lines[contains_eigenvalues + 1:]:
text_list.append(line)
# find a blank line or eigenvalues of second spin
if (not line.strip() or
line.rfind('eigenvalues (hartree) for nkpt') > -1):
break
# remove last (blank) line
text_list = text_list[:-1]
assert contains_eigenvalues, 'No eigenvalues found in the output'
n_kpts = int(text_list[0].split('nkpt=')[1].strip().split()[0])
# get rid of the "eigenvalues line"
text_list = text_list[1:]
# join text eigenvalues description with eigenvalues
# or occupation numbers for kpt# with occupations
contains_occupations = False
for line in text_list:
if line.rfind('occupation numbers') > -1:
contains_occupations = True
break
if mode == 'occupations':
assert contains_occupations, 'No occupations found in the output'
if contains_occupations:
range_kpts = 2*n_kpts
else:
range_kpts = n_kpts
values_list = []
offset = 0
for kpt_entry in range(range_kpts):
full_line = ''
for entry_line in range(n_entry_lines+1):
full_line = full_line+str(text_list[offset+entry_line])
first_line = text_list[offset]
if mode == 'occupations':
if first_line.rfind('occupation numbers') > -1:
# extract numbers
full_line = [float(v) for v in full_line.split('#')[1].strip().split()[1:]]
values_list.append(full_line)
elif mode in ['eigenvalues', 'ibz_k_points', 'k_point_weights']:
if first_line.rfind('reduced coord') > -1:
# extract numbers
if mode == 'eigenvalues':
full_line = [Hartree*float(v) for v in full_line.split(')')[1].strip().split()[:]]
#full_line = [float(v) for v in full_line.split(')')[1].strip().split()[:]] #MDTMP
elif mode == 'ibz_k_points':
full_line = [float(v) for v in full_line.split('kpt=')[1].strip().split('(')[0].split()]
else:
full_line = float(full_line.split('wtk=')[1].strip().split(',')[0].split()[0])
values_list.append(full_line)
offset = offset+n_entry_lines+1
if mode in ['occupations', 'eigenvalues']:
return np.array(values_list[kpt])
else:
return np.array(values_list)
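# Minimal usage sketch for the calculator above, following the class
# docstring; the numeric settings are illustrative, and a working
# ABINIT installation plus pseudopotentials on ABINIT_PP_PATH are
# assumed:
#     from ase import Atoms
#     h = Atoms('H', calculator=Abinit(ecut=200, toldfe=0.001))
#     h.center(vacuum=3.0)
#     e = h.get_potential_energy()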
|
suttond/MODOI
|
ase/calculators/abinit.py
|
Python
|
lgpl-3.0
| 24,084
|
[
"ABINIT",
"ASE",
"DIRAC",
"Gaussian"
] |
67d622c2a4b809e37ac614b268f001d7ee57fb2ee6a4707497948906b2253297
|
"""K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.sparsefuncs import mean_variance_axis0
from ..utils import check_arrays
from ..utils import check_random_state
from ..utils import atleast2d_or_csr
from ..utils import as_float_array
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, n_local_trials=None, random_state=None,
x_squared_norms=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you already
        have it at hand, to avoid it being recomputed here. Default: None
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
random_state = check_random_state(random_state)
centers = np.empty((n_clusters, n_features))
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
if x_squared_norms is None:
x_squared_norms = _squared_norms(X)
closest_dist_sq = euclidean_distances(
centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
        for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
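# Hedged demo of the seeding routine above (illustration only; _k_init
# is private API, so this mirrors internal usage rather than a public
# interface):
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(100, 2)
#     seeds = _k_init(X_demo, n_clusters=3, random_state=rng)
#     seeds.shape    # (3, 2): one k-means++ seed per requested cluster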
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis0(X)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
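# Example: for roughly unit-variance features the absolute tolerance
# reduces to `tol` itself, since np.mean(np.var(X, axis=0)) is ~1, so
# _tolerance(X, 1e-4) is ~1e-4 (illustration only).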
def k_means(X, n_clusters, init='k-means++', precompute_distances=True,
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1, k=None):
"""K-means clustering algorithm.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
"""
random_state = check_random_state(random_state)
    if k is not None:
n_clusters = k
warnings.warn("Parameter k has been renamed to 'n_clusters'"
" and will be removed in release 0.14.",
DeprecationWarning, stacklevel=2)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
    # subtract mean of X for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
if copy_x:
X = X.copy()
X -= X_mean
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
init -= X_mean
            if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in the k-means instead of %d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = _squared_norms(X)
best_labels, best_inertia, best_centers = None, None, None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers = _kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
init=init, verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
return best_centers, best_labels, best_inertia
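# Usage sketch for the functional interface above (illustration only;
# `X` is an assumed (n_samples, n_features) array):
#     centers, labels, inertia = k_means(X, n_clusters=8, random_state=0)
# `centers` has shape (8, n_features), `labels` holds the index of the
# nearest center for each row of X, and `inertia` is the summed squared
# distance of the samples to their assigned centers.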
def _kmeans_single(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None, random_state=None,
tol=1e-4, precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
k: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol: float, optional
The relative increment in the results before declaring convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array, optional
Precomputed x_squared_norms. Calculated if not given.
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
"""
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = _squared_norms(X)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
        print('Initialization complete')
# Allocate memory to store the distances for each sample to its
    # closest center, for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
        # label assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
            print('Iteration %i, inertia %s' % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
if np.sum((centers_old - centers) ** 2) < tol:
if verbose:
                print('Converged to similar centers at iteration %d' % i)
break
return best_labels, best_inertia, best_centers
def _squared_norms(X):
"""Compute the squared euclidean norms of the rows of X"""
if sp.issparse(X):
return _k_means.csr_row_norm_l2(X, squared=True)
else:
# TODO: implement a cython version to avoid the memory copy of the
# input data
return (X ** 2).sum(axis=1)
def _labels_inertia_precompute_dense(X, x_squared_norms, centers):
n_samples = X.shape[0]
k = centers.shape[0]
distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm
Compute the labels and the inertia of the given samples and centers
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float64 array, shape (k, n_features)
The cluster centers.
distances: float64 array, shape (n_samples,)
Distances for each sample to its closest center.
Returns
-------
labels: int array of shape(n)
The resulting assignment
inertia: float
The value of the inertia criterion with the assignment
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = - np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=np.float64)
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X: array, shape (n_samples, n_features)
k: int
number of centroids
init: {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you already
        have it at hand, to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers: array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
centers = init
elif callable(init):
centers = init(X, k, random_state=random_state)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int
Maximum number of iterations of the k-means algorithm for a
single run.
n_init: int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
precompute_distances : boolean
Precompute distances (faster but takes more memory).
    tol: float, optional, default: 1e-4
Relative tolerance w.r.t. inertia to declare convergence
n_jobs: int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
`cluster_centers_`: array, [n_clusters, n_features]
Coordinates of cluster centers
`labels_`:
Labels of each point
`inertia_`: float
The value of the inertia criterion associated with the chosen
partition.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it can fall into local minima. That's why
it can be useful to restart it several times.
See also
--------
MiniBatchKMeans:
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
tol=1e-4, precompute_distances=True,
verbose=0, random_state=None, copy_x=True, n_jobs=1, k=None):
if hasattr(init, '__array__'):
n_clusters = init.shape[0]
init = np.asanyarray(init, dtype=np.float64)
self.n_clusters = n_clusters
self.k = k
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = atleast2d_or_csr(X, dtype=np.float64)
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = atleast2d_or_csr(X)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
        if X.dtype.kind != 'f':
warnings.warn("Got data type %s, converted to float "
"to avoid overflows" % X.dtype,
RuntimeWarning, stacklevel=2)
            X = X.astype(np.float64)
return X
def _check_fitted(self):
if not hasattr(self, "cluster_centers_"):
raise AttributeError("Model has not been trained yet.")
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
        if self.k is not None:
            n_clusters = self.k
            warnings.warn("Parameter k has been renamed to 'n_clusters'"
" and will be removed in release 0.14.",
DeprecationWarning, stacklevel=2)
self.n_clusters = n_clusters
else:
n_clusters = self.n_clusters
self.random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_ = k_means(
X, n_clusters=n_clusters, init=self.init, n_init=self.n_init,
max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=self.random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs)
return self
def fit_predict(self, X):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
self._check_fitted()
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
Y : array, shape [n_samples,]
Index of the closest center each sample belongs to.
"""
self._check_fitted()
X = self._check_test_data(X)
x_squared_norms = _squared_norms(X)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score: float
Opposite of the value of X on the K-means objective.
"""
self._check_fitted()
X = self._check_test_data(X)
x_squared_norms = _squared_norms(X)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
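# Usage sketch for the estimator above (illustration only; `X` and
# `X_new` are assumed data arrays of shape (n_samples, n_features)):
#     km = KMeans(n_clusters=3, random_state=0).fit(X)
#     km.labels_           # cluster index of each training sample
#     km.cluster_centers_  # (3, n_features) centroid coordinates
#     km.predict(X_new)    # nearest-centroid assignment for new data
#     km.transform(X_new)  # distances from new data to each centroid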
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances=None):
"""Incremental update of the centers for the Minibatch K-Means algorithm
Parameters
----------
X: array, shape (n_samples, n_features)
The original data array.
x_squared_norms: array, shape (n_samples,)
Squared euclidean norm of each data point.
centers: array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts: array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances: array, dtype float64, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
"""
    # Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
    # implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
centers[center_idx] /= counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
squared_diff += np.sum(
(centers[center_idx] - old_center_buffer) ** 2)
return inertia, squared_diff
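# Hedged sketch of a single dense mini-batch update (illustration
# only; `X_batch` and `centers` are assumed arrays):
#     counts = np.zeros(centers.shape[0], dtype=np.int32)
#     buf = np.zeros(X_batch.shape[1])
#     inertia, sq_diff = _mini_batch_step(
#         X_batch, _squared_norms(X_batch), centers, counts, buf, True)
# `centers` and `counts` are modified in place, as documented above.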
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
        progress_msg = (
            'Minibatch iteration %d/%d: '
            'mean batch inertia: %f, ewa inertia: %f' % (
                iteration_idx + 1, n_iter, batch_inertia,
                ewa_inertia))
        print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
    # centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff < tol:
if verbose:
            print('Converged (small centers change) at iteration %d/%d' % (
                iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if (ewa_inertia_min is None or ewa_inertia < ewa_inertia_min):
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
            print('Converged (lack of improvement in inertia)'
' at iteration %d/%d' % (
iteration_idx + 1, n_iter))
return True
    # update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
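# Worked example of the EWA update above (illustration only): with
# batch_size=100 and n_samples=10000, alpha = 100 * 2.0 / 10001 ~ 0.02,
# so each new batch contributes about 2% to the smoothed statistics:
#     ewa_inertia_new = 0.98 * ewa_inertia_old + 0.02 * batch_inertia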
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, optional
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, optional
        Control early stopping based on the relative center changes, as
        measured by a smoothed, variance-normalized mean of the squared
        center position changes. This early stopping heuristic is
closer to the one used for the batch variant of the algorithms
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size: int, optional, default: 100
Size of the mini batches.
init_size: int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
compute_labels: boolean
        Compute label assignments and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
`cluster_centers_`: array, [n_clusters, n_features]
Coordinates of cluster centers
`labels_`:
Labels of each point (if compute_labels is set to True).
`inertia_`: float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their
        nearest cluster center.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, k=None):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init,
k=k)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster
"""
self.random_state = check_random_state(self.random_state)
if self.k is not None:
warnings.warn("Parameter k has been replaced by 'n_clusters'"
" and will be removed in release 0.14.",
DeprecationWarning, stacklevel=2)
self.n_clusters = self.k
X = check_arrays(X, sparse_format="csr", copy=False,
check_ccontiguous=True, dtype=np.float64)[0]
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
x_squared_norms = _squared_norms(X)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
            # using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high-dim data:
            # hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, np.double)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, np.double)
distances = np.zeros(self.batch_size, dtype=np.float64)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = self.random_state.random_integers(0, n_samples -
1, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(self.n_init):
if self.verbose:
print "Init %d/%d with method: %s" % (
init_idx + 1, self.n_init, self.init)
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
            # Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=distances)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print "Inertia for init %d/%d: %f" % (
init_idx + 1, self.n_init, inertia)
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
        for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = self.random_state.random_integers(
0, n_samples - 1, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
if self.compute_labels:
if self.verbose:
                print('Computing label assignments and total inertia')
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
self.random_state = check_random_state(self.random_state)
X = check_arrays(X, sparse_format="csr", copy=False)[0]
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_samples == 0:
return self
x_squared_norms = _squared_norms(X)
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
            # this is the first call to partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init, random_state=self.random_state,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, np.double), 0)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
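# Streaming usage sketch for the estimator above (illustration only;
# `data_chunks` is a hypothetical iterable of (batch_size, n_features)
# arrays):
#     mbk = MiniBatchKMeans(n_clusters=8, batch_size=100, random_state=0)
#     for chunk in data_chunks:
#         mbk.partial_fit(chunk)
#     mbk.cluster_centers_  # updated incrementally after every chunk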
|
mrshu/scikit-learn
|
sklearn/cluster/k_means_.py
|
Python
|
bsd-3-clause
| 45,375
|
[
"Gaussian"
] |
2469bb4b75fa145797071ee28c3ab90b076b4c9d1956d6253c5edcfb934839d9
|
# $HeadURL$
"""
Encoding and decoding for DIRAC. Type ids:
i -> int
I -> long
f -> float
b -> bool
s -> string
z -> datetime
n -> none
l -> list
t -> tuple
d -> dictionary
"""
__RCSID__ = "$Id$"
import types
import datetime
import os
import inspect
import traceback
from pprint import pprint
# Setting this environment variable to any value will enable the dump of the debugging
# call stack
DIRAC_DEBUG_DENCODE_CALLSTACK = bool(os.environ.get('DIRAC_DEBUG_DENCODE_CALLSTACK', False))
# Depth of the stack to look for with inspect
CONTEXT_DEPTH = 100
def printDebugCallstack():
""" Prints information about the current stack as well as the caller parameters.
The purpose of this method is to track down all the places in DIRAC that might
not survive the change to JSON encoding.
:returns: None
"""
def stripArgs(frame):
""" Keeps only the parameters and their values from a frame
:param frame: frame object
:returns: dict {param name: value}
"""
# Get all the arguments of the call
allArgs = inspect.getargvalues(frame)
# Keep only the arguments that are parameters of the call, as well as their value
return dict([(argName, allArgs.locals[argName]) for argName in allArgs.args])
tb = traceback.format_stack()
frames = inspect.stack(context=CONTEXT_DEPTH)
# print the traceback that leads us here
# remove the last element which is the traceback module call
for line in tb[:-1]:
print line
# Now we try to navigate up to the caller of dEncode.
# For this, we find the frame in which we enter dEncode.
# We keep the parameters to display it.
# Then we navigate to the parent frame, and we display the file
# and line number where this call was done
try:
framesIter = iter(frames)
for frame in framesIter:
# First check that we are using either 'encode' or 'decode' function
if frame[3] in ('encode', 'decode'):
# Then check it is the good file
if frame[1].endswith('DIRAC/Core/Utilities/DEncode.py'):
# Keep the arguments of the DEncode call
dencArgs = stripArgs(frame[0])
# Take the calling frame
frame = next(framesIter)
print "Calling frame: %s" % (frame[1:3],)
print "With arguments ",
pprint(dencArgs)
break
except BaseException:
pass
print "=" * 100
print
print
_dateTimeObject = datetime.datetime.utcnow()
_dateTimeType = type(_dateTimeObject)
_dateType = type(_dateTimeObject.date())
_timeType = type(_dateTimeObject.time())
g_dEncodeFunctions = {}
g_dDecodeFunctions = {}
def encodeInt(iValue, eList):
"""Encoding ints """
eList.extend(("i", str(iValue), "e"))
def decodeInt(data, i):
"""Decoding ints """
i += 1
end = data.index('e', i)
value = int(data[i:end])
return (value, end + 1)
g_dEncodeFunctions[types.IntType] = encodeInt
g_dDecodeFunctions["i"] = decodeInt
def encodeLong(iValue, eList):
""" Encoding longs """
# corrected by KGG eList.extend( ( "l", str( iValue ), "e" ) )
eList.extend(("I", str(iValue), "e"))
def decodeLong(data, i):
""" Decoding longs """
i += 1
end = data.index('e', i)
value = long(data[i:end])
return (value, end + 1)
g_dEncodeFunctions[types.LongType] = encodeLong
g_dDecodeFunctions["I"] = decodeLong
def encodeFloat(iValue, eList):
""" Encoding floats """
eList.extend(("f", str(iValue), "e"))
def decodeFloat(data, i):
""" Decoding floats """
i += 1
end = data.index('e', i)
if end + 1 < len(data) and data[end + 1] in ('+', '-'):
eI = end
end = data.index('e', end + 1)
value = float(data[i:eI]) * 10 ** int(data[eI + 1:end])
else:
value = float(data[i:end])
return (value, end + 1)
g_dEncodeFunctions[types.FloatType] = encodeFloat
g_dDecodeFunctions["f"] = decodeFloat
def encodeBool(bValue, eList):
""" Encoding booleans """
if bValue:
eList.append("b1")
else:
eList.append("b0")
def decodeBool(data, i):
""" Decoding booleans """
if data[i + 1] == "0":
return (False, i + 2)
else:
return (True, i + 2)
g_dEncodeFunctions[types.BooleanType] = encodeBool
g_dDecodeFunctions["b"] = decodeBool
def encodeString(sValue, eList):
""" Encoding strings """
eList.extend(('s', str(len(sValue)), ':', sValue))
def decodeString(data, i):
""" Decoding strings """
i += 1
colon = data.index(":", i)
value = int(data[i: colon])
colon += 1
end = colon + value
return (data[colon: end], end)
g_dEncodeFunctions[types.StringType] = encodeString
g_dDecodeFunctions["s"] = decodeString
def encodeUnicode(sValue, eList):
""" Encoding unicode strings """
valueStr = sValue.encode('utf-8')
eList.extend(('u', str(len(valueStr)), ':', valueStr))
def decodeUnicode(data, i):
""" Decoding unicode strings """
i += 1
colon = data.index(":", i)
value = int(data[i: colon])
colon += 1
end = colon + value
return (unicode(data[colon: end], 'utf-8'), end)
g_dEncodeFunctions[types.UnicodeType] = encodeUnicode
g_dDecodeFunctions["u"] = decodeUnicode
def encodeDateTime(oValue, eList):
""" Encoding datetime """
if isinstance(oValue, _dateTimeType):
tDateTime = (oValue.year, oValue.month, oValue.day,
oValue.hour, oValue.minute, oValue.second,
oValue.microsecond, oValue.tzinfo)
eList.append("za")
# corrected by KGG encode( tDateTime, eList )
g_dEncodeFunctions[type(tDateTime)](tDateTime, eList)
elif isinstance(oValue, _dateType):
tData = (oValue.year, oValue.month, oValue.day)
eList.append("zd")
# corrected by KGG encode( tData, eList )
g_dEncodeFunctions[type(tData)](tData, eList)
elif isinstance(oValue, _timeType):
tTime = (oValue.hour, oValue.minute, oValue.second, oValue.microsecond, oValue.tzinfo)
eList.append("zt")
# corrected by KGG encode( tTime, eList )
g_dEncodeFunctions[type(tTime)](tTime, eList)
else:
raise Exception("Unexpected type %s while encoding a datetime object" % str(type(oValue)))
def decodeDateTime(data, i):
""" Decoding datetime """
i += 1
dataType = data[i]
# corrected by KGG tupleObject, i = decode( data, i + 1 )
tupleObject, i = g_dDecodeFunctions[data[i + 1]](data, i + 1)
if dataType == 'a':
dtObject = datetime.datetime(*tupleObject)
elif dataType == 'd':
dtObject = datetime.date(*tupleObject)
elif dataType == 't':
dtObject = datetime.time(*tupleObject)
else:
raise Exception("Unexpected type %s while decoding a datetime object" % dataType)
return (dtObject, i)
g_dEncodeFunctions[_dateTimeType] = encodeDateTime
g_dEncodeFunctions[_dateType] = encodeDateTime
g_dEncodeFunctions[_timeType] = encodeDateTime
g_dDecodeFunctions['z'] = decodeDateTime
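# Added note: datetimes are encoded as 'z' plus a subtype marker ('a', 'd'
# or 't') followed by the encoded component tuple. Illustrative example,
# derived from the code:
#
#   encode(datetime.date(2020, 1, 2)) -> 'zdti2020ei1ei2ee'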
def encodeNone(_oValue, eList):
""" Encoding None """
eList.append("n")
def decodeNone(_data, i):
""" Decoding None """
return (None, i + 1)
g_dEncodeFunctions[types.NoneType] = encodeNone
g_dDecodeFunctions['n'] = decodeNone
def encodeList(lValue, eList):
""" Encoding list """
eList.append("l")
for uObject in lValue:
g_dEncodeFunctions[type(uObject)](uObject, eList)
eList.append("e")
def decodeList(data, i):
""" Decoding list """
oL = []
i += 1
while data[i] != "e":
ob, i = g_dDecodeFunctions[data[i]](data, i)
oL.append(ob)
return(oL, i + 1)
g_dEncodeFunctions[types.ListType] = encodeList
g_dDecodeFunctions["l"] = decodeList
def encodeTuple(lValue, eList):
""" Encoding tuple """
if DIRAC_DEBUG_DENCODE_CALLSTACK:
print '=' * 45, "Encoding tuples", '=' * 45
printDebugCallstack()
eList.append("t")
for uObject in lValue:
g_dEncodeFunctions[type(uObject)](uObject, eList)
eList.append("e")
def decodeTuple(data, i):
""" Decoding tuple """
if DIRAC_DEBUG_DENCODE_CALLSTACK:
print '=' * 45, "Decoding tuples", '=' * 45
printDebugCallstack()
oL, i = decodeList(data, i)
return (tuple(oL), i)
g_dEncodeFunctions[types.TupleType] = encodeTuple
g_dDecodeFunctions["t"] = decodeTuple
def encodeDict(dValue, eList):
""" Encoding dictionary """
if DIRAC_DEBUG_DENCODE_CALLSTACK:
# If we have numbers as keys
if any([isinstance(x, (int, float, long)) for x in dValue]):
print '=' * 40, "Encoding dict with numeric keys", '=' * 40
printDebugCallstack()
eList.append("d")
for key in sorted(dValue):
g_dEncodeFunctions[type(key)](key, eList)
g_dEncodeFunctions[type(dValue[key])](dValue[key], eList)
eList.append("e")
def decodeDict(data, i):
""" Decoding dictionary """
oD = {}
i += 1
while data[i] != "e":
if DIRAC_DEBUG_DENCODE_CALLSTACK:
# If we have numbers as keys
if data[i] in ('i', 'I', 'f'):
print '=' * 40, "Decoding dict with numeric keys", '=' * 40
printDebugCallstack()
k, i = g_dDecodeFunctions[data[i]](data, i)
oD[k], i = g_dDecodeFunctions[data[i]](data, i)
return (oD, i + 1)
g_dEncodeFunctions[types.DictType] = encodeDict
g_dDecodeFunctions["d"] = decodeDict
# Encode function
def encode(uObject):
""" Generic encoding function """
try:
eList = []
# print "ENCODE FUNCTION : %s" % g_dEncodeFunctions[ type( uObject ) ]
g_dEncodeFunctions[type(uObject)](uObject, eList)
return "".join(eList)
except Exception:
raise
def decode(data):
""" Generic decoding function """
if not data:
return data
try:
# print "DECODE FUNCTION : %s" % g_dDecodeFunctions[ sStream [ iIndex ] ]
return g_dDecodeFunctions[data[0]](data, 0)
except Exception:
raise
if __name__ == "__main__":
gObject = {2: "3", True: (3, None), 2.0 * 10 ** 20: 2.0 * 10 ** -10}
print "Initial: %s" % gObject
gData = encode(gObject)
print "Encoded: %s" % gData
print "Decoded: %s, [%s]" % decode(gData)
|
andresailer/DIRAC
|
Core/Utilities/DEncode.py
|
Python
|
gpl-3.0
| 9,819
|
[
"DIRAC"
] |
29bedb2bc060ee65787fb7160f197a234f568d4acc4cb1bc164a9830a8b3273b
|
#!/usr/bin/env python
# Script which tests the different filtering thresholds per barcode.
# Returns, per barcode, the detected species which match the criteria.
import sys
import os
### Get the OTU abundance from the file (This is per barcode)
def GetOTUabundance(statFile, pOTU):
# Local variables
f = open(statFile)
abundance={}
#OTUabun=100
for line in f:
# Remove the enter from the end of the line
line = line.rstrip()
### Get the different barcode from the statistics file
if (line.startswith("############ Statistics for barcode: ")):
barcode=line.split("############ Statistics for barcode: ")[1].replace(" ############", "")
if barcode not in abundance:
abundance[barcode]=1
#print barcode
else:
if (line.startswith("# combined file: ")):
assignedReads=int(line.split("\t")[1])
OTUabun=assignedReads*(pOTU/100)
#print barcode+"\t"+str(assignedReads)+"\t"+str(OTUabun)
abundance[barcode]=OTUabun
### Close the file and return the dictionary
f.close()
return abundance
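### Added note: the parser above expects statistics-file lines shaped like the
### two below (values are hypothetical; <TAB> stands for a literal tab):
### ############ Statistics for barcode: ITS2 ############
### # combined file: <TAB>12345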
### Function to retrieve the different organisms from the blast summary
def GetHitsPerBarcode(abundance, InFile, pident, OutFile):
# Local variables
f = open(InFile, "r")
output = open(OutFile, "w")
CountSpec={}
OTU=""
qlen=0
for line in f:
# Remove the enter from the end of the line
line = line.rstrip()
### Get barcodes but ignore title lines
if (line.startswith("#####")):
if (line.startswith("##### Results for:")):
output.write("\n"+line+"\n")
barcode=line.split("##### Results for: ")[1].replace(" #####", "")
output.write("OTU abun "+barcode+":\t"+str(abundance[barcode])+"\n")
### Get a different length per barcode
if ( barcode == "ITS2" ):
qlen=100
elif (barcode == "rbcL-mini"):
qlen=140
elif ( barcode == "trnL_P6loop" ):
qlen=10
else:
qlen=200
else:
### Process only the OTU result lines of the blast output
if (line.startswith("OTU")):
splitLine = line.split("\t")
### Check if the size of the OTU is at least the OTU abundance threshold
if (abundance[barcode] <= int(splitLine[0].split("size=")[1].replace(";",""))):
### Get the top hit (based on bitscore)
if (OTU == splitLine[0]):
if not (float(splitLine[4]) < float(bitscore)):  # compare bitscores numerically, not as strings
### Does the line match the criteria (query length and percentage identity)?
if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ):
output.write(line+"\n")
else:
### Get the next values
OTU=splitLine[0]
bitscore=splitLine[4]
### Does the line match the criteria (query length and percentage identity)?
if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ):
output.write(line+"\n")
else:
### Skip the empty lines
if (line != ""):
### Only get the title lines from the blast output
if (line.startswith("qseqid")):
#print line
output.write(line+"\n")
### Close the files
output.close()
f.close()
### Retrieve the hits per barcode
def GetAllHitsPerBarcode(abundance, InFile, pident, OutFile):
# Local variables
f = open(InFile, "r")
output = open(OutFile, "w")
CountSpec={}
OTU=""
qlen=0
for line in f:
# Remove the enter from the end of the line
line = line.rstrip()
### Get barcodes but ignore title lines
if (line.startswith("#####")):
if (line.startswith("##### Results for:")):
output.write("\n"+line+"\n")
barcode=line.split("##### Results for: ")[1].replace(" #####", "")
output.write("OTU abun "+barcode+":\t"+str(abundance[barcode])+"\n")
### Get a different length per barcode
if ( barcode == "ITS2" ):
qlen=100
elif (barcode == "rbcL-mini"):
qlen=140
elif ( barcode == "trnL_P6loop" ):
qlen=10
else:
qlen=200
else:
### Process only the OTU result lines of the blast output
if (line.startswith("OTU")):
splitLine = line.split("\t")
### Check if the size of the OTU is at least the OTU abundance threshold
if (abundance[barcode] <= int(splitLine[0].split("size=")[1].replace(";",""))):
if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ):
output.write(line+"\n")
else:
### Skip the empty lines
if (line != ""):
### Only get the title lines from the blast output
if (line.startswith("qseqid")):
output.write(line+"\n")
### Close the files
output.close()
f.close()
### Check all the input and call all the functions
def main(argv):
### Check the input
if (len(argv) == 6 ):
### Catch the variable files
statFile=argv[0]
InFile=argv[1]
FullInFile=argv[2]
OutName=argv[3]
### Variables
pOTU=float(argv[4])
pident=int(argv[5])
### Local variables
OutFile=OutName+"_"+str(pident)+"_"+str(pOTU)+".tsv"
FullOutFile=OutName+"_"+str(pident)+"_"+str(pOTU)+"_Full.tsv"
### Call your functions
abundance=GetOTUabundance(statFile, pOTU)
GetHitsPerBarcode(abundance, InFile, pident, OutFile)
GetAllHitsPerBarcode(abundance, FullInFile, pident, FullOutFile)
else:
print "Wrong type of arguments: python CheckCriteriaBlastSingleFile.py <inFile> <OutFile>"
### Call your main function
if __name__ == "__main__":
main(sys.argv[1:])
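### Added usage sketch (file names are illustrative; the argument order is
### taken from main() above: statistics file, blast summary, full blast
### summary, output prefix, OTU abundance percentage, percent identity):
### python CheckCriteriaBlastSingleSample.py stats.txt summary.tsv full.tsv results 0.5 97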
|
RIKILT/CITESspeciesDetect
|
CheckCriteriaBlastSingleSample.py
|
Python
|
bsd-3-clause
| 5,204
|
[
"BLAST"
] |
4c6ddc239dc6d7cdc7ff29cd4c6fd7d2deeb5917c82aed06168607fafcc5cec8
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI specific views for Programs.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
'"Mario Ferraro <fadinlight@gmail.com>"',
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import db
from django import forms
from django import http
from django.utils import simplejson
from django.utils.translation import ugettext
from soc.logic import accounts
from soc.logic import dicts
from soc.logic.helper import timeline as timeline_helper
from soc.logic.models.host import logic as host_logic
from soc.logic.models.user import logic as user_logic
from soc.views import out_of_band
from soc.views import helper
from soc.views.helper import decorators
from soc.views.helper import dynaform
from soc.views.helper import lists
from soc.views.helper import params as params_helper
from soc.views.helper import redirects
from soc.views.helper import widgets
from soc.views.models import document as document_view
from soc.views.models import program
from soc.views.sitemap import sidebar
from soc.modules.gci.logic.models import mentor as gci_mentor_logic
from soc.modules.gci.logic.models import org_admin as gci_org_admin_logic
from soc.modules.gci.logic.models import program as gci_program_logic
from soc.modules.gci.logic.models import student as gci_student_logic
from soc.modules.gci.logic.models import task as gci_task_logic
from soc.modules.gci.logic.models.org_app_survey import logic as org_app_logic
from soc.modules.gci.models import task as gci_task_model
from soc.modules.gci.views.helper import access as gci_access
from soc.modules.gci.views.helper import redirects as gci_redirects
import soc.modules.gci.logic.models.program
class View(program.View):
"""View methods for the GCI Program model.
"""
DEF_LIST_PUBLIC_TASKS_MSG_FMT = ugettext(
'Lists all publicly visible tasks of %s. Use this to find '
'a task suited for you.')
DEF_LIST_VALID_TASKS_MSG_FMT = ugettext(
'Lists all the Unapproved, Unpublished and published tasks of %s.')
DEF_NO_TASKS_MSG = ugettext(
'There are no tasks to be listed.')
DEF_PARTICIPATING_ORGS_MSG_FMT = ugettext(
'The following is a list of all the participating organizations under '
'the program %(name)s. To know more about each organization and see '
'the tasks published by them please visit the corresponding links.')
DEF_TASK_QUOTA_ALLOCATION_MSG = ugettext(
"Assign task quotas to each organization.")
DEF_TASK_QUOTA_ERROR_MSG_FMT = ugettext(
"Task Quota limit for the organizations %s do not contain"
" a valid number(>0) and has not been updated.")
DEF_LIST_RANKING_MSG_FMT = ugettext(
"Shows current ranking of %s.")
DEF_REQUEST_TASKS_MSG = ugettext(
'You can request more tasks from organizations which do not have '
'any open tasks at the moment. Just click on the organization that '
'is currently blocking your work and you will be able to send a message '
'to their admins.')
def __init__(self, params=None):
"""Defines the fields and methods required for the program View class
to provide the user with list, public, create, edit and delete views.
Params:
params: a dict with params for this View
"""
rights = gci_access.GCIChecker(params)
rights['show'] = ['allow']
rights['create'] = [('checkSeeded', ['checkHasRoleForScope',
host_logic])]
rights['edit'] = [('checkIsHostForProgram', [gci_program_logic.logic])]
rights['delete'] = ['checkIsDeveloper']
rights['assign_task_quotas'] = [
('checkIsHostForProgram', [gci_program_logic.logic])]
rights['accepted_orgs'] = [('checkIsAfterEvent',
['accepted_organization_announced_deadline',
'__all__', gci_program_logic.logic])]
rights['list_participants'] = [('checkIsHostForProgram',
[gci_program_logic.logic])]
rights['task_difficulty'] = [('checkIsHostForProgram',
[gci_program_logic.logic])]
rights['task_type'] = [('checkIsHostForProgram',
[gci_program_logic.logic])]
rights['type_tag_edit'] = [('checkIsHostForProgram',
[gci_program_logic.logic])]
rights['list_self'] = [('checkIsAfterEvent',
['tasks_publicly_visible',
'__all__', gci_program_logic.logic]),
'checkIsUser']
rights['list_tasks'] = [('checkIsAfterEvent',
['tasks_publicly_visible',
'__all__', gci_program_logic.logic])]
rights['show_ranking'] = ['allow']
rights['request_tasks'] = [
('checkHasRoleForKeyFieldsAsScope', [gci_student_logic.logic]),
('checkIsAfterEvent', ['tasks_publicly_visible', '__all__',
gci_program_logic.logic]),
('checkIsBeforeEvent', ['task_claim_deadline', '__all__',
gci_program_logic.logic])]
new_params = {}
new_params['logic'] = soc.modules.gci.logic.models.program.logic
new_params['rights'] = rights
new_params['name'] = "GCI Program"
new_params['module_name'] = "program"
new_params['sidebar_grouping'] = 'Programs'
new_params['document_prefix'] = 'gci_program'
new_params['module_package'] = 'soc.modules.gci.views.models'
new_params['url_prefix'] = 'gci'
new_params['url_name'] = 'gci/program'
new_params['extra_dynaexclude'] = ['task_difficulties', 'task_types',
'ranking_schema']
patterns = []
patterns += [
(r'^%(url_name)s/(?P<access_type>assign_task_quotas)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.assign_task_quotas',
'Assign task quota limits'),
(r'^%(url_name)s/(?P<access_type>task_difficulty)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.task_difficulty_edit',
'Edit Task Difficulty Tags'),
(r'^%(url_name)s/(?P<access_type>task_type)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.task_type_edit',
'Edit Task Type Tags'),
(r'^%(url_name)s/(?P<access_type>type_tag_edit)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.task_type_tag_edit',
'Edit a Task Type Tag'),
(r'^%(url_name)s/(?P<access_type>list_self)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.list_my_tasks',
'List of my starred tasks'),
(r'^%(url_name)s/(?P<access_type>list_tasks)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.list_tasks',
'List of all Tasks in'),
(r'^%(url_name)s/(?P<access_type>show_ranking)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.show_ranking',
'Show ranking'),
(r'^%(url_name)s/(?P<access_type>request_tasks)/%(key_fields)s$',
'%(module_package)s.%(module_name)s.request_tasks',
'Request more tasks'),
]
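# Added note: with url_name 'gci/program', these patterns resolve to URLs of
# the following shape (the program key fields 'google/gci2010' are a
# hypothetical example, not taken from this file):
#   /gci/program/show_ranking/google/gci2010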
new_params['public_field_keys'] = ["name", "scope_path"]
new_params['public_field_names'] = ["Program Name", "Program Owner"]
new_params['extra_django_patterns'] = patterns
new_params['org_app_logic'] = org_app_logic
# used to list the participants in this program
new_params['participants_logic'] = [
(gci_org_admin_logic.logic, 'program'),
(gci_mentor_logic.logic, 'program'),
(gci_student_logic.logic, 'scope')]
params = dicts.merge(params, new_params, sub_merge=True)
super(View, self).__init__(params=params)
dynafields = [
{'name': 'overview_task_difficulties',
'base': forms.CharField,
'label': 'Task Difficulty Levels',
'group': 'Task Settings',
'widget': widgets.ReadOnlyInput(),
'required': False,
'help_text': ugettext('Lists all the difficulty levels that '
'can be assigned to a task. Edit them '
'from the Program menu on sidebar.'),
},
{'name': 'overview_task_types',
'base': forms.CharField,
'label': 'Task Type Tags',
'group': 'Task Settings',
'widget': widgets.ReadOnlyInput(),
'required': False,
'help_text': ugettext('Lists all the types a task can be in. '
'Edit them from the Program menu on sidebar.'),
},
]
dynaproperties = params_helper.getDynaFields(dynafields)
edit_form = dynaform.extendDynaForm(
dynaform=self._params['edit_form'],
dynaproperties=dynaproperties)
self._params['edit_form'] = edit_form
def _editGet(self, request, entity, form):
"""See base.View._editGet().
"""
# Use a simple join to build the comma-separated tag lists.
tds = gci_task_model.TaskDifficultyTag.get_by_scope(entity)
if tds:
td_str = ', '.join(str(td) for td in tds)
form.fields['overview_task_difficulties'].initial = td_str
tts = gci_task_model.TaskTypeTag.get_by_scope(entity)
if tts:
tt_str = ', '.join(str(tt) for tt in tts)
form.fields['overview_task_types'].initial = tt_str
return super(View, self)._editGet(request, entity, form)
@decorators.merge_params
@decorators.check_access
def assignTaskQuotas(self, request, access_type, page_name=None,
params=None, filter=None, **kwargs):
"""View that allows to assign task quotas for accepted GCI organization.
This view allows the program admin to set the task quota limits
and change them at any time when the program is active.
"""
logic = params['logic']
entity = logic.getFromKeyFieldsOr404(kwargs)
if request.method == 'POST':
return self.assignTaskQuotasPost(request, entity, params=params)
else:
return self.assignTaskQuotasGet(request, entity, params=params)
def assignTaskQuotasPost(self, request, entity, params):
"""Handles the POST request for the assign task quota limit list.
"""
# TODO: Once GAE Task APIs arrive, this view will be managed by them
# TODO to TODO(Lennard): GAE required anymore?
from soc.modules.gci.logic.models import organization as gci_org_logic
post_dict = request.POST
org_items = simplejson.loads(post_dict.get('data', '[]'))
org_entities = []
for org_key_name in org_items.keys():
org_entity = gci_org_logic.logic.getFromKeyName(org_key_name)
try:
org_task_quota = int(org_items[org_key_name]['task_quota_limit'])
except ValueError:
org_task_quota = 0
org_entity.task_quota_limit = org_task_quota
org_entities.append(org_entity)
db.put(org_entities)
return http.HttpResponseRedirect('')
def assignTaskQuotasGet(self, request, entity, params=None):
"""Handles the GET request for the assign task quota limit list.
"""
from soc.modules.gci.views.models import organization as gci_org_view
slots_params = gci_org_view.view.getParams().copy()
slots_params['list_description'] = self.DEF_TASK_QUOTA_ALLOCATION_MSG
slots_params['quota_field_keys'] = ['name', 'task_quota_limit']
slots_params['quota_field_names'] = ['Organization', 'Task Quota']
slots_params['quota_field_props'] = {'task_quota_limit':{'editable':True}}
slots_params['quota_button_global'] = [{
'id': 'save_tasks_quota',
'caption': 'Update Quotas',
'type': 'post_edit',
'parameters': {'url': ''}}]
filter = {
'scope': entity,
'status': ['new', 'active']
}
page_name = params.get('page_name', 'Assign task quota limits')
return self.list(request, 'allow', page_name=page_name,
params=slots_params, filter=filter,
visibility='quota')
@decorators.merge_params
def getExtraMenus(self, id, user, params=None):
"""See soc.views.models.program.View.getExtraMenus().
"""
from soc.modules.gci.views.models.org_app_survey import view as org_app_view
params['org_app_view'] = org_app_view
# TODO: the largest part of this method can be moved to the core Program
logic = params['logic']
rights = params['rights']
# get only the invisible and visible programs
fields = {'status': ['invisible', 'visible']}
entities = logic.getForFields(fields)
menus = []
rights.setCurrentUser(id, user)
for entity in entities:
items = []
if entity.status == 'visible':
# show the documents for this program, even for users who are not logged in
items += document_view.view.getMenusForScope(entity, params)
items += self._getTimeDependentEntries(entity, params, id, user)
try:
# check if the current user is a host for this program
rights.doCachedCheck('checkIsHostForProgram',
{'scope_path': entity.scope_path,
'link_id': entity.link_id}, [logic])
if entity.status == 'invisible':
# still add the document links so hosts can see how it looks
items += self._getTimeDependentEntries(entity, params, id, user)
items += self._getHostEntries(entity, params, 'gci')
# add link to Assign Task Quota limits
items += [(gci_redirects.getAssignTaskQuotasRedirect(entity, params),
'Assign Task Quota limits', 'any_access')]
# add link to edit Task Difficulty Levels
items += [(gci_redirects.getDifficultyEditRedirect(
entity, {'url_name': 'gci/program'}),
"Edit Task Difficulty Levels", 'any_access')]
# add link to edit Task Type Tags
items += [(gci_redirects.getTaskTypeEditRedirect(
entity, {'url_name': 'gci/program'}),
"Edit Task Type Tags", 'any_access')]
except out_of_band.Error:
pass
items = sidebar.getSidebarMenu(id, user, items, params=params)
if not items:
continue
menu = {}
menu['heading'] = entity.short_name
menu['items'] = items
menu['group'] = 'Programs'
menus.append(menu)
return menus
def _getTimeDependentEntries(self, gci_program_entity, params, id, user):
"""Returns a list with time dependent menu items.
"""
items = []
timeline_entity = gci_program_entity.timeline
# add show ranking item
if timeline_helper.isAfterEvent(timeline_entity, 'tasks_publicly_visible'):
items += [(gci_redirects.getShowRankingRedirect(
gci_program_entity, {'url_name': 'gci/program'}),
'Show Ranking', 'any_access')]
mentor_entity = None
org_admin_entity = None
org_app_survey = org_app_logic.getForProgram(gci_program_entity)
if org_app_survey and \
timeline_helper.isActivePeriod(org_app_survey, 'survey'):
# add the organization signup link
items += [
(redirects.getTakeSurveyRedirect(
org_app_survey, {'url_name': 'gci/org_app'}),
"Apply to become an Organization", 'any_access')]
if user and org_app_survey and timeline_helper.isAfterEvent(
org_app_survey, 'survey_start'):
main_admin_fields = {
'main_admin': user,
'survey': org_app_survey,
}
backup_admin_fields = {
'backup_admin': user,
'survey': org_app_survey
}
org_app_record_logic = org_app_logic.getRecordLogic()
if org_app_record_logic.getForFields(main_admin_fields, unique=True) or \
org_app_record_logic.getForFields(backup_admin_fields, unique=True):
# add the 'List my Organization Applications' link
items += [
(redirects.getListSelfRedirect(org_app_survey,
{'url_name' : 'gci/org_app'}),
"List My Organization Applications", 'any_access')]
# get the student entity for this user and program
filter = {'user': user,
'scope': gci_program_entity,
'status': ['active', 'inactive']}
student_entity = gci_student_logic.logic.getForFields(filter, unique=True)
# students can register only after successfully completing their first
# task, so a user who has completed a task is treated as a student
filter = {
'user': user,
'program': gci_program_entity,
}
has_completed_task = gci_task_logic.logic.getForFields(
filter, unique=True)
if student_entity or (user and has_completed_task):
items += self._getStudentEntries(gci_program_entity, student_entity,
params, id, user, 'gci')
else:
# get mentor and org_admin entity for this user and program
filter = {
'user': user,
'program': gci_program_entity,
'status': 'active'
}
mentor_entity = gci_mentor_logic.logic.getForFields(filter, unique=True)
org_admin_entity = gci_org_admin_logic.logic.getForFields(
filter, unique=True)
if timeline_helper.isAfterEvent(
timeline_entity, 'accepted_organization_announced_deadline'):
if mentor_entity or org_admin_entity:
items += self._getOrganizationEntries(
gci_program_entity, org_admin_entity,
mentor_entity, params, id, user)
if timeline_helper.isBeforeEvent(timeline_entity, 'program_end'):
# add apply to become a mentor link
items += [
('/gci/org/apply_mentor/%s' % (
gci_program_entity.key().id_or_name()),
"Apply to become a Mentor", 'any_access')]
if timeline_helper.isAfterEvent(
timeline_entity, 'accepted_organization_announced_deadline'):
url = redirects.getAcceptedOrgsRedirect(
gci_program_entity, params)
# add a link to list all the organizations
items += [(url, "List participating Organizations", 'any_access')]
user_fields = {
'user': user,
'status': 'active'
}
host_entity = host_logic.getForFields(user_fields, unique=True)
# for org admins this link should be visible only after accepted
# organizations are announced, for the general public only after the
# tasks are publicly visible, and for the program host always
if (host_entity or
((org_admin_entity or mentor_entity) and timeline_helper.isAfterEvent(
timeline_entity, 'tasks_publicly_visible')) or
(timeline_helper.isAfterEvent(
timeline_entity, 'tasks_publicly_visible'))):
url = gci_redirects.getListAllTasksRedirect(
gci_program_entity, params)
# add a link to list all the organizations
items += [(url, "List all tasks", 'any_access')]
if user:
# add a link to show all tasks of interest
items += [(gci_redirects.getListMyTasksRedirect(
gci_program_entity, params),
'List my Tasks', 'any_access')]
return items
def _getStudentEntries(self, gci_program_entity, student_entity,
params, id, user, prefix):
"""Returns a list with menu items for students in a specific program.
"""
items = []
timeline_entity = gci_program_entity.timeline
# this check is needed because of the GCI student registration
# rules mentioned in the previous method: a user can have a task,
# and hence tasks listed, without yet being a student
if student_entity:
items += super(View, self)._getStudentEntries(
gci_program_entity, student_entity, params, id, user, prefix)
if timeline_helper.isActivePeriod(timeline_entity, 'program'):
items += [
(gci_redirects.getSubmitFormsRedirect(
student_entity, {'url_name': 'gci/student'}),
"Submit Forms", 'any_access')
]
else:
# add a sidebar entry so the user can register as a student,
# since they have completed a task but have no student role yet
filter = {
'user': user,
'program': gci_program_entity,
'status': 'AwaitingRegistration'
}
if gci_task_logic.logic.getForFields(filter, unique=True):
if timeline_helper.isActivePeriod(timeline_entity, 'student_signup'):
# this user does not have a role yet for this program
items += [(redirects.getStudentApplyRedirect(
gci_program_entity, {'url_name': 'gci/student'}),
"Register as a Student", 'any_access')]
return items
def _getOrganizationEntries(self, gci_program_entity, org_admin_entity,
mentor_entity, params, id, user):
"""Returns a list with menu items for org admins and mentors in a
specific program. Note: this method is called only after the
accepted organizations are announced
"""
items = []
timeline_entity = gci_program_entity.timeline
if mentor_entity and timeline_helper.isAfterEvent(
timeline_entity, 'accepted_organization_announced_deadline'):
# add a link to show all tasks that the mentor is assigned to
items += [(gci_redirects.getListMentorTasksRedirect(
mentor_entity, {'url_name':'gci/mentor'}),
"List starred tasks", 'any_access')]
return items
@decorators.merge_params
@decorators.check_access
def taskDifficultyEdit(self, request, access_type, page_name=None,
params=None, **kwargs):
"""View method used to edit Difficulty Level tags.
"""
params = dicts.merge(params, self._params)
try:
program_entity = self._logic.getFromKeyFieldsOr404(kwargs)
except out_of_band.Error, error:
return helper.responses.errorResponse(
error, request, template=params['error_public'])
if request.POST:
return self.taskDifficultyEditPost(request, program_entity, params)
else: #request.GET
return self.taskDifficultyEditGet(request, program_entity, page_name, params)
def taskDifficultyEditGet(self, request, program_entity, page_name, params):
"""View method for edit task difficulty tags GET requests.
"""
context = helper.responses.getUniversalContext(request)
helper.responses.useJavaScript(context, params['js_uses_all'])
context['page_name'] = page_name
context['program_key_name'] = program_entity.key().name()
difficulty_tags = gci_task_model.TaskDifficultyTag.get_by_scope(
program_entity)
difficulties = []
for difficulty in difficulty_tags:
difficulties.append({
'name': difficulty.tag,
'value': difficulty.value })
context['difficulties'] = simplejson.dumps(difficulties)
template = 'modules/gci/program/tag/difficulty.html'
return self._constructResponse(request, program_entity, context, None,
params, template=template)
def taskDifficultyEditPost(self, request, program_entity, params):
"""View method for edit task difficulty tags POST requests.
"""
post_dict = request.POST
operation = simplejson.loads(post_dict.get('operation'))
# invalid request
INVALID_REQUEST_RESPONSE = http.HttpResponse()
INVALID_REQUEST_RESPONSE.status_code = 400
if not operation:
return INVALID_REQUEST_RESPONSE
op = operation.get('op')
# TODO(ljvderijk): How do we want to deal with the setting of the value
# property in the tag since it now requires an extra put.
data = operation['data']
if op == 'add':
for tag_data in data:
tag = gci_task_model.TaskDifficultyTag.get_or_create(
program_entity, tag_data['name'])
tag.value = int(tag_data['value'])
tag.put()
elif op == 'change':
current_tag_data = data[0]
new_tag_data = data[1]
current_tag_name = current_tag_data['name']
new_tag_name = new_tag_data['name']
current_tag = gci_task_model.TaskDifficultyTag.get_by_scope_and_name(
program_entity, current_tag_name)
if not current_tag:
return INVALID_REQUEST_RESPONSE
if current_tag_name != new_tag_name:
# rename tag
new_tag = gci_task_model.TaskDifficultyTag.copy_tag(
program_entity, current_tag_name, new_tag_name)
# TODO(ljvderijk): The tag copy method should work with new fields
new_tag.order = current_tag.order
new_tag.value = int(new_tag_data['value'])
new_tag.put()
else:
# change value of the tag
current_tag.value = int(new_tag_data['value'])
current_tag.put()
elif op == 'delete':
for tag_data in data:
gci_task_model.TaskDifficultyTag.delete_tag(
program_entity, tag_data['name'])
elif op == 'reorder':
tags = []
for i in range(0, len(data)):
tag_data = data[i]
tag = gci_task_model.TaskDifficultyTag.get_by_scope_and_name(
program_entity, tag_data['name'])
tag.order = i
tags.append(tag)
db.put(tags)
return http.HttpResponse()
@decorators.merge_params
@decorators.check_access
def taskTypeEdit(self, request, access_type, page_name=None,
params=None, **kwargs):
"""View method used to edit Task Type tags.
"""
params = dicts.merge(params, self._params)
try:
entity = self._logic.getFromKeyFieldsOr404(kwargs)
except out_of_band.Error, error:
return helper.responses.errorResponse(
error, request, template=params['error_public'])
context = helper.responses.getUniversalContext(request)
helper.responses.useJavaScript(context, params['js_uses_all'])
context['page_name'] = page_name
context['program_key_name'] = entity.key().name()
context['task_types'] = gci_task_model.TaskTypeTag.get_by_scope(
entity)
params['edit_template'] = 'modules/gci/program/tag/task_type.html'
return self._constructResponse(request, entity, context, None, params)
@decorators.merge_params
@decorators.check_access
def taskTypeTagEdit(self, request, access_type, page_name=None,
params=None, **kwargs):
"""View method used to edit a supplied Task Type tag.
"""
get_params = request.GET
order = get_params.getlist('order')
program_entity = gci_program_logic.logic.getFromKeyFields(kwargs)
if order:
for index, elem in enumerate(order):
gci_task_model.TaskTypeTag.update_order(
program_entity, elem, index)
return http.HttpResponse()
else:
tag_data = get_params.getlist('tag_data')
tag_name = tag_data[0].strip()
tag_value = tag_data[1].strip()
if tag_name:
if not tag_value:
gci_task_model.TaskTypeTag.delete_tag(
program_entity, tag_name)
elif tag_name != tag_value:
gci_task_model.TaskTypeTag.copy_tag(
program_entity, tag_name, tag_value)
else:
gci_task_model.TaskTypeTag.get_or_create(program_entity, tag_value)
return http.HttpResponse(tag_value)
@decorators.merge_params
@decorators.check_access
def acceptedOrgs(self, request, access_type,
page_name=None, params=None, **kwargs):
"""List all the accepted orgs for the given program.
"""
from soc.modules.gci.views.models.organization import view as org_view
from soc.modules.gci.views.models.org_app_survey import view as org_app_view
logic = params['logic']
program_entity = logic.getFromKeyFieldsOr404(kwargs)
return super(View, self).acceptedOrgs(
request, page_name, params, program_entity, org_view, org_app_view)
@decorators.merge_params
@decorators.check_access
def requestMoreTasks(self, request, access_type,
page_name=None, params=None, **kwargs):
"""List of all organization which allows students to request new tasks
from organizations which do not have any open tasks.
"""
from soc.modules.gci.views.models.organization import view as org_view
logic = params['logic']
program_entity = logic.getFromKeyFieldsOr404(kwargs)
rt_params = org_view.getParams().copy()
rt_params['list_msg'] = self.DEF_REQUEST_TASKS_MSG
rt_params['participating_field_keys'] = [
'name', 'home_page', 'pub_mailing_list', 'open_tasks']
rt_params['participating_field_names'] = [
'Organization', 'Home Page', 'Public Mailing List', 'Open Tasks']
rt_params['participating_field_extra'] = lambda entity: {
'open_tasks': len(gci_task_logic.logic.getForFields({
'scope': entity, 'status': ['Open', 'Reopened']}))
}
rt_params['participating_row_extra'] = lambda entity: {
'link': gci_redirects.getRequestTaskRedirect(
entity, {'url_name': rt_params['url_name']})
} if canRequestTask(entity) else {}
def canRequestTask(entity):
"""Checks if a task may be requested from particular organization.
"""
fields = {
'scope': entity,
'status': ['Open', 'Reopened']
}
task = gci_task_logic.logic.getForFields(fields, unique=True)
return not task
filter = {
'scope': program_entity,
'status': 'active'
}
return self.list(request, 'allow', page_name=page_name,
params=rt_params, filter=filter, visibility='participating')
def getListTasksData(self, request, params, tasks_filter):
"""Returns the list data for all tasks list for program host and
all public tasks for others.
Args:
request: HTTPRequest object
params: params of the task entity for the list
tasks_filter: dictionary that must be passed to obtain the tasks data
"""
idx = lists.getListIndex(request)
# default list settings
visibility = 'public'
if idx == 0:
all_d = gci_task_model.TaskDifficultyTag.all().fetch(100)
all_t = gci_task_model.TaskTypeTag.all().fetch(100)
args = [all_d, all_t]
contents = lists.getListData(request, params, tasks_filter,
visibility=visibility, args=args)
else:
return lists.getErrorResponse(request, "idx not valid")
return lists.getResponse(request, contents)
@decorators.merge_params
@decorators.check_access
def listTasks(self, request, access_type, page_name=None, params=None,
**kwargs):
"""View where all the tasks can be searched from.
"""
from soc.modules.gci.views.models.task import view as task_view
logic = params['logic']
program_entity = logic.getFromKeyFieldsOr404(kwargs)
page_name = '%s %s' % (page_name, program_entity.name)
list_params = task_view.getParams().copy()
user_account = user_logic.getCurrentUser()
user_fields = {
'user': user_account,
'status': 'active'
}
host_entity = host_logic.getForFields(user_fields, unique=True)
tasks_filter = {
'program': program_entity,
'status': ['Open', 'Reopened', 'ClaimRequested']
}
if host_entity:
list_params['list_description'] = self.DEF_LIST_VALID_TASKS_MSG_FMT % (
program_entity.name)
tasks_filter['status'].extend([
'Claimed', 'ActionNeeded', 'Closed', 'AwaitingRegistration',
'NeedsWork', 'NeedsReview', 'Unapproved', 'Unpublished'])
else:
list_params.setdefault('public_field_ignore', []).append('mentors')
list_params['list_description'] = self.DEF_LIST_PUBLIC_TASKS_MSG_FMT % (
program_entity.name)
list_params['public_row_extra'] = lambda entity, *args: {
'link': redirects.getPublicRedirect(entity, list_params)
}
list_params['public_conf_min_num'] = list_params['public_conf_limit'] = 100
if lists.isDataRequest(request):
return self.getListTasksData(request, list_params, tasks_filter)
contents = []
order = ['-modified_on']
tasks_list = lists.getListGenerator(request, list_params,
order=order, idx=0)
contents.append(tasks_list)
return self._list(request, list_params, contents, page_name)
def getListMyTasksData(self, request, task_params, subscription_params,
program, user):
"""Returns the list data for the starred tasks of the current user.
Args:
request: HTTPRequest object
task_params: params of the task entity for the list
subscription_params: params for the task subscription entity for the list
program: the GCIProgram to show the tasks for
user: The user entity to show the tasks for
"""
idx = lists.getListIndex(request)
all_d = gci_task_model.TaskDifficultyTag.all().fetch(100)
all_t = gci_task_model.TaskTypeTag.all().fetch(100)
args = [all_d, all_t]
if idx == 0:
filter = {
'program': program,
'user': user,
'status': ['ClaimRequested', 'Claimed', 'ActionNeeded',
'Closed', 'AwaitingRegistration', 'NeedsWork',
'NeedsReview']
}
contents = lists.getListData(request, task_params, filter, args=args)
elif idx == 1:
filter = {'subscribers': user}
contents = lists.getListData(request, subscription_params, filter,
args=args)
else:
return lists.getErrorResponse(request, 'idx not valid')
return lists.getResponse(request, contents)
@decorators.merge_params
@decorators.check_access
def listMyTasks(self, request, access_type, page_name=None,
params=None, **kwargs):
"""Displays a list of all starred tasks for the current user.
If the current user is a student it also lists all tasks claimed by them.
See base.View.list() for more details.
"""
from soc.modules.gci.views.models import task as gci_task_view
from soc.modules.gci.views.models import task_subscription as \
gci_subscription_view
program = gci_program_logic.logic.getFromKeyFieldsOr404(kwargs)
user = user_logic.getCurrentUser()
task_params = gci_task_view.view.getParams().copy()
task_params['list_description'] = ugettext(
'Tasks that you have claimed.')
subscription_params = gci_subscription_view.view.getParams().copy()
subscription_params['list_description'] = ugettext(
'Tasks that you have starred.')
if lists.isDataRequest(request):
return self.getListMyTasksData(request, task_params,
subscription_params, program, user)
contents = []
fields = {'user': user,
'status': ['active', 'inactive'],
}
if gci_student_logic.logic.getForFields(fields, unique=True):
order = ['modified_on']
tasks_list = lists.getListGenerator(request, task_params,
order=order, idx=0)
contents.append(tasks_list)
starred_tasks_list = lists.getListGenerator(request, subscription_params,
idx=1)
contents.append(starred_tasks_list)
return self._list(request, task_params, contents, page_name)
@decorators.merge_params
@decorators.check_access
def showRanking(self, request, access_type,
page_name=None, params=None, **kwargs):
"""Shows the ranking for the program specified by **kwargs.
Args:
request: the standard Django HTTP request object
access_type : the name of the access type which should be checked
page_name: the page name displayed in templates as page and header title
params: a dict with params for this View
kwargs: the Key Fields for the specified entity
"""
from soc.modules.gci.views.models.student_ranking import view as ranking_view
from soc.modules.gci.views.models.student import view as student_view
sparams = student_view.getParams()
user_account = user_logic.getCurrentUser()
user_fields = {
'user': user_account,
'status': 'active'
}
host_entity = host_logic.getForFields(user_fields, unique=True)
is_host = host_entity or user_logic.isDeveloper(user=user_account)
logic = params['logic']
program = logic.getFromKeyFieldsOr404(kwargs)
list_params = ranking_view.getParams().copy()
list_params['list_description'] = self.DEF_LIST_RANKING_MSG_FMT % (
program.name)
list_params['public_field_keys'] = ["student", "points", "number"]
list_params['public_field_names'] = ["Student", "Points", "Number of tasks"]
list_params['public_conf_extra'] = {
"rowNum": -1,
"rowList": [],
}
list_params['public_field_prefetch'] = ['student']
def getExtraFields(entity, *args):
res = {
'student': entity.student.user.name,
'number': len(entity.tasks)
}
if is_host:
fields = sparams['admin_field_keys']
extra = dicts.toDict(entity.student, fields)
res.update(extra)
res['group_name'] = entity.student.scope.name
res['birth_date'] = entity.student.birth_date.isoformat()
res['account_name'] = accounts.normalizeAccount(entity.student.user.account).email()
res['forms_submitted'] = "Yes" if (entity.student.consent_form and entity.student.student_id_form) else "No"
return res
list_params['public_field_extra'] = getExtraFields
list_params['public_row_extra'] = lambda entity, *args: {
'link': gci_redirects.getShowRankingDetails(entity, list_params)
}
list_params['public_field_props'] = {
'points': {
'sorttype': 'integer',
},
'number': {
'sorttype': 'integer',
},
}
if is_host:
list_params['public_field_keys'] += ["forms_submitted"]
list_params['public_field_names'] += ["Forms submitted"]
list_params['public_field_hidden'] = sparams['admin_field_hidden'] + sparams['admin_field_keys']
list_params['public_field_keys'].extend(sparams['admin_field_keys'])
list_params['public_field_names'].extend(sparams['admin_field_names'])
ranking_filter = {
'scope': program
}
order = ['-points']
if lists.isDataRequest(request):
contents = lists.getListData(request, list_params, ranking_filter)
return lists.getResponse(request, contents)
contents = [lists.getListGenerator(
request, list_params, order=order, idx=0)]
return self._list(request, list_params, contents=contents,
page_name=page_name)
view = View()
admin = decorators.view(view.admin)
accepted_orgs = decorators.view(view.acceptedOrgs)
assign_task_quotas = decorators.view(view.assignTaskQuotas)
create = decorators.view(view.create)
delete = decorators.view(view.delete)
edit = decorators.view(view.edit)
list = decorators.view(view.list)
list_my_tasks = decorators.view(view.listMyTasks)
list_participants = decorators.view(view.listParticipants)
list_tasks = decorators.view(view.listTasks)
public = decorators.view(view.public)
request_tasks = decorators.view(view.requestMoreTasks)
show_ranking = decorators.view(view.showRanking)
export = decorators.view(view.export)
home = decorators.view(view.home)
task_type_tag_edit = decorators.view(view.taskTypeTagEdit)
task_difficulty_edit = decorators.view(view.taskDifficultyEdit)
task_type_edit = decorators.view(view.taskTypeEdit)
|
SRabbelier/Melange
|
app/soc/modules/gci/views/models/program.py
|
Python
|
apache-2.0
| 40,076
|
[
"VisIt"
] |
536b7507ef66a31659e56d7b9f468308c8647f0c5ff3d75d8cdece9001018eab
|
#!/usr/bin/env python
#
# $File: importMS.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
from simuPOP.utils import importPopulation, export
pop = sim.Population([20,20], loci=[10, 10])
# simulate a population but mutate only a subset of loci
pop.evolve(
preOps=[
sim.InitSex(),
sim.SNPMutator(u=0.1, v=0.01, loci=range(5, 17))
],
matingScheme=sim.RandomMating(),
gen=100
)
# export first chromosome, all individuals
export(pop, format='ms', output='ms.txt')
# export first chromosome, subpops as replicates
export(pop, format='ms', output='ms_subPop.txt', splitBy='subPop')
# export all chromosomes, but limit to all males in subPop 1
pop.setVirtualSplitter(sim.SexSplitter())
export(pop, format='ms', output='ms_chrom.txt', splitBy='chrom', subPops=[(1,0)])
#
print(open('ms_chrom.txt').read())
# import as haploid sequence
pop = importPopulation(format='ms', filename='ms.txt')
# import as diploid
pop = importPopulation(format='ms', filename='ms.txt', ploidy=2)
# import as a single chromosome
pop = importPopulation(format='ms', filename='ms_subPop.txt', mergeBy='subPop')
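# Added sketch (not in the original example): a quick way to inspect the
# imported population. numSubPop, popSize and totNumLoci are standard
# simuPOP Population accessors; treat this as illustrative.
#
#   print(pop.numSubPop(), pop.popSize(), pop.totNumLoci())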
|
BoPeng/simuPOP
|
docs/importMS.py
|
Python
|
gpl-2.0
| 2,127
|
[
"VisIt"
] |
86fd23d9970a4741cf9fda8f7e7c29feb1d1572687c8bdaea7a97a7ecb1c8e74
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013
"""
Reorder the atoms in the Angles section of a data file to make sure that
atoms have a "canonical order" (for example, the first atom has a lower
id than the last atom) for angle and dihedral interactions.
(This helps us detect potential problems like duplicate Angle interactions.)
"""
from operator import itemgetter
import importlib
import os
import sys
sys.path.append(os.getcwd())
g_program_name = __file__.split('/')[-1]
def main():
in_stream = sys.stdin
section_name = ''
if len(sys.argv) == 3:
section_name = sys.argv[1]
bond_pattern_module_name = sys.argv[2]
# If the file name ends in ".py", then strip off this suffix.
# The next line does not work: rstrip('.py') strips any trailing '.', 'p',
# or 'y' characters, not the literal '.py' suffix.
# bond_pattern_module_name=bond_pattern_module_name.rstrip('.py')
# Do this instead
pc = bond_pattern_module_name.rfind('.py')
if pc != -1:
bond_pattern_module_name = bond_pattern_module_name[0:pc]
else:
sys.stderr.write('Usage Example:\n\n'
' ' + g_program_name + ' Angles nbody_angles.py < angles.txt > new_angles.txt\n\n'
' In this example \"angles.txt\" contains only the \"Angles\" section of\n'
' a LAMMPS DATA file. (Either a text-editor, or the \n'
' \"extract_lammps_data.py\" script can be used to select a section from\n'
' a LAMMPS DATA file\n\n'
'Error(' + g_program_name +
'): expected exactly one argument:\n'
' \"Angles\", \"Dihedrals\", or \"Impropers\"\n')
exit(-1)
# Ordering rules are defined in a separate module named
# nbody_angles.py, nbody_dihedrals.py, nbody_impropers.py
# Load that now.
# search locations
package_opts = [[bond_pattern_module_name, __package__],
['nbody_alt_symmetry.'+bond_pattern_module_name,
__package__]]
if __package__:
for i in range(0, len(package_opts)):
package_opts[i][0] = '.' + package_opts[i][0]
grph = None
for name, pkg in package_opts:
try:
# define grph.bond_pattern, grph.canonical_order
grph = importlib.import_module(name, pkg)
break
except (ImportError, SystemError, ValueError):
pass
if grph is None:
sys.stderr.write('Error: Unable to locate file \"' +
bond_pattern_module_name + '\"\n'
' (Did you misspell the file name?\n'
' Check the \"nbody_alt_symmetry/\" directory.)\n')
sys.exit(-1)
# This module defines the graph representing the bond pattern for this type
# of interaction. (The number of vertices and edges for the graph corresponds
# to the number of atoms and bonds in this type of interaction.)
natoms = grph.bond_pattern.GetNumVerts()
nbonds = grph.bond_pattern.GetNumEdges()
for line_orig in in_stream:
line = line_orig.rstrip('\n')
comment = ''
if '#' in line_orig:
ic = line.find('#')
line = line_orig[:ic]
comment = ' ' + line_orig[ic:].rstrip('\n')
tokens = line.strip().split()
swapped = False
if len(tokens) == 2 + natoms:
all_integers = True
abids_l = [[0 for i in range(0, natoms)],
[0 for i in range(0, nbonds)]]
for i in range(0, natoms):
if not tokens[2 + i].isdigit():
all_integers = False
if all_integers:
for i in range(0, natoms):
abids_l[0][i] = int(tokens[2 + i])
else:
for i in range(0, natoms):
abids_l[0][i] = tokens[2 + i]
abids = grph.canonical_order((tuple(abids_l[0]), tuple(abids_l[1])))
for i in range(0, natoms):
tokens[2 + i] = str(abids[0][i])
sys.stdout.write(' '.join(tokens) + comment + '\n')
return
if __name__ == '__main__':
main()
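# Added note: for an "Angles" section (natoms == 3) each data line has the
# form "angleID angleType atomID1 atomID2 atomID3", and only the atom-ID
# columns are rewritten into canonical order. Hypothetical example line:
# 7 3 15 14 13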
|
jewettaij/moltemplate
|
moltemplate/nbody_reorder_atoms.py
|
Python
|
mit
| 4,360
|
[
"LAMMPS"
] |
d2c772e45cf701717312a6202eef1657762f0f3013a61c65d501f8670f97bfb0
|
from wtforms import validators, FormField, SelectField
from wtforms_alchemy import ModelForm, ModelFieldList
from models import ShoppingItem, Visit, ShoppingCategory
class CategoryForm(ModelForm):
class Meta:
model = ShoppingCategory
include = ['id']
'''field_args = {
'id': {
'validators': [validators.InputRequired()]
},
'name': {
'validators': [validators.Optional()]
},
'dailyLimit': {
'validators': [validators.Optional()]
}
}'''
class ShoppingItemForm(ModelForm):
class Meta:
model = ShoppingItem
include = ['id']
field_args = {
'id': {
'validators': [validators.Optional()]
},
'name': {
'validators': [validators.Optional()]
}
}
category = SelectField(u'Category', coerce=int)
class CheckoutForm(ModelForm):
class Meta:
datetime_format = '%m/%d/%Y %H:%M:%S'
model = Visit
include = ['id', 'family_id']
field_args = {
'id': {
'validators': [validators.Optional()]
},
'checkin': {
'validators': [validators.InputRequired()]
},
'checkout': {
'validators': [validators.Optional()]
},
'family_id': {
'validators': [validators.InputRequired()]
}
}
items = ModelFieldList(FormField(ShoppingItemForm), min_entries=0)
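# Added usage sketch (illustrative, not part of the original module): these
# ModelForms are bound to request data in a view using the standard WTForms
# constructor; `formdata` is a hypothetical multidict from the hosting web
# framework and `visit` a hypothetical Visit instance:
#
#   form = CheckoutForm(formdata)
#   if form.validate():
#       form.populate_obj(visit)  # copy validated fields onto the Visit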
|
jlutz777/FreeStore
|
forms/checkout.py
|
Python
|
mit
| 1,609
|
[
"VisIt"
] |
bd703ee3034809aecd5fd9e7e5e055388cc120e3678f27bdde2c7b5287349d3f
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Utilities/Generate SoundEx Codes"""
#-------------------------------------------------------------------------
#
# Gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.soundex import soundex
from gramps.gui.autocomp import fill_combo
from gramps.gen.plug import Gramplet
from gramps.gen.constfunc import cuni
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# SoundGen
#
#-------------------------------------------------------------------------
class SoundGen(Gramplet):
"""
Generates SoundEx codes.
"""
def init(self):
self.gui.WIDGET = self.build_gui()
self.gui.get_container_widget().remove(self.gui.textview)
self.gui.get_container_widget().add_with_viewport(self.gui.WIDGET)
def build_gui(self):
"""
Build the GUI interface.
"""
grid = Gtk.Grid()
grid.set_border_width(6)
grid.set_row_spacing(6)
grid.set_column_spacing(20)
label1 = Gtk.Label(_("Name:"))
label1.set_alignment(0, 0.5)
grid.attach(label1, 0, 0, 1, 1)
label2 = Gtk.Label(_("SoundEx code:"))
label2.set_alignment(0, 0.5)
grid.attach(label2, 0, 1, 1, 1)
self.autocomp = Gtk.ComboBox.new_with_entry()
grid.attach(self.autocomp, 1, 0, 1, 1)
self.value = Gtk.Label()
self.value.set_alignment(0, 0.5)
grid.attach(self.value, 1, 1, 1, 1)
self.name = self.autocomp.get_child()
self.name.connect('changed', self.on_apply_clicked)
grid.show_all()
return grid
def db_changed(self):
if not self.dbstate.open:
return
names = []
person = None
for person in self.dbstate.db.iter_people():
lastname = person.get_primary_name().get_surname()
if lastname not in names:
names.append(lastname)
names.sort()
fill_combo(self.autocomp, names)
if person:
n = person.get_primary_name().get_surname()
self.name.set_text(n)
try:
se_text = soundex(n)
except UnicodeEncodeError:
se_text = soundex('')
self.value.set_text(se_text)
else:
self.name.set_text("")
def on_apply_clicked(self, obj):
try:
se_text = soundex(cuni(obj.get_text()))
except UnicodeEncodeError:
se_text = soundex('')
self.value.set_text(se_text)
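# Illustrative sketch (comments only; not part of the original gramplet).
# The soundex() helper imported above can be exercised on its own; under the
# classic SoundEx algorithm the surname "Robert" encodes to "R163", though
# padding and length can differ between implementations.
#
#     from gramps.gen.soundex import soundex
#     print(soundex("Robert"))   # typically "R163"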
|
pmghalvorsen/gramps_branch
|
gramps/plugins/gramplet/soundgen.py
|
Python
|
gpl-2.0
| 3,732
|
[
"Brian"
] |
ab7ed8e3807a4acd5a54ccd408eab755e2384fab8772282a5044070eef2d5b3d
|
import numpy as np
import theano
import theano.tensor as T
import unittest
import tempfile
from numpy.testing import assert_equal, assert_array_equal, assert_array_almost_equal
from nose.tools import assert_true
from smartlearner import views, stopping_criteria, Trainer, tasks
from smartlearner.direction_modifiers import GradientNoise
from smartlearner.optimizers import SGD
from smartlearner.testing import DummyLoss, DummyBatchScheduler
from smartlearner.utils import sharedX
floatX = theano.config.floatX
class DummyLossWithGradient(DummyLoss):
def __init__(self, cost, param):
super().__init__()
self.cost = cost
self.param = param
def _get_gradients(self):
gparam = T.grad(cost=self.cost, wrt=self.param)
return {self.param: gparam}
def getstate(self):
return {"param": self.param.get_value()}
def setstate(self, state):
self.param.set_value(state["param"])
class TestGradientNoise(unittest.TestCase):
def _build_experiment(self):
# Create an Nd gaussian function to optimize. This function is not
# well-conditioned and there exists no perfect gradient step to converge in
# only one iteration.
N = 4
center = 5*np.ones((1, N)).astype(floatX)
param = sharedX(np.zeros((1, N)))
cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), (param-center).T))
loss = DummyLossWithGradient(cost, param)
optimizer = SGD(loss)
direction_modifier = GradientNoise()
optimizer.append_direction_modifier(direction_modifier)
trainer = Trainer(optimizer, DummyBatchScheduler())
        # Monitor t, the noise std, the update direction and the raw gradient.
logger = tasks.Logger(views.MonitorVariable(direction_modifier.t),
views.MonitorVariable(direction_modifier.std),
views.MonitorVariable(list(optimizer.directions.values())[0]),
views.MonitorVariable(list(loss.gradients.values())[0]))
trainer.append_task(logger)
return trainer, logger, direction_modifier
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_epoch = 10
self.trainer, self.logger, self.direction_modifier = self._build_experiment()
self.trainer.append_task(stopping_criteria.MaxEpochStopping(self.max_epoch))
self.trainer.train()
def test_behaviour(self):
t_per_update = np.array(self.logger.get_variable_history(0)).flatten()
expected_t_per_update = np.arange(1, self.max_epoch+1)
assert_array_equal(t_per_update, expected_t_per_update)
# Directions should not be the same as gradients at first.
for i in range(self.max_epoch):
assert_true(not np.allclose(abs(self.logger[i][2]), abs(self.logger[i][3])))
std_per_update = np.array(self.logger.get_variable_history(1)).flatten()
# std is expected to decay at each update.
assert_true(np.all(np.diff(std_per_update) < 0))
def test_save_load(self):
# Save training and resume it.
with tempfile.TemporaryDirectory() as experiment_dir:
# Save current training state of the experiment.
self.trainer.save(experiment_dir)
# Load previous training state of the experiment.
trainer, logger, direction_modifier = self._build_experiment()
trainer.load(experiment_dir)
assert_equal(direction_modifier._eta, self.direction_modifier._eta)
assert_equal(direction_modifier._gamma, self.direction_modifier._gamma)
assert_equal(direction_modifier.t.get_value(), self.direction_modifier.t.get_value())
assert_equal(direction_modifier.std.get_value(), self.direction_modifier.std.get_value())
assert_array_equal(direction_modifier._srng.rstate, self.direction_modifier._srng.rstate)
for state, expected_state in zip(direction_modifier._srng.state_updates,
self.direction_modifier._srng.state_updates):
assert_array_equal(state[0].get_value(), expected_state[0].get_value())
def test_resume(self):
trainer1, logger1, direction_modifier1 = self._build_experiment()
trainer1.append_task(stopping_criteria.MaxEpochStopping(5))
trainer1.train()
# Save training and resume it.
with tempfile.TemporaryDirectory() as experiment_dir:
# Save current training state of the experiment.
trainer1.save(experiment_dir)
# Load previous training state of the experiment.
trainer2, logger2, direction_modifier2 = self._build_experiment()
trainer2.append_task(stopping_criteria.MaxEpochStopping(10))
trainer2.load(experiment_dir)
trainer2.train()
            # Check that concatenating `logger1` with `logger2` reproduces
            # `self.logger` for variable 0 (the update counter t).
            t_part1 = np.array(logger1.get_variable_history(0)).flatten()
            t_part2 = np.array(logger2.get_variable_history(0)).flatten()
            expected_t = np.array(self.logger.get_variable_history(0)).flatten()
            assert_array_equal(np.r_[t_part1, t_part2], expected_t)
            # Same check for variable 1 (the noise standard deviation).
            std_part1 = np.array(logger1.get_variable_history(1)).flatten()
            std_part2 = np.array(logger2.get_variable_history(1)).flatten()
            expected_std = np.array(self.logger.get_variable_history(1)).flatten()
            assert_array_equal(np.r_[std_part1, std_part2], expected_std)
            # Same check for variable 2 (the update direction); allow for
            # floating-point noise here.
            direction_part1 = np.array(logger1.get_variable_history(2)).flatten()
            direction_part2 = np.array(logger2.get_variable_history(2)).flatten()
            expected_direction = np.array(self.logger.get_variable_history(2)).flatten()
            assert_array_almost_equal(np.r_[direction_part1, direction_part2],
                                      expected_direction)
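# Standalone sketch (not part of the original test suite). `GradientNoise`
# is expected to add annealed Gaussian noise to each update direction; the
# schedule popularized by Neelakantan et al. (2015) is
#     std_t = sqrt(eta / (1 + t) ** gamma)
# which decreases monotonically, matching the assertion in `test_behaviour`.
# The eta/gamma values below are illustrative assumptions, not necessarily
# smartlearner's defaults.
if __name__ == "__main__":
    eta, gamma = 0.01, 0.55
    t = np.arange(1, 11)                      # update counter 1..10
    std = np.sqrt(eta / (1 + t) ** gamma)     # annealed noise std per update
    assert np.all(np.diff(std) < 0)           # std decays at every update
    print(std)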
|
ASalvail/smartlearner
|
tests/direction_modifiers/test_gradient_noise.py
|
Python
|
bsd-3-clause
| 6,530
|
[
"Gaussian"
] |
9530be10bb7fd2c8a93f460c4dbfc0c1f28234bdd371ea3b1819b235a86fcce6
|
# -*- coding: utf-8 -*-
"""
pyClanSphere.utils.forms
~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a sophisticated form validation and rendering
system that is based on diva with concepts from django newforms and
wtforms incorporated.
It can validate nested structures and works in both ways. It can also
handle intelligent backredirects (via :mod:`pyClanSphere.utils.http`) and supports
basic CSRF protection.
For usage information see :class:`Form`.
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime, date
from itertools import chain
from threading import Lock
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from werkzeug import cached_property, html, escape, MultiDict
from jinja2 import Markup
from pyClanSphere.application import get_request, url_for, get_application
from pyClanSphere.database import db
from pyClanSphere.i18n import _, ngettext, lazy_gettext, parse_datetime, \
format_system_datetime, parse_date
from pyClanSphere.utils.http import get_redirect_target, _redirect, redirect_to
from pyClanSphere.utils.datastructures import OrderedDict, missing
from pyClanSphere.utils.recaptcha import get_recaptcha_html, validate_recaptcha
from pyClanSphere.utils.validators import ValidationError
_last_position_hint = -1
_position_hint_lock = Lock()
def fill_dict(_dict, **kwargs):
"""A helper to fill the dict passed with the items passed as keyword
arguments if they are not yet in the dict. If the dict passed was
`None` a new dict is created and returned.
    This can be used to prepopulate initial dicts in overridden constructors:
class MyForm(forms.Form):
foo = forms.TextField()
bar = forms.TextField()
def __init__(self, initial=None):
forms.Form.__init__(self, forms.fill_dict(initial,
foo="nothing",
bar="nothing"
))
"""
if _dict is None:
return kwargs
for key, value in kwargs.iteritems():
if key not in _dict:
_dict[key] = value
return _dict
def set_fields(obj, data, *fields):
"""Set all the fields on obj with data if changed."""
for field in fields:
value = data[field]
if getattr(obj, field) != value:
setattr(obj, field, value)
def _next_position_hint():
"""Return the next position hint."""
global _last_position_hint
_position_hint_lock.acquire()
try:
_last_position_hint += 1
return _last_position_hint
finally:
_position_hint_lock.release()
def _decode(data):
"""Decodes the flat dictionary d into a nested structure.
>>> _decode({'foo': 'bar'})
{'foo': 'bar'}
>>> _decode({'foo.0': 'bar', 'foo.1': 'baz'})
{'foo': ['bar', 'baz']}
>>> data = _decode({'foo.bar': '1', 'foo.baz': '2'})
>>> data == {'foo': {'bar': '1', 'baz': '2'}}
True
More complex mappings work too:
>>> _decode({'foo.bar.0': 'baz', 'foo.bar.1': 'buzz'})
{'foo': {'bar': ['baz', 'buzz']}}
>>> _decode({'foo.0.bar': '23', 'foo.1.baz': '42'})
{'foo': [{'bar': '23'}, {'baz': '42'}]}
>>> _decode({'foo.0.0': '23', 'foo.0.1': '42'})
{'foo': [['23', '42']]}
>>> _decode({'foo': ['23', '42']})
{'foo': ['23', '42']}
Missing items in lists are ignored for convenience reasons:
>>> _decode({'foo.42': 'a', 'foo.82': 'b'})
{'foo': ['a', 'b']}
    This can be used to help client-side DOM processing (inserting and
deleting rows in dynamic forms).
It also supports werkzeug's multi dicts:
>>> _decode(MultiDict({"foo": ['1', '2']}))
{'foo': ['1', '2']}
>>> _decode(MultiDict({"foo.0": '1', "foo.1": '2'}))
{'foo': ['1', '2']}
Those two submission ways can also be used combined:
>>> _decode(MultiDict({"foo": ['1'], "foo.0": '2', "foo.1": '3'}))
{'foo': ['1', '2', '3']}
This function will never raise exceptions except for argument errors
but the recovery behavior for invalid form data is undefined.
"""
list_marker = object()
value_marker = object()
if isinstance(data, MultiDict):
listiter = data.iterlists()
else:
listiter = ((k, [v]) for k, v in data.iteritems())
def _split_key(name):
result = name.split('.')
for idx, part in enumerate(result):
if part.isdigit():
result[idx] = int(part)
return result
def _enter_container(container, key):
if key not in container:
return container.setdefault(key, {list_marker: False})
return container[key]
def _convert(container):
if value_marker in container:
force_list = False
values = container.pop(value_marker)
if container.pop(list_marker):
force_list = True
values.extend(_convert(x[1]) for x in
sorted(container.items()))
if not force_list and len(values) == 1:
values = values[0]
return values
elif container.pop(list_marker):
return [_convert(x[1]) for x in sorted(container.items())]
return dict((k, _convert(v)) for k, v in container.iteritems())
result = {list_marker: False}
for key, values in listiter:
parts = _split_key(key)
if not parts:
continue
container = result
for part in parts:
last_container = container
container = _enter_container(container, part)
last_container[list_marker] = isinstance(part, (int, long))
container[value_marker] = values[:]
return _convert(result)
def _bind(obj, form, memo):
"""Helper for the field binding. This is inspired by the way `deepcopy`
is implemented.
"""
if memo is None:
memo = {}
obj_id = id(obj)
if obj_id in memo:
return memo[obj_id]
rv = obj._bind(form, memo)
memo[obj_id] = rv
return rv
def _force_dict(value):
"""If the value is not a dict, raise an exception."""
if value is None or not isinstance(value, dict):
return {}
return value
def _force_list(value):
"""If the value is not a list, make it one."""
if value is None:
return []
try:
if isinstance(value, basestring):
raise TypeError()
return list(value)
except TypeError:
return [value]
def _make_widget(field, name, value, errors):
"""Shortcut for widget creation."""
return field.widget(field, name, value, errors)
def _make_name(parent, child):
"""Joins a name."""
if parent is None:
result = child
else:
result = '%s.%s' % (parent, child)
    # try to return an ascii-only bytestring if possible
try:
return str(result)
except UnicodeError:
return unicode(result)
def _to_string(value):
"""Convert a value to unicode, None means empty string."""
if value is None:
return u''
return unicode(value)
def _to_list(value):
"""Similar to `_force_list` but always succeeds and never drops data."""
if value is None:
return []
if isinstance(value, basestring):
return [value]
try:
return list(value)
except TypeError:
return [value]
def _value_matches_choice(value, choice):
"""Checks if a given value matches a choice."""
# this algorithm is also implemented in `MultiChoiceField.convert`
# for better scaling with multiple items. If it's changed here, it
# must be changed for the multi choice field too.
return choice == value or _to_string(choice) == _to_string(value)
def _iter_choices(choices):
"""Iterate over choices."""
if choices is not None:
for choice in choices:
if not isinstance(choice, tuple):
choice = (choice, choice)
yield choice
def _is_choice_selected(field, value, choice):
"""Checks if a choice is selected. If the field is a multi select
field it's checked if the choice is in the passed iterable of values,
otherwise it's checked if the value matches the choice.
"""
if field.multiple_choices:
for value in value:
if _value_matches_choice(value, choice):
return True
return False
return _value_matches_choice(value, choice)
class _Renderable(object):
"""Mixin for renderable HTML objects."""
def render(self):
return u''
def __call__(self, *args, **kwargs):
return self.render(*args, **kwargs)
class Widget(_Renderable):
"""Baseclass for all widgets. All widgets share a common interface
that can be used from within templates.
Take this form as an example:
>>> class LoginForm(Form):
... username = TextField(required=True)
... password = TextField(widget=PasswordInput)
... flags = MultiChoiceField(choices=[1, 2, 3])
...
>>> form = LoginForm()
>>> form.validate({'username': '', 'password': '',
... 'flags': [1, 3]})
False
>>> widget = form.as_widget()
You can get the subwidgets by using the normal indexing operators:
>>> username = widget['username']
>>> password = widget['password']
To render a widget you can usually invoke the `render()` method. All
keyword parameters are used as HTML attribute in the resulting tag.
You can also call the widget itself (``username()`` instead of
``username.render()``) which does the same if there are no errors for
the field but adds the default error list after the widget if there
are errors.
Widgets have some public attributes:
`errors`
gives the list of errors:
>>> username.errors
[u'This field is required.']
This error list is printable:
>>> print username.errors()
<ul class="errors"><li>This field is required.</li></ul>
Like any other sequence that yields list items it provides
`as_ul` and `as_ol` methods:
>>> print username.errors.as_ul()
<ul><li>This field is required.</li></ul>
Keep in mind that ``widget.errors()`` is equivalent to
``widget.errors.as_ul(class_='errors', hide_empty=True)``.
`value`
returns the value of the widget as primitive. For basic
widgets this is always a string, for widgets with subwidgets or
widgets with multiple values a dict or a list:
>>> username.value
u''
>>> widget['flags'].value
[u'1', u'3']
`name` gives you the name of the field for form submissions:
>>> username.name
'username'
Please keep in mind that the name is not always that obvious. pyClanSphere
supports nested form fields so it's a good idea to always use the
name attribute.
`id`
gives you the default domain for the widget. This is either none
        if there is no id for the field or `f_` + the field name with
underscores instead of dots:
>>> username.id
'f_username'
`all_errors`
like `errors` but also contains the errors of child
widgets.
"""
disable_dt = False
def __init__(self, field, name, value, all_errors):
self._field = field
self._value = value
self._all_errors = all_errors
self.name = name
def hidden(self):
"""Return one or multiple hidden fields for the current value. This
also handles subwidgets. This is useful for transparent form data
passing.
"""
fields = []
def _add_field(name, value):
fields.append(html.input(type='hidden', name=name, value=value))
def _to_hidden(value, name):
if isinstance(value, list):
for idx, value in enumerate(value):
_to_hidden(value, _make_name(name, idx))
elif isinstance(value, dict):
for key, value in value.iteritems():
_to_hidden(value, _make_name(name, key))
else:
_add_field(name, value)
_to_hidden(self.value, self.name)
return u'\n'.join(fields)
@property
def localname(self):
"""The local name of the field."""
return self.name.rsplit('.', 1)[-1]
@property
def id(self):
"""The proposed id for this widget."""
if self.name is not None:
return 'f_' + self.name.replace('.', '__')
@property
def value(self):
"""The primitive value for this widget."""
return self._field.to_primitive(self._value)
@property
def label(self):
"""The label for the widget."""
if self._field.label is not None:
return Label(unicode(self._field.label), self.id)
@property
def help_text(self):
"""The help text of the widget."""
if self._field.help_text is not None:
return unicode(self._field.help_text)
@property
def errors(self):
"""The direct errors of this widget."""
if self.name in self._all_errors:
return self._all_errors[self.name]
return ErrorList()
@property
def all_errors(self):
"""The current errors and the errors of all child widgets."""
items = sorted(self._all_errors.items())
if self.name is None:
return ErrorList(chain(*(item[1] for item in items)))
result = ErrorList()
for key, value in items:
if key == self.name or key.startswith(self.name + '.'):
result.extend(value)
return result
def as_dd(self, **attrs):
"""Return a dt/dd item."""
rv = []
if not self.disable_dt:
label = self.label
if label:
rv.append(html.dt(label()))
rv.append(html.dd(self(**attrs)))
if self.help_text:
rv.append(html.dd(self.help_text, class_='explanation'))
return Markup(u''.join(rv))
def _attr_setdefault(self, attrs):
"""Add an ID to the attrs if there is none."""
if 'id' not in attrs and self.id is not None:
attrs['id'] = self.id
def __call__(self, **attrs):
"""The default display is the form + error list as ul if needed."""
return self.render(**attrs) + Markup(self.errors())
class Label(_Renderable):
"""Holds a label."""
def __init__(self, text, linked_to=None):
self.text = text
self.linked_to = linked_to
def render(self, **attrs):
attrs.setdefault('for', self.linked_to)
return Markup(html.label(escape(self.text), **attrs))
class InternalWidget(Widget):
"""Special widgets are widgets that can't be used on arbitrary
form fields but belong to others.
"""
def __init__(self, parent):
self._parent = parent
value = name = None
errors = all_errors = property(lambda x: ErrorList())
class Input(Widget):
"""A widget that is a HTML input field."""
hide_value = False
type = None
def render(self, **attrs):
self._attr_setdefault(attrs)
value = self.value
if self.hide_value:
value = u''
return Markup(html.input(name=self.name, value=value, type=self.type,
**attrs))
class TextInput(Input):
"""A widget that holds text."""
type = 'text'
class PasswordInput(TextInput):
"""A widget that holds a password."""
type = 'password'
hide_value = True
class HiddenInput(Input):
"""A hidden input field for text."""
type = 'hidden'
class Textarea(Widget):
"""Displays a textarea."""
def _attr_setdefault(self, attrs):
Widget._attr_setdefault(self, attrs)
attrs.setdefault('rows', 8)
attrs.setdefault('cols', 40)
def render(self, **attrs):
self._attr_setdefault(attrs)
return Markup(html.textarea(self.value, name=self.name, **attrs))
class Checkbox(Widget):
"""A simple checkbox."""
@property
def checked(self):
return self.value != u'False'
def with_help_text(self, **attrs):
"""Render the checkbox with help text."""
data = self(**attrs)
if self.help_text:
data += Markup(u' ' + html.label(self.help_text, class_='explanation',
for_=self.id))
return data
def as_dd(self, **attrs):
"""Return a dt/dd item."""
rv = []
label = self.label
if label:
rv.append(html.dt(label()))
rv.append(html.dd(self.with_help_text()))
return Markup(u''.join(rv))
def as_li(self, **attrs):
"""Return a li item."""
rv = [self.render(**attrs)]
if self.label:
rv.append(u' ' + self.label())
if self.help_text:
rv.append(html.div(self.help_text, class_='explanation'))
rv.append(self.errors())
return Markup(html.li(u''.join(rv)))
def render(self, **attrs):
self._attr_setdefault(attrs)
return Markup(html.input(name=self.name, type='checkbox',
checked=self.checked, **attrs))
class SelectBox(Widget):
"""A select box."""
def _attr_setdefault(self, attrs):
Widget._attr_setdefault(self, attrs)
attrs.setdefault('multiple', self._field.multiple_choices)
def render(self, **attrs):
self._attr_setdefault(attrs)
items = []
for choice in self._field.choices:
if isinstance(choice, tuple):
key, value = choice
else:
key = value = choice
selected = _is_choice_selected(self._field, self.value, key)
items.append(html.option(unicode(value), value=unicode(key),
selected=selected))
return Markup(html.select(name=self.name, *items, **attrs))
class _InputGroupMember(InternalWidget):
"""A widget that is a single radio button."""
# override the label descriptor
label = None
inline_label = True
def __init__(self, parent, value, label):
InternalWidget.__init__(self, parent)
self.value = unicode(value)
self.label = Label(label, self.id)
@property
def name(self):
return self._parent.name
@property
def id(self):
return 'f_%s_%s' % (self._parent.name, self.value)
@property
def checked(self):
return _is_choice_selected(self._parent._field, self._parent.value,
self.value)
def render(self, **attrs):
self._attr_setdefault(attrs)
return Markup(html.input(type=self.type, name=self.name, value=self.value,
checked=self.checked, **attrs))
class RadioButton(_InputGroupMember):
"""A radio button in an input group."""
type = 'radio'
class GroupCheckbox(_InputGroupMember):
"""A checkbox in an input group."""
type = 'checkbox'
class _InputGroup(Widget):
def __init__(self, field, name, value, all_errors):
Widget.__init__(self, field, name, value, all_errors)
self.choices = []
self._subwidgets = {}
for value, label in _iter_choices(self._field.choices):
widget = self.subwidget(self, value, label)
self.choices.append(widget)
self._subwidgets[value] = widget
def __getitem__(self, value):
"""Return a subwidget."""
return self._subwidgets[value]
def _as_list(self, list_type, attrs):
if attrs.pop('hide_empty', False) and not self.choices:
return u''
self._attr_setdefault(attrs)
empty_msg = attrs.pop('empty_msg', None)
label = not attrs.pop('nolabel', False)
class_ = attrs.pop('class_', attrs.pop('class', None))
if class_ is None:
class_ = 'choicegroup'
attrs['class'] = class_
choices = [u'<li>%s %s</li>' % (
choice(),
label and choice.label() or u''
) for choice in self.choices]
if not choices:
if empty_msg is None:
empty_msg = _('No choices.')
choices.append(u'<li>%s</li>' % _(empty_msg))
return list_type(*choices, **attrs)
def as_ul(self, **attrs):
"""Render the radio buttons widget as <ul>"""
return Markup(self._as_list(html.ul, attrs))
def as_ol(self, **attrs):
"""Render the radio buttons widget as <ol>"""
return Markup(self._as_list(html.ol, attrs))
def render(self, **attrs):
return self.as_ul(**attrs)
class RadioButtonGroup(_InputGroup):
"""A group of radio buttons."""
subwidget = RadioButton
class CheckboxGroup(_InputGroup):
"""A group of checkboxes."""
subwidget = GroupCheckbox
class MappingWidget(Widget):
"""Special widget for dict-like fields."""
def __init__(self, field, name, value, all_errors):
Widget.__init__(self, field, name, _force_dict(value), all_errors)
self._subwidgets = {}
def __getitem__(self, name):
subwidget = self._subwidgets.get(name)
if subwidget is None:
# this could raise a KeyError we pass through
subwidget = _make_widget(self._field.fields[name],
_make_name(self.name, name),
self._value.get(name),
self._all_errors)
self._subwidgets[name] = subwidget
return subwidget
def as_dl(self, **attrs):
return Markup(html.dl(*[x.as_dd() for x in self], **attrs))
def __call__(self, *args, **kwargs):
return self.as_dl(*args, **kwargs)
def __iter__(self):
for key in self._field.fields:
yield self[key]
class FormWidget(MappingWidget):
"""A widget for forms."""
def get_hidden_fields(self):
"""This method is called by the `hidden_fields` property to return
a list of (key, value) pairs for the special hidden fields.
"""
fields = []
if self._field.form.request is not None:
if self._field.form.csrf_protected:
fields.append(('_csrf_token', self.csrf_token))
if self._field.form.csrf_protected and self._field.form.csrf_use_source:
fields.append(('_csrf_source_path', get_request().path))
if self._field.form.redirect_tracking:
target = self.redirect_target
if target is not None:
fields.append(('_redirect_target', target))
return fields
@property
def hidden_fields(self):
"""The hidden fields as string."""
return Markup(u''.join(html.input(type='hidden', name=name, value=value)
for name, value in self.get_hidden_fields()))
@cached_property
def captcha(self, theme=None):
"""The captcha if one exists for this form."""
if self._field.form.captcha_protected:
if theme is None:
theme = get_application().theme.settings['recaptcha.theme']
return Markup(get_recaptcha_html(theme=theme))
@property
def csrf_token(self):
"""Forward the CSRF check token for templates."""
return self._field.form.csrf_token
@property
def redirect_target(self):
"""The redirect target for this form."""
return self._field.form.redirect_target
def default_actions(self, **attrs):
"""Returns a default action div with a submit button."""
label = attrs.pop('label', None)
if label is None:
label = _('Submit')
attrs.setdefault('class', 'actions')
return Markup(html.div(html.input(type='submit', value=label), **attrs))
def render(self, action='', method='post', **attrs):
self._attr_setdefault(attrs)
with_errors = attrs.pop('with_errors', False)
# support jinja's caller
caller = attrs.pop('caller', None)
if caller is not None:
body = caller()
else:
body = self.as_dl() + self.default_actions()
hidden = self.hidden_fields
if hidden:
# if there are hidden fields we put an invisible div around
# it. the HTML standard doesn't allow input fields as direct
            # children of a <form> tag...
body = '<div style="display: none">%s</div>%s' % (hidden, body)
if with_errors:
body = self.errors() + body
return html.form(body, action=action, method=method, **attrs)
def __call__(self, *args, **attrs):
attrs.setdefault('with_errors', True)
return self.render(*args, **attrs)
class ListWidget(Widget):
"""Special widget for list-like fields."""
def __init__(self, field, name, value, all_errors):
Widget.__init__(self, field, name, _force_list(value), all_errors)
self._subwidgets = {}
def as_ul(self, **attrs):
return self._as_list(html.ul, attrs)
def as_ol(self, **attrs):
return self._as_list(html.ol, attrs)
def _as_list(self, factory, attrs):
if attrs.pop('hide_empty', False) and not self:
return u''
items = []
for index in xrange(len(self) + attrs.pop('extra_rows', 1)):
            items.append(html.li(self[index]()))
# add an invisible item for the validator
if not items:
items.append(html.li(style='display: none'))
return factory(*items, **attrs)
def __getitem__(self, index):
if not isinstance(index, (int, long)):
raise TypeError('list widget indices must be integers')
subwidget = self._subwidgets.get(index)
if subwidget is None:
try:
value = self._value[index]
except IndexError:
                # return a widget without a value if we try
# to access a field not in the list
value = None
subwidget = _make_widget(self._field.field,
_make_name(self.name, index), value,
self._all_errors)
self._subwidgets[index] = subwidget
return subwidget
def __iter__(self):
for index in xrange(len(self)):
yield self[index]
def __len__(self):
return len(self._value)
def __call__(self, *args, **kwargs):
return Markup(self.as_ul(*args, **kwargs))
class ErrorList(_Renderable, list):
"""The class that is used to display the errors."""
def render(self, **attrs):
return self.as_ul(**attrs)
def as_ul(self, **attrs):
return self._as_list(html.ul, attrs)
def as_ol(self, **attrs):
return self._as_list(html.ol, attrs)
def _as_list(self, factory, attrs):
if attrs.pop('hide_empty', False) and not self:
return u''
return factory(*(html.li(item) for item in self), **attrs)
def __call__(self, **attrs):
attrs.setdefault('class', attrs.pop('class_', 'errors'))
attrs.setdefault('hide_empty', True)
return self.render(**attrs)
class MultipleValidationErrors(ValidationError):
"""A validation error subclass for multiple errors raised by
subfields. This is used by the mapping and list fields.
"""
def __init__(self, errors):
ValidationError.__init__(self, '%d error%s' % (
len(errors), len(errors) != 1 and 's' or ''
))
self.errors = errors
def __unicode__(self):
return ', '.join(map(unicode, self.errors.itervalues()))
def unpack(self, key=None):
rv = {}
for name, error in self.errors.iteritems():
rv.update(error.unpack(_make_name(key, name)))
return rv
class FieldMeta(type):
def __new__(cls, name, bases, d):
messages = {}
for base in reversed(bases):
if hasattr(base, 'messages'):
messages.update(base.messages)
if 'messages' in d:
messages.update(d['messages'])
d['messages'] = messages
return type.__new__(cls, name, bases, d)
class Field(object):
"""Abstract field base class."""
__metaclass__ = FieldMeta
messages = dict(required=lazy_gettext('This field is required.'))
form = None
widget = TextInput
# these attributes are used by the widgets to get an idea what
# choices to display. Not every field will also validate them.
multiple_choices = False
choices = ()
# fields that have this attribute set get special treatment on
# validation. It means that even though a value was not in the
# submitted data it's validated against a default value.
validate_on_omission = False
def __init__(self, label=None, help_text=None, validators=None,
widget=None, messages=None, default=missing):
self._position_hint = _next_position_hint()
self.label = label
self.help_text = help_text
if validators is None:
validators = []
self.validators = validators
self.custom_converter = None
if widget is not None:
self.widget = widget
if messages:
self.messages = self.messages.copy()
self.messages.update(messages)
self._default = default
assert not issubclass(self.widget, InternalWidget), \
'can\'t use internal widgets as widgets for fields'
def __call__(self, value):
value = self.convert(value)
self.apply_validators(value)
return value
def __copy__(self):
return _bind(self, None, None)
def apply_validators(self, value):
"""Applies all validators on the value."""
if self.should_validate(value):
for validate in self.validators:
validate(self.form, value)
def should_validate(self, value):
"""Per default validate if the value is not None. This method is
called before the custom validators are applied to not perform
validation if the field is empty and not required.
For example a validator like `is_valid_ip` is never called if the
value is an empty string and the field hasn't raised a validation
error when checking if the field is required.
"""
return value is not None
def convert(self, value):
"""This can be overridden by subclasses and performs the value
conversion.
"""
return unicode(value)
def to_primitive(self, value):
"""Convert a value into a primitve (string or a list/dict of lists,
dicts or strings).
This method must never fail!
"""
return _to_string(value)
def get_default(self):
if callable(self._default):
return self._default()
return self._default
def _bind(self, form, memo):
"""Method that binds a field to a form. If `form` is None, a copy of
the field is returned."""
if form is not None and self.bound:
            raise TypeError('%r already bound' % type(self).__name__)
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.validators = self.validators[:]
rv.messages = self.messages.copy()
if form is not None:
rv.form = form
return rv
@property
def bound(self):
"""True if the form is bound."""
return 'form' in self.__dict__
def __repr__(self):
rv = object.__repr__(self)
if self.bound:
rv = rv[:-1] + ' [bound]>'
return rv
class Mapping(Field):
"""Apply a set of fields to a dictionary of values.
>>> field = Mapping(name=TextField(), age=IntegerField())
>>> field({'name': u'John Doe', 'age': u'42'})
{'age': 42, 'name': u'John Doe'}
Although it's possible to reassign the widget after field construction
it's not recommended because the `MappingWidget` is the only builtin
widget that is able to handle mapping structures.
"""
widget = MappingWidget
def __init__(self, *args, **fields):
Field.__init__(self)
if len(args) == 1:
if fields:
raise TypeError('keyword arguments and dict given')
self.fields = OrderedDict(args[0])
else:
if args:
raise TypeError('no positional arguments allowed if keyword '
'arguments provided.')
self.fields = OrderedDict(fields)
self.fields.sort(key=lambda i: i[1]._position_hint)
def convert(self, value):
value = _force_dict(value)
errors = {}
result = {}
for name, field in self.fields.iteritems():
try:
result[name] = field(value.get(name))
except ValidationError, e:
errors[name] = e
if errors:
raise MultipleValidationErrors(errors)
return result
def to_primitive(self, value):
value = _force_dict(value)
result = {}
for key, field in self.fields.iteritems():
result[key] = field.to_primitive(value.get(key))
return result
def _bind(self, form, memo):
rv = Field._bind(self, form, memo)
rv.fields = OrderedDict()
for key, field in self.fields.iteritems():
rv.fields[key] = _bind(field, form, memo)
return rv
class FormMapping(Mapping):
"""Like a mapping but does csrf protection and stuff."""
widget = FormWidget
def convert(self, value):
if self.form is None:
raise TypeError('form mapping without form passed is unable '
'to convert data')
if self.form.csrf_protected and self.form.request is not None:
token = self.form.request.values.get('_csrf_token')
source_path = self.form.request.values.get('_csrf_source_path')
if source_path:
if token != self.form.generate_csrf_token(source_path):
raise ValidationError(_(u'Invalid security token submitted.'))
else:
if token != self.form.csrf_token:
raise ValidationError(_(u'Invalid security token submitted.'))
if self.form.captcha_protected:
request = self.form.request
if request is None:
raise RuntimeError('captcha protected forms need a request')
if not validate_recaptcha(request.values.get('recaptcha_challenge_field'),
request.values.get('recaptcha_response_field'),
request.remote_addr):
raise ValidationError(_('You entered an invalid captcha.'))
return Mapping.convert(self, value)
class FormAsField(Mapping):
"""If a form is converted into a field the returned field object is an
instance of this class. The behavior is mostly equivalent to a normal
    :class:`Mapping` field with the difference that it has an attribute called
:attr:`form_class` that points to the form class it was created from.
"""
def __init__(self):
raise TypeError('can\'t create %r instances' %
self.__class__.__name__)
class Multiple(Field):
"""Apply a single field to a sequence of values.
>>> field = Multiple(IntegerField())
>>> field([u'1', u'2', u'3'])
[1, 2, 3]
Recommended widgets:
- `ListWidget` -- the default one and useful if multiple complex
fields are in use.
- `CheckboxGroup` -- useful in combination with choices
      - `SelectBox` -- useful in combination with choices
"""
widget = ListWidget
messages = dict(too_small=None, too_big=None)
validate_on_omission = True
def __init__(self, field, label=None, help_text=None, min_size=None,
max_size=None, validators=None, widget=None, messages=None,
default=missing):
Field.__init__(self, label, help_text, validators, widget, messages,
default)
self.field = field
self.min_size = min_size
self.max_size = max_size
@property
def multiple_choices(self):
return self.max_size is None or self.max_size > 1
def convert(self, value):
value = _force_list(value)
if self.min_size is not None and len(value) < self.min_size:
message = self.messages['too_small']
if message is None:
message = ngettext(u'Please provide at least %d item.',
u'Please provide at least %d items.',
self.min_size) % self.min_size
raise ValidationError(message)
if self.max_size is not None and len(value) > self.max_size:
message = self.messages['too_big']
if message is None:
message = ngettext(u'Please provide no more than %d item.',
u'Please provide no more than %d items.',
                                   self.max_size) % self.max_size
raise ValidationError(message)
result = []
errors = {}
for idx, item in enumerate(value):
try:
result.append(self.field(item))
except ValidationError, e:
errors[idx] = e
if errors:
raise MultipleValidationErrors(errors)
return result
def to_primitive(self, value):
return map(self.field.to_primitive, _force_list(value))
def _bind(self, form, memo):
rv = Field._bind(self, form, memo)
rv.field = _bind(self.field, form, memo)
return rv
class CommaSeparated(Multiple):
"""Works like the multiple field but for comma separated values:
>>> field = CommaSeparated(IntegerField())
>>> field(u'1, 2, 3')
[1, 2, 3]
The default widget is a `TextInput` but `Textarea` would be a possible
    choice as well.
"""
widget = TextInput
def __init__(self, field, label=None, help_text=None, min_size=None,
max_size=None, sep=u',', validators=None, widget=None,
messages=None, default=missing):
Multiple.__init__(self, field, label, help_text, min_size,
max_size, validators, widget, messages,
default)
self.sep = sep
def convert(self, value):
if isinstance(value, basestring):
value = filter(None, [x.strip() for x in value.split(self.sep)])
return Multiple.convert(self, value)
def to_primitive(self, value):
if value is None:
return u''
if isinstance(value, basestring):
return value
return (self.sep + u' ').join(map(self.field.to_primitive, value))
class LineSeparated(CommaSeparated):
"""Works like `CommaSeparated` but uses multiple lines.
    The default widget is a `Textarea` and that is pretty much the only thing
that makes sense for this widget.
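    An illustrative example, mirroring the `CommaSeparated` doctest above:
    >>> field = LineSeparated(IntegerField())
    >>> field(u'1\n2\n3')
    [1, 2, 3]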
"""
widget = Textarea
def convert(self, value):
if isinstance(value, basestring):
value = filter(None, [x.strip() for x in value.splitlines()])
return Multiple.convert(self, value)
def to_primitive(self, value):
if value is None:
return u''
if isinstance(value, basestring):
return value
return u'\n'.join(map(self.field.to_primitive, value))
class TextField(Field):
"""Field for strings.
>>> field = TextField(required=True, min_length=6)
>>> field('foo bar')
u'foo bar'
>>> field('')
Traceback (most recent call last):
...
ValidationError: This field is required.
"""
messages = dict(too_short=None, too_long=None)
def __init__(self, label=None, help_text=None, required=False,
min_length=None, max_length=None, validators=None,
widget=None, messages=None, default=missing):
Field.__init__(self, label, help_text, validators, widget, messages,
default)
self.required = required
self.min_length = min_length
self.max_length = max_length
def convert(self, value):
value = _to_string(value)
if self.required:
if not value:
raise ValidationError(self.messages['required'])
elif value:
if self.min_length is not None and len(value) < self.min_length:
message = self.messages['too_short']
if message is None:
message = ngettext(u'Please enter at least %d character.',
u'Please enter at least %d characters.',
self.min_length) % self.min_length
raise ValidationError(message)
if self.max_length is not None and len(value) > self.max_length:
message = self.messages['too_long']
if message is None:
message = ngettext(u'Please enter no more than %d character.',
u'Please enter no more than %d characters.',
self.max_length) % self.max_length
raise ValidationError(message)
return value
def should_validate(self, value):
"""Validate if the string is not empty."""
return bool(value)
class DateTimeField(Field):
"""Field for datetime objects.
>>> field = DateTimeField()
>>> field('1970-01-12 00:00')
datetime.datetime(1970, 1, 12, 0, 0)
>>> field('foo')
Traceback (most recent call last):
...
ValidationError: Please enter a valid date.
"""
messages = dict(invalid_date=lazy_gettext('Please enter a valid date.'))
def __init__(self, label=None, help_text=None, required=False,
rebase=True, validators=None, widget=None, messages=None,
default=missing):
Field.__init__(self, label, help_text, validators, widget, messages,
default)
self.required = required
self.rebase = rebase
def convert(self, value):
if isinstance(value, datetime):
return value
value = _to_string(value)
if not value:
if self.required:
raise ValidationError(self.messages['required'])
return None
try:
return parse_datetime(value, rebase=self.rebase)
except ValueError:
raise ValidationError(self.messages['invalid_date'])
def to_primitive(self, value):
if isinstance(value, datetime):
value = format_system_datetime(value, rebase=self.rebase)
return value
class DateField(DateTimeField):
"""A Field for date input, timezone neutral"""
messages = dict(invalid_date=lazy_gettext('Please enter a valid date.'))
def __init__(self, label=None, help_text=None, required=False,
validators=None, widget=None, messages=None,
default=missing):
Field.__init__(self, label, help_text, validators, widget, messages,
default)
self.required = required
def convert(self, value):
if isinstance(value, date):
return value
value = _to_string(value)
if not value:
if self.required:
raise ValidationError(self.messages['required'])
return None
try:
return parse_date(value)
except ValueError:
raise ValidationError(self.messages['invalid_date'])
def to_primitive(self, value):
if isinstance(value, datetime):
value = format_system_datetime(value, dateonly=True)
return value
class ModelField(Field):
"""A field that queries for a model.
The first argument is the name of the model, the second the named
argument for `filter_by` (eg: `User` and ``'username'``). If the
key is not given (None) the primary key is assumed.
"""
messages = dict(not_found=lazy_gettext(u'“%(value)s” does not exist'))
def __init__(self, model, key=None, label=None, help_text=None,
required=False, message=None, validators=None, widget=None,
messages=None, default=missing, on_not_found=None):
Field.__init__(self, label, help_text, validators, widget, messages,
default)
self.model = model
self.key = key
self.required = required
self.message = message
self.on_not_found = on_not_found
def convert(self, value):
if isinstance(value, self.model):
return value
if not value:
if self.required:
raise ValidationError(self.messages['required'])
return None
value = self._coerce_value(value)
if self.key is None:
rv = self.model.query.get(value)
else:
rv = self.model.query.filter_by(**{self.key: value}).first()
if rv is None:
if self.on_not_found is not None:
self.on_not_found(value)
raise ValidationError(self.messages['not_found'] %
{'value': value})
return rv
def _coerce_value(self, value):
return value
def to_primitive(self, value):
if value is None:
return u''
elif isinstance(value, self.model):
if self.key is None:
value = db.class_mapper(self.model) \
.primary_key_from_instance(value)[0]
else:
value = getattr(value, self.key)
return unicode(value)
class HiddenModelField(ModelField):
"""A hidden field that points to a model identified by primary key.
Can be used to pass models through a form.
"""
widget = HiddenInput
# these messages should never show up unless ...
    # ... the user tampered with the form data
# ... or the object went away in the meantime.
messages = dict(
invalid=lazy_gettext('Invalid value.'),
not_found=lazy_gettext('Key does not exist.')
)
def __init__(self, model, key=None, required=False, message=None,
validators=None, widget=None, messages=None,
default=missing):
ModelField.__init__(self, model, key, None, None, required,
message, validators, widget, messages,
default)
def _coerce_value(self, value):
try:
return int(value)
except (TypeError, ValueError):
raise ValidationError(self.messages['invalid'])
class ChoiceField(Field):
"""A field that lets a user select one out of many choices.
A choice field accepts some choices that are valid values for it.
Values are compared after converting to unicode which means that
``1 == "1"``:
>>> field = ChoiceField(choices=[1, 2, 3])
>>> field('1')
1
>>> field('42')
Traceback (most recent call last):
...
ValidationError: Please enter a valid choice.
Two values `a` and `b` are considered equal if either ``a == b`` or
``primitive(a) == primitive(b)`` where `primitive` is the primitive
of the value. Primitives are created with the following algorithm:
1. if the object is `None` the primitive is the empty string
2. otherwise the primitive is the string value of the object
A choice field also accepts lists of tuples as argument where the
first item is used for comparing and the second for displaying
    (which is used by the `SelectBox` widget):
>>> field = ChoiceField(choices=[(0, 'inactive'), (1, 'active')])
>>> field('0')
0
Because all fields are bound to the form before validation it's
possible to assign the choices later:
>>> class MyForm(Form):
... status = ChoiceField()
...
>>> form = MyForm()
    >>> form.status.choices = [(0, 'inactive'), (1, 'active')]
>>> form.validate({'status': '0'})
True
>>> form.data
{'status': 0}
If a choice field is set to "not required" and a `SelectBox` is used
as widget you have to provide an empty choice or the field cannot be
left blank.
>>> field = ChoiceField(required=False, choices=[('', _('Nothing')),
... ('1', _('Something'))])
"""
widget = SelectBox
messages = dict(
invalid_choice=lazy_gettext('Please enter a valid choice.')
)
def __init__(self, label=None, help_text=None, required=True,
choices=None, validators=None, widget=None, messages=None,
default=missing):
Field.__init__(self, label, help_text, validators, widget, messages,
default)
self.required = required
self.choices = choices
def convert(self, value):
if not value and not self.required:
return
if self.choices:
for choice in self.choices:
if isinstance(choice, tuple):
choice = choice[0]
if _value_matches_choice(value, choice):
return choice
raise ValidationError(self.messages['invalid_choice'])
def _bind(self, form, memo):
rv = Field._bind(self, form, memo)
if self.choices is not None:
rv.choices = list(self.choices)
return rv
class MultiChoiceField(ChoiceField):
"""A field that lets a user select multiple choices."""
multiple_choices = True
messages = dict(too_small=None, too_big=None)
validate_on_omission = True
def __init__(self, label=None, help_text=None, choices=None,
min_size=None, max_size=None, validators=None,
widget=None, messages=None, default=missing):
ChoiceField.__init__(self, label, help_text, min_size > 0, choices,
validators, widget, messages, default)
self.min_size = min_size
self.max_size = max_size
def convert(self, value):
result = []
known_choices = {}
for choice in self.choices:
if isinstance(choice, tuple):
choice = choice[0]
known_choices[choice] = choice
known_choices.setdefault(_to_string(choice), choice)
for value in _to_list(value):
for version in value, _to_string(value):
if version in known_choices:
result.append(known_choices[version])
break
else:
raise ValidationError(_(u'“%s” is not a valid choice') %
value)
if self.min_size is not None and len(result) < self.min_size:
message = self.messages['too_small']
if message is None:
message = ngettext(u'Please provide at least %d item.',
u'Please provide at least %d items.',
self.min_size) % self.min_size
raise ValidationError(message)
if self.max_size is not None and len(result) > self.max_size:
message = self.messages['too_big']
if message is None:
message = ngettext(u'Please provide no more than %d item.',
u'Please provide no more than %d items.',
                                   self.max_size) % self.max_size
raise ValidationError(message)
return result
def to_primitive(self, value):
return map(unicode, _force_list(value))
class IntegerField(Field):
"""Field for integers.
>>> field = IntegerField(min_value=0, max_value=99)
>>> field('13')
13
>>> field('thirteen')
Traceback (most recent call last):
...
ValidationError: Please enter a whole number.
>>> field('193')
Traceback (most recent call last):
...
ValidationError: Ensure this value is less than or equal to 99.
"""
messages = dict(
too_small=None,
too_big=None,
no_integer=lazy_gettext('Please enter a whole number.')
)
def __init__(self, label=None, help_text=None, required=False,
min_value=None, max_value=None, validators=None,
widget=None, messages=None, default=missing):
Field.__init__(self, label, help_text, validators, widget, messages,
default)
self.required = required
self.min_value = min_value
self.max_value = max_value
def convert(self, value):
value = _to_string(value)
if not value:
if self.required:
raise ValidationError(self.messages['required'])
return None
try:
value = int(value)
except ValueError:
raise ValidationError(self.messages['no_integer'])
if self.min_value is not None and value < self.min_value:
message = self.messages['too_small']
if message is None:
message = _(u'Ensure this value is greater than or '
u'equal to %s.') % self.min_value
raise ValidationError(message)
if self.max_value is not None and value > self.max_value:
message = self.messages['too_big']
if message is None:
message = _(u'Ensure this value is less than or '
u'equal to %s.') % self.max_value
raise ValidationError(message)
return int(value)
class BooleanField(Field):
"""Field for boolean values.
>>> field = BooleanField()
>>> field('1')
True
>>> field = BooleanField()
>>> field('')
False
"""
widget = Checkbox
validate_on_omission = True
choices = [
(u'True', lazy_gettext(u'True')),
(u'False', lazy_gettext(u'False'))
]
def convert(self, value):
return value != u'False' and bool(value)
def to_primitive(self, value):
if self.convert(value):
return u'True'
return u'False'
class FormMeta(type):
"""Meta class for forms. Handles form inheritance and registers
validator functions.
"""
def __new__(cls, name, bases, d):
fields = {}
validator_functions = {}
root_validator_functions = []
for base in reversed(bases):
if hasattr(base, '_root_field'):
# base._root_field is always a FormMapping field
fields.update(base._root_field.fields)
root_validator_functions.extend(base._root_field.validators)
for key, value in d.iteritems():
if key.startswith('validate_') and callable(value):
validator_functions[key[9:]] = value
elif isinstance(value, Field):
fields[key] = value
d[key] = FieldDescriptor(key)
for field_name, func in validator_functions.iteritems():
if field_name in fields:
fields[field_name].validators.append(func)
d['_root_field'] = root = FormMapping(**fields)
context_validate = d.get('context_validate')
root.validators.extend(root_validator_functions)
if context_validate is not None:
root.validators.append(context_validate)
return type.__new__(cls, name, bases, d)
def as_field(cls):
"""Returns a field object for this form. The field object returned
is independent of the form and can be modified in the same manner as
a bound field.
"""
field = object.__new__(FormAsField)
field.__dict__.update(cls._root_field.__dict__)
field.form_class = cls
field.validators = cls._root_field.validators[:]
field.fields = cls._root_field.fields.copy()
return field
@property
def validators(cls):
return cls._root_field.validators
@property
def fields(cls):
return cls._root_field.fields
class FieldDescriptor(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, type=None):
try:
return (obj or type).fields[self.name]
except KeyError:
raise AttributeError(self.name)
def __set__(self, obj, value):
obj.fields[self.name] = value
def __delete__(self, obj):
if self.name not in obj.fields:
raise AttributeError('%r has no attribute %r' %
(type(obj).__name__, self.name))
del obj.fields[self.name]
class Form(object):
"""Form base class.
>>> class PersonForm(Form):
... name = TextField(required=True)
... age = IntegerField()
>>> form = PersonForm()
>>> form.validate({'name': 'johnny', 'age': '42'})
True
>>> form.data['name']
u'johnny'
>>> form.data['age']
42
Let's cause a simple validation error:
>>> form = PersonForm()
>>> form.validate({'name': '', 'age': 'fourty-two'})
False
>>> print form.errors['age'][0]
Please enter a whole number.
>>> print form.errors['name'][0]
This field is required.
You can also add custom validation routines for fields by adding methods
that start with the prefix ``validate_`` and the field name that take the
value as argument. For example:
>>> class PersonForm(Form):
... name = TextField(required=True)
... age = IntegerField()
...
... def validate_name(self, value):
... if not value.isalpha():
... raise ValidationError(u'The value must only contain letters')
>>> form = PersonForm()
>>> form.validate({'name': 'mr.t', 'age': '42'})
False
>>> form.errors
{'name': [u'The value must only contain letters']}
You can also validate multiple fields in the context of other fields.
That validation is performed after all other validations. Just add a
method called ``context_validate`` that is passed the dict of all fields::
>>> class RegisterForm(Form):
... username = TextField(required=True)
... password = TextField(required=True)
... password_again = TextField(required=True)
...
... def context_validate(self, data):
... if data['password'] != data['password_again']:
... raise ValidationError(u'The two passwords must be the same')
>>> form = RegisterForm()
>>> form.validate({'username': 'admin', 'password': 'blah',
... 'password_again': 'blag'})
...
False
>>> form.errors
{None: [u'The two passwords must be the same']}
Forms can be used as fields for other forms. To create a form field of
a form you can call the `as_field` class method::
>>> field = RegisterForm.as_field()
This field can be used like any other field class. What's important about
forms as fields is that validators don't get an instance of `RegisterForm`
passed as `form` / `self` but the form where it's used in if the field is
used from a form.
    Form fields are bound to the form on form instantiation. This makes it
    possible to modify a particular instance of the form. For example you
    can create an instance of it and drop some fields by using
``del form.fields['name']`` or reassign choices of choice fields. It's
however not easily possible to add new fields to an instance because newly
added fields wouldn't be bound. The fields that are stored directly on
the form can also be accessed with their name like a regular attribute.
Example usage:
>>> class StatusForm(Form):
... status = ChoiceField()
...
>>> StatusForm.status.bound
False
>>> form = StatusForm()
>>> form.status.bound
True
>>> form.status.choices = [u'happy', u'unhappy']
>>> form.validate({'status': u'happy'})
True
>>> form['status']
u'happy'
Fields support default values. These however are not as useful as you
might think. These defaults are just annotations for external handling.
The form validation system does not respect those values.
They are for example used in the configuration system.
Example:
>>> field = TextField(default=u'foo')
"""
__metaclass__ = FormMeta
csrf_protected = True
csrf_use_source = False
redirect_tracking = True
captcha_protected = False
def __init__(self, initial=None):
self.request = get_request()
if initial is None:
initial = {}
self.initial = initial
self.invalid_redirect_targets = set()
self._root_field = _bind(self.__class__._root_field, self, {})
self.reset()
def __getitem__(self, key):
return self.data[key]
def __contains__(self, key):
return key in self.data
def as_widget(self):
"""Return the form as widget."""
# if there is submitted data, use that for the widget
if self.raw_data is not None:
data = self.raw_data
# otherwise go with the data from the source (eg: database)
else:
data = self.data
return _make_widget(self._root_field, None, data, self.errors)
def add_invalid_redirect_target(self, *args, **kwargs):
"""Add an invalid target. Invalid targets are URLs we don't want to
visit again. For example if a post is deleted from the post edit page
it's a bad idea to redirect back to the edit page because in that
situation the edit page would return a page not found.
This function accepts the same parameters as `url_for`.
"""
self.invalid_redirect_targets.add(url_for(*args, **kwargs))
@property
def redirect_target(self):
"""The back-redirect target for this form."""
return get_redirect_target(self.invalid_redirect_targets,
self.request)
def redirect(self, *args, **kwargs):
"""Redirects to the url rule given or back to the URL where we are
comming from if `redirect_tracking` is enabled.
"""
target = None
if self.redirect_tracking:
target = self.redirect_target
if target is None:
return redirect_to(*args, **kwargs)
return _redirect(target)
@property
def csrf_token(self):
return self.generate_csrf_token()
def generate_csrf_token(self, path=None):
"""The unique CSRF security token for this form."""
if self.request is None:
raise AttributeError('no csrf token because form not bound '
'to request')
if path is None:
path = self.request.path
user_id = -1
if self.request.user.is_somebody:
user_id = self.request.user.id
login_time = self.request.session.get('lt', -1)
key = self.request.app.cfg['secret_key']
return sha1(('%s|%s|%s|%s' % (path, login_time, user_id, key))
.encode('utf-8')).hexdigest()
@property
def is_valid(self):
"""True if the form is valid."""
return not self.errors
@property
def has_changed(self):
"""True if the form has changed."""
return self._root_field.to_primitive(self.initial) != \
self._root_field.to_primitive(self.data)
@property
def fields(self):
return self._root_field.fields
@property
def validators(self):
return self._root_field.validators
def reset(self):
"""Resets the form."""
self.data = self.initial.copy()
self.errors = {}
self.raw_data = None
def validate(self, data):
"""Validate the form against the data passed."""
self.raw_data = _decode(data)
# for each field in the root that requires validation on value
# omission we add `None` into the raw data dict. Because the
# implicit switch between initial data and user submitted data
# only happens on the "root level" for obvious reasons we only
# have to hook the data in here.
for name, field in self._root_field.fields.iteritems():
if field.validate_on_omission and name not in self.raw_data:
self.raw_data.setdefault(name)
d = self.data.copy()
d.update(self.raw_data)
errors = {}
try:
data = self._root_field(d)
except ValidationError, e:
errors = e.unpack()
self.errors = errors
if errors:
return False
self.data.update(data)
return True
|
jokey2k/pyClanSphere
|
pyClanSphere/utils/forms.py
|
Python
|
bsd-3-clause
| 65,405
|
[
"VisIt"
] |
94a8eac6247326c3b8ef314386c8b190386cbf2a0556fab51b57ca47f946395e
|
# -*- coding: utf-8 -*-
# PEP8:OK, LINT:OK, PY3:OK
#############################################################################
## This file may be used under the terms of the GNU General Public
## License version 2.0 or 3.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http:#www.fsf.org/licensing/licenses/info/GPLv2.html and
## http:#www.gnu.org/copyleft/gpl.html.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#############################################################################
# metadata
' Vagrant Ninja '
__version__ = ' 2.6 '
__license__ = ' GPL '
__author__ = ' juancarlospaco '
__email__ = ' juancarlospaco@ubuntu.com '
__url__ = 'github.com/juancarlospaco'
__date__ = '10/10/2013'
__prj__ = 'vagrant'
__docformat__ = 'html'
__source__ = ''
__full_licence__ = ''
# imports
from os import environ, linesep, chmod, remove, path, chdir, makedirs
from sip import setapi
from datetime import datetime
from subprocess import check_output as getoutput
from random import choice
from getpass import getuser
try:
from os import startfile
except ImportError:
from subprocess import Popen
from PyQt4.QtGui import (QLabel, QCompleter, QDirModel, QPushButton, QMenu,
QDockWidget, QVBoxLayout, QLineEdit, QIcon, QCheckBox, QColor, QMessageBox,
QGraphicsDropShadowEffect, QGroupBox, QComboBox, QTabWidget, QButtonGroup,
QAbstractButton, QScrollArea, QSpinBox)
from PyQt4.QtCore import Qt, QDir, QProcess, QUrl
from PyQt4.QtNetwork import QNetworkProxy
try:
from PyKDE4.kdeui import KTextEdit as QTextEdit
except ImportError:
from PyQt4.QtGui import QTextEdit # lint:ok
from ninja_ide.core import plugin
# API 2: sip.setapi() must actually be called for each name; ignore the
# error raised if the API version was already fixed by the imports above
for a in ("QDate", "QDateTime", "QString", "QTime", "QUrl",
          "QTextStream", "QVariant"):
    try:
        setapi(a, 2)
    except ValueError:
        pass
# constans
HELPMSG = '''<h3>Vagrant</h3>
Vagrant provides easy to configure, reproducible, and portable work environments
built on top of industry-standard technology and controlled by a single
consistent workflow.<br>Machines are provisioned on top of VirtualBox.
Provisioning tools automatically install and configure software on the machine.
<br><br><b>If you are Developer</b>, Vagrant will isolate dependencies and
configuration within a single disposable, consistent environment, without
sacrificing any of the tools you are used to working with (editors, debuggers, etc).
Once you or someone else creates a single Vagrantfile, you just need to vagrant
up and everything is installed and configured for you to work.
Other members of your team create their development environments from the same
configuration, so whether you are working on Linux, OSX, or Windows, all your
team members are running code in the same environment, against the same
dependencies, all configured the same way. Say goodbye to "works on my
machine" bugs.<br><br>Visit <a href="http://vagrantup.com">Vagrantup.com</a> and
<a href="http://virtualbox.org">Virtualbox.org</a><br><br>
''' + ''.join((__doc__, __version__, __license__, 'by', __author__, __email__))
VBOXGUI = '''
config.vm.provider :virtualbox do |vb|
vb.gui = true # false for NO GUI
vb.customize ["modifyvm", :id, "--memory", "{}"] # RAM for VM
vb.customize ["modifyvm", :id, "--cpuexecutioncap", "{}"] # CPU for VM
end
'''
APTGET_PROXY = '''# proxy support for the VM
echo "Acquire::http::Proxy 'http://{}';" | tee /etc/apt/apt.conf.d/99proxy
echo "Acquire::https::Proxy 'https://{}';" >> /etc/apt/apt.conf.d/99proxy
echo "Acquire::ftp::Proxy 'ftp://{}';" >> /etc/apt/apt.conf.d/99proxy
export http_proxy='http://{}'
export https_proxy='https://{}'
export ftp_proxy='ftp://{}'
'''
CONFIG = '''
Vagrant.configure("2") do |config|
config.vm.box = "{}"
config.vm.hostname = "{}"
config.vm.box_url = "{}://cloud-images.ubuntu.com/vagrant/{}/current/{}-server-cloudimg-{}-vagrant-disk1.box"
config.vm.provision :shell, :path => "bootstrap.sh"
{}
{}
end
'''
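# Illustrative sketch (hypothetical values) of how build() fills CONFIG below:
#   CONFIG.format('earth', 'earth', 'https', 'saucy', 'saucy', 'amd64',
#       ' config.vm.network :forwarded_port, host: 8000, guest: 8000',
#       VBOXGUI.format(1024, 99))
# renders a Vagrantfile that boots the saucy amd64 Ubuntu cloud image box,
# forwards port 8000 and shows the VirtualBox GUI with 1024 MB of RAM.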
BASE = path.abspath(path.join(path.expanduser("~"), 'vagrant'))
###############################################################################
class Main(plugin.Plugin):
" Main Class "
def initialize(self, *args, **kwargs):
" Init Main Class "
super(Main, self).initialize(*args, **kwargs)
self.completer, self.dirs = QCompleter(self), QDirModel(self)
self.dirs.setFilter(QDir.AllEntries | QDir.NoDotAndDotDot)
self.completer.setModel(self.dirs)
self.completer.setCaseSensitivity(Qt.CaseInsensitive)
self.completer.setCompletionMode(QCompleter.PopupCompletion)
self.desktop, self.project, menu = '', '', QMenu('Vagrant')
menu.addAction('UP', lambda: self.vagrant_c('up'))
menu.addAction('HALT', lambda: self.vagrant_c('halt'))
menu.addAction('RELOAD', lambda: self.vagrant_c('reload'))
menu.addAction('STATUS', lambda: self.vagrant_c('status'))
menu.addAction('SUSPEND', lambda: self.vagrant_c('suspend'))
menu.addAction('RESUME', lambda: self.vagrant_c('resume'))
menu.addAction('PROVISION', lambda: self.vagrant_c('provision'))
menu.addAction('PACKAGE', lambda: self.vagrant_c('package'))
menu.addAction('INIT', lambda: self.vagrant_c('init'))
menu.addSeparator()
menu.addAction('DESTROY (!!!)', lambda: self.vagrant_c('destroy'))
self.locator.get_service('explorer').add_project_menu(menu, lang='all')
self.process = QProcess()
self.process.readyReadStandardOutput.connect(self.readOutput)
self.process.readyReadStandardError.connect(self.readErrors)
self.process.finished.connect(self._process_finished)
self.process.error.connect(self._process_finished)
# Proxy support, by reading http_proxy os env variable
proxy_url = QUrl(environ.get('http_proxy', ''))
QNetworkProxy.setApplicationProxy(QNetworkProxy(QNetworkProxy.HttpProxy
if str(proxy_url.scheme()).startswith('http')
else QNetworkProxy.Socks5Proxy, proxy_url.host(), proxy_url.port(),
proxy_url.userName(), proxy_url.password())) \
if 'http_proxy' in environ else None
self.mainwidget = QTabWidget()
self.mainwidget.tabCloseRequested.connect(lambda:
self.mainwidget.setTabPosition(1)
if self.mainwidget.tabPosition() == 0
else self.mainwidget.setTabPosition(0))
self.mainwidget.setStyleSheet('QTabBar{font-weight:bold;}')
self.mainwidget.setMovable(True)
self.mainwidget.setTabsClosable(True)
self.dock, self.scrollable = QDockWidget(), QScrollArea()
self.scrollable.setWidgetResizable(True)
self.scrollable.setWidget(self.mainwidget)
self.dock.setWindowTitle(__doc__)
self.dock.setStyleSheet('QDockWidget::title{text-align: center;}')
self.dock.setWidget(self.scrollable)
self.locator.get_service('misc').add_widget(self.dock,
QIcon.fromTheme("virtualbox"), __doc__)
self.tab1, self.tab2, self.tab3 = QGroupBox(), QGroupBox(), QGroupBox()
self.tab4, self.tab5, self.tab6 = QGroupBox(), QGroupBox(), QGroupBox()
for a, b in ((self.tab1, 'Basics'), (self.tab2, 'General Options'),
(self.tab3, 'VM Package Manager'), (self.tab4, 'VM Provisioning'),
(self.tab5, 'VM Desktop GUI'), (self.tab6, 'Run')):
a.setTitle(b)
a.setToolTip(b)
self.mainwidget.addTab(a, QIcon.fromTheme("virtualbox"), b)
QPushButton(QIcon.fromTheme("help-about"), 'About', self.dock
).clicked.connect(lambda: QMessageBox.information(self.dock, __doc__,
HELPMSG))
self.vmname = QLineEdit(self.get_name())
self.vmname.setPlaceholderText('type_your_VM_name_here_without_spaces')
self.vmname.setToolTip('Type VM name, no spaces or special characters')
self.target = QLabel('<b>Vagrant Target Folder: ' +
path.join(BASE, self.vmname.text()))
self.vmname.textChanged.connect(lambda: self.target.setText(
'<b>Vagrant Target Folder: ' + path.join(BASE, self.vmname.text())))
self.btn1 = QPushButton(QIcon.fromTheme("face-smile-big"), 'Suggestion')
self.btn1.setToolTip('Suggest me a Random VM name !')
self.btn1.clicked.connect(lambda: self.vmname.setText(self.get_name()))
self.vmcode, self.vmarch = QComboBox(), QComboBox()
self.vmcode.addItems(['saucy', 'raring', 'quantal', 'precise'])
self.vmarch.addItems(['x86_64 (amd64) 64-Bits', 'x86 (i386) 32-Bits'])
vboxg1 = QVBoxLayout(self.tab1)
for each_widget in (QLabel('<b>Name for VM'), self.vmname, self.btn1,
QLabel('<b>Choose Ubuntu Codename for the VM:</b>'), self.vmcode,
QLabel('<b>Choose Architecture for VM:'), self.vmarch, self.target):
vboxg1.addWidget(each_widget)
self.chrt = QCheckBox('LOW CPU priority for Backend Process')
self.chttps = QComboBox()
self.chttps.addItems(['https', 'http'])
try:
self.vinfo1 = QLabel('''<b> Vagrant Backend Version: </b> {},
<b> VirtualBox Backend Version: </b> {}. '''.format(
getoutput('vagrant --version', shell=1).strip(),
getoutput('vboxmanage --version', shell=1).strip()))
except:
self.vinfo1 = QLabel('<b>Warning: Failed to query Vagrant Backend!')
self.qckb1 = QCheckBox(' Open target directory later')
self.qckb1.setToolTip('Open the target directory when finished')
self.qckb2 = QCheckBox(' Save a LOG file to target later')
self.qckb2.setToolTip('Save a read-only .LOG file to target')
self.qckb3 = QCheckBox(' NO run Headless Mode, use a Window')
self.qckb3.setToolTip('Show the VM on a Window GUI instead of Headless')
self.cpu, self.ram = QSpinBox(), QSpinBox()
self.cpu.setRange(25, 99)
self.cpu.setValue(99)
self.ram.setRange(512, 4096)
self.ram.setValue(1024)
vboxg2 = QVBoxLayout(self.tab2)
for each_widget in (self.qckb1, self.qckb2, self.qckb3, self.chrt,
QLabel('<b>Max CPU Limit for VM:</b>'), self.cpu,
QLabel('<b>Max RAM Limit for VM:</b>'), self.ram,
QLabel('<b>Download Protocol Type:</b>'), self.chttps, self.vinfo1):
vboxg2.addWidget(each_widget)
        self.qckb10 = QCheckBox('Run apt-get update on the created VM')
        self.qckb11 = QCheckBox('Run apt-get dist-upgrade on the created VM')
        self.qckb12 = QCheckBox('Run apt-get clean on the created VM')
        self.qckb13 = QCheckBox('Run apt-get autoremove on the created VM')
        self.qckb14 = QCheckBox('Try to Fix Broken packages if any on the VM')
self.qckb14 = QCheckBox('Try to Fix Broken packages if any on the VM')
self.aptproxy, self.portredirect = QLineEdit(), QLineEdit('8000, 9000')
self.aptproxy.setPlaceholderText(' user:password@proxyaddress:port ')
vboxg3 = QVBoxLayout(self.tab3)
for each_widget in (self.qckb10, self.qckb11, self.qckb12, self.qckb13,
self.qckb14,
QLabel('<b>Network Proxy for apt-get on the VM'), self.aptproxy,
QLabel('<b>Network Port Redirects for the VM'), self.portredirect):
vboxg3.addWidget(each_widget)
self.aptpkg = QTextEdit('build-essential git python-pip vim mc wget')
self.aptppa, self.pippkg = QLineEdit(), QTextEdit('virtualenv yolk')
self.aptppa.setPlaceholderText(' ppa:ninja-ide-developers/daily ')
self.requirements = QLineEdit()
self.requirements.setPlaceholderText(' /full/path/to/requirements.txt ')
self.requirements.setCompleter(self.completer)
vboxg4 = QVBoxLayout(self.tab4)
for each_widget in (QLabel('<b>Custom APT Ubuntu package'), self.aptpkg,
QLabel('<b>Custom APT Ubuntu PPA:</b> '), self.aptppa,
QLabel('<b>Custom PIP Python packages:</b> '), self.pippkg,
QLabel('<b>Custom PIP Python requirements: '), self.requirements):
vboxg4.addWidget(each_widget)
self.buttonGroup = QButtonGroup()
self.buttonGroup.buttonClicked[QAbstractButton].connect(self.get_de_pkg)
vboxg5 = QVBoxLayout(self.tab5)
for i, d in enumerate(('Ubuntu Unity', 'KDE Plasma', 'LXDE', 'XFCE')):
button = QPushButton(d)
button.setCheckable(True)
button.setMinimumSize(75, 50)
button.setToolTip(d)
vboxg5.addWidget(button)
self.buttonGroup.addButton(button)
self.output = QTextEdit('''
We have persistent objects, they are called files. -Ken Thompson. ''')
self.runbtn = QPushButton(QIcon.fromTheme("media-playback-start"),
'Start Vagrant Instrumentation Now !')
self.runbtn.setMinimumSize(75, 50)
self.runbtn.clicked.connect(self.build)
glow = QGraphicsDropShadowEffect(self)
glow.setOffset(0)
glow.setBlurRadius(99)
glow.setColor(QColor(99, 255, 255))
self.runbtn.setGraphicsEffect(glow)
self.stopbt = QPushButton(QIcon.fromTheme("media-playback-stop"),
'Stop Vagrant')
        # QProcess has no stop(); terminate() asks the process to exit
        self.stopbt.clicked.connect(lambda: self.process.terminate())
self.killbt = QPushButton(QIcon.fromTheme("application-exit"),
'Force Kill Vagrant')
self.killbt.clicked.connect(lambda: self.process.kill())
vboxg6 = QVBoxLayout(self.tab6)
for each_widget in (QLabel('<b>Multiprocess Output Logs'), self.output,
self.runbtn, self.stopbt, self.killbt):
vboxg6.addWidget(each_widget)
[a.setChecked(True) for a in (self.qckb1, self.qckb2, self.qckb3,
self.qckb10, self.qckb11, self.qckb12, self.qckb13, self.qckb14,
self.chrt)]
self.mainwidget.setCurrentIndex(5)
def get_de_pkg(self, button):
' get package from desktop name '
        if button.text() == 'Ubuntu Unity':
            self.desktop = 'ubuntu-desktop'
        elif button.text() == 'KDE Plasma':
            self.desktop = 'kubuntu-desktop'
        elif button.text() == 'LXDE':
self.desktop = 'lubuntu-desktop'
else:
self.desktop = 'xubuntu-desktop'
return self.desktop
def get_name(self):
' return a random name of stars, planets and moons of solar system '
return choice((getuser(), 'sun', 'mercury', 'venus', 'earth', 'mars',
            'neptune', 'ceres', 'pluto', 'haumea', 'makemake', 'eris', 'moon',
'saturn', 'europa', 'ganymede', 'callisto', 'mimas', 'enceladus',
'tethys', 'dione', 'rhea', 'titan', 'iapetus', 'miranda', 'ariel',
'umbriel', 'titania', 'oberon', 'triton', 'charon', 'orcus', 'io',
'ixion', 'varuna', 'quaoar', 'sedna', 'methone', 'jupiter', ))
def readOutput(self):
"""Read and append output to the logBrowser"""
self.output.append(str(self.process.readAllStandardOutput()))
def readErrors(self):
"""Read and append errors to the logBrowser"""
self.output.append(self.formatErrorMsg(str(
self.process.readAllStandardError())))
def formatErrorMsg(self, msg):
"""Format error messages in red color"""
return self.formatMsg(msg, 'red')
def formatInfoMsg(self, msg):
"""Format informative messages in blue color"""
return self.formatMsg(msg, 'green')
def formatMsg(self, msg, color):
"""Format message with the given color"""
return '<font color="{}">{}</font>'.format(color, msg)
def build(self):
"""Main function calling vagrant to generate the vm"""
self.output.setText('')
self.output.append(self.formatInfoMsg('INFO:{}'.format(datetime.now())))
self.runbtn.setDisabled(True)
base = path.join(BASE, self.vmname.text())
try:
self.output.append(self.formatInfoMsg('INFO: Dir: {}'.format(base)))
makedirs(base)
except:
            self.output.append(self.formatErrorMsg('ERROR:Target Folder Exists'))
self.output.append(self.formatInfoMsg('INFO: Changed {}'.format(base)))
chdir(base)
try:
self.output.append(self.formatInfoMsg('INFO:Removing Vagrant file'))
remove(path.join(base, 'Vagrantfile'))
except:
self.output.append(self.formatErrorMsg('ERROR:Remove Vagrant file'))
        self.output.append(self.formatInfoMsg(' INFO: OK: Running Vagrant Init'))
cmd1 = getoutput('chrt --verbose -i 0 vagrant init', shell=True)
self.output.append(self.formatInfoMsg('INFO:OK:Completed Vagrant Init'))
self.output.append(self.formatInfoMsg('INFO: Command: {}'.format(cmd1)))
cfg = CONFIG.format(self.vmname.text(), self.vmname.text(),
self.chttps.currentText(), self.vmcode.currentText(),
self.vmcode.currentText(),
            'amd64' if self.vmarch.currentIndex() == 0 else 'i386',
            '\n'.join([
            ' config.vm.network :forwarded_port, host: {}, guest: {}'.format(
                a.strip(), a.strip())
                for a in str(self.portredirect.text()).split(',')]),
            VBOXGUI.format(self.ram.value(), self.cpu.value())
            if self.qckb3.isChecked() else '')
self.output.append(self.formatInfoMsg('INFO:OK:Config: {}'.format(cfg)))
with open(path.join(base, 'Vagrantfile'), 'w') as f:
f.write(cfg)
self.output.append(self.formatInfoMsg('INFO: Writing Vagrantfile'))
f.close()
proxy = APTGET_PROXY.format(self.aptproxy.text(), self.aptproxy.text(),
self.aptproxy.text(), self.aptproxy.text(), self.aptproxy.text(),
self.aptproxy.text())
prv = '\n'.join(('#!/usr/bin/env bash', '# -*- coding: utf-8 -*-',
linesep * 2, "PS1='\[\e[1;32m\][\u@\h \W]\$\[\e[0m\] ' ; HISTSIZE=5000",
'# Vagrant Bootstrap Provisioning generated by Vagrant Ninja!', linesep,
proxy if len(self.aptproxy.text()) >= 5 else '',
            'add-apt-repository -s -y {}'.format(str(self.aptppa.text()).strip())
            if str(self.aptppa.text()).strip() else '',
            'apt-get -V -u -m -y update' if self.qckb10.isChecked() else '',
            'apt-get -y -m dist-upgrade' if self.qckb11.isChecked() else '',
            'apt-get -y -m autoremove' if self.qckb13.isChecked() else '',
            'apt-get -y clean' if self.qckb12.isChecked() else '',
            'dpkg --configure -a' if self.qckb14.isChecked() else '',
            'apt-get -y -f install' if self.qckb14.isChecked() else '',
            'apt-get -y check' if self.qckb11.isChecked() else '',
'apt-get -y --force-yes install {}'.format(self.aptpkg.toPlainText()),
'pip install --verbose {}'.format(self.pippkg.toPlainText()),
'pip install --verbose -r {}'.format(self.requirements.text()),
'apt-get -y --force-yes -m install {}'.format(self.desktop), linesep,
'git config --global user.name "{}"'.format(getuser()),
'git config --global color.branch auto',
'git config --global color.diff auto',
'git config --global color.interactive auto',
'git config --global color.status auto',
'git config --global credential.helper cache',
'git config --global user.email "{}@gmail.com"'.format(getuser()),
'git config --global push.default simple',
'ufw status ; service ufw stop ; ufw disable ; swapoff --verbose --all',
'export LANGUAGE=en_US.UTF-8', 'export LANG=en_US.UTF-8',
'export LC_ALL=en_US.UTF-8', 'locale-gen en_US.UTF-8',
'dpkg-reconfigure locales', ))
self.output.append(self.formatInfoMsg('INFO:OK:Script: {}'.format(prv)))
with open(path.join(base, 'bootstrap.sh'), 'w') as f:
f.write(prv)
self.output.append(self.formatInfoMsg('INFO: Writing bootstrap.sh'))
f.close()
        # 0o775 is valid octal syntax on Python 2.6+ and Python 3 alike; the
        # old 0775 spelling is a SyntaxError on Python 3, so it cannot be
        # papered over with a runtime try/except
        chmod('bootstrap.sh', 0o775)
        self.output.append(self.formatInfoMsg('INFO: bootstrap.sh is 775'))
self.output.append(self.formatInfoMsg(''' INFO: OK:
Vagrant Up needs time, depends on your Internet Connection Speed !'''))
self.output.append(self.formatInfoMsg('INFO: OK: Running Vagrant Up !'))
self.process.start('{}vagrant up'.format('chrt --verbose -i 0 '
            if self.chrt.isChecked() else ''))
if not self.process.waitForStarted():
self.output.append(self.formatErrorMsg('ERROR: FAIL: Vagrant Fail'))
self.runbtn.setEnabled(True)
return
self.runbtn.setEnabled(True)
chdir(path.expanduser("~"))
def _process_finished(self):
"""finished sucessfully"""
self.output.append(self.formatInfoMsg('INFO:{}'.format(datetime.now())))
        if self.qckb2.isChecked():
LOG_FILE = path.join(BASE, self.vmname.text(), 'vagrant_ninja.log')
with open(LOG_FILE, 'w') as f:
self.output.append(self.formatInfoMsg('INFO: OK: Writing .LOG'))
f.write(self.output.toPlainText())
f.close()
        if self.qckb1.isChecked():
self.output.append(self.formatInfoMsg('INFO:Opening Target Folder'))
try:
startfile(BASE)
except:
Popen(["xdg-open", BASE])
chdir(path.expanduser("~"))
def vagrant_c(self, option):
        ' run the chosen menu option, kind of quick-mode '
self.output.setText('')
self.output.append(self.formatInfoMsg('INFO:{}'.format(datetime.now())))
self.runbtn.setDisabled(True)
chdir(path.abspath(
self.locator.get_service('explorer').get_current_project_item().path))
self.process.start('chrt --verbose -i 0 vagrant {}'.format(option))
if not self.process.waitForStarted():
self.output.append(self.formatErrorMsg('ERROR: FAIL: Vagrant Fail'))
self.runbtn.setEnabled(True)
return
self.runbtn.setEnabled(True)
self.output.append(self.formatInfoMsg('INFO:{}'.format(datetime.now())))
chdir(path.expanduser("~"))
def finish(self):
' clear when finish '
self.process.kill()
###############################################################################
if __name__ == "__main__":
print(__doc__)
|
juancarlospaco/vagrant
|
main.py
|
Python
|
gpl-3.0
| 23,005
|
[
"VisIt"
] |
50707446f6ad223fca15e2a38386494efc65daa574c106be046ae9bba9f473ff
|
################################################################################
# Copyright (C) 2013-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
import functools
from bayespy.utils import misc
"""
This module contains a sketch of a new implementation of the framework.
"""
def message_sum_multiply(plates_parent, dims_parent, *arrays):
"""
Compute message to parent and sum over plates.
Divide by the plate multiplier.
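    For example (illustrative shapes only): with ``plates_parent = (3,)``,
    ``dims_parent = (2,)`` and input arrays broadcasting to shape
    ``(10, 3, 2)``, the extra plate axis of length 10 is summed out, the
    result is divided by the multiplier ``r = 10``, and the returned
    message has shape ``(3, 2)``.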
"""
# The shape of the full message
shapes = [np.shape(array) for array in arrays]
shape_full = misc.broadcasted_shape(*shapes)
# Find axes that should be summed
shape_parent = plates_parent + dims_parent
sum_axes = misc.axes_to_collapse(shape_full, shape_parent)
# Compute the multiplier for cancelling the
# plate-multiplier. Because we are summing over the
# dimensions already in this function (for efficiency), we
# need to cancel the effect of the plate-multiplier
# applied in the message_to_parent function.
r = 1
for j in sum_axes:
if j >= 0 and j < len(plates_parent):
r *= shape_full[j]
elif j < 0 and j < -len(dims_parent):
r *= shape_full[j]
# Compute the sum-product
m = misc.sum_multiply(*arrays,
axis=sum_axes,
sumaxis=True,
keepdims=True) / r
# Remove extra axes
m = misc.squeeze_to_dim(m, len(shape_parent))
return m
class Moments():
"""
Base class for defining the expectation of the sufficient statistics.
The benefits:
* Write statistic-specific features in one place only. For instance,
covariance from Gaussian message.
* Different nodes may have identically defined statistic so you need to
implement related features only once. For instance, Gaussian and
GaussianARD differ on the prior but the moments are the same.
* General processing nodes which do not change the type of the moments may
"inherit" the features from the parent node. For instance, slicing
operator.
* Conversions can be done easily in both of the above cases if the message
conversion is defined in the moments class. For instance,
GaussianMarkovChain to Gaussian and VaryingGaussianMarkovChain to
Gaussian.
"""
_converters = {}
class NoConverterError(Exception):
pass
def get_instance_converter(self, **kwargs):
"""Default converter within a moments class is an identity.
Override this method when moment class instances are not identical if
they have different attributes.
"""
if len(kwargs) > 0:
raise NotImplementedError(
"get_instance_converter not implemented for class {0}"
.format(self.__class__.__name__)
)
return None
def get_instance_conversion_kwargs(self):
"""
Override this method when moment class instances are not identical if
they have different attributes.
"""
return {}
@classmethod
def add_converter(cls, moments_to, converter):
cls._converters = cls._converters.copy()
cls._converters[moments_to] = converter
return
def get_converter(self, moments_to):
"""
Finds conversion to another moments type if possible.
Note that a conversion from moments A to moments B may require
intermediate conversions. For instance: A->C->D->B. This method finds
the path which uses the least amount of conversions and returns that
path as a single conversion. If no conversion path is available, an
error is raised.
The search algorithm starts from the original moments class and applies
all possible converters to get a new list of moments classes. This list
is extended by adding recursively all parent classes because their
converters are applicable. Then, all possible converters are applied to
this list to get a new list of current moments classes. This is iterated
until the algorithm hits the target moments class or its subclass.
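        A minimal sketch (hypothetical moments classes; assumes an identity
        function would be a valid conversion)::
            class AMoments(Moments):
                pass
            class BMoments(Moments):
                pass
            AMoments.add_converter(BMoments, lambda X: X)
            convert = AMoments().get_converter(BMoments)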
"""
# Check if there is no need for a conversion
#
# TODO/FIXME: This isn't sufficient. Moments can have attributes that
# make them incompatible (e.g., ndim in GaussianMoments).
if isinstance(self, moments_to):
return lambda X: X
# Initialize variables
visited = set()
visited.add(self.__class__)
converted_list = [(self.__class__, [])]
# Each iteration step consists of two parts:
# 1) form a set of the current classes and all their parent classes
# recursively
# 2) from the current set, apply possible conversions to get a new set
# of classes
# Repeat these two steps until in step (1) you hit the target class.
while len(converted_list) > 0:
# Go through all parents recursively so we can then use all
# converters that are available
current_list = []
for (moments_class, converter_path) in converted_list:
if issubclass(moments_class, moments_to):
# Shortest conversion path found, return the resulting total
# conversion function
return misc.composite_function(converter_path)
current_list.append((moments_class, converter_path))
parents = list(moments_class.__bases__)
for parent in parents:
# Recursively add parents
for p in parent.__bases__:
                        if issubclass(p, Moments):
parents.append(p)
# Add un-visited parents
if issubclass(parent, Moments) and parent not in visited:
visited.add(parent)
current_list.append((parent, converter_path))
# Find all converters and extend the converter paths
converted_list = []
for (moments_class, converter_path) in current_list:
for (conv_mom_cls, conv) in moments_class._converters.items():
if conv_mom_cls not in visited:
visited.add(conv_mom_cls)
converted_list.append((conv_mom_cls,
converter_path + [conv]))
raise self.NoConverterError("No conversion defined from %s to %s"
% (self.__class__.__name__,
moments_to.__name__))
def compute_fixed_moments(self, x):
# This method can't be static because the computation of the moments may
# depend on, for instance, ndim in Gaussian arrays.
raise NotImplementedError("compute_fixed_moments not implemented for "
"%s"
% (self.__class__.__name__))
@classmethod
def from_values(cls, x):
raise NotImplementedError("from_values not implemented "
"for %s"
% (cls.__name__))
def ensureparents(func):
@functools.wraps(func)
def wrapper(self, *parents, **kwargs):
# Convert parents to proper nodes
if self._parent_moments is None:
raise ValueError(
"Parent moments must be defined for {0}"
.format(self.__class__.__name__)
)
parents = [
Node._ensure_moments(
parent,
moments.__class__,
**moments.get_instance_conversion_kwargs()
)
for (parent, moments) in zip(parents, self._parent_moments)
]
# parents = list(parents)
# for (ind, parent) in enumerate(parents):
# parents[ind] = self._ensure_moments(parent,
# self._parent_moments[ind])
# Run the function
return func(self, *parents, **kwargs)
return wrapper
class Node():
"""
Base class for all nodes.
mask
dims
plates
parents
children
name
Sub-classes must implement:
1. For computing the message to children:
get_moments(self):
2. For computing the message to parents:
_get_message_and_mask_to_parent(self, index)
Sub-classes may need to re-implement:
1. If they manipulate plates:
_compute_weights_to_parent(index, weights)
_plates_to_parent(self, index)
_plates_from_parent(self, index)
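    A minimal (hypothetical) sketch of a plate-preserving deterministic
    node, following the same pattern as the `Slice` node below:
        class Identity(Deterministic):
            def __init__(self, X, **kwargs):
                self._moments = X._moments
                self._parent_moments = (X._moments,)
                super().__init__(X, dims=X.dims, **kwargs)
            def _compute_moments(self, u):
                return u
            def _compute_message_to_parent(self, index, m, u):
                return m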
"""
# These are objects of the _parent_moments_class. If the default way of
# creating them is not correct, write your own creation code.
_moments = None
_parent_moments = None
plates = None
_id_counter = 0
@ensureparents
def __init__(self, *parents, dims=None, plates=None, name="",
notify_parents=True, plotter=None, plates_multiplier=None,
allow_dependent_parents=False):
self.parents = parents
self.dims = dims
self.name = name
self._plotter = plotter
if not allow_dependent_parents:
parent_id_list = []
for parent in parents:
parent_id_list = parent_id_list + list(parent._get_id_list())
if len(parent_id_list) != len(set(parent_id_list)):
raise ValueError("Parent nodes are not independent")
# Inform parent nodes
if notify_parents:
for (index,parent) in enumerate(self.parents):
parent._add_child(self, index)
# Check plates
parent_plates = [self._plates_from_parent(index)
for index in range(len(self.parents))]
if any(p is None for p in parent_plates):
raise ValueError("Method _plates_from_parent returned None")
# Get and validate the plates for this node
plates = self._total_plates(plates, *parent_plates)
if self.plates is None:
self.plates = plates
# By default, ignore all plates
self.mask = np.array(False)
# Children
self.children = set()
# Get and validate the plate multiplier
parent_plates_multiplier = [self._plates_multiplier_from_parent(index)
for index in range(len(self.parents))]
#if plates_multiplier is None:
# plates_multiplier = parent_plates_multiplier
plates_multiplier = self._total_plates(plates_multiplier,
*parent_plates_multiplier)
self.plates_multiplier = plates_multiplier
def get_pdf_nodes(self):
return tuple(
node
for (child, _) in self.children
for node in child._get_pdf_nodes_conditioned_on_parents()
)
def _get_pdf_nodes_conditioned_on_parents(self):
return self.get_pdf_nodes()
def _get_id_list(self):
"""
Returns the stochastic ID list.
This method is used to check that same stochastic nodes are not direct
parents of a node several times. It is only valid if there are
intermediate stochastic nodes.
To put it another way: each ID corresponds to one factor q(..) in the
posterior approximation. Different IDs mean different factors, thus they
mean independence. The parents must have independent factors.
Stochastic nodes should return their unique ID. Deterministic nodes
should return the IDs of their parents. Constant nodes should return
empty list of IDs.
"""
raise NotImplementedError()
@classmethod
def _total_plates(cls, plates, *parent_plates):
if plates is None:
# By default, use the minimum number of plates determined
# from the parent nodes
try:
return misc.broadcasted_shape(*parent_plates)
except ValueError:
raise ValueError(
"The plates of the parents do not broadcast: {0}".format(
parent_plates
)
)
else:
# Check that the parent_plates are a subset of plates.
for (ind, p) in enumerate(parent_plates):
if not misc.is_shape_subset(p, plates):
raise ValueError("The plates %s of the parents "
"are not broadcastable to the given "
"plates %s."
% (p,
plates))
return plates
@staticmethod
def _ensure_moments(node, moments_class, **kwargs):
try:
converter = node._moments.get_converter(moments_class)
except AttributeError:
from .constant import Constant
return Constant(
moments_class.from_values(node, **kwargs),
node
)
else:
node = converter(node)
converter = node._moments.get_instance_converter(**kwargs)
if converter is not None:
from .converters import NodeConverter
return NodeConverter(converter, node)
return node
def _compute_plates_to_parent(self, index, plates):
# Sub-classes may want to overwrite this if they manipulate plates
return plates
def _compute_plates_from_parent(self, index, plates):
# Sub-classes may want to overwrite this if they manipulate plates
return plates
def _compute_plates_multiplier_from_parent(self, index, plates_multiplier):
# TODO/FIXME: How to handle this properly?
return plates_multiplier
def _plates_to_parent(self, index):
return self._compute_plates_to_parent(index, self.plates)
def _plates_from_parent(self, index):
return self._compute_plates_from_parent(index,
self.parents[index].plates)
def _plates_multiplier_from_parent(self, index):
return self._compute_plates_multiplier_from_parent(
index,
self.parents[index].plates_multiplier
)
@property
def plates_multiplier(self):
""" Plate multiplier is applied to messages to parents """
return self.__plates_multiplier
@plates_multiplier.setter
def plates_multiplier(self, value):
# TODO/FIXME: Check that multiplier is consistent with plates
self.__plates_multiplier = value
return
def get_shape(self, ind):
return self.plates + self.dims[ind]
def _add_child(self, child, index):
"""
Add a child node.
Parameters
----------
child : node
index : int
The parent index of this node for the child node.
The child node recognizes its parents by their index
number.
"""
self.children.add((child, index))
def _remove_child(self, child, index):
"""
Remove a child node.
"""
self.children.remove((child, index))
def get_mask(self):
return self.mask
## def _get_message_mask(self):
## return self.mask
def _set_mask(self, mask):
# Sub-classes may overwrite this method if they have some other masks to
# be combined (for instance, observation mask)
self.mask = mask
def _update_mask(self):
# Combine masks from children
mask = np.array(False)
for (child, index) in self.children:
mask = np.logical_or(mask, child._mask_to_parent(index))
# Set the mask of this node
self._set_mask(mask)
if not misc.is_shape_subset(np.shape(self.mask), self.plates):
raise ValueError("The mask of the node %s has updated "
"incorrectly. The plates in the mask %s are not a "
"subset of the plates of the node %s."
% (self.name,
np.shape(self.mask),
self.plates))
# Tell parents to update their masks
for parent in self.parents:
parent._update_mask()
def _compute_weights_to_parent(self, index, weights):
"""Compute the mask used for messages sent to parent[index].
The mask tells which plates in the messages are active. This method is
used for obtaining the mask which is used to set plates in the messages
to parent to zero.
Sub-classes may want to overwrite this method if they do something to
plates so that the mask is somehow altered.
"""
return weights
def _mask_to_parent(self, index):
"""
Get the mask with respect to parent[index].
The mask tells which plate connections are active. The mask is "summed"
(logical or) and reshaped into the plate shape of the parent. Thus, it
can't be used for masking messages, because some plates have been summed
already. This method is used for propagating the mask to parents.
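        For example (illustrative): a mask of shape (10, 3) sent to a parent
        with plates (3,) is collapsed with a logical or over the axis of
        length 10, so the parent receives a mask of shape (3,).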
"""
mask = self._compute_weights_to_parent(index, self.mask) != 0
# Check the shape of the mask
plates_to_parent = self._plates_to_parent(index)
if not misc.is_shape_subset(np.shape(mask), plates_to_parent):
raise ValueError("In node %s, the mask being sent to "
"parent[%d] (%s) has invalid shape: The shape of "
"the mask %s is not a sub-shape of the plates of "
"the node with respect to the parent %s. It could "
"be that this node (%s) is manipulating plates "
"but has not overwritten the method "
"_compute_weights_to_parent."
% (self.name,
index,
self.parents[index].name,
np.shape(mask),
plates_to_parent,
self.__class__.__name__))
# "Sum" (i.e., logical or) over the plates that have unit length in
# the parent node.
parent_plates = self.parents[index].plates
s = misc.axes_to_collapse(np.shape(mask), parent_plates)
mask = np.any(mask, axis=s, keepdims=True)
mask = misc.squeeze_to_dim(mask, len(parent_plates))
return mask
def _message_to_child(self):
u = self.get_moments()
# Debug: Check that the message has appropriate shape
for (ui, dim) in zip(u, self.dims):
ndim = len(dim)
if ndim > 0:
if np.shape(ui)[-ndim:] != dim:
raise RuntimeError(
"A bug found by _message_to_child for %s: "
"The variable axes of the moments %s are not equal to "
"the axes %s defined by the node %s. A possible reason "
"is that the plates of the node are inferred "
"incorrectly from the parents, and the method "
"_plates_from_parents should be implemented."
% (self.__class__.__name__,
np.shape(ui)[-ndim:],
dim,
self.name))
if not misc.is_shape_subset(np.shape(ui)[:-ndim],
self.plates):
raise RuntimeError(
"A bug found by _message_to_child for %s: "
"The plate axes of the moments %s are not a subset of "
"the plate axes %s defined by the node %s."
% (self.__class__.__name__,
np.shape(ui)[:-ndim],
self.plates,
self.name))
else:
if not misc.is_shape_subset(np.shape(ui), self.plates):
raise RuntimeError(
"A bug found by _message_to_child for %s: "
"The plate axes of the moments %s are not a subset of "
"the plate axes %s defined by the node %s."
% (self.__class__.__name__,
np.shape(ui),
self.plates,
self.name))
return u
def _message_to_parent(self, index, u_parent=None):
# Compute the message, check plates, apply mask and sum over some plates
if index >= len(self.parents):
raise ValueError("Parent index larger than the number of parents")
# Compute the message and mask
(m, mask) = self._get_message_and_mask_to_parent(index, u_parent=u_parent)
mask = misc.squeeze(mask)
# Plates in the mask
plates_mask = np.shape(mask)
# The parent we're sending the message to
parent = self.parents[index]
# Plates with respect to the parent
plates_self = self._plates_to_parent(index)
# Plate multiplier of the parent
multiplier_parent = self._plates_multiplier_from_parent(index)
# Check if m is a logpdf function (for black-box variational inference)
if callable(m):
return m
# Compact the message to a proper shape
for i in range(len(m)):
# Empty messages are given as None. We can ignore those.
if m[i] is not None:
try:
r = self.broadcasting_multiplier(self.plates_multiplier,
multiplier_parent)
except:
raise ValueError("The plate multipliers are incompatible. "
"This node (%s) has %s and parent[%d] "
"(%s) has %s"
% (self.name,
self.plates_multiplier,
index,
parent.name,
multiplier_parent))
ndim = len(parent.dims[i])
# Source and target shapes
if ndim > 0:
dims = misc.broadcasted_shape(np.shape(m[i])[-ndim:],
parent.dims[i])
from_shape = plates_self + dims
else:
from_shape = plates_self
to_shape = parent.get_shape(i)
# Add variable axes to the mask
mask_i = misc.add_trailing_axes(mask, ndim)
# Apply mask and sum plate axes as necessary (and apply plate
# multiplier)
m[i] = r * misc.sum_multiply_to_plates(np.where(mask_i, m[i], 0),
to_plates=to_shape,
from_plates=from_shape,
ndim=0)
return m
def _message_from_children(self, u_self=None):
msg = [np.zeros(shape) for shape in self.dims]
#msg = [np.array(0.0) for i in range(len(self.dims))]
isfunction = None
for (child,index) in self.children:
m = child._message_to_parent(index, u_parent=u_self)
if callable(m):
if isfunction is False:
raise NotImplementedError()
elif isfunction is None:
msg = m
else:
def join(m1, m2):
return (m1[0] + m2[0], m1[1] + m2[1])
                    # bind the current m and msg eagerly; a plain closure
                    # would capture msg by reference and recurse into itself
                    msg = lambda x, m=m, prev=msg: join(m(x), prev(x))
isfunction = True
else:
if isfunction is True:
raise NotImplementedError()
else:
isfunction = False
for i in range(len(self.dims)):
if m[i] is not None:
# Check broadcasting shapes
sh = misc.broadcasted_shape(self.get_shape(i), np.shape(m[i]))
try:
# Try exploiting broadcasting rules
msg[i] += m[i]
except ValueError:
msg[i] = msg[i] + m[i]
return msg
def _message_from_parents(self, exclude=None):
return [list(parent._message_to_child())
if ind != exclude else
None
for (ind,parent) in enumerate(self.parents)]
def get_moments(self):
raise NotImplementedError()
def delete(self):
"""
Delete this node and the children
"""
for (ind, parent) in enumerate(self.parents):
parent._remove_child(self, ind)
for (child, _) in self.children:
child.delete()
@staticmethod
def broadcasting_multiplier(plates, *args):
return misc.broadcasting_multiplier(plates, *args)
## """
## Compute the plate multiplier for given shapes.
## The first shape is compared to all other shapes (using NumPy
## broadcasting rules). All the elements which are non-unit in the first
## shape but 1 in all other shapes are multiplied together.
## This method is used, for instance, for computing a correction factor for
## messages to parents: If this node has non-unit plates that are unit
## plates in the parent, those plates are summed. However, if the message
## has unit axis for that plate, it should be first broadcasted to the
## plates of this node and then summed to the plates of the parent. In
## order to avoid this broadcasting and summing, it is more efficient to
## just multiply by the correct factor. This method computes that
## factor. The first argument is the full plate shape of this node (with
## respect to the parent). The other arguments are the shape of the message
## array and the plates of the parent (with respect to this node).
## """
## # Check broadcasting of the shapes
## for arg in args:
## misc.broadcasted_shape(plates, arg)
## # Check that each arg-plates are a subset of plates?
## for arg in args:
## if not misc.is_shape_subset(arg, plates):
## raise ValueError("The shapes in args are not a sub-shape of "
## "plates.")
## r = 1
## for j in range(-len(plates),0):
## mult = True
## for arg in args:
## # if -j <= len(arg) and arg[j] != 1:
## if not (-j > len(arg) or arg[j] == 1):
## mult = False
## if mult:
## r *= plates[j]
## return r
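    ## Illustrative example: plates=(10, 3), message shape (1, 3), parent
    ## plates (3,): the axis of length 10 is non-unit in plates but unit
    ## (or missing) in the other shapes, so the multiplier is r = 10.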
def move_plates(self, from_plate, to_plate):
return _MovePlate(self,
from_plate,
to_plate,
name=self.name + ".move_plates")
def add_plate_axis(self, to_plate):
return AddPlateAxis(to_plate)(self,
name=self.name+".add_plate_axis")
def __getitem__(self, index):
return Slice(self, index,
name=(self.name+".__getitem__"))
def has_plotter(self):
"""
Return True if the node has a plotter
"""
return callable(self._plotter)
def set_plotter(self, plotter):
self._plotter = plotter
def plot(self, fig=None, **kwargs):
"""
Plot the node distribution using the plotter of the node
Because the distributions are in general very difficult to plot, the
user must specify some functions which performs the plotting as
wanted. See, for instance, bayespy.plot.plotting for available plotters,
that is, functions that perform plotting for a node.
"""
if fig is None:
import matplotlib.pyplot as plt
fig = plt.gcf()
if callable(self._plotter):
ax = self._plotter(self, fig=fig, **kwargs)
fig.suptitle('q(%s)' % self.name)
return ax
else:
raise Exception("No plotter defined, can not plot")
@staticmethod
def _compute_message(*arrays, plates_from=(), plates_to=(), ndim=0):
"""
A general function for computing messages by sum-multiply
The function computes the product of the input arrays and then sums to
the requested plates.
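        For example (illustrative): if the input arrays broadcast to shape
        ``(10, 3, 2)`` with ``ndim=1``, ``plates_from=(10, 3)`` and
        ``plates_to=(3,)``, the plate axis of length 10 is summed over in
        the einsum and the returned message has shape ``(3, 2)``.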
"""
# Check that the plates broadcast properly
if not misc.is_shape_subset(plates_to, plates_from):
raise ValueError("plates_to must be broadcastable to plates_from")
# Compute the explicit shape of the product
shapes = [np.shape(array) for array in arrays]
arrays_shape = misc.broadcasted_shape(*shapes)
# Compute plates and dims that are present
if ndim == 0:
arrays_plates = arrays_shape
dims = ()
else:
arrays_plates = arrays_shape[:-ndim]
dims = arrays_shape[-ndim:]
# Compute the correction term. If some of the plates that should be
# summed are actually broadcasted, one must multiply by the size of the
# corresponding plate
r = Node.broadcasting_multiplier(plates_from, arrays_plates, plates_to)
# For simplicity, make the arrays equal ndim
arrays = misc.make_equal_ndim(*arrays)
# Keys for the input plates: (N-1, N-2, ..., 0)
nplates = len(arrays_plates)
in_plate_keys = list(range(nplates-1, -1, -1))
# Keys for the output plates
out_plate_keys = [key
for key in in_plate_keys
if key < len(plates_to) and plates_to[-key-1] != 1]
# Keys for the dims
dim_keys = list(range(nplates, nplates+ndim))
# Total input and output keys
in_keys = len(arrays) * [in_plate_keys + dim_keys]
out_keys = out_plate_keys + dim_keys
# Compute the sum-product with correction
einsum_args = misc.zipper_merge(arrays, in_keys) + [out_keys]
y = r * np.einsum(*einsum_args)
        # Reshape the result to the target plates (the correction factor r
        # was already applied above)
nplates_result = min(len(plates_to), len(arrays_plates))
if nplates_result == 0:
plates_result = []
else:
plates_result = [min(plates_to[ind], arrays_plates[ind])
for ind in range(-nplates_result, 0)]
y = np.reshape(y, plates_result + list(dims))
return y
from .deterministic import Deterministic
def slicelen(s, length=None):
if length is not None:
s = slice(*(s.indices(length)))
return max(0, misc.ceildiv(s.stop - s.start, s.step))
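# Sanity sketch (assumes misc.ceildiv rounds up):
#   slicelen(slice(2, 10, 3))        -> 3   (indices 2, 5, 8)
#   slicelen(slice(None), length=5)  -> 5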
class Slice(Deterministic):
"""
Basic slicing for plates.
Slicing occurs when index is a slice object (constructed by start:stop:step
notation inside of brackets), an integer, or a tuple of slice objects and
integers.
    Currently, accepts slices, newaxis, ellipsis and integers. It does not,
    for instance, accept lists/tuples to pick multiple indices of the same
    axis.
    An ellipsis expands to the number of : objects needed to make a selection
    tuple of the same length as x.ndim. Only the first ellipsis is expanded;
    any others are interpreted as :.
Similar to:
http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#basic-slicing
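    For example (illustrative plate shapes): if ``X`` has plates ``(10, 3)``,
    then ``X[2:5]`` has plates ``(3, 3)``, ``X[..., 0]`` has plates ``(10,)``
    and ``X[:, None]`` has plates ``(10, 1, 3)``.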
"""
def __init__(self, X, slices, **kwargs):
self._moments = X._moments
self._parent_moments = (X._moments,)
# Force a list
if not isinstance(slices, tuple):
slices = [slices]
else:
slices = list(slices)
#
# Expand Ellipsis
#
# Compute the number of required axes and how Ellipsis is expanded
num_axis = 0
ellipsis_index = None
for (k, s) in enumerate(slices):
if misc.is_scalar_integer(s) or isinstance(s, slice):
num_axis += 1
elif s is None:
pass
elif s is Ellipsis:
# Index is an ellipsis, e.g., [...]
if ellipsis_index is None:
# Expand ...
ellipsis_index = k
else:
# Interpret ... as :
num_axis += 1
slices[k] = slice(None)
else:
raise TypeError("Invalid argument type: {0}".format(s.__class__))
if num_axis > len(X.plates):
raise IndexError("Too many indices")
# The number of plates that were not given explicit slicing (either
# Ellipsis was used or the number of slices was smaller than the number
# of plate axes)
expand_len = len(X.plates) - num_axis
if ellipsis_index is not None:
# Replace Ellipsis with correct number of :
k = ellipsis_index
del slices[k]
slices = slices[:k] + [slice(None)] * expand_len + slices[k:]
else:
# Add trailing : so that each plate has explicit slicing
slices = slices + [slice(None)] * expand_len
#
# Preprocess indexing:
# - integer indices to non-negative values
# - slice start/stop values to non-negative
# - slice start/stop values based on the size of the plate
#
# Index for parent plates
j = 0
for (k, s) in enumerate(slices):
if misc.is_scalar_integer(s):
# Index is an integer, e.g., [3]
if s < 0:
# Handle negative index
s += X.plates[j]
if s < 0 or s >= X.plates[j]:
raise IndexError("Index out of range")
# Store the preprocessed integer index
slices[k] = s
j += 1
elif isinstance(s, slice):
# Index is a slice, e.g., [2:6]
# Normalize the slice
s = slice(*(s.indices(X.plates[j])))
if slicelen(s) <= 0:
raise IndexError("Slicing leads to empty plates")
slices[k] = s
j += 1
self.slices = slices
super().__init__(X,
dims=X.dims,
**kwargs)
def _plates_to_parent(self, index):
return self.parents[index].plates
def _plates_from_parent(self, index):
plates = list(self.parents[index].plates)
# Compute the plates. Note that Ellipsis has already been preprocessed
# to a proper number of :
k = 0
for s in self.slices:
# Then, each case separately: slice, newaxis, integer
if isinstance(s, slice):
# Slice, e.g., [2:5]
N = slicelen(s)
if N <= 0:
raise IndexError("Slicing leads to empty plates")
plates[k] = N
k += 1
elif s is None:
# [np.newaxis]
plates = plates[:k] + [1] + plates[k:]
k += 1
elif misc.is_scalar_integer(s):
# Integer, e.g., [3]
del plates[k]
else:
raise RuntimeError("BUG: Unknown index type. Should capture earlier.")
return tuple(plates)
@staticmethod
def __reverse_indexing(slices, m_child, plates, dims):
"""
A helpful function for performing reverse indexing/slicing
"""
j = -1 # plate index for parent
i = -1 # plate index for child
child_slices = ()
parent_slices = ()
msg_plates = ()
# Compute plate axes in the message from children
ndim = len(dims)
if ndim > 0:
m_plates = np.shape(m_child)[:-ndim]
else:
m_plates = np.shape(m_child)
for s in reversed(slices):
if misc.is_scalar_integer(s):
# Case: integer
parent_slices = (s,) + parent_slices
msg_plates = (plates[j],) + msg_plates
j -= 1
elif s is None:
# Case: newaxis
if -i <= len(m_plates):
child_slices = (0,) + child_slices
i -= 1
elif isinstance(s, slice):
# Case: slice
if -i <= len(m_plates):
child_slices = (slice(None),) + child_slices
parent_slices = (s,) + parent_slices
if ((-i > len(m_plates) or m_plates[i] == 1)
and slicelen(s) == plates[j]):
# Broadcasting can be applied. The message does not need
# to be explicitly shaped to the full size
msg_plates = (1,) + msg_plates
else:
# No broadcasting. Must explicitly form the full size
# axis
msg_plates = (plates[j],) + msg_plates
j -= 1
i -= 1
else:
raise RuntimeError("BUG: Unknown index type. Should capture earlier.")
# Set the elements of the message
m_parent = np.zeros(msg_plates + dims)
if np.ndim(m_parent) == 0 and np.ndim(m_child) == 0:
m_parent = m_child
elif np.ndim(m_parent) == 0:
m_parent = m_child[child_slices]
elif np.ndim(m_child) == 0:
m_parent[parent_slices] = m_child
else:
m_parent[parent_slices] = m_child[child_slices]
return m_parent
def _compute_weights_to_parent(self, index, weights):
"""
Compute the mask to the parent node.
"""
if index != 0:
raise ValueError("Invalid index")
parent = self.parents[0]
return self.__reverse_indexing(self.slices,
weights,
parent.plates,
())
def _compute_message_to_parent(self, index, m, u):
"""
Compute the message to a parent node.
"""
if index != 0:
raise ValueError("Invalid index")
parent = self.parents[0]
# Apply reverse indexing for the message arrays
msg = [self.__reverse_indexing(self.slices,
m_child,
parent.plates,
dims)
for (m_child, dims) in zip(m, parent.dims)]
return msg
def _compute_moments(self, u):
"""
Get the moments with an added plate axis.
"""
# Process each moment
for n in range(len(u)):
# Compute the effective plates in the message/moment
ndim = len(self.dims[n])
if ndim > 0:
shape = np.shape(u[n])[:-ndim]
else:
shape = np.shape(u[n])
# Construct a list of slice objects
u_slices = []
# Index for the shape
j = -len(self.parents[0].plates)
for (k, s) in enumerate(self.slices):
if s is None:
# [np.newaxis]
if -j < len(shape):
# Only add newaxis if there are some axes before
# this. It does not make any difference if you added
# leading unit axes
u_slices.append(s)
else:
# slice or integer index
if -j <= len(shape):
# The moment has this axis, so it is not broadcasting it
if shape[j] != 1:
# Use the slice as it is
u_slices.append(s)
elif isinstance(s, slice):
# Slice.
# The moment is using broadcasting, just pick the
# first element but use slice in order to keep the
# axis
u_slices.append(slice(0,1,1))
else:
# Integer.
# The moment is using broadcasting, just pick the
# first element
u_slices.append(0)
j += 1
# Slice the message/moment
u[n] = u[n][tuple(u_slices)]
return u
def AddPlateAxis(to_plate):
if to_plate >= 0:
raise Exception("Give negative value for axis index to_plate.")
class _AddPlateAxis(Deterministic):
def __init__(self, X, **kwargs):
nonlocal to_plate
N = len(X.plates) + 1
# Check the parameters
if to_plate >= 0 or to_plate < -N:
raise ValueError("Invalid plate position to add.")
# Use positive indexing only
## if to_plate < 0:
## to_plate += N
# Use negative indexing only
if to_plate >= 0:
to_plate -= N
#self.to_plate = to_plate
super().__init__(X,
dims=X.dims,
**kwargs)
def _plates_to_parent(self, index):
plates = list(self.plates)
plates.pop(to_plate)
return tuple(plates)
#return self.plates[:to_plate] + self.plates[(to_plate+1):]
def _plates_from_parent(self, index):
plates = list(self.parents[index].plates)
            # to_plate is negative, so the new unit plate goes at that
            # position counted from the end of the new plate tuple
            plates.insert(len(plates) + to_plate + 1, 1)
return tuple(plates)
def _compute_weights_to_parent(self, index, weights):
# Remove the added mask plate
if abs(to_plate) <= np.ndim(weights):
                sh_weights = list(np.shape(weights))
                sh_weights.pop(to_plate)
weights = np.reshape(weights, sh_weights)
return weights
def _compute_message_to_parent(self, index, m, *u_parents):
"""
Compute the message to a parent node.
"""
# Remove the added message plate
for i in range(len(m)):
# Remove the axis
if np.ndim(m[i]) >= abs(to_plate) + len(self.dims[i]):
axis = to_plate - len(self.dims[i])
sh_m = list(np.shape(m[i]))
sh_m.pop(axis)
m[i] = np.reshape(m[i], sh_m)
return m
def _compute_moments(self, u):
"""
Get the moments with an added plate axis.
"""
# Get parents' moments
#u = self.parents[0].message_to_child()
# Move a plate axis
u = list(u)
for i in range(len(u)):
# Make sure the moments have all the axes
#diff = len(self.plates) + len(self.dims[i]) - np.ndim(u[i]) - 1
#u[i] = misc.add_leading_axes(u[i], diff)
# The location of the new axis/plate:
axis = np.ndim(u[i]) - abs(to_plate) - len(self.dims[i]) + 1
if axis > 0:
# Add one axes to the correct position
sh_u = list(np.shape(u[i]))
sh_u.insert(axis, 1)
u[i] = np.reshape(u[i], sh_u)
return u
return _AddPlateAxis
class NodeConstantScalar(Node):
@staticmethod
def compute_fixed_u_and_f(x):
""" Compute u(x) and f(x) for given x. """
return ([x], 0)
def __init__(self, a, **kwargs):
self.u = [a]
        # a constant node has no parent nodes
        super().__init__(plates=np.shape(a),
dims=[()],
**kwargs)
def start_optimization(self):
# FIXME: Set the plate sizes appropriately!!
x0 = self.u[0]
#self.gradient = np.zeros(np.shape(x0))
def transform(x):
# E.g., for positive scalars you could have exp here.
self.gradient = np.zeros(np.shape(x0))
self.u[0] = x
def gradient():
# This would need to apply the gradient of the
# transformation to the computed gradient
return self.gradient
return (x0, transform, gradient)
def add_to_gradient(self, d):
self.gradient += d
def message_to_child(self, gradient=False):
if gradient:
return (self.u, [ [np.ones(np.shape(self.u[0])),
#self.gradient] ])
self.add_to_gradient] ])
else:
return self.u
def stop_optimization(self):
#raise Exception("Not implemented for " + str(self.__class__))
pass
|
bayespy/bayespy
|
bayespy/inference/vmp/nodes/node.py
|
Python
|
mit
| 47,028
|
[
"Gaussian"
] |
23ecd969aeeb3ea95896f218f4adc69034a217238d070247e0ae9ff10e0efc55
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for statistical operations
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.stats as stats
def norm(x, mu, sigma=1.0):
""" Scipy norm function """
return stats.norm(loc=mu, scale=sigma).pdf(x)
def ln_norm(x, mu, sigma=1.0):
""" Natural log of scipy norm function truncated at zero """
return np.log(stats.norm(loc=mu, scale=sigma).pdf(x))
def lognorm(x, mu, sigma=1.0):
""" Log-normal function from scipy """
return stats.lognorm(sigma, scale=mu).pdf(x)
def log10norm(x, mu, sigma=1.0):
""" Scale scipy lognorm from natural log to base 10
x : input parameter
    mu : peak of the distribution (the mean of the underlying log10
         gaussian is log10(mu))
    sigma : standard deviation of the underlying log10 gaussian
"""
return stats.lognorm(sigma * np.log(10), scale=mu).pdf(x)
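# Sketch of the dex conversion used above: if y = log10(x) is normal with
# standard deviation sigma (in dex), then ln(x) = ln(10) * y is normal with
# standard deviation sigma * ln(10), which is the shape parameter that
# scipy.stats.lognorm expects.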
def ln_log10norm(x, mu, sigma=1.0):
""" Natural log of base 10 lognormal """
return np.log(stats.lognorm(sigma * np.log(10), scale=mu).pdf(x))
def gauss(x, mu, sigma=1.0):
"""Gaussian """
s2 = sigma * sigma
return 1. / np.sqrt(2 * s2 * np.pi) * np.exp(-(x - mu) * (x - mu) / (2 * s2))
def lngauss(x, mu, sigma=1.0):
"""Natural log of a Gaussian"""
s2 = sigma * sigma
return -0.5 * np.log(2 * s2 * np.pi) - np.power(x - mu, 2) / (2 * s2)
def lgauss(x, mu, sigma=1.0, logpdf=False):
""" Log10 normal distribution...
Parameters
----------
x : `numpy.array` or list
Parameter of interest for scanning the pdf
mu : float
Peak of the lognormal distribution (mean of the underlying
normal distribution is log10(mu))
sigma : float
Standard deviation of the underlying normal distribution
logpdf : bool
Define the PDF in log space
Returns
-------
vals : `numpy.array`
Output values, same shape as x
"""
x = np.array(x, ndmin=1)
lmu = np.log10(mu)
s2 = sigma * sigma
lx = np.zeros(x.shape)
v = np.zeros(x.shape)
lx[x > 0] = np.log10(x[x > 0])
v = 1. / np.sqrt(2 * s2 * np.pi) * np.exp(-(lx - lmu)**2 / (2 * s2))
if not logpdf:
v /= (x * np.log(10.))
if x.size > 1:
v[x <= 0] = -np.inf
return v
def lnlgauss(x, mu, sigma=1.0, logpdf=False):
"""Log-likelihood of the natural log of a Gaussian
"""
x = np.array(x, ndmin=1)
lmu = np.log10(mu)
s2 = sigma * sigma
lx = np.zeros(x.shape)
v = np.zeros(x.shape)
mask = x > 0
inv_mask = np.invert(mask)
lx[mask] = np.log10(x[mask])
v = -0.5 * np.log(2 * s2 * np.pi) - np.power(lx - lmu, 2) / (2 * s2)
if not logpdf:
v -= 2.302585 * lx + np.log(np.log(10.))
if inv_mask.any():
v[inv_mask] = -np.inf
return v
|
kadrlica/dmsky
|
dmsky/utils/stat_funcs.py
|
Python
|
mit
| 2,830
|
[
"Gaussian"
] |
329c0198001981ddd00a1685ac037cdf76bb296fa8680ffb605ebaeb92b2ca7c
|
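A quick consistency check, not from the repo, assuming the module above is importable as dmsky.utils.stat_funcs: for positive x, lnlgauss is the natural log of lgauss by construction.
import numpy as np
from dmsky.utils import stat_funcs as sf  # assumed import path (matches the file's location)

x = np.array([0.5, 1.0, 2.0, 10.0])
pdf = sf.lgauss(x, mu=1.0, sigma=0.25)
assert np.allclose(np.log(pdf), sf.lnlgauss(x, mu=1.0, sigma=0.25))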
#!/usr/bin/python
import os, re
import plmd
class Setup (plmd.PLMD_module):
def __init__(self, config):
# Load the config file
self.config = plmd.PLMD_Config( config )
# Confirm with user
self.printStage("Step 1: Starting up PLMD. Submission file parameters:")
# Add GPU to nodecontrol if applicable
if self.config.gpuEnabled == True:
self.config.nodeControl += ":gpus="+str(self.config.gpuCores)
# AMD setup variable
self.aMDinput = ""
print "\n== Submission Parameters"
print "========================"
print "submissionName: " + self.config.name
print "nodeControl: " + self.config.nodeControl
print "wallClock: " + self.config.wallClock
print "mdRuns: " + self.config.mdRuns
# Confirmation from user
if self.config.quiet == False:
var = raw_input("\nPlease confirm these submission parameters with any key press. Press 'n' to discontinue")
if var == 'n':
raise Exception('Submission was not confirmed')
# Create submission file for submitting case to HPC queue
def hpcCreateSubmission( self, caseName ):
# Create in-files
self.amberCreateInput( caseName )
# User information
self.printStage( "Stage 3, Case: "+caseName+". Creating HPC submission files" )
caseID = caseName.split("/")[-1]
# Create new submission file
TEMPLATE = ""
if self.config.gpuEnabled == True:
TEMPLATE = open( self.config.PLMDHOME+"/src/templates/explicit_gpu_submit.txt", 'r')
else:
TEMPLATE = open( self.config.PLMDHOME+"/src/templates/explicit_submit.txt", 'r')
# What to call the logfile
amdLogFile = ""
if self.config.amdEnabled == True:
amdLogFile = "outAMD"
else:
amdLogFile = "outMD"
# Replace stuff within
TEMP = TEMPLATE.read().replace("[FOLDER]", caseName ). \
replace("[NAME]", self.config.name+"_"+caseID ). \
replace("[CPUCONTROL]", self.config.nodeControl ). \
replace("[WALLCLOCK]", self.config.wallClock ). \
replace("[MDRUNS]", self.config.mdRuns ). \
replace("[LOGFILENAME]", amdLogFile )
TEMPLATE.close()
# Write the submission file
FILE = open(caseName+"/submit_run.sh","w");
FILE.write( TEMP );
FILE.close();
print "Create submission file: "+caseName+"/submit_run.sh"
# Create MMPBSA Submission file
def hpcMMPBSASubmissionCreate(self, caseName ):
# Create in-files
self.amberCreateInput( caseName )
# Run ante-MMPBSA.py
self.runAnteMMPBSA(caseName)
# User information
self.printStage( "Stage 3, Case: "+caseName+". Creating HPC MMPBSA submission files." )
caseID = caseName.split("/")[-1]
# Add all trajectory files to ptraj script
self.num_files = self.getNumberOfFiles( caseName+'/md-files/' )
complexFiles = ""
for i in range(1,self.num_files):
complexFiles += caseName+'/md-files/equil'+ str(i)+ ".mdcrd "
# Replace stuff within
TEMPLATE = open( self.config.PLMDHOME+"/src/templates/mmpbsa_submit.txt", 'r')
TEMP = TEMPLATE.read().replace("[FOLDER]", caseName ). \
replace("[NAME]", self.config.name+"_"+caseID ). \
replace("[CPUCONTROL]", self.config.nodeControl ). \
replace("[WALLCLOCK]", self.config.wallClock ). \
replace("[CASEID]", str(caseName.split("/")[-1]) ). \
replace("[COMPLEXFILES]", complexFiles )
TEMPLATE.close()
# Create folder for this
self.createFolder( caseName+"/mmpbsa" , True )
# Write the submission file
FILE = open(caseName+"/submit_mmpbsa.sh","w");
FILE.write( TEMP );
FILE.close();
print "Create submission file: "+caseName+"/submit_mmpbsa.sh"
# Get number of files
def getNumberOfFiles( self, path ):
return len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and ".mdcrd" in f and "equil" in f] )
def runAnteMMPBSA(self, caseName):
# Delete old files?
#os.system("rm -rf "+caseName+"/md-files/complex.prmtop "+caseName+"/md-files/receptor.prmtop "+caseName+"/md-files/ligand.prmtop")
command = "ante-MMPBSA.py \
-p "+caseName+"/md-files/peptide.prmtop \
-c "+caseName+"/md-files/complex.prmtop \
-r "+caseName+"/md-files/receptor.prmtop \
-l "+caseName+"/md-files/ligand.prmtop \
-s \":WAT\" \
-n \""+self.qmRegion+"\""
os.system(command)
# Submit to HPC cluster
def hpcSubmission( self, caseName ):
# User information
self.printStage( "Stage 4, Case: "+caseName+". Submitting to HPC" )
# Do submission
os.system( "qsub "+caseName+"/submit_run.sh" )
# Submit MMPBSA run
def hpcMMPBSASubmission(self, caseName):
# User information
self.printStage( "Stage 4, MMPBSA Case: "+caseName+". Submitting to HPC" )
# Do submission
os.system( "qsub "+caseName+"/submit_mmpbsa.sh" )
# Create all the amber input files for a case
def amberCreateInput( self, caseName ):
# User information
self.printStage( "Stage 2, Case: "+caseName+". Creating Amber input files" )
# The template files for the amber imput files
templateFiles = [
self.config.PLMDHOME+"/src/templates/explicit_min.txt",
self.config.PLMDHOME+"/src/templates/explicit_heat.txt",
self.config.PLMDHOME+"/src/templates/explicit_equil.txt",
self.config.PLMDHOME+"/src/templates/explicit_mmpbsa.txt"
]
# Open the pdb file created by LEaP to find residues
self.ligandResnames = []
self.peptideResnames = []
peptides = 0
with open(caseName+"/pdb-files/finalLEaP_nowat.pdb",'r') as fl:
for line in fl:
# Check for TER commands
if "TER" in line:
# Increase count
peptides += 1
else:
# If we're not done with peptide, add resname
if peptides < self.config.peptideCount:
if line[17:20] not in self.peptideResnames:
self.peptideResnames.append( line[17:20] )
else:
if line[17:20] not in self.ligandResnames:
self.ligandResnames.append( line[17:20] )
# Set the QM region of this case
self.calcQMregion( caseName )
# Go through each template file
for templateFile in templateFiles:
# Enable quantum variable
if self.config.ligandCount <= 0 or self.config.qmEnable == False:
self.config.qmEnable = 0
else:
self.config.qmEnable = 1
# Special things on equilfile
if "equil" in templateFile:
# GPU Optimization
if self.config.gpuEnabled == True:
self.config.ntt = "3"
self.config.ntb = "1"
self.config.ntp = "0"
self.config.gamma_ln = "2.0"
# Enable aMD
if self.config.amdEnabled == True:
# Check for the latest equil log file to get aMD data. Otherwise abort
ePot, eDih = 0,0
for subdir,dirs,files in os.walk( caseName+"/md-logs/" ):
for filename in files:
if filename == "outMD1.log":
with open( subdir+filename , "r") as fi:
startSearch = False
for line in fi:
# Start Search
if "A V E R A G E S O V E R" in line:
startSearch = True
# Do Search, only take first
if startSearch == True:
m1 = re.search('EPtot(\s+?)=(\s+?)(-?\d+\.?\d+)', line)
if m1 and ePot == 0:
ePot = float(m1.group(3))
m2 = re.search('DIHED(\s+?)=(\s+?)(-?\d+\.?\d+)', line)
if m2 and eDih == 0:
eDih = float(m2.group(3))
# End Search
if "Density" in line:
startSearch = False
# Check that we found values
if ePot == 0 and eDih == 0:
raise Exception('To run aMD, an outMD1.log file must be present. This file is needed for information about the energies in the system.')
# Confirm aMD parameters
self.printStage( "Stage 2.5, Case: "+caseName+". aMD settings" )
# Get residues & atoms in the system
atoms, residues = 0,[]
with open( caseName+"/pdb-files/finalLEaP.pdb",'r' ) as fi:
for line in fi:
if "ATOM" in line:
atoms += 1
if line[17:20] in self.peptideResnames:
resID = str(int(line[22:26]))
if resID not in residues:
residues.append( resID )
# Input data
print "ATOMS: "+str(atoms)
print "RESIDUES: "+str(len(residues))
print "EPOT: "+str(ePot)
print "DIHED: "+str(eDih)
# alphaP Calc
self.alphaP = self.config.ePA * atoms
print "alphaP = "+str(self.config.ePA)+" * "+str(atoms)+" = "+str(self.alphaP)+" kcal mol-1"
# EthreshP Calc
self.EthreshP = ePot + self.alphaP
print "EthreshP = "+str(ePot)+" + "+str(self.alphaP)+" = "+str(self.EthreshP)+" kcal mol-1"
# EthreshD Calc
self.EthreshD = eDih + self.config.ePR * len(residues)
print "EthreshD = "+str(eDih)+" + "+str( self.config.ePR )+" * "+str(len(residues))+" = "+str(self.EthreshD)+" kcal mol-1"
# alphaD Calc
self.alphaD = self.config.aDf * self.config.ePR * len(residues)
print "alphaD = "+str( self.config.aDf ) + " * " + str(self.config.ePR) + " * "+str(len(residues))+" = "+str(self.alphaD)+" kcal mol-1"
# Create entry for the input file
self.aMDinput = ",iamd="+str(self.config.iamd)+\
",ethreshd="+str(self.EthreshD)+\
",alphad="+str(self.alphaD)+\
",ethreshp="+str(self.EthreshP)+\
",alphap="+str(self.alphaP)
print self.aMDinput
# Confirm
if self.config.quiet == False:
self.confirmProgress()
# Load templates, change variables, and save in case folder
TEMPLATE = open(templateFile, 'r')
TEMP = TEMPLATE.read().replace("[NTC]", self.config.ntc ). \
replace("[NTF]", self.config.ntf ). \
replace("[NTB]", self.config.ntb ). \
replace("[NTT]", self.config.ntt ). \
replace("[NTP]", self.config.ntp ). \
replace("[GAMMALN]", self.config.gamma_ln ). \
replace("[QMCHARGE]", self.config.qmCharge ). \
replace("[QMTHEORY]", self.config.qmTheory ). \
replace("[QMREGION]", self.qmRegion ). \
replace("[TIMESTEPS]", self.config.timestepNumber ). \
replace("[DT]", str(self.config.timestepSize) ). \
replace("[PEPTIDERESI]", str(self.peptideRegion) ). \
replace("[EABLEQM]", str(self.config.qmEnable) ). \
replace("[QMSHAKE]", self.config.qmShake ). \
replace("[AMDsetup]", self.aMDinput ). \
replace("[COMPLEXIDS]", self.complexids ). \
replace("[COMPLEXCHARGE]", str(self.config.qmCharge) ). \
replace("[LIGANDCHARGE]", str(self.config.qmCharge) ). \
replace("[INTERVAL]", str(self.config.mmpbsaInterval) ). \
replace("[TIMESTEPPERFRAME]", str(self.config.timestepPerFrame) )
# If not QM, delete qmmm dict from TEMP
if self.config.qmEnable == 0:
# Must be compiled first, so as to use DOTALL that will match newlines also
TEMP = re.sub(re.compile('&qmmm(.+)\s/\n', re.DOTALL), "", TEMP )
# Save the input file with same name, but change extension to .in
saveFile = os.path.basename(templateFile).split(".")[0]+".in"
FILE = open(caseName+"/in_files/"+saveFile,"w");
FILE.write( TEMP );
FILE.close();
# Function which analyses a final pdb file and figures out the QM region (ligand region)
def calcQMregion( self, caseName ):
# Open the pdb file created by LEaP
with open(caseName+"/pdb-files/finalLEaP_nowat.pdb",'r') as fl:
pdb = fl.readlines()
# Set QM & peptide region
qmRegion = []
peptideRegion = []
complexIDs = []
receptorIDs = []
ligandIDs = []
# Go through the file and find all residues having the resname of the ligand
for line in pdb:
if line[22:26]:
resID = str(int(line[22:26]))
if line[17:20] in self.ligandResnames:
qmRegion.append( resID )
if resID not in ligandIDs:
ligandIDs.append(resID)
elif line[17:20] in self.peptideResnames:
peptideRegion.append( resID )
if resID not in receptorIDs:
receptorIDs.append(resID)
if resID not in complexIDs:
complexIDs.append(resID)
# Define the region string, as per Amber specifications
if not qmRegion:
# List was empty, not QM region
self.qmRegion = ""
else:
# Set the QM region to the start-end ligand residues
self.qmRegion = ":"+qmRegion[0]+"-"+qmRegion[ len(qmRegion)-1 ]
# Set the peptide region
if peptideRegion:
self.peptideRegion = ":"+peptideRegion[0]+"-"+peptideRegion[-1]
else:
self.peptideRegion = ""
# If the QM region is set to be overwritten, return that overwrite
if self.config.qmRegionOverwrite != "false":
self.qmRegion = self.config.qmRegionOverwrite
# Get complex IDs for MMPBSA
self.complexids = ";".join(ligandIDs)
|
MathiasGruber/plmd
|
src/plmd/caseSubmit.py
|
Python
|
gpl-2.0
| 17,406
|
[
"ADF",
"Amber"
] |
b3205fb87d143cf5777d394babcc38c74c2ff71ebb289f306fd83e80314aa1a5
|
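A worked example (hypothetical numbers) of the aMD threshold arithmetic performed in amberCreateInput above:
atoms, residues = 25000, 120      # assumed system size
ePot, eDih = -65000.0, 950.0      # assumed averages parsed from outMD1.log
ePA, ePR, aDf = 0.16, 3.5, 0.2    # assumed config values
alphaP = ePA * atoms              # 4000.0 kcal/mol
EthreshP = ePot + alphaP          # -61000.0 kcal/mol
EthreshD = eDih + ePR * residues  # 950.0 + 420.0 = 1370.0 kcal/mol
alphaD = aDf * ePR * residues     # 84.0 kcal/mol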
#!/usr/bin/env python
"""
Ben Payne
ben.is.located@gmail.com
Yoga graph
Use:
This work is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
"""
import networkx as nx # format for directed graph
import yoga_db as ydb # nodes and edges of graph
#import yoga_lib as ylib # library of functions for acting on graph
DG = nx.DiGraph() # initialize directed graph using networkx
DG = ydb.pose_properties(DG) # load node properties
DG = ydb.pose_transitions(DG) # load edges
nx.nx_agraph.write_dot(DG, "all_nodes.gv")
# see https://dreampuf.github.io/GraphvizOnline
# to plot the graphviz
|
bhpayne/yoga_graph
|
src/generate_dot.py
|
Python
|
gpl-2.0
| 716
|
[
"VisIt"
] |
a86239b4a29151e7139a090f3779d83518b95fc788334d36bf8f7e4f479144b0
|
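The yoga_db module is not shown; a hypothetical sketch of the interface generate_dot.py assumes (pose names and attributes are made up):
import networkx as nx
DG = nx.DiGraph()
DG.add_node("mountain", difficulty=1)    # pose_properties presumably adds nodes like this
DG.add_node("forward_fold", difficulty=2)
DG.add_edge("mountain", "forward_fold")  # pose_transitions presumably adds edges like this
nx.nx_agraph.write_dot(DG, "all_nodes.gv")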
"""Test template support in VTK-Python
VTK-python decides which template specializations
to wrap according to which ones are used in typedefs
and which ones appear as superclasses of other classes.
In addition, the wrappers are hard-coded to wrap the
vtkDenseArray and vtkSparseArray classes over a broad
range of types.
Created on May 29, 2011 by David Gobbi
"""
import sys
import exceptions
import vtk
from vtk.test import Testing
arrayTypes = ['char', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', int, 'uint', 'int64', 'uint64',
'float32', float, str, 'unicode', vtk.vtkVariant]
arrayCodes = ['c', 'b', 'B', 'h', 'H',
'i', 'I', 'l', 'L', 'q', 'Q',
'f', 'd']
class TestTemplates(Testing.vtkTest):
def testDenseArray(self):
"""Test vtkDenseArray template"""
for t in (arrayTypes + arrayCodes):
a = vtk.vtkDenseArray[t]()
a.Resize(1)
i = vtk.vtkArrayCoordinates(0)
if t in ['bool', '?']:
value = 1
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
elif t in ['float32', 'float64', 'float', 'f', 'd']:
value = 3.125
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
elif t in ['char', 'c']:
value = 'c'
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
elif t in [str, 'str', 'unicode']:
value = unicode("hello")
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
elif t in ['vtkVariant', vtk.vtkVariant]:
value = vtk.vtkVariant("world")
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
else:
value = 12
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
def testSparseArray(self):
"""Test vtkSparseArray template"""
for t in (arrayTypes + arrayCodes):
a = vtk.vtkSparseArray[t]()
a.Resize(1)
i = vtk.vtkArrayCoordinates(0)
if t in ['bool', '?']:
value = 0
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
elif t in ['float32', 'float64', 'float', 'f', 'd']:
value = 3.125
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
elif t in ['char', 'c']:
value = 'c'
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
elif t in [str, 'str', 'unicode']:
value = unicode("hello")
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
elif t in ['vtkVariant', vtk.vtkVariant]:
value = vtk.vtkVariant("world")
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
else:
value = 12
a.SetValue(i, value)
result = a.GetValue(i)
self.assertEqual(value, result)
def testArray(self):
"""Test array CreateArray"""
o = vtk.vtkArray.CreateArray(vtk.vtkArray.DENSE, vtk.VTK_DOUBLE)
self.assertEqual(o.__class__, vtk.vtkDenseArray[float])
def testVector(self):
"""Test vector templates"""
# make sure Rect inherits operators
r = vtk.vtkRectf(0, 0, 2, 2)
self.assertEqual(r[2], 2.0)
c = vtk.vtkColor4ub()
self.assertEqual(list(c), [0, 0, 0, 255])
e = vtk.vtkVector['float32', 3]([0.0, 1.0, 2.0])
self.assertEqual(list(e), [0.0, 1.0, 2.0])
i = vtk.vtkVector3['i']()
self.assertEqual(list(i), [0, 0, 0])
if __name__ == "__main__":
Testing.main([(TestTemplates, 'test')])
|
daviddoria/PointGraphsPhase1
|
Common/Testing/Python/TestTemplates.py
|
Python
|
bsd-3-clause
| 4,339
|
[
"VTK"
] |
8eb473105aabdade8edb6288856936af230ac9e06b392565286e178d5d35a7fb
|
from paraview.simple import *
import tonic
from tonic import paraview as pv
dataset_destination_path = '/tmp/spherical'
# Initial ParaView scene setup
Cone()
Show()
view = Render()
# Choose data location
dh = tonic.DataHandler(dataset_destination_path)
camera = pv.create_spherical_camera(view, dh, range(0, 360, 30), range(-60, 61, 30))
# Create data
dh.registerData(name='image', type='blob', mimeType='image/png', fileName='.png')
# Loop over data
for pos in camera:
pv.update_camera(view, pos)
WriteImage(dh.getDataAbsoluteFilePath('image'))
# Write metadata
dh.writeDataDescriptor()
|
Kitware/tonic-data-generator
|
scripts/paraview/samples/camera-spherical.py
|
Python
|
bsd-3-clause
| 603
|
[
"ParaView"
] |
049e9c1cb5c54f71fb73bd7b0f34402ad2815b5567d9ae240b41c0e42cb36a82
|
import os
import sys
from setuptools import setup, find_packages
_here = os.path.dirname(__file__)
f = open(os.path.join(_here, 'README.md'), 'r')
README = f.read()
f.close()
install_requires = ['lxml']
if sys.version_info[0] == 2:
# python2 does not have mock in the standard lib
install_requires.append('mock')
setup(name="mp.importer",
version="0.1",
description="Utilities to ease imports of content to MetroPublisher.",
packages=find_packages(),
long_description=README,
license='BSD',
author="Vanguardistas LLC",
author_email='brian@vanguardistas.net',
install_requires=install_requires,
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
test_suite="mp.importer.tests",
)
|
kiarasky/mp.importer
|
setup.py
|
Python
|
mit
| 1,078
|
[
"Brian"
] |
152a563d826631d9dcb63465daf3e18060859779d7672074021fc7cc4fb1631f
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Application to convert AXT file to FASTA file. Reads an AXT file from standard
input and writes a FASTA file to standard out.
usage: %prog < axt_file > fasta_file
"""
__author__ = "Bob Harris (rsharris@bx.psu.edu)"
import sys
import bx.align.axt
def usage(s=None):
message = """
axt_to_fasta < axt_file > fasta_file
"""
if (s == None): sys.exit (message)
else: sys.exit ("%s\n%s" % (s,message))
def main():
# check the command line
if (len(sys.argv) > 1):
usage("give me no arguments")
# convert the alignment blocks
reader = bx.align.axt.Reader(sys.stdin,support_ids=True,\
species1="",species2="")
for a in reader:
if ("id" in a.attributes): id = a.attributes["id"]
else: id = None
print_component_as_fasta(a.components[0],id)
print_component_as_fasta(a.components[1],id)
print
# $$$ this should be moved to a bx.align.fasta module
def print_component_as_fasta(c,id=None):
header = ">%s_%s_%s" % (c.src,c.start,c.start+c.size)
if (id != None): header += " " + id
print header
print c.text
if __name__ == "__main__": main()
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/axt_to_fasta.py
|
Python
|
bsd-3-clause
| 1,174
|
[
"Galaxy"
] |
92bece6951854fc2460bb1dd028e10c1c062f849d11a16600f0496ff48e6e52f
|
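A tiny illustration (made-up coordinates) of the FASTA header format built by print_component_as_fasta above:
src, start, size = "hg18.chr1", 100, 50            # made-up AXT component fields
header = ">%s_%s_%s" % (src, start, start + size)  # same formula as the script
assert header == ">hg18.chr1_100_150"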
"""
media (:mod:`skrf.media.media`)
========================================
Media class.
.. autosummary::
:toctree: generated/
Media
DefinedGammaZ0
"""
from numbers import Number
import warnings
import numpy as npy
from numpy import real, imag, ones, any, gradient, array
from scipy import stats
from scipy.constants import c, inch, mil
from ..frequency import Frequency
from ..network import Network, connect
from .. import tlineFunctions as tf
from .. import mathFunctions as mf
from ..constants import NumberLike, to_meters, ZERO
from typing import Union
from abc import ABC, abstractmethod
import re
from copy import deepcopy as copy
class Media(ABC):
"""
Abstract Base Class for a single mode on a transmission line media.
This class is initialized with `frequency` and `z0` (the port impedance);
attributes shared by all media. Methods defined here make use of the
properties :
* `gamma` - (complex) media propagation constant
* `Z0` - (complex) media characteristic impedance
These define the properties of a specific media. Any sub-class of Media
must implement these properties. `gamma` and `Z0` should return
complex arrays of the same length as `frequency`. `gamma` must
follow the convention:
* positive real(gamma) = attenuation
* positive imag(gamma) = forward propagation
Parameters
----------
frequency : :class:`~skrf.frequency.Frequency` object or None
frequency band of this transmission line medium.
Defaults to None, which produces a 1-10ghz band with 101 points.
z0 : number, array-like, or None
the port impedance for media. Only needed if its different
from the characteristic impedance of the media.
If z0 is None then will default to Z0.
Default is None.
Note
----
The `z0` parameter (port impedance) is needed in some cases.
:class:`~skrf.media.rectangularWaveguide.RectangularWaveguide`
is an example where you may need this, because the
characteristic impedance is frequency dependent, but the
touchstone files created by most VNAs have z0=1 or 50. So to
prevent accidental impedance mismatch, you may want to manually
set the `z0`.
"""
def __init__(self, frequency: Union['Frequency', None] = None,
z0: Union[NumberLike, None] = None):
if frequency is None:
frequency = Frequency(1,10,101,'ghz')
self.frequency = frequency.copy()
self.z0 = z0
def mode(self, **kw) -> 'Media':
r"""
Create another mode in this medium.
Convenient way to return a copy of this Media object
with possibly different properties.
Parameters
----------
\*\*kwargs : keyword arguments passed to the copy
Returns
-------
copy : :class:`Media`
A copy of this Media object with \*\*kwargs attributes set
"""
out = copy(self)
for k in kw:
setattr(out, k, kw[k])
return out
def copy(self) -> 'Media':
"""
Copy of this Media object.
Returns
-------
copy : :class:`Media`
A copy of this Media object
"""
return copy(self)
def __eq__(self,other):
"""
Test for numerical equality (up to :data:`~skrf.constants.ZERO`).
"""
if self.frequency != other.frequency:
return False
if max(abs(self.Z0 - other.Z0)) > ZERO:
return False
if max(abs(self.gamma - other.gamma)) > ZERO:
return False
if max(abs(self.z0 - other.z0)) > ZERO:
return False
return True
def __len__(self) -> int:
"""
Length of frequency axis.
"""
return len(self.frequency)
@property
def npoints(self) -> int:
"""
Number of points of the frequency axis.
Returns
-------
npoints : int
Number of points of the frequency axis.
"""
return self.frequency.npoints
@npoints.setter
def npoints(self, val):
self.frequency.npoints = val
@property
def z0(self) -> npy.ndarray:
"""
Characteristic Impedance.
Returns
-------
z0 : :class:`numpy.ndarray`
"""
if self._z0 is None:
return self.Z0
return self._z0*ones(len(self))
@z0.setter
def z0(self, val):
self._z0 = val
@property
@abstractmethod
def gamma(self):
r"""
Propagation constant.
In skrf, defined as :math:`\gamma = \alpha + j \beta`.
Returns
-------
gamma : :class:`numpy.ndarray`
complex propagation constant for this media
Note
----
`gamma` must adhere to the following convention:
* positive real(gamma) = attenuation
* positive imag(gamma) = forward propagation
"""
return None
@property
def alpha(self) -> npy.ndarray:
"""
Real (attenuation) component of gamma.
Returns
-------
alpha : :class:`numpy.ndarray`
"""
return real(self.gamma)
@property
def beta(self) -> npy.ndarray:
"""
Imaginary (propagating) component of gamma.
Returns
-------
beta : :class:`numpy.ndarray`
"""
return imag(self.gamma)
@property
@abstractmethod
def Z0(self):
return None
@property
def v_p(self) -> npy.ndarray:
r"""
Complex phase velocity (in m/s).
.. math::
j \cdot \omega / \gamma
Note
----
The `j` is used so that real phase velocity corresponds to
propagation
where:
* :math:`\omega` is angular frequency (rad/s),
* :math:`\gamma` is complex propagation constant (rad/m)
Returns
-------
v_p : :class:`numpy.ndarray`
See Also
--------
propagation_constant
gamma
"""
return 1j*(self.frequency.w/self.gamma)
@property
def v_g(self):
r"""
Complex group velocity (in m/s).
.. math::
j \cdot d \omega / d \gamma
where:
* :math:`\omega` is angular frequency (rad/s),
* :math:`\gamma` is complex propagation constant (rad/m)
Note
----
the `j` is used to make propagation real; this is needed because
skrf defines gamma as :math:`\gamma= \alpha +j\beta`.
Returns
-------
v_g : :class:`numpy.ndarray`
References
----------
https://en.wikipedia.org/wiki/Group_velocity
See Also
--------
propagation_constant
v_p
gamma
"""
dw = self.frequency.dw
dk = gradient(self.gamma)
return dw/dk
def get_array_of(self, x):
try:
if len(x) != len(self):
raise ValueError('x must be a scalar or match the length of the frequency axis')
y = array(x)
except TypeError:
# x is a scalar: broadcast it along the frequency axis
y = x * ones(len(self))
return y
## Other Functions
def theta_2_d(self, theta: NumberLike, deg:bool = True, bc: bool = True) -> NumberLike:
r"""
Convert electrical length to physical distance.
The physical distance is given by :math:`d=\theta/\beta`.
The given electrical length can be given either at the center frequency
or on the entire band depending of the parameter `bc`.
Parameters
----------
theta : number
electrical length, at band center (see deg for unit)
deg : Boolean, optional
is theta in degrees?
Default is True (theta is assumed in degrees)
bc : bool, optional.
evaluate only at band center, or across the entire band?
Default is True (evaluation assumed at band center)
Returns
--------
d : number, array-like
physical distance in meters
"""
if deg == True:
theta = mf.degree_2_radian(theta)
gamma = self.gamma
if bc:
return 1.0*theta/npy.imag(gamma[int(gamma.size/2)])
else:
return 1.0*theta/npy.imag(gamma)
def electrical_length(self, d: NumberLike, deg: bool = False) -> NumberLike:
r"""
Calculate the complex electrical length for a given distance.
Electrical length is given by :math:`\theta=\gamma d`.
Parameters
----------
d: number or array-like
delay distance, in meters
deg: Boolean, optional
return electrical length in deg?
Default is False (returns electrical length in radians)
Returns
-------
theta: number or array-like
complex electrical length in radians or degrees, depending on
value of deg.
"""
gamma = self.gamma
if deg == False:
return gamma*d
elif deg == True:
return mf.radian_2_degree(gamma*d)
## Network creation
# lumped elements
def match(self, nports: int = 1, z0: Union[NumberLike, None] = None,
z0_norm: bool = False, **kwargs) -> Network:
r"""
Perfect matched load (:math:`\Gamma_0 = 0`).
Parameters
----------
nports : int
number of ports
z0 : number, or array-like or None
port impedance. Default is
None, in which case the Media's :attr:`z0` is used.
This sets the resultant Network's
:attr:`~skrf.network.Network.z0`.
z0_norm : bool
is z0 normalized to this media's `z0`?
\*\*kwargs : key word arguments
passed to :class:`~skrf.network.Network` initializer
Returns
-------
match : :class:`~skrf.network.Network` object
a n-port match
Examples
--------
>>> my_match = my_media.match(2,z0 = 50, name='Super Awesome Match')
"""
result = Network(**kwargs)
result.frequency = self.frequency
result.s = npy.zeros((self.frequency.npoints, nports, nports),\
dtype=complex)
if z0 is None:
z0 = self.z0
elif isinstance(z0, str):
z0 = parse_z0(z0)*self.z0
if z0_norm:
z0 = z0*self.z0
result.z0 = z0
return result
def load(self, Gamma0: NumberLike, nports: int = 1, **kwargs) -> Network:
r"""
Load of given reflection coefficient.
Parameters
----------
Gamma0 : number, array-like
Reflection coefficient of load (linear, not in dB). If it's
an array it must be of shape `kxnxn`, where k is the number of frequency
points in media, and n is `nports`
nports : int
number of ports
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
load : :class:`~skrf.network.Network` object
n-port load, where S = Gamma0*eye(...)
See Also
--------
match
open
short
"""
result = self.match(nports, **kwargs)
result.s = npy.array(Gamma0).reshape(-1, 1, 1) * \
npy.eye(nports, dtype=complex).reshape((-1, nports, nports)).\
repeat(self.frequency.npoints, 0)
#except(ValueError):
# for f in range(self.frequency.npoints):
# result.s[f,:,:] = Gamma0[f]*npy.eye(nports, dtype=complex)
return result
def short(self, nports: int = 1, **kwargs) -> Network:
r"""
Short (:math:`\Gamma_0 = -1`)
Parameters
----------
nports : int
number of ports
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
match : :class:`~skrf.network.Network` object
a n-port short circuit
See Also
--------
match
open
load
"""
return self.load(-1., nports, **kwargs)
def open(self, nports: int = 1, **kwargs) -> Network:
r"""
Open (:math:`\Gamma_0 = 1`).
Parameters
----------
nports : int
number of ports
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
match : :class:`~skrf.network.Network` object
a n-port open circuit
See Also
--------
match
load
short
"""
return self.load(1., nports, **kwargs)
def resistor(self, R: NumberLike, *args, **kwargs) -> Network:
r"""
Resistor.
Parameters
----------
R : number, array
Resistance , in Ohms. If this is an array, must be of
same length as frequency vector.
\*args, \*\*kwargs : arguments, key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
resistor : a 2-port :class:`~skrf.network.Network`
See Also
--------
match
short
open
load
capacitor
inductor
"""
result = self.match(nports=2, *args, **kwargs)
y= npy.zeros(shape=result.s.shape, dtype=complex)
y[:,0,0] = 1./R
y[:,1,1] = 1./R
y[:,0,1] = -1./R
y[:,1,0] = -1./R
result.y = y
return result
def capacitor(self, C: NumberLike, **kwargs) -> Network:
r"""
Capacitor.
Parameters
----------
C : number, array
Capacitance, in Farads. If this is an array, must be of
same length as frequency vector.
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
capacitor : a 2-port :class:`~skrf.network.Network`
See Also
--------
match
short
open
load
resistor
inductor
"""
result = self.match(nports=2, **kwargs)
w = self.frequency.w
y= npy.zeros(shape=result.s.shape, dtype=complex)
y[:,0,0] = 1j*w*C
y[:,1,1] = 1j*w*C
y[:,0,1] = -1j*w*C
y[:,1,0] = -1j*w*C
result.y = y
return result
def inductor(self, L: NumberLike, **kwargs) -> Network:
r"""
Inductor.
Parameters
----------
L : number, array
Inductance, in Henrys. If this is an array, must be of
same length as frequency vector.
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
inductor : a 2-port :class:`~skrf.network.Network`
See Also
--------
match
short
open
load
capacitor
resistor
"""
result = self.match(nports=2, **kwargs)
w = self.frequency.w
y = npy.zeros(shape=result.s.shape, dtype=complex)
y[:,0,0] = 1./(1j*w*L)
y[:,1,1] = 1./(1j*w*L)
y[:,0,1] = -1./(1j*w*L)
y[:,1,0] = -1./(1j*w*L)
result.y = y
return result
def impedance_mismatch(self, z1: NumberLike, z2: NumberLike, **kwargs) -> Network:
r"""
Two-port network for an impedance mismatch.
Parameters
----------
z1 : number, or array-like
complex impedance of port 1
z2 : number, or array-like
complex impedance of port 2
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
mismatch : :class:`~skrf.network.Network` object
a 2-port network representing the impedance mismatch
Notes
-----
If z1 and z2 are arrays, they must be of same length
as the :attr:`Media.frequency.npoints`
See Also
--------
match
short
open
load
capacitor
inductor
resistor
"""
result = self.match(nports=2, **kwargs)
gamma = tf.zl_2_Gamma0(z1,z2)
result.s[:,0,0] = gamma
result.s[:,1,1] = -gamma
result.s[:,1,0] = (1+gamma)*npy.sqrt(1.0*z1/z2)
result.s[:,0,1] = (1-gamma)*npy.sqrt(1.0*z2/z1)
return result
# splitter/couplers
def tee(self, **kwargs) -> Network:
r"""
Ideal, lossless tee. (3-port splitter).
Parameters
----------
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
tee : :class:`~skrf.network.Network` object
a 3-port splitter
See Also
----------
splitter : this just calls splitter(3)
match : called to create a 'blank' network
"""
return self.splitter(3,**kwargs)
def splitter(self, nports,**kwargs) -> Network:
r"""
Ideal, lossless n-way splitter.
Parameters
----------
nports : int
number of ports
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
tee : :class:`~skrf.network.Network` object
a n-port splitter
See Also
--------
match : called to create a 'blank' network
"""
n=nports
result = self.match(n, **kwargs)
for f in range(self.frequency.npoints):
result.s[f,:,:] = (2*1./n-1)*npy.eye(n) + \
npy.sqrt((1-((2.-n)/n)**2)/(n-1))*\
(npy.ones((n,n))-npy.eye(n))
return result
# transmission line
def to_meters(self, d: NumberLike, unit: str = 'deg') -> NumberLike:
"""
Translate various units of distance into meters.
This is a method of media to allow for electrical lengths as
inputs. For dispersive media, mean group velocity is used to
translate time-based units to distance.
Parameters
----------
d : number or array-like
the value
unit : str
the unit to that x is in:
['deg','rad','m','cm','um','in','mil','s','us','ns','ps']
Returns
-------
d_m : number, array-like
d in meters
See Also
--------
skrf.constants.to_meters
"""
unit = unit.lower()
#import pdb;pdb.set_trace()
d_dict ={'deg':self.theta_2_d(d,deg=True),
'rad':self.theta_2_d(d,deg=False),
}
if unit in d_dict:
return d_dict[unit]
else:
# mean group velocity is used to translate time-based
# units to distance
if 's' in unit:
# they are specifying a time unit so calculate
# the group velocity. (note this fails for media of
# too little points, as it uses gradient)
v_g = -self.v_g.imag.mean()
else:
v_g = c
return to_meters(d=d,unit=unit, v_g=v_g)
def thru(self, **kwargs) -> Network:
r"""
Matched transmission line of length 0.
Parameters
----------
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
thru : :class:`~skrf.network.Network` object
matched transmission line of 0 length
See Also
--------
line : this just calls line(0)
open, short, match
"""
return self.line(0, **kwargs)
def line(self, d: NumberLike, unit: str = 'deg',
z0: Union[NumberLike, str, None] = None, embed: bool = False, **kwargs) -> Network:
r"""
Transmission line of a given length and impedance.
The units of `length` are interpreted according to the value
of `unit`. If `z0` is not None, then a line of the specified impedance
is produced. If `embed` is also True, then the line is embedded
in this media's z0 environment, creating a mismatched line.
Parameters
----------
d : number
the length of transmission line (see unit argument)
unit : ['deg','rad','m','cm','um','in','mil','s','us','ns','ps']
the units of d. See :func:`to_meters`, for details
z0 : number, string, or array-like or None
the characteristic impedance of the line, if different
from self.z0. To set z0 in terms of normalized impedance,
pass a string, like `z0='1+.2j'`
embed : bool
if `z0` is given, should the line be embedded in the media's z0
environment, or left in its own? If embedded,
there will be reflections.
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
line : :class:`~skrf.network.Network` object
matched transmission line of given length
Examples
--------
>>> my_media.line(1, 'mm', z0=100)
>>> my_media.line(90, 'deg', z0='2') # set z0 as normalized impedance
"""
if isinstance(z0,str):
z0 = parse_z0(z0)* self.z0
kwargs.update({'z0':z0})
result = self.match(nports=2,**kwargs)
theta = self.electrical_length(self.to_meters(d=d, unit=unit))
s11 = npy.zeros(self.frequency.npoints, dtype=complex)
s21 = npy.exp(-1*theta)
result.s = \
npy.array([[s11, s21],[s21,s11]]).transpose().reshape(-1,2,2)
if embed:
result = self.thru()**result**self.thru()
return result
def delay_load(self, Gamma0: NumberLike, d: Number, unit: str = 'deg', **kwargs) -> Network:
r"""
Delayed load.
A load with reflection coefficient `Gamma0` at the end of a
matched line of length `d`.
Parameters
----------
Gamma0 : number, array-like
reflection coefficient of load (not in dB)
d : number
the length of transmission line (see unit argument)
unit : ['deg','rad','m','cm','um','in','mil','s','us','ns','ps']
the units of d. See :func:`to_meters`, for details
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
delay_load : :class:`~skrf.network.Network` object
a delayed load
Examples
----------
>>> my_media.delay_load(-.5, 90, 'deg', z0=50)
Note
----
This calls ::
line(d, unit, **kwargs) ** load(Gamma0, **kwargs)
See Also
--------
line : creates the network for line
load : creates the network for the load
delay_short
delay_open
"""
return self.line(d=d, unit=unit,**kwargs)**\
self.load(Gamma0=Gamma0,**kwargs)
def delay_short(self, d: Number, unit: str = 'deg', **kwargs) -> Network:
r"""
Delayed Short.
A transmission line of given length terminated with a short.
Parameters
----------
d : number
the length of transmission line (see unit argument)
unit : ['deg','rad','m','cm','um','in','mil','s','us','ns','ps']
the units of d. See :func:`to_meters`, for details
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
delay_short : :class:`~skrf.network.Network` object
a delayed short
See Also
--------
delay_load
delay_open
"""
return self.delay_load(Gamma0=-1., d=d, unit=unit, **kwargs)
def delay_open(self, d: Number, unit: str = 'deg', **kwargs) -> Network:
r"""
Delayed open transmission line.
Parameters
----------
d : number
the length of transmission line (see unit argument)
unit : ['deg','rad','m','cm','um','in','mil','s','us','ns','ps']
the units of d. See :func:`to_meters`, for details
\*\*kwargs : key word arguments
passed to :func:`match`, which is called initially to create a
'blank' network.
Returns
-------
delay_open : :class:`~skrf.network.Network` object
a delayed open
See Also
--------
delay_load
delay_short
"""
return self.delay_load(Gamma0=1., d=d, unit=unit,**kwargs)
def shunt(self, ntwk: Network, **kwargs) -> Network:
r"""
Shunts a :class:`~skrf.network.Network`.
This creates a :func:`tee` and connects
`ntwk` to port 1, and returns the result.
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
\*\*kwargs : keyword arguments
passed to :func:`tee`
Returns
-------
shunted_ntwk : :class:`~skrf.network.Network` object
a shunted a ntwk. The resultant shunted_ntwk will have
(2 + ntwk.number_of_ports -1) ports.
See Also
--------
shunt_delay_load
shunt_delay_open
shunt_delay_short
shunt_capacitor
shunt_inductor
"""
return connect(self.tee(**kwargs),1,ntwk,0)
def shunt_delay_load(self, *args, **kwargs) -> Network:
r"""
Shunted delayed load.
Parameters
----------
\*args,\*\*kwargs : arguments, keyword arguments
passed to func:`delay_load`
Returns
--------
shunt_delay_load : :class:`~skrf.network.Network` object
a shunted delayed load (2-port)
Notes
-----
This calls::
shunt(delay_load(*args, **kwargs))
See Also
--------
shunt
shunt_delay_open
shunt_delay_short
shunt_capacitor
shunt_inductor
"""
return self.shunt(self.delay_load(*args, **kwargs))
def shunt_delay_open(self,*args,**kwargs) -> Network:
r"""
Shunted delayed open.
Parameters
----------
\*args,\*\*kwargs : arguments, keyword arguments
passed to func:`delay_open`
Returns
-------
shunt_delay_open : :class:`~skrf.network.Network` object
shunted delayed open (2-port)
Notes
-----
This calls::
shunt(delay_open(*args, **kwargs))
See Also
--------
shunt
shunt_delay_load
shunt_delay_short
shunt_capacitor
shunt_inductor
"""
return self.shunt(self.delay_open(*args, **kwargs))
def shunt_delay_short(self, *args, **kwargs) -> Network:
r"""
Shunted delayed short.
Parameters
----------
\*args,\*\*kwargs : arguments, keyword arguments
passed to func:`delay_short`
Returns
-------
shunt_delay_short : :class:`~skrf.network.Network` object
shunted delayed short (2-port)
Notes
-----
This calls::
shunt(delay_short(*args, **kwargs))
See Also
--------
shunt
shunt_delay_load
shunt_delay_open
shunt_capacitor
shunt_inductor
"""
return self.shunt(self.delay_short(*args, **kwargs))
def shunt_capacitor(self, C: NumberLike, *args, **kwargs) -> Network:
r"""
Shunted capacitor.
Parameters
----------
C : number, array-like
Capacitance in Farads.
\*args,\*\*kwargs : arguments, keyword arguments
passed to func:`capacitor`
Returns
-------
shunt_capacitor : :class:`~skrf.network.Network` object
shunted capacitor (2-port)
Notes
-----
This calls::
shunt(capacitor(C,*args, **kwargs))
See Also
--------
shunt
shunt_delay_load
shunt_delay_open
shunt_delay_short
shunt_inductor
"""
return self.shunt(self.capacitor(C=C,*args,**kwargs)**self.short())
def shunt_inductor(self, L: NumberLike, *args, **kwargs) -> Network:
r"""
Shunted inductor.
Parameters
----------
L : number, array-like
Inductance in Henrys.
\*args,\*\*kwargs : arguments, keyword arguments
passed to func:`inductor`
Returns
-------
shunt_inductor : :class:`~skrf.network.Network` object
shunted inductor(2-port)
Notes
-----
This calls::
shunt(inductor(L,*args, **kwargs))
See Also
--------
shunt
shunt_delay_load
shunt_delay_open
shunt_delay_short
shunt_capacitor
"""
return self.shunt(self.inductor(L=L,*args,**kwargs)**self.short())
def attenuator(self, s21: NumberLike, db: bool = True, d: Number = 0,
unit: str = 'deg', name: str = '', **kwargs) -> Network:
"""
Ideal matched attenuator of a given length.
Parameters
----------
s21 : number, array-like
the attenuation
db : bool, optional
is s21 in dB? otherwise assumes linear. Default is True (dB).
d : number, optional
length of attenuator. Default is 0.
unit : ['deg','rad','m','cm','um','in','mil','s','us','ns','ps']
the units of d. See :func:`to_meters`, for details.
Default is 'deg'
Returns
-------
ntwk : :class:`~skrf.network.Network` object
2-port attenuator
"""
if db:
s21 = mf.db_2_magnitude(s21)
result = self.match(nports=2)
result.s[:,0,1] = s21
result.s[:,1,0] = s21
result = result**self.line(d=d, unit = unit, **kwargs)
result.name = name
return result
def lossless_mismatch(self, s11: NumberLike, db: bool = True, **kwargs) -> Network:
"""
Lossless, symmetric mismatch defined by its return loss.
Parameters
----------
s11 : complex number, number, or array-like
the reflection coefficient. If db==True, then phase is ignored
db : bool, optional
is s11 in db? otherwise assumes linear. Default is True (dB)
Returns
-------
ntwk : :class:`~skrf.network.Network` object
2-port lossless mismatch
"""
result = self.match(nports=2,**kwargs)
if db:
s11 = mf.db_2_magnitude(s11)
result.s[:,0,0] = s11
result.s[:,1,1] = s11
s21_mag = npy.sqrt(1- npy.abs(s11)**2)
s21_phase = (npy.angle(s11) \
+ npy.pi/2 *(npy.angle(s11)<=0) \
- npy.pi/2 *(npy.angle(s11)>0))
result.s[:,0,1] = s21_mag* npy.exp(1j*s21_phase)
result.s[:,1,0] = result.s[:,0,1]
return result
def isolator(self, source_port: int = 0, **kwargs) -> Network:
"""
Two-port isolator.
Parameters
----------
source_port: int in [0,1], optional
port at which power can flow from. Default is 0.
Returns
-------
ntwk : :class:`~skrf.network.Network` object
2-port isolator
"""
result = self.thru(**kwargs)
if source_port==0:
result.s[:,0,1]=0
elif source_port==1:
result.s[:,1,0]=0
return result
## Noisy Networks
def white_gaussian_polar(self, phase_dev: Number, mag_dev: Number,
n_ports: int = 1, **kwargs) -> Network:
r"""
Complex zero-mean gaussian white-noise network.
Creates a network whose s-matrix is complex zero-mean gaussian
white-noise, of given standard deviations for phase and
magnitude components.
This 'noise' network can be added to networks to simulate
additive noise.
Parameters
----------
mag_dev : number
standard deviation of magnitude
phase_dev : number
standard deviation of phase
n_ports : int
number of ports.
\*\*kwargs : passed to :class:`~skrf.network.Network`
initializer
Returns
--------
result : :class:`~skrf.network.Network` object
a noise network
"""
shape = (self.frequency.npoints, n_ports,n_ports)
phase_rv= stats.norm(loc=0, scale=phase_dev).rvs(size = shape)
mag_rv = stats.norm(loc=0, scale=mag_dev).rvs(size = shape)
result = Network(**kwargs)
result.frequency = self.frequency
result.s = mag_rv*npy.exp(1j*phase_rv)
return result
def random(self, n_ports: int = 1, reciprocal: bool = False, matched: bool = False,
symmetric: bool = False, **kwargs) -> Network:
r"""
Complex random network.
Creates a n-port network whose s-matrix is filled with random
complex numbers. Optionally, result can be matched or reciprocal.
Parameters
----------
n_ports : int
number of ports.
reciprocal : bool
makes s-matrix symmetric (:math:`S_{mn} = S_{nm}`)
symmetric : bool
makes s-matrix diagonal have a single value (:math:`S_{mm} = S_{nn}`)
matched : bool
makes diagonals of s-matrix zero
\*\*kwargs : passed to :class:`~skrf.network.Network`
initializer
Returns
-------
result : :class:`~skrf.network.Network` object
the network
"""
result = self.match(nports = n_ports, **kwargs)
result.s = mf.rand_c(self.frequency.npoints, n_ports,n_ports)
if reciprocal and n_ports>1:
for m in range(n_ports):
for n in range(n_ports):
if m>n:
result.s[:,m,n] = result.s[:,n,m]
if symmetric:
for m in range(n_ports):
for n in range(n_ports):
if m==n:
result.s[:,m,n] = result.s[:,0,0]
if matched:
for m in range(n_ports):
for n in range(n_ports):
if m==n:
result.s[:,m,n] = 0
return result
## OTHER METHODS
def extract_distance(self, ntwk: Network) -> NumberLike:
"""
Determines physical distance from a transmission or reflection Network.
Given a matched transmission or reflection measurement the
physical distance is estimated at each frequency point based on
the scattering parameter phase of the ntwk and propagation constant.
Note
----
If the Network is a reflect measurement, the returned distance will
be twice the physical distance.
Parameters
----------
ntwk : `Network`
A one-port network of either the reflection or the transmission.
Returns
-------
d : number or array_like
physical distance
Examples
--------
>>> air = rf.air50
>>> l = air.line(1, 'cm')
>>> d_found = air.extract_distance(l.s21)
>>> d_found
"""
if ntwk.nports ==1:
dphi = gradient(ntwk.s_rad_unwrap.flatten())
dgamma = gradient(self.gamma.imag)
return -dphi/dgamma
else:
raise ValueError('ntwk must be one-port. Select s21 or s12 for a two-port.')
def plot(self, *args, **kw):
return self.frequency.plot(*args, **kw)
def write_csv(self, filename: str = 'f,gamma,Z0,z0.csv'):
"""
write this media's frequency, gamma, Z0, and z0 to a csv file.
Parameters
----------
filename : string, optional
file name to write out data to.
Default is 'f,gamma,Z0,z0.csv', so you probably want to specify it.
See Also
--------
from_csv : class method to initialize Media object from a
csv file written from this function
"""
header = 'f[%s], Re(Z0), Im(Z0), Re(gamma), Im(gamma), Re(port Z0), Im(port Z0)\n'%self.frequency.unit
g,z,pz = self.gamma, \
self.Z0, self.z0
data = npy.vstack(\
[self.frequency.f_scaled, z.real, z.imag, \
g.real, g.imag, pz.real, pz.imag]).T
npy.savetxt(filename,data,delimiter=',',header=header)
class DefinedGammaZ0(Media):
"""
A media directly defined by its propagation constant and characteristic impedance.
Parameters
----------
frequency : :class:`~skrf.frequency.Frequency` object or None
frequency band of this transmission line medium.
Default is None, which produces a 1-10ghz band with 101 points.
z0 : number, array-like, or None
The port impedance for media. Only needed if it's different
from the characteristic impedance of the transmission
line. If `z0` is `None` then it will default to `Z0`.
gamma : number or array-like, optional
complex propagation constant. `gamma` must adhere to
the following convention:
* positive real(gamma) = attenuation
* positive imag(gamma) = forward propagation
Default is 1j (lossless).
Z0 : number or array-like, optional.
complex characteristic impedance of the media.
Default is 50 ohm.
"""
def __init__(self, frequency: Union[Frequency, None] = None,
z0: Union[NumberLike, None] = None, Z0: NumberLike = 50,
gamma: NumberLike = 1j):
super(DefinedGammaZ0, self).__init__(frequency=frequency,
z0=z0)
self.gamma= gamma
self.Z0 = Z0
@classmethod
def from_csv(cls, filename: str, *args, **kwargs) -> Media:
"""
Create a Media from numerical values stored in a csv file.
The csv file format must be written by the function :func:`write_csv`,
or similar method which produces the following format::
f[$unit], Re(Z0), Im(Z0), Re(gamma), Im(gamma), Re(port Z0), Im(port Z0)
1, 1, 1, 1, 1, 1, 1
2, 1, 1, 1, 1, 1, 1
.....
See Also
--------
write_csv
"""
try:
f = open(filename)
except(TypeError):
# they may have passed a file
f = filename
header = f.readline()
# this is not the correct way to do this ... but whatever
f_unit = header.split(',')[0].split('[')[1].split(']')[0]
f,z_re,z_im,g_re,g_im,pz_re,pz_im = \
npy.loadtxt(f, delimiter=',').T
return cls(
frequency = Frequency.from_f(f, unit=f_unit),
Z0 = z_re+1j*z_im,
gamma = g_re+1j*g_im,
z0 = pz_re+1j*pz_im,
*args, **kwargs
)
@property
def npoints(self):
return self.frequency.npoints
@npoints.setter
def npoints(self,val):
# this is done to trigger checks on vector lengths for
# gamma/Z0/z0
new_freq= self.frequency.copy()
new_freq.npoints = val
self.frequency = new_freq
@property
def frequency(self):
return self._frequency
@frequency.setter
def frequency(self, val):
if hasattr(self, '_frequency') and self._frequency is not None:
# they are updating the frequency, we may have to do something
attrs_to_test = [self._gamma, self._Z0, self._z0]
if any([has_len(k) for k in attrs_to_test]):
raise NotImplementedError('updating a Media frequency, with non-constant gamma/Z0/z0 is not worked out yet')
self._frequency = val
@property
def Z0(self):
"""
Characteristic Impedance of the media.
"""
return self._Z0*ones(len(self))
@Z0.setter
def Z0(self, val):
self._Z0 = val
@property
def gamma(self):
"""
Propagation constant.
Returns
---------
gamma : :class:`numpy.ndarray`
complex propagation constant for this media
Notes
------
`gamma` must adhere to the following convention:
* positive real(gamma) = attenuation
* positive imag(gamma) = forward propagation
"""
return self._gamma*ones(len(self))
@gamma.setter
def gamma(self, val):
self._gamma = val
def has_len(x: NumberLike) -> bool:
"""
Test if x has any length (i.e. is a vector).
This is slightly non-trivial because [3] has len() but it
doesn't really have any length.
"""
try:
return (len(array(x))>1)
except TypeError:
return False
def parse_z0(s: str) -> NumberLike:
"""
Parse a z0 string.
Parameters
----------
s : str
z0 string, like '50+10j'
Returns
-------
z0 : npy.ndarray
Raises
------
ValueError
If the z0 string could not be parsed.
"""
# they passed a string for z0, try to parse it
re_numbers = re.compile(r'\d+')
numbers = re.findall(re_numbers, s)
if len(numbers)==2:
out = float(numbers[0]) +1j*float(numbers[1])
elif len(numbers)==1:
out = float(numbers[0])
else:
raise ValueError('could not parse z0 string')
return out
|
temmeand/scikit-rf
|
skrf/media/media.py
|
Python
|
bsd-3-clause
| 42,952
|
[
"Gaussian"
] |
7426433041e8cd53d47966c274ec69f2eba74aab04adb662b913d700a94a8596
|
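A short usage sketch of DefinedGammaZ0 (values are illustrative, not canonical): a lossless 75-ohm media measured at 50-ohm ports, exercising line and delay_short.
from scipy.constants import c
import skrf as rf
from skrf.media import DefinedGammaZ0

freq = rf.Frequency(1, 10, 101, 'ghz')
med = DefinedGammaZ0(frequency=freq, Z0=75, z0=50, gamma=1j * freq.w / c)  # beta = w/c, lossless
ln = med.line(90, 'deg')         # 2-port matched line, s of shape (101, 2, 2)
ds = med.delay_short(45, 'deg')  # 1-port: line(45 deg) ** short, s of shape (101, 1, 1)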
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 by Gaik Tamazian
# gaik (dot) tamazian (at) gmail (dot) com
import logging
import subprocess
from itertools import chain
logging.basicConfig()
logger = logging.getLogger(__name__)
class MakeBlastDb(object):
"""
The class implements a wrapper to launch makeblastdb from the
NCBI BLAST+ package.
"""
def __init__(self, fasta, out_name=None):
"""
Create a BLAST database from the specified FASTA file.
:param fasta: a name of a FASTA file of sequences to create a
BLAST database from
:param out_name: the output BLAST database name
:type fasta: str
:type out_name: str
"""
self.__fasta = fasta
self.__out_name = out_name
def launch(self):
"""
Launch makeblastdb with the specified parameters.
"""
options = ['makeblastdb', '-in', self.__fasta, '-dbtype',
'nucl']
if self.__out_name is not None:
options += ['-out', self.__out_name]
subprocess.check_call(options)
class BlastN(object):
"""
The class implements a wrapper to launch blastn from the NCBI
BLAST+ package.
"""
def __init__(self, query, database, output):
"""
Create a BlastN object to align the specified query to the
specified database.
:param query: a name of a FASTA file of query sequences to be
aligned
:param database: a name of a BLAST database to align the query
sequences to
:type query: str
:type database: str
"""
self.__query = query
self.__database = database
self.__output = output
self.__parameters = {}
def get(self, parameter):
"""
Get a value of the specified parameter.
:param parameter: a parameter name
:type parameter: str
:return: the specified parameter value or None if it was not
specified
"""
return self.__parameters.setdefault(parameter)
def set(self, parameter, value):
"""
Set the value of a blastn option.
:param parameter: a parameter name
:param value: a parameter value
:type parameter: str
"""
self.__parameters[parameter] = value
def launch(self):
"""
Launch blastn with the specified parameters.
"""
options = ['blastn', '-query', self.__query, '-db',
self.__database, '-out', self.__output] + \
map(str, list(chain.from_iterable(
self.__parameters.iteritems())))
subprocess.check_call(options)
|
gtamazian/Chromosomer
|
chromosomer/wrapper/blast.py
|
Python
|
mit
| 2,727
|
[
"BLAST"
] |
a01e99cf06b7138e671fa731fe7a63e38f8c55382ed3e9b7b9cb2bcd2239a7e7
|
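A hypothetical usage sketch of the wrappers above (file names are made up; launch() shells out to the NCBI BLAST+ binaries, which must be on PATH). Note that launch() passes parameter keys verbatim, so the leading dash belongs in the key:
from chromosomer.wrapper.blast import MakeBlastDb, BlastN  # assumed import path

MakeBlastDb('reference.fa', out_name='refdb').launch()  # build the nucleotide database
aligner = BlastN('fragments.fa', 'refdb', 'hits.tsv')
aligner.set('-outfmt', 6)     # tabular output (standard blastn option)
aligner.set('-evalue', 1e-10)
aligner.launch()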
#!/usr/bin/env python
#TOR manager module developed by Marios Kourtesis <name.surname@gmail.com>
import commands
import socket
import time
from multiprocessing import Process
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.lib.general import cprint
class TOR_manager(BaseComponent):
'''
This class is responsible for TOR management.
'''
COMPONENT_NAME = "tor_manager"
#here is done the initialization of arguments and connections
def __init__(self, args):
self.register_in_service_locator()
#If the args are empty it will filled with the default values
self.error_handler = self.get_component("error_handler")
if args[0] == '':
self.ip = "127.0.0.1"
else:
self.ip = args[0]
if args[1] == '':
self.port = 9050
else:
try:
self.port = int(args[1])
except ValueError:
self.error_handler.FrameworkAbort("Invalid TOR port")
if args[2] == '':
self.TOR_control_port = 9051
else:
try:
self.TOR_control_port = int(args[2])
except ValueError:
self.error_handler.FrameworkAbort("Invalid TOR Controlport")
if args[3] == '':
self.password = "owtf"
else:
self.password = args[3]
if args[4] == '':
self.time = 5
else:
try:
self.time = int(args[4])
except ValueError:
self.error_handler.FrameworkAbort("Invalid TOR Time")
if self.time < 1:
self.error_handler.FrameworkAbort("Invalid TOR Time")
self.TOR_Connection = self.Open_connection()
self.Authenticate()
#This function is handling the authentication process to TOR control connection.
def Authenticate(self):
self.TOR_Connection.send('AUTHENTICATE "{0}"\r\n'.format(self.password))
responce = self.TOR_Connection.recv(1024)
if responce.startswith('250'): #250 is the success response
cprint("Successfully Authenticated to TOR control")
else:
self.error_handler.FrameworkAbort("Authentication Error : " + responce)
#Opens a new connection to TOR control
def Open_connection(self):
try:
s = socket.socket()
s.connect((self.ip, self.TOR_control_port))
cprint("Connected to TOR control")
return s
except Exception as error:
self.error_handler.FrameworkAbort("Can't connect to the TOR daemon : " + error.strerror)
#Starts a new TOR_control_process which will renew the IP address.
def Run(self):
tor_ctrl_proc = Process(target=TOR_control_process, args=(self,))
tor_ctrl_proc.start()
return tor_ctrl_proc
#checks if TOR is running
@staticmethod
def is_tor_running():
output = commands.getoutput("ps -A|grep -a \" tor$\"|wc -l")
if output == "1":
return True
        else:
            return False
    @staticmethod
    def msg_start_tor():
        cprint("""Error : TOR daemon is not running
(Tips: service tor start)""")
#TOR configuration Info
@staticmethod
def msg_configure_tor():
cprint("""
1)Open the torrc file, usually located at '/etc/tor/torrc'
  if you can't find the torrc file visit https://www.torproject.org/docs/faq.html.en#torrc
2)Enable the TOR control port by uncommenting (removing the hash(#) symbol)
  or adding the following line, so that it looks like this: "ControlPort 9051".
3)Generate a new hashed password by running the following command
  "tor --hash-password 'your_password'"
4)Uncomment "HashedControlPassword" and add the previously generated hash,
  so that it looks like the following but with your hash
  HashedControlPassword 16:52B319480CED2E0860BAEA7565ECCF628A59FEE59B6E0592CD3F01C710
Recommended Setting:
  ControlPort 9051
  HashedControlPassword 16:52B319480CED2E0860BAEA7565ECCF628A59FEE59B6E0592CD3F01C710
  The above hashed password is 'owtf'
Advantages of the recommended settings:
  You can run owtf TOR mode without specifying the options,
  e.g. ./owtf.py -o OWTF-WVS-001 http://my.website.com --tor ::::
  which is the same as 127.0.0.1:9050:9051:owtf:5
""")
    #Sends a NEWNYM message to TOR control in order to renew the IP address
def renew_ip(self):
        self.TOR_Connection.send("signal NEWNYM\r\n")
        response = self.TOR_Connection.recv(1024)
        if response.startswith('250'):
            cprint("TOR : IP renewed")
            return True
        else:
            cprint("[TOR]Warning: IP can't be renewed")
            return False
#This will run in a new process in order to renew the IP address after a certain time.
def TOR_control_process(self):
    while True:
        if self.renew_ip():
            time.sleep(self.time * 60)  # self.time is given in minutes
        else:
            time.sleep(10)  # retry the IP renewal in 10 seconds
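# A standalone sketch of the control-port exchange used above (not part of
# the original module): authenticate and request a NEWNYM circuit with a raw
# socket. Assumes a local TOR daemon configured as recommended in
# msg_configure_tor(), i.e. ControlPort 9051 with the password 'owtf'.
def _renew_tor_ip_sketch(password="owtf", host="127.0.0.1", control_port=9051):
    s = socket.socket()
    s.connect((host, control_port))
    s.send('AUTHENTICATE "{0}"\r\n'.format(password))
    if not s.recv(1024).startswith('250'):  # 250 is the success response
        raise RuntimeError("TOR control authentication failed")
    s.send("signal NEWNYM\r\n")  # ask TOR for a fresh circuit / exit IP
    renewed = s.recv(1024).startswith('250')
    s.close()
    return renewed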
|
DePierre/owtf
|
framework/http/proxy/tor_manager.py
|
Python
|
bsd-3-clause
| 5,312
|
[
"VisIt"
] |
382aa9c4633ed9c8a8c84dad06b70afc7faa9acd85b68001108d6ee2358cc223
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Example of "structured points" dataset
# vtkStructuredPoints is a child class of vtkImageData
import vtk
dx = 0.2
grid = vtk.vtkStructuredPoints()
#grid = vtk.vtkImageData()
grid.SetOrigin(0.1, 0.1, 0.1) # origin of the grid (VTK's default is (0, 0, 0))
grid.SetSpacing(dx, dx, dx)
grid.SetDimensions(5, 8, 10) # number of points in each direction
array = vtk.vtkDoubleArray()
array.SetNumberOfComponents(1) # this is 3 for a vector
array.SetNumberOfTuples(grid.GetNumberOfPoints())
for i in range(grid.GetNumberOfPoints()):
array.SetValue(i, i/2.0)
grid.GetPointData().AddArray(array)
array.SetName("my_data1")
# write structured points to disk...
writer = vtk.vtkStructuredPointsWriter()
writer.SetInputData(grid)
writer.SetFileName("points.vtk")
writer.Write()
writer = vtk.vtkXMLImageDataWriter()
writer.SetInputData(grid)
writer.SetFileName("points.vti")
writer.Write()
# display grid... (to be finished)
"""
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(grid)
#mapper.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.GetProperty().SetRepresentationToWireframe()
actor.GetProperty().SetColor(0, 0, 0)
actor.SetMapper(mapper)
ren = vtk.vtkRenderer()
ren.SetBackground(0.1, 0.2, 0.4)
ren.AddActor(actor)
window = vtk.vtkRenderWindow()
window.SetSize(800, 800)
window.AddRenderer(ren)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(window)
ren.ResetCamera()
window.Render()
interactor.Start()
"""
|
rboman/progs
|
classes/sph/sandbox/strpoints.py
|
Python
|
apache-2.0
| 1,465
|
[
"VTK"
] |
e6323577734caf2f01ba47f0ea5ad7910289734fe9e741056debbfc4a827c56a
|
import os
import scipy.stats
import numpy
import matplotlib.pylab as pl
import pandas as pd
# program path on this machine
#===================================================================
blastneuron_DIR = "/home/xiaoxiaol/work/src/blastneuron"
PRUNE_SHORT_BRANCH = blastneuron_DIR + "/bin/prune_short_branch"
PRE_PROCESSING = blastneuron_DIR + "/bin/pre_processing"
NEURON_RETRIEVE = blastneuron_DIR + "/bin/neuron_retrieve"
BATCH_COMPUTE = blastneuron_DIR + "/bin/batch_compute" # computes morphology features
#V3D="/Users/xiaoxiaoliu/work/v3d/v3d_external/bin/vaa3d64.app/Contents/MacOS/vaa3d64"
V3D="/local1/xiaoxiaol/work/v3d/v3d_external/bin/vaa3d"
####### SETTING #####################################################
data_DIR= "/home/xiaoxiaol/work/data/lims2/nr_june_25_filter_aligned"
#RUN downloadSWC.py to grab data into local dir data_DIR+'/original'
LIST_CSV_FILE = data_DIR+'/list.csv'
######################################################################
original_data_linker_file = data_DIR+'/original/mylinker.ano' # will be generated
preprocessed_data_linker_file = data_DIR+'/preprocessed/mylinker.ano'
feature_file = data_DIR + '/preprocessed/prep_features.nfb'
#===================================================================
def prune(inputswc_fn, outputswc_fn):
cmd = PRUNE_SHORT_BRANCH + " -i "+inputswc_fn + " -o "+outputswc_fn
os.system(cmd)
print cmd
return
def preprocessing(inputswc_fn, outputswc_fn):
cmd = PRE_PROCESSING+ " -i "+inputswc_fn + " -o "+outputswc_fn
os.system(cmd)
return
def neuronretrive(inputswc_fn, feature_fn, result_fn, retrieve_number, logfile):
cmd = NEURON_RETRIEVE + " -d " + feature_fn + " -q " +inputswc_fn + " -n "+ \
str(retrieve_number) +" -o "+result_fn+" -m 1,3" + " >" + logfile
print cmd
os.system(cmd)
return
def featurecomputing(input_linker_fn, feature_fn):
cmd = BATCH_COMPUTE + " -i "+input_linker_fn + " -o " + feature_fn
os.system(cmd)
print cmd
return
#def genLinkerFile(swcDir, linker_file):
# cmd = V3D + " -x linker_file_gen -f linker -i "+ swcDir +" -o "+ linker_file +" -p 1"
# print cmd
# os.system(cmd)
# return
def removeLinkerFilePath(inputLinkFile, outputLinkFile):
with open(outputLinkFile, 'w') as out_f:
        with open(inputLinkFile, 'r') as in_f:
            for inline in in_f:
                outline = 'SWCFILE=' + inline.split('/')[-1]
                out_f.write(outline)
return
def genLinkerFileFromList(listCSVFile, linkFile):
df = pd.read_csv(listCSVFile, sep=',',header=0)
fns = df.orca_path
with open(linkFile, 'w') as f:
for i in range(len(fns)):
line = "SWCFILE="+fns[i]+'\n'
f.write(line)
return
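# For reference (not part of the original script): the generated .ano linker
# file simply lists one reconstruction per line, e.g.
#
#   SWCFILE=/path/to/neuron_001.swc
#   SWCFILE=/path/to/neuron_002.swc
#
# removeLinkerFilePath() rewrites such a file keeping only the basenames, so
# the preprocessed linker refers to SWC files next to the linker itself.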
def pullListFromDB(outputFolder):
#outputListCSVFile = outputFolder +'/list.csv'
# copy data to local disk?
return
#==================================================================================================
def main():
#TODO: pullListFromDB() update from lims2 to grab all neuron reconstructions into list.csv
genLinkerFileFromList(LIST_CSV_FILE, original_data_linker_file)
if not os.path.exists(data_DIR+'/pruned'):
os.mkdir(data_DIR+'/pruned')
if not os.path.exists(data_DIR+'/preprocessed'):
os.mkdir(data_DIR+'/preprocessed')
with open(original_data_linker_file,'r') as f:
for line in f:
input_swc_path = (line.strip()).split('=')[1] #SWCFILE=*
swc_fn = input_swc_path.split('/')[-1]
pruned_swc_fn = data_DIR+'/pruned/'+ swc_fn
prune(input_swc_path, pruned_swc_fn)
preprocessed_swc_fn = data_DIR+'/preprocessed/'+ swc_fn
preprocessing(pruned_swc_fn, preprocessed_swc_fn)
removeLinkerFilePath(original_data_linker_file, preprocessed_data_linker_file)
##batch computing
featurecomputing(preprocessed_data_linker_file,feature_file)
if __name__ == "__main__":
main()
|
XiaoxiaoLiu/morphology_analysis
|
stats_analysis/process_lims_data.py
|
Python
|
gpl-3.0
| 4,053
|
[
"NEURON"
] |
419107a68dcfaa0efe7dee05662f8f3edd9fe1a3b75ed60b7fee9dcdf778bace
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
import numpy as np
from .utils import poly_map_domain, _combine_equivalency_dict
from ..units import Quantity
from ..utils.exceptions import AstropyUserWarning
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
# Check pkg_resources exists
try:
from pkg_resources import iter_entry_points
HAS_PKG = True
except ImportError:
HAS_PKG = False
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter']
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
""" Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop('equivalencies', None)
data_has_units = (isinstance(x, Quantity) or
isinstance(y, Quantity) or
isinstance(z, Quantity))
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(model.input_units['x'], equivalencies=input_units_equivalencies['x'])
if isinstance(y, Quantity) and z is not None:
y = y.to(model.input_units['y'], equivalencies=input_units_equivalencies['y'])
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
                # input units (to make sure that initial guesses on the parameters
                # are in the right unit system)
model = model.without_units_for_data(x=x, y=y, z=z)
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(y, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(x=x, y=y, z=z)
return model_new
else:
raise NotImplementedError("This model does not support being fit to data with units")
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
_fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
"""
supported_constraints = ['fixed']
supports_masked_input = True
def __init__(self):
self.fit_info = {'residuals': None,
'rank': None,
'singular_values': None,
'params': None
}
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, 'domain') and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, 'window') and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, 'x_domain') and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, 'y_domain') and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, 'x_window') and model.x_window is None:
model.x_window = [-1., 1.]
if hasattr(model, 'y_window') and model.y_window is None:
model.y_window = [-1., 1.]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like (optional)
Input coordinates.
If the dependent (``y`` or ``z``) co-ordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
co-ordinate grids differ.
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError('Model is not linear in parameters, '
'linear fit methods should not be used.')
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
_, fitparam_indices = _model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(x, y, z, n_models=len(model_copy),
model_set_axis=model_copy.model_set_axis)
has_fixed = any(model_copy.fixed.values())
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [idx for idx in
range(len(model_copy.param_names))
if idx not in fitparam_indices]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray([getattr(model_copy,
model_copy.param_names[idx]).value
for idx in fixparam_indices])
if len(farg) == 2:
x, y = farg
# map domain into window
if hasattr(model_copy, 'domain'):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices,
x=x)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices,
x=x)
else:
lhs = model_copy.fit_deriv(x, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
# map domain into window
if hasattr(model_copy, 'x_domain'):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices, x=x, y=y)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices, x=x, y=y)
else:
lhs = model_copy.fit_deriv(x, y, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
if z.ndim > 2:
# Basically this code here is making the assumption that if
# z has 3 dimensions it represents multiple models where
# the value of z is one plane per model. It's then
# flattening each plane and transposing so that the model
# axis is *last*.
model_axis = model_copy.model_set_axis or 0
rhs = z.reshape((z.shape[model_axis], -1)).T
else:
rhs = z.T
else:
rhs = z.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if lhs.ndim > 2:
raise ValueError('{0} gives unsupported >2D derivative matrix for '
'this x/y'.format(type(model_copy).__name__))
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input co-ordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(x) != len(weights):
raise ValueError("x and weights should have the same length")
if rhs.ndim == 2:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original dependent
# variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
if rcond is None:
rcond = len(x) * np.finfo(x.dtype).eps
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if len(model_copy) == 1 or not masked:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
rhs[good], rcond)
else:
# Where fitting multiple models with masked pixels, initialize an
# empty array of coefficients and populate it one model at a time.
# The shape matches the number of coefficients from the Vandermonde
# matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[-1:] + rhs.shape[-1:], dtype=rhs.dtype)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_rhs, model_lacoef in zip(rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask
model_lhs = lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
model_rhs, rcond)
model_lacoef[:] = t_coef.T
self.fit_info['residuals'] = resids
self.fit_info['rank'] = rank
self.fit_info['singular_values'] = sval
lacoef = (lacoef.T / scl).T
self.fit_info['params'] = lacoef
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if hasattr(model_copy, '_order') and rank != model_copy._order:
warnings.warn("The fit may be poorly conditioned\n",
AstropyUserWarning)
_fitter_to_model_params(model_copy, lacoef.flatten())
return model_copy
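# A minimal usage sketch for LinearLSQFitter (assuming the usual public
# imports; the data below are made up):
#
#     import numpy as np
#     from astropy.modeling import models, fitting
#
#     x = np.linspace(0, 10, 50)
#     y = 3.0 * x + 1.0 + np.random.normal(0., 0.1, x.size)
#     fitter = fitting.LinearLSQFitter()
#     fitted = fitter(models.Polynomial1D(degree=1), x, y)
#     print(fitted.parameters)        # approximately [1.0, 3.0]
#     print(fitter.fit_info['rank'])  # rank of the design matrix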
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a number of iterations ``niter``, outliers are removed
and fitting is performed for each iteration.
Parameters
----------
fitter : An Astropy fitter
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter.
outlier_func : function
A function for outlier removal.
niter : int (optional)
Number of iterations.
outlier_kwargs : dict (optional)
Keyword arguments for outlier_func.
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
def __str__(self):
return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
("\nOutlier func. args.: {3}"))\
.format(self.fitter__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __repr__(self):
return ("{0}(fitter: {1}, outlier_func: {2}," +
" niter: {3}, outlier_kwargs: {4})")\
.format(self.__class__.__name__,
self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like (optional)
Data measurements (2D case).
weights : array-like (optional)
Weights to be passed to the fitter.
kwargs : dict (optional)
Keyword arguments to be passed to the fitter.
Returns
-------
filtered_data : numpy.ma.core.MaskedArray
Data used to perform the fitting after outlier removal.
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
"""
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_weights = weights
if z is None:
filtered_data = y
for n in range(self.niter):
filtered_data = self.outlier_func(filtered_data - fitted_model(x),
**self.outlier_kwargs)
filtered_data += fitted_model(x)
if weights is not None:
filtered_weights = weights[~filtered_data.mask]
fitted_model = self.fitter(fitted_model,
x[~filtered_data.mask],
filtered_data.data[~filtered_data.mask],
weights=filtered_weights,
**kwargs)
else:
filtered_data = z
for n in range(self.niter):
filtered_data = self.outlier_func(filtered_data - fitted_model(x, y),
**self.outlier_kwargs)
filtered_data += fitted_model(x, y)
if weights is not None:
filtered_weights = weights[~filtered_data.mask]
fitted_model = self.fitter(fitted_model,
x[~filtered_data.mask],
y[~filtered_data.mask],
filtered_data.data[~filtered_data.mask],
weights=filtered_weights,
**kwargs)
return filtered_data, fitted_model
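# A usage sketch for FittingWithOutlierRemoval: `~astropy.stats.sigma_clip`
# returns a `~numpy.ma.MaskedArray`, which is what ``__call__`` above expects
# from ``outlier_func`` (the x/y data below are hypothetical):
#
#     from astropy.stats import sigma_clip
#
#     fit = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
#                                     niter=3, sigma=3.0)
#     filtered_y, fitted_model = fit(models.Polynomial1D(1), x, y)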
class LevMarLSQFitter(metaclass=_FitterMeta):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
    One additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
"""
supported_constraints = ['fixed', 'tied', 'bounds']
"""
The constraint types supported by this fitter type.
"""
def __init__(self):
self.fit_info = {'nfev': None,
'fvec': None,
'fjac': None,
'ipvt': None,
'qtf': None,
'message': None,
'ierr': None,
'param_jac': None,
'param_cov': None}
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
_fitter_to_model_params(model, fps)
meas = args[-1]
if weights is None:
return np.ravel(model(*args[2: -1]) - meas)
else:
return np.ravel(weights * (model(*args[2: -1]) - meas))
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None,
maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS, estimate_jacobian=False):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
from scipy import optimize
model_copy = _validate_model(model, self.supported_constraints)
farg = (model_copy, weights, ) + _convert_input(x, y, z)
if model_copy.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _ = _model_to_fit_params(model_copy)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function, init_values, args=farg, Dfun=dfunc,
col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
xtol=acc, full_output=True)
_fitter_to_model_params(model_copy, fitparams)
self.fit_info.update(dinfo)
self.fit_info['cov_x'] = cov_x
self.fit_info['message'] = mess
self.fit_info['ierr'] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
dof = len(y) - len(init_values)
self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
else:
self.fit_info['param_cov'] = None
return model_copy
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
_fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars],
True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
return [np.ravel(_) for _ in np.ravel(weights) * np.array(model.fit_deriv(x, *params))]
else:
if not model.col_fit_deriv:
return [np.ravel(_) for _ in (
np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
else:
return [np.ravel(_) for _ in (weights * np.array(model.fit_deriv(x, y, *params)))]
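# A usage sketch for LevMarLSQFitter: a non-linear fit of a Gaussian with
# per-point uncertainties ``sigma`` (hypothetical data), reading back the
# parameter covariance computed above:
#
#     g_init = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)
#     fitter = LevMarLSQFitter()
#     g_fit = fitter(g_init, x, y, weights=1.0 / sigma)
#     cov = fitter.fit_info['param_cov']  # ordered like g_fit.param_names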
class SLSQPLSQFitter(Fitter):
"""
SLSQP optimization algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model,
self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
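# SLSQPLSQFitter and SimplexLSQFitter share this call signature; extra
# keyword arguments are forwarded to the underlying optimizer, e.g.
# (hypothetical model and data):
#
#     fitter = SimplexLSQFitter()
#     m_fit = fitter(m_init, x, y, maxiter=500, acc=1e-8)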
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self._model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def _model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = [p.flatten() for p in model.parameters]
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]['slice']
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
            the fitted parameters - the result of one iteration of the
fitting algorithm
        args : tuple
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[:model.n_inputs + 1]
del lstsqargs[:model.n_inputs + 1]
# separate each model separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError("Expected >1 models, {} is given".format(
len(self.models)))
if len(self.jointparams.keys()) < 2:
raise TypeError("At least two parameters are expected, "
"{} is given".format(len(self.jointparams.keys())))
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError("{} parameter(s) provided but {} expected".format(
len(self.jointparams[j]), len(self.initvals)))
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError("Expected {} coordinates in args but {} provided"
.format(reduce(lambda x, y: x + 1 + y + 1,
self.modeldims), len(args)))
self.fitparams[:], _ = optimize.leastsq(self.objective_function,
self.fitparams, args=args)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
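# A usage sketch for JointFitter, following the class docstring: fit two
# Gaussians to two (hypothetical) data sets while sharing the width.
# ``jointparameters`` maps each model to its shared parameter names and
# ``initvals`` holds the initial values of the shared parameters:
#
#     g1 = models.Gaussian1D(1.0, 0.0, 0.5)
#     g2 = models.Gaussian1D(2.5, 3.0, 0.5)
#     jf = JointFitter([g1, g2], {g1: ['stddev'], g2: ['stddev']}, [0.5])
#     jf(x1, y1, x2, y2)  # coordinates then measurements for each model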
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1:
if z is None:
if y.shape[model_set_axis] != n_models:
                raise ValueError(
                    "Number of data sets (y array) is expected to equal "
                    "the number of parameter sets")
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
else:
# Shape of z excluding model_set_axis
z_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]
if not (x.shape == y.shape == z_shape):
raise ValueError("x, y and z should have the same shape")
if z is None:
farg = (x, y)
else:
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
"""
_, fit_param_indices = _model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]['slice']
shape = param_metrics[name]['shape']
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset:offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None):
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
model.parameters[slice_] = values
offset += size
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]['slice']
model.parameters[slice_] = value
def _model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model.parameters)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del fitparam_indices[idx]
return (np.array(params), fitparam_indices)
else:
return (model.parameters, fitparam_indices)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
        # for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requirement
of it being merged into astropy's core.
Parameters
----------
entry_points : a list of `~pkg_resources.EntryPoint`
entry_points are objects which encapsulate
importable objects and are defined on the
installation of a package.
Notes
-----
    An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(AstropyUserWarning('{type} error occurred in entry '
'point {name}.' .format(type=type(e).__name__, name=name)))
else:
if not inspect.isclass(entry_point):
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to be a '
'Class.' .format(name)))
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to extend '
'astropy.modeling.Fitter' .format(name)))
# this is so fitting doesn't choke if pkg_resources doesn't exist
if HAS_PKG:
populate_entry_points(iter_entry_points(group='astropy.modeling', name=None))
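# For reference (not part of this module): a third-party package can register
# a fitter through the 'astropy.modeling' entry-point group in its setup.py;
# the class must subclass Fitter to be injected into this namespace.
#
#     setup(...,
#           entry_points={'astropy.modeling':
#                         ['MyFitter = mypackage.fitters:MyFitter']})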
|
DougBurke/astropy
|
astropy/modeling/fitting.py
|
Python
|
bsd-3-clause
| 51,488
|
[
"Gaussian"
] |
a9020ca29ee094296669f63896374d63eb548bff9f7007c4fbe4dbcad7fd4fd0
|
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
import os
import sys
import time
VERSION = "2.6.6"
__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20110222"
class MakefileMaker:
def __init__( self, outfile ):
"""initialize"""
self.packages = {}
self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
self.output = outfile
self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )
#
# helper functions
#
def out( self, data ):
"""print a line to the output file"""
self.output.write( "%s\n" % data )
def setPrefix( self, targetPrefix ):
"""set a file prefix for addPackage files"""
self.targetPrefix = targetPrefix
def doProlog( self ):
self.out( """ """ )
self.out( "" )
def addPackage( self, name, description, dependencies, filenames ):
"""add a package to the Makefile"""
        if isinstance( filenames, str ):
filenames = filenames.split()
fullFilenames = []
for filename in filenames:
if filename[0] != "$":
fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
else:
fullFilenames.append( filename )
self.packages[name] = description, dependencies, fullFilenames
def doBody( self ):
"""generate body of Makefile"""
global VERSION
#
# generate provides line
#
provideLine = 'PROVIDES+="'
for name in sorted(self.packages):
provideLine += "%s " % name
provideLine += '"'
self.out( provideLine )
self.out( "" )
#
# generate package line
#
packageLine = 'PACKAGES="${PN}-core-dbg '
for name in sorted(self.packages):
if name != '${PN}-core-dbg':
packageLine += "%s " % name
packageLine += '${PN}-modules"'
self.out( packageLine )
self.out( "" )
#
# generate package variables
#
for name, data in sorted(self.packages.iteritems()):
desc, deps, files = data
#
# write out the description, revision and dependencies
#
self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
line = 'FILES_%s="' % name
#
# check which directories to make in the temporary directory
#
        dirset = {} # used as a set of directories (kept as a dict for very old Python versions)
for target in files:
dirset[os.path.dirname( target )] = True
#
# generate which files to copy for the target (-dfR because whole directories are also allowed)
#
for target in files:
line += "%s " % target
line += '"'
self.out( line )
self.out( "" )
self.out( 'DESCRIPTION_${PN}-modules="All Python modules"' )
line = 'RDEPENDS_${PN}-modules="'
for name, data in sorted(self.packages.iteritems()):
if name not in ['${PN}-core-dbg', '${PN}-dev']:
line += "%s " % name
self.out( "%s \"" % line )
self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )
def doEpilog( self ):
self.out( """""" )
self.out( "" )
def make( self ):
self.doProlog()
self.doBody()
self.doEpilog()
if __name__ == "__main__":
if len( sys.argv ) > 1:
os.popen( "rm -f ./%s" % sys.argv[1] )
outfile = file( sys.argv[1], "w" )
else:
outfile = sys.stdout
m = MakefileMaker( outfile )
# Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
    # Parameters: name, description, dependencies, filenames
#
m.addPackage( "${PN}-core", "Python Interpreter and core modules (needed!)", "",
"__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
"genericpath.* getopt.* linecache.* new.* " +
"os.* posixpath.* struct.* " +
"warnings.* site.* stat.* " +
"UserDict.* UserList.* UserString.* " +
"lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
"lib-dynload/xreadlines.so types.* platform.* ${bindir}/python*" )
m.addPackage( "${PN}-core-dbg", "Python core module debug information", "${PN}-core",
"config/.debug lib-dynload/.debug ${bindir}/.debug ${libdir}/.debug" )
m.addPackage( "${PN}-dev", "Python Development Package", "${PN}-core",
"${includedir} ${libdir}/libpython2.6.so" ) # package
m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
"${bindir}/idle idlelib" ) # package
m.addPackage( "${PN}-pydoc", "Python Interactive Help Support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
"${bindir}/pydoc pydoc.*" )
m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
"${bindir}/smtpd.*" )
m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
"wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so" )
m.addPackage( "${PN}-bsddb", "Python Berkeley Database Bindings", "${PN}-core",
"bsddb lib-dynload/_bsddb.so" ) # package
m.addPackage( "${PN}-codecs", "Python Codecs, Encodings & i18n Support", "${PN}-core ${PN}-lang",
"codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
m.addPackage( "${PN}-compile", "Python Bytecode Compilation Support", "${PN}-core",
"py_compile.* compileall.*" )
m.addPackage( "${PN}-compiler", "Python Compiler Support", "${PN}-core",
"compiler" ) # package
m.addPackage( "${PN}-compression", "Python High Level Compression Support", "${PN}-core ${PN}-zlib",
"gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )
m.addPackage( "${PN}-crypt", "Python Basic Cryptographic and Hashing Support", "${PN}-core",
"hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )
m.addPackage( "${PN}-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
"lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
m.addPackage( "${PN}-curses", "Python Curses Support", "${PN}-core",
"curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module
m.addPackage( "${PN}-ctypes", "Python C Types Support", "${PN}-core",
"ctypes lib-dynload/_ctypes.so" ) # directory + low level module
m.addPackage( "${PN}-datetime", "Python Calendar and Time support", "${PN}-core ${PN}-codecs",
"_strptime.* calendar.* lib-dynload/datetime.so" )
m.addPackage( "${PN}-db", "Python File-Based Database Support", "${PN}-core",
"anydbm.* dumbdbm.* whichdb.* " )
m.addPackage( "${PN}-debugger", "Python Debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
"bdb.* pdb.*" )
m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects.", "${PN}-lang ${PN}-re",
"difflib.*" )
m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core",
"config distutils" ) # package
m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings.", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
"doctest.*" )
# FIXME consider adding to some higher level package
m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core",
"lib-dynload/_elementtree.so" )
m.addPackage( "${PN}-email", "Python Email Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
"imaplib.* email" ) # package
m.addPackage( "${PN}-fcntl", "Python's fcntl Interface", "${PN}-core",
"lib-dynload/fcntl.so" )
m.addPackage( "${PN}-hotshot", "Python Hotshot Profiler", "${PN}-core",
"hotshot lib-dynload/_hotshot.so" )
m.addPackage( "${PN}-html", "Python HTML Processing", "${PN}-core",
"formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* " )
m.addPackage( "${PN}-gdbm", "Python GNU Database Support", "${PN}-core",
"lib-dynload/gdbm.so" )
m.addPackage( "${PN}-image", "Python Graphical Image Handling", "${PN}-core",
"colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
m.addPackage( "${PN}-io", "Python Low-Level I/O", "${PN}-core ${PN}-math",
"lib-dynload/_socket.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
"pipes.* socket.* ssl.* tempfile.* StringIO.* " )
m.addPackage( "${PN}-json", "Python JSON Support", "${PN}-core ${PN}-math ${PN}-re",
"json" ) # package
m.addPackage( "${PN}-lang", "Python Low-Level Language Support", "${PN}-core",
"lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
"lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
"atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
"tokenize.* traceback.* weakref.*" )
m.addPackage( "${PN}-logging", "Python Logging Support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
"logging" ) # package
m.addPackage( "${PN}-mailbox", "Python Mailbox Format Support", "${PN}-core ${PN}-mime",
"mailbox.*" )
m.addPackage( "${PN}-math", "Python Math Support", "${PN}-core",
"lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
m.addPackage( "${PN}-mime", "Python MIME Handling APIs", "${PN}-core ${PN}-io",
"mimetools.* uu.* quopri.* rfc822.*" )
m.addPackage( "${PN}-mmap", "Python Memory-Mapped-File Support", "${PN}-core ${PN}-io",
"lib-dynload/mmap.so " )
m.addPackage( "${PN}-multiprocessing", "Python Multiprocessing Support", "${PN}-core ${PN}-io ${PN}-lang",
"lib-dynload/_multiprocessing.so multiprocessing" ) # package
m.addPackage( "${PN}-netclient", "Python Internet Protocol Clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
"*Cookie*.* " +
"base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
m.addPackage( "${PN}-netserver", "Python Internet Protocol Servers", "${PN}-core ${PN}-netclient",
"cgi.* *HTTPServer.* SocketServer.*" )
m.addPackage( "${PN}-numbers", "Python Number APIs", "${PN}-core ${PN}-lang ${PN}-re",
"decimal.* numbers.*" )
m.addPackage( "${PN}-pickle", "Python Persistence Support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
"pickle.* shelve.* lib-dynload/cPickle.so" )
m.addPackage( "${PN}-pkgutil", "Python Package Extension Utility Support", "${PN}-core",
"pkgutil.*")
m.addPackage( "${PN}-pprint", "Python Pretty-Print Support", "${PN}-core",
"pprint.*" )
m.addPackage( "${PN}-profile", "Python Basic Profiling Support", "${PN}-core ${PN}-textutils",
"profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )
m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
"re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
m.addPackage( "${PN}-readline", "Python Readline Support", "${PN}-core",
"lib-dynload/readline.so rlcompleter.*" )
m.addPackage( "${PN}-resource", "Python Resource Control Interface", "${PN}-core",
"lib-dynload/resource.so" )
m.addPackage( "${PN}-shell", "Python Shell-Like Functionality", "${PN}-core ${PN}-re",
"cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
"robotparser.*")
m.addPackage( "${PN}-subprocess", "Python Subprocess Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
"subprocess.*" )
m.addPackage( "${PN}-sqlite3", "Python Sqlite3 Database Support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib",
"lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 Database Support Tests", "${PN}-core ${PN}-sqlite3",
"sqlite3/test" )
m.addPackage( "${PN}-stringold", "Python String APIs [deprecated]", "${PN}-core ${PN}-re",
"lib-dynload/strop.so string.*" )
m.addPackage( "${PN}-syslog", "Python Syslog Interface", "${PN}-core",
"lib-dynload/syslog.so" )
m.addPackage( "${PN}-terminal", "Python Terminal Controlling Support", "${PN}-core ${PN}-io",
"pty.* tty.*" )
m.addPackage( "${PN}-tests", "Python Tests", "${PN}-core",
"test" ) # package
m.addPackage( "${PN}-threading", "Python Threading & Synchronization Support", "${PN}-core ${PN}-lang",
"_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
m.addPackage( "${PN}-tkinter", "Python Tcl/Tk Bindings", "${PN}-core",
"lib-dynload/_tkinter.so lib-tk" ) # package
m.addPackage( "${PN}-unittest", "Python Unit Testing Framework", "${PN}-core ${PN}-stringold ${PN}-lang",
"unittest.*" )
m.addPackage( "${PN}-unixadmin", "Python Unix Administration Support", "${PN}-core",
"lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
m.addPackage( "${PN}-xml", "Python basic XML support.", "${PN}-core ${PN}-elementtree ${PN}-re",
"lib-dynload/pyexpat.so xml xmllib.*" ) # package
m.addPackage( "${PN}-xmlrpc", "Python XMLRPC Support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
"xmlrpclib.* SimpleXMLRPCServer.*" )
m.addPackage( "${PN}-zlib", "Python zlib Support.", "${PN}-core",
"lib-dynload/zlib.so" )
m.addPackage( "${PN}-mailbox", "Python Mailbox Format Support", "${PN}-core ${PN}-mime",
"mailbox.*" )
m.make()
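# For orientation only: a minimal sketch of the maker API used above
# (hypothetical implementation, not the real generator). addPackage()
# records a package name, description, runtime dependencies and file
# globs; make() then emits the corresponding manifest variables:
#
#   class SketchMaker:
#       def __init__(self):
#           self.packages = {}
#       def addPackage(self, name, description, dependencies, filenames):
#           self.packages[name] = (description, dependencies, filenames.split())
#       def make(self):
#           for name, (desc, deps, files) in self.packages.items():
#               print 'DESCRIPTION_%s = "%s"' % (name, desc)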
|
xifengchuo/openembedded
|
contrib/python/generate-manifest-2.6.py
|
Python
|
mit
| 14,802
|
[
"VisIt"
] |
0ff9b5dff428e7d56865d823904dd2ea745202112b548919b47f1e052bfc08cb
|
#!/usr/bin/env python
__copyright__ = "Copyright 2015, http://radical.rutgers.edu"
__license__ = "MIT"
import sys
import radical.pilot as rp
import radical.utils as ru
dh = ru.DebugHelper ()
CNT = 0
RUNTIME = 10
SLEEP = 1
CORES = 16
UNITS = 1
SCHED = rp.SCHED_DIRECT_SUBMISSION
RESOURCE = 'xsede.stampede'
PROJECT = 'TG-MCB090174'
QUEUE = 'development'
SCHEMA = 'ssh'
#------------------------------------------------------------------------------
#
def pilot_state_cb (pilot, state):
if not pilot:
return
print "[Callback]: ComputePilot '%s' state: %s." % (pilot.uid, state)
if state == rp.FAILED:
sys.exit (1)
#------------------------------------------------------------------------------
#
def unit_state_cb (unit, state):
if not unit:
return
global CNT
print "[Callback]: unit %s on %s: %s." % (unit.uid, unit.pilot_id, state)
if state in [rp.FAILED, rp.DONE, rp.CANCELED]:
CNT += 1
print "[Callback]: # %6d" % CNT
if state == rp.FAILED:
print "stderr: %s" % unit.stderr
sys.exit(2)
#------------------------------------------------------------------------------
#
def wait_queue_size_cb(umgr, wait_queue_size):
print "[Callback]: wait_queue_size: %s." % wait_queue_size
#------------------------------------------------------------------------------
#
if __name__ == "__main__":
# we can optionally pass session name to RP
if len(sys.argv) > 1:
session_name = sys.argv[1]
else:
session_name = None
# Create a new session. No need to try/except this: if session creation
# fails, there is not much we can do anyways...
session = rp.Session(name=session_name)
print "session id: %s" % session.uid
# all other pilot code is now tried/excepted. If an exception is caught, we
# can rely on the session object to exist and be valid, and we can thus tear
# the whole RP stack down via a 'session.close()' call in the 'finally'
# clause...
try:
pmgr = rp.PilotManager(session=session)
pmgr.register_callback(pilot_state_cb)
pdesc = rp.ComputePilotDescription()
pdesc.resource = RESOURCE
pdesc.cores = CORES
pdesc.project = PROJECT
pdesc.queue = QUEUE
pdesc.runtime = RUNTIME
pdesc.cleanup = False
pdesc.access_schema = SCHEMA
pilot = pmgr.submit_pilots(pdesc)
umgr = rp.UnitManager(session=session, scheduler=SCHED)
umgr.register_callback(unit_state_cb, rp.UNIT_STATE)
umgr.register_callback(wait_queue_size_cb, rp.WAIT_QUEUE_SIZE)
umgr.add_pilots(pilot)
cuds = list()
for unit_count in range(0, UNITS):
cud = rp.ComputeUnitDescription()
cud.pre_exec = [
'module load gromacs',
'echo 2 | trjconv -f tmp.gro -s tmp.gro -o tmpha.gro',
'module load -intel +intel/14.0.1.106',
'export PYTHONPATH=/home1/03036/jp43/.local/lib/python2.7/site-packages',
'module load python/2.7.6',
'export PATH=/home1/03036/jp43/.local/bin:$PATH',
'echo "Using mpirun_rsh: `which mpirun_rsh`"'
]
cud.executable = "/opt/apps/intel14/mvapich2_2_0/python/2.7.6/lib/python2.7/site-packages/mpi4py/bin/python-mpi"
cud.arguments = ["lsdm.py", "-f", "config.ini", "-c",
"tmpha.gro", "-n" "neighbors.nn", "-w", "weight.w"]
cud.cores = 4
cud.mpi = True
cud.input_staging = [
'issue_572_files/config.ini',
'issue_572_files/lsdm.py',
'issue_572_files/tmp.gro'
]
cuds.append(cud)
units = umgr.submit_units(cuds)
umgr.wait_units()
for cu in units:
print "* Task %s state %s, exit code: %s, started: %s, finished: %s" \
% (cu.uid, cu.state, cu.exit_code, cu.start_time, cu.stop_time)
except Exception as e:
# Something unexpected happened in the pilot code above
print "caught Exception: %s" % e
raise
except (KeyboardInterrupt, SystemExit) as e:
# the callback called sys.exit(), which raises SystemExit; we catch it
# here for a clean shutdown. We also catch KeyboardInterrupt, which gets
# raised if the user interrupts the main thread (e.g. via Ctrl-C).
print "need to exit now: %s" % e
finally:
# always clean up the session, no matter if we caught an exception or
# not.
print "closing session"
session.close ()
# the above is equivalent to
#
# session.close (cleanup=True, terminate=True)
#
# it will thus both clean out the session's database record, and kill
# all remaining pilots (none in our example).
#-------------------------------------------------------------------------------
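# Usage note (a sketch based only on the argv handling above): run as
#   python issue_572.py [optional_session_name]
# with radical.pilot installed and credentials for the configured
# xsede.stampede resource in place.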
|
JensTimmerman/radical.pilot
|
tests/issue_572.py
|
Python
|
mit
| 5,093
|
[
"Gromacs"
] |
1fb757cbc30a63ff101ac8f4803770e1a8f550dac5d01122115808fd206ed71a
|
# -*- coding: utf-8 -*-
"""
Visualization tools used without Keras.
Makes performance graphs for training and validation.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
class TrainValPlotter:
"""
Class for plotting train/val curves.
Instructions
------------
1. Use tvp.plot_curves(train, val) once or more on pairs of
train/val data.
2. When all lines are plotted, use tvp.apply_layout() once for proper
scaling, ylims, etc.
"""
def __init__(self):
# White space added below and above points
self.y_lim_padding = [0.10, 0.25]
# Store all plotted points for setting x/y lims
self._xpoints_train = np.array([])
self._xpoints_val = np.array([])
self._ypoints_train = np.array([])
self._ypoints_val = np.array([])
def plot_curves(self,
train_data,
val_data=None,
train_label="training",
val_label="validation",
color=None,
smooth_sigma=None,
tlw=0.5,
vlw=0.5,
vms=3):
"""
Plot a training and optionally a validation line.
The data can contain NaNs.
Parameters
----------
train_data : List
X data [0] and y data [1] of the train curve. Will be plotted as
connected dots.
val_data : List, optional
Optional X data [0] and y data [1] of the validation curve.
Will be plotted as a faint solid line of the same color as train.
train_label : str, optional
Label for the train line in the legend.
val_label : str, optional
Label for the validation line in the legend.
color : str, optional
Color used for the train/val line.
smooth_sigma : int, optional
Apply gaussian blur to the train curve with given sigma.
tlw : float
Linewidth of train curve.
vlw : float
Linewidth of val curve.
vms : float
Markersize of the val curve.
"""
if train_data is None and val_data is None:
raise ValueError(
"Can not plot when no train and val data is given.")
if train_data is not None:
epoch, y_data = train_data
if smooth_sigma is not None:
y_data = gaussian_smooth(y_data, smooth_sigma)
self._xpoints_train = np.concatenate((self._xpoints_train, epoch))
self._ypoints_train = np.concatenate((self._ypoints_train, y_data))
train_plot = plt.plot(
epoch, y_data, color=color, ls='-',
zorder=3, label=train_label, lw=tlw, alpha=0.5)
train_color = train_plot[0].get_color()
else:
train_color = color
if val_data is not None:
self._xpoints_val = np.concatenate((self._xpoints_val,
val_data[0]))
self._ypoints_val = np.concatenate((self._ypoints_val,
val_data[1]))
val_data_clean = skip_nans(val_data)
# val plot always has the same color as the train plot
plt.plot(val_data_clean[0], val_data_clean[1], color=train_color,
marker='o', zorder=3, lw=vlw, markersize=vms, label=val_label)
def apply_layout(self,
title=None,
x_label="Epoch",
y_label=None,
grid=True,
legend=True,
x_lims=None,
y_lims="auto",
x_ticks="auto",
logy=False):
"""
Apply given layout.
Can calculate good y_lims and x_ticks automatically.
Parameters
----------
title : str
Title of the plot.
x_label : str
X label of the plot.
y_label : str
Y label of the plot.
grid : bool
If true, show a grid.
legend : bool
If true, show a legend.
x_lims : List
X limits of the data.
y_lims : List or str
Y limits of the data. "auto" for auto-calculation.
x_ticks : List
Positions of the major x ticks.
logy : bool
If true, make y axis log.
"""
if logy:
plt.yscale("log")
if x_ticks is not None:
if x_ticks == "auto":
all_x_points = np.concatenate((self._xpoints_train,
self._xpoints_val))
x_ticks = get_epoch_xticks(all_x_points)
plt.xticks(x_ticks)
if x_lims is not None:
plt.xlim(x_lims)
if y_lims is not None:
if y_lims == "auto":
y_lims = get_ylims(self._ypoints_train,
self._ypoints_val,
fraction=self.y_lim_padding)
plt.ylim(y_lims)
if legend:
plt.legend(loc='upper right')
plt.xlabel(x_label)
plt.ylabel(y_label)
if title is not None:
title = plt.title(title)
title.set_position([.5, 1.04])
if grid:
plt.grid(True, zorder=0, linestyle='dotted')
def gaussian_smooth(y, sigma, truncate=4):
""" Smooth a 1d ndarray with a gaussian filter. """
# kernel_width = 2 * sigma * truncate + 1
kernel_x = np.arange(-truncate * sigma, truncate * sigma + 1)
kernel = _gauss(kernel_x, 0, sigma)
y = np.pad(np.asarray(y), int(len(kernel)/2), "edge")
blurred = np.convolve(y, kernel, "valid")
return blurred
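# Worked example for the kernel size above: with sigma=2 and truncate=4,
# kernel_x runs from -8 to 8 (17 taps), i.e. 2 * sigma * truncate + 1.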
def _gauss(x, mu=0, sigma=1):
return (1/(np.sqrt(2*np.pi)*sigma)) * np.exp(-np.power(x - mu, 2.) / (2 * np.power(sigma, 2.)))
def plot_history(train_data,
val_data=None,
train_label="training",
val_label="validation",
color=None,
**kwargs):
"""
Plot the train/val curves in a single plot.
Kept for backward compatibility; functionality moved to TrainValPlotter.
"""
tvp = TrainValPlotter()
tvp.plot_curves(train_data,
val_data,
train_label=train_label,
val_label=val_label,
color=color)
tvp.apply_layout(**kwargs)
def skip_nans(data):
"""
Skip over nan values, so that all dots are connected.
Parameters
----------
data : List
Contains x and y data as ndarrays. The y values may contain nans.
Returns
-------
data_clean : List
Contains x and y data as ndarrays. Points with y=nan are skipped.
"""
not_nan = ~np.isnan(data[1])
data_clean = data[0][not_nan], data[1][not_nan]
return data_clean
def get_ylims(y_points_train, y_points_val=None, fraction=0.25):
"""
Get the y limits for the summary plot.
For the training data, limits are calculated while ignoring data points
which are far from the median (in terms of the median distance
from the median).
This is because there are outliers sometimes in the training data,
especially early on in the training.
Parameters
----------
y_points_train : List
y data of the train curve.
y_points_val : List or None
Y data of the validation curve.
fraction : float or List
How much whitespace of the total y range is added below and above
the lines.
Returns
-------
y_lims : tuple
Minimum, maximum of the data.
"""
assert not (y_points_train is None and
y_points_val is None), "train and val data are None"
def reject_outliers(data, threshold):
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d / mdev if mdev else np.zeros(len(d))  # avoid scalar-bool indexing when mdev == 0
no_outliers = data[s < threshold]
lims = np.amin(no_outliers), np.amax(no_outliers)
return lims
mins, maxs = [], []
if y_points_train is not None and len(y_points_train) != 0:
y_train = y_points_train[~np.isnan(y_points_train)]
y_lims_train = reject_outliers(y_train, 5)
mins.append(y_lims_train[0])
maxs.append(y_lims_train[1])
if y_points_val is not None and len(y_points_val) != 0:
y_val = y_points_val[~np.isnan(y_points_val)]
if len(y_val) == 1:
y_lim_val = y_val[0], y_val[0]
else:
y_lim_val = np.amin(y_val), np.amax(y_val)
mins.append(y_lim_val[0])
maxs.append(y_lim_val[1])
if len(mins) == 1:
y_lims = (mins[0], maxs[0])
else:
y_lims = np.amin(mins), np.amax(maxs)
if y_lims[0] == y_lims[1]:
y_range = 0.1 * y_lims[0]
else:
y_range = y_lims[1] - y_lims[0]
try:
fraction = float(fraction)
padding = [fraction, fraction]
except TypeError:
# is a list
padding = fraction
if padding != [0., 0.]:
y_lims = (y_lims[0] - padding[0] * y_range, y_lims[1] + padding[1] * y_range)
return y_lims
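# Example of the padding arithmetic above: raw limits (0.0, 1.0) with the
# default fraction of 0.25 give returned limits of (-0.25, 1.25).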
def get_epoch_xticks(x_points):
"""
Calculates the xticks for the train and validation summary plot.
One tick per epoch; fewer ticks as the number of epochs grows.
Parameters
----------
x_points : List
A list of the x coordinates of all points.
Returns
-------
x_ticks_major : numpy.ndarray
Array containing the ticks.
"""
if len(x_points) == 0:
raise ValueError("x-coordinates are empty!")
minimum, maximum = np.amin(x_points), np.amax(x_points)
start_epoch, end_epoch = np.floor(minimum), np.ceil(maximum)
# thin out the ticks as n_epochs grows: step 2 from 20 epochs, 3 from 40, ...
n_epochs = end_epoch - start_epoch
x_ticks_stepsize = 1 + np.floor(n_epochs / 20.)
x_ticks_major = np.arange(
start_epoch, end_epoch + x_ticks_stepsize, x_ticks_stepsize)
return x_ticks_major
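# Example: for epochs 0..50, n_epochs = 50, the step is 1 + floor(50/20) = 3,
# so the ticks are 0, 3, 6, ..., 51.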
def update_summary_plot(orga):
"""
Plot and save all metrics of the given validation- and train-data
into a pdf file, each metric in its own plot.
If metric pairs of a variable and its error are found (e.g. e_loss
and e_err_loss), they will have the same color and appear back to
back in the plot.
Parameters
----------
orga : object Organizer
Contains all the configurable options in the OrcaNet scripts.
"""
plt.ioff()
pdf_name = orga.io.get_subfolder("plots", create=True) + "/summary_plot.pdf"
# Extract the names of the metrics
all_metrics = orga.history.get_metrics()
# Sort them
all_metrics = sort_metrics(all_metrics)
# Plot them w/ custom color cycle
colors = ['#000000', '#332288', '#88CCEE', '#44AA99', '#117733', '#999933',
'#DDCC77', '#CC6677', '#882255', '#AA4499', '#661100', '#6699CC',
'#AA4466', '#4477AA'] # ref. personal.sron.nl/~pault/
color_counter = 0
with PdfPages(pdf_name) as pdf:
for metric_no, metric in enumerate(all_metrics):
# If this metric is an err metric of a variable, color it the same
if all_metrics[metric_no-1] == metric.replace("_err", ""):
color_counter -= 1
orga.history.plot_metric(
metric, color=colors[color_counter % len(colors)])
color_counter += 1
pdf.savefig()
plt.clf()
orga.history.plot_lr()
color_counter += 1
pdf.savefig()
plt.close()
def sort_metrics(metric_names):
"""
Sort a list of metrics, so that errors are right after their variable.
The format of the metric names has to be e.g. e_loss and e_err_loss
for this to work.
Example
----------
>>> sort_metrics( ['e_loss', 'loss', 'e_err_loss', 'dx_err_loss'] )
['e_loss', 'e_err_loss', 'loss', 'dx_err_loss']
Parameters
----------
metric_names : List
List of metric names.
Returns
-------
sorted_metrics : List
List of sorted metric names with the same length as the input.
"""
sorted_metrics = [0] * len(metric_names)
counter = 0
for metric_name in metric_names:
if "err_" in metric_name:
if metric_name.replace("err_", "") not in metric_names:
sorted_metrics[counter] = metric_name
counter += 1
continue
sorted_metrics[counter] = metric_name
counter += 1
err_loss = metric_name.split("_loss")[0]+"_err_loss"
if err_loss in metric_names:
sorted_metrics[counter] = err_loss
counter += 1
assert 0 not in sorted_metrics, "Something went wrong with the sorting of " \
"metrics! Given was {}, output was " \
"{}. ".format(metric_names, sorted_metrics)
return sorted_metrics
|
ViaFerrata/DL_pipeline_TauAppearance
|
orcanet/utilities/visualization.py
|
Python
|
agpl-3.0
| 13,254
|
[
"Gaussian"
] |
7ced705645c8631a79491b20ce1b80428c8ba2c3ec1ff3933dd8bda19178aca7
|
__author__ = 'chris hamm'
#NetworkClient_r9D
#Created: 1/10/2015
#Added functions to parse chunk objects (it has since been decided that these will not be needed)
#Added a command logging system that records which commands have been received from and sent to the server/controller. These are displayed after the client shuts down and the socket is closed
#-Record of commands sent to Controller from the Client
#-Record of Commands sent to Server from the Client
#-Record of Commands received from the Controller
#-Record of Commands received from the Server
import socket
import platform
from Chunk import Chunk
#===================================================================
#Client constructor/class definition
#===================================================================
#CLASS NAME WILL NOT CHANGE BETWEEN VERSIONS
class NetworkClient():
#class variables
pipe = 0
port = 49200
clientSocket = 0
serverSaysKeepSearching = True
serverIssuedDoneCommand = False
serverIP = "127.0.1.1"
myOperatingSystem = None
myIPAddress = "127.0.1.1"
chunk = Chunk()
key = 0
recordOfOutboundCommandsFromClientToController = {} #dictionary that keeps a record of how many commands were sent to the controller
recordOfOutboundCommandsFromClientToServer = {} #dictionary that keeps a record of how many commands were sent to the server
recordOfInboundCommandsFromController = {} #dictionary that keeps a record of how many commands were received from the controller
recordOfInboundCommandsFromServer = {} #dictionary that keeps a record of how many commands were received from the server
#-----------------------------------------------------------------------
#constructor
#-----------------------------------------------------------------------
def __init__(self, pipeendconnectedtocontroller):
self.pipe = pipeendconnectedtocontroller
self.clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "STATUS: client socket created successfully"
try: #Main Client Loop
print "STATUS: Entering Main Client Loop"
#......................................................................
#getOS try block
#......................................................................
try:
print "*************************************"
print " Network Client"
print "*************************************"
print "OS DETECTION:"
#Detecting Windows
if platform.system() == "Windows":
print platform.system()
self.myOperatingSystem = "Windows"
print platform.win32_ver()
#Detecting GNU/Linux
elif platform.system() == "Linux":
print platform.system()
self.myOperatingSystem = "Linux"
print platform.dist()
#Detecting OSX
elif platform.system() == "Darwin":
print platform.system()
self.myOperatingSystem = "Darwin"
print platform.mac_ver()
#Detecting an OS that is not listed
else:
print platform.system()
self.myOperatingSystem = "Other"
print platform.version()
print platform.release()
print "*************************************"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in getOS try block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "========================================================================================"
#......................................................................
#get the client IP address of this machine
#......................................................................
try: #get client IP try block
print "STATUS: Getting your Network IP address"
if(platform.system()=="Windows"):
self.myIPAddress = socket.gethostbyname(socket.gethostname())
print str(self.myIPAddress)
elif(platform.system()=="Linux"):
import fcntl
import struct
import os
def get_interface_ip(ifname):
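# 0x8915 is the Linux SIOCGIFADDR ioctl; the call below returns the
# IPv4 address currently bound to the interface named by ifname.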
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', ifname[:15]))[20:24])
#end of def
def get_lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = ["eth0","eth1","eth2","wlan0","wlan1","wifi0","ath0","ath1","ppp0"]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
print "IP address was retrieved from the " + str(ifname) + " interface."
break
except IOError:
pass
return ip
#end of def
self.myIPAddress = get_lan_ip()
print self.myIPAddress
elif(platform.system()=="Darwin"):
self.myIPAddress= socket.gethostbyname(socket.gethostname())
print self.myIPAddress
else:
#NOTE MAY REMOVE THIS AND USE THE LINUX IP DETECTION METHOD FOR THIS IN THE FUTURE
print "INFO: The system has detected that you are not running Windows, OS X, or Linux."
print "INFO: Using generic IP address retrieval method"
self.myIPAddress = socket.gethostbyname(socket.gethostname())
print self.myIPAddress
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in get client IP try block"
print type(inst)
print inst.args
print inst
print "========================================================================================"
#......................................................................
#Setup the initial Command record values in the dictionaries
#......................................................................
self.recordOfOutboundCommandsFromClientToController['done'] = 0
self.recordOfOutboundCommandsFromClientToController['connected'] = 0
self.recordOfOutboundCommandsFromClientToController['doingStuff'] = 0
self.recordOfOutboundCommandsFromClientToServer['NEXT'] = 0
self.recordOfOutboundCommandsFromClientToServer['FOUNDSOLUTION'] = 0
self.recordOfOutboundCommandsFromClientToServer['CRASHED'] = 0
self.recordOfInboundCommandsFromController['serverIP'] = 0
self.recordOfInboundCommandsFromServer['DONE'] = 0
#......................................................................
#Retrieve the server's IP from the controller class
#......................................................................
try: #get serverIP try block
print "STATUS: Attempting to get serverIP from controller"
self.receiveServerIPFromController()
print "STATUS: Successfully received serverIP from controller"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in serverIP try block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "========================================================================================"
#......................................................................
#Connect to the Server
#......................................................................
try:
print "STATUS: Attempting to connect to server"
self.clientSocket.connect((self.serverIP, self.port))
print "STATUS: Successfully connected to server"
except socket.timeout as msg:
print "========================================================================================"
print "ERROR: the connection has timed out. Check to see if you entered the correct IP Address."
print "Error code: " + str(msg[0]) + " Message: " + msg[1]
print "Socket timeout set to: " + self.clientSocket.gettimeout + " seconds"
print "========================================================================================"
except socket.error as msg:
print "========================================================================================"
print "ERROR: Failed to connect to server"
print "Error code: " + str(msg[0]) + " Message: " + msg[1]
raise Exception("Failed to connect to server")
#print "========================================================================================"
self.sendConnectedCommandToController()
#......................................................................
#Client primary while loop
#......................................................................
try:
while self.serverSaysKeepSearching:
self.clientSocket.settimeout(2.0)
######################## SERVER-CLIENT Communication #############################################
#///////////////////////////////////////////////////////////////////////
#checking for server commands
#///////////////////////////////////////////////////////////////////////
try: #checking for server commands try block
print "STATUS: Checking for server commands..."
theInput = self.clientSocket.recv(2048)
if(len(theInput) > 1):
if theInput == "DONE":
self.sendDoneCommandToController()
print " "
print "INFO: Server has issued the DONE command."
print " "
self.serverSaysKeepSearching = False
self.serverIssuedDoneCommand = True
break
#If the server wants to give us the next chunk, take it
#Server should be sending "NEXT" -> params -> data in seperate strings all to us
elif theInput == "NEXT":
try:
#and store it locally till controller is ready for it
self.chunk.params = self.clientSocket.recv(2048)
self.chunk.data = self.clientSocket.recv(2048)
#let controller know we're ready to give it a chunk
self.sendDoingStuffCommandToController()
#send chunk object to controller
self.pipe.send(self.chunk)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the checking for server commands Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
else:
print "ERROR: Received Unknown Command From The Server"
print "The unknown command: '" + theInput + "'"
else:
print "INFO: Received Empty String From Server."
except socket.timeout as inst:
print "STATUS: Socket timed out. No new server command"
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the checking for server commands Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
########################## Client - Controller Communication #########################################
#///////////////////////////////////////////////////////////////////////
#check for controller commands
#///////////////////////////////////////////////////////////////////////
print "STATUS: Checking for controller commands... "
if(self.pipe.poll()):
recv = self.pipe.recv() #Gets stuck on this line ##########
print "INFO: Received a controller command"
#if controller says next, say "next" to server
if(recv == "next"):
print "INFO: Received next command from controller"
self.sendNextCommandToServer()
#if controller says "found" then send "found" and the key to the server
elif(recv == "found"):
print "INFO: Received found command from controller"
print "STATUS: Retrieving key"
if(self.pipe.poll()):
self.key = self.pipe.recv()
print "INFO: the key has been received"
self.sendFoundSolutionToServer()
else:
print "ERROR: unknown command was received"
print "The unknown command: '" + recv + "'"
else:
print "INFO: No command was received from the controller class"
#......................................................................
#end of server says keep searching while loop
#......................................................................
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client Primary Loop Try Block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "============================================================================================="
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Main Client Loop Try Block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "============================================================================================="
finally:
if(self.serverIssuedDoneCommand == False):
print "ERROR: Quitting before Done Command was Issued. Sending CRASH Command to server."
self.sendCrashedCommandToServer()
print "INFO: CRASH Command was sent to the server"
#SEND MESSAGE AGAIN JUST IN CASE
self.sendCrashedCommandToServer()
print "INFO: Aux Crash Command was sent to the server"
print "Closing the socket"
self.clientSocket.close() #closes the socket safely
print "Socket has been closed"
print " "
try:
print " "
print "COMMAND RECORDS: Part 1/4"
print "Printing Outbound Commands From Client to Controller"
print "-----------------------------------------------------"
#print done
if(self.recordOfOutboundCommandsFromClientToController['done'] > 0):
print "# of done Commands sent to Controller: " + str(self.recordOfOutboundCommandsFromClientToController['done'])
else:
print "# of done Commands sent to Controller: 0"
#print connected
if(self.recordOfOutboundCommandsFromClientToController['connected'] > 0):
print "# of connected Commands sent to Controller: " + str(self.recordOfOutboundCommandsFromClientToController['connected'])
else:
print "# of connected Commands sent to Controller: 0"
#print doingStuff
if(self.recordOfOutboundCommandsFromClientToController['doingStuff'] > 0):
print "# of doingStuff Commands sent to Controller: " + str(self.recordOfOutboundCommandsFromClientToController['doingStuff'])
else:
print "# of doingStuff Commands sent to Controller: 0"
print "(END OF OUTBOUND COMMANDS FROM CLIENT TO CONTROLLER)"
print "-------------------------------------------------------"
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Finally Block, Print Outbound commands from client to controller section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "============================================================================================="
try:
print " "
print "COMMAND RECORDS: Part 2/4"
print "Printing Outbound Commands From Client To Server"
print "--------------------------------------------------"
#print NEXT
if(self.recordOfOutboundCommandsFromClientToServer['NEXT'] > 0):
print "# of NEXT Commands sent to Server: " + str(self.recordOfOutboundCommandsFromClientToServer['NEXT'])
else:
print "# of NEXT Commands sent to Server: 0"
#print FOUNDSOLUTION
if(self.recordOfOutboundCommandsFromClientToServer['FOUNDSOLUTION'] > 0):
print "# of FOUNDSOLUTION Commands sent to Server: " + str(self.recordOfOutboundCommandsFromClientToServer['FOUNDSOLUTION'])
else:
print "# of FOUNDSOLUTION Commands sent to to Server: 0"
#print CRASHED
if(self.recordOfOutboundCommandsFromClientToServer['CRASHED'] > 0):
print "# of CRASHED Commands sent to Server: " + str(self.recordOfOutboundCommandsFromClientToServer['CRASHED'])
else:
print "# of CRASHED Commands sent to Server: 0"
print "(END OF OUTBOUND COMMANDS FROM CLIENT TO SERVER)"
print "--------------------------------------------------"
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Finally Block, Print Outbound Commands From Client to Server Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "============================================================================================="
try:
print " "
print "COMMAND RECORDS: Part 3/4"
print "Printing Inbound Commands From The Controller"
print "-----------------------------------------------"
#print serverIP
if(self.recordOfInboundCommandsFromController['serverIP'] > 0):
print "# of serverIP Commands received from Controller: " + str(self.recordOfInboundCommandsFromController['serverIP'])
else:
print "# of serverIP Commands received from Controller: 0"
print "(END OF INBOUND COMMANDS FROM CONTROLLER)"
print "-----------------------------------------------"
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Finally Block, Print Inbound Commands from Controller Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "============================================================================================="
try:
print " "
print "COMMAND RECORDS: Part 4/4"
print "Printing Inbound Commands from the Server"
print "-----------------------------------------------"
#print DONE
if(self.recordOfInboundCommandsFromServer['DONE'] > 0):
print "# of DONE Commands received from the Server: " + str(self.recordOfInboundCommandsFromServer['DONE'])
else:
print "# of DONE Commands received from the Server: 0"
print "(END OF INBOUND COMMANDS FROM THE SERVER)"
print "------------------------------------------------"
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Finally Block, Print Inbound Commands from the Server Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "============================================================================================="
#-----------------------------------------------------------------------
#End of constructor block
#-----------------------------------------------------------------------
#======================================================================================
#CLIENT-SERVER COMMUNICATION FUNCTIONS
#This section contains methods the client will use to communicate with the server.
#======================================================================================
#-----------------------------------------------------------------------
#Outbound communication functions
#-----------------------------------------------------------------------
#......................................................................
#NEXT
#......................................................................
def sendNextCommandToServer(self):
#sends the NEXT command to the server
try:
self.clientSocket.send("NEXT " + self.myIPAddress)
print "INFO: The NEXT command was sent to the server"
self.recordOfOutboundCommandsFromClientToServer['NEXT'] = (self.recordOfOutboundCommandsFromClientToServer['NEXT'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Server sendNextCommand Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#......................................................................
#FOUNDSOLUTION
#......................................................................
def sendFoundSolutionToServer(self):
#sends the FOUNDSOLUTION command to the server, and key
try:
self.clientSocket.send("FOUNDSOLUTION " + self.myIPAddress)
self.clientSocket.send(self.key)
print "INFO: The FOUNDSOLUTION command was sent to the server as well as the key"
self.recordOfOutboundCommandsFromClientToServer['FOUNDSOLUTION'] = (self.recordOfOutboundCommandsFromClientToServer['FOUNDSOLUTION'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Server sendFoundSolution Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#......................................................................
#CRASHED
#......................................................................
def sendCrashedCommandToServer(self):
#sends the CRASHED command to the server
try:
self.clientSocket.send("CRASHED " + self.myIPAddress)
print " "
print "INFO: The IP Address of the crashed client was sent to the server."
print " "
self.recordOfOutboundCommandsFromClientToServer['CRASHED'] = (self.recordOfOutboundCommandsFromClientToServer['CRASHED'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Server sendCrashedCommand Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#......................................................................
#INVALIDCOMMAND (No longer used, Throws an Error instead)
#......................................................................
'''def sendInvalidCommandToServer(self):
#sends INVALIDCOMMAND command to server
try:
self.clientSocket.send("INVALIDCOMMAND")
print "INFO: The INVALIDCOMMAND command was sent to the server"
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Server sendInvalidCommand Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "=============================================================================================" '''
#-----------------------------------------------------------------------
#Inbound communication functions
#-----------------------------------------------------------------------
#......................................................................
#DONE
#......................................................................
def checkForDoneCommand(self, inboundString):
try:
if inboundString == "DONE":
print "INFO: Received the DONE command"
self.recordOfInboundCommandsFromServer['DONE'] = (self.recordOfInboundCommandsFromServer['DONE'] + 1)
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Server checkForDoneCommand Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#next part of problem
#not sure what to check for here
#......................................................................
#INVALIDCOMMAND
#......................................................................
def checkForInvalidCommand(self, inboundString):
try:
if inboundString == "INVALIDCOMMAND":
print "INFO: Received the INVALIDCOMMAND command"
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Server checkForInvalidCommand Function Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#======================================================================================
#CLIENT-CONTROLLER COMMUNICATION FUNCTIONS
#This section contains methods the client will use to communicate with the controller class
#======================================================================================
#-----------------------------------------------------------------------
#Outbound communication functions with controller
#-----------------------------------------------------------------------
#......................................................................
#done
#......................................................................
def sendDoneCommandToController(self):
try:
self.pipe.send("done")
print "INFO: The DONE command was sent to the Controller"
self.recordOfOutboundCommandsFromClientToController['done'] = (self.recordOfOutboundCommandsFromClientToController['done'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Controller sendDoneCommand Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#......................................................................
#connected
#......................................................................
def sendConnectedCommandToController(self):
try:
self.pipe.send("connected")
print "INFO: The CONNECTED command was sent to the Controller"
self.recordOfOutboundCommandsFromClientToController['connected'] = (self.recordOfOutboundCommandsFromClientToController['connected'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Controller sendConnectedCommand Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#......................................................................
#doingStuff
#......................................................................
def sendDoingStuffCommandToController(self):
try:
self.pipe.send("doingStuff")
print "INFO: The DOINGSTUFF command was sent to the Controller"
self.recordOfOutboundCommandsFromClientToController['doingStuff'] = (self.recordOfOutboundCommandsFromClientToController['doingStuff'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Controller sendDoingStuffCommand Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#---------------------------------------------------------------------
#Inbound communications from Controller
#---------------------------------------------------------------------
#......................................................................
#serverIP
#......................................................................
def receiveServerIPFromController(self):
try:
#self.pipe.send("doingStuff")
print "INFO: Waiting to receive the serverIP from Controller (function block)"
self.serverIP = self.pipe.recv()
print "INFO: The ServerIP was received from the Controller (function block)"
self.recordOfInboundCommandsFromController['serverIP'] = (self.recordOfInboundCommandsFromController['serverIP'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Client-Controller receiveServerIP Function Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#==================================================================================================
#CHUNK PARSING FUNCTIONS
#==================================================================================================
#-------------------------------------------------------------------------------------------------
#Determine the method being used (bruteforce,dictionary,rainbowmaker,rainbowuser)
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Determine the algorithm being used (md5,sha1,sha256,sha512)
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Obtain the hash code
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Determine the Alphabet Choice (a,A,m,M,d)
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Determine the minCharacters (1,10,16)
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Determine the maxCharacters (1,10,16)
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Determine the Prefix (adf,234,qw3#k)
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Determine the File Location (0,1213,23665)
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Determine the Width (1,100,100000)
#-------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
#Determine the Height (1,100,10000)
#-------------------------------------------------------------------------------------------------
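#A hypothetical sketch of one such parser, assuming chunk.params were a
#comma-separated string "method,algorithm,hash,..." (a format NOT confirmed
#anywhere in this file):
#
# def parseMethodFromChunk(self):
# return self.chunk.params.split(",")[0]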
|
COCS4950G7/COSC4950
|
Source/Rainbow/NetworkClient.py
|
Python
|
gpl-3.0
| 40,561
|
[
"ADF"
] |
cb538d13c8adaa4cbd0b429d1663a11dd631429e94caf2421c42042a34cf2100
|
"""
Bistability with NaP
Reference:
Wang X-J (2008)
Attractor network models
In Encyclopedia of Neuroscience, volume 1, pp. 667-679 Edited by Squire LR. Oxford: Academic Press.
@author: Guangyu Robert Yang @ 2017/4
"""
from __future__ import division
from collections import OrderedDict
import random as pyrand # Import before Brian floods the namespace
# Once your code is working, turn units off for speed
# import brian_no_units
from brian import *
# Make Brian faster
set_global_preferences(
useweave=True,
usecodegen=True,
usecodegenweave=True,
usecodegenstateupdate=True,
usenewpropagate=True,
usecodegenthreshold=True,
gcc_options=['-ffast-math', '-march=native']
)
#=========================================================================================
# Equations
#=========================================================================================
equation = '''
dV/dt = (-g_L*(V-V_L) -g_NaP*m_NaP*(V-V_Na) + I) / C_m : mV
m_NaP = 1./(1+exp(-(V+45*mV)/(5*mV))) : 1
I : amp
'''
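# In the equation above, m_NaP is the instantaneous steady-state activation
# of the persistent sodium current: half-activated at V = -45 mV with a
# 5 mV slope factor. The interplay of this inward current with the leak
# current is what gives the membrane two stable resting states.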
#=========================================================================================
# Model Parameters
#=========================================================================================
modelparamsLIF = dict(
V_L = -70*mV,
Vth = 100*mV, # disabling spiking
Vreset = -55*mV,
g_L = 25*nS,
tau_m = 20*ms,
C_m = 0.5*nF,
tau_ref= 2*ms,
V_Na = 55*mV,
g_NaP = 15*nS
)
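# Consistency check on the values above: C_m / g_L = 0.5 nF / 25 nS = 20 ms,
# matching the tau_m entry.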
#=========================================================================================
# Model
#=========================================================================================
class Model(NetworkOperation):
def __init__(self, modelparams='LIF', dt=0.02*ms, n_neuron=1, stim=None):
#---------------------------------------------------------------------------------
# Initialize
#---------------------------------------------------------------------------------
# Create clocks
clocks = OrderedDict()
clocks['main'] = Clock(dt)
clocks['mons'] = Clock(0.1*ms)
super(Model, self).__init__(clock=clocks['main'])
#---------------------------------------------------------------------------------
# Complete the model specification
#---------------------------------------------------------------------------------
# Model parameters
if isinstance(modelparams, str):
if modelparams == 'LIF':
params = modelparamsLIF.copy()
else:
raise ValueError('Unknown model params')
elif isinstance(modelparams, dict):
params = modelparams.copy()
else:
raise ValueError('Unknown modelparams type')
#---------------------------------------------------------------------------------
# Neuron populations
#---------------------------------------------------------------------------------
net = OrderedDict() # Network objects
net['neuron'] = NeuronGroup(n_neuron,
Equations(equation, **params),
threshold=params['Vth'],
reset=params['Vreset'],
refractory=params['tau_ref'],
clock=clocks['main'],
order=2, freeze=True)
#---------------------------------------------------------------------------------
# External input
#---------------------------------------------------------------------------------
if stim is not None:
net['neuron'].I = stim
#---------------------------------------------------------------------------------
# Record spikes
#---------------------------------------------------------------------------------
mons = OrderedDict()
var_list = ['V']
mons['spike'] = SpikeMonitor(net['neuron'], record=True)
mons['pop'] = PopulationRateMonitor(net['neuron'], bin=0.1)
for var in var_list:
mons[var] = StateMonitor(net['neuron'], var, record=True, clock=clocks['mons'])
#---------------------------------------------------------------------------------
# Setup
#---------------------------------------------------------------------------------
self.params = params
self.net = net
self.mons = mons
self.clocks = clocks
self.n_neuron = n_neuron
# Add network objects and monitors to NetworkOperation's contained_objects
self.contained_objects += self.net.values() + self.mons.values()
def reinit(self, seed=123):
# Re-initialize random number generators
pyrand.seed(seed)
np.random.seed(seed)
# Reset network components, monitors, and clocks
for n in self.net.values() + self.mons.values() + self.clocks.values():
n.reinit()
# Randomly initialize membrane potentials
self.net['neuron'].V = self.params['V_L']
#/////////////////////////////////////////////////////////////////////////////////////////
if __name__ == '__main__':
dt = 0.02*ms
T = 0.5*second
n_neuron = 1
modelparams = 'LIF'
# Set up the stimulus
dt_stim = 1*ms
i_stim = int(T/dt_stim)+1
t_stim = np.arange(i_stim)/i_stim*T
stim = np.zeros(len(t_stim))
stim[(50*ms<t_stim)*(t_stim<100*ms)] = 1.0*nA
stim[(350*ms<t_stim)*(t_stim<400*ms)] =-1.0*nA
stim_ = TimedArray(stim, dt=dt_stim)
# Setup the network
model = Model(modelparams, dt, n_neuron, stim_)
network = Network(model)
model.reinit(seed=1234)
# Run the network
network.run(T, report='text')
# Plot the results
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(model.mons['V'].times/ms, model.mons['V'][0]/mV)
plt.xlabel('Time (ms)')
plt.ylabel('Voltage (mV)')
plt.subplot(2, 1, 2)
plt.plot(t_stim/ms, stim/nA)
plt.xlabel('Time (ms)')
plt.ylabel('Input (nA)')
plt.savefig('Bistability_NaP_trace.pdf')
plt.show()
|
xjwanglab/book
|
BistabilityNaP/BistabilityNaP.py
|
Python
|
mit
| 6,185
|
[
"Brian",
"NEURON"
] |
f5a3da6d9cb30d2a2aab6509ae659310ed06b869742c645a1661559d9df33c5c
|
import subprocess
import MySQLdb
import os
import shutil
import json
import uuid
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein
from biokbase.workspace.client import Workspace as workspaceService
from GenomeAnnotationAPI.GenomeAnnotationAPIClient import GenomeAnnotationAPI
class PangenomeOrthomclBuilder:
'''
Module Name:
PangenomeOrthomclBuilder
'''
def __init__(self, scratch, workspaceURL, params, token, provenance):
self.scratch = scratch
self.workspaceURL = workspaceURL
self.params = params
self.token = token
self.provenance = provenance
self.plbin = "/kb/deployment/plbin"
self.log = ""
self.ws = workspaceService(self.workspaceURL, token=self.token)
def run(self):
self.log_line("Input parameters: " + json.dumps(self.params))
if os.path.exists(self.scratch):
shutil.rmtree(self.scratch)
os.makedirs(self.scratch)
self.startup_mysql()
self.prepare_mysql_db()
orthomcl_cfg = self.prepare_orthomcl_config()
self.orthomcl_install_schema(orthomcl_cfg)
genomeset = self.load_genomeset_object()
genome_refs = self.prepare_genome_refs(genomeset)
compliant_fasta_dir = self.scratch + "/compliantFasta"
feature_info = self.load_genome_features_prepare_fasta(genome_refs, compliant_fasta_dir)
self.orthomcl_filter_fasta(compliant_fasta_dir)
protdb = "goodProteins.fasta" # created by orthomclFilterFasta
blast_output = self.run_blast(protdb)
sim_seq_file = self.orthomcl_blast_parser(compliant_fasta_dir, blast_output)
self.load_blast_output_to_db(orthomcl_cfg, sim_seq_file)
self.orthomcl_pairs(orthomcl_cfg)
self.prepare_mcl_input(orthomcl_cfg)
mcl_output_file = self.run_mcl()
groups_file = self.orthomcl_group_mcl_output(mcl_output_file)
        orthologs = []
        ids_in_orths = {}
cluster_ind = self.parse_orthomcl_groups(groups_file, feature_info, orthologs,
ids_in_orths)
self.add_single_gene_families(feature_info, orthologs, ids_in_orths, cluster_ind)
return self.save_pangenome_and_report(genome_refs, orthologs)
def startup_mysql(self):
self.log_line("Starting mysql service")
self.log_process(subprocess.Popen(["service", "mysql", "start"],
cwd=self.scratch, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
def prepare_mysql_db(self):
self.log_line("Preparing database")
        db = MySQLdb.connect(host="localhost", user="root", passwd="12345")
cur = db.cursor()
cur.execute("DROP DATABASE IF EXISTS orthomcl")
cur.execute("CREATE DATABASE orthomcl")
cur.close()
db.close()
    def prepare_orthomcl_config(self):
self.log_line("Preparing orthomcl config file")
orthomcl_cfg = self.scratch + "/orthomcl.cfg"
f = open(orthomcl_cfg, "w")
f.write("dbVendor=mysql\n");
f.write("dbConnectString=dbi:mysql:orthomcl:mysql_local_infile=1:localhost:" +
"3306\n")
f.write("dbLogin=root\n")
f.write("dbPassword=12345\n")
f.write("similarSequencesTable=SimilarSequences\n")
f.write("orthologTable=Ortholog\n")
f.write("inParalogTable=InParalog\n")
f.write("coOrthologTable=CoOrtholog\n")
f.write("interTaxonMatchView=InterTaxonMatch\n")
f.write("percentMatchCutoff=50\n")
f.write("evalueExponentCutoff=-5\n")
f.write("oracleIndexTblSpc=NONE\n")
f.close()
return orthomcl_cfg
def orthomcl_install_schema(self, orthomcl_cfg):
self.log_line("Running orthomclInstallSchema")
self.log_process(subprocess.Popen(["perl", self.plbin + "/orthomclInstallSchema",
orthomcl_cfg], cwd=self.scratch, stdout=subprocess.PIPE,
stderr=subprocess.PIPE))
def load_genomeset_object(self):
genomeset = None
if "input_genomeset_ref" in self.params and self.params["input_genomeset_ref"] is not None:
self.log_line("Loading GenomeSet object from workspace")
genomeset = self.ws.get_objects([{"ref": self.params["input_genomeset_ref"]}])[0]["data"]
return genomeset
def prepare_genome_refs(self, genomeset):
self.log_line("Preparing genome refs")
genome_refs = []
if genomeset is not None:
for param_key in genomeset["elements"]:
genome_refs.append(genomeset["elements"][param_key]["ref"])
self.log_line("Genome references from genome set: " + ", ".join(genome_refs))
if "input_genome_refs" in self.params and self.params["input_genome_refs"] is not None:
for genome_ref in self.params["input_genome_refs"]:
if genome_ref is not None:
genome_refs.append(genome_ref)
self.log_line("Final list of genome references: " + ", ".join(genome_refs))
if len(genome_refs) < 2:
raise ValueError("Number of genomes should be more than 1")
if len(genome_refs) > 20:
self.log_line("WARNING! Number of genomes exceeds 20, which can make " +
"all-against-all blastp working unexpectedly long.")
return genome_refs
def load_genome_features_prepare_fasta(self, genome_refs, compliant_fasta_dir):
feature_info = {}
os.makedirs(compliant_fasta_dir)
for genome_pos, genome_ref in enumerate(genome_refs):
############################# Genome loading ##########################
self.log_line("Loading Genome object from workspace for ref [" +
genome_ref + "]")
info = self.ws.get_object_info_new({"objects": [{"ref": genome_ref}]})[0]
genome_ref = str(info[6]) + "/" + str(info[0]) + "/" + str(info[4])
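            # (KBase object references have the form workspace_id/object_id/version,
            #  e.g. "123/4/5", which is what info tuple indices 6, 0 and 4 encode here)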
gaapi = GenomeAnnotationAPI(os.environ['SDK_CALLBACK_URL'], token=self.token)
genome = gaapi.get_genome_v1({"genomes": [{"ref": genome_ref}],
"included_fields": ["scientific_name"],
"included_feature_fields": ["id", "protein_translation",
"type", "function"
]})["genomes"][0]["data"]
############################# Features + Fasta ##########################
self.log_line("Preparing fasta file for ref [" + genome_ref + "]")
genome_id = str(genome_pos + 1)
records = []
for feature_pos, feature in enumerate(genome["features"]):
feature_id = feature["id"]
sequence = feature.get("protein_translation")
if sequence:
id = str(feature_pos + 1)
record = SeqRecord(Seq(sequence), id=id, description="")
records.append(record)
func = feature.get("function")
feature_info[genome_id + "|" + id] = {"fid": feature_id, "fpos":
feature_pos, "gref": genome_ref, "func": func}
fasta_file = self.scratch + "/" + genome_id + ".fasta"
SeqIO.write(records, fasta_file, "fasta")
############################# Adjusting Fasta by Orthomcl ##########################
self.log_line("Running orthomclAdjustFasta for ref [" + genome_ref + "]")
self.log_process(subprocess.Popen(["perl", self.plbin + "/orthomclAdjustFasta",
genome_id, fasta_file, "1"], cwd=compliant_fasta_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE))
return feature_info
def orthomcl_filter_fasta(self, compliant_fasta_dir):
self.log_line("Running orthomclFilterFasta")
self.log_process(subprocess.Popen(["perl", self.plbin + "/orthomclFilterFasta",
compliant_fasta_dir, "50", "10"], cwd=self.scratch,
stdout=subprocess.PIPE, stderr=subprocess.PIPE))
def run_blast(self, protdb):
############################# Formatdb ##########################
self.log_line("Running formatdb")
self.log_process(subprocess.Popen(["formatdb", "-i", protdb],
cwd=self.scratch, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
############################# BLAST ##########################
self.log_line("Running blastp")
blastp_args = ["blastall", "-p", "blastp", "-d", protdb, "-i", protdb, "-F", "m S",
"-v", self.get_param(self.params, "num_descriptions", "100000"),
"-b", self.get_param(self.params, "num_alignments", "100000"),
"-e", self.get_param(self.params, "evalue", "1e-5"),
"-m", "8", # Alignment view is tabular (for orthomclBlastParser)
"-a", "1"] # Number of processors is always 1
self.add_param(self.params, "word_size", "-W", blastp_args)
self.add_param(self.params, "gapopen", "-G", blastp_args)
self.add_param(self.params, "gapextend", "-E", blastp_args)
self.add_param(self.params, "matrix", "-M", blastp_args)
self.add_param(self.params, "threshold", "-f", blastp_args)
self.add_param(self.params, "comp_based_stats", "-C", blastp_args)
self.add_param(self.params, "seg", "-F", blastp_args)
self.add_param(self.params, "lcase_masking", "-U", blastp_args, True)
self.add_param(self.params, "xdrop_gap_final", "-Z", blastp_args)
self.add_param(self.params, "window_size", "-A", blastp_args)
self.add_param(self.params, "use_sw_tback", "-s", blastp_args, True)
self.log_line("Blastp command line: " + " ".join(blastp_args))
blast_output = self.scratch + "/blastres.txt"
with open(blast_output, "w") as outfile:
self.log_process(subprocess.Popen(blastp_args, cwd=self.scratch,
stdout=outfile, stderr=subprocess.PIPE))
return blast_output
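    # Note: orthomclBlastParser (below) consumes the "-m 8" tabular output produced
    # above: 12 tab-separated columns per hit (query, subject, %identity, alignment
    # length, mismatches, gap opens, query start/end, subject start/end, e-value,
    # bit score).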
def orthomcl_blast_parser(self, compliant_fasta_dir, blast_output):
self.log_line("Running orthomclBlastParser")
sim_seq_file = self.scratch + "/similarSequences.txt"
with open(sim_seq_file, "w") as outfile:
self.log_process(subprocess.Popen(["perl", self.plbin + "/orthomclBlastParser",
blast_output, compliant_fasta_dir], cwd=self.scratch, stdout=outfile,
stderr=subprocess.PIPE))
return sim_seq_file
def load_blast_output_to_db(self, orthomcl_cfg, sim_seq_file):
self.log_line("Running orthomclLoadBlast")
self.log_process(subprocess.Popen(["perl", self.plbin + "/orthomclLoadBlast",
orthomcl_cfg, sim_seq_file], cwd=self.scratch, stdout=subprocess.PIPE,
stderr=subprocess.PIPE))
def orthomcl_pairs(self, orthomcl_cfg):
self.log_line("Running orthomclPairs")
orthomcl_pairs_file = self.scratch + "/orthomcl_pairs.log"
self.log_process(subprocess.Popen(["perl", self.plbin + "/orthomclPairs",
orthomcl_cfg, orthomcl_pairs_file, "cleanup=no"], cwd=self.scratch,
stdout=subprocess.PIPE, stderr=subprocess.PIPE))
return orthomcl_pairs_file
def prepare_mcl_input(self, orthomcl_cfg):
self.log_line("Running orthomclDumpPairsFiles")
self.log_process(subprocess.Popen(["perl", self.plbin + "/orthomclDumpPairsFiles",
orthomcl_cfg], cwd=self.scratch, stdout=subprocess.PIPE,
stderr=subprocess.PIPE))
def run_mcl(self):
self.log_line("Running mcl")
mcl_output_file = self.scratch + "/mclOutput"
mcl_args = ["mcl", "mclInput", "--abc",
"-I", self.get_param(self.params, "mcl_main_i", "1.5"),
"-o", mcl_output_file]
self.add_param(self.params, "mcl_p", "-P", mcl_args)
self.add_param(self.params, "mcl_s", "-S", mcl_args)
self.add_param(self.params, "mcl_r", "-R", mcl_args)
self.add_param(self.params, "mcl_pct", "-pct", mcl_args)
self.add_param(self.params, "mcl_warn_p", "-warn-pct", mcl_args)
self.add_param(self.params, "mcl_warn_factor", "-warn-factor", mcl_args)
self.add_param(self.params, "mcl_init_l", "-l", mcl_args)
self.add_param(self.params, "mcl_main_l", "-L", mcl_args)
self.add_param(self.params, "mcl_init_i", "-i", mcl_args)
self.log_line("Mcl command line: " + " ".join(mcl_args))
self.log_process(subprocess.Popen(mcl_args, cwd=self.scratch,
stdout=subprocess.PIPE, stderr=subprocess.PIPE))
return mcl_output_file
def orthomcl_group_mcl_output(self, mcl_output_file):
self.log_line("Running orthomclMclToGroups")
groups_file = self.scratch + "/groups.txt"
with open(groups_file, "w") as outfile, open(mcl_output_file, "r") as infile:
self.log_process(subprocess.Popen(["perl", self.plbin + "/orthomclMclToGroups",
"grp", "1000"], cwd=self.scratch, stdin=infile, stdout=outfile,
stderr=subprocess.PIPE))
return groups_file
def parse_orthomcl_groups(self, groups_file, feature_info, orthologs, ids_in_orths):
self.log_line("Parsing groups file")
cluster_ind = 0
with open(groups_file, "r") as infile:
for line_pos, line in enumerate(infile.readlines()):
cluster_ind = line_pos + 1
cluster_id = "cluster" + str(cluster_ind)
function = ""
items = []
words = line.rstrip().split(" ")
for id in words[1:]:
feature = feature_info[id]
items.append([feature["fid"], feature["fpos"], feature["gref"]])
func = feature["func"]
if func is not None and len(func) > len(function):
function = func
ids_in_orths[id] = True
orthologs.append({"function": function, "id": cluster_id,
"orthologs": items})
return cluster_ind
def add_single_gene_families(self, feature_info, orthologs, ids_in_orths, cluster_ind):
self.log_line("Adding single-gene families (they're not reported by OrthoMCL)")
singles = 0
for id in feature_info:
if id in ids_in_orths:
continue
cluster_ind += 1
singles += 1
cluster_id = "cluster" + str(cluster_ind)
feature = feature_info[id]
function = feature["func"]
items = [[feature["fid"], feature["fpos"], feature["gref"]]]
orthologs.append({"function": function, "id": cluster_id,
"orthologs": items})
self.log_line(str(singles) + " single-gene families were added")
def save_pangenome_and_report(self, genome_refs, orthologs):
self.log_line("Saving pangenome object")
output_obj_name = self.params["output_pangenome_id"]
pangenome = {"genome_refs": genome_refs, "id": output_obj_name, "name":
output_obj_name, "orthologs": orthologs, "type": "orthomcl"}
input_ws_objects = []
if "input_genomeset_ref" in self.params and self.params["input_genomeset_ref"] is not None:
input_ws_objects.append(self.params["input_genomeset_ref"])
if "input_genome_refs" in self.params and self.params["input_genome_refs"] is not None:
for genome_ref in self.params["input_genome_refs"]:
if genome_ref is not None:
input_ws_objects.append(genome_ref)
self.provenance[0]["input_ws_objects"] = input_ws_objects
self.provenance[0]["description"] = "Orthologous groups construction using OrthoMCL tool"
info = self.ws.save_objects({"workspace": self.params["output_workspace"], "objects":
[{"type": "KBaseGenomes.Pangenome", "name": output_obj_name,
"data": pangenome, "provenance": self.provenance}]})[0]
pangenome_ref = str(info[6]) + "/" + str(info[0]) + "/" + str(info[4])
report = "Input genomes: " + str(len(genome_refs)) + "\n" + \
"Output orthologs: " + str(len(orthologs)) + "\n"
report_obj = {"objects_created": [{"ref": pangenome_ref,
"description": "Pangenome object"}], "text_message": report}
report_name = "orthomcl_report_" + str(hex(uuid.getnode()))
report_info = self.ws.save_objects({"workspace": self.params["output_workspace"],
"objects": [{"type": "KBaseReport.Report", "data": report_obj,
"name": report_name, "meta": {}, "hidden": 1, "provenance": self.provenance}]})[0]
return {"pangenome_ref": pangenome_ref,
"report_name": report_name, "report_ref": str(report_info[6]) + "/" +
str(report_info[0]) + "/" + str(report_info[4])}
def log_line(self, line):
self.log += line + "\n"
print(line)
def log_lines(self, lines):
for line in lines:
if len(line) > 0:
self.log_line("|" + line)
def log_process(self, process):
process_out = process.communicate()
output = process_out[0]
if output is not None and len(output) > 0:
self.log_line("Output:")
self.log_lines(output.split("\n"))
errors = process_out[1]
if errors is not None and len(errors) > 0:
self.log_line("Errors:")
self.log_lines(errors.split("\n"))
def get_param(self, params, param_name, def_value):
ret = None
if param_name in params and params[param_name] is not None and \
len(str(params[param_name])) > 0:
ret = str(params[param_name])
else:
ret = str(def_value)
return ret
def add_param(self, params, param_name, cli_arg, target_args, bool=False):
if param_name in params and params[param_name] is not None and \
len(str(params[param_name])) > 0:
value = params[param_name]
if bool:
if value == 1:
target_args.append(cli_arg)
else:
target_args.append(cli_arg)
target_args.append(str(value))
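# A minimal usage sketch (the scratch path, workspace URL, token and params below are
# hypothetical placeholders; provenance normally comes from the KBase SDK context):
#
#     params = {"input_genome_refs": ["123/4/5", "123/6/7"],
#               "output_workspace": "my_workspace",
#               "output_pangenome_id": "my_pangenome"}
#     builder = PangenomeOrthomclBuilder("/kb/module/work/tmp",
#                                        "https://kbase.us/services/ws",
#                                        params, token, [{}])
#     result = builder.run()  # {"pangenome_ref": ..., "report_name": ..., "report_ref": ...}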
|
rsutormin/PangenomeOrthomcl
|
lib/PangenomeOrthomcl/PangenomeOrthomclBuilder.py
|
Python
|
mit
| 18,725
|
[
"BLAST"
] |
24fdaab50b4a577c22ec715cb2fdfe3de52a045a2e827d3cfeabaaa35c6802fb
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""A library for parsing and interpreting results from computational chemistry packages.
The goals of cclib are centered around the reuse of data obtained from various
computational chemistry programs and typically contained in output files. Specifically,
cclib extracts (parses) data from the output files generated by multiple programs
and provides a consistent interface to access them.
Currently supported programs:
ADF, Firefly, GAMESS(US), GAMESS-UK, Gaussian,
Jaguar, Molpro, MOPAC, NWChem, ORCA, Psi, Q-Chem
Another aim is to facilitate the implementation of algorithms that are not specific
to any particular computational chemistry package and to maximise interoperability
with other open source computational chemistry and cheminformatic software libraries.
To this end, cclib provides a number of bridges to help transfer data to other libraries
as well as example methods that take parsed data as input.
"""
__version__ = "1.5"
from . import parser
from . import progress
from . import method
from . import bridge
from . import io
# The test module can be imported if it was installed with cclib.
try:
from . import test
except ImportError:
pass
|
Schamnad/cclib
|
src/cclib/__init__.py
|
Python
|
bsd-3-clause
| 1,379
|
[
"ADF",
"Firefly",
"GAMESS",
"Gaussian",
"Jaguar",
"MOPAC",
"Molpro",
"NWChem",
"ORCA",
"Q-Chem",
"cclib"
] |
ca72773fe6abd10ea9455a4e2364a8867320fd0af0b227315d486adf88f396b7
|
import wx
import wx.grid
import wx.lib.scrolledpanel
import os
import os.path
import time
import platform
import multiprocessing
import webbrowser
import datetime
from threading import Thread
from tools import *
class KICPanel(wx.lib.scrolledpanel.ScrolledPanel):
def __init__(self, parent, W, H):
#if (platform.system() == "Windows"):
wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-330), name="ProtFixbb")
winh = H-330
#else:
#wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-330), name="ProtMinimization")
#winh = H-290
self.SetBackgroundColour("#333333")
self.parent = parent
if (platform.system() == "Windows"):
self.lblProt = wx.StaticText(self, -1, "Kinematic Closure", (25, 15), (270, 25), wx.ALIGN_CENTRE)
self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblProt = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(25, 15), size=(270, 25))
else:
self.lblProt = wx.StaticText(self, -1, "Kinematic Closure", (70, 15), style=wx.ALIGN_CENTRE)
self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
resizeTextControlForUNIX(self.lblProt, 0, self.GetSize()[0]-20)
self.lblProt.SetForegroundColour("#FFFFFF")
if (platform.system() == "Darwin"):
self.HelpBtn = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/HelpBtn.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(295, 10), size=(25, 25))
else:
self.HelpBtn = wx.Button(self, id=-1, label="?", pos=(295, 10), size=(25, 25))
self.HelpBtn.SetForegroundColour("#0000FF")
self.HelpBtn.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.HelpBtn.Bind(wx.EVT_BUTTON, self.showHelp)
self.HelpBtn.SetToolTipString("Display the help file for this window")
if (platform.system() == "Windows"):
self.lblInst = wx.StaticText(self, -1, "Remodel existing loops or generate loops de novo", (0, 45), (320, 25), wx.ALIGN_CENTRE)
self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
elif (platform.system() == "Darwin"):
self.lblInst = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblInstKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, 45), size=(320, 25))
else:
self.lblInst = wx.StaticText(self, -1, "Remodel existing loops or generate loops de novo", (5, 45), style=wx.ALIGN_CENTRE)
self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
resizeTextControlForUNIX(self.lblInst, 0, self.GetSize()[0]-20)
self.lblInst.SetForegroundColour("#FFFFFF")
if (platform.system() == "Windows"):
self.lblModel = wx.StaticText(self, -1, "Model", (10, 90), (140, 20), wx.ALIGN_CENTRE)
self.lblModel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblModel = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblModelKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 90), size=(140, 20))
else:
self.lblModel = wx.StaticText(self, -1, "Model", (10, 90), style=wx.ALIGN_CENTRE)
self.lblModel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblModel, 10, 140)
self.lblModel.SetForegroundColour("#FFFFFF")
self.modelMenu = wx.ComboBox(self, pos=(10, 110), size=(140, 25), choices=[], style=wx.CB_READONLY)
self.modelMenu.Bind(wx.EVT_COMBOBOX, self.modelMenuSelect)
self.modelMenu.SetToolTipString("Model on which to perform loop modeling")
self.selectedModel = ""
if (platform.system() == "Windows"):
self.lblPivot = wx.StaticText(self, -1, "Pivot Residue", (170, 90), (140, 20), wx.ALIGN_CENTRE)
self.lblPivot.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblPivot = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblPivot.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(170, 90), size=(140, 20))
else:
self.lblPivot = wx.StaticText(self, -1, "Pivot Residue", (170, 90), style=wx.ALIGN_CENTRE)
self.lblPivot.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblPivot, 170, 140)
self.lblPivot.SetForegroundColour("#FFFFFF")
self.menuPivot = wx.ComboBox(self, pos=(170, 110), size=(140, 25), choices=[], style=wx.CB_READONLY)
self.menuPivot.Bind(wx.EVT_COMBOBOX, self.viewMenuSelect)
self.menuPivot.Disable()
self.menuPivot.SetToolTipString("Select the loop residue that will serve as the KIC pivot point")
if (platform.system() == "Windows"):
self.lblBegin = wx.StaticText(self, -1, "Loop Begin", (10, 140), (120, 20), wx.ALIGN_CENTRE)
self.lblBegin.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblBegin = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblBegin.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 140), size=(140, 20))
else:
self.lblBegin = wx.StaticText(self, -1, "Loop Begin", (10, 140), style=wx.ALIGN_CENTRE)
self.lblBegin.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblBegin, 10, 140)
self.lblBegin.SetForegroundColour("#FFFFFF")
self.beginMenu = wx.ComboBox(self, pos=(10, 160), size=(140, 25), choices=[], style=wx.CB_READONLY)
self.beginMenu.Bind(wx.EVT_COMBOBOX, self.beginMenuSelect)
self.beginMenu.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.beginMenu.SetToolTipString("Loop N-terminus")
self.loopBegin = -1
if (platform.system() == "Windows"):
self.lblEnd = wx.StaticText(self, -1, "Loop End", (170, 140), (140, 20), wx.ALIGN_CENTRE)
self.lblEnd.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblEnd = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblEnd.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(170, 140), size=(140, 20))
else:
self.lblEnd = wx.StaticText(self, -1, "Loop End", (170, 140), style=wx.ALIGN_CENTRE)
self.lblEnd.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblEnd, 170, 140)
self.lblEnd.SetForegroundColour("#FFFFFF")
self.endMenu = wx.ComboBox(self, pos=(170, 160), size=(140, 25), choices=[], style=wx.CB_READONLY)
self.endMenu.Bind(wx.EVT_COMBOBOX, self.endMenuSelect)
self.endMenu.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.endMenu.SetToolTipString("Loop C-terminus")
self.loopEnd = -1
if (platform.system() == "Windows"):
self.lblLoopType = wx.StaticText(self, -1, "Remodel Type", (10, 190), (140, 20), wx.ALIGN_CENTRE)
self.lblLoopType.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblLoopType = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblRemodelType.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 190), size=(140, 20))
else:
self.lblLoopType = wx.StaticText(self, -1, "Remodel Type", (10, 190), style=wx.ALIGN_CENTRE)
self.lblLoopType.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblLoopType, 10, 140)
self.lblLoopType.SetForegroundColour("#FFFFFF")
if (platform.system() == "Darwin"):
self.btnLoopType = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnLoopType_Refine.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 210), size=(140, 25))
else:
self.btnLoopType = wx.Button(self, id=-1, label="Refine", pos=(10, 210), size=(140, 25))
self.btnLoopType.SetForegroundColour("#000000")
self.btnLoopType.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.btnLoopType.Bind(wx.EVT_BUTTON, self.changeLoopType)
self.loopType = "Refine"
self.btnLoopType.SetToolTipString("Refine a pre-existing loop using the high resolution KIC remodeler only")
if (platform.system() == "Windows"):
self.lblSequence = wx.StaticText(self, -1, "Loop Sequence", (170, 190), (140, 20), wx.ALIGN_CENTRE)
self.lblSequence.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblSequence = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblSequence.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(170, 190), size=(140, 20))
else:
self.lblSequence = wx.StaticText(self, -1, "Loop Sequence", (170, 190), style=wx.ALIGN_CENTRE)
self.lblSequence.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblSequence, 170, 140)
self.lblSequence.SetForegroundColour("#FFFFFF")
self.txtSequence = wx.TextCtrl(self, -1, pos=(170, 210), size=(140, 25))
self.txtSequence.SetValue("")
self.txtSequence.SetToolTipString("Primary sequence for a de novo loop")
self.txtSequence.Disable()
if (platform.system() == "Darwin"):
self.btnAdd = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnAdd.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 240), size=(90, 25))
else:
self.btnAdd = wx.Button(self, id=-1, label="Add", pos=(10, 240), size=(90, 25))
self.btnAdd.SetForegroundColour("#000000")
self.btnAdd.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.btnAdd.Bind(wx.EVT_BUTTON, self.add)
self.btnAdd.SetToolTipString("Add the selected residues to the list of loops")
if (platform.system() == "Darwin"):
self.btnRemove = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnRemove.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(115, 240), size=(90, 25))
else:
self.btnRemove = wx.Button(self, id=-1, label="Remove", pos=(115, 240), size=(90, 25))
self.btnRemove.SetForegroundColour("#000000")
self.btnRemove.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.btnRemove.Bind(wx.EVT_BUTTON, self.remove)
self.btnRemove.SetToolTipString("Remove the selected residues from the list of loops")
if (platform.system() == "Darwin"):
self.btnClear = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnClear.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(220, 240), size=(90, 25))
else:
self.btnClear = wx.Button(self, id=-1, label="Clear", pos=(220, 240), size=(90, 25))
self.btnClear.SetForegroundColour("#000000")
self.btnClear.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.btnClear.Bind(wx.EVT_BUTTON, self.clear)
self.btnClear.SetToolTipString("Clear the list of loops")
self.grdLoops = wx.grid.Grid(self)
self.grdLoops.CreateGrid(0, 4)
self.grdLoops.SetSize((320, 200))
self.grdLoops.SetPosition((0, 270))
self.grdLoops.SetLabelFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.grdLoops.DisableDragColSize()
self.grdLoops.DisableDragRowSize()
self.grdLoops.SetColLabelValue(0, "Sequence")
self.grdLoops.SetColLabelValue(1, "Start")
self.grdLoops.SetColLabelValue(2, "Pivot")
self.grdLoops.SetColLabelValue(3, "End")
self.grdLoops.SetRowLabelSize(80)
self.grdLoops.SetColSize(0, 90)
self.grdLoops.SetColSize(1, 50)
self.grdLoops.SetColSize(2, 50)
self.grdLoops.SetColSize(3, 50)
self.grdLoops.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, self.gridClick)
self.loops = []
self.selectedr = -1
ypos = self.grdLoops.GetPosition()[1] + self.grdLoops.GetSize()[1] + 10
if (platform.system() == "Windows"):
self.lblAdvanced = wx.StaticText(self, -1, "Advanced Options", (0, ypos), (320, 20), wx.ALIGN_CENTRE)
self.lblAdvanced.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblAdvanced = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblAdvanced.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, ypos), size=(320, 20))
else:
self.lblAdvanced = wx.StaticText(self, -1, "Advanced Options", (0, ypos), style=wx.ALIGN_CENTRE)
self.lblAdvanced.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblAdvanced, 0, 320)
self.lblAdvanced.SetForegroundColour("#FFFFFF")
if (platform.system() == "Windows"):
self.lblPerturb = wx.StaticText(self, -1, "KIC Type:", (10, ypos+33), (100, 20), wx.ALIGN_CENTRE)
self.lblPerturb.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblPerturb = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblPerturb.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, ypos+33), size=(100, 20))
else:
self.lblPerturb = wx.StaticText(self, -1, "KIC Type:", (10, ypos+33), style=wx.ALIGN_CENTRE)
self.lblPerturb.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblPerturb, 10, 100)
self.lblPerturb.SetForegroundColour("#FFFFFF")
if (platform.system() == "Darwin"):
self.btnPerturb = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnPerturb_Refine.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(120, ypos+30), size=(200, 25))
else:
self.btnPerturb = wx.Button(self, id=-1, label="Perturb+Refine", pos=(120, ypos+30), size=(200, 25))
self.btnPerturb.SetForegroundColour("#000000")
self.btnPerturb.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.btnPerturb.Bind(wx.EVT_BUTTON, self.changePerturbType)
self.perturbType = "Perturb+Refine"
self.btnPerturb.SetToolTipString("Perform KIC coarse perturbation followed by high resolution refinement")
self.btnPerturb.Disable()
if (platform.system() == "Windows"):
self.lblNStruct = wx.StaticText(self, -1, "NStruct:", (20, ypos+63), (100, 20), wx.ALIGN_CENTRE)
self.lblNStruct.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblNStruct = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblNStruct.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(20, ypos+63), size=(100, 20))
else:
self.lblNStruct = wx.StaticText(self, -1, "NStruct:", (20, ypos+63), style=wx.ALIGN_CENTRE)
self.lblNStruct.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblNStruct, 20, 100)
self.lblNStruct.SetForegroundColour("#FFFFFF")
self.txtNStruct = wx.TextCtrl(self, -1, pos=(155, ypos+60), size=(140, 25))
self.txtNStruct.SetValue("1")
self.txtNStruct.SetToolTipString("Number of models to generate (each KIC simulation typically takes 5-10 minutes)")
self.txtNStruct.Disable()
#if (platform.system() == "Darwin"):
# self.btnOutputDir = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnOutputDir.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(20, 350), size=(100, 25))
#else:
# self.btnOutputDir = wx.Button(self, id=-1, label="Output Dir", pos=(20, 350), size=(100, 25))
# self.btnOutputDir.SetForegroundColour("#000000")
# self.btnOutputDir.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
#self.btnOutputDir.Bind(wx.EVT_BUTTON, self.setOutputDir)
#self.btnOutputDir.SetToolTipString("Set the directory to which outputted structures will be written, if NStruct > 1")
#self.btnOutputDir.Disable()
#if (platform.system() == "Windows"):
# self.lblDir = wx.StaticText(self, -1, "", (130, 355), (190, 20), wx.ALIGN_CENTRE)
# self.lblDir.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
#else:
# self.lblDir = wx.StaticText(self, -1, "", (130, 355), style=wx.ALIGN_CENTRE)
# self.lblDir.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
# resizeTextControlForUNIX(self.lblDir, 130, 190)
#self.lblDir.SetForegroundColour("#FFFFFF")
#self.outputdir = ""
if (platform.system() == "Windows"):
self.lblLine = wx.StaticText(self, -1, "==========================", (0, ypos+90), (320, 20), wx.ALIGN_CENTRE)
self.lblLine.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
elif (platform.system() == "Darwin"):
self.lblLine = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblLine.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, ypos+90), size=(320, 20))
else:
self.lblLine = wx.StaticText(self, -1, "==========================", (0, ypos+90), style=wx.ALIGN_CENTRE)
self.lblLine.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
resizeTextControlForUNIX(self.lblLine, 20, 120)
self.lblLine.SetForegroundColour("#FFFFFF")
if (platform.system() == "Windows"):
self.lblPostKIC = wx.StaticText(self, -1, "Post-Loop Modeling", (0, ypos+115), (320, 20), wx.ALIGN_CENTRE)
self.lblPostKIC.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblPostKIC = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblPostKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, ypos+115), size=(320, 20))
else:
self.lblPostKIC = wx.StaticText(self, -1, "Post-Loop Modeling", (0, ypos+115), style=wx.ALIGN_CENTRE)
self.lblPostKIC.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
resizeTextControlForUNIX(self.lblPostKIC, 0, self.GetSize()[0]-20)
self.lblPostKIC.SetForegroundColour("#FFFFFF")
if (platform.system() == "Darwin"):
self.scoretypeMenu = wx.ComboBox(self, pos=(7, ypos+145), size=(305, 25), choices=[], style=wx.CB_READONLY)
else:
self.scoretypeMenu = wx.ComboBox(self, pos=(7, ypos+145), size=(305, 25), choices=[], style=wx.CB_READONLY | wx.CB_SORT)
self.scoretypeMenu.Bind(wx.EVT_COMBOBOX, self.scoretypeMenuSelect)
        self.scoretypeMenu.Disable() # Only enabled after a KIC run finishes and before the model is accepted
self.scoretypeMenu.SetToolTipString("Scoretype by which PyMOL residues will be colored")
if (platform.system() == "Windows"):
self.lblModelView = wx.StaticText(self, -1, "View Structure:", (20, ypos+183), (120, 20), wx.ALIGN_CENTRE)
self.lblModelView.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblModelView = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblModelView.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(20, ypos+183), size=(120, 20))
else:
self.lblModelView = wx.StaticText(self, -1, "View Structure:", (20, ypos+183), style=wx.ALIGN_CENTRE)
self.lblModelView.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblModelView, 20, 120)
self.lblModelView.SetForegroundColour("#FFFFFF")
self.viewMenu = wx.ComboBox(self, pos=(175, ypos+180), size=(120, 25), choices=[], style=wx.CB_READONLY)
self.viewMenu.Bind(wx.EVT_COMBOBOX, self.viewMenuSelect)
self.viewMenu.Disable()
self.viewMenu.SetToolTipString("Select loop positions to view in PyMOL")
if (platform.system() == "Darwin"):
self.btnServerToggle = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnServer_Off.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(40, ypos+215), size=(100, 25))
else:
self.btnServerToggle = wx.Button(self, id=-1, label="Server Off", pos=(40, ypos+215), size=(100, 25))
self.btnServerToggle.SetForegroundColour("#000000")
self.btnServerToggle.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.btnServerToggle.Bind(wx.EVT_BUTTON, self.serverToggle)
self.btnServerToggle.SetToolTipString("Perform KIC simulations locally")
self.serverOn = False
if (platform.system() == "Darwin"):
self.btnKIC = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(180, ypos+215), size=(100, 25))
else:
self.btnKIC = wx.Button(self, id=-1, label="KIC!", pos=(180, ypos+215), size=(100, 25))
self.btnKIC.SetForegroundColour("#000000")
self.btnKIC.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.btnKIC.Bind(wx.EVT_BUTTON, self.KICClick)
self.btnKIC.SetToolTipString("Begin KIC simulation with selected parameters")
self.buttonState = "KIC!"
self.scrollh = self.btnKIC.GetPosition()[1] + self.btnKIC.GetSize()[1] + 5
self.SetScrollbars(1, 1, 320, self.scrollh)
self.winscrollpos = 0
self.Bind(wx.EVT_SCROLLWIN, self.scrolled)
def showHelp(self, event):
# Open the help page
if (platform.system() == "Darwin"):
try:
browser = webbrowser.get("Safari")
except:
print "Could not load Safari! The help files are located at " + self.scriptdir + "/help"
return
browser.open(self.parent.parent.scriptdir + "/help/kic.html")
else:
webbrowser.open(self.parent.parent.scriptdir + "/help/kic.html")
def setSeqWin(self, seqWin):
self.seqWin = seqWin
        # So the sequence window knows what model "kic_view" really is
self.seqWin.setProtocolPanel(self)
def setPyMOL(self, pymol):
self.pymol = pymol
self.cmd = pymol.cmd
self.stored = pymol.stored
def setSelectWin(self, selectWin):
self.selectWin = selectWin
self.selectWin.setProtPanel(self)
def scrolled(self, event):
self.winscrollpos = self.GetScrollPos(wx.VERTICAL)
event.Skip()
def activate(self):
# Get the list of all the models in the sequence viewer
modelList = []
for r in range(0, self.seqWin.SeqViewer.NumberRows):
model = self.seqWin.getModelForChain(r)
if (not(model in modelList)):
modelList.append(model)
# Update the combobox list if the list has changed
if (modelList != self.modelMenu.GetItems()):
self.modelMenu.Clear()
self.modelMenu.AppendItems(modelList)
self.selectedModel = ""
if (platform.system() == "Windows"):
self.modelMenu.SetSelection(-1)
else:
self.modelMenu.SetSelection(0)
self.modelMenuSelect(None)
# Did we lose the model for the data in the loops grid? If so, clear the loops
if (len(self.loops) > 0 and not(self.loops[0][2] in modelList)):
self.loops = []
self.updateLoops()
# If the user was deleting things in the sequence window, the specified begin and end positions might
# not be valid anymore so we should erase them
poseindx = self.seqWin.getPoseIndexForModel(self.selectedModel)
if (poseindx >= 0):
naa = 0
for ch in self.seqWin.poses[poseindx][0]:
for residue in ch:
if (residue.resname in "ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR "):
naa = naa + 1
if (len(self.beginMenu.GetItems()) != naa-1):
self.selectedModel = ""
self.modelMenuSelect(None)
self.Scroll(0, self.winscrollpos)
def rightClick(self, event):
# Attempt to fill in loop values from a selection to bypass having to use the ComboBox
try:
topLefts = self.seqWin.SeqViewer.GetSelectionBlockTopLeft()
bottomRights = self.seqWin.SeqViewer.GetSelectionBlockBottomRight()
row = topLefts[0][0]
begin = 9999999
end = 0
for i in range(0, len(topLefts)):
for r in range(topLefts[i][0], bottomRights[i][0]+1):
if (r != row):
continue
for c in range(topLefts[i][1], bottomRights[i][1]+1):
if (c > end and self.seqWin.sequences[row][c] != "-"):
end = c
if (c < begin and self.seqWin.sequences[row][c] != "-"):
begin = c
if (begin == end):
# Have to get at least two residues
return
model = self.seqWin.IDs[row]
chain = model[len(model)-1]
model = model[:len(model)-2]
beginres = chain + ":" + self.seqWin.sequences[row][begin] + str(self.seqWin.indxToSeqPos[row][begin][1])
endres = chain + ":" + self.seqWin.sequences[row][end] + str(self.seqWin.indxToSeqPos[row][end][1])
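            # (Residue IDs have the form "<chain>:<one-letter AA><PDB number>", e.g.
            #  "A:K57"; parsing elsewhere reads the chain from [0] and the number from [3:])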
mindx = self.modelMenu.GetItems().index(model)
bindx = self.beginMenu.GetItems().index(beginres)
eindx = self.endMenu.GetItems().index(endres)
self.modelMenu.SetSelection(mindx)
self.beginMenu.SetSelection(bindx)
self.endMenu.SetSelection(eindx)
chain = self.beginMenu.GetStringSelection()[0]
seqpos = self.beginMenu.GetStringSelection()[3:].strip()
rindx = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
self.loopBegin = rindx
chain = self.endMenu.GetStringSelection()[0]
seqpos = self.endMenu.GetStringSelection()[3:].strip()
rindx = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
self.loopEnd = rindx
self.focusView(self.endMenu.GetStringSelection(), self.selectedModel)
self.populatePivots()
except:
pass
def gridClick(self, event):
# Set the selected residue's row to blue so it is easy to see what the selection is
self.selectedr = event.GetRow()
if (self.selectedr >= self.grdLoops.NumberRows):
self.selectedr = -1
for r in range(0, self.grdLoops.NumberRows):
if (r == self.selectedr):
for c in range(0, self.grdLoops.NumberCols):
self.grdLoops.SetCellBackgroundColour(r, c, "light blue")
else:
for c in range(0, self.grdLoops.NumberCols):
self.grdLoops.SetCellBackgroundColour(r, c, "white")
self.grdLoops.Refresh()
self.loopBegin = self.loops[self.selectedr][3]
self.loopEnd = self.loops[self.selectedr][5]
self.populatePivots()
# Load this loop's data into the controls and focus it
self.modelMenu.SetSelection(self.modelMenu.GetItems().index(self.loops[self.selectedr][2]))
chainID, resindx = self.seqWin.getResidueInfo(self.loops[self.selectedr][2], self.loops[self.selectedr][3]+1)
if (len(chainID.strip()) == 0):
chainID = "_"
self.beginMenu.SetSelection(self.beginMenu.GetItems().index(chainID + ":" + self.seqWin.getResidueTypeFromRosettaIndx(self.loops[self.selectedr][2], self.loops[self.selectedr][3]+1) + str(resindx)))
chainID, resindx = self.seqWin.getResidueInfo(self.loops[self.selectedr][2], self.loops[self.selectedr][4]+1)
if (len(chainID.strip()) == 0):
chainID = "_"
self.menuPivot.SetSelection(self.menuPivot.GetItems().index(chainID + ":" + self.seqWin.getResidueTypeFromRosettaIndx(self.loops[self.selectedr][2], self.loops[self.selectedr][4]+1) + str(resindx)))
chainID, resindx = self.seqWin.getResidueInfo(self.loops[self.selectedr][2], self.loops[self.selectedr][5])
if (len(chainID.strip()) == 0):
chainID = "_"
self.endMenu.SetSelection(self.endMenu.GetItems().index(chainID + ":" + self.seqWin.getResidueTypeFromRosettaIndx(self.loops[self.selectedr][2], self.loops[self.selectedr][5]) + str(resindx)))
self.focusView(self.endMenu.GetStringSelection(), self.loops[self.selectedr][2])
event.Skip()
def modelMenuSelect(self, event):
# Update the list of positions with the new model
if (self.selectedModel == self.modelMenu.GetStringSelection()):
return
self.selectedModel = self.modelMenu.GetStringSelection()
logInfo("Selected model " + self.selectedModel)
# Get the location of the pose
poseindx = self.seqWin.getPoseIndexForModel(self.selectedModel)
# Read the positions
pose = self.seqWin.poses[poseindx]
positions = []
for ch in pose[0]:
for residue in ch:
if ("ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR ".find(residue.resname) >= 0):
chain = ch.id
if (len(chain.strip()) == 0):
chain = "_"
label = chain + ":" + AA3to1(residue.resname) + str(residue.id[1])
positions.append(label)
# Update the beginning and ending positions menus with the available sequence positions
self.beginMenu.Clear()
self.beginMenu.AppendItems(positions[0:len(positions)-1])
if (platform.system() == "Windows"):
self.beginMenu.SetSelection(-1)
self.loopBegin = -1
else:
self.beginMenu.SetSelection(0)
self.loopBegin = 1
self.endMenu.Clear()
self.endMenu.AppendItems(positions[1:])
if (platform.system() == "Windows"):
self.endMenu.SetSelection(-1)
self.loopEnd = -1
else:
self.endMenu.SetSelection(0)
self.loopEnd = 2
self.txtNStruct.Enable()
self.populatePivots()
def changeLoopType(self, event):
if (self.loopType == "Refine"):
self.loopType = "Reconstruct"
if (platform.system() == "Darwin"):
self.btnLoopType.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnLoopType_Reconstruct.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnLoopType.SetLabel(self.loopType)
self.btnLoopType.SetToolTipString("Reconstruct the current loop using the wildtype sequence")
self.btnPerturb.Enable()
self.txtNStruct.Enable()
elif (self.loopType == "Reconstruct"):
self.loopType = "De Novo"
if (platform.system() == "Darwin"):
self.btnLoopType.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnLoopType_DeNovo.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnLoopType.SetLabel(self.loopType)
self.btnLoopType.SetToolTipString("Construct a new loop with a new sequence")
self.txtSequence.Enable()
else:
self.loopType = "Refine"
if (platform.system() == "Darwin"):
self.btnLoopType.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnLoopType_Refine.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnLoopType.SetLabel(self.loopType)
self.btnLoopType.SetToolTipString("Refine a pre-existing loop using the high resolution KIC remodeler only")
self.txtSequence.Disable()
self.btnPerturb.Disable()
self.txtNStruct.Disable()
logInfo("Changed loop type to " + self.loopType)
def changePerturbType(self, event):
if (self.perturbType == "Perturb+Refine"):
self.perturbType = "Perturb Only, Fullatom"
if (platform.system() == "Darwin"):
self.btnPerturb.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnPerturb_Fullatom.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnPerturb.SetLabel(self.perturbType)
self.btnPerturb.SetToolTipString("Perform only KIC coarse perturbations but convert outputted models to repacked fullatom PDBs")
#elif (self.perturbType == "Perturb Only, Fullatom"):
# self.perturbType = "Perturb Only, Centroid"
# self.btnPerturb.SetToolTipString("Perform only KIC coarse perturbations and leave outputted PDBs in coarse centroid mode")
else:
self.perturbType = "Perturb+Refine"
if (platform.system() == "Darwin"):
self.btnPerturb.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnPerturb_Refine.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnPerturb.SetLabel(self.perturbType)
self.btnPerturb.SetToolTipString("Perform KIC coarse perturbation followed by high resolution refinement")
logInfo("Changed perturbation type to " + self.perturbType)
def setOutputDir(self, event):
logInfo("Clicked Output Dir button")
dlg = wx.DirDialog(
self, message="Choose a directory",
defaultPath=self.seqWin.cwd,
style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
if (dlg.ShowModal() == wx.ID_OK):
path = dlg.GetPath()
self.outputdir = str(path)
# Change cwd to the last opened file
self.seqWin.cwd = self.outputdir
self.seqWin.saveWindowData(None)
self.lblDir.SetLabel(self.outputdir)
self.lblDir.SetForegroundColour("#FFFFFF")
if (platform.system() == "Linux"):
resizeTextControlForUNIX(self.lblDir, 130, 190)
logInfo("Set output directory as " + self.outputdir)
else:
logInfo("Cancelled out of Load PDB")
def populatePivots(self):
self.menuPivot.Enable()
# Get the location of the pose
poseindx = self.seqWin.getPoseIndexForModel(self.selectedModel)
# Read the positions
pose = self.seqWin.poses[poseindx]
positions = []
ires = 1
for ch in pose[0]:
for residue in ch:
if (ires >= self.loopBegin and ires <= self.loopEnd):
if ("ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR ".find(residue.resname) >= 0):
chain = ch.id
if (len(chain.strip()) == 0):
chain = "_"
label = chain + ":" + AA3to1(residue.resname) + str(residue.id[1])
positions.append(label)
ires = ires + 1
self.menuPivot.Clear()
self.menuPivot.AppendItems(positions)
self.menuPivot.SetSelection(0)
def beginMenuSelect(self, event):
try:
chain = self.beginMenu.GetStringSelection()[0]
seqpos = self.beginMenu.GetStringSelection()[3:].strip()
rindx = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
self.loopBegin = rindx
            # If this new loop begin is at or beyond the currently set loop end, the end
            # needs to be reset and the user should be notified
if (self.loopEnd >= 0 and self.loopEnd <= rindx):
if (platform.system() == "Windows"):
self.endMenu.SetSelection(-1)
self.loopEnd = -1
else:
self.endMenu.SetSelection(self.beginMenu.GetSelection()) # This clears the menu, SetStringSelection/SetValue doesn't seem to work
self.endMenuSelect(event)
wx.MessageBox("Your selected end loop value is no longer valid. Please choose an ending position after the one you've selected here.", "Loop End No Longer Valid", wx.OK|wx.ICON_EXCLAMATION)
if (self.loopBegin >= 0 and self.loopEnd >= 0 and self.loopBegin < self.loopEnd):
# Populate the pivot menu
self.populatePivots()
else:
self.menuPivot.Disable()
self.focusView(self.beginMenu.GetStringSelection(), self.selectedModel)
logInfo("Selected " + self.beginMenu.GetStringSelection() + " as the beginning of the loop")
except:
# Probably the user left the field blank, do nothing
pass
def endMenuSelect(self, event):
try:
chain = self.endMenu.GetStringSelection()[0]
seqpos = self.endMenu.GetStringSelection()[3:].strip()
rindx = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
self.loopEnd = rindx
            # If this new loop end is at or before the currently set loop begin, the begin
            # needs to be reset and the user should be notified
if (self.loopBegin >= 0 and self.loopBegin >= rindx):
if (platform.system() == "Windows"):
self.beginMenu.SetSelection(-1)
self.loopBegin = -1
else:
self.beginMenu.SetSelection(self.endMenu.GetSelection()) # This clears the menu, SetStringSelection/SetValue doesn't seem to work
self.beginMenuSelect(event)
wx.MessageBox("Your selected begin loop value is no longer valid. Please choose a beginning position before the one you've selected here.", "Loop Begin No Longer Valid", wx.OK|wx.ICON_EXCLAMATION)
if (self.loopBegin >= 0 and self.loopEnd >= 0 and self.loopBegin < self.loopEnd):
# Populate the pivot menu
self.populatePivots()
else:
self.menuPivot.Disable()
self.focusView(self.endMenu.GetStringSelection(), self.selectedModel)
logInfo("Selected " + self.endMenu.GetStringSelection() + " as the ending of the loop")
except:
# Probably the user left the field blank, do nothing
pass
def updateLoops(self):
# Redraw the loops grid with current loop information
scrollpos = self.grdLoops.GetScrollPos(wx.VERTICAL)
if (self.grdLoops.NumberRows > 0):
self.grdLoops.DeleteRows(0, self.grdLoops.NumberRows)
if (len(self.loops) > 0):
self.grdLoops.AppendRows(len(self.loops))
row = 0
for [loopType, sequence, model, begin, pivot, end] in self.loops:
self.grdLoops.SetRowLabelValue(row, loopType)
self.grdLoops.SetCellValue(row, 0, sequence)
chainID, resindx = self.seqWin.getResidueInfo(model, begin)
if (len(chainID.strip()) == 0):
chainID = "_"
self.grdLoops.SetCellValue(row, 1, chainID + "|" + self.seqWin.getResidueTypeFromRosettaIndx(model, begin) + str(resindx))
chainID, resindx = self.seqWin.getResidueInfo(model, pivot)
if (len(chainID.strip()) == 0):
chainID = "_"
self.grdLoops.SetCellValue(row, 2, chainID + "|" + self.seqWin.getResidueTypeFromRosettaIndx(model, pivot) + str(resindx))
chainID, resindx = self.seqWin.getResidueInfo(model, end)
if (len(chainID.strip()) == 0):
chainID = "_"
self.grdLoops.SetCellValue(row, 3, chainID + "|" + self.seqWin.getResidueTypeFromRosettaIndx(model, end) + str(resindx))
readOnly = wx.grid.GridCellAttr()
readOnly.SetReadOnly(True)
readOnly.SetAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
readOnly.SetBackgroundColour("#FFFFFF")
self.grdLoops.SetRowAttr(row, readOnly)
row += 1
self.grdLoops.Scroll(0, scrollpos)
def add(self, event):
# Is the loop valid?
        if (self.loopBegin < 0 or self.loopEnd < 0 or self.loopBegin >= self.loopEnd):
dlg = wx.MessageDialog(self, "You do not have a valid loop specified!", "Loop Not Valid", wx.OK | wx.ICON_ERROR | wx.CENTRE)
dlg.ShowModal()
dlg.Destroy()
return
# If we're doing a de novo search, is the sequence specified?
if (self.loopType == "De Novo"):
sequence = self.txtSequence.GetValue().strip().upper()
for AA in sequence:
if (not(AA in "ACDEFGHIKLMNPQRSTVWY")):
wx.MessageBox("The sequence you have provided is invalid. Please only use canonical amino acids.", "Sequence Invalid", wx.OK|wx.ICON_EXCLAMATION)
return
if (len(sequence) == 0):
wx.MessageBox("You have indicated that you want to design a loop de novo but have not provided the putative sequence of the loop. Please provide one or switch to use a pre-existing loop.", "No Sequence Indicated", wx.OK|wx.ICON_EXCLAMATION)
return
else:
sequence = ""
# Did the model change? If yes, and loops is not empty, then tell the user that this
# will remove all loops to make room for the new model
if (len(self.loops) > 0 and self.modelMenu.GetValue() != self.loops[0][2]):
dlg = wx.MessageDialog(self, "You are attempting to add a loop for a different model. If you continue, all current loops will be removed. Is this okay?", "Loop Model Changed", wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
return
dlg.Destroy()
self.loops = []
# Does this loop overlap with a previously-specified loop? If so, do not add
i = 1
for loopType, s, model, begin, pivot, end in self.loops:
if ((self.loopBegin >= begin and self.loopBegin <= end) or (self.loopEnd >= begin and self.loopEnd <= end)):
dlg = wx.MessageDialog(self, "The loop you have indicated overlaps with loop " + str(i) + ". Either change the current loop or remove loop " + str(i) + ".", "Loop Overlap", wx.OK | wx.ICON_ERROR | wx.CENTRE)
dlg.ShowModal()
dlg.Destroy()
return
i += 1
# Add this loop to the list of loops currently active
self.loops.append([self.loopType, sequence, self.modelMenu.GetValue(), self.loopBegin, self.menuPivot.GetSelection() + self.loopBegin, self.loopEnd])
self.updateLoops()
def remove(self, event):
# For this function, remove the indicated loop
self.activate()
logInfo("Remove button clicked")
if (self.selectedr >= 0 and self.selectedr < len(self.loops)):
self.loops.pop(self.selectedr)
self.selectedr = -1
self.updateLoops()
def clear(self, event):
logInfo("Clear button clicked")
# Remove everything
self.loops = []
self.updateLoops()
def viewMenuSelect(self, event):
try:
self.focusView(self.viewMenu.GetStringSelection(), self.selectedModel, "kic_view")
logInfo("Viewing " + self.viewMenu.GetStringSelection())
except:
# Probably the user left the field blank, do nothing
pass
def focusView(self, posID, origmodel, newmodel=None):
model = origmodel
loopEnd = self.loopEnd
if (posID != "Whole Loop"):
chain = posID[0]
seqpos = posID[3:].strip()
# Loop end needs to be recalculated if this is a view of the de novo loop since the
# de novo loop may be a different size
if (newmodel and len(self.txtSequence.GetValue()) > 0):
loopEnd = self.loopBegin + len(self.txtSequence.GetValue()) + 1 # For the anchor
else:
i = 1
wholeloop_data = []
for ch in self.KICView[0]:
for residue in ch:
if (i >= self.loopBegin and i <= loopEnd):
chain = ch.id
seqpos = str(residue.id[1])
wholeloop_data.append((chain, seqpos))
i = i + 1
# Find the neighborhood view
if (newmodel):
firstmodel = newmodel
else:
firstmodel = origmodel
self.cmd.hide("all")
if (chain == " " or chain == "_"):
self.cmd.select("viewsele", "resi " + seqpos + " and model " + firstmodel)
else:
self.cmd.select("viewsele", "resi " + seqpos + " and model " + firstmodel + " and chain " + chain)
# If the loop is validly defined, let's show the whole loop instead of individual residues
if ((self.loopBegin >= 0 and self.loopEnd >= 0 and not(newmodel)) or posID == "Whole Loop"):
for i in range(self.loopBegin, loopEnd):
if (not(newmodel)):
(chain, seqpos) = self.seqWin.getResidueInfo(self.selectedModel, i)
else:
(chain, seqpos) = wholeloop_data[i-self.loopBegin]
if (chain == "_" or len(chain.strip()) == 0):
self.cmd.select("viewsele", "viewsele or (resi " + str(seqpos) + " and model " + firstmodel + ")")
else:
self.cmd.select("viewsele", "viewsele or (resi " + str(seqpos) + " and chain " + chain + " and model " + firstmodel + ")")
self.cmd.select("exviewsele", "model " + firstmodel + " within 12 of viewsele")
self.cmd.show("cartoon", "exviewsele")
self.cmd.hide("ribbon", "exviewsele")
self.cmd.show("sticks", "exviewsele")
self.cmd.set_bond("stick_radius", 0.1, "exviewsele")
# Display energy labels for new structures
if (newmodel):
relabelEnergies(self.KICView, self.residue_E, newmodel, self.scoretypeMenu.GetStringSelection(), self.cmd, seqpos)
self.cmd.label("not exviewsele", "")
self.cmd.zoom("exviewsele")
#if (chain == " " or chain == "_"):
# self.cmd.select("viewsele", "resi " + seqpos + " and model " + firstmodel)
#else:
# self.cmd.select("viewsele", "resi " + seqpos + " and model " + firstmodel + " and chain " + chain)
self.cmd.show("sticks", "viewsele")
self.cmd.set_bond("stick_radius", 0.25, "viewsele")
# Highlight this residue in PyMOL
self.cmd.select("seqsele", "viewsele")
if (newmodel):
# If this is after a protocol, also show the original structure in green for comparison
self.cmd.select("oldsele", "model " + origmodel + " and symbol c")
self.cmd.color("green", "oldsele")
self.cmd.set("cartoon_color", "green", "oldsele")
#if (chain == " " or chain == "_"):
#self.cmd.select("viewsele", "resi " + seqpos + " and model " + origmodel)
#else:
#self.cmd.select("viewsele", "resi " + seqpos + " and model " + origmodel + " and chain " + chain)
#self.cmd.select("viewsele", "model " + origmodel + " within 12 of viewsele")
self.cmd.select("exviewsele", "model " + origmodel + " within 12 of viewsele")
self.cmd.show("cartoon", "exviewsele")
self.cmd.hide("ribbon", "exviewsele")
self.cmd.show("sticks", "exviewsele")
self.cmd.set_bond("stick_radius", 0.1, "exviewsele")
self.cmd.zoom("exviewsele")
self.cmd.delete("oldsele")
#if (chain == " " or chain == "_"):
#self.cmd.select("exviewsele", "resi " + seqpos + " and model " + origmodel)
#else:
#self.cmd.select("viewsele", "resi " + seqpos + " and model " + origmodel + " and chain " + chain)
#self.cmd.show("sticks", "viewsele")
#self.cmd.set_bond("stick_radius", 0.25, "viewsele")
self.cmd.enable("seqsele")
self.cmd.delete("viewsele")
self.cmd.select("exviewsele", "solvent")
self.cmd.hide("everything", "exviewsele")
self.cmd.delete("exviewsele")
self.seqWin.selectUpdate(False)
def scoretypeMenuSelect(self, event):
# Make sure there is even a PyMOL_Mover pose loaded
if (self.selectedModel == ""):
return
logInfo("Changed scoretype view to " + self.scoretypeMenu.GetStringSelection())
recolorEnergies(self.KICView, self.residue_E, "kic_view", self.scoretypeMenu.GetStringSelection(), self.cmd)
self.viewMenuSelect(event) # To update all the labels
def serverToggle(self, event):
if (self.serverOn):
self.serverOn = False
if (platform.system() == "Darwin"):
self.btnServerToggle.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnServer_Off.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnServerToggle.SetLabel("Server Off")
self.btnServerToggle.SetToolTipString("Perform KIC simulations locally")
logInfo("Turned off KIC server usage")
else:
self.serverOn = True
if (platform.system() == "Darwin"):
self.btnServerToggle.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnServer_On.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnServerToggle.SetLabel("Server On")
self.btnServerToggle.SetToolTipString("Perform KIC simulations on a remote server")
logInfo("Turned on KIC server usage")
def cancelKIC(self):
logInfo("Canceled KIC operation")
try:
os.remove("coarsekicinput")
except:
pass
try:
os.remove("coarsekicinputtemp")
except:
pass
try:
os.remove("repacked.pdb")
except:
pass
try:
os.remove("finekicinput")
except:
pass
self.tmrKIC.Stop()
self.seqWin.cannotDelete = False
self.scoretypeMenu.Disable()
self.viewMenu.Disable()
self.modelMenu.Enable()
self.beginMenu.Enable()
self.endMenu.Enable()
self.btnLoopType.Enable()
if (self.loopType == "De Novo"):
self.txtSequence.Enable()
if (platform.system() == "Darwin"):
self.btnKIC.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnKIC.SetLabel("KIC!")
self.buttonState = "KIC!"
self.btnKIC.SetToolTipString("Perform KIC simulation with selected parameters")
deleteInputFiles()
self.parent.parent.restartDaemon()
self.parent.GoBtn.Enable()
# Get rid of the messages
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing rotamer repacking") >= 0):
self.seqWin.msgQueue.pop(i)
break
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing refined KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
if (len(self.seqWin.msgQueue) > 0):
self.seqWin.labelMsg.SetLabel(self.seqWin.msgQueue[len(self.seqWin.msgQueue)-1])
else:
self.seqWin.labelMsg.SetLabel("")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
def KICClick(self, event):
# This is also the "Finalize!" button
if (self.buttonState == "KIC!"):
# First we have to make sure that the loops are defined and that the sequence is valid
if (len(self.loops) == 0):
wx.MessageBox("Please specify at least one valid loop to model", "No Loops Provided", wx.OK|wx.ICON_EXCLAMATION)
return
try:
if (int(self.txtNStruct.GetValue()) <= 0):
raise Exception
except:
wx.MessageBox("Please enter a positive value for the number of structures.", "Invalid NStruct", wx.OK|wx.ICON_EXCLAMATION)
return
#if (int(self.txtNStruct.GetValue()) > 1 and len(self.outputdir.strip()) == 0):
#wx.MessageBox("If you want to generate more than one structure, you need to indicate a directory to which all these structures will be outputted.", "Specify an Output Directory", wx.OK|wx.ICON_EXCLAMATION)
#return
self.seqWin.labelMsg.SetLabel("Performing KIC loop modeling, please be patient...")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
self.seqWin.msgQueue.append("Performing KIC loop modeling, please be patient...")
self.seqWin.cannotDelete = True
self.parent.GoBtn.Disable()
self.modelMenu.Disable()
self.btnLoopType.Disable()
self.beginMenu.Disable()
self.endMenu.Disable()
self.txtSequence.Disable()
if (platform.system() == "Darwin"):
self.btnKIC.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC_Cancel.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnKIC.SetLabel("Cancel!")
self.buttonState = "Cancel!"
self.btnKIC.SetToolTipString("Cancel the KIC simulation")
self.stage = 1
#thrKIC = Thread(target=self.threadKIC, args=())
#thrKIC.start()
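# A one-second wx.Timer now drives threadKIC instead of the worker thread
# commented out above; see the comment block at the top of threadKIC for why.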
logInfo("Clicked the KIC button")
if (len(self.txtSequence.GetValue().strip())):
logInfo("The new loop sequence is " + self.txtSequence.GetValue().strip())
self.tmrKIC = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.threadKIC, self.tmrKIC)
self.tmrKIC.Start(1000)
elif (self.buttonState == "Cancel!"):
dlg = wx.MessageDialog(self, "Are you sure you want to cancel the KIC simulation? All progress will be lost.", "Cancel KIC Simulation", wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
result = dlg.ShowModal()
if (result == wx.ID_YES):
self.cancelKIC()
dlg.Destroy()
else:
# Finalize button, ask whether the changes will be accepted or rejected
dlg = wx.MessageDialog(self, "Do you want to accept the results of this loop modeling session?", "Accept/Reject Model", wx.YES_NO | wx.CANCEL | wx.ICON_QUESTION | wx.CENTRE)
result = dlg.ShowModal()
if (result == wx.ID_YES):
logInfo("Accepted KIC model")
accept = True
elif (result == wx.ID_NO):
logInfo("Rejected KIC model")
accept = False
else:
logInfo("Cancelled Finalize operation")
dlg.Destroy()
return
dlg.Destroy()
self.scoretypeMenu.Disable()
self.viewMenu.Disable()
self.modelMenu.Enable()
self.beginMenu.Enable()
self.endMenu.Enable()
self.btnLoopType.Enable()
if (self.loopType == "De Novo"):
self.txtSequence.Enable()
if (platform.system() == "Darwin"):
self.btnKIC.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnKIC.SetLabel("KIC!")
self.buttonState = "KIC!"
self.btnKIC.SetToolTipString("Perform KIC simulation with selected parameters")
self.cmd.label("all", "")
self.seqWin.cannotDelete = False
if (not(accept)):
self.cmd.remove("kic_view")
self.cmd.delete("kic_view")
return
# Get rid of the original pose, save the designed pose, and reload the structure in PyMOL
poseindx = -1
for r in range(0, len(self.seqWin.IDs)):
if (self.seqWin.IDs[r].find(self.selectedModel) >= 0):
poseindx = r
break
try:
self.cmd.remove(self.selectedModel)
self.cmd.delete(self.selectedModel)
self.cmd.remove("kic_view")
self.cmd.delete("kic_view")
self.cmd.load(self.selectedModel + "_K.pdb", self.selectedModel)
#self.KICView.pdb_info().name(str(self.selectedModel + ".pdb"))
self.seqWin.reloadPose(poseindx, self.selectedModel, self.selectedModel + "_K.pdb")
defaultPyMOLView(self.cmd, self.selectedModel)
del self.KICView
# IMPORTANT: You have to replace the model in the sandbox with the new designed model
os.remove(self.selectedModel + ".pdb")
os.rename(self.selectedModel + "_K.pdb", self.selectedModel + ".pdb")
except:
# Some weird error happened, do nothing instead of crashing
print "Bug at accept button click"
pass
def recoverFromError(self, msg=""):
# This function tells the user what the error was and tries to revert the protocol
# back to the pre-daemon state so the main GUI can continue to be used
if (len(msg) == 0):
f = open("errreport", "r")
errmsg = "An error was encountered during the protocol:\n\n"
for aline in f:
errmsg = errmsg + str(aline)
f.close()
os.remove("errreport")
else:
errmsg = msg
logInfo("Error Encountered")
logInfo(errmsg)
if (platform.system() == "Windows"):
sessioninfo = os.path.expanduser("~") + "\\InteractiveRosetta\\sessionlog"
else:
sessioninfo = os.path.expanduser("~") + "/.InteractiveRosetta/sessionlog"
errmsg = errmsg + "\n\nIf you don't know what caused this, send the file " + sessioninfo + " to a developer along with an explanation of what you did."
# You have to use a MessageDialog because the MessageBox doesn't always work for some reason
dlg = wx.MessageDialog(self, errmsg, "Error Encountered", wx.OK|wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
self.seqWin.cannotDelete = False
self.parent.GoBtn.Enable()
self.modelMenu.Enable()
self.btnLoopType.Enable()
self.beginMenu.Enable()
self.endMenu.Enable()
self.txtSequence.Enable()
self.btnKIC.Enable()
if (platform.system() == "Darwin"):
self.btnKIC.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnKIC.SetLabel("KIC!")
self.buttonState = "KIC!"
# Get rid of the messages
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing rotamer repacking") >= 0):
self.seqWin.msgQueue.pop(i)
break
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing refined KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
if (len(self.seqWin.msgQueue) > 0):
self.seqWin.labelMsg.SetLabel(self.seqWin.msgQueue[len(self.seqWin.msgQueue)-1])
else:
self.seqWin.labelMsg.SetLabel("")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
def threadKIC(self, event):
# Why am I doing this ridiculous timer thing for this KIC protocol?
# Because apparently on Linux there's some kind of weird bug that manifests when you
# attempt to run time.sleep loops looking for files to be generated
# Pango develops a phobia of periods in strings if you do that????
# Using this staged timer setup eliminates the error
# What is the problem? I don't know. Why does this fix it? I don't know
# The people on StackOverflow said to do it and it fixed it -_-
# I think it has something to do with Linux not liking things like "time.sleep"
# and calls to wx in threads
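# Stage machine overview (each stage is one branch of this handler):
#   stage 1: write "coarsekicinputtemp" with the PDB data and loop definitions,
#            then either submit the job to the server or rename the file to
#            "coarsekicinput" so the local daemon picks it up
#   stage 2: wait for "repackmetemp_0.pdb" from the coarse (centroid) KIC run,
#            restart the daemon, and rename the structures to "repackme_*.pdb"
#            for rotamer repacking
#   stage 3: wait for "repacked_0.pdb", restart the daemon again, and rename
#            "finekicinputtemp" to "finekicinput" for the fullatom refinement
#   stage 4: poll for "kicoutput" (or the server results), parse the energies,
#            and switch the KIC button to "Finalize!"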
# Dump a file with the loop modeling parameters for the daemon to pick up
goToSandbox()
if (self.stage == 1):
self.tmrKIC.Stop()
self.timeoutCount = 0
self.nstruct = int(self.txtNStruct.GetValue())
f = open("coarsekicinputtemp", "w")
pdbfile = self.selectedModel + ".pdb"
# Dump the PDB from PyMOL first in case the coordinates were altered by the user
self.cmd.save(pdbfile.strip(), "model " + self.selectedModel)
fixPyMOLSave(pdbfile.strip())
f.write("PDBFILE\t" + pdbfile.strip() + "\n")
f2 = open(pdbfile, "r")
f.write("BEGIN PDB DATA\n")
for aline in f2:
f.write(aline.strip() + "\n")
f.write("END PDB DATA\n")
f2.close()
#f.write("REMODEL\t" + self.loopType.upper() + "\n")
#chain = self.beginMenu.GetStringSelection()[0]
#seqpos = self.beginMenu.GetStringSelection()[3:]
#loopBegin = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
#f.write("LOOPBEGIN\t" + str(loopBegin) + "\n")
#chain = self.endMenu.GetStringSelection()[0]
#seqpos = self.endMenu.GetStringSelection()[3:]
#loopEnd = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
#f.write("LOOPEND\t" + str(loopEnd) + "\n")
#if (self.loopType == "De Novo"):
#f.write("SEQUENCE\t" + self.txtSequence.GetValue().strip().upper() + "\n")
#f.write("PIVOT\t" + str(self.menuPivot.GetSelection()) + "\n")
# Write the loops information
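# Each loop is written as a single tab-delimited record:
#   LOOP <TYPE> <sequence> <begin> <pivot> <end>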
for [loopType, sequence, model, begin, pivot, end] in self.loops:
f.write("LOOP\t" + loopType.upper() + "\t" + sequence.strip() + "\t" + str(begin) + "\t" + str(pivot) + "\t" + str(end) + "\n")
f.write("NSTRUCT\t" + str(self.nstruct) + "\n")
f.write("PERTURB\t" + self.perturbType + "\n")
#f.write("OUTPUTDIR\t" + self.outputdir + "\n")
f.close()
appendScorefxnParamsInfoToFile("coarsekicinputtemp", self.selectWin.weightsfile)
if (self.serverOn):
try:
self.ID = sendToServer("coarsekicinput")
dlg = wx.TextEntryDialog(None, "Enter a description for this submission:", "Job Description", "")
if (dlg.ShowModal() == wx.ID_OK):
desc = dlg.GetValue()
desc = desc.replace("\t", " ").replace("\n", " ").strip()
else:
desc = self.ID
# First make sure this isn't a duplicate
alreadythere = False
try:
f = open("downloadwatch", "r")
for aline in f:
if (len(aline.split("\t")) >= 2 and aline.split("\t")[0] == "KIC" and aline.split("\t")[1] == self.ID.strip()):
alreadythere = True
break
f.close()
except:
pass
if (not(alreadythere)):
f = open("downloadwatch", "a")
f.write("KIC\t" + self.ID.strip() + "\t" + str(datetime.datetime.now().strftime("%A, %B %d - %I:%M:%S %p")) + "\t" + getServerName() + "\t" + desc + "\n")
f.close()
dlg = wx.MessageDialog(self, "InteractiveROSETTA is now watching the server for job ID " + desc.strip() + ". You will be notified when the package is available for download.", "Listening for Download", wx.OK | wx.ICON_EXCLAMATION | wx.CENTRE)
dlg.ShowModal()
dlg.Destroy()
# Re-enable everything since we're not waiting for the local daemon to do anything
self.scoretypeMenu.Disable()
self.viewMenu.Disable()
self.modelMenu.Enable()
self.beginMenu.Enable()
self.endMenu.Enable()
self.btnLoopType.Enable()
if (self.loopType == "De Novo"):
self.txtSequence.Enable()
if (platform.system() == "Darwin"):
self.btnKIC.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnKIC.SetLabel("KIC!")
self.buttonState = "KIC!"
self.btnKIC.SetToolTipString("Perform KIC simulation with selected parameters")
self.cmd.label("all", "")
self.seqWin.cannotDelete = False
self.parent.GoBtn.Enable()
# Pop this message out of the queue
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
if (len(self.seqWin.msgQueue) > 0):
self.seqWin.labelMsg.SetLabel(self.seqWin.msgQueue[len(self.seqWin.msgQueue)-1])
else:
self.seqWin.labelMsg.SetLabel("")
logInfo("Coarse KIC input sent to server daemon with ID " + self.ID)
return
except:
dlg = wx.MessageDialog(self, "The server could not be reached! Ensure that you have specified a valid server and that you have an network connection.", "Server Could Not Be Reached", wx.OK | wx.ICON_EXCLAMATION | wx.CENTRE)
dlg.ShowModal()
dlg.Destroy()
return
else:
os.rename("coarsekicinputtemp", "coarsekicinput")
self.usingServer = False
logInfo("Coarse KIC input uploaded locally at coarsekicinput")
self.stage = 2
if (self.perturbType == "Perturb Only, Centroid"):# or self.loopType == "Refine"):
self.stage = 4
self.looptimecount = 0
self.timeout = 18000000
self.progress = wx.ProgressDialog("KIC Progress", "Modeling loops in centroid mode...", 100, style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)
self.loop_indx = 0
self.last_progress_indx = 99
self.tmrKIC.Start(1000)
elif (self.stage == 2):
# This is really annoying, here's the ugly memory problem again.
# First we have to do a coarse KIC job in the daemon. That job uses centroid
# residues, which then have to be repacked in another instance of the daemon
# process because the repacking step pushes the memory usage too high.
# So: wait for the "repackmetemp.pdb" structure to show up, kill the daemon,
# and restart it to do the repacking step.
if (os.path.isfile("repackmetemp_0.pdb")):
self.tmrKIC.Stop()
# Pop this message out of the queue
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
self.seqWin.labelMsg.SetLabel("Performing rotamer repacking, please be patient...")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
self.seqWin.msgQueue.append("Performing rotamer repacking, please be patient...")
self.parent.parent.restartDaemon()
for decoy in range(0, self.nstruct):
os.rename("repackmetemp_" + str(decoy) + ".pdb", "repackme_" + str(decoy) + ".pdb") # So the new daemon sees it
logInfo("repackmetemp.pdb sent to be rotamer repacked")
self.stage = 3
if (self.perturbType == "Perturb Only, Fullatom"):
self.stage = 4
self.tmrKIC.Start(1000)
elif (os.path.isfile("errreport")):
# Something went wrong, tell the user about it (loop sequence probably too short)
self.tmrKIC.Stop()
self.parent.parent.restartDaemon() # Has to happen because coarse KIC is threaded
self.recoverFromError()
self.looptimecount = self.looptimecount + 1
if (self.looptimecount > self.timeout):
# The loop was probably too short and coarse KIC will run forever
# Kill the daemon and tell the user about it
self.tmrKIC.Stop()
# First delete that input file so the new daemon doesn't pick it up right away
try:
os.remove("coarsekicinput")
except:
pass
self.parent.parent.restartDaemon() # Has to happen because coarse KIC is threaded
self.recoverFromError("ERROR: The loop sequence is too short and cannot bridge the endpoint residues!")
elif (self.stage == 3):
# Now we have to wait for the output of the repacking step and restart the daemon again
# so we can finish up with a fine-grained KIC step
if (os.path.isfile("repacked_0.pdb")):
self.tmrKIC.Stop()
# Pop this message out of the queue
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing rotamer repacking") >= 0):
self.seqWin.msgQueue.pop(i)
break
self.seqWin.labelMsg.SetLabel("Performing refined KIC loop modeling, please be patient...")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
self.seqWin.msgQueue.append("Performing refined KIC loop modeling, please be patient...")
self.parent.parent.restartDaemon()
os.rename("finekicinputtemp", "finekicinput") # So the new daemon sees it
logInfo("Repacked coarse structure sent to fine grained KIC")
self.stage = 4
self.tmrKIC.Start(1000)
elif (os.path.isfile("errreport")):
# Something went wrong, tell the user about it
self.tmrKIC.Stop()
self.recoverFromError()
elif (self.stage == 4):
if (self.usingServer):
# See if the file has been uploaded yet and bring it here if so
queryServerForResults("kicoutput-" + self.ID)
queryServerForResults("coarsekicoutput-" + self.ID)
self.timeoutCount = self.timeoutCount + 1
if (self.timeoutCount >= serverTimeout):
self.tmrKIC.Stop()
# If this is taking too long, maybe there's something wrong with the server
# Ask the user if they want to continue waiting or use the local daemon instead
dlg = wx.MessageDialog(self, "The server is taking a long time to respond. Continue to wait? Pressing No will run the calculations locally.", "Delayed Server Response", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_YES):
# Reset the counter
self.timeoutCount = 0
else:
self.usingServer = False
self.timeoutCount = 0
os.rename("coarsekicinputtemp", "coarsekicinput")
logInfo("Server took too long to respond so the local daemon was used")
self.stage = 2
dlg.Destroy()
self.tmrKIC.Start(1000)
# Read the output dumped by the child process (finally!)
if (os.path.isfile("repackedtemp.pdb")):
# Flip back so the timer sees repacked.pdb and runs the local daemon
os.rename("coarsekicinputtemp", "finekicinputtemp")
os.rename("repackedtemp.pdb", "repacked.pdb")
# Pop this message out of the queue
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
self.usingServer = False
self.timeoutCount = 0
self.stage = 3
elif (os.path.isfile("kicoutput")):
self.tmrKIC.Stop()
try:
self.progress.Destroy()
except:
pass
self.residue_E = []
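# residue_E layout: row 0 holds the scoretype names (taken from the line whose
# first field is "total_score"); each subsequent row appears to hold one
# residue's energy values, one float per scoretype.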
f = open("kicoutput", "r")
for aline in f:
if (aline[0:6] == "OUTPUT"):
pdbfile = aline.split("\t")[1].strip()
self.KICView = self.seqWin.pdbreader.get_structure("kic_view", pdbfile)
elif (aline[0:9] == "LOOPBEGIN"):
self.loopBegin = int(aline.split("\t")[1])
elif (aline[0:7] == "LOOPEND"):
self.loopEnd = int(aline.split("\t")[1])
elif (aline[0:6] == "ENERGY"):
if (aline.split()[1] == "total_score"):
# This is the scoretype line, row 0 in residue_E
self.residue_E.append(aline.split()[1:])
else:
self.residue_E.append([])
indx = len(self.residue_E) - 1
for E in aline.split()[1:]:
self.residue_E[indx].append(float(E))
f.close()
logInfo("Found KIC output at kicoutput")
# Add the nonzero scoretypes to the energy viewing list from the current score function
self.scoretypeMenu.Clear()
for scoretype in self.residue_E[0]:
try:
toAdd = scoretypes[str(scoretype)]
except:
toAdd = str(scoretype)
self.scoretypeMenu.Append(toAdd)
self.scoretypeMenu.Enable()
# Pop this message out of the queue
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing refined KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
elif (self.seqWin.msgQueue[i].find("Performing rotamer repacking") >= 0):
self.seqWin.msgQueue.pop(i)
break
elif (self.seqWin.msgQueue[i].find("Performing KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
if (len(self.seqWin.msgQueue) > 0):
self.seqWin.labelMsg.SetLabel(self.seqWin.msgQueue[len(self.seqWin.msgQueue)-1])
else:
self.seqWin.labelMsg.SetLabel("")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
# Add these loop residues to the view menu so the user can look at the new loop
viewoptions = []
i = 1
for ch in self.KICView[0]:
for residue in ch:
if (i >= self.loopBegin and i <= self.loopEnd):
chain = ch.id
seqpos = str(residue.id[1])
resn = AA3to1(residue.resname)
viewoptions.append(chain + ":" + resn + seqpos)
i = i + 1
viewoptions.append("Whole Loop")
self.viewMenu.Clear()
self.viewMenu.AppendItems(viewoptions)
self.viewMenu.Enable()
self.parent.GoBtn.Enable()
self.btnKIC.Enable()
#self.enableControls()
#self.selectedModel = ""
if (platform.system() == "Darwin"):
self.btnKIC.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC_Finalize.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnKIC.SetLabel("Finalize!")
self.buttonState = "Finalize!"
self.btnKIC.SetToolTipString("Accept or reject protocol results")
os.remove("kicoutput")
# Load the designed pose as the "kic_view" model so the user can look at the results
self.cmd.load(pdbfile, "kic_view")
self.cmd.hide("everything", "model kic_view")
# To get the energy values in the B-factors
recolorEnergies(self.KICView, self.residue_E, "kic_view", "Total Energy", self.cmd)
self.seqWin.pdbwriter.set_structure(self.KICView)
self.seqWin.pdbwriter.save(pdbfile)
recolorEnergies(self.KICView, self.residue_E, "kic_view", self.scoretypeMenu.GetStringSelection(), self.cmd)
return
elif (os.path.isfile("errreport")):
# Something went wrong, tell the user about it
try:
self.progress.Destroy()
except:
pass
self.tmrKIC.Stop()
self.recoverFromError()
return
if (os.path.isfile("scanprogress")):
f = open("scanprogress", "r")
data = f.readlines()
f.close()
if (len(data) == 0):
return
try:
lastline = None
for j in range(len(data)-1, -1, -1):
if (data[j].strip().startswith("protocols.loops.loop_mover.refine.LoopMover_Refine_KIC: refinement cycle")):
lastline = data[j].strip()
break
if (lastline is None):
raise Exception()
tokens = lastline.split()
outercycles = tokens[-2]
innercycles = tokens[-1]
outer_num = int(outercycles.split("/")[0])
outer_den = int(outercycles.split("/")[1])
inner_num = int(innercycles.split("/")[0])
inner_den = int(innercycles.split("/")[1])
maxtrials = outer_den * inner_den
currpos = (outer_num-1) * inner_den + inner_num
indx = int(currpos * 100.0 / maxtrials)
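# Worked example: a log line ending in "2/3 4/10" gives outer 2/3 and inner
# 4/10, so maxtrials = 3*10 = 30, currpos = (2-1)*10 + 4 = 14, and
# indx = int(14 * 100.0 / 30) = 46.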
if (indx >= 100):
# This should be destroyed when the refined KIC output is available
indx = 99
except:
return
if (indx >= 100):
try:
self.progress.Destroy()
except:
pass
else:
if (self.last_progress_indx > indx):
self.loop_indx += 1
(keepGoing, skip) = self.progress.Update(indx, "Refining loop " + str(self.loop_indx) + " in fullatom mode...")
else:
(keepGoing, skip) = self.progress.Update(indx)
self.last_progress_indx = indx
if (not(keepGoing)):
# User clicked "Cancel" on the progress bar
self.cancelKIC()
self.progress.Destroy()
|
schenc3/InteractiveROSETTA
|
InteractiveROSETTA/scripts/kic.py
|
Python
|
gpl-2.0
| 83,223
|
[
"PyMOL"
] |
3509d990e6c370dab85f8eb0655cf0efeee962c4366607ede4153fb6ede11ba2
|
# coding: utf8
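# web2py translation dictionary for GestionLibre: keys are the source strings
# (mostly English) and values are their Spanish renderings. Entries whose value
# merely repeats the key, or that carry a "<translate>" marker, are still
# awaiting translation.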
{
'': '',
' Quotas: %(quotas)s x%(quota_amount).2f': ' Quotas: %(quotas)s x%(quota_amount).2f',
' Transaction number: %s': ' Transaction number: %s',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
'/absolute/folder/path': '/absolute/folder/path',
'About': 'About',
'Account': 'Cuenta',
'Accounting': 'Contabilidad',
'Accounts plan': 'Accounts plan',
'Actions': 'Actions',
'Activate period': 'Activate period',
'Active user: ': 'Usuario activo: ',
'Add article': 'Ingresar artículo',
'Add check': 'Ingresar cheque',
'Add item': 'Ingresar ítem',
'Add payment method': 'Ingresar método de pago',
'Add tax': 'Ingresar impuesto',
'Administrative interface': 'Interfaz administrativa',
'Administrative panel': 'Panel administrativo',
'Advanced': 'Avanzado',
'All tables modified': 'All tables modified',
'Allocate': 'Asignar',
'Allocate orders': 'Allocate orders',
'Allocated': 'Asignada/o',
'Amount': 'Importe',
'Appadmin': 'Appadmin',
'Apply payment': 'Apply payment',
'Archivo': 'Archivo',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Articles': 'Artículos',
'Articles list': 'Lista de artículos',
'Assign travel': 'Assign travel <translate>',
'Auto apply': 'Auto-aplicar',
'Available databases and tables': 'Available databases and tables',
'Ayuda': 'Ayuda',
'Back to list': 'Volver a la lista',
'Backup': 'Copia de seguridad',
'Bank': 'Bank',
'Banks': 'Banks',
'Batch': 'Batch',
'Bill': 'Bill',
'Bill checked': 'Bill checked',
'Billing': 'Facturación',
'Blank for price list values': 'En blanco para valores de la lista de precios',
'Branch': 'Sucursal',
'Branches': 'Sucursales',
'Browse': 'Explorar',
'By article': 'Por artículo',
'CA': 'CC',
'CRUD': 'ABM',
'CSV parameters file: /absolute/path/file_name.csv': 'CSV parameters file: /absolute/path/file_name.csv',
'CSV table files path: /absolute/path/tables_folder': 'CSV table files path: /absolute/path/tables_folder',
'Calculate movements difference....': 'Calcular diferencia de movimientos....',
'Calculated difference: %s': 'Calculated difference: %s',
'Cancel': 'Cancel',
'Cannot be empty': 'No puede ser vacío',
'Cash': 'Caja',
'Cash/transfer': 'Cash/transfer',
'Change': 'Cambiar',
'Change layout colors': 'Change layout colors',
'Change location': 'Cambiar ubicación',
'Change password': 'Cambiar la contraseña',
'Change stock': 'Cambiar existencias',
'Change update taxes value to %s': 'Cambiar/actualizar valor de impuesto a %s',
'Change user': 'Cambiar el usuario',
'Check to delete': 'Check to delete',
'Check to delete:': 'Check to delete:',
'Checks': 'Checks',
'Checks list': 'Checks list',
'Choose a concept': 'Seleccionar concepto',
'Choose a document type': 'Choose a document type',
'Choose a price list': 'Elija una lista de precios',
'Client IP': 'Cliente IP',
'Closing': 'Cierre',
'Code': 'Código',
'Collect': 'Collect',
'Color': 'Color',
'Compras': 'Compras',
'Concept': 'Concepto',
'Contabilidad': 'Contabilidad',
'Contact Group': 'Grupo de contactos',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Could not change': 'Could not change',
'Could not load the firm contact information': 'No se pudo cargar la información de contacto de empresa',
'Could not process the operation': 'Could not process the operation',
'Could not process the operation: it is not editable': 'Could not process the operation: it is not editable',
'Could not process the receipt': 'Could not process the receipt',
'Create': 'Crear',
'Create down payment': 'Create down payment <translate>',
'Create fee': 'Crear arancel',
'Create invoice': 'Crear factura',
'Create invoice batch': 'Create invoice batch',
'Create order': 'Crear pedido',
'Create payment': 'Create payment',
'Create/Edit orders': 'Crear/editar pedidos',
'Credit': 'Credit',
'Credit card': 'Tarjeta de crédito',
'Crm': 'Crm',
'Csv to db': 'Csv to db',
'Current account': 'Cuenta corriente',
'Current account calculated amount': 'Valor calculado de la cuenta corriente',
'Current account list/payments': 'Cuenta corriente: lista/pagos',
'Current account payment data': 'Información de pagos de cuenta corriente',
'Current account payment options': 'Current account payment options',
'Current account quotas': 'Cuotas de cuenta corriente',
'Current account report': 'Informe de cuenta corriente',
'Current account value: %s': 'Current account value: %s',
'Current accounts': 'Current accounts',
'Current accounts data': 'Current accounts data',
'Current accounts detail': 'Current accounts detail',
'Current accounts payment': 'Current accounts payment',
'Current accounts payments': 'Pagos de cuentas corrientes',
'Current accounts type': 'Current accounts type',
'Current accounts type: %(at)s': 'Current accounts type: %(at)s',
'Current language': 'Lenguaje actual',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'Customer': 'Deudor',
'Customer Panel': 'Panel de Clientes',
'Customer control panel': 'Panel de control de clientes',
'Customer control panel (requires registration and login)': 'Panel de control de clientes (requiere registro y autenticación)',
'Customer current account': 'Cuenta corriente de Deudor',
'Customer current account status': 'Customer current account status',
'Customer deletion date': 'Fecha de eliminación del deudor',
'Customer firm name': 'Razón social del deudor',
'Customer panel': 'Customer panel',
'Customer starting date': 'Fecha de inicio del deudor',
'Customer/Supplier data': 'Customer/Supplier data',
'DB Model': 'DB Model',
'Database': 'Base de datos',
'Date': 'Date',
'Dates: ': 'Dates: ',
'Db to csv': 'Db to csv',
'Deactivate access levels': 'Desactivar niveles de acceso',
'Debit': 'Debit',
'Debt limit: %s': 'Debt limit: %s',
'Default': 'Default',
'Default salesperson': 'Vendedor por defecto',
'Delete value is %s': 'Delete value is %s',
'Delete:': 'Delete:',
'Description': 'Descripción',
'Design': 'Diseño',
'Desktop App': 'Aplicación de escritorio',
'Difference': 'Difference',
'Difference: %s': 'Diferencia: %s',
'Discount by customer': 'Descuento por deudor',
'Discount/Surcharges': 'Descuentos/Recargos',
'Discounts/Surcharges': 'Discounts/Surcharges',
'Document': 'Comprobante',
'Done': 'Done',
'Due date': 'Due date',
'E-mail': 'E-mail',
'Edit': 'Editar',
'Edit current record': 'Edit current record',
'Edit in movements': 'Edit in movements',
'Edit order number': 'Edit order number',
'Efectivo': 'Efectivo',
'Ending': 'Ending',
'Entries': 'Entries',
'Entries: %s': 'Ingresos: %s',
'Entry': 'Entry',
'Erasing record %s': 'Erasing record %s',
'Error trying to get the operation customer/supplier data from database': 'Error trying to get the operation customer/supplier data from database',
'Error: could not calculate the total debt.': 'Error: could not calculate the total debt.',
'Errors': 'Errors',
'Esta es la plantilla accounting/offset_account.html': 'Esta es la plantilla accounting/offset_account.html',
'Existencias': 'Existencias',
'Exits: %s': 'Salidas: %s',
'Facilitate collection': 'Facilitate collection <translate>',
'False if deferred payment (df), True if paid with cash, ch (check) or current account': 'Falso si es pago diferido (df), Verdadero si el pago es en efvo., ch (cheque) o cuenta corriente',
'Family': 'Family',
'Fax': 'Fax',
'Fee': 'Fee',
'Fees': 'Fees',
'Fees list': 'Fees list',
'File': 'Archivo',
'File CRUD': 'ABM Archivos',
'File name': 'File name',
'Financials': 'Financials',
'Finantial situation': 'Situación financiera',
'Firm': 'Razón social',
'First name': 'First name',
'Fiscal controller': 'Fiscal controller',
'For PostgreSQL databases. Use this option with care. A superuser database conection is required': 'For PostgreSQL databases. Use this option with care. A superuser database conection is required',
'For purchases: %(pt)s payment is recorded as concept id %s(c)': 'For purchases: %(pt)s payment is recorded as concept id %(c)s',
'For purchases: %s payment is recorded as concept id %s': 'Para compras: %s pago es registrado como concepto id %s',
'Form accepted': 'Form accepted',
'Form data: %(fd)s': 'Form data: %(fd)s',
'Form data: %s': 'Form data: %s',
'Forms': 'Formularios',
'Formulas': 'Formulas',
'Funds': 'Funds',
'Generate': 'Generar',
'GestionLibre': 'GestiónLibre',
'GestionLibre %(version)s': 'GestionLibre %(version)s',
'GestionLibre %s': 'GestionLibre %s',
'GestionLibre Prealpha v4': 'GestionLibre Prealpha v4',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'ID de grupo',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Header form': 'Header form',
'Help': 'Ayuda',
'ID': 'ID',
'Import': 'Importar',
'Import csv dir': 'Import csv dir',
'Import example db from CSV': 'Import example db from CSV',
'Import legacy tables': 'Import legacy tables',
'Import/Export': 'Import/Export',
'Increase/Decrease stock values': 'Increase/Decrease stock values',
'Increase/decrease stock values': 'Increase/decrease stock values',
'Index': 'Inicio',
'Initialize': 'Initialize',
'Insert movements element': 'Ingresar elemento de movimientos',
'Insert order element': 'Insert order element',
'Installment': 'Installment',
'Installment created': 'Installment created',
'Installments': 'Planes de pago',
'Insufficient source stock quantity': 'Insufficient source stock quantity',
'Insufficient stock value.': 'Insufficient stock value.',
'Internal State': 'Internal State',
'Invalid Query': 'Invalid Query',
'Invalid email': 'Invalid email',
'Invalid login': 'Invalid login',
'Invoice header type': 'Tipo de encabezado de factura',
'Item added': 'Item added',
'Item value input: %s': 'Item value input: %s',
'Journal Entries': 'Libros diarios',
'Journal Entry': 'Libro diario',
'Journal entries': 'Libros diarios',
'Journal entry': 'Journal entry',
'Journal entry total amount': 'Suma total del libro diario',
'Label': 'Etiqueta',
'Labels': 'Labels',
'Languages': 'Lenguajes',
'Last name': 'Last name',
'Layout': 'Layout',
'Layout colors': 'Colores de la interfaz',
'List fees': 'List fees',
'List installments': 'List installments',
'List of operation elements': 'Lista de elementos de la operación',
'List of operations': 'Lista de operaciones',
'List of order elements': 'List of order elements',
'List order allocation operations': 'Lista de operaciones de asignaciones de pedidos',
'List order allocations': 'Lista de asignaciones de pedidos',
'Lists': 'Lists',
'Logged in': 'Logged in',
'Logged out': 'Logged out',
'Login': 'Iniciar sesión',
'Login accepted': 'Login accepted',
'Logout': 'Terminar sesión',
'Lost password?': 'Lost password?',
'Map': 'Mapeo',
'Menu Model': 'Menu Model',
'Migration': 'Migration',
'Model': 'Modelo',
'Modify header': 'Modificar encabezado',
'Modify movements element': 'Modify movements element',
'Modify operation item': 'Modify operation item',
'Modify operation number': 'Modificar número de operación',
'Modify sales order element': 'Modify sales order element',
'Move stock items': 'Move stock items',
'Movement (offset): %(mo)s: %(a)s': 'Movement (offset): %(mo)s: %(a)s',
'Movements': 'Movimientos',
'Movements (Operations)': 'Movimientos (operaciones)',
'Movements add check': 'Movements add check',
'Movements add discount surcharge': 'Movements add discount surcharge',
'Movements add item': 'Movements add item',
'Movements add payment method': 'Movements add payment method',
'Movements add tax': 'Movements add tax',
'Movements articles': 'Movements articles',
'Movements current account concept': 'Movements current account concept',
'Movements current account data': 'Movements current account data',
'Movements current account quotas': 'Movements current account quotas',
'Movements detail': 'Detalle de operación',
'Movements element': 'Movements element',
'Movements header': 'Movements header',
'Movements list': 'Lista de movimientos',
'Movements modify check': 'Movements modify check',
'Movements modify element': 'Movements modify element',
'Movements modify header': 'Movements modify header',
'Movements modify item': 'Movements modify item',
'Movements option update stock': 'Movements option update stock',
'Movements option update taxes': 'Movements option update taxes',
'Movements panel': 'Panel de movimientos',
'Movements price list': 'Movements price list',
'Movements process': 'Movements process',
'Movements process. Operation: %s': 'Registrar movimientos. Operación: %s',
'Movements select': 'Movements select',
'Movements select warehouse': 'Movements select warehouse',
'Movements start': 'Movements start',
'Moving to new record': 'Moving to new record',
'Name': 'Nombre',
'New Record': 'New Record',
'New customer': 'New customer',
'New customer order element': 'New customer order element',
'New customer order modify element': 'New customer order modify element',
'New expenses invoice': 'New expenses invoice',
'New fee': 'New fee',
'New function': 'New function',
'New installment': 'Nuevo plan de pago',
'New invoice': 'New invoice',
'New operation': 'Nueva operación',
'New operation (movements form)': 'Nueva operación (formulario de movimientos)',
'New operation check': 'New operation check',
'New operation item': 'Nuevo ítem de operación',
'New operation tax': 'New operation tax',
'New option': 'Nueva opción',
'New option created.': 'New option created.',
'New order allocation': 'New order allocation',
'New packing slip from this allocation': 'Nuevo remito desde esta asignación de pedidos',
'New query': 'Nueva consulta',
'New subcustomer': 'New subcustomer',
'No databases in this application': 'No databases in this application',
'No document type specified': 'No document type specified',
'No tax id selected': 'No tax id selected',
'None selected': 'No se seleccionó un elemento',
'Number': 'Número',
'Object or table name': 'Nombre de tabla u objeto',
'Observations': 'Observaciones',
'Operation': 'Operación',
'Operation %(operation)s is not editable': 'La operación %(operation)s no se puede editar',
'Operation %s is not editable': 'La operación %s no es editable',
'Operation detail': 'Detalle de la operación',
'Operation details: %s': 'Operation details: %s',
'Operation discounts and surcharges': 'Descuentos y recargos de la operación',
'Operation header': 'Encabezado de la operación',
'Operation header incomplete. Please select a document type': 'Operation header incomplete. Please select a document type',
'Operation id(s): %s': 'Operation id(s): %s',
'Operation installment': 'Operation installment',
'Operation modified': 'Operación modificada',
'Operation number %(id)s': 'Operation number %(id)s',
'Operation number %s': 'Número de operación %s',
'Operation processed': 'Operation processed',
'Operation processing failed: debt limit reached': 'Operation processing failed: debt limit reached',
'Operation processing result': 'Resultado del registro de la operación',
'Operation successfully processed': 'La operación se registró correctamente',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s',
'Operation: %s. Amount: %s. Value: %s. Concept: %s, Quantity: %s, Movement: %s': 'Operación: %s. Importe: %s. Valor: %s. Concepto: %s, Cantidad: %s, Movimiento: %s',
'Operations': 'Operaciones',
'Operations list': 'Lista de operaciones',
'Option': 'Option',
'Option modified.': 'Option modified.',
'Options': 'Opciones',
'Order allocation': 'Asignación de pedidos',
'Order allocation %s': 'Order allocation %s',
'Order allocation list': 'Lista de asignación de pedidos',
'Order list': 'Lista de pedidos',
'Order number': 'Order number',
'Ordered': 'Pedido/a',
'Origin': 'Origen',
'Other': 'Otros',
'Output': 'Output',
'Packing slip': 'Remito',
'Page setup': 'Configurar página',
'Parameters': 'Parámetros',
'Passages': 'Passages',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'Password reset': 'Reiniciar contraseña',
'Pay': 'Pay',
'Per item printing': 'Impresión por ítem',
'Period': 'Ciclo/Período',
'Please choose different warehouses': 'Please choose different warehouses',
"Please insert your firm's tax id": 'Por favor ingrese la identificación tributaria de su empresa',
'Points to order / invoice / packingslips': 'Apunta a pedidos / facturas / remitos',
'Populate tables': 'Populate tables',
'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s': 'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s',
'Post register specify firm': 'Post register specify firm',
'Post registration form': 'Post registration form',
'Post-registration form': 'Formulario post-registro',
'Postal address': 'Dirección postal',
'Posted': 'Registrado',
'Predefine documents': 'Predefinir comprobantes',
'Price check': 'Price check',
'Price list': 'Lista de precios',
'Price lists': 'Price lists',
'Prices': 'Precios',
'Print this document': 'Imprimir este documento',
'Print...': 'Impresión...',
'Process': 'Registrar',
'Process jurisdictions': 'Procesar jurisdicciones',
'Process operation': 'Registrar operación',
'Processes': 'Processes',
'Product': 'Producto',
'Product billing': 'Product billing',
'Product code': 'Código de producto',
'Production': 'Production',
'Profile': 'Profile',
'Prototype app': 'Prototype app',
'Purchases': 'Compras',
'Quantity': 'Cantidad',
'Queries': 'Consultas',
'Query:': 'Query:',
'Quit': 'Salir',
'Quota': 'Quota',
'Quotas': 'Quotas',
'RIA Create/Edit operations': 'Modo RIA crear/editar operaciones',
'RIA Product billing': 'Modo RIA facturación de productos',
'RIA Receipt': 'Modo RIA recibos',
'RIA Stock': 'Modo RIA existencias',
'RIA Stock main menu': 'RIA Stock main menu',
'Read': 'Read',
'Receipt items list': 'Receipt items list',
'Receipt number': 'Receipt number',
'Receipt processed': 'Receipt processed',
'Receipts list': 'Receipts list',
'Receive': 'Recibir',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Record Created',
'Record ID': 'ID del registro',
'Record Updated': 'Record Updated',
'Record updated': 'Record updated',
'Redirecting from event': 'Redirecting from event',
'Referenced table': 'Tabla referenciada',
'Register': 'Registrarse',
'Registration': 'Registration',
'Registration identifier': 'Registration identifier',
'Registration key': 'Registration key',
'Registration successful': 'Registration successful',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'Replica': 'Replica',
'Reportes': 'Reportes',
'Reports': 'Reportes',
'Reset': 'Reiniciar',
'Reset Password key': 'Reset Password key',
'Reset operation': 'Reiniciar operación',
'Reset order': 'Reset order',
'Reset packing slip': 'Reset packing slip',
'Reset receipt': 'Reset receipt',
'Revert payment application': 'Revert payment application',
'Ria movements': 'Ria movements',
'Ria movements process': 'Ria movements process',
'Ria movements reset': 'Ria movements reset',
'Ria new customer order': 'Ria new customer order',
'Ria new customer order reset': 'Ria new customer order reset',
'Ria product billing': 'Ria product billing',
'Ria product billing start': 'Ria product billing start',
'Ria stock': 'Ria stock',
'Role': 'Rol',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'SCM': 'SCM',
'Sales': 'Ventas',
'Sales contact': 'Contacto de ventas',
'Scm': 'Scm',
'Se requiere un usuario autenticado': 'Se requiere un usuario autenticado',
'Securities': 'Securities',
'Security policies': 'Políticas de seguridad',
'Select': 'Select',
'Select an operation type': 'Seleccione una clase de operación',
'Select price list': 'Seleccione una lista de precios',
'Select warehouse': 'Seleccione un depósito',
'Selection action: %s': 'Selection action: %s',
'Send': 'Enviar',
'Session closed by user input': 'Sesión finalizada por acción del usuario',
'Session data: %s': 'Session data: %s',
'Set colors as default': 'Establecer como colores por defecto',
'Set default layout colors': 'Set default layout colors',
'Set language': 'Set language',
'Set options': 'Set options',
'Setting offset concept to %s': 'Setting offset concept to %s',
'Setup': 'Configuración',
'Specify firm': 'Especificar razón social',
'Starting': 'Starting',
'Stock': 'Existencias',
'Stock item update': 'Stock item update',
'Stock list': 'Listado de existencias',
'Stock movement': 'Movimiento de existencias',
'Stock query': 'Consulta de existencias',
'Stock updated': 'Stock updated',
'Stock value changed': 'Stock value changed',
'Storage folder': 'Storage folder',
'Structures': 'Structures',
'Stylesheet': 'Stylesheet',
'Subcustomer': 'Cliente',
'Subcustomer current account': 'Cuenta corriente cliente',
'Submit': 'Submit',
'Summary': 'Summary',
'Supplier': 'Proveedor',
'System tables': 'Tablas del sistema',
'TAX ID': 'Identificación impositiva',
'Tables': 'Tables',
'Tax _id': 'Tax _id',
'Tax id': 'Clave impositiva',
'Taxes are': 'Acción para impuestos',
'Telephone numbers': 'Números telefónicos',
'Terms of payment': 'Terms of payment <translate>',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The CSV data was stored at your web2py root folder': 'The CSV data was stored at your web2py root folder',
'The db load failed with these errors: ': 'The db load failed with these errors: ',
'The db records were uploaded correctly': 'The db records were uploaded correctly',
'The following operations were created': 'The following operations were created',
'The form has errors': 'The form has errors',
'The item specified was not found in the warehouse': 'The item specified was not found in the warehouse',
'The item will be removed without confirmation': 'Se eliminará el ítem sin confirmación',
'The links': 'Enlaces',
'The operation has current account movements: %s': 'The operation has current account movements: %s',
'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s': 'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s',
'The user entered does not exist': 'The user entered does not exist',
'This action requires authenticated users': 'Se requiere un usuario autenticado',
'This is the webapp index view of': 'Esta es la vista inicial de la interfaz web de',
'Timestamp': 'Fecha y hora',
'Total': 'Total',
'Total amount': 'Monto total',
'Total debt': 'Total adeudado',
'Transfers': 'Transferencias',
'Trying with': 'Trying with',
'Type of current account': 'Tipo de cuenta corriente',
'Update': 'Actualización',
'Update fee': 'Update fee',
'Update installment': 'Update installment',
'Update order allocation': 'Actualizar asignación de pedido',
'Update quota': 'Update quota',
'Update:': 'Update:',
'Updating stock id: %(st)s as %(vl)s': 'Updating stock id: %(st)s as %(vl)s',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'User',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'ID de usuario',
'VAT sub-journal': 'Subdiario IVA',
"Valid firm tax id's": 'Identificación tributaria válida',
'Value': 'Valor',
'Values: %s': 'Values: %s',
'Various': 'Varios',
'Ventanas': 'Ventanas',
'Ventas': 'Ventas',
'Verify': 'Verificar',
'Verify Password': 'Verify Password',
'View': 'View',
'WARNING: JOURNAL ENTRY IS UNBALANCED': 'WARNING: JOURNAL ENTRY IS UNBALANCED',
'Warehouse': 'Depósito',
'Warning! Wrong document type.': 'Warning! Wrong document type.',
'Web interface': 'Interfaz web',
'Welcome': 'Welcome',
'Welcome to web2py and GestionLibre': 'Welcome to web2py and GestionLibre',
'Wiki': 'Wiki',
'Windows': 'Ventanas',
"You have not specified you firm's TAX ID. Please visit the": "You have not specified you firm's TAX ID. Please visit the",
'abbr': 'abrev',
'account': 'cuenta',
'accounting': 'accounting',
'accounting period': 'Ejercicio contable',
'accumulated': 'acumulada/o',
'addition': 'ingresado/a',
'additions': 'ingresos',
'address': 'direcciones',
'adherent': 'adherente',
'agreement': 'acuerdo',
'aliquot': 'alícuota',
'allowance': 'allowance <translate>',
'amount': 'importe',
'and try again': 'and try again',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'args': 'args',
'authorization code': 'código de autorización',
'avoidance': 'avoidance <translate>',
'balance': 'balance',
'balanced': 'balanceado',
'bank': 'banco',
'bank check': 'cheque',
'bank checks': 'cheques',
'banks': 'bancos',
'bd': 'bd',
'birth': 'nacimiento',
'books': 'books <translate>',
'bouncer': 'rechazado',
'branch': 'sucursal',
'budget': 'budget <translate>',
'cache': 'cache',
'calculate': 'calcular',
'canceled': 'cancelada/o',
'cancellation': 'cancelación',
'capacity': 'capacidad',
'cash': 'Caja',
'cash box': 'caja',
'category': 'categoría',
'check limit': 'límite de cheques',
'checkbook': 'chequera',
'city': 'ciudad',
'closed': 'cerrada/o',
'code': 'código',
'coefficient': 'coeficiente',
'collected': 'cobrada/o',
'collection': 'colección',
'collections': 'colecciones',
'color': 'color',
'commission': 'comisión',
'compress': 'comprimir',
'concept': 'concepto',
'condition': 'condición',
'confirm printing': 'confirmar impresión',
'contact': 'contacto',
'continuous': 'continuo',
'contribution': 'contribución',
'contribution discount': 'descuento por contribución',
'copies': 'copias',
'cost center': 'centro de costo',
'countable': 'contable',
'country': 'país',
'coupons': 'cupones',
'credit': 'crédito',
'crm': 'crm',
'current account': 'cuenta corriente',
'current account limit': 'límite de cuenta corriente',
'customer': 'deudor',
'customer group': 'grupo deudores',
'customize me!': 'customize me!',
'data uploaded': 'data uploaded',
'database': 'database',
'database %s select': 'database %s select',
'datum': 'datum <translate>',
'days': 'días',
'db': 'db',
'debit': 'débito',
'debt limit': 'límite de deuda',
'default': 'default',
'deletion': 'eliminación',
'department': 'departamento',
'description': 'descripción',
'descriptions': 'descripciones',
'design': 'design',
'desired': 'deseada/o',
'detail': 'detalle',
'disabled': 'deshabilitada/o',
'discount': 'descuento',
'discounts': 'descuentos',
'discriminate': 'discriminar',
'discriminated': 'discriminada/o',
'document': 'comprobante',
'document purchases': 'comprobante de compras',
'document sales': 'comprobante de ventas',
'does not update stock': 'no actualizar las existencias',
'done!': 'done!',
'down payment': 'down payment <translate>',
'draft': 'borrador',
'due date': 'fecha de vencimiento',
'due_date': 'fecha de vencimiento',
'email': 'email',
'ending': 'finaliza',
'ending quota': 'última cuota',
'enter a number between %(min)g and %(max)g': 'ingrese un número entre %(min)g y %(max)g',
'enter an integer between %(min)g and %(max)g': 'ingrese un entero entre %(min)g y %(max)g',
'enter from %(min)g to %(max)g characters': 'ingrese de %(min)g a %(max)g caracteres',
'entry': 'ingreso',
'exchanged': 'intercambiada/o',
'exit': 'salida',
'expenditure': 'gasto',
'export as csv file': 'export as csv file',
'extra': 'extra',
'extra hours': 'horas extra',
'extras': 'extras',
'failure': 'inasistencias',
'family': 'familia',
'fax': 'fax',
'fee': 'arancel',
'fees': 'aranceles',
'file': 'archivo',
'filename.ext': 'filename.ext',
'financials': 'financials',
'first due': 'primer vencimiento',
'first name': 'nombre',
'fiscal': 'fiscal',
'fiscal controller': 'Controlador fiscal',
'fixed': 'fija/o',
'floor': 'piso',
'form': 'formulario',
'format': 'formato',
'formula': 'fórmula',
'from table': 'from table',
'fund': 'fondo',
'government increase': 'aumento del gobierno',
'gross receipts': 'ingresos brutos',
'half bonus': 'medio aguinaldo',
'healthcare': 'obra social',
'hour': 'hora',
'hourly': 'horaria/o',
'i.e. third party payment transaction number': 'i.e. third party payment transaction number',
'id': 'id',
'id 1': 'id 1',
'id number': 'número de id',
'identity card': 'tarjeta identificatoria',
'index value': 'valor de índice',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'installment': 'plan de pago',
'interests': 'intereses',
'internal': 'interna/o',
'invalid request': 'invalid request',
'invert': 'invertir',
'invoice': 'factura',
'invoices': 'facturas',
'issue': 'issue <translate>',
'journal entry': 'libro diario',
'journalized': 'journalized <translate>',
'jurisdiction': 'jurisdicción',
'kinship': 'parentesco',
'labor union': 'sindicato',
'language': 'lenguaje',
'large family': 'familia numerosa',
'last name': 'apellido',
'late payment': 'pago con retraso',
'legal name': 'razón social',
'lines': 'líneas',
'liquidated': 'liquidado',
'liquidation': 'liquidación',
'lot': 'lote',
'marital status': 'estado civil',
'measure': 'unidad de medida',
'migration': 'migration',
'module': 'módulo',
'month': 'mes',
'monthly amount': 'importe mensual',
'movement': 'movimiento',
'msg': 'msg',
'multiple pages': 'múltiples páginas',
'name': 'nombre',
'nationality': 'nacionalidad',
'nationality id': 'id de nacionalidad',
'net': 'neto',
'new record inserted': 'new record inserted',
'next': 'próxima/o',
'next 100 rows': 'next 100 rows',
'not logged in': 'no autenticado',
'not updated': 'no actualizado',
'notes': 'notas',
'number': 'número',
'observations': 'observaciones',
'operation': 'operación',
'operation 1': 'operación 1',
'operation 2': 'operación 2',
'operations': 'operations',
'or import from csv file': 'or import from csv file',
'order number': 'número de orden',
'orderable': 'asignable a pedidos',
'orders': 'pedidos',
'other': 'otras/os',
'output': 'output',
'own': 'propia/o',
'packing slips': 'remitos',
'pages': 'páginas',
'paid': 'paga/o',
'paid quotas': 'cuotas pagas',
'paid vacation': 'vacaciones pagas',
'password': 'contraseña',
'patronal': 'patronal',
'payment': 'pago',
'payment method': 'payment method <translate>',
'payment terms': 'payment terms <translate>',
'payroll': 'payroll <translate>',
'pension': 'jubilación',
'per diem': 'per diem <translate>',
'percentage': 'porcentaje',
'place of delivery': 'lugar de entrega',
'plant': 'planta',
'please input your password again': 'please input your password again',
'point of sale': 'punto de venta',
'posted': 'hora/fecha de registro',
'preprinted': 'preimpreso',
'presentation': 'presentación',
'presenteesm': 'presentismo',
'presenteesm discount': 'descuento de presentismo',
'previous 100 rows': 'previous 100 rows',
'price': 'precio',
'price list': 'lista de precios',
'printed': 'impreso',
'printer': 'impresora',
'prints': 'imprime',
'priority': 'prioridad',
'processed': 'registrado',
'products': 'productos',
'profit percentage': 'porcentaje de ganancias',
'quantity': 'cantidad',
'quantity 1': 'cantidad 1',
'quantity 2': 'cantidad 2',
'queries': 'consultas',
'quota': 'cuota',
'quotas': 'cuotas',
'rate': 'rate <translate>',
'receipt': 'recibo',
'receipts': 'recibos',
'receives': 'recibe',
'record': 'record',
'record does not exist': 'record does not exist',
'record id': 'record id',
'registration': 'registration',
'registration key': 'clave de registro',
'rejection': 'rechazo',
'remunerative': 'remunerativa/o',
'repair': 'reparar',
'replica': 'replica',
'replicate': 'replicar',
'replicated': 'replicada/o',
'represent': 'represent',
'requires': 'requires',
'reserved': 'reservada/o',
'reset password key': 'clave para reconfigurar contraseña',
'retentions': 'retenciones',
'role': 'rol',
'salary': 'salario',
'salesperson': 'personal de ventas',
'schedule': 'agenda',
'schooling': 'escolaridad',
'scm': 'scm',
'scrap': 'scrap <translate>',
'second due': 'segundo vencimiento',
'selected': 'selected',
'seniority': 'antigüedad',
'seniority years': 'años de antigüedad',
'separate': 'separada/o',
'session.difference :%s': 'session.diferencia :%s',
'setup': 'setup',
'sex': 'sexo',
'sick days': 'inasistencia por enfermedad',
'situation': 'situación',
'size': 'tamaño',
'social services': 'social services <translate>',
'source': 'fuente',
'spouse': 'esposa',
'staff': 'personal',
'staff category': 'categoría de personal',
'starting': 'comienza',
'starting quota': 'cuota inicial',
'state': 'estado',
'statement': 'statement <translate>',
'stock': 'existencias',
'stock quantity': 'cantidad en existencia',
'street': 'calle',
'subcategory': 'subcategoría',
'subcustomer': 'cliente',
'subject': 'asunto',
'supplier': 'proveedor',
'surcharge': 'recargo',
'surcharges': 'recargos',
'suspended': 'suspendida/o',
'table': 'table',
'table number': 'número de tabla',
'tax': 'impuesto',
'tax identificar': 'identificar impuesto',
'tax identification': 'clave impositiva',
'taxed': 'gravada/o',
'telephone': 'teléfono',
'term': 'término',
'text': 'texto',
'ticket': 'ticket',
'times': 'times <translate>',
'transport': 'transporte',
'type': 'tipo',
'unable to parse csv file': 'unable to parse csv file',
'unitary': 'unitaria/o',
'units': 'unidades',
'updated': 'actualizado',
'updates stock': 'actualizar existencias',
'upper limit': 'límite superior',
'user': 'usuario',
'vacations': 'vacaciones',
'valuation': 'valuación',
'value': 'valor',
'value already in database or empty': 'valor ya en la base de datos o vacío',
'value not in database': 'value not in database',
'voided': 'anulado',
'voluntary': 'voluntaria/o',
'warehouse': 'depósito',
'with old record': 'with old record',
'year': 'año',
'zip code': 'código postal',
}
|
reingart/gestionlibre
|
languages/es.py
|
Python
|
agpl-3.0
| 34,315
|
[
"VisIt"
] |
1821440997573378706936914bea9cdc933593597e1b19d4216e92e81233911f
|
"""
depth_connectivity.py
---
Assign bathymetry to a grid based on cell-to-cell connectivity
derived from a high-resolution DEM.
Adapted from Holleman and Stacey, JPO, 2014.
Primary entry point:
edge_depths=edge_connection_depth(g,dem,edge_mask=None,centers='lowest')
see end of file
"""
# Copied from .../research/spatialdata/us/ca/lidar/direct_biased/direct_biased.py
from __future__ import print_function
import numpy as np
import pdb
from scipy.ndimage import label
from .. import utils
debug=0 # set to 1 to enable the interactive debug plotting below
import matplotlib.pyplot as plt
try:
# gone away as of mpl 1.3
from matplotlib.nxutils import points_inside_poly
except ImportError:
from matplotlib import path
def points_inside_poly(points,ijs):
# closed path likes the first/last nodes to coincide
ijs=np.concatenate( (ijs,ijs[:1]) )
p=path.Path(ijs,closed=True)
return p.contains_points(points)
def greedy_edgemin_to_node(g,orig_node_depth,edge_min_depth):
"""
A simple approach to moving edge depths to nodes, when the
hydro model (i.e. DFM) will use the minimum of the nodes to
set the edge.
It sounds roundabout because it is, but there is not a
supported way to assign edge depth directly.
For each edge, we want to enforce a minimum depth in two senses:
1. one of its nodes is at the minimum depth
2. neither of the nodes are below the minimum depth
and..
3. the average of the two nodes is close to the average DEM depth
of the edge
Not yet sure of how to get all of those. This method focuses on
the first point, but in some situations that is still problematic.
The 3rd point is not attempted at all, but in DFM would only be
relevant for nonlinear edge depths which are possibly not even supported
for 3D runs.
"""
conn_depth=np.nan*orig_node_depth
# N.B. nans sort to the end
edge_min_ordering=np.argsort(edge_min_depth)
# The greedy aspect is that we start with edges at the
# lowest target depth, ensuring their elevations before
# setting higher edges
for j in edge_min_ordering:
if np.isnan(edge_min_depth[j]):
break # done with all of the target depths
nodes=g.edges['nodes'][j]
# is this edge already low enough, based on the minimum of
# node elevations set so far?
if ( np.any( np.isfinite(conn_depth[nodes]) ) and
(np.nanmin(conn_depth[nodes])<=edge_min_depth[j] ) ):
continue # yes, move on.
orig_z=orig_node_depth[nodes]
# original code skipped edges where either of the nodes were nan
# in the original grid -- seems unnecessary
# instead, now choose the node to update based on orig_node_depth if possible,
# considering nan to be above finite. failing that, choose nodes[0] arbitrarily.
if np.isnan(orig_z[0]):
if np.isnan(orig_z[1]):
target_node=nodes[0] # arbitrary
else:
target_node=nodes[1]
elif np.isnan(orig_z[1]):
target_node=nodes[0]
elif orig_z[0]<orig_z[1]:
target_node=nodes[0]
else:
target_node=nodes[1]
conn_depth[target_node]=edge_min_depth[j]
missing=np.isnan(conn_depth)
conn_depth[missing]=orig_node_depth[missing]
return conn_depth
def greedy_edge_mean_to_node(g,orig_node_depth=None,edge_depth=None,n_iter=100):
"""
Return node depths such that the mean of the node depths on each
edge approximates the provided edge_depth.
The approach is iterative, starting with the largest errors, visiting
each edge a max of once.
Still in development, has not been tested.
"""
from scipy.optimize import fmin
if edge_depth is None:
if 'depth' in g.edges.dtype.names:
edge_depth=g.edges['depth']
assert edge_depth is not None
if orig_node_depth is None:
if 'depth' in g.nodes.dtype.names:
orig_node_depth=g.nodes['depth']
else:
# Rough starting guess:
orig_node_depth=np.zeros( g.Nnodes(), 'f8')
for n in range(g.Nnodes()):
orig_node_depth[n] = edge_depth[g.node_to_edges(n)].mean()
# The one we'll be updating:
conn_depth=orig_node_depth.copy()
node_mean=conn_depth[g.edges['nodes']].mean(axis=1)
errors=node_mean - edge_depth
errors[ np.isnan(errors) ] = 0.0
potential=np.ones(g.Nedges())
for loop in range(n_iter):
verbose= (loop%100==0)
# Find an offender
j_bad=np.argmax(potential*errors)
if potential[j_bad]==0:
print("DONE")
break
potential[j_bad]=0 # only visit each edge once.
# Get the neighborhood of nodes:
# nodes=
jj_nbrs=np.concatenate( [ g.node_to_edges(n)
for n in g.edges['nodes'][j_bad] ] )
jj_nbrs=np.unique(jj_nbrs)
jj_nbrs = jj_nbrs[ np.isfinite(edge_depth[jj_nbrs]) ]
n_bad=g.edges['nodes'][j_bad]
def cost(ds):
# Cost function over the two depths of the ends of j_bad:
conn_depth[n_bad]=ds
new_errors=conn_depth[g.edges['nodes'][jj_nbrs]].mean(axis=1) - edge_depth[jj_nbrs]
# weight high edges 10x more than low edges:
cost=new_errors.clip(0,np.inf).sum() - 0.5 * new_errors.clip(-np.inf,0).sum()
return cost
ds0=conn_depth[n_bad]
cost0=cost(ds0)
ds=fmin(cost,ds0,disp=False)
costn=cost(ds)
conn_depth[n_bad]=ds
if verbose:
print("Loop %d: %d/%d edges starting error: j=%d => %.4f"%(loop,potential.sum(),len(potential),
j_bad,errors[j_bad]))
node_mean=conn_depth[g.edges['nodes']].mean(axis=1)
errors=node_mean - edge_depth
errors[ np.isnan(errors) ] = 0.0
if verbose:
print(" ending error: j=%d => %.4f"%(j_bad,errors[j_bad]))
return conn_depth
def points_to_mask(hull_ijs,nx,ny):
# This seems inefficient, but actually timed out at 0.3ms
# very reasonable.
# Create vertex coordinates for each grid cell...
# (<0,0> is at the top left of the grid in this system)
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x, y = x.flatten(), y.flatten()
points = np.vstack((x,y)).T
mask = points_inside_poly(points, hull_ijs)
return mask.reshape((ny,nx))
def min_connection_elevation(ijs,min_depth,max_depth,F):
while max_depth - min_depth > 0.01: # cm accuracy
# use a binary search, and the numpy labelling routines
mid_depth = 0.5*(max_depth+min_depth)
Fthresh = F <= mid_depth
labels,nlabels = label(Fthresh)
l1,l2 = labels[ ijs[:,1],ijs[:,0]]
if l1 != l2:
# too shallow -
min_depth = mid_depth
else:
# deep enough
max_depth = mid_depth
return 0.5*(min_depth+max_depth)
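# A minimal, self-contained sketch of the idea above: threshold a tiny
# synthetic "DEM" at increasing elevations until the two marked pixels fall
# in the same labeled region. The array and pixel locations are made up for
# illustration only (run via `python -m stompy.grid.depth_connectivity`,
# since the relative import above precludes running this file directly).
if __name__ == '__main__':
    F_demo = np.array([[0., 5., 0.],
                       [0., 2., 0.],
                       [0., 5., 0.]])
    # two points on opposite sides of the ridge, as (i=column, j=row):
    ijs_demo = np.array([[0, 1],
                         [2, 1]])
    z_conn = min_connection_elevation(ijs_demo, F_demo.min(), F_demo.max(), F_demo)
    # the sides connect through the saddle at 2.0 in the middle column
    print("connecting elevation: %.2f" % z_conn)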
def min_graph_elevation_for_edge(g,dem,j,starts='lowest'):
"""
g: unstructured_grid
j: edge index
dem: a Field subclass which supports extract_tile().
starts:
'circumcenter' connections are between voronoi centers
'centroid' connections are between cell centroids
'lowest' connections are between lowest point in cell
returns: the minimum edge elevation at which the cells adjacent
to j are hydraulically connected. nan if j is not adjacent to
two cells (i.e. boundary).
"""
# get the bounding box for the neighboring cells.
nc = g.edges['cells'][j]
if nc[0]<0 or nc[1]<0:
return np.nan
nc0_nodes=list(g.cell_to_nodes(nc[0]))
nc1_nodes=list(g.cell_to_nodes(nc[1]))
all_nodes=( nc0_nodes + nc1_nodes )
pnts = g.nodes['x'][all_nodes]
# asserts/assumes that the extents are multiples of dx,dy.
dx=dem.dx ; dy=dem.dy
dxy=np.array([dx,dy])
#assert dem.extents[0] % dem.dx == 0
#assert dem.extents[2] % dem.dy == 0
# protects from roundoff cases
pad=1
ll = np.floor(pnts.min(axis=0) / dxy - pad) * dxy
ur = np.ceil(pnts.max(axis=0) / dxy + pad) * dxy
xxyy = [ll[0],ur[0],ll[1],ur[1]]
# for a raster field, crop is much much faster than extract_tile
# tile = dem.extract_tile(xxyy)
tile=dem.crop(xxyy)
if tile is None:
return np.nan
# Some of the above is for precise usage of SimpleGrid.
# but in some cases we're dealing with a MultiRasterField, and the
# local resolution is coarser:
dx=tile.dx ; dy=tile.dy
# if the tile is not fully populated, also give up
if ( (tile.extents[0]>xxyy[0]) or
(tile.extents[1]<xxyy[1]) or
(tile.extents[2]>xxyy[2]) or
(tile.extents[3]<xxyy[3]) ):
print("Tile clipped by edge of DEM")
return np.nan
if debug:
fig=plt.figure(101)
fig.clf()
ax=fig.add_subplot(1,1,1)
tile.plot(interpolation='nearest',ax=ax)
ax.set_title('Extracted tile')
# old code manually constructed the convex hull, but with quads
# and so forth, it gets complicated - punt to shapely for the moment.
# hull_poly=g.cell_polygon(nc[0]).union(g.cell_polygon(nc[1]))
# hull_points=np.array(hull_poly.exterior)
nA,nB=g.edges['nodes'][j]
# nA_idx0=nc0_nodes.index(nA)
nB_idx0=nc0_nodes.index(nB)
nA_idx1=nc1_nodes.index(nA)
# rearrange so that nc0_nodes starts with B, ends with A
nc0_nodes=nc0_nodes[nB_idx0:] + nc0_nodes[:nB_idx0]
assert nc0_nodes[-1] == nA
# rearrange so that nc1_nodes starts with A, ends with B
nc1_nodes=nc1_nodes[nA_idx1:] + nc1_nodes[:nA_idx1]
assert nc1_nodes[-1] == nB
# A,B appear consecutively in nc0, reversed in nc1
hull_nodes=nc0_nodes + nc1_nodes[1:-1]
hull_points=g.nodes['x'][hull_nodes]
tile_origin = np.array( [ tile.extents[0], tile.extents[2]] )
tile_dxy = np.array( [tile.dx,tile.dy] )
def xy_to_ij(xy):
return (( xy - tile_origin ) / tile_dxy).astype(np.int32)
hull_ijs = xy_to_ij(hull_points)
# blank out the dem outside the two cells
ny, nx = tile.F.shape
valid = points_to_mask(hull_ijs,nx,ny)
F = tile.F.copy()
F[~valid] = 1e6
if starts in ['circumcenter','centroid']:
# map the two cell centers ij indices into the tile:
if starts=='circumcenter':
centers = g.cells_center(nc)[nc]
else:
centers = g.cells_centroid(nc)
lcenters = centers - tile_origin
# note that this is i -> x coordinate, j -> y coordinate
ijs = (lcenters / tile_dxy).astype(np.int32)
elif starts=='lowest':
nc0_ijs=xy_to_ij( g.nodes['x'][nc0_nodes] )
nc1_ijs=xy_to_ij( g.nodes['x'][nc1_nodes] )
valid0 = points_to_mask(nc0_ijs,nx,ny)
valid1 = points_to_mask(nc1_ijs,nx,ny)
j0,i0 = np.nonzero(valid0)
j1,i1 = np.nonzero(valid1)
linear0_min = F[valid0].argmin()
linear1_min = F[valid1].argmin()
ijs = np.array([ [i0[linear0_min],j0[linear0_min]],
[i1[linear1_min],j1[linear1_min]] ] )
else:
raise ValueError("'%s' not understood"%starts)
if debug:
fig=plt.figure(102)
fig.clf()
ax=fig.add_subplot(1,1,1)
ax.imshow(F,origin='lower',interpolation='nearest',vmin=-2,vmax=4)
ax.plot( ijs[:,0],ijs[:,1],'go')
ax.set_title('Centers')
if not valid[ijs[0,1],ijs[0,0]] or not valid[ijs[1,1],ijs[1,0]]:
print("Cell circumcenter(s) not in cell!")
return np.nan
# will probably end up grabbing the real cell depths here, rather
# than estimating by a point measurement on the DEM.
lcenter_depths = F[ijs[:,1],ijs[:,0]]
# clearly path cannot have max. elevation lower than either of the
# endpoints:
min_depth = lcenter_depths.max()
max_depth = F[valid].max()
# and this part takes 3.5ms - tolerable.
return min_connection_elevation(ijs,min_depth,max_depth,F)
def edge_connection_depth(g,dem,edge_mask=None,centers='circumcenter'):
"""
Return an array g.Nedges() where the selected edges have
a depth value corresponding to the minimum elevation at which
adjacent cells are hydraulically connected, evaluated on the
dem.
g: instance of UnstructuredGrid
dem: field.SimpleGrid instance, usually GdalGrid
edge_mask: bitmask for which edges to calculate, defaults to bounds of dem.
centers controls the reference point for each cell.
'circumcenter': use cell circumcenter
'centroid': use cell centroid
'lowest': use lowest point within the cell.
"""
if edge_mask is None:
# used to default to all edges:
# edge_mask=np.ones(g.Nedges(),'b1')
# but clipping to the DEM bounds makes more sense:
edge_mask=g.edge_clip_mask(dem.bounds())
sel_edges=np.nonzero(edge_mask)[0]
count=np.sum(edge_mask)
edge_elevations=np.nan*np.ones(g.Nedges())
g.edge_to_cells()
for ji,j in enumerate(sel_edges):
if ji%100==0:
print("%d/%d"%(ji,count))
elev = min_graph_elevation_for_edge(g,dem,j,starts=centers)
edge_elevations[j] = elev
return edge_elevations
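# A minimal usage sketch for this entry point. The loader calls below
# (UnstructuredGrid.from_ugrid, field.GdalGrid) and file names are
# assumptions for illustration -- substitute whatever actually loads your
# grid and DEM:
#
#     from stompy.grid import unstructured_grid
#     from stompy.spatial import field
#     g = unstructured_grid.UnstructuredGrid.from_ugrid('grid.nc')
#     dem = field.GdalGrid('bathy.tif')
#     edge_depths = edge_connection_depth(g, dem, centers='lowest')
#     node_depths = greedy_edgemin_to_node(g, g.nodes['depth'], edge_depths)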
def poly_mean_elevation(dem,pnts):
# asserts/assumes that the extents are multiples of dx,dy.
dx=dem.dx ; dy=dem.dy
dxy=np.array([dx,dy])
# protects from roundoff cases
pad=1
ll = np.floor(pnts.min(axis=0) / dxy - pad) * dxy
ur = np.ceil(pnts.max(axis=0) / dxy + pad) * dxy
xxyy = [ll[0],ur[0],ll[1],ur[1]]
# crop first - much faster
tile=dem.crop(xxyy)
if tile is None:
return np.nan
# Some of the above is for precise usage of SimpleGrid.
# but in some cases we're dealing with a MultiRasterField, and the
# local resolution is coarser:
dx=tile.dx ; dy=tile.dy
# if the tile is not fully populated, also give up
if ( (tile.extents[0]>xxyy[0]) or
(tile.extents[1]<xxyy[1]) or
(tile.extents[2]>xxyy[2]) or
(tile.extents[3]<xxyy[3]) ):
print("Tile clipped by edge of DEM")
return np.nan
tile_origin = np.array( [ tile.extents[0], tile.extents[2]] )
tile_dxy = np.array( [tile.dx,tile.dy] )
def xy_to_ij(xy):
return (( xy - tile_origin ) / tile_dxy).astype(np.int32)
hull_ijs = xy_to_ij(pnts)
# blank out the dem outside the two cells
ny, nx = tile.F.shape
valid = points_to_mask(hull_ijs,nx,ny)
return tile.F[valid].mean()
def cell_mean_depth(g,dem):
"""
Calculate "true" mean depth for each cell, at the resolution of
the DEM. This does not split pixels, though.
"""
cell_z_bed=np.nan*np.ones(g.Ncells())
for c in utils.progress(range(g.Ncells())):
cell_z_bed[c]=poly_mean_elevation(dem, g.nodes['x'][ g.cell_to_nodes(c) ])
return cell_z_bed
|
rustychris/stompy
|
stompy/grid/depth_connectivity.py
|
Python
|
mit
| 15,086
|
[
"VisIt"
] |
ec9e3782da6578708fb4acb8ccfa3a73aea12690a52dd5713854c4eaaae8b48d
|
"""
EvMenu
This implements a full menu system for Evennia. It is considerably
more flexible than the older contrib/menusystem.py and also uses
menu plugin modules.
To start the menu, just import the EvMenu class from this module,
```python
from evennia.utils.evmenu import EvMenu
EvMenu(caller, menu_module_path,
startnode="node1",
cmdset_mergetype="Replace", cmdset_priority=1,
allow_quit=True, cmd_on_quit="look")
```
Where `caller` is the Object to use the menu on - it will get a new
cmdset while using the Menu. The menu_module_path is the python path
to a python module containing function defintions. By adjusting the
keyword options of the Menu() initialization call you can start the
menu at different places in the menu definition file, adjust if the
menu command should overload the normal commands or not, etc.
The menu is defined in a module (this can be the same module as the
command definition too) with function defintions:
```python
def node1(caller):
# (this is the start node if called like above)
# code
return text, options
def node_with_other_name(caller, input_string):
# code
return text, options
```
Where caller is the object using the menu and input_string is the
command entered by the user on the *previous* node (the command
entered to get to this node). The node function code will only be
executed once per node-visit and the system will accept nodes with
both one or two arguments interchangeably.
The return values must be given in the above order, but each can be
returned as None as well. If the options are returned as None, the
menu is immediately exited and the default "look" command is called.
text (str, tuple or None): Text shown at this node. If a tuple, the second
element in the tuple is a help text to display at this node when
the user enters the menu help command there.
options (tuple, dict or None): ( {'key': name, # can also be a list of aliases. A special key is "_default", which
# marks this option as the default fallback when no other
# option matches the user input.
'desc': description, # option description
'goto': nodekey, # node to go to when chosen
'exec': nodekey, # node or callback to trigger as callback when chosen. If a node
# key is given the node will be executed once but its return
# values are ignored. If a callable is given, it must accept
# one or two args, like any node.
{...}, ...)
If key is not given, the option will automatically be identified by
its number 1..N.
Example:
```python
# in menu_module.py
def node1(caller):
text = ("This is a node text",
"This is help text for this node")
options = ({"key": "testing",
"desc": "Select this to go to node 2",
"goto": "node2",
"exec": "callback1"},
{"desc": "Go to node 3.",
"goto": "node3"})
return text, options
def callback1(caller):
# this is called when choosing the "testing" option in node1
# (before going to node2). It needs not have return values.
caller.msg("Callback called!")
def node2(caller):
text = '''
This is node 2. It only allows you to go back
to the original node1. This extra indent will
be stripped. We don't include a help text.
'''
options = {"goto": "node1"}
return text, options
def node3(caller):
text = "This ends the menu since there are no options."
return text, None
```
When starting this menu with `Menu(caller, "path.to.menu_module")`,
the first node will look something like this:
This is a node text
______________________________________
testing: Select this to go to node 2
2: Go to node 3
Where you can both enter "testing" and "1" to select the first option.
If the client supports MXP, they may also mouse-click on "testing" to
do the same. When making this selection, a function "callback1" in the
same module will be called as a callback before moving on to node2.
Using `help` will show the help text, otherwise a list of
available commands while in menu mode.
The menu tree is exited either by using the in-menu quit command or by
reaching a node without any options.
For a menu demo, import CmdTestMenu from this module and add it to
your default cmdset. Run it with this module, like `testmenu
evennia.utils.evmenu`.
"""
from textwrap import dedent
from inspect import isfunction, getargspec
from django.conf import settings
from evennia import Command, CmdSet
from evennia.utils.evtable import EvTable
from evennia.utils.ansi import ANSIString, strip_ansi
from evennia.utils.utils import mod_import, make_iter, pad, m_len
from evennia.commands import cmdhandler
# read from protocol NAWS later?
_MAX_TEXT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
# we use cmdhandler instead of evennia.syscmdkeys to
# avoid some cases of loading before evennia init'd
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT
# Return messages
# i18n
from django.utils.translation import ugettext as _
_ERR_NOT_IMPLEMENTED = _("Menu node '{nodename}' is not implemented. Make another choice.")
_ERR_GENERAL = _("Error in menu node '{nodename}'.")
_ERR_NO_OPTION_DESC = _("No description.")
_HELP_FULL = _("Commands: <menu option>, help, quit")
_HELP_NO_QUIT = _("Commands: <menu option>, help")
_HELP_NO_OPTIONS = _("Commands: help, quit")
_HELP_NO_OPTIONS_NO_QUIT = _("Commands: help")
_HELP_NO_OPTION_MATCH = _("Choose an option or try 'help'.")
class EvMenuError(RuntimeError):
"""
Error raised by menu when facing internal errors.
"""
pass
#------------------------------------------------------------
#
# Menu command and command set
#
#------------------------------------------------------------
class CmdEvMenuNode(Command):
"""
Menu options.
"""
key = "look"
aliases = ["l", _CMD_NOMATCH, _CMD_NOINPUT]
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"""
Implement all menu commands.
"""
caller = self.caller
menu = caller.ndb._menutree
if not menu:
err = "Menu object not found as %s.ndb._menutree!" % (caller)
self.caller.msg(err)
raise EvMenuError(err)
# flags and data
raw_string = self.raw_string
cmd = raw_string.strip().lower()
options = menu.options
allow_quit = menu.allow_quit
cmd_on_quit = menu.cmd_on_quit
default = menu.default
print "cmd, options:", cmd, options
if cmd in options:
# this will overload the other commands
# if it has the same name!
goto, callback = options[cmd]
if callback:
menu.callback(callback, raw_string)
if goto:
menu.goto(goto, raw_string)
elif cmd in ("look", "l"):
caller.msg(menu.nodetext)
elif cmd in ("help", "h"):
caller.msg(menu.helptext)
elif allow_quit and cmd in ("quit", "q", "exit"):
menu.close_menu()
if cmd_on_quit is not None:
caller.execute_cmd(cmd_on_quit)
elif default:
goto, callback = default
if callback:
menu.callback(callback, raw_string)
if goto:
menu.goto(goto, raw_string)
else:
caller.msg(_HELP_NO_OPTION_MATCH)
if not (options or default):
# no options - we are at the end of the menu.
menu.close_menu()
if cmd_on_quit is not None:
caller.execute_cmd(cmd_on_quit)
class EvMenuCmdSet(CmdSet):
"""
The Menu cmdset replaces the current cmdset.
"""
key = "menu_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"""
Called when creating the set.
"""
self.add(CmdEvMenuNode())
#------------------------------------------------------------
#
# Menu main class
#
#------------------------------------------------------------
class EvMenu(object):
"""
This object represents an operational menu. It is initialized from
a menufile.py instruction.
"""
def __init__(self, caller, menufile, startnode="start",
cmdset_mergetype="Replace", cmdset_priority=1,
allow_quit=True, cmd_on_quit="look"):
"""
Initialize the menu tree and start the caller onto the first node.
Args:
caller (str): The user of the menu.
menufile (str): The full or relative path to the menufile.
startnode (str, optional): The starting node in the menufile.
cmdset_mergetype (str, optional): 'Replace' (default) means the menu
commands will be exclusive - no other normal commands will
be usable while the user is in the menu. 'Union' means the
menu commands will be integrated with the existing commands
(it will merge with `merge_priority`), if so, make sure that
the menu's command names don't collide with existing commands
in an unexpected way. Also the CMD_NOMATCH and CMD_NOINPUT will
be overloaded by the menu cmdset. Other cmdset mergetypes
have little purpose for the menu.
cmdset_priority (int, optional): The merge priority for the
menu command set. The default (1) is usually enough for most
types of menus.
allow_quit (bool, optional): Allow user to use quit or
exit to leave the menu at any point. Recommended during
development!
cmd_on_quit (str or None, optional): When exiting the menu
(either by reaching a node with no options or by using the
in-built quit command (activated with `allow_quit`), this
command string will be executed. Set to None to not call
any command.
Raises:
EvMenuError: If the start/end node is not found in menu tree.
"""
self._caller = caller
self._startnode = startnode
self._menutree = self._parse_menufile(menufile)
if startnode not in self._menutree:
raise EvMenuError("Start node '%s' not in menu tree!" % startnode)
# variables made available to the command
self.allow_quit = allow_quit
self.cmd_on_quit = cmd_on_quit
self.default = None
self.nodetext = None
self.helptext = None
self.options = None
# store ourself on the object
self._caller.ndb._menutree = self
# set up the menu command on the caller
menu_cmdset = EvMenuCmdSet()
menu_cmdset.mergetype = str(cmdset_mergetype).lower().capitalize() or "Replace"
menu_cmdset.priority = int(cmdset_priority)
self._caller.cmdset.add(menu_cmdset)
# start the menu
self.goto(self._startnode, "")
def _parse_menufile(self, menufile):
"""
Parse a menufile, split it into #node sections, convert
each to an executable python code and store in a dictionary map.
Args:
menufile (str or module): The python.path to the menufile,
or the python module itself.
Returns:
menutree (dict): A {nodekey: func} dictionary.
"""
module = mod_import(menufile)
return dict((key, func) for key, func in module.__dict__.items()
if isfunction(func) and not key.startswith("_"))
def _format_node(self, nodetext, optionlist):
"""
Format the node text + option section
Args:
nodetext (str): The node text
optionlist (list): List of (key, desc) pairs.
Returns:
string (str): The options section, including
all needed spaces.
Notes:
This will adjust the columns of the options, first to use
a maximum of 4 rows (expanding in columns), then gradually
growing to make use of the screen space.
"""
#
# handle the node text
#
nodetext = dedent(nodetext).strip()
nodetext_width_max = max(m_len(line) for line in nodetext.split("\n"))
if not optionlist:
# return the node text "naked".
separator1 = "_" * nodetext_width_max + "\n\n" if nodetext_width_max else ""
separator2 = "\n" if nodetext_width_max else "" + "_" * nodetext_width_max
return separator1 + nodetext + separator2
#
# handle the options
#
# column separation distance
colsep = 4
nlist = len(optionlist)
# get the widest option line in the table.
table_width_max = -1
table = []
for key, desc in optionlist:
table_width_max = max(table_width_max,
max(m_len(p) for p in key.split("\n")) +
max(m_len(p) for p in desc.split("\n")) + colsep)
raw_key = strip_ansi(key)
if raw_key != key:
# already decorations in key definition
table.append(ANSIString(" {lc%s{lt%s{le: %s" % (raw_key, key, desc)))
else:
# add a default white color to key
table.append(ANSIString(" {lc%s{lt{w%s{n{le: %s" % (raw_key, raw_key, desc)))
ncols = (_MAX_TEXT_WIDTH // table_width_max) + 1 # number of ncols
nlastcol = nlist % ncols # number of elements left in last row
# get the amount of rows needed (start with 4 rows)
nrows = 4
while nrows * ncols < nlist:
nrows += 1
ncols = nlist // nrows # number of full columns
nlastcol = nlist % nrows # number of elements in last column
# get the final column count
ncols = ncols + 1 if nlastcol > 0 else ncols
if ncols > 1:
# only extend if longer than one column
table.extend([" " for i in xrange(nrows-nlastcol)])
# build the actual table grid
table = [table[icol*nrows:(icol*nrows) + nrows] for icol in xrange(0, ncols)]
# adjust the width of each column
total_width = 0
for icol in xrange(len(table)):
col_width = max(max(m_len(p) for p in part.split("\n")) for part in table[icol]) + colsep
table[icol] = [pad(part, width=col_width + colsep, align="l") for part in table[icol]]
total_width += col_width
# format the table into columns
table = EvTable(table=table, border="none")
# build the page
total_width = max(total_width, nodetext_width_max)
separator1 = "_" * total_width + "\n\n" if nodetext_width_max else ""
separator2 = "\n" + "_" * total_width + "\n\n" if total_width else ""
return separator1 + nodetext + separator2 + unicode(table)
def _execute_node(self, nodename, raw_string):
"""
Execute a node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
Returns:
nodetext, options (tuple): The node text (a string or a
tuple and the options tuple, if any.
"""
try:
node = self._menutree[nodename]
except KeyError:
self._caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
raise EvMenuError
try:
# the node should return data as (text, options)
if len(getargspec(node).args) > 1:
# a node accepting raw_string
nodetext, options = node(self._caller, raw_string)
else:
# a normal node, only accepting caller
nodetext, options = node(self._caller)
except KeyError:
self._caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
raise EvMenuError
except Exception:
self._caller.msg(_ERR_GENERAL.format(nodename=nodename))
raise
return nodetext, options
def callback(self, nodename, raw_string):
"""
Run a node as a callback. This makes no use of the return
values from the node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
if callable(nodename):
# this is a direct callable - execute it directly
try:
if len(getargspec(nodename).args) > 1:
# callable accepting raw_string
nodename(self._caller, raw_string)
else:
# normal callable, only the caller as arg
nodename(self._caller)
except Exception:
self._caller.msg(_ERR_GENERAL.format(nodename=nodename))
raise
else:
# nodename is a string; lookup as node
try:
# execute the node; we make no use of the return values here.
self._execute_node(nodename, raw_string)
except EvMenuError:
return
def goto(self, nodename, raw_string):
"""
Run a node by name
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
try:
# execute the node, make use of the returns.
nodetext, options = self._execute_node(nodename, raw_string)
except EvMenuError:
return
# validation of the node return values
helptext = ""
if hasattr(nodetext, "__iter__"):
if len(nodetext) > 1:
nodetext, helptext = nodetext[:2]
else:
nodetext = nodetext[0]
nodetext = str(nodetext) or ""
options = [options] if isinstance(options, dict) else options
# this will be displayed in the given order
display_options = []
# this is used for lookup
self.options = {}
self.default = None
if options:
for inum, dic in enumerate(options):
# fix up the option dicts
keys = make_iter(dic.get("key"))
if "_default" in keys:
keys = [key for key in keys if key != "_default"]
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
self.default = (goto, execute)
else:
keys = list(make_iter(dic.get("key", str(inum+1).strip()))) + [str(inum+1)]
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
if keys:
display_options.append((keys[0], desc))
for key in keys:
if goto or execute:
self.options[strip_ansi(key).strip().lower()] = (goto, execute)
self.nodetext = self._format_node(nodetext, display_options)
# handle the helptext
if helptext:
self.helptext = helptext
elif options:
self.helptext = _HELP_FULL if self.allow_quit else _HELP_NO_QUIT
else:
self.helptext = _HELP_NO_OPTIONS if self.allow_quit else _HELP_NO_OPTIONS_NO_QUIT
self._caller.execute_cmd("look")
def close_menu(self):
"""
Shutdown menu; occurs when reaching the end node.
"""
self._caller.cmdset.remove(EvMenuCmdSet)
del self._caller.ndb._menutree
# -------------------------------------------------------------------------------------------------
#
# Simple input shortcuts
#
# -------------------------------------------------------------------------------------------------
class CmdGetInput(Command):
"""
Enter your data and press return.
"""
key = _CMD_NOMATCH
aliases = _CMD_NOINPUT
def func(self):
"This is called when user enters anything."
caller = self.caller
callback = caller.ndb._getinputcallback
prompt = caller.ndb._getinputprompt
result = self.raw_string
ok = not callback(caller, prompt, result)
if ok:
# only clear the state if the callback does not return
# anything
del caller.ndb._getinputcallback
del caller.ndb._getinputprompt
caller.cmdset.remove(InputCmdSet)
class InputCmdSet(CmdSet):
"""
This stores the input command
"""
key = "input_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"called once at creation"
self.add(CmdGetInput())
def get_input(caller, prompt, callback):
"""
This is a helper function for easily request input from
the caller.
Args:
caller (Player or Object): The entity being asked
the question. This should usually be an object
controlled by a user.
prompt (str): This text will be shown to the user,
in order to let them know their input is needed.
callback (callable): A function that will be called
when the user enters a reply. It must take three
arguments: the `caller`, the `prompt` text and the
`result` of the input given by the user. If the
callback doesn't return anything or return False,
the input prompt will be cleaned up and exited. If
returning True, the prompt will remain and continue to
accept input.
Raises:
RuntimeError: If the given callback is not callable.
"""
if not callable(callback):
raise RuntimeError("get_input: input callback is not callable.")
caller.ndb._getinputcallback = callback
caller.ndb._getinputprompt = prompt
caller.cmdset.add(InputCmdSet)
caller.msg(prompt)
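# A minimal usage sketch (hypothetical callback name). The callback must
# accept (caller, prompt, result) as documented above; returning nothing
# (or False) closes the prompt, returning True keeps it open:
#
#     def _echo(caller, prompt, result):
#         caller.msg("You wrote: %s" % result)
#
#     get_input(caller, "Write something:", _echo)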
#------------------------------------------------------------
#
# test menu structure and testing command
#
#------------------------------------------------------------
def test_start_node(caller):
text = """
This is an example menu.
If you enter anything except the valid options, your input will be
recorded and you will be brought to a menu entry showing your
input.
Select options or use 'quit' to exit the menu.
"""
options = ({"key": ("{yS{net", "s"),
"desc": "Set an attribute on yourself.",
"exec": lambda caller: caller.attributes.add("menuattrtest", "Test value"),
"goto": "test_set_node"},
{"key": ("{yV{niew", "v"),
"desc": "View your own name",
"goto": "test_view_node"},
{"key": ("{yQ{nuit", "quit", "q", "Q"),
"desc": "Quit this menu example.",
"goto": "test_end_node"},
{"key": "_default",
"goto": "test_displayinput_node"})
return text, options
def test_set_node(caller):
text = ("""
The attribute 'menuattrtest' was set to
{w%s{n
(check it with examine after quitting the menu).
This node has only one option, and one of its key aliases is the
string "_default", meaning it will catch any input, in this case
to return to the main menu. So you can e.g. press <return> to go
back now.
""" % caller.db.menuattrtest,
# optional help text for this node
"""
This is the help entry for this node. It is created by returning
the node text as a tuple - the second string in that tuple will be
used as the help text.
""")
options = {"key": ("back (default)", "_default"),
"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_view_node(caller):
text = """
Your name is {g%s{n!
click {lclook{lthere{le to trigger a look command under MXP.
This node's option has no explicit key (nor the "_default" key
set), and so gets assigned a number automatically. You can in fact
-always- use numbers (1...N) to refer to listed options, even if you
don't see a string option key (try it!).
""" % caller.key
options = {"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_displayinput_node(caller, raw_string):
text = """
You entered the text:
"{w%s{n"
... which could now be handled or stored here in some way if this
was not just an example.
This node has an option with a single alias "_default", which
makes it hidden from view. It catches all input (except the
in-menu help/quit commands) and will, in this case, bring you back
to the start node.
""" % raw_string
options = {"key": "_default",
"goto": "test_start_node"}
return text, options
def test_end_node(caller):
text = """
This is the end of the menu and since it has no options the menu
will exit here, followed by a call of the "look" command.
"""
return text, None
class CmdTestMenu(Command):
"""
Test menu
Usage:
testmenu <menumodule>
Starts a demo menu from a menu node definition module.
"""
key = "testmenu"
def func(self):
if not self.args:
self.caller.msg("Usage: testmenu menumodule")
return
# start menu
EvMenu(self.caller, self.args.strip(), startnode="test_start_node", cmdset_mergetype="Replace")
|
TheTypoMaster/evennia
|
evennia/utils/evmenu.py
|
Python
|
bsd-3-clause
| 26,765
|
[
"VisIt"
] |
e397bed5456b739ed3f3904297534afc9081bb86f32b56b9c22ae216e1d134cf
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import sys
PYVERSION = sys.version.split()[0]
if PYVERSION >= "3" or PYVERSION < "2.6":
exit("[CRITICAL] incompatible Python version detected ('%s'). For successfully running sqlmap you'll have to use version 2.6 or 2.7 (visit 'http://www.python.org/download/')" % PYVERSION)
extensions = ("gzip", "ssl", "sqlite3", "zlib")
try:
for _ in extensions:
__import__(_)
except ImportError:
errMsg = "missing one or more core extensions (%s) " % (", ".join("'%s'" % _ for _ in extensions))
errMsg += "most probably because current version of Python has been "
errMsg += "built without appropriate dev packages (e.g. 'libsqlite3-dev')"
exit(errMsg)
|
pwnieexpress/raspberry_pwn
|
src/pentest/sqlmap/lib/utils/versioncheck.py
|
Python
|
gpl-3.0
| 820
|
[
"VisIt"
] |
850dae71ff51fb6228775f86c771aa77d0159bd9f054fa1a9230be82b7b98471
|
""" Definition for RHEAS Datasets decorators.
.. module:: datasets.decorators
:synopsis: Definition of the Datasets decorators
.. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov>
"""
from functools import wraps
import tempfile
import shutil
import urllib
from datetime import datetime
from ftplib import FTP
import re
from pydap.client import open_url
import netCDF4 as netcdf4
import numpy as np
from osgeo import gdal
import datasets
def resetDatetime(dt):
"""Set time to 00:00 to align with daily data."""
return datetime(dt.year, dt.month, dt.day, 0, 0)
def path(fetch):
"""Decorator for getting files from local path."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, bbox, dt = fetch(*args, **kwargs)
outpath = tempfile.mkdtemp()
filename = url.format(dt.year, dt.month, dt.day)
try:
shutil.copy(filename, outpath)
lfilename = filename.split("/")[-1]
except:
lfilename = None
return outpath, lfilename, bbox, dt
return wrapper
def http(fetch):
"""Decorator for downloading files from HTTP sites."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, bbox, dt = fetch(*args, **kwargs)
outpath = tempfile.mkdtemp()
filename = url.format(dt.year, dt.month, dt.day)
try:
lfilename = filename.split("/")[-1]
urllib.urlcleanup()
urllib.urlretrieve(filename, "{0}/{1}".format(outpath, lfilename))
except:
lfilename = None
return outpath, lfilename, bbox, dt
return wrapper
def ftp(fetch):
"""Decorator for downloading files from FTP sites."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, bbox, dt = fetch(*args, **kwargs)
ftpurl = url.split("/")[2]
outpath = tempfile.mkdtemp()
try:
conn = FTP(ftpurl)
conn.login()
conn.cwd("/".join(url.split("/")[3:-1]).format(dt.year, dt.month, dt.day))
name = url.split("/")[-1].format(dt.year, dt.month, dt.day)
filenames = [f for f in conn.nlst() if re.match(r".*{0}.*".format(name), f) is not None]
if len(filenames) > 0:
filename = filenames[0]
with open("{0}/{1}".format(outpath, filename), 'wb') as f:
conn.retrbinary("RETR {0}".format(filename), f.write)
filenames.append("{0}/{1}".format(outpath, filename))
else:
filename = None
except:
filename = None
return outpath, filename, bbox, dt
return wrapper
def opendap(fetch):
"""Decorator for fetching data from Opendap servers."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, varname, bbox, dt = fetch(*args, **kwargs)
ds = open_url(url)
for var in ds.keys():
if var.lower().startswith("lon") or var.lower() == "x":
lonvar = var
if var.lower().startswith("lat") or var.lower() == "y":
latvar = var
if var.lower().startswith("time") or var.lower() == "t":
timevar = var
lat = ds[latvar][:].data
lon = ds[lonvar][:].data
lon[lon > 180] -= 360
res = abs(lat[0]-lat[1]) # assume rectangular grid
i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox)
t = ds[timevar]
tt = netcdf4.num2date(t[:].data, units=t.units)
ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]]
if len(ti) > 0:
lati = np.argsort(lat)[::-1][i1:i2]
loni = np.argsort(lon)[j1:j2]
if len(ds[varname].data[0].shape) > 3:
data = ds[varname].data[0][ti[0]:ti[-1]+1, 0, lati[0]:lati[-1]+1, loni[0]:loni[-1]+1]
else:
# 3-d variable: no level dimension to index (mirrors the netcdf wrapper below)
data = ds[varname].data[0][ti[0]:ti[-1]+1, lati[0]:lati[-1]+1, loni[0]:loni[-1]+1]
dt = tt[ti]
else:
data = None
dt = None
lat = np.sort(lat)[::-1][i1:i2]
lon = np.sort(lon)[j1:j2]
return data, lat, lon, dt
return wrapper
def netcdf(fetch):
"""Decorator for fetching NetCDF files (local or from Opendap servers)."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, varname, bbox, dt = fetch(*args, **kwargs)
ds = netcdf4.Dataset(url)
for var in ds.variables:
if var.lower().startswith("lon") or var.lower() == "x":
lonvar = var
if var.lower().startswith("lat") or var.lower() == "y":
latvar = var
if var.lower().startswith("time") or var.lower() == "t":
timevar = var
lat = ds.variables[latvar][:]
lon = ds.variables[lonvar][:]
lon[lon > 180] -= 360
res = abs(lat[0]-lat[1]) # assume rectangular grid
i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox)
t = ds.variables[timevar]
tt = netcdf4.num2date(t[:], units=t.units)
ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]]
if len(ti) > 0:
lati = np.argsort(lat)[::-1][i1:i2]
loni = np.argsort(lon)[j1:j2]
if len(ds.variables[varname].shape) > 3:
data = ds.variables[varname][ti, 0, lati, loni]
else:
data = ds.variables[varname][ti, lati, loni]
dt = tt[ti]
else:
data = None
dt = None
lat = np.sort(lat)[::-1][i1:i2]
lon = np.sort(lon)[j1:j2]
return data, lat, lon, dt
return wrapper
def geotiff(fetch):
"""Decorator for reading data from raster files."""
@wraps(fetch)
def wrapper(*args, **kwargs):
outpath, filename, bbox, dt = fetch(*args, **kwargs)
if filename is not None:
lfilename = datasets.uncompress(filename, outpath)
f = gdal.Open("{0}/{1}".format(outpath, lfilename))
xul, xres, _, yul, _, yres = f.GetGeoTransform()
data = f.ReadAsArray()
nr, nc = data.shape
lat = np.arange(yul + yres/2.0, yul + yres * nr, yres)
lon = np.arange(xul + xres/2.0, xul + xres * nc, xres)
i1, i2, j1, j2 = datasets.spatialSubset(lat, lon, xres, bbox)
data = data[i1:i2, j1:j2]
lat = lat[i1:i2]
lon = lon[j1:j2]
shutil.rmtree(outpath)
else:
data = lat = lon = None
return data, lat, lon, dt
return wrapper
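# A minimal sketch of how these decorators compose: a format decorator
# (geotiff) wraps a transport decorator (http), which wraps a fetch function
# that only builds the download URL. The URL template, signature and bbox
# below are illustrative assumptions, not a real RHEAS dataset:
#
#     @geotiff
#     @http
#     def fetch(dt, bbox):
#         url = "http://example.com/rain/{0}/{1:02d}/{2:02d}/rain.tif.gz"
#         return url, bbox, dt
#
#     data, lat, lon, dt = fetch(datetime(2014, 1, 1), (-120., 35., -118., 37.))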
|
nasa/RHEAS
|
src/datasets/decorators.py
|
Python
|
mit
| 6,705
|
[
"NetCDF"
] |
1fc7f2a130687ce6696a831499aaff13d3e513004650e292f83e476e775a77b5
|
"""Module to read and write atoms in cif file format.
See http://www.iucr.org/resources/cif/spec/version1.1/cifsyntax for a
description of the file format. STAR extensions as save frames,
global blocks, nested loops and multi-data values are not supported.
"""
import shlex
import re
import numpy as np
from ase.parallel import paropen
from ase.lattice.spacegroup import crystal
from ase.lattice.spacegroup.spacegroup import spacegroup_from_data
def get_lineno(fileobj):
"""Returns the line number of current line in fileobj."""
pos = fileobj.tell()
try:
fileobj.seek(0)
s = fileobj.read(pos)
lineno = s.count('\n')
finally:
fileobj.seek(pos)
return lineno
def unread_line(fileobj):
"""Unread the last line read from *fileobj*."""
# If previous line ends with CRLF, we have to back up one extra
# character before entering the loop below
if fileobj.tell() > 2:
fileobj.seek(-2, 1)
if fileobj.read(2) == '\r\n':
fileobj.seek(-1, 1)
while True:
if fileobj.tell() == 0:
break
fileobj.seek(-2, 1)
if fileobj.read(1) in ('\n', '\r'):
break
def convert_value(value):
"""Convert CIF value string to corresponding python type."""
value = value.strip()
if re.match('(".*")|(\'.*\')$', value):
return value[1:-1]
elif re.match(r'[+-]?\d+$', value):
return int(value)
elif re.match(r'[+-]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?$', value):
return float(value)
elif re.match(r'[+-]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?\(\d+\)$',
value):
return float(value[:value.index('(')]) # strip off uncertainties
else:
return value
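# For example (each case follows one of the regexes above):
#   convert_value("'quoted'") -> 'quoted'
#   convert_value('42') -> 42
#   convert_value('-1.5e-3') -> -0.0015
#   convert_value('1.23(4)') -> 1.23  (uncertainty stripped)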
def parse_multiline_string(fileobj, line):
"""Parse semicolon-enclosed multiline string and return it."""
assert line[0] == ';'
lines = [line[1:].lstrip()]
while True:
line = fileobj.readline().strip()
if line == ';':
break
lines.append(line)
return '\n'.join(lines).strip()
def parse_singletag(fileobj, line):
"""Parse a CIF tag (entries starting with underscore). Returns
a key-value pair."""
kv = line.split(None, 1)
if len(kv) == 1:
key = line
line = fileobj.readline().strip()
while not line or line[0] == '#':
line = fileobj.readline().strip()
if line[0] == ';':
value = parse_multiline_string(fileobj, line)
else:
value = line
else:
key, value = kv
return key, convert_value(value)
def parse_loop(fileobj):
"""Parse a CIF loop. Returns a dict with column tag names as keys
and lists of the column content as values."""
header = []
line = fileobj.readline().strip()
while line.startswith('_'):
header.append(line.lower())
line = fileobj.readline().strip()
columns = dict([(h, []) for h in header])
tokens = []
while True:
lowerline = line.lower()
if (not line or
line.startswith('_') or
lowerline.startswith('data_') or
lowerline.startswith('loop_')):
break
if line.startswith('#'):
line = fileobj.readline().strip()
continue
if line.startswith(';'):
t = [parse_multiline_string(fileobj, line)]
else:
t = shlex.split(line)
line = fileobj.readline().strip()
tokens.extend(t)
if len(tokens) < len(columns):
continue
assert len(tokens) == len(header)
for h, t in zip(header, tokens):
columns[h].append(convert_value(t))
tokens = []
if line:
unread_line(fileobj)
return columns
def parse_items(fileobj, line):
"""Parse a CIF data items and return a dict with all tags."""
tags = {}
while True:
line = fileobj.readline()
if not line:
break
line = line.strip()
lowerline = line.lower()
if not line or line.startswith('#'):
continue
elif line.startswith('_'):
key, value = parse_singletag(fileobj, line)
tags[key.lower()] = value
elif lowerline.startswith('loop_'):
tags.update(parse_loop(fileobj))
elif lowerline.startswith('data_'):
unread_line(fileobj)
break
elif line.startswith(';'):
temp = parse_multiline_string(fileobj, line)
else:
raise ValueError('%s:%d: Unexpected CIF file entry: "%s"'%(
fileobj.name, get_lineno(fileobj), line))
return tags
def parse_block(fileobj, line):
"""Parse a CIF data block and return a tuple with the block name
and a dict with all tags."""
assert line.lower().startswith('data_')
blockname = line.split('_', 1)[1].rstrip()
tags = parse_items(fileobj, line)
return blockname, tags
def parse_cif(fileobj):
"""Parse a CIF file. Returns a list of blockname and tag
pairs. All tag names are converted to lower case."""
if isinstance(fileobj, basestring):
fileobj = open(fileobj)
blocks = []
while True:
line = fileobj.readline()
if not line:
break
line = line.strip()
if not line or line.startswith('#'):
continue
blocks.append(parse_block(fileobj, line))
return blocks
def tags2atoms(tags, store_tags=False, **kwargs):
"""Returns an Atoms object from a cif tags dictionary. If
*store_tags* is true, the *info* attribute of the returned Atoms
object will be populated with all the cif tags. Keyword arguments
are passed to the Atoms constructor."""
a = tags['_cell_length_a']
b = tags['_cell_length_b']
c = tags['_cell_length_c']
alpha = tags['_cell_angle_alpha']
beta = tags['_cell_angle_beta']
gamma = tags['_cell_angle_gamma']
scaled_positions = np.array([tags['_atom_site_fract_x'],
tags['_atom_site_fract_y'],
tags['_atom_site_fract_z']]).T
symbols = []
if '_atom_site_type_symbol' in tags:
labels = tags['_atom_site_type_symbol']
else:
labels = tags['_atom_site_label']
for s in labels:
# Strip off additional labeling on chemical symbols
m = re.search(r'([A-Z][a-z]?)', s)
symbol = m.group(0)
symbols.append(symbol)
# Symmetry specification, see
# http://www.iucr.org/resources/cif/dictionaries/cif_sym for a
# complete list of official keys. In addition we also try to
# support some commonly used deprecated notations
no = None
if '_space_group.it_number' in tags:
no = tags['_space_group.it_number']
elif '_space_group_it_number' in tags:
no = tags['_space_group_it_number']
elif '_symmetry_int_tables_number' in tags:
no = tags['_symmetry_int_tables_number']
symbolHM = None
# note: all tag keys were lower-cased during parsing
if '_space_group.patterson_name_h-m' in tags:
symbolHM = tags['_space_group.patterson_name_h-m']
elif '_symmetry_space_group_name_h-m' in tags:
symbolHM = tags['_symmetry_space_group_name_h-m']
sitesym = None
if '_space_group_symop.operation_xyz' in tags:
sitesym = tags['_space_group_symop.operation_xyz']
elif '_symmetry_equiv_pos_as_xyz' in tags:
sitesym = tags['_symmetry_equiv_pos_as_xyz']
spacegroup = 1
if sitesym is not None:
spacegroup = spacegroup_from_data(no=no, symbol=symbolHM,
sitesym=sitesym)
elif no is not None:
spacegroup = no
elif symbolHM is not None:
spacegroup = symbolHM
else:
spacegroup = 1
if store_tags:
info = tags.copy()
if 'info' in kwargs:
info.update(kwargs['info'])
kwargs['info'] = info
atoms = crystal(symbols, basis=scaled_positions,
cellpar=[a, b, c, alpha, beta, gamma],
spacegroup=spacegroup, **kwargs)
return atoms
def read_cif(fileobj, index=-1, store_tags=False, **kwargs):
"""Read Atoms object from CIF file. *index* specifies the data
block number or name (if string) to return.
If *index* is None or a slice object, a list of atoms objects will
be returned. In the case of *index* is *None* or *slice(None)*,
only blocks with valid crystal data will be included.
If *store_tags* is true, the *info* attribute of the returned
Atoms object will be populated with all tags in the corresponding
cif data block.
Keyword arguments are passed on to the Atoms constructor."""
blocks = parse_cif(fileobj)
if isinstance(index, str):
tags = dict(blocks)[index]
return tags2atoms(tags, **kwargs)
elif isinstance(index, int):
name, tags = blocks[index]
return tags2atoms(tags, **kwargs)
elif index is None or index == slice(None):
# Return all CIF blocks with valid crystal data
images = []
for name, tags in blocks:
try:
atoms = tags2atoms(tags)
images.append(atoms)
except KeyError:
pass
if not images:
# No block contained a valid atoms object
# Provide a useful error by trying to convert the first
# block to atoms
name, tags = blocks[0]
tags2atoms(tags)
return images
else:
return [tags2atoms(tags) for name, tags in blocks[index]]
def write_cif(fileobj, images):
"""Write *images* to CIF file."""
if isinstance(fileobj, str):
fileobj = paropen(fileobj, 'w')
if not isinstance(images, (list, tuple)):
images = [images]
for i, atoms in enumerate(images):
fileobj.write('data_image%d\n' % i)
from numpy import arccos, pi, dot
from numpy.linalg import norm
cell = atoms.cell
a = norm(cell[0])
b = norm(cell[1])
c = norm(cell[2])
alpha = arccos(dot(cell[1], cell[2])/(b*c))*180./pi
beta = arccos(dot(cell[0], cell[2])/(a*c))*180./pi
gamma = arccos(dot(cell[0], cell[1])/(a*b))*180./pi
fileobj.write('_cell_length_a %g\n' % a)
fileobj.write('_cell_length_b %g\n' % b)
fileobj.write('_cell_length_c %g\n' % c)
fileobj.write('_cell_angle_alpha %g\n' % alpha)
fileobj.write('_cell_angle_beta %g\n' % beta)
fileobj.write('_cell_angle_gamma %g\n' % gamma)
fileobj.write('\n')
if atoms.pbc.all():
fileobj.write('_symmetry_space_group_name_H-M %s\n' % 'P 1')
fileobj.write('_symmetry_int_tables_number %d\n' % 1)
fileobj.write('\n')
fileobj.write('loop_\n')
fileobj.write(' _symmetry_equiv_pos_as_xyz\n')
fileobj.write(" 'x, y, z'\n")
fileobj.write('\n')
fileobj.write('loop_\n')
fileobj.write(' _atom_site_label\n')
fileobj.write(' _atom_site_occupancy\n')
fileobj.write(' _atom_site_fract_x\n')
fileobj.write(' _atom_site_fract_y\n')
fileobj.write(' _atom_site_fract_z\n')
fileobj.write(' _atom_site_thermal_displace_type\n')
fileobj.write(' _atom_site_B_iso_or_equiv\n')
fileobj.write(' _atom_site_type_symbol\n')
scaled = atoms.get_scaled_positions()
no = {}
for i, atom in enumerate(atoms):
symbol = atom.symbol
if symbol in no:
no[symbol] += 1
else:
no[symbol] = 1
fileobj.write(
' %-8s %6.4f %7.5f %7.5f %7.5f %4s %6.3f %s\n'%(
'%s%d' % (symbol, no[symbol]),
1.0,
scaled[i][0],
scaled[i][1],
scaled[i][2],
'Biso',
1.0,
symbol))
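# A minimal usage sketch (file names are placeholders):
#
#     from ase.io.cif import read_cif, write_cif
#     atoms = read_cif('quartz.cif')        # default index=-1: last data block
#     write_cif('quartz_copy.cif', atoms)   # written as a P 1 cell with
#                                           # fractional coordinates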
|
grhawk/ASE
|
tools/ase/io/cif.py
|
Python
|
gpl-2.0
| 12,115
|
[
"ASE",
"CRYSTAL"
] |
2187966d86399e9af99d5cb193b4fdde665a86d374cada6d0677d079a8392b1d
|
# -*-python-*-
#
# Copyright (C) 2006-2013 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
import vcauth
import vclib
import fnmatch
import string
class ViewVCAuthorizer(vcauth.GenericViewVCAuthorizer):
"""A simple top-level module authorizer."""
def __init__(self, username, params={}):
forbidden = params.get('forbidden', '')
self.forbidden = map(string.strip,
filter(None, string.split(forbidden, ',')))
def check_root_access(self, rootname):
return 1
def check_universal_access(self, rootname):
# If there aren't any forbidden paths, we can grant universal read
# access. Otherwise, we make no claim.
if not self.forbidden:
return 1
return None
def check_path_access(self, rootname, path_parts, pathtype, rev=None):
# No path? No problem.
if not path_parts:
return 1
# Not a directory? We aren't interested.
if pathtype != vclib.DIR:
return 1
# At this point we're looking at a directory path.
module = path_parts[0]
default = 1
for pat in self.forbidden:
if pat[0] == '!':
default = 0
if fnmatch.fnmatchcase(module, pat[1:]):
return 1
elif fnmatch.fnmatchcase(module, pat):
return 0
return default
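# A minimal sketch of the 'forbidden' parameter semantics (hypothetical
# configuration). Given:
#
#     forbidden = "secret*, !public*"
#
# a top-level module "secretstuff" matches "secret*" and is denied; the
# "!public*" entry flips the default to deny, so "publicdocs" is allowed
# explicitly while an unmatched module like "misc" falls through to the
# (now deny) default.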
|
marcellodesales/svnedge-console
|
svn-server/lib/viewvc/vcauth/forbidden/__init__.py
|
Python
|
agpl-3.0
| 1,602
|
[
"VisIt"
] |
e522032de63baf71e37601531b3338fb6ac55c1c827db53edb06a4740261c1a7
|
"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as pl
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=dy ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
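# ----------------------------------------------------------------------
# Added sketch (not part of the upstream example): when the per-point noise
# level is unknown but roughly constant, an alternative to passing ``alpha``
# is to learn the noise with a WhiteKernel; noise_level=0.5 below is an
# assumed starting value, not a fitted result.
from sklearn.gaussian_process.kernels import WhiteKernel
kernel_noisy = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2)) \
    + WhiteKernel(noise_level=0.5, noise_level_bounds=(1e-10, 1e1))
gp_alt = GaussianProcessRegressor(kernel=kernel_noisy, n_restarts_optimizer=9)
gp_alt.fit(X, y)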
|
kashif/scikit-learn
|
examples/gaussian_process/plot_gpr_noisy_targets.py
|
Python
|
bsd-3-clause
| 3,680
|
[
"Gaussian"
] |
e87c560a15586a1462cd9b785f76ff6f6b4221b68b240b18e1b97070a943c449
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abinit Post Process Application
author: Martin Alexandre
last edited: May 2013
"""
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvasQT
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.ticker import FormatStrFormatter,ScalarFormatter
try:
from PyQt4 import Qt,QtGui,QtCore
except ImportError:
    pass
#---------------------------------------------------#
#---------------------------------------------------#
#------------------CANVAS(NOwindows)----------------#
#---------------------------------------------------#
#---------------------------------------------------#
class Canvas(FigureCanvas):
def __init__(self, parent=None, width=6, height=4, dpi=100,x=0,y=0,pxlbl="",pylbl="",point = False,adjust=False):
self.fig = Figure(figsize=(width, height), dpi=dpi,)
FigureCanvas.__init__(self,self.fig)
self.setPlot(x,y,pxlbl,pylbl,adjust=adjust)
def setPlot(self,x,y,xlbl,ylbl,point =False, adjust=False):
try :
self.formater = ScalarFormatter(useOffset=True, useMathText=False, useLocale=None)
self.formater.set_useOffset(0)
except:
self.formater = ScalarFormatter(useOffset=True, useMathText=False)
self.axes = self.fig.add_subplot(111)
self.axes.clear()
if point :
self.axes.plot(x,y,'.',markersize=15)
else :
self.axes.plot(x,y)
try:
if (max(y)-min(y)) <10**-10:
self.axes.set_ylim(min(y)-1,max(y)+1)
except:
            pass
self.axes.set_xlabel(xlbl)
self.axes.set_ylabel(ylbl)
self.axes.figure.set_facecolor('white')
self.axes.grid('on')
self.axes.yaxis.set_major_formatter(self.formater)
self.axes.xaxis.set_major_formatter(self.formater)
if adjust:
self.adjust_x_lim(x,y)
self.draw()
def addLegend(self,plegend,markerscale=1):
self.axes.legend(plegend,loc=1,markerscale=markerscale)
self.draw()
def addPlot(self,x,y,bar = False, point = False,marker='.',marker_size=25 ):
if bar == True:
self.axes.bar(x, y, width=0.01)
elif point == True :
self.axes.plot(x, y,marker,markersize=marker_size)
else:
self.axes.plot(x,y)
self.draw()
def adjust_x_lim(self, x, y):
test = 0
xmin = 0
xmax = 0
for i in range(len(x)):
if y[i] >= 0.0002:
xmin = x[i]
break
for i in range(len(x)):
if y[i] <= 0.002 :
test += 1
if test > 60 :
xmax = x[i-50]
break
self.axes.set_xlim(xmin,xmax)
self.draw()
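# Added usage sketch (not part of the original module): renders a sine curve
# with the offscreen Agg canvas; the output file name is hypothetical and a
# matplotlib version contemporary with this PyQt4-era module is assumed.
def _example_canvas(path='appa_canvas_demo.png'):
    import numpy
    x = numpy.linspace(0, 10, 200)
    canvas = Canvas(width=6, height=4, dpi=100, x=x, y=numpy.sin(x),
                    pxlbl='x', pylbl='sin(x)')
    canvas.fig.savefig(path)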
#---------------------------------------------------#
#---------------------------------------------------#
#------------------CANVAS(Windows)------------------#
#---------------------------------------------------#
#---------------------------------------------------#
class CanvasQT(FigureCanvasQT):
def __init__(self, parent=None, width=6, height=4, dpi=100,x=0,y=0,pxlbl="",pylbl="",point = False,adjust=False,marker='.',marker_size=25):
self.fig = Figure(figsize=(width, height), dpi=dpi,)
FigureCanvasQT.__init__(self,self.fig)
FigureCanvasQT.setSizePolicy(self,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
FigureCanvasQT.updateGeometry(self)
self.setPlot(x,y,pxlbl,pylbl,point =point,adjust=adjust,marker=marker,marker_size=marker_size)
def setPlot(self,x,y,xlbl,ylbl,point =False, adjust=False,marker='.', marker_size=25):
try :
self.formater = ScalarFormatter(useOffset=True, useMathText=False, useLocale=None)
self.formater.set_useOffset(0)
except:
self.formater = ScalarFormatter(useOffset=False, useMathText=False)
self.axes = self.fig.add_subplot(111)
self.axes.clear()
if point :
self.axes.plot(x,y,marker,markersize=marker_size)
else :
self.axes.plot(x,y)
self.axes.set_xlabel(xlbl)
self.axes.set_ylabel(ylbl)
try :
if (max(y)-min(y)) <10**-10:
self.axes.set_ylim(min(y)-1,max(y)+1)
except :
            pass
self.axes.figure.set_facecolor('white')
self.axes.grid('on')
self.axes.yaxis.set_major_formatter(self.formater)
self.axes.xaxis.set_major_formatter(self.formater)
if adjust:
self.adjust_x_lim(x,y)
self.draw()
def addLegend(self,plegend,markerscale=1):
self.axes.legend(plegend,loc=1,markerscale=markerscale)
self.draw()
def addPlot(self,x,y,bar = False, point = False,marker='k+',marker_size=25):
if bar == True:
self.axes.bar(x, y, width=0.01)
elif point == True :
self.axes.plot(x,y,marker,markersize=marker_size)
else:
self.axes.plot(x,y)
self.draw()
def adjust_x_lim(self, x, y):
test = 0
xmin = 0
xmax = 0
for i in range(len(x)):
if y[i] >= 0.0002:
xmin = x[i]
break
for i in range(len(x)):
if y[i] <= 0.002 :
test += 1
if test > 60 :
xmax = x[i-50]
break
self.axes.set_xlim(xmin,xmax)
self.draw()
|
jmbeuken/abinit
|
scripts/post_processing/appa/utility/canvas.py
|
Python
|
gpl-3.0
| 5,606
|
[
"ABINIT"
] |
6193ffd55ad44838a2618a98838e83cd3f66a368d2ac88d3545f593139dc88ab
|
# This module handles the skeleton descriptions stored in trajectory files.
#
# Written by Konrad Hinsen
# last revision: 2001-4-19
#
_undocumented = 1
import MMTK
import MMTK.Environment
import MMTK.ForceFields
import string, types
#
# Atoms
#
class A:
def __init__(self, name, index, type = None):
self.name = name
self.index = index
self.type = type
def make(self, info, conf = None):
atom = MMTK.Atom(self.type, name = self.name)
self.assignIndex(atom, info, conf)
return atom
def assignIndex(self, atom, info, conf):
atom.setArray(None, [self.index])
info[self.index] = atom
if conf is not None:
atom.setPosition(MMTK.Vector(conf[self.index]))
#
# Composite chemical objects
#
class Composite:
def __init__(self, name, list, type = None, **kwargs):
self.name = name
self.list = list
self.type = type
self.kwargs = kwargs
def make(self, info, conf = None):
object = self._class(self.type, name=self.name)
for sub in self.list:
sub.assignIndex(getattr(object, sub.name), info, conf)
if self.kwargs.has_key('dc'):
for a1, a2, d in self.kwargs['dc']:
object.addDistanceConstraint(info[a1], info[a2], d)
return object
def assignIndex(self, object, info, conf):
for sub in self.list:
sub.assignIndex(getattr(object, sub.name), info, conf)
class G(Composite):
pass
class M(Composite):
_class = MMTK.Molecule
class C(Composite):
_class = MMTK.Complex
class AC(Composite):
def make(self, info, conf = None):
atoms = map(lambda a, i=info, c=conf: a.make(i, c), self.list)
return MMTK.AtomCluster(atoms, name = self.name)
#class X(Composite):
# _class = MMTK.Crystal
class S(Composite):
def make(self, info, conf = None):
import MMTK.Proteins
n_residues = len(self.type)/3
residues = map(lambda i, s = self.type: s[3*i:3*i+3],
range(n_residues))
self.kwargs['name'] = self.name
chain = apply(MMTK.Proteins.PeptideChain, (residues,), self.kwargs)
for i in range(len(self.list)):
self.list[i].assignIndex(chain[i], info, conf)
chain[i].name = self.list[i].name
return chain
class N(Composite):
def make(self, info, conf = None):
import MMTK.NucleicAcids
n_residues = len(self.type)/3
residues = map(lambda i, s = self.type: string.strip(s[3*i:3*i+3]),
range(n_residues))
self.kwargs['name'] = self.name
chain = apply(MMTK.NucleicAcids.NucleotideChain, (residues,),
self.kwargs)
for i in range(len(self.list)):
self.list[i].assignIndex(chain[i], info, conf)
return chain
#
# Collections and universes
#
class c:
def __init__(self, creation, objects):
self.creation = creation
self.objects = objects
def make(self, info, conf = None):
local = {}
collection = eval(self.creation, vars(MMTK), local)
attr = None
for o in self.objects:
if type(o) == types.StringType:
attr = o
elif attr:
setattr(collection, attr, o.make(info, conf))
else:
collection.addObject(o.make(info, conf))
return collection
#
# Objects constructed from a list of other objects (e.g. proteins)
#
class l:
def __init__(self, class_name, name, objects):
self.class_name = class_name
self.objects = objects
self.name = name
def make(self, info, conf = None):
import MMTK.Proteins
classes = {'Protein': MMTK.Proteins.Protein}
return classes[self.class_name] \
(map(lambda o, i=info, c=conf: o.make(i, c), self.objects),
name = self.name)
#
# Objects without subobjects
#
class o:
def __init__(self, creation):
self.creation = creation
def make(self, info, conf = None):
local = {}
object = eval(self.creation, vars(MMTK), local)
return object
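# Added sketch (not part of the original module): a hypothetical skeleton
# expression for a single water molecule, as it might be stored in a
# trajectory file; the atom indices are illustrative only.
#
#     M('water', [A('O', 0), A('H1', 1), A('H2', 2)], 'water')
#
# Calling .make({}) on such an object rebuilds the corresponding MMTK
# molecule and registers each atom in the info dictionary under its index.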
|
fxia22/ASM_xf
|
PythonD/site_python/MMTK/Skeleton.py
|
Python
|
gpl-2.0
| 3,867
|
[
"CRYSTAL"
] |
f681e8c74bb34da3b712bdf939bd3c60b32c5270284541d0fb095ff7c1bca4d9
|
'''
This is a very simple example referenced in the beginner's tutorial:
https://enigmampc.github.io/catalyst/beginner-tutorial.html
Run this example, by executing the following from your terminal:
catalyst run -f buy_btc_simple.py -x bitfinex --start 2016-1-1 --end 2017-9-30 -o buy_btc_simple_out.pickle
If you want to run this code using another exchange, make sure that
the asset is available on that exchange. For example, if you were to run
it for exchange Poloniex, you would need to edit the following line:
context.asset = symbol('btc_usdt') # note 'usdt' instead of 'usd'
and specify exchange poloniex as follows:
catalyst run -f buy_btc_simple.py -x poloniex --start 2016-1-1 --end 2017-9-30 -o buy_btc_simple_out.pickle
To see which assets are available on each exchange, visit:
https://www.enigma.co/catalyst/status
'''
from catalyst.api import order, record, symbol
import matplotlib.pyplot as plt
def initialize(context):
context.asset = symbol('btc_usd')
def handle_data(context, data):
order(context.asset, 1)
    record(btc=data.current(context.asset, 'price'))
def analyze(context, perf):
ax1 = plt.subplot(211)
perf.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('portfolio value')
ax2 = plt.subplot(212, sharex=ax1)
perf.btc.plot(ax=ax2)
ax2.set_ylabel('bitcoin price')
plt.show()
|
sovicak/AnonymniAnalytici
|
2018_02_15_cryptocurrencies_trading/algorithms/buy_btc_simple.py
|
Python
|
mit
| 1,343
|
[
"VisIt"
] |
33df22bf01baf6b62f80e296b57d5d5b7b36de3c4a81e59680e70695c7a5d016
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import calendar
import ccdproc
import collections
import coloredlogs
import datetime
import glob
import logging
import math
import numpy as np
import os
import pandas
import random
import re
import scipy
import subprocess
import sys
import time
from astropy.utils import iers
iers.Conf.iers_auto_url.set('ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all')
from astroplan import Observer
from astropy import units as u
from astropy.io import fits
from astropy.convolution import convolve, Gaussian1DKernel, Box1DKernel
from astropy.coordinates import EarthLocation
from astropy.modeling import (models, fitting, Model)
from astropy.stats import sigma_clip
from astropy.time import Time
from astroscrappy import detect_cosmics
from matplotlib import pyplot as plt
from scipy import signal
from threading import Timer
from . import check_version
__version__ = __import__('goodman_pipeline').__version__
log = logging.getLogger(__name__)
def astroscrappy_lacosmic(ccd, red_path=None, save_mask=False):
mask, ccd.data = detect_cosmics(ccd.data)
ccd.header['GSP_COSM'] = ('LACosmic',
"Cosmic ray rejection method")
log.info("Cosmic rays rejected using astroscrappy's lacosmic")
if save_mask and red_path is not None:
mask_ccd = ccd.copy()
mask_ccd.mask = mask
new_file_name = 'crmask_' + mask_ccd.header['GSP_FNAM']
mask_ccd.header['GSP_FNAM'] = new_file_name
log.info("Saving binary mask of cosmic rays to "
"{:s}".format(new_file_name))
write_fits(ccd=mask_ccd,
full_path=os.path.join(red_path, new_file_name))
return ccd
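# Added usage sketch (not part of the original module): cleans a frame in
# memory without writing the mask to disk; 'science.fits' is a hypothetical
# file name.
def _example_lacosmic(path='science.fits'):
    ccd = ccdproc.CCDData.read(path, unit=u.adu)
    return astroscrappy_lacosmic(ccd=ccd, red_path=None, save_mask=False)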
def add_wcs_keys(ccd):
"""Adds generic keyword for linear wavelength solution to the header
Linear wavelength solutions require a set of standard fits keywords. Later
on they will be updated accordingly.
The main goal of putting them here is to have consistent and nicely ordered
headers.
Notes:
This does NOT add a WCS solution, just the keywords.
Args:
        ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance with no
          WCS keywords.
Returns:
        ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance whose
          header includes the added WCS keywords.
"""
log.debug("Adding FITS LINEAR wcs keywords to header.")
ccd.header.set('BANDID1',
value='spectrum - background none, weights none, '
'clean no',
comment='')
ccd.header.set('APNUM1',
value='1 1 0 0',
comment='')
ccd.header.set('WCSDIM',
value=1,
comment='')
ccd.header.set('CTYPE1',
value='LINEAR',
comment='')
ccd.header.set('CRVAL1',
value=1,
comment='')
ccd.header.set('CRPIX1',
value=1,
comment='')
ccd.header.set('CDELT1',
value=1,
comment='')
ccd.header.set('CD1_1',
value=1,
comment='')
ccd.header.set('LTM1_1',
value=1,
comment='')
ccd.header.set('WAT0_001',
value='system=equispec',
comment='')
ccd.header.set('WAT1_001',
value='wtype=linear label=Wavelength units=angstroms',
comment='')
ccd.header.set('DC-FLAG',
value=0,
comment='')
ccd.header.set('DCLOG1',
value='REFSPEC1 = non set',
comment='')
return ccd
def add_linear_wavelength_solution(ccd, x_axis, reference_lamp, crpix=1):
"""Add wavelength solution to the new FITS header
Defines FITS header keyword values that will represent the wavelength
solution in the header so that the image can be read in any other
astronomical tool. (e.g. IRAF)
Args:
        ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData`
x_axis (ndarray): Linearized x-axis in angstrom
reference_lamp (str): Name of lamp used to get wavelength solution.
crpix (int): reference pixel for defining wavelength solution.
Default 1. For most cases 1 should be fine.
Returns:
        ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance with the
          linear wavelength solution applied.
"""
assert crpix > 0
new_crpix = crpix
new_crval = x_axis[new_crpix - crpix]
new_cdelt = x_axis[new_crpix] - x_axis[new_crpix - crpix]
ccd.header.set('BANDID1', 'spectrum - background none, weights none, '
'clean no')
ccd.header.set('WCSDIM', 1)
ccd.header.set('CTYPE1', 'LINEAR ')
ccd.header.set('CRVAL1', new_crval)
ccd.header.set('CRPIX1', new_crpix)
ccd.header.set('CDELT1', new_cdelt)
ccd.header.set('CD1_1', new_cdelt)
ccd.header.set('LTM1_1', 1.)
ccd.header.set('WAT0_001', 'system=equispec')
ccd.header.set('WAT1_001', 'wtype=linear label=Wavelength units=angstroms')
ccd.header.set('DC-FLAG', 0)
ccd.header.set('DCLOG1', 'REFSPEC1 = {:s}'.format(os.path.basename(reference_lamp)))
return ccd
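# Added note (not part of the original module): with crpix=1 the header
# describes the mapping wavelength(p) = CRVAL1 + (p - CRPIX1) * CDELT1, so a
# hypothetical x_axis starting at 3500 angstrom with a 2 angstrom step yields
# CRVAL1=3500, CRPIX1=1 and CDELT1=CD1_1=2.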
def bias_subtract(ccd, master_bias, master_bias_name):
"""Subtract bias from file.
Wrapper for :func:`~ccdproc.subtract_bias`. The main goal is to have a
consistent API for apps using the Goodman Pipeline as a library.
Args:
ccd (CCDData): A file to be bias-subtracted
master_bias (CCDData):
master_bias_name (str): Full path to master bias file, this is added to
the bias-subtracted ccd under `GSP_BIAS`.
Returns:
A bias-subtracted file.
"""
ccd = ccdproc.subtract_bias(ccd=ccd, master=master_bias, add_keyword=False)
log.info("Bias subtracted")
ccd.header.set('GSP_BIAS',
value=os.path.basename(master_bias_name),
comment="Master Bias Image")
return ccd
def bin_reference_data(wavelength, intensity, serial_binning):
"""Bins a 1D array
This method reduces the size of an unbinned array by binning.
The function to combine data is `numpy.mean`.
Args:
wavelength (array): Wavelength axis
intensity (array): Intensity
serial_binning (int): Serial Binning is the binning in the
dispersion axis.
Returns:
Binned wavelength and intensity arrays.
"""
if serial_binning != 1:
b_wavelength = ccdproc.block_reduce(wavelength,
serial_binning,
np.mean)
b_intensity = ccdproc.block_reduce(intensity,
serial_binning,
np.mean)
return b_wavelength, b_intensity
else:
return wavelength, intensity
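# Added worked example (not part of the original module): block-reducing a
# hypothetical 10-point array with serial_binning=2 yields pairwise means,
# e.g. bin_reference_data(np.arange(10.), np.arange(10.), 2) returns
# ([0.5, 2.5, 4.5, 6.5, 8.5], [0.5, 2.5, 4.5, 6.5, 8.5]).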
def call_cosmic_rejection(ccd,
image_name,
out_prefix,
red_path,
keep_files=False,
prefix='c',
method='dcr',
save=False):
"""Call for the appropriate cosmic ray rejection method
    There are four options when dealing with cosmic ray rejection in this
    pipeline. The default option is called ``default`` and it will choose the
    rejection method based on the binning of the image. Note that there are
    only two *real* methods: ``dcr`` and ``lacosmic``.
    For binning ``1x1`` the choice will be ``dcr``; for binning ``2x2`` and
    ``3x3`` it will be ``lacosmic``.
    The method ``dcr`` is a program written in C by Wojtek
    Pych (http://users.camk.edu.pl/pych/DCR/) that works very well for
    spectroscopy; its only drawback is that it does not integrate natively
    with Python and has to be run through subprocess.
    The method ``lacosmic`` is well known but there are different
    implementations; we started using :func:`~ccdproc.cosmicray_lacosmic` but
    later shifted towards ``astroscrappy.detect_cosmics``. The LACosmic method
    was developed by Pieter G. van Dokkum. See
    <http://www.astro.yale.edu/dokkum/lacosmic/>
There is also the option of skipping cosmic ray removal by using ``none``.
Args:
        ccd (CCDData): a :class:`~astropy.nddata.CCDData` instance.
image_name (str): Science image name.
out_prefix (str): Partial prefix to be added to the image name. Related
to previous processes and not cosmic ray rejection.
red_path (str): Path to reduced data directory.
keep_files (bool): If True, the original file and the cosmic ray mask
will not be deleted. Default is False.
prefix (str): Cosmic ray rejection related prefix to be added to image
name.
method (str): Method to use for cosmic ray rejection. There are four
options: `default`, `dcr`, `lacosmic` and `none`.
        save (bool): If True the image is saved; saving is disabled by default.
Returns:
:class:`~astropy.nddata.CCDData` instance and `out_prefix` which is the
prefix added to the image name.
Raises:
NotImplementedError if the `method` argument is not `dcr`, `lacosmic`
nor `none`.
"""
log.debug("Cosmic ray rejection method from input is '{:s}'".format(method))
binning, _ = [int(i) for i in ccd.header['CCDSUM'].split()]
if method == 'default':
if binning == 1:
method = 'dcr'
log.info('Setting cosmic ray rejection method to:'
' {:s}'.format(method))
elif binning == 2:
method = 'lacosmic'
log.info('Setting cosmic ray rejection method to:'
' {:s}'.format(method))
elif binning == 3:
method = 'lacosmic'
log.info('Setting cosmic ray rejection method to:'
' {:s}'.format(method))
if ccd.header['OBSTYPE'] == 'COMP' and method != 'none':
log.info("Changing cosmic ray rejection method from '{:s}' to 'none'"
" for comparison lamp. Prefix 'c' will be added "
"anyway.".format(method))
method = 'none'
log.debug("Cosmic ray rejection changed to 'none' for this file: "
"{:s}".format(ccd.header['GSP_FNAM']))
out_prefix = prefix + out_prefix
if method == 'dcr':
        log.warning('DCR applies the correction directly to the image; if '
                    'you want to keep the mask use --keep-cosmic-files')
if not os.path.isfile(os.path.join(red_path, 'dcr.par')):
_create = GenerateDcrParFile()
_instrument = ccd.header['INSTCONF']
_binning, _ = ccd.header['CCDSUM'].split()
_create(instrument=_instrument, binning=_binning, path=red_path)
full_path = os.path.join(red_path, f"{out_prefix}_{image_name}")
ccd.header.set('GSP_COSM',
value="DCR",
comment="Cosmic ray rejection method")
write_fits(ccd=ccd, full_path=full_path)
log.info('Saving image: {:s}'.format(full_path))
in_file = f"{out_prefix}_{image_name}"
# This is to return the prefix that will be used by dcr
# Not to be used by dcr_cosmicray_rejection
out_prefix = prefix + out_prefix
ccd = dcr_cosmicray_rejection(data_path=red_path,
in_file=in_file,
prefix=prefix,
keep_cosmic_files=keep_files,
save=save)
return ccd, out_prefix
elif method == 'lacosmic':
ccd = astroscrappy_lacosmic(ccd=ccd,
red_path=red_path,
save_mask=keep_files)
out_prefix = prefix + out_prefix
full_path = os.path.join(red_path, f"{out_prefix}_{image_name}")
if save:
log.info('Saving image: {:s}'.format(full_path))
write_fits(ccd=ccd, full_path=full_path)
return ccd, out_prefix
elif method == 'none':
full_path = os.path.join(red_path, f"{out_prefix}_{image_name}")
if save:
log.info('Saving image: {:s}'.format(full_path))
write_fits(ccd=ccd, full_path=full_path)
return ccd, out_prefix
else:
log.error('Unrecognized Cosmic Method {:s}'.format(method))
raise NotImplementedError
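# Added usage sketch (hypothetical names and paths; not part of the original
# module):
#
#     ccd, out_prefix = call_cosmic_rejection(
#         ccd=ccd, image_name='0001_science.fits', out_prefix='zo',
#         red_path='/data/red', method='default', save=True)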
def create_master_bias(bias_files,
raw_data,
reduced_data,
technique):
"""Create Master Bias
Given a :class:`~pandas.DataFrame` object that contains a list of compatible bias.
This function creates the master flat using ccdproc.combine using median
and 3-sigma clipping.
Args:
bias_files (list): List of all bias files to be combined. They have
to be compatible with each other as no check is done in this method.
raw_data (str): Full path to raw data location.
reduced_data (str): Full path to were reduced data will reside.
technique (str): Name of observing technique. Imaging or
Spectroscopy.
Returns:
master_bias (object):
master_bias_name (str):
"""
assert isinstance(bias_files, list)
master_bias_list = []
log.info('Creating master bias')
for image_file in bias_files:
image_full_path = os.path.join(raw_data, image_file)
ccd = read_fits(image_full_path, technique=technique)
log.debug('Loading bias image: ' + image_full_path)
master_bias_list.append(ccd)
# combine bias for spectroscopy
log.info("Combining {} images to create master bias".format(
len(master_bias_list)))
master_bias = ccdproc.combine(master_bias_list,
method='median',
sigma_clip=True,
sigma_clip_low_thresh=3.0,
sigma_clip_high_thresh=3.0,
add_keyword=False)
# add name of images used to create master bias
for n in range(len(bias_files)):
master_bias.header['GSP_IC{:02d}'.format(n + 1)] = (
bias_files[n],
'Image used to create master bias')
master_bias_name = "master_bias_{}_{}_{}_R{:05.2f}_G{:05.2f}.fits".format(
master_bias.header['INSTCONF'].upper(),
technique[0:2].upper(),
"x".join(master_bias.header['CCDSUM'].split()),
float(master_bias.header['RDNOISE']),
float(master_bias.header['GAIN'])
)
write_fits(ccd=master_bias,
full_path=os.path.join(reduced_data, master_bias_name),
combined=True,
overwrite=True)
log.info('Created master bias: ' + master_bias_name)
return master_bias, master_bias_name
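# Added usage sketch (hypothetical file names and paths; not part of the
# original module):
#
#     master_bias, master_bias_name = create_master_bias(
#         bias_files=['bias_001.fits', 'bias_002.fits', 'bias_003.fits'],
#         raw_data='/data/raw', reduced_data='/data/red',
#         technique='Spectroscopy')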
def create_master_flats(flat_files,
raw_data,
reduced_data,
technique,
overscan_region,
trim_section,
master_bias_name,
new_master_flat_name,
saturation_threshold,
ignore_bias=False):
"""Creates master flats
    Using a list of compatible flat images it combines them using median and
    1-sigma clipping. It also applies all previous standard calibrations to
    each image.
Args:
        flat_files (list): List of files previously filtered; there is no
          compatibility check in this function and the files are assumed to
          be combinable.
raw_data (str): Full path to raw data.
reduced_data (str): Full path to reduced data. Where reduced data
should be stored.
technique (str): Observing technique. Imaging or Spectroscopy.
        overscan_region (str): Defines the area to be used to estimate the
          overscan region for overscan correction. Should be in the format
          `[x1:x2,y1:y2]`.
        trim_section (str): Defines the area to be used after trimming
          unusable selected parts (edges). In the format `[x1:x2,y1:y2]`.
master_bias_name (str): Master bias name, can be full path or not.
If it is a relative path, the path will be ignored and will define
the full path as `raw_path` + `basename`.
new_master_flat_name (str): Name of the file to save new master
flat. Can be absolute path or not.
saturation_threshold (int): Saturation threshold, defines the percentage of
pixels above saturation level allowed for flat field images.
        ignore_bias (bool): Flag to create master flats without bias
          subtraction.
Returns:
The master flat :class:`~astropy.nddata.CCDData` instance and the
          name under which the master flat was stored. If it can't build
the master flat it will return None, None.
"""
cleaned_flat_list = []
master_flat_list = []
if os.path.isabs(os.path.dirname(new_master_flat_name)):
master_flat_name = new_master_flat_name
else:
master_flat_name = os.path.join(
reduced_data, os.path.basename(new_master_flat_name))
if not ignore_bias:
if os.path.isabs(os.path.dirname(master_bias_name)) and \
os.path.exists(master_bias_name):
master_bias = read_fits(master_bias_name, technique=technique)
else:
master_bias_name = os.path.join(reduced_data,
os.path.basename(master_bias_name))
master_bias = read_fits(master_bias_name, technique=technique)
master_bias = image_trim(ccd=master_bias,
trim_section=trim_section,
trim_type='trimsec')
log.info('Creating Master Flat')
for flat_file in flat_files:
if os.path.isabs(flat_file):
image_full_path = flat_file
else:
image_full_path = os.path.join(raw_data, flat_file)
log.debug('Loading flat image: ' + image_full_path)
ccd = read_fits(image_full_path, technique=technique)
if ignore_bias and technique == 'Spectroscopy':
ccd = image_overscan(ccd, overscan_region=overscan_region)
ccd = image_trim(ccd=ccd,
trim_section=trim_section,
trim_type='trimsec')
if not ignore_bias:
ccd = ccdproc.subtract_bias(ccd,
master_bias,
add_keyword=False)
ccd.header['GSP_BIAS'] = (
os.path.basename(master_bias_name),
'Master bias image')
else:
log.warning('Ignoring bias on request')
if is_file_saturated(ccd=ccd,
threshold=saturation_threshold):
log.warning('Removing saturated image {:s}. '
                        'Use --saturation_threshold to change the saturation '
'level'.format(flat_file))
continue
else:
cleaned_flat_list.append(flat_file)
master_flat_list.append(ccd)
if master_flat_list != []:
log.info("Combining {} images to create master flat".format(
len(master_flat_list)))
master_flat = ccdproc.combine(master_flat_list,
method='median',
sigma_clip=True,
sigma_clip_low_thresh=1.0,
sigma_clip_high_thresh=1.0,
add_keyword=False)
        # add names of images used to create master flat
for n in range(len(cleaned_flat_list)):
master_flat.header['GSP_IC{:02d}'.format(n + 1)] = (
cleaned_flat_list[n],
'Image used to create master flat')
write_fits(ccd=master_flat,
full_path=master_flat_name,
combined=True)
log.info('Created Master Flat: ' + master_flat_name)
return master_flat, master_flat_name
else:
log.error('Empty flat list. Check that they do not exceed the '
'saturation_threshold limit.')
return None, None
def cross_correlation(reference,
compared,
slit_size,
serial_binning,
selection_bias='none',
mode='full',
plot=False):
"""Do cross correlation of two 1D spectra
    It convolves the reference lamp according to the slit size of the
    comparison lamp it is being matched against.
    If the slit is larger than 3 arcseconds the reference lamp is convolved
    with a `~astropy.convolution.Box1DKernel`, because spectral lines then
    look more like a block than a line. If it is 3 arcseconds or smaller it
    uses a `~astropy.convolution.Gaussian1DKernel` whose width is weighted by
    the binning.
    All reference lamps are unbinned, i.e. their binning is 1x1.
Args:
reference (array): Reference array.
compared (array): Array to be matched. A new reference lamp.
slit_size (float): Slit width in arcseconds
serial_binning (int): Binning in the spectral axis
        selection_bias (str): Either 'none' (default) or 'center'. Select
          'center' for arrays expected to be similar, where only a
          relatively small shift is expected.
mode (str): Correlation mode for `signal.correlate`.
plot (bool): Switch debugging plots on or off.
Returns:
        correlation_value (float): Shift value in pixels.
"""
cyaxis2 = compared
if slit_size > 3:
box_width = slit_size / (0.15 * serial_binning)
log.debug('BOX WIDTH: {:f}'.format(box_width))
box_kernel = Box1DKernel(width=box_width)
max_before = np.max(reference)
cyaxis1 = convolve(reference, box_kernel)
max_after = np.max(cyaxis1)
cyaxis1 *= max_before / max_after
else:
kernel_stddev = slit_size / (0.15 * serial_binning)
gaussian_kernel = Gaussian1DKernel(stddev=kernel_stddev)
cyaxis1 = convolve(reference, gaussian_kernel)
cyaxis2 = convolve(compared, gaussian_kernel)
ccorr = signal.correlate(cyaxis1, cyaxis2, mode=mode)
x_ccorr = np.linspace(-int(len(ccorr) / 2.),
int(len(ccorr) / 2.),
len(ccorr))
if selection_bias == 'center':
gaussian_model = models.Gaussian1D(amplitude=1, mean=len(ccorr) / 2., stddev=50)
gaussian_weights = gaussian_model(range(len(x_ccorr)))
gaussian_weighted = gaussian_weights * ccorr
max_index = np.argmax(gaussian_weighted)
elif selection_bias == 'none':
max_index = np.argmax(ccorr)
else:
raise NotImplementedError(f"'selection_bias' {selection_bias} is not valid. Options are 'none' and 'center'")
correlation_value = x_ccorr[max_index]
log.debug(f"Found correlation value of {correlation_value}")
if plot: # pragma: no cover
plt.ion()
plt.title('Cross Correlation')
plt.xlabel('Lag Value')
plt.ylabel('Correlation Value')
plt.plot(x_ccorr, ccorr, label='Original Cross Correlation')
if selection_bias == 'center':
plt.plot(x_ccorr, gaussian_weights, label="Centered Gaussian")
plt.plot(x_ccorr, gaussian_weighted, label='Gaussian Weighted')
plt.legend(loc='best')
plt.draw()
plt.pause(2)
plt.clf()
plt.ioff()
return correlation_value
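# Added note (not part of the original module): the factor 0.15 used above is
# the unbinned pixel scale in arcseconds assumed by this module, so a
# hypothetical 1.03 arcsec slit at serial_binning=1 maps to a Gaussian kernel
# stddev of about 6.9 pixels, while a 5 arcsec slit maps to a Box1DKernel
# about 33 pixels wide.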
def classify_spectroscopic_data(path, search_pattern):
"""Classify data by grouping them by a set of keywords.
This function uses :class:`~ccdproc.ImageFileCollection`. First it creates a
collection of information regarding the images located in ``path`` that
match the pattern ``search_pattern``. The information obtained are all
keywords listed in the list ``keywords``.
The :class:`~ccdproc.ImageFileCollection` object is translated into
:class:`~pandas.DataFrame` and then is used much like an SQL database to
select and filter values and in that way put them in groups that are
:class:`~pandas.DataFrame` instances.
The keywords retrieved are:
- ``date``
- ``slit``
- ``date-obs``
- ``obstype``
- ``object``
- ``exptime``
- ``obsra``
- ``obsdec``
- ``grating``
- ``cam_targ``
- ``grt_targ``
- ``filter``
- ``filter2``
- ``gain``
- ``rdnoise``.
Then all data is grouped by matching the following keywords:
- ``slit``
- ``radeg``
- ``decdeg``
- ``grating``
- ``cam_targ``
- ``grt_targ``
- ``filter``
- ``filter2``
- ``gain``
- ``rdnoise``
And finally, every group is classified as: a *comparison lamp-only* group,
an *object-only* group or a *group of object and comparison lamps*. The
comparison lamps present in the last group (``COMP`` + ``OBJECT``) are also
added in the first one (``COMP``-only).
Args:
path (str): Path to data location
search_pattern (str): Prefix to match files.
Returns:
Instance of :class:`goodman_pipeline.core.core.NightDataContainer`
"""
log.debug("Spectroscopic Data Classification")
search_path = os.path.join(path, search_pattern + '*.fits')
file_list = glob.glob(search_path)
if file_list == []:
log.error('No file found using search pattern '
'"{:s}"'.format(search_pattern))
sys.exit('Please use the argument --search-pattern to define the '
'common prefix for the files to be processed.')
data_container = NightDataContainer(path=path,
instrument=str('Red'),
technique=str('Spectroscopy'))
keywords = ['date',
'slit',
'date-obs',
'obstype',
'object',
'exptime',
'obsra',
'obsdec',
'grating',
'cam_targ',
'grt_targ',
'filter',
'filter2',
'gain',
'rdnoise',
'lamp_hga',
'lamp_ne',
'lamp_ar',
'lamp_fe',
'lamp_cu'
]
ifc = ccdproc.ImageFileCollection(path, keywords=keywords, filenames=file_list)
pifc = ifc.summary.to_pandas()
pifc['radeg'] = ''
pifc['decdeg'] = ''
for i in pifc.index.tolist():
radeg, decdeg = ra_dec_to_deg(pifc.obsra.iloc[i], pifc.obsdec.iloc[i])
pifc.iloc[i, pifc.columns.get_loc('radeg')] = '{:.6f}'.format(radeg)
pifc.iloc[i, pifc.columns.get_loc('decdeg')] = '{:.6f}'.format(decdeg)
# now we can compare using degrees
confs = pifc.groupby(['slit',
'radeg',
'decdeg',
'grating',
'cam_targ',
'grt_targ',
'filter',
'filter2',
'gain',
'rdnoise']).size().reset_index().rename(
columns={0: 'count'})
for i in confs.index:
spec_group = pifc[((pifc['slit'] == confs.iloc[i]['slit']) &
(pifc['radeg'] == confs.iloc[i]['radeg']) &
(pifc['decdeg'] == confs.iloc[i]['decdeg']) &
(pifc['grating'] == confs.iloc[i]['grating']) &
(pifc['cam_targ'] == confs.iloc[i]['cam_targ']) &
(pifc['grt_targ'] == confs.iloc[i]['grt_targ']) &
(pifc['filter'] == confs.iloc[i]['filter']) &
(pifc['filter2'] == confs.iloc[i]['filter2']) &
(pifc['gain'] == confs.iloc[i]['gain']) &
(pifc['rdnoise'] == confs.iloc[i]['rdnoise']))]
group_obstype = spec_group.obstype.unique()
if any([value in ['COMP', 'ARC'] for value in group_obstype]) and \
len(group_obstype) == 1:
log.debug('Adding COMP group')
data_container.add_comp_group(comp_group=spec_group)
elif any([value in ['OBJECT', 'SPECTRUM'] for value in group_obstype]) \
and len(group_obstype) == 1:
log.debug('Adding OBJECT group')
data_container.add_object_group(object_group=spec_group)
else:
log.debug('Adding OBJECT-COMP group')
data_container.add_spec_group(spec_group=spec_group)
return data_container
def combine_data(image_list, dest_path, prefix=None, output_name=None,
method="median",
save=False):
"""Combine a list of :class:`~astropy.nddata.CCDData` instances.
Args:
image_list (list): Each element should be an instance of
:class:`~astropy.nddata.CCDData`
dest_path (str): Path to where the new image should saved
prefix (str): Prefix to add to the image file name
output_name (str): Alternatively a file name can be parsed, this will
ignore `prefix`.
method (str): Method for doing the combination, this goes straight to
the call of `ccdproc.combine` function.
save (bool): If True will save the combined images. If False it will
ignore `prefix` or `output_name`.
Returns:
A combined image as a :class:`~astropy.nddata.CCDData` object.
"""
assert len(image_list) > 1
combined_full_path = os.path.join(dest_path, 'combined_file.fits')
if output_name is not None:
combined_full_path = os.path.join(dest_path, output_name)
elif prefix is not None:
sample_image_name = random.choice(image_list).header['GSP_FNAM']
splitted_name = sample_image_name.split('_')
splitted_name[0] = re.sub('_', '', prefix)
splitted_name[1] = 'comb'
splitted_name[-1] = re.sub('.fits', '', splitted_name[-1])
combined_base_name = "_".join(splitted_name)
number = len(glob.glob(
os.path.join(dest_path,
combined_base_name + "*.fits")))
combined_full_path = os.path.join(
dest_path,
combined_base_name + "_{:03d}.fits".format(number + 1))
# combine image
combined_image = ccdproc.combine(image_list,
method=method,
sigma_clip=True,
sigma_clip_low_thresh=1.0,
sigma_clip_high_thresh=1.0,
add_keyword=False)
# add name of files used in the combination process
for i in range(len(image_list)):
image_name = image_list[i].header['GSP_FNAM']
new_image_name = '_' + image_name
if os.path.isfile(os.path.join(dest_path, image_name)):
write_fits(image_list[i],
full_path=os.path.join(dest_path,
new_image_name))
log.info("Deleting file {}".format(image_name))
os.unlink(os.path.join(dest_path, image_name))
else:
log.error("File {} does not exists".format(
os.path.join(dest_path,
image_name)))
combined_image.header.set("GSP_IC{:02d}".format(i + 1),
value=new_image_name,
comment='Image used to create combined')
if save:
write_fits(combined_image,
full_path=combined_full_path,
combined=True)
log.info("Saved combined file to {}".format(combined_full_path))
return combined_image
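# Added usage sketch (hypothetical inputs; not part of the original module).
# Note that parent files found in dest_path are re-saved with a '_' prefix
# and the originals deleted:
#
#     combined = combine_data(image_list=[ccd1, ccd2, ccd3],
#                             dest_path='/data/red', prefix='eczo_',
#                             method='median', save=False)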
def convert_time(in_time):
"""Converts time to seconds since epoch
Args:
in_time (str): time obtained from header's keyword DATE-OBS
Returns:
time in seconds since epoch
"""
return calendar.timegm(time.strptime(in_time, "%Y-%m-%dT%H:%M:%S.%f"))
def dcr_cosmicray_rejection(data_path, in_file, prefix,
keep_cosmic_files=False, save=True):
"""Runs an external code for cosmic ray rejection
DCR was created by Wojtek Pych and the code can be obtained from
http://users.camk.edu.pl/pych/DCR/ and is written in C. Contrary to
ccdproc's LACosmic it actually applies the correction, and also doesn't
update the mask attribute since it doesn't work with :class:`~astropy.nddata.CCDData` instances.
    The binary takes three positional arguments: 1. input image,
    2. output image and 3. cosmic rays image. It also requires a dcr.par file
    in the working directory. All this is handled by this function; if
    `keep_cosmic_files` is False it will remove the original image and the
    cosmic rays image. The removal of the original image is absolutely safe
    when used in the context of the goodman pipeline, however if you want to
    implement it somewhere else, be careful.
Notes:
        This function runs an external program, therefore it doesn't return
        anything natively; instead it creates a new image. A workaround has
        been created that loads the new image and deletes the file.
Args:
data_path (str): Data location
in_file (str): Name of the file to have its cosmic rays removed
prefix (str): Prefix to add to the file with the cosmic rays removed
        keep_cosmic_files (bool): True to keep the input and cosmic ray
            files instead of deleting them.
save (bool): Toggles the option of saving the image.
"""
log.info('Removing cosmic rays using DCR by Wojtek Pych')
log.debug('See http://users.camk.edu.pl/pych/DCR/')
# add the prefix for the output file
out_file = prefix + in_file
# define the name for the cosmic rays file
cosmic_file = 'cosmic_' + '_'.join(in_file.split('_')[1:])
# define full path for all the files involved
full_path_in = os.path.join(data_path, in_file)
full_path_out = os.path.join(data_path, out_file)
full_path_cosmic = os.path.join(data_path, cosmic_file)
# this is the command for running dcr, all arguments are required
command = 'dcr {:s} {:s} {:s}'.format(full_path_in,
full_path_out,
full_path_cosmic)
log.debug('DCR command:')
log.debug(command)
    # get the current working directory to go back to it later in case
    # the pipeline has not been called from the same data directory.
cwd = os.getcwd()
    # move to the directory where the data is; dcr is expecting a file dcr.par
os.chdir(data_path)
# call dcr
try:
dcr = subprocess.Popen(command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as error:
log.error(error)
sys.exit('Your system can not locate the executable file dcr, try '
'moving it to /bin or create a symbolic link\n\n\tcd /bin\n\t'
'sudo ln -s /full/path/to/dcr')
# return False
# if the process is taking too long to respond, kill it
# kill_process = lambda process: process.kill()
def kill_process(process): # pragma: no cover
log.error("DCR Timed out")
process.kill()
dcr_timer = Timer(10, kill_process, [dcr])
try:
dcr_timer.start()
stdout, stderr = dcr.communicate()
finally:
dcr_timer.cancel()
# wait for dcr to terminate
# dcr.wait()
# go back to the original directory. Could be the same.
os.chdir(cwd)
# If no error stderr is an empty string
if stderr != b'':
log.error(stderr)
if b'dcr: not found' in stderr:
sys.exit('Your system can not locate the executable file dcr, try '
'moving it to /bin or create a symbolic link\n\n\tcd '
'/bin\n\tsudo ln -s /full/path/to/dcr')
elif b'ERROR' in stdout:
for output_line in stdout.split(b'\n'):
log.error(output_line.decode("utf-8"))
else:
for output_line in stdout.split(b'\n'):
log.debug(output_line)
# delete extra files only if the execution ended without error
if not keep_cosmic_files and stderr == b'' and b'USAGE:' not in stdout \
and b'ERROR! calc_submean() failed' not in stdout:
try:
log.warning('Removing input file: {:s}'.format(full_path_in))
os.unlink(full_path_in)
except OSError as error:
log.error(error)
try:
log.warning(
'Removing cosmic rays file: {:s}'.format(full_path_cosmic))
os.unlink(full_path_cosmic)
except OSError as error:
log.error(error)
# recovers the saved file and returns the :class:`~astropy.nddata.CCDData`
# instance
if os.path.isfile(full_path_out):
ccd = ccdproc.CCDData.read(full_path_out, unit=u.adu)
if not save:
log.warning("Removing file because the attribute 'save' "
"is set to False")
os.unlink(full_path_out)
return ccd
def define_trim_section(sample_image, technique):
"""Get the initial trim section
    The initial trim section is usually defined in the header with the
    keyword ``TRIMSEC``, but in the case of Goodman HTS this does not work
    well, in particular for spectroscopy, where it is more likely to have
    mixed binnings and so on.
Args:
sample_image (str): Full path to sample image.
technique (str): The name of the technique, the options are:
Imaging or Spectroscopy.
Returns:
The trim section in the format ``[x1:x2, y1:y2]``
"""
assert os.path.isabs(os.path.dirname(sample_image))
assert os.path.isfile(sample_image)
trim_section = None
# TODO (simon): Consider binning and possibly ROIs for trim section
log.warning('Determining trim section. Assuming you have only one '
'kind of data in this folder')
ccd = read_fits(sample_image, technique=technique)
# serial binning - dispersion binning
# parallel binning - spatial binning
spatial_length, dispersion_length = ccd.data.shape
serial_binning, \
parallel_binning = [int(x) for x
in ccd.header['CCDSUM'].split()]
# Trim section is valid for Blue and Red Camera Binning 1x1 and
# Spectroscopic ROI
if technique == 'Spectroscopy':
# left
low_lim_spectral = int(np.ceil(51. / serial_binning))
# right
high_lim_spectral = int(4110 / serial_binning)
# bottom
low_lim_spatial = 2
# top
# t = int(1896 / parallel_binning)
# TODO (simon): Need testing
# trim_section = '[{:d}:{:d},{:d}:{:d}]'.format(l, r, b, t)
trim_section = '[{:d}:{:d},{:d}:{:d}]'.format(
low_lim_spectral,
high_lim_spectral,
low_lim_spatial,
spatial_length)
elif technique == 'Imaging':
trim_section = ccd.header['TRIMSEC']
log.info('Trim Section: %s', trim_section)
return trim_section
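# Added worked example (not part of the original module): for a hypothetical
# Spectroscopic 1x1 frame with 1896 spatial pixels this returns
# '[51:4110,2:1896]'; at binning 2x2 the spectral limits become [26:2055].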
def extraction(ccd,
target_trace,
spatial_profile,
extraction_name):
"""Calls appropriate spectrum extraction routine
This function calls the appropriate extraction function based on
`extraction_name`
Notes:
Optimal extraction is not implemented.
Args:
ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData` containing a
2D spectrum
target_trace (object): Instance of astropy.modeling.Model, a low order
polynomial that defines the trace of the spectrum in the ccd object.
spatial_profile (Model): Instance of :class:`~astropy.modeling.Model`,
a Gaussian model previously fitted to the spatial profile of the 2D
spectrum contained in the ccd object.
extraction_name (str): Extraction type, can be `fractional` or
`optimal` though the optimal extraction is not implemented yet.
Returns:
ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData` containing a
1D spectrum. The attribute 'data' is replaced by the 1D array resulted
from the extraction process.
Raises:
NotImplementedError: When `extraction_name` is `optimal`.
"""
assert isinstance(ccd, ccdproc.CCDData)
assert isinstance(target_trace, Model)
if spatial_profile.__class__.name == 'Gaussian1D':
target_fwhm = spatial_profile.fwhm
elif spatial_profile.__class__.name == 'Moffat1D':
target_fwhm = spatial_profile.fwhm
else:
raise NotImplementedError
if extraction_name == 'fractional':
extracted, background, bkg_info = extract_fractional_pixel(
ccd=ccd,
target_trace=target_trace,
target_fwhm=target_fwhm,
extraction_width=2)
background_1, background_2 = bkg_info
if background_1 is not None:
log.info('Background extraction zone 1: {:s}'.format(background_1))
extracted.header.set('GSP_BKG1', value=background_1)
else:
log.info("Background extraction zone 1: 'none'")
if background_2 is not None:
log.info('Background extraction zone 2: {:s}'.format(background_2))
extracted.header.set('GSP_BKG2', value=background_2)
else:
log.info("Background extraction zone 2: 'none'")
return extracted
elif extraction_name == 'optimal':
raise NotImplementedError
def extract_fractional_pixel(ccd, target_trace, target_fwhm, extraction_width,
background_spacing=3):
"""Performs an spectrum extraction using fractional pixels.
Args:
        ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData` that
contains a 2D spectrum.
target_trace (object): Instance of astropy.modeling.models.Model that
defines the trace of the target on the image (ccd).
target_fwhm (float): FWHM value for the spatial profile
fitted to the target.
        extraction_width (int): Width of the extraction area as a function of
          `target_fwhm`. For instance, if `extraction_width` is set to 1 the
          function extracts 0.5 times `target_fwhm` to each side of the
          center of the traced target.
        background_spacing (float): Number of `target_fwhm` units separating
          the target extraction zone from the background. This is measured
          from the edge of the extraction zone to the edge of the background
          region.
"""
assert isinstance(ccd, ccdproc.CCDData)
assert isinstance(target_trace, Model)
log.info("Fractional Pixel Extraction for "
"{:s}".format(ccd.header['GSP_FNAM']))
spat_length, disp_length = ccd.data.shape
disp_axis = range(disp_length)
trace_points = target_trace(disp_axis)
apnum1 = None
background_info_1 = None
background_info_2 = None
non_background_sub = []
extracted_spectrum = []
background_list = []
if ccd.header['OBSTYPE'] not in ['OBJECT', 'SPECTRUM']:
log.debug("No background subtraction for OBSTYPE = "
"{:s}".format(ccd.header['OBSTYPE']))
for i in disp_axis:
# this defines the extraction limit for every column
low_limit = trace_points[i] - 0.5 * extraction_width * target_fwhm
high_limit = trace_points[i] + 0.5 * extraction_width * target_fwhm
if apnum1 is None:
# TODO (simon): add secondary targets
apnum1 = '{:d} {:d} {:.2f} {:.2f}'.format(1,
1,
low_limit,
high_limit)
ccd.header.set('APNUM1',
value=apnum1,
comment="Aperture in first column")
ccd.header.set('GSP_EXTR',
value="{:.2f}:{:.2f}".format(low_limit,
high_limit))
log.info("Extraction aperture in first column: {:s}".format(
ccd.header['GSP_EXTR']))
column_sum = fractional_sum(data=ccd.data,
index=i,
low_limit=low_limit,
high_limit=high_limit)
non_background_sub.append(column_sum)
if ccd.header['OBSTYPE'] in ['OBJECT', 'SPECTRUM']:
# background limits
# background_spacing is the distance from the edge of the target's
# limits defined by `int_low_limit` and
# `int_high_limit` in stddev units
background_width = high_limit - low_limit
# define pixel values for background subtraction
# low_background_zone
high_1 = low_limit - background_spacing * target_fwhm
low_1 = high_1 - background_width
# High background zone
low_2 = high_limit + background_spacing * target_fwhm
high_2 = low_2 + background_width
# validate background subtraction zones
background_1 = None
background_2 = None
# this has to be implemented, leaving it True assumes there is no
# restriction for background selection.
# TODO (simon): Implement background subtraction zones validation
neighbouring_target_condition = True
if low_1 > 0 and neighbouring_target_condition:
# integer limits
background_1 = fractional_sum(data=ccd.data,
index=i,
low_limit=low_1,
high_limit=high_1)
else:
log.error("Invalid Zone 1: [{}:{}]".format(low_1, high_1))
if high_2 < spat_length and neighbouring_target_condition:
background_2 = fractional_sum(data=ccd.data,
index=i,
low_limit=low_2,
high_limit=high_2)
else:
log.error("Invalid Zone 2: [{}:{}]".format(low_2, high_2))
# background = 0
if background_1 is not None and background_2 is None:
background = background_1
if background_info_1 is None:
background_info_1 = "{:.2f}:{:.2f} column {:d}".format(
low_1, high_1, i+1)
elif background_1 is None and background_2 is not None:
background = background_2
if background_info_2 is None:
background_info_2 = "{:.2f}:{:.2f} column {:d}".format(
low_2, high_2, i+1)
else:
background = np.mean([background_1, background_2])
if background_info_1 is None:
background_info_1 = "{:.2f}:{:.2f} column {:d}".format(
low_1, high_1, i+1)
if background_info_2 is None:
background_info_2 = "{:.2f}:{:.2f} column {:d}".format(
low_2, high_2, i+1)
# actual background subtraction
background_subtracted_column_sum = column_sum - background
# append column value to list
extracted_spectrum.append(background_subtracted_column_sum)
background_list.append(background)
else:
extracted_spectrum.append(column_sum)
new_ccd = ccd.copy()
new_ccd.data = np.asarray(extracted_spectrum)
if new_ccd.header['NAXIS'] != 1:
for i in range(int(new_ccd.header['NAXIS']), 1, -1):
new_ccd.header.remove(keyword="NAXIS{:d}".format(i))
new_ccd.header.set('NAXIS', value=1)
return new_ccd, np.asarray(background_list), [background_info_1,
background_info_2]
def extract_optimal():
"""Placeholder for optimal extraction method.
Raises:
NotImplementedError
"""
raise NotImplementedError
def evaluate_wavelength_solution(clipped_differences):
"""Calculates Root Mean Square Error for the wavelength solution.
Args:
clipped_differences (ndarray): Numpy masked array of differences
between reference line values in angstrom and the value calculated
using the model of the wavelength solution.
Returns:
Root Mean Square Error, number of points and number of points
rejected in the calculation of the wavelength solution.
"""
n_points = len(clipped_differences)
n_rejections = np.ma.count_masked(clipped_differences)
square_differences = []
for i in range(len(clipped_differences)):
if clipped_differences[i] is not np.ma.masked:
square_differences.append(clipped_differences[i] ** 2)
rms_error = np.sqrt(
np.sum(square_differences) / len(square_differences))
log.info('Wavelength solution RMS Error : {:.3f}'.format(
rms_error))
return rms_error, n_points, n_rejections
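# Added note (not part of the original module): the reported figure is
# RMSE = sqrt(sum(d_i ** 2) / N_unmasked), computed only over the differences
# that survived the sigma clipping; n_rejections counts the masked points.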
def fix_keywords(path, pattern='*.fits'):
"""Fix FITS uncompliance of some keywords
Uses automatic header fixing by :class:`~astropy.nddata.CCDData`. Note that
this only fixes FITS compliance.
Args:
path (str): Path to raw data
pattern (str): Search pattern for listing file in path.
"""
file_list = glob.glob(os.path.join(path, pattern))
for _file in file_list:
log.info("Fixing file {:s}".format(_file))
ccd = ccdproc.CCDData.read(_file, unit='adu')
ccd.write(_file, overwrite=True)
log.info("Fix succeeded!")
def fractional_sum(data, index, low_limit, high_limit):
"""Performs a fractional pixels sum
A fractional pixels sum is required several times while
extracting a 1D spectrum from a 2D spectrum. The method
is actually very simple.
    It requires the full data, the column and the range to sum; the range is
    given as real numbers. First it separates each limit into its integer
    and fractional parts. Then it sums the integer interval, subtracts the
    `low_limit`'s fractional part and adds the `high_limit`'s fractional
    part.
The sum is performed in one operation. It does not do
background subtraction, for which this very same method is used to
get the background sum to be subtracted later.
Args:
data (numpy.ndarray): 2D array that contains the 2D spectrum/image
index (int): Index of the column to be summed.
low_limit (float): Lower limit for the range to be summed.
high_limit (float): Higher limit for the range to be summed.
Returns:
Sum in ADU of all pixels and fractions between `low_limit` and
`high_limit`.
"""
    # these are the limits within which the full amount of flux on each
    # pixel is summed
low_fraction, low_integer = math.modf(low_limit)
high_fraction, high_integer = math.modf(high_limit)
column_sum = np.sum(data[int(low_integer):int(high_integer), index]) - \
data[int(low_integer), index] * low_fraction + \
data[int(high_integer), index] * high_fraction
return column_sum
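# Added worked example (not part of the original module): for a hypothetical
# column of ones, summing rows 2.5 to 4.5 gives
# sum(data[2:4]) - 0.5 * data[2] + 0.5 * data[4] = 2.0 - 0.5 + 0.5 = 2.0,
# i.e. exactly two pixels worth of flux.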
def get_best_flat(flat_name, path):
"""Look for matching master flat
Given a basename for master flats defined as a combination of key parameters
extracted from the header of the image that we want to flat field, this
function will find the name of the files that matches the base name and then
will choose the first. Ideally this should go further as to check signal,
time gap, etc.
After it identifies the file it will load it using
:class:`~astropy.nddata.CCDData` and return it along the filename.
In the case it fails it will return None instead of master_flat and another
None instead of master_flat_name.
Args:
flat_name (str): Full path of master flat basename. Ends in '*.fits'
for using glob.
path (str): Location to look for flats.
Returns:
master_flat (object): A :class:`~astropy.nddata.CCDData` instance.
master_flat_name (str): Full path to the chosen master flat.
"""
flat_list = glob.glob(os.path.join(path, flat_name))
log.debug('Flat base name {:s}'.format(flat_name))
log.debug('Matching master flats found: {:d}'.format(len(flat_list)))
if len(flat_list) > 0:
master_flat_name = flat_list[0]
# if len(flat_list) == 1:
# master_flat_name = flat_list[0]
# else:
# master_flat_name = flat_list[0]
# elif any('dome' in flat for flat in flat_list):
# master_flat_name =
master_flat = ccdproc.CCDData.read(master_flat_name, unit=u.adu)
log.debug('Found suitable master flat: {:s}'.format(master_flat_name))
return master_flat, master_flat_name
else:
log.error('There is no flat available')
return None, None
def get_central_wavelength(grating, grt_ang, cam_ang):
"""Calculates the central wavelength for a given spectroscopic mode
The equation used to calculate the central wavelength is the following
.. math::
        \\lambda_{central} = \\frac{1e6}{GRAT}\\left[
        \\sin\\left(\\frac{\\alpha \\pi}{180}\\right) +
        \\sin\\left(\\frac{\\beta \\pi}{180}\\right)\\right]
Args:
grating (str): Grating frequency as a string. Example '400'.
grt_ang (str): Grating Angle as a string. Example '12.0'.
cam_ang (str): Camera Angle as a string. Example '20.0'
Returns:
central_wavelength (float): Central wavelength as a float value.
"""
grating_frequency = float(grating) / u.mm
grt_ang = float(grt_ang) * u.deg
cam_ang = float(cam_ang) * u.deg
alpha = grt_ang.to(u.rad)
beta = cam_ang.to(u.rad) - grt_ang.to(u.rad)
# central_wavelength = (1e6 / grating_frequency) * \
# (np.sin(alpha * np.pi / 180.) +
# np.sin(beta * np.pi / 180.))
central_wavelength = (np.sin(alpha) + np.sin(beta)) / grating_frequency
central_wavelength = central_wavelength.to(u.angstrom)
log.debug('Found {:.3f} as central wavelength'.format(central_wavelength))
return central_wavelength
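# Added worked example (hypothetical settings; not part of the original
# module): get_central_wavelength('400', '7.5', '16.1') evaluates
# (sin(7.5 deg) + sin(8.6 deg)) / (400 / mm) ~= 7.0e-4 mm, i.e. roughly
# 7000 angstrom.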
def get_lines_in_lamp(ccd, plots=False):
"""Identify peaks in a lamp spectrum
    Uses `signal.argrelmax` to find peaks in a spectrum, i.e. emission
    lines, then calls the recenter_lines method, which recenters them
    using a "center of mass", because the maximum value (peak) is not
    always the center of the line.
Args:
ccd (CCDData): Lamp `ccdproc.CCDData` instance.
        plots (bool): Whether to plot or not.
Returns:
        lines_center (list): A list containing pixel values at the
            approximate location of the lines.
"""
if isinstance(ccd, ccdproc.CCDData):
lamp_data = ccd.data
lamp_header = ccd.header
raw_pixel_axis = range(len(lamp_data))
else:
log.error('Error receiving lamp')
return None
no_nan_lamp_data = np.asarray(np.nan_to_num(lamp_data))
filtered_data = np.where(
np.abs(no_nan_lamp_data > no_nan_lamp_data.min() +
0.03 * no_nan_lamp_data.max()),
no_nan_lamp_data,
None)
# replace None to zero and convert it to an array
none_to_zero = [0 if it is None else it for it in filtered_data]
filtered_data = np.array(none_to_zero)
_upper_limit = no_nan_lamp_data.min() + 0.03 * no_nan_lamp_data.max()
    slit_size = float(re.sub('[a-zA-Z"_*]', '', lamp_header['slit']))
serial_binning, parallel_binning = [
int(x) for x in lamp_header['CCDSUM'].split()]
new_order = int(round(float(slit_size) / (0.15 * serial_binning)))
log.debug('New Order: {:d}'.format(new_order))
peaks = signal.argrelmax(filtered_data, axis=0, order=new_order)[0]
if slit_size >= 5.:
lines_center = recenter_broad_lines(
lamp_data=no_nan_lamp_data,
lines=peaks,
order=new_order)
else:
# lines_center = peaks
lines_center = recenter_lines(no_nan_lamp_data, peaks)
if plots: # pragma: no cover
plt.close('all')
fig, ax = plt.subplots()
fig.canvas.set_window_title('Lines Detected')
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title('Lines detected in Lamp\n'
'{:s}'.format(lamp_header['OBJECT']))
ax.set_xlabel('Pixel Axis')
ax.set_ylabel('Intensity (counts)')
# Build legends without data to avoid repetitions
ax.plot([], color='k', label='Comparison Lamp Data')
ax.plot([], color='k', linestyle=':',
label='Spectral Line Detected')
ax.axhline(_upper_limit, color='r')
for line in peaks:
ax.axvline(line, color='k', linestyle=':')
ax.plot(raw_pixel_axis, no_nan_lamp_data, color='k')
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return lines_center
def get_overscan_region(sample_image, technique):
"""Get the right overscan region for spectroscopy
It works for the following ROI:
Spectroscopic 1x1
Spectroscopic 2x2
Spectroscopic 3x3
    The limits were measured on a Spectroscopic 1x1 image and then divided
    by the binning size. This was checked and works as expected.
Notes:
The regions are 1-based i.e. different to Python convention.
For Imaging there is no overscan region.
Args:
sample_image (str): Full path to randomly chosen image.
technique (str): Observing technique, either `Spectroscopy` or
`Imaging`
Returns:
        overscan_region (str): Region for overscan in the format
'[min:max,:]' where min is the starting point and max is the end
point of the overscan region.
"""
assert os.path.isabs(os.path.dirname(sample_image))
assert os.path.isfile(sample_image)
log.debug('Overscan Sample File ' + sample_image)
ccd = ccdproc.CCDData.read(sample_image, unit=u.adu)
# Image height - spatial direction
spatial_length, dispersion_length = ccd.data.shape
# Image width - spectral direction
# w = ccd.data.shape[1]
# Take the binnings
serial_binning, parallel_binning = \
[int(x) for x in ccd.header['CCDSUM'].split()]
if technique == 'Spectroscopy':
        log.info('Overscan region has been tested for ROIs '
                 'Spectroscopic 1x1, 2x2 and 3x3')
# define the four limits to avoid "local variable might be
# referenced before assignment" warnings
low_lim_spectral, \
high_lim_spectral, \
low_lim_spatial, \
high_lim_spatial = [None] * 4
if ccd.header['INSTCONF'] == 'Red':
# for red camera it is necessary to eliminate the first
# rows/columns (depends on the point of view) because
they come with an abnormally high signal. Usually the
# first 5 pixels. In order to find the corresponding
# value for the subsequent binning divide by the
# binning size.
# The numbers 6 and 49 were obtained from visual
# inspection.
# left
low_lim_spectral = int(np.ceil(6. / serial_binning))
# right
high_lim_spectral = int(49. / serial_binning)
# bottom
low_lim_spatial = 1
# top
high_lim_spatial = spatial_length
elif ccd.header['INSTCONF'] == 'Blue':
# 16 is the length of the overscan region with no
# binning.
# left
low_lim_spectral = 1
# right
high_lim_spectral = int(16. / serial_binning)
# bottom
low_lim_spatial = 1
# top
high_lim_spatial = spatial_length
overscan_region = '[{:d}:{:d},{:d}:{:d}]'.format(
low_lim_spectral,
high_lim_spectral,
low_lim_spatial,
high_lim_spatial)
elif technique == 'Imaging':
log.warning("Imaging mode doesn't have overscan "
"region. Use bias instead.")
overscan_region = None
else:
overscan_region = None
log.info('Overscan Region: %s', overscan_region)
return overscan_region
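# Minimal sketch of how the 1-based overscan region string above is
# assembled, using invented values for a Blue camera frame binned 2x2
# with a binned height of 1030 pixels.
def _example_overscan_region_string():
    serial_binning = 2
    spatial_length = 1030
    low_lim_spectral = 1
    high_lim_spectral = int(16. / serial_binning)  # 16 unbinned columns
    overscan_region = '[{:d}:{:d},{:d}:{:d}]'.format(
        low_lim_spectral, high_lim_spectral, 1, spatial_length)
    return overscan_region  # '[1:8,1:1030]'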
def get_slit_trim_section(master_flat, debug_plots=False):
"""Find the slit edges to trim all data
Using a master flat, ideally with a good signal-to-noise ratio, this
function will identify the edges of the slit projected onto the detector.
Having this done will reduce the overall processing time and also reduce
the introduction of artifacts due to non-illuminated regions in the
detectors, such as NaN, -INF, +INF, etc.
Args:
master_flat (CCDData): A :class:`~astropy.nddata.CCDData` instance.
debug_plots (bool): Flag to show debugging plots
Returns:
slit_trim_section (str): Trim section in spatial direction in the format
[:,slit_lower_limit:slit_higher_limit]
"""
x, y = master_flat.data.shape
# Using the middle point to make calculations, usually flats have good
# illumination already at the middle.
middle = int(y / 2.)
ccd_section = master_flat.data[:, middle:middle + 200]
ccd_section_median = np.median(ccd_section, axis=1)
spatial_axis = range(len(ccd_section_median))
# set values for initial box model definition
box_max = np.max(ccd_section_median)
box_center = len(ccd_section_median) / 2.
box_width = .75 * len(ccd_section_median)
# box model definition
box_model = models.Box1D(amplitude=box_max, x_0=box_center, width=box_width)
box_fitter = fitting.SimplexLSQFitter()
fitted_box = box_fitter(box_model, spatial_axis, ccd_section_median)
# the number of pixels that will be removed from the detected edge of the
# image on each side
offset = 10
if fitted_box.width.value < x:
log.debug("Slit detected. Adding a 10 pixels offset")
else:
log.debug("Slit limits not detected. Setting additional "
"offset to 0")
offset = 0
# Here we force the slit limits within the boundaries of the data (image);
# this defines a preliminary set of slit limits
l_lim = 1 + fitted_box.x_0.value - 0.5 * fitted_box.width.value + offset
h_lim = 1 + fitted_box.x_0.value + 0.5 * fitted_box.width.value - offset
low_lim = int(np.max([1 + offset, l_lim + 1]))
high_lim = int(np.min([h_lim, len(ccd_section_median) - offset]))
# define the slit trim section (IRAF style) and
# convert to 1-based indexing
slit_trim_section = '[1:{:d},{:d}:{:d}]'.format(y,
low_lim,
high_lim)
log.debug("Slit Trim Section: {:s}".format(slit_trim_section))
# debugging plots that have to be manually turned on
if debug_plots: # pragma: no cover
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
plt.title('Slit Edge Detection')
plt.plot(box_model(spatial_axis), color='c', label='Initial Box1D')
plt.plot(fitted_box(spatial_axis), color='k', label='Fitted Box1D')
plt.plot(ccd_section_median, label='Median Along Disp.')
# plt.plot(pseudo_derivative, color='g', label='Pseudo Derivative')
# plt.axvline(None, color='r', label='Detected Edges')
# -1 to make it zero-based.
plt.axvline(low_lim - 1, color='r', label='Detected Edges')
plt.axvline(high_lim - 1, color='r')
# for peak in peaks:
# plt.axvline(peak, color='r')
plt.legend(loc='best')
plt.show()
return slit_trim_section
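# Standalone sketch of the slit-edge idea used above: fit a Box1D model
# to a synthetic illumination profile and read the slit limits off the
# fitted center and width. The profile and starting values are made up.
def _example_box1d_slit_fit():
    spatial_axis = np.arange(300)
    profile = np.where((spatial_axis > 40) & (spatial_axis < 260),
                       1000., 10.)
    box_model = models.Box1D(amplitude=1000., x_0=150., width=220.)
    box_fitter = fitting.SimplexLSQFitter()
    fitted_box = box_fitter(box_model, spatial_axis, profile)
    low_limit = fitted_box.x_0.value - 0.5 * fitted_box.width.value
    high_limit = fitted_box.x_0.value + 0.5 * fitted_box.width.value
    return low_limit, high_limit  # approximately (40, 260)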
def get_spectral_characteristics(ccd, pixel_size, instrument_focal_length):
"""Calculates some Goodman's specific spectroscopic values.
From the header value for Grating, Grating Angle and Camera Angle it is
possible to estimate what are the wavelength values at the edges as well
as in the center. It was necessary to add offsets though, since the
formulas provided are slightly off. The values are only an estimate.
Args:
ccd (CCDData): Lamp `ccdproc.CCDData` instance
pixel_size (float): Pixel size in microns
instrument_focal_length (float): Instrument focal length
Returns:
spectral_characteristics (dict): Contains the following parameters:
center: Center Wavelength
blue: Blue limit in Angstrom
red: Red limit in Angstrom
alpha: Incidence angle (grating angle), in radians
beta: Diffraction angle (camera angle minus grating angle), in radians
pix1: Pixel One
pix2: Pixel Two
"""
# TODO (simon): find a definite solution for this, this only work
# TODO (simon): (a little) for one configuration
blue_correction_factor = -50 * u.angstrom
red_correction_factor = -37 * u.angstrom
grating_frequency = float(re.sub('[A-Za-z_-]',
'',
ccd.header['GRATING'])) / u.mm
grating_angle = float(ccd.header['GRT_ANG']) * u.deg
camera_angle = float(ccd.header['CAM_ANG']) * u.deg
# serial binning - dispersion binning
# parallel binning - spatial binning
serial_binning, parallel_binning = [
int(x) for x in ccd.header['CCDSUM'].split()]
pixel_count = len(ccd.data)
# Calculations
# TODO (simon): Check whether is necessary to remove the
# TODO (simon): slit_offset variable
alpha = grating_angle.to(u.rad)
beta = camera_angle.to(u.rad) - grating_angle.to(u.rad)
center_wavelength = (np.sin(alpha) + np.sin(beta)) / grating_frequency
limit_angle = np.arctan(pixel_count * ((pixel_size * serial_binning) / instrument_focal_length) / 2)
blue_limit = ((np.sin(alpha) + np.sin(beta - limit_angle.to(u.rad))) / grating_frequency).to(u.angstrom) + blue_correction_factor
red_limit = ((np.sin(alpha) + np.sin(beta + limit_angle.to(u.rad))) / grating_frequency).to(u.angstrom) + red_correction_factor
pixel_one = 0
pixel_two = 0
log.debug(
'Center Wavelength : {:.3f} Blue Limit : '
'{:.3f} Red Limit : {:.3f} '.format(center_wavelength.to(u.angstrom),
blue_limit,
red_limit))
spectral_characteristics = {'center': center_wavelength,
'blue': blue_limit,
'red': red_limit,
'alpha': alpha,
'beta': beta,
'pix1': pixel_one,
'pix2': pixel_two}
return spectral_characteristics
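# Hedged numerical sketch of the grating equation evaluated above, with
# made-up but plausible values for a 400 l/mm grating. It assumes
# astropy units are available as `u`, as elsewhere in this module.
def _example_grating_equation():
    grating_frequency = 400. / u.mm
    alpha = (7.5 * u.deg).to(u.rad)
    beta = (16.1 * u.deg).to(u.rad) - alpha
    center_wavelength = (np.sin(alpha) + np.sin(beta)) / grating_frequency
    return center_wavelength.to(u.angstrom)  # roughly 7000 Angstrom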
def get_twilight_time(date_obs):
"""Get end/start time of evening/morning twilight
Notes:
Taken from David Sanmartim's development
Args:
date_obs (list): List of all the dates from data.
Returns:
twilight_evening (str): Evening twilight time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
twilight_morning (str): Morning twilight time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
sun_set_time (str): Sun set time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
sun_rise_time (str): Sun rise time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
"""
# observatory(str): Observatory name.
observatory = 'SOAR Telescope'
geodetic_location = ['-70d44m01.11s', '-30d14m16.41s', 2748]
# longitude (str): Geographic longitude in string format
longitude = geodetic_location[0]
# latitude (str): Geographic latitude in string format.
latitude = geodetic_location[1]
# elevation (int): Geographic elevation in meters above sea level
elevation = geodetic_location[2]
# timezone (str): Time zone.
timezone = 'UTC'
# description(str): Observatory description
description = 'Soar Telescope on Cerro Pachon, Chile'
soar_loc = EarthLocation.from_geodetic(longitude,
latitude,
elevation * u.m,
ellipsoid='WGS84')
soar = Observer(name=observatory,
location=soar_loc,
timezone=timezone,
description=description)
time_first_frame, time_last_frame = Time(min(date_obs)), Time(
max(date_obs))
twilight_evening = soar.twilight_evening_astronomical(
Time(time_first_frame), which='nearest').isot
twilight_morning = soar.twilight_morning_astronomical(
Time(time_last_frame), which='nearest').isot
sun_set_time = soar.sun_set_time(
Time(time_first_frame), which='nearest').isot
sun_rise_time = soar.sun_rise_time(
Time(time_last_frame), which='nearest').isot
log.debug('Sun Set ' + sun_set_time)
log.debug('Sun Rise ' + sun_rise_time)
return (twilight_evening,
twilight_morning,
sun_set_time,
sun_rise_time)
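# Hypothetical usage sketch; the timestamps below are invented. In the
# pipeline, date_obs is gathered from the DATE-OBS keyword of every
# image of the night.
def _example_get_twilight_time():
    date_obs = ['2019-02-07T02:10:00.000', '2019-02-07T09:55:30.000']
    return get_twilight_time(date_obs=date_obs)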
def identify_targets(ccd,
fit_model,
background_threshold,
nfind=3,
profile_min_width=None,
profile_max_width=None,
plots=False):
"""Identify Spectroscopic Targets
Wrapper to the class `IdentifySpectroscopicTargets`.
Args:
ccd (CCDData): Image containing spectra
fit_model (str): Name of the model to be fitted `moffat` or `gaussian`.
background_threshold (int): Number of background levels for target
discrimination.
nfind (int): Maximum number of targets passing the background threshold
to be returned, they are order from most intense peak to least intense.
profile_min_width (float): Minimum FWHM (moffat) or STDDEV (gaussian) for spatial profile model.
profile_max_width (float): Maximum FWHM (moffat) or STDDEV (gaussian) for spatial profile model.
plots (bool): Flat for plotting results.
Returns:
identified_targets (list): List of models successfully fitted.
"""
identify = IdentifySpectroscopicTargets()
identified_targets = identify(ccd=ccd,
nfind=nfind,
background_threshold=background_threshold,
model_name=fit_model,
profile_min_width=profile_min_width,
profile_max_width=profile_max_width,
plots=plots)
return identified_targets
def identify_technique(target, obstype, slit, grating, wavmode, roi):
"""Identify whether is Imaging or Spectroscopic data
Args:
target (str): Target name as in the keyword `OBJECT` this is useful in
Automated aquisition mode, such as AEON.
obstype (str): Observation type as in `OBSTYPE`
slit (str): Value of `SLIT` keyword.
grating (str): Value of `GRATING` keyword.
wavmode (str): Value of `WAVMODE` keyword.
roi (str): Value of `ROI` keyword.
Returns:
Observing technique as a string. Either `Imaging` or `Spectroscopy`.
"""
if 'Spectroscopic' in roi or \
obstype in ['ARC', 'SPECTRUM', 'COMP'] or \
slit not in ['NO_MASK', '<NO MASK>'] or \
grating not in ['NO_GRATING', '<NO GRATING>'] or \
'_SP_' in target:
technique = 'Spectroscopy'
elif 'Imaging' in roi or \
obstype in ['EXPOSE'] or\
wavmode == 'IMAGING' or '_IM_' in target:
technique = 'Imaging'
else:
technique = 'Unknown'
return technique
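# Quick illustration of the discrimination rules above, using invented
# header values that should classify as spectroscopy.
def _example_identify_technique():
    technique = identify_technique(target='EXO2019_SP_400M1',
                                   obstype='SPECTRUM',
                                   slit='1.0" long slit',
                                   grating='SYZY_400',
                                   wavmode='400_M1',
                                   roi='Spectroscopic 2x2')
    assert technique == 'Spectroscopy'
    return technique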
def image_overscan(ccd, overscan_region, add_keyword=False):
"""Apply overscan correction to data
Uses ccdproc.subtract_overscan to perform the task.
Notes:
The overscan_region argument uses FITS convention, just like IRAF,
therefore it is 1-based, i.e. it starts at 1, not 0.
Args:
ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance to be
overscan corrected.
overscan_region (str): The overscan region in the format `[x1:x2,y1:y2]`
where x is the spectral axis and y is the spatial axis.
add_keyword (bool): Tells ccdproc whether to add a keyword or not.
Default False.
Returns:
ccd (CCDData): Overscan corrected :class:`~astropy.nddata.CCDData`
instance
"""
if overscan_region is not None:
log.debug(
'Applying overscan Correction: {:s}'.format(overscan_region))
ccd = ccdproc.subtract_overscan(ccd=ccd,
median=True,
fits_section=overscan_region,
add_keyword=add_keyword)
ccd.header['GSP_OVER'] = (overscan_region, 'Overscan region')
else:
log.debug("Overscan region is None, returning the original data.")
# ccd.header['GSP_OVER'] = ('none', 'Overscan region')
return ccd
def image_trim(ccd, trim_section, trim_type='trimsec', add_keyword=False):
"""Trim image to a given section
Notes:
The trim_section argument uses FITS convention, just like IRAF,
therefore it is 1-based, i.e. it starts at 1, not 0.
Args:
ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance.
trim_section (str): The trimming section in the format `[x1:x2,y1:y2]`
where x is the spectral axis and y is the spatial axis.
trim_type (str): trimsec or slit trim.
add_keyword (bool): Tells ccdproc whether to add a keyword or not.
Default False.
Returns:
ccd (CCDData): Trimmed :class:`~astropy.nddata.CCDData` instance
"""
if trim_section is not None:
ccd = ccdproc.trim_image(ccd=ccd,
fits_section=trim_section,
add_keyword=add_keyword)
if trim_type == 'trimsec':
ccd.header['GSP_TRIM'] = (trim_section, 'Trim section from TRIMSEC')
elif trim_type == 'slit':
ccd.header['GSP_SLIT'] = (trim_section,
'Slit trim section, slit illuminated '
'area only.')
else:
log.warning('Unrecognized trim type: {}'.format(trim_type))
ccd.header['GSP_TRIM'] = (trim_section,
'Image trimmed by unrecognized method: '
'{:s}'.format(trim_type))
else:
log.info("{:s} trim section is not "
"defined.".format(trim_type.capitalize()))
log.debug("Trim section is None, returning the same data.")
return ccd
def interpolate(spectrum, interpolation_size):
"""Creates an interpolated version of the input spectrum
This method creates an interpolated version of the input array; it is
used mainly for a spectrum but it can also be used with any
unidimensional array. The reason for doing interpolation is
that it allows finding the lines and their respective centers more
precisely.
Args:
spectrum (array): an uncalibrated spectrum or any unidimensional
array.
interpolation_size (int): Number of points to interpolate. (points added
between two existing ones)
Returns:
Two dimensional array containing x-axis and interpolated array.
The x-axis preserves original pixel values.
"""
x_axis = range(spectrum.size)
first_x = x_axis[0]
last_x = x_axis[-1]
new_x_axis = np.linspace(first_x,
last_x,
spectrum.size * interpolation_size)
tck = scipy.interpolate.splrep(x_axis, spectrum, s=0)
new_spectrum = scipy.interpolate.splev(new_x_axis, tck, der=0)
return [new_x_axis, new_spectrum]
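# Self-contained sketch: interpolating a synthetic one-dimensional
# spectrum. With interpolation_size=10 the output has ten times as many
# samples while preserving the original pixel range.
def _example_interpolate():
    spectrum = np.sin(np.linspace(0, 3 * np.pi, 100)) + 1.
    new_x_axis, new_spectrum = interpolate(spectrum,
                                           interpolation_size=10)
    assert len(new_spectrum) == 10 * spectrum.size
    return new_x_axis, new_spectrum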
def is_file_saturated(ccd, threshold):
"""Detects a saturated image
It counts the number of pixels above the saturation level, then finds
what percentage they represent, and if it is above the threshold it
will return True. The percentage threshold can be set using the command
line argument ``--saturation_threshold``.
Args:
ccd (CCDData): Image to be tested for saturation
threshold (float): Percentage of saturated pixels allowed. Default 1.
Returns:
True for saturated and False for non-saturated
"""
saturation_values = SaturationValues()
pixels_above_saturation = np.count_nonzero(
ccd.data[np.where(
ccd.data > saturation_values.get_saturation_value(
ccd=ccd))])
total_pixels = np.count_nonzero(ccd.data)
saturated_percent = (pixels_above_saturation * 100.) / total_pixels
if saturated_percent >= float(threshold):
log.warning(
"The current image has more than {:.2f} percent "
"of pixels above saturation_threshold level".format(float(threshold)))
return True
else:
return False
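# The percentage test above shown in isolation, with a made-up
# saturation level standing in for the camera-dependent value that
# SaturationValues provides in the pipeline.
def _example_saturation_percentage():
    data = np.random.randint(1, 65535, size=(100, 100))
    saturation_level = 60000  # hypothetical value
    pixels_above = np.count_nonzero(data > saturation_level)
    saturated_percent = (pixels_above * 100.) / np.count_nonzero(data)
    return saturated_percent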
def linearize_spectrum(data, wavelength_solution, plots=False):
"""Produces a linearized version of the spectrum
Storing wavelength solutions in a FITS header is not simple at all for
non-linear solutions therefore is easier for the final user and for the
development code to have the spectrum linearized. It first finds a
spline representation of the data, then creates a linear wavelength axis
(angstrom) and finally it resamples the data from the spline
representation to the linear wavelength axis.
It also applies a median filter of kernel size three to smooth the
linearized spectrum. Sometimes the splines produce funny things when
the original data is too steep.
Args:
data (Array): The non-linear spectrum
wavelength_solution (object): Mathematical model representing the
wavelength solution.
plots (bool): Whether to show the plots or not.
Returns:
linear_data (list): Contains two elements: Linear wavelength axis
and the smoothed linearized data itself.
"""
pixel_axis = range(len(data))
if any(np.isnan(data)):
log.error("There are NaN values in the data, cannot continue.")
sys.exit(1)
if wavelength_solution is not None:
x_axis = wavelength_solution(pixel_axis)
# Debugging leftover: plt.imshow raises TypeError for 1D data, which
# is silently ignored here.
try: # pragma: no cover
plt.imshow(data)
plt.show()
except TypeError:
pass
new_x_axis = np.linspace(x_axis[0], x_axis[-1], len(data))
tck = scipy.interpolate.splrep(x_axis, data, s=0)
linearized_data = scipy.interpolate.splev(new_x_axis,
tck,
der=0)
smoothed_linearized_data = signal.medfilt(linearized_data)
if plots: # pragma: no cover
fig6 = plt.figure(6)
plt.xlabel('Wavelength (Angstrom)')
plt.ylabel('Intensity (Counts)')
fig6.canvas.set_window_title('Linearized Data')
plt.plot(x_axis,
data,
color='k',
label='Data')
plt.plot(new_x_axis,
linearized_data,
color='r',
linestyle=':',
label='Linearized Data')
plt.plot(new_x_axis,
smoothed_linearized_data,
color='m',
alpha=0.5,
label='Smoothed Linearized Data')
fig6.tight_layout()
plt.legend(loc=3)
plt.show()
fig7 = plt.figure(7)
plt.xlabel('Pixels')
plt.ylabel('Angstroms')
fig7.canvas.set_window_title('Wavelength Solution')
plt.plot(x_axis, color='b', label='Non linear wavelength-axis')
plt.plot(new_x_axis, color='r', label='Linear wavelength-axis')
fig7.tight_layout()
plt.legend(loc=3)
plt.show()
linear_data = [new_x_axis, smoothed_linearized_data]
return linear_data
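# Hedged sketch: linearizing a synthetic spectrum with an invented
# quadratic wavelength solution built with astropy.modeling, mirroring
# the kind of model this function receives in the pipeline.
def _example_linearize_spectrum():
    wavelength_solution = models.Polynomial1D(degree=2,
                                              c0=3500., c1=1., c2=1e-5)
    data = np.random.random_sample(500) + 100.
    linear_wavelength, linear_data = linearize_spectrum(
        data, wavelength_solution=wavelength_solution)
    return linear_wavelength, linear_data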
def name_master_flats(header,
technique,
reduced_data,
sun_set,
sun_rise,
evening_twilight,
morning_twilight,
target_name='',
get=False):
"""Defines the name of a master flat or what master flat is compatible
with a given data
Given the header of a flat image, this method will look for certain
keywords that are unique to a given instrument configuration; therefore
they are used to discriminate compatibility.
It can be used to define a master flat's name when creating it or find
a base name to match existing master flat files thus finding a
compatible one for a given non-flat image.
Args:
header (object): Fits header. Instance of
:class:`~astropy.io.fits.header.Header`
technique (str): Observing technique, either Spectroscopy or
Imaging.
reduced_data (str): Full path to reduced data directory
sun_set (str): Sunset time formatted as "%Y-%m-%dT%H:%M:%S.%f"
sun_rise (str): Sunrise time formatted as "%Y-%m-%dT%H:%M:%S.%f"
evening_twilight (str): End of evening twilight formatted as
"%Y-%m-%dT%H:%M:%S.%f"
morning_twilight (str): Start of morning twilight in the format
"%Y-%m-%dT%H:%M:%S.%f"
target_name (str): Optional science target name to be added to the
master flat name.
get (bool): This option is used when trying to find a suitable
master flat for given data.
Returns:
A master flat name, or basename to find a match among existing
files.
"""
master_flat_name = os.path.join(reduced_data, 'master_flat')
sunset = datetime.datetime.strptime(sun_set,
"%Y-%m-%dT%H:%M:%S.%f")
sunrise = datetime.datetime.strptime(sun_rise,
"%Y-%m-%dT%H:%M:%S.%f")
afternoon_twilight = datetime.datetime.strptime(evening_twilight,
"%Y-%m-%dT%H:%M:%S.%f")
morning_twilight = datetime.datetime.strptime(morning_twilight,
"%Y-%m-%dT%H:%M:%S.%f")
date_obs = datetime.datetime.strptime(header['DATE-OBS'],
"%Y-%m-%dT%H:%M:%S.%f")
if target_name != '':
target_name = '_' + target_name
if not get:
# TODO (simon): There must be a pythonic way to do this
if afternoon_twilight < date_obs < morning_twilight:
dome_sky = '_night'
elif (sunset < date_obs < afternoon_twilight) or \
(morning_twilight < date_obs < sunrise):
dome_sky = '_sky'
else:
dome_sky = '_dome'
else:
dome_sky = '*'
if technique == 'Spectroscopy':
if header['GRATING'] != '<NO GRATING>':
flat_grating = '_' + re.sub('[A-Za-z_ ]',
'',
header['GRATING'])
# spectroscopic_mode is an instance of SpectroscopicMode
spectroscopic_mode = SpectroscopicMode()
wavmode = spectroscopic_mode(header=header)
else:
flat_grating = '_no_grating'
wavmode = ''
flat_slit = re.sub('[A-Za-z_ ]',
'',
header['SLIT'])
filter2 = header['FILTER2']
if filter2 == '<NO FILTER>':
filter2 = ''
else:
filter2 = '_' + filter2
master_flat_name += target_name \
+ flat_grating \
+ wavmode \
+ filter2 \
+ '_' \
+ flat_slit \
+ dome_sky \
+ '.fits'
elif technique == 'Imaging':
flat_filter = re.sub('[- ]', '_', header['FILTER'])
flat_filter = re.sub('[<> ]', '', flat_filter)
master_flat_name += '_' + flat_filter + dome_sky + '.fits'
return master_flat_name
def normalize_master_flat(master, name, method='simple', order=15):
"""Master flat normalization method
This function normalizes a master flat in three possible ways:
*mean*: simply divide the data by its mean
*simple*: Calculates the median along the spatial axis in order to obtain
the dispersion profile. Then fits a
:class:`~astropy.modeling.polynomial.Chebyshev1D` model and applies it to
all the data.
*full*: This is for experimental purposes only because it takes a lot of
time to process. It will fit a model to each line along the dispersion axis
and then divide it by the fitted model. I do not recommend this method
unless you have a good reason as well as a very powerful computer.
Args:
master (CCDData): Master flat. Has to be a
:class:`~astropy.nddata.CCDData` instance.
name (str): Full path of master flat prior to normalization.
method (str): Normalization method, 'mean', 'simple' or 'full'.
order (int): Order of the polynomial to be fitted.
Returns:
master (CCDData): The normalized master flat.
:class:`~astropy.nddata.CCDData` instance.
"""
assert isinstance(master, ccdproc.CCDData)
master = master.copy()
# define new name, base path and full new name
new_name = 'norm_' + os.path.basename(name)
path = os.path.dirname(name)
norm_name = os.path.join(path, new_name)
if method == 'mean':
log.debug('Normalizing by mean')
master.data /= master.data.mean()
master.header['GSP_NORM'] = ('mean', 'Flat normalization method')
elif method == 'simple' or method == 'full':
log.debug('Normalizing flat by {:s} model'.format(method))
# Initialize Fitting models and fitter
model_init = models.Chebyshev1D(degree=order)
model_fitter = fitting.LevMarLSQFitter()
# get data shape
x_size, y_size = master.data.shape
x_axis = range(y_size)
if method == 'simple':
# get profile along dispersion axis to fit a model to use for
# normalization
profile = np.median(master.data, axis=0)
# do the actual fit
fit = model_fitter(model_init, x_axis, profile)
# convert fit into an array
fit_array = fit(x_axis)
# pythonic way to divide an array by a vector
master.data = master.data / fit_array[None, :]
# master.header.add_history('Flat Normalized by simple model')
master.header['GSP_NORM'] = ('simple', 'Flat normalization method')
elif method == 'full':
log.warning('This part of the code was left here for '
'experimental purposes only')
log.warning('This procedure takes a long time to process, you might '
'want to use another method such as "simple" or '
'"mean".')
for i in range(x_size):
fit = model_fitter(model_init, x_axis, master.data[i])
master.data[i] = master.data[i] / fit(x_axis)
master.header['GSP_NORM'] = ('full', 'Flat normalization method')
# write normalized flat to a file
write_fits(ccd=master,
full_path=norm_name,
parent_file=name)
return master, norm_name
def ra_dec_to_deg(right_ascension, declination):
"""Converts right ascension and declination to degrees
Args:
right_ascension (str): Right ascension in the format hh:mm:ss.sss
declination (str): Declination in the format dd:mm:ss.sss
Returns:
right_ascension_deg (float): Right ascension in degrees
declination_deg (float): Declination in degrees
"""
right_ascension = right_ascension.split(":")
declination = declination.split(":")
# RIGHT ASCENSION conversion
right_ascension_deg = (float(right_ascension[0])
+ (float(right_ascension[1])
+ (float(right_ascension[2]) / 60.)) / 60.) * \
(360. / 24.)
# DECLINATION conversion
# The sign is taken from the string because for declinations between
# 0 and -1 degree float('-00') evaluates to 0.0 and the sign would be
# lost by a float comparison.
if declination[0].strip().startswith('-'):
sign = -1
else:
sign = 1
declination_deg = sign * (abs(float(declination[0]))
+ (float(declination[1])
+ (float(declination[2]) / 60.)) / 60.)
return right_ascension_deg, declination_deg
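# Worked example for the conversion above; the coordinates are invented.
# The second case, a declination between 0 and -1 degree, is the reason
# the sign is taken from the string rather than from float('-00').
def _example_ra_dec_to_deg():
    ra_deg, dec_deg = ra_dec_to_deg('16:22:43.2', '-30:14:16.4')
    # 16h 22m 43.2s -> 245.68 deg, -30d 14m 16.4s -> about -30.2379 deg
    assert abs(ra_deg - 245.68) < 1e-2
    ra_deg_2, dec_deg_2 = ra_dec_to_deg('00:30:00.0', '-00:30:00.0')
    assert dec_deg_2 == -0.5
    return ra_deg, dec_deg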
def read_fits(full_path, technique='Unknown'):
"""Read fits files while adding important information to the header
It is necessary to record certain data to the image header so that's the
reason for this wrapper of :meth:`~astropy.nddata.CCDData.read` to exist.
It will add the following keywords. In most cases, if the keyword already
exists it will be skipped, except for `GSP_FNAM`, `GSP_PATH` and `BUNIT`.
GSP_VERS: Goodman Spectroscopic Pipeline version number
GSP_ONAM: Original File name
GSP_PNAM: Parent file name or name of the file from which this one
originated after some process or just a copy.
GSP_FNAM: Current file name.
GSP_PATH: Path to file at the moment of reading.
GSP_TECH: Observing technique. `Spectroscopy` or `Imaging`.
GSP_DATE: Date of first reading.
GSP_OVER: Overscan region.
GSP_TRIM: Trim section (region).
GSP_SLIT: Slit trim section, obtained from the slit illuminated area.
GSP_BIAS: Master bias image used. Default `none`.
GSP_FLAT: Master flat image used. Default `none`.
GSP_SCTR: Science target file
GSP_NORM: Flat normalization method.
GSP_COSM: Cosmic ray rejection method.
GSP_EXTR: Extraction window at first column
GSP_BKG1: First background extraction zone
GSP_BKG2: Second background extraction zone
GSP_WRMS: Wavelength solution RMS Error.
GSP_WPOI: Number of points used to calculate the wavelength solution
Error.
GSP_WREJ: Number of points rejected.
Args:
full_path (str): Full path to file.
technique (str): Observing technique. 'Imaging' or 'Spectroscopy'.
Returns:
Instance of :class:`~astropy.nddata.CCDData` corresponding to the file
from `full_path`.
"""
assert os.path.isfile(full_path)
ccd = ccdproc.CCDData.read(full_path, unit=u.adu)
all_keys = [key for key in ccd.header.keys()]
ccd.header.set('GSP_VERS',
value=__version__,
comment='Goodman Spectroscopic Pipeline Version')
if 'GSP_ONAM' not in all_keys:
ccd.header.set('GSP_ONAM',
value=os.path.basename(full_path),
comment='Original file name')
if 'GSP_PNAM' not in all_keys:
ccd.header.set('GSP_PNAM',
value=os.path.basename(full_path),
comment='Parent file name')
ccd.header.set('GSP_FNAM',
value=os.path.basename(full_path),
comment='Current file name')
ccd.header.set('GSP_PATH',
value=os.path.dirname(full_path),
comment='Location at moment of reduce')
if 'GSP_TECH' not in all_keys:
ccd.header.set('GSP_TECH',
value=technique,
comment='Observing technique')
if 'GSP_DATE' not in all_keys:
ccd.header.set('GSP_DATE',
value=time.strftime("%Y-%m-%d"),
comment='Processing date')
if 'GSP_OVER' not in all_keys:
ccd.header.set('GSP_OVER',
value='none',
comment='Overscan region')
if 'GSP_TRIM' not in all_keys:
ccd.header.set('GSP_TRIM',
value='none',
comment='Trim section')
if 'GSP_SLIT' not in all_keys:
ccd.header.set('GSP_SLIT',
value='none',
comment='Slit trim section, slit illuminated area only')
if 'GSP_BIAS' not in all_keys:
ccd.header.set('GSP_BIAS',
value='none',
comment='Master bias image')
if 'GSP_FLAT' not in all_keys:
ccd.header.set('GSP_FLAT',
value='none',
comment='Master flat image')
if 'GSP_NORM' not in all_keys:
ccd.header.set('GSP_NORM',
value='none',
comment='Flat normalization method')
if 'GSP_COSM' not in all_keys:
ccd.header.set('GSP_COSM',
value='none',
comment='Cosmic ray rejection method')
if 'GSP_TMOD' not in all_keys:
ccd.header.set('GSP_TMOD',
value='none',
comment='Model name used to fit trace')
if 'GSP_EXTR' not in all_keys:
ccd.header.set('GSP_EXTR',
value='none',
comment='Extraction window at first column')
if 'GSP_BKG1' not in all_keys:
ccd.header.set('GSP_BKG1',
value='none',
comment='First background extraction zone')
if 'GSP_BKG2' not in all_keys:
ccd.header.set('GSP_BKG2',
value='none',
comment='Second background extraction zone')
if 'GSP_WRMS' not in all_keys:
ccd.header.set('GSP_WRMS',
value='none',
comment='Wavelength solution RMS Error')
if 'GSP_WPOI' not in all_keys:
ccd.header.set('GSP_WPOI',
value='none',
comment='Number of points used to '
'calculate wavelength solution')
if 'GSP_WREJ' not in all_keys:
ccd.header.set('GSP_WREJ',
value='none',
comment='Number of points rejected')
if '' not in all_keys:
ccd.header.add_blank('-- Goodman Spectroscopic Pipeline --',
before='GSP_VERS')
ccd.header.add_blank('-- GSP END --', after='GSP_WREJ')
ccd.header.set('BUNIT', after='CCDSUM')
return ccd
def recenter_broad_lines(lamp_data, lines, order):
"""Recenter broad lines
Notes:
This method is used to recenter broad lines only; there is a special
method for dealing with narrower lines.
Args:
lamp_data (ndarray): numpy.ndarray instance. It contains the lamp
data.
lines (list): A line list in pixel values.
order (float): A rough estimate of the FWHM of the lines in pixels
in the data. It is calculated using the slit size divided by the
pixel scale multiplied by the binning.
Returns:
A list containing the recentered line positions.
"""
# TODO (simon): use slit size information for a square function
# TODO (simon): convolution
new_line_centers = []
gaussian_kernel = Gaussian1DKernel(stddev=2.)
lamp_data = convolve(lamp_data, gaussian_kernel)
for line in lines:
lower_index = max(0, int(line - order))
upper_index = min(len(lamp_data), int(line + order))
lamp_sample = lamp_data[lower_index:upper_index]
x_axis = np.linspace(lower_index, upper_index, len(lamp_sample))
line_max = np.max(lamp_sample)
gaussian_model = models.Gaussian1D(amplitude=line_max,
mean=line,
stddev=order)
fit_gaussian = fitting.LevMarLSQFitter()
fitted_gaussian = fit_gaussian(gaussian_model, x_axis, lamp_sample)
new_line_centers.append(fitted_gaussian.mean.value)
return new_line_centers
def recenter_lines(data, lines, plots=False):
"""Finds the centroid of an emission line
For every line center (pixel value) it will scan left first until the
data stops decreasing (it assumes it is an emission line), and then it
will scan right until the data stops decreasing too. Having defined
those limits, it will use the line data in between to calculate the
centroid.
Notes:
This method is used to recenter relatively narrow lines only; there
is a special method for dealing with broad lines.
Args:
data (ndarray): numpy.ndarray instance. or the data attribute of a
:class:`~astropy.nddata.CCDData` instance.
lines (list): A line list in pixel values.
plots (bool): If True will plot spectral line as well as the input
center and the recentered value.
Returns:
A list containing the recentered line positions.
"""
new_center = []
x_size = data.shape[0]
median = np.median(data)
for line in lines:
# TODO (simon): Check if this definition is valid, so far is not
# TODO (cont..): critical
left_limit = 0
right_limit = 1
condition = True
left_index = int(line)
while condition and left_index - 2 > 0:
if (data[left_index - 1] > data[left_index]) and \
(data[left_index - 2] > data[left_index - 1]):
condition = False
left_limit = left_index
elif data[left_index] < median:
condition = False
left_limit = left_index
else:
left_limit = left_index
left_index -= 1
# id right limit
condition = True
right_index = int(line)
while condition and right_index + 2 < x_size - 1:
if (data[right_index + 1] > data[right_index]) and \
(data[right_index + 2] > data[right_index + 1]):
condition = False
right_limit = right_index
elif data[right_index] < median:
condition = False
right_limit = right_index
else:
right_limit = right_index
right_index += 1
index_diff = [abs(line - left_index), abs(line - right_index)]
sub_x_axis = range(line - min(index_diff),
(line + min(index_diff)) + 1)
sub_data = data[line - min(index_diff):(line + min(index_diff)) + 1]
centroid = np.sum(sub_x_axis * sub_data) / np.sum(sub_data)
# checks for asymmetries
differences = [abs(data[line] - data[left_limit]),
abs(data[line] - data[right_limit])]
if max(differences) / min(differences) >= 2.:
if plots: # pragma: no cover
plt.axvspan(line - 1, line + 1, color='g', alpha=0.3)
new_center.append(line)
else:
new_center.append(centroid)
if plots: # pragma: no cover
fig, ax = plt.subplots(1, 1)
fig.canvas.set_window_title('Lines Detected in Lamp')
ax.axhline(median, color='b')
ax.plot(range(len(data)),
data,
color='k',
label='Lamp Data')
for line in lines:
ax.axvline(line + 1,
color='k',
linestyle=':',
label='First Detected Center')
for center in new_center:
ax.axvline(center,
color='k',
linestyle='-.',
label='New Center')
plt.show()
return new_center
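# The centroid ("center of mass") formula at the heart of recenter_lines,
# shown in isolation on a synthetic symmetric line, where the centroid
# coincides with the peak.
def _example_line_centroid():
    x = np.arange(11)
    line_profile = np.exp(-0.5 * ((x - 5.) / 1.5) ** 2)
    centroid = np.sum(x * line_profile) / np.sum(line_profile)
    assert abs(centroid - 5.) < 1e-6
    return centroid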
def record_trace_information(ccd, trace_info):
"""Adds trace information to fits header
Notes:
Example of trace_info.
OrderedDict([('GSP_TMOD', ['Polynomial1D', 'Model name used to fit trace']),
('GSP_TORD', [2, 'Degree of the model used to fit target trace']),
('GSP_TC00', [80.92244303468138, 'Parameter c0']),
('GSP_TC01', [0.0018921968204536187, 'Parameter c1']),
('GSP_TC02', [-7.232545448865748e-07, 'Parameter c2']),
('GSP_TERR', [0.18741058188097284, 'RMS error of target trace'])])
Args:
ccd (CCDData): ccdproc.CCDData instance to have trace info recorded into its
header.
trace_info (OrderedDict): Ordered Dictionary with a set of fits keywords
associated to a list of values corresponding to value and comment.
Returns:
ccd (CCDData): Same ccdproc.CCDData instance with the header modified.
"""
last_keyword = None
for info_key in trace_info:
info_value, info_comment = trace_info[info_key]
log.debug(
"Adding trace information: "
"{:s} = {:s} / {:s}".format(info_key,
str(info_value),
info_comment))
if last_keyword is None:
ccd.header.set(info_key,
value=info_value,
comment=info_comment)
last_keyword = info_key
else:
ccd.header.set(info_key,
value=info_value,
comment=info_comment,
after=last_keyword)
last_keyword = info_key
return ccd
def save_extracted(ccd, destination, prefix='e', target_number=1):
"""Save extracted spectrum while adding a prefix.
Args:
ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance
destination (str): Path where the file will be saved.
prefix (str): Prefix to be added to images. Default `e`.
target_number (int): Sequential number of spectroscopic target.
Returns:
:class:`~astropy.nddata.CCDData` instance of the image just recorded.
although it is not really necessary.
"""
assert isinstance(ccd, ccdproc.CCDData)
assert os.path.isdir(destination)
file_name = ccd.header['GSP_FNAM']
if target_number > 0:
new_suffix = '_target_{:d}.fits'.format(target_number)
file_name = re.sub('.fits', new_suffix, file_name)
if ccd.header['OBSTYPE'] in ['COMP', 'ARC']:
extraction_region = re.sub(':','-', ccd.header['GSP_EXTR'])
file_name = re.sub('.fits', '_{:s}.fits'.format(extraction_region),
file_name)
new_file_name = prefix + file_name
else:
new_file_name = prefix + file_name
log.info("Saving uncalibrated(w) extracted spectrum to file: "
"{:s}".format(new_file_name))
full_path = os.path.join(destination, new_file_name)
ccd = write_fits(ccd=ccd, full_path=full_path, parent_file=file_name)
return ccd
def search_comp_group(object_group, comp_groups, reference_data):
"""Search for a suitable comparison lamp group
In case a science target was observed without comparison lamps, usually
right before or right after, this function will look for a compatible set
obtained at a different time or pointing.
Notes:
This methodology is not recommended for radial velocity studies.
Args:
object_group (DataFrame): A :class:`~pandas.DataFrame` instances
containing a group of images for a given scientific target.
comp_groups (list): A list in which every element is a
:class:`~pandas.DataFrame`
that contains information regarding groups of comparison lamps.
reference_data (ReferenceData): Instance of
`goodman.pipeline.core.ReferenceData` contains all information
related to the reference lamp library.
Returns:
comp_group (DataFrame): The first compatible comparison lamp group
found. Raises `NoMatchFound` if there is no compatible group.
"""
log.debug('Finding a suitable comparison lamp group')
object_confs = object_group.groupby(['grating',
'cam_targ',
'grt_targ',
'filter',
'filter2']
).size().reset_index()
# .rename(columns={0: 'count'})
for comp_group in comp_groups:
if ((comp_group['grating'] == object_confs.iloc[0]['grating']) &
(comp_group['cam_targ'] == object_confs.iloc[0]['cam_targ']) &
(comp_group['grt_targ'] == object_confs.iloc[0]['grt_targ']) &
(comp_group['filter'] == object_confs.iloc[0]['filter']) &
(comp_group['filter2'] == object_confs.iloc[0]['filter2']
)).all():
if reference_data.check_comp_group(comp_group) is not None:
log.debug('Found a matching comparison lamp group')
return comp_group
raise NoMatchFound
def setup_logging(debug=False, generic=False): # pragma: no cover
"""configures logging
Notes:
Logging file name is set to default 'goodman_log.txt'.
If --debug is activated then the format of the message is different.
"""
log_filename = 'goodman_log.txt'
if '--debug' in sys.argv or debug:
log_format = '[%(asctime)s][%(levelname)8s]: %(message)s ' \
'[%(module)s.%(funcName)s:%(lineno)d]'
logging_level = logging.DEBUG
else:
log_format = '[%(asctime)s][%(levelname).1s]: %(message)s'
logging_level = logging.INFO
date_format = '%H:%M:%S'
formatter = logging.Formatter(fmt=log_format,
datefmt=date_format)
logging.basicConfig(level=logging_level,
format=log_format,
datefmt=date_format)
log = logging.getLogger(__name__)
coloredlogs.install(level=logging_level, logger=log, fmt=log_format)
file_handler = logging.FileHandler(filename=log_filename)
file_handler.setFormatter(fmt=formatter)
file_handler.setLevel(level=logging_level)
log.addHandler(file_handler)
if not generic:
log.info("Starting Goodman HTS Pipeline Log")
log.info("Local Time : {:}".format(
datetime.datetime.now()))
log.info("Universal Time: {:}".format(
datetime.datetime.utcnow()))
try:
latest_release = check_version.get_last()
if "dev" in __version__:
log.warning("Running Development version: {:s}".format(__version__))
log.info("Latest Release: {:s}".format(latest_release))
elif check_version.am_i_updated(__version__):
if __version__ == latest_release:
log.info("Pipeline Version: {:s} (latest)".format(__version__))
else:
log.warning("Current Version: {:s}".format(__version__))
log.info("Latest Release: {:s}".format(latest_release))
else:
log.warning("Current Version '{:s}' is outdated.".format(
__version__))
log.info("Latest Release: {:s}".format(latest_release))
except ConnectionRefusedError:
log.error('Unauthorized GitHub API Access reached maximum')
log.info("Current Version: {:s}".format(__version__))
def trace(ccd,
model,
trace_model,
model_fitter,
sampling_step,
nfwhm=1,
plots=False):
"""Find the trace of a spectrum
This function is called by the `trace_targets` function; the difference is
that it takes only single models, not `CompoundModels`, so this function
is called once per target. `CompoundModels` are a bit tricky when
you need each model separated, so all `CompoundModels` have been removed.
Notes:
This method forces the trace to stay within a rectangular region
centered on `model.mean.value` with a half-width of `nfwhm` times the
profile's FWHM; this allows the tracing of low SNR targets. The
assumption is valid since the spectra are always well aligned with the
detector's pixel columns (the dispersion axis).
Args:
ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance, 2D image.
model (Model): An astropy.modeling.Model instance that contains
information regarding the target to be traced.
trace_model (object): An astropy.modeling.Model instance, usually a low
order polynomial.
model_fitter (Fitter): An astropy.modeling.fitting.Fitter instance. Will
fit the sampled points to construct the trace model
sampling_step (int): Step for sampling the spectrum.
nfwhm (int): Number of fwhm to each side of the mean to be used for
searching the trace.
plots (bool): Toggles debugging plot
Returns:
An `astropy.modeling.Model` instance, that defines the trace of the
spectrum.
"""
assert isinstance(ccd, ccdproc.CCDData)
assert isinstance(model, Model)
assert isinstance(trace_model, Model)
spatial_length, dispersion_length = ccd.data.shape
sampling_axis = range(0, dispersion_length, sampling_step)
sample_values = []
if model.__class__.name == 'Gaussian1D':
model_fwhm = model.fwhm
model_mean = model.mean.value
elif model.__class__.name == 'Moffat1D':
model_fwhm = model.fwhm
model_mean = model.x_0.value
else:
raise NotImplementedError
sample_center = float(model_mean)
lower_limit_list = []
upper_limit_list = []
lower_limit = None
upper_limit = None
for point in sampling_axis:
lower_limit = np.max([0, int(sample_center - nfwhm * model_fwhm)])
upper_limit = np.min([int(sample_center + nfwhm * model_fwhm),
spatial_length])
lower_limit_list.append(lower_limit)
upper_limit_list.append(upper_limit)
sample = ccd.data[lower_limit:upper_limit, point:point + sampling_step]
sample_median = np.median(sample, axis=1)
try:
sample_peak = np.argmax(sample_median)
except ValueError: # pragma: no cover
log.error('Nfwhm {}'.format(nfwhm))
log.error('Model Stddev {}'.format(model_fwhm))
log.error('sample_center {}'.format(sample_center))
log.error('sample {}'.format(sample))
log.error('sample_median {}'.format(sample_median))
log.error('lower_limit {}'.format(lower_limit))
log.error('upper_limit {}'.format(upper_limit))
log.error('point {}'.format(point))
log.error('point + sampling_step {}'.format(point + sampling_step))
log.error("Spatial length: {}, Dispersion length {}".format(
spatial_length,
dispersion_length))
sys.exit()
sample_values.append(sample_peak + lower_limit)
if np.abs(sample_peak + lower_limit - model_mean)\
< nfwhm * model_fwhm:
sample_center = int(sample_peak + lower_limit)
else:
sample_center = float(model_mean)
trace_model.c2.fixed = True
fitted_trace = model_fitter(trace_model, sampling_axis, sample_values)
sampling_differences = [
(fitted_trace(sampling_axis[i]) - sample_values[i]) ** 2
for i in range(len(sampling_axis))]
rms_error = np.sqrt(
np.sum(np.array(sampling_differences))/len(sampling_differences))
log.debug("RMS Error of unclipped trace differences {:.3f}".format(
rms_error))
clipped_values = sigma_clip(sampling_differences,
sigma=2,
maxiters=3,
cenfunc=np.ma.median)
if np.ma.is_masked(clipped_values):
_sampling_axis = list(sampling_axis)
_sample_values = list(sample_values)
sampling_axis = []
sample_values = []
for i in range(len(clipped_values)):
if clipped_values[i] is not np.ma.masked:
sampling_axis.append(_sampling_axis[i])
sample_values.append(_sample_values[i])
log.debug("Re-fitting the trace for a better trace.")
trace_model.c2.fixed = False
fitted_trace = model_fitter(trace_model, sampling_axis, sample_values)
sampling_differences = [
(fitted_trace(sampling_axis[i]) - sample_values[i]) ** 2 for i in
range(len(sampling_axis))]
rms_error = np.sqrt(
np.sum(np.array(sampling_differences)) / len(sampling_differences))
log.debug(
"RMS Error after sigma-clipping trace differences {:.3f}".format(
rms_error))
trace_info = collections.OrderedDict()
trace_info['GSP_TMOD'] = [fitted_trace.__class__.__name__,
'Model name used to fit trace']
trace_info['GSP_TORD'] = [fitted_trace.degree,
'Degree of the model used to fit target trace']
for i in range(fitted_trace.degree + 1):
trace_info['GSP_TC{:02d}'.format(i)] = [
fitted_trace.__getattribute__('c{:d}'.format(i)).value,
'Parameter c{:d}'.format(i)]
trace_info['GSP_TERR'] = [rms_error, 'RMS error of target trace']
log.info("Target tracing RMS error: {:.3f}".format(rms_error))
if plots: # pragma: no cover
z1 = np.mean(ccd.data) - 0.5 * np.std(ccd.data)
z2 = np.median(ccd.data) + np.std(ccd.data)
fig, ax = plt.subplots()
fig.canvas.set_window_title(ccd.header['GSP_FNAM'])
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title("Tracing information\n{:s}\n"
"RMS Error {:.2f}".format(ccd.header['GSP_FNAM'],
rms_error))
ax.imshow(ccd.data, clim=(z1, z2), cmap='gray')
ax.plot(sampling_axis,
sample_values,
color='b',
marker='o',
alpha=0.4,
label='Sampling points')
sampling_axis_limits = range(0, dispersion_length, sampling_step)
low_span = fitted_trace(sampling_axis_limits) - (fitted_trace(sampling_axis_limits) - np.mean(lower_limit_list))
up_span = fitted_trace(sampling_axis_limits) + (np.mean(upper_limit_list) - fitted_trace(sampling_axis_limits))
ax.fill_between(sampling_axis_limits,
low_span,
up_span,
where=up_span > low_span,
facecolor='g',
interpolate=True,
alpha=0.3,
label='Approximate extraction window')
ax.plot(fitted_trace(range(dispersion_length)),
color='r',
linestyle='--',
label='Fitted Trace Model')
# plt.plot(model(range(spatial_length)))
ax.legend(loc='best')
plt.tight_layout()
if plt.isinteractive():
plt.draw()
plt.pause(2)
else:
plt.show()
return fitted_trace, trace_info
def trace_targets(ccd, target_list, sampling_step=5, pol_deg=2, nfwhm=5,
plots=False):
"""Find the trace of the target's spectrum on the image
This function defines a low order polynomial that traces the location of
the spectrum. The attributes pol_deg and sampling_step define the polynomial
degree and the spacing in pixels for the samples. For every sample a
gaussian model is fitted and the center (mean) is recorded and since
spectrum traces vary smoothly this value is used as a new center for the
base model used to fit the spectrum profile.
Notes:
This doesn't work for extended sources. Also, this calls the function
`trace` to do the actual tracing; the difference is that this method
operates at a higher level.
Args:
ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData`
target_list (list): List of single target profiles.
sampling_step (int): Frequency of sampling in pixels
pol_deg (int): Polynomial degree for fitting the trace
nfwhm (int): Number of fwhm from the spatial profile center to search
for a target. Default 5.
plots (bool): If True will show plots (debugging).
Returns:
all_traces (list): List that contains traces that are
astropy.modeling.Model instance
"""
# added two assert for debugging purposes
assert isinstance(ccd, ccdproc.CCDData)
assert all([isinstance(profile, Model) for profile in target_list])
# Initialize model fitter
model_fitter = fitting.LevMarLSQFitter()
# Initialize the model to fit the traces
trace_model = models.Polynomial1D(degree=pol_deg)
# List that will contain all the Model instances corresponding to traced
# targets
all_traces = []
for profile in target_list:
single_trace, trace_info = trace(ccd=ccd,
model=profile,
trace_model=trace_model,
model_fitter=model_fitter,
sampling_step=sampling_step,
nfwhm=nfwhm,
plots=plots)
if 0 < single_trace.c0.value < ccd.shape[0]:
log.debug('Adding trace to list')
all_traces.append([single_trace, profile, trace_info])
else:
log.error("Unable to trace target.")
log.error('Trace is out of boundaries. Center: '
'{:.4f}'.format(single_trace.c0.value))
if plots: # pragma: no cover
z1 = np.mean(ccd.data) - 0.5 * np.std(ccd.data)
z2 = np.median(ccd.data) + np.std(ccd.data)
fig, ax = plt.subplots()
fig.canvas.set_window_title(ccd.header['GSP_FNAM'])
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title("Trace(s) for {:s}".format(ccd.header['GSP_FNAM']))
ax.imshow(ccd.data, clim=(z1, z2), cmap='gray')
ax.plot([], color='r', label='Trace(s)')
for strace, prof, trace_info in all_traces:
ax.plot(strace(range(ccd.data.shape[1])), color='r')
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return all_traces
def validate_ccd_region(ccd_region, regexp=r'^\[\d*:\d*,\d*:\d*\]$'):
"""Validates that a ccd region string has the format '[x1:x2,y1:y2]'"""
compiled_reg_exp = re.compile(regexp)
if not compiled_reg_exp.match(ccd_region):
raise SyntaxError("ccd regions must be defined in the format "
"'[x1:x2,y1:y2]'")
else:
return True
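# Quick illustration of the validator above: a well-formed region string
# passes, anything else raises SyntaxError.
def _example_validate_ccd_region():
    assert validate_ccd_region('[1:100,1:100]')
    try:
        validate_ccd_region('1:100,1:100')
    except SyntaxError:
        pass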
def write_fits(ccd,
full_path,
combined=False,
parent_file=None,
overwrite=True):
"""Write fits while adding information to the header.
This is a wrapper for allowing to save files while being able to add
information into the header. Mostly for historical reasons.
Args:
ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance to be saved
to fits.
full_path (str): Full path of file.
combined (bool): True if `ccd` is the result of combining images.
parent_file (str): Name of the file from which ccd originated. If
combined is True this will be set to `combined`.
overwrite (bool): Overwrite files, default True.
Returns:
:class:`~astropy.nddata.CCDData` instance.
"""
assert isinstance(ccd, ccdproc.CCDData)
if os.path.isabs(full_path) and not os.path.isdir(os.path.dirname(full_path)):
log.warning("Directory {} does not exist. Creating it right now."
"".format(os.path.dirname(full_path)))
# makedirs also creates any missing intermediate directories
os.makedirs(os.path.dirname(full_path))
# Original File Name
# This should be set only once.
if combined:
ccd.header.set('GSP_ONAM',
value=os.path.basename(full_path))
ccd.header.set('GSP_PNAM',
value='combined')
# Parent File Name
if not combined and parent_file is not None:
ccd.header.set('GSP_PNAM',
value=os.path.basename(parent_file))
# Current File Name
ccd.header.set('GSP_FNAM', value=os.path.basename(full_path))
ccd.header.set('GSP_PATH', value=os.path.dirname(full_path))
# write to file
log.info("Saving FITS file to {:s}".format(os.path.basename(full_path)))
ccd.write(full_path, overwrite=overwrite)
assert os.path.isfile(full_path)
return ccd
# classes definition
class GenerateDcrParFile(object):
"""Creates dcr.par file based on lookup table
`dcr` parameters depend heavily on binning, this class generates a file
using the default format. The lookup table considers camera and binning.
"""
_format = [
"THRESH = {:.1f} // Threshold (in STDDEV)",
"XRAD = {:d} // x-radius of the box (size = 2 * radius)",
"YRAD = {:d} // y-radius of the box (size = 2 * radius)",
"NPASS = {:d} // Maximum number of cleaning passes",
"DIAXIS = {:d} // Dispersion axis: 0 - no dispersion, 1 - X, 2 - Y",
"LRAD = {:d} // Lower radius of region for replacement statistics",
"URAD = {:d} // Upper radius of region for replacement statistics",
"GRAD = {:d} // Growing radius",
"VERBOSE = {:d} // Verbose level [0,1,2]",
"END"]
_columns = ['parameter',
'red-1',
'red-2',
'red-3',
'blue-1',
'blue-2',
'blue-3']
_lookup = [
['thresh', 3.0, 4.0, 3.0, 3.0, 3.0, 3.0],
['xrad', 9, 7, 9, 8, 9, 9],
['yrad', 9, 9, 9, 8, 9, 9],
['npass', 5, 5, 5, 5, 5, 5],
['diaxis', 0, 0, 0, 0, 0, 0],
['lrad', 1, 1, 1, 1, 1, 1],
['urad', 3, 3, 3, 3, 3, 3],
['grad', 1, 0, 1, 1, 1, 1],
['verbose', 1, 1, 1, 1, 1, 1]
]
def __init__(self, par_file_name='dcr.par'):
"""
Args:
par_file_name:
"""
self._file_name = par_file_name
self._df = pandas.DataFrame(self._lookup, columns=self._columns)
self._binning = "{:s}-{:s}"
self._data_format = "\n".join(self._format)
def __call__(self, instrument='Red', binning='1', path='default'):
"""
Args:
instrument (str): Instrument from INSTCONF keyword
binning (str): Serial (dispersion) Binning from the header.
path (str): Directory where to save the file.
"""
assert any([instrument == option for option in ['Red', 'Blue']])
b = self._binning.format(instrument.lower(), binning)
self._data_format = self._data_format.format(
self._df[b][self._df.parameter == 'thresh'].values[0],
int(self._df[b][self._df.parameter == 'xrad'].values[0]),
int(self._df[b][self._df.parameter == 'yrad'].values[0]),
int(self._df[b][self._df.parameter == 'npass'].values[0]),
int(self._df[b][self._df.parameter == 'diaxis'].values[0]),
int(self._df[b][self._df.parameter == 'lrad'].values[0]),
int(self._df[b][self._df.parameter == 'urad'].values[0]),
int(self._df[b][self._df.parameter == 'grad'].values[0]),
int(self._df[b][self._df.parameter == 'verbose'].values[0]))
self._create_file(path=path)
def _create_file(self, path):
"""Creates `dcr.par` file
Args:
path (str): Path to where to save the `dcr.par` file.
"""
if os.path.isdir(path):
full_path = os.path.join(path, self._file_name)
else:
full_path = os.path.join(os.getcwd(), self._file_name)
with open(full_path, 'w') as dcr_par:
dcr_par.write(self._data_format)
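# Hypothetical usage sketch for the class above: writes a dcr.par file
# for the Red camera with serial binning 1 into the current working
# directory. Shown for illustration only.
def _example_generate_dcr_par_file():
    generate_dcr_par = GenerateDcrParFile()
    generate_dcr_par(instrument='Red', binning='1', path=os.getcwd())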
class NightDataContainer(object):
"""This class is designed to be the organized data container. It doesn't
store image data but a list of :class:`~pandas.DataFrame` objects. Also it
stores critical variables such as sunrise and sunset times.
"""
def __init__(self, path, instrument, technique):
"""Initializes all the variables for the class
Args:
path (str): Full path to the directory where raw data is located
instrument (str): `Red` or `Blue` stating whether the data was taken
using the Red or Blue Goodman Camera.
technique (str): `Spectroscopy` or `Imaging` stating what kind of
data was taken.
"""
self.full_path = path
self.instrument = instrument
self.technique = technique
self.gain = None
self.rdnoise = None
self.roi = None
self.is_empty = True
"""For imaging use"""
self.bias = None
self.day_flats = None
self.dome_flats = None
self.sky_flats = None
self.data_groups = None
"""For spectroscopy use"""
# comp_groups will store :class:`~pandas.DataFrame` (groups) that
# contain only OBSTYPE == COMP, they should be requested only when
# needed, for the science case in which every science target is
# observed with comparison lamps and quartz lamps (if any).
self.comp_groups = None
# object_groups will store :class:`~pandas.DataFrame` (groups) with only
# OBSTYPE == OBJECT this is the case when the observer takes comparison
# lamps only at the beginning or end of the night.
self.object_groups = None
# spec_groups will store :class:`~pandas.DataFrame` (groups) with a set
# of OBJECT and COMP, this is usually the case for radial velocity
# studies.
self.spec_groups = None
"""Time reference points"""
self.sun_set_time = None
self.sun_rise_time = None
self.evening_twilight = None
self.morning_twilight = None
def __repr__(self):
"""Produces a nice summary of the information contained"""
if self.is_empty:
return str("Empty Data Container")
else:
class_info = str("{:s}\n"
"Full Path: {:s}\n"
"Instrument: {:s}\n"
"Technique: {:s}".format(str(self.__class__),
self.full_path,
self.instrument,
self.technique))
if all([self.gain, self.rdnoise, self.roi]):
class_info += str("\nGain: {:.2f}\n"
"Readout Noise: {:.2f}\n"
"ROI: {:s}".format(self.gain,
self.rdnoise,
self.roi))
class_info += str("\nIs Empty: {:s}\n".format(str(self.is_empty)))
group_info = "\nData Grouping Information\n"
group_info += "BIAS Group:\n"
group_info += self._get_group_repr(self.bias)
group_info += "Day FLATs Group:\n"
group_info += self._get_group_repr(self.day_flats)
group_info += "Dome FLATs Group:\n"
group_info += self._get_group_repr(self.dome_flats)
group_info += "Sky FLATs Group:\n"
group_info += self._get_group_repr(self.sky_flats)
if self.technique == 'Spectroscopy':
group_info += "COMP Group:\n"
group_info += self._get_group_repr(self.comp_groups)
group_info += "OBJECT Group\n"
group_info += self._get_group_repr(self.object_groups)
group_info += "OBJECT + COMP Group:\n"
group_info += self._get_group_repr(self.spec_groups)
# group_info += self._get_group_repr(self.data_groups)
class_info += group_info
elif self.technique == 'Imaging':
group_info += "DATA Group:\n"
group_info += self._get_group_repr(self.data_groups)
class_info += group_info
return class_info
@staticmethod
def _get_group_repr(group):
"""Converts the file names in each group to string
This class has a __repr__ method, in which the file names
contained in the different groups get formatted as a string for
display in a readable way.
"""
group_str = ""
if group is not None:
for i in range(len(group)):
if len(group) == 1:
group_str += "Files in Group\n"
else:
group_str += "Files in Group {:d}\n".format(i + 1)
for _file in group[i]['file']:
group_str += " {:s}\n".format(_file)
return group_str
else:
return " Group is Empty\n"
def add_bias(self, bias_group):
"""Adds a bias group
Args:
bias_group (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if len(bias_group) < 2:
if self.technique == 'Imaging':
log.error('Imaging mode needs BIAS to work properly. '
'Go find some.')
else:
log.warning('BIAS are needed for optimal results.')
else:
if self.bias is None:
self.bias = [bias_group]
else:
self.bias.append(bias_group)
if self.bias is not None:
self.is_empty = False
def add_day_flats(self, day_flats):
""""Adds a daytime flat group
Args:
day_flats (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if self.day_flats is None:
self.day_flats = [day_flats]
else:
self.day_flats.append(day_flats)
if self.day_flats is not None:
self.is_empty = False
def add_data_group(self, data_group):
"""Adds a data group
Args:
data_group (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if self.data_groups is None:
self.data_groups = [data_group]
else:
self.data_groups.append(data_group)
if self.data_groups is not None:
self.is_empty = False
def add_comp_group(self, comp_group):
"""Adds a comp-only group
All comparison lamps groups are added here. The ones that may have been
taken in the afternoon (isolated) or along science target. This will
act as a pool of comparison lamp groups for eventual science targets
taken without comparison lamps.
Args:
comp_group (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if self.comp_groups is None:
self.comp_groups = [comp_group]
else:
self.comp_groups.append(comp_group)
if self.comp_groups is not None:
self.is_empty = False
def add_object_group(self, object_group):
"""Adds a object-only group
Args:
object_group (DataFrame): A :class:`~pandas.DataFrame` Contains a
set of keyword values of grouped image metadata
"""
if self.object_groups is None:
self.object_groups = [object_group]
else:
self.object_groups.append(object_group)
if self.object_groups is not None:
self.is_empty = False
def add_spec_group(self, spec_group):
"""Adds a data group containing object and comp
The comparison lamp groups are also added to a general pool of
comparison lamps.
Args:
spec_group (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if self.spec_groups is None:
self.spec_groups = [spec_group]
else:
self.spec_groups.append(spec_group)
if self.spec_groups is not None:
self.is_empty = False
comp_group = spec_group[spec_group.obstype == 'COMP']
self.add_comp_group(comp_group=comp_group)
def set_sun_times(self, sun_set, sun_rise):
"""Sets values for sunset and sunrise
Args:
sun_set (str): Sun set time in the format 'YYYY-MM-DDTHH:MM:SS.SS'
sun_rise (str): Sun rise time in the format 'YYYY-MM-DDTHH:MM:SS.SS'
"""
self.sun_set_time = sun_set
self.sun_rise_time = sun_rise
def set_twilight_times(self, evening, morning):
"""Sets values for evening and morning twilight
Args:
evening (str): Evening twilight time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
morning (str): Morning twilight time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
"""
self.evening_twilight = evening
self.morning_twilight = morning
def set_readout(self, gain, rdnoise, roi):
"""Set Gain, Read noise and ROI.
Args:
gain (float): Gain from header
rdnoise (float): Read noise from header.
roi (str): ROI from header.
"""
self.gain = gain
self.rdnoise = rdnoise
self.roi = roi
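# --- Hedged usage sketch (editorial addition, not part of the pipeline) ---
# A minimal illustration of how the grouping container above might be fed.
# The variable `night` and the DataFrame columns ('file', 'obstype') are
# assumptions inferred from the methods above, not a confirmed API.
#
#   import pandas
#
#   night = data_container  # an instance of the enclosing container class
#   bias_group = pandas.DataFrame({'file': ['bias_001.fits', 'bias_002.fits'],
#                                  'obstype': ['BIAS', 'BIAS']})
#   night.add_bias(bias_group=bias_group)
#   spec_group = pandas.DataFrame({'file': ['obj_001.fits', 'comp_001.fits'],
#                                  'obstype': ['OBJECT', 'COMP']})
#   night.add_spec_group(spec_group=spec_group)  # also pools the COMP rows
#   print(night)  # __repr__ uses _get_group_repr to list files per group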
class NoMatchFound(Exception): # pragma: no cover
"""Exception for when no match is found."""
def __init__(self, message="No match found"):
Exception.__init__(self, message)
class NoTargetException(Exception): # pragma: no cover
"""Exception to be raised when no target is identified"""
def __init__(self):
Exception.__init__(self, 'No targets identified.')
class NotEnoughLinesDetected(Exception): # pragma: no cover
"""Exception for when there are no lines detected."""
def __init__(self):
Exception.__init__(self, 'Not enough lines detected.')
class ReferenceData(object):
"""Contains spectroscopic reference lines values and filename to templates.
This class stores:
- file names for reference fits spectrum
- file names for CSV tables with reference lines and relative
intensities
- line positions only for the elements used in SOAR comparison lamps
"""
def __init__(self, reference_dir):
"""Init method for the ReferenceData class
This method uses ccdproc.ImageFileCollection on the `reference_dir` to
capture all possible reference lamps. Each reference lamp has a list of
detected lines registered in the header: the pixel positions are stored
in keywords of the form GSP_P??? and the corresponding angstrom values
in keywords of the form GSP_A???, where ??? is a number from 001 to 999.
Args:
reference_dir (str): full path to the reference data directory
"""
self.log = logging.getLogger(__name__)
self.reference_dir = reference_dir
reference_collection = ccdproc.ImageFileCollection(self.reference_dir)
self.ref_lamp_collection = reference_collection.summary.to_pandas()
self.lines_pixel = None
self.lines_angstrom = None
self._ccd = None
self.nist = {}
self.lamp_status_keywords = [
'LAMP_HGA',
'LAMP_NE',
'LAMP_AR',
'LAMP_FE',
'LAMP_CU',
'LAMP_QUA',
'LAMP_QPE',
'LAMP_BUL',
'LAMP_DOM',
'LAMP_DPE']
def get_reference_lamp(self, header):
"""Finds a suitable template lamp from the catalog
Args:
header (Header): FITS header of the image for which a reference
lamp is being sought.
Returns:
A CCDData instance of the best matching reference lamp, with its
line list recovered from the header.
"""
if all([keyword in header for keyword in self.lamp_status_keywords]):
self.log.info("Searching matching reference lamp")
filtered_collection = self.ref_lamp_collection[(
(self.ref_lamp_collection['lamp_hga'] == header['LAMP_HGA']) &
(self.ref_lamp_collection['lamp_ne'] == header['LAMP_NE']) &
(self.ref_lamp_collection['lamp_ar'] == header['LAMP_AR']) &
(self.ref_lamp_collection['lamp_fe'] == header['LAMP_FE']) &
(self.ref_lamp_collection['lamp_cu'] == header['LAMP_CU']) &
(self.ref_lamp_collection['wavmode'] == header['wavmode']))]
if filtered_collection.empty:
error_message = "Unable to find a match in the reference library for: "\
"LAMP_HGA = {}, "\
"LAMP_NE = {}, "\
"LAMP_AR = {}, "\
"LAMP_FE = {}, "\
"LAMP_CU = {}, "\
"WAVMODE = {} ".format(header['LAMP_HGA'],
header['LAMP_NE'],
header['LAMP_AR'],
header['LAMP_FE'],
header['LAMP_CU'],
header['WAVMODE'])
self.log.error(error_message)
raise NoMatchFound(error_message)
else:
filtered_collection = self.ref_lamp_collection[
(self.ref_lamp_collection['object'] == header['object']) &
# TODO (simon): Wavemode can be custom (GRT_TARG, CAM_TARG, GRATING)
(self.ref_lamp_collection['wavmode'] == re.sub(' ', '_', header['wavmode']).upper())]
if filtered_collection.empty:
error_message = "Unable to find matching "\
"reference lamp for: "\
"OBJECT = {}, "\
"WAVMODE = {}".format(header['OBJECT'],
header['WAVMODE'])
self.log.error(error_message)
raise NoMatchFound(error_message)
if len(filtered_collection) == 1:
self.log.info(
"Reference Lamp Found: {:s}"
"".format("".join(filtered_collection.file.to_string(index=False).split())))
full_path = os.path.join(self.reference_dir,
"".join(filtered_collection.file.to_string(
index=False).split()))
self._ccd = ccdproc.CCDData.read(full_path, unit=u.adu)
self._recover_lines()
return self._ccd
else:
raise NotImplementedError(
"Found {} matches".format(len(filtered_collection)))
def lamp_exists(self, header):
"""Checks whether a matching lamp exist or not
Args:
object_name (str): Name of the lamp from 'OBJECT' keyword.
grating (str): Grating from 'GRATING' keyword.
grt_targ (float): Grating target from keyword 'GRT_TARG'.
cam_targ (float): Camera target from keyword 'CAM_TARG'.
Returns:
True of False depending if a single matching lamp exist.
Raises:
NotImplementedError if there are more than one lamp found.
"""
filtered_collection = self.ref_lamp_collection[
(self.ref_lamp_collection['lamp_hga'] == header['LAMP_HGA']) &
(self.ref_lamp_collection['lamp_ne'] == header['LAMP_NE']) &
(self.ref_lamp_collection['lamp_ar'] == header['LAMP_AR']) &
(self.ref_lamp_collection['lamp_cu'] == header['LAMP_CU']) &
(self.ref_lamp_collection['lamp_fe'] == header['LAMP_FE']) &
(self.ref_lamp_collection['grating'] == header['GRATING']) &
(self.ref_lamp_collection['grt_targ'] == header['GRT_TARG']) &
(self.ref_lamp_collection['cam_targ'] == header['CAM_TARG'])]
if filtered_collection.empty:
return False
elif len(filtered_collection) == 1:
return True
else:
raise NotImplementedError
def check_comp_group(self, comp_group):
"""Check if comparison lamp group has matching reference lamps
Args:
comp_group (DataFrame): A :class:`~pandas.DataFrame` instance that
contains meta-data for a group of comparison lamps.
Returns:
A :class:`~pandas.DataFrame` subset of `comp_group` with matching
reference lamps, the unmodified `comp_group` if no reference lamps
exist for it, or None if the group defines no lamp configurations.
"""
lamps = comp_group.groupby(['grating',
'grt_targ',
'cam_targ',
'lamp_hga',
'lamp_ne',
'lamp_ar',
'lamp_fe',
'lamp_cu']).size().reset_index(
).rename(columns={0: 'count'})
# Given how the input is created this loop should run only once, but it
# is kept in case several lamp configurations appear in a single group.
for i in lamps.index:
pseudo_header = fits.Header()
# pseudo_header.set('OBJECT', value=lamps.iloc[i]['object'])
pseudo_header.set('GRATING', value=lamps.iloc[i]['grating'])
pseudo_header.set('GRT_TARG', value=lamps.iloc[i]['grt_targ'])
pseudo_header.set('CAM_TARG', value=lamps.iloc[i]['cam_targ'])
pseudo_header.set('LAMP_HGA', value=lamps.iloc[i]['lamp_hga'])
pseudo_header.set('LAMP_NE', value=lamps.iloc[i]['lamp_ne'])
pseudo_header.set('LAMP_AR', value=lamps.iloc[i]['lamp_ar'])
pseudo_header.set('LAMP_FE', value=lamps.iloc[i]['lamp_fe'])
pseudo_header.set('LAMP_CU', value=lamps.iloc[i]['lamp_cu'])
if self.lamp_exists(header=pseudo_header):
new_group = comp_group[
(comp_group['grating'] == lamps.iloc[i]['grating']) &
(comp_group['grt_targ'] == lamps.iloc[i]['grt_targ']) &
(comp_group['cam_targ'] == lamps.iloc[i]['cam_targ']) &
(comp_group['lamp_hga'] == lamps.iloc[i]['lamp_hga']) &
(comp_group['lamp_ne'] == lamps.iloc[i]['lamp_ne']) &
(comp_group['lamp_ar'] == lamps.iloc[i]['lamp_ar']) &
(comp_group['lamp_fe'] == lamps.iloc[i]['lamp_fe']) &
(comp_group['lamp_cu'] == lamps.iloc[i]['lamp_cu'])]
return new_group
else:
self.log.warning("The target's comparison lamps do not have "
"reference lamps.")
self.log.debug("In this case a compatible lamp will be "
"obtained from all the lamps obtained in the "
"data or present in the files.")
self.log.debug("Using the full set of comparison lamps "
"for extraction.")
return comp_group
return None
def _recover_lines(self):
"""Read lines from the reference lamp's header."""
self.log.info("Recovering line information from reference Lamp.")
self.lines_pixel = []
self.lines_angstrom = []
pixel_keys = self._ccd.header['GSP_P*']
for pixel_key in pixel_keys:
if re.match(r'GSP_P\d{3}', pixel_key) is not None:
angstrom_key = re.sub('GSP_P', 'GSP_A', pixel_key)
assert pixel_key[-3:] == angstrom_key[-3:]
assert angstrom_key in self._ccd.header
if int(float(self._ccd.header[angstrom_key])) != 0:
self.lines_pixel.append(float(self._ccd.header[pixel_key]))
self.lines_angstrom.append(
float(self._ccd.header[angstrom_key]))
else:
self.log.debug(
"File: {:s}".format(self._ccd.header['GSP_FNAM']))
self.log.debug(
"Ignoring keywords: {:s}={:f}, {:s}={:f}".format(
pixel_key,
self._ccd.header[pixel_key],
angstrom_key,
float(self._ccd.header[angstrom_key])))
@staticmethod
def _order_validation(lines_array):
"""Checks that the array of lines only increases."""
previous = None
for line_value in lines_array:
if previous is not None:
try:
assert line_value > previous
previous = line_value
except AssertionError:
log.error("Error: Line {:f} is not larger "
"than {:f}".format(line_value, previous))
return False
else:
previous = line_value
return True
def _load_nist_list(self, **kwargs):
"""Load all csv files from strong lines in NIST."""
nist_path = kwargs.get(
'path',
os.path.join(os.path.dirname(
sys.modules['goodman_pipeline'].__file__),
'data/nist_list'))
assert os.path.isdir(nist_path)
nist_files = glob.glob(os.path.join(nist_path, "*.txt"))
for nist_file in nist_files:
key = os.path.basename(nist_file)[22:-4]
nist_data = pandas.read_csv(nist_file, names=['intensity',
'air_wavelength',
'spectrum',
'reference'])
self.nist[key] = nist_data
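# --- Hedged usage sketch (editorial addition) ---
# How ReferenceData is typically queried, assuming `reference_dir` holds
# reference lamp FITS files with the GSP_P*/GSP_A* keywords described in
# __init__. The file name 'comp_lamp.fits' is hypothetical.
#
#   from astropy.io import fits
#
#   reference = ReferenceData(reference_dir='/path/to/refdata')
#   header = fits.getheader('comp_lamp.fits')
#   try:
#       ref_ccd = reference.get_reference_lamp(header=header)
#   except NoMatchFound as error:
#       print(error)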
class SaturationValues(object):
"""Contains a complete table of readout modes and 50% half well
"""
def __init__(self, ccd=None):
"""Defines a :class:`~pandas.DataFrame` with saturation_threshold information
Both, Red and Blue cameras have tabulated saturation_threshold values depending
on the readout configurations. It defines a :class:`~pandas.DataFrame`
object.
Notes:
For the purposes of this documentation *50% full well* is the same
as ``saturation_threshold level`` though they are not the same thing.
Args:
ccd (CCDData): Image to be tested for saturation_threshold
"""
self.log = logging.getLogger(__name__)
self.__saturation = None
columns = ['camera',
'read_rate',
'analog_attn',
'gain',
'read_noise',
'half_full_well',
'saturates_before']
saturation_table = [['Blue', 50, 0, 0.25, 3.33, 279600, True],
['Blue', 50, 2, 0.47, 3.35, 148723, True],
['Blue', 50, 3, 0.91, 3.41, 76813, True],
['Blue', 100, 0, 0.56, 3.69, 124821, True],
['Blue', 100, 2, 1.06, 3.72, 65943, True],
['Blue', 100, 3, 2.06, 3.99, 33932, False],
['Blue', 200, 0, 1.4, 4.74, 49928, False],
['Blue', 200, 2, 2.67, 5.12, 26179, False],
['Blue', 400, 0, 5.67, 8.62, 12328, False],
['Red', 100, 3, 1.54, 3.45, 66558, True],
['Red', 100, 2, 3.48, 5.88, 29454, False],
['Red', 344, 3, 1.48, 3.89, 69257, True],
['Red', 344, 0, 3.87, 7.05, 26486, False],
['Red', 750, 2, 1.47, 5.27, 69728, True],
['Red', 750, 2, 1.45, 5.27, 69728, True],
['Red', 750, 0, 3.77, 8.99, 27188, False],
['Red', 750, 0, 3.78, 8.99, 27188, False]]
self._sdf = pandas.DataFrame(saturation_table,
columns=columns)
if ccd is not None:
self.get_saturation_value(ccd=ccd)
@property
def saturation_value(self):
"""Saturation value in counts
In fact the value it returns is the 50% of full potential well,
Some configurations reach digital saturation_threshold before 50% of full
potential well, they are specified in the last column:
``saturates_before``.
Returns:
None if the value has not been defined
"""
if self.__saturation is None:
self.log.error('Saturation value not set')
return None
else:
return self.__saturation
def get_saturation_value(self, ccd):
"""Defines the saturation_threshold level
Args:
ccd (CCDData): Image to be tested for saturation_threshold
Returns:
The saturation_threshold value or None
"""
hfw = self._sdf.half_full_well[
(self._sdf.camera == ccd.header['INSTCONF']) &
(self._sdf.gain == ccd.header['GAIN']) &
(self._sdf.read_noise == ccd.header['RDNOISE'])]
if hfw.empty:
self.log.critical('Unable to obtain saturation level')
self.__saturation = None
return None
else:
self.__saturation = float("".join(hfw.to_string(index=False).split()))
self.log.debug("Set saturation_threshold level as {:.0f}".format(
self.__saturation))
return self.__saturation
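# --- Hedged usage sketch (editorial addition) ---
# SaturationValues looks up the half full well level from the INSTCONF,
# GAIN and RDNOISE keywords, so those must be present. The values below
# correspond to the first tabulated Blue-camera configuration above.
#
#   from astropy.io import fits
#   import numpy as np
#
#   ccd = ccdproc.CCDData(data=np.zeros((100, 100)), unit=u.adu)
#   ccd.header = fits.Header()
#   ccd.header['INSTCONF'] = 'Blue'
#   ccd.header['GAIN'] = 0.25
#   ccd.header['RDNOISE'] = 3.33
#   saturation = SaturationValues(ccd=ccd)
#   print(saturation.saturation_value)  # 279600.0 for this configuration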
class SpectroscopicMode(object):
def __init__(self):
"""Init method for the Spectroscopic Mode
This method defines a :class:`~pandas.DataFrame` instance that contains
all the current standard wavelength modes for Goodman HTS.
"""
self.log = logging.getLogger(__name__)
columns = ['grating_freq', 'wavmode', 'camtarg', 'grttarg', 'ob_filter']
spec_mode = [['400', 'm1', '11.6', '5.8', 'None'],
['400', 'm2', '16.1', '7.5', 'GG455'],
['600', 'UV', '15.25', '7.0', 'None'],
['600', 'Blue', '17.0', '7.0', 'None'],
['600', 'Mid', '20.0', '10.0', 'GG385'],
['600', 'Red', '27.0', '12.0', 'GG495'],
['930', 'm1', '20.6', '10.3', 'None'],
['930', 'm2', '25.2', '12.6', 'None'],
['930', 'm3', '29.9', '15.0', 'GG385'],
['930', 'm4', '34.6', '18.3', 'GG495'],
['930', 'm5', '39.4', '19.7', 'GG495'],
['930', 'm6', '44.2', '22.1', 'OG570'],
['1200', 'm0', '26.0', '16.3', 'None'],
['1200', 'm1', '29.5', '16.3', 'None'],
['1200', 'm2', '34.4', '18.7', 'None'],
['1200', 'm3', '39.4', '20.2', 'None'],
['1200', 'm4', '44.4', '22.2', 'GG455'],
['1200', 'm5', '49.6', '24.8', 'GG455'],
['1200', 'm6', '54.8', '27.4', 'GG495'],
['1200', 'm7', '60.2', '30.1', 'OG570'],
['1800', 'Custom', 'None', 'None', 'None'],
['2100', 'Custom', 'None', 'None', 'None'],
['2400', 'Custom', 'None', 'None', 'None']
]
self.modes_data_frame = pandas.DataFrame(spec_mode, columns=columns)
def __call__(self,
header=None,
grating=None,
camera_targ=None,
grating_targ=None,
blocking_filter=None):
"""Get spectroscopic mode
This method can be called either by passing a header alone or by
passing the remaining values separately.
Args:
header (Header): FITS header.
grating (str): Grating as in the FITS header.
camera_targ (str): Camera target angle as in the FITS header.
grating_targ (str): Grating target angle as in the FITS header.
blocking_filter (str): Order blocking filter as in the FITS header.
Returns:
string that defines the instrument wavelength mode.
"""
if all(x is None for x in (
grating, camera_targ, grating_targ, blocking_filter)) and \
header is not None:
grating = str(re.sub('[A-Za-z_-]', '', header['grating']))
camera_targ = str(header['cam_targ'])
grating_targ = str(header['grt_targ'])
blocking_filter = str(header['filter2'])
return self.get_mode(grating=grating,
camera_targ=camera_targ,
grating_targ=grating_targ,
blocking_filter=blocking_filter)
elif not all(x is None for x in (
grating, camera_targ, grating_targ, blocking_filter)):
grating = re.sub('[A-Za-z_-]', '', grating)
return self.get_mode(grating=grating,
camera_targ=camera_targ,
grating_targ=grating_targ,
blocking_filter=blocking_filter)
else:
raise SyntaxError("Either a fits header or grating, camera angle, "
"grating angle and order blocking filter are "
"required.")
def get_mode(self, grating, camera_targ, grating_targ, blocking_filter):
"""Get the camera's optical configuration mode.
This method is useful for data that does not have the WAVMODE keyword
Args:
grating (str): Grating frequency as string
camera_targ (str): Camera target angle as in the header.
grating_targ (str): Grating target angle as in the header.
blocking_filter (str): Order blocking filter listed on the header.
Returns:
string that defines the wavelength mode used
"""
if any(grat == grating for grat in ('1800', '2100', '2400')):
central_wavelength = get_central_wavelength(grating=grating,
grt_ang=grating_targ,
cam_ang=camera_targ)
central_wavelength = central_wavelength.to(u.nm)
return 'Custom_{:d}nm'.format(int(round(central_wavelength.value)))
else:
_mode = self.modes_data_frame[
((self.modes_data_frame['grating_freq'] == grating) &
(self.modes_data_frame['camtarg'] == camera_targ) &
(self.modes_data_frame['grttarg'] == grating_targ) &
(self.modes_data_frame['ob_filter'] == blocking_filter))]
if _mode.empty:
central_wavelength = get_central_wavelength(
grating=grating,
grt_ang=grating_targ,
cam_ang=camera_targ)
central_wavelength = central_wavelength.to(u.nm)
return 'Custom_{:d}nm'.format(int(round(
central_wavelength.value)))
else:
return "".join(_mode['wavmode'].to_string(index=False).split())
def get_cam_grt_targ_angle(self, grating, mode):
"""Get the camera and grating target values grating and mode
Args:
grating (float): Grating frequency in lines/mm (unitless value)
mode (str): Name of the grating's mode for which the camera and
grating target values are required.
Returns:
Camera and grating target values, or None and None if no such values
exist.
"""
if any(grat == str(grating) for grat in ('1800', '2100', '2400')):
self.log.warning("Grating {:s} does not define "
"modes.".format(str(grating)))
return None, None
else:
angle = self.modes_data_frame[
((self.modes_data_frame['grating_freq'] == str(grating)) &
(self.modes_data_frame['wavmode'] == mode))]
if angle.empty:
self.log.error("No data")
return None, None
else:
return ("".join(angle['camtarg'].to_string(index=False).split()),
"".join(angle['grttarg'].to_string(index=False).split()))
class IdentifySpectroscopicTargets(object):
def __init__(self):
self.nfind = 1
self.plots = False
self.background_threshold = 3
self.profile_model = []
self.profile_min_width = None
self.profile_max_width = None
self.model_name = None
self.ccd = None
self.slit_size = None
self.serial_binning = None
self.order = None
self.file_name = None
self.background_model = None
self.background_level = None
self.spatial_profile = None
self.all_peaks = None
self.selected_peaks = None
def __call__(self,
ccd,
nfind=3,
background_threshold=3,
model_name='gaussian',
profile_min_width=None,
profile_max_width=None,
plots=False):
assert isinstance(ccd, ccdproc.CCDData)
assert ccd.header['OBSTYPE'] in ['OBJECT', 'SPECTRUM'], \
"Can't search for targets in files with" \
" OBSTYPE = {}".format(ccd.header['OBSTYPE'])
self.file_name = ccd.header['GSP_FNAM']
log.info('Searching spectroscopic targets in file: {:s}'
''.format(self.file_name))
self.ccd = ccd
self.nfind = nfind
self.plots = plots
self.model_name = model_name
self.background_threshold = background_threshold
self.profile_min_width = profile_min_width
self.profile_max_width = profile_max_width
self.slit_size = re.sub('[a-zA-Z"_*]', '', self.ccd.header['SLIT'])
log.debug('Slit size: {:s}'.format(self.slit_size))
self.serial_binning = int(self.ccd.header['CCDSUM'].split()[0])
log.debug('Serial binning: {:d}'.format(self.serial_binning))
self.order = int(round(float(self.slit_size) / (0.15 * self.serial_binning)))
if self.plots: # pragma: no cover
z1 = np.mean(self.ccd.data) - 0.5 * np.std(self.ccd.data)
z2 = np.median(self.ccd.data) + np.std(self.ccd.data)
plt.switch_backend('Qt5Agg')
fig, ax = plt.subplots()
fig.canvas.set_window_title(self.file_name)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title(self.file_name)
ax.imshow(ccd.data, clim=(z1, z2), cmap='gray')
ax.set_xlabel('Dispersion Axis (x)')
ax.set_ylabel('Spatial Axis (y)')
fig.tight_layout()
plt.show()
self.spatial_profile = np.median(ccd.data, axis=1)
# assert all([self.spatial_profile, self.file_name])
self.fit_background()
self.subtract_background()
self.get_peaks()
self.filter_peaks()
self.fit_model()
if self.profile_model == []:
log.error("Impossible to identify targets.")
else:
log.info('Identified {:d} target{:s}'.format(
len(self.profile_model),
['s' if len(self.profile_model) > 1 else ''][0]))
return self.profile_model
def fit_background(self, spatial_profile=None, file_name=None, plots=False):
"""
Args:
spatial_profile :
file_name (String):
plots:
Returns:
"""
if spatial_profile is None and self.spatial_profile is not None:
spatial_profile = self.spatial_profile
elif spatial_profile is None:
raise NotImplementedError
if file_name is None and self.file_name is not None:
file_name = self.file_name
elif file_name is None:
raise NotImplementedError
log.info('Fitting Linear1D model to spatial profile to detect '
'background shape')
clipped_profile = sigma_clip(spatial_profile, sigma=2, maxiters=5)
linear_model = models.Linear1D(slope=0,
intercept=np.median(spatial_profile))
linear_fitter = fitting.LinearLSQFitter()
# the fitters do not support masked arrays so we need to have a new
# array without the masked (clipped) elements.
new_profile = clipped_profile[~clipped_profile.mask]
# also the indexes are different
new_x_axis = np.array([i for i in range(len(clipped_profile))
if not clipped_profile.mask[i]])
self.background_model = linear_fitter(linear_model, new_x_axis, new_profile)
if plots or self.plots: # pragma: no cover
fig, ax = plt.subplots()
fig.canvas.set_window_title(file_name)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title('Background Fitting Model Defined')
ax.plot(spatial_profile, color='k', label='Median profile')
ax.plot(linear_model(range(len(spatial_profile))),
color='r',
label='Background Linear Model')
ax.set_xlabel("Spatial Axis (Pixels)")
ax.set_ylabel("Median Intensity")
ax.legend(loc='best')
plt.tight_layout()
plt.show()
fig, ax = plt.subplots()
fig.canvas.set_window_title(file_name)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title('Background Fitted Model')
ax.plot(spatial_profile, color='k', label='Median profile')
ax.plot(self.background_model(range(len(spatial_profile))),
color='r',
label='Fitted Background Linear Model')
ax.set_xlabel("Spatial Axis (Pixels)")
ax.set_ylabel("Median Intensity")
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return self.background_model
def subtract_background(self, spatial_profile=None, background_model=None,
file_name=None, plots=False):
"""
Args:
spatial_profile:
background_model:
file_name:
plots:
Returns:
"""
if not all([spatial_profile, background_model, file_name]):
if self.spatial_profile is not None:
spatial_profile = self.spatial_profile
if self.background_model is not None:
background_model = self.background_model
if self.file_name is None:
file_name = ''
else:
file_name = self.file_name
log.info('Subtracting background shape and level from the spatial '
'profile for better target identification')
background_array = background_model(range(len(spatial_profile)))
background_subtracted = spatial_profile - background_array
background_subtracted[background_subtracted < 0] = 0
self.spatial_profile = background_subtracted.copy()
clipped_final_profile = sigma_clip(self.spatial_profile, sigma=3, maxiters=3)
new_x_axis = [i for i in range(len(clipped_final_profile)) if
not clipped_final_profile.mask[i]]
clipped_final_profile = clipped_final_profile[
~clipped_final_profile.mask]
self.background_level = np.abs(np.max(clipped_final_profile) -
np.min(clipped_final_profile))
log.debug('New background level after subtraction was found to be '
'{:.2f}'.format(self.background_level))
if plots or self.plots: # pragma: no cover
plt.ioff()
plt.close()
fig, ax = plt.subplots()
fig.canvas.set_window_title(file_name)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title('Median Along Dispersion Axis (spatial)')
ax.plot(background_subtracted, label='Background Subtracted Data')
ax.plot(new_x_axis,
clipped_final_profile,
color='r',
label='Sigma Clipped Data')
ax.axhline(self.background_level, color='m', label='Min-Max Difference')
ax.set_xlabel("Spatial Axis (Pixels)")
ax.set_ylabel("Median Intensity")
plt.legend(loc='best')
plt.tight_layout()
if plt.isinteractive():
plt.draw()
plt.pause(5)
else:
plt.show()
return self.spatial_profile, self.background_level
def get_peaks(self,
spatial_profile=None,
order=None,
file_name=None,
plots=False):
"""
Args:
spatial_profile: Background subtracted profile
order:
file_name:
plots:
Returns:
"""
if not all([spatial_profile, order, file_name]):
if self.spatial_profile is not None:
spatial_profile = self.spatial_profile
if self.order is not None:
order = self.order
if self.file_name is None:
file_name = ''
else:
file_name = self.file_name
log.info("Finding all peaks in spatial profile")
spatial_profile = signal.medfilt(spatial_profile, kernel_size=1)
_upper_limit = spatial_profile.min() + 0.03 * spatial_profile.max()
filtered_profile = np.where(spatial_profile > _upper_limit,
spatial_profile,
None)
none_to_zero_prof = [0 if it is None else it for it in filtered_profile]
filtered_profile = np.array(none_to_zero_prof)
# order *= 2
self.all_peaks = signal.argrelmax(filtered_profile,
axis=0,
order=order)[0]
log.debug("Found {:d} peaks".format(len(self.all_peaks)))
if plots or self.plots: # pragma: no cover
plt.ioff()
fig, ax = plt.subplots()
fig.canvas.set_window_title(file_name)
ax.set_title('All detected Peaks')
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
for peak in self.all_peaks:
ax.axvline(peak, color='r', alpha=0.7)
ax.plot(spatial_profile, label='Background subtracted profile')
ax.axhline(_upper_limit, color='g', label='Peak Detection Threshold')
ax.plot([], color='r', label='Peak location')
ax.set_xlabel("Spatial Axis (Pixels)")
ax.set_ylabel("Background subtracted median intensity")
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return self.all_peaks
def filter_peaks(self,
spatial_profile=None,
detected_peaks=None,
nfind=None,
background_threshold=None,
file_name=None,
plots=False):
"""
Args:
spatial_profile:
detected_peaks:
nfind:
background_threshold:
file_name:
plots:
Returns:
"""
if not all([spatial_profile,
detected_peaks,
nfind,
background_threshold,
file_name]):
if self.spatial_profile is not None:
spatial_profile = self.spatial_profile
if self.all_peaks is not None:
detected_peaks = self.all_peaks
if self.nfind is not None:
nfind = self.nfind
if self.background_threshold is not None:
background_threshold = self.background_threshold
if self.file_name is None:
file_name = ''
else:
file_name = self.file_name
else:
raise NotImplementedError
log.info("Selecting the {:d} most intense peaks out of {:d} found"
"".format(nfind, len(detected_peaks)))
peak_data_values = [spatial_profile[i] for i in detected_peaks]
sorted_values = np.sort(peak_data_values)[::-1]
detection_limit = spatial_profile.min() + 0.03 * spatial_profile.max()
n_strongest_values = sorted_values[:nfind]
self.selected_peaks = []
log.info("Validating peaks by setting threshold {:d} times the "
"background level {:.2f}".format(background_threshold,
detection_limit))
log.debug('Intensity threshold set to: {:.2f}'
''.format(background_threshold * detection_limit))
for peak_value in n_strongest_values:
index = np.where(peak_data_values == peak_value)[0]
if peak_value > background_threshold * detection_limit:
self.selected_peaks.append(detected_peaks[index[0]])
log.info(
'Selecting peak: Centered: {:.1f} Intensity {:.3f}'.format(
self.selected_peaks[-1], peak_value))
else:
log.debug('Discarding peak: Center {:.1f} Intensity {:.3f} '
'Reason: Below intensity threshold ({:.2f})'
''.format(detected_peaks[index[0]],
peak_value,
background_threshold * detection_limit))
if plots or self.plots: # pragma: no cover
plt.ioff()
fig, ax = plt.subplots()
fig.canvas.set_window_title(file_name)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.plot(spatial_profile, label='Background subtracted profile')
ax.axhline(detection_limit, color='g', label='Upper limit for peak detection')
ax.axhline(background_threshold * detection_limit,
color='m',
label="Intensity Threshold")
for peak in self.selected_peaks:
ax.axvline(peak, color='r', label='Peak location')
ax.set_xlabel("Spatial Axis (Pixels)")
ax.set_ylabel("Background subtracted median intensity")
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return self.selected_peaks
def fit_model(self,
spatial_profile=None,
selected_peaks=None,
order=None,
model_name=None,
file_name=None,
plots=False):
if not all([spatial_profile,
selected_peaks,
order,
model_name,
file_name]):
if self.spatial_profile is not None:
spatial_profile = self.spatial_profile
if self.selected_peaks is not None:
selected_peaks = self.selected_peaks
if self.order is not None:
order = self.order
if self.model_name is not None:
model_name = self.model_name
if self.file_name is None:
file_name = ''
else:
file_name = self.file_name
else:
raise NotImplementedError
fitter = fitting.LevMarLSQFitter()
if model_name == 'gaussian':
self.profile_model = self._fit_gaussian(
fitter=fitter,
spatial_profile=spatial_profile,
selected_peaks=selected_peaks,
order=order,
file_name=file_name,
plots=plots or self.plots,
stddev_min=self.profile_min_width,
stddev_max=self.profile_max_width)
return self.profile_model
if model_name == 'moffat':
self.profile_model = self._fit_moffat(
fitter=fitter,
spatial_profile=spatial_profile,
selected_peaks=selected_peaks,
order=order,
file_name=file_name,
plots=plots or self.plots,
fwhm_min=self.profile_min_width,
fwhm_max=self.profile_max_width)
return self.profile_model
@staticmethod
def _fit_gaussian(fitter,
spatial_profile,
selected_peaks,
order,
file_name,
plots,
stddev_min=None,
stddev_max=None):
log.info("Fitting 'Gaussian1D' to spatial profile of targets.")
profile_model = []
if stddev_min is None:
stddev_min = 0
log.debug(f"Setting STDDEV minimum value to {stddev_min} pixels. Set it with `--target-min-width`.")
if stddev_max is None:
stddev_max = 4 * order
log.debug(f"Setting STDDEV maximum value to {stddev_max} pixels. Set it with `--target-max-width`.")
log.debug(f"Using minimum STDDEV = {stddev_min} pixels.")
log.debug(f"Using maximum STDDEV = {stddev_max} pixels.")
for peak in selected_peaks:
peak_value = spatial_profile[peak]
gaussian = models.Gaussian1D(amplitude=peak_value,
mean=peak,
stddev=order).rename(
'Gaussian_{:}'.format(peak))
fitted_gaussian = fitter(gaussian,
range(len(spatial_profile)),
spatial_profile)
# this ensures the profiles returned are valid
if (fitted_gaussian.stddev.value > stddev_min) and \
(fitted_gaussian.stddev.value < stddev_max):
profile_model.append(fitted_gaussian)
log.info(
"Recording target centered at: {:.2f}, STDDEV: {:.2f}"
"".format(fitted_gaussian.mean.value,
fitted_gaussian.stddev.value))
else:
log.error(f"Discarding target with STDDEV: {fitted_gaussian.stddev.value}. "
f"Outside of limits {stddev_min} - {stddev_max}. Set new limits with "
f"`--profile-min-width` and `--profile-max-width`")
if plots: # pragma: no cover
fig, ax = plt.subplots()
fig.canvas.set_window_title(file_name)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title('Successfully fitted profiles')
ax.plot(spatial_profile, color='k', label='Median Profile')
for profile in profile_model:
ax.plot(profile(range(len(spatial_profile))),
label=profile.name)
ax.set_xlabel("Spatial Axis (Pixels)")
ax.set_ylabel("Median Intensity")
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return profile_model
@staticmethod
def _fit_moffat(fitter,
spatial_profile,
selected_peaks,
order,
file_name,
plots,
fwhm_min=None,
fwhm_max=None):
log.info("Fitting 'Moffat1D' to spatial profile of targets.")
if fwhm_min is None:
fwhm_min = 0.5 * order
log.debug(f"Setting FWHM minimum value to {fwhm_min} pixels. Set it with `--target-min-width`.")
if fwhm_max is None:
fwhm_max = 4 * order
log.debug(f"Setting FWHM maximum value to {fwhm_max} pixels. Set it with `--target-max-width`.")
log.debug(f"Using minimum FWHM = {fwhm_min} pixels.")
log.debug(f"Using maximum FWHM = {fwhm_max} pixels.")
profile_model = []
for peak in selected_peaks:
peak_value = spatial_profile[peak]
moffat = models.Moffat1D(amplitude=peak_value,
x_0=peak,
gamma=order).rename(
'Moffat_{:}'.format(peak))
fitted_moffat = fitter(moffat,
range(len(spatial_profile)),
spatial_profile)
# this ensures the profiles returned are valid
if (fitted_moffat.fwhm > fwhm_min) and \
(fitted_moffat.fwhm < fwhm_max):
profile_model.append(fitted_moffat)
log.info(
"Recording target centered at: {:.2f}, FWHM: {:.2f}"
"".format(fitted_moffat.x_0.value,
fitted_moffat.fwhm))
else:
log.error("Discarding target centered at: {:.3f}".format(
fitted_moffat.x_0.value))
if fitted_moffat.fwhm < 0:
log.error("Moffat model FWHM is negative")
elif 0 <= fitted_moffat.fwhm < fwhm_min:
log.error(f"Moffat model FWHM is too small: {fitted_moffat.fwhm}. "
"Set a minimum limit with `--profile-min-width`.")
else:
log.error(f"Moffat model FWHM is {fitted_moffat.fwhm}, larger than current limit {fwhm_max}. "
f"Set a maximum limit with `--profile-max-width`.")
if plots: # pragma: no cover
fig, ax = plt.subplots()
fig.canvas.set_window_title(file_name)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title('Successfully fitted profiles')
ax.plot(spatial_profile, color='k', label='Median Profile')
for profile in profile_model:
ax.plot(profile(range(len(spatial_profile))),
label=profile.name)
ax.set_xlabel("Spatial Axis (Pixels)")
ax.set_ylabel("Median Intensity")
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return profile_model
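# --- Hedged usage sketch (editorial addition) ---
# Target identification operates on a 2D CCDData spectrum whose header
# provides OBSTYPE, GSP_FNAM, SLIT and CCDSUM; `science_ccd` is assumed
# to be such an object. The result is a list of fitted astropy models
# (Gaussian1D or Moffat1D), one per identified target.
#
#   identify = IdentifySpectroscopicTargets()
#   targets = identify(ccd=science_ccd, nfind=2, model_name='moffat')
#   for target in targets:
#       print(target.name, target.x_0.value)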
|
simontorres/goodman
|
goodman_pipeline/core/core.py
|
Python
|
bsd-3-clause
| 181,354
|
[
"Gaussian"
] |
7dab4f5050108cfe7ecfd9e2e8f6e1fd15c4b33ab917ce5464e41ea8a60a2eca
|
''' DIRAC Transformation DB
Transformation database is used to collect and serve the necessary information
in order to automate the task of job preparation for high level transformations.
This class is typically used as a base class for more specific data processing
databases
'''
import re, time, threading, copy
from types import IntType, LongType, StringTypes, ListType, TupleType, DictType
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.List import stringListToString, intListToString, breakListIntoChunks
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.Subprocess import pythonCall
__RCSID__ = "$Id$"
MAX_ERROR_COUNT = 10
#############################################################################
class TransformationDB( DB ):
""" TransformationDB class
"""
def __init__( self, maxQueueSize = 10, dbIn = None ):
''' The standard constructor takes the maximum queue size (maxQueueSize)
and an optional database object (dbIn)
'''
if not dbIn:
DB.__init__( self, 'TransformationDB', 'Transformation/TransformationDB', maxQueueSize )
self.lock = threading.Lock()
self.filters = []
res = self.__updateFilters()
if not res['OK']:
gLogger.fatal( "Failed to create filters" )
self.allowedStatusForTasks = ( 'Unused', 'ProbInFC' )
self.TRANSPARAMS = [ 'TransformationID',
'TransformationName',
'Description',
'LongDescription',
'CreationDate',
'LastUpdate',
'AuthorDN',
'AuthorGroup',
'Type',
'Plugin',
'AgentType',
'Status',
'FileMask',
'TransformationGroup',
'GroupSize',
'InheritedFrom',
'Body',
'MaxNumberOfTasks',
'EventsPerTask',
'TransformationFamily']
self.mutable = [ 'TransformationName',
'Description',
'LongDescription',
'AgentType',
'Status',
'MaxNumberOfTasks',
'TransformationFamily',
'Body'] # for the moment include TransformationFamily
self.TRANSFILEPARAMS = ['TransformationID',
'FileID',
'Status',
'TaskID',
'TargetSE',
'UsedSE',
'ErrorCount',
'LastUpdate',
'InsertedTime']
self.TRANSFILETASKPARAMS = ['TransformationID',
'FileID',
'TaskID']
self.TASKSPARAMS = [ 'TaskID',
'TransformationID',
'ExternalStatus',
'ExternalID',
'TargetSE',
'CreationTime',
'LastUpdateTime']
self.ADDITIONALPARAMETERS = ['TransformationID',
'ParameterName',
'ParameterValue',
'ParameterType'
]
default_task_statuses = ['Created', 'Submitted', 'Checking', 'Staging', 'Waiting', 'Running',
'Done', 'Completed', 'Killed', 'Stalled', 'Failed', 'Rescheduled']
default_file_statuses = ['Unused', 'Assigned', 'Processed', 'Problematic']
self.tasksStatuses = default_task_statuses + Operations().getValue( 'Transformations/TasksStates', [] )
self.fileStatuses = default_file_statuses + Operations().getValue( 'Transformations/FilesStatuses', [] )
result = self.__initializeDB()
if not result[ 'OK' ]:
self.log.fatal( "Cannot initialize TransformationDB!", result[ 'Message' ] )
def _generateTables( self ):
""" _generateTables
Method that returns a dictionary with all the tables to be created. It also
makes easier its extension by DIRAC plugins.
"""
retVal = self._query( "SHOW tables" )
if not retVal[ 'OK' ]:
return retVal
tablesInDB = [ t[0] for t in retVal[ 'Value' ] ]
tablesD = {}
if 'AdditionalParameters' not in tablesInDB:
tablesD[ 'AdditionalParameters' ] = {'Fields': {'ParameterName': 'VARCHAR(32) NOT NULL',
'ParameterType': "VARCHAR(32) DEFAULT 'StringType'",
'ParameterValue': 'LONGBLOB NOT NULL',
'TransformationID': 'INTEGER NOT NULL'},
'PrimaryKey': ['TransformationID', 'ParameterName'],
'Engine': 'InnoDB'
}
if 'DataFiles' not in tablesInDB:
tablesD['DataFiles'] = {'Fields': {'FileID': 'INTEGER NOT NULL AUTO_INCREMENT',
'LFN': 'VARCHAR(255) UNIQUE',
'Status': "VARCHAR(32) DEFAULT 'AprioriGood'"},
'Indexes': {'Status': ['Status']},
'PrimaryKey': ['FileID'],
'Engine': 'InnoDB'
}
if 'Replicas' not in tablesInDB:
tablesD['Replicas'] = {'Fields': {'FileID': 'INTEGER NOT NULL',
'PFN': 'VARCHAR(255)',
'SE': 'VARCHAR(32)',
'Status': "VARCHAR(32) DEFAULT 'AprioriGood'"},
'Indexes': {'Status': ['Status']},
'PrimaryKey': ['FileID', 'SE'],
'Engine': 'InnoDB'
}
if 'TaskInputs' not in tablesInDB:
tablesD['TaskInputs'] = {'Fields': {'InputVector': 'BLOB',
'TaskID': 'INTEGER NOT NULL',
'TransformationID': 'INTEGER NOT NULL'},
'PrimaryKey': ['TransformationID', 'TaskID'],
'Engine': 'InnoDB'
}
if 'TransformationFileTasks' not in tablesInDB:
tablesD['TransformationFileTasks'] = {'Fields': {'FileID': 'INTEGER NOT NULL',
'TaskID': 'INTEGER NOT NULL',
'TransformationID': 'INTEGER NOT NULL'},
'PrimaryKey': ['TransformationID', 'FileID', 'TaskID'],
'Engine': 'InnoDB'
}
if 'TransformationFiles' not in tablesInDB:
tablesD['TransformationFiles'] = {'Fields': { 'TransformationID': 'INTEGER NOT NULL',
'FileID': 'INTEGER NOT NULL',
'TaskID': 'INTEGER',
'ErrorCount': 'INT(4) NOT NULL DEFAULT 0',
'InsertedTime': 'DATETIME',
'LastUpdate': 'DATETIME',
'Status': 'VARCHAR(32) DEFAULT "Unused"',
'TargetSE': 'VARCHAR(255) DEFAULT "Unknown"',
'UsedSE': 'VARCHAR(255) DEFAULT "Unknown"'},
'Indexes': {'Status': ['Status'],
'TransformationID': ['TransformationID']},
'PrimaryKey': ['TransformationID', 'FileID'],
'Engine': 'InnoDB'
}
if 'TransformationInputDataQuery' not in tablesInDB:
tablesD['TransformationInputDataQuery'] = {'Fields': {'ParameterName': 'VARCHAR(512) NOT NULL',
'ParameterType': 'VARCHAR(8) NOT NULL',
'ParameterValue': 'BLOB NOT NULL',
'TransformationID': 'INTEGER NOT NULL'},
'PrimaryKey': ['TransformationID', 'ParameterName'],
'Engine': 'InnoDB'
}
if 'TransformationLog' not in tablesInDB:
tablesD['TransformationLog'] = {'Fields': {'Author': 'VARCHAR(255) NOT NULL DEFAULT "Unknown"',
'Message': 'VARCHAR(255) NOT NULL',
'MessageDate': 'DATETIME NOT NULL',
'TransformationID': 'INTEGER NOT NULL'},
'Indexes': {'MessageDate': ['MessageDate'],
'TransformationID': ['TransformationID']},
'Engine': 'InnoDB'
}
if 'TransformationTasks' not in tablesInDB:
## The engine of that table must stay MyISAM, because the addTaskToTransformation needs
# that when inserting a row, the LAST_INSERT_ID returns the last task ID for
# the given transformation. This only works because TaskID is NOT an INDEX
# and because the engine is MyISAM.
tablesD['TransformationTasks'] = {'Fields': {'CreationTime': 'DATETIME NOT NULL',
'ExternalID': "char(16) DEFAULT ''",
'ExternalStatus': "char(16) DEFAULT 'Created'",
'LastUpdateTime': 'DATETIME NOT NULL',
'TargetSE': "char(255) DEFAULT 'Unknown'",
'TaskID': 'INTEGER NOT NULL AUTO_INCREMENT',
'TransformationID': 'INTEGER NOT NULL'},
'Indexes': {'ExternalStatus': ['ExternalStatus']},
'PrimaryKey': ['TransformationID', 'TaskID'],
'Engine': 'MyISAM'
}
if 'Transformations' not in tablesInDB:
tablesD['Transformations'] = {'Fields': {'AgentType': "CHAR(32) DEFAULT 'Manual'",
'AuthorDN': 'VARCHAR(255) NOT NULL',
'AuthorGroup': 'VARCHAR(255) NOT NULL',
'Body': 'LONGBLOB',
'CreationDate': 'DATETIME',
'Description': 'VARCHAR(255)',
'EventsPerTask': 'INT NOT NULL DEFAULT 0',
'FileMask': 'VARCHAR(255)',
'GroupSize': 'INT NOT NULL DEFAULT 1',
'InheritedFrom': 'INTEGER DEFAULT 0',
'LastUpdate': 'DATETIME',
'LongDescription': 'BLOB',
'MaxNumberOfTasks': 'INT NOT NULL DEFAULT 0',
'Plugin': "CHAR(32) DEFAULT 'None'",
'Status': "CHAR(32) DEFAULT 'New'",
'TransformationFamily': "varchar(64) default '0'",
'TransformationGroup': "varchar(64) NOT NULL default 'General'",
'TransformationID': 'INTEGER NOT NULL AUTO_INCREMENT',
'TransformationName': 'VARCHAR(255) NOT NULL',
'Type': "CHAR(32) DEFAULT 'Simulation'"},
'Indexes': {'TransformationName': ['TransformationName']},
'PrimaryKey': ['TransformationID'],
'Engine': 'InnoDB'
}
if 'TransformationCounters' not in tablesInDB:
tablesD['TransformationCounters'] = {'Fields': {'TransformationID' : "INTEGER NOT NULL"},
'PrimaryKey': ['TransformationID'],
'Engine': 'InnoDB'
}
##Get from the CS the list of columns names
for status in self.tasksStatuses + self.fileStatuses:
tablesD['TransformationCounters']['Fields'][status] = 'INTEGER DEFAULT 0'
return S_OK( tablesD )
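# --- Hedged illustration (editorial addition) ---
# A plugin DB extending this class would add its own table definition to
# the dictionary returned above; the structure consumed by
# self._createTables is sketched here with a hypothetical table name.
#
#   tablesD['MyPluginTable'] = {'Fields': {'TransformationID': 'INTEGER NOT NULL',
#                                          'PluginValue': 'VARCHAR(255)'},
#                               'PrimaryKey': ['TransformationID'],
#                               'Engine': 'InnoDB'}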
def __initializeDB( self ):
''' Initialize: create tables if needed
'''
tablesToBeCreated = self._generateTables()
if not tablesToBeCreated[ 'OK' ]:
return tablesToBeCreated
tablesToBeCreated = tablesToBeCreated[ 'Value' ]
if tablesToBeCreated:
gLogger.verbose( "Creating tables %s" % ( ', '.join( tablesToBeCreated.keys() ) ) )
result = self._createTables( tablesToBeCreated )
if result['OK'] and result['Value']:
self.log.info( "TransformationDB: created tables %s" % result['Value'] )
if not result['OK']:
return result
#Get the available counters
retVal = self._query( "EXPLAIN TransformationCounters" )
if not retVal[ 'OK' ]:
return retVal
TSCounterFields = [ t[0] for t in retVal[ 'Value' ] ]
for status in list( set( self.tasksStatuses + self.fileStatuses ) - set( TSCounterFields ) ):
altertable = "ALTER TABLE TransformationCounters ADD COLUMN `%s` INTEGER DEFAULT 0" % status
retVal = self._update( altertable )
if not retVal['OK']:
return retVal
# This is here to ensure full compatibility between different versions of the MySQL DB schema
self.isTransformationTasksInnoDB = True
res = self._query( "SELECT Engine FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'TransformationTasks'" )
if not res['OK']:
raise RuntimeError, res['Message']
else:
engine = res['Value'][0][0]
if engine.lower() != 'innodb':
self.isTransformationTasksInnoDB = False
return S_OK()
def getName( self ):
""" Get the database name
"""
return self.dbName
###########################################################################
#
# These methods manipulate the Transformations table
#
def addTransformation( self, transName, description, longDescription, authorDN, authorGroup, transType,
plugin, agentType, fileMask,
transformationGroup = 'General',
groupSize = 1,
inheritedFrom = 0,
body = '',
maxTasks = 0,
eventsPerTask = 0,
addFiles = True,
connection = False ):
''' Add new transformation definition including its input streams
'''
connection = self.__getConnection( connection )
res = self._getTransformationID( transName, connection = connection )
if res['OK']:
return S_ERROR( "Transformation with name %s already exists with TransformationID = %d" % ( transName,
res['Value'] ) )
elif res['Message'] != "Transformation does not exist":
return res
self.lock.acquire()
res = self._escapeString( body )
if not res['OK']:
self.lock.release()
return S_ERROR( "Failed to parse the transformation body" )
body = res['Value']
req = "INSERT INTO Transformations (TransformationName,Description,LongDescription, \
CreationDate,LastUpdate,AuthorDN,AuthorGroup,Type,Plugin,AgentType,\
FileMask,Status,TransformationGroup,GroupSize,\
InheritedFrom,Body,MaxNumberOfTasks,EventsPerTask)\
VALUES ('%s','%s','%s',\
UTC_TIMESTAMP(),UTC_TIMESTAMP(),'%s','%s','%s','%s','%s',\
'%s','New','%s',%d,\
%d,%s,%d,%d);" % \
( transName, description, longDescription,
authorDN, authorGroup, transType, plugin, agentType,
fileMask, transformationGroup, groupSize,
inheritedFrom, body, maxTasks, eventsPerTask )
res = self._update( req, connection )
if not res['OK']:
self.lock.release()
return res
transID = res['lastRowId']
self.lock.release()
# If the transformation has an input data specification
if fileMask:
self.filters.append( ( transID, re.compile( fileMask ) ) )
if inheritedFrom:
res = self._getTransformationID( inheritedFrom, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for parent transformation: %s, now deleting" % res['Message'] )
return self.deleteTransformation( transID, connection = connection )
originalID = res['Value']
# FIXME: this is not the right place to change status information, and in general the whole should not be here
res = self.setTransformationParameter( originalID, 'Status', 'Completing',
author = authorDN, connection = connection )
if not res['OK']:
gLogger.error( "Failed to update parent transformation status: %s, now deleting" % res['Message'] )
return self.deleteTransformation( transID, connection = connection )
message = 'Creation of the derived transformation (%d)' % transID
self.__updateTransformationLogging( originalID, message, authorDN, connection = connection )
res = self.getTransformationFiles( condDict = {'TransformationID':originalID}, connection = connection )
if not res['OK']:
gLogger.error( "Could not get transformation files: %s, now deleting" % res['Message'] )
return self.deleteTransformation( transID, connection = connection )
if res['Records']:
res = self.__insertExistingTransformationFiles( transID, res['Records'], connection = connection )
if not res['OK']:
gLogger.error( "Could not insert files: %s, now deleting" % res['Message'] )
return self.deleteTransformation( transID, connection = connection )
if addFiles and fileMask:
self.__addExistingFiles( transID, connection = connection )
message = "Created transformation %d" % transID
self.__updateTransformationLogging( transID, message, authorDN, connection = connection )
return S_OK( transID )
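# --- Hedged usage sketch (editorial addition) ---
# Typical creation of a transformation; every value below is illustrative.
#
#   db = TransformationDB()
#   res = db.addTransformation( 'MCProduction_1', 'short description',
#                               'long description',
#                               '/DC=org/O=dirac/CN=someuser', 'dirac_prod',
#                               'MCSimulation', 'Standard', 'Manual',
#                               fileMask = '', groupSize = 10 )
#   if res['OK']:
#     transID = res['Value']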
def getTransformations( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
orderAttribute = None, limit = None, extraParams = False, offset = None, connection = False ):
''' Get parameters of all the Transformations with support for the web standard structure '''
connection = self.__getConnection( connection )
req = "SELECT %s FROM Transformations %s" % ( intListToString( self.TRANSPARAMS ),
self.buildCondition( condDict, older, newer, timeStamp,
orderAttribute, limit, offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = []
transDict = {}
count = 0
for item in row:
transDict[self.TRANSPARAMS[count]] = item
count += 1
if type( item ) not in [IntType, LongType]:
rList.append( str( item ) )
else:
rList.append( item )
webList.append( rList )
if extraParams:
res = self.__getAdditionalParameters( transDict['TransformationID'], connection = connection )
if not res['OK']:
return res
transDict.update( res['Value'] )
resultList.append( transDict )
result = S_OK( resultList )
result['Records'] = webList
result['ParameterNames'] = copy.copy( self.TRANSPARAMS )
return result
def getTransformation( self, transName, extraParams = False, connection = False ):
'''Get Transformation definition and parameters of Transformation identified by TransformationID
'''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getTransformations( condDict = {'TransformationID':transID}, extraParams = extraParams,
connection = connection )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Transformation %s did not exist" % transName )
return S_OK( res['Value'][0] )
def getTransformationParameters( self, transName, parameters, connection = False ):
''' Get the requested parameters for a supplied transformation '''
if type( parameters ) in StringTypes:
parameters = [parameters]
extraParams = False
for param in parameters:
if not param in self.TRANSPARAMS:
extraParams = True
res = self.getTransformation( transName, extraParams = extraParams, connection = connection )
if not res['OK']:
return res
transParams = res['Value']
paramDict = {}
for reqParam in parameters:
if not reqParam in transParams.keys():
return S_ERROR( "Parameter %s not defined for transformation" % reqParam )
paramDict[reqParam] = transParams[reqParam]
if len( paramDict ) == 1:
return S_OK( paramDict[reqParam] )
return S_OK( paramDict )
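# --- Hedged usage sketch (editorial addition) ---
# Requesting several parameters returns a dict; a single parameter name
# returns the bare value. `db` and `transID` are assumed to exist.
#
#   res = db.getTransformationParameters( transID, ['Status', 'Plugin'] )
#   if res['OK']:
#     print( res['Value'] )  # e.g. {'Status': 'Active', 'Plugin': 'Standard'}
#   res = db.getTransformationParameters( transID, 'Status' )
#   # res['Value'] is the status string itself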
def getTransformationWithStatus( self, status, connection = False ):
''' Gets a list of the transformations with the supplied status '''
req = "SELECT TransformationID FROM Transformations WHERE Status = '%s';" % status
res = self._query( req, conn = connection )
if not res['OK']:
return res
transIDs = []
for tupleIn in res['Value']:
transIDs.append( tupleIn[0] )
return S_OK( transIDs )
def getTableDistinctAttributeValues( self, table, attributes, selectDict, older = None, newer = None,
timeStamp = None, connection = False ):
tableFields = { 'Transformations' : self.TRANSPARAMS,
'TransformationTasks' : self.TASKSPARAMS,
'TransformationFiles' : self.TRANSFILEPARAMS}
possibleFields = tableFields.get( table, [] )
return self.__getTableDistinctAttributeValues( table, possibleFields, attributes, selectDict, older, newer,
timeStamp, connection = connection )
def __getTableDistinctAttributeValues( self, table, possible, attributes, selectDict, older, newer,
timeStamp, connection = False ):
connection = self.__getConnection( connection )
attributeValues = {}
for attribute in attributes:
if possible and ( not attribute in possible ):
return S_ERROR( 'Requested attribute (%s) does not exist in table %s' % ( attribute, table ) )
res = self.getDistinctAttributeValues( table, attribute, condDict = selectDict, older = older, newer = newer,
timeStamp = timeStamp, connection = connection )
if not res['OK']:
return S_ERROR( 'Failed to serve values for attribute %s in table %s' % ( attribute, table ) )
attributeValues[attribute] = res['Value']
return S_OK( attributeValues )
def __updateTransformationParameter( self, transID, paramName, paramValue, connection = False ):
if not ( paramName in self.mutable ):
return S_ERROR( "Can not update the '%s' transformation parameter" % paramName )
if paramName == 'Body':
res = self._escapeString( paramValue )
if not res['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = res['Value']
req = "UPDATE Transformations SET %s=%s, LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % ( paramName,
paramValue,
transID )
return self._update( req, connection )
req = "UPDATE Transformations SET %s='%s', LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % ( paramName,
paramValue,
transID )
return self._update( req, connection )
def _getTransformationID( self, transName, connection = False ):
''' Method returns ID of transformation with the name=<name> '''
try:
transName = long( transName )
cmd = "SELECT TransformationID from Transformations WHERE TransformationID=%d;" % transName
except:
if type( transName ) not in StringTypes:
return S_ERROR( "Transformation should ID or name" )
cmd = "SELECT TransformationID from Transformations WHERE TransformationName='%s';" % transName
res = self._query( cmd, connection )
if not res['OK']:
gLogger.error( "Failed to obtain transformation ID for transformation", "%s:%s" % ( transName, res['Message'] ) )
return res
elif not res['Value']:
gLogger.verbose( "Transformation %s does not exist" % ( transName ) )
return S_ERROR( "Transformation does not exist" )
return S_OK( res['Value'][0][0] )
def __deleteTransformation( self, transID, connection = False ):
req = "DELETE FROM Transformations WHERE TransformationID=%d;" % transID
return self._update( req, connection )
def __updateFilters( self, connection = False ):
''' Get filters for all defined input streams in all the transformations.
'''
resultList = []
# Define the general filter first
self.database_name = self.__class__.__name__
value = Operations().getValue( 'InputDataFilter/%sFilter' % self.database_name, '' )
if value:
refilter = re.compile( value )
resultList.append( ( 0, refilter ) )
# Per transformation filters
req = "SELECT TransformationID,FileMask FROM Transformations;"
res = self._query( req, connection )
if not res['OK']:
return res
for transID, mask in res['Value']:
if mask:
refilter = re.compile( mask )
resultList.append( ( transID, refilter ) )
self.filters = resultList
return S_OK( resultList )
def __filterFile( self, lfn, filters = None ):
'''Pass the input file through a supplied filter or those currently active '''
result = []
if filters:
for transID, refilter in filters:
if refilter.search( lfn ):
result.append( transID )
else:
for transID, refilter in self.filters:
if refilter.search( lfn ):
result.append( transID )
return result
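# --- Hedged illustration (editorial addition) ---
# Filters are (transID, compiled_regex) pairs; __filterFile returns the ID
# of every transformation whose FileMask matches the LFN. The mask and LFN
# below are illustrative.
#
#   filters = [ ( 42, re.compile( r'/lhcb/MC/.*\.dst' ) ) ]
#   matched = self.__filterFile( '/lhcb/MC/2012/sim_0001.dst', filters )
#   # matched == [42]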
###########################################################################
#
# These methods manipulate the AdditionalParameters tables
#
def setTransformationParameter( self, transName, paramName, paramValue, author = '', connection = False ):
''' Add a parameter for the supplied transformations '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
message = ''
if paramName in self.TRANSPARAMS:
res = self.__updateTransformationParameter( transID, paramName, paramValue, connection = connection )
if res['OK']:
escaped = self._escapeString( paramValue )
if not escaped['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = escaped['Value']
message = '%s updated to %s' % ( paramName, paramValue )
else:
res = self.__addAdditionalTransformationParameter( transID, paramName, paramValue, connection = connection )
if res['OK']:
message = 'Added additional parameter %s' % paramName
if message:
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def getAdditionalParameters( self, transName, connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__getAdditionalParameters( transID, connection = connection )
def deleteTransformationParameter( self, transName, paramName, author = '', connection = False ):
''' Delete a parameter from the additional parameters table '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if paramName in self.TRANSPARAMS:
return S_ERROR( "Can not delete core transformation parameter" )
res = self.__deleteTransformationParameters( transID, parameters = [paramName], connection = connection )
if not res['OK']:
return res
self.__updateTransformationLogging( transID, 'Removed additional parameter %s' % paramName, author,
connection = connection )
return res
def __addAdditionalTransformationParameter( self, transID, paramName, paramValue, connection = False ):
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d AND ParameterName='%s'" % ( transID, paramName )
res = self._update( req, connection )
if not res['OK']:
return res
res = self._escapeString( paramValue )
if not res['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = res['Value']
paramType = 'StringType'
if type( paramValue ) in [IntType, LongType]:
paramType = 'IntType'
req = "INSERT INTO AdditionalParameters (%s) VALUES (%s,'%s',%s,'%s');" % ( ', '.join( self.ADDITIONALPARAMETERS ),
transID, paramName,
paramValue, paramType )
return self._update( req, connection )
def __getAdditionalParameters( self, transID, connection = False ):
req = "SELECT %s FROM AdditionalParameters WHERE TransformationID = %d" % ( ', '.join( self.ADDITIONALPARAMETERS ),
transID )
res = self._query( req, connection )
if not res['OK']:
return res
paramDict = {}
for transID, parameterName, parameterValue, parameterType in res['Value']:
parameterType = eval( parameterType )
if parameterType in [IntType, LongType]:
parameterValue = int( parameterValue )
paramDict[parameterName] = parameterValue
return S_OK( paramDict )
def __deleteTransformationParameters( self, transID, parameters = [], connection = False ):
    ''' Remove the parameters associated with a transformation '''
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d" % transID
if parameters:
req = "%s AND ParameterName IN (%s);" % ( req, stringListToString( parameters ) )
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the TransformationFiles table
#
def addFilesToTransformation( self, transName, lfns, connection = False ):
''' Add a list of LFNs to the transformation directly '''
if not lfns:
return S_ERROR( 'Zero length LFN list' )
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
failed = {}
successful = {}
missing = []
fileIDsValues = set( fileIDs.values() )
for lfn in lfns:
if lfn not in fileIDsValues:
missing.append( lfn )
if missing:
res = self.__addDataFiles( missing, connection = connection )
if not res['OK']:
return res
for lfn, fileID in res['Value'].items():
fileIDs[fileID] = lfn
# must update the fileIDs
if fileIDs:
res = self.__addFilesToTransformation( transID, fileIDs.keys(), connection = connection )
if not res['OK']:
return res
for fileID in fileIDs.keys():
lfn = fileIDs[fileID]
successful[lfn] = "Present"
if fileID in res['Value']:
successful[lfn] = "Added"
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def getTransformationFiles( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
orderAttribute = None, limit = None, offset = None, connection = False ):
''' Get files for the supplied transformations with support for the web standard structure '''
connection = self.__getConnection( connection )
req = "SELECT %s FROM TransformationFiles" % ( intListToString( self.TRANSFILEPARAMS ) )
originalFileIDs = {}
if condDict or older or newer:
if condDict.has_key( 'LFN' ):
lfns = condDict.pop( 'LFN' )
if type( lfns ) in StringTypes:
lfns = [lfns]
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
originalFileIDs, _ignore = res['Value']
condDict['FileID'] = originalFileIDs.keys()
for val in condDict.itervalues():
if not val:
return S_OK( [] )
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit,
offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
transFiles = res['Value']
fileIDs = [int( row[1] ) for row in transFiles]
webList = []
resultList = []
if not fileIDs:
originalFileIDs = {}
else:
if not originalFileIDs:
res = self.__getLfnsForFileIDs( fileIDs, connection = connection )
if not res['OK']:
return res
originalFileIDs = res['Value'][1]
for row in transFiles:
lfn = originalFileIDs[row[1]]
# Prepare the structure for the web
rList = [lfn]
fDict = {}
fDict['LFN'] = lfn
      for count, item in enumerate( row ):
        fDict[self.TRANSFILEPARAMS[count]] = item
        if type( item ) not in [IntType, LongType]:
          rList.append( str( item ) )
        else:
          rList.append( item )
webList.append( rList )
resultList.append( fDict )
result = S_OK( resultList )
# result['LFNs'] = originalFileIDs.values()
result['Records'] = webList
result['ParameterNames'] = ['LFN'] + self.TRANSFILEPARAMS
return result
def getFileSummary( self, lfns, connection = False ):
''' Get file status summary in all the transformations '''
connection = self.__getConnection( connection )
condDict = {'LFN':lfns}
res = self.getTransformationFiles( condDict = condDict, connection = connection )
if not res['OK']:
return res
resDict = {}
for fileDict in res['Value']:
lfn = fileDict['LFN']
transID = fileDict['TransformationID']
if not resDict.has_key( lfn ):
resDict[lfn] = {}
if not resDict[lfn].has_key( transID ):
resDict[lfn][transID] = {}
resDict[lfn][transID] = fileDict
failedDict = {}
for lfn in lfns:
if not resDict.has_key( lfn ):
failedDict[lfn] = 'Did not exist in the Transformation database'
return S_OK( {'Successful':resDict, 'Failed':failedDict} )
def setFileStatusForTransformation( self, transID, fileStatusDict = {}, connection = False ):
""" Set file status for the given transformation, based on
fileStatusDict {fileID_A: 'statusA', fileID_B: 'statusB', ...}
The ErrorCount is incremented automatically here
"""
if not fileStatusDict:
return S_OK()
# Building the request with "ON DUPLICATE KEY UPDATE"
req = "INSERT INTO TransformationFiles (TransformationID, FileID, Status, ErrorCount, LastUpdate) VALUES "
updatesList = []
for fileID, status in fileStatusDict.items():
updatesList.append( "(%d, %d, '%s', 0, UTC_TIMESTAMP())" % ( transID, fileID, status ) )
req += ','.join( updatesList )
req += " ON DUPLICATE KEY UPDATE Status=VALUES(Status),ErrorCount=ErrorCount+1,LastUpdate=VALUES(LastUpdate)"
return self._update( req, connection )
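  # Illustrative sketch (not part of the original code): for transID = 5 and
  # fileStatusDict = {11: 'Processed', 12: 'Problematic'} (made-up values),
  # the request built above would read:
  #
  #   INSERT INTO TransformationFiles (TransformationID, FileID, Status, ErrorCount, LastUpdate)
  #   VALUES (5, 11, 'Processed', 0, UTC_TIMESTAMP()),(5, 12, 'Problematic', 0, UTC_TIMESTAMP())
  #   ON DUPLICATE KEY UPDATE Status=VALUES(Status),ErrorCount=ErrorCount+1,LastUpdate=VALUES(LastUpdate)
  #
  # so new (transformation, file) pairs are inserted with ErrorCount 0, while
  # existing pairs get their Status refreshed and their ErrorCount incremented.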
def getTransformationStats( self, transName, connection = False ):
''' Get number of files in Transformation Table for each status '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getCounters( 'TransformationFiles', ['TransformationID', 'Status'], {'TransformationID':transID} )
if not res['OK']:
return res
statusDict = {}
total = 0
for attrDict, count in res['Value']:
status = attrDict['Status']
if not re.search( '-', status ):
statusDict[status] = count
total += count
statusDict['Total'] = total
return S_OK( statusDict )
def getTransformationFilesCount( self, transName, field, selection = {}, connection = False ):
''' Get the number of files in the TransformationFiles table grouped by the supplied field '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
selection['TransformationID'] = transID
if field not in self.TRANSFILEPARAMS:
return S_ERROR( "Supplied field not in TransformationFiles table" )
res = self.getCounters( 'TransformationFiles', ['TransformationID', field], selection )
if not res['OK']:
return res
countDict = {}
total = 0
for attrDict, count in res['Value']:
countDict[attrDict[field]] = count
total += count
countDict['Total'] = total
return S_OK( countDict )
def __addFilesToTransformation( self, transID, fileIDs, connection = False ):
req = "SELECT FileID from TransformationFiles"
req = req + " WHERE TransformationID = %d AND FileID IN (%s);" % ( transID, intListToString( fileIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
for tupleIn in res['Value']:
fileIDs.remove( tupleIn[0] )
if not fileIDs:
return S_OK( [] )
req = "INSERT INTO TransformationFiles (TransformationID,FileID,LastUpdate,InsertedTime) VALUES"
for fileID in fileIDs:
req = "%s (%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP())," % ( req, transID, fileID )
req = req.rstrip( ',' )
res = self._update( req, connection )
if not res['OK']:
return res
return S_OK( fileIDs )
def __addExistingFiles( self, transID, connection = False ):
''' Add files that already exist in the DataFiles table to the transformation specified by the transID
'''
    filters = []
    for tID, _filter in self.filters:
      if tID == transID:
        filters = [( tID, _filter )]
        break
    if not filters:
      return S_ERROR( 'No filters defined for transformation %d' % transID )
res = self.__getAllFileIDs( connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
passFilter = []
for fileID, lfn in fileIDs.items():
if self.__filterFile( lfn, filters ):
passFilter.append( fileID )
return self.__addFilesToTransformation( transID, passFilter, connection = connection )
def __insertExistingTransformationFiles( self, transID, fileTuplesList, connection = False ):
""" Inserting already transformation files in TransformationFiles table (e.g. for deriving transformations)
"""
gLogger.info( "Inserting %d files in TransformationFiles" % len( fileTuplesList ) )
# splitting in various chunks, in case it is too big
for fileTuples in breakListIntoChunks( fileTuplesList, 10000 ):
gLogger.verbose( "Adding first %d files in TransformationFiles (out of %d)" % ( len( fileTuples ),
len( fileTuplesList ) ) )
req = "INSERT INTO TransformationFiles (TransformationID,Status,TaskID,FileID,TargetSE,UsedSE,LastUpdate) VALUES"
candidates = False
for ft in fileTuples:
_lfn, originalID, fileID, status, taskID, targetSE, usedSE, _errorCount, _lastUpdate, _insertTime = ft[:10]
if status not in ( 'Unused', 'Removed' ):
candidates = True
if not re.search( '-', status ):
status = "%s-inherited" % status
if taskID:
taskID = str( int( originalID ) ).zfill( 8 ) + '_' + str( int( taskID ) ).zfill( 8 )
req = "%s (%d,'%s','%s',%d,'%s','%s',UTC_TIMESTAMP())," % ( req, transID, status, taskID,
fileID, targetSE, usedSE )
if not candidates:
continue
req = req.rstrip( "," )
res = self._update( req, connection )
if not res['OK']:
return res
return S_OK()
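  # Illustrative sketch (not part of the original code): when deriving a
  # transformation, a file that was 'Processed' under task 7 of original
  # transformation 12 is re-inserted with (made-up values):
  #
  #   status = 'Processed-inherited'
  #   taskID = '00000012_00000007'   # originalID and taskID zero-padded to 8 digits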
def __assignTransformationFile( self, transID, taskID, se, fileIDs, connection = False ):
''' Make necessary updates to the TransformationFiles table for the newly created task
'''
req = "UPDATE TransformationFiles SET TaskID='%d',UsedSE='%s',Status='Assigned',LastUpdate=UTC_TIMESTAMP()"
req = ( req + " WHERE TransformationID = %d AND FileID IN (%s);" ) % ( taskID, se, transID, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to assign file to task", res['Message'] )
fileTuples = []
for fileID in fileIDs:
fileTuples.append( ( "(%d,%d,%d)" % ( transID, fileID, taskID ) ) )
req = "INSERT INTO TransformationFileTasks (TransformationID,FileID,TaskID) VALUES %s" % ','.join( fileTuples )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to assign file to task", res['Message'] )
return res
def __setTransformationFileStatus( self, fileIDs, status, connection = False ):
req = "UPDATE TransformationFiles SET Status = '%s' WHERE FileID IN (%s);" % ( status, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to update file status", res['Message'] )
return res
def __setTransformationFileUsedSE( self, fileIDs, usedSE, connection = False ):
req = "UPDATE TransformationFiles SET UsedSE = '%s' WHERE FileID IN (%s);" % ( usedSE, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to update file usedSE", res['Message'] )
return res
def __resetTransformationFile( self, transID, taskID, connection = False ):
req = "UPDATE TransformationFiles SET TaskID=NULL, UsedSE='Unknown', Status='Unused'\
WHERE TransformationID = %d AND TaskID=%d;" % ( transID, taskID )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to reset transformation file", res['Message'] )
return res
def __deleteTransformationFiles( self, transID, connection = False ):
    ''' Remove the files associated with a transformation '''
req = "DELETE FROM TransformationFiles WHERE TransformationID = %d;" % transID
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to delete transformation files", res['Message'] )
return res
###########################################################################
#
# These methods manipulate the TransformationFileTasks table
#
def __deleteTransformationFileTask( self, transID, taskID, connection = False ):
    ''' Delete the entries associated with a given task of a given transformation
        from the TransformationFileTasks table
    '''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID=%d AND TaskID=%d" % ( transID, taskID )
return self._update( req, connection )
def __deleteTransformationFileTasks( self, transID, connection = False ):
''' Remove all associations between files, tasks and a transformation '''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID = %d;" % transID
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to delete transformation files/task history", res['Message'] )
return res
###########################################################################
#
# These methods manipulate the TransformationTasks table
#
def getTransformationTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationTime',
orderAttribute = None, limit = None, inputVector = False,
offset = None, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT %s FROM TransformationTasks %s" % ( intListToString( self.TASKSPARAMS ),
self.buildCondition( condDict, older, newer, timeStamp,
orderAttribute, limit, offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = []
taskDict = {}
      for count, item in enumerate( row ):
        taskDict[self.TASKSPARAMS[count]] = item
        if type( item ) not in [IntType, LongType]:
          rList.append( str( item ) )
        else:
          rList.append( item )
webList.append( rList )
if inputVector:
taskDict['InputVector'] = ''
taskID = taskDict['TaskID']
transID = taskDict['TransformationID']
res = self.getTaskInputVector( transID, taskID )
if res['OK']:
if res['Value'].has_key( taskID ):
taskDict['InputVector'] = res['Value'][taskID]
resultList.append( taskDict )
result = S_OK( resultList )
result['Records'] = webList
result['ParameterNames'] = self.TASKSPARAMS
return result
def getTasksForSubmission( self, transName, numTasks = 1, site = '', statusList = ['Created'],
older = None, newer = None, connection = False ):
''' Select tasks with the given status (and site) for submission '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
condDict = {"TransformationID":transID}
if statusList:
condDict["ExternalStatus"] = statusList
if site:
numTasks = 0
res = self.getTransformationTasks( condDict = condDict, older = older, newer = newer,
timeStamp = 'CreationTime', orderAttribute = None, limit = numTasks,
inputVector = True, connection = connection )
if not res['OK']:
return res
tasks = res['Value']
# Now prepare the tasks
resultDict = {}
    for taskDict in tasks:
      # numTasks is 0 when a site is specified; treat that as "no limit"
      if numTasks and len( resultDict ) >= numTasks:
        break
taskDict['Status'] = taskDict.pop( 'ExternalStatus' )
taskDict['InputData'] = taskDict.pop( 'InputVector' )
taskDict.pop( 'LastUpdateTime' )
taskDict.pop( 'CreationTime' )
taskDict.pop( 'ExternalID' )
taskID = taskDict['TaskID']
resultDict[taskID] = taskDict
if site:
resultDict[taskID]['Site'] = site
return S_OK( resultDict )
def deleteTasks( self, transName, taskIDbottom, taskIDtop, author = '', connection = False ):
''' Delete tasks with taskID range in transformation '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
for taskID in range( taskIDbottom, taskIDtop + 1 ):
res = self.__removeTransformationTask( transID, taskID, connection = connection )
if not res['OK']:
return res
message = "Deleted tasks from %d to %d" % ( taskIDbottom, taskIDtop )
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def reserveTask( self, transName, taskID, connection = False ):
''' Reserve the taskID from transformation for submission '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__checkUpdate( "TransformationTasks", "ExternalStatus", "Reserved", {"TransformationID":transID,
"TaskID":taskID},
connection = connection )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( 'Failed to set Reserved status for job %d - already Reserved' % int( taskID ) )
# The job is reserved, update the time stamp
res = self.setTaskStatus( transID, taskID, 'Reserved', connection = connection )
if not res['OK']:
return S_ERROR( 'Failed to set Reserved status for job %d - failed to update the time stamp' % int( taskID ) )
return S_OK()
def setTaskStatusAndWmsID( self, transName, taskID, status, taskWmsID, connection = False ):
''' Set status and ExternalID for job with taskID in production with transformationID
'''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__setTaskParameterValue( transID, taskID, 'ExternalStatus', status, connection = connection )
if not res['OK']:
return res
return self.__setTaskParameterValue( transID, taskID, 'ExternalID', taskWmsID, connection = connection )
def setTaskStatus( self, transName, taskID, status, connection = False ):
''' Set status for job with taskID in production with transformationID '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if type( taskID ) != ListType:
taskIDList = [taskID]
else:
taskIDList = list( taskID )
for taskID in taskIDList:
res = self.__setTaskParameterValue( transID, taskID, 'ExternalStatus', status, connection = connection )
if not res['OK']:
return res
return S_OK()
def getTransformationTaskStats( self, transName = '', connection = False ):
''' Returns dictionary with number of jobs per status for the given production.
'''
connection = self.__getConnection( connection )
if transName:
res = self._getTransformationID( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for transformation", res['Message'] )
return res
res = self.getCounters( 'TransformationTasks', ['ExternalStatus'], {'TransformationID':res['Value']},
connection = connection )
else:
res = self.getCounters( 'TransformationTasks', ['ExternalStatus', 'TransformationID'], {},
connection = connection )
if not res['OK']:
return res
statusDict = {}
total = 0
for attrDict, count in res['Value']:
status = attrDict['ExternalStatus']
statusDict[status] = count
total += count
statusDict['TotalCreated'] = total
return S_OK( statusDict )
def __setTaskParameterValue( self, transID, taskID, paramName, paramValue, connection = False ):
req = "UPDATE TransformationTasks SET %s='%s', LastUpdateTime=UTC_TIMESTAMP()" % ( paramName, paramValue )
req = req + " WHERE TransformationID=%d AND TaskID=%d;" % ( transID, taskID )
return self._update( req, connection )
def __deleteTransformationTasks( self, transID, connection = False ):
''' Delete all the tasks from the TransformationTasks table for transformation with TransformationID
'''
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d" % transID
return self._update( req, connection )
def __deleteTransformationTask( self, transID, taskID, connection = False ):
''' Delete the task from the TransformationTasks table for transformation with TransformationID
'''
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d AND TaskID=%d" % ( transID, taskID )
return self._update( req, connection )
####################################################################
#
# These methods manipulate the TransformationInputDataQuery table
#
def createTransformationInputDataQuery( self, transName, queryDict, author = '', connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__addInputDataQuery( transID, queryDict, author = author, connection = connection )
def __addInputDataQuery( self, transID, queryDict, author = '', connection = False ):
res = self.getTransformationInputDataQuery( transID, connection = connection )
if res['OK']:
return S_ERROR( "Input data query already exists for transformation" )
if res['Message'] != 'No InputDataQuery found for transformation':
return res
for parameterName in sorted( queryDict.keys() ):
parameterValue = queryDict[parameterName]
if not parameterValue:
continue
parameterType = 'String'
if type( parameterValue ) in [ListType, TupleType]:
if type( parameterValue[0] ) in [IntType, LongType]:
parameterType = 'Integer'
parameterValue = [str( x ) for x in parameterValue]
parameterValue = ';;;'.join( parameterValue )
else:
if type( parameterValue ) in [IntType, LongType]:
parameterType = 'Integer'
parameterValue = str( parameterValue )
if type( parameterValue ) == DictType:
parameterType = 'Dict'
parameterValue = str( parameterValue )
res = self.insertFields( 'TransformationInputDataQuery', ['TransformationID', 'ParameterName',
'ParameterValue', 'ParameterType'],
[transID, parameterName, parameterValue, parameterType], conn = connection )
if not res['OK']:
message = 'Failed to add input data query'
self.deleteTransformationInputDataQuery( transID, connection = connection )
break
else:
message = 'Added input data query'
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
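  # Illustrative sketch (not part of the original code): a query dict such as
  # {'RunNumber': [101, 102], 'FileType': 'RAW'} (made-up values) is stored as
  # two rows of TransformationInputDataQuery:
  #
  #   ('RunNumber', '101;;;102', 'Integer')
  #   ('FileType',  'RAW',       'String')
  #
  # getTransformationInputDataQuery() below reverses this ';;;' encoding.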
def deleteTransformationInputDataQuery( self, transName, author = '', connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "DELETE FROM TransformationInputDataQuery WHERE TransformationID=%d;" % transID
res = self._update( req, connection )
if not res['OK']:
return res
if res['Value']:
# Add information to the transformation logging
message = 'Deleted input data query'
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def getTransformationInputDataQuery( self, transName, connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT ParameterName,ParameterValue,ParameterType FROM TransformationInputDataQuery"
req = req + " WHERE TransformationID=%d;" % transID
res = self._query( req, connection )
if not res['OK']:
return res
queryDict = {}
for parameterName, parameterValue, parameterType in res['Value']:
if re.search( ';;;', str( parameterValue ) ):
parameterValue = parameterValue.split( ';;;' )
if parameterType == 'Integer':
parameterValue = [int( x ) for x in parameterValue]
elif parameterType == 'Integer':
parameterValue = int( parameterValue )
elif parameterType == 'Dict':
parameterValue = eval( parameterValue )
queryDict[parameterName] = parameterValue
if not queryDict:
return S_ERROR( "No InputDataQuery found for transformation" )
return S_OK( queryDict )
###########################################################################
#
# These methods manipulate the TaskInputs table
#
def getTaskInputVector( self, transName, taskID, connection = False ):
''' Get input vector for the given task '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if type( taskID ) != ListType:
taskIDList = [taskID]
else:
taskIDList = list( taskID )
taskString = ','.join( ["'" + str( x ) + "'" for x in taskIDList] )
req = "SELECT TaskID,InputVector FROM TaskInputs WHERE TaskID in (%s) AND TransformationID='%d';" % ( taskString,
transID )
res = self._query( req )
inputVectorDict = {}
if res['OK'] and res['Value']:
for row in res['Value']:
inputVectorDict[row[0]] = row[1]
return S_OK( inputVectorDict )
def __insertTaskInputs( self, transID, taskID, lfns, connection = False ):
vector = str.join( ';', lfns )
fields = ['TransformationID', 'TaskID', 'InputVector']
values = [transID, taskID, vector]
res = self.insertFields( 'TaskInputs', fields, values, connection )
if not res['OK']:
gLogger.error( "Failed to add input vector to task %d" % taskID )
return res
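  # Illustrative sketch (not part of the original code): the input vector is
  # simply the task's LFNs joined with ';', e.g. with made-up LFNs
  #
  #   self.__insertTaskInputs( 5, 3, ['/lhcb/a.raw', '/lhcb/b.raw'] )
  #
  # stores InputVector = '/lhcb/a.raw;/lhcb/b.raw' for task 3 of transformation 5.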
def __deleteTransformationTaskInputs( self, transID, taskID = 0, connection = False ):
    ''' Delete all the task inputs from the TaskInputs table for the transformation
        with this TransformationID, optionally restricted to a single task
    '''
req = "DELETE FROM TaskInputs WHERE TransformationID=%d" % transID
if taskID:
req = "%s AND TaskID=%d" % ( req, int( taskID ) )
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the TransformationLog table
#
def __updateTransformationLogging( self, transName, message, authorDN, connection = False ):
''' Update the Transformation log table with any modifications
'''
if not authorDN:
res = getProxyInfo( False, False )
if res['OK']:
authorDN = res['Value']['subject']
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "INSERT INTO TransformationLog (TransformationID,Message,Author,MessageDate)"
req = req + " VALUES (%s,'%s','%s',UTC_TIMESTAMP());" % ( transID, message, authorDN )
return self._update( req, connection )
def getTransformationLogging( self, transName, connection = False ):
''' Get logging info from the TransformationLog table
'''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT TransformationID, Message, Author, MessageDate FROM TransformationLog"
req = req + " WHERE TransformationID=%s ORDER BY MessageDate;" % ( transID )
res = self._query( req )
if not res['OK']:
return res
transList = []
for transID, message, authorDN, messageDate in res['Value']:
transDict = {}
transDict['TransformationID'] = transID
transDict['Message'] = message
transDict['AuthorDN'] = authorDN
transDict['MessageDate'] = messageDate
transList.append( transDict )
return S_OK( transList )
def __deleteTransformationLog( self, transID, connection = False ):
''' Remove the entries in the transformation log for a transformation
'''
req = "DELETE FROM TransformationLog WHERE TransformationID=%d;" % transID
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the DataFiles table
#
def __getAllFileIDs( self, connection = False ):
''' Get all the fileIDs for the supplied list of lfns
'''
req = "SELECT LFN,FileID FROM DataFiles;"
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[fileID] = lfn
lfns[lfn] = fileID
return S_OK( ( fids, lfns ) )
def __getFileIDsForLfns( self, lfns, connection = False ):
""" Get file IDs for the given list of lfns
warning: if the file is not present, we'll see no errors
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE LFN in (%s);" % ( stringListToString( lfns ) )
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[fileID] = lfn
lfns[lfn] = fileID
return S_OK( ( fids, lfns ) )
def __getLfnsForFileIDs( self, fileIDs, connection = False ):
''' Get lfns for the given list of fileIDs
'''
req = "SELECT LFN,FileID FROM DataFiles WHERE FileID in (%s);" % stringListToString( fileIDs )
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[lfn] = fileID
lfns[fileID] = lfn
return S_OK( ( fids, lfns ) )
def __addDataFiles( self, lfns, connection = False ):
''' Add a file to the DataFiles table and retrieve the FileIDs
'''
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
_fileIDs, lfnFileIDs = res['Value']
for lfn in lfns:
if not lfn in lfnFileIDs.keys():
req = "INSERT INTO DataFiles (LFN,Status) VALUES ('%s','New');" % lfn
res = self._update( req, connection )
if not res['OK']:
return res
lfnFileIDs[lfn] = res['lastRowId']
return S_OK( lfnFileIDs )
def __setDataFileStatus( self, fileIDs, status, connection = False ):
''' Set the status of the supplied files
'''
req = "UPDATE DataFiles SET Status = '%s' WHERE FileID IN (%s);" % ( status, intListToString( fileIDs ) )
return self._update( req, connection )
###########################################################################
#
# Fill in / get the counters
#
def updateTransformationCounters( self, counterDict, connection = False ):
''' Insert in the table or update the transformation counters given a dict
'''
##first, check that all the keys in this dict are among those expected
for key in self.tasksStatuses + self.fileStatuses:
if key not in counterDict.keys():
return S_ERROR( "Key %s not in the table" % key )
res = self.getFields( "TransformationCounters", ['TransformationID'],
condDict = {'TransformationID' : counterDict['TransformationID']}, conn = connection )
if not res['OK']:
return res
if len( res['Value'] ): #if the Transformation is already in:
res = self.updateFields( "TransformationCounters",
condDict = {'TransformationID' : counterDict['TransformationID']},
updateDict = counterDict,
conn = connection )
if not res['OK']:
return res
else:
res = self.insertFields( "TransformationCounters", inDict = counterDict, conn = connection )
if not res['OK']:
return res
return S_OK()
def getTransformationsCounters( self, TransIDs, connection = False ):
''' Get all the counters for the given transformationIDs
'''
fields = ['TransformationID'] + self.tasksStatuses + self.fileStatuses
res = self.getFields( "TransformationCounters",
outFields = fields, condDict = {'TransformationID' : TransIDs},
conn = connection )
if not res['OK']:
return res
resList = []
for row in res['Value']:
resList.append( dict( zip( fields, row ) ) )
return S_OK( resList )
###########################################################################
#
# These methods manipulate multiple tables
#
def addTaskForTransformation( self, transID, lfns = [], se = 'Unknown', connection = False ):
''' Create a new task with the supplied files for a transformation.
'''
res = self._getConnectionTransID( connection, transID )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
    # Make sure that all the supplied LFNs are known to the database for the supplied transformation
fileIDs = []
if lfns:
res = self.getTransformationFiles( condDict = {'TransformationID':transID, 'LFN':lfns}, connection = connection )
if not res['OK']:
return res
foundLfns = set()
for fileDict in res['Value']:
fileIDs.append( fileDict['FileID'] )
lfn = fileDict['LFN']
if fileDict['Status'] in self.allowedStatusForTasks:
foundLfns.add( lfn )
else:
gLogger.error( "Supplied file not in %s status but %s" % ( self.allowedStatusForTasks, fileDict['Status'] ), lfn )
unavailableLfns = set( lfns ) - foundLfns
if unavailableLfns:
gLogger.error( "Supplied files not found for transformation", sorted( unavailableLfns ) )
return S_ERROR( "Not all supplied files available in the transformation database" )
# Insert the task into the jobs table and retrieve the taskID
self.lock.acquire()
req = "INSERT INTO TransformationTasks(TransformationID, ExternalStatus, ExternalID, TargetSE,"
req = req + " CreationTime, LastUpdateTime)"
req = req + " VALUES (%s,'%s','%d','%s', UTC_TIMESTAMP(), UTC_TIMESTAMP());" % ( transID, 'Created', 0, se )
res = self._update( req, connection )
if not res['OK']:
self.lock.release()
gLogger.error( "Failed to publish task for transformation", res['Message'] )
return res
    # With InnoDB, the TaskID is computed by a trigger, which sets the connection-local
    # variable @last to the last inserted TaskID (for multi-row inserts, the first newly
    # inserted TaskID). The trigger TaskID_Generator must be present in the InnoDB schema
    # (it is defined in TransformationDB.sql).
if self.isTransformationTasksInnoDB:
res = self._query( "SELECT @last;", connection )
else:
res = self._query( "SELECT LAST_INSERT_ID();", connection )
self.lock.release()
if not res['OK']:
return res
taskID = int( res['Value'][0][0] )
gLogger.verbose( "Published task %d for transformation %d." % ( taskID, transID ) )
# If we have input data then update their status, and taskID in the transformation table
if lfns:
res = self.__insertTaskInputs( transID, taskID, lfns, connection = connection )
if not res['OK']:
self.__removeTransformationTask( transID, taskID, connection = connection )
return res
res = self.__assignTransformationFile( transID, taskID, se, fileIDs, connection = connection )
if not res['OK']:
self.__removeTransformationTask( transID, taskID, connection = connection )
return res
return S_OK( taskID )
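  # Illustrative sketch (not part of the original code): creating a task for
  # transformation 5 with two made-up input LFNs, then reading back its ID:
  #
  #   res = db.addTaskForTransformation( 5, lfns = ['/lhcb/a.raw', '/lhcb/b.raw'], se = 'CERN-RAW' )
  #   if res['OK']:
  #     taskID = res['Value']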
def extendTransformation( self, transName, nTasks, author = '', connection = False ):
    ''' Extend a SIMULATION-type transformation by nTasks tasks
    '''
connection = self.__getConnection( connection )
res = self.getTransformation( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get transformation details", res['Message'] )
return res
transType = res['Value']['Type']
transID = res['Value']['TransformationID']
extendableProds = Operations().getValue( 'Transformations/ExtendableTransfTypes', ['Simulation', 'MCSimulation'] )
if transType.lower() not in [ep.lower() for ep in extendableProds]:
return S_ERROR( 'Can not extend non-SIMULATION type production' )
taskIDs = []
for _task in range( nTasks ):
res = self.addTaskForTransformation( transID, connection = connection )
if not res['OK']:
return res
taskIDs.append( res['Value'] )
# Add information to the transformation logging
message = 'Transformation extended by %d tasks' % nTasks
self.__updateTransformationLogging( transName, message, author, connection = connection )
return S_OK( taskIDs )
def cleanTransformation( self, transName, author = '', connection = False ):
''' Clean the transformation specified by name or id '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__deleteTransformationFileTasks( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationFiles( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationTaskInputs( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationTasks( transID, connection = connection )
if not res['OK']:
return res
self.__updateTransformationLogging( transID, "Transformation Cleaned", author, connection = connection )
return S_OK( transID )
def deleteTransformation( self, transName, author = '', connection = False ):
''' Remove the transformation specified by name or id '''
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.cleanTransformation( transID, author = author, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationLog( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationParameters( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformation( transID, connection = connection )
if not res['OK']:
return res
res = self.__updateFilters()
if not res['OK']:
return res
return S_OK()
def __removeTransformationTask( self, transID, taskID, connection = False ):
res = self.__deleteTransformationTaskInputs( transID, taskID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationFileTask( transID, taskID, connection = connection )
if not res['OK']:
return res
res = self.__resetTransformationFile( transID, taskID, connection = connection )
if not res['OK']:
return res
return self.__deleteTransformationTask( transID, taskID, connection = connection )
def __checkUpdate( self, table, param, paramValue, selectDict = {}, connection = False ):
''' Check whether the update will perform an update '''
req = "UPDATE %s SET %s = '%s'" % ( table, param, paramValue )
if selectDict:
req = "%s %s" % ( req, self.buildCondition( selectDict ) )
return self._update( req, connection )
def __getConnection( self, connection ):
if connection:
return connection
res = self._getConnection()
if res['OK']:
return res['Value']
gLogger.warn( "Failed to get MySQL connection", res['Message'] )
return connection
def _getConnectionTransID( self, connection, transName ):
connection = self.__getConnection( connection )
res = self._getTransformationID( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for transformation", res['Message'] )
return res
transID = res['Value']
resDict = {'Connection':connection, 'TransformationID':transID}
return S_OK( resDict )
####################################################################################
#
# This part should correspond to the DIRAC Standard File Catalog interface
#
####################################################################################
def exists( self, lfns, connection = False ):
''' Check the presence of the lfn in the TransformationDB DataFiles table
'''
gLogger.info( "TransformationDB.exists: Attempting to determine existence of %s files." % len( lfns ) )
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
failed = {}
successful = {}
fileIDsValues = set( fileIDs.values() )
for lfn in lfns:
if not lfn in fileIDsValues:
successful[lfn] = False
else:
successful[lfn] = True
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
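  # Illustrative sketch (not part of the original code), with made-up LFNs:
  #
  #   db.exists( ['/lhcb/known.raw', '/lhcb/unknown.raw'] )
  #   # -> S_OK( {'Successful': {'/lhcb/known.raw': True,
  #   #                          '/lhcb/unknown.raw': False},
  #   #           'Failed': {}} )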
def addFile( self, fileDicts, force = False, connection = False ):
""" Add a new file to the TransformationDB together with its first replica.
        In the input dict, the only mandatory entries are the PFN and SE
"""
gLogger.info( "TransformationDB.addFile: Attempting to add %s files." % len( fileDicts.keys() ) )
successful = {}
failed = {}
# Determine which files pass the filters and are to be added to transformations
transFiles = {}
filesToAdd = []
for lfn in fileDicts.keys():
fileTrans = self.__filterFile( lfn )
if not ( fileTrans or force ):
successful[lfn] = True
else:
filesToAdd.append( lfn )
for trans in fileTrans:
if not transFiles.has_key( trans ):
transFiles[trans] = []
transFiles[trans].append( lfn )
# Add the files to the DataFiles and Replicas tables
if filesToAdd:
connection = self.__getConnection( connection )
res = self.__addDataFiles( filesToAdd, connection = connection )
if not res['OK']:
return res
lfnFileIDs = res['Value']
for lfn in filesToAdd:
if lfnFileIDs.has_key( lfn ):
successful[lfn] = True
else:
failed[lfn] = True
# Add the files to the transformations
# TODO: THIS SHOULD BE TESTED WITH A TRANSFORMATION WITH A FILTER
    for transID, lfns in transFiles.items():
      fileIDs = []
      for lfn in lfns:
        if lfnFileIDs.has_key( lfn ):
          fileIDs.append( lfnFileIDs[lfn] )
      if fileIDs:
        res = self.__addFilesToTransformation( transID, fileIDs, connection = connection )
        if not res['OK']:
          gLogger.error( "Failed to add files to transformation", "%s %s" % ( transID, res['Message'] ) )
          # mark every file of this transformation, not only the last one iterated
          for lfn in lfns:
            if lfnFileIDs.has_key( lfn ):
              successful.pop( lfn, None )
              failed[lfn] = True
        else:
          for lfn in lfns:
            if lfnFileIDs.has_key( lfn ):
              successful[lfn] = True
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeFile( self, lfns, connection = False ):
    ''' Remove the files specified by the given LFNs from the TransformationDB
    '''
gLogger.info( "TransformationDB.removeFile: Attempting to remove %s files." % len( lfns ) )
failed = {}
successful = {}
connection = self.__getConnection( connection )
if not lfns:
return S_ERROR( "No LFNs supplied" )
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, lfnFilesIDs = res['Value']
for lfn in lfns:
if not lfnFilesIDs.has_key( lfn ):
successful[lfn] = 'File did not exist'
if fileIDs:
res = self.__setTransformationFileStatus( fileIDs.keys(), 'Deleted', connection = connection )
if not res['OK']:
return res
res = self.__setDataFileStatus( fileIDs.keys(), 'Deleted', connection = connection )
if not res['OK']:
return S_ERROR( "TransformationDB.removeFile: Failed to remove files." )
for lfn in lfnFilesIDs.keys():
if not failed.has_key( lfn ):
successful[lfn] = True
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def addDirectory( self, path, force = False ):
''' Adds all the files stored in a given directory in file catalog '''
gLogger.info( "TransformationDB.addDirectory: Attempting to populate %s." % path )
res = pythonCall( 30, self.__addDirectory, path, force )
if not res['OK']:
gLogger.error( "Failed to invoke addDirectory with shifter proxy" )
return res
return res['Value']
def __addDirectory( self, path, force ):
res = setupShifterProxyInEnv( "ProductionManager" )
if not res['OK']:
return S_OK( "Failed to setup shifter proxy" )
catalog = FileCatalog()
start = time.time()
res = catalog.listDirectory( path )
if not res['OK']:
gLogger.error( "TransformationDB.addDirectory: Failed to get files. %s" % res['Message'] )
return res
if not path in res['Value']['Successful']:
gLogger.error( "TransformationDB.addDirectory: Failed to get files." )
return res
gLogger.info( "TransformationDB.addDirectory: Obtained %s files in %s seconds." % ( path, time.time() - start ) )
successful = []
failed = []
for lfn in res['Value']['Successful'][path]["Files"].keys():
res = self.addFile( {lfn:{}}, force = force )
if not res['OK']:
failed.append( lfn )
continue
if not lfn in res['Value']['Successful']:
failed.append( lfn )
else:
successful.append( lfn )
return {"OK":True, "Value": len( res['Value']['Successful'] ), "Successful":successful, "Failed": failed }
|
sposs/DIRAC
|
TransformationSystem/DB/TransformationDB.py
|
Python
|
gpl-3.0
| 81,020
|
[
"DIRAC"
] |
dc46d728126b505f1bdfae41fbd5c81450c215fc01b3d74ffbee869f2c1e551e
|
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
import sys
import os
from optparse import OptionParser
def process (file_name):
"""
cette methode prend en entree le nom d'un fichier vtk cree par le
module HEXABLOCK de Salome, et remplace les "," par des "."
"""
wr_data = ""
with open(file_name, 'r') as f:
read_data = f.read()
wr_data = read_data.replace(',', '.')
pass
with open(file_name, 'w') as f:
f.write(wr_data)
pass
pass
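# Illustrative example (not part of the original script): a VTK data line such
# as
#   0,5 1,25 2,0
# becomes
#   0.5 1.25 2.0
# after process() has rewritten the file.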
if __name__ == '__main__':
usage = "usage: %prog file_name"
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
if len(args) != 1:
        parser.print_usage()
sys.exit(1)
file_name = os.path.join(os.environ['TMP'], args[0])
print file_name
process(file_name)
sys.exit()
|
FedoraScientific/salome-hexablock
|
doc/pyplots/process_vtk.py
|
Python
|
lgpl-2.1
| 1,650
|
[
"VTK"
] |
e073f48c769a31bf52aa97b0ea7e234dc0b3ea8f9cd42051914c93ddfbc788ec
|
from __future__ import division
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
from brocclib.assign import Assigner
from brocclib.get_xml import NcbiEutils
from brocclib.taxonomy_db import NcbiLocal, TAXONOMY_DB_FP
from brocclib.parse import iter_fasta, read_blast
'''
Created on Aug 29, 2011
@author: Serena, Kyle
'''
CONSENSUS_THRESHOLDS = [
("species", 0.6),
("genus", 0.6),
("family", 0.6),
("order", 0.7),
("class", 0.8),
("phylum", 0.9),
("kingdom", 0.9),
("superkingdom", 0.9),
]
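# Illustrative note (not from the original sources): with these thresholds, a
# genus-level call needs more than 60% of the surviving votes to agree; e.g.
# if 10 hits remain after filtering, 7 of them sharing a genus satisfies the
# 0.6 threshold, while higher ranks such as phylum demand 90% agreement.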
def parse_args(argv=None):
parser = optparse.OptionParser(description=(
"BROCC uses a consensus method determine taxonomic assignments from "
"BLAST hits."))
parser.add_option("--min_id", type="float", default=80.0, help=(
"minimum identity required for a db hit to be considered at any "
"level [default: %default]"))
parser.add_option("--min_cover", type="float", default=.7, help=(
"minimum coverage required for a db hit to be considered "
"[default: %default]"))
parser.add_option("--min_species_id", type="float", help=(
"minimum identity required for a db hit to be "
"considered at species level [default: %default]"))
parser.add_option("--min_genus_id", type="float", help=(
"minimum identity required for a db hit to be "
"considered at genus level [default: %default]"))
parser.add_option("--min_winning_votes", type="int", default=4, help=(
"minimum number of votes needed to establish a consensus "
"after removal of generic taxa [default: %default]"))
parser.add_option("--taxonomy_db", default=TAXONOMY_DB_FP, help=(
"location of sqlite3 database holding a local copy of the "
"NCBI taxonomy [default: %default]"))
parser.add_option("-v", "--verbose", action="store_true",
help="output message after every query sequence is classified")
parser.add_option("-i", "--input_fasta_file", dest="fasta_file",
help="input fasta file of query sequences [REQUIRED]")
parser.add_option("-b", "--input_blast_file", dest="blast_file",
help="input blast file [REQUIRED]")
parser.add_option("-o", "--output_directory",
help="output directory [REQUIRED]")
parser.add_option("-a", "--amplicon", help=(
"amplicon being classified, either 'ITS' or '18S'. If this option is "
"not supplied, both --min_species_id and --min_genus_id must be "
"specified"))
opts, args = parser.parse_args(argv)
if opts.amplicon == "ITS":
opts.min_genus_id = 83.05
opts.min_species_id = 95.2
elif opts.amplicon == "18S":
opts.min_genus_id = 96.0
opts.min_species_id = 99.0
elif opts.amplicon:
parser.error("Provided amplicon %s not recognized." % opts.amplicon)
else:
if not (opts.min_species_id and opts.min_genus_id):
parser.error("Must specify --amplicon, or provide both --min_species_id and --min_genus_id.")
return opts
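# Illustrative usage (not part of the original code; the entry-point name is
# assumed here), exercising the options defined above:
#
#   brocc.py -i queries.fasta -b blast_output.txt -o brocc_out -a ITS
#
# or, without a preset amplicon:
#
#   brocc.py -i queries.fasta -b blast_output.txt -o brocc_out \
#            --min_species_id 97.0 --min_genus_id 90.0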
def main(argv=None):
opts = parse_args(argv)
# Configure
if opts.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
if os.path.exists(opts.taxonomy_db):
taxa_db = NcbiLocal(opts.taxonomy_db)
else:
sys.stderr.write(
"Did not detect a local copy of the NCBI taxonomy.\n"
"Using NCBI EUtils to get taxonomic info instead.\n\n"
"The NCBI taxonomy can be dowloaded with the script "
"create_local_taxonomy_db.py\n"
"This will greatly speed up the assignment process.\n"
)
taxa_db = NcbiEutils()
consensus_thresholds = [t for _, t in CONSENSUS_THRESHOLDS]
assigner = Assigner(
opts.min_cover, opts.min_species_id, opts.min_genus_id, opts.min_id,
consensus_thresholds, opts.min_winning_votes, taxa_db)
# Read input files
with open(opts.fasta_file) as f:
sequences = list(iter_fasta(f))
with open(opts.blast_file) as f:
blast_hits = read_blast(f)
# Open output files
if not os.path.exists(opts.output_directory):
os.mkdir(opts.output_directory)
standard_taxa_file = open(
os.path.join(opts.output_directory, "Standard_Taxonomy.txt"), "w")
log_file = open(os.path.join(opts.output_directory, "brocc.log"), "w")
log_file.write(
"Sequence\tWinner_Votes\tVotes_Cast\tGenerics_Pruned\tLevel\t"
"Classification\n")
# Set up log for voting details
vote_logger = logging.getLogger("brocc.votes")
vote_logger.setLevel(logging.DEBUG)
vote_handler = logging.FileHandler(os.path.join(opts.output_directory, "voting_log.txt"))
vote_handler.setLevel(logging.DEBUG)
vote_formatter = logging.Formatter('%(message)s')
vote_handler.setFormatter(vote_formatter)
vote_logger.addHandler(vote_handler)
vote_logger.propagate = False
# Do the work
for name, seq in sequences:
seq_hits = blast_hits[name]
# This is where the magic happens
a = assigner.assign(name, seq, seq_hits)
standard_taxa_file.write(a.format_for_standard_taxonomy())
log_file.write(a.format_for_log())
# Close output files
standard_taxa_file.close()
log_file.close()
def run_comparison(argv=None):
p = optparse.OptionParser()
p.add_option("--keep_temp", action="store_true")
opts, args = p.parse_args(argv)
base_file_paths = [os.path.splitext(fp)[0] for fp in args]
for base_fp in set(base_file_paths):
fasta_fp = "{0}.fasta".format(base_fp)
blast_fp = "{0}_blast.txt".format(base_fp)
output_dir = tempfile.mkdtemp(prefix="brocc")
brocc_args = [
"-i", fasta_fp, "-b", blast_fp, "-o", output_dir,
"-a" "ITS"]
main(brocc_args)
base_filename = os.path.basename(base_fp)
voting_src = os.path.join(output_dir, "voting_log.txt")
voting_dest = "{0}_voting_log.txt".format(base_filename)
shutil.copyfile(voting_src, voting_dest)
observed_assignments_fp = os.path.join(
output_dir, "Standard_Taxonomy.txt")
expected_assignments_fp = "{0}_assignments.txt".format(base_fp)
diff_fp = "{0}_diff.txt".format(base_filename)
with open(diff_fp, "w") as f:
subprocess.call(
["diff", observed_assignments_fp, expected_assignments_fp],
stdout=f,
)
if not opts.keep_temp:
shutil.rmtree(output_dir)
|
kylebittinger/brocc
|
brocclib/command.py
|
Python
|
gpl-3.0
| 6,648
|
[
"BLAST"
] |
5634e7cca073e9adb98d2669e21b5aa96c4b129f130d1408c6b823409ec91a31
|
from collections import defaultdict
import datetime
import restkit.errors
import time
import numbers
from django.utils.datastructures import SortedDict
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from corehq.pillows.base import restore_property_dict
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.fixtures.models import FixtureDataType, FixtureDataItem
from corehq.apps.reports.standard import ProjectReportParametersMixin, DatespanMixin, CustomProjectReport
from corehq.apps.reports.datatables import (DataTablesColumn, NumericColumn,
DataTablesHeader)
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports import util
from hsph.reports import HSPHSiteDataMixin
from hsph.fields import AllocatedToFilter, IHForCHFField, DCTLToFIDAFilter
from corehq.apps.api.es import ReportCaseES
from django.utils.translation import ugettext as _
from datetime import date, timedelta
def numeric_cell(val):
if isinstance(val, numbers.Number):
return util.format_datatables_data(text=val, sort_key=val)
else:
return val
def short_date_format(date):
return date.strftime('%d-%b')
def datestring_minus_days(datestring, days):
date = datetime.datetime.strptime(datestring[:10], '%Y-%m-%d')
return (date - datetime.timedelta(days=days)).isoformat()
def get_user_site_map(domain):
user_site_map = defaultdict(list)
data_type = FixtureDataType.by_domain_tag(domain, 'site').first()
fixtures = FixtureDataItem.by_data_type(domain, data_type.get_id)
for fixture in fixtures:
for user in fixture.get_users():
user_site_map[user._id].append(fixture.fields_without_attributes['site_id'])
return user_site_map
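# Illustrative sketch (not part of the original code): the returned mapping
# associates each user id with the site_ids of the fixtures naming that user,
# e.g. with made-up ids:
#
#   {'user_abc123': ['site_01', 'site_07'],
#    'user_def456': ['site_02']}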
def get_facility_map(domain):
from hsph.fields import FacilityField
facilities = FacilityField.getFacilities(domain=domain)
return dict([(facility.get('val'), facility.get('text'))
for facility in facilities])
class FIDAPerformanceReport(GenericTabularReport, CustomProjectReport,
ProjectReportParametersMixin, DatespanMixin):
"""
BetterBirth Shared Dropbox/Updated ICT package/Reporting Specs/FIDA Performance_v2.xls
"""
name = "FIDA Performance"
slug = "hsph_fida_performance"
fields = [
'corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'hsph.fields.DCTLToFIDAFilter',
]
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("Name of FIDA"),
DataTablesColumn("Name of Team Leader"),
NumericColumn("No. of Facilities Covered"),
NumericColumn("No. of Facility Visits"),
NumericColumn("No. of Facilities with less than 2 visits/week"),
DataTablesColumn("Average Time per Birth Record"),
NumericColumn("Average Number of Birth Records Uploaded Per Visit"),
NumericColumn("No. of Births with no phone details"),
NumericColumn("No. of Births with no address"),
NumericColumn("No. of Births with no contact info"),
NumericColumn("No. of Home Visits assigned"),
NumericColumn("No. of Home Visits completed"),
NumericColumn("No. of Home Visits completed per day"),
NumericColumn("No. of Home Visits Open at 30 Days"))
@property
def rows(self):
user_data = DCTLToFIDAFilter.get_user_data(
self.request_params, domain=self.domain)
self.override_user_ids = user_data['leaf_user_ids']
user_site_map = get_user_site_map(self.domain)
# ordered keys with default values
keys = SortedDict([
('fidaName', None),
('teamLeaderName', None),
('facilitiesCovered', 0),
('facilityVisits', 0),
('facilitiesVisitedLessThanTwicePerWeek', None),
('avgBirthRegistrationTime', None),
('birthRegistrationsPerVisit', None),
('noPhoneDetails', 0),
('noAddress', 0),
('noContactInfo', 0),
('homeVisitsAssigned', 0),
('homeVisitsCompleted', 0),
('homeVisitsCompletedPerDay', 0),
('homeVisitsOpenAt30Days', 0)
])
rows = []
db = get_db()
startdate = self.datespan.startdate_param_utc[:10]
enddate = self.datespan.enddate_param_utc[:10]
to_date = lambda string: datetime.datetime.strptime(
string, "%Y-%m-%d").date()
weeks = (to_date(enddate) - to_date(startdate)).days // 7
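        # Illustrative note (not from the original sources): for a 28-day
        # report window, weeks = 4, so below a facility counts as visited
        # "less than twice per week" when it has fewer than 8 visits.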
for user in self.users:
user_id = user.user_id
row = db.view('hsph/fida_performance',
startkey=["all", self.domain, user_id, startdate],
endkey=["all", self.domain, user_id, enddate],
reduce=True,
wrapper=lambda r: r['value']
).first() or {}
workingDays = db.view('hsph/fida_performance',
startkey=["workingDays", self.domain, user_id, startdate],
endkey=["workingDays", self.domain, user_id, enddate],
reduce=False,
wrapper=lambda r: r['value']['workingDay']).all()
workingDays = set(workingDays)
row['fidaName'] = self.table_cell(
user.raw_username, user.username_in_report)
dctl = user_data['user_parent_map'][user['user_id']]
row['teamLeaderName'] = self.table_cell(
dctl.raw_username,
dctl.username_in_report)
row['facilitiesCovered'] = len(user_site_map[user_id])
row['facilitiesVisitedLessThanTwicePerWeek'] = len(
filter(
lambda count: count < weeks * 2,
[row.get(site_id + 'Visits', 0)
for site_id in user_site_map[user_id]]
)
)
if row.get('avgBirthRegistrationTime'):
row['avgBirthRegistrationTime'] = time.strftime(
'%M:%S', time.gmtime(row['avgBirthRegistrationTime']))
else:
row['avgBirthRegistrationTime'] = None
if workingDays:
row['homeVisitsCompletedPerDay'] = round(
row.get('homeVisitsCompleted', 0) / float(len(workingDays)), 1)
else:
row['homeVisitsCompletedPerDay'] = 0.0
# These queries can fail if startdate is less than N days before
# enddate. We just catch and supply a default value.
try:
row['homeVisitsAssigned'] = db.view('hsph/fida_performance',
startkey=['assigned', self.domain, user_id, startdate],
endkey=['assigned', self.domain, user_id,
datestring_minus_days(enddate, 21)],
reduce=True,
wrapper=lambda r: r['value']['homeVisitsAssigned']
).first()
except restkit.errors.RequestFailed:
row['homeVisitsAssigned'] = 0
try:
row['homeVisitsOpenAt30Days'] = db.view('hsph/fida_performance',
startkey=['open30Days', self.domain, user_id, startdate],
endkey=['open30Days', self.domain, user_id,
datestring_minus_days(enddate, 29)],
reduce=True,
wrapper=lambda r: r['value']['homeVisitsOpenAt30Days']
).first()
except restkit.errors.RequestFailed:
row['homeVisitsOpenAt30Days'] = 0
list_row = []
for k, v in keys.items():
val = row.get(k, v)
if val is None:
val = '---'
list_row.append(numeric_cell(val))
rows.append(list_row)
return rows
class FacilityRegistrationsReport(GenericTabularReport, CustomProjectReport,
ProjectReportParametersMixin, DatespanMixin,
HSPHSiteDataMixin):
"""
BetterBirth Shared Dropbox/Updated ICT package/Reporting Specs/Facility
Registrations_v2_ss.xls
"""
name = "Facility Registrations"
slug = "hsph_facility_registrations"
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'hsph.fields.DCTLToFIDAFilter',
'hsph.fields.SiteField']
@property
@memoized
def facility_name_map(self):
from hsph.fields import FacilityField
facilities = FacilityField.getFacilities(domain=self.domain)
return dict([(facility.get('val'), facility.get('text'))
for facility in facilities])
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("Facility"),
DataTablesColumn("FIDA"),
DataTablesColumn("Team Leader"),
NumericColumn("No. of visits by FIDA"),
NumericColumn("No. of birth registrations"),
NumericColumn("No. of births with no phone details"),
NumericColumn("No. of births with no address"),
NumericColumn("No. of births with no contact info"))
@property
def rows(self):
user_data = DCTLToFIDAFilter.get_user_data(
self.request_params, domain=self.domain)
self.override_user_ids = user_data['leaf_user_ids']
db = get_db()
site_map = self.selected_site_map or self.site_map
        # hack: this report treats IHF and CHF facilities identically, so
        # the two lists are simply concatenated
facilities = IHForCHFField.get_selected_facilities(
site_map, domain=self.domain)
facilities = facilities['ihf'] + facilities['chf']
rows = []
for user in self.users:
for site_id in facilities:
key = [self.domain, user.user_id, site_id]
data = db.view('hsph/facility_registrations',
startkey=key + [self.datespan.startdate_param_utc],
endkey=key + [self.datespan.enddate_param_utc],
reduce=True,
wrapper=lambda r: r['value']
).first()
if data:
dctl = user_data['user_parent_map'][user['user_id']]
rows.append([
self.facility_name_map[site_id],
self.table_cell(
user.raw_username,
user.username_in_report),
self.table_cell(
dctl.raw_username,
dctl.username_in_report),
numeric_cell(data.get('facilityVisits', 0)),
numeric_cell(data.get('birthRegistrations', 0)),
numeric_cell(data.get('noPhoneDetails', 0)),
numeric_cell(data.get('noAddress', 0)),
numeric_cell(data.get('noContactInfo', 0)),
])
return rows
class HSPHCaseDisplay(CaseDisplay):
@property
@memoized
def _date_admission(self):
return self.parse_date(self.case['date_admission'])
@property
def region(self):
try:
return self.report.get_region_name(self.case['region_id'])
except AttributeError:
return ""
@property
def district(self):
try:
return self.report.get_district_name(
self.case['region_id'], self.case['district_id'])
except AttributeError:
return ""
@property
def site(self):
try:
return self.report.get_site_name(
self.case['region_id'], self.case['district_id'],
self.case['site_number'])
except AttributeError:
return ""
@property
def patient_id(self):
return self.case.get('patient_id', '')
@property
def status(self):
return "Closed" if self.case['closed'] else "Open"
@property
def mother_name(self):
return self.case.get('name_mother', '')
@property
def date_admission(self):
return short_date_format(self._date_admission)
@property
def address(self):
return self.case.get('house_address', '')
@property
@memoized
def allocated_to(self):
# this logic is duplicated for elasticsearch in CaseReport.case_filter
UNKNOWN = "Unknown"
CALL_CENTER = "Call Center"
FIELD = "Field"
if self.case['closed']:
if 'closed_by' not in self.case:
return UNKNOWN
if self.case['closed_by'] in ("cati", "cati_tl"):
return CALL_CENTER
elif self.case['closed_by'] in ("fida", "field_manager"):
return FIELD
else:
return UNKNOWN
else:
today = datetime.datetime.now()
if today <= self._date_admission + datetime.timedelta(days=21):
return CALL_CENTER
else:
return FIELD
@property
def allocated_start(self):
try:
delta = datetime.timedelta(
days=8 if self.allocated_to == "Call Center" else 21)
return short_date_format(self._date_admission + delta)
except AttributeError:
return ""
@property
def allocated_end(self):
try:
delta = datetime.timedelta(
days=20 if self.allocated_to == 'Call Center' else 29)
return short_date_format(self._date_admission + delta)
except AttributeError:
return ""
@property
def outside_allocated_period(self):
if self.case['closed_on']:
compare_date = self.parse_date(
self.case['closed_on']).replace(tzinfo=None)
else:
compare_date = datetime.datetime.utcnow().replace(tzinfo=None)
return 'Yes' if (compare_date - self._date_admission).days > 29 else 'No'
class CaseReport(CaseListReport, CustomProjectReport, HSPHSiteDataMixin,
DatespanMixin):
name = 'Case Report'
slug = 'case_report'
fields = (
'corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'hsph.fields.SiteField',
'hsph.fields.AllocatedToFilter',
'hsph.fields.DCTLToFIDAFilter',
'corehq.apps.reports.filters.select.SelectOpenCloseFilter',
)
default_case_type = 'birth'
@property
@memoized
def case_es(self):
return ReportCaseES(self.domain)
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn("Region"),
DataTablesColumn("District"),
DataTablesColumn("Site"),
DataTablesColumn("Patient ID"),
DataTablesColumn("Status"),
DataTablesColumn("Mother Name"),
DataTablesColumn("Date of Admission"),
DataTablesColumn("Address of Patient"),
DataTablesColumn("Allocated To"),
DataTablesColumn("Allocated Start"),
DataTablesColumn("Allocated End"),
DataTablesColumn("Outside Allocated Period")
)
headers.no_sort = True
return headers
@property
def case_filter(self):
allocated_to = self.request_params.get(AllocatedToFilter.slug, '')
region_id = self.request_params.get('hsph_region', '')
district_id = self.request_params.get('hsph_district', '')
site_num = str(self.request_params.get('hsph_site', ''))
filters = [{
'range': {
'opened_on': {
"from": self.datespan.startdate_param_utc,
"to": self.datespan.enddate_param_utc
}
}
}]
if site_num:
filters.append({'term': {'site_number.#value': site_num.lower()}})
if district_id:
filters.append({'term': {'district_id.#value': district_id.lower()}})
if region_id:
filters.append({'term': {'region_id.#value': region_id.lower()}})
if allocated_to:
max_date_admission = (datetime.date.today() -
datetime.timedelta(days=21)).strftime("%Y-%m-%d")
call_center_filter = {
'or': [
{'and': [
{'term': {'closed': True}},
{'prefix': {'closed_by': 'cati'}}
]},
{'and': [
{'term': {'closed': False}},
{'range': {
'date_admission.#value': {
'from': max_date_admission
}
}}
]}
]
}
if allocated_to == 'cati':
filters.append(call_center_filter)
else:
filters.append({'not': call_center_filter})
return {'and': filters} if filters else {}
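    # Illustrative shape of the filter built above (values elided); with all
    # request parameters present it is a conjunction like:
    #   {'and': [{'range': {'opened_on': {...}}},
    #            {'term': {'site_number.#value': '...'}},
    #            {'term': {'district_id.#value': '...'}},
    #            {'term': {'region_id.#value': '...'}},
    #            <the call-center clause, or {'not': ...} of it>]}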
@property
def shared_pagination_GET_params(self):
user_data = DCTLToFIDAFilter.get_user_data(
self.request_params, domain=self.domain)
self.override_user_ids = user_data['leaf_user_ids']
params = super(CaseReport, self).shared_pagination_GET_params
slugs = [
AllocatedToFilter.slug,
'hsph_region',
'hsph_district',
'hsph_site',
'startdate',
'enddate'
]
for slug in slugs:
params.append({
'name': slug,
'value': self.request_params.get(slug, '')
})
return params
@property
def rows(self):
case_displays = (HSPHCaseDisplay(self, restore_property_dict(self.get_case(case)))
for case in self.es_results['hits'].get('hits', []))
for disp in case_displays:
yield [
disp.region,
disp.district,
disp.site,
disp.patient_id,
disp.status,
disp.case_link,
disp.date_admission,
disp.address,
disp.allocated_to,
disp.allocated_start,
disp.allocated_end,
disp.outside_allocated_period,
]
class FacilityWiseFollowUpReport(GenericTabularReport, DatespanMixin,
HSPHSiteDataMixin, CustomProjectReport,
ProjectReportParametersMixin):
name = "Facility Wise Follow Up Report"
slug = "hsph_facility_wise_follow_up"
fields = ['corehq.apps.reports.filters.dates.DatespanFilter',
'hsph.fields.DCTLToFIDAFilter',
'hsph.fields.SiteField']
show_all_rows_option = True
def _parse_date(self, date_str):
y, m, d = [int(val) for val in date_str.split('-')]
return date(y, m, d)
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn(_("Region")),
DataTablesColumn(_("District")),
DataTablesColumn(_("Site")),
DataTablesColumn(_("Fida Name")),
DataTablesColumn(_("Births")),
DataTablesColumn(_("Open Cases")),
DataTablesColumn(_("Not Yet Open for Follow Up")),
DataTablesColumn(_("Open for CATI Follow Up")),
DataTablesColumn(_("Open for FADA Follow Up")),
DataTablesColumn(_("TT Closed Cases")),
DataTablesColumn(_("Followed Up By Call Center")),
DataTablesColumn(_("Followed Up By Field")),
DataTablesColumn(_("Lost to Follow Up")),
)
@property
def rows(self):
user_data = DCTLToFIDAFilter.get_user_data(
self.request_params, domain=self.domain)
self.override_user_ids = user_data['leaf_user_ids']
startdate = self.datespan.startdate_param_utc[:10]
enddate = self.datespan.enddate_param_utc[:10]
all_keys = get_db().view('hsph/facility_wise_follow_up',
reduce=True,
group=True, group_level=5)
rpt_keys = []
key_start = []
if not self.selected_site_map:
self._selected_site_map = self.site_map
facility_map = get_facility_map(self.domain)
# make sure key elements are strings
report_sites = [[str(item) for item in rk] for rk in self.generate_keys()]
for entry in all_keys:
if entry['key'][0:3] in report_sites:
if self.individual:
if entry['key'][-1] == self.individual:
rpt_keys.append(entry)
elif self.user_ids:
if entry['key'][-1] in self.user_ids:
rpt_keys.append(entry)
else:
rpt_keys = all_keys
def get_view_results(case_type, start_dte, end_dte):
            my_start_key = key_start + [case_type] + [start_dte]
if not start_dte:
my_start_key = key_start + [case_type]
data = get_db().view('hsph/facility_wise_follow_up',
reduce=True,
startkey=my_start_key,
endkey=key_start + [case_type] + [end_dte]
)
            return sum(item['value'] for item in data)
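        # NB: get_view_results closes over key_start, which is rebound to the
        # current report key at the top of the loop below before each call.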
rows = []
today = date.today()
for item in rpt_keys:
key_start = item['key']
region_id, district_id, site_number, site_id, user_id = item['key']
region_name = self.get_region_name(region_id)
district_name = self.get_district_name(region_id, district_id)
site_name = facility_map.get(site_id, site_id)
fida = self.usernames.get(user_id, "")
births = get_view_results('births', startdate, enddate)
open_cases = get_view_results('open_cases', startdate, enddate)
# Not closed and If today < date_admission + 8
start = today - timedelta(days=7)
not_yet_open_for_follow_up = get_view_results('needing_follow_up',
start.strftime('%Y-%m-%d'), today.strftime('%Y-%m-%d'))
# Not closed and if (date_admission + 8) <= today <= (date_admission + 21)
start = today - timedelta(days=21)
end = today - timedelta(days=8)
open_for_cati_follow_up = get_view_results('needing_follow_up',
start.strftime('%Y-%m-%d'), end.strftime('%Y-%m-%d'))
# Not closed and today > date_admission+21
            end = today - timedelta(days=22)
open_for_fada_follow_up = get_view_results('needing_follow_up',
"", end.strftime('%Y-%m-%d'))
closed_cases = get_view_results('closed_cases', startdate, enddate)
lost_to_follow_up = get_view_results('lost_to_follow_up', startdate, enddate)
followed_up_by_call_center = get_view_results(
'followed_up_by_call_center', startdate, enddate)
followed_up_by_field = get_view_results('followed_up_by_field',
startdate, enddate)
rows.append([region_name, district_name, site_name, fida, births,
open_cases, not_yet_open_for_follow_up, open_for_cati_follow_up,
open_for_fada_follow_up, closed_cases, followed_up_by_call_center,
followed_up_by_field, lost_to_follow_up])
return rows
|
SEL-Columbia/commcare-hq
|
custom/_legacy/hsph/reports/field_management.py
|
Python
|
bsd-3-clause
| 24,096
|
[
"VisIt"
] |
65822240ef3f4431197a58e0fc5e227cc457068932d80ff9d4e73def84c60e0c
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
from math import pi
import sys
import numpy as np
from numpy.fft import fftn, ifftn, fft2, ifft2
from gpaw.transformers import Transformer
from gpaw.fd_operators import Laplace, LaplaceA, LaplaceB
from gpaw import PoissonConvergenceError
from gpaw.utilities.blas import axpy
from gpaw.utilities.gauss import Gaussian
from gpaw.utilities.ewald import madelung
from gpaw.utilities.tools import construct_reciprocal
import gpaw.mpi as mpi
import _gpaw
class PoissonSolver:
def __init__(self, nn=3, relax='J', eps=2e-10):
self.relax = relax
self.nn = nn
self.eps = eps
self.charged_periodic_correction = None
self.maxiter = 1000
# Relaxation method
if relax == 'GS':
# Gauss-Seidel
self.relax_method = 1
elif relax == 'J':
# Jacobi
self.relax_method = 2
else:
raise NotImplementedError('Relaxation method %s' % relax)
def get_method(self):
return ['Gauss-Seidel', 'Jacobi'][self.relax_method - 1]
def get_stencil(self):
return self.nn
def set_grid_descriptor(self, gd):
# Should probably be renamed initialize
self.gd = gd
self.dv = gd.dv
gd = self.gd
scale = -0.25 / pi
if self.nn == 'M':
if not gd.orthogonal:
                raise RuntimeError('Cannot use Mehrstellen stencil with '
                                   'non-orthogonal cell.')
self.operators = [LaplaceA(gd, -scale, allocate=False)]
self.B = LaplaceB(gd, allocate=False)
else:
self.operators = [Laplace(gd, scale, self.nn, allocate=False)]
self.B = None
self.interpolators = []
self.restrictors = []
level = 0
self.presmooths = [2]
self.postsmooths = [1]
# Weights for the relaxation,
# only used if 'J' (Jacobi) is chosen as method
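        # (2/3 is the classic damped-Jacobi factor that most effectively
        # smooths the high-frequency error modes of the Laplacian)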
self.weights = [2.0 / 3.0]
while level < 4:
try:
gd2 = gd.coarsen()
except ValueError:
break
self.operators.append(Laplace(gd2, scale, 1, allocate=False))
self.interpolators.append(Transformer(gd2, gd, allocate=False))
self.restrictors.append(Transformer(gd, gd2, allocate=False))
self.presmooths.append(4)
self.postsmooths.append(4)
self.weights.append(1.0)
level += 1
gd = gd2
self.levels = level
def initialize(self, load_gauss=False):
# Should probably be renamed allocate
gd = self.gd
self.rhos = [gd.empty()]
self.phis = [None]
self.residuals = [gd.empty()]
for level in range(self.levels):
gd2 = gd.coarsen()
self.phis.append(gd2.empty())
self.rhos.append(gd2.empty())
self.residuals.append(gd2.empty())
gd = gd2
        assert len(self.phis) == len(self.rhos) == self.levels + 1
        # index of the coarsest grid; well defined even when levels == 0
        level = self.levels
for obj in self.operators + self.interpolators + self.restrictors:
obj.allocate()
if self.B is not None:
self.B.allocate()
self.step = 0.66666666 / self.operators[0].get_diagonal_element()
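        # (the step above is a ~2/3-damped Jacobi update divided by the
        # stencil's diagonal element on the finest grid)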
self.presmooths[level] = 8
self.postsmooths[level] = 8
if load_gauss:
self.load_gauss()
def load_gauss(self):
if not hasattr(self, 'rho_gauss'):
gauss = Gaussian(self.gd)
self.rho_gauss = gauss.get_gauss(0)
self.phi_gauss = gauss.get_gauss_pot(0)
def solve(self, phi, rho, charge=None, eps=None, maxcharge=1e-6,
zero_initial_phi=False):
if eps is None:
eps = self.eps
actual_charge = self.gd.integrate(rho)
background = (actual_charge / self.gd.dv /
self.gd.get_size_of_global_array().prod())
if charge is None:
charge = actual_charge
if abs(charge) <= maxcharge:
# System is charge neutral. Use standard solver
return self.solve_neutral(phi, rho - background, eps=eps)
elif abs(charge) > maxcharge and self.gd.pbc_c.all():
# System is charged and periodic. Subtract a homogeneous
# background charge
if self.charged_periodic_correction is None:
print "+-----------------------------------------------------+"
print "| Calculating charged periodic correction using the |"
print "| Ewald potential from a lattice of probe charges in |"
print "| a homogenous background density |"
print "+-----------------------------------------------------+"
self.charged_periodic_correction = madelung(self.gd.cell_cv)
print "Potential shift will be ", \
self.charged_periodic_correction , "Ha."
# Set initial guess for potential
if zero_initial_phi:
phi[:] = 0.0
else:
phi -= charge * self.charged_periodic_correction
iters = self.solve_neutral(phi, rho - background, eps=eps)
phi += charge * self.charged_periodic_correction
return iters
elif abs(charge) > maxcharge and not self.gd.pbc_c.any():
# The system is charged and in a non-periodic unit cell.
# Determine the potential by 1) subtract a gaussian from the
# density, 2) determine potential from the neutralized density
# and 3) add the potential from the gaussian density.
# Load necessary attributes
self.load_gauss()
# Remove monopole moment
q = actual_charge / np.sqrt(4 * pi) # Monopole moment
rho_neutral = rho - q * self.rho_gauss # neutralized density
# Set initial guess for potential
if zero_initial_phi:
phi[:] = 0.0
else:
axpy(-q, self.phi_gauss, phi) #phi -= q * self.phi_gauss
# Determine potential from neutral density using standard solver
niter = self.solve_neutral(phi, rho_neutral, eps=eps)
# correct error introduced by removing monopole
axpy(q, self.phi_gauss, phi) #phi += q * self.phi_gauss
return niter
else:
            # System is charged with mixed boundary conditions
raise NotImplementedError
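    # The non-periodic charged branch above, in equations: with total charge
    # Q = integrate(rho), the monopole moment is q = Q / sqrt(4 pi), chosen
    # so that (with the normalization of gauss.get_gauss(0)) the density
    # rho - q * rho_gauss integrates to zero. solve_neutral() then gives the
    # potential of the neutralized density, and the analytically known
    # Gaussian potential is added back: phi = phi_neutral + q * phi_gauss.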
def solve_neutral(self, phi, rho, eps=2e-10):
self.phis[0] = phi
if self.B is None:
self.rhos[0][:] = rho
else:
self.B.apply(rho, self.rhos[0])
niter = 1
maxiter = self.maxiter
while self.iterate2(self.step) > eps and niter < maxiter:
niter += 1
if niter == maxiter:
charge = np.sum(rho.ravel()) * self.dv
print 'CHARGE, eps:', charge, eps
msg = 'Poisson solver did not converge in %d iterations!' % maxiter
raise PoissonConvergenceError(msg)
# Set the average potential to zero in periodic systems
if np.alltrue(self.gd.pbc_c):
phi_ave = self.gd.comm.sum(np.sum(phi.ravel()))
N_c = self.gd.get_size_of_global_array()
phi_ave /= np.product(N_c)
phi -= phi_ave
return niter
def iterate(self, step, level=0):
residual = self.residuals[level]
niter = 0
while True:
niter += 1
if level > 0 and niter == 1:
residual[:] = -self.rhos[level]
else:
self.operators[level].apply(self.phis[level], residual)
residual -= self.rhos[level]
error = self.gd.comm.sum(np.vdot(residual, residual))
if niter == 1 and level < self.levels:
self.restrictors[level].apply(residual, self.rhos[level + 1])
self.phis[level + 1][:] = 0.0
self.iterate(4.0 * step, level + 1)
self.interpolators[level].apply(self.phis[level + 1], residual)
self.phis[level] -= residual
continue
residual *= step
self.phis[level] -= residual
if niter == 2:
break
return error
def iterate2(self, step, level=0):
"""Smooths the solution in every multigrid level"""
residual = self.residuals[level]
if level < self.levels:
self.operators[level].relax(self.relax_method,
self.phis[level],
self.rhos[level],
self.presmooths[level],
self.weights[level])
self.operators[level].apply(self.phis[level], residual)
residual -= self.rhos[level]
self.restrictors[level].apply(residual,
self.rhos[level + 1])
self.phis[level + 1][:] = 0.0
self.iterate2(4.0 * step, level + 1)
self.interpolators[level].apply(self.phis[level + 1], residual)
self.phis[level] -= residual
self.operators[level].relax(self.relax_method,
self.phis[level],
self.rhos[level],
self.postsmooths[level],
self.weights[level])
if level == 0:
self.operators[level].apply(self.phis[level], residual)
residual -= self.rhos[level]
error = self.gd.comm.sum(np.dot(residual.ravel(),
residual.ravel())) * self.dv
return error
def estimate_memory(self, mem):
# XXX Memory estimate works only for J and GS, not FFT solver
# Poisson solver appears to use same amount of memory regardless
# of whether it's J or GS, which is a bit strange
gdbytes = self.gd.bytecount()
nbytes = -gdbytes # No phi on finest grid, compensate ahead
for level in range(self.levels):
nbytes += 3 * gdbytes # Arrays: rho, phi, residual
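            # each coarsening halves all three grid dimensions, so the array
            # size drops by a factor of 2**3 = 8 per level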
gdbytes //= 8
mem.subnode('rho, phi, residual [%d levels]' % self.levels, nbytes)
for i, obj in enumerate(self.restrictors + self.interpolators):
obj.estimate_memory(mem.subnode('Transformer %d' % i))
for i, operator in enumerate(self.operators):
name = operator.__class__.__name__
operator.estimate_memory(mem.subnode('Operator %d [%s]' % (i,
name)))
if self.B is not None:
name = self.B.__class__.__name__
self.B.estimate_memory(mem.subnode('B [%s]' % name))
def __repr__(self):
template = 'PoissonSolver(relax=\'%s\', nn=%s, eps=%e)'
representation = template % (self.relax, repr(self.nn), self.eps)
return representation
class PoissonFFTSolver(PoissonSolver):
"""FFT implementation of the Poisson solver."""
def __init__(self):
self.charged_periodic_correction = None
def get_method(self):
return 'FFT solver of the first kind'
def initialize(self, gd, load_gauss=False):
# XXX this won't work now, but supposedly this class will be deprecated
# in favour of FFTPoissonSolver, no?
self.gd = gd
if self.gd.comm.size > 1:
raise RuntimeError('Cannot do parallel FFT.')
self.k2, self.N3 = construct_reciprocal(self.gd)
if load_gauss:
gauss = Gaussian(self.gd)
self.rho_gauss = gauss.get_gauss(0)
self.phi_gauss = gauss.get_gauss_pot(0)
def solve_neutral(self, phi, rho, eps=None):
phi[:] = np.real(ifftn(fftn(rho) * 4 * pi / self.k2))
return 1
def solve_screened(self, phi, rho, screening=0):
phi[:] = np.real(ifftn(fftn(rho) * 4 * pi / (self.k2 + screening**2)))
return 1
class FFTPoissonSolver(PoissonSolver):
"""FFT Poisson solver for general unit cells."""
relax_method = 0
nn = 999
def __init__(self, eps=2e-10):
self.charged_periodic_correction = None
self.eps = eps
def get_method(self):
return 'FFT solver of the second kind'
def set_grid_descriptor(self, gd):
assert gd.pbc_c.all()
self.gd = gd
def initialize(self):
if self.gd.comm.rank == 0:
self.k2_Q, self.N3 = construct_reciprocal(self.gd)
def solve_neutral(self, phi_g, rho_g, eps=None):
if self.gd.comm.size == 1:
phi_g[:] = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q).real
else:
rho_g = self.gd.collect(rho_g)
if self.gd.comm.rank == 0:
globalphi_g = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q).real
else:
globalphi_g = None
self.gd.distribute(globalphi_g, phi_g)
return 1
class FixedBoundaryPoissonSolver(PoissonSolver):
"""Solve the Poisson equation with FFT in two directions,
    and with a central finite-difference method in the third direction."""
def __init__(self, nn=1):
self.nn = nn
self.charged_periodic_correction = None
assert self.nn == 1
def get_method(self):
return 'Fixed-boundary %s' % PoissonSolver.get_method(self)
def set_grid_descriptor(self, gd):
assert gd.pbc_c.all()
assert gd.orthogonal
self.gd = gd
def initialize(self, b_phi1, b_phi2):
distribution = np.zeros([self.gd.comm.size], int)
if self.gd.comm.rank == 0:
d3 = b_phi1.shape[2]
gd = self.gd
N_c1 = gd.N_c[:2, np.newaxis]
i_cq = np.indices(gd.N_c[:2]).reshape((2, -1))
i_cq += N_c1 // 2
i_cq %= N_c1
i_cq -= N_c1 // 2
B_vc = 2.0 * np.pi * gd.icell_cv.T[:2, :2]
k_vq = np.dot(B_vc, i_cq)
k_vq *= k_vq
k_vq2 = np.sum(k_vq, axis=0)
k_vq2 = k_vq2.reshape(-1)
b_phi1 = fft2(b_phi1, None, (0,1))
b_phi2 = fft2(b_phi2, None, (0,1))
b_phi1 = b_phi1[:, :, -1].reshape(-1)
b_phi2 = b_phi2[:, :, 0].reshape(-1)
loc_b_phi1 = np.array_split(b_phi1, self.gd.comm.size)
loc_b_phi2 = np.array_split(b_phi2, self.gd.comm.size)
loc_k_vq2 = np.array_split(k_vq2, self.gd.comm.size)
self.loc_b_phi1 = loc_b_phi1[0]
self.loc_b_phi2 = loc_b_phi2[0]
self.k_vq2 = loc_k_vq2[0]
for i in range(self.gd.comm.size):
distribution[i] = len(loc_b_phi1[i])
self.gd.comm.broadcast(distribution, 0)
for i in range(1, self.gd.comm.size):
self.gd.comm.ssend(loc_b_phi1[i], i, 135)
self.gd.comm.ssend(loc_b_phi2[i], i, 246)
self.gd.comm.ssend(loc_k_vq2[i], i, 169)
else:
self.gd.comm.broadcast(distribution, 0)
self.loc_b_phi1 = np.zeros([distribution[self.gd.comm.rank]],
dtype=complex)
self.loc_b_phi2 = np.zeros([distribution[self.gd.comm.rank]],
dtype=complex)
self.k_vq2 = np.zeros([distribution[self.gd.comm.rank]])
self.gd.comm.receive(self.loc_b_phi1, 0, 135)
self.gd.comm.receive(self.loc_b_phi2, 0, 246)
self.gd.comm.receive(self.k_vq2, 0, 169)
k_distribution = np.arange(np.sum(distribution))
self.k_distribution = np.array_split(k_distribution,
self.gd.comm.size)
self.d1, self.d2, self.d3 = self.gd.N_c
self.r_distribution = np.array_split(np.arange(self.d3),
self.gd.comm.size)
self.comm_reshape = not (self.gd.parsize_c[0] == 1
and self.gd.parsize_c[1] == 1)
def solve(self, phi_g, rho_g, charge=None):
if charge is None:
actual_charge = self.gd.integrate(rho_g)
else:
actual_charge = charge
if self.charged_periodic_correction is None:
self.charged_periodic_correction = madelung(self.gd.cell_cv)
background = (actual_charge / self.gd.dv /
self.gd.get_size_of_global_array().prod())
self.solve_neutral(phi_g, rho_g - background)
phi_g += actual_charge * self.charged_periodic_correction
def scatter_r_distribution(self, global_rho_g, dtype=float):
d1, d2, d3 = self.d1, self.d2, self.d3
comm = self.gd.comm
index = self.r_distribution[comm.rank]
if comm.rank == 0:
rho_g1 = global_rho_g[:, :, index]
for i in range(1, comm.size):
ind = self.r_distribution[i]
comm.ssend(global_rho_g[:, :, ind].copy(), i, 178)
else:
rho_g1 = np.zeros([d1, d2, len(index)], dtype=dtype)
comm.receive(rho_g1, 0, 178)
return rho_g1
def gather_r_distribution(self, rho_g, dtype=complex):
comm = self.gd.comm
index = self.r_distribution[comm.rank]
d1, d2, d3 = self.d1, self.d2, self.d3
if comm.rank == 0:
global_rho_g = np.zeros([d1, d2, d3], dtype)
global_rho_g[:, :, index] = rho_g
for i in range(1, comm.size):
ind = self.r_distribution[i]
rho_gi = np.zeros([d1, d2, len(ind)], dtype)
comm.receive(rho_gi, i, 368)
global_rho_g[:, :, ind] = rho_gi
else:
comm.ssend(rho_g, 0, 368)
global_rho_g = None
return global_rho_g
def scatter_k_distribution(self, global_rho_g):
comm = self.gd.comm
index = self.k_distribution[comm.rank]
if comm.rank == 0:
rho_g = global_rho_g[index]
for i in range(1, comm.size):
ind = self.k_distribution[i]
comm.ssend(global_rho_g[ind], i, 370)
else:
rho_g = np.zeros([len(index), self.d3], dtype=complex)
comm.receive(rho_g, 0, 370)
return rho_g
def gather_k_distribution(self, phi_g):
comm = self.gd.comm
index = self.k_distribution[comm.rank]
d12 = self.d1 * self.d2
if comm.rank == 0:
global_phi_g = np.zeros([d12, self.d3], dtype=complex)
global_phi_g[index] = phi_g
for i in range(1, comm.size):
ind = self.k_distribution[i]
phi_gi = np.zeros([len(ind), self.d3], dtype=complex)
comm.receive(phi_gi, i, 569)
global_phi_g[ind] = phi_gi
else:
comm.ssend(phi_g, 0, 569)
global_phi_g = None
return global_phi_g
def solve_neutral(self, phi_g, rho_g):
# b_phi1 and b_phi2 are the boundary Hartree potential values
# of left and right sides
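        # Data flow: gather the z-distributed density, 2D-FFT over (x, y),
        # redistribute the (kx, ky) modes across ranks, solve one tridiagonal
        # system along z per mode (with the FFT-transformed boundary values
        # entering the first and last rows), then gather, inverse-FFT and
        # scatter the potential back to the real-space decomposition.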
if self.comm_reshape:
global_rho_g0 = self.gd.collect(rho_g)
rho_g1 = self.scatter_r_distribution(global_rho_g0)
else:
rho_g1 = rho_g
        # use copy() so the FFT output is C-contiguous
rho_g2 = fft2(rho_g1, None, (0, 1)).copy()
global_rho_g = self.gather_r_distribution(rho_g2)
if self.gd.comm.rank == 0:
global_rho_g.shape = (self.d1 * self.d2, self.d3)
rho_g3 = self.scatter_k_distribution(global_rho_g)
du0 = np.zeros(self.d3 - 1, dtype=complex)
du20 = np.zeros(self.d3 - 2, dtype=complex)
h2 = self.gd.h_cv[2, 2] ** 2
phi_g1 = np.zeros(rho_g3.shape, dtype=complex)
index = self.k_distribution[self.gd.comm.rank]
for phi, rho, rv2, bp1, bp2, i in zip(phi_g1, rho_g3,
self.k_vq2,
self.loc_b_phi1,
self.loc_b_phi2, range(len(index))):
A = np.zeros(self.d3, dtype=complex) + 2 + h2 * rv2
phi = rho * np.pi * 4 * h2
phi[0] += bp1
phi[-1] += bp2
du = du0 - 1
dl = du0 - 1
du2 = du20 - 1
_gpaw.linear_solve_tridiag(self.d3, A, du, dl, du2, phi)
phi_g1[i] = phi
global_phi_g = self.gather_k_distribution(phi_g1)
if self.gd.comm.rank == 0:
global_phi_g.shape = (self.d1, self.d2, self.d3)
phi_g2 = self.scatter_r_distribution(global_phi_g, dtype=complex)
        # use copy() so the inverse-FFT output is C-contiguous
phi_g3 = ifft2(phi_g2, None, (0, 1)).real.copy()
if self.comm_reshape:
global_phi_g = self.gather_r_distribution(phi_g3, dtype=float)
self.gd.distribute(global_phi_g, phi_g)
else:
phi_g[:] = phi_g3
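# Minimal usage sketch for the multigrid solver above (illustrative only;
# assumes the usual gpaw.grid_descriptor.GridDescriptor constructor):
#
#     import numpy as np
#     from gpaw.grid_descriptor import GridDescriptor
#
#     gd = GridDescriptor((32, 32, 32), cell_cv=8.0 * np.eye(3))
#     solver = PoissonSolver(nn=3, relax='J')
#     solver.set_grid_descriptor(gd)
#     solver.initialize()
#     phi = gd.zeros()
#     rho = gd.zeros()  # fill with a charge-neutral density
#     niter = solver.solve(phi, rho)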
|
qsnake/gpaw
|
gpaw/poisson.py
|
Python
|
gpl-3.0
| 21,264
|
[
"GPAW",
"Gaussian"
] |
289145c871f3817abdf8429a8eb037ae197510c451dd48c24c70f9964bf932ab
|
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import build_collection, install_collections, parse_collections_requirements_file, \
publish_collection
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.collection_loader import is_collection_ref
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
# Inject role into sys.argv[1] as a backwards compatibility step
if len(args) > 1 and args[1] not in ['-h', '--help'] and 'role' not in args and 'collection' not in args:
            # TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice?
args.insert(1, 'role')
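        # e.g. `ansible-galaxy install some_user.some_role` is rewritten to
        # `ansible-galaxy role install some_user.some_role` (names here are
        # illustrative)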
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role related operations.",
)
# common
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
opt_help.add_verbosity_options(common)
# options that apply to more than one action
user_repo = opt_help.argparse.ArgumentParser(add_help=False)
user_repo.add_argument('github_user', help='GitHub username')
user_repo.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
                                help='The path to the directory containing your roles. The default is the first writable one '
                                     'configured via DEFAULT_ROLES_PATH: %s' % default_roles_path)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Define the actions for the collection object type
collection = type_parser.add_parser('collection',
parents=[common],
help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='ACTION', dest='collection')
collection_parser.required = True
build_parser = collection_parser.add_parser(
'build', help='Build an Ansible collection artifact that can be published to Ansible Galaxy.',
parents=[common, force])
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument(
'args', metavar='collection', nargs='*', default=('./',),
help='Path to the collection(s) directory to build. This should be the directory that contains the '
'galaxy.yml file. The default is the current working directory.')
build_parser.add_argument(
'--output-path', dest='output_path', default='./',
help='The path in which the collection is built to. The default is the current working directory.')
self.add_init_parser(collection_parser, [common, force])
cinstall_parser = collection_parser.add_parser('install', help='Install collection from Ansible Galaxy',
parents=[force, common])
cinstall_parser.set_defaults(func=self.execute_install)
cinstall_parser.add_argument('args', metavar='collection_name', nargs='*',
help='The collection(s) name or path/url to a tar.gz collection artifact. This '
'is mutually exclusive with --requirements-file.')
cinstall_parser.add_argument('-p', '--collections-path', dest='collections_path', default='./',
help='The path to the directory containing your collections.')
cinstall_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors during installation and continue with the next specified '
'collection. This will not ignore dependency conflict errors.')
cinstall_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
cinstall_exclusive = cinstall_parser.add_mutually_exclusive_group()
cinstall_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download collections listed as dependencies")
cinstall_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing collection and its dependencies")
publish_parser = collection_parser.add_parser(
'publish', help='Publish a collection artifact to Ansible Galaxy.',
parents=[common])
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument(
'args', metavar='collection_path', help='The path to the collection tarball to publish.')
publish_parser.add_argument(
'--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at https://galaxy.ansible.com/me/preferences. '
'You can also use ansible-galaxy login to retrieve this key.')
publish_parser.add_argument(
'--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
# Define the actions for the role object type
role = type_parser.add_parser('role',
parents=[common],
help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ACTION', dest='role')
role_parser.required = True
delete_parser = role_parser.add_parser('delete', parents=[user_repo, common],
help='Removes the role from Galaxy. It does not remove or alter the actual GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
import_parser = role_parser.add_parser('import', help='Import a role', parents=[user_repo, common])
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True, help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
import_parser.add_argument('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
info_parser = role_parser.add_parser('info', help='View more details about a specific role.',
parents=[offline, common, roles_path])
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
rinit_parser = self.add_init_parser(role_parser, [offline, force, common])
rinit_parser.add_argument('--type',
dest='role_type',
action='store',
default='default',
help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
install_parser = role_parser.add_parser('install', help='Install Roles from file(s), URL(s) or tar file(s)',
parents=[force, common, roles_path])
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
install_parser.add_argument('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False, help='Use tar instead of the scm archive option when packaging the role')
install_parser.add_argument('args', help='Role name, URL or tar file', metavar='role', nargs='*')
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download roles listed as dependencies")
install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing role and it's dependencies")
remove_parser = role_parser.add_parser('remove', help='Delete roles from roles_path.', parents=[common, roles_path])
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
list_parser = role_parser.add_parser('list', help='Show the name and version of each role installed in the roles_path.',
parents=[common, roles_path])
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument('role', help='Role', nargs='?', metavar='role')
login_parser = role_parser.add_parser('login', parents=[common],
help="Login to api.github.com server in order to use ansible-galaxy role "
"sub command such as 'import', 'delete', 'publish', and 'setup'")
login_parser.set_defaults(func=self.execute_login)
login_parser.add_argument('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
search_parser = role_parser.add_parser('search', help='Search the Galaxy database by tags, platforms, author and multiple keywords.',
parents=[common])
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
setup_parser = role_parser.add_parser('setup', help='Manage the integration between Galaxy and the given source.',
parents=[roles_path, common])
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_init_parser(self, parser, parents):
galaxy_type = parser.dest
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = GalaxyCLI._validate_collection_name
init_parser = parser.add_parser('init',
help='Initialize new {0} with the base structure of a {0}.'.format(galaxy_type),
parents=parents)
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path',
dest='init_path',
default='./',
help='The path in which the skeleton {0} will be created. The default is the current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type),
dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based upon.'.format(galaxy_type))
init_parser.add_argument('{0}_name'.format(galaxy_type),
help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
return init_parser
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
self.api = GalaxyAPI(self.galaxy)
context.CLIARGS['func']()
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _validate_collection_name(name):
if is_collection_ref('ansible_collections.{0}'.format(name)):
return name
raise AnsibleError("Invalid collection name, must be in the format <namespace>.<collection>")
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(collection_path, output_path, force)
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
author='your name',
description='your description',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data['role_name'] = obj_name
inject_data['role_type'] = context.CLIARGS['role_type']
inject_data['license'] = 'license (GPL-2.0-or-later, MIT, etc)'
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data['namespace'] = namespace
inject_data['collection_name'] = collection_name
inject_data['license'] = 'GPL-2.0-or-later'
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
if obj_skeleton is not None:
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
template_env = Environment(loader=FileSystemLoader(obj_skeleton))
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file, encoding='utf-8')
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s was created successfully" % obj_name)
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not context.CLIARGS['offline']:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
uses the args list of roles to be installed, unless -f was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
"""
if context.CLIARGS['type'] == 'collection':
collections = context.CLIARGS['args']
force = context.CLIARGS['force']
output_path = context.CLIARGS['collections_path']
# TODO: use a list of server that have been configured in ~/.ansible_galaxy
servers = [context.CLIARGS['api_server']]
ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
requirements_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
collection_requirements = parse_collections_requirements_file(requirements_file)
else:
collection_requirements = []
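                # A positional arg may pin a version after ':', e.g.
                # 'my_ns.my_collection:1.2.0' (illustrative); a bare name
                # defaults to the wildcard '*'.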
for collection_input in collections:
name, dummy, requirement = collection_input.partition(':')
collection_requirements.append((name, requirement or '*', None))
output_path = GalaxyCLI._resolve_path(output_path)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(output_path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection won't be picked up in an Ansible "
"run." % (to_text(output_path), to_text(":".join(collections_path))))
if os.path.split(output_path)[1] != 'ansible_collections':
output_path = os.path.join(output_path, 'ansible_collections')
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(collection_requirements, output_path, servers, (not ignore_certs), ignore_errors,
no_deps, force, force_deps)
return 0
role_file = context.CLIARGS['role_file']
if not context.CLIARGS['args'] and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError(
"Unable to load data from the requirements file (%s): %s" % (role_file, to_native(e))
)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
raise AnsibleError("Invalid role requirements file")
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, to_native(e)))
else:
            # roles were specified directly, so we'll just go out and grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
# only process roles in roles files when names matches if given
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % to_text(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
                                display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
roles_left.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
roles_left.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
if context.CLIARGS['role']:
# show the requested role, if it exists
name = context.CLIARGS['role']
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = context.CLIARGS['roles_path']
path_found = False
warnings = []
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file, path=path)
if gr.metadata:
_display_role(gr)
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy.
"""
api_key = context.CLIARGS['api_key'] or GalaxyToken().get()
api_server = context.CLIARGS['api_server']
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
ignore_certs = context.CLIARGS['ignore_certs']
wait = context.CLIARGS['wait']
publish_collection(collection_path, api_server, api_key, ignore_certs, wait)
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
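    # A stand-alone sketch of the dynamic column-width formatting used in
    # execute_search() above: the widest "username.name" sets the field width
    # so the Description column lines up. Helper name and data are hypothetical.
    @staticmethod
    def _format_role_table_sketch(results):
        name_len = max(len(r['username'] + '.' + r['name']) for r in results)
        format_str = u" %%-%ds %%s" % name_len
        rows = [format_str % (u"Name", u"Description"),
                format_str % (u"----", u"-----------")]
        rows.extend(format_str % (u'%s.%s' % (r['username'], r['name']), r['description'])
                    for r in results)
        return u'\n'.join(rows)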
def execute_login(self):
"""
        verify user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if context.CLIARGS['token'] is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = context.CLIARGS['token']
galaxy_response = self.api.authenticate(github_token)
if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(context.CLIARGS['args']) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_user = to_text(context.CLIARGS['args'][0], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['args'][1], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
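    # The status-polling loop above, in miniature (hypothetical helper; the
    # fetch_task callable stands in for self.api.get_import_task): fetch the
    # task, stop on a terminal state, otherwise sleep and retry.
    @staticmethod
    def _poll_import_sketch(fetch_task, delay=10):
        while True:
            task = fetch_task()
            if task['state'] in ('SUCCESS', 'FAILED'):
                return task['state']
            time.sleep(delay)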
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
|
pgmillon/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 46,300
|
[
"Galaxy"
] |
e7e5ecd13c99f309b68e4aa7541ddcd87e9f014b3792654520acbc289f43ca71
|
#encoding=utf-8
"""
Courseware views functions
"""
import logging
import urllib
import json
import cgi
from collections import OrderedDict
from datetime import datetime
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.utils.timezone import UTC
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from certificates import api as certs_api
from edxmako.shortcuts import render_to_response, render_to_string, marketing_link
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access, in_preview_mode, _adjust_start_date_for_beta_testers
from courseware.courses import (
get_courses, get_course,
get_studio_url, get_course_with_access,
sort_by_announcement,
sort_by_start_date,
)
from courseware.masquerade import setup_masquerade
from openedx.core.djangoapps.credit.api import (
get_credit_requirement_status,
is_user_eligible_for_credit,
is_credit_course
)
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module, get_module_by_usage_id
from .entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_must_complete_entrance_exam,
user_has_passed_entrance_exam
)
from courseware.user_state_client import DjangoXBlockUserStateClient
from course_modes.models import CourseMode
from open_ended_grading import open_ended_notifications
from open_ended_grading.views import StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from student.models import UserTestGroup, CourseEnrollment
from student.views import is_course_blocked
from util.cache import cache, cache_if_anonymous
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from shoppingcart.models import CourseRegistrationCode
from shoppingcart.utils import is_shopping_cart_enabled
from opaque_keys import InvalidKeyError
from util.milestones_helpers import get_prerequisite_courses_display
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey, UsageKey
from instructor.enrollment import uses_shib
from util.db import commit_on_success_with_read_committed
import survey.utils
import survey.views
from util.views import ensure_valid_course_key
from eventtracking import tracker
import analytics
from courseware.url_helpers import get_redirect_url
from django.utils.timezone import localtime
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
CONTENT_DEPTH = 2
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
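# The cache-aside pattern used by user_groups() above, shown against a plain
# dict instead of Django's cache API (hypothetical stand-alone sketch):
_GROUP_NAME_CACHE = {}


def _cached_group_names(user_id, load_from_db):
    key = 'user_group_names_{0}'.format(user_id)
    names = _GROUP_NAME_CACHE.get(key)
    if names is None:  # cache miss: hit the database once, then memoize
        names = load_from_db(user_id)
        _GROUP_NAME_CACHE[key] = names
    return names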
@ensure_csrf_cookie
@cache_if_anonymous()
# Render the list of available courses.
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
courses_list = []
course_discovery_meanings = getattr(settings, 'COURSE_DISCOVERY_MEANINGS', {})
    # NOTE: the feature check below is bypassed with a hard-coded condition,
    # so the course list is always fetched; the original check is kept for reference.
    # if not settings.FEATURES.get('ENABLE_COURSE_DISCOVERY'):
    if True:
        courses_list = get_courses(request.user, request.META.get('HTTP_HOST'))
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses_list = sort_by_start_date(courses_list)
else:
courses_list = sort_by_announcement(courses_list)
return render_to_response(
"nercel-templates/col-course-list.html",
{'courses': courses_list, 'course_discovery_meanings': course_discovery_meanings}
)
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
toc = toc_for_course(request, course, chapter, section, field_data_cache)
context = dict([
('toc', toc),
('course_id', course.id.to_deprecated_string()),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)
] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If xmodule has no position or is out of bounds, return the first
child with children extending down to content_depth.
For example, if chapter_one has no position set, with two child sections,
section-A having no children and section-B having a discussion unit,
`get_current_child(chapter, min_depth=1)` will return section-B.
Returns None only if there are no children at all.
"""
def _get_default_child_module(child_modules):
"""Returns the first child of xmodule, subject to min_depth."""
if not child_modules:
default_child = None
elif not min_depth > 0:
default_child = child_modules[0]
else:
content_children = [child for child in child_modules if
child.has_children_at_depth(min_depth - 1) and child.get_display_items()]
default_child = content_children[0] if content_children else None
return default_child
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
return _get_default_child_module(xmodule.get_display_items())
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# module has a set position, but the position is out of range.
# return default child.
child = _get_default_child_module(children)
else:
child = None
return child
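# The 1-indexed position lookup above, reduced to plain lists (hypothetical
# helper, not platform code): fall back to the first child when the stored
# position is missing or out of range.
def _child_at_position(children, position):
    if not children:
        return None
    if position is None:
        return children[0]
    pos = position - 1  # position is 1-indexed
    return children[pos] if 0 <= pos < len(children) else children[0]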
def redirect_to_course_position(course_module, content_depth):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
    If this isn't the user's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id.to_deprecated_string()}
chapter = get_current_child(course_module, min_depth=content_depth)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter, min_depth=content_depth - 1)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.location.name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
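# The core of save_child_position() above, as a stand-alone sketch: enumerate
# children starting at 1 (the position field is 1-indexed) and report the
# match, so the caller can write only when the position actually changed.
def _position_of(child_names, child_name):
    for position, name in enumerate(child_names, start=1):
        if name == child_name:
            return position
    return None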
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
"""
Recurses up the course tree starting from a leaf
Saving the position property based on the previous node as it goes
"""
current_module = xmodule
while current_module:
parent_location = modulestore().get_parent_location(current_module.location)
parent = None
if parent_location:
parent_descriptor = modulestore().get_item(parent_location)
parent = get_module_for_descriptor(
user,
request,
parent_descriptor,
field_data_cache,
current_module.location.course_key,
course=course
)
if parent and hasattr(parent, 'position'):
save_child_position(parent, current_module.location.name)
current_module = parent
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@commit_on_success_with_read_committed
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
    If neither chapter nor section is specified, redirects to the user's most recent
chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
    - HttpResponse
"""
course_key = CourseKey.from_string(course_id)
user = User.objects.prefetch_related("groups").get(id=request.user.id)
redeemed_registration_codes = CourseRegistrationCode.objects.filter(
course_id=course_key,
registrationcoderedemption__redeemed_by=request.user
)
# Redirect to dashboard if the course is blocked due to non-payment.
if is_course_blocked(request, redeemed_registration_codes, course_key):
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
log.warning(
u'User %s cannot access the course %s because payment has not yet been received',
user,
course_key.to_deprecated_string()
)
return redirect(reverse('dashboard'))
request.user = user # keep just one instance of User
with modulestore().bulk_operations(course_key):
return _index_bulk_op(request, course_key, chapter, section, position)
# pylint: disable=too-many-statements
def _index_bulk_op(request, course_key, chapter, section, position):
"""
Render the index page for the specified course.
"""
    # Verify that position, a string, is in fact an int
if position is not None:
try:
int(position)
except ValueError:
raise Http404(u"Position {} is not an integer!".format(position))
user = request.user
course = get_course_with_access(user, 'load', course_key, depth=2)
staff_access = has_access(user, 'staff', course)
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
# see if all pre-requisites (as per the milestones app feature) have been fulfilled
# Note that if the pre-requisite feature flag has been turned off (default) then this check will
# always pass
if not has_access(user, 'view_courseware_with_prerequisites', course):
# prerequisites have not been fulfilled therefore redirect to the Dashboard
log.info(
u'User %d tried to view course %s '
u'without fulfilling prerequisites',
user.id, unicode(course.id))
return redirect(reverse('dashboard'))
# Entrance Exam Check
# If the course has an entrance exam and the requested chapter is NOT the entrance exam, and
# the user hasn't yet met the criteria to bypass the entrance exam, redirect them to the exam.
if chapter and course_has_entrance_exam(course):
chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
if chapter_descriptor and not getattr(chapter_descriptor, 'is_entrance_exam', False) \
and user_must_complete_entrance_exam(request, user, course):
log.info(u'User %d tried to view course %s without passing entrance exam', user.id, unicode(course.id))
return redirect(reverse('courseware', args=[unicode(course.id)]))
# check to see if there is a required survey that must be taken before
# the user can access the course.
if survey.utils.must_answer_survey(course, user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
masquerade = setup_masquerade(request, course_key, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_key, user, course, depth=2)
course_module = get_module_for_descriptor(
user, request, course, field_data_cache, course_key, course=course
)
if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, we should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
studio_url = get_studio_url(course, 'course')
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'studio_url': studio_url,
'masquerade': masquerade,
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
}
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
if not in_preview_mode() and staff_access and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
has_content = course.has_children_at_depth(CONTENT_DEPTH)
if not has_content:
# Show empty courseware for a course with no units
return render_to_response('courseware/courseware.html', context)
elif chapter is None:
# Check first to see if we should instead redirect the user to an Entrance Exam
if course_has_entrance_exam(course):
exam_chapter = get_entrance_exam_content(request, course)
if exam_chapter:
exam_section = None
if exam_chapter.get_children():
exam_section = exam_chapter.get_children()[0]
if exam_section:
return redirect('courseware_section',
course_id=unicode(course_key),
chapter=exam_chapter.url_name,
section=exam_section.url_name)
# passing CONTENT_DEPTH avoids returning 404 for a course with an
# empty first section and a second section with content
return redirect_to_course_position(course_module, CONTENT_DEPTH)
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masquerade and masquerade.role == 'student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masquerading as student: no chapter %s', chapter)
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
raise Http404
if course_has_entrance_exam(course):
            # The message should only appear in the context of an entrance exam subsection.
            # If section is None, we also skip the message on the welcome-back screen.
if getattr(chapter_module, 'is_entrance_exam', False) and section is not None:
context['entrance_exam_current_score'] = get_entrance_exam_score(request, course)
context['entrance_exam_passed'] = user_has_passed_entrance_exam(request, course)
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masquerade and masquerade.role == 'student': # don't 404 if staff is masquerading as student
log.debug('staff masquerading as student: no section %s', section)
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
raise Http404
## Allow chromeless operation
if section_descriptor.chrome:
chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
if 'accordion' not in chrome:
context['disable_accordion'] = True
if 'tabs' not in chrome:
context['disable_tabs'] = True
if section_descriptor.default_tab:
context['default_tab'] = section_descriptor.default_tab
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
field_data_cache.add_descriptor_descendents(
section_descriptor, depth=None
)
section_module = get_module_for_descriptor(
request.user,
request,
section_descriptor,
field_data_cache,
course_key,
position,
course=course
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render(STUDENT_VIEW)
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
studio_url = get_studio_url(course, 'course')
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user.
# Clearing out the last-visited state and showing "first-time" view by redirecting
# to courseware.
course_module.position = None
course_module.save()
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
prev_section_url = reverse('courseware_section', kwargs={
'course_id': course_key.to_deprecated_string(),
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name
})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'studio_url': studio_url,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
# Doesn't bar Unicode characters from URL, but if Unicode characters do
# cause an error it is a graceful failure.
if isinstance(e, UnicodeEncodeError):
raise Http404("URL contains Unicode characters")
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception(
u"Error in index view: user={user}, course={course}, chapter={chapter}"
u" section={section} position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html', {
'staff_access': staff_access,
'course': course
})
except:
                # Let the exception propagate, relying on global config to
                # at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
"""
This entry point allows for a shorter version of a jump to where just the id of the element is
passed in. This assumes that id is unique within the course_id namespace
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
items = modulestore().get_items(course_key, qualifiers={'name': module_id})
if len(items) == 0:
raise Http404(
u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
module_id, course_id, request.META.get("HTTP_REFERER", "")
))
if len(items) > 1:
log.warning(
u"Multiple items found with id: {0} in course_id: {1}. Referer: {2}. Using first: {3}".format(
module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.to_deprecated_string()
))
return jump_to(request, course_id, items[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(_request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
try:
course_key = CourseKey.from_string(course_id)
usage_key = UsageKey.from_string(location).replace(course_key=course_key)
except InvalidKeyError:
raise Http404(u"Invalid course_key or usage_key")
try:
redirect_url = get_redirect_url(course_key, usage_key)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(usage_key))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(usage_key))
return redirect(redirect_url)
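# For reference, the two key formats this view accepts (deprecated "slash"
# keys and the newer opaque keys); the examples below are illustrative:
#   course_id: "edX/DemoX/Demo_2014"  or  "course-v1:edX+DemoX+Demo_2014"
#   location:  "i4x://edX/DemoX/problem/abc"  or
#              "block-v1:edX+DemoX+Demo_2014+type@problem+block@abc"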
# Handle requests for the course info page.
@ensure_csrf_cookie
@ensure_valid_course_key
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
    # Derive the course key from the course_id string.
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
course = get_course_with_access(request.user, 'load', course_key)
# If the user needs to take an entrance exam to access this course, then we'll need
# to send them to that specific course module before allowing them into other areas
if user_must_complete_entrance_exam(request, request.user, course):
return redirect(reverse('courseware', args=[unicode(course.id)]))
# check to see if there is a required survey that must be taken before
# the user can access the course.
if request.user.is_authenticated() and survey.utils.must_answer_survey(course, request.user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
        # Get staff access.
staff_access = has_access(request.user, 'staff', course)
        masquerade = setup_masquerade(request, course_key, staff_access)  # allow staff to masquerade on the info page
        # Get the Studio URL.
studio_url = get_studio_url(course, 'course_info')
        # Link to where the student should go to enroll in the course:
        # the about page if there is no marketing site, or SITE_NAME if there is.
        # The URL combines the course id with the course_about view.
url_to_enroll = reverse(course_about, args=[course_id])
if settings.FEATURES.get('ENABLE_MKTG_SITE'):
url_to_enroll = marketing_link('COURSES')
show_enroll_banner = request.user.is_authenticated() and not CourseEnrollment.is_enrolled(request.user, course.id)
context = {
'request': request,
'course_id': course_key.to_deprecated_string(),
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masquerade,
'studio_url': studio_url,
'show_enroll_banner': show_enroll_banner,
'url_to_enroll': url_to_enroll,
}
now = datetime.now(UTC())
        # Get the effective start date.
effective_start = _adjust_start_date_for_beta_testers(request.user, course, course_key)
        # If not in preview mode, the user has staff access, and the course has not started yet:
if not in_preview_mode() and staff_access and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
if tab is None:
raise Http404
contents = get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
return render_to_response('courseware/static_tab.html', {
'course': course,
'tab': tab,
'tab_contents': contents,
})
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = has_access(request.user, 'staff', course)
return render_to_response('courseware/syllabus.html', {
'course': course,
'staff_access': staff_access,
})
# Check whether the user is registered for the course.
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
def get_cosmetic_display_price(course, registration_price):
"""
Return Course Price as a string preceded by correct currency, or 'Free'
"""
currency_symbol = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
price = course.cosmetic_display_price
if registration_price > 0:
price = registration_price
if price:
# Translators: This will look like '$50', where {currency_symbol} is a symbol such as '$' and {price} is a
# numerical amount in that currency. Adjust this display as needed for your language.
return _("{currency_symbol}{price}").format(currency_symbol=currency_symbol, price=price)
else:
# Translators: This refers to the cost of the course. In this case, the course costs nothing so it is free.
return _('Free')
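# The pricing rule above, as a self-contained sketch (hypothetical helper; the
# currency symbol is hard-coded here, while the real code reads it from
# settings.PAID_COURSE_REGISTRATION_CURRENCY):
def _display_price_sketch(cosmetic_price, registration_price, symbol=u'$'):
    price = registration_price if registration_price > 0 else cosmetic_price
    return u'{0}{1}'.format(symbol, price) if price else u'Free'
# _display_price_sketch(0, 50) -> u'$50'; _display_price_sketch(0, 0) -> u'Free'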
# Render the course's about page.
@ensure_csrf_cookie
@cache_if_anonymous()
def course_about(request, course_id):
"""
Display the course's about page.
Assumes the course_id is in a valid format.
"""
    # Assumes course_id is in a valid format.
    # Display the course about page.
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
        # Visibility permission, e.g. "see_exists".
permission_name = microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
        # Validate access and get the course (depth=0, a course descriptor).
course = get_course_with_access(request.user, permission_name, course_key)
        # Compute how many days remain until the course starts.
        if course.start_date_is_still_default:
            days = "Start date not set"
        else:
            curr_date = datetime.now()
            # Convert the course start to naive local time so the comparison
            # with the naive local datetime.now() is consistent.
            start_date = localtime(course.start).replace(tzinfo=None)
            if curr_date >= start_date:
                days = "Course has already started"
            else:
                days = str((start_date - curr_date).days)
        # Defaults to False.
if microsite.get_value('ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)):
return redirect(reverse('info', args=[course.id.to_deprecated_string()]))
        # Check whether the user is registered for the course.
registered = registered_for_course(course, request.user)
        # Get staff access.
staff_access = has_access(request.user, 'staff', course)
        # Get the Studio URL.
studio_url = get_studio_url(course, 'settings/details')
        # If the user has 'load' access, link to the info page; otherwise to the about page.
if has_access(request.user, 'load', course):
course_target = reverse('info', args=[course.id.to_deprecated_string()])
else:
course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
        # Whether to show the courseware link.
show_courseware_link = (
(
has_access(request.user, 'load', course)
and has_access(request.user, 'view_courseware_with_prerequisites', course)
)
or settings.FEATURES.get('ENABLE_LMS_MIGRATION')
)
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
        # For paid courses, look up the registration price and whether the course is already in the cart.
_is_shopping_cart_enabled = is_shopping_cart_enabled()
if _is_shopping_cart_enabled:
registration_price = CourseMode.min_course_price_for_currency(course_key,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key) or \
shoppingcart.models.CourseRegCodeItem.contained_in_order(cart, course_key)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=urllib.quote(str(course_id)))
        # Get the course display price; defaults to "Free".
course_price = get_cosmetic_display_price(course, registration_price)
        # False unless the shopping cart is enabled and the course has a price.
        can_add_course_to_cart = _is_shopping_cart_enabled and registration_price
        # Used to provide context in the message shown to students if enrollment is not allowed.
        # Whether the user can enroll.
        can_enroll = has_access(request.user, 'enroll', course)
invitation_only = course.invitation_only
        # Whether the course is already full.
is_course_full = CourseEnrollment.objects.is_course_full(course)
        # Total enrollment count:
# Register button should be disabled if one of the following is true:
# - Student is already registered for course
# - Course is already full
# - Student cannot enroll in course
        # Whether the register button is active.
active_reg_button = not(registered or is_course_full or not can_enroll)
        # Whether this is a Shibboleth course.
is_shib_course = uses_shib(course)
        # Get display names of prerequisite courses.
pre_requisite_courses = get_prerequisite_courses_display(course)
return render_to_response('nercel-templates/col-registerCourse.html', {
'course': course,
'days':days,
'staff_access': staff_access,
'studio_url': studio_url,
'registered': registered,
'course_target': course_target,
'is_cosmetic_price_enabled': settings.FEATURES.get('ENABLE_COSMETIC_DISPLAY_PRICE'),
'course_price': course_price,
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full,
'can_enroll': can_enroll,
'invitation_only': invitation_only,
'active_reg_button': active_reg_button,
'is_shib_course': is_shib_course,
            # We do not want to display the internal courseware header, which is used when the course is found in the
            # context. This value is therefore explicitly set to render the appropriate header.
'disable_courseware_header': True,
'can_add_course_to_cart': can_add_course_to_cart,
'cart_link': reverse('shoppingcart.views.show_cart'),
'pre_requisite_courses': pre_requisite_courses
})
@ensure_csrf_cookie
@cache_if_anonymous('org')
@ensure_valid_course_key
def mktg_course_about(request, course_id):
"""This is the button that gets put into an iframe on the Drupal site."""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
permission_name = microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
course = get_course_with_access(request.user, permission_name, course_key)
except (ValueError, Http404):
# If a course does not exist yet, display a "Coming Soon" button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_key.to_deprecated_string()}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, 'load', course):
course_target = reverse('info', args=[course.id.to_deprecated_string()])
else:
course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
allow_registration = has_access(request.user, 'enroll', course)
show_courseware_link = (has_access(request.user, 'load', course) or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course_dict(course.id)
context = {
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
# The edx.org marketing site currently displays only in English.
# To avoid displaying a different language in the register / access button,
# we force the language to English.
# However, OpenEdX installations with a different marketing front-end
# may want to respect the language specified by the user or the site settings.
force_english = settings.FEATURES.get('IS_EDX_DOMAIN', False)
if force_english:
translation.activate('en-us')
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
# Drupal will pass organization names using a GET parameter, as follows:
# ?org=Harvard
# ?org=Harvard,MIT
# If no full names are provided, the marketing iframe won't show the
# email opt-in checkbox.
org = request.GET.get('org')
if org:
org_list = org.split(',')
# HTML-escape the provided organization names
org_list = [cgi.escape(org) for org in org_list]
if len(org_list) > 1:
if len(org_list) > 2:
# Translators: The join of three or more institution names (e.g., Harvard, MIT, and Dartmouth).
org_name_string = _("{first_institutions}, and {last_institution}").format(
first_institutions=u", ".join(org_list[:-1]),
last_institution=org_list[-1]
)
else:
# Translators: The join of two institution names (e.g., Harvard and MIT).
org_name_string = _("{first_institution} and {second_institution}").format(
first_institution=org_list[0],
second_institution=org_list[1]
)
else:
org_name_string = org_list[0]
context['checkbox_label'] = ungettext(
"I would like to receive email from {institution_series} and learn about its other programs.",
"I would like to receive email from {institution_series} and learn about their other programs.",
len(org_list)
).format(institution_series=org_name_string)
try:
return render_to_response('courseware/mktg_course_about.html', context)
finally:
# Just to be safe, reset the language if we forced it to be English.
if force_english:
translation.deactivate()
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
@ensure_valid_course_key
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
with grades.manual_transaction():
return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)
# check to see if there is a required survey that must be taken before
# the user can access the course.
if survey.utils.must_answer_survey(course, request.user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
staff_access = has_access(request.user, 'staff', course)
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
try:
student = User.objects.get(id=student_id)
# Check for ValueError if 'student_id' cannot be converted to integer.
except (ValueError, User.DoesNotExist):
raise Http404
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
studio_url = get_studio_url(course, 'settings/grading')
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
        # This means the student didn't have access to the course (which the instructor requested)
raise Http404
# checking certificate generation configuration
show_generate_cert_btn = certs_api.cert_generation_enabled(course_key)
context = {
'course': course,
'courseware_summary': courseware_summary,
'studio_url': studio_url,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'passed': is_course_passed(course, grade_summary),
'show_generate_cert_btn': show_generate_cert_btn,
'credit_course_requirements': _credit_course_requirements(course_key, student),
}
if show_generate_cert_btn:
context.update(certs_api.certificate_downloadable_status(student, course_key))
# showing the certificate web view button if feature flags are enabled.
if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
if certs_api.get_active_web_certificate(course) is not None:
context.update({
'show_cert_web_view': True,
'cert_web_view_url': u'{url}'.format(
url=certs_api.get_certificate_url(
user_id=student.id,
course_id=unicode(course.id),
verify_uuid=None
)
)
})
else:
context.update({
'is_downloadable': False,
'is_generating': True,
'download_url': None
})
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def _credit_course_requirements(course_key, student):
"""Return information about which credit requirements a user has satisfied.
Arguments:
course_key (CourseKey): Identifier for the course.
student (User): Currently logged in user.
Returns: dict
"""
# If credit eligibility is not enabled or this is not a credit course,
# short-circuit and return `None`. This indicates that credit requirements
# should NOT be displayed on the progress page.
if not (settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY", False) and is_credit_course(course_key)):
return None
# Retrieve the status of the user for each eligibility requirement in the course.
# For each requirement, the user's status is either "satisfied", "failed", or None.
# In this context, `None` means that we don't know the user's status, either because
# the user hasn't done something (for example, submitting photos for verification)
# or we're waiting on more information (for example, a response from the photo
# verification service).
requirement_statuses = get_credit_requirement_status(course_key, student.username)
# If the user has been marked as "eligible", then they are *always* eligible
# unless someone manually intervenes. This could lead to some strange behavior
# if the requirements change post-launch. For example, if the user was marked as eligible
# for credit, then a new requirement was added, the user will see that they're eligible
# AND that one of the requirements is still pending.
# We're assuming here that (a) we can mitigate this by properly training course teams,
# and (b) it's a better user experience to allow students who were at one time
# marked as eligible to continue to be eligible.
# If we need to, we can always manually move students back to ineligible by
# deleting CreditEligibility records in the database.
if is_user_eligible_for_credit(student.username, course_key):
eligibility_status = "eligible"
# If the user has *failed* any requirements (for example, if a photo verification is denied),
# then the user is NOT eligible for credit.
elif any(requirement['status'] == 'failed' for requirement in requirement_statuses):
eligibility_status = "not_eligible"
# Otherwise, the user may be eligible for credit, but the user has not
# yet completed all the requirements.
else:
eligibility_status = "partial_eligible"
return {
'eligibility_status': eligibility_status,
'requirements': requirement_statuses,
}
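# The eligibility decision above, isolated as a pure function (hypothetical
# helper): an explicit "eligible" mark wins outright, any failed requirement
# makes the user ineligible, and everything else is partial eligibility.
def _eligibility_status_sketch(marked_eligible, requirement_statuses):
    if marked_eligible:
        return "eligible"
    if any(req['status'] == 'failed' for req in requirement_statuses):
        return "not_eligible"
    return "partial_eligible"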
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
usage_key = course_key.make_usage_key_from_deprecated_string(location)
except (InvalidKeyError, AssertionError):
return HttpResponse(escape(_(u'Invalid location.')))
course = get_course_with_access(request.user, 'load', course_key)
staff_access = has_access(request.user, 'staff', course)
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
user_state_client = DjangoXBlockUserStateClient()
try:
history_entries = user_state_client.get_history(student_username, usage_key)
except DjangoXBlockUserStateClient.DoesNotExist:
return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
username=student_username,
location=location
)))
context = {
'history_entries': history_entries,
'username': student_username,
'location': location,
'course_id': course_key.to_deprecated_string()
}
return render_to_response('courseware/submission_history.html', context)
def notification_image_for_tab(course_tab, user, course):
"""
Returns the notification image path for the given course_tab if applicable, otherwise None.
"""
tab_notification_handlers = {
StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
OpenEndedGradingTab.type: open_ended_notifications.combined_notifications
}
if course_tab.name in tab_notification_handlers:
notifications = tab_notification_handlers[course_tab.name](course, user)
if notifications and notifications['pending_grading']:
return notifications['img_path']
return None
def get_static_tab_contents(request, course, tab):
"""
Returns the contents for the given static tab
"""
loc = course.id.make_usage_key(
tab.type,
tab.url_slug,
)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, modulestore().get_item(loc), depth=0
)
tab_module = get_module(
request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path, course=course
)
logging.debug('course_module = {0}'.format(tab_module))
html = ''
if tab_module is not None:
try:
html = tab_module.render(STUDENT_VIEW).content
except Exception: # pylint: disable=broad-except
html = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course={course}, tab={tab_url}".format(course=course, tab_url=tab['url_slug'])
)
return html
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
"""
    View that, given a course_id, returns a JSON object that enumerates all of the LTI endpoints for that course.
The LTI 2.0 result service spec at
http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
says "This specification document does not prescribe a method for discovering the endpoint URLs." This view
function implements one way of discovering these endpoints, returning a JSON array when accessed.
Arguments:
request (django request object): the HTTP request object that triggered this view function
course_id (unicode): id associated with the course
Returns:
(django response object): HTTP response. 404 if course is not found, otherwise 200 with JSON body.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
course = get_course(course_key, depth=2)
except ValueError:
return HttpResponse(status=404)
anonymous_user = AnonymousUser()
anonymous_user.known = False # make these "noauth" requests like module_render.handle_xblock_callback_noauth
lti_descriptors = modulestore().get_items(course.id, qualifiers={'category': 'lti'})
lti_noauth_modules = [
get_module_for_descriptor(
anonymous_user,
request,
descriptor,
FieldDataCache.cache_for_descriptor_descendents(
course_key,
anonymous_user,
descriptor
),
course_key,
course=course
)
for descriptor in lti_descriptors
]
endpoints = [
{
'display_name': module.display_name,
'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
service_name='grade_handler'),
}
for module in lti_noauth_modules
]
return HttpResponse(json.dumps(endpoints), content_type='application/json')
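# Shape of the JSON array returned above (values are illustrative):
# [
#   {
#     "display_name": "My LTI Component",
#     "lti_2_0_result_service_json_endpoint": ".../lti_2_0_result_rest_handler/user/{anon_user_id}",
#     "lti_1_1_result_service_xml_endpoint": ".../grade_handler"
#   }
# ]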
@login_required
def course_survey(request, course_id):
"""
URL endpoint to present a survey that is associated with a course_id
Note that the actual implementation of course survey is handled in the
views.py file in the Survey Djangoapp
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
redirect_url = reverse('info', args=[course_id])
# if there is no Survey associated with this course,
# then redirect to the course instead
if not course.course_survey_name:
return redirect(redirect_url)
return survey.views.view_student_survey(
request.user,
course.course_survey_name,
course=course,
redirect_url=redirect_url,
is_required=course.course_survey_required,
)
def is_course_passed(course, grade_summary=None, student=None, request=None):
"""
    Check the user's course passing status. Return True if passed.
Arguments:
course : course object
grade_summary (dict) : contains student grade details.
student : user object
request (HttpRequest)
Returns:
returns bool value
"""
nonzero_cutoffs = [cutoff for cutoff in course.grade_cutoffs.values() if cutoff > 0]
success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None
if grade_summary is None:
grade_summary = grades.grade(student, request, course)
return success_cutoff and grade_summary['percent'] >= success_cutoff
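# Worked example of the cutoff rule above: the passing threshold is the lowest
# non-zero grade cutoff (hypothetical helper; the values below are made up).
def _passed_sketch(percent, grade_cutoffs):
    nonzero_cutoffs = [cutoff for cutoff in grade_cutoffs.values() if cutoff > 0]
    success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None
    return bool(success_cutoff and percent >= success_cutoff)
# _passed_sketch(0.55, {'Pass': 0.5}) -> True; _passed_sketch(0.45, {'Pass': 0.5}) -> False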
@require_POST
def generate_user_cert(request, course_id):
"""Start generating a new certificate for the user.
Certificate generation is allowed if:
* The user has passed the course, and
* The user does not already have a pending/completed certificate.
Note that if an error occurs during certificate generation
(for example, if the queue is down), then we simply mark the
certificate generation task status as "error" and re-run
the task with a management command. To students, the certificate
will appear to be "generating" until it is re-run.
Args:
request (HttpRequest): The POST request to this view.
course_id (unicode): The identifier for the course.
Returns:
HttpResponse: 200 on success, 400 if a new certificate cannot be generated.
"""
if not request.user.is_authenticated():
log.info(u"Anon user trying to generate certificate for %s", course_id)
return HttpResponseBadRequest(
_('You must be signed in to {platform_name} to create a certificate.').format(
platform_name=settings.PLATFORM_NAME
)
)
student = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key, depth=2)
if not course:
return HttpResponseBadRequest(_("Course is not valid"))
if not is_course_passed(course, None, student, request):
return HttpResponseBadRequest(_("Your certificate will be available when you pass the course."))
certificate_status = certs_api.certificate_downloadable_status(student, course.id)
if certificate_status["is_downloadable"]:
return HttpResponseBadRequest(_("Certificate has already been created."))
elif certificate_status["is_generating"]:
return HttpResponseBadRequest(_("Certificate is being created."))
else:
# If the certificate is not already in-process or completed,
# then create a new certificate generation task.
# If the certificate cannot be added to the queue, this will
# mark the certificate with "error" status, so it can be re-run
# with a management command. From the user's perspective,
# it will appear that the certificate task was submitted successfully.
certs_api.generate_user_certificates(student, course.id, course=course, generation_mode='self')
_track_successful_certificate_generation(student.id, course.id)
return HttpResponse()
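# Condensed decision flow of generate_user_cert() above, as a pure function
# over booleans (hypothetical helper; returns an HTTP-status-like label):
def _cert_request_outcome_sketch(authenticated, course_exists, passed,
                                 downloadable, generating):
    if not (authenticated and course_exists and passed):
        return '400'
    if downloadable or generating:
        return '400'
    return '200 (new generation task queued)'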
def _track_successful_certificate_generation(user_id, course_id): # pylint: disable=invalid-name
"""
Track a successful certificate generation event.
Arguments:
        user_id (str): The ID of the user generating the certificate.
course_id (CourseKey): Identifier for the course.
Returns:
None
"""
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
event_name = 'edx.bi.user.certificate.generate' # pylint: disable=no-member
tracking_context = tracker.get_tracker().resolve_context() # pylint: disable=no-member
analytics.track(
user_id,
event_name,
{
'category': 'certificates',
'label': unicode(course_id)
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
@require_http_methods(["GET", "POST"])
def render_xblock(request, usage_key_string, check_if_enrolled=True):
"""
Returns an HttpResponse with HTML content for the xBlock with the given usage_key.
The returned HTML is a chromeless rendering of the xBlock (excluding content of the containing courseware).
"""
usage_key = UsageKey.from_string(usage_key_string)
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
course_key = usage_key.course_key
with modulestore().bulk_operations(course_key):
# verify the user has access to the course, including enrollment check
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=check_if_enrolled)
# get the block, which verifies whether the user has access to the block.
block, _ = get_module_by_usage_id(
request, unicode(course_key), unicode(usage_key), disable_staff_debug_info=True, course=course
)
context = {
'fragment': block.render('student_view', context=request.GET),
'course': course,
'disable_accordion': True,
'allow_iframing': True,
'disable_header': True,
'disable_window_wrap': True,
'disable_preview_menu': True,
'staff_access': has_access(request.user, 'staff', course),
'xqa_server': settings.FEATURES.get('XQA_SERVER', 'http://your_xqa_server.com'),
}
return render_to_response('courseware/courseware-chromeless.html', context)
|
xuxiao19910803/edx-platform
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 61,622
|
[
"VisIt"
] |
9f9fc30fa5696f77d724c89e3003885d72c3099524cc366047ec31d9e5385832
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"sales_order_details": "templates/form_grid/item_grid.html"
}
class SalesOrder(SellingController):
tname = 'Sales Order Item'
fname = 'sales_order_details'
person_tname = 'Target Detail'
partner_tname = 'Partner Target Detail'
territory_tname = 'Territory Target Detail'
def validate_mandatory(self):
# validate transaction date v/s delivery date
if self.delivery_date:
if getdate(self.transaction_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))
def validate_po(self):
# validate p.o date v/s delivery date
if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0]:
frappe.msgprint(_("Warning: Sales Order {0} already exists against same Purchase Order number").format(so[0][0]))
def validate_for_items(self):
check_list, flag = [], 0
chk_dupl_itm = []
for d in self.get('sales_order_details'):
e = [d.item_code, d.description, d.warehouse, d.prevdoc_docname or '']
f = [d.item_code, d.description]
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 'Yes':
if not d.warehouse:
frappe.throw(_("Reserved warehouse required for stock item {0}").format(d.item_code))
if e in check_list:
frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
else:
check_list.append(e)
else:
if f in chk_dupl_itm:
frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
else:
chk_dupl_itm.append(f)
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
def validate_sales_mntc_quotation(self):
for d in self.get('sales_order_details'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if self.order_type == 'Sales' and not self.delivery_date:
frappe.throw(_("Please enter 'Expected Delivery Date'"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project_name and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project_name, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))
def validate(self):
super(SalesOrder, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_mandatory()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "qty")
self.validate_for_items()
self.validate_warehouse()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self,'sales_order_details')
self.validate_with_previous_doc()
if not self.status:
self.status = "Draft"
from erpnext.utilities import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped",
"Cancelled"])
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_warehouse(self):
from erpnext.stock.utils import validate_warehouse_company
warehouses = list(set([d.warehouse for d in
self.get(self.fname) if d.warehouse]))
for w in warehouses:
validate_warehouse_company(w, self.company)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc(self.tname, {
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get(self.fname)])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
def on_submit(self):
super(SalesOrder, self).on_submit()
self.update_stock_ledger(update_stock = 1)
self.check_credit(self.grand_total)
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.grand_total, self)
self.update_prevdoc_status('submit')
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
# Cannot cancel stopped SO
if self.status == 'Stopped':
frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))
self.check_nextdoc_docstatus()
self.update_stock_ledger(update_stock = -1)
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
#check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def stop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(-1)
frappe.db.set(self, 'status', 'Stopped')
frappe.msgprint(_("{0} {1} status is Stopped").format(self.doctype, self.name))
def unstop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(1)
frappe.db.set(self, 'status', 'Submitted')
frappe.msgprint(_("{0} {1} status is Unstopped").format(self.doctype, self.name))
def update_stock_ledger(self, update_stock):
from erpnext.stock.utils import update_bin
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
args = {
"item_code": d['item_code'],
"warehouse": d['reserved_warehouse'],
"reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
"posting_date": self.transaction_date,
"voucher_type": self.doctype,
"voucher_no": self.name,
"is_amended": self.amended_from and 'Yes' or 'No'
}
update_bin(args)
def on_update(self):
pass
def get_portal_page(self):
return "order" if self.docstatus==1 else None
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order_no",
"stock_uom": "uom"
}
}
}, target_doc, postprocess)
return doc
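# A note on the get_mapped_doc mapping dicts used throughout this module:
# "doctype" names the target DocType, "validation" constrains the source document
# (here docstatus == 1, i.e. submitted), "field_map" copies source fields into
# differently named target fields, "postprocess" mutates each mapped row, and
# "condition" / "add_if_empty" control which child rows are carried across.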
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "prevdoc_detail_docname",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.delivered_qty < doc.qty
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
def postprocess(source, target):
set_missing_values(source, target)
#Get the advance paid Journal Vouchers in Sales Invoice Advance
target.get_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = source.rate and target.amount / flt(source.rate) or source.qty
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.base_amount==0 or doc.billed_amt < doc.amount
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess)
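# NOTE: set_advance_vouchers below is defined but never called, and its query
# string still carries an unformatted {amount_query} placeholder; it appears to
# be unfinished scaffolding rather than live code.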
def set_advance_vouchers(source, target):
advance_voucher_list = []
advance_voucher = frappe.db.sql("""
select
t1.name as voucher_no, t1.posting_date, t1.remark, t2.account,
t2.name as voucher_detail_no, {amount_query} as payment_amount, t2.is_advance
from
`tabJournal Voucher` t1, `tabJournal Voucher Detail` t2
""")
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "prevdoc_docname"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
|
gangadharkadam/sher
|
erpnext/selling/doctype/sales_order/sales_order.py
|
Python
|
agpl-3.0
| 14276
|
["VisIt"] |
d953842f2e4c013bef220a174b599de7793bd6353a26f640b7f9782c12117d5e
|
#IMPORTS
import numpy as np
import pyfits as pf
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
from scipy.optimize import curve_fit
from scipy.optimize import fmin
import os
from calendar import timegm
# This method uses images in 'focus_dir' to find the best focus for the telescope
# The directory 'focus_dir' should contain a series of images at different secondary focus positions
# The code then measures the FWHM of each image, fits a quadratic and minimizes it for the best focus position
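# A minimal usage sketch (the directory path is hypothetical); each FITS file is
# expected to carry a 'FOCPOS' keyword in its header:
# best_focus = focusfind("/data/focus_run/")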
def focusfind(focus_dir):
def quadratic(x,a,b,c):
return a*x**2 + b*x + c
# Get image list
images = getImgList(focus_dir)
focuses = []
metrics = []
# Get star coordinates
c = []
for x in images:
c1 = findstar(x)
if list(c1) != [-1, -1]:
c.append(c1);break
if len(c) == 0:
print "No stars.";return
print c
s = 20
# Run through images and add fwhm & sec focus to lists
for im in images:
fwhm_avg = 0
count = 0
for x,y in c:
# Extract star
starBox = im[0].data[x-s:x+s, y-s:y+s]
# Background subtract
median = np.median(starBox)
for a in range(starBox.shape[0]):
for b in range(starBox.shape[1]):
starBox[a,b]-=median
# Fit 2D gaussian and deduce FWHM
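# For a Gaussian profile, FWHM = 2*sqrt(2*ln(2))*sigma (~2.355*sigma); the lines
# below are that identity, and 0.395 is assumed to be the plate scale in arcsec/px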
try:
sig_x,sig_y = fitgaussian(starBox)[3:5]
except:
print "Could not fit Gaussian";continue
fwhm_x = 2*np.sqrt( -2*(abs(sig_x**2))*np.log(0.5) )
fwhm_y = 2*np.sqrt( -2*(abs(sig_y**2))*np.log(0.5) )
fwhm_avg += 0.395*(fwhm_x+fwhm_y)/2
count += 1
if count == 0:
print "Image analysis failed."
continue
fwhm_avg /= count
metrics.append(fwhm_avg)
focuses.append(im[0].header['FOCPOS'])
# Fit a quadratic to the (focus, FWHM) data
metrics = np.array(metrics)
focuses = np.array(focuses)
a, b, c = curve_fit(quadratic, focuses, metrics)[0]
# Minimize the fitted quadratic, seeded at the measured minimum
foc = focuses[np.argmin(metrics)]
minFoc = fmin(quadratic, foc, [a, b, c])[0]
# Plot data
smooth = np.arange(focuses[np.argmin(focuses)],
focuses[np.argmax(focuses)],
0.005)
plt.figure()
plt.plot(focuses, metrics, 'bo')
plt.plot(minFoc, quadratic(minFoc, a, b, c), 'ro')
plt.plot(smooth, quadratic(smooth, a, b, c), 'g-')
plt.annotate("Optimised Focus: %.4fmm" % (minFoc),
xy=(minFoc,quadratic(minFoc,a,b,c)),
xytext=(minFoc,quadratic(minFoc,a,b,c)+3),
arrowprops=dict(arrowstyle='->',
shrinkA=0))
plt.ylabel('FWHM(px)')
plt.xlabel('Secondary Focus Position (mm)')
return minFoc
#This method FFT-convolves two 2D images and fits a Gaussian to the correlation peak to return the offset
def getshift(img1,img2,n=1):
if type(img1)==pf.hdu.hdulist.HDUList: img1 = img1[0].data
if type(img2)==pf.hdu.hdulist.HDUList: img2 = img2[0].data
x = img1.shape[0]
y = img1.shape[1]
conv = convolve( img1[0:x/n,0:y/n], img2[0:x/n,0:y/n] )
peak = conv[np.unravel_index(np.nanargmax(conv),conv.shape)]
for a in range(conv.shape[0]):
for b in range(conv.shape[1]):
if conv[a,b] < peak/2:
conv[a,b] = 0
params = fitgaussian(conv)
#Convert to RA and Dec
return -params[2]*0.395,-params[1]*0.395
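# A minimal usage sketch (filenames are hypothetical):
# dx, dy = getshift(pf.open("ref.fits"), pf.open("new.fits"))
# giving the inter-frame offset scaled by the assumed 0.395 arcsec/px plate scale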
#Returns the Full-width-half-max of a star (given or auto-found) in an image
## Works by fitting a 2D Gaussian function to the data and using the beam width to calculate FWHM
def getfwhm(img,c=(-1,-1)):
#Check type and make sure to convert to np.array
if type(img) == pf.hdu.hdulist.HDUList: img = img[0].data
elif type(img) == str: img = pf.open(img)[0].data
#No star coords given, try find star, return -1 if failed
if c==(-1,-1):
c = findstar(img)
if c==(-1,-1): return -1
#If star coords given, extract star & fit 2d gaussian
else:
s = 10
x,y = c
starBox = img[ x-s:x+s+1, y-s:y+s+1]
#Background subtract
median = np.median(starBox)
for a in range(starBox.shape[0]):
for b in range(starBox.shape[1]):
starBox[a,b] -= median
#Fit 2D gaussian and deduce FWHM
try: sig_x,sig_y = fitgaussian(starBox)[3:5]
except: print "Could not fit Gaussian";return -1
fwhm_x = 2*np.sqrt( -2*(abs(sig_x)**2)*np.log(0.5) )
fwhm_y = 2*np.sqrt( -2*(abs(sig_y)**2)*np.log(0.5) )
li = [fwhm_x,fwhm_y,fwhm_x/fwhm_y,(fwhm_x+fwhm_y)/2]
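#Convert the FWHM entries (but not the dimensionless x/y ratio at index 2)
#from pixels to arcsec, again assuming a 0.395 arcsec/px plate scale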
for i in range(len(li)):
if i == 2: continue
li[i] *= 0.395
return li
#This method takes in an image and finds a usable star
## It works by roughly the following algorithm:
# 1. Select the brightest pixel (ignoring a margin 'm' around the image edge)
# 2. If it exceeds the saturation cap, mask the whole connected saturated region (plus a border) and repeat
# 3. Accept the candidate only if it is brighter than the minimum threshold
def findstar(img,m=25):
cap = 40000
if type(img) == pf.hdu.hdulist.HDUList: data = img[0].data
elif type(img) == np.ndarray: data = img #If array
else:
print("Unrecognized input type for findStar(img)")
return (-1,-1)
#Crop data within a certain margin, use deep copy
dataCropped = np.empty_like(data[ m:data.shape[0]-m , m:data.shape[1]-m ])
dataCropped[:] = data[ m:data.shape[0]-m , m:data.shape[1]-m ]
#Keep taking brightest until px < 90% of Full Well Value
found = False
while(not(found)):
i,j = np.unravel_index( np.nanargmax(dataCropped), dataCropped.shape )
import sys;sys.stdout.flush()
# Mask entire saturated area so that we don't select any more of these saturated pixels
if dataCropped[i,j] > cap :
#Create a box around the original saturated pixel
u,d,r,l = 1,1,1,1
box = None
done = False
count = 0
#Keep expanding box till all connected saturated pixels are enclosed
while not(done):
count+=1
if count>100: done=True
#Expand box
box = dataCropped[ max(0,i-l):min(i+r+1,data.shape[0]-1)
, max(0,j-d):min(j+u+1,data.shape[1]-1)]
#By default, we do not want to expand
expand_up,expand_down,expand_left,expand_right = False,False,False,False
#Run vertically through right- and left-most columns of box
x_0=0
x_e=box.shape[0]-1
for y in range(box.shape[1]):
#If there is a saturated pixel in the left-most column, we need to keep expanding left
if box[x_0,y] > cap:
expand_left = True; l+=1
#If there is a saturated pixel in the right-most column, we need to keep expanding right
if box[x_e,y] > cap:
expand_right = True; r+=1
#Run horizontally across top and bottom rows of box
y_0=0
y_e=box.shape[1]-1
for x in range(box.shape[0]):
#If there is a saturated pixel on the y_0 (i.e. 'd') edge, we need to keep expanding down
if box[x,y_0] > cap:
expand_down = True; d+=1
#If there is a saturated pixel on the y_e (i.e. 'u') edge, we need to keep expanding up
if box[x,y_e] > cap:
expand_up = True; u+=1
#Check if box has reached edge of image on any side and stop expanding if so
if i-l<=0: expand_left = False
if j-d<=0: expand_down = False
if i+r+1>=data.shape[0]-1: expand_right = False
if j+u+1>=data.shape[1]-1: expand_up = False
#If we have finally enclosed all saturated pixels, exit the loop
if not(expand_up or expand_down or expand_left or expand_right):
done=True
border = 20
u+=border;d+=border;l+=border;r+=border
#Expand box again by 'border' amount, so we mask the surrounding area too
box = dataCropped[ max(0,i-l):min(i+r+1,data.shape[0]-1)
, max(0,j-d):min(j+u+1,data.shape[1]-1)]
#Set all pixels to zero; this is masking the saturated pixels so we don't select them again
for a in range(box.shape[0]):
for b in range(box.shape[1]):
dataCropped[a+max(0,i-l),b+max(0,j-d)]=0
else: found = True #If our selection is not saturated, it may be used
#Add back on margin values to convert to main coord-system
i += m
j += m
minVal = 2000
#Set a lower threshold on how bright star must be
if data[i,j] < minVal:
print("No star brighter than %s found") % minVal
return (-1,-1)
#Return position of star
return (i,j)
#Simple quadratic function
def quadratic(x,a,b,c): return a*x**2 + b*x + c
#Take in directory containing FITS images and return python list of pyfits objects
def getImgList(path):
try:
fileList = os.listdir(path) #Parse filenames to list
imageList = [] #Create empty list to hold FITS objects
for infile in fileList:
imageList.append(pf.open(os.path.join(path, infile))) #Build FITS list
return imageList
except Exception:
print("Error in getting image list")
#Extract r,i,g,B bands from Rainbow Camera fits image
def extractBands(img,runType='all'):
if type(img) == pf.hdu.hdulist.HDUList: data = img[0].data
elif type(img) == str: data = (pf.open(img))[0].data
else: data = img
try:
#Get 1/8 of dimensions of matrix
x = data.shape[0]
y = data.shape[1]
#Parse central box from each quadrant using deep copies
data1 = np.empty_like( data[ 0 : x/2 , 0 : y/2 ] )
data1[:] = data[ 0 : x/2 , 0 : y/2 ]
data2 = np.empty_like(data1)
data2[:] = np.array(data[ x/2 : x , 0 : y/2 ])
data3 = np.empty_like(data1)
data3[:] = np.array(data[ 0 : x/2 , y/2 : y ])
data4 = np.empty_like(data1)
data4[:] = np.array(data[ x/2 : x , y/2 : y ])
tuple1 = [data1, (0,0)]
tuple2 = [data2, (x/2,0)]
tuple3 = [data3, (0,y/2) ]
tuple4 = [data4, (x/2,y/2) ]
allTuples = [tuple1,tuple2,tuple3,tuple4]
#Return appropriate band data
if runType == 'B': return [tuple2]
elif runType == 'g': return [tuple4]
elif runType == 'r': return [tuple1]
elif runType == 'i': return [tuple3]
else: return allTuples #Default
except Exception:
print("Error while parsing bands from FITS")
#Parse time from the 'utc' value in a fits header
def getTime(fits):
utc = fits[0].header['utc']
utc = utc.split(':')
for i in range(len(utc)): utc[i] = int(float(utc[i]))
utc = [utc[0], int(utc[1]/30), utc[1]%30]+utc[2:]
return timegm(utc)
## I DID NOT WRITE THE FOLLOWING METHODS: convolve,fitgaussian,gaussian,moments
##THEY ARE 'OFF-THE-SHELF'... OR 'OFF-THE-GOOGLE', I should say.
#Convolution function
def convolve(image1, image2, MinPad=True, pad=True):
""" Not so simple convolution """
try:
#Just for comfort:
FFt = np.fft.fft2
iFFt = np.fft.ifft2
#The size of the images:
r1,c1 = image1.shape
r2,c2 = image2.shape
#MinPad results simpler padding,smaller images:
if MinPad:
r = r1+r2
c = c1+c2
else:
#if the Numerical Recipies says so:
r = 2*max(r1,r2)
c = 2*max(c1,c2)
#For nice FFT, we need the power of 2:
if pad:
pr2 = int(np.log(r)/np.log(2.0) + 1.0 )
pc2 = int(np.log(c)/np.log(2.0) + 1.0 )
rOrig = r
cOrig = c
r = 2**pr2
c = 2**pc2
#end of if pad
#numpy fft has the padding built in, which can save us some steps
#here. The thing is the s(hape) parameter:
fftimage = FFt(image1,s=(r,c)) * FFt(image2,s=(r,c))
if pad:
x,y = ((iFFt(fftimage))[:rOrig,:cOrig]).real.shape #Crop to ignore correlations of less than 1/2 overlap
return ((iFFt(fftimage))[:rOrig,:cOrig]).real[x/4:3*x/4, y/4:3*y/4]
else:
x,y = (iFFt(fftimage)).real.shape#Crop to ignore correlations of less than 1/2 overlap
return (iFFt(fftimage)).real[x/4:3*x/4, y/4:3*y/4]
except Exception:
print("Error in convolution")
return
#Gaussian fitting function
def fitgaussian(data):
try:
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution found by a fit"""
params = moments(data)
errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -
data)
p, success = leastsq(errorfunction, params)
return p
except Exception:
print("Error in fitgaussian")
return []
#Gaussian function
def gaussian(height, center_x, center_y, width_x, width_y):
"""Returns a gaussian function with the given parameters"""
try:
width_x = float(width_x)
width_y = float(width_y)
return lambda x,y: height*np.exp(
-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)
except Exception:
print("Error in gaussian method")
return
def moments(data):
try:
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments """
total = data.sum()
X, Y = np.indices(data.shape)
x = (X*data).sum()/total
y = (Y*data).sum()/total
col = data[:, int(y)]
width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())
row = data[int(x), :]
width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())
height = data.max()
return height, x, y, width_x, width_y
except Exception:
print("Error in moments()")
return
|
scizen9/kpy
|
guider/sedmtools.py
|
Python
|
gpl-2.0
| 14768
|
["Gaussian"] |
57a543985de19b27f2a898a920be105b3956852f9bdf921607359cb1eff1f896
|
# Version: 0.19
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
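As a quick sketch, from `setup.py` you could inspect these flavors like so
(assuming the vendored `versioneer.py` sits next to it):
import versioneer
info = versioneer.get_versions()
print(info["version"], info["full-revisionid"], info["dirty"], info["error"])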
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
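For example, under these rules a `git describe` result of
"v1.2-3-g0a1b2c3-dirty" (with a tag prefix of "v") would render as
"1.2+3.g0a1b2c3.dirty".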
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
import configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
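# A minimal illustrative [versioneer] section (all values below are hypothetical):
# [versioneer]
# VCS = git
# style = pep440
# versionfile_source = src/myproject/_version.py
# versionfile_build = myproject/_version.py
# tag_prefix = v
# parentdir_prefix = myproject-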
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg) as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
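# For example, run_command(["git"], ["describe", "--tags"], cwd=root) would return
# the command's stripped stdout and its return code, or (None, None) when no
# listed executable could be found.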
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs)
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
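# Illustrative sketch (not part of versioneer): with expanded git-archive
# keywords such as
#     keywords = {"refnames": " (HEAD -> master, tag: v1.4.0, origin/master)",
#                 "full": "1a2b3c4d...", "date": "2013-10-20 12:00:00 +0000"}
# and tag_prefix "v", the logic above extracts the tag set {"v1.4.0"} and
# returns {"version": "1.4.0", "full-revisionid": "1a2b3c4d...",
#          "date": "2013-10-20T12:00:00+0000", ...}.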
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
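# Illustrative sketch (not part of versioneer): for a describe output of
# "v1.4.0-12-g1a2b3c4-dirty" with tag_prefix "v", the parsing above yields
#     pieces = {"closest-tag": "1.4.0", "distance": 12, "short": "1a2b3c4",
#               "dirty": True, "error": None, ...}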
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except OSError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.19) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except OSError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set {} to '{}'".format(filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
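# Illustrative sketch (not part of versioneer): given hypothetical pieces for
# a dirty checkout two commits past tag "1.2" at short hash "abc1234",
#     pieces = {"closest-tag": "1.2", "distance": 2, "short": "abc1234",
#               "dirty": True, "long": "abc1234def", "error": None,
#               "date": "2013-10-20T12:00:00+0000"}
# the styles above would render as:
#     render(pieces, "pep440")["version"]            # "1.2+2.gabc1234.dirty"
#     render(pieces, "pep440-pre")["version"]        # "1.2.post0.dev2"
#     render(pieces, "pep440-post")["version"]       # "1.2.post2.dev0+gabc1234"
#     render(pieces, "pep440-old")["version"]        # "1.2.post2.dev0"
#     render(pieces, "git-describe")["version"]      # "1.2-2-gabc1234-dirty"
#     render(pieces, "git-describe-long")["version"] # "1.2-2-gabc1234-dirty"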
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print(f"got version from file {versionfile_abs} {ver}")
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "build_py" in cmds:
_build_py = cmds["build_py"]
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "sdist" in cmds:
_sdist = cmds["sdist"]
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy) as f:
old = f.read()
except OSError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in) as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except OSError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
jreback/pandas
|
versioneer.py
|
Python
|
bsd-3-clause
| 70,095
|
[
"Brian"
] |
79431ab39d9fcafb372b2803c73c83ab1f5b485632e4fdd1be660d1fd91413be
|
"""
Defines the database models
"""
import json
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Order(db.Model):
"""ORM object for the orders.
Takes drink, message and optional time of order.
order_id is automatically assigned.
Running eval on the output of __repr__() initializes
a new copy of the original object.
"""
order_id = db.Column(db.Integer, primary_key = True)
drink = db.Column(db.String(64), nullable = False)
message = db.Column(db.String(256), nullable = False)
order_received = db.Column(db.DateTime)
ctime_format = "%a %b %d %H:%M:%S %Y"
def __init__(self, drink, message, order_received = None):
if order_received is None:
order_received = datetime.now()
if not isinstance(order_received, datetime):
try:
order_received = datetime.strptime(order_received, self.ctime_format)
except (TypeError, ValueError):
raise ValueError('order_received must be a datetime instance or ctime-format string')
if not (isinstance(drink, str) and isinstance(message, str)):
raise ValueError('drink and message must be strings')
self.order_received = order_received
self.drink = drink
self.message = message
def save_order(self, database, commit=True):
database.session.add(self)
if commit: database.session.commit()
def __repr__(self):
return 'Order("{}", "{}", "{}")'.format(
self.drink, self.message, self.order_received.ctime())
@property
def nicely_formatted(self):
return '{}, {} (order received: {})'.format(
self.drink, self.message, self.order_received.ctime())
@property
def make_as_json(self):
return json.dumps([self.make_as_dict])
@property
def make_as_dict(self):
return {
'drink': self.drink,
'message': self.message,
'order_received': self.order_received.ctime()}
def prepare_demo_data():
"""Prepare demo data and return it as json
Now it wont feel so lonely, when we launch the app.
"""
dummy_orders = [Order(*_) for _ in (
('Negroni', 'If you bring it here fast, I\'ll sing you a song.'),
('Espresso Martini', 'Hurry up, I\'m thirsty!'),
('Strawberry Daiquiri', 'Last time I had this was at a Bieber concert'),
('Magic Potion', 'Ya wouldn\'t happen to have any tiramisu, would ya?'),
('Injection attack', '<script> a = function(){ return "DROP TABLE Users or whatever"}</script>'),
('Rosy Martini', 'Shaken not stirred'))]
dummy_data = [order.make_as_dict for order in dummy_orders]
json_data = json.dumps(dummy_data)
return json_data
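# Minimal usage sketch (assumes a Flask app and an import path of
# app.members; the database URI is hypothetical):
#
#     from flask import Flask
#     from app.members import db, Order
#
#     app = Flask(__name__)
#     app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///orders.db'
#     db.init_app(app)
#     with app.app_context():
#         db.create_all()
#         order = Order('Negroni', 'Shaken not stirred')
#         order.save_order(db)
#         print(order.nicely_formatted)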
|
Laspimon/transact
|
app/members.py
|
Python
|
mit
| 2,761
|
[
"ESPResSo"
] |
80e9b2d56863c24184722ae492e7a97d5e6d56d7fa7274e67ec29f024c640b07
|
#!/usr/bin/env python
"""
Laplace equation with shifted periodic BCs.
Display using::
./postproc.py laplace_shifted_periodic.vtk --wireframe -b -d'u,plot_warp_scalar,rel_scaling=1'
or use the --show option.
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
from sfepy.base.base import output
from sfepy.discrete import (FieldVariable, Integral, Equation, Equations,
Function, Problem)
from sfepy.discrete.fem import FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import (Conditions, EssentialBC,
LinearCombinationBC)
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.mesh.mesh_generators import gen_block_mesh
import sfepy.discrete.fem.periodic as per
def run(domain, order):
omega = domain.create_region('Omega', 'all')
bbox = domain.get_mesh_bounding_box()
min_x, max_x = bbox[:, 0]
min_y, max_y = bbox[:, 1]
eps = 1e-8 * (max_x - min_x)
gamma1 = domain.create_region('Gamma1',
'vertices in (x < %.10f)' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in (x > %.10f)' % (max_x - eps),
'facet')
gamma3 = domain.create_region('Gamma3',
'vertices in y < %.10f' % (min_y + eps),
'facet')
gamma4 = domain.create_region('Gamma4',
'vertices in y > %.10f' % (max_y - eps),
'facet')
field = Field.from_args('fu', nm.float64, 1, omega, approx_order=order)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
integral = Integral('i', order=2*order)
t1 = Term.new('dw_laplace(v, u)',
integral, omega, v=v, u=u)
eq = Equation('eq', t1)
eqs = Equations([eq])
fix1 = EssentialBC('fix1', gamma1, {'u.0' : 0.4})
fix2 = EssentialBC('fix2', gamma2, {'u.0' : 0.0})
def get_shift(ts, coors, region):
return nm.ones_like(coors[:, 0])
dof_map_fun = Function('dof_map_fun', per.match_x_line)
shift_fun = Function('shift_fun', get_shift)
sper = LinearCombinationBC('sper', [gamma3, gamma4], {'u.0' : 'u.0'},
dof_map_fun, 'shifted_periodic',
arguments=(shift_fun,))
ls = ScipyDirect({})
nls = Newton({}, lin_solver=ls)
pb = Problem('laplace', equations=eqs)
pb.set_bcs(ebcs=Conditions([fix1, fix2]), lcbcs=Conditions([sper]))
pb.set_solver(nls)
state = pb.solve()
return pb, state
helps = {
'dims' :
'dimensions of the block [default: %(default)s]',
'centre' :
'centre of the block [default: %(default)s]',
'shape' :
'numbers of vertices along each axis [default: %(default)s]',
'show' : 'show the results figure',
}
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-d', '--dims', metavar='dims',
action='store', dest='dims',
default='[1.0, 1.0]', help=helps['dims'])
parser.add_argument('-c', '--centre', metavar='centre',
action='store', dest='centre',
default='[0.0, 0.0]', help=helps['centre'])
parser.add_argument('-s', '--shape', metavar='shape',
action='store', dest='shape',
default='[11, 11]', help=helps['shape'])
parser.add_argument('--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
dims = nm.array(eval(options.dims), dtype=nm.float64)
centre = nm.array(eval(options.centre), dtype=nm.float64)
shape = nm.array(eval(options.shape), dtype=nm.int32)
output('dimensions:', dims)
output('centre: ', centre)
output('shape: ', shape)
mesh = gen_block_mesh(dims, shape, centre, name='block-fem')
fe_domain = FEDomain('domain', mesh)
pb, state = run(fe_domain, 1)
pb.save_state('laplace_shifted_periodic.vtk', state)
if options.show:
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.domain_specific import DomainSpecificPlot
view = Viewer('laplace_shifted_periodic.vtk')
view(rel_scaling=1,
domain_specific={'u' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1'])},
is_scalar_bar=True, is_wireframe=True,
opacity=0.3)
if __name__ == '__main__':
main()
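# Example invocations (a sketch based on the argparse options defined above):
#
#     python laplace_shifted_periodic.py
#     python laplace_shifted_periodic.py --dims='[2.0, 1.0]' --shape='[21, 11]' --show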
|
sfepy/sfepy
|
examples/diffusion/laplace_shifted_periodic.py
|
Python
|
bsd-3-clause
| 5,046
|
[
"VTK"
] |
77d2093e3a600356a040fcc2ff357d3306c5d546fabecffcfb1ce8e63766253d
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import re
import urllib.parse as urlparse
import json
import os
import time
import traceback
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver import FirefoxProfile
from selenium.common.exceptions import WebDriverException
from .browser.headlessBrowser import HeadlessBrowser
from .util.scheduler import Scheduler
from .util.urleliminate import UrlEliminator
from .util.findPageForm import findPageForm
from .proxy.proxy import ProxyDaemon
from .setting import Setting
from .autosql import Autosql
from .util.lookup import lookup, initialize
from .util.utils import execute
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Crawler(object):
"""网页爬虫管理类,负责调度各项任务。
此类主要包含几个模块:
1. browser: 浏览器模块
2. scheduler: 任务调度模块
3. elimiator: url去重模块
4. proxy: 代理模块
5. salScanner: sqlmap任务调度模块
"""
def __init__(self, base_dir, sqlmap_ip, sqlmap_port, target=None, data=None, setting=None):
self.base_dir = base_dir
self.setting = setting if setting else Setting(True)
self.entry = target if target else self.setting.url
if not self.entry:
raise ValueError("Empty target")
self.setting.display()
# initialize http/https proxy and start browser
self.proxy = self.initProxy()
self.initBrowser(self.proxy)
# task scheduler
self.scheduler = Scheduler()
self.scheduler.add_task(self.entry, 0, data)
# eliminate duplicate url
self.eliminator = UrlEliminator(entry=self.entry, setting=self.setting) # mark initial page/url visited
# initialize sqlmap manager
self.sqlScanner = Autosql(sqlmap_ip, sqlmap_port)
def run(self):
"""启动扫描任务
初始化完成后,调用本函数启动扫描
:return:
"""
while True:
try:
self.scheduler.run(self.browser, self.sqlScanner, self.setting)
break
except WebDriverException:
if execute("ps | awk '{print $4}' | grep firefox"): # still alive or not
self.scheduler.flush()
logger.error(traceback.format_exc())
break
# restart headless browser
self.initBrowser(self.proxy)
except Exception:
logger.error(traceback.format_exc())
self.scheduler.flush()
break
def report(self):
self.scheduler.wait()
self.sqlScanner.wait_task(interval=10)
timestrip = time.strftime("%Y-%m-%d", time.localtime())
with open(os.path.join(self.setting.output, "report_%s.json" % timestrip), "w") as f:
cont = {task: data for task, data in self.sqlScanner.data_tasks().items()
if data and len(data) > 0}
f.write(json.dumps(cont))
def raw_report(self):
"""返回sqlmap扫描结果
:return: 返回值为三元组(ret, content, simple)
ret: 执行结果, False为失败, True为成功
content: sqlmap返回的完整报告,字典类型
simple: 解析content抽取重要数据生成的报告,字典类型
"""
initialize(self.base_dir)
self.scheduler.wait()
self.sqlScanner.wait_task(interval=10)
cont = {task: data for task, data in self.sqlScanner.data_tasks().items()
if data and len(data) >= 2}
simple = list()
for task, data in cont.items():
val = dict()
for each in data:
typ = each["type"]
if typ == 0:
val["x_url"] = task
for string in ["url", "query", "data"]:
val[string] = each["value"][string] if each["value"][string] else ""
elif typ == 1:
payload = list()
for vector in each["value"]:
for no, content in vector["data"].items():
payload.append({
"description": content["title"],
"vector": content["vector"],
"payload": content["payload"],
"method": vector['place']
})
for each_payload in payload:
lookup(each_payload, translate=True)
each_payload['vid'] = ''
each_payload['reference'] = dict()
if not isinstance(each_payload['vector'], str):
each_payload['vector'] = json.dumps(each_payload['vector'])
val["vuls"] = payload
simple.append(val)
return cont, {"result": simple}
def close(self):
"""关闭所有相关组件
扫描完成后,关闭浏览器、sqlmap以及proxy
:return:
"""
self.browser.close()
# delete all tasks
self.sqlScanner.flush_tasks()
# make sure close proxy at last
self.proxy.stop()
def initBrowser(self, proxy):
profile = self.setProxy(proxy)
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities['acceptSslCerts'] = True
capabilities['acceptInsecureCerts'] = True
# initialize headless browser
try:
self.browser = HeadlessBrowser(firefox_profile=profile, capabilities=capabilities)
except WebDriverException:
self.browser = HeadlessBrowser(firefox_profile=profile)
# catch signal whenever a page is loaded
self.browser.onfinish.connect(self.parse_page)
self.browser.state_experiment(self.setting.experiment)
def initProxy(self):
proxy = ProxyDaemon(cadir=os.path.join(self.base_dir, "ssl/"))
proxy.daemon = True
proxy.proxy.requested.connect(self.handle_request)
proxy.start()
return proxy
def setProxy(self, proxy):
profile = FirefoxProfile()
profile.accept_untrusted_certs = True
profile.assume_untrusted_cert_issuer = True
prefix = "network.proxy."
profile.set_preference("%stype" % prefix, 1)
for type in ["http", "ssl", "ftp", "socks"]:
profile.set_preference("%s%s" % (prefix, type), proxy.getHost())
profile.set_preference("%s%s_port" % (prefix, type), int(proxy.getPort()))
return profile
def handle_request(self, flow):
# logger.debug("*"*16)
# logger.debug(flow.request.pretty_host)
# logger.debug("proxy: %s" % flow.request.url)
# logger.debug(flow.request.method)
_url = str(flow.request.url)
if "mozilla" in _url: # well, we're using firefox...
return
_data = dict()
_depth = self.browser.current_depth
if flow.request.method == "POST":
for k in flow.request.query:
_data[k] = flow.request.query[k]
self.add_task(_url, _depth+1, data=_data)
else:
self.add_task(_url, _depth+1)
def add_task(self, url, depth, data=None):
if self.eliminator.visit(url):
self.scheduler.add_task(url, depth, data=data)
def parse_page(self, page):
"""
Parse page
:param page: see browser.page.Page class
:return:
"""
if not page:
logger.error("skip this page")
return
try:
match = re.search(r"(?si)<html[^>]*>(.+)</html>", page.source_page)
if match:
content = "<html>%s</html>" % match.group(1)
soup = BeautifulSoup(content, "html.parser")
tags = soup('a')
if not tags:
tags = re.finditer(r'(?si)<a[^>]+href="(?P<href>[^>"]+)"', content)
for tag in tags:
href = tag.get("href") if hasattr(tag, "get") else tag.group("href")
if href:
url = urlparse.urljoin(page.url, href)
self.add_task(url, page.depth+1)
except Exception:
logger.error("[parse page error]")
logger.error(traceback.format_exc())
finally:
# logger.debug("seaching for forms...")
for url, method, data in findPageForm(page.source_page, page.url):
logger.debug("find one form in %s" % url)
if method.upper() == "GET":
url = "%s?%s" % (url, urlencode(data))
self.add_task(url, page.depth+1)
elif method.upper() == "POST":
self.add_task(url, page.depth+1,
json.loads(data) if isinstance(data, str) else data)
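# Minimal usage sketch (paths and the locally running sqlmap API server are
# hypothetical; not part of this module):
#
#     crawler = Crawler(base_dir="/opt/crawler", sqlmap_ip="127.0.0.1",
#                       sqlmap_port=8775, target="http://testsite.example/")
#     try:
#         crawler.run()
#         full_report, simple_report = crawler.raw_report()
#     finally:
#         crawler.close()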
|
CvvT/crawler_sqlmap
|
crawler/crawler.py
|
Python
|
apache-2.0
| 9,101
|
[
"VisIt"
] |
a99d9df6c32246b2febbebef30376525f791552b26162d695f0ff23099b4433a
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import copy
import vtk
from .ClipperFilterBase import ClipperFilterBase
class PlaneClipper(ClipperFilterBase):
"""
Clip object using a plane.
"""
@staticmethod
def getOptions():
opt = ClipperFilterBase.getOptions()
opt.add('origin', [0.5, 0.5, 0.5], "The origin of the clipping plane.")
opt.add('normal', [1, 0, 0], "The outward normal of the clipping plane.")
return opt
def __init__(self, **kwargs):
super(PlaneClipper, self).__init__(vtkclipfunction=vtk.vtkPlane, **kwargs)
def update(self, **kwargs):
"""
Update the normal and origin of the clipping plane.
"""
super(PlaneClipper, self).update(**kwargs)
origin = self.getPosition(copy.copy(self.getOption('origin')))
self._vtkclipfunction.SetNormal(self.getOption('normal'))
self._vtkclipfunction.SetOrigin(origin)
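# Hedged usage sketch (the reader/result/window names follow typical chigger
# scripts and are assumptions, as is the input file):
#
#     import chigger
#     reader = chigger.exodus.ExodusReader('mug.e')
#     clip = chigger.filters.PlaneClipper(origin=[0.5, 0.5, 0.5], normal=[0, 1, 0])
#     result = chigger.exodus.ExodusResult(reader, filters=[clip])
#     window = chigger.RenderWindow(result)
#     window.start()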
|
nuclear-wizard/moose
|
python/chigger/filters/PlaneClipper.py
|
Python
|
lgpl-2.1
| 1,242
|
[
"MOOSE",
"VTK"
] |
ae1d05b673ed76733825c3f208022d380467eacbbe6511cd3798e953ed0dbc21
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
- This module is used to configure SSLProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
accepted_ciphers:
description:
- Cipher suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Enum options - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256, tls_ecdhe_ecdsa_with_aes_256_gcm_sha384, tls_ecdhe_rsa_with_aes_128_gcm_sha256,
- tls_ecdhe_rsa_with_aes_256_gcm_sha384, tls_ecdhe_ecdsa_with_aes_128_cbc_sha256, tls_ecdhe_ecdsa_with_aes_256_cbc_sha384,
- tls_ecdhe_rsa_with_aes_128_cbc_sha256, tls_ecdhe_rsa_with_aes_256_cbc_sha384, tls_rsa_with_aes_128_gcm_sha256, tls_rsa_with_aes_256_gcm_sha384,
- tls_rsa_with_aes_128_cbc_sha256, tls_rsa_with_aes_256_cbc_sha256, tls_ecdhe_ecdsa_with_aes_128_cbc_sha, tls_ecdhe_ecdsa_with_aes_256_cbc_sha,
- tls_ecdhe_rsa_with_aes_128_cbc_sha, tls_ecdhe_rsa_with_aes_256_cbc_sha, tls_rsa_with_aes_128_cbc_sha, tls_rsa_with_aes_256_cbc_sha,
- tls_rsa_with_3des_ede_cbc_sha, tls_rsa_with_rc4_128_sha.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: ''
username: ''
password: ''
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
'''
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
accepted_ciphers=dict(type='str',),
accepted_versions=dict(type='list',),
cipher_enums=dict(type='list',),
description=dict(type='str',),
dhparam=dict(type='str',),
enable_ssl_session_reuse=dict(type='bool',),
name=dict(type='str', required=True),
prefer_client_cipher_ordering=dict(type='bool',),
send_close_notify=dict(type='bool',),
ssl_rating=dict(type='dict',),
ssl_session_timeout=dict(type='int',),
tags=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslprofile',
set([]))
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_sslprofile.py
|
Python
|
bsd-3-clause
| 7,849
|
[
"VisIt"
] |
1c8af7f35522c778f0275199f8b2a8340bf2ed98396e402cd8ee3904fc78af6d
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from unittest import mock
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertEqual(cache.get_or_set('mykey', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
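# For example, custom_key_func('mykey', 'myprefix', 2) returns
# 'CUSTOM-myprefix-2-mykey'.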
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
# `params` are test specific overrides and `_caches_settings_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base.keys() if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
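# Illustrative sketch: caches_setting_for_tests(base={'BACKEND': 'x'},
# exclude={'v2'}, KEY_PREFIX='p') yields, for the 'cull' alias,
# {'BACKEND': 'x', 'OPTIONS': {'MAX_ENTRIES': 30}, 'KEY_PREFIX': 'p'},
# and omits the 'v2' alias entirely.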
class BaseCacheTests:
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with a callable default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with a callable default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with a callable default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention, where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
Passing None as the timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
"""
Passing zero as the timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
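# Why 50 sets leave 29 (or 19) keys: a sketch of the cull rule those counts
# rely on (an assumption modeled on the locmem backend; the db backend
# differs slightly and overrides test_zero_cull). Once MAX_ENTRIES is
# reached, a set() first culls 1/CULL_FREQUENCY of the entries, and a
# CULL_FREQUENCY of 0 empties the cache instead.
def _sketch_cull_survivors(num_sets, max_entries=30, cull_frequency=3):
    count = 0
    for _ in range(num_sets):
        if count >= max_entries:
            count -= count // cull_frequency if cull_frequency else count
        count += 1
    return count
assert _sketch_cull_survivors(49) == 29  # range(1, 50) performs 49 sets
assert _sketch_cull_survivors(49, cull_frequency=0) == 19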
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# Mimic a custom ``make_key`` method being defined, since the default one
# never triggers the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cache.set(key, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
self.assertEqual(str(w[0].message.args[0]), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
expected_warning = (
"Cache key contains characters that will cause errors if used "
"with memcached: %r" % key
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
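# The two tests above encode memcached's key rules; a hypothetical checker
# (illustrative only, not a Django API) would be:
def _is_memcached_safe(key):
    # At most 250 characters, and no whitespace or control characters
    # (printable ASCII and multi-byte characters are fine).
    return len(key) <= 250 and all(33 <= ord(c) <= 126 or ord(c) > 127 for c in key)
assert not _is_memcached_safe('key with spaces')
assert not _is_memcached_safe('a' * 251)
assert _is_memcached_safe('a' * 250)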
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
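# The version routing above falls out of Django's default key function,
# which embeds the version between the prefix and the key (a sketch of the
# shape, mirroring django.core.cache.backends.base.default_key_func):
def _sketch_default_key_func(key, key_prefix, version):
    return '%s:%s:%s' % (key_prefix, version, key)
assert _sketch_default_key_func('answer1', '', 1) == ':1:answer1'
assert _sketch_default_key_func('answer1', '', 1) != _sketch_default_key_func('answer1', '', 2)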
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
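# incr_version is observably "move the value to the next version": it
# disappears under the old version and reappears under the new one. A sketch
# over a plain dict (an illustration, not the real implementation):
def _sketch_incr_version(store, key, version):
    if (key, version) not in store:
        raise ValueError("Key '%s' not found" % key)
    store[(key, version + 1)] = store.pop((key, version))
    return version + 1
_store = {('answer', 2): 42}
assert _sketch_incr_version(_store, 'answer', 2) == 3
assert _store == {('answer', 3): 42}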
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertIsNone(cache.get_or_set('null', None))
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
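# A sketch of the fallback the racing test exercises (an assumption about
# the shape of the logic, not Django's exact implementation): if add()
# reports that the key already exists, read it back, falling back to the
# default if the entry expired in between.
def _sketch_get_or_set(backend, key, default):
    if callable(default):
        default = default()
    if backend.add(key, default):
        return default
    return backend.get(key, default)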
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
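# Illustrative check of the router's contract (added here, using stand-in
# model objects): cache-model reads/writes go to 'other'; unrelated apps
# defer (return None).
class _FakeMeta:
    app_label = 'django_cache'
class _FakeModel:
    _meta = _FakeMeta()
_router = DBCacheRouter()
assert _router.db_for_read(_FakeModel) == 'other'
assert _router.allow_migrate('other', 'django_cache') is True
assert _router.allow_migrate('default', 'django_cache') is False
assert _router.allow_migrate('default', 'someapp') is None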
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcached server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def test_invalid_key_characters(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons); we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
# when using the ascii protocol.
with self.assertRaises(Exception):
cache.set('key with spaces', 'value')
def test_invalid_key_length(self):
# memcached limits key length to 250
with self.assertRaises(Exception):
cache.set('a' * 251, 'value')
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# Culling isn't implemented; memcached deals with it.
pass
def test_zero_cull(self):
# Culling isn't implemented; memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Some clients (e.g. pylibmc) raise when the value is too large,
# while others (e.g. python-memcached) intentionally return True
# indicating success. This test is primarily checking that the key
# was deleted, so the return/exception behavior for the set()
# itself is not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
# libmemcached manages its own connections.
should_disconnect_on_close = False
# By default, pylibmc/libmemcached don't verify keys client-side and so
# this test triggers a server-side bug that causes later tests to fail
# (#19914). The `verify_keys` behavior option could be set to True (which
# would avoid triggering the server-side bug), however this test would
# still fail due to https://github.com/lericson/pylibmc/issues/219.
@unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
def test_invalid_key_characters(self):
pass
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'tcp_nodelay': True},
))
def test_pylibmc_legacy_options(self):
deprecation_message = (
"Specifying pylibmc cache behaviors as a top-level property "
"within `OPTIONS` is deprecated. Move `tcp_nodelay` into a dict named "
"`behaviors` inside `OPTIONS` instead."
)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
self.assertEqual(len(warns), 1)
self.assertIsInstance(warns[0].message, RemovedInDjango21Warning)
self.assertEqual(str(warns[0].message), deprecation_message)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
def test_get_does_not_ignore_non_enoent_errno_values(self):
with mock.patch('builtins.open', side_effect=IOError):
with self.assertRaises(IOError):
cache.get('foo')
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# This key is both longer than 250 characters and contains spaces.
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
Settings whose cache arguments have TIMEOUT=None create caches that set
non-expiring keys.
"""
def setUp(self):
# The 5-minute (300-second) default expiration time for keys is
# defined in BaseCache.__init__().
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non-expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie    ,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
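# patch_vary_headers merges case-insensitively while preserving the
# first-seen spelling; a simplified sketch of that merge rule (an
# assumption about the shape of the logic, not the real function):
def _sketch_merge_vary(existing, new_headers):
    existing_list = [h.strip() for h in existing.split(',')] if existing else []
    seen = {h.lower() for h in existing_list}
    return ', '.join(existing_list + [h for h in new_headers if h.lower() not in seen])
assert _sketch_merge_vary('Cookie', ('Accept-Encoding',)) == 'Cookie, Accept-Encoding'
assert _sketch_merge_vary('Accept-Encoding', ('accept-encoding',)) == 'Accept-Encoding'
assert _sketch_merge_vary(None, ('Accept-Encoding', 'COOKIE')) == 'Accept-Encoding, COOKIE'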
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
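# The expected keys above decompose as
#   'views.decorators.cache.cache_page.<key_prefix>.<method>.'
#   '<md5 of the absolute URL>.<md5 of the joined Vary header values>'
# (illustrative breakdown). The recurring trailing hash is just the MD5 of
# an empty byte string, i.e. no Vary headers were recorded:
import hashlib
assert hashlib.md5(b'').hexdigest() == 'd41d8cd98f00b204e9800998ecf8427e'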
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = timezone.get_current_timezone_name()
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Timezone-dependent cache keys should use ASCII characters only
# (#17476). The implementation here is a bit odd (timezone.utc is an
# instance, not a class), but it simulates the correct conditions.
class CustomTzName(timezone.utc):
pass
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName):
CustomTzName.zone = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(
sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active"
)
CustomTzName.zone = 'Hora estándar de Argentina' # unicode string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(
sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active"
)
@ignore_warnings(category=RemovedInDjango21Warning) # USE_ETAGS=True
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# Cache with a non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# ETags are used.
self.assertTrue(get_cache_data.has_header('ETag'))
# ETags can be disabled.
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
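        # In short, per the assertions above: constructed with no arguments,
        # CacheMiddleware reads the CACHE_MIDDLEWARE_* settings; once any
        # argument is passed (as cache_page does), decorator-mode defaults and
        # the explicit values win instead.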
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
        # ... but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
        # ... even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
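        # A hedged reading of the literal keys asserted above: the key shape is
        # <module prefix>.<key_prefix>.<method>.<md5 of request URL>.<md5 of
        # Vary header values>; d41d8cd98f00b204e9800998ecf8427e is the MD5 of
        # the empty string, i.e. no Vary headers were learned here.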
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@ignore_warnings(category=RemovedInDjango21Warning)
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
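        # A hedged reading of the keys asserted above: make_template_fragment_key
        # appends to 'template.cache.<fragment_name>.' an MD5 over the vary_on
        # values (url-quoted and colon-joined); with no vary_on the digest is
        # the MD5 of the empty string.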
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
mattseymour/django
|
tests/cache/tests.py
|
Python
|
bsd-3-clause
| 91,717
|
[
"Brian"
] |
7c712187b10a8cfd99ab65ea634d2b2c62897ef2d43b03972e95824d856341fb
|
from django.db import models
from taggit.managers import TaggableManager
ARTIFACT_TYPES = (
(0, 'Gold'),
(1, 'Treasure'),
(2, 'Weapon'),
(3, 'Magic Weapon'),
(4, 'Container'),
(5, 'Light Source'),
(6, 'Drinkable'),
(7, 'Readable'),
(8, 'Door/Gate'),
(9, 'Edible'),
(10, 'Bound Monster'),
(11, 'Wearable'), # armor/shield
(12, 'Disguised Monster'),
(13, 'Dead Body'),
(14, 'User 1'),
(15, 'User 2'),
(16, 'User 3'),
)
AXE = 1
BOW = 2
CLUB = 3
SPEAR = 4
SWORD = 5
WEAPON_TYPES = (
(AXE, 'Axe'),
(BOW, 'Bow'),
(CLUB, 'Club'),
(SPEAR, 'Spear'),
(SWORD, 'Sword')
)
CLOTHING_TYPES = (
(0, 'Clothes or Armor/Shield'),
(1, 'Coats, Capes, etc.'),
(2, 'Shoes, boots'),
(3, 'Gloves'),
(4, 'Hats, headwear'),
(5, 'Jewelry'),
(6, 'Undergarments'),
)
ARMOR_TYPES = (
(0, 'Armor'),
(1, 'Shield'),
(2, 'Helmet'),
(3, 'Gloves'),
(4, 'Ring'),
)
MARKDOWN_CHOICES = [(False, "Plain text"), (True, "Markdown")]
class Author(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Adventure(models.Model):
name = models.CharField(max_length=50)
description = models.TextField(default='', blank=True)
full_description = models.TextField(default='', blank=True)
intro_text = models.TextField(
default='', blank=True,
help_text="Text shown to the adventurer when they begin the adventure. Use this to set up the story. Split"
" it into multiple pages by using a line containing three hyphens as a break. Supports Markdown."
)
intro_question = models.TextField(
default='', blank=True,
help_text="If you want to ask the adventurer a question when they start the adventure, put"
" the question text here. The answer will be available in the game object."
)
slug = models.SlugField(null=True)
edx = models.CharField(null=True, max_length=50, blank=True)
edx_version = models.FloatField(default=0, blank=True, null=True)
edx_room_offset = models.IntegerField(default=0, null=True, blank=True)
edx_artifact_offset = models.IntegerField(default=0, null=True, blank=True)
edx_effect_offset = models.IntegerField(default=0, null=True, blank=True)
edx_monster_offset = models.IntegerField(default=0, null=True, blank=True)
edx_program_file = models.CharField(null=True, max_length=50, blank=True)
directions = models.IntegerField(default=6)
dead_body_id = models.IntegerField(
default=0, blank=True, null=True,
help_text="The artifact ID of the first dead body. Leave blank to not use dead body artifacts.")
    active = models.BooleanField(default=False)
# the first and last index of hints read from the hints file - used with the import_hints management command
first_hint = models.IntegerField(null=True, blank=True)
last_hint = models.IntegerField(null=True, blank=True)
date_published = models.DateField(null=True, blank=True)
featured_month = models.CharField(null=True, blank=True, max_length=7)
tags = TaggableManager(blank=True)
authors = models.ManyToManyField(Author)
def __str__(self):
return self.name
@property
def times_played(self):
return ActivityLog.objects.filter(type='start adventure', adventure_id=self.id).count()
@property
def avg_ratings(self):
return self.ratings.all().aggregate(models.Avg('overall'), models.Avg('combat'), models.Avg('puzzle'))
@property
def rooms_count(self):
return Room.objects.filter(adventure_id=self.id).count()
@property
def artifacts_count(self):
return Artifact.objects.filter(adventure_id=self.id).count()
@property
def effects_count(self):
return Effect.objects.filter(adventure_id=self.id).count()
@property
def monsters_count(self):
return Monster.objects.filter(adventure_id=self.id).count()
class Meta:
ordering = ['name']
class Room(models.Model):
adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='rooms')
room_id = models.IntegerField(default=0) # The in-game room ID.
name = models.CharField(max_length=255)
is_markdown = models.BooleanField(default=False, choices=MARKDOWN_CHOICES, verbose_name="Text format")
description = models.TextField(max_length=1000)
# The ID of an effect to display after the description
effect = models.IntegerField(null=True, blank=True)
# The ID of an effect to display after the description, without a paragraph break.
effect_inline = models.IntegerField(null=True, blank=True)
is_dark = models.BooleanField(default=False)
dark_name = models.CharField(null=True, blank=True, max_length=255,
help_text="The name shown if the room is dark and the player doesn't have a light. "
"Leave blank to use the standard 'in the dark' message.")
dark_description = models.TextField(
null=True, blank=True, max_length=1000,
help_text="The description shown if the room is dark and the player doesn't"
" have a light. Leave blank to use the standard 'it's too dark to see' message.")
data = models.TextField(
max_length=1000, null=True, blank=True,
help_text="Adventure-specific data for this room, e.g., room type or environment "
"(road, cave, snow, etc.). Data can be used in custom code. Enter as a "
"JSON object."
)
def __str__(self):
return self.name
class RoomExit(models.Model):
adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='room_exits', null=True)
direction = models.CharField(max_length=2)
room_from = models.ForeignKey(Room, on_delete=models.CASCADE, related_name='exits')
room_to = models.IntegerField(default=0) # Not a real foreign key. Yet.
door_id = models.IntegerField(null=True, blank=True)
effect_id = models.IntegerField(null=True, blank=True,
help_text="The effect will be shown when the player moves in this direction. "
"You can also enter a zero for the connection and an effect ID to set up "
"a custom message on a non-existent exit, e.g., if the player can't go in"
" the ocean without a boat, etc.")
def __str__(self):
return str(self.room_from) + " " + self.direction
def save(self, **kwargs):
if self.room_from and self.adventure_id != self.room_from.adventure_id:
self.adventure_id = self.room_from.adventure_id
super().save(**kwargs)
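    # Note on the save() override above: adventure is denormalized onto the
    # exit, presumably so exits can be filtered by adventure without joining
    # through Room; HintAnswer further below keeps its adventure in sync with
    # its parent Hint the same way.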
class Artifact(models.Model):
adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='artifacts')
artifact_id = models.IntegerField(default=0) # The in-game artifact ID.
article = models.CharField(max_length=20, null=True, blank=True,
help_text="Optional article or adjective that appears before the name, "
"e.g., 'a', 'the', 'some'.")
name = models.CharField(max_length=255)
synonyms = models.CharField(
null=True, max_length=255, blank=True,
help_text="Other terms for this artifact. E.g., if the artifact name is 'secret door in"
" north wall' you could have a synonym of 'door' to help the player find it.")
is_markdown = models.BooleanField(default=False, choices=MARKDOWN_CHOICES, verbose_name="Text format")
description = models.TextField(max_length=1000)
# The ID of an effect to display after the description
effect = models.IntegerField(null=True, blank=True)
# The ID of an effect to display after the description, without a paragraph break.
effect_inline = models.IntegerField(null=True, blank=True)
room_id = models.IntegerField(
null=True, blank=True,
help_text="If in a room, the room ID"
)
monster_id = models.IntegerField(
null=True, blank=True,
help_text="If carried by a monster, the monster ID"
)
container_id = models.IntegerField(
null=True, blank=True,
help_text="If in a container, the container ID"
)
guard_id = models.IntegerField(
null=True, blank=True,
help_text="If a bound monster, the ID of a monster that prevents the player from freeing it. For other "
"artifact types, the ID of a monster that prevents the player from picking it up."
)
weight = models.IntegerField(
default=0,
help_text="Weight in Gronds. Enter -999 for something that can't be picked up, or 999 to show the message "
"'Don't be absurd' if the player tries to pick it up."
)
value = models.IntegerField(default=0)
type = models.IntegerField(null=True, choices=ARTIFACT_TYPES)
is_worn = models.BooleanField(default=False)
is_open = models.BooleanField(default=False)
key_id = models.IntegerField(
null=True, blank=True,
help_text="If a container, door, or bound monster, the artifact ID of the key that opens it"
)
linked_door_id = models.IntegerField(
null=True, blank=True,
help_text="To make a two-sided door, enter the artifact ID of the other side of the door. "
"They will open and close as a set."
)
hardiness = models.IntegerField(
null=True, blank=True,
help_text="If a door or container that can be smashed open, how much damage does it take to open it?")
weapon_type = models.IntegerField(null=True, blank=True, choices=WEAPON_TYPES)
hands = models.IntegerField(default=1, choices=(
(1, 'One-handed'),
(2, 'Two-handed')
))
weapon_odds = models.IntegerField(null=True, blank=True)
dice = models.IntegerField(null=True, blank=True)
sides = models.IntegerField(null=True, blank=True)
clothing_type = models.IntegerField(null=True, choices=CLOTHING_TYPES, help_text="Reserved for future use.")
armor_class = models.IntegerField(
null=True, default=0,
help_text="(Armor only) How many hits does this armor protect against?"
)
armor_type = models.IntegerField(null=True, blank=True, choices=ARMOR_TYPES)
armor_penalty = models.IntegerField(
default=0, null=True,
help_text="(Armor only) How much does this reduce the player's chance to hit, if they don't have enough "
"armor expertise?"
)
get_all = models.BooleanField(
default=True,
help_text="Will the 'get all' command pick up this item?"
)
embedded = models.BooleanField(
default=False,
help_text="Check this box to make the item not appear in the artifacts list until the player looks at it.")
hidden = models.BooleanField(
default=False,
help_text="(For secret doors only) Check this box for embedded secret doors, so that the player can't "
"pass through them before finding them.")
quantity = models.IntegerField(
null=True, blank=True,
help_text="Drinks or bites, fuel for light source, etc."
)
effect_id = models.IntegerField(
null=True, blank=True,
help_text="First effect ID for Readable artifacts"
)
num_effects = models.IntegerField(
null=True, blank=True,
help_text="Number of effects for Readable artifacts"
)
data = models.TextField(
max_length=1000, null=True, blank=True,
help_text="Adventure-specific data for this artifact, e.g., elemental weapon, etc."
"Enter as a JSON object."
)
def __str__(self):
return self.name
class ArtifactMarking(models.Model):
"""
Markings on a readable artifact
"""
artifact = models.ForeignKey(Artifact, on_delete=models.CASCADE)
marking = models.TextField(max_length=65535)
class Effect(models.Model):
STYLES = (
('', 'Normal'),
('emphasis', 'Bold'),
('success', 'Success (green)'),
('special', 'Special 1 (blue)'),
        ('special2', 'Special 2 (purple)'),
('warning', 'Warning (orange)'),
('danger', 'Danger (red)'),
)
adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='effects')
effect_id = models.IntegerField(default=0) # The in-game effect ID.
is_markdown = models.BooleanField(default=False, choices=MARKDOWN_CHOICES, verbose_name="Text format")
text = models.TextField(max_length=65535)
style = models.CharField(max_length=20, null=True, blank=True, choices=STYLES) # display effect text in color
next = models.IntegerField(null=True, blank=True,
help_text="The next chained effect. Used with EDX conversions.")
next_inline = models.IntegerField(null=True, blank=True,
help_text="The next chained effect, no line break. Used with EDX conversions.")
def __str__(self):
return self.text[0:50]
class Monster(models.Model):
FRIENDLINESS = (
('friend', 'Always Friendly'),
('neutral', 'Always Neutral'),
('hostile', 'Always Hostile'),
('random', 'Random'),
)
COMBAT_CODES = (
(1, "Attacks using generic ATTACK message (e.g., slime, snake, bird)"),
(0, "Uses weapon, or with natural weapons if specified (default)"),
(-1, "Use weapon if it has one, otherwise natural weapons"),
(-2, "Never fights"),
)
adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='monsters')
monster_id = models.IntegerField(default=0) # The in-game monster ID.
article = models.CharField(max_length=20, null=True, blank=True,
help_text="Optional article or adjective that appears before the name, "
"e.g., 'a', 'the', 'some'. Does not apply to group monsters.")
name = models.CharField(max_length=255)
name_plural = models.CharField(
max_length=255, null=True, blank=True,
help_text="The plural form of the name. Used only with group monsters.")
synonyms = models.CharField(
null=True, max_length=255, blank=True,
help_text="Other names used for this monster. If the name is 'python' a synonym might be 'snake'")
is_markdown = models.BooleanField(default=False, choices=MARKDOWN_CHOICES, verbose_name="Text format")
description = models.TextField(max_length=1000)
# The ID of an effect to display after the description
effect = models.IntegerField(null=True, help_text="Used only with EDX conversions")
# The ID of an effect to display after the description, without a paragraph break.
effect_inline = models.IntegerField(null=True, help_text="Used only with EDX conversions")
count = models.IntegerField(default=1)
hardiness = models.IntegerField(default=12)
agility = models.IntegerField(default=12)
friendliness = models.CharField(max_length=10, choices=FRIENDLINESS)
friend_odds = models.IntegerField(default=50,
help_text="Used only when 'Friendliness' is 'Random'"
)
combat_code = models.IntegerField(default=0, choices=COMBAT_CODES)
courage = models.IntegerField(default=100)
pursues = models.BooleanField(default=True, help_text="Will the monster pursue a fleeing player?")
room_id = models.IntegerField(null=True, blank=True)
container_id = models.IntegerField(
null=True, blank=True,
help_text="Container artifact ID where this monster starts. The monster will enter the room as soon as the "
"container is opened. e.g., a vampire who awakes when you open his coffin"
)
gender = models.CharField(max_length=6, choices=(
('male', 'Male'),
('female', 'Female'),
('none', 'None'),
), null=True, blank=True)
weapon_id = models.IntegerField(
null=True, blank=True,
help_text="Enter an artifact ID, or zero for natural weapons. Leave blank for no weapon.")
attack_odds = models.IntegerField(
default=50,
help_text="Base attack odds, before agility and armor adjustments. Weapon type does not matter.")
weapon_dice = models.IntegerField(
default=1,
help_text="Applies to natural weapons only. For an artifact weapon, the weapon's dice and sides will be used.")
weapon_sides = models.IntegerField(default=4,
help_text="Applies to natural weapons only.")
defense_bonus = models.IntegerField(
default=0,
help_text="Gives the monster an additional percent bonus to avoid being hit. (Rare)"
)
armor_class = models.IntegerField(default=0)
special = models.CharField(max_length=255, null=True, blank=True)
data = models.TextField(
max_length=1000, null=True, blank=True,
help_text="Adventure-specific data for this monster, e.g., type of monster like "
"vampire, undead, soldier, frost, etc. Data can be used in custom code. "
"Enter as a JSON object."
)
combat_verbs = models.CharField(
max_length=255, null=True, blank=True,
help_text="Custom combat verbs for this monster, e.g., 'stings' or 'breathes fire at'. "
"Leave blank to use the standard verbs.")
def __str__(self):
return self.name
class Hint(models.Model):
"""
Represents a hint for the adventure hints system
"""
adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='hints', null=True)
index = models.IntegerField(null=True)
edx = models.CharField(max_length=50, null=True, blank=True)
question = models.CharField(max_length=255)
def __str__(self):
return self.question
class HintAnswer(models.Model):
"""
Represents an answer to a hint. Each hint may have more than one answer.
"""
adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='hint_answers', null=True)
hint = models.ForeignKey(Hint, on_delete=models.CASCADE, related_name='answers')
index = models.IntegerField(null=True)
answer = models.TextField(max_length=1000, help_text="Supports Markdown.")
spoiler = models.BooleanField(default=False,
help_text="Obscure the answer until the user shows it.")
def save(self, **kwargs):
if self.hint and self.adventure_id != self.hint.adventure_id:
self.adventure_id = self.hint.adventure_id
super().save(**kwargs)
class PlayerProfile(models.Model):
social_id = models.CharField(max_length=100, null=True)
uuid = models.CharField(max_length=255, null=True)
class Player(models.Model):
"""
Represents the player saved in the main hall.
"""
name = models.CharField(max_length=255)
gender = models.CharField(max_length=6, choices=(
('m', 'Male'),
('f', 'Female')
))
hardiness = models.IntegerField(default=12)
agility = models.IntegerField(default=12)
charisma = models.IntegerField(default=12)
gold = models.IntegerField(default=200)
gold_in_bank = models.IntegerField(default=0)
wpn_axe = models.IntegerField("Axe ability", default=5)
wpn_bow = models.IntegerField("Bow/missile ability", default=-10)
wpn_club = models.IntegerField("Club ability", default=20)
wpn_spear = models.IntegerField("Spear/Polearm ability", default=10)
wpn_sword = models.IntegerField("Sword ability", default=0)
armor_expertise = models.IntegerField(default=0)
spl_blast = models.IntegerField("Blast ability", default=0)
spl_heal = models.IntegerField("Heal ability", default=0)
spl_power = models.IntegerField("Power ability", default=0)
spl_speed = models.IntegerField("Speed ability", default=0)
uuid = models.CharField(max_length=255, null=True)
def __str__(self):
return self.name
def log(self, type, adventure_id=None):
l = ActivityLog(player=self, type=type, adventure_id=adventure_id)
l.save()
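    # Usage sketch (hypothetical values): player.log('start adventure',
    # adventure_id=3) writes a single ActivityLog row; Adventure.times_played
    # above counts exactly those rows.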
class PlayerArtifact(models.Model):
"""
The items (weapons, armor, shield) in the player's inventory in the main hall
"""
TYPES = (
(2, 'Weapon'),
(3, 'Magic Weapon'),
(11, 'Wearable'), # armor/shield
)
ARMOR_TYPES = (
(0, 'Armor'),
(1, 'Shield'), # different in EDX - see manual
(2, 'Helmet'),
(3, 'Gloves'),
(4, 'Ring'),
)
HANDS = (
(1, 'One-handed'),
(2, 'Two-handed')
)
player = models.ForeignKey(Player, on_delete=models.CASCADE, related_name='inventory')
name = models.CharField(max_length=255)
description = models.TextField(max_length=1000)
type = models.IntegerField(choices=TYPES)
weight = models.IntegerField(default=0)
value = models.IntegerField(default=0)
weapon_type = models.IntegerField(default=0, choices=WEAPON_TYPES, null=True)
hands = models.IntegerField(choices=HANDS, default=1)
weapon_odds = models.IntegerField(default=0, null=True)
dice = models.IntegerField(default=1, null=True)
sides = models.IntegerField(default=1, null=True)
armor_type = models.IntegerField(default=0, choices=ARMOR_TYPES, null=True)
armor_class = models.IntegerField(default=0, null=True)
armor_penalty = models.IntegerField(default=0, null=True)
def __str__(self):
return "{} {}".format(self.player, self.name)
class ActivityLog(models.Model):
"""
Used to track player activity (going on adventures, etc.)
"""
player = models.ForeignKey(Player, null=True, blank=True, on_delete=models.CASCADE, related_name='activity_log')
type = models.CharField(max_length=255)
value = models.IntegerField(null=True, blank=True)
adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='activity_log', null=True)
created = models.DateTimeField(auto_now_add=True, null=True)
|
kdechant/eamon
|
adventure/models.py
|
Python
|
mit
| 22,320
|
[
"BLAST"
] |
dd0b680a06f69ec93057f1aad3a5315d79814a855c333aea828a19aced97e36d
|
# -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Module.
"""
import os
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipIf
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.video.video import VideoComponentPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import UniqueCourseTest, is_youtube_available, YouTubeStubConfig
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class CMSVideoBaseTest(UniqueCourseTest):
"""
CMS Video Module Base Test Class
"""
def setUp(self):
"""
Initialization of pages and course fixture for tests
"""
super(CMSVideoBaseTest, self).setUp()
self.video = VideoComponentPage(self.browser)
# This will be initialized later
self.unit_page = None
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.assets = []
self.addCleanup(YouTubeStubConfig.reset)
def _create_course_unit(self, youtube_stub_config=None, subtitles=False):
"""
Create a Studio Video Course Unit and Navigate to it.
Arguments:
youtube_stub_config (dict)
subtitles (bool)
"""
if youtube_stub_config:
YouTubeStubConfig.configure(youtube_stub_config)
if subtitles:
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
self.navigate_to_course_unit()
def _create_video(self):
"""
Create Xblock Video Component.
"""
self.video.create_video()
video_xblocks = self.video.xblocks()
        # The total video xblock component count should equal 2.
        # Why 2? One video component is created by default for each test (see
        # test_studio_video_module.py:CMSVideoTest._create_course_unit), and
        # we are creating the second video component here.
        self.assertEqual(video_xblocks, 2)
def _install_course_fixture(self):
"""
Prepare for tests by creating a course with a section, subsection, and unit.
Performs the following:
Create a course with a section, subsection, and unit
Create a user and make that user a course author
Log the user into studio
"""
if self.assets:
self.course_fixture.add_asset(self.assets)
# Create course with Video component
self.course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('video', 'Video')
)
)
)
).install()
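        # The XBlock nesting above (chapter -> sequential -> vertical -> video)
        # is what Studio presents as Section -> Subsection -> Unit -> component;
        # _navigate_to_course_unit_page below walks the same hierarchy.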
# Auto login and register the course
AutoAuthPage(
self.browser,
staff=False,
username=self.course_fixture.user.get('username'),
email=self.course_fixture.user.get('email'),
password=self.course_fixture.user.get('password')
).visit()
def _navigate_to_course_unit_page(self):
"""
Open the course from the dashboard and expand the section and subsection and click on the Unit link
The end result is the page where the user is editing the newly created unit
"""
# Visit Course Outline page
self.outline.visit()
# Visit Unit page
self.unit_page = self.outline.section('Test Section').subsection('Test Subsection').expand_subsection().unit(
'Test Unit').go_to()
self.video.wait_for_video_component_render()
def navigate_to_course_unit(self):
"""
Install the course with required components and navigate to course unit page
"""
self._install_course_fixture()
self._navigate_to_course_unit_page()
def edit_component(self, xblock_index=1):
"""
Open component Edit Dialog for first component on page.
Arguments:
xblock_index: number starting from 1 (0th entry is the unit page itself)
"""
self.unit_page.xblocks[xblock_index].edit()
def open_advanced_tab(self):
"""
Open components advanced tab.
"""
# The 0th entry is the unit page itself.
self.unit_page.xblocks[1].open_advanced_tab()
def open_basic_tab(self):
"""
Open components basic tab.
"""
# The 0th entry is the unit page itself.
self.unit_page.xblocks[1].open_basic_tab()
def save_unit_settings(self):
"""
Save component settings.
"""
# The 0th entry is the unit page itself.
self.unit_page.xblocks[1].save_settings()
@attr('shard_4')
class CMSVideoTest(CMSVideoBaseTest):
"""
CMS Video Test Class
"""
def test_youtube_stub_proxy(self):
"""
Scenario: YouTube stub server proxies YouTube API correctly
Given youtube stub server proxies YouTube API
And I have created a Video component
Then I can see video button "play"
And I click video button "play"
Then I can see video button "pause"
"""
self._create_course_unit(youtube_stub_config={'youtube_api_blocked': False})
self.assertTrue(self.video.is_button_shown('play'))
self.video.click_player_button('play')
self.video.wait_for_state('playing')
self.assertTrue(self.video.is_button_shown('pause'))
def test_youtube_stub_blocks_youtube_api(self):
"""
Scenario: YouTube stub server can block YouTube API
Given youtube stub server blocks YouTube API
And I have created a Video component
Then I do not see video button "play"
"""
self._create_course_unit(youtube_stub_config={'youtube_api_blocked': True})
self.assertFalse(self.video.is_button_shown('play'))
def test_autoplay_is_disabled(self):
"""
Scenario: Autoplay is disabled in Studio
Given I have created a Video component
Then when I view the video it does not have autoplay enabled
"""
self._create_course_unit()
self.assertFalse(self.video.is_autoplay_enabled)
def test_video_creation_takes_single_click(self):
"""
        Scenario: Creating a video takes a single click
        Given I have created a course unit
        Then I can create a video with a single click
"""
self._create_course_unit()
# This will create a video by doing a single click and then ensure that video is created
self._create_video()
def test_captions_hidden_correctly(self):
"""
Scenario: Captions are hidden correctly
Given I have created a Video component with subtitles
And I have hidden captions
Then when I view the video it does not show the captions
"""
self._create_course_unit(subtitles=True)
self.video.hide_captions()
self.assertFalse(self.video.is_captions_visible())
def test_video_controls_shown_correctly(self):
"""
Scenario: Video controls for all videos show correctly
Given I have created two Video components
        And the first is a private video
When I reload the page
Then video controls for all videos are visible
"""
self._create_course_unit(youtube_stub_config={'youtube_api_private_video': True})
self.video.create_video()
        # Change the ID of the first (default) video
self.edit_component(1)
self.open_advanced_tab()
self.video.set_field_value('YouTube ID', 'sampleid123')
self.save_unit_settings()
        # Open the unit page again and check that video controls show for both videos
self._navigate_to_course_unit_page()
self.assertTrue(self.video.is_controls_visible())
def test_captions_shown_correctly(self):
"""
Scenario: Captions are shown correctly
Given I have created a Video component with subtitles
Then when I view the video it does show the captions
"""
self._create_course_unit(subtitles=True)
self.assertTrue(self.video.is_captions_visible())
def test_captions_toggling(self):
"""
Scenario: Captions are toggled correctly
Given I have created a Video component with subtitles
And I have toggled captions
Then when I view the video it does show the captions
"""
self._create_course_unit(subtitles=True)
self.video.click_player_button('CC')
self.assertFalse(self.video.is_captions_visible())
self.video.click_player_button('CC')
self.assertTrue(self.video.is_captions_visible())
def test_caption_line_focus(self):
"""
Scenario: When enter key is pressed on a caption, an outline shows around it
Given I have created a Video component with subtitles
And Make sure captions are opened
Then I focus on first caption line
And I see first caption line has focused
"""
self._create_course_unit(subtitles=True)
self.video.show_captions()
self.video.focus_caption_line(2)
self.assertTrue(self.video.is_caption_line_focused(2))
def test_slider_range_works(self):
"""
Scenario: When start and end times are specified, a range on slider is shown
Given I have created a Video component with subtitles
And Make sure captions are closed
And I edit the component
And I open tab "Advanced"
And I set value "00:00:12" to the field "Video Start Time"
And I set value "00:00:24" to the field "Video Stop Time"
And I save changes
And I click video button "play"
Then I see a range on slider
"""
self._create_course_unit(subtitles=True)
self.video.hide_captions()
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Video Start Time', '00:00:12')
self.video.set_field_value('Video Stop Time', '00:00:24')
self.save_unit_settings()
self.video.click_player_button('play')
@attr('a11y')
class CMSVideoA11yTest(CMSVideoBaseTest):
"""
CMS Video Accessibility Test Class
"""
def setUp(self):
browser = os.environ.get('SELENIUM_BROWSER', 'firefox')
# the a11y tests run in CI under phantomjs which doesn't
# support html5 video or flash player, so the video tests
# don't work in it. We still want to be able to run these
# tests in CI, so override the browser setting if it is
# phantomjs.
if browser == 'phantomjs':
browser = 'firefox'
with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
super(CMSVideoA11yTest, self).setUp()
def test_video_player_a11y(self):
# Limit the scope of the audit to the video player only.
self.outline.a11y_audit.config.set_scope(include=["div.video"])
self.outline.a11y_audit.config.set_rules({
"ignore": [
'link-href', # TODO: AC-223
],
})
self._create_course_unit()
self.outline.a11y_audit.check_for_accessibility_errors()
|
JCBarahona/edX
|
common/test/acceptance/tests/video/test_studio_video_module.py
|
Python
|
agpl-3.0
| 11,750
|
[
"VisIt"
] |
99333970e5ab6de9caff16e70caac8fe881a90024b56a7e27ad5f39312349db0
|
#-----------------------------------------------------------------------------
# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import time
import os
import threading
import zmq
from zmq.tests import BaseZMQTestCase
from zmq.eventloop import ioloop
from zmq.eventloop.minitornado.ioloop import _Timeout
try:
from tornado.ioloop import PollIOLoop, IOLoop as BaseIOLoop
except ImportError:
from zmq.eventloop.minitornado.ioloop import IOLoop as BaseIOLoop
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def printer():
    print(time.time())
class Delay(threading.Thread):
def __init__(self, f, delay=1):
        self.f = f
        self.delay = delay
        self.aborted = False
        self.cond = threading.Condition()
super(Delay, self).__init__()
def run(self):
self.cond.acquire()
self.cond.wait(self.delay)
self.cond.release()
if not self.aborted:
self.f()
def abort(self):
        self.aborted = True
self.cond.acquire()
self.cond.notify()
self.cond.release()
class TestIOLoop(BaseZMQTestCase):
def test_simple(self):
"""simple IOLoop creation test"""
loop = ioloop.IOLoop()
dc = ioloop.PeriodicCallback(loop.stop, 200, loop)
pc = ioloop.PeriodicCallback(lambda : None, 10, loop)
pc.start()
dc.start()
t = Delay(loop.stop,1)
t.start()
loop.start()
        if t.is_alive():
t.abort()
else:
self.fail("IOLoop failed to exit")
def test_timeout_compare(self):
"""test timeout comparisons"""
loop = ioloop.IOLoop()
t = _Timeout(1, 2, loop)
t2 = _Timeout(1, 3, loop)
self.assertEqual(t < t2, id(t) < id(t2))
t2 = _Timeout(2,1, loop)
self.assertTrue(t < t2)
def test_poller_events(self):
"""Tornado poller implementation maps events correctly"""
req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
poller = ioloop.ZMQPoller()
poller.register(req, ioloop.IOLoop.READ)
poller.register(rep, ioloop.IOLoop.READ)
events = dict(poller.poll(0))
self.assertEqual(events.get(rep), None)
self.assertEqual(events.get(req), None)
poller.register(req, ioloop.IOLoop.WRITE)
poller.register(rep, ioloop.IOLoop.WRITE)
events = dict(poller.poll(1))
self.assertEqual(events.get(req), ioloop.IOLoop.WRITE)
self.assertEqual(events.get(rep), None)
poller.register(rep, ioloop.IOLoop.READ)
req.send(b'hi')
events = dict(poller.poll(1))
self.assertEqual(events.get(rep), ioloop.IOLoop.READ)
self.assertEqual(events.get(req), None)
def test_instance(self):
"""Test IOLoop.instance returns the right object"""
loop = ioloop.IOLoop.instance()
self.assertEqual(loop.__class__, ioloop.IOLoop)
loop = BaseIOLoop.instance()
self.assertEqual(loop.__class__, ioloop.IOLoop)
def test_close_all(self):
"""Test close(all_fds=True)"""
loop = ioloop.IOLoop.instance()
req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
loop.add_handler(req, lambda msg: msg, ioloop.IOLoop.READ)
loop.add_handler(rep, lambda msg: msg, ioloop.IOLoop.READ)
self.assertEqual(req.closed, False)
self.assertEqual(rep.closed, False)
loop.close(all_fds=True)
self.assertEqual(req.closed, True)
self.assertEqual(rep.closed, True)
|
ellisonbg/pyzmq
|
zmq/tests/test_ioloop.py
|
Python
|
lgpl-3.0
| 4,155
|
[
"Brian"
] |
3092fb9fb552dac6dc2fe5095b5e5cd5b13a318c1005c81377b0c1362224753f
|
#######################################################################################
# Python-code: Shiny Bubblebeam wrapper
# Author: Adam L Borne
# Contributors: Paul A Stewart, Brent Kuenzi
#######################################################################################
# This program runs the R script that generates a bubble plot in Shiny. It
# generates a unique app for each run of the tool, for Galaxy integration.
#######################################################################################
# Copyright (C) Adam Borne.
# Permission is granted to copy, distribute and/or modify this document
# under the terms of the GNU Free Documentation License, Version 1.3
# or any later version published by the Free Software Foundation;
# with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
# A copy of the license is included in the section entitled "GNU
# Free Documentation License".
#######################################################################################
## REQUIRED INPUT ##
# 1) list_file: SaintExpress output file.
# 2) prey_file: Prey file listing gene name, sequence legnth, and gene id.
# 3) crapome: Crapome file can be created at http://crapome.org. (default = "None")
#######################################################################################
import os
import sys
import time
input_list = open(sys.argv[1], 'r')
prey_input = open(sys.argv[2], 'r')
inter_input = open(sys.argv[4], 'r')
stamped_app = r"shiny_bubble" + str(time.strftime('_%d_%m_%Y_%H_%M'))
cmd = r"mkdir /srv/shiny-server/" + str(stamped_app)
os.system(cmd)
cmd1 = r"cp -r /srv/shiny-server/shiny_bubble/. /srv/shiny-server/" + str(stamped_app)
os.system(cmd1)
if sys.argv[3] == 'None':
glob_manip = open('/srv/shiny-server/shiny_bubble/global.R', 'r')
glob_write = open('/srv/shiny-server/'+ str(stamped_app) + '/global.R', 'w')
for code_line in glob_manip:
if r"working <- as.data.frame" in code_line:
glob_write.write("working <- as.data.frame(merge_files(\"EGFR_list.txt\", \"EGFR_prey.txt\", FALSE))\n")
else:
glob_write.write(code_line)
else:
crapome = open(sys.argv[3], 'r')
crap_file = open('/srv/shiny-server/'+ str(stamped_app) + '/EGFR_crap.txt', 'w')
for line in crapome:
crap_file.write(line)
crapome.close()
input_file = open('/srv/shiny-server/'+ str(stamped_app) + '/EGFR_list.txt', 'w')
for line in input_list:
input_file.write(line)
prey_file = open('/srv/shiny-server/'+ str(stamped_app) + '/EGFR_prey.txt', 'w')
for line in prey_input:
prey_file.write(line)
inter_file = open('/srv/shiny-server/'+ str(stamped_app) + '/inter.txt', 'w')
for line in inter_input:
inter_file.write(line)
#crapome = open(sys.argv[3], 'r')
#crap_file = open('/srv/shiny-server/'+ str(stamped_app) + '/EGFR_crap.txt', 'w')
#for line in crapome:
# crap_file.write(line)
#crapome.close()
input_file.close()
prey_file.close()
inter_file.close()
#cmd1 = r"touch '/srv/shiny-server/" + str(stamped_app) + r"/restart.txt"
#os.system(cmd1)
with open("shiny.txt", "wt") as x:
x.write("<html><body> Open <a href=\"http://54.213.221.126:3838/" +
str(stamped_app) + "\">APOSTL Interactive Analysis</a> in your browser to view shiny app. If there are issues with the sizing within galaxy you can right"
+ " click and open in a new tab or window.</body></html>")
os.rename('shiny.txt', str(sys.argv[5]))
|
bornea/APOSTL
|
shiny_bubble/APOSTL_Interactive_Analysis.py
|
Python
|
gpl-2.0
| 3,548
|
[
"Galaxy"
] |
2fd4b273add241f1c700c3e55be4ce70c9e9e53b4ce00e24c83b666391a81042
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Security (SSL) Settings
Usage:
import libcloud.security
libcloud.security.VERIFY_SSL_CERT = True
# Optional.
libcloud.security.CA_CERTS_PATH.append('/path/to/cacert.txt')
"""
import os
import ssl
__all__ = [
'VERIFY_SSL_CERT',
'SSL_VERSION',
'CA_CERTS_PATH'
]
VERIFY_SSL_CERT = True
SSL_VERSION = ssl.PROTOCOL_TLSv1
# True to use certifi CA bundle path when certifi library is available
USE_CERTIFI = os.environ.get('LIBCLOUD_SSL_USE_CERTIFI', True)
USE_CERTIFI = str(USE_CERTIFI).lower() in ['true', '1']
# List of paths to files, each containing one or more PEM-encoded CA
# certificates concatenated together. Must be a list so that certifi (below)
# and user code can insert additional bundle paths.
CA_CERTS_PATH = []
# Insert certifi CA bundle path to the front of Libcloud CA bundle search
# path if certifi is available
try:
import certifi
except ImportError:
has_certifi = False
else:
has_certifi = True
if has_certifi and USE_CERTIFI:
certifi_ca_bundle_path = certifi.where()
CA_CERTS_PATH.insert(0, certifi_ca_bundle_path)
# Allow user to explicitly specify which CA bundle to use, using an environment
# variable
environment_cert_file = os.getenv('SSL_CERT_FILE', None)
if environment_cert_file is not None:
# Make sure the file exists
if not os.path.exists(environment_cert_file):
raise ValueError('Certificate file %s doesn\'t exist' %
(environment_cert_file))
if not os.path.isfile(environment_cert_file):
raise ValueError('Certificate file can\'t be a directory')
# If a provided file exists we ignore other common paths because we
# don't want to fall-back to a potentially less restrictive bundle
CA_CERTS_PATH = [environment_cert_file]
CA_CERTS_UNAVAILABLE_ERROR_MSG = (
'No CA Certificates were found in CA_CERTS_PATH. For information on '
'how to get required certificate files, please visit '
'https://libcloud.readthedocs.org/en/latest/other/'
'ssl-certificate-validation.html'
)
VERIFY_SSL_DISABLED_MSG = (
'SSL certificate verification is disabled, this can pose a '
'security risk. For more information how to enable the SSL '
'certificate verification, please visit the libcloud '
'documentation.'
)
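# Minimal usage sketch, mirroring the module docstring above (the bundle path
# is hypothetical):
#
#     import libcloud.security
#     libcloud.security.VERIFY_SSL_CERT = True
#     libcloud.security.CA_CERTS_PATH.append('/path/to/cacert.txt')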
|
SecurityCompass/libcloud
|
libcloud/security.py
|
Python
|
apache-2.0
| 2,959
|
[
"VisIt"
] |
6870f5dd3dcc5a3a24e8a28a88cd6c2a89ded142c6a54cae5fc186843b8e4218
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import lib
class KnownValues(unittest.TestCase):
def test_call_in_background_skip(self):
def bg_raise():
def raise1():
raise ValueError
with lib.call_in_background(raise1) as f:
f()
raise IndexError
self.assertRaises(lib.ThreadRuntimeError, bg_raise)
def test_index_tril_to_pair(self):
i_j = (numpy.random.random((2,30)) * 100).astype(int)
i0 = numpy.max(i_j, axis=0)
j0 = numpy.min(i_j, axis=0)
ij = i0 * (i0+1) // 2 + j0
i1, j1 = lib.index_tril_to_pair(ij)
self.assertTrue(numpy.all(i0 == i1))
self.assertTrue(numpy.all(j0 == j1))
def test_class_as_method(self):
class A:
def f1(self):
return 'a'
f2 = lib.alias(f1)
class B(A):
def f1(self):
return 'b'
b = B()
self.assertEqual(b.f2(), 'b')
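        # Hedged note on the assertion above: lib.alias appears to resolve the
        # aliased name through normal attribute lookup at call time, which is
        # why the subclass override B.f1 is what b.f2() reaches, not A.f1.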
if __name__ == "__main__":
unittest.main()
|
gkc1000/pyscf
|
pyscf/lib/test/test_misc.py
|
Python
|
apache-2.0
| 1,673
|
[
"PySCF"
] |
3a4a33ae76ca011589426c9275a2932cd4b6db7cdac19ce66aa3c7a80fe69bb4
|
# Animation.py
# Aaron Taylor
# Moose Abumeeiz
#
# This is the class for all animations in the game, based on time
# it will advance the frame when it is the correct time
#
from pygame import *
from time import time as cTime
class Animation:
"""Class to handle all animation timing"""
def __init__(self, frames, interval, shouldLoop=True):
self.frames = frames
self.frameCount = len(self.frames)
self.interval = interval/self.frameCount # Wait between frames
self.shouldLoop = shouldLoop
self.lastFrame = cTime() # Creation time
        self.currentIndex = -1 # There will be a step right away; counteract it
self.frame = self.frames[self.currentIndex] # Start off current frame
self.looped = False # Has made a complete loop
# Assume all images are the same size
self.width = self.frames[0].get_width()
self.height = self.frames[0].get_height()
def resize(self, percent):
'Resize all frames'
# Create new height
self.width = int(self.width*percent)
self.height = int(self.height*percent)
# Resize all frames
self.frames = [transform.scale(self.frames[i], (self.width, self.height)) for i in range(len(self.frames))]
        self.frame = self.frames[self.currentIndex] # Set new frame in case no step occurs
def setInterval(self, interval):
'Change animation interval'
# Re-set the frame interval
self.interval = interval/self.frameCount
def setFrame(self, index):
'Sets the current frame index'
# Ensure changing it wont cause an error
if index < self.frameCount:
self.currentIndex = index
self.frame = self.frames[self.currentIndex]
def reset(self, time):
'Reset animation to start'
# Re-set the current index and re-set the current frame
self.currentIndex = 0
self.frame = self.frames[self.currentIndex]
self.lastFrame = time
def step(self):
'Step the animation forward a frame'
self.currentIndex += 1
if self.currentIndex >= self.frameCount:
# The animation has surpassed the last frame, restart it
if self.shouldLoop:
self.currentIndex = 0
self.looped = True
else:
self.currentIndex -= 1
self.frame = self.frames[self.currentIndex]
def render(self, time):
'Return the current frame'
        # Decide whether or not we should advance a frame
if time-self.lastFrame >= self.interval:
self.step()
self.lastFrame = time
return self.frame
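# Minimal usage sketch (hypothetical frame surfaces and screen, assuming the
# pygame display is set up elsewhere):
#
#     anim = Animation(frames, interval=0.8)
#     screen.blit(anim.render(cTime()), (x, y))  # advances frames on its own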
|
ExPHAT/binding-of-isaac
|
Animation.py
|
Python
|
mit
| 2,346
|
[
"MOOSE"
] |
4131f6a5ca50dfb3127b987511bd6175bfc59f8a78d77388632d81af26b5035c
|
""" This tests only need the PilotAgentsDB, and connects directly to it
Suggestion: for local testing, run this with::
python -m pytest -c ../pytest.ini -vv tests/Integration/WorkloadManagementSystem/Test_PilotAgentsDB.py
"""
# pylint: disable=wrong-import-position
import DIRAC
DIRAC.initialize() # Initialize configuration
from mock import patch
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PivotedPilotSummaryTable
gLogger.setLevel("DEBUG")
paDB = PilotAgentsDB()
def preparePilots(stateCount, testSite, testCE, testGroup):
"""
Set up a bunch of pilots in different states.
    :param list stateCount: number of pilots per state. States are: 'Submitted', 'Done', 'Failed',
           'Aborted', 'Running', 'Waiting', 'Scheduled', 'Ready'
    :param str testSite: site name
    :param str testCE: CE name
    :param str testGroup: group name
    :return: list of pilot references
"""
pilotRef = []
nPilots = sum(stateCount)
for i in range(nPilots):
pilotRef.append("pilotRef_" + str(i))
res = paDB.addPilotTQReference(
pilotRef,
123,
"ownerDN",
testGroup,
)
assert res["OK"] is True, res["Message"]
index = 0
for j, num in enumerate(stateCount):
for i in range(num):
pNum = i + index
res = paDB.setPilotStatus(
"pilotRef_" + str(pNum),
PivotedPilotSummaryTable.pstates[j],
destination=testCE,
statusReason="Test States",
gridSite=testSite,
queue=None,
benchmark=None,
currentJob=num,
updateTime=None,
conn=False,
)
assert res["OK"] is True, res["Message"]
index += num
return pilotRef
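# Usage sketch: preparePilots([10, 50, 7, 3, 12, 8, 6, 4], "TestSite", "TestCE",
# "ownerGroup") creates sum(stateCount) pilots named pilotRef_0..pilotRef_N-1
# and distributes them over PivotedPilotSummaryTable.pstates in order.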
def cleanUpPilots(pilotRef):
"""
    Delete all pilots pointed to by pilotRef.
    :param list pilotRef: list of pilot references to delete
:return:
"""
for elem in pilotRef:
res = paDB.deletePilot(elem)
assert res["OK"] is True, res["Message"]
def test_basic():
"""usual insert/verify"""
res = paDB.addPilotTQReference(
["pilotRef"],
123,
"ownerDN",
"ownerGroup",
)
assert res["OK"] is True
res = paDB.deletePilot("pilotRef")
# FIXME: to expand...
@patch("DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB.getVOForGroup")
def test_getGroupedPilotSummary(mocked_fcn):
"""
Test 'pivoted' pilot summary method.
:return: None
"""
stateCount = [10, 50, 7, 3, 12, 8, 6, 4]
testGroup = "ownerGroup"
testGroupVO = "ownerGroupVO"
testCE = "TestCE"
testSite = "TestSite"
mocked_fcn.return_value = "ownerGroupVO"
pilotRef = preparePilots(stateCount, testSite, testCE, testGroup)
selectDict = {}
columnList = ["GridSite", "DestinationSite", "OwnerGroup"]
res = paDB.getGroupedPilotSummary(selectDict, columnList)
cleanUpPilots(pilotRef)
expectedParameterList = [
"Site",
"CE",
"OwnerGroup",
"Submitted",
"Done",
"Failed",
"Aborted",
"Running",
"Waiting",
"Scheduled",
"Ready",
"Aborted_Hour",
"Total",
"PilotsPerJob",
"PilotJobEff",
"Status",
]
assert res["OK"] is True, res["Message"]
values = res["Value"]
assert "ParameterNames" in values, "ParameterNames key missing in result"
assert values["ParameterNames"] == expectedParameterList, "Expected and obtained ParameterNames differ"
assert "Records" in values, "Records key missing in result"
# in the setup with one Site/CE/OwnerGroup there will be only one record:
assert len(values["Records"]) == 1
record = values["Records"][0]
assert len(record) == len(expectedParameterList)
assert record[0] == testSite
assert record[1] == testCE
assert record[2] == testGroupVO
# pilot state counts:
for i, entry in enumerate(record[3:11]):
assert entry == stateCount[i], " found entry: %s, expected stateCount: %d " % (str(entry), stateCount[i])
# all pilots have the same timestamp, so Aborted_Hour count is the same as Aborted:
assert record[expectedParameterList.index("Aborted")] == record[expectedParameterList.index("Aborted_Hour")]
# Total
total = record[expectedParameterList.index("Total")]
assert total == sum(stateCount)
# pilot efficiency
delta = 0.01
accuracy = (
record[expectedParameterList.index("PilotJobEff")]
- 100.0 * (total - record[expectedParameterList.index("Aborted")]) / total
)
    assert accuracy <= delta, " Pilot eff accuracy %f should be <= %f " % (accuracy, delta)
# there aren't any jobs, so:
assert record[expectedParameterList.index("Status")] == "Idle"
def test_PivotedPilotSummaryTable():
"""
Test the 'pivoted' query only. Check whether the number of pilots in different states returned by
the query is correct.
:return: None
"""
# PivotedPilotSummaryTable pstates gives pilot possible states (table.pstates)
# pstates = ['Submitted', 'Done', 'Failed', 'Aborted', 'Running', 'Waiting', 'Scheduled', 'Ready']
stateCount = [10, 50, 7, 3, 12, 8, 6, 4]
testGroup = "ownerGroup"
testCE = "TestCE"
testSite = "TestSite"
pilotRef = preparePilots(stateCount, testSite, testCE, testGroup)
table = PivotedPilotSummaryTable(["GridSite", "DestinationSite", "OwnerGroup"])
sqlQuery = table.buildSQL()
res = paDB._query(sqlQuery)
assert res["OK"] is True, res["Message"]
columns = table.getColumnList()
# first 3 columns are: Site, CE and a group (VO mapping comes later, not in the SQL above)
assert "Site" in columns
assert columns.index("Site") == 0
assert "CE" in columns
assert columns.index("CE") == 1
assert "OwnerGroup" in columns
assert columns.index("OwnerGroup") == 2
# pilot numbers by states:
assert "Total" in columns
    # with the setup above there will be only one row; its first 3 elements must match the columns.
row = res["Value"][0]
assert row[0] == testSite
assert row[1] == testCE
assert row[2] == testGroup
total = row[columns.index("Total")]
assert total == sum(stateCount), res["Value"]
for i, state in enumerate(table.pstates):
assert state in columns
assert row[columns.index(state)] == stateCount[i], " state: %s, stateCount: %d " % (state, stateCount[i])
cleanUpPilots(pilotRef)
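
# --- Editor's hedged illustration (not part of the original tests) ---
# The "pivoted" summary built by PivotedPilotSummaryTable turns one row per
# pilot into one row per (Site, CE, OwnerGroup) with a counter column per
# status; the real SQL comes from table.buildSQL(). A minimal pure-Python
# sketch of the same idea, independent of the database:
def _pivot_demo():
    from collections import Counter
    pilots = [
        ("TestSite", "TestCE", "ownerGroup", "Done"),
        ("TestSite", "TestCE", "ownerGroup", "Done"),
        ("TestSite", "TestCE", "ownerGroup", "Aborted"),
    ]
    summary = {}
    for site, ce, group, status in pilots:
        counts = summary.setdefault((site, ce, group), Counter())
        counts[status] += 1
        counts["Total"] += 1
    # -> {("TestSite", "TestCE", "ownerGroup"):
    #        Counter({"Total": 3, "Done": 2, "Aborted": 1})}
    # which is why the single Site/CE/OwnerGroup setup above yields exactly
    # one record whose per-state counters match stateCount.
    return summary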
|
DIRACGrid/DIRAC
|
tests/Integration/WorkloadManagementSystem/Test_PilotAgentsDB.py
|
Python
|
gpl-3.0
| 6,648
|
[
"DIRAC"
] |
96ca688247e38be699486bde1180090b8a36b550b9384b53a99c002fcb0ddb6c
|
"""
Octave (and Matlab) code printer
The `OctaveCodePrinter` converts SymPy expressions into Octave expressions.
It uses a subset of the Octave language for Matlab compatibility.
A complete code generator, which uses `octave_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
from __future__ import print_function, division
from sympy.core import Mul, Pow, S, Rational
from sympy.core.compatibility import string_types, range
from sympy.core.mul import _keep_coeff
from sympy.printing.codeprinter import CodePrinter, Assignment
from sympy.printing.precedence import precedence
from re import search
# List of known functions. First, those that have the same name in
# SymPy and Octave. This is almost certainly incomplete!
known_fcns_src1 = ["sin", "cos", "tan", "asin", "acos", "atan", "atan2",
"sinh", "cosh", "tanh", "asinh", "acosh", "atanh",
"log", "exp", "erf", "gamma", "sign", "floor", "csc",
"sec", "cot", "coth", "acot", "acoth", "erfc",
"besselj", "bessely", "besseli", "besselk",
"erfinv", "erfcinv", "factorial" ]
# These functions have different names ("Sympy": "Octave"), more
# generally a mapping to (argument_conditions, octave_function).
known_fcns_src2 = {
"Abs": "abs",
"ceiling": "ceil",
"conjugate": "conj",
"DiracDelta": "dirac",
"Heaviside": "heaviside",
}
class OctaveCodePrinter(CodePrinter):
"""
A printer to convert expressions to strings of Octave/Matlab code.
"""
printmethod = "_octave"
language = "Octave"
_operators = {
'and': '&',
'or': '|',
'not': '~',
}
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 16,
'user_functions': {},
'human': True,
'contract': True,
'inline': True,
}
# Note: contract is for expressing tensors as loops (if True), or just
    # assignment (if False). FIXME: this should be looked at more carefully
# for Octave.
def __init__(self, settings={}):
super(OctaveCodePrinter, self).__init__(settings)
self.known_functions = dict(zip(known_fcns_src1, known_fcns_src1))
self.known_functions.update(dict(known_fcns_src2))
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "% {0}".format(text)
def _declare_number_const(self, name, value):
return "{0} = {1};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
# Octave uses Fortran order (column-major)
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# Octave arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("for %s = %s:%s" % (var, start, stop))
close_lines.append("end")
return open_lines, close_lines
def _print_Mul(self, expr):
# print complex numbers nicely in Octave
if (expr.is_number and expr.is_imaginary and
expr.as_coeff_Mul()[0].is_integer):
return "%si" % self._print(-S.ImaginaryUnit*expr)
# cribbed from str.py
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if (item.is_commutative and item.is_Pow and item.exp.is_Rational
and item.exp.is_negative):
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
elif item.is_Rational and item is not S.Infinity:
if item.p != 1:
a.append(Rational(item.p))
if item.q != 1:
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
# from here it differs from str.py to deal with "*" and ".*"
def multjoin(a, a_str):
# here we probably are assuming the constants will come first
r = a_str[0]
for i in range(1, len(a)):
mulsym = '*' if a[i-1].is_number else '.*'
r = r + mulsym + a_str[i]
return r
if len(b) == 0:
return sign + multjoin(a, a_str)
elif len(b) == 1:
divsym = '/' if b[0].is_number else './'
return sign + multjoin(a, a_str) + divsym + b_str[0]
else:
divsym = '/' if all([bi.is_number for bi in b]) else './'
return (sign + multjoin(a, a_str) +
divsym + "(%s)" % multjoin(b, b_str))
def _print_Pow(self, expr):
powsymbol = '^' if all([x.is_number for x in expr.args]) else '.^'
PREC = precedence(expr)
if expr.exp == S.Half:
return "sqrt(%s)" % self._print(expr.base)
if expr.is_commutative:
if expr.exp == -S.Half:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "sqrt(%s)" % self._print(expr.base)
if expr.exp == -S.One:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "%s" % self.parenthesize(expr.base, PREC)
return '%s%s%s' % (self.parenthesize(expr.base, PREC), powsymbol,
self.parenthesize(expr.exp, PREC))
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Pi(self, expr):
return 'pi'
def _print_ImaginaryUnit(self, expr):
return "1i"
def _print_Exp1(self, expr):
return "exp(1)"
def _print_GoldenRatio(self, expr):
# FIXME: how to do better, e.g., for octave_code(2*GoldenRatio)?
#return self._print((1+sqrt(S(5)))/2)
return "(1+sqrt(5))/2"
def _print_NumberSymbol(self, expr):
if self._settings["inline"]:
return self._print(expr.evalf(self._settings["precision"]))
else:
# assign to a variable, perhaps more readable for longer program
return super(OctaveCodePrinter, self)._print_NumberSymbol(expr)
def _print_Assignment(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.tensor.indexed import IndexedBase
# Copied from codeprinter, but remove special MatrixSymbol treatment
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if not self._settings["inline"] and isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
if self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Infinity(self, expr):
return 'inf'
def _print_NegativeInfinity(self, expr):
return '-inf'
def _print_NaN(self, expr):
return 'NaN'
def _print_list(self, expr):
return '{' + ', '.join(self._print(a) for a in expr) + '}'
_print_tuple = _print_list
_print_Tuple = _print_list
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_bool(self, expr):
return str(expr).lower()
# Could generate quadrature code for definite Integrals?
#_print_Integral = _print_not_supported
def _print_MatrixBase(self, A):
# Handle zero dimensions:
if (A.rows, A.cols) == (0, 0):
return '[]'
elif A.rows == 0 or A.cols == 0:
return 'zeros(%s, %s)' % (A.rows, A.cols)
elif (A.rows, A.cols) == (1, 1):
# Octave does not distinguish between scalars and 1x1 matrices
return self._print(A[0, 0])
elif A.rows == 1:
return "[%s]" % A.table(self, rowstart='', rowend='', colsep=' ')
elif A.cols == 1:
# note .table would unnecessarily equispace the rows
return "[%s]" % "; ".join([self._print(a) for a in A])
return "[%s]" % A.table(self, rowstart='', rowend='',
rowsep=';\n', colsep=' ')
def _print_SparseMatrix(self, A):
from sympy.matrices import Matrix
        L = A.col_list()
# make row vectors of the indices and entries
I = Matrix([[k[0] + 1 for k in L]])
J = Matrix([[k[1] + 1 for k in L]])
AIJ = Matrix([[k[2] for k in L]])
return "sparse(%s, %s, %s, %s, %s)" % (self._print(I), self._print(J),
self._print(AIJ), A.rows, A.cols)
# FIXME: Str/CodePrinter could define each of these to call the _print
# method from higher up the class hierarchy (see _print_NumberSymbol).
# Then subclasses like us would not need to repeat all this.
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
_print_MatrixBase
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_SparseMatrix
def _print_MatrixElement(self, expr):
return self._print(expr.parent) + '(%s, %s)'%(expr.i+1, expr.j+1)
def _print_MatrixSlice(self, expr):
def strslice(x, lim):
l = x[0] + 1
h = x[1]
step = x[2]
lstr = self._print(l)
hstr = 'end' if h == lim else self._print(h)
if step == 1:
if l == 1 and h == lim:
return ':'
if l == h:
return lstr
else:
return lstr + ':' + hstr
else:
return ':'.join((lstr, self._print(step), hstr))
return (self._print(expr.parent) + '(' +
strslice(expr.rowslice, expr.parent.shape[0]) + ', ' +
strslice(expr.colslice, expr.parent.shape[1]) + ')')
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Identity(self, expr):
return "eye(%s)" % self._print(expr.shape[0])
def _print_hankel1(self, expr):
return "besselh(%s, 1, %s)" % (self._print(expr.order),
self._print(expr.argument))
def _print_hankel2(self, expr):
return "besselh(%s, 2, %s)" % (self._print(expr.order),
self._print(expr.argument))
# Note: as of 2015, Octave doesn't have spherical Bessel functions
def _print_jn(self, expr):
from sympy.functions import sqrt, besselj
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*besselj(expr.order + S.Half, x)
return self._print(expr2)
def _print_yn(self, expr):
from sympy.functions import sqrt, bessely
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*bessely(expr.order + S.Half, x)
return self._print(expr2)
def _print_airyai(self, expr):
return "airy(0, %s)" % self._print(expr.args[0])
def _print_airyaiprime(self, expr):
return "airy(1, %s)" % self._print(expr.args[0])
def _print_airybi(self, expr):
return "airy(2, %s)" % self._print(expr.args[0])
def _print_airybiprime(self, expr):
return "airy(3, %s)" % self._print(expr.args[0])
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if self._settings["inline"]:
# Express each (cond, expr) pair in a nested Horner form:
# (condition) .* (expr) + (not cond) .* (<others>)
# Expressions that result in multiple statements won't work here.
ecpairs = ["({0}).*({1}) + (~({0})).*(".format
(self._print(c), self._print(e))
for e, c in expr.args[:-1]]
elast = "%s" % self._print(expr.args[-1].expr)
pw = " ...\n".join(ecpairs) + elast + ")"*len(ecpairs)
            # Note: we currently need these outer brackets for 2*pw. Would be
# nicer to teach parenthesize() to do this for us when needed!
return "(" + pw + ")"
else:
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s)" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("elseif (%s)" % self._print(c))
code0 = self._print(e)
lines.append(code0)
if i == len(expr.args) - 1:
lines.append("end")
return "\n".join(lines)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
# code mostly copied from ccode
if isinstance(code, string_types):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_regex = ('^function ', '^if ', '^elseif ', '^else$', '^for ')
dec_regex = ('^end$', '^elseif ', '^else$')
# pre-strip left-space from the code
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any([search(re, line) for re in inc_regex]))
for line in code ]
decrease = [ int(any([search(re, line) for re in dec_regex]))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
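
# --- Editor's hedged example (not part of the original module) ---
# indent_code() adds one indentation level per matched block opener
# (function/if/elseif/else/for) and removes one per closer (end/elseif/else),
# so a flat chunk of generated Octave comes back properly nested:
def _indent_code_demo():
    printer = OctaveCodePrinter()
    flat = "if (x > 0)\ny = 1;\nelse\ny = 0;\nend"
    return printer.indent_code(flat)  # the 'y = ...' lines come back indented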
def octave_code(expr, assign_to=None, **settings):
r"""Converts `expr` to a string of Octave (or Matlab) code.
The string uses a subset of the Octave language for Matlab compatibility.
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This can be helpful for
expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=16].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
inline: bool, optional
If True, we try to create single-statement code instead of multiple
statements. [default=True].
Examples
========
>>> from sympy import octave_code, symbols, sin, pi
>>> x = symbols('x')
>>> octave_code(sin(x).series(x).removeO())
'x.^5/120 - x.^3/6 + x'
>>> from sympy import Rational, ceiling, Abs
>>> x, y, tau = symbols("x, y, tau")
>>> octave_code((2*tau)**Rational(7, 2))
'8*sqrt(2)*tau.^(7/2)'
Note that element-wise (Hadamard) operations are used by default between
    symbols. This is because it's very common in Octave to write "vectorized"
code. It is harmless if the values are scalars.
>>> octave_code(sin(pi*x*y), assign_to="s")
's = sin(pi*x.*y);'
If you need a matrix product "*" or matrix power "^", you can specify the
symbol as a ``MatrixSymbol``.
>>> from sympy import Symbol, MatrixSymbol
>>> n = Symbol('n', integer=True, positive=True)
>>> A = MatrixSymbol('A', n, n)
>>> octave_code(3*pi*A**3)
'(3*pi)*A^3'
    This class uses several rules to decide which symbol to use for a product.
Pure numbers use "*", Symbols use ".*" and MatrixSymbols use "*".
A HadamardProduct can be used to specify componentwise multiplication ".*"
    of two MatrixSymbols. There is currently no easy way to specify
scalar symbols, so sometimes the code might have some minor cosmetic
issues. For example, suppose x and y are scalars and A is a Matrix, then
while a human programmer might write "(x^2*y)*A^3", we generate:
>>> octave_code(x**2*y*A**3)
'(x.^2.*y)*A^3'
Matrices are supported using Octave inline notation. When using
``assign_to`` with matrices, the name can be specified either as a string
    or as a ``MatrixSymbol``. The dimensions must align in the latter case.
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([[x**2, sin(x), ceiling(x)]])
>>> octave_code(mat, assign_to='A')
'A = [x.^2 sin(x) ceil(x)];'
``Piecewise`` expressions are implemented with logical masking by default.
Alternatively, you can pass "inline=False" to use if-else conditionals.
Note that if the ``Piecewise`` lacks a default term, represented by
``(expr, True)`` then an error will be thrown. This is to prevent
generating an expression that may not evaluate to anything.
>>> from sympy import Piecewise
>>> pw = Piecewise((x + 1, x > 0), (x, True))
>>> octave_code(pw, assign_to=tau)
'tau = ((x > 0).*(x + 1) + (~(x > 0)).*(x));'
Note that any expression that can be generated normally can also exist
inside a Matrix:
>>> mat = Matrix([[x**2, pw, sin(x)]])
>>> octave_code(mat, assign_to='A')
'A = [x.^2 ((x > 0).*(x + 1) + (~(x > 0)).*(x)) sin(x)];'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e., [(argument_test,
cfunction_string)]. This can be used to call a custom Octave function.
>>> from sympy import Function
>>> f = Function('f')
>>> g = Function('g')
>>> custom_functions = {
... "f": "existing_octave_fcn",
... "g": [(lambda x: x.is_Matrix, "my_mat_fcn"),
... (lambda x: not x.is_Matrix, "my_fcn")]
... }
>>> mat = Matrix([[1, x]])
>>> octave_code(f(x) + g(x) + g(mat), user_functions=custom_functions)
'existing_octave_fcn(x) + my_fcn(x) + my_mat_fcn([1 x])'
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
    >>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e = Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> octave_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy(i) = (y(i + 1) - y(i))./(t(i + 1) - t(i));'
"""
return OctaveCodePrinter(settings).doprint(expr, assign_to)
def print_octave_code(expr, **settings):
"""Prints the Octave (or Matlab) representation of the given expression.
See `octave_code` for the meaning of the optional arguments.
"""
print(octave_code(expr, **settings))
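
# --- Editor's hedged self-check (not part of the original module) ---
# Reuses examples whose expected output is documented in the octave_code
# docstring above, so the assertions are grounded in the module itself.
if __name__ == "__main__":
    from sympy import symbols, sin, pi, Matrix, ceiling
    x, y = symbols("x y")
    assert octave_code(sin(pi * x * y), assign_to="s") == "s = sin(pi*x.*y);"
    mat = Matrix([[x ** 2, sin(x), ceiling(x)]])
    assert octave_code(mat, assign_to="A") == "A = [x.^2 sin(x) ceil(x)];"
    print("octave_code docstring examples round-trip as documented")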
|
kaichogami/sympy
|
sympy/printing/octave.py
|
Python
|
bsd-3-clause
| 22,542
|
[
"DIRAC"
] |
c6da626f7aeab8f8202069267f2d213abc896554e71d82568286806db16277f8
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Stefan Siegel
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2021 Mirko Leonhaeuser
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Original version written by Alex Roitman, largely based on relationship.py
# by Don Allingham and on valuable input from Dr. Martin Senftleben
# Modified by Joachim Breitner to not use „Großcousine“, in accordance with
# http://de.wikipedia.org/wiki/Verwandtschaftsbeziehung
# Rewritten from scratch for Gramps 3 by Stefan Siegel,
# loosely based on rel_fr.py
"""
German-specific classes for relationships.
"""
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Person
import gramps.gen.relationship
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
_ordinal = [ 'nullte',
'erste', 'zweite', 'dritte', 'vierte', 'fünfte', 'sechste',
'siebte', 'achte', 'neunte', 'zehnte', 'elfte', 'zwölfte',
]
_removed = [ '',
'', 'Groß', 'Urgroß',
'Alt', 'Altgroß', 'Alturgroß',
'Ober', 'Obergroß', 'Oberurgroß',
'Stamm', 'Stammgroß', 'Stammurgroß',
'Ahnen', 'Ahnengroß', 'Ahnenurgroß',
'Urahnen', 'Urahnengroß', 'Urahnenurgroß',
'Erz', 'Erzgroß', 'Erzurgroß',
'Erzahnen', 'Erzahnengroß', 'Erzahnenurgroß',
]
_lineal_up = {
'many': '%(p)sEltern%(s)s',
'unknown': '%(p)sElter%(s)s', # "Elter" sounds strange but is correct
'male': '%(p)sVater%(s)s',
'female': '%(p)sMutter%(s)s',
}
_lineal_down = {
'many': '%(p)sKinder%(s)s',
'unknown': '%(p)sKind%(s)s',
'male': '%(p)sSohn%(s)s',
'female': '%(p)sTochter%(s)s',
}
_collateral_up = {
'many': '%(p)sOnkel und %(p)sTanten%(s)s',
'unknown': '%(p)sOnkel oder %(p)sTante%(s)s',
'male': '%(p)sOnkel%(s)s',
'female': '%(p)sTante%(s)s',
}
_collateral_down = {
'many': '%(p)sNeffen und %(p)sNichten%(s)s',
'unknown': '%(p)sNeffe oder %(p)sNichte%(s)s',
'male': '%(p)sNeffe%(s)s',
'female': '%(p)sNichte%(s)s',
}
_collateral_same = {
'many': '%(p)sCousins und %(p)sCousinen%(s)s',
'unknown': '%(p)sCousin oder %(p)sCousine%(s)s',
'male': '%(p)sCousin%(s)s',
'female': '%(p)sCousine%(s)s',
}
_collateral_sib = {
'many': '%(p)sGeschwister%(s)s',
'unknown': '%(p)sGeschwisterkind%(s)s',
'male': '%(p)sBruder%(s)s',
'female': '%(p)sSchwester%(s)s',
}
_schwager = {
'many': '%(p)sSchwager%(s)s',
'unknown': '%(p)sSchwager%(s)s',
'male': '%(p)sSchwager%(s)s',
'female': '%(p)sSchwägerin%(s)s',
}
_schwippschwager = {
'many': '%(p)sSchwippschwager%(s)s',
'unknown': '%(p)sSchwippschwager%(s)s',
'male': '%(p)sSchwippschwager%(s)s',
'female': '%(p)sSchwippschwägerin%(s)s',
}
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
def __init__(self):
gramps.gen.relationship.RelationshipCalculator.__init__(self)
def _make_roman(self, num):
roman = ''
for v, r in [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
( 100, 'C'), ( 90, 'XC'), ( 50, 'L'), ( 40, 'XL'),
( 10, 'X'), ( 9, 'IX'), ( 5, 'V'), ( 4, 'IV'),
( 1, 'I')]:
            while num >= v:  # >= so exact values (e.g. 10 -> 'X') are fully consumed
num -= v
roman += r
return roman
def _fix_caps(self, string):
return re.sub(r'(?<=[^\s(/A-Z])[A-Z]', lambda m: m.group().lower(), string)
def _removed_text(self, degree, removed):
if (degree, removed) == (0, -2):
return 'Enkel'
elif (degree, removed) == (0, -3):
return 'Urenkel'
removed = abs(removed)
if removed < len(_removed):
return _removed[removed]
else:
return '(%s) Urgroß' % self._make_roman(removed-1)
def _degree_text(self, degree, removed):
if removed == 0:
degree -= 1 # a cousin has same degree as his parent (uncle/aunt)
if degree <= 1:
return ''
if degree < len(_ordinal):
return ' %sn Grades' % _ordinal[degree]
else:
return ' %d. Grades' % degree
def _gender_convert(self, gender):
if gender == Person.MALE:
return 'male'
elif gender == Person.FEMALE:
return 'female'
else:
return 'unknown'
def _get_relationship_string(self, Ga, Gb, gender,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
common_ancestor_count = 0
if reltocommon_a == '':
reltocommon_a = self.REL_FAM_BIRTH
if reltocommon_b == '':
reltocommon_b = self.REL_FAM_BIRTH
if reltocommon_a[-1] in [self.REL_MOTHER, self.REL_FAM_BIRTH,
self.REL_FAM_BIRTH_MOTH_ONLY] and \
reltocommon_b[-1] in [self.REL_MOTHER, self.REL_FAM_BIRTH,
self.REL_FAM_BIRTH_MOTH_ONLY]:
common_ancestor_count += 1 # same female ancestor
if reltocommon_a[-1] in [self.REL_FATHER, self.REL_FAM_BIRTH,
self.REL_FAM_BIRTH_FATH_ONLY] and \
reltocommon_b[-1] in [self.REL_FATHER, self.REL_FAM_BIRTH,
self.REL_FAM_BIRTH_FATH_ONLY]:
common_ancestor_count += 1 # same male ancestor
degree = min(Ga, Gb)
removed = Ga-Gb
if degree == 0 and removed < 0:
# for descendants the "in-law" logic is reversed
(in_law_a, in_law_b) = (in_law_b, in_law_a)
rel_str = ''
pre = ''
post = ''
if in_law_b and degree == 0:
pre += 'Stief'
elif (not only_birth) or common_ancestor_count == 0:
pre += 'Stief-/Adoptiv'
if in_law_a and (degree, removed) != (1, 0):
# A "Schwiegerbruder" really is a "Schwager" (handled later)
pre += 'Schwieger'
if degree != 0 and common_ancestor_count == 1:
pre += 'Halb'
pre += self._removed_text(degree, removed)
post += self._degree_text(degree, removed)
if in_law_b and degree != 0 and (degree, removed) != (1, 0):
# A "Bruder (angeheiratet)" also is a "Schwager" (handled later)
post += ' (angeheiratet)'
if degree == 0:
# lineal relationship
if removed > 0:
rel_str = _lineal_up[gender]
elif removed < 0:
rel_str = _lineal_down[gender]
elif in_law_a or in_law_b:
rel_str = 'Partner'
else:
rel_str = 'Proband'
else:
# collateral relationship
if removed > 0:
rel_str = _collateral_up[gender]
elif removed < 0:
rel_str = _collateral_down[gender]
elif degree == 1:
if in_law_a or in_law_b:
if in_law_a and in_law_b:
rel_str = _schwippschwager[gender]
else:
rel_str = _schwager[gender]
else:
rel_str = _collateral_sib[gender]
else:
rel_str = _collateral_same[gender]
return self._fix_caps(rel_str % {'p': pre, 's': post})
def get_plural_relationship_string(self, Ga, Gb,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
return self._get_relationship_string(Ga, Gb, 'many',
reltocommon_a, reltocommon_b,
only_birth, in_law_a, in_law_b)
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
return self._get_relationship_string(Ga, Gb,
self._gender_convert(gender_b),
reltocommon_a, reltocommon_b,
only_birth, in_law_a, in_law_b)
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
if sib_type in [self.NORM_SIB, self.UNKNOWN_SIB]:
# the NORM_SIB translation is generic and suitable for UNKNOWN_SIB
rel = self.REL_FAM_BIRTH
only_birth = True
elif sib_type == self.HALF_SIB_FATHER:
rel = self.REL_FAM_BIRTH_FATH_ONLY
only_birth = True
elif sib_type == self.HALF_SIB_MOTHER:
rel = self.REL_FAM_BIRTH_MOTH_ONLY
only_birth = True
elif sib_type == self.STEP_SIB:
rel = self.REL_FAM_NONBIRTH
only_birth = False
return self._get_relationship_string(1, 1,
self._gender_convert(gender_b),
rel, rel,
only_birth, in_law_a, in_law_b)
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_de.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gramps.gen.relationship import test
rc = RelationshipCalculator()
test(rc, True)
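
# --- Editor's hedged illustration (not part of the original module) ---
# How the naming pieces combine for a lineal ancestor: for a
# great-grandfather Ga=3, Gb=0, so degree = min(Ga, Gb) = 0 and
# removed = Ga - Gb = 3; _removed_text(0, 3) yields 'Urgroß', which is
# substituted into _lineal_up['male'] ('%(p)sVater%(s)s') and lower-cased
# by _fix_caps to give 'Urgroßvater'.
def _lineal_up_demo():
    rc = RelationshipCalculator()
    pre = rc._removed_text(0, 3)                     # 'Urgroß'
    rel = _lineal_up['male'] % {'p': pre, 's': ''}   # 'UrgroßVater'
    return rc._fix_caps(rel)                         # 'Urgroßvater'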
|
Nick-Hall/gramps
|
gramps/plugins/rel/rel_de.py
|
Python
|
gpl-2.0
| 11,357
|
[
"Brian"
] |
fc9e40daf086f2fa41b190dfd5b3cd1401f2621ab6b3fec7eedd560263676561
|
"""Courses API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class CoursesAPI(BaseCanvasAPI):
"""Courses API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for CoursesAPI."""
super(CoursesAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.CoursesAPI")
def list_your_courses(
self,
enrollment_role=None,
enrollment_role_id=None,
enrollment_state=None,
enrollment_type=None,
exclude_blueprint_courses=None,
include=None,
state=None,
):
"""
List your courses.
Returns the paginated list of active courses for the current user.
"""
path = {}
data = {}
params = {}
# OPTIONAL - enrollment_type
"""
When set, only return courses where the user is enrolled as this type. For
example, set to "teacher" to return only courses where the user is
enrolled as a Teacher. This argument is ignored if enrollment_role is given.
"""
if enrollment_type is not None:
self._validate_enum(
enrollment_type, ["teacher", "student", "ta", "observer", "designer"]
)
params["enrollment_type"] = enrollment_type
# OPTIONAL - enrollment_role
"""
Deprecated
When set, only return courses where the user is enrolled with the specified
course-level role. This can be a role created with the
{api:RoleOverridesController#add_role Add Role API} or a base role type of
'StudentEnrollment', 'TeacherEnrollment', 'TaEnrollment', 'ObserverEnrollment',
or 'DesignerEnrollment'.
"""
if enrollment_role is not None:
params["enrollment_role"] = enrollment_role
# OPTIONAL - enrollment_role_id
"""
When set, only return courses where the user is enrolled with the specified
course-level role. This can be a role created with the
{api:RoleOverridesController#add_role Add Role API} or a built_in role type of
'StudentEnrollment', 'TeacherEnrollment', 'TaEnrollment', 'ObserverEnrollment',
or 'DesignerEnrollment'.
"""
if enrollment_role_id is not None:
params["enrollment_role_id"] = enrollment_role_id
# OPTIONAL - enrollment_state
"""
When set, only return courses where the user has an enrollment with the given state.
This will respect section/course/term date overrides.
"""
if enrollment_state is not None:
self._validate_enum(
enrollment_state, ["active", "invited_or_pending", "completed"]
)
params["enrollment_state"] = enrollment_state
# OPTIONAL - exclude_blueprint_courses
"""
When set, only return courses that are not configured as blueprint courses.
"""
if exclude_blueprint_courses is not None:
params["exclude_blueprint_courses"] = exclude_blueprint_courses
# OPTIONAL - include
"""
- "needs_grading_count": Optional information to include with each Course.
When needs_grading_count is given, and the current user has grading
rights, the total number of submissions needing grading for all
assignments is returned.
- "syllabus_body": Optional information to include with each Course.
When syllabus_body is given the user-generated html for the course
syllabus is returned.
- "public_description": Optional information to include with each Course.
When public_description is given the user-generated text for the course
public description is returned.
- "total_scores": Optional information to include with each Course.
When total_scores is given, any student enrollments will also
include the fields 'computed_current_score', 'computed_final_score',
'computed_current_grade', and 'computed_final_grade', as well as (if
the user has permission) 'unposted_current_score',
'unposted_final_score', 'unposted_current_grade', and
'unposted_final_grade' (see Enrollment documentation for more
information on these fields). This argument is ignored if the course is
configured to hide final grades.
- "current_grading_period_scores": Optional information to include with
each Course. When current_grading_period_scores is given and total_scores
is given, any student enrollments will also include the fields
'has_grading_periods',
'totals_for_all_grading_periods_option', 'current_grading_period_title',
'current_grading_period_id', current_period_computed_current_score',
'current_period_computed_final_score',
'current_period_computed_current_grade', and
'current_period_computed_final_grade', as well as (if the user has permission)
'current_period_unposted_current_score',
'current_period_unposted_final_score',
'current_period_unposted_current_grade', and
'current_period_unposted_final_grade' (see Enrollment documentation for
more information on these fields). In addition, when this argument is
passed, the course will have a 'has_grading_periods' attribute
on it. This argument is ignored if the total_scores argument is not
included. If the course is configured to hide final grades, the
following fields are not returned:
'totals_for_all_grading_periods_option',
'current_period_computed_current_score',
'current_period_computed_final_score',
'current_period_computed_current_grade',
'current_period_computed_final_grade',
'current_period_unposted_current_score',
'current_period_unposted_final_score',
'current_period_unposted_current_grade', and
'current_period_unposted_final_grade'
- "grading_periods": Optional information to include with each Course. When
grading_periods is given, a list of the grading periods associated with
each course is returned.
- "term": Optional information to include with each Course. When
term is given, the information for the enrollment term for each course
is returned.
- "account": Optional information to include with each Course. When
account is given, the account json for each course is returned.
- "course_progress": Optional information to include with each Course.
When course_progress is given, each course will include a
'course_progress' object with the fields: 'requirement_count', an integer
specifying the total number of requirements in the course,
'requirement_completed_count', an integer specifying the total number of
requirements in this course that have been completed, and
'next_requirement_url', a string url to the next requirement item, and
'completed_at', the date the course was completed (null if incomplete).
'next_requirement_url' will be null if all requirements have been
completed or the current module does not require sequential progress.
"course_progress" will return an error message if the course is not
module based or the user is not enrolled as a student in the course.
- "sections": Section enrollment information to include with each Course.
Returns an array of hashes containing the section ID (id), section name
(name), start and end dates (start_at, end_at), as well as the enrollment
type (enrollment_role, e.g. 'StudentEnrollment').
- "storage_quota_used_mb": The amount of storage space used by the files in this course
- "total_students": Optional information to include with each Course.
Returns an integer for the total amount of active and invited students.
- "passback_status": Include the grade passback_status
- "favorites": Optional information to include with each Course.
Indicates if the user has marked the course as a favorite course.
- "teachers": Teacher information to include with each Course.
Returns an array of hashes containing the {api:Users:UserDisplay UserDisplay} information
for each teacher in the course.
- "observed_users": Optional information to include with each Course.
Will include data for observed users if the current user has an
observer enrollment.
- "tabs": Optional information to include with each Course.
Will include the list of tabs configured for each course. See the
{api:TabsController#index List available tabs API} for more information.
- "course_image": Optional course image data for when there is a course image
and the course image feature flag has been enabled
- "concluded": Optional information to include with each Course. Indicates whether
the course has been concluded, taking course and term dates into account.
"""
if include is not None:
self._validate_enum(
include,
[
"needs_grading_count",
"syllabus_body",
"public_description",
"total_scores",
"current_grading_period_scores",
"grading_periods",
"term",
"account",
"course_progress",
"sections",
"storage_quota_used_mb",
"total_students",
"passback_status",
"favorites",
"teachers",
"observed_users",
"course_image",
"concluded",
],
)
params["include"] = include
# OPTIONAL - state
"""
If set, only return courses that are in the given state(s).
By default, "available" is returned for students and observers, and
anything except "deleted", for all other enrollment types
"""
if state is not None:
self._validate_enum(
state, ["unpublished", "available", "completed", "deleted"]
)
params["state"] = state
self.logger.debug(
"GET /api/v1/courses with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses".format(**path),
data=data,
params=params,
all_pages=True,
)
def list_courses_for_user(
self, user_id, enrollment_state=None, homeroom=None, include=None, state=None
):
"""
List courses for a user.
Returns a paginated list of active courses for this user. To view the course list for a user other than yourself, you must be either an observer of that user or an administrator.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""
ID
"""
path["user_id"] = user_id
# OPTIONAL - include
"""
- "needs_grading_count": Optional information to include with each Course.
When needs_grading_count is given, and the current user has grading
rights, the total number of submissions needing grading for all
assignments is returned.
- "syllabus_body": Optional information to include with each Course.
When syllabus_body is given the user-generated html for the course
syllabus is returned.
- "public_description": Optional information to include with each Course.
When public_description is given the user-generated text for the course
public description is returned.
- "total_scores": Optional information to include with each Course.
When total_scores is given, any student enrollments will also
include the fields 'computed_current_score', 'computed_final_score',
'computed_current_grade', and 'computed_final_grade' (see Enrollment
documentation for more information on these fields). This argument
is ignored if the course is configured to hide final grades.
- "current_grading_period_scores": Optional information to include with
each Course. When current_grading_period_scores is given and total_scores
is given, any student enrollments will also include the fields
'has_grading_periods',
'totals_for_all_grading_periods_option', 'current_grading_period_title',
'current_grading_period_id', current_period_computed_current_score',
'current_period_computed_final_score',
'current_period_computed_current_grade', and
'current_period_computed_final_grade', as well as (if the user has permission)
'current_period_unposted_current_score',
'current_period_unposted_final_score',
'current_period_unposted_current_grade', and
'current_period_unposted_final_grade' (see Enrollment documentation for
more information on these fields). In addition, when this argument is
passed, the course will have a 'has_grading_periods' attribute
on it. This argument is ignored if the course is configured to hide final
grades or if the total_scores argument is not included.
- "grading_periods": Optional information to include with each Course. When
grading_periods is given, a list of the grading periods associated with
each course is returned.
- "term": Optional information to include with each Course. When
term is given, the information for the enrollment term for each course
is returned.
- "account": Optional information to include with each Course. When
account is given, the account json for each course is returned.
- "course_progress": Optional information to include with each Course.
When course_progress is given, each course will include a
'course_progress' object with the fields: 'requirement_count', an integer
specifying the total number of requirements in the course,
'requirement_completed_count', an integer specifying the total number of
requirements in this course that have been completed, and
'next_requirement_url', a string url to the next requirement item, and
'completed_at', the date the course was completed (null if incomplete).
'next_requirement_url' will be null if all requirements have been
completed or the current module does not require sequential progress.
"course_progress" will return an error message if the course is not
module based or the user is not enrolled as a student in the course.
- "sections": Section enrollment information to include with each Course.
Returns an array of hashes containing the section ID (id), section name
(name), start and end dates (start_at, end_at), as well as the enrollment
type (enrollment_role, e.g. 'StudentEnrollment').
- "storage_quota_used_mb": The amount of storage space used by the files in this course
- "total_students": Optional information to include with each Course.
Returns an integer for the total amount of active and invited students.
- "passback_status": Include the grade passback_status
- "favorites": Optional information to include with each Course.
Indicates if the user has marked the course as a favorite course.
- "teachers": Teacher information to include with each Course.
Returns an array of hashes containing the {api:Users:UserDisplay UserDisplay} information
for each teacher in the course.
- "observed_users": Optional information to include with each Course.
Will include data for observed users if the current user has an
observer enrollment.
- "tabs": Optional information to include with each Course.
Will include the list of tabs configured for each course. See the
{api:TabsController#index List available tabs API} for more information.
- "course_image": Optional course image data for when there is a course image
and the course image feature flag has been enabled
- "concluded": Optional information to include with each Course. Indicates whether
the course has been concluded, taking course and term dates into account.
"""
if include is not None:
self._validate_enum(
include,
[
"needs_grading_count",
"syllabus_body",
"public_description",
"total_scores",
"current_grading_period_scores",
"grading_periods",
"term",
"account",
"course_progress",
"sections",
"storage_quota_used_mb",
"total_students",
"passback_status",
"favorites",
"teachers",
"observed_users",
"course_image",
"concluded",
],
)
params["include"] = include
# OPTIONAL - state
"""
If set, only return courses that are in the given state(s).
By default, "available" is returned for students and observers, and
anything except "deleted", for all other enrollment types
"""
if state is not None:
self._validate_enum(
state, ["unpublished", "available", "completed", "deleted"]
)
params["state"] = state
# OPTIONAL - enrollment_state
"""
When set, only return courses where the user has an enrollment with the given state.
This will respect section/course/term date overrides.
"""
if enrollment_state is not None:
self._validate_enum(
enrollment_state, ["active", "invited_or_pending", "completed"]
)
params["enrollment_state"] = enrollment_state
# OPTIONAL - homeroom
"""
If set, only return homeroom courses.
"""
if homeroom is not None:
params["homeroom"] = homeroom
self.logger.debug(
"GET /api/v1/users/{user_id}/courses with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/users/{user_id}/courses".format(**path),
data=data,
params=params,
all_pages=True,
)
def get_user_progress(self, course_id, user_id):
"""
Get user progress.
Return progress information for the user and course
You can supply +self+ as the user_id to query your own progress in a course. To query another user's progress,
you must be a teacher in the course, an administrator, or a linked observer of the user.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - user_id
"""
ID
"""
path["user_id"] = user_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/users/{user_id}/progress with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/users/{user_id}/progress".format(**path),
data=data,
params=params,
single_item=True,
)
def create_new_course(
self,
account_id,
course_allow_student_forum_attachments=None,
course_allow_student_wiki_edits=None,
course_allow_wiki_comments=None,
course_apply_assignment_group_weights=None,
course_course_code=None,
course_course_format=None,
course_default_view=None,
course_end_at=None,
course_grade_passback_setting=None,
course_grading_standard_id=None,
course_hide_final_grades=None,
course_integration_id=None,
course_is_public=None,
course_is_public_to_auth_users=None,
course_license=None,
course_name=None,
course_open_enrollment=None,
course_public_description=None,
course_public_syllabus=None,
course_public_syllabus_to_auth=None,
course_restrict_enrollments_to_course_dates=None,
course_self_enrollment=None,
course_sis_course_id=None,
course_start_at=None,
course_syllabus_body=None,
course_term_id=None,
course_time_zone=None,
enable_sis_reactivation=None,
enroll_me=None,
offer=None,
):
"""
Create a new course.
Create a new course
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""
ID
"""
path["account_id"] = account_id
# OPTIONAL - course[name]
"""
The name of the course. If omitted, the course will be named "Unnamed
Course."
"""
if course_name is not None:
data["course[name]"] = course_name
# OPTIONAL - course[course_code]
"""
The course code for the course.
"""
if course_course_code is not None:
data["course[course_code]"] = course_course_code
# OPTIONAL - course[start_at]
"""
Course start date in ISO8601 format, e.g. 2011-01-01T01:00Z
This value is ignored unless 'restrict_enrollments_to_course_dates' is set to true.
"""
if course_start_at is not None:
if issubclass(course_start_at.__class__, str):
course_start_at = self._validate_iso8601_string(course_start_at)
elif issubclass(course_start_at.__class__, date) or issubclass(
course_start_at.__class__, datetime
):
course_start_at = course_start_at.strftime("%Y-%m-%dT%H:%M:%S+00:00")
data["course[start_at]"] = course_start_at
# OPTIONAL - course[end_at]
"""
Course end date in ISO8601 format. e.g. 2011-01-01T01:00Z
This value is ignored unless 'restrict_enrollments_to_course_dates' is set to true.
"""
if course_end_at is not None:
if issubclass(course_end_at.__class__, str):
course_end_at = self._validate_iso8601_string(course_end_at)
elif issubclass(course_end_at.__class__, date) or issubclass(
course_end_at.__class__, datetime
):
course_end_at = course_end_at.strftime("%Y-%m-%dT%H:%M:%S+00:00")
data["course[end_at]"] = course_end_at
# OPTIONAL - course[license]
"""
The name of the licensing. Should be one of the following abbreviations
(a descriptive name is included in parenthesis for reference):
- 'private' (Private Copyrighted)
- 'cc_by_nc_nd' (CC Attribution Non-Commercial No Derivatives)
- 'cc_by_nc_sa' (CC Attribution Non-Commercial Share Alike)
- 'cc_by_nc' (CC Attribution Non-Commercial)
- 'cc_by_nd' (CC Attribution No Derivatives)
- 'cc_by_sa' (CC Attribution Share Alike)
- 'cc_by' (CC Attribution)
- 'public_domain' (Public Domain).
"""
if course_license is not None:
data["course[license]"] = course_license
# OPTIONAL - course[is_public]
"""
Set to true if course is public to both authenticated and unauthenticated users.
"""
if course_is_public is not None:
data["course[is_public]"] = course_is_public
# OPTIONAL - course[is_public_to_auth_users]
"""
Set to true if course is public only to authenticated users.
"""
if course_is_public_to_auth_users is not None:
data["course[is_public_to_auth_users]"] = course_is_public_to_auth_users
# OPTIONAL - course[public_syllabus]
"""
Set to true to make the course syllabus public.
"""
if course_public_syllabus is not None:
data["course[public_syllabus]"] = course_public_syllabus
# OPTIONAL - course[public_syllabus_to_auth]
"""
Set to true to make the course syllabus public for authenticated users.
"""
if course_public_syllabus_to_auth is not None:
data["course[public_syllabus_to_auth]"] = course_public_syllabus_to_auth
# OPTIONAL - course[public_description]
"""
A publicly visible description of the course.
"""
if course_public_description is not None:
data["course[public_description]"] = course_public_description
# OPTIONAL - course[allow_student_wiki_edits]
"""
If true, students will be able to modify the course wiki.
"""
if course_allow_student_wiki_edits is not None:
data["course[allow_student_wiki_edits]"] = course_allow_student_wiki_edits
# OPTIONAL - course[allow_wiki_comments]
"""
If true, course members will be able to comment on wiki pages.
"""
if course_allow_wiki_comments is not None:
data["course[allow_wiki_comments]"] = course_allow_wiki_comments
# OPTIONAL - course[allow_student_forum_attachments]
"""
If true, students can attach files to forum posts.
"""
if course_allow_student_forum_attachments is not None:
data[
"course[allow_student_forum_attachments]"
] = course_allow_student_forum_attachments
# OPTIONAL - course[open_enrollment]
"""
Set to true if the course is open enrollment.
"""
if course_open_enrollment is not None:
data["course[open_enrollment]"] = course_open_enrollment
# OPTIONAL - course[self_enrollment]
"""
Set to true if the course is self enrollment.
"""
if course_self_enrollment is not None:
data["course[self_enrollment]"] = course_self_enrollment
# OPTIONAL - course[restrict_enrollments_to_course_dates]
"""
Set to true to restrict user enrollments to the start and end dates of the
course. This parameter is required when using the API, as this option is
not displayed in the Course Settings page. This value must be set to true
in order to specify a course start date and/or end date.
"""
if course_restrict_enrollments_to_course_dates is not None:
data[
"course[restrict_enrollments_to_course_dates]"
] = course_restrict_enrollments_to_course_dates
# OPTIONAL - course[term_id]
"""
        The unique ID of the term to create the course in.
"""
if course_term_id is not None:
data["course[term_id]"] = course_term_id
# OPTIONAL - course[sis_course_id]
"""
The unique SIS identifier.
"""
if course_sis_course_id is not None:
data["course[sis_course_id]"] = course_sis_course_id
# OPTIONAL - course[integration_id]
"""
The unique Integration identifier.
"""
if course_integration_id is not None:
data["course[integration_id]"] = course_integration_id
# OPTIONAL - course[hide_final_grades]
"""
If this option is set to true, the totals in student grades summary will
be hidden.
"""
if course_hide_final_grades is not None:
data["course[hide_final_grades]"] = course_hide_final_grades
# OPTIONAL - course[apply_assignment_group_weights]
"""
Set to true to weight final grade based on assignment groups percentages.
"""
if course_apply_assignment_group_weights is not None:
data[
"course[apply_assignment_group_weights]"
] = course_apply_assignment_group_weights
# OPTIONAL - course[time_zone]
"""
The time zone for the course. Allowed time zones are
{http://www.iana.org/time-zones IANA time zones} or friendlier
{http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}.
"""
if course_time_zone is not None:
data["course[time_zone]"] = course_time_zone
# OPTIONAL - offer
"""
If this option is set to true, the course will be available to students
immediately.
"""
if offer is not None:
data["offer"] = offer
# OPTIONAL - enroll_me
"""
Set to true to enroll the current user as the teacher.
"""
if enroll_me is not None:
data["enroll_me"] = enroll_me
# OPTIONAL - course[default_view]
"""
The type of page that users will see when they first visit the course
* 'feed' Recent Activity Dashboard
* 'modules' Course Modules/Sections Page
* 'assignments' Course Assignments List
* 'syllabus' Course Syllabus Page
other types may be added in the future
"""
if course_default_view is not None:
self._validate_enum(
course_default_view,
["feed", "wiki", "modules", "syllabus", "assignments"],
)
data["course[default_view]"] = course_default_view
# OPTIONAL - course[syllabus_body]
"""
The syllabus body for the course
"""
if course_syllabus_body is not None:
data["course[syllabus_body]"] = course_syllabus_body
# OPTIONAL - course[grading_standard_id]
"""
The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course.
"""
if course_grading_standard_id is not None:
data["course[grading_standard_id]"] = course_grading_standard_id
# OPTIONAL - course[grade_passback_setting]
"""
Optional. The grade_passback_setting for the course. Only 'nightly_sync', 'disabled', and '' are allowed
"""
if course_grade_passback_setting is not None:
data["course[grade_passback_setting]"] = course_grade_passback_setting
# OPTIONAL - course[course_format]
"""
Optional. Specifies the format of the course. (Should be 'on_campus', 'online', or 'blended')
"""
if course_course_format is not None:
data["course[course_format]"] = course_course_format
# OPTIONAL - enable_sis_reactivation
"""
When true, will first try to re-activate a deleted course with matching sis_course_id if possible.
"""
if enable_sis_reactivation is not None:
data["enable_sis_reactivation"] = enable_sis_reactivation
self.logger.debug(
"POST /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/accounts/{account_id}/courses".format(**path),
data=data,
params=params,
single_item=True,
)
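    # Usage sketch (hypothetical): creating and publishing a course through
    # the create-course method above, assuming a configured CoursesAPI
    # instance named `api` and that the method is exposed as
    # `create_new_course` (both names are assumptions, not shown here):
    #
    #     course = api.create_new_course(
    #         account_id=1,
    #         course_name="Intro Astronomy",
    #         offer=True,       # make the course available to students now
    #         enroll_me=True,   # enroll the caller as the teacher
    #     )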
def upload_file(self, course_id):
"""
Upload a file.
Upload a file to the course.
This API endpoint is the first step in uploading a file to a course.
See the {file:file_uploads.html File Upload Documentation} for details on
the file upload workflow.
Only those with the "Manage Files" permission on a course can upload files
to the course. By default, this is Teachers, TAs and Designers.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"POST /api/v1/courses/{course_id}/files with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/courses/{course_id}/files".format(**path),
data=data,
params=params,
no_data=True,
)
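    # Note (usage sketch): this call is only step one of the Canvas file
    # upload workflow; per the File Upload Documentation referenced above,
    # the response carries an upload URL and parameters that must be POSTed
    # in a second request along with the file contents. Assuming a
    # configured `api` instance (not defined here):
    #
    #     upload_info = api.upload_file(course_id=123)
    #     # step two: POST upload_info's returned parameters plus the file
    #     # to the returned upload URL (field names per the Canvas docs).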
def list_students(self, course_id):
"""
List students.
Returns the paginated list of students enrolled in this course.
DEPRECATED: Please use the {api:CoursesController#users course users} endpoint
and pass "student" as the enrollment_type.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/students with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/students".format(**path),
data=data,
params=params,
all_pages=True,
)
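    # Migration sketch: since list_students is deprecated, the equivalent
    # request through the course users endpoint below would be (assuming a
    # configured `api` instance):
    #
    #     students = api.list_users_in_course_users(
    #         course_id=123, enrollment_type="student"
    #     )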
def list_users_in_course_users(
self,
course_id,
enrollment_role=None,
enrollment_role_id=None,
enrollment_state=None,
enrollment_type=None,
include=None,
search_term=None,
sort=None,
user_id=None,
user_ids=None,
):
"""
List users in course.
Returns the paginated list of users in this course. And optionally the user's enrollments in the course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# OPTIONAL - search_term
"""
The partial name or full ID of the users to match and return in the results list.
"""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - sort
"""
When set, sort the results of the search based on the given field.
"""
if sort is not None:
self._validate_enum(sort, ["username", "last_login", "email", "sis_id"])
params["sort"] = sort
# OPTIONAL - enrollment_type
"""
When set, only return users where the user is enrolled as this type.
"student_view" implies include[]=test_student.
This argument is ignored if enrollment_role is given.
"""
if enrollment_type is not None:
self._validate_enum(
enrollment_type,
["teacher", "student", "student_view", "ta", "observer", "designer"],
)
params["enrollment_type"] = enrollment_type
# OPTIONAL - enrollment_role
"""
Deprecated
When set, only return users enrolled with the specified course-level role. This can be
a role created with the {api:RoleOverridesController#add_role Add Role API} or a
base role type of 'StudentEnrollment', 'TeacherEnrollment', 'TaEnrollment',
'ObserverEnrollment', or 'DesignerEnrollment'.
"""
if enrollment_role is not None:
params["enrollment_role"] = enrollment_role
# OPTIONAL - enrollment_role_id
"""
        When set, only return users enrolled with the specified
        course-level role. This can be a role created with the
{api:RoleOverridesController#add_role Add Role API} or a built_in role id with type
'StudentEnrollment', 'TeacherEnrollment', 'TaEnrollment', 'ObserverEnrollment',
or 'DesignerEnrollment'.
"""
if enrollment_role_id is not None:
params["enrollment_role_id"] = enrollment_role_id
# OPTIONAL - include
"""
- "enrollments":
Optionally include with each Course the user's current and invited
enrollments. If the user is enrolled as a student, and the account has
permission to manage or view all grades, each enrollment will include a
'grades' key with 'current_score', 'final_score', 'current_grade' and
'final_grade' values.
- "locked": Optionally include whether an enrollment is locked.
- "avatar_url": Optionally include avatar_url.
- "bio": Optionally include each user's bio.
- "test_student": Optionally include the course's Test Student,
if present. Default is to not include Test Student.
- "custom_links": Optionally include plugin-supplied custom links for each student,
such as analytics information
- "current_grading_period_scores": if enrollments is included as
well as this directive, the scores returned in the enrollment
will be for the current grading period if there is one. A
'grading_period_id' value will also be included with the
scores. if grading_period_id is nil there is no current grading
period and the score is a total score.
- "uuid": Optionally include the users uuid
"""
if include is not None:
self._validate_enum(
include,
[
"enrollments",
"locked",
"avatar_url",
"test_student",
"bio",
"custom_links",
"current_grading_period_scores",
"uuid",
],
)
params["include"] = include
# OPTIONAL - user_id
"""
If this parameter is given and it corresponds to a user in the course,
the +page+ parameter will be ignored and the page containing the specified user
will be returned instead.
"""
if user_id is not None:
params["user_id"] = user_id
# OPTIONAL - user_ids
"""
If included, the course users set will only include users with IDs
specified by the param. Note: this will not work in conjunction
with the "user_id" argument but multiple user_ids can be included.
"""
if user_ids is not None:
params["user_ids"] = user_ids
# OPTIONAL - enrollment_state
"""
        When set, only return users where the enrollment workflow state is one of the given types.
"active" and "invited" enrollments are returned by default.
"""
if enrollment_state is not None:
self._validate_enum(
enrollment_state,
["active", "invited", "rejected", "completed", "inactive"],
)
params["enrollment_state"] = enrollment_state
self.logger.debug(
"GET /api/v1/courses/{course_id}/users with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/users".format(**path),
data=data,
params=params,
all_pages=True,
)
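    # Usage sketch: fetching active students together with their enrollment
    # (and, where permitted, grade) data, assuming a configured `api`
    # instance (not defined here):
    #
    #     users = api.list_users_in_course_users(
    #         course_id=123,
    #         enrollment_type="student",
    #         enrollment_state="active",
    #         include=["enrollments", "avatar_url"],
    #     )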
def list_users_in_course_search_users(
self,
course_id,
enrollment_role=None,
enrollment_role_id=None,
enrollment_state=None,
enrollment_type=None,
include=None,
search_term=None,
sort=None,
user_id=None,
user_ids=None,
):
"""
List users in course.
Returns the paginated list of users in this course. And optionally the user's enrollments in the course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# OPTIONAL - search_term
"""
The partial name or full ID of the users to match and return in the results list.
"""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - sort
"""
When set, sort the results of the search based on the given field.
"""
if sort is not None:
self._validate_enum(sort, ["username", "last_login", "email", "sis_id"])
params["sort"] = sort
# OPTIONAL - enrollment_type
"""
When set, only return users where the user is enrolled as this type.
"student_view" implies include[]=test_student.
This argument is ignored if enrollment_role is given.
"""
if enrollment_type is not None:
self._validate_enum(
enrollment_type,
["teacher", "student", "student_view", "ta", "observer", "designer"],
)
params["enrollment_type"] = enrollment_type
# OPTIONAL - enrollment_role
"""
Deprecated
When set, only return users enrolled with the specified course-level role. This can be
a role created with the {api:RoleOverridesController#add_role Add Role API} or a
base role type of 'StudentEnrollment', 'TeacherEnrollment', 'TaEnrollment',
'ObserverEnrollment', or 'DesignerEnrollment'.
"""
if enrollment_role is not None:
params["enrollment_role"] = enrollment_role
# OPTIONAL - enrollment_role_id
"""
        When set, only return users enrolled with the specified
        course-level role. This can be a role created with the
{api:RoleOverridesController#add_role Add Role API} or a built_in role id with type
'StudentEnrollment', 'TeacherEnrollment', 'TaEnrollment', 'ObserverEnrollment',
or 'DesignerEnrollment'.
"""
if enrollment_role_id is not None:
params["enrollment_role_id"] = enrollment_role_id
# OPTIONAL - include
"""
- "enrollments":
Optionally include with each Course the user's current and invited
enrollments. If the user is enrolled as a student, and the account has
permission to manage or view all grades, each enrollment will include a
'grades' key with 'current_score', 'final_score', 'current_grade' and
'final_grade' values.
- "locked": Optionally include whether an enrollment is locked.
- "avatar_url": Optionally include avatar_url.
- "bio": Optionally include each user's bio.
- "test_student": Optionally include the course's Test Student,
if present. Default is to not include Test Student.
- "custom_links": Optionally include plugin-supplied custom links for each student,
such as analytics information
- "current_grading_period_scores": if enrollments is included as
well as this directive, the scores returned in the enrollment
will be for the current grading period if there is one. A
'grading_period_id' value will also be included with the
scores. if grading_period_id is nil there is no current grading
period and the score is a total score.
- "uuid": Optionally include the users uuid
"""
if include is not None:
self._validate_enum(
include,
[
"enrollments",
"locked",
"avatar_url",
"test_student",
"bio",
"custom_links",
"current_grading_period_scores",
"uuid",
],
)
params["include"] = include
# OPTIONAL - user_id
"""
If this parameter is given and it corresponds to a user in the course,
the +page+ parameter will be ignored and the page containing the specified user
will be returned instead.
"""
if user_id is not None:
params["user_id"] = user_id
# OPTIONAL - user_ids
"""
If included, the course users set will only include users with IDs
specified by the param. Note: this will not work in conjunction
with the "user_id" argument but multiple user_ids can be included.
"""
if user_ids is not None:
params["user_ids"] = user_ids
# OPTIONAL - enrollment_state
"""
        When set, only return users where the enrollment workflow state is one of the given types.
"active" and "invited" enrollments are returned by default.
"""
if enrollment_state is not None:
self._validate_enum(
enrollment_state,
["active", "invited", "rejected", "completed", "inactive"],
)
params["enrollment_state"] = enrollment_state
self.logger.debug(
"GET /api/v1/courses/{course_id}/search_users with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/search_users".format(**path),
data=data,
params=params,
all_pages=True,
)
def list_recently_logged_in_students(self, course_id):
"""
List recently logged in students.
Returns the paginated list of users in this course, ordered by how recently they have
logged in. The records include the 'last_login' field which contains
        a timestamp of the last time that user logged into Canvas. The querying
user must have the 'View usage reports' permission.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/recent_students with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/recent_students".format(**path),
data=data,
params=params,
all_pages=True,
)
def get_single_user(self, course_id, id):
"""
Get single user.
Return information on a single user.
Accepts the same include[] parameters as the :users: action, and returns a
single user with the same fields as that action.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"GET /api/v1/courses/{course_id}/users/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/users/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def search_for_content_share_users(self, course_id, search_term):
"""
Search for content share users.
Returns a paginated list of users you can share content with. Requires the content share
feature and the user must have the manage content permission for the course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - search_term
"""
Term used to find users. Will search available share users with the search term in their name.
"""
params["search_term"] = search_term
self.logger.debug(
"GET /api/v1/courses/{course_id}/content_share_users with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/content_share_users".format(**path),
data=data,
params=params,
all_pages=True,
)
def preview_processed_html(self, course_id, html=None):
"""
Preview processed html.
Preview html content processed for this course
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# OPTIONAL - html
"""
The html content to process
"""
if html is not None:
data["html"] = html
self.logger.debug(
"POST /api/v1/courses/{course_id}/preview_html with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/courses/{course_id}/preview_html".format(**path),
data=data,
params=params,
no_data=True,
)
def course_activity_stream(self, course_id):
"""
Course activity stream.
Returns the current user's course-specific activity stream, paginated.
For full documentation, see the API documentation for the user activity
stream, in the user api.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/activity_stream with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/activity_stream".format(**path),
data=data,
params=params,
no_data=True,
)
def course_activity_stream_summary(self, course_id):
"""
Course activity stream summary.
Returns a summary of the current user's course-specific activity stream.
For full documentation, see the API documentation for the user activity
stream summary, in the user api.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/activity_stream/summary with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/activity_stream/summary".format(**path),
data=data,
params=params,
no_data=True,
)
def course_todo_items(self, course_id):
"""
Course TODO items.
Returns the current user's course-specific todo items.
For full documentation, see the API documentation for the user todo items, in the user api.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/todo with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/todo".format(**path),
data=data,
params=params,
no_data=True,
)
def delete_conclude_course(self, event, id):
"""
Delete/Conclude a course.
Delete or conclude an existing course
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# REQUIRED - event
"""
The action to take on the course.
"""
self._validate_enum(event, ["delete", "conclude"])
params["event"] = event
self.logger.debug(
"DELETE /api/v1/courses/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"DELETE",
"/api/v1/courses/{id}".format(**path),
data=data,
params=params,
no_data=True,
)
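    # Usage sketch: 'conclude' is generally the safer of the two events,
    # since it leaves the course read-only instead of removing it (see the
    # 'undelete' caveats in update_course). Assuming a configured `api`:
    #
    #     api.delete_conclude_course(event="conclude", id=123)
    #     api.delete_conclude_course(event="delete", id=123)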
def get_course_settings(self, course_id):
"""
Get course settings.
Returns some of a course's settings.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/settings with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/settings".format(**path),
data=data,
params=params,
no_data=True,
)
def update_course_settings(
self,
course_id,
allow_student_discussion_editing=None,
allow_student_discussion_topics=None,
allow_student_forum_attachments=None,
allow_student_organized_groups=None,
filter_speed_grader_by_student_group=None,
hide_distribution_graphs=None,
hide_final_grades=None,
hide_sections_on_course_users_page=None,
home_page_announcement_limit=None,
lock_all_announcements=None,
restrict_student_future_view=None,
restrict_student_past_view=None,
show_announcements_on_home_page=None,
syllabus_course_summary=None,
usage_rights_required=None,
):
"""
Update course settings.
Can update the following course settings:
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# OPTIONAL - allow_student_discussion_topics
"""
Let students create discussion topics
"""
if allow_student_discussion_topics is not None:
data["allow_student_discussion_topics"] = allow_student_discussion_topics
# OPTIONAL - allow_student_forum_attachments
"""
Let students attach files to discussions
"""
if allow_student_forum_attachments is not None:
data["allow_student_forum_attachments"] = allow_student_forum_attachments
# OPTIONAL - allow_student_discussion_editing
"""
Let students edit or delete their own discussion posts
"""
if allow_student_discussion_editing is not None:
data["allow_student_discussion_editing"] = allow_student_discussion_editing
# OPTIONAL - allow_student_organized_groups
"""
Let students organize their own groups
"""
if allow_student_organized_groups is not None:
data["allow_student_organized_groups"] = allow_student_organized_groups
# OPTIONAL - filter_speed_grader_by_student_group
"""
Filter SpeedGrader to only the selected student group
"""
if filter_speed_grader_by_student_group is not None:
data[
"filter_speed_grader_by_student_group"
] = filter_speed_grader_by_student_group
# OPTIONAL - hide_final_grades
"""
Hide totals in student grades summary
"""
if hide_final_grades is not None:
data["hide_final_grades"] = hide_final_grades
# OPTIONAL - hide_distribution_graphs
"""
Hide grade distribution graphs from students
"""
if hide_distribution_graphs is not None:
data["hide_distribution_graphs"] = hide_distribution_graphs
# OPTIONAL - hide_sections_on_course_users_page
"""
Disallow students from viewing students in sections they do not belong to
"""
if hide_sections_on_course_users_page is not None:
data[
"hide_sections_on_course_users_page"
] = hide_sections_on_course_users_page
# OPTIONAL - lock_all_announcements
"""
Disable comments on announcements
"""
if lock_all_announcements is not None:
data["lock_all_announcements"] = lock_all_announcements
# OPTIONAL - usage_rights_required
"""
Copyright and license information must be provided for files before they are published.
"""
if usage_rights_required is not None:
data["usage_rights_required"] = usage_rights_required
# OPTIONAL - restrict_student_past_view
"""
Restrict students from viewing courses after end date
"""
if restrict_student_past_view is not None:
data["restrict_student_past_view"] = restrict_student_past_view
# OPTIONAL - restrict_student_future_view
"""
Restrict students from viewing courses before start date
"""
if restrict_student_future_view is not None:
data["restrict_student_future_view"] = restrict_student_future_view
# OPTIONAL - show_announcements_on_home_page
"""
Show the most recent announcements on the Course home page (if a Wiki, defaults to five announcements, configurable via home_page_announcement_limit).
Canvas for Elementary subjects ignore this setting.
"""
if show_announcements_on_home_page is not None:
data["show_announcements_on_home_page"] = show_announcements_on_home_page
# OPTIONAL - home_page_announcement_limit
"""
Limit the number of announcements on the home page if enabled via show_announcements_on_home_page
"""
if home_page_announcement_limit is not None:
data["home_page_announcement_limit"] = home_page_announcement_limit
# OPTIONAL - syllabus_course_summary
"""
Show the course summary (list of assignments and calendar events) on the syllabus page. Default is true.
"""
if syllabus_course_summary is not None:
data["syllabus_course_summary"] = syllabus_course_summary
self.logger.debug(
"PUT /api/v1/courses/{course_id}/settings with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/v1/courses/{course_id}/settings".format(**path),
data=data,
params=params,
no_data=True,
)
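    # Usage sketch: several settings can be flipped in a single update, for
    # example hiding grade summaries and locking announcement comments
    # (assuming a configured `api` instance):
    #
    #     api.update_course_settings(
    #         course_id=123,
    #         hide_final_grades=True,
    #         lock_all_announcements=True,
    #     )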
def return_test_student_for_course(self, course_id):
"""
Return test student for course.
Returns information for a test student in this course. Creates a test
student if one does not already exist for the course. The caller must have
permission to access the course's student view.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/student_view_student with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/student_view_student".format(**path),
data=data,
params=params,
single_item=True,
)
def get_single_course_courses(self, id, include=None, teacher_limit=None):
"""
Get a single course.
Return information on a single course.
Accepts the same include[] parameters as the list action plus:
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - include
"""
- "all_courses": Also search recently deleted courses.
- "permissions": Include permissions the current user has
for the course.
- "observed_users": include observed users in the enrollments
- "course_image": Optional course image data for when there is a course image
and the course image feature flag has been enabled
- "concluded": Optional information to include with each Course. Indicates whether
the course has been concluded, taking course and term dates into account.
"""
if include is not None:
self._validate_enum(
include,
[
"needs_grading_count",
"syllabus_body",
"public_description",
"total_scores",
"current_grading_period_scores",
"term",
"account",
"course_progress",
"sections",
"storage_quota_used_mb",
"total_students",
"passback_status",
"favorites",
"teachers",
"observed_users",
"all_courses",
"permissions",
"observed_users",
"course_image",
"concluded",
],
)
params["include"] = include
# OPTIONAL - teacher_limit
"""
The maximum number of teacher enrollments to show.
If the course contains more teachers than this, instead of giving the teacher
enrollments, the count of teachers will be given under a _teacher_count_ key.
"""
if teacher_limit is not None:
params["teacher_limit"] = teacher_limit
self.logger.debug(
"GET /api/v1/courses/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
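    # Usage sketch: requesting a course with its term and student totals,
    # capping the embedded teacher list at two entries; past that limit the
    # response carries a teacher_count key instead (assuming `api`):
    #
    #     course = api.get_single_course_courses(
    #         id=123,
    #         include=["term", "total_students", "teachers"],
    #         teacher_limit=2,
    #     )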
def get_single_course_accounts(
self, account_id, id, include=None, teacher_limit=None
):
"""
Get a single course.
Return information on a single course.
Accepts the same include[] parameters as the list action plus:
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""
ID
"""
path["account_id"] = account_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - include
"""
- "all_courses": Also search recently deleted courses.
- "permissions": Include permissions the current user has
for the course.
- "observed_users": include observed users in the enrollments
- "course_image": Optional course image data for when there is a course image
and the course image feature flag has been enabled
- "concluded": Optional information to include with each Course. Indicates whether
the course has been concluded, taking course and term dates into account.
"""
if include is not None:
self._validate_enum(
include,
[
"needs_grading_count",
"syllabus_body",
"public_description",
"total_scores",
"current_grading_period_scores",
"term",
"account",
"course_progress",
"sections",
"storage_quota_used_mb",
"total_students",
"passback_status",
"favorites",
"teachers",
"observed_users",
"all_courses",
"permissions",
"observed_users",
"course_image",
"concluded",
],
)
params["include"] = include
# OPTIONAL - teacher_limit
"""
The maximum number of teacher enrollments to show.
If the course contains more teachers than this, instead of giving the teacher
enrollments, the count of teachers will be given under a _teacher_count_ key.
"""
if teacher_limit is not None:
params["teacher_limit"] = teacher_limit
self.logger.debug(
"GET /api/v1/accounts/{account_id}/courses/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/accounts/{account_id}/courses/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def update_course(
self,
id,
course_account_id=None,
course_allow_student_forum_attachments=None,
course_allow_student_wiki_edits=None,
course_allow_wiki_comments=None,
course_apply_assignment_group_weights=None,
course_blueprint=None,
course_blueprint_restrictions=None,
course_blueprint_restrictions_by_object_type=None,
course_course_code=None,
course_course_color=None,
course_course_format=None,
course_default_view=None,
course_enable_pace_plans=None,
course_end_at=None,
course_event=None,
course_friendly_name=None,
course_grade_passback_setting=None,
course_grading_standard_id=None,
course_hide_final_grades=None,
course_homeroom_course=None,
course_homeroom_course_id=None,
course_image_id=None,
course_image_url=None,
course_integration_id=None,
course_is_public=None,
course_is_public_to_auth_users=None,
course_license=None,
course_name=None,
course_open_enrollment=None,
course_public_description=None,
course_public_syllabus=None,
course_public_syllabus_to_auth=None,
course_remove_banner_image=None,
course_remove_image=None,
course_restrict_enrollments_to_course_dates=None,
course_self_enrollment=None,
course_sis_course_id=None,
course_start_at=None,
course_storage_quota_mb=None,
course_syllabus_body=None,
course_syllabus_course_summary=None,
course_sync_enrollments_from_homeroom=None,
course_template=None,
course_term_id=None,
course_time_zone=None,
course_use_blueprint_restrictions_by_object_type=None,
offer=None,
):
"""
Update a course.
Update an existing course.
Arguments are the same as Courses#create, with a few exceptions (enroll_me).
If a user has content management rights, but not full course editing rights, the only attribute
editable through this endpoint will be "syllabus_body"
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - course[account_id]
"""
The unique ID of the account to move the course to.
"""
if course_account_id is not None:
data["course[account_id]"] = course_account_id
# OPTIONAL - course[name]
"""
The name of the course. If omitted, the course will be named "Unnamed
Course."
"""
if course_name is not None:
data["course[name]"] = course_name
# OPTIONAL - course[course_code]
"""
The course code for the course.
"""
if course_course_code is not None:
data["course[course_code]"] = course_course_code
# OPTIONAL - course[start_at]
"""
Course start date in ISO8601 format, e.g. 2011-01-01T01:00Z
This value is ignored unless 'restrict_enrollments_to_course_dates' is set to true,
or the course is already published.
"""
if course_start_at is not None:
if issubclass(course_start_at.__class__, str):
course_start_at = self._validate_iso8601_string(course_start_at)
elif issubclass(course_start_at.__class__, date) or issubclass(
course_start_at.__class__, datetime
):
course_start_at = course_start_at.strftime("%Y-%m-%dT%H:%M:%S+00:00")
data["course[start_at]"] = course_start_at
# OPTIONAL - course[end_at]
"""
Course end date in ISO8601 format. e.g. 2011-01-01T01:00Z
This value is ignored unless 'restrict_enrollments_to_course_dates' is set to true.
"""
if course_end_at is not None:
if issubclass(course_end_at.__class__, str):
course_end_at = self._validate_iso8601_string(course_end_at)
elif issubclass(course_end_at.__class__, date) or issubclass(
course_end_at.__class__, datetime
):
course_end_at = course_end_at.strftime("%Y-%m-%dT%H:%M:%S+00:00")
data["course[end_at]"] = course_end_at
# OPTIONAL - course[license]
"""
The name of the licensing. Should be one of the following abbreviations
(a descriptive name is included in parenthesis for reference):
- 'private' (Private Copyrighted)
- 'cc_by_nc_nd' (CC Attribution Non-Commercial No Derivatives)
- 'cc_by_nc_sa' (CC Attribution Non-Commercial Share Alike)
- 'cc_by_nc' (CC Attribution Non-Commercial)
- 'cc_by_nd' (CC Attribution No Derivatives)
- 'cc_by_sa' (CC Attribution Share Alike)
- 'cc_by' (CC Attribution)
- 'public_domain' (Public Domain).
"""
if course_license is not None:
data["course[license]"] = course_license
# OPTIONAL - course[is_public]
"""
Set to true if course is public to both authenticated and unauthenticated users.
"""
if course_is_public is not None:
data["course[is_public]"] = course_is_public
# OPTIONAL - course[is_public_to_auth_users]
"""
Set to true if course is public only to authenticated users.
"""
if course_is_public_to_auth_users is not None:
data["course[is_public_to_auth_users]"] = course_is_public_to_auth_users
# OPTIONAL - course[public_syllabus]
"""
Set to true to make the course syllabus public.
"""
if course_public_syllabus is not None:
data["course[public_syllabus]"] = course_public_syllabus
# OPTIONAL - course[public_syllabus_to_auth]
"""
        Set to true to make the course syllabus public for authenticated users.
"""
if course_public_syllabus_to_auth is not None:
data["course[public_syllabus_to_auth]"] = course_public_syllabus_to_auth
# OPTIONAL - course[public_description]
"""
A publicly visible description of the course.
"""
if course_public_description is not None:
data["course[public_description]"] = course_public_description
# OPTIONAL - course[allow_student_wiki_edits]
"""
If true, students will be able to modify the course wiki.
"""
if course_allow_student_wiki_edits is not None:
data["course[allow_student_wiki_edits]"] = course_allow_student_wiki_edits
# OPTIONAL - course[allow_wiki_comments]
"""
If true, course members will be able to comment on wiki pages.
"""
if course_allow_wiki_comments is not None:
data["course[allow_wiki_comments]"] = course_allow_wiki_comments
# OPTIONAL - course[allow_student_forum_attachments]
"""
If true, students can attach files to forum posts.
"""
if course_allow_student_forum_attachments is not None:
data[
"course[allow_student_forum_attachments]"
] = course_allow_student_forum_attachments
# OPTIONAL - course[open_enrollment]
"""
Set to true if the course is open enrollment.
"""
if course_open_enrollment is not None:
data["course[open_enrollment]"] = course_open_enrollment
# OPTIONAL - course[self_enrollment]
"""
Set to true if the course is self enrollment.
"""
if course_self_enrollment is not None:
data["course[self_enrollment]"] = course_self_enrollment
# OPTIONAL - course[restrict_enrollments_to_course_dates]
"""
Set to true to restrict user enrollments to the start and end dates of the
course. This parameter is required when using the API, as this option is
not displayed in the Course Settings page. Setting this value to false will
remove the course end date (if it exists), as well as the course start date
(if the course is unpublished).
"""
if course_restrict_enrollments_to_course_dates is not None:
data[
"course[restrict_enrollments_to_course_dates]"
] = course_restrict_enrollments_to_course_dates
# OPTIONAL - course[term_id]
"""
        The unique ID of the term in which to create the course.
"""
if course_term_id is not None:
data["course[term_id]"] = course_term_id
# OPTIONAL - course[sis_course_id]
"""
The unique SIS identifier.
"""
if course_sis_course_id is not None:
data["course[sis_course_id]"] = course_sis_course_id
# OPTIONAL - course[integration_id]
"""
The unique Integration identifier.
"""
if course_integration_id is not None:
data["course[integration_id]"] = course_integration_id
# OPTIONAL - course[hide_final_grades]
"""
If this option is set to true, the totals in student grades summary will
be hidden.
"""
if course_hide_final_grades is not None:
data["course[hide_final_grades]"] = course_hide_final_grades
# OPTIONAL - course[time_zone]
"""
The time zone for the course. Allowed time zones are
{http://www.iana.org/time-zones IANA time zones} or friendlier
{http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}.
"""
if course_time_zone is not None:
data["course[time_zone]"] = course_time_zone
# OPTIONAL - course[apply_assignment_group_weights]
"""
Set to true to weight final grade based on assignment groups percentages.
"""
if course_apply_assignment_group_weights is not None:
data[
"course[apply_assignment_group_weights]"
] = course_apply_assignment_group_weights
# OPTIONAL - course[storage_quota_mb]
"""
Set the storage quota for the course, in megabytes. The caller must have
the "Manage storage quotas" account permission.
"""
if course_storage_quota_mb is not None:
data["course[storage_quota_mb]"] = course_storage_quota_mb
# OPTIONAL - offer
"""
If this option is set to true, the course will be available to students
immediately.
"""
if offer is not None:
data["offer"] = offer
# OPTIONAL - course[event]
"""
The action to take on each course.
* 'claim' makes a course no longer visible to students. This action is also called "unpublish" on the web site.
A course cannot be unpublished if students have received graded submissions.
* 'offer' makes a course visible to students. This action is also called "publish" on the web site.
* 'conclude' prevents future enrollments and makes a course read-only for all participants. The course still appears
in prior-enrollment lists.
* 'delete' completely removes the course from the web site (including course menus and prior-enrollment lists).
All enrollments are deleted. Course content may be physically deleted at a future date.
* 'undelete' attempts to recover a course that has been deleted. This action requires account administrative rights.
(Recovery is not guaranteed; please conclude rather than delete a course if there is any possibility the course
will be used again.) The recovered course will be unpublished. Deleted enrollments will not be recovered.
"""
if course_event is not None:
self._validate_enum(
course_event, ["claim", "offer", "conclude", "delete", "undelete"]
)
data["course[event]"] = course_event
# OPTIONAL - course[default_view]
"""
The type of page that users will see when they first visit the course
* 'feed' Recent Activity Dashboard
* 'wiki' Wiki Front Page
* 'modules' Course Modules/Sections Page
* 'assignments' Course Assignments List
* 'syllabus' Course Syllabus Page
other types may be added in the future
"""
if course_default_view is not None:
self._validate_enum(
course_default_view,
["feed", "wiki", "modules", "syllabus", "assignments"],
)
data["course[default_view]"] = course_default_view
# OPTIONAL - course[syllabus_body]
"""
The syllabus body for the course
"""
if course_syllabus_body is not None:
data["course[syllabus_body]"] = course_syllabus_body
# OPTIONAL - course[syllabus_course_summary]
"""
Optional. Indicates whether the Course Summary (consisting of the course's assignments and calendar events) is displayed on the syllabus page. Defaults to +true+.
"""
if course_syllabus_course_summary is not None:
data["course[syllabus_course_summary]"] = course_syllabus_course_summary
# OPTIONAL - course[grading_standard_id]
"""
The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course.
"""
if course_grading_standard_id is not None:
data["course[grading_standard_id]"] = course_grading_standard_id
# OPTIONAL - course[grade_passback_setting]
"""
Optional. The grade_passback_setting for the course. Only 'nightly_sync' and '' are allowed
"""
if course_grade_passback_setting is not None:
data["course[grade_passback_setting]"] = course_grade_passback_setting
# OPTIONAL - course[course_format]
"""
Optional. Specifies the format of the course. (Should be either 'on_campus' or 'online')
"""
if course_course_format is not None:
data["course[course_format]"] = course_course_format
# OPTIONAL - course[image_id]
"""
This is a file ID corresponding to an image file in the course that will
be used as the course image.
This will clear the course's image_url setting if set. If you attempt
to provide image_url and image_id in a request it will fail.
"""
if course_image_id is not None:
data["course[image_id]"] = course_image_id
# OPTIONAL - course[image_url]
"""
This is a URL to an image to be used as the course image.
This will clear the course's image_id setting if set. If you attempt
to provide image_url and image_id in a request it will fail.
"""
if course_image_url is not None:
data["course[image_url]"] = course_image_url
# OPTIONAL - course[remove_image]
"""
If this option is set to true, the course image url and course image
ID are both set to nil
"""
if course_remove_image is not None:
data["course[remove_image]"] = course_remove_image
# OPTIONAL - course[remove_banner_image]
"""
If this option is set to true, the course banner image url and course
banner image ID are both set to nil
"""
if course_remove_banner_image is not None:
data["course[remove_banner_image]"] = course_remove_banner_image
# OPTIONAL - course[blueprint]
"""
Sets the course as a blueprint course.
"""
if course_blueprint is not None:
data["course[blueprint]"] = course_blueprint
# OPTIONAL - course[blueprint_restrictions]
"""
Sets a default set to apply to blueprint course objects when restricted,
unless _use_blueprint_restrictions_by_object_type_ is enabled.
See the {api:Blueprint_Courses:BlueprintRestriction Blueprint Restriction} documentation
"""
if course_blueprint_restrictions is not None:
data["course[blueprint_restrictions]"] = course_blueprint_restrictions
# OPTIONAL - course[use_blueprint_restrictions_by_object_type]
"""
When enabled, the _blueprint_restrictions_ parameter will be ignored in favor of
the _blueprint_restrictions_by_object_type_ parameter
"""
if course_use_blueprint_restrictions_by_object_type is not None:
data[
"course[use_blueprint_restrictions_by_object_type]"
] = course_use_blueprint_restrictions_by_object_type
# OPTIONAL - course[blueprint_restrictions_by_object_type]
"""
Allows setting multiple {api:Blueprint_Courses:BlueprintRestriction Blueprint Restriction}
to apply to blueprint course objects of the matching type when restricted.
The possible object types are "assignment", "attachment", "discussion_topic", "quiz" and "wiki_page".
Example usage:
course[blueprint_restrictions_by_object_type][assignment][content]=1
"""
if course_blueprint_restrictions_by_object_type is not None:
data[
"course[blueprint_restrictions_by_object_type]"
] = course_blueprint_restrictions_by_object_type
# OPTIONAL - course[homeroom_course]
"""
Sets the course as a homeroom course. The setting takes effect only when the course is associated
with a Canvas for Elementary-enabled account.
"""
if course_homeroom_course is not None:
data["course[homeroom_course]"] = course_homeroom_course
# OPTIONAL - course[sync_enrollments_from_homeroom]
"""
Syncs enrollments from the homeroom that is set in homeroom_course_id. The setting only takes effect when the
course is associated with a Canvas for Elementary-enabled account and sync_enrollments_from_homeroom is enabled.
"""
if course_sync_enrollments_from_homeroom is not None:
data[
"course[sync_enrollments_from_homeroom]"
] = course_sync_enrollments_from_homeroom
# OPTIONAL - course[homeroom_course_id]
"""
Sets the Homeroom Course id to be used with sync_enrollments_from_homeroom. The setting only takes effect when the
course is associated with a Canvas for Elementary-enabled account and sync_enrollments_from_homeroom is enabled.
"""
if course_homeroom_course_id is not None:
data["course[homeroom_course_id]"] = course_homeroom_course_id
# OPTIONAL - course[template]
"""
Enable or disable the course as a template that can be selected by an account
"""
if course_template is not None:
data["course[template]"] = course_template
# OPTIONAL - course[course_color]
"""
Sets a color in hex code format to be associated with the course. The setting takes effect only when the course
is associated with a Canvas for Elementary-enabled account.
"""
if course_course_color is not None:
data["course[course_color]"] = course_course_color
# OPTIONAL - course[friendly_name]
"""
Set a friendly name for the course. If this is provided and the course is associated with a Canvas for
Elementary account, it will be shown instead of the course name. This setting takes priority over
course nicknames defined by individual users.
"""
if course_friendly_name is not None:
data["course[friendly_name]"] = course_friendly_name
# OPTIONAL - course[enable_pace_plans]
"""
Enable or disable Pace Plans for the course. This setting only has an effect when the Pace Plans feature flag is
enabled for the sub-account. Otherwise, Pace Plans are always disabled.
Note: Pace Plans is in active development.
"""
if course_enable_pace_plans is not None:
data["course[enable_pace_plans]"] = course_enable_pace_plans
self.logger.debug(
"PUT /api/v1/courses/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/v1/courses/{id}".format(**path),
data=data,
params=params,
no_data=True,
)
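    # Usage sketch: publishing or unpublishing an existing course goes
    # through the course[event] argument rather than a dedicated endpoint
    # (assuming a configured `api` instance):
    #
    #     api.update_course(id=123, course_event="offer")   # publish
    #     api.update_course(id=123, course_event="claim")   # unpublish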
def update_courses(self, account_id, course_ids, event):
"""
Update courses.
Update multiple courses in an account. Operates asynchronously; use the {api:ProgressController#show progress endpoint}
to query the status of an operation.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""
ID
"""
path["account_id"] = account_id
# REQUIRED - course_ids
"""
List of ids of courses to update. At most 500 courses may be updated in one call.
"""
data["course_ids"] = course_ids
# REQUIRED - event
"""
The action to take on each course. Must be one of 'offer', 'conclude', 'delete', or 'undelete'.
* 'offer' makes a course visible to students. This action is also called "publish" on the web site.
* 'conclude' prevents future enrollments and makes a course read-only for all participants. The course still appears
in prior-enrollment lists.
* 'delete' completely removes the course from the web site (including course menus and prior-enrollment lists).
All enrollments are deleted. Course content may be physically deleted at a future date.
* 'undelete' attempts to recover a course that has been deleted. (Recovery is not guaranteed; please conclude
rather than delete a course if there is any possibility the course will be used again.) The recovered course
will be unpublished. Deleted enrollments will not be recovered.
"""
self._validate_enum(event, ["offer", "conclude", "delete", "undelete"])
data["event"] = event
self.logger.debug(
"PUT /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/v1/accounts/{account_id}/courses".format(**path),
data=data,
params=params,
single_item=True,
)
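    # Usage sketch: bulk-concluding courses. The operation runs
    # asynchronously, so the returned Progress record should be polled via
    # the progress endpoint noted in the docstring (assuming `api`):
    #
    #     progress = api.update_courses(
    #         account_id=1, course_ids=[101, 102, 103], event="conclude"
    #     )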
def reset_course(self, course_id):
"""
Reset a course.
Deletes the current course, and creates a new equivalent course with
no content, but all sections and users moved over.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"POST /api/v1/courses/{course_id}/reset_content with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/courses/{course_id}/reset_content".format(**path),
data=data,
params=params,
single_item=True,
)
def get_effective_due_dates(self, course_id, assignment_ids=None):
"""
Get effective due dates.
For each assignment in the course, returns each assigned student's ID
and their corresponding due date along with some grading period data.
Returns a collection with keys representing assignment IDs and values as a
collection containing keys representing student IDs and values representing
the student's effective due_at, the grading_period_id of which the due_at falls
in, and whether or not the grading period is closed (in_closed_grading_period)
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# OPTIONAL - assignment_ids
"""
        The list of assignment IDs for which effective student due dates are
        requested. If not provided, all assignments in the course will be used.
"""
if assignment_ids is not None:
params["assignment_ids"] = assignment_ids
self.logger.debug(
"GET /api/v1/courses/{course_id}/effective_due_dates with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/effective_due_dates".format(**path),
data=data,
params=params,
no_data=True,
)
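    # Usage sketch: restricting the due-date report to specific assignments.
    # Per the docstring above, the response is keyed by assignment ID, then
    # student ID (the access pattern below is an illustration, not verified
    # output). Assuming a configured `api` instance:
    #
    #     dates = api.get_effective_due_dates(
    #         course_id=123, assignment_ids=["1", "2"]
    #     )
    #     # e.g. dates["1"]["<student_id>"]["due_at"]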
def permissions(self, course_id, permissions=None):
"""
Permissions.
Returns permission information for the calling user in the given course.
See also the {api:AccountsController#permissions Account} and
{api:GroupsController#permissions Group} counterparts.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# OPTIONAL - permissions
"""
List of permissions to check against the authenticated user.
Permission names are documented in the {api:RoleOverridesController#add_role Create a role} endpoint.
"""
if permissions is not None:
params["permissions"] = permissions
self.logger.debug(
"GET /api/v1/courses/{course_id}/permissions with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/permissions".format(**path),
data=data,
params=params,
no_data=True,
)
def get_bulk_user_progress(self, course_id):
"""
Get bulk user progress.
Returns progress information for all users enrolled in the given course.
You must be a user who has permission to view all grades in the course (such as a teacher or administrator).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
self.logger.debug(
"GET /api/v1/courses/{course_id}/bulk_user_progress with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/bulk_user_progress".format(**path),
data=data,
params=params,
no_data=True,
)
def get_course_copy_status(self, course_id, id):
"""
Get course copy status.
DEPRECATED: Please use the {api:ContentMigrationsController#create Content Migrations API}
Retrieve the status of a course copy
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"GET /api/v1/courses/{course_id}/course_copy/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/courses/{course_id}/course_copy/{id}".format(**path),
data=data,
params=params,
no_data=True,
)
    def copy_course_content(
        self, course_id, except_=None, only=None, source_course=None
    ):
        """
        Copy course content.
        DEPRECATED: Please use the {api:ContentMigrationsController#create Content Migrations API}
        Copies content from one course into another. The default is to copy all course
        content. You can control specific types to copy by using either the 'except' option
        or the 'only' option.
        The response is the same as the course copy status endpoint
        Note: the API's 'except' parameter is exposed as 'except_' here because
        'except' is a reserved word in Python; the form field sent to Canvas
        is still named 'except'.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """
        ID
        """
        path["course_id"] = course_id
        # OPTIONAL - source_course
        """
        ID or SIS-ID of the course to copy the content from
        """
        if source_course is not None:
            data["source_course"] = source_course
        # OPTIONAL - except
        """
        A list of the course content types to exclude, all areas not listed will
        be copied.
        """
        if except_ is not None:
            self._validate_enum(
                except_,
                [
                    "course_settings",
                    "assignments",
                    "external_tools",
                    "files",
                    "topics",
                    "calendar_events",
                    "quizzes",
                    "wiki_pages",
                    "modules",
                    "outcomes",
                ],
            )
            data["except"] = except_
        # OPTIONAL - only
        """
        A list of the course content types to copy, all areas not listed will not
        be copied.
        """
        if only is not None:
            self._validate_enum(
                only,
                [
                    "course_settings",
                    "assignments",
                    "external_tools",
                    "files",
                    "topics",
                    "calendar_events",
                    "quizzes",
                    "wiki_pages",
                    "modules",
                    "outcomes",
                ],
            )
            data["only"] = only
        self.logger.debug(
            "POST /api/v1/courses/{course_id}/course_copy with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "POST",
            "/api/v1/courses/{course_id}/course_copy".format(**path),
            data=data,
            params=params,
            no_data=True,
        )
class Term(BaseModel):
"""Term Model."""
def __init__(self, id=None, name=None, start_at=None, end_at=None):
"""Init method for Term class."""
self._id = id
self._name = name
self._start_at = start_at
self._end_at = end_at
self.logger = logging.getLogger("py3canvas.Term")
@property
def id(self):
"""id."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def name(self):
"""name."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn(
"Setting values on name will NOT update the remote Canvas instance."
)
self._name = value
@property
def start_at(self):
"""start_at."""
return self._start_at
@start_at.setter
def start_at(self, value):
"""Setter for start_at property."""
self.logger.warn(
"Setting values on start_at will NOT update the remote Canvas instance."
)
self._start_at = value
@property
def end_at(self):
"""end_at."""
return self._end_at
@end_at.setter
def end_at(self, value):
"""Setter for end_at property."""
self.logger.warn(
"Setting values on end_at will NOT update the remote Canvas instance."
)
self._end_at = value
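    # Note (usage sketch): model setters only mutate the local object; as
    # the logged warning says, they never write back to the remote Canvas
    # instance. For example:
    #
    #     term = Term(id=1, name="Fall")
    #     term.name = "Fall 2021"  # logs a warning, updates only this object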
class Courseprogress(BaseModel):
"""Courseprogress Model."""
def __init__(
self,
requirement_count=None,
requirement_completed_count=None,
next_requirement_url=None,
completed_at=None,
):
"""Init method for Courseprogress class."""
self._requirement_count = requirement_count
self._requirement_completed_count = requirement_completed_count
self._next_requirement_url = next_requirement_url
self._completed_at = completed_at
self.logger = logging.getLogger("py3canvas.Courseprogress")
@property
def requirement_count(self):
"""total number of requirements from all modules."""
return self._requirement_count
@requirement_count.setter
def requirement_count(self, value):
"""Setter for requirement_count property."""
self.logger.warn(
"Setting values on requirement_count will NOT update the remote Canvas instance."
)
self._requirement_count = value
@property
def requirement_completed_count(self):
"""total number of requirements the user has completed from all modules."""
return self._requirement_completed_count
@requirement_completed_count.setter
def requirement_completed_count(self, value):
"""Setter for requirement_completed_count property."""
self.logger.warn(
"Setting values on requirement_completed_count will NOT update the remote Canvas instance."
)
self._requirement_completed_count = value
@property
def next_requirement_url(self):
"""url to next module item that has an unmet requirement. null if the user has completed the course or the current module does not require sequential progress."""
return self._next_requirement_url
@next_requirement_url.setter
def next_requirement_url(self, value):
"""Setter for next_requirement_url property."""
self.logger.warn(
"Setting values on next_requirement_url will NOT update the remote Canvas instance."
)
self._next_requirement_url = value
@property
def completed_at(self):
"""date the course was completed. null if the course has not been completed by this user."""
return self._completed_at
@completed_at.setter
def completed_at(self, value):
"""Setter for completed_at property."""
self.logger.warn(
"Setting values on completed_at will NOT update the remote Canvas instance."
)
self._completed_at = value
class Course(BaseModel):
"""Course Model."""
def __init__(
self,
id=None,
sis_course_id=None,
uuid=None,
integration_id=None,
sis_import_id=None,
name=None,
course_code=None,
workflow_state=None,
account_id=None,
root_account_id=None,
enrollment_term_id=None,
grading_periods=None,
grading_standard_id=None,
grade_passback_setting=None,
created_at=None,
start_at=None,
end_at=None,
locale=None,
enrollments=None,
total_students=None,
calendar=None,
default_view=None,
syllabus_body=None,
needs_grading_count=None,
term=None,
course_progress=None,
apply_assignment_group_weights=None,
permissions=None,
is_public=None,
is_public_to_auth_users=None,
public_syllabus=None,
public_syllabus_to_auth=None,
public_description=None,
storage_quota_mb=None,
storage_quota_used_mb=None,
hide_final_grades=None,
license=None,
allow_student_assignment_edits=None,
allow_wiki_comments=None,
allow_student_forum_attachments=None,
open_enrollment=None,
self_enrollment=None,
restrict_enrollments_to_course_dates=None,
course_format=None,
access_restricted_by_date=None,
time_zone=None,
blueprint=None,
blueprint_restrictions=None,
blueprint_restrictions_by_object_type=None,
template=None,
):
"""Init method for Course class."""
self._id = id
self._sis_course_id = sis_course_id
self._uuid = uuid
self._integration_id = integration_id
self._sis_import_id = sis_import_id
self._name = name
self._course_code = course_code
self._workflow_state = workflow_state
self._account_id = account_id
self._root_account_id = root_account_id
self._enrollment_term_id = enrollment_term_id
self._grading_periods = grading_periods
self._grading_standard_id = grading_standard_id
self._grade_passback_setting = grade_passback_setting
self._created_at = created_at
self._start_at = start_at
self._end_at = end_at
self._locale = locale
self._enrollments = enrollments
self._total_students = total_students
self._calendar = calendar
self._default_view = default_view
self._syllabus_body = syllabus_body
self._needs_grading_count = needs_grading_count
self._term = term
self._course_progress = course_progress
self._apply_assignment_group_weights = apply_assignment_group_weights
self._permissions = permissions
self._is_public = is_public
self._is_public_to_auth_users = is_public_to_auth_users
self._public_syllabus = public_syllabus
self._public_syllabus_to_auth = public_syllabus_to_auth
self._public_description = public_description
self._storage_quota_mb = storage_quota_mb
self._storage_quota_used_mb = storage_quota_used_mb
self._hide_final_grades = hide_final_grades
self._license = license
self._allow_student_assignment_edits = allow_student_assignment_edits
self._allow_wiki_comments = allow_wiki_comments
self._allow_student_forum_attachments = allow_student_forum_attachments
self._open_enrollment = open_enrollment
self._self_enrollment = self_enrollment
self._restrict_enrollments_to_course_dates = (
restrict_enrollments_to_course_dates
)
self._course_format = course_format
self._access_restricted_by_date = access_restricted_by_date
self._time_zone = time_zone
self._blueprint = blueprint
self._blueprint_restrictions = blueprint_restrictions
self._blueprint_restrictions_by_object_type = (
blueprint_restrictions_by_object_type
)
self._template = template
self.logger = logging.getLogger("py3canvas.Course")
@property
def id(self):
"""the unique identifier for the course."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def sis_course_id(self):
"""the SIS identifier for the course, if defined. This field is only included if the user has permission to view SIS information."""
return self._sis_course_id
@sis_course_id.setter
def sis_course_id(self, value):
"""Setter for sis_course_id property."""
self.logger.warn(
"Setting values on sis_course_id will NOT update the remote Canvas instance."
)
self._sis_course_id = value
@property
def uuid(self):
"""the UUID of the course."""
return self._uuid
@uuid.setter
def uuid(self, value):
"""Setter for uuid property."""
self.logger.warn(
"Setting values on uuid will NOT update the remote Canvas instance."
)
self._uuid = value
@property
def integration_id(self):
"""the integration identifier for the course, if defined. This field is only included if the user has permission to view SIS information."""
return self._integration_id
@integration_id.setter
def integration_id(self, value):
"""Setter for integration_id property."""
self.logger.warn(
"Setting values on integration_id will NOT update the remote Canvas instance."
)
self._integration_id = value
@property
def sis_import_id(self):
"""the unique identifier for the SIS import. This field is only included if the user has permission to manage SIS information."""
return self._sis_import_id
@sis_import_id.setter
def sis_import_id(self, value):
"""Setter for sis_import_id property."""
self.logger.warn(
"Setting values on sis_import_id will NOT update the remote Canvas instance."
)
self._sis_import_id = value
@property
def name(self):
"""the full name of the course."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn(
"Setting values on name will NOT update the remote Canvas instance."
)
self._name = value
@property
def course_code(self):
"""the course code."""
return self._course_code
@course_code.setter
def course_code(self, value):
"""Setter for course_code property."""
self.logger.warn(
"Setting values on course_code will NOT update the remote Canvas instance."
)
self._course_code = value
@property
def workflow_state(self):
"""the current state of the course one of 'unpublished', 'available', 'completed', or 'deleted'."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn(
"Setting values on workflow_state will NOT update the remote Canvas instance."
)
self._workflow_state = value
@property
def account_id(self):
"""the account associated with the course."""
return self._account_id
@account_id.setter
def account_id(self, value):
"""Setter for account_id property."""
self.logger.warn(
"Setting values on account_id will NOT update the remote Canvas instance."
)
self._account_id = value
@property
def root_account_id(self):
"""the root account associated with the course."""
return self._root_account_id
@root_account_id.setter
def root_account_id(self, value):
"""Setter for root_account_id property."""
self.logger.warn(
"Setting values on root_account_id will NOT update the remote Canvas instance."
)
self._root_account_id = value
@property
def enrollment_term_id(self):
"""the enrollment term associated with the course."""
return self._enrollment_term_id
@enrollment_term_id.setter
def enrollment_term_id(self, value):
"""Setter for enrollment_term_id property."""
self.logger.warn(
"Setting values on enrollment_term_id will NOT update the remote Canvas instance."
)
self._enrollment_term_id = value
@property
def grading_periods(self):
"""A list of grading periods associated with the course."""
return self._grading_periods
@grading_periods.setter
def grading_periods(self, value):
"""Setter for grading_periods property."""
self.logger.warn(
"Setting values on grading_periods will NOT update the remote Canvas instance."
)
self._grading_periods = value
@property
def grading_standard_id(self):
"""the grading standard associated with the course."""
return self._grading_standard_id
@grading_standard_id.setter
def grading_standard_id(self, value):
"""Setter for grading_standard_id property."""
self.logger.warn(
"Setting values on grading_standard_id will NOT update the remote Canvas instance."
)
self._grading_standard_id = value
@property
def grade_passback_setting(self):
"""the grade_passback_setting set on the course."""
return self._grade_passback_setting
@grade_passback_setting.setter
def grade_passback_setting(self, value):
"""Setter for grade_passback_setting property."""
self.logger.warn(
"Setting values on grade_passback_setting will NOT update the remote Canvas instance."
)
self._grade_passback_setting = value
@property
def created_at(self):
"""the date the course was created."""
return self._created_at
@created_at.setter
def created_at(self, value):
"""Setter for created_at property."""
self.logger.warn(
"Setting values on created_at will NOT update the remote Canvas instance."
)
self._created_at = value
@property
def start_at(self):
"""the start date for the course, if applicable."""
return self._start_at
@start_at.setter
def start_at(self, value):
"""Setter for start_at property."""
self.logger.warn(
"Setting values on start_at will NOT update the remote Canvas instance."
)
self._start_at = value
@property
def end_at(self):
"""the end date for the course, if applicable."""
return self._end_at
@end_at.setter
def end_at(self, value):
"""Setter for end_at property."""
self.logger.warn(
"Setting values on end_at will NOT update the remote Canvas instance."
)
self._end_at = value
@property
def locale(self):
"""the course-set locale, if applicable."""
return self._locale
@locale.setter
def locale(self, value):
"""Setter for locale property."""
self.logger.warn(
"Setting values on locale will NOT update the remote Canvas instance."
)
self._locale = value
@property
def enrollments(self):
"""A list of enrollments linking the current user to the course. for student enrollments, grading information may be included if include[]=total_scores."""
return self._enrollments
@enrollments.setter
def enrollments(self, value):
"""Setter for enrollments property."""
self.logger.warn(
"Setting values on enrollments will NOT update the remote Canvas instance."
)
self._enrollments = value
@property
def total_students(self):
"""optional: the total number of active and invited students in the course."""
return self._total_students
@total_students.setter
def total_students(self, value):
"""Setter for total_students property."""
self.logger.warn(
"Setting values on total_students will NOT update the remote Canvas instance."
)
self._total_students = value
@property
def calendar(self):
"""course calendar."""
return self._calendar
@calendar.setter
def calendar(self, value):
"""Setter for calendar property."""
self.logger.warn(
"Setting values on calendar will NOT update the remote Canvas instance."
)
self._calendar = value
@property
def default_view(self):
"""the type of page that users will see when they first visit the course - 'feed': Recent Activity Dashboard - 'wiki': Wiki Front Page - 'modules': Course Modules/Sections Page - 'assignments': Course Assignments List - 'syllabus': Course Syllabus Page other types may be added in the future."""
return self._default_view
@default_view.setter
def default_view(self, value):
"""Setter for default_view property."""
self.logger.warn(
"Setting values on default_view will NOT update the remote Canvas instance."
)
self._default_view = value
@property
def syllabus_body(self):
"""optional: user-generated HTML for the course syllabus."""
return self._syllabus_body
@syllabus_body.setter
def syllabus_body(self, value):
"""Setter for syllabus_body property."""
self.logger.warn(
"Setting values on syllabus_body will NOT update the remote Canvas instance."
)
self._syllabus_body = value
@property
def needs_grading_count(self):
"""optional: the number of submissions needing grading returned only if the current user has grading rights and include[]=needs_grading_count."""
return self._needs_grading_count
@needs_grading_count.setter
def needs_grading_count(self, value):
"""Setter for needs_grading_count property."""
self.logger.warn(
"Setting values on needs_grading_count will NOT update the remote Canvas instance."
)
self._needs_grading_count = value
@property
def term(self):
"""optional: the enrollment term object for the course returned only if include[]=term."""
return self._term
@term.setter
def term(self, value):
"""Setter for term property."""
self.logger.warn(
"Setting values on term will NOT update the remote Canvas instance."
)
self._term = value
@property
def course_progress(self):
"""optional: information on progress through the course returned only if include[]=course_progress."""
return self._course_progress
@course_progress.setter
def course_progress(self, value):
"""Setter for course_progress property."""
self.logger.warn(
"Setting values on course_progress will NOT update the remote Canvas instance."
)
self._course_progress = value
@property
def apply_assignment_group_weights(self):
"""weight final grade based on assignment group percentages."""
return self._apply_assignment_group_weights
@apply_assignment_group_weights.setter
def apply_assignment_group_weights(self, value):
"""Setter for apply_assignment_group_weights property."""
self.logger.warn(
"Setting values on apply_assignment_group_weights will NOT update the remote Canvas instance."
)
self._apply_assignment_group_weights = value
@property
def permissions(self):
"""optional: the permissions the user has for the course. returned only for a single course and include[]=permissions."""
return self._permissions
@permissions.setter
def permissions(self, value):
"""Setter for permissions property."""
self.logger.warn(
"Setting values on permissions will NOT update the remote Canvas instance."
)
self._permissions = value
@property
def is_public(self):
"""is_public."""
return self._is_public
@is_public.setter
def is_public(self, value):
"""Setter for is_public property."""
self.logger.warn(
"Setting values on is_public will NOT update the remote Canvas instance."
)
self._is_public = value
@property
def is_public_to_auth_users(self):
"""is_public_to_auth_users."""
return self._is_public_to_auth_users
@is_public_to_auth_users.setter
def is_public_to_auth_users(self, value):
"""Setter for is_public_to_auth_users property."""
self.logger.warn(
"Setting values on is_public_to_auth_users will NOT update the remote Canvas instance."
)
self._is_public_to_auth_users = value
@property
def public_syllabus(self):
"""public_syllabus."""
return self._public_syllabus
@public_syllabus.setter
def public_syllabus(self, value):
"""Setter for public_syllabus property."""
self.logger.warn(
"Setting values on public_syllabus will NOT update the remote Canvas instance."
)
self._public_syllabus = value
@property
def public_syllabus_to_auth(self):
"""public_syllabus_to_auth."""
return self._public_syllabus_to_auth
@public_syllabus_to_auth.setter
def public_syllabus_to_auth(self, value):
"""Setter for public_syllabus_to_auth property."""
self.logger.warn(
"Setting values on public_syllabus_to_auth will NOT update the remote Canvas instance."
)
self._public_syllabus_to_auth = value
@property
def public_description(self):
"""optional: the public description of the course."""
return self._public_description
@public_description.setter
def public_description(self, value):
"""Setter for public_description property."""
self.logger.warn(
"Setting values on public_description will NOT update the remote Canvas instance."
)
self._public_description = value
@property
def storage_quota_mb(self):
"""storage_quota_mb."""
return self._storage_quota_mb
@storage_quota_mb.setter
def storage_quota_mb(self, value):
"""Setter for storage_quota_mb property."""
self.logger.warn(
"Setting values on storage_quota_mb will NOT update the remote Canvas instance."
)
self._storage_quota_mb = value
@property
def storage_quota_used_mb(self):
"""storage_quota_used_mb."""
return self._storage_quota_used_mb
@storage_quota_used_mb.setter
def storage_quota_used_mb(self, value):
"""Setter for storage_quota_used_mb property."""
self.logger.warn(
"Setting values on storage_quota_used_mb will NOT update the remote Canvas instance."
)
self._storage_quota_used_mb = value
@property
def hide_final_grades(self):
"""hide_final_grades."""
return self._hide_final_grades
@hide_final_grades.setter
def hide_final_grades(self, value):
"""Setter for hide_final_grades property."""
self.logger.warn(
"Setting values on hide_final_grades will NOT update the remote Canvas instance."
)
self._hide_final_grades = value
@property
def license(self):
"""license."""
return self._license
@license.setter
def license(self, value):
"""Setter for license property."""
self.logger.warn(
"Setting values on license will NOT update the remote Canvas instance."
)
self._license = value
@property
def allow_student_assignment_edits(self):
"""allow_student_assignment_edits."""
return self._allow_student_assignment_edits
@allow_student_assignment_edits.setter
def allow_student_assignment_edits(self, value):
"""Setter for allow_student_assignment_edits property."""
self.logger.warn(
"Setting values on allow_student_assignment_edits will NOT update the remote Canvas instance."
)
self._allow_student_assignment_edits = value
@property
def allow_wiki_comments(self):
"""allow_wiki_comments."""
return self._allow_wiki_comments
@allow_wiki_comments.setter
def allow_wiki_comments(self, value):
"""Setter for allow_wiki_comments property."""
self.logger.warn(
"Setting values on allow_wiki_comments will NOT update the remote Canvas instance."
)
self._allow_wiki_comments = value
@property
def allow_student_forum_attachments(self):
"""allow_student_forum_attachments."""
return self._allow_student_forum_attachments
@allow_student_forum_attachments.setter
def allow_student_forum_attachments(self, value):
"""Setter for allow_student_forum_attachments property."""
self.logger.warn(
"Setting values on allow_student_forum_attachments will NOT update the remote Canvas instance."
)
self._allow_student_forum_attachments = value
@property
def open_enrollment(self):
"""open_enrollment."""
return self._open_enrollment
@open_enrollment.setter
def open_enrollment(self, value):
"""Setter for open_enrollment property."""
self.logger.warn(
"Setting values on open_enrollment will NOT update the remote Canvas instance."
)
self._open_enrollment = value
@property
def self_enrollment(self):
"""self_enrollment."""
return self._self_enrollment
@self_enrollment.setter
def self_enrollment(self, value):
"""Setter for self_enrollment property."""
self.logger.warn(
"Setting values on self_enrollment will NOT update the remote Canvas instance."
)
self._self_enrollment = value
@property
def restrict_enrollments_to_course_dates(self):
"""restrict_enrollments_to_course_dates."""
return self._restrict_enrollments_to_course_dates
@restrict_enrollments_to_course_dates.setter
def restrict_enrollments_to_course_dates(self, value):
"""Setter for restrict_enrollments_to_course_dates property."""
self.logger.warn(
"Setting values on restrict_enrollments_to_course_dates will NOT update the remote Canvas instance."
)
self._restrict_enrollments_to_course_dates = value
@property
def course_format(self):
"""course_format."""
return self._course_format
@course_format.setter
def course_format(self, value):
"""Setter for course_format property."""
self.logger.warn(
"Setting values on course_format will NOT update the remote Canvas instance."
)
self._course_format = value
@property
def access_restricted_by_date(self):
"""optional: this will be true if this user is currently prevented from viewing the course because of date restriction settings."""
return self._access_restricted_by_date
@access_restricted_by_date.setter
def access_restricted_by_date(self, value):
"""Setter for access_restricted_by_date property."""
self.logger.warn(
"Setting values on access_restricted_by_date will NOT update the remote Canvas instance."
)
self._access_restricted_by_date = value
@property
def time_zone(self):
"""The course's IANA time zone name."""
return self._time_zone
@time_zone.setter
def time_zone(self, value):
"""Setter for time_zone property."""
self.logger.warn(
"Setting values on time_zone will NOT update the remote Canvas instance."
)
self._time_zone = value
@property
def blueprint(self):
"""optional: whether the course is set as a Blueprint Course (blueprint fields require the Blueprint Courses feature)."""
return self._blueprint
@blueprint.setter
def blueprint(self, value):
"""Setter for blueprint property."""
self.logger.warn(
"Setting values on blueprint will NOT update the remote Canvas instance."
)
self._blueprint = value
@property
def blueprint_restrictions(self):
"""optional: Set of restrictions applied to all locked course objects."""
return self._blueprint_restrictions
@blueprint_restrictions.setter
def blueprint_restrictions(self, value):
"""Setter for blueprint_restrictions property."""
self.logger.warn(
"Setting values on blueprint_restrictions will NOT update the remote Canvas instance."
)
self._blueprint_restrictions = value
@property
def blueprint_restrictions_by_object_type(self):
"""optional: Sets of restrictions differentiated by object type applied to locked course objects."""
return self._blueprint_restrictions_by_object_type
@blueprint_restrictions_by_object_type.setter
def blueprint_restrictions_by_object_type(self, value):
"""Setter for blueprint_restrictions_by_object_type property."""
self.logger.warn(
"Setting values on blueprint_restrictions_by_object_type will NOT update the remote Canvas instance."
)
self._blueprint_restrictions_by_object_type = value
@property
def template(self):
"""optional: whether the course is set as a template (requires the Course Templates feature)."""
return self._template
@template.setter
def template(self, value):
"""Setter for template property."""
self.logger.warn(
"Setting values on template will NOT update the remote Canvas instance."
)
self._template = value
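# A minimal usage sketch (hypothetical values; in practice py3canvas builds
# Course objects from Canvas API JSON). Assigning to a property only mutates
# the local object and logs a warning; it never calls the Canvas API.
def _course_usage_example():
    course = Course(id=1, name="Intro to Python", workflow_state="available")
    course.name = "Intro to Python 3"  # logs the "will NOT update" warning
    return course.name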
class Calendarlink(BaseModel):
"""Calendarlink Model."""
def __init__(self, ics=None):
"""Init method for Calendarlink class."""
self._ics = ics
self.logger = logging.getLogger("py3canvas.Calendarlink")
@property
def ics(self):
"""The URL of the calendar in ICS format."""
return self._ics
@ics.setter
def ics(self, value):
"""Setter for ics property."""
self.logger.warn(
"Setting values on ics will NOT update the remote Canvas instance."
)
self._ics = value
|
tylerclair/py3canvas
|
py3canvas/apis/courses.py
|
Python
|
mit
| 129,580
|
[
"VisIt"
] |
cb6f86e8512b50205433ebf237dbea622da188c1638c8ca5c1add8555bb68cb8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send conditional template to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cnos_conditional_template
short_description: Manage switch configuration using templates based on condition on devices running Lenovo CNOS
description:
- This module allows you to work with the running configuration of a switch. It provides a way to
execute a set of CNOS commands on a switch by evaluating the current running configuration and
      executing the commands only if the specific settings have not already been configured.
The configuration source can be a set of commands or a template written in the Jinja2 templating language.
This module functions the same as the cnos_template module.
The only exception is that the following inventory variable can be specified
      ["condition = <flag string>"]
When this inventory variable is specified as the variable of a task, the template is executed for
the network element that matches the flag string. Usually, templates are used when commands are the
same across a group of network devices. When there is a requirement to skip the execution of the
template on one or more devices, it is recommended to use this module.
This module uses SSH to manage network device configuration.
      For more information about this module from Lenovo and customizing its usage for your
      use cases, please visit our [User Guide]
(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_conditional_template.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
commandfile:
description:
- This specifies the path to the CNOS command file which needs to be applied. This usually
comes from the commands folder. Generally this file is the output of the variables applied
on a template file. So this command is preceded by a template module.
The command file must contain the Ansible keyword {{ inventory_hostname }} and the condition
flag in its filename to ensure that the command file is unique for each switch and condition.
If this is omitted, the command file will be overwritten during iteration. For example,
commandfile=./commands/clos_leaf_bgp_{{ inventory_hostname }}_LP21_commands.txt
required: true
default: Null
condition:
description:
- If you specify condition=<flag string> in the inventory file against any device, the template
execution is done for that device in case it matches the flag setting for that task.
required: true
default: Null
flag:
description:
- If a task needs to be executed, you have to set the flag the same as it is specified in
the inventory for that device.
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_conditional_template. These are written in the main.yml file of the tasks directory.
---
- name: Applying CLI template on VLAG Tier1 Leaf Switch1
cnos_conditional_template:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/vlag_1tier_leaf_switch1_{{ inventory_hostname }}_output.txt"
condition: "{{ hostvars[inventory_hostname]['condition']}}"
flag: "leaf_switch1"
commandfile: "./commands/vlag_1tier_leaf_switch1_{{ inventory_hostname }}_commands.txt"
enablePassword: "anil"
stp_mode1: "disable"
port_range1: "17,18,29,30"
portchannel_interface_number1: 1001
portchannel_mode1: active
slot_chassis_number1: 1/48
switchport_mode1: trunk
'''
RETURN = '''
return value: |
On successful execution, the method returns a message in JSON format
[Template Applied.]
Upon any failure, the method returns an error display string.
'''
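# Example inventory entry (hypothetical host and credentials) showing how the
# condition flag described in DOCUMENTATION is attached to a device:
#
#   [leaf_switches]
#   10.240.178.74 username=admin password=secret deviceType=g8272_cnos condition=leaf_switch1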
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except ImportError:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
commandfile=dict(required=True),
outputfile=dict(required=True),
condition=dict(required=True),
flag=dict(required=True),
host=dict(required=True),
deviceType=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
condition = module.params['condition']
flag = module.params['flag']
commandfile = module.params['commandfile']
deviceType = module.params['deviceType']
outputfile = module.params['outputfile']
hostIP = module.params['host']
output = ""
# Here comes the logic against which a template is
# conditionally executed for right Network element.
    if condition != flag:
        module.exit_json(changed=False, msg="Template Skipped for this value")
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
    # Send commands one by one
    with open(commandfile, "r") as f:
        for line in f:
            # Omit the comment lines in the template file
            if not line.startswith("#"):
                # cnos.debugOutput(line)
                command = line
                if not line.endswith("\n"):
                    command = command + "\n"
                response = cnos.waitForDeviceResponse(command, "#", 2, remote_conn)
                errorMsg = cnos.checkOutputForError(response)
                output = output + response
                if errorMsg is not None:
                    break
    # To cater to the multi-command case
# Write to memory
output = output + cnos.waitForDeviceResponse("save\n", "#", 3, remote_conn)
    # Write output to file
    with open(outputfile, "a") as outfile:
        outfile.write(output)
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Template Applied")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
adityacs/ansible
|
lib/ansible/modules/network/lenovo/cnos_conditional_template.py
|
Python
|
gpl-3.0
| 8,539
|
[
"VisIt"
] |
e2366a0b81b1055e751b56501cb268cf54a2598a5ef2dc62200225a41db7075a
|
#!/usr/bin/env python
# adds episodes from an external source, like a json file or url.
"""
fields:
title - Talk title
speakers - list of:
name - person's name.
email - email address (hide behind auth)
twitter_id - twitter @username
bio - info about the person
picture_url - head shot
summary - short description of talk, 1 or 2 lines.
description - description of the talk (paragraphs are fine, markdown great)
tags - list of search terms, including sub topics briefly discussed in the talk.
room - room as described/labeled by the venue
room_alias - room as described/labeled on the conference site
start - '%Y-%m-%dT%H:%M:%S' "2014-11-15T16:35:00",
end - (provide end or duration)
duration - int minutes (preferred)
priority - 0=no video, 5 = maybe video, 9=make sure this gets videod.
released - speakers have given permission to record and distribute.
license - CC license
conf_key - PK in source database - unique, used to update this item
conf_url - URL of talk page
language - Spoken language of the talk ("English")
"""
"""
NOTE: In general it is better to build the export as simple as possible,
even at the expense of deviating from the above fields. Exporting extra
fields is just fine. They will be ignored, or maybe I will use them in
a future version.
For fields you don't have, plug in a value. If you don't have 'released',
give me "Yes" and then let the presenters know.
End and Duration: give me what you have in your database
and derive the other one if it isn't too much trouble.
I'll use it to verify the transformations.
"""
"""
datetime and json:
There is an issue here because json doesn't define a date format. Do whatever makes the server-side code smallest and easiest to code. Easy-to-read data is good too.
Here is PyCon 2010's implementation:
datetime objects are represented as a time tuple of six elements:
(year, month, day, hour, min, sec)
"start": [2010, 2, 19, 9, 30, 0],
"duration": 30, # in min
http://us.pycon.org/2010/conference/schedule/json/
Easy to code, kinda hard to read.
I parse it with
start = datetime.datetime(*row['start'])
good.
This is also good:
json: Start: "2011-06-09 19:00:00"
parser: datetime.datetime.strptime( x, '%Y-%m-%d %H:%M:%S' )
good.
Easy to read, harder to parse/assemble into start duration.
http://2010.osdc.com.au/program/json
# Day: "Tue 23 Nov"
# Time: "09:00 - 17:00"
but if that is how it is stored on the server, don't try to transform it.
Again, keep the server side code simple.
I can fix my consumer easier than I can get someone else's website updated.
"""
def mk_fieldlist():
fields = []
for line in __doc__.split('\n'):
if '-' in line:
field,desc = line.split(' - ',1)
fields.append(field)
print("""printf '{}\\n'|xclip -selection clipboard""".format('\\t'.join(fields)))
# FireFox plugin to view .json data:
# https://addons.mozilla.org/en-US/firefox/addon/10869/
import datetime
import csv
import requests
import html.parser
import os
import urllib.parse
from dateutil.parser import parse
# import dateutil
import pprint
from django.utils.html import strip_tags
from django.template.defaultfilters import slugify
import operator
import xml.etree.ElementTree
import json
# import gdata.calendar.client
# import gdata.calendar.service
# for google calendar:
import pw
# import lxml.etree
import process
from main.models import Client, Show, Location, Episode, Raw_File
def goog(show,url):
# read from goog spreadsheet api
loc,created = Location.objects.get_or_create(
sequence = 1,
name='Illinois Room A', slug='room_a' )
if created: show.locations.add(loc)
loc,created = Location.objects.get_or_create(
sequence = 2,
name='Illinois Room B', slug='room_b' )
if created: show.locations.add(loc)
client = gdata.calendar.service.CalendarService()
client.ClientLogin(pw.goocal_email, pw.goocal_password, client.source)
fcal = client.GetAllCalendarsFeed().entry[7]
print("fcal title:", fcal.title.text)
a_link = fcal.GetAlternateLink()
feed = client.GetCalendarEventFeed(a_link.href)
seq=0
for event in feed.entry:
name = event.title.text + 's talk'
authors = event.title.text
wheres = event.where
room = wheres[0].value_string
location = Location.objects.get(name=room)
goo_start = event.when[0].start_time
goo_end = event.when[0].end_time
print(goo_start)
        start = datetime.datetime.strptime(goo_start, '%Y-%m-%dT%H:%M:%S.000-05:00')
        end = datetime.datetime.strptime(goo_end, '%Y-%m-%dT%H:%M:%S.000-05:00')
        delta = end - start
        minutes = delta.seconds // 60  # - 5 for talk slot that includes break
        hours = minutes // 60
        minutes -= hours * 60
        duration = "%s:%s:00" % (hours, minutes)
# print name, authors, location, start, duration
print("%s: %s - %s" % ( authors, location, start.time() ))
seq+=1
# broke this, use add_eps()
episode,created = xEpisode.objects.get_or_create(
show=show,
location=location,
start=start,
authors=authors)
if created:
episode.name=name
episode.released=released
episode.start=start
episode.duration=duration
episode.sequence=seq
episode.state=1
episode.save()
return
class add_eps(process.process):
# helpers
def dump_keys(self, schedule):
# try to print out what keys match and don't match
# prints out the veyepar side of the field map list
# so you can cut/paste it into the show specific code.
# if the json object is one big key:value, pull the list out
try:
keys= list(schedule.keys())
key = keys[0]
# schedule=schedule['schedule']
schedule=schedule[key]
except AttributeError as k:
# AttributeError: 'list' object has no attribute 'keys'
pass
except TypeError as k:
# TypeError: list indices must be integers, not str
pass
except KeyError as k:
print(k)
if k != 'schedule': raise
s_keys = set()
for s in schedule:
print(s)
s_keys.update(list(s.keys()))
print("keys found in input:")
print(s_keys)
for k in s_keys:
print(("('{}',''),".format(k)))
print("\n")
v_keys=('id',
'location','sequence',
'name','slug',
'authors','emails', 'twitter_id',
'start','duration',
'released', 'license', 'tags',
'conf_key', 'conf_url',
'host_url', 'public_url',
)
# for f,g in field_maps:
# print "('%s','%s')," % (g,f)
print("keys match 1:1 with veyepar names:")
print([k for k in v_keys if k in s_keys])
for k in [k for k in v_keys if k not in s_keys]:
print(("('{}',''),".format(k)))
print("\n")
for k in v_keys:
k2 = k if k in s_keys else ''
print("('%s','%s')," % (k2,k))
print()
# lines to mix n match in the editor
for k in s_keys:
print("('%s'," % (k,))
print()
for k in v_keys:
print("'%s')," % k)
print()
return
def add_rooms(self, rooms, show):
if self.options.test:
print("test mode, not adding locations to db\n")
return
if not self.options.update:
print("no --update, not adding locations to db\n")
return
seq=0
for room in rooms:
if self.options.verbose: print(room)
seq+=10
            # __iexact won't work with get_or_create so don't try to use it
try:
loc = Location.objects.get(name__iexact=room)
except Location.DoesNotExist:
loc = Location(name=room, sequence=seq)
loc.save()
show.locations.add(loc)
show.save()
def generic_events(self, schedule, field_maps ):
# step one in transforming the show's data into veyepar data
# field_maps is a list of (source,dest) field names
# if source is empty, the create the dest as ''
# if there is an error (like key does not exist in source),
# create dest as None
# TODO:
# consider only creating destination when there is proper source.
        # current code makes add_eps() simpler.
# something has to contend with whacky source,
# currently it is this.
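        # Example (hypothetical): field_maps = [('title', 'name'), ('', 'tags')]
        # copies row['title'] into event['name'] and creates event['tags'] = ''.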
events=[]
for row in schedule:
if self.options.verbose: print(row)
event={}
for jk,vk in field_maps: # json key, veyepar key
if jk:
# if self.options.verbose: print jk, row[jk]
try:
event[vk] = row[jk]
except:
event[vk] = None
# pass
else:
event[vk] = ''
# save the original row so that we can sanity check end time.
# or transform data
event['raw'] = row
events.append(event)
return events
def add_eps(self, schedule, show):
"""
Given a list of dicts,
        diff against current veyepar db
or update the db.
"""
# options:
        # test - do nothing. Test is for testing the transformations.
# update - update the db.
# no update will show diff between real and db
# Notes:
# location - room name as stored in Location model.
# considering changing it to the ID of the location record.
#
        # raw - the row from the input file before any transformations.
        # TODO:
        # add a "lock" to prevent updates to a record.
        # need to figure out what to do with collisions.
# only these fields in the dict are used, the rest are ignored.
fields=(
# 'state',
'name', 'authors',
'emails',
'twitter_id',
'description',
'start','duration',
'released',
'license',
'conf_url', 'tags',
# 'host_url', # for pycon.ca youtube URLs
)
if self.options.test:
print("test mode, not adding to db")
return
seq=0
for row in schedule:
if self.options.verbose: pprint.pprint( row )
# try to find an existing item in the db
# this assumes we have some handle on the data
episodes = Episode.objects.filter(
show=show,
conf_key=row['conf_key'],
)
location=Location.objects.get(
name__iexact=row['location'])
if episodes:
if len(episodes)>1:
# There should not be more than 1.
                    # this means the unique ID is not unique,
                    # and there is a dupe in the veyepar db.
# import pdb; pdb.set_trace()
import code; code.interact(local=locals())
# then continue on.
episode = episodes[0]
# have an existing episode,
# either update it or diff it.
# get rid of garbage that snuck into the db.
if episode.emails == "<redacted>":
episode.emails = ""
# special case for email: don't blank it out
# use what is in the db.
                # up here and now below so the diff doesn't complain
if episode.emails and not row['emails']:
row['emails'] = episode.emails
else:
episode = None
# this is the show diff part.
diff=False
if episode is None:
diff=True
print("{conf_key} not in db, name:{name}\n{location}".format(
**row))
print()
else:
# print("tags", episode.tags.__repr__(), row['tags'].__repr__())
# check for diffs
diff_fields=[]
if episode.location is None or \
episode.location.name.upper() != row['location'].upper():
diff=True
if episode.location is None:
diff_fields.append(('loc',
"(None)", row['location']))
else:
diff_fields.append(('loc',
episode.location.name, row['location']))
# print(episode.location.name, row['location'])
for f in fields:
# veyepar, remote
a1,a2 = getattr(episode,f), row[f]
if f=="description":
a1 = a1.replace('\r','')
a2 = a2.replace('\r','')
if (a1 or a2) and (a1 != a2):
diff=True
diff_fields.append((f,a1,a2))
# report if different
if diff:
print('veyepar #id name: #%s %s' % (
episode.id, episode.name))
# if self.show.slug=="debconf15":
# host= "encoding2.dc15.debconf.org"
# else:
host= "veyepar.debian.org"
print("http://%s/main/E/%s/" % ( host, episode.id, ))
print(episode.conf_key, episode.conf_url)
if self.options.verbose:
pprint.pprint( diff_fields )
for f,a1,a2 in diff_fields:
if not isinstance(a1,str):
print('veyepar {0}: {1}'.format(f,a1))
print(' conf {0}: {1}'.format(f,a2))
else:
print(f)
if a2 is None or max(len(a1),len(a2)) < 160:
# print a1
# print a2
print('veyepar {0}: {1}'.format(f,a1))
print(' conf {0}: {1}'.format(f,a2))
else:
# long string (prolly description)
for i,cs in enumerate(zip(a1,a2)):
if cs[0] != cs[1]:
"""
print \
"#1, diff found at pos {0}:\n{1}\n{2}".format(
i,cs[0].__repr__(),
cs[1].__repr__())
"""
print("diff found at pos {0}:\nveyepar: {1}\n conf: {2}".format(
i,a1[i:i+80].__repr__(),
a2[i:i+80].__repr__()))
break
print()
"""
if diff and episode.state > 5: # add_to_richard
print(u"not updating conf_key: {conf_key}, name:{name}".format(**row))
print(episode.public_url)
print()
continue
"""
if self.options.update and diff:
if episode is None:
print("adding conf_key: %(conf_key)s, name:%(name)s" % row)
# I am not sure why some fields are here in .create
# and the rest are in setattr( episode, f, row[f] )
# name is here so .save() will create a slug
episode = Episode.objects.create(
show=show, conf_key=row['conf_key'],
start=row['start'],
duration=row['duration'],
name=row['name'],
twitter_id=row['twitter_id'],
language='',
summary=row['description'],
)
episode.sequence=seq
episode.state=1
seq+=1
else:
print(("updating conf_key: {conf_key}, name:{name}").format(**row))
episode.location = location
# copy all the fields
# from the source row to the episode object
for f in fields:
setattr( episode, f, row[f] )
# save whatever data was passed
episode.conf_meta=json.dumps(row['raw'])
episode.save()
def addlocs(self, schedule, show):
""" pycon 2010
seq=0
locs=d['rooms']
for l_id in locs:
l = locs[l_id]
seq+=1
name = l['name']
slug=fnify(name)
slug=slug.replace('_','')
if slug in ["Centennial","Hanover F+G"]:
continue
if slug =="RegencyV":
slug="RegencyVI"
if self.options.verbose: print name, slug
if self.options.test:
# hacked to verify database after cat made some changes.
loc = Location.objects.get(
name=name, slug=slug)
else:
loc,created = Location.objects.get_or_create(
name=name, slug=slug)
if created:
loc.sequence=seq
loc.save()
# save the loc object into the dict
# so that it can be used for the FK object for episodes
l['loc']=loc
"""
seq=0
for row in schedule:
# row=row['node']
if self.options.verbose: print(row)
room = row['room']
if room in [
'',
"Napalese Pagoda",
"Z4 Atrium",
"Maritime Museum",
"Grand Hall - BCEC",
]: continue
loc,created = Location.objects.get_or_create(name=room)
if created:
seq+=1
loc.sequence=seq
loc.save()
show.locations.add(loc)
show.save()
else:
print(row)
def talk_time(self, day, time):
# Day: "Wed 24 Nov"
# Time: "09:00 - 10:00"
start_ts, end_ts = time.split('-')
start_dts = day + ' 2010 ' + start_ts
end_dts = day + ' 2010 ' + end_ts
start_dt = parse(start_dts)
end_dt = parse(end_dts)
delta = end_dt - start_dt
        minutes = delta.seconds // 60  # - 5 for talk slot that includes break
        duration = "00:%s:00" % (minutes)
return start_dt, duration
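    # Example: talk_time("Wed 24 Nov", "09:00 - 10:00") returns
    # (datetime.datetime(2010, 11, 24, 9, 0), "00:60:00")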
# start=datetime.datetime.strptime(row['Start'], '%Y-%m-%d %H:%M:%S' )
# start=datetime.datetime.strptime(row['Start'],'%m/%d/%y %I:%M %p')
# pycon dates:
# [ 2010, 9, 7, 15, 0 ]
# start = datetime.datetime(*row['start'])
# minutes = row['duration']
# adjust for time zone:
# start += datetime.timedelta(hours=-7,minutes=0)
def str2bool(self, tf):
return {'true':True,
"false":False}[tf]
def snake_bites(self, schedule,):
print("Snake Bites")
fields=(
'location',
'sequence',
'conf_key','host_url',
'state',
'authors',
'name','slug',
'authors',
'emails',
'description',
'released', 'license',
'start','duration',
'conf_key',
'conf_url', 'tags',
# 'public_url'
)
events=[]
for row in schedule:
pk = row['pk']
row = row['fields']
if self.options.verbose: print(row)
event={}
for f in fields:
event[f] = row[f]
            # fields that don't flow through json that nicely.
if not event['conf_key']: event['conf_key'] = "pk{}".format(pk)
event['start'] = datetime.datetime.strptime(
row['start'], '%Y-%m-%dT%H:%M:%S' )
events.append(event)
return events
def zoo_events(self, schedule):
events=[]
for row in schedule:
if self.options.verbose: print(row)
if row['Title'] in [
'Registration',
'Morning Tea', "Lunch", 'Afternoon Tea',
'Speakers Dinner', 'Penguin Dinner',
'Professional Delegates Networking Session',
# 'Sysadmin Miniconf'
]:
continue
if "AGM" in row['Title']:
continue
# if "Lightning talks" in row['Title']:
# continue
# if "Conference Close" in row['Title']:
# continue
event={}
# from /zookeepr/controllers/schedule.py
# row['Id'] = schedule.id
# row['Event'] = schedule.event_id
# I think Id is what is useful
event['conf_key'] = row['Id']
event['name'] = row['Title']
event['location'] = row['Room Name']
event['start'] = datetime.datetime.strptime(
row['Start'], '%Y-%m-%d %H:%M:%S' )
event['duration'] = row['Duration']
event['authors'] = row.get('Presenters','')
if not event['authors'] and " : " in row['Title']:
if event['conf_key'] not in [207,364,]:
event['name'],event['authors'] = row['Title'].split(" : ")
# https://github.com/zookeepr/zookeepr/issues/92
event['emails'] = row.get('Presenter_emails','')
# https://github.com/zookeepr/zookeepr/issues/93
# new code.. seems I either get True or no attribute.
event['released'] = {
'True':True, 'False':False, None:None}[
row.get('video_release',None)]
# easy way:
# make True the default
event['released'] = row.get('video_release',"True") == "True"
event['license'] = "CC-BY-SA"
event['description'] = row['Description']
# there may not be a URL, like for Lunch and Keynote.
# https://github.com/zookeepr/zookeepr/issues/91
event['conf_url'] = row.get('URL','')
event['tags'] = ''
event['twitter_id'] = ''
event['raw'] = row
events.append(event)
return events
def zoo_cages(self, schedule):
rooms=[]
for row in schedule:
# row=row['node']
if self.options.verbose: print(row)
room = row['Room Name']
if room not in rooms: rooms.append(room)
if self.options.verbose: pprint.pprint(rooms)
return rooms
def get_rooms(self, schedule, key='location'):
rooms=set()
for row in schedule:
if self.options.verbose: print(row)
room = row[key]
if room is None: room = "None"
rooms.add(room)
return rooms
def symp_events(self, schedule ):
events=[]
for row in schedule:
if self.options.verbose: pprint.pprint( row )
event={}
event['id'] = row['conf_url']
# event['id'] = row['id']
event['name'] = row['title']
# event['location'] = row['room']
# if event['location']=='Plenary': event['location'] = "Cartoon 1"
# if event['location'] is None: event['location'] = "Track 1"
# if event['location']=='Plenary': event['location'] = "Track 1"
if row['room'] == "Plenary":
row['room'] = "Track I (D5)"
row['room_name'] = "Mission City Ballroom"
# event['location'] = "%s - %s" % (
# row['room_name'], row['room'] )
event['location'] = row['room']
event['start'] = datetime.datetime.strptime(
row['start_iso'], '%Y-%m-%dT%H:%M:%S' )
# if "Poster" in row["tags"]:
# event['start'] += datetime.timedelta(hours=-3)
break_min = 0 ## no time for breaks!
seconds=(row['duration'] - break_min ) * 60
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
event['duration'] = duration
event['authors'] = row['authors']
event['emails'] = row['contact']
event['released'] = row['released']
event['license'] = row['license']
event['description'] = row['description']
event['conf_key'] = row['url']
event['conf_url'] = row['url']
if event['conf_key'] is None: event['conf_key'] = ""
if event['conf_url'] is None: event['conf_url'] = ""
event['conf_key'] = event['conf_key'][-5:]
event['tags'] = ''
# save the original row so that we can sanity check end time.
event['raw'] = row
events.append(event)
return events
def ddu_events(self, schedule ):
# Drupal Down Under 2012
html_parser = html.parser.HTMLParser()
# these fields exist in both json and veyepar:
common_fields = [ 'name', 'authors', 'description',
'start', 'duration',
'released', 'license', 'tags', 'conf_key', 'conf_url']
# mapping of json to veyepar:
field_map = [
('emails','contact'),
('location','room'),
]
html_encoded_fields = [ 'name', 'authors', 'description', ]
events=[]
for row in schedule:
if self.options.verbose: print(row)
event={}
for k in common_fields:
try:
event[k] = row[k]
except KeyError:
event[k] = 'missing'
for k1,k2 in field_map:
event[k1] = row[k2]
if isinstance(event['authors'],dict):
event['authors'] = ", ".join( list(event['authors'].values()) )
if row["entities"] == "true":
for k in html_encoded_fields:
# x = html_parser.unescape('£682m')
event[k] = html_parser.unescape( event[k] )
# x = html_parser.unescape('£682m')
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%d %H:%M:%S' )
seconds=(int(event['duration'] )) * 60
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
event['duration'] = duration
event['released'] = event['released'].startswith(
"You may publish" )
event['license'] = event['license'].split('(')[1][5:-1]
event['emails']=None
# save the original row so that we can sanity check end time.
event['raw'] = row
events.append(event)
return events
def flourish_events(self, schedule ):
# flourish 2012
# these fields exist in both json and veyepar:
common_fields = [ 'name', 'description',
'authors', 'contact',
'start', 'end',
'released', 'license', 'tags', 'conf_key', 'conf_url']
# mapping of json to veyepar:
field_map = [
('emails','contact'),
('location','room'),
]
events=[]
for row in schedule:
if self.options.verbose: print(row)
event={}
for k in common_fields:
try:
event[k] = row[k]
except KeyError:
event[k] = 'missing'
for k1,k2 in field_map:
event[k1] = row[k2]
event['start'] = datetime.datetime.strptime(
event['start'], '%m/%d/%Y %H:%M:%S' )
event['end'] = datetime.datetime.strptime(
event['end'], '%m/%d/%Y %H:%M:%S' )
delta = event['end'] - event['start']
seconds=delta.seconds
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
event['duration'] = duration
# save the original row so that we can sanity check end time.
event['raw'] = row
events.append(event)
return events
def chipy_events(self, schedule ):
# mapping of json to veyepar:
field_maps = [
('id', 'conf_key'),
('title', 'name'),
('description', 'description'),
('presentors', 'authors'),
('presentors', 'emails'),
('start_time', 'start'),
('length', 'duration'),
('', 'conf_url'),
('', 'tags'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%d %H:%M:%S' )
event['authors'] = event['authors'][0]['name']
event['emails'] = event['emails'][0]['email']
event['location'] = 'room_1'
event['released'] = True
event['license'] = ''
event['duration'] = event['duration'] + ":00"
return events
def goth_events(self, schedule ):
# PyGotham 2011
field_maps = [
('room_number','location'),
('title','name'),
('full_name','authors'),
('talktype',''),
('levels',''),
('key','conf_key'),
('talk_day_time','start'),
('duration_minutes','duration'),
('talk_end_time','end'),
('outline',''),
('desc','description'),
('','conf_url'),
('','released'),
('','emails'),
('','license'),
('','tags'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%d %H:%M:%S' )
seconds=(event['duration'] -10) * 60
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
event['duration'] = duration
return events
def pct_events(self, schedule):
# pyCon Tech
# >>> schedule['events']['28'].keys()
# [u'files', u'room', u'videos', u'title', u'url', u'id', u'tags', u'shorturl', u' sponsors', u'summary', u'presenters', u'duration', u'level', u'type', u'start']
events=[]
for event_id in schedule['events']:
src_event=schedule['events'][event_id]
if self.options.verbose: print(src_event)
if src_event['type'] != 'Social Event':
event={}
# event['id'] = event_id
event['name'] = src_event['title']
event['location'] = schedule['rooms'][src_event['room']]['name']
event['start'] = datetime.datetime(*src_event['start'])
seconds=src_event['duration'] * 60
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
event['duration'] = duration
event['authors'] = src_event['presenters']
event['emails'] = ''
event['license'] = self.options.license
event['description'] = src_event['summary']
event['conf_key'] = src_event['id']
event['conf_url'] = src_event['url']
event['tags'] = ''
# save the original row so that we can sanity check end time.
event['raw'] = src_event
events.append(event)
return events
def pctech(self, schedule, show):
# importing from some other instance
rooms = [schedule['rooms'][r]['name'] for r in schedule['rooms']]
self.add_rooms(rooms,show)
events = self.pct_events(schedule)
self.add_eps(events, show)
return
def pyohio(self, schedule, show):
# print "consumer PyOhio"
rooms = self.get_rooms(schedule,'room')
rooms = [r for r in rooms if r != 'Plenary' ]
self.add_rooms(rooms,show)
events = self.symp_events(schedule)
self.add_eps(events, show)
return
def symposium(self, schedule, show):
# print "consumer symposium"
rooms = self.get_rooms(schedule,'room')
# self.add_rooms(rooms,show)
events = self.symp_events(schedule)
self.add_eps(events, show)
return
def pyconde2011(self, schedule, show):
rooms = self.get_rooms(schedule,'room')
rooms = [r for r in rooms if r != 'Plenary' ]
self.add_rooms(rooms,show)
events = self.symp_events(schedule)
for e in events:
print(e)
end = datetime.datetime.strptime(
e['raw']['end_iso'], '%Y-%m-%dT%H:%M:%S' )
td = end - e['start']
seconds=td.seconds
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
e['duration'] = duration
self.add_eps(events, show)
return
def pygotham(self, schedule, show):
# pygotham 2011
rooms = self.get_rooms(schedule,'room_number')
rooms = list(rooms)
rooms.sort()
print(rooms)
self.add_rooms(rooms,show)
events = self.goth_events(schedule)
self.add_eps(events, show)
return
def scipy_events_v1(self, schedule ):
# SciPy 2012, ver 1
# mapping of json to veyepar:
field_maps = [
('Room','location'),
('Name','name'),
('speaker',''),
('Authors','authors'),
('Contact','emails'),
('Tags','tags'),
('abstract','description'),
('Start','start'),
('Duration','duration'),
('End','end'),
('Affiliations',''),
('','conf_key'),
('','conf_url'),
('','released'),
('','license'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
# print event['raw']
# print (event['location'], event['start'])
event['conf_key'] = hash(str(event['location']) + event['start'])
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
# seconds=int(event['duration']) * 60
# hms = seconds//3600, (seconds%3600)//60, seconds%60
# duration = "%02d:%02d:%02d" % hms
# event['duration'] = duration
return events
def scipy_events(self, schedule ):
# SciPy 2012, ver 3
common_fields = [ 'name', 'description',
'authors',
'start', 'duration', 'end',
'released', 'license', 'tags', 'conf_key', ]
# mapping of json to veyepar:
field_maps = [
('contact','emails'),
('','conf_url'),
('room','location'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%d %H:%M:%S' )
event['duration'] = event['duration'] + ":00"
            # released flag flipping back to False?
# investigate later, ignore for now.
# event['released'] = event['released']!="0"
# del(event['released'])
if event['description'] is None:
event['description'] = "None"
return events
def scipy_v1(self, schedule, show):
# scipy ver 1 2011
# schedule is {'talks':[talk1, 2, 3...]}
schedule = schedule['talks']
        rooms = self.get_rooms(schedule, 'Room')
        rooms = list(rooms)
        rooms.sort()
        self.add_rooms(rooms, show)
        events = self.scipy_events_v1(schedule)
self.add_eps(events, show)
return
def scipy_v2(self, schedule, show):
# scipy ver 2 2011
for row in schedule:
if row['room'] is None:
row['room'] = "None"
rooms = self.get_rooms(schedule)
rooms = list(rooms)
rooms.sort()
self.add_rooms(rooms,show)
events = self.scipy_events(schedule)
self.add_eps(events, show)
return
def veyepar(self, schedule, show):
events = self.snake_bites(schedule)
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def desktopsummit(self, schedule, show):
rooms = set(row[2] for row in schedule)
self.add_rooms(rooms,show)
events=[]
for row in schedule:
if self.options.verbose: print(row)
event={}
event['id'] = row[0]
event['name'] = row[1]
event['location'] = row[2]
dt_format='%a, %Y-%m-%d %H:%M'
event['start'] = datetime.datetime.strptime(
row[3], dt_format)
end = datetime.datetime.strptime(
row[4], dt_format)
seconds=(end - event['start']).seconds
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
event['duration'] = duration
event['authors'] = row[5]
event['emails'] = ''
event['license'] = self.options.license
event['description'] = ''
event['conf_key'] = row[0]
event['conf_url'] = row[6]
event['tags'] = ''
# save the original row so that we can sanity check end time.
event['raw'] = row
events.append(event)
self.add_eps(events, show)
return
def ictev_2013(self, schedule, show):
field_maps = [
('Room', 'location'),
('Title', 'name'),
('Timestamp', 'start'),
('Nid', 'conf_key'),
('Presenter', 'authors'),
('Keywords', 'tags'),
('Link', 'conf_url'),
('Duration', 'duration'),
('Description', 'description'),
]
# ('Day',
# ('Time', 'start'),
# 'emails'),
# 'released'),
# 'license'),
# 'host_url'),
events = self.generic_events(schedule, field_maps)
rooms = set(row['location'] for row in events)
self.add_rooms(rooms,show)
html_parser = html.parser.HTMLParser()
for event in events:
event['conf_key'] = event['conf_key'].split('</a>')[0].split('>')[1]
event['name'] = html_parser.unescape(strip_tags( event['name'] ))
event['start'] = datetime.datetime.fromtimestamp(
int(event['start'])) + datetime.timedelta(hours=14)
event['duration'] = "00:%s:00" % ( event['duration'], )
event['conf_url'] = strip_tags(event['conf_url'])
# Bogus, but needed to pass
event['license'] = ''
event['emails'] = ''
event['released'] = True
event['tags'] = "" # strip_tags( event['tags'])
# pprint.pprint(event)
self.add_eps(events, show)
return
def ictev(self, schedule, show):
print("ictev")
# drupal down under 2012
rooms = self.get_rooms(schedule, "Room", )
self.add_rooms(rooms,show)
# print rooms
# these fields exist in both json and veyepar:
common_fields = [ ]
# mapping of json to veyepar:
        # these are veyepar to json - flip them to get the
        # (json key, veyepar key) pairs that generic_events expects
        backward_field_maps = [
            ('location', 'Room'),
            ('name', 'Title'),
            ('tags', 'Keywords'),
            ('duration', 'Duration'),
            ('conf_key', 'Nid'),
            ('conf_url', 'Link')
        ]
        field_maps = [(j, v) for v, j in backward_field_maps]
        events = self.generic_events(schedule, field_maps)
for event in events:
row = event['raw']
if self.options.verbose: print("event", event)
# authors is either a string or a dict
# if isinstance(event['authors'],dict):
# event['authors'] = ", ".join( event['authors'].values() )
#
start, duration = self.talk_time(row['Day'],row['Time'])
event['start'] = start
event['duration'] = duration
event['license'] = ''
event['authors'] = ''
event['tags'] = ''
event['description'] = ''
event['emails']=None
self.add_eps(events, show)
return
def unfold_origami_unicorn(self, schedule):
# dig out the data from
# {'phpcode_2':{label: "Duration", content: "45"}
ret_rows = []
for s in schedule:
row = {}
for k in s:
v = s[k]
field_name = v['label']
value = v['content']
print("#1", field_name, value)
row[field_name] = value
pprint.pprint(row)
ret_rows.append(row)
return ret_rows
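    # Example: [{'phpcode_2': {'label': 'Duration', 'content': '45'}}]
    # unfolds to [{'Duration': '45'}].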
def ddu(self, schedule, show):
# drupal down under 2012
rooms = self.get_rooms(schedule)
self.add_rooms(rooms,show)
events = self.ddu_events(schedule)
self.add_eps(events, show)
return
def flourish(self, schedule, show):
rooms = self.get_rooms(schedule)
self.add_rooms(rooms,show)
events = self.flourish_events(schedule)
self.add_eps(events, show)
return
def chipy(self, schedule, show):
# schedule is all meetings ever; take the most recent one
schedule = schedule[-1]['topic_set']
# pprint.pprint( schedule[0] )
rooms = ['room_1']
self.add_rooms(rooms,show)
events = self.chipy_events(schedule)
self.add_eps(events, show)
return
def chipy_v3(self, schedule, show):
schedule = max(schedule, key=operator.itemgetter('when'))
when = schedule['when']
where = schedule['where']
# ['name']
pprint.pprint( schedule['where'] )
schedule = schedule['topics']
schedule = [s for s in schedule if s['approved']]
# schedule = [s for s in schedule if s['start_time']]
for s in schedule:
print((s['title'], s['start_time']))
field_maps = [
('id', 'conf_key'),
('title', 'name'),
('description', 'description'),
('presenters', 'authors'),
('presenters', 'emails'),
('presenters', 'released'),
('license','license'),
('start_time', 'start'),
('length', 'duration'),
('', 'conf_url'),
('', 'tags'),
('', 'twitter_id'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
print("1, event:")
pprint.pprint(event)
event['location'] = where['name']
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
event['authors'] = ', '.join(
[ a['name'] for a in event['authors'] ])
event['emails'] = ', '.join(
[ a['email'] for a in event['emails']
if a['email'] ])
# if not event['emails']: # no email found
# event['emails'] = "ChiPy <chicago@python.org>"
event['released'] = all(
[ a['release'] for a in event['released'] ])
event['conf_url'] = "http://www.chipy.org/"
rooms = set(row['location'] for row in events)
self.add_rooms(rooms,show)
# __iexact won't work with get_or_create, so don't try to use it
try:
loc = Location.objects.get(name__iexact=where['name'])
loc.description = where['address']
loc.save()
except Location.DoesNotExist:
# test mode I guess
pass
self.add_eps(events, show)
return
def zoo(self, schedule, show):
# rooms=['Cafeteria', 'Caro', 'Studio', 'C001', 'T101', 'Studio 1', 'Studio 2', 'Studio 3', 'B901', 'T102', 'Mercure Ballarat', 'Mystery Location', 'Ballarat Mining Exchange']
# good rooms=['Caro', 'Studio', 'C001', 'T101', ]
# bad_rooms=['Cafeteria', 'Studio 1', 'Studio 2', 'Studio 3', 'B901', 'T102', 'Mercure Ballarat', 'Mystery Location', 'Ballarat Mining Exchange']
bad_rooms = [ 'Costa Hall Foyer',
'uncatered',
'Super Awesome Venue TBA',
'The Pier - http://www.thepiergeelong.com.au',
'Edge Bar, Western Beach Road',
]
rooms = self.zoo_cages(schedule)
print(rooms)
rooms = [r for r in rooms if r not in bad_rooms]
print(rooms)
schedule = [s for s in schedule if s['Room Name'] in rooms]
# schedule = [s for s in schedule if s['Id'] not in [185,] ]
# one-off rerun: keep only event 185 (retitled just below)
schedule = [s for s in schedule if s['Id'] in [185,] ]
schedule[0]['Title']="Security Topics in Open Cloud: Advanced Threats, 2015's Vulnerabilities, Advancements in OpenStack Trusted Computing and Hadoop Encryption"
schedule = [s for s in schedule
if s['Title'] not in [
'Short break',] ]
self.add_rooms(rooms,show)
locs=Location.objects.filter(name__in = bad_rooms)
for loc in locs:
loc.active = False
loc.save()
events = self.zoo_events(schedule)
self.add_eps(events, show)
return
def fos_events( self, schedule ):
# fosdem 14 penta
events = []
id = 0
# schedule[0] is <conference></conference>
for day in schedule[1:3]:
# >>> schedule[1].get('date')
# '2012-02-04'
start_date = day.get('date')
print(start_date)
for room in day:
for row in room:
# >>> event.find('start').text
# '10:30'
# >>> [x.tag for x in event]
"""
tags = ['start', 'duration', 'room',
'slug', 'title', 'subtitle',
'track', 'type', 'language',
'abstract', 'description',
'persons', 'links']
for tag in tags:
print tag, row.find(tag).text
"""
event={}
# event['id'] = row[0]
event['name'] = row.find('title').text
event['location'] = row.find('room').text
dt_format='%Y-%m-%d %H:%M'
event['start'] = datetime.datetime.strptime(
"%s %s" % ( start_date,row.find('start').text),
dt_format)
event['duration'] = \
"%s:00" % row.find('duration').text
persons = [p.text for p in
row.find('persons').getchildren() ]
event['authors'] = ', '.join(persons)
event['emails'] = ''
event['released'] = True
event['license'] = "cc-by"
# event['description'] = row.find('description').text
# event['description'] = row.find('abstract').text
event['description'] = row.find('description').text
if event['description'] is None:
event['description'] = ''
event['conf_key'] = row.get('id')
event['conf_url'] = 'https://fosdem.org/2014/schedule/event/%s/' % row.find('slug').text
event['tags'] = ''
# save the original row so that we can sanity check end time.
event['raw'] = row
events.append(event)
id += 1
return events
def fosdem2014(self, schedule, show):
# top of schedule is:
# <conference></conference>
# <day date="2012-02-04" index="1"></day>
# <day date="2012-02-05" index="2"></day>
# each day has a list of rooms
rooms = [ r.get('name') for r in schedule[1] ]
# remove (foo) stuff from
# for room in rooms:
# room = room.split('(')[0].strip()
# rooms = set( rooms )
# probably the same rooms the 2nd day.
# rooms = list(rooms)
# ['Janson', 'K.1.105', 'Ferrer', 'H.1301', 'H.1302']
# import code
# code.interact(local=locals())
# return
self.add_rooms(rooms,show)
# sequence the rooms
# this will whack any manual edits
if self.options.update:
seq = 1
for room in rooms:
loc = Location.objects.get(name=room,)
loc.active=True
loc.sequence=seq
loc.save()
seq+=1
events = self.fos_events(schedule)
# no recording in Java room saturday k4201
# (compare date() to a date, not a datetime, or the test never
# matches and every K4201 talk gets dropped)
events = [ event for event in events if not (
event['start'].date() == datetime.date(2014,2,1) and \
event['location'] == 'K4201') ]
self.add_eps(events, show)
return
def summit_penta_events( self, schedule ):
# dc14 summit penta based xml
# pyconza2015 dc summit penta based xml
events = []
id = 0
# schedule[0] is <conference></conference>
# for day in schedule[1:3]:
for day in schedule:
# >>> schedule[1].get('date')
# '2012-02-04'
start_date = day.get('date')
print(start_date)
for room in day:
for row in room:
if row.find('persons') is None:
continue
if self.options.verbose: print(row.get('id'))
# import code; code.interact(local=locals())
event={}
event['name'] = row.find('title').text
event['location'] = row.find('room').text
dt_format='%Y-%m-%d %H:%M'
event['start'] = datetime.datetime.strptime(
"%s %s" % ( start_date,row.find('start').text),
dt_format)
event['duration'] = \
row.find('duration').text + ":00"
persons = []
contacts = []
twitters = []
for p in row.find('persons').getchildren():
person = p.text
person = person.replace('\n','')
# person = person.replace('\r','')
person = person.strip()
persons.append(person)
contact = p.get('contact')
if contact not in [
None, 'redacted', "<redacted>" ]:
contacts.append(contact)
twit = p.get('twitter')
if twit not in [ None, ]:
twitter_id = urllib.parse.urlparse(twit).path[1:]
# make sure it starts with an @
if not twitter_id.startswith('@'):
twitter_id = '@' + twitter_id
twitters.append(twitter_id)
event['authors'] = ', '.join(persons)
event['emails'] = ','.join(contacts)
event['twitter_id'] = ' '.join(twitters)
# (10:59:23 PM) vorlon: CarlFK: I'm pretty sure we never set that field.
# event['released'] = row.find('released').text == "True"
event['released'] = True
# event['license'] = row.find('license').text
event['license'] = ""
description = row.find('description').text
# if description is None: description = ''
description = description.replace('\r','')
event['description'] = description
event['conf_key'] = row.get('id')
# event['conf_url'] = 'https://summit.debconf.org' + row.find('conf_url').text
# event['conf_url'] = 'https://za.pycon.org' + row.find('conf_url').text
event['conf_url'] = row.find('full_conf_url').text
event['tags'] = row.find('track').text
# save the original row so that we can sanity check end time.
# event['raw'] = row
event['raw'] = None
# if event['conf_key'] in [ "127", "40"]:
# if row.find('slug').text in [ "hacking-time", ]:
# skip this one
# https://summit.debconf.org/debconf14/meeting/127/hacking-time/
# continue
events.append(event)
id += 1
return events
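# Aside, not from the original: the twitter-handle normalization used above,
# assuming the feed carries a full profile URL.
"""
import urllib.parse
twit = 'https://twitter.com/carlfk'
twitter_id = urllib.parse.urlparse(twit).path[1:]
if not twitter_id.startswith('@'):
    twitter_id = '@' + twitter_id
assert twitter_id == '@carlfk'
"""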
def summit_penta(self, schedule, show):
# dc14 - summit with penta xml
# top of schedule is:
# <conference></conference>
# <day date="2012-02-04" index="1"></day>
# <day date="2012-02-05" index="2"></day>
# each day has a list of rooms
rooms = [ r.get('name') for r in schedule[1] ]
print("rooms", rooms)
self.add_rooms(rooms,show)
"""
# sequence the rooms
# this will whack any manual edits
if self.options.update:
seq = 1
for room in rooms:
loc = Location.objects.get(name=room,)
loc.active=True
loc.sequence=seq
loc.save()
seq+=1
"""
events = self.summit_penta_events(schedule)
self.add_eps(events, show)
return
def sched(self,schedule,show):
# pprint.pprint(schedule)
rooms = self.get_rooms(schedule, "venue")
self.add_rooms(rooms,show)
field_maps = [
('id','id'),
('venue','location'),
# ('','sequence'),
('name','name'),
# ('','slug'),
('speakers','authors'),
('','emails'),
('description','description'),
('event_start','start'),
('','duration'),
('','released'),
('','license'),
('','tags'),
('event_key','conf_key'),
('','conf_url'),
('','host_url'),
('','public_url'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
if self.options.verbose: print("event", event)
row = event['raw']
if 'speakers' not in list(row.keys()):
# del(event)
# continue
pass
if 'speakers' in list(row.keys()):
# pprint.pprint( row['speakers'] )
authors = ', '.join( s['name'] for s in row['speakers'] )
else:
authors = ''
event['authors'] = authors
# print authors
if 'description' not in list(row.keys()):
event['description']=''
start = parse(event['start'])
end = parse(row['event_end'])
delta = end - start
minutes = delta.seconds//60 # - 5 for talk slot that includes break
duration="00:%s:00" % ( minutes)
event['start'] = start
event['end'] = end
event['duration'] = duration
# event['released'] = False
event['released'] = True
event['license'] = self.options.license
# event['tags'] = ''
#event['description'] = ''
self.add_eps(events, show)
return
def pyconde2012(self,schedule,show):
# pycon.de 2012 and 2013
# pprint.pprint(schedule)
rooms = self.get_rooms(schedule )
self.add_rooms(rooms,show)
field_maps = [
('conf_key','id'),
('room','location'),
('','sequence'),
('name','name'),
('','slug'),
('authors','authors'),
('contact','emails'),
('description','description'),
('start','start'),
('duration','duration'),
('released','released'),
('license','license'),
('tags','tags'),
('conf_key','conf_key'),
('conf_url','conf_url'),
('','host_url'),
('','public_url'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
if self.options.verbose: print("event", event)
raw = event['raw']
event['authors'] = ', '.join( event['authors'] )
event['emails'] = ', '.join( event['emails'] )
event['start'] = parse(event['start'])
event['duration'] = "00:%s:00" % ( event['duration'] )
event['license'] = ''
self.add_eps(events, show)
return
def pyconca2012(self,schedule,show):
# pprint.pprint(schedule)
schedule = schedule['data']['talk_list']
# return talks, session
# remove rejected talks
schedule = [t for t in schedule if t['schedule_slot_id'] is not None]
rooms = self.get_rooms(schedule )
self.add_rooms(rooms,show)
field_maps = [
('conf_key','id'),
('room','location'),
('','sequence'),
('title','name'),
('','slug'),
('authors','authors'),
('','emails'),
('abstract','description'),
('start','start'),
('duration','duration'),
('video_release','released'),
('','license'),
('','tags'),
('conf_key','conf_key'),
('conf_url','conf_url'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
if self.options.verbose: print("event", event)
raw = event['raw']
if self.options.verbose: pprint.pprint(raw)
event['authors'] = \
raw['speaker_first_name'] +' ' + raw['speaker_last_name']
event['emails'] = raw['user']['email']
event['start'] = datetime.datetime.strptime(
event['start'],'%Y-%m-%dT%H:%M:%S-05:00')
event['duration'] = "00:%s:00" % ( event['duration'] )
event['released'] = raw['video_release']
event['license'] = ''
self.add_eps(events, show)
return
def nodepdx(self, schedule, show):
# Troy's json
html_parser = html.parser.HTMLParser()
field_maps = [
#('','location'),
# ('','sequence'),
('title','name'),
('speaker','authors'),
('email','emails'),
('abstract','description'),
('start_time','start'),
('end_time','end'),
('duration','duration'),
('released','released'),
# ('','license'),
# ('topics','tags'),
('start_time','conf_key'),
# ('web_url','conf_url'),
# ('','host_url'),
# ('','public_url'),
]
events = self.generic_events(schedule, field_maps)
rooms = ['room_1']
self.add_rooms(rooms,show)
for event in events:
# create an ID from day, hour, minute
event['conf_key'] = \
event['conf_key'][9] \
+ event['conf_key'][11:13] \
+ event['conf_key'][14:16]
event['start'] = datetime.datetime.strptime(
event['start'],'%Y-%m-%d %H:%M:%S')
event['end'] = datetime.datetime.strptime(
event['end'],'%Y-%m-%d %H:%M:%S')
delta = event['end'] - event['start']
minutes = delta.seconds//60
duration = int( event['duration'].split()[0] )
if minutes != duration:
# raising a bare string is a TypeError in Python 3
raise ValueError("duration mismatch: %s != %s" % (minutes, duration))
event['duration'] = "00:%s:00" % (duration)
# Bogus, but needed to pass
event['location'] = 'room_1'
event['license'] = ''
event['description'] = html_parser.unescape(
strip_tags(event['description']) )
# event['tags'] = ", ".join( event['tags'])
# pprint.pprint(event)
self.add_eps(events, show)
return
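# Aside, not from the original: the conf_key slicing above, for a timestamp
# shaped 'YYYY-MM-DD HH:MM:SS' - last digit of the day, then HH, then MM.
"""
ts = '2013-05-17 10:30:00'
assert ts[9] + ts[11:13] + ts[14:16] == '71030'
"""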
def bosc_2014(self, schedule, show):
# remove spreadsheet rows that have no conf_key or start time
# schedule = [s for s in schedule if s['Time Start']]
schedule = [s for s in schedule
if s['conf_key'] and s['start'] ]
# csv.DictReader already yields str under Python 3, so the old
# .decode('utf-8') (a Python 2 leftover) would raise AttributeError
schedule = [{k: d[k] for k in d} for d in schedule]
field_maps = [
('conf_key','id'),
('conf_key','conf_key'),
('room','location'),
# ('','sequence'),
('name','name'),
('authors','authors'),
('contact','emails'),
('description','description'),
('start','start'),
('end','end'),
('','duration'),
('released','released'),
('license','license'),
('tags','tags'),
('conf_url','conf_url'),
# ('','host_url'),
# ('','public_url'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
event['start'] = datetime.datetime.strptime(
"{0} {1}".format(event['raw']['date'],event['start']),
'%d/%m/%Y %H:%M')
event['end'] = datetime.datetime.strptime(
"{0} {1}".format(event['raw']['date'],event['end']),
'%d/%m/%Y %H:%M')
delta = event['end'] - event['start']
minutes = delta.seconds//60
event['duration'] = "00:{}:00".format(minutes)
# event['duration'] = "00:{0}:00".format(event['duration'])
event['released'] = event['released'].lower() == 'y'
rooms = self.get_rooms(events)
print(rooms)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def depy15(self, schedule, show):
room = 'Room LL104'
field_maps = [
('title','name'),
('start_time','start'),
('end_time','end'),
('presenter','authors'),
('description','description'),
('released','released'),
]
events = self.generic_events(schedule, field_maps)
rooms = [room]
self.add_rooms(rooms,show)
for i,event in enumerate(events):
event['location'] = room
event['conf_key'] = str(i)
dt_format='%Y-%m-%d %H:%M'
event['start'] = datetime.datetime.strptime(
event['start'], dt_format)
end = datetime.datetime.strptime(
event['end'], dt_format)
seconds=(end - event['start']).seconds
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
event['duration'] = duration
if event['description'] is None:
event['description'] = ''
event['authors'] = ', '.join(event['authors'].split(' and '))
event['emails'] = ""
event['twitter_id'] = ""
event['license'] = ""
event['conf_url'] = ""
event['tags'] = ""
event['released'] = event['released'] == 'yes'
self.add_eps(events, show)
return
def jupyter_chicago_2016(self, schedule, show):
room = 'Civis'
field_maps = [
('Talk Title','name'),
('start','start'),
('duration','duration'),
('First Name','authors'),
# ('Last Name',''),
('Twitter Handle','twitter_id'),
# ('Bio',''),
# ('Website',''),
('Talk Abstract','description'),
# ('Github Handle',''),
('Email','emails'),
('Do you give us permission to record and release video of your presentation?','released'),
]
events = self.generic_events(schedule, field_maps)
rooms = [room]
self.add_rooms(rooms,show)
# event_date="February 20th, 2016"
event_date="2016-02-16"
for i,event in enumerate(events):
event['location'] = room
event['conf_key'] = str(i)
dt_format='%Y-%m-%d %H:%M'
event['start'] = datetime.datetime.strptime(
event_date + ' ' + event['start'], dt_format)
event['authors'] = \
event['authors']+' '+event['raw']['Last Name']
event['license'] = ""
event['conf_url'] = ""
event['tags'] = ""
event['released'] = event['released'] == 'Yes'
self.add_eps(events, show)
return
def blinkon4(self, schedule, show):
# remove spreadsheet rows that have no Start Time
schedule = [s for s in schedule if s['Start Time']]
# schedule = [s for s in schedule if
# s['crowdsource_ref'] or s['released']]
# csv.DictReader already yields str under Python 3, so the old
# .decode('utf-8') (a Python 2 leftover) would raise AttributeError
schedule = [{k: d[k] for k in d} for d in schedule]
field_maps = [
# ('Title Slide Includes BlinkOn 4',''),
('Title','name'),
# ('Notes',''),
('Date','start'),
('Start Time','start'),
('End Time',''),
# ('Slides',''),
# ('Internal Video URL',''),
('Description for YouTube','description'),
('Speaker','authors'),
('Should Upload?','released'),
# ('Good Title Slide',''),
# ('Shortname',''),
]
events = self.generic_events(schedule, field_maps)
rooms = ['room 1']
self.add_rooms(rooms,show)
for i,event in enumerate(events):
event['location'] = "room 1"
event['conf_key'] = str(i)
# event['authors'] = ', '.join(event['authors'].split(' & '))
event['start'] = datetime.datetime.strptime(
event['start'], '%Y/%m/%d %H:%M:%S')
print(event['start'])
event['duration'] = "01:00:00"
event['emails'] = ""
event['twitter_id'] = ""
event['license'] = ""
event['conf_url'] = ""
event['tags'] = ""
event['released'] = event['released'] == 'Yes'
self.add_eps(events, show)
return
def wtd_na_2014(self, schedule, show):
# given a google doc sheet,
# export to something csv-like,
# read in the local file.
# keep only rows that have a crowdsource_ref or are released
# schedule = [s for s in schedule if s['Time Start']]
schedule = [s for s in schedule if
s['crowdsource_ref'] or s['released']]
# csv.DictReader already yields str under Python 3, so the old
# .decode('utf-8') (a Python 2 leftover) would raise AttributeError
schedule = [{k: d[k] for k in d} for d in schedule]
field_maps = [
('key','id'),
('Room/Location','location'),
# ('','sequence'),
('Session Title','name'),
('','authors'),
('Email','emails'),
('Description (Optional)','description'),
('Time Start','start'),
# ('Time End','end'),
('Length','duration'),
('released','released'),
('','license'),
('tags','tags'),
('key','conf_key'),
('crowdsource_ref','conf_url'),
# ('','host_url'),
# ('','public_url'),
]
events = self.generic_events(schedule, field_maps)
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
for event in events:
if " - " in event['name']:
event['authors'], event['name'] = \
event['name'].split(' - ')
event['authors'] = ', '.join(event['authors'].split(' & '))
event['start'] = datetime.datetime.strptime(
"{0} {1}".format(event['raw']['Date'],event['start']),
'%m/%d/%Y %H:%M')
event['duration'] = "00:{0}:00".format(event['duration'])
event['description'] = event['description'].replace('\n','\r\n')
event['released'] = event['released'].lower() == 'y'
self.add_eps(events, show)
return
def lanyrd(self, schedule, show):
# http://lanyrd.com
field_maps = [
('id','id'),
('space','location'),
# ('','sequence'),
('title','name'),
('speakers','authors'),
('twitter','twitter_id'),
('email','emails'),
('abstract','description'),
('start_time','start'),
('end_time','end'),
# ('','duration'),
# ('','released'),
# ('','license'),
('topics','tags'),
('id','conf_key'),
('web_url','conf_url'),
# ('','host_url'),
# ('','public_url'),
]
rooms = set()
events =[]
# flatten out nested json (I think..)
for day in schedule['sessions']:
events += self.generic_events(day['sessions'], field_maps)
# for session in day['sessions']:
#[u'speakers', u'title', u'event_id', u'start_time', u'space', u'topics', u'times', u'abstract', u'web_url', u'end_time', u'id', u'day']
# pprint.pprint(events[-2])
# events = [e for e in events if e['location'] is not None]
# events = [e for e in events if e['start'] is not None]
# events = [e for e in events
# if e['location'] not in ['Hackers Lounge',] ]
# events = [e for e in events
# if e['conf_key'] not in ['sdktrw','sdktrx'] ]
for event in events:
if "Lunch" in event['name']:
event['location']="Main Room"
if event['location'] is None:
event['location']="room 1"
rooms.add(event['location'].lower())
event['twitter_id'] = " ".join(
a['twitter'] for a in event['authors']
if a['twitter'] is not None)
while len(event['twitter_id'])>50: # 135:
event['twitter_id'] = " ".join(
event['twitter_id'].split()[:-1])
# clobber author object with names.
event['authors'] = ", ".join(
a['name'] for a in event['authors'])
"""
if event['name'] == "Panel: State of OSS .NET":
event['twitter_id'] = "@richcampbell @carlfranklin"
event['authors'] = "Richard Campbell and Carl Franklin"
"""
event['start'] = datetime.datetime.strptime(
event['start'],'%Y-%m-%d %H:%M:%S')
event['end'] = datetime.datetime.strptime(
event['end'],'%Y-%m-%d %H:%M:%S')
delta = event['end'] - event['start']
minutes = delta.seconds//60
event['duration'] = "00:%s:00" % ( minutes)
event['description'] = strip_tags(event['description'])
# if event['location'] is None:
# event['location'] = 'room 1'
event['tags'] = ", ".join( event['tags'])
# Bogus, but needed to pass
# event['emails'] = ''
# event['released'] = bool(event['twitter_id'])
event['released'] = "*" not in event['name']
event['license'] = ''
# rooms = ['room 1']
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
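# Aside, not from the original: the trimming loop above drops handles from
# the end until the string fits the budget (50 chars in the code; 10 here
# just to keep the example short).
"""
tid = '@aaa @bbb @ccc'
while len(tid) > 10:
    tid = ' '.join(tid.split()[:-1])
assert tid == '@aaa @bbb'
"""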
def symposion2(self, schedule, show):
# pycon.us 2013
rooms = self.get_rooms(schedule, "room", )
if self.options.verbose: print(rooms)
self.add_rooms(rooms,show)
field_maps = [
('conf_key','id'),
('room','location'),
('name','name'),
('authors','authors'),
('contact','emails'),
('description','description'),
('start','start'),
('duration','duration'),
('released','released'),
('license','license'),
('kind','tags'),
('conf_key','conf_key'),
('conf_url','conf_url'),
('video_url','host_url'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
# print event
raw = event['raw']
if self.options.verbose: pprint.pprint(raw)
if self.options.verbose: print("event", event)
event['start'] = datetime.datetime.strptime(
event['start'],'%Y-%m-%dT%H:%M:%S')
event['authors'] = ", ".join(event['authors'])
if event['emails'] == ['redacted']:
event['emails'] = ''
else:
event['emails'] = ", ".join(event['emails'])
event['twitter_id'] = ''
# if event['duration'] is None: event['duration']=5
seconds=(int(event['duration'] )) * 60
hms = seconds//3600, (seconds%3600)//60, seconds%60
event['duration'] = "%02d:%02d:%02d" % hms
if event['name']=='Keynote':
event['name'] = \
'%s Keynote - %s' % (
self.show.name, event['authors'])
if not event['description']:
event['description']= \
'Keynote - %s\n%s\n' % (
event['authors'],
event['start'].strftime('%A, %B %d %Y %I %p') )
if event['name'] == "Lightning Talks":
event['name'] = "%s %s Lightning Talks" % (
self.show.name,
event['start'].strftime('%A %p') )
if not event['description']:
event['description']= \
"%s Lightning Talks\n%s" % (
self.show.name,
event['start'].strftime('%A, %B %d %Y %I %p') )
self.add_eps(events, show)
return
# If we need short names?
rooms = {
'Grand Ballroom AB':'AB',
'Grand Ballroom CD':'CD',
'Grand Ballroom EF':'EF',
'Grand Ballroom GH':'GH',
'Great America':'Great America',
'Great America Floor 2B R1':'R1',
'Great America Floor 2B R2':'R2',
'Great America Floor 2B R3':'R3',
'Great America J':'J',
'Great America K':'K',
'Mission City':'Mission City',
'Mission City M1':'M1',
'Mission City M2':'M2',
'Mission City M3':'M3',
'Poster Room':'Poster',
}
def pycon2013(self,schedule,show):
for s in schedule:
if s['room'] == 'Grand Ballroom GH, Great America, Grand Ballroom CD, Grand Ballroom EF, Grand Ballroom AB, Mission City':
s['room'] = "Mission City"
# merge in Zac's poster schedule
f=open('schedules/postervideo.csv')
poster_schedule = csv.DictReader(f)
for poster in poster_schedule:
conf_key=1000+int(poster['poster_id'])
for s in schedule:
if s['kind']=='poster':
if s['conf_key']==conf_key:
# set the room to Poster-[1,2,3,4]
s['room'] = "Poster-%s" % poster['camera']
# don't care about end, use duration=5
start,end = poster['time'].split('-')
h,m = start.split(':')
s['start'] = datetime.datetime(2013, 3, 17, int(h), int(m)).isoformat()
self.symposion2(schedule,show)
return
def pydata_2013(self,show):
print("pydata_2013")
# f = open('schedules/pydata2013/day1.csv' )
f = open('schedules/pydata2013/PyData Talks and Speakers.csv', newline='')
schedule = csv.DictReader(f)
# schedule = list(csv.reader(f))
# room = "Track %s" % i
events = []
pk = 1
for s in schedule:
# pprint.pprint(s)
# ['IPython-parallel', ' Min Ragan-Kelley', ' IPython', ' A1', ' 10:45am'],
# Title,Name,Email,Company,Room,Start,End,Date
e = { 'conf_key': pk,
'room':s['Room'].strip(),
'location':s['Room'].strip(),
'name':s['Title'],
'authors':s['Name'].strip(),
'emails':s['Email'],
'description':s['Company'].strip(),
'start':parse(s['Date'] + ' ' + s['Start']),
'end':parse(s['Date'] + ' ' + s['End']),
'duration':"0:50:00",
'released':True,
'license':"",
'conf_url':"",
'tags':'',
}
seconds=(e['end'] - e['start']).seconds
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
e['duration'] = duration
"""
e = { 'conf_key': pk,
'room':s[3].strip(),
'location':s[3].strip(),
'name':s[0],
'authors':s[1].strip(),
'emails':'pwang@continuum.io',
'description':s[2].strip(),
'start':parse("Mar 18, 2013" + s[4]),
'duration':"0:90:00",
'released':True,
'license':"",
'conf_url':"",
'tags':'',
}
# 'conf_key':
"""
# pprint.pprint( schedule )
# pprint.pprint( e )
events.append(e)
pk +=1
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
def pyconca2013(self,schedule,show):
# remove the schedule wrapper that protects the json
# from evil list constructors.
schedule = schedule['schedule']
# move Plenary events into the location where the equipment is
for event in schedule:
if not event['room']:
event['room']="None?"
if "Colony Ballroom" in event['room']:
event['room']="Colony Ballroom"
return self.symposion2(schedule,show)
def pyohio2013(self,schedule,show):
# remove events with no room (like Break)
schedule = [s for s in schedule if s['rooms'] ]
for event in schedule:
# move Plenary events into the location where the equipment is
if "Cartoon 1" in event['room']:
event['room']="Cartoon 1"
if event['conf_url'] is None:
event['conf_url'] = 'http://pyohio.org/schedule/'
# if event['license'] == '':
# event['license'] = 'CC BY-SA 2.5 CA'
if event['authors'] is None:
if "Catherine Devlin" in event['name']:
event['authors'] = ["Catherine Devlin"]
else:
event['authors'] = []
elif "&" in event['authors'][0]:
event['authors']=event['authors'][0].split(' & ')
if ('contact' not in event) or \
(event['contact'] is None):
event['contact'] = []
if event['name'].startswith('**Opening Remarks:**'):
event['name'] = "Panel Discussion: So You Wanna Run a Tech Conference."
event['authors'] = "Catherine Devlin, Eric Floehr, Brian Costlow, Raymond Chandler, Jason Green, Jason Myers".split(", ")
return self.symposion2(schedule,show)
def pytexas2014(self, schedule, show):
# remove events with no room (like Break)
# schedule = [s for s in schedule if s['room'] ]
field_maps = [
('id', 'conf_key'),
('name', 'name'),
('description', 'description'),
('duration', 'duration'),
('start', 'start'),
('room', 'location'),
('url', 'conf_url'),
('speaker', 'authors'),
('speaker', 'emails'),
('released', 'released'),
('license', 'license'),
('language', 'language'),
('', 'tags'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
event['conf_key'] = str(event['conf_key'])
if event['location'] == 'all-rooms':
event['location'] = 'MSC 2300 B'
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
event['duration'] = "00:%s:00" % ( event['duration'], )
if event['authors']['name'] is None:
event['authors'] = ''
else:
event['authors'] = event['authors']['name']
if event['emails']['email'] == 'redacted':
event['emails'] = ''
else:
event['emails'] = event['emails']['email']
event['released'] = \
event['released'] and event['raw']['make_recording']
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def erlang_chi_2014(self,schedule,show):
field_maps = [
('room','location'),
('','sequence'),
('name','name'),
('','slug'),
('speaker','authors'),
('speaker','emails'),
('description','description'),
('start','start'),
('end','end'),
('','duration'),
('released','released'),
('license','license'),
('','tags'),
('id','conf_key'),
('conf_url','conf_url'),
('','host_url'),
('','public_url'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
event['authors'] = event['authors']['name']
event['emails'] = event['emails']['email']
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
event['end'] = datetime.datetime.strptime(
event['end'], '%Y-%m-%dT%H:%M:%S' )
delta = event['end'] - event['start']
minutes = delta.seconds//60
event['duration'] = "00:%s:00" % ( minutes)
# event['conf_url'] = "http://www.chicagoerlang.com/{}.html".format(event['conf_key'])
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def citycode15(self,schedule,show):
field_maps = [
('room','location'),
('title','name'),
('speakers','authors'),
('speakers','emails'),
('speakers','twitter_id'),
('start','start'),
('end','end'),
('duration','duration'),
('released','released'),
('license','license'),
('tags','tags'),
('conf_key','conf_key'),
('conf_url','conf_url'),
('description','description'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
event['authors'] = event['authors'][0]['name']
event['emails'] = event['emails'][0]['email']
event['twitter_id'] = event['twitter_id'][0]['twitter_id']
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
event['end'] = datetime.datetime.strptime(
event['end'], '%Y-%m-%dT%H:%M:%S' )
delta = event['end'] - event['start']
minutes = delta.seconds//60
event['duration'] = "00:%s:00" % ( minutes)
event['released'] = event['released'] == "yes"
# event['conf_url'] = "http://www.chicagoerlang.com/{}.html".format(event['conf_key'])
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def prodconf14(self,schedule,show):
field_maps = [
('room','location'),
('title','name'),
('speaker','authors'),
('description','description'),
('start','start'),
('end','end'),
]
events = self.generic_events(schedule, field_maps)
pk = 1
for event in events:
if self.options.verbose:
print("event:")
pprint.pprint(event)
event['conf_key'] = "pk{}".format(pk)
pk += 1
if event['authors'] is None:
event['authors'] = ', '.join(
[a['name'] for a in event['raw']['speakers']])
else:
event['authors'] = event['authors']['name']
event['emails'] = ''
event['released'] = False
event['license'] = False
event['conf_url'] = ''
event['tags'] = ''
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
event['end'] = datetime.datetime.strptime(
event['end'], '%Y-%m-%dT%H:%M:%S' )
delta = event['end'] - event['start']
minutes = delta.seconds//60
event['duration'] = "00:%s:00" % ( minutes)
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def nodevember14(self,schedule,show):
# remove rows where id='empty'
schedule = [s for s in schedule if s['id'] != 'empty']
field_maps = [
('room','location'),
('name','name'),
('speaker','authors'),
# ('','emails'),
('description','description'),
('start','start'),
('end','end'),
# ('','duration'),
('released','released'),
('license','license'),
# ('','tags'),
('id','conf_key'),
('conf_url','conf_url'),
]
events = self.generic_events(schedule, field_maps)
for event in events:
if self.options.verbose:
print("event:")
pprint.pprint(event)
if event['location'] in ["Room 1","Room 2","Room 3","Room 4"]:
# room 1 is really room 100, 2 200...
event['location'] = event['location'] + "00"
speakers = [event['authors']]
event['authors'] = ', '.join(
[s['name'] for s in speakers])
event['emails'] = ', '.join(
[s['email'] for s in speakers])
event['tags'] = ''
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
event['end'] = datetime.datetime.strptime(
event['end'], '%Y-%m-%dT%H:%M:%S' )
delta = event['end'] - event['start']
minutes = delta.seconds//60
event['duration'] = "00:%s:00" % ( minutes)
event['conf_url'] = event['conf_url'].replace(".org.com", ".org")
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def osdc2015(self, schedule, show):
schedule = schedule['schedule']
schedule = [s for s in schedule if 'authors' in s]
field_maps = [
('room','location'),
('name','name'),
('description','description'),
('authors','authors'),
('authors','emails'),
('start','start'),
('duration','duration'),
('released','released'),
('license','license'),
('tags','tags'),
('conf_key','conf_key'),
('conf_url','conf_url'),
('','twitter_id'),
('','host_url'),
('','public_url'),
]
events = self.generic_events(schedule, field_maps)
# for event in events:
# pprint.pprint( event )
# remove events with no room (like Break)
events = [e for e in events if e['location'] is not None ]
for event in events:
if "Derwent 1" in event['location']:
event['location'] = 'Derwent 1'
"""
if event['conf_key']==23:
# name": "Crash-safe Replication with MariaDB...
event['location'] = 'Riviera'
if event['conf_key']==20:
# name": "SubPos...
event['location'] = 'Derwent 1'
if event['conf_key']==21:
# name": "Intro to OpenStreetMap
event['location'] = 'Derwent 1'
if event['conf_key']==75:
# name": "Opportunities in Openness...
event['location'] = 'Derwent 1'
"""
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
event['duration'] = "00:{}:00".format(event['duration'])
event['authors']=', '.join(event['authors'])
event['emails']=', '.join(event['emails'])
event['tags'] = ''
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def nodevember15(self,schedule,show):
schedule = schedule['schedule']
s1 = []
x=1
for day in schedule:
date = day["date"] #: "November 13, 2015",
for s in day['slots']:
if "speaker" in s:
if s['room'] == "Ezel 301":
s['room'] = "Ezell 301"
if s['room'] == "Stow 108":
s['room'] = "Stowe Hall"
s['start'] = "{} {}".format( date, s['time'] )
s['duration'] = 60 if s['keynote'] else 40
s['key'] = x
s['released'] = True
x += 1
s1.append(s)
# import code; code.interact(local=locals())
field_maps = [
('room','location'),
('title','name'),
('speaker','authors'),
('','emails'),
('summary','description'),
('start','start'),
('','twitter_id'),
('duration','duration'),
('released','released'),
('','license'),
('','tags'),
('key','conf_key'),
('','conf_url'),
]
# remove rows where id='empty'
# schedule = [s for s in schedule if s['id'] != 'empty']
events = self.generic_events(s1, field_maps)
for event in events:
if self.options.verbose:
print("event:")
pprint.pprint(event)
# event['start'] = dateutil.parser.parse( event['start'] )
event['start'] = parse( event['start'] )
# datetime.datetime.strptime(
# event['start'], '%B %d, %Y %I:%M %p' )
event['duration'] = "00:{:02}:00".format(event['duration'])
event['conf_url'] = event['conf_url'].replace(".org.com", ".org")
rooms = self.get_rooms(events)
print(rooms)
self.add_rooms(rooms,show)
self.add_eps(events, show)
return
def djbp10(self, schedule, show):
room = 'Liberty Hall'
# make list of talks from dict of talks
# where video=true
talks=[]
for k in schedule['globals']['talks']:
if schedule['globals']['talks'][k]['video']:
talks.append( schedule['globals']['talks'][k] )
field_maps = [
('id','conf_key'),
('title','name'),
('start','start'),
('duration','duration'),
('speakers','authors'),
('abstract','description'),
('released','released'),
('speakers','twitter_id'),
('slug','conf_url'),
]
events = self.generic_events(talks, field_maps)
rooms = [room]
self.add_rooms(rooms,show)
for event in events:
event['location'] = room
event['authors'] = ', '.join([
a['name'] for a in event['authors'] ])
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S-05:00' )
event['duration'] = "00:%s:00" % ( event['duration'] )
try:
event['twitter_id'] = ', '.join([
[ "@"+s['link'].split('/')[-1] for s in t['social']
if s['title']=="twitter"][0]
for t in event['twitter_id'] ])
except (IndexError,KeyError) as e:
event['twitter_id'] = ""
if event['description'] is None:
event['description'] = ""
event['emails'] = ""
event['license'] = ""
event['conf_url'] = "https://djangobirthday.com/talks/#{}".format(event['conf_url'])
event['tags'] = ""
if self.options.verbose: pprint.pprint(event)
self.add_eps(events, show)
return
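# Aside, not from the original: the nested comprehension above, unpacked on
# a hypothetical speaker record - the first social link titled "twitter"
# becomes an @handle.
"""
t = {'social': [{'title': 'twitter', 'link': 'https://twitter.com/someone'}]}
handle = ["@" + s['link'].split('/')[-1] for s in t['social']
          if s['title'] == 'twitter'][0]
assert handle == '@someone'
"""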
def pygotham2015(self,schedule,show):
# PyGotham 2015
field_maps = [
# ('id','id'),
('room','location'),
('title','name'),
('user','authors'),
('user','emails'),
('user','twitter_id'),
('description','description'),
('start','start'),
('duration','duration'),
('released','released'),
('license','license'),
('tags','tags'),
# ('conf_key','conf_key'),
('id','conf_key'),
('conf_url','conf_url'),
('','host_url'),
('','public_url'),
]
events = self.generic_events(schedule, field_maps)
# remove events with no room (like Break)
events = [e for e in events if e['location'] is not None ]
for event in events:
if "701" in event['location']:
event['location'] = 'Room 701'
# if event['start'] is None:
# event['start'] = datetime.datetime.now()
# if event['name'] == "Keynote (JM)":
# event['start'] = datetime.datetime(2015,8,16,9,0,0)
# else:
event['start'] = datetime.datetime.strptime(
event['start'], '%Y-%m-%dT%H:%M:%S' )
event['duration'] = "00:{}:00".format(event['duration'])
event['tags'] = ''
if event['license'] == 'Creative Commons':
event['license'] = 'CC BY-SA'
if event['conf_url'] is None:
base = 'https://pygotham.org/2015/'
event['conf_url'] = '{base}talks/{id}/{slug}'.format(
base=base,
id = event['conf_key'],
slug = slugify(event['name']) )
# https://pygotham.org/2015/talks/169/going-parallel-and-out-of
# event['authors']=', '.join(event['authors'])
event['authors']=event['authors']['name']
if event['emails']['email']=="<redacted>":
event['emails']=""
else:
event['emails']=event['emails']['email']
if event['twitter_id']['twitter_id']:
event['twitter_id']="@" + event['twitter_id']['twitter_id']
else:
event['twitter_id']=""
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
def kiwipycon2015(self,schedule,show):
field_maps = [
# ('id','id'),
('room','location'),
('name','name'),
('authors','authors'),
('contact','emails'),
('abstract','description'),
('start','start'),
('duration','duration'),
('released','released'),
('license','license'),
('tags','tags'),
('conf_key','conf_key'),
('conf_url','conf_url'),
('','twitter_id'),
]
events = self.generic_events(schedule, field_maps)
# remove events with no room (like Break)
# events = [e for e in events if e['location'] is not None ]
for event in events:
event['start'] = datetime.datetime.strptime(
event['raw']['date'] + 'T' + event['start'],
'%d/%m/%YT%H:%M:%S' )
event['duration'] = "00:{}:00".format(event['duration'])
if event['license'] == 'CC':
event['license'] = 'CC BY-SA'
event['authors']=', '.join(event['authors'])
event['emails']=', '.join(event['emails'])
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
def linuxwochen(self,schedule,show):
conf = schedule['schedule']['conference']
schedule = []
for day in conf['days']:
for room in day['rooms']:
for event in day['rooms'][room]:
if self.options.verbose: pprint.pprint(event)
schedule.append(event)
field_maps = [
# ('id','id'),
('room','location'),
('title','name'),
('persons','authors'),
('','emails'),
('description','description'),
('date','start'),
('duration','duration'),
# ('released','released'),
# ('license','license'),
('track','tags'),
('language','language'),
('id','conf_key'),
('id','conf_url'),
('','twitter_id'),
]
# https://cfp.linuxwochen.at/de/LWW16/public/events/396
events = self.generic_events(schedule, field_maps)
# remove events with no room (like Break)
# events = [e for e in events if e['location'] is not None ]
for event in events:
if self.options.verbose: pprint.pprint(event)
event['conf_key']=str(event['conf_key'])
event['conf_url']="https://cfp.linuxwochen.at/de/LWW16/public/events/{}".format(event['conf_key'])
event['start'] = datetime.datetime.strptime(
event['start'],
'%Y-%m-%dT%H:%M:%S+02:00' )
event['duration'] = "{}:00".format(event['duration'])
event['authors']=', '.join([
p['full_public_name'] for p in event['authors']])
event['released']=False
event['license'] = 'CC BY-SA'
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
def amberapp(self,schedule,show):
schedule = schedule['speaker_list']
field_maps = [
# ('id','id'),
('room','location'),
('title','name'),
('presenter_list','authors'),
('presenter_list','emails'),
('presenter_list','twitter_id'),
('description','description'),
('start_time','start'),
('duration','duration'),
('released','released'),
('license','license'),
('','tags'),
('talk_language','language'),
('id','conf_key'),
('conf_url','conf_url'),
]
events = self.generic_events(schedule, field_maps)
# remove events with no room (like Break)
# events = [e for e in events if e['location'] is not None ]
for event in events:
if self.options.verbose: pprint.pprint(event)
event['conf_key']=str(event['conf_key'])
event['start'] = datetime.datetime.strptime(
event['start'],
'%Y-%m-%d %H:%M:%S' )
event['duration'] = "{}:00".format(event['duration'])
event['authors']=', '.join(
[ d[list(d.keys())[0]]['name'] for d in event['authors']])
event['twitter_id']=', '.join(
[ d[list(d.keys())[0]]['twitter_id'] for d in event['twitter_id']])
event['emails']=', '.join(
[ d[list(d.keys())[0]]['email'] for d in event['emails']])
rooms = self.get_rooms(events)
self.add_rooms(rooms,show)
self.add_eps(events, show)
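# Aside, not from the original: the presenter unwrapping above, assuming
# each author entry is a one-key dict whose value is the person record.
"""
d = {'speaker_7': {'name': 'Ada', 'email': 'a@b.c', 'twitter_id': '@ada'}}
assert d[list(d.keys())[0]]['name'] == 'Ada'
"""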
#################################################
# main entry point
def one_show(self, show):
# url='http://us.pycon.org/2010/conference/schedule/events.json'
# url='http://pycon-au.org/2010/conference/schedule/events.json'
# url='http://djangocon.us/schedule/json/'
# url='http://2010.osdc.com.au/program/json'
# url='http://conf.followtheflow.org/programme/schedule/json'
# url='http://lca2011.linux.org.au/programme/schedule/json'
# url='http://veyepar.debian.org/main/C/chipy/S/may_2011.json'
# url='http://lca2011.linux.org.au/programme/schedule/json'
# url='http://2011.pyohio.org/programme/schedule/json'
# url='http://pyohio.nextdayvideo.com/programme/schedule/json'
# url='http://veyepar.debian.org/main/C/jschi/S/june_2011.json'
# url='http://pyohio.org/schedule/json/'
# url='https://www.desktopsummit.org/program/veyepar.csv'
# url='http://pycon-au.org/2011/conference/schedule/events.json'
# url='http://djangocon.us/schedule/json/'
# url='http://pygotham.org/talkvote/full_schedule/'
# url='http://www.pytexas.org/2011/schedule/json/'
"""
'djangocon2011': 'http://djangocon.us/schedule/json/',
'pygotham_2012': 'http://pygotham.org/talkvote/full_schedule/',
'pytexas_2011': 'http://www.pytexas.org/2011/schedule/json/',
'pyconde2011': 'http://de.pycon.org/2011/site_media/media/wiki/mediafiles/pyconde2011_talks.json',
'ddu_2012': "http://drupaldownunder.org/program/session-schedule/json",
'lca_2012': "http://lca2012.linux.org.au/programme/schedule/json",
'fosdem_2012': "http://tmp.fosdem.org/video.xml",
'pycon_2012': "https://us.pycon.org/2012/schedule/json/",
'xpycon_2012': "file://pc2012.json",
'flourish_2012': "http://flourishconf.com/2012/schedule_json.php",
'chipy_may2012': "http://72.14.188.25:8095/meetings/1/topics.json",
'ictev_2012': "http://ictev.vic.edu.au/program/2012/json",
# 'ictev_2013': "http://ictev.vic.edu.au/program/2013/json",
'ictev_2013': "file://schedules/ictev2013.json",
# 'scipy_2012_v1': "file://scipy_talks.json",
# 'scipy_2012_v2': "http://conference.scipy.org/scipy2012/talks/test.php",
# 'scipy_2012': "http://conference.scipy.org/scipy2012/talks/schedule_json.php",
'scipy_2012': "http://conference.scipy.org/scipy2012/schedule/schedule_json.php",
'chipy_june2012': "http://chipy.org/api/meetings/",
'chipy_july_2012': "http://chipy.org/api/meetings/",
'pyohio_2012': "file://pyohio_2012.json",
'chipy_aug_2012': "http://chipy.org/api/meetings/",
'pycon_au_2012': "http://2012.pycon-au.org/programme/schedule/json",
'chipy_sep_2012': "http://chipy.org/api/meetings/",
'chipy_jan_2013': "http://chipy.org/api/meetings/",
'chipy_feb_2013': "http://chipy.org/api/meetings/",
# 'pyconde2012': 'http://de.pycon.org/2011/site_media/media/wiki/mediafiles/pyconde2011_talks.json',
# 'pyconde2012': 'https://stage.2012.de.pycon.org/episodes.json',
'pyconde2012': 'https://2012.de.pycon.org/episodes.json',
'pyconca2012': 'http://pycon.ca/talk.json',
'lca2013': 'http://lca2013.linux.org.au/programme/schedule/json',
'pycon2013': 'https://us.pycon.org/2013/schedule/conference.json',
'write_the_docs_2013': 'file://schedules/writethedocs.json',
# 'write_the_docs_2013': 'http://lanyrd.com/2013/writethedocs/schedule/ad9911ddf35b5f0e.v1.json',
'nodepdx2013': 'file://schedules/nodepdx.2013.schedule.json',
'chipy_may_2013': "http://chipy.org/api/meetings/",
}[self.options.show]
"""
client = show.client
url = show.schedule_url
if self.options.verbose: print(url)
if url.startswith('file'):
f = open(url[7:])
# j = f.read()
else:
session = requests.session()
# auth stuff goes here, kinda.
auth = pw.addeps.get(self.options.client, None)
if auth is not None:
if self.options.verbose: print(auth)
# get the csrf token out of login page
session.get(auth['login_page'])
token = session.cookies['csrftoken']
# in case it doesn't get passed in the headers
# result = requests.get(auth['login_page'])
# soup = BeautifulSoup(x.text)
# token = soup.find('input',
# dict(name='csrfmiddlewaretoken'))['value']
# setup the values needed to log in:
login_data = auth['login_data']
login_data['csrfmiddlewaretoken'] = token
if self.options.verbose: print("login_data", login_data)
ret = session.post(auth['login_page'],
data=login_data,
headers={'Referer':auth['login_page']})
if self.options.verbose: print("login ret:", ret)
# import code; code.interact(local=locals())
if self.options.show in ['chicagowebconf2012',
"cusec2013" , ]:
payload = {
"api_key": pw.sched[self.options.show]['apikey'],
"format":"json",
# "fields":"name,session_type,description",
"strip_html":"Y",
"custom_data":"Y",
}
else:
payload = None
response = session.get(url, params=payload, verify=False)
ext = os.path.splitext(url)[1]
j = ""  # raw body text; the json fingerprint checks below look at it
if ext=='.csv':
# schedule = list(csv.reader(f))
schedule = list(csv.DictReader(f))
if 'desktopsummit.org' in url:
return self.desktopsummit(schedule,show)
elif ext=='.xml':
if url.startswith('file'):
schedule=xml.etree.ElementTree.XML(f.read())
else:
schedule=xml.etree.ElementTree.XML(
response.content)
else:
# let's hope it is json, like everything should be.
# keep the raw text around so the startswith() fingerprints work
if url.startswith('file'):
j = f.read()
else:
j = response.text
schedule = json.loads(j)
# if it is a python pretty-printed list:
# (pyohio 2012)
# schedule = eval(j)
# save for later
# filename="schedule/%s_%s.json" % ( client.slug, show.slug )
# file(filename,'w').write(j)
# j=file(filename).read()
if self.options.verbose: pprint.pprint(schedule)
# if self.options.verbose: print j[:40]
if self.options.keys: return self.dump_keys(schedule)
# look at the fingerprint of the file (or cheat and use the show name)
# and call the appropriate parser
if url.endswith('programme/schedule/json'):
# Zookeepr
return self.zoo(schedule,show)
if self.options.show =='depy_2016':
return self.amberapp(schedule,show)
if self.options.show =='linuxwochen_wien_2016':
return self.linuxwochen(schedule,show)
if self.options.show =='osdc2015':
return self.osdc2015(schedule,show)
if self.options.show =='djbp10':
return self.djbp10(schedule,show)
if self.options.show =='nodevember15':
return self.nodevember15(schedule,show)
if self.options.show =='nodevember14':
return self.nodevember14(schedule,show)
if self.options.show =='prodconf14':
return self.prodconf14(schedule,show)
if self.options.show =='kiwipycon2015':
# return self.veyepar(schedule,show)
return self.kiwipycon2015(schedule,show)
if self.options.show =='citycode15':
return self.citycode15(schedule,show)
if self.options.show =='chicago_erlang_factory_lite_2014':
return self.erlang_chi_2014(schedule,show)
if self.options.show =='pytexas2014':
return self.pytexas2014(schedule,show)
if self.options.show =='pyconza2015':
return self.summit_penta(schedule,show)
if self.options.show =='debconf15':
return self.summit_penta(schedule,show)
if self.options.show =='debconf16':
return self.summit_penta(schedule,show)
if self.options.show =='bosc_2014':
return self.bosc_2014(schedule,show)
if self.options.show =='wtd_NA_2014':
return self.wtd_na_2014(schedule,show)
if self.options.client =='fosdem':
return self.fosdem2014(schedule,show)
if self.options.client =='chipy':
return self.chipy_v3(schedule,show)
if self.options.show =='nodepdx2013':
return self.nodepdx(schedule,show)
if url.startswith("http://lanyrd.com"):
# if self.options.show =='write_the_docs_2013':
# if self.options.show =='write_the_docs_2016':
return self.lanyrd(schedule,show)
if self.options.show =='write_docs_na_2016':
# for Eric's email me a file process
return self.lanyrd(schedule,show)
if self.options.show in ['pyohio_2015',"pycon_2014_warmup"]:
return self.pyohio2013(schedule,show)
if self.options.show =='pygotham_2015':
return self.pygotham2015(schedule,show)
if self.options.show =='pyconca2013':
return self.pyconca2013(schedule,show)
if self.options.show =='pytn2014':
return self.pyconca2013(schedule,show)
if self.options.show =='pyconca2012':
return self.pyconca2012(schedule,show)
if self.options.show == 'pyconde2013':
# "same as last year"
return self.pyconde2012(schedule,show)
if self.options.show == 'pyconde2012':
return self.pyconde2012(schedule,show)
if self.options.show == 'pycon2013':
return self.pycon2013(schedule,show)
# if self.options.show =='chicagowebconf2012':
if url.endswith(".sched.org/api/session/export"):
# Sched.org Conference Mobile Apps
# Chicago Web Conf 2012
return self.sched(schedule,show)
if self.options.show == 'pyohio_2012':
# pyohio
return self.pyohio(schedule,show)
if self.options.show == 'scipy_2012':
# scipy ver 2
return self.scipy_v2(schedule,show)
if self.options.show == 'scipy_2012_v1':
# scipy ver 1
return self.scipy_v1(schedule,show)
if self.options.client == 'chipy':
# chipy
return self.chipy_v1(schedule,show)
if self.options.show == 'flourish_2012':
# flourish_2012
return self.flourish(schedule,show)
if self.options.show == 'pyconde2011':
# pycon.de 2011
return self.pyohio(schedule,show)
# return self.pyconde2011(schedule,show)
if self.options.show =='blinkon4':
return self.blinkon4(schedule,show)
if self.options.show =='depy_2015':
return self.depy15(schedule,show)
if self.options.show =='jupyter_chicago_2016':
return self.jupyter_chicago_2016(schedule,show)
if j.startswith('{"files": {'):
# doug pycon, used by py.au
return self.pctech(schedule,show)
if j.startswith('[{"pk": '):
# veyepar show export
return self.veyepar(schedule,show)
if j.startswith('[{"') and 'room_name' in schedule[0]:
# PyCon 2012
return self.symposium(schedule,show)
if j.startswith('[{"') and 'last_updated' in schedule[0]:
# pyohio
return self.pyohio(schedule,show)
if j.startswith('[{"') and 'start_iso' in schedule[0]:
# pyTexas
return self.pyohio(schedule,show)
if j.startswith('[{"') and 'talk_day_time' in schedule[0]:
# pyGotham
return self.pygotham(schedule,show)
if url.endswith('/program/2012/json'):
# some drupal thing
# 'ictev_2012': "http://ictev.vic.edu.au/program/2012/json",
# dig out the data from the nodes:[data]
schedule = [s['node'] for s in schedule['nodes']]
# pprint.pprint( schedule )
return self.ictev(schedule,show)
if self.options.show == 'ictev_2013':
# some drupal thing
# 'ictev_2013': "http://ictev.vic.edu.au/program/2013/json",
schedule = self.unfold_origami_unicorn( schedule )
# pprint.pprint( schedule )
# return self.dump_keys(schedule)
return self.ictev_2013(schedule,show)
if url.endswith('program/session-schedule/json'):
# ddu 2012
schedule = [s['session'] for s in schedule['ddu2012']]
# pprint.pprint( schedule )
s_keys = list(schedule[0].keys())
print(s_keys)
v_keys=('id',
'location','sequence',
'name','slug', 'authors','emails', 'description',
'start','duration',
'released', 'license', 'tags',
'conf_key', 'conf_url',
'host_url', 'public_url',
)
print([k for k in v_keys if k in s_keys])
print([k for k in v_keys if k not in s_keys])
return self.ddu(schedule,show)
def add_more_options(self, parser):
parser.add_option('--schedule',
help='URI of schedule data - gets stored in new show record' )
parser.add_option('-u', '--update', action="store_true",
help='update when diff, else print' )
parser.add_option('-k', '--keys', action="store_true",
help='dump keys of input stream' )
def work(self):
print("working")
if self.options.client and self.options.show:
client,created = Client.objects.get_or_create(slug=self.options.client)
if created:
client.name = self.options.client.capitalize()
client.save()
show,created = Show.objects.get_or_create(
client=client,slug=self.options.show)
if created:
show.name = self.options.show.capitalize()
show.schedule_url = self.options.schedule
show.save()
if self.options.whack:
# DRAGONS!
# clear out previous runs for this show
rfs = Raw_File.objects.filter(show=show)
if rfs and not self.options.force:
print("There are Raw Fiels... --force to whack.")
print(rfs)
print("whacking aborted.")
return False
rfs.delete()
Episode.objects.filter(show=show).delete()
self.show = show
self.one_show(show)
if __name__ == '__main__':
p=add_eps()
p.main()
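# Hedged usage sketch (not from the original; --client, --show and --verbose
# are assumed to come from the add_eps base class, which is not shown here):
#
#   python addeps.py --client=pycon --show=pycon2013 \
#       --schedule=https://us.pycon.org/2013/schedule/conference.json --update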
|
yoe/veyepar
|
dj/scripts/addeps.py
|
Python
|
mit
| 120,529
|
[
"Brian"
] |
1ddd1b5c327cc51efeeebaf463de3f79ef5028cfca0df52e660470dee9f0479e
|
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import itertools
import warnings
import numpy as np
from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, \
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_equal, assert_array_almost_equal, assert_approx_equal, \
assert_, rand, dec, TestCase, run_module_suite, assert_allclose, \
assert_raises, assert_array_almost_equal_nulp
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
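# Aside, not from the original tests: the broadcast/reshape idiom above
# builds the cartesian product of (n, k) pairs as an (N*K, 2) array.
"""
n = np.array([1, 2]); k = np.array([10, 20, 30])
pairs = np.array(np.broadcast_arrays(n[:, None], k[None, :])
                 ).reshape(2, -1).T
assert pairs.tolist() == [[1, 10], [1, 20], [1, 30],
                          [2, 10], [2, 20], [2, 30]]
"""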
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
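    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): for positive non-integer arguments, binom
    # reduces to a ratio of gamma functions, the identity the branch tests
    # above probe.
    def _example_binom_gamma(self):
        n, k = 5.2, 1.7
        assert_allclose(cephes.binom(n, k),
                        cephes.gamma(n + 1)/(cephes.gamma(k + 1)*cephes.gamma(n - k + 1)),
                        rtol=1e-12)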
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497, rtol=1e-14, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447, rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25), 8.4231316935498957e-21, rtol=1e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
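    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): diric(x, n) is the periodic sinc function
    # sin(n*x/2)/(n*sin(x/2)), which is why arguments near multiples of
    # 2*pi need the careful limit handling exercised in test_diric above.
    def _example_diric_closed_form(self):
        x = np.linspace(0.1, 2.0, 9)  # safely away from multiples of 2*pi
        n = 7
        assert_allclose(special.diric(x, n),
                        np.sin(n*x/2)/(n*np.sin(x/2)), rtol=1e-12)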
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
assert_equal(cephes.log1p(0),0.0)
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
        # Values from ACM TOMS 804 (derivative obtained by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
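    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): the Poisson cdf pdtr(k, m) equals the
    # regularized upper incomplete gamma function gammaincc(k+1, m);
    # test_pdtrik above inverts exactly this relation.
    def _example_pdtr_gammaincc(self):
        k, m = 3, 2.5
        assert_allclose(cephes.pdtr(k, m), cephes.gammaincc(k + 1, m),
                        rtol=1e-12)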
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
cephes.zeta(2,2)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
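    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): airye returns the Airy functions with their
    # exponential factors removed, eAi(z) = Ai(z)*exp(2/3*z**(3/2)) and,
    # for real z > 0, eBi(z) = Bi(z)*exp(-2/3*z**(3/2)); test_airye above
    # checks the same factors at z = 0.01.
    def _example_airye_scaling(self):
        z = 2.0
        a = special.airye(z)
        b = special.airy(z)
        assert_allclose(a[0], b[0]*exp(2.0/3.0*z*sqrt(z)), rtol=1e-10)
        assert_allclose(a[2], b[2]*exp(-2.0/3.0*z*sqrt(z)), rtol=1e-10)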
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),11)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),4)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics(TestCase):
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
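    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): combinations with repetition reduce to
    # ordinary combinations through C(n + k - 1, k), the identity behind
    # the repetition=True value checked above.
    def _example_comb_repetition(self):
        assert_equal(special.comb(10, 3, exact=True, repetition=True),
                     special.comb(10 + 3 - 1, 3, exact=True))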
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
assert_equal(special.ellipkm1(0.0), np.inf)
assert_equal(special.ellipkm1(1.0), pi/2)
assert_equal(special.ellipkm1(np.inf), 0.0)
assert_equal(special.ellipkm1(np.nan), np.nan)
assert_equal(special.ellipkm1(-1), np.nan)
assert_allclose(special.ellipk(-10), 0.7908718902387385)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipkinc(pi/2, 2), np.nan)
assert_equal(special.ellipkinc(0, 0.5), 0.0)
assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
def test_ellipkinc_2(self):
# Regression test for gh-3550
# ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipkinc(phi, mvals)
assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipkinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
def test_ellipkinc_singular(self):
# ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
xlog = np.logspace(-300, -17, 25)
xlin = np.linspace(1e-17, 0.1, 25)
xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
assert_equal(special.ellipe(0.0), pi/2)
assert_equal(special.ellipe(1.0), 1.0)
assert_equal(special.ellipe(-np.inf), np.inf)
assert_equal(special.ellipe(np.nan), np.nan)
assert_equal(special.ellipe(2), np.nan)
assert_allclose(special.ellipe(-10), 3.6391380384177689)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipeinc(pi/2, 2), np.nan)
assert_equal(special.ellipeinc(0, 0.5), 0.0)
assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
def test_ellipeinc_2(self):
# Regression test for gh-3550
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipeinc(phi, mvals)
assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipeinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
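    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): the consistency checks above rest on these
    # textbook identities, spelled out here at a single complex point.
    def _example_erf_family_identities(self):
        z = 0.7 + 0.3j
        assert_allclose(cephes.erfc(z), 1 - cephes.erf(z), rtol=1e-12)
        assert_allclose(cephes.erfcx(z), np.exp(z*z)*cephes.erfc(z), rtol=1e-12)
        assert_allclose(cephes.erfi(z), -1j*cephes.erf(1j*z), rtol=1e-12)
        assert_allclose(cephes.dawsn(z),
                        sqrt(pi)/2*np.exp(-z*z)*cephes.erfi(z), rtol=1e-12)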
def test_erfcinv(self):
i = special.erfcinv(1)
        # Use assert_array_equal instead of assert_equal, so the comparison
# of -0.0 and 0.0 doesn't fail.
assert_array_equal(i, 0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
        a = special.errprint()
        b = 1 - a  # a is the current state; 1-a inverts it
        c = special.errprint(b)  # set the inverted state; returns the previous state 'a'
        assert_equal(a, c)
        d = special.errprint(a)  # restore the original state
        assert_equal(d, b)  # confirms the inverted state was in effect
        # assert_equal(d,1-a)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions(TestCase):
def test_factorial(self):
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_equal(special.factorial(5, exact=True), 120)
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
    def test_hankel2e(self):
        hank2e = special.hankel2e(1,.1)
        hankrl2e = special.hankel2(1,.1)*exp(.1j)
        assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
        assert_allclose(x, expected.astype(complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
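    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): the first AMS 55 entry above is the
    # logarithm identity 2F1(1/2, 1; 3/2; z**2) = log((1+z)/(1-z))/(2*z),
    # checked here at a fresh point.
    def _example_hyp2f1_log_identity(self):
        z = 0.37
        assert_allclose(special.hyp2f1(0.5, 1, 1.5, z**2),
                        log((1 + z)/(1 - z))/(2*z), rtol=1e-12)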
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
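    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): for large z, U(a, b, z) ~ z**(-a) (leading
    # term of DLMF 13.7.3), which makes the small value at the top of
    # test_hyperu plausible: hyperu(1, 0.1, 100) is close to 1/100.
    def _example_hyperu_large_z(self):
        a, b, z = 1.0, 0.1, 100.0
        assert_allclose(special.hyperu(a, b, z), z**(-a), rtol=0.05)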
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
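    # Illustrative sketch, not part of the original suite (the _example_*
    # helper name is ad hoc): values returned by jn_zeros really are roots
    # of the corresponding Bessel function.
    def _example_jn_zeros_are_roots(self):
        z = special.jn_zeros(2, 4)
        assert_allclose(special.jn(2, z), 0, atol=1e-10)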
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*rand() - 1
b = 5*rand() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # kvp must satisfy the first-derivative recurrence above
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
allzeros = r_[yo,zo]
allval = r_[ypo,zpo]
assert_array_almost_equal(abs(special.yv(0.0,allzeros)),0.0,11)
assert_array_almost_equal(abs(special.yv(1,allzeros)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
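# Sketch of the math being coded: the modified Bessel power series is
# I_v(z) = sum_k (z/2)**(v+2*k) / (k! * Gamma(v+k+1)); the terms are
# accumulated in log space for numerical stability, and `err` is a rough
# truncation-plus-roundoff estimate.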
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*rand()-0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c, [1])
assert_equal(leg1.c, [1,0])
assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
# Q not defined; routine appears broken and the proper reporting order is unclear
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
pbv = special.pbdv(1,.2)
# recurrence: D_v'(x) = v*D_{v-1}(x) - (x/2)*D_v(x)
derrl = special.pbdv(0,.2)[0] - 0.5*(.2)*special.pbdv(1,.2)[0]
assert_almost_equal(pbv[1], derrl, 10)
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
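# Illustration of the round-half-to-even rule (platform permitting, per the
# caveat above): 10.5 and 11.5 both have a fractional part of exactly 0.5,
# so they round to their even neighbours:
#     int(special.round(10.5)) == 10
#     int(special.round(11.5)) == 12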
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
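# Editor's note: the argument order is sph_harm(m, n, theta, phi), with
# theta the azimuthal angle and phi the polar angle, so each value below
# is Y_n^m(theta, phi) from the referenced table.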
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_in_kn_order0(self):
x = 1.
sph_i0 = special.sph_in(0, x)
sph_i0_expected = np.array([np.sinh(x)/x,
np.cosh(x)/x-np.sinh(x)/x**2])
assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
sph_k0 = special.sph_kn(0, x)
sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
-0.5*pi*exp(-x)*(1/x+1/x**2)])
assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
sph_i0k0 = special.sph_inkn(0, x)
assert_array_almost_equal(r_[sph_i0+sph_k0],
r_[sph_i0k0],
10)
def test_sph_jn(self):
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 # analytic derivative value
assert_almost_equal(sy1,-377.52483,5) # reference values
assert_almost_equal(sy2,-4.9003329,5)
sy3 = special.sph_yn(1,.2)[1][1]
assert_almost_equal(sy3,sphpy,4) # compare returned derivative against the analytic value
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err, err_msg=(v, z))
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
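# e.g. bdtrc(1.8, 2.8, 0.3) is evaluated as bdtrc(1, 2, 0.3), which is
# exactly what each assertion below checks.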
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
def test_huber():
assert_equal(special.huber(-1, 1.5), np.inf)
assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
def xfunc(delta, r):
if delta < 0:
return np.inf
elif np.abs(r) < delta:
return 0.5 * np.square(r)
else:
return delta * (np.abs(r) - 0.5 * delta)
z = np.random.randn(10, 2)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
def xfunc(delta, r):
if delta < 0:
return np.inf
elif (not delta) or (not r):
return 0
else:
return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
run_module_suite()
|
witcxc/scipy
|
scipy/special/tests/test_basic.py
|
Python
|
bsd-3-clause
| 119,513
|
[
"Elk"
] |
91f57bb651ae6e9c109a1321aee661a2f0a75fdafeba043f8ad484f670b46dd1
|
#!/usr/bin/python
"""Test of the fix for bug 568768"""
from macaroon.playback import *
import utils
sequence = MacroSequence()
########################################################################
# Load the local test case.
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(TypeAction(utils.htmlURLPrefix + "orca-wiki.html#head-a269540f0f3a25d25e08216f0438ee743a3ebe88"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
########################################################################
# Down Arrow to the next line, which should be the line after the
# About heading.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Orca is a free, open source, flexible, extensible, and'",
" VISIBLE: 'Orca is a free, open source, fle', cursor=1",
"SPEECH OUTPUT: 'Orca is a free, open source, flexible, extensible, and '"]))
########################################################################
# Move to the location bar by pressing Control+L. When it has focus
# type "about:blank" and press Return to restore the browser to the
# conditions at the test's start.
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(TypeAction("about:blank"))
sequence.append(KeyComboAction("Return"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
h4ck3rm1k3/orca-sonar
|
test/keystrokes/firefox/bug_568768.py
|
Python
|
lgpl-2.1
| 1,454
|
[
"ORCA"
] |
35af77bebeb45dc2ba21de539c71bf5973f0d59addb1a2f77a3c0307092e8f58
|
""" test File Plugin
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import mock
import unittest
import tempfile
import os
import shutil
import errno
from DIRAC import S_OK
from DIRAC.Resources.Storage.StorageElement import StorageElementItem
def mock_StorageFactory_getConfigStorageName(storageName, referenceType, seConfigPath=None):
resolvedName = storageName
return S_OK(resolvedName)
def mock_StorageFactory_getConfigStorageOptions(storageName, derivedStorageName=None, seConfigPath=None):
"""Get the options associated to the StorageElement as defined in the CS"""
optionsDict = {
"BackendType": "local",
"ReadAccess": "Active",
"WriteAccess": "Active",
"AccessProtocols": ["file"],
"WriteProtocols": ["file"],
}
return S_OK(optionsDict)
def mock_StorageFactory_getConfigStorageProtocols(storageName, derivedStorageName=None, seConfigPath=None):
"""Protocol specific information is present as sections in the Storage configuration"""
protocolDetails = {
"Section": {
"Host": "",
"Path": "/tmp/se",
"PluginName": "File",
"Port": "",
"Protocol": "file",
"SpaceToken": "",
"WSUrl": "",
}
}
return S_OK(protocolDetails)
class TestBase(unittest.TestCase):
"""Base test class. Defines all the method to test"""
@mock.patch(
"DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageName",
side_effect=mock_StorageFactory_getConfigStorageName,
)
@mock.patch(
"DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageOptions",
side_effect=mock_StorageFactory_getConfigStorageOptions,
)
@mock.patch(
"DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageProtocols",
side_effect=mock_StorageFactory_getConfigStorageProtocols,
)
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def setUp(
self,
mk_getConfigStorageName,
mk_getConfigStorageOptions,
mk_getConfigStorageProtocols,
mk_isLocalSE,
mk_addAccountingOperation,
):
self.se = StorageElementItem("FAKE")
self.se.vo = "test"
self.basePath = tempfile.mkdtemp(dir="/tmp")
# Update the basePath of the plugin
self.se.storages[0].basePath = self.basePath
self.srcPath = tempfile.mkdtemp(dir="/tmp")
self.destPath = tempfile.mkdtemp(dir="/tmp")
self.existingFile = "/test/file.txt"
self.existingFileSize = 0
self.nonExistingFile = "/test/nonExistingFile.txt"
self.subDir = "/test/subDir"
self.subFile = os.path.join(self.subDir, "subFile.txt")
self.subFileSize = 0
self.FILES = [self.existingFile, self.nonExistingFile, self.subFile]
self.DIRECTORIES = [self.subDir]
self.ALL = self.FILES + self.DIRECTORIES
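# Fixture layout (editor's sketch): srcPath holds file.txt and
# subDir/subFile.txt, basePath acts as the fake SE's root, and destPath
# receives downloads; the '/test' LFN prefix maps onto these local paths.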
with open(os.path.join(self.srcPath, self.existingFile.replace("/test/", "")), "w") as f:
f.write("I put something in the file so that it has a size\n")
self.existingFileSize = os.path.getsize(os.path.join(self.srcPath, self.existingFile.replace("/test/", "")))
assert self.existingFileSize
os.mkdir(os.path.join(self.srcPath, os.path.basename(self.subDir)))
with open(os.path.join(self.srcPath, self.subFile.replace("/test/", "")), "w") as f:
f.write("This one should have a size as well\n")
self.subFileSize = os.path.getsize(os.path.join(self.srcPath, self.subFile.replace("/test/", "")))
assert self.subFileSize
def tearDown(self):
shutil.rmtree(self.basePath)
shutil.rmtree(self.srcPath)
shutil.rmtree(self.destPath)
def walkAll(self):
for dirname in [self.basePath, self.destPath]:
self.walkPath(dirname)
def walkPath(self, path):
for root, dirs, files in os.walk(path):
print(root)
print(" dirs")
for d in dirs:
print(" ", os.path.join(root, d))
print(" files")
for f in files:
print(" ", os.path.join(root, f))
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def test_01_getURL(self, mk_isLocalSE, mk_addAccounting):
"""Testing getURL"""
# Testing the getURL
res = self.se.getURL(self.ALL)
self.assertTrue(res["OK"], res)
self.assertTrue(not res["Value"]["Failed"], res["Value"]["Failed"])
self.assertTrue(len(res["Value"]["Successful"]) == len(self.ALL))
for lfn, url in res["Value"]["Successful"].items():
self.assertEqual(url, self.basePath.rstrip("/") + lfn)
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def test_02_FileTest(self, mk_isLocalSE, mk_addAccounting):
"""Testing createDirectory"""
# Putting the files
def localPutFile(fn, size=0):
"""If fn is '/test/fn.txt', it calls
{ '/test/fn.txt' : /tmp/generatedPath/fn.txt}
"""
transfDic = {fn: os.path.join(self.srcPath, fn.replace("/test/", ""))}
return self.se.putFile(transfDic, sourceSize=size)
# wrong size
res = localPutFile(self.existingFile, size=-1)
self.assertTrue(res["OK"], res)
self.assertTrue(self.existingFile in res["Value"]["Failed"], res)
self.assertTrue("not match" in res["Value"]["Failed"][self.existingFile], res)
self.assertTrue(not os.path.exists(self.basePath + self.existingFile))
# Correct size
res = localPutFile(self.existingFile, size=self.existingFileSize)
self.assertTrue(res["OK"], res)
self.assertTrue(self.existingFile in res["Value"]["Successful"], res)
self.assertTrue(os.path.exists(self.basePath + self.existingFile))
# No size
res = localPutFile(self.existingFile)
self.assertTrue(res["OK"], res)
self.assertTrue(self.existingFile in res["Value"]["Successful"], res)
self.assertTrue(os.path.exists(self.basePath + self.existingFile))
# No existing source file
res = localPutFile(self.nonExistingFile)
self.assertTrue(res["OK"], res)
self.assertTrue(self.nonExistingFile in res["Value"]["Failed"], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
# sub file
res = localPutFile(self.subFile)
self.assertTrue(res["OK"], res)
self.assertTrue(self.subFile in res["Value"]["Successful"], res)
self.assertTrue(os.path.exists(self.basePath + self.subFile))
# Directory
res = localPutFile(self.subDir)
self.assertTrue(res["OK"], res)
self.assertTrue(self.subDir in res["Value"]["Failed"])
self.assertTrue(
os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir] or
# Python 3.9.7+ improved the Exception that is raised
"Directory does not exist" in res["Value"]["Failed"][self.subDir],
res,
)
res = self.se.exists(self.FILES)
self.assertTrue(res["OK"], res)
self.assertTrue(not res["Value"]["Failed"], res)
self.assertTrue(res["Value"]["Successful"][self.existingFile], res)
self.assertTrue(not res["Value"]["Successful"][self.nonExistingFile], res)
res = self.se.getFileSize(self.ALL)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.existingFile], self.existingFileSize)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir], res)
res = self.se.getFileMetadata(self.ALL)
self.assertTrue(res["OK"], res)
self.assertTrue(self.existingFile in res["Value"]["Successful"])
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir], res)
res = self.se.isFile(self.ALL)
self.assertTrue(res["OK"], res)
self.assertTrue(res["Value"]["Successful"][self.existingFile], res)
self.assertTrue(not res["Value"]["Successful"][self.subDir], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
res = self.se.getFile(self.ALL, localPath=self.destPath)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.existingFile], self.existingFileSize)
self.assertTrue(os.path.exists(os.path.join(self.destPath, os.path.basename(self.existingFile))))
self.assertEqual(res["Value"]["Successful"][self.subFile], self.subFileSize)
self.assertTrue(os.path.exists(os.path.join(self.destPath, os.path.basename(self.subFile))))
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(
os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir] or
# Python 3.9.7+ improved the Exception that is raised
"Directory does not exist" in res["Value"]["Failed"][self.subDir],
res,
)
res = self.se.removeFile(self.ALL)
self.assertTrue(res["OK"], res)
self.assertTrue(res["Value"]["Successful"][self.existingFile])
self.assertTrue(not os.path.exists(self.basePath + self.existingFile))
self.assertTrue(res["Value"]["Successful"][self.subFile])
self.assertTrue(not os.path.exists(self.basePath + self.subFile))
self.assertTrue(res["Value"]["Successful"][self.nonExistingFile])
self.assertTrue(os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir])
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def test_03_createDirectory(self, mk_isLocalSE, mk_addAccounting):
"""Testing creating directories"""
res = self.se.createDirectory(self.subDir)
self.assertTrue(res["OK"], res)
self.assertTrue(self.subDir in res["Value"]["Successful"])
self.assertTrue(os.path.exists(self.basePath + self.subDir))
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def test_04_putDirectory(self, mk_isLocalSE, mk_addAccounting):
"""Testing putDirectory"""
nonExistingDir = "/test/forsuredoesnotexist"
localdirs = ["/test", nonExistingDir]
# Correct size
res = self.se.putDirectory({"/test": self.srcPath})
self.assertTrue(res["OK"], res)
self.assertTrue("/test" in res["Value"]["Successful"], res)
self.assertEqual(
res["Value"]["Successful"]["/test"], {"Files": 2, "Size": self.existingFileSize + self.subFileSize}
)
self.assertTrue(os.path.exists(self.basePath + "/test"))
self.assertTrue(os.path.exists(self.basePath + self.existingFile))
self.assertTrue(os.path.exists(self.basePath + self.subFile))
# No existing source directory
res = self.se.putDirectory({"/test": nonExistingDir})
self.assertTrue(res["OK"], res)
self.assertTrue("/test" in res["Value"]["Failed"], res)
self.assertEqual(res["Value"]["Failed"]["/test"], {"Files": 0, "Size": 0})
# sub file
res = self.se.putDirectory({"/test": self.existingFile})
self.assertTrue(res["OK"], res)
self.assertTrue("/test" in res["Value"]["Failed"], res)
self.assertEqual(res["Value"]["Failed"]["/test"], {"Files": 0, "Size": 0})
res = self.se.exists(self.DIRECTORIES + localdirs)
self.assertTrue(res["OK"], res)
self.assertTrue(not res["Value"]["Failed"], res)
self.assertTrue(res["Value"]["Successful"][self.subDir], res)
self.assertTrue(not res["Value"]["Successful"][nonExistingDir], res)
res = self.se.getDirectorySize(self.ALL + localdirs)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.subDir], {"Files": 1, "Size": self.subFileSize, "SubDirs": 0})
self.assertEqual(res["Value"]["Successful"]["/test"], {"Files": 1, "Size": self.existingFileSize, "SubDirs": 1})
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.ENOTDIR) in res["Value"]["Failed"][self.existingFile], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][nonExistingDir], res)
res = self.se.getDirectoryMetadata(self.ALL + localdirs)
self.assertTrue(res["OK"], res)
self.assertTrue(self.subDir in res["Value"]["Successful"])
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][nonExistingDir], res)
self.assertTrue(os.strerror(errno.ENOTDIR) in res["Value"]["Failed"][self.existingFile], res)
res = self.se.isDirectory(self.ALL + localdirs)
self.assertTrue(res["OK"], res)
self.assertTrue(not res["Value"]["Successful"][self.existingFile])
self.assertTrue(res["Value"]["Successful"][self.subDir], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][nonExistingDir], res)
res = self.se.listDirectory(self.ALL + localdirs)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.subDir], {"Files": [self.subFile], "SubDirs": []})
self.assertEqual(res["Value"]["Successful"]["/test"], {"Files": [self.existingFile], "SubDirs": [self.subDir]})
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.ENOTDIR) in res["Value"]["Failed"][self.existingFile], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][nonExistingDir], res)
res = self.se.getDirectory(self.ALL + localdirs, localPath=self.destPath)
self.assertTrue(res["OK"], res)
self.assertEqual(
res["Value"]["Successful"]["/test"], {"Files": 2, "Size": self.existingFileSize + self.subFileSize}
)
self.assertTrue(os.path.exists(self.destPath + self.existingFile))
self.assertTrue(os.path.exists(self.destPath + self.subFile))
self.assertEqual(res["Value"]["Successful"][self.subDir], {"Files": 1, "Size": self.subFileSize})
self.assertTrue(os.path.exists(self.destPath + self.subFile.replace("/test", "")))
self.assertEqual(res["Value"]["Failed"][self.nonExistingFile], {"Files": 0, "Size": 0})
self.assertEqual(res["Value"]["Failed"][self.existingFile], {"Files": 0, "Size": 0})
self.assertEqual(res["Value"]["Failed"][nonExistingDir], {"Files": 0, "Size": 0})
res = self.se.removeDirectory(nonExistingDir, recursive=False)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][nonExistingDir], True)
res = self.se.removeDirectory(nonExistingDir, recursive=True)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Failed"][nonExistingDir], {"FilesRemoved": 0, "SizeRemoved": 0})
res = self.se.removeDirectory(self.nonExistingFile, recursive=False)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.nonExistingFile], True)
res = self.se.removeDirectory(self.nonExistingFile, recursive=True)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Failed"][self.nonExistingFile], {"FilesRemoved": 0, "SizeRemoved": 0})
res = self.se.removeDirectory(self.existingFile, recursive=False)
self.assertTrue(res["OK"], res)
self.assertTrue(os.strerror(errno.ENOTDIR) in res["Value"]["Failed"][self.existingFile], res)
res = self.se.removeDirectory(self.existingFile, recursive=True)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Failed"][self.existingFile], {"FilesRemoved": 0, "SizeRemoved": 0})
res = self.se.removeDirectory("/test", recursive=False)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"]["/test"], True)
self.assertTrue(not os.path.exists(self.basePath + self.existingFile))
self.assertTrue(os.path.exists(self.basePath + self.subFile))
res = self.se.removeDirectory("/test", recursive=True)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"]["/test"], {"FilesRemoved": 1, "SizeRemoved": self.subFileSize})
self.assertTrue(not os.path.exists(self.basePath + "/test"))
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestBase)
unittest.TextTestRunner(verbosity=2).run(suite)
|
ic-hep/DIRAC
|
src/DIRAC/Resources/Storage/test/Test_FilePlugin.py
|
Python
|
gpl-3.0
| 18,616
|
[
"DIRAC"
] |
edee6292cdd0e65fd64099653a82c7e322a536c749d048b8ca2f24b9f4291f8b
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# ## BEGIN LICENSE
# Copyright (c) 2012, Peter Levi <peterlevi@peterlevi.com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import string
from gi.repository import GObject, Gdk, Gtk
import hashlib
from requests.exceptions import HTTPError, RequestException
import io
import webbrowser
import re
from variety.Util import Util, throttle, cache
from variety.Options import Options
from variety.Stats import Stats
from variety.SmartFeaturesNoticeDialog import SmartFeaturesNoticeDialog
from variety.SmartRegisterDialog import SmartRegisterDialog
from variety.AttrDict import AttrDict
from variety.ImageFetcher import ImageFetcher
from variety import _, _u
import os
import logging
import random
import json
import base64
import threading
import time
import sys
random.seed()
logger = logging.getLogger('variety')
class Smart:
SITE_URL = 'http://localhost:4000' if '--debug-smart' in sys.argv else 'https://vrty.org'
API_URL = SITE_URL + '/api'
META_KEYS_MAP = {
'sourceURL': 'origin_url',
'imageURL': 'image_url',
'sourceType': 'source_type',
'sourceLocation': 'source_location',
'sourceName': 'source_name',
'authorURL': 'author_url',
'sfwRating': 'sfw_rating',
}
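# Maps Variety's camelCase metadata keys to the snake_case field names
# the vrty.org upload API expects (see _do_report_file).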
def __init__(self, parent):
Smart.instance = self
self.parent = parent
self.user = None
self.load_user_lock = threading.Lock()
try:
self.load_user(create_if_missing=False)
except:
logger.exception(lambda: "Smart: Cound not load user during init")
@classmethod
def get_instance(cls):
return Smart.instance
def reload(self):
if not self.is_smart_enabled():
self._reset_sync()
return
try:
if self.smart_settings_changed():
self.load_user(create_if_missing=False, force_reload=True)
self.sync()
elif self.parent.previous_options.sources != self.parent.options.sources:
self.sync_sources(in_thread=True)
except:
logger.exception(lambda: "Smart: Exception in reload:")
def get_profile_url(self):
if self.user:
return "%s/login/%s?authkey=%s" % (Smart.SITE_URL, self.user["id"], self.user.get('authkey', ''))
else:
return None
def get_register_url(self, source):
if self.user:
return '%s/user/%s/register?authkey=%s&source=%s' % (Smart.SITE_URL, self.user['id'], self.user['authkey'], source)
else:
return '%s/register?source=%s' % (Smart.SITE_URL, source)
def smart_settings_changed(self):
return self.parent.previous_options is None or \
self.parent.previous_options.smart_enabled != self.parent.options.smart_enabled or \
self.parent.previous_options.sync_enabled != self.parent.options.sync_enabled or \
self.parent.previous_options.favorites_folder != self.parent.options.favorites_folder
def load_user(self, create_if_missing=True, force_reload=False):
with self.load_user_lock:
if not self.user or force_reload:
self.user = None
try:
with io.open(os.path.join(self.parent.config_folder, 'smart_user.json'), encoding='utf8') as f:
data = f.read()
try:
self.user = AttrDict(json.loads(data))
except:
logger.exception(lambda: "Smart: Could not json-parse smart_user.json. Broken file? "
"Please report this error to peterlevi@peterlevi.com. Thanks.")
self.parent.show_notification(_("Your smart_user.json config file appears broken. "
"You may have to login again to VRTY.ORG."))
raise IOError("Could not json-parse smart_user.json")
if self.parent.preferences_dialog:
self.parent.preferences_dialog.on_smart_user_updated()
logger.info(lambda: 'smart: Loaded smart user: %s' % self.user["id"])
except IOError:
if create_if_missing:
logger.info(lambda: 'smart: Missing smart_user.json, creating new smart user')
self.new_user()
def new_user(self):
try:
logger.info(lambda: 'smart: Creating new smart user')
self._reset_sync()
self.user = Util.fetch_json(Smart.API_URL + '/newuser')
self.save_user()
if self.parent.preferences_dialog:
GObject.idle_add(self.parent.preferences_dialog.on_smart_user_updated)
logger.info(lambda: 'smart: Created smart user: %s' % self.user["id"])
except:
logger.error(lambda: 'smart: Error creating new smart user')
raise
def save_user(self):
with io.open(os.path.join(self.parent.config_folder, 'smart_user.json'), 'w', encoding='utf8') as f:
f.write(json.dumps(self.user, indent=4, ensure_ascii=False, encoding='utf8'))
def set_user(self, user):
logger.info(lambda: 'smart: Setting new smart user')
# keep machine-dependent settings from current user
if self.user:
for key in ("machine_id", "machine_label"):
if key in self.user:
user[key] = self.user[key]
self.user = user
if self.parent.preferences_dialog:
GObject.idle_add(self.parent.preferences_dialog.on_smart_user_updated)
with open(os.path.join(self.parent.config_folder, 'smart_user.json'), 'w') as f:
json.dump(self.user, f, ensure_ascii=False, indent=2)
logger.info(lambda: 'smart: Updated smart user: %s' % self.user["id"])
self.sync()
def report_trash(self, origin_url):
if not self.is_smart_enabled():
return
try:
self.load_user()
user = self.user
logger.info(lambda: "smart: Reporting %s as trash" % origin_url)
try:
url = Smart.API_URL + '/upload/' + user['id'] + '/trash'
result = Util.fetch(url, {'image': json.dumps({'origin_url': origin_url}), 'authkey': user['authkey']})
logger.info(lambda: "smart: Reported, server returned: %s" % result)
return
except HTTPError, e:
self.handle_user_http_error(e)
except Exception:
logger.exception(lambda: "smart: Could not report %s as trash" % url)
def report_file(self, filename, mark, async=True, upload_full_image=False, needs_reupload=False):
if not self.is_smart_enabled():
return
def _go():
self._do_report_file(filename, mark=mark, sfw_rating=None,
upload_full_image=upload_full_image, needs_reupload=needs_reupload, allow_anon=False)
_go() if not async else threading.Timer(0, _go).start()
def report_sfw_rating(self, filename, sfw_rating, async=True):
def _go():
self._do_report_file(filename, mark=None, sfw_rating=sfw_rating,
upload_full_image=False, needs_reupload=False, allow_anon=True)
_go() if not async else threading.Timer(0, _go).start()
def handle_user_http_error(self, e):
logger.error(lambda: "smart: Server returned %d, potential reason - server failure?" % e.response.status_code)
if e.response.status_code in (403, 404):
self.parent.show_notification(
_('Your VRTY.ORG credentials are probably outdated. Please login again.'))
Util.add_mainloop_task(self.parent.preferences_dialog.on_btn_login_register_clicked)
raise e
@staticmethod
def fix_origin_url(origin_url):
if origin_url and '//picasaweb.google.com' in origin_url and '?' in origin_url:
origin_url = origin_url[:origin_url.rindex('?')]
return origin_url
@staticmethod
def fill_missing_meta_info(filename, meta):
try:
if 'imageURL' not in meta:
image_url = Util.guess_image_url(meta)
if image_url:
meta['imageURL'] = image_url
Util.write_metadata(filename, meta)
if 'sourceType' not in meta:
source_type = Util.guess_source_type(meta)
if source_type:
meta['sourceType'] = source_type
Util.write_metadata(filename, meta)
if 'headline' not in meta:
origin_url = meta['sourceURL']
if 'flickr.com' in origin_url:
from variety.FlickrDownloader import FlickrDownloader
extra_meta = FlickrDownloader.get_extra_metadata(origin_url)
meta.update(extra_meta)
Util.write_metadata(filename, meta)
except:
logger.exception(lambda: 'Could not fill missing meta-info')
def _do_report_file(self, filename, mark, sfw_rating, attempt=1,
upload_full_image=False, needs_reupload=False, allow_anon=False):
if not allow_anon and not self.is_smart_enabled():
return
try:
self.load_user(create_if_missing=not allow_anon)
user = self.user
meta = Util.read_metadata(filename)
if not meta or not "sourceURL" in meta:
return # we only smart-report images coming from Variety online sources, not local images
origin_url = Smart.fix_origin_url(meta['sourceURL'])
if mark and not (upload_full_image or needs_reupload):
                # Attempt quick-marking using just the computed image ID - will only succeed if the image already exists on the server
try:
logger.info(lambda: "smart: Quick-reporting %s as '%s'" % (filename, mark))
imageid = self.get_image_id(origin_url)
report_url = Smart.API_URL + '/mark/%s/%s/+%s' % (user['id'], imageid, mark)
result = Util.fetch(report_url, {
'authkey': user['authkey'],
'action_source': 'Linux Client, ' + mark
})
logger.info(lambda: "smart: Quick-reported, server returned: %s" % result)
if 'needs_reupload' in result:
logger.info(lambda: "smart: Server requested full image data, "
"performing full report")
else:
return
except:
logger.info(lambda: "smart: Image unknown to server, performing full report")
width, height = Util.get_size(filename)
Smart.fill_missing_meta_info(filename, meta)
image_url = meta.get('imageURL', None)
image = {
'width': width,
'height': height,
'filename': os.path.basename(filename),
'origin_url': origin_url,
'image_url': image_url,
}
if mark == 'favorite':
image['thumbnail'] = base64.b64encode(Util.get_thumbnail_data(filename, 1024, 1024))
for key, value in meta.items():
server_key = Smart.META_KEYS_MAP.get(key, key)
if not server_key in image:
image[server_key] = value
if sfw_rating is not None:
image['sfw_rating'] = sfw_rating
logger.info(lambda: "smart: Reporting %s as mark '%s', sfw rating %s" % (filename, mark, sfw_rating))
# check for dead links and upload full image in that case (happens with old favorites):
if upload_full_image or (mark == 'favorite' and Util.is_dead_or_not_image(image_url)):
if upload_full_image:
logger.info(lambda: 'smart: Including full image in upload per server request')
else:
logger.info(lambda: 'smart: Including full image in upload as image link seems dead: %s, sourceURL: %s' %
(image_url, origin_url))
                with open(filename, 'rb') as f:  # read as binary before base64-encoding
image['full_image'] = base64.b64encode(f.read())
if mark:
report_url = Smart.API_URL + '/upload/%s/%s' % (user['id'], mark)
else:
report_url = Smart.API_URL + '/upload/%s' % (user['id'] if user else '-anonymous')
try:
result = Util.fetch(report_url, {
'image': json.dumps(image),
'authkey': user['authkey'] if user else None,
'action_source': 'Linux Client, ' + ('SFW Rating' if sfw_rating is not None else mark)
})
logger.info(lambda: "smart: Reported, server returned: %s" % result)
return
except HTTPError, e:
self.handle_user_http_error(e)
if attempt == 1:
self._do_report_file(filename, mark, sfw_rating, attempt + 1)
else:
logger.exception(lambda: "smart: Could not report %s as mark '%s', rating '%s', server error code %s'" % (
filename, mark, sfw_rating, e.response.status_code))
except Exception:
logger.exception(lambda: "smart: Could not report %s as mark '%s', rating '%s'" % (filename, mark, sfw_rating))
def show_notice_dialog(self):
# Show Smart Variety notice
dialog = SmartFeaturesNoticeDialog()
def _done():
self.parent.options.smart_notice_shown = True
self.parent.options.write()
self.parent.reload_config()
dialog.destroy()
self.parent.dialogs.remove(dialog)
def _on_ok(button):
self.parent.options.smart_enabled = dialog.ui.smart_enabled.get_active()
if self.parent.options.smart_enabled:
for s in self.parent.options.sources:
if s[1] in (Options.SourceType.RECOMMENDED,):
s[0] = True
_done()
def _on_no(*args):
self.parent.options.smart_enabled = False
_done()
dialog.ui.btn_ok.connect("clicked", _on_ok)
dialog.ui.btn_no.connect("clicked", _on_no)
dialog.connect("delete-event", _on_no)
self.parent.dialogs.append(dialog)
dialog.run()
def show_register_dialog(self):
self.load_user(create_if_missing=False)
if self.is_registered():
self.parent.options.smart_register_shown = True
self.parent.options.write()
return
self.register_dialog = SmartRegisterDialog()
def _register_link(*args):
self.register_dialog.ui.register_error.set_visible(False)
self.register_dialog.ui.register_spinner.set_visible(True)
self.register_dialog.ui.register_spinner.start()
def _register():
error = False
try:
self.load_user(create_if_missing=True)
webbrowser.open_new_tab(self.get_register_url('variety_register_dialog'))
except IOError:
error = True
finally:
def _stop_spinner():
self.register_dialog.ui.register_spinner.set_visible(False)
self.register_dialog.ui.register_spinner.stop()
self.register_dialog.ui.register_error.set_visible(error)
self.register_dialog.ui.register_message.set_visible(not error)
GObject.idle_add(_stop_spinner)
threading.Timer(0, _register).start()
self.register_dialog.ui.btn_register.connect('activate-link', _register_link)
self.parent.dialogs.append(self.register_dialog)
self.register_dialog.run()
result = self.register_dialog.result
try:
self.parent.dialogs.remove(self.register_dialog)
except:
pass
self.register_dialog.destroy()
self.register_dialog = None
if not self.parent.running:
return
self.parent.options.smart_register_shown = True
self.parent.options.write()
if result == 'login':
self.parent.preferences_dialog.on_btn_login_register_clicked()
def load_syncdb(self):
logger.debug(lambda: "sync: Loading syncdb")
syncdb_file = os.path.join(self.parent.config_folder, 'syncdb.json')
try:
with io.open(syncdb_file, encoding='utf8') as f:
data = f.read()
syncdb = AttrDict(json.loads(data))
except:
syncdb = AttrDict(version=1, local={}, remote={})
return syncdb
@throttle(seconds=5, trailing_call=True)
def write_syncdb(self, syncdb):
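        # @throttle caps disk churn: the sync loops call this after every processed
        # image, but at most one write happens per 5 seconds, with a trailing call
        # so the final state still reaches disk.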
syncdb_file = os.path.join(self.parent.config_folder, 'syncdb.json')
with io.open(syncdb_file, "w", encoding='utf8') as f:
f.write(json.dumps(syncdb.asdict(), indent=4, ensure_ascii=False, encoding='utf8'))
@staticmethod
def get_image_id(url):
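        # Stable 10-character image ID: md5 of the origin URL, urlsafe-base64
        # encoded, then '-'/'_' mapped to 'a'/'b' and lowercased so the ID is safe
        # in URLs and case-insensitive contexts.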
return base64.urlsafe_b64encode(hashlib.md5(url).digest())[:10].replace('-', 'a').replace('_', 'b').lower()
@staticmethod
def random_id():
return ''.join([random.choice(string.ascii_lowercase + string.digits) for _ in range(10)])
def is_smart_enabled(self):
return self.parent.options.smart_notice_shown and self.parent.options.smart_enabled
def is_registered(self):
return self.user is not None and self.user.get("username") is not None
def is_sync_enabled(self):
return self.is_smart_enabled() and self.is_registered() and self.parent.options.sync_enabled
def sync_sources(self, in_thread=False):
if not self.is_smart_enabled():
return
def _run():
try:
logger.info(lambda: "sync: Syncing image sources")
try:
self.load_user(create_if_missing=True)
except:
logger.exception(lambda: "sync: Could not load or create smart user")
return
sources = [{'enabled': s[0], 'type': Options.type_to_str(s[1]), 'location': s[2]}
for s in self.parent.options.sources if s[1] in Options.SourceType.dl_types]
data = {'sources': sources, 'machine_type': Util.get_os_name()}
if "machine_id" in self.user:
data["machine_id"] = self.user["machine_id"]
try:
sync_url = '%s/user/%s/sync-sources?authkey=%s' % (Smart.API_URL, self.user["id"], self.user["authkey"])
server_data = AttrDict(Util.fetch_json(sync_url, {'data': json.dumps(data)}))
self.user["machine_id"] = server_data["machine_id"]
self.user["machine_label"] = server_data["machine_label"]
self.save_user()
except HTTPError, e:
self.handle_user_http_error(e)
raise
except:
logger.exception(lambda: "smart: Could not sync sources")
if in_thread:
sync_sources_thread = threading.Thread(target=_run)
sync_sources_thread.daemon = True
sync_sources_thread.start()
else:
_run()
def _reset_sync(self):
self.sync_hash = Util.random_hash() # stop current sync if running
self.last_synced = 0
def sync(self):
if not self.is_smart_enabled():
return
self._reset_sync()
current_sync_hash = self.sync_hash
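        # Capture the hash now; every loop below compares it against self.sync_hash
        # and aborts if a newer sync() call has superseded this run.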
def _run():
logger.info(lambda: 'sync: Started, hash %s' % current_sync_hash)
try:
self.load_user(create_if_missing=True)
except:
logger.exception(lambda: "sync: Could not load or create smart user")
return
self.sync_sources(in_thread=False)
try:
logger.info(lambda: "sync: Fetching serverside data")
try:
sync_url = '%s/user/%s/sync?authkey=%s' % (Smart.API_URL, self.user["id"], self.user["authkey"])
server_data = AttrDict(Util.fetch_json(sync_url))
throttle_interval = int(server_data.throttle_interval) if server_data.throttle_interval else 1
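                    # throttle_interval is the server-advertised pause (in seconds)
                    # honoured between consecutive uploads/downloads below.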
except HTTPError, e:
self.handle_user_http_error(e)
raise
syncdb = self.load_syncdb()
# First upload local favorites that need uploading:
logger.info(lambda: "sync: Uploading local favorites to server")
files = os.listdir(self.parent.options.favorites_folder)
files = [os.path.join(self.parent.options.favorites_folder, f) for f in files]
files = filter(lambda f: os.path.isfile(f) and Util.is_image(f), files)
files.sort(key=os.path.getmtime)
for path in files:
try:
if not self.is_smart_enabled() or current_sync_hash != self.sync_hash:
return
name = os.path.basename(path)
if path in syncdb.local:
info = syncdb.local[path]
else:
info = {}
meta = Util.read_metadata(path)
source_url = Smart.fix_origin_url(None if meta is None else meta.get("sourceURL", None))
if source_url:
info["sourceURL"] = source_url
syncdb.local[path] = info
self.write_syncdb(syncdb)
if not "sourceURL" in info:
continue
imageid = self.get_image_id(info["sourceURL"])
if not "success" in syncdb.remote[imageid]:
syncdb.remote[imageid] = {"success": True}
self.write_syncdb(syncdb)
if imageid in server_data["ignore"]:
                            logger.warning(lambda: 'sync: Skipping upload of %s as it has been deleted from your profile. '
                                                   'To undo this, visit: %s' % (name, Smart.SITE_URL + '/image/' + imageid))
continue
if not imageid in server_data["favorite"]:
logger.info(lambda: "sync: Smart-reporting existing favorite %s" % path)
self.report_file(path, "favorite", async=False)
time.sleep(throttle_interval)
elif "upload_full_image" in server_data["favorite"][imageid]:
logger.info(lambda: "sync: Uploading full image for existing favorite %s" % path)
self.report_file(path, "favorite", async=False, upload_full_image=True)
time.sleep(throttle_interval)
elif "needs_reupload" in server_data["favorite"][imageid]:
logger.info(lambda: "sync: Server requested reupload of existing favorite %s" % path)
self.report_file(path, "favorite", async=False, needs_reupload=True)
time.sleep(throttle_interval)
except:
logger.exception(lambda: "sync: Could not process file %s" % name)
# Upload locally trashed URLs
logger.info(lambda: "sync: Uploading local banned URLs to server")
for url in self.parent.banned:
if not self.is_smart_enabled() or current_sync_hash != self.sync_hash:
return
imageid = self.get_image_id(url)
if not imageid in server_data["trash"]:
self.report_trash(url)
time.sleep(throttle_interval)
# Perform server to local downloading only if Sync is enabled
if self.is_sync_enabled():
# Append locally missing trashed URLs to banned list
local_trash = map(self.get_image_id, self.parent.banned)
for imageid in server_data["trash"]:
if not self.is_sync_enabled() or current_sync_hash != self.sync_hash:
return
if not imageid in local_trash:
image_data = Util.fetch_json(Smart.API_URL + '/image/' + imageid + '?action_source=sync')
self.parent.ban_url(image_data["origin_url"])
time.sleep(throttle_interval)
# Download locally-missing favorites from the server
to_sync = []
for imageid in server_data["favorite"]:
if imageid in server_data["ignore"]:
                            logger.warning(lambda: 'sync: Skipping download of %s as it has been deleted from your profile. '
                                                   'To undo this, visit: %s' % (imageid, Smart.SITE_URL + '/image/' + imageid))
continue
if imageid in server_data["trash"]:
# do not download favorites that have later been trashed
                            logger.info(lambda: 'sync: Skipping download of %s as it is also in trash.' % imageid)
continue
if imageid in syncdb.remote:
if 'success' in syncdb.remote[imageid]:
continue # we have this image locally
if syncdb.remote[imageid].get('error', 0) >= 3:
continue # we have tried and got error for this image 3 or more times, leave it alone
to_sync.append(imageid)
if to_sync:
self.parent.show_notification(
_("Sync"),
(_("Fetching %d images") % len(to_sync)) if len(to_sync) != 1 else _("Fetching 1 image"))
for imageid in to_sync:
if not self.is_sync_enabled() or current_sync_hash != self.sync_hash:
return
try:
logger.info(lambda: "sync: Downloading locally-missing favorite image %s" % imageid)
image_data = Util.fetch_json(Smart.API_URL + '/image/' + imageid)
                            if 'sfw_rating' in image_data and image_data['sfw_rating'] < 100:
                                logger.info(lambda: "sync: Skipping download of non-safe favorite image %s" % imageid)
                                continue  # actually skip; otherwise the image would be downloaded despite the log message
prefer_source_id = server_data["favorite"][imageid].get("source", None)
source = image_data.get("sources", {}).get(prefer_source_id, None)
image_url, origin_url, source_type, source_location, source_name, extra_metadata = \
Smart.extract_fetch_data(image_data)
path = ImageFetcher.fetch(image_url, self.parent.options.favorites_folder,
origin_url=origin_url,
source_type=source[0] if source else source_type,
source_location=source[1] if source else source_location,
source_name=source[2] if source else source_name,
extra_metadata=extra_metadata,
verbose=False)
if not path:
raise Exception("Fetch failed")
self.parent.register_downloaded_file(path)
syncdb.remote[imageid] = {"success": True}
syncdb.local[path] = {'sourceURL': image_data["origin_url"]}
except:
logger.exception(lambda: "sync: Could not fetch favorite image %s" % imageid)
syncdb.remote[imageid] = syncdb.remote[imageid] or {}
syncdb.remote[imageid].setdefault("error", 0)
syncdb.remote[imageid]["error"] += 1
finally:
if not self.is_smart_enabled() or current_sync_hash != self.sync_hash:
return
self.write_syncdb(syncdb)
time.sleep(throttle_interval)
if to_sync:
self.parent.show_notification(_("Sync"), _("Finished"))
self.last_synced = time.time()
except:
logger.exception(lambda: 'sync: Error')
finally:
self.syncing = False
sync_thread = threading.Thread(target=_run)
sync_thread.daemon = True
sync_thread.start()
def sync_if_its_time(self):
if not self.is_smart_enabled():
return
last_synced = getattr(self, 'last_synced', 0)
if time.time() - last_synced > 6 * 60 * 3600:
self.sync()
def process_login_request(self, userid, username, authkey):
def _do_login():
self.parent.show_notification(_('Logged in as %s') % username)
self.set_user({'id': userid, 'authkey': authkey, 'username': username})
self.parent.preferences_dialog.close_login_register_dialog()
if hasattr(self, "register_dialog") and self.register_dialog:
def _close():
self.register_dialog.result = 'logged'
self.register_dialog.response(Gtk.ResponseType.OK)
GObject.idle_add(_close)
if self.user is None or self.user['authkey'] != authkey:
def _go():
dialog = Gtk.MessageDialog(self.parent.preferences_dialog, Gtk.DialogFlags.MODAL, Gtk.MessageType.QUESTION, Gtk.ButtonsType.OK_CANCEL)
dialog.set_markup(_('Do you want to login to VRTY.ORG as <span font_weight="bold">%s</span>?') % username)
dialog.set_title(_('VRTY.ORG login confirmation'))
dialog.set_default_response(Gtk.ResponseType.OK)
response = dialog.run()
dialog.destroy()
if response == Gtk.ResponseType.OK:
_do_login()
Util.add_mainloop_task(_go)
else:
_do_login()
@staticmethod
def extract_fetch_data(json_image_data):
image = AttrDict(json_image_data)
origin_url = image.origin_url
image_url, source_type, source_location, source_name, extra_metadata = None, None, None, None, {}
if image.download_url:
image_url = image.download_url
if image.sources:
source = image.sources.values()[0]
source_type = source[0]
source_location = source[1]
source_name = image.origin_name or source[2]
if image.author and image.author_url:
extra_metadata['author'] = image.author
extra_metadata['authorURL'] = image.author_url
if image.keywords and isinstance(image.keywords, list):
extra_metadata['keywords'] = image.keywords
if image.headline:
extra_metadata['headline'] = image.headline
if image.description:
extra_metadata['description'] = image.description
if "sfw_rating" in image and image.sfw_rating is not None:
extra_metadata['sfwRating'] = image.sfw_rating
return image_url, origin_url, source_type, source_location, source_name, extra_metadata
@classmethod
def get_all_sfw_ratings(cls):
try:
return Util.fetch_json(Smart.API_URL + '/all-sfw-ratings').values()[0]
except:
            # Do not fail; fall back to a decent default
return [
{
"rating": 100,
"bg": "#74A300",
"label_short": "Safe",
"label_long": "Safe in any context",
"fg": "white",
"min_rating": 95
},
{
"rating": 80,
"bg": "#A09200",
"label_short": "Mild",
"label_long": "Mild, mostly safe",
"fg": "white",
"min_rating": 75
},
{
"rating": 50,
"bg": "#E5BE20",
"label_short": "Sketchy",
"label_long": "Sketchy, not safe in many contexts",
"fg": "white",
"min_rating": 40
},
{
"rating": 0,
"bg": "#CF1F00",
"label_short": "Not safe",
"label_long": "Definitely NSFW",
"fg": "white",
"min_rating": 0
}
]
@classmethod
@cache(ttl_seconds=1800)
def get_sfw_rating(cls, origin_url):
try:
logger.debug('Checking SFW rating for image origin URL %s' % origin_url)
imageid = Smart.get_image_id(origin_url)
info = Util.fetch_json(Smart.API_URL + '/image/' + imageid + '?action_source=get_sfw_rating')
rating = int(info['sfw_rating'])
logger.debug('Rating is: %s' % rating)
return rating
except Exception, e:
return None
@classmethod
@cache(ttl_seconds=1800)
def get_safe_mode_keyword_blacklist(cls):
try:
logger.debug('Fetching safe mode keywords blacklist')
blacklisted = set(Util.fetch_json(Smart.API_URL + '/safe-mode-blacklisted-tags').keys())
logger.info('Safe mode blacklisted keywords: %s' % str(blacklisted))
return blacklisted
except Exception, e:
logger.info('Could not fetch Safe mode blacklisted keywords, using defaults:')
return {
# Sample of Wallhaven and Flickr tags that cover most not-fully-safe images
'woman', 'women', 'model', 'models', 'boob', 'boobs', 'tit', 'tits',
'lingerie', 'bikini', 'bikini model', 'sexy', 'bra', 'bras', 'panties',
'face', 'faces', 'legs', 'feet', 'pussy',
'ass', 'asses', 'topless', 'long hair', 'lesbians', 'cleavage',
'brunette', 'brunettes', 'redhead', 'redheads', 'blonde', 'blondes',
'high heels', 'miniskirt', 'stockings', 'anime girls', 'in bed', 'kneeling',
'girl', 'girls', 'nude', 'naked', 'people', 'fuck', 'sex'
}
def stats_report_config(self):
logger.info(lambda: "Stats: Reporting config anonymously")
try:
with open(os.path.join(self.parent.config_folder, ".statsid")) as f:
statsid = f.read().strip()
except Exception:
statsid = None
if not statsid or not re.match(r"^([0-9A-Za-z]{10})$", statsid):
            # Generate and use a random id for reporting anonymous stats:
statsid = Smart.random_id()
with open(os.path.join(self.parent.config_folder, ".statsid"), "w") as f:
f.write(statsid)
try:
data = {"config": json.dumps(Stats.get_sanitized_config(self.parent))}
res = Util.fetch_json(Smart.API_URL + '/stats/%s/report-config' % statsid, data=data)
logger.info(lambda: "Stats: config reported, server response: %s" % str(res))
except Exception:
raise
|
GLolol/variety
|
variety/Smart.py
|
Python
|
gpl-3.0
| 37,386
|
[
"VisIt"
] |
cc458bb3844a8136017b2595be3c98eaa38a661b5ccf6017d9bc23e5e73a2005
|
import unittest
import ssexp
class JsonTests(unittest.TestCase):
def setUp(self):
class Parrot(object):
def __init__(self, is_dead=True, from_egg=None):
self.is_dead = is_dead
self.from_egg = from_egg
self.preserializer = ssexp.SsexpPreserializer()
self.preserializer.register(Parrot, version=2)
class Egg(object):
def __init__(self, from_parrot=None):
self.from_parrot = from_parrot
self.preserializer.register(Egg)
self.parrot = Parrot()
self.parrot.from_egg = Egg(from_parrot=self.parrot)
def test_int(self):
obj = 123
result = u"123"
self.assertEqual(ssexp.dumps(obj), result)
def test_float(self):
obj = 3.1415927
result = u"3.1415927"
self.assertEqual(ssexp.dumps(obj), result)
def test_str(self):
obj = u'The Knights who say "Ni!".'
result = u'"The Knights who say \\"Ni!\\"."'
self.assertEqual(ssexp.dumps(obj), result)
def test_bool(self):
obj = False
result = u"#f"
self.assertEqual(ssexp.dumps(obj), result)
def test_none(self):
obj = None
result = u"(none)"
self.assertEqual(ssexp.dumps(obj), result)
def test_list(self):
obj = [123, 3.1415927, u'The Knights who say "Ni!".', False, None]
result = '(123 3.1415927 "The Knights who say \\"Ni!\\"." #f (none))'
self.assertEqual(ssexp.dumps(obj), result)
def test_dict(self):
obj = {'brian': 'naughty boy'}
result = '(: brian: "naughty boy")'
self.assertEqual(ssexp.dumps(obj), result)
def test_dict_args(self):
obj = {'brian': 'naughty boy', 3: 'Antioch'}
result = '(: ("brian" "naughty boy") (3 "Antioch"))'
self.assertEqual(ssexp.dumps(obj), result)
def test_dict_args_cyclic(self):
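        # The parrot and its egg reference each other; ssexp breaks the cycle with
        # Scheme-style datum labels: '#0=' tags the first occurrence of the object
        # and '#0#' refers back to it.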
obj = {'brian': 'naughty boy', 3: 'Antioch', 'ouroboros': self.parrot}
result = '(: ("brian" "naughty boy") (3 "Antioch") ("ouroboros" #0=(parrot :version: 2 dead?: #t from-egg: (egg from-parrot: #0#))))'
self.assertEqual(ssexp.dumps(obj, self.preserializer), result)
|
jahs/ssexp
|
test.py
|
Python
|
mit
| 2,221
|
[
"Brian"
] |
c116e2fe99acee56e2d3c314b2b897c07656886d816b9be7c7d751c9f6b289b4
|
'''
Copyright 2016 Crowd-ML team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
'''
from firebase import firebase
from firebase_token_generator import create_token
import numpy as np
#import numpy.random
import time
from scipy.optimize import check_grad
import loss_hinge
import loss_logreg
import loss_softmax
import loss_nndemo1
####### Change below
'''
url = <Your firebase url>
uid = <arbitrary string>
secret = <Copy and paste the string from firebase db>
'''
maxiter = 100
## For testing
dataDir = './DataFiles' # Where you put your files
testFeatures = 'MNISTTestImages.50.l2.dat'
testLabels = 'MNISTTestLabels.dat'
Ntest = 1000
Dtest = 50  # 784
Ktest = 10
## For local training
standalone = True # Set this true for faster, local training.
params = {}
if standalone: # local version for testing purposes.
params = {}
    params['D'] = 50  # 784
params['K'] = 10
params['L']= 1e-6
params['N'] = 60000
params['naughtRate'] = 10.
params['clientBatchSize'] = 100
params['localUpdateNum'] = 10
params['featureSource'] = 'MNISTTrainImages.50.l2.dat'
params['labelSource'] = 'MNISTTrainLabels.dat'
    params['lossFunction'] = 'NNdemo1'  # 'Softmax'
params['noiseDistribution'] = 'NoNoise'
params['noiseScale'] = 0.
'''
# w is the parameter vector (flattened from the K x D matrix W in the softmax case)
# X is the N x D array of N samples of D-dimensional features
# y is the N x 1 array of the N labels
# Each loss function returns the gradient averaged over the N samples, and the loss value
'''
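# A minimal sketch of the interface each loss_* module is assumed to provide,
# inferred from the calls below (hypothetical illustration, not the real modules;
# softmax/NN variants additionally take the class count K):
#
#     def init(D):                      # initial parameter vector of length D
#         return np.zeros(D)
#     def getAvgGradient(w, X, y, L):   # (avg. gradient over the N samples, loss);
#         ...                           # L is presumably a regularization weight
#     def predict(w, X):                # predicted labels for the rows of X
#         ...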
############################################################################################################
## Gaussian noise
def GenerateGaussianNoise(scale=1.,tsize=None):
noise = np.random.normal(0., scale, tsize)
return noise
## Laplace noise
def GenerateLaplaceNoise(scale=1.,tsize=None):
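    # Inverse-CDF sampling: for U ~ Uniform(-0.5, 0.5), -b*sign(U)*ln(1 - 2|U|)
    # is Laplace(0, b). With b = scale/sqrt(2), the variance is 2*b^2 = scale^2,
    # so 'scale' is the standard deviation, matching GenerateGaussianNoise above.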
U = np.random.uniform(-0.5, 0.5,tsize)
noise = - np.sqrt(0.5)*scale*np.sign(U)*np.log(1. - 2.*np.abs(U))
return noise
############################################################################################################
## Train model, and retrieve/upload w and loss
def trainModel():
print 'Setting up firebase'
if not standalone:
ref = firebase.FirebaseApplication(url, None)
users = firebase.FirebaseApplication(url+'/users', None)
auth_payload = {"uid": uid}
token = create_token(secret, auth_payload)
user = '/users/'+uid+'/'
print 'Pre-loading test data'
Xtest,ytest = loadData(dataDir,testFeatures,testLabels,Ntest,Dtest,Ktest)
while True:
paramIter = -1
weightIter = -1
if not standalone: # Read all params from server
print ' '
print 'Downloading parameters from server'
paramIter = np.int(ref.get('/parameters/paramIter', None, params = {"auth":token}))
params['D'] = np.int(ref.get('/parameters/D', None, params = {"auth":token}))
params['K'] = np.int(ref.get('/parameters/K', None, params = {"auth":token}))
params['L'] = np.double(ref.get('/parameters/L', None, params = {"auth":token}))
params['N'] = np.int(ref.get('/parameters/N', None, params = {"auth":token}))
params['naughtRate'] = np.int(ref.get('/parameters/naughtRate', None, params = {"auth":token}))
params['clientBatchSize'] = np.int(ref.get('/parameters/clientBatchSize', None, params = {"auth":token}))
params['featureSource'] = ref.get('/parameters/featureSource', None, params = {"auth":token})
params['labelSource'] = ref.get('/parameters/labelSource', None, params = {"auth":token})
params['lossFunction'] = ref.get('/parameters/lossFunction', None, params = {"auth":token})
params['noiseDistribution'] = ref.get('/parameters/noiseDistribution', None, params = {"auth":token})
params['noiseScale'] = np.double(ref.get('/parameters/noiseScale', None, params = {"auth":token}))
            params['localUpdateNum'] = np.int(ref.get('/parameters/localUpdateNum', None, params = {"auth":token}))
print params
print 'Loading training data'
X,y = loadData(dataDir,params['featureSource'],params['labelSource'],params['N'],params['D'],params['K'])
# Re-init w
if (params['lossFunction']=='Hinge'):
w = loss_hinge.init(params['D'])
elif (params['lossFunction']=='LogReg'):
w = loss_logreg.init(params['D'])
elif (params['lossFunction']=='Softmax'):
w = loss_softmax.init(params['D'],params['K'])
elif (params['lossFunction']=='NNdemo1'):
w = loss_nndemo1.init(params['D'],params['K'])
else:
print 'Unknown loss type'
exit()
print 'Begin iteration'
for gradIter in range(1,maxiter+1):
print ' '
print 'paramIter = ', str(paramIter)
print 'weightIter = ', str(weightIter)
print 'gradIter = ', str(gradIter),'/',str(maxiter)
# Ready to send weights?
reset = False
print 'Checking server status'
while not standalone:
if (gradIter==1): # beginning
                    break
print '.',
time.sleep(1.) # sleep for 1 sec
paramIter_server = np.int(ref.get('parameters/paramIter', None, params = {"auth":token}))
if (paramIter_server > paramIter): # parameter has changed. Reset
reset = True
break
gradientProcessed = ref.get(user+'gradientProcessed', None, params = {"auth":token})
gradIter_server = np.int(ref.get(user+'gradIter', None, params = {"auth":token}))
#print 'gradientProcessed:',str(gradientProcessed),', gradIter_server:',str(gradIter_server)
if (gradientProcessed and gradIter_server == gradIter-1):
break
print ' '
if reset:
print 'Parameter changed !!!'
                break
# Fetch iteration number and weight
if standalone:
weightIter = gradIter
else:
#print 'Fetching weights'
weightIter = np.int(ref.get('/trainingWeights/iteration', None, params = {"auth":token}))
#print 'weightIter= ', weightIter
w = np.array(ref.get('/trainingWeights/weights', None, params = {"auth":token}),dtype=np.double)
if params['localUpdateNum']<=0 :
# SGD mode: compute and send the gradient
tX,ty = sampleData(X,y,params)
g, l = computeLossGradient(w,tX,ty,params)
xi = sampleNoise(w,params)
g += xi
else: # Parameter averaging mode: compute and send the parameters
for s in range(params['localUpdateNum']):
tX,ty = sampleData(X,y,params)
g,l = computeLossGradient(w,tX,ty,params)
# Simple learning rate
#w -= naught/gradIter*g
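                # 1/sqrt(t) decay with a global step index t = localUpdateNum*gradIter + s,
                # so the rate keeps shrinking across rounds instead of resetting each round.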
w -= params['naughtRate']/np.sqrt(params['localUpdateNum']*gradIter+s)*g
xi = sampleNoise(w,params)
w += xi
print 'loss = ',str(l)
if standalone:
if params['localUpdateNum']<=0:
# Simple learning rate
#w -= naughtRate/gradIter*g
w -= params['naughtRate']/np.sqrt(gradIter)*g
else:
pass # Do nothing
else:
print 'Uploading gradients'
if params['localUpdateNum']<=0:
gradJson = g.tolist()
else:
gradJson = w.tolist()
ref.put(user, 'paramIter', paramIter, params = {"auth":token})
ref.put(user, 'weightIter', weightIter, params = {"auth":token})
ref.put(user, 'gradIter', gradIter, params = {"auth":token})
ref.put(user, 'gradients', gradJson, params = {"auth":token})
ref.put(user ,'gradientProcessed', False, params = {"auth":token})
## Iteration ended
if (gradIter==maxiter):
testModel(w,Xtest,ytest,params['K'],params['lossFunction'])
if standalone:
break
def sampleData(X,y,params):
# Randomly choose (clientBatchSize) samples
ind = np.random.choice(range(params['N']),size=(params['clientBatchSize'],),replace=False)
tX = X[ind,:]
ty = y[ind]
return (tX,ty)
def computeLossGradient(w,tX,ty,params):
# Use one of loss functions.
# The output is the averaged gradient
if (params['lossFunction']=='Hinge'):
g,l = loss_hinge.getAvgGradient(w,tX,ty,params['L'])
elif (params['lossFunction']=='LogReg'):
g,l = loss_logreg.getAvgGradient(w,tX,ty,params['L'])
elif (params['lossFunction']=='Softmax'):
g,l = loss_softmax.getAvgGradient(w,tX,ty,params['L'],params['K'])
elif (params['lossFunction']=='NNdemo1'):
g,l = loss_nndemo1.getAvgGradient(w,tX,ty,params['L'],params['K'])
else:
print 'Unknown loss type'
exit()
if np.isnan(g).any():
print 'Nan in gradient'
exit()
return (g,l)
def sampleNoise(w,params):
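    # Draw a noise vector shaped like w. Gaussian and Laplace perturbations are the
    # standard mechanisms for privacy-preserving (differentially private) updates,
    # which appears to be their role here.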
if (params['noiseDistribution']=='NoNoise'):
xi = np.zeros(w.shape)
elif (params['noiseDistribution']=='Gauss'):
xi = GenerateGaussianNoise(params['noiseScale'], w.shape)
elif (params['noiseDistribution']=='Laplace'):
xi = GenerateLaplaceNoise(params['noiseScale'], w.shape)
else:
print 'Unknown noise type'
exit()
return xi
## Test
def testModel(w,X,y,K,lossFunction):
if (lossFunction=='Hinge'):
ypred = loss_hinge.predict(w,X)
elif (lossFunction=='LogReg'):
ypred = loss_logreg.predict(w,X)
elif (lossFunction=='Softmax'):
ypred = loss_softmax.predict(w,X,K)
elif (lossFunction=='NNdemo1'):
ypred = loss_nndemo1.predict(w,X,K)
else:
print 'Unknown loss type'
exit()
ind_correct = np.where(ypred==y)[0]
ncorrect = ind_correct.size
rate = float(ncorrect) / float(ypred.size)
print 'accuracy = ', str(rate)
## Load data
def loadData(dataDir,featureSource,labelSource,N,D,K):
# Load data
X = np.loadtxt(dataDir+'/'+featureSource, delimiter=',', dtype=float)
#print X.shape
if (X.shape[0]!=N):
print 'Wrong number of samples'
exit()
#return
if (X.shape[1]!=D):
print 'Wrong feature dimension'
exit()
#return
y = np.loadtxt(dataDir+'/'+labelSource, dtype=float).astype(int)
if (y.size!=N):
print 'Wrong number of labels'
exit()
#return
if (K==2):
y[y==0] = -1
if any((y!=1) & (y!=-1)):
print 'Wrong labels'
exit()
if (K>2):
if any((y<0) | (y>K-1)):
print 'Wrong labels'
exit()
return (X,y)
###############################################################################################
## Beginning of main
'''
loss_hinge.self_test1()
loss_logreg.self_test1()
loss_softmax.self_test1()
loss_nndemo1.self_test1()
exit()
'''
trainModel()
|
jihunhamm/Crowd-ML
|
client/python/pythonClient.py
|
Python
|
apache-2.0
| 12,063
|
[
"Gaussian"
] |
47e28aaaab4588e14121651d244cd4fd3a3e7d9f3d3bc5b566621951d2820125
|