text stringlengths 0 1.05M | meta dict |
|---|---|
'''Analog to Digital sensor (in ADC0) for the esp8266 microcontroller'''
# author: Daniel Mizyrycki
# license: MIT
# repository: https://github.com/mzdaniel/micropython-iot
from machine import ADC
class ADCSensor:
    """Analog sensor attached to the ESP8266 ADC0 pin.

    Raw readings in the window min_rd..max_rd are linearly mapped onto
    the calibrated range min_val..max_val by ``sample``.
    """

    def __init__(self, sensor_id='adc', min_rd=0, max_rd=1024,
                 min_val=0, max_val=1):
        """Store the calibration window and open ADC channel 0.

        min_rd and max_rd bound the raw readings used for calibration;
        min_val and max_val bound the values returned by ``sample``.
        """
        self.sensor_id = sensor_id
        self.min_rd = min_rd
        self.max_rd = max_rd
        self.min_val = min_val
        self.max_val = max_val
        # Slope of the linear raw-reading -> calibrated-value interpolation.
        self.coef = (max_val - min_val) / (max_rd - min_rd)
        self.adc = ADC(0)

    def read(self) -> int:
        """Return the raw 0-1024 ADC reading (0~3.3v) via the MicroPython API."""
        return self.adc.read()

    def sample(self) -> float:
        """Return the calibrated reading interpolated into min_val~max_val."""
        return self.min_val + (self.read() - self.min_rd) * self.coef

    def __repr__(self):
        return "ADCSensor('%s')" % self.sensor_id
| {
"repo_name": "mpi-sws-rse/thingflow-python",
"path": "micropython/sensors/adc_esp8266.py",
"copies": "1",
"size": "1236",
"license": "apache-2.0",
"hash": 2465018510559578600,
"line_mean": 29.1463414634,
"line_max": 72,
"alpha_frac": 0.5768608414,
"autogenerated": false,
"ratio": 3.4049586776859506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4481819519085951,
"avg_score": null,
"num_lines": null
} |
# anal.py
import os
import sys
import pickle
import json
import yaml
import h5py
import itertools
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict as ddict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import matthews_corrcoef
def main():
    """Aggregate per-tag classifier results found in targetDevelDir.

    Writes averaged and per-tag performance metrics, release probability
    summaries, probability histograms and best-run confusion matrices
    into <targetDevelDir>/anal.
    """
    if len(sys.argv)!=2:
        print 'USAGE:'
        print 'python anal.py [targetDevelDir]'
        return
    tdir = sys.argv[1]
    odir = os.path.join(tdir,'anal')
    if not os.path.exists(odir): os.makedirs(odir)
    perfs = ddict(list); cms = []
    # One result file per experiment tag: result_<tag>.h5
    tags = [i.split('.')[0].replace('result_','') for i in os.listdir(tdir) if 'result' in i]
    tags = sorted(tags)
    # Class labels are taken from the first run's dev log.
    with open(os.path.join(tdir,'devLog_'+tags[0]+'.json'),'r') as f:
        labels = yaml.load(f)['labels']
    relDict = ddict(list)
    delim = '__'
    for i,tag in enumerate(tags):
        print 'anal on '+tag+' '+str(i+1)+'/'+str(len(tags))
        rfpath = os.path.join(tdir,'result_'+tag+'.h5')
        with h5py.File(rfpath,'r') as f:
            ytrue = f['yte'][:]; ypred = f['ypred'][:]; yscore = f['yscore'][:]
            yrel = f['yrel'][:]; yrelscore = f['yrelscore'][:]; xrelraw = f['xrelraw'][:]
        # Macro-averaged metrics collected per tag.
        perfs['roc_auc_score'].append( roc_auc_score(ytrue,yscore,average='macro') )
        perfs['aupr_score'].append( average_precision_score(ytrue,yscore,average='macro') )
        perfs['accuracy_score'].append( accuracy_score(ytrue,ypred) )
        perfs['cohen_kappa_score'].append( cohen_kappa_score(ytrue,ypred) )
        perfs['fbeta_score'].append( fbeta_score(ytrue,ypred,average='macro',beta=0.5) )
        perfs['matthews_corrcoef'].append( matthews_corrcoef(ytrue,ypred) )
        cms.append( confusion_matrix(ytrue,ypred,labels) )
        # Group release predictions by raw feature key; each entry is a
        # "label__score" string.
        for i,ii in enumerate(xrelraw):
            relDict[delim.join(ii)].append( str(yrel[i])+delim+str(yrelscore[i]) )
    print 'writing perfs...'
    perfAvg = {}
    for m,v in perfs.iteritems():
        perfAvg[m+'_avg'] = ( np.mean(v),np.std(v) )
    with open(os.path.join(odir,'perfAvg.json'),'w') as f:
        json.dump(perfAvg,f,indent=2,sort_keys=True)
    perfs['tags'] = tags
    with open(os.path.join(odir,'perfs.json'),'w') as f:
        json.dump(perfs,f,indent=2,sort_keys=True)
    print 'writing release...'
    relPosProbs = []; relNegProbs = []
    for k,v in relDict.iteritems():
        # NOTE(review): this rebinds 'labels', clobbering the class labels
        # read from the dev log that are later passed to _plotCM below --
        # confirm whether that is intended.
        labels = [int(i.split(delim)[0]) for i in v]
        probs = [float(i.split(delim)[1]) for i in v]
        maxProb = max(probs)
        maxProbLabel = labels[ probs.index(maxProb) ]
        # Record the strongest prediction as both a positive and a negative
        # probability (the complement when the max-prob label disagrees).
        if maxProbLabel==1:
            vpos = (k,maxProb)
            vneg = (k,1.0-maxProb)
        else:
            vneg = (k,maxProb)
            vpos = (k,1.0-maxProb)
        relPosProbs.append(vpos)
        relNegProbs.append(vneg)
    with open(os.path.join(odir,'release.json'),'w') as f:
        json.dump(relDict,f,indent=2,sort_keys=True)
    with open(os.path.join(odir,'releaseMaxProbPos.json'),'w') as f:
        json.dump(relPosProbs,f,indent=2,sort_keys=True)
    with open(os.path.join(odir,'releaseMaxProbNeg.json'),'w') as f:
        json.dump(relNegProbs,f,indent=2,sort_keys=True)
    def plotHist(vals,normalized,tag):
        # Histogram of probabilities in [0,1] with 0.05-wide bins; writes
        # <odir>/<tag>[_norm].png (odir captured from the enclosing scope).
        histRange = (0.0,1.0); histInc = 0.05
        histBins = np.arange(histRange[0],histRange[1]+histInc,histInc)
        weights = np.ones_like(vals)/float(len(vals))
        fig = plt.figure()
        plt.xlabel('probability')
        plt.xticks(np.arange(0.0,1.0+0.1,0.1))
        fname = tag
        if normalized:
            plt.hist(vals,weights=weights,normed=False,
                     bins=histBins,range=histRange)
            plt.ylabel('#data (normalized)')
            plt.yticks(np.arange(0.0,1.0+0.1,0.1))
            fname += '_norm'
        else:
            plt.hist(vals,normed=False,
                     bins=histBins,range=histRange)
            plt.ylabel('#data')
        plt.grid();
        plt.savefig(os.path.join(odir,fname+'.png'),
                    dpi=300,format='png',bbox_inches='tight');
        plt.close(fig)
    for norm in [True,False]:
        plotHist([i[1] for i in relPosProbs],norm,'releaseMaxProbPosHist')
        plotHist([i[1] for i in relNegProbs],norm,'releaseMaxProbNegHist')
    print 'writing cm...'
    def _getBestIdx(metric):
        # Index of the run with the highest value for *metric*.
        idx = perfs[metric].index( max(perfs[metric]) )
        return idx
    m = 'aupr_score'
    for n in ['normalized','unnormalized']:
        _plotCM(cms[_getBestIdx(m)],labels,n,os.path.join(odir,'cm_best_'+m+'_'+n+'.png'))
def _plotCM(cm, classes, normalized, fpath):
    """Render a confusion matrix to *fpath* as a PNG.

    When *normalized* is the string 'normalized', each row is divided by
    its sum before plotting.  Cell values are overlaid as text, colored
    for contrast against the Blues colormap.
    """
    fig = plt.figure()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    if normalized == 'normalized':
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar()
    # Cells brighter than half the max get black text, darker ones white.
    midpoint = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, cm[row, col], horizontalalignment="center",
                     color="white" if cm[row, col] > midpoint else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig(fpath, dpi=300, format='png', bbox_inches='tight')
    plt.close(fig)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "tttor/csipb-jamu-prj",
"path": "predictor/connectivity/classifier/imbalance/anal.py",
"copies": "1",
"size": "5452",
"license": "mit",
"hash": 1040749159826671000,
"line_mean": 34.6339869281,
"line_max": 92,
"alpha_frac": 0.6320616288,
"autogenerated": false,
"ratio": 3.0038567493112946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8948364393770833,
"avg_score": 0.03751079686809231,
"num_lines": 153
} |
# anal.py
import os
import sys
import pickle
import yaml
import matplotlib.pyplot as plt
metrics = ['calinskiharabaz','silhouette']
def main():
    """Run cluster analyses on the directory given as the sole CLI argument."""
    if len(sys.argv)!=2:
        print 'USAGE:'
        print 'python anal.py [targetClusterDir]'
        return
    tdir = sys.argv[1]
    # All output artifacts go under <targetClusterDir>/anal.
    odir = os.path.join(tdir,'anal')
    if not os.path.exists(odir): os.makedirs(odir)
    _pie(tdir,odir)
    # _scatter(tdir,odir)
def _scatter(tdir,odir):
    """Scatter-plot #members per class label for each dataset directory.

    Color encodes the run type (red when 'protein' is in the directory
    name, otherwise green); the marker encodes the metric ('^' for
    silhouette files, 'o' otherwise).  One PNG per top-level directory.
    """
    odir = os.path.join(odir,'scatter')
    if not os.path.exists(odir): os.makedirs(odir)
    for d in [i for i in os.listdir(tdir) if not('anal' in i)]:
        fig = plt.figure()
        for d2 in [i for i in os.listdir(os.path.join(tdir,d)) if os.path.isdir(os.path.join(tdir,d,i))]:
            print d2
            dataset = d2.split('-')[2]
            c = 'r' if ('protein' in d2) else 'g'
            for fname in [i for i in os.listdir(os.path.join(tdir,d,d2)) if 'bestlabels_stat' in i]:
                m = '^' if 'silhouette' in fname else 'o'
                # tag = 'protein' if ('protein' in d2) else 'compound'
                # tag = '_silhouette' if 'silhouette' in fname else '_calinskiharabaz'
                with open(os.path.join(tdir,d,d2,fname),'r') as f:
                    data = yaml.load(f)
                # Keys are class labels, values hold the member counts.
                x = [int(i) for i in data.keys()]
                y = [i[0] for i in data.values()]
                plt.scatter(x,y,c=[c]*len(x),alpha=0.5,marker=m)
        plt.grid(True)
        plt.xlabel('class labels')
        plt.ylabel('#members')
        # NOTE(review): 'dataset' keeps whatever the last inner directory
        # set it to -- confirm the filename is meant to use that value.
        plt.savefig(os.path.join(odir,dataset+'_scatter.png'),
                    dpi=300,format='png',bbox_inches='tight')
        plt.close(fig)
def _pie(tdir,odir):
    """Draw a cluster-size pie chart per (compound metric, protein metric) pair.

    Reads <comMet>_<proMet>_labels_stat.json from every dataset directory
    and writes one PNG per metric pairing under <odir>/pie/<dataset>.
    """
    odir = os.path.join(odir,'pie')
    if not os.path.exists(odir): os.makedirs(odir)
    for d in [i for i in os.listdir(tdir) if not('anal' in i)]:
        print d
        odir2 = os.path.join(odir,d)
        if not os.path.exists(odir2): os.makedirs(odir2)
        for comMet in metrics:
            for proMet in metrics:
                with open(os.path.join(tdir,d,comMet+'_'+proMet+'_labels_stat.json')) as f:
                    data = yaml.load(f); keys = data.keys()
                fig = plt.figure()
                # Cluster '0' is exploded out of the pie; '1' drawn green,
                # '-1' blue, all other labels red.
                plt.pie([data[k][0] for k in keys],
                        explode=[0.3 if (k=='0') else 0.0 for k in keys],labels=keys,autopct='%1.2f%%',
                        colors=['g' if (k=='1') else 'b' if (k=='-1') else 'r' for k in keys],
                        shadow=False, startangle=90)
                plt.axis('equal')
                plt.savefig(os.path.join(odir2,'_'.join([d.split('-')[-1],comMet,proMet,'pie.png'])),
                            dpi=300,format='png',bbox_inches='tight')
                plt.close(fig)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "tttor/csipb-jamu-prj",
"path": "predictor/connectivity/cluster/anal.py",
"copies": "1",
"size": "2736",
"license": "mit",
"hash": 2969728882767532500,
"line_mean": 35.972972973,
"line_max": 103,
"alpha_frac": 0.5372807018,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40372807018,
"avg_score": null,
"num_lines": null
} |
# an alternative approach
from SimpleCV import Color, Display, Image, Line
from util import dist, show_img, timer, cart_to_polar
import PIL
import numpy as np
from math import sqrt, ceil, pi, atan2
class ParsedFrame:
    """Features extracted from one Super Hexagon frame.

    Holds the original and binarized images, their boolean-array views,
    the polar-transformed versions, and the cursor position in polar
    coordinates.  Construction also marks the cursor with a red dot on
    the polar image.
    """

    def __init__(self, img, bimg, arr, rot_arr, rot_img, cursor_r, cursor_angle):
        w, h = img.size()
        self.img = img
        self.center_point = (w/2, h/2)
        self.bimg = bimg
        self.arr = arr
        self.rot_arr = rot_arr
        self.rot_img = rot_img
        self.cursor_r = cursor_r
        self.cursor_angle = cursor_angle
        # Project the cursor's polar coordinates onto the rotated image:
        # y maps radius (inverted), x maps the 0-360 degree angle.
        rot_w, rot_h = self.rot_img.size()
        max_radius = ceil(dist(0, 0, w/2, h/2))
        dot_y = rot_h - int(rot_h * cursor_r/max_radius)
        dot_x = int(float(rot_w) * self.cursor_angle / 360)
        self.rot_img.dl().circle((dot_x, dot_y), 3, color=Color.RED, filled=True)
def parse_frame(img):
    """
    Parses a SimpleCV image object of a frame from Super Hexagon.

    Returns a ParsedFrame object containing selected features, or None
    when the cursor blob cannot be located.
    """
    # helper image size variables
    w, h = img.size()
    midx, midy = w/2, h/2
    # Create normalized images for targeting objects in the foreground or
    # background.  (This normalization is handy since Super Hexagon's colors
    # are inverted for some parts of the game)
    #   fg_img = foreground image (bright walls, black when binarized)
    #   bg_img = background image (bright space, black when binarized)
    fg_img = img
    if sum(img.binarize().getPixel(midx, midy)) == 0:
        fg_img = img.invert()
    bg_img = fg_img.invert()
    # We need to close any gaps around the center wall so we can detect its
    # containing blob.  The gaps are resulting artifacts from video encoding.
    bimg = bg_img.binarize()
    bimg = black_out_GUI(bimg)
    blobs = bimg.findBlobs()
    cursor_blob = get_cursor_blob(blobs, h, midx, midy)
    if cursor_blob:
        cursor_point = map(int, cursor_blob.centroid())
        # Polar cursor position relative to screen center (y flipped).
        cursor_r, cursor_angle = cart_to_polar(cursor_point[0] - midx, midy - cursor_point[1])
        cursor_angle = int(cursor_angle * 360 / (2 * pi))
        cursor_angle = 180 - cursor_angle
        if cursor_angle < 0:
            # BUG FIX: the original incremented an undefined name 'a' here,
            # raising NameError whenever the angle wrapped below zero.
            cursor_angle += 360
        bimg = black_out_center(bimg, cursor_r).applyLayers()
        arr = bimg.resize(100).getGrayNumpy() > 100
        rot_arr = arr_to_polar(arr)
        rot_img = Image(PIL.Image.fromarray(np.uint8(np.transpose(rot_arr)*255))).dilate(iterations=3)
        rot_arr = rot_img.getGrayNumpy() > 100
        rot_img = rot_img.resize(400).flipVertical()
        return ParsedFrame(img, bimg, arr, rot_arr, rot_img, cursor_r, cursor_angle)
    else:
        return None
def get_cursor_blob(blobs, h, midx, midy):
    """Return the first blob that looks like the player cursor, else None.

    A candidate must be tiny relative to the frame height and close to
    the midpoint of the screen.
    """
    max_size = h * 0.05            # cursor is teensy tiny
    max_center_dist = h * 0.2      # and close to the middle

    def looks_like_cursor(blob):
        bx, by = blob.centroid()
        return (blob.width() < max_size
                and blob.height() < max_size
                and dist(bx, by, midx, midy) < max_center_dist)

    if not blobs:
        return None
    for blob in blobs:
        if looks_like_cursor(blob):
            return blob
    return None
def arr_to_polar(arr):
    """Remap a boolean screen array into polar coordinates.

    The output is a fixed 100x62 boolean grid: the x axis spans the
    angle (reversed, so it increases clockwise) and the y axis spans the
    radius measured from the array's center.
    """
    w, h = arr.shape
    cx, cy = w/2, h/2
    max_r = ceil(dist(0, 0, cx, cy))
    new_w, new_h = 100, 62
    x_bound, y_bound = new_w - 1, new_h - 1
    polar = np.zeros((new_w, new_h), dtype=np.bool_)
    for x in range(w):
        for y in range(h):
            r, theta = cart_to_polar(x - cx, cy - y)  # flip y
            px = x_bound - int(x_bound * (theta + pi) / (2 * pi))
            py = int(y_bound * r / max_r)
            polar[px, py] = arr[x, y]
    return polar
def black_out_GUI(img):
    """Cover Super Hexagon's HUD regions with filled rectangles.

    The on-screen readouts would otherwise register as blobs during
    detection.  Returns the same image with its draw layer updated.
    """
    layer = img.dl()
    # Top-left HUD box.
    layer.rectangle((0, 0), (209, 31), filled=True)
    total_width = img.size()[0]
    # Two boxes anchored to the top-right corner.
    for box_w, box_h in ((229, 31), (111, 51)):
        layer.rectangle((total_width - box_w, 0), (box_w, box_h), filled=True)
    return img
def black_out_center(img, radius):
    """Fill a circle over the frame's center.

    The circle is padded 10px past *radius* so the central region is
    fully covered on the draw layer.  Returns the same image.
    """
    width, height = img.size()
    img.dl().circle((width/2, height/2), radius + 10, filled=True)
    return img
def test():
    """Parse a single training frame, reporting timings for load and parse."""
    with timer('image'):
        img = Image('train/372.png')
    print "image size (%d, %d)" % img.size()
    with timer('parse'):
        p = parse_frame(img)
    print '--------------'
    return p
# Script entry point: parse the sample frame and show the polar view.
if __name__ == "__main__":
    p = test()
    if p:
        print 'cursor angle: %d' % p.cursor_angle
        show_img(p.rot_img)
    else:
print 'PARSE FAILED' | {
"repo_name": "david-crespo/py-super-hexagon",
"path": "parse.py",
"copies": "1",
"size": "4827",
"license": "mit",
"hash": -8301702377197169000,
"line_mean": 27.0697674419,
"line_max": 104,
"alpha_frac": 0.5825564533,
"autogenerated": false,
"ratio": 3.116204002582311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4198760455882311,
"avg_score": null,
"num_lines": null
} |
# An alternative formulation of namedtuples
import operator
import types
import sys
def named_tuple(classname, fieldnames):
    """Build a lightweight namedtuple-like class.

    Each field becomes a read-only property indexing into the underlying
    tuple; instantiation requires exactly one positional argument per
    field.
    """
    # Read-only accessors: field number n simply indexes tuple slot n.
    cls_dict = {}
    for index, field in enumerate(fieldnames):
        cls_dict[field] = property(operator.itemgetter(index))

    # Enforce the arity at construction time.
    def __new__(cls, *args):
        if len(args) != len(fieldnames):
            raise TypeError('Expected {} arguments'.format(len(fieldnames)))
        return tuple.__new__(cls, args)

    cls_dict['__new__'] = __new__

    # Create the class object with tuple as the base.
    cls = types.new_class(classname, (tuple,), {},
                          lambda ns: ns.update(cls_dict))
    # Attribute the class to the caller's module, not this helper's.
    cls.__module__ = sys._getframe(1).f_globals['__name__']
    return cls
if __name__ == '__main__':
    # Smoke-test the factory: tuple behavior, property access, immutability.
    Point = named_tuple('Point', ['x', 'y'])
    print(Point)
    p = Point(4, 5)
    print(len(p))
    print(p.x, p[0])
    print(p.y, p[1])
    try:
        p.x = 2
    except AttributeError as e:
        print(e)
    # Instances are real tuples, so %-formatting unpacks them directly.
    print('%s %s' % p)
| {
"repo_name": "tuanavu/python-cookbook-3rd",
"path": "src/9/defining_classes_programmatically/example2.py",
"copies": "2",
"size": "1050",
"license": "mit",
"hash": -1651122735494921700,
"line_mean": 27.3783783784,
"line_max": 76,
"alpha_frac": 0.5666666667,
"autogenerated": false,
"ratio": 3.583617747440273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5150284414140273,
"avg_score": null,
"num_lines": null
} |
"""An alternative Rasterio raster calculator."""
# Package metadata.
__version__ = '0.1'
__author__ = 'Kevin Wurster'
__email__ = 'wursterk@gmail.com'
__source__ = 'https://github.com/geowurster/rio-eval-calc'
__license__ = """
New BSD License
Copyright (c) 2015-2016, Kevin D. Wurster
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The names of rio-eval-calc its contributors may not be used to endorse or
promote products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
| {
"repo_name": "geowurster/rio-eval-calc",
"path": "rio_eval_calc/__init__.py",
"copies": "1",
"size": "1703",
"license": "bsd-3-clause",
"hash": 265939180232381980,
"line_mean": 43.8157894737,
"line_max": 78,
"alpha_frac": 0.7856723429,
"autogenerated": false,
"ratio": 4.344387755102041,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 38
} |
# analy_mod_.py
# revised version
# A python program to analyze the SUS weighting function in order to reach the following goals:
# 1. plot the weight function
# 2. generate the normalized distribution for Z=1
# 3. extrapolate the N distribution for different Zs given by the user.
# Author: Yuding Ai
# Date: 2015 Nov 11
import math
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def PN():
    """Load the SUS weighting function and derive the P(N; Z=1) distribution.

    Reads one weight per line from ``SUSWeight_function.txt``.  When the
    largest weight exceeds 500, every weight is shifted down so the max
    becomes 500 (keeping exp() finite) before exponentiating and
    normalizing.

    NOTE(review): when max(weights) <= 500 the distribution comes back
    empty -- presumably the data always exceeds 500; confirm before reuse.

    Returns (weights, distribution).
    """
    weights = []
    with open("SUSWeight_function.txt", "r") as src:
        for line in src:
            weights.append(float(line.split()[0]))
    distribution = []
    top = max(weights)
    if top > 500:
        for idx in range(len(weights)):
            weights[idx] = weights[idx] - top + 500
            distribution.append(math.exp(weights[idx]))
    distribution = [float(v)/sum(distribution) for v in distribution]
    return weights, distribution
def Pplot(PN, z):
    """Scatter-plot the distribution P(N; Z=z) and save it as 'P(N;Z=<z>).png'."""
    fig = plt.figure()
    plt.plot(PN, '+b', markersize=3)
    zs = str(z)
    plt.ylabel('P(N;Z=' + zs + ')')
    plt.xlabel('N')
    fig.savefig('P(N;Z=' + zs + ').png', dpi=300, bbox_inches='tight')
def enlargePplot(PN, z):
    """Plot P(N; Z=z) zoomed into the N window [6400, 8200] and save it."""
    fig = plt.figure()
    plt.plot(PN, '+b-', markersize=3, linewidth=0.1)
    plt.xlim(6400, 8200)
    zs = str(z)
    plt.ylabel('P(N;Z=' + zs + ')')
    plt.xlabel('N')
    fig.savefig('ENLP(N;Z=' + zs + ').png', dpi=300, bbox_inches='tight')
def Wplot(WN):
    """Plot the raw SUS weighting function and save it as WeightingFunc.png."""
    fig = plt.figure()
    plt.plot(WN, '+r', markersize=1)
    plt.ylabel('Weighting Function')
    plt.xlabel('N')
    fig.savefig('WeightingFunc.png', dpi=300, bbox_inches='tight')
def exploPN(W, z):
    """Extrapolate the N distribution to activity z from weights W.

    Adds i*ln(z) to weight i, shifts so the max is 500 when it exceeds
    500, then exponentiates and normalizes.  As in PN(), a weight list
    whose maximum stays at or below 500 yields an empty distribution.
    """
    shifted = list(W)
    for idx in range(len(shifted)):
        shifted[idx] += idx * math.log(z)
    distribution = []
    top = max(shifted)
    if top > 500:
        for j in range(len(W)):
            shifted[j] = shifted[j] - top + 500
            distribution.append(math.exp(shifted[j]))
    distribution = [float(v)/sum(distribution) for v in distribution]
    return distribution
def main():
    P = PN()[1]  # take the P(N;z=1)
    W = PN()[0]  # take the original weighting function
    # NOTE(review): P is unused below, and PN() re-reads the file twice.
    # Wplot(W)
    # Pplot(P,1)
    # Pe = exploPN(W,3.08)
    # enlargePplot(Pe,3.08)
    # Sweep activities z = 9.29 .. 9.47 in steps of 0.02, one plot each.
    for i in range(10):
        t = 9.29 + 0.02*i
        Pe = exploPN(W,t)
        # Pplot(Pe,t)
        enlargePplot(Pe,t)
main()
| {
"repo_name": "Aieener/SUS_3D",
"path": "DATA/8_32_32_128_1E7/analy_mod_.py",
"copies": "1",
"size": "2457",
"license": "mit",
"hash": 2260506233576298500,
"line_mean": 21.9626168224,
"line_max": 95,
"alpha_frac": 0.6308506309,
"autogenerated": false,
"ratio": 2.418307086614173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3549157717514173,
"avg_score": null,
"num_lines": null
} |
# analy.py
# A python program to analyze the SUS weighting function in order to reach the following goals:
# 1. plot the weight function
# 2. generate the normalized distribution for Z=1
# 3. extrapolate the N distribution for different Zs given by the user.
# Author: Yuding Ai
# Date: 2015 Oct 23
import math
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def PN():
    """Read the number distribution P(N), one value per line, from P_N.txt."""
    distribution = []
    with open("P_N.txt", "r") as src:
        for line in src:
            distribution.append(float(line.split()[0]))
    return distribution
def WN():
    """Read the SUS weighting function, one value per line."""
    weights = []
    with open("SUSWeight_function.txt", "r") as src:
        for line in src:
            weights.append(float(line.split()[0]))
    return weights
def Pplot(PN, z):
    """Plot the values against their index and save as '<z>).png'.

    Despite the name, the axis labels describe the weighting function
    over an index axis (S = -0.5 + index/2000).
    """
    fig = plt.figure()
    plt.plot(PN, '+b', markersize=3)
    zs = str(z)
    plt.ylabel('W(index)')
    # plt.xlabel('S')
    plt.xlabel('index; S = -0.5 + index/2000')
    fig.savefig(zs + ').png', dpi=300, bbox_inches='tight')
def enlargePplot(PN, z):
    """Plot P(N; Z=z) zoomed into N in [15300, 16000], y in [0, 0.006]."""
    fig = plt.figure()
    plt.plot(PN, '+b-', markersize=3, linewidth=0.1)
    plt.xlim(15300, 16000)
    plt.ylim(0, 0.006)
    zs = str(z)
    plt.ylabel('P(N;Z=' + zs + ')')
    plt.xlabel('N')
    fig.savefig('ENLP(N;Z=' + zs + ').png', dpi=300, bbox_inches='tight')
def exploPN(P, z):
    """Extrapolate P(N) to activity z by reweighting each entry.

    Entry i is scaled by z**(i - 32450); the offset presumably centers
    the exponent near the distribution's peak -- confirm against the
    producing simulation.  The result is NOT re-normalized (the
    normalization code was deliberately commented out in the original).
    """
    reweighted = []
    for idx in range(len(P)):
        reweighted.append(P[idx] * z**(idx - 32450))
    return reweighted
def main():
    P = PN()  # take the P(N;z=1)
    W = WN()
    # NOTE(review): P is loaded but unused below.
    # Pe = exploPN(P,9.33)
    # Pplot(P,"1")
    # Plot the raw weighting function.
    Pplot(W,"Weighting_function")
    # Pe = exploPN(P,1.33)
    # Pplot(Pe,1.33)
    # enlargePplot(Pe,"9.33")
    # for i in range(10):
    #     W = PN()[0] # take the original weighting function
    #     t = 9.77 + 0.01*i
    #     Pe = exploPN(W,t)
    #     Pplot(Pe,t)
    #     enlargePplot(Pe,t)
main()
| {
"repo_name": "Aieener/SUS_on_S",
"path": "Data/1E6/Z=1/analy.py",
"copies": "1",
"size": "2480",
"license": "mit",
"hash": 8014293537017971000,
"line_mean": 21.3423423423,
"line_max": 95,
"alpha_frac": 0.6120967742,
"autogenerated": false,
"ratio": 2.373205741626794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3485302515826794,
"avg_score": null,
"num_lines": null
} |
"""Analyse popular nuget packages."""
from bs4 import BeautifulSoup
from re import compile as re_compile
from requests import get
from .base import AnalysesBaseHandler
from f8a_worker.solver import NugetReleasesFetcher
class NugetPopularAnalyses(AnalysesBaseHandler):
    """Analyse popular nuget packages."""

    # Listing page template and its fixed page size on nuget.org.
    _URL = 'https://www.nuget.org/packages?page={page}'
    _POPULAR_PACKAGES_PER_PAGE = 20

    def _scrape_nuget_org(self, popular=True):
        """Schedule analyses for popular NuGet packages.

        Scrapes the nuget.org listing pages that cover the requested
        package-rank window [self.count.min, self.count.max] and schedules
        an analysis flow for up to self.nversions releases per package.
        """
        # TODO: reduce cyclomatic complexity
        # Map the 1-based package-rank window onto 1-based listing pages.
        first_page = ((self.count.min - 1) // self._POPULAR_PACKAGES_PER_PAGE) + 1
        last_page = ((self.count.max - 1) // self._POPULAR_PACKAGES_PER_PAGE) + 1
        for page in range(first_page, last_page + 1):
            url = self._URL.format(page=page)
            pop = get(url)
            if not pop.ok:
                self.log.warning('Couldn\'t get url %r' % url)
                continue
            poppage = BeautifulSoup(pop.text, 'html.parser')
            packages = poppage.find_all('article', class_='package')
            if len(packages) == 0:
                # (probably not needed anymore) previous nuget.org version had different structure
                packages = poppage.find_all('section', class_='package')
            if len(packages) == 0:
                self.log.warning('Quitting, no packages on %r' % url)
                break
            # Within the first/last page only a sub-range of the 20 entries
            # falls inside the requested window; a remainder of 0 means the
            # last slot on the page.
            first_package = (self.count.min % self._POPULAR_PACKAGES_PER_PAGE) \
                if page == first_page else 1
            if first_package == 0:
                first_package = self._POPULAR_PACKAGES_PER_PAGE
            last_package = (self.count.max % self._POPULAR_PACKAGES_PER_PAGE) \
                if page == last_page else self._POPULAR_PACKAGES_PER_PAGE
            if last_package == 0:
                last_package = self._POPULAR_PACKAGES_PER_PAGE
            for package in packages[first_package - 1:last_package]:
                # url_suffix ='/packages/ExtMongoMembership/1.7.0-beta'.split('/')
                url_suffix = package.find(href=re_compile(r'^/packages/'))['href'].split('/')
                if len(url_suffix) == 4:
                    name, releases = NugetReleasesFetcher.\
                        scrape_versions_from_nuget_org(url_suffix[2], sort_by_downloads=popular)
                    self.log.debug("Scheduling %d most %s versions of %s",
                                   self.nversions,
                                   'popular' if popular else 'recent',
                                   name)
                    # Most-popular versions sort to the front of the list,
                    # most-recent to the back.
                    releases = releases[:self.nversions] if popular else releases[-self.nversions:]
                    for release in releases:
                        self.analyses_selinon_flow(name, release)

    def do_execute(self, popular=True):
        """Run analyses on NuGet packages.

        :param popular: boolean, sort index by popularity
        """
        # Use nuget.org for all (popular or not)
        self._scrape_nuget_org(popular)
| {
"repo_name": "fabric8-analytics/fabric8-analytics-jobs",
"path": "f8a_jobs/handlers/nuget_popular_analyses.py",
"copies": "1",
"size": "3058",
"license": "apache-2.0",
"hash": 8572497280540400000,
"line_mean": 45.3333333333,
"line_max": 99,
"alpha_frac": 0.5696533682,
"autogenerated": false,
"ratio": 3.9356499356499355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.999850892712042,
"avg_score": 0.001358875345903028,
"num_lines": 66
} |
"""Analyser class for running analysis on columns depending on the column type"""
import re
from statistics import mode, StatisticsError
from math import floor, log10, pow, ceil
# Fraction of values that must convert cleanly for a column type to apply.
threshold = 0.9
# Cap on how many outliers get reported (enforcement currently commented out).
max_Outliers = 100
# Default number of standard deviations defining an outlier.
standardDeviations = 3
# Date pattern (dd/mm/yyyy or dd-mm-yyyy, leap-year aware).
re_date = re.compile('^((31(\/|-)(0?[13578]|1[02]))(\/|-)|((29|30)(\/|-)(0?[1,3-9]|1[0-2])(\/|-)))((1[6-9]|[2-9]\d)?\d{2})$|^(29(\/|-)0?2(\/|-)(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))$|^(0?[1-9]|1\d|2[0-8])(\/|-)((0?[1-9])|(1[0-2]))(\/|-)((1[6-9]|[2-9]\d)?\d{2})$')
# Seasonal month buckets: Dec-Feb, Mar-May, Jun-Aug, Sep-Nov.
re_dateDF = re.compile('^\d{1,2}(\/|-)((0?[12])|(12))')
re_dateMM = re.compile('^\d{1,2}(\/|-)(0?[3-5])')
re_dateJA = re.compile('^\d{1,2}(\/|-)(0?[6-8])')
re_dateSN = re.compile('^\d{1,2}(\/|-)((0?9)|(1[01]))')
# Time of day, 24h or 12h with optional am/pm suffix.
re_time = re.compile('(^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$)|(^(1[012]|0?[1-9]):[0-5][0-9](\ )?(?i)(am|pm)$)')
re_timePM = re.compile('[pP][mM]$')
re_timeAM = re.compile('[aA][mM]$')
re_timehr = re.compile('^\d{1,2}')
class Analyser(object):
    """Base class for column analysis.

    On construction computes two statistics shared by all column types:

    mode   -- statistical mode of the column values, or 'N/A' when the
              statistics module cannot determine one.
    unique -- number of distinct values in the column.

    Subclasses (StringAnalyser, EmailAnalyser, EnumAnalyser,
    NumericalAnalyser, CurrencyAnalyser, BooleanAnalyser, DateAnalyser,
    TimeAnalyser, CharAnalyser, DayAnalyser, HyperAnalyser) extend this
    with type-specific statistics.
    """

    def uniqueCount(self, values):
        """Return how many distinct values appear in *values*."""
        return len(set(values))

    def __init__(self, values):
        # statistics.mode raises StatisticsError when it cannot determine
        # a mode (e.g. an empty column); report 'N/A' in that case.
        try:
            self.mode = mode(values)
        except StatisticsError:
            self.mode = 'N/A'
        self.unique = self.uniqueCount(values)
class EmailAnalyser(Analyser):
    """Email-column analysis.

    Currently adds nothing beyond the base Analyser statistics.
    """

    def __init__(self, values):
        super().__init__(values)
        # TODO Something actually useful for emails.
class NumericalAnalyser(Analyser):
"""Runs numeric analysis.
Keyword arguments:
Analyser -- An analyser object.
"""
def __init__(self, values, stdDevs):
new_values = []
isNumeric = True
for i in values:
if i != '':
try:
if "." in i:
new_values.append(float(i))
else:
new_values.append(int(i))
except ValueError:
#assuming error cells are not passed to here
isNumeric = False
try:
print("Can't convert: ",i)
except UnicodeEncodeError:
# Character not recognised in python
pass
values = [i for i in new_values]
super().__init__(values)
if isNumeric:
self.stDevOutliers = []
#standardDeviations = Decimal(stdDevs)
length = len(values)
if values:
self.min = values[0]
self.max = values[1]
self.mean = 0
for x in values:
if x < self.min:
self.min = x
if x > self.max:
self.max = x
self.mean += x
self.mean = self.mean /length
self.stdev = 0
for x in values:
self.stdev += pow(x-self.mean, 2)
self.stdev = pow(self.stdev/length, 1/2)
values.sort()
median_index = (length+1)/2 - 1
qlow_index = (length+1)/4 - 1
qup_index = 3*(length+1)/4 - 1
if median_index % 1 == 0:
self.median = values[int(median_index)]
else:
self.median = (values[floor(median_index)]+values[ceil(median_index)])/2
if qlow_index % 1 == 0:
self.quartile_low = values[int(qlow_index)]
self.quartile_up = values[int(qup_index)]
else:
self.quartile_low = (values[floor(qlow_index)] + values[ceil(qlow_index)]) / 2
self.quartile_up = (values[floor(qup_index)] + values[ceil(qup_index)]) / 2
IQR = self.quartile_up - self.quartile_low
outlier_count = 0
for x, value in enumerate(values):
if value < (self.quartile_low - stdDevs/2 * IQR) or value > (self.quartile_up + stdDevs/2 * IQR):
self.stDevOutliers.append("Row: %d Value: %s" % (x, value))
outlier_count += 1
#if outlier_count > max_Outliers:
#self.stDevOutliers = "%d outliers" % outlier_count
self.max = self.round_significant(self.max)
self.min = self.round_significant(self.min)
self.mean = self.round_significant(self.mean)
self.quartile_low = self.round_significant(self.quartile_low)
self.quartile_up = self.round_significant(self.quartile_up)
self.median = self.round_significant(self.median)
self.stdev = self.round_significant(self.stdev)
else:
print("WARNING: Type error, cannot convert column to numerical value")
self.min = 'N/A'
self.max = 'N/A'
self.mean = 'N/A'
self.quartile_low = 'N/A'
self.median = 'N/A'
self.quartile_up = 'N/A'
self.stdev = 'N/A'
self.normDist = 'N/A'
self.stDevOutliers = 'N/A'
@staticmethod
def round_significant(x):
# Rounds to 6 significant figures
if isinstance(x, int) and abs(x) < 1000000 and abs(x) > 0.000001 or x == 0:
return x
if abs(x) >= 1000000:
return NumericalAnalyser.int_to_sci(x)
return float('%.6g' % x)
@staticmethod
def int_to_sci(value):
"""Converts numbers into a string in scientific notation form
Keyword arguments:
value -- The value to be converted to scientific notation.
"""
if value == 0:
return "0E+0"
power = floor(log10(abs(value)))
base = round(value / pow(10, power), 5)
if power > 0:
return str(base) + "e+" + str(power)
else:
return str(base) + "e-" + str(power)
@staticmethod
def is_compatable(values):
    """Return True when few enough entries fail to parse as numbers.

    Empty strings are ignored; an entry containing '.' must parse as a
    float, anything else as an int. The module-level `threshold` sets
    the tolerated failure ratio.
    """
    def _is_numeric(entry):
        try:
            float(entry) if '.' in entry else int(entry)
        except Exception:
            return False
        return True

    bad_values = sum(1 for entry in values
                     if entry != '' and not _is_numeric(entry))
    return bad_values / len(values) < threshold
class CurrencyAnalyser(NumericalAnalyser):
    """Numerical analysis for currency columns.

    Strips currency symbols ($, €, £), thousands separators and
    accounting-style parentheses before delegating to NumericalAnalyser.
    """

    @staticmethod
    def _strip_currency(value):
        """Return *value* with currency formatting removed.

        '(' becomes '-' so accounting negatives like "(1,000)" parse as
        -1000; ')' and ',' are simply dropped.
        """
        value = re.sub(r'[$€£]', '', value)
        value = value.replace('(', '-')  # negatives
        value = value.replace(')', '')
        return value.replace(',', '')  # long numbers

    def __init__(self, values, stdDevs):
        # The same cleaning was previously duplicated here and in
        # is_compatable; both now share _strip_currency.
        cleaned = [self._strip_currency(value) for value in values]
        super().__init__(cleaned, stdDevs)

    @staticmethod
    def is_compatable(values):
        """Return True when the cleaned values look numeric enough."""
        cleaned = [CurrencyAnalyser._strip_currency(value) for value in values]
        return super(CurrencyAnalyser, CurrencyAnalyser).is_compatable(cleaned)
class StringAnalyser(Analyser):
    """Run string analysis.

    Currently relies entirely on the generic statistics computed by the
    Analyser base class.
    """
    def __init__(self, values):
        """values -- list of raw string cell values."""
        super().__init__(values)
        # TODO Implement some string exclusive statistics.
class IdentifierAnalyser(Analyser):
    """Run identifier analysis.

    Currently relies entirely on the generic statistics computed by the
    Analyser base class.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        super().__init__(values)
        # TODO Implement some identifier exclusive statistics.
class EnumAnalyser(Analyser):
    """Run enumerated analysis.

    Currently relies entirely on the generic statistics computed by the
    Analyser base class.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        super().__init__(values)
        # TODO Implement some enum exclusive statistics.
class BooleanAnalyser(Analyser):
    """Run boolean analysis.

    Currently relies entirely on the generic statistics computed by the
    Analyser base class.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        super().__init__(values)
class SciNotationAnalyser(Analyser):
    """Run scientific notation analysis.

    Computes min/max/mean/median/quartiles/stdev over the parsed float
    values and reports IQR outliers; min, max and mode are re-formatted
    in scientific notation.

    Class Methods:
    int_to_sci -- Converts a given number into a string in scientific
                  notation form (reused from NumericalAnalyser).
    """
    def __init__(self, values, stdDevs):
        """values -- raw string cell values; '' entries are skipped.
        stdDevs -- kept for signature compatibility with the other numeric
                   analysers (outlier fences here always use 1.5 * IQR).
        """
        # Convert the raw strings to floats, dropping empty cells.
        new_values = []
        isNumeric = True
        for i in values:
            if i != '':
                try:
                    new_values.append(float(i))
                except Exception:
                    isNumeric = False
                    print("Can't convert: ", i)
        values = [i for i in new_values]
        super().__init__(values)
        if isNumeric and values:
            length = len(values)
            self.stDevOutliers = []
            # Single pass for min, max and the sum feeding the mean.
            # Bug fix: both extremes now start at values[0]; the old code
            # seeded self.max from values[1], which raised IndexError on
            # single-element columns.
            self.min = values[0]
            self.max = values[0]
            self.mean = 0
            for x in values:
                if x < self.min:
                    self.min = x
                if x > self.max:
                    self.max = x
                self.mean += x
            self.min = NumericalAnalyser.int_to_sci(self.min)
            self.max = NumericalAnalyser.int_to_sci(self.max)
            self.mean = self.mean / length
            # Population standard deviation.
            self.stdev = 0
            for x in values:
                self.stdev += pow(x - self.mean, 2)
            self.stdev = pow(self.stdev / length, 1 / 2)
            values.sort()
            # Median/quartiles via linear interpolation on sorted values.
            # NOTE(review): columns with fewer than 3 values can still
            # index out of range in the quartile interpolation -- TODO.
            median_index = (length + 1) / 2 - 1
            qlow_index = (length + 1) / 4 - 1
            qup_index = 3 * (length + 1) / 4 - 1
            if median_index % 1 == 0:
                self.median = values[int(median_index)]
            else:
                self.median = (values[floor(median_index)] + values[ceil(median_index)]) / 2
            if qlow_index % 1 == 0:
                self.quartile_low = values[int(qlow_index)]
                self.quartile_up = values[int(qup_index)]
            else:
                self.quartile_low = (values[floor(qlow_index)] + values[ceil(qlow_index)]) / 2
                self.quartile_up = (values[floor(qup_index)] + values[ceil(qup_index)]) / 2
            IQR = self.quartile_up - self.quartile_low
            outlier_count = 0
            # Tukey fences: anything beyond 1.5 * IQR from the quartiles.
            for x, value in enumerate(values):
                if value < (self.quartile_low - 1.5 * IQR) or value > (self.quartile_up + 1.5 * IQR):
                    self.stDevOutliers.append("Row: %d Value: %s" % (x, value))
                    outlier_count += 1
            self.mean = NumericalAnalyser.round_significant(self.mean)
            self.quartile_low = NumericalAnalyser.round_significant(self.quartile_low)
            self.quartile_up = NumericalAnalyser.round_significant(self.quartile_up)
            self.median = NumericalAnalyser.round_significant(self.median)
            self.stdev = NumericalAnalyser.round_significant(self.stdev)
            if self.mode != 'N/A':
                self.mode = NumericalAnalyser.int_to_sci(self.mode)
        else:
            # Either a value failed to parse or the column was empty.
            # Robustness fix: the empty-but-numeric case previously left
            # these attributes unset entirely.
            if not isNumeric:
                print("WARNING Cannot convert to scientific notation")
            self.min = 'N/A'
            self.max = 'N/A'
            self.mean = 'N/A'
            self.quartile_low = 'N/A'
            self.median = 'N/A'
            self.quartile_up = 'N/A'
            self.stdev = 'N/A'
            self.normDist = 'N/A'
            self.stDevOutliers = 'N/A'
    @staticmethod
    def is_compatable(values):
        """Return True when few enough non-empty entries fail float()."""
        bad_values = 0
        for i in values:
            if i != '':
                try:
                    float(i)
                except Exception:
                    bad_values += 1
        if bad_values / len(values) >= threshold:
            return False
        return True
class DateAnalyser(Analyser):
    """Run date analysis.

    Counts, over all cells matching the generic date pattern, how many
    match each of the four recognised date layout patterns.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        super().__init__(values)
        dated = [value for value in values if re_date.search(value)]
        self.dateDF = sum(1 for value in dated if re_dateDF.search(value))
        self.dateMM = sum(1 for value in dated if re_dateMM.search(value))
        self.dateJA = sum(1 for value in dated if re_dateJA.search(value))
        self.dateSN = sum(1 for value in dated if re_dateSN.search(value))
class TimeAnalyser(Analyser):
    """Run time analysis.

    Tallies how often each hour of the day (24-hour clock) occurs among
    the cells matching the time pattern.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        # One [hour, count] bucket per hour of the day.
        buckets = [[hour, 0] for hour in range(24)]
        super().__init__(values)
        for value in values:
            if not re_time.search(value):
                continue
            hour = int(re_timehr.search(value).group(0))
            # Normalise 12-hour clock readings onto 0-23.
            if re_timePM.search(value) and hour != 12:
                hour += 12
            elif re_timeAM.search(value) and hour == 12:
                hour = 0
            buckets[hour][1] += 1
        # Most frequent hours first.
        self.hourCS = sorted(buckets, key=lambda bucket: bucket[1], reverse=True)
class CharAnalyser(Analyser):
    """Run char analysis.

    Currently relies entirely on the generic statistics computed by the
    Analyser base class.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        super().__init__(values)
class DayAnalyser(Analyser):
    """Run day analysis.

    Currently relies entirely on the generic statistics computed by the
    Analyser base class.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        super().__init__(values)
class HyperAnalyser(Analyser):
    """Run hyperlink analysis.

    Currently relies entirely on the generic statistics computed by the
    Analyser base class.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        super().__init__(values)
        # TODO Implement some hyperlink unique stats, e.g. domain frequency.
class DatetimeAnalyser(Analyser):
    """Run datetime analysis, currently only using Analyser super class
    methods.
    """
    def __init__(self, values):
        """values -- list of raw cell values."""
        super().__init__(values)
        # TODO implement datetime unique stats
def normaltest(values):
    """Normality test of values based on Jarque-Bera Test.

    NOTE(review): the implementation below is parked inside a bare string
    literal, so this function currently does nothing and returns None.
    """
    # Disabled draft implementation, kept verbatim as a no-op string:
    """xbar = mean(values)
    n = len(values)
    s_top = sum([pow(x-xbar, 3) for x in values])/n
    std = sum([pow(x-xbar,2) for x in values])/n
    s_bot = pow(std, 3/2)
    S = s_top/s_bot
    c_top = sum([pow(x-xbar, 4) for x in values])/n
    c_bot = pow(std , 2)
    C = c_top/c_bot
    JB = (n /6) * (pow(S,2) + 0.25 * pow(C-3, 2))
print("JB: ", JB)""" | {
"repo_name": "lilfolr/CITS4406-Assignment2",
"path": "analyser.py",
"copies": "1",
"size": "18929",
"license": "mit",
"hash": -2797921422660496400,
"line_mean": 34.6384180791,
"line_max": 310,
"alpha_frac": 0.5282460498,
"autogenerated": false,
"ratio": 3.9234916027368856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49517376525368856,
"avg_score": null,
"num_lines": null
} |
"""Analyser of Assembly code that recognizes standard macros of the
compiler."""
import logging
import collections
from . import instructions
# Recognized stack macros: `arg` holds the register id involved in the
# store/load; `tmp` is always None for now (reserved for later use).
Push = collections.namedtuple('Push', ['arg', 'tmp'])
Pop = collections.namedtuple('Pop', ['arg', 'tmp'])
class Analyzer:
    """Recognizes compiler-generated push/pop macros in a program."""

    def __init__(self, program):
        """program -- sequence of instruction objects supporting .match()."""
        self.program = program

    def analyze(self):
        """Scan the whole program and cache the recognized macros."""
        self.pops = self.recognize_pops()
        self.pushes = self.recognize_pushes()

    def get_stack_top(self, state):
        """Return the value on top of the stack, or None when it is empty.

        Register 6 is used as the stack pointer; the value 255 marks an
        empty stack.
        """
        if state.registers[6] == 255:
            return None
        return state.data[state.registers[6] + 1]

    def _recognize(self, opcode, factory):
        """Map instruction index -> macro for every `opcode r, (r6+off)`.

        Shared implementation for recognize_pushes/recognize_pops (the
        two bodies were previously copy-paste duplicates): only accesses
        through register 6 (the stack pointer) qualify.
        """
        found = {}
        for (i, inst) in enumerate(self.program):
            try:
                (r, (six, offset)) = inst.match(opcode, None, None)
                if six != 6:
                    raise instructions.MatchError()
            except instructions.MatchError:
                continue
            found[i] = factory(r.id, None)
        return found

    def recognize_pushes(self):
        """Return {index: Push} for every store through the stack pointer."""
        return self._recognize('st', Push)

    def recognize_pops(self):
        """Return {index: Pop} for every load through the stack pointer."""
        return self._recognize('ld', Pop)
| {
"repo_name": "ProgVal/pydigmips",
"path": "pydigmips/assembly_analysis.py",
"copies": "1",
"size": "1383",
"license": "mit",
"hash": -5587019729106984000,
"line_mean": 26.66,
"line_max": 67,
"alpha_frac": 0.5444685466,
"autogenerated": false,
"ratio": 4.055718475073314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5100187021673314,
"avg_score": null,
"num_lines": null
} |
"""Analyses datasets"""
from csv import reader
def parse_bus_stops(filename, suburb_filter=""):
    """Parses a csv file of bus stops.

    filename -- path to the csv export; every second row holds real data,
                the rows in between are garbage and are skipped.
    suburb_filter -- when non-empty, only stops in that suburb are kept.

    Returns a list of BusStop objects.
    """
    bus_stops = []
    # Bug fix: reader.next() is Python-2 only; the next() builtin works on
    # both 2.6+ and 3. Text mode replaces "rb", which the csv module
    # rejects under Python 3.
    with open(filename, "r") as bus_stop_file:
        bus_csv_reader = reader(bus_stop_file)
        next(bus_csv_reader)  # skip the header row
        # Each second line of the file is garbage: keep odd indexes only.
        for index, line in enumerate(bus_csv_reader):
            if index % 2 == 0:
                continue
            if not suburb_filter or line[3] == suburb_filter:
                bus_stops.append(BusStop(line[0], line[2], line[3], line[7], line[8]))
    return bus_stops
"""Finds the middle location of all stops in the list, used for centering the map on the points
Return a list of coordinates, [lat, long]
"""
def get_mid_location(bus_stops):
max_lat = 0
min_lat = 0
max_long = 0
min_long = 0
for stop in bus_stops:
# Find the lats
if max_lat == 0:
max_lat = stop.lat
else:
if max_lat < stop.lat:
max_lat = stop.lat
if min_lat == 0:
min_lat = stop.lat
else:
if min_lat > stop.lat:
min_lat = stop.lat
# Find the longs
if max_long == 0:
max_long = stop.long
else:
if max_long < stop.long:
max_long = stop.long
if min_long == 0:
min_long = stop.long
else:
if min_long > stop.long:
min_long = stop.long
mid_lat = ((max_lat - min_lat) / 2) + min_lat
mid_long = ((max_long - min_long) / 2) + min_long
return [mid_lat, mid_long]
"""Stores a bus stop"""
class BusStop:
def __init__(self, stopid, road, suburb, lat, long):
self.stopid = stopid
self.road = road
self.suburb = suburb
self.lat = float(lat)
self.long = float(long)
def __repr__(self):
return "{} - {}, {} - ({}, {})".format(self.stopid, self.road, self.suburb, self.long, self.lat)
def get_location(self):
"""Returns the location of the bus stop in a list [lat, long]"""
return [self.lat, self.long]
if __name__ == "__main__":
stops = parse_bus_stops("datasets/dataset_bus_stops.csv")
print(stops) | {
"repo_name": "r-portas/brisbane-bus-stops",
"path": "analyse.py",
"copies": "1",
"size": "2506",
"license": "mit",
"hash": -4975846715688513000,
"line_mean": 26.5494505495,
"line_max": 104,
"alpha_frac": 0.5139664804,
"autogenerated": false,
"ratio": 3.5246132208157523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4538579701215752,
"avg_score": null,
"num_lines": null
} |
"""Analyse the polarity of cells using tensors."""
import os
import os.path
import argparse
import logging
import warnings
import PIL
import numpy as np
import skimage.draw
from jicbioimage.core.util.array import pretty_color_array
from jicbioimage.core.io import (
AutoName,
AutoWrite,
)
from jicbioimage.illustrate import AnnotatedImage
from jicbioimage.transform import max_intensity_projection
from utils import (
get_microscopy_collection,
get_wall_intensity_and_mask_images,
get_marker_intensity_images,
marker_cell_identifier,
)
from segment import (
cell_segmentation,
marker_segmentation,
)
from tensor import get_tensors
from annotate import (
annotate_segmentation,
annotate_markers,
annotate_tensors,
make_transparent,
)
# Suppress spurious scikit-image warnings.
warnings.filterwarnings("ignore", module="skimage.morphology.misc")

# Prefix auto-named output files with a zero-padded step number, e.g. "001_".
AutoName.prefix_format = "{:03d}_"
def analyse(microscopy_collection, wall_channel, marker_channel, threshold, max_cell_size):
    """Do the analysis.

    microscopy_collection -- collection holding the image stacks
    wall_channel -- index of the cell wall channel
    marker_channel -- index of the membrane marker channel
    threshold -- intensity threshold used for the marker segmentation
    max_cell_size -- segments larger than this many pixels are discarded

    Side effects: writes raw_tensors.txt, wall_intensity.png,
    marker_intensity.png and segmentation.png into AutoName.directory.
    """
    # Prepare the input data for the segmentations.
    (wall_intensity2D,
     wall_intensity3D,
     wall_mask2D,
     wall_mask3D) = get_wall_intensity_and_mask_images(microscopy_collection, wall_channel)
    (marker_intensity2D,
     marker_intensity3D) = get_marker_intensity_images(microscopy_collection, marker_channel)
    # Perform the segmentation.
    cells = cell_segmentation(wall_intensity2D, wall_mask2D, max_cell_size)
    markers = marker_segmentation(marker_intensity3D, wall_mask3D, threshold)
    # Get marker in cell wall and project to 2D.
    wall_marker = marker_intensity3D * wall_mask3D
    wall_marker = max_intensity_projection(wall_marker)
    # Get tensors.
    tensors = get_tensors(cells, markers)
    # Write out tensors to a text file.
    fpath = os.path.join(AutoName.directory, "raw_tensors.txt")
    with open(fpath, "w") as fh:
        tensors.write_raw_tensors(fh)
    # Write out intensity images.
    fpath = os.path.join(AutoName.directory, "wall_intensity.png")
    with open(fpath, "wb") as fh:
        fh.write(wall_intensity2D.png())
    fpath = os.path.join(AutoName.directory, "marker_intensity.png")
    with open(fpath, "wb") as fh:
        fh.write(wall_marker.png())
    # Shrink the segments to make them clearer: keep only a thin border of
    # each segment (region minus its doubly-shrunk interior).
    # NOTE(review): assumes region.inner shrinks by one pixel per call --
    # confirm against the jicbioimage docs.
    for i in cells.identifiers:
        region = cells.region_by_identifier(i)
        mask = region - region.inner.inner
        cells[mask] = 0
    colorful = pretty_color_array(cells)
    pil_im = PIL.Image.fromarray(colorful.view(dtype=np.uint8))
    # Fade near-black background pixels to transparent for overlaying.
    pil_im = make_transparent(pil_im, 60)
    fpath = os.path.join(AutoName.directory, "segmentation.png")
    pil_im.save(fpath)
def main():
    """Run the analysis on an individual image.

    Parses the command line (input tiff, output directory, channel
    indices, marker threshold, max cell size, --debug), configures output
    naming and logging, then hands over to analyse().
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input_file", help="Path to input tiff file")
    parser.add_argument("output_dir", help="Output directory")
    parser.add_argument("-w", "--wall-channel",
                        default=1, type=int,
                        help="Wall channel (zero indexed)")
    parser.add_argument("-m", "--marker-channel",
                        default=0, type=int,
                        help="Marker channel (zero indexed)")
    parser.add_argument("-t", "--threshold",
                        default=45, type=int,
                        help="Marker threshold")
    parser.add_argument("-s", "--max-cell-size",
                        default=10000, type=int,
                        help="Maximum cell size (pixels)")
    parser.add_argument("--debug",
                        default=False, action="store_true")
    args = parser.parse_args()
    if not os.path.isfile(args.input_file):
        parser.error("No such file: {}".format(args.input_file))
    # Create the output directory if needed; AutoName routes output there.
    if not os.path.isdir(args.output_dir):
        os.mkdir(args.output_dir)
    AutoName.directory = args.output_dir
    # Intermediate transform images are only auto-written in debug mode.
    AutoWrite.on = args.debug
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
    logging.info("Input file: {}".format(args.input_file))
    logging.info("Wall channel: {}".format(args.wall_channel))
    logging.info("Marker channel: {}".format(args.marker_channel))
    logging.info("Marker threshold: {}".format(args.threshold))
    logging.info("Max cell size: {}".format(args.max_cell_size))
    microscopy_collection = get_microscopy_collection(args.input_file)
    analyse(microscopy_collection,
            wall_channel=args.wall_channel,
            marker_channel=args.marker_channel,
            threshold=args.threshold,
            max_cell_size=args.max_cell_size)
if __name__ == "__main__":
main()
| {
"repo_name": "JIC-Image-Analysis/leaf-cell-polarisation-tensors",
"path": "scripts/automated_analysis.py",
"copies": "1",
"size": "4679",
"license": "mit",
"hash": -2390279877104362500,
"line_mean": 32.6618705036,
"line_max": 93,
"alpha_frac": 0.6633896132,
"autogenerated": false,
"ratio": 3.6813532651455545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98416731265189,
"avg_score": 0.0006139503653308679,
"num_lines": 139
} |
"""Analyse the polarity of cells using tensors.
This script makes use of a Gaussian projection as a pre-processing step prior
to segmentation of the cell wall and marker channels.
"""
import os
import os.path
import argparse
import logging
import PIL
import numpy as np
import skimage.feature
from jicbioimage.core.util.array import pretty_color_array
from jicbioimage.core.transform import transformation
from jicbioimage.core.io import (
AutoName,
AutoWrite,
)
from jicbioimage.transform import (
invert,
dilate_binary,
remove_small_objects,
)
from jicbioimage.segment import connected_components, watershed_with_seeds
from utils import (
get_microscopy_collection,
threshold_abs,
identity,
remove_large_segments,
)
from tensor import get_tensors
from annotate import make_transparent
from gaussproj import (
generate_surface_from_stack,
projection_from_stack_and_surface,
)
# Prefix auto-named output files with a zero-padded step number, e.g. "001_".
AutoName.prefix_format = "{:03d}_"
@transformation
def threshold_adaptive_median(image, block_size):
    """Return a binary mask from local adaptive thresholding.

    NOTE(review): relies on skimage.filters being importable even though
    only skimage.feature is imported above -- confirm.
    """
    return skimage.filters.threshold_adaptive(image, block_size=block_size)
@transformation
def marker_in_wall(marker, wall):
    """Mask the marker image to the wall region (elementwise product)."""
    return marker * wall
def segment_cells(image, max_cell_size):
    """Return segmented cells.

    image -- 2D cell wall intensity projection
    max_cell_size -- segments larger than this many pixels are dropped

    Returns a (segmentation, wall_mask) 2-tuple.
    """
    image = identity(image)
    # Adaptive threshold picks out the bright cell walls.
    wall = threshold_adaptive_median(image, block_size=101)
    # Seeds: clean the wall mask, then invert so cell interiors remain.
    seeds = remove_small_objects(wall, min_size=100)
    seeds = dilate_binary(seeds)
    seeds = invert(seeds)
    seeds = remove_small_objects(seeds, min_size=5)
    seeds = connected_components(seeds, background=0)
    # Watershed floods from the seeds over the inverted intensity image.
    segmentation = watershed_with_seeds(-image, seeds=seeds)
    segmentation = remove_large_segments(segmentation, max_cell_size)
    return segmentation, wall
def segment_markers(image, wall, threshold):
    """Return segmented markers.

    image -- marker intensity projection
    wall -- binary wall mask restricting where markers may appear
    threshold -- absolute intensity cutoff for marker pixels
    """
    image = threshold_abs(image, threshold)
    image = marker_in_wall(image, wall)
    image = remove_small_objects(image, min_size=10)
    segmentation = connected_components(image, background=0)
    return segmentation
def analyse(microscopy_collection, wall_channel, marker_channel,
            threshold, max_cell_size):
    """Do the analysis.

    Both channels are projected onto a surface fitted to the wall stack
    before segmentation. Writes raw_tensors.txt, wall_intensity.png,
    marker_intensity.png and segmentation.png into AutoName.directory.
    """
    # Prepare the input data for the segmentations.
    cell_wall_stack = microscopy_collection.zstack_array(c=wall_channel)
    marker_stack = microscopy_collection.zstack_array(c=marker_channel)
    surface = generate_surface_from_stack(cell_wall_stack)
    # NOTE(review): the 1, 9 arguments presumably bound the slab of planes
    # projected around the fitted surface -- confirm against gaussproj.
    cell_wall_projection = projection_from_stack_and_surface(cell_wall_stack,
                                                            surface, 1, 9)
    marker_projection = projection_from_stack_and_surface(marker_stack,
                                                          surface, 1, 9)
    # Perform the segmentation.
    cells, wall = segment_cells(cell_wall_projection, max_cell_size)
    markers = segment_markers(marker_projection, wall, threshold)
    # Get tensors.
    tensors = get_tensors(cells, markers)
    # Write out tensors to a text file.
    fpath = os.path.join(AutoName.directory, "raw_tensors.txt")
    with open(fpath, "w") as fh:
        tensors.write_raw_tensors(fh)
    # Write out intensity images.
    fpath = os.path.join(AutoName.directory, "wall_intensity.png")
    with open(fpath, "wb") as fh:
        fh.write(cell_wall_projection.png())
    fpath = os.path.join(AutoName.directory, "marker_intensity.png")
    marker_im = marker_in_wall(marker_projection, wall)
    with open(fpath, "wb") as fh:
        fh.write(marker_im.png())
    # Shrink the segments to make them clearer (keep a thin border only).
    for i in cells.identifiers:
        region = cells.region_by_identifier(i)
        mask = region - region.inner.inner
        cells[mask] = 0
    colorful = pretty_color_array(cells)
    pil_im = PIL.Image.fromarray(colorful.view(dtype=np.uint8))
    # Fade near-black background pixels to transparent for overlaying.
    pil_im = make_transparent(pil_im, 60)
    fpath = os.path.join(AutoName.directory, "segmentation.png")
    pil_im.save(fpath)
def main():
    """Run the analysis on an individual image.

    Parses the command line (input tiff, output directory, channel
    indices, marker threshold, max cell size, --debug), configures output
    naming and logging, then hands over to analyse().
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input_file", help="Path to input tiff file")
    parser.add_argument("output_dir", help="Output directory")
    parser.add_argument("-w", "--wall-channel",
                        default=1, type=int,
                        help="Wall channel (zero indexed)")
    parser.add_argument("-m", "--marker-channel",
                        default=0, type=int,
                        help="Marker channel (zero indexed)")
    parser.add_argument("-t", "--threshold",
                        default=60, type=int,
                        help="Marker threshold")
    parser.add_argument("-s", "--max-cell-size",
                        default=10000, type=int,
                        help="Maximum cell size (pixels)")
    parser.add_argument("--debug",
                        default=False, action="store_true")
    args = parser.parse_args()
    if not os.path.isfile(args.input_file):
        parser.error("No such file: {}".format(args.input_file))
    # Create the output directory if needed; AutoName routes output there.
    if not os.path.isdir(args.output_dir):
        os.mkdir(args.output_dir)
    AutoName.directory = args.output_dir
    # Intermediate transform images are only auto-written in debug mode.
    AutoWrite.on = args.debug
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
    logging.info("Input file: {}".format(args.input_file))
    logging.info("Wall channel: {}".format(args.wall_channel))
    logging.info("Marker channel: {}".format(args.marker_channel))
    logging.info("Marker threshold: {}".format(args.threshold))
    logging.info("Max cell size: {}".format(args.max_cell_size))
    microscopy_collection = get_microscopy_collection(args.input_file)
    analyse(microscopy_collection,
            wall_channel=args.wall_channel,
            marker_channel=args.marker_channel,
            threshold=args.threshold,
            max_cell_size=args.max_cell_size)
if __name__ == "__main__":
main()
| {
"repo_name": "JIC-Image-Analysis/leaf-cell-polarisation-tensors",
"path": "scripts/automated_gaussproj_analysis.py",
"copies": "1",
"size": "5901",
"license": "mit",
"hash": -3817863702827571000,
"line_mean": 33.3081395349,
"line_max": 77,
"alpha_frac": 0.6609049314,
"autogenerated": false,
"ratio": 3.7609942638623326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49218991952623325,
"avg_score": null,
"num_lines": null
} |
"""Analyse top maven popular projects."""
import bs4
from collections import OrderedDict
import os
import re
import requests
import tempfile
from selinon import StoragePool
from shutil import rmtree
from .base import AnalysesBaseHandler
from f8a_worker.utils import cwd, TimedCommand
from f8a_worker.errors import TaskError
class MavenPopularAnalyses(AnalysesBaseHandler):
    """Analyse top maven popular projects."""

    _BASE_URL = 'http://mvnrepository.com'
    # Never page deeper than this on any listing page.
    _MAX_PAGES = 10

    def __init__(self, *args, **kwargs):
        """Create an instance of this analysis handler."""
        super().__init__(*args, **kwargs)
        # name -> list of scheduled versions, in discovery order.
        self.projects = OrderedDict()
        # Count of distinct projects scheduled so far.
        self.nprojects = 0

    @staticmethod
    def _find_versions(project_page, latest_version_only=False):
        """Return version strings scraped from an artifact page.

        Ordered by usage count when usage statistics are present,
        otherwise by version string (descending).
        """
        def _has_numeric_usages(tag):
            # Matches links such as <a href=".../2.0.5.GA/usages">64</a>.
            return tag.has_attr('href') and \
                tag.get('href').endswith('/usages') and \
                tag.get_text().replace(',', '').isnumeric()
        usage_tags = project_page.find_all(_has_numeric_usages)
        if usage_tags and not latest_version_only:
            # sort according to usage
            usage_tags = sorted(usage_tags, key=lambda u: int(u.text.replace(',', '')),
                                reverse=True)
            # [<a href="jboss-logging-log4j/2.0.5.GA/usages">64</a>]
            versions = [v.get('href').split('/')[-2] for v in usage_tags]
        else:  # no usage stats, get the versions other way
            versions = project_page.find_all('a', class_=re.compile('vbtn *'))
            # [<a class="vbtn release" href="common-angularjs/3.8">3.8</a>]
            if latest_version_only:
                # take the first one (always the latest version)
                versions = versions[:1]
            else:
                # NOTE(review): lexicographic sort, so "9" orders after "10".
                versions = sorted(versions, key=lambda v: v.text, reverse=True)
            versions = [v.text for v in versions]
        return versions

    def _projects_from(self, url_suffix):
        """Scrape the selected page @ http://mvnrepository.com.

        Populates self.projects / self.nprojects and schedules analyses
        as a side effect; returns (None) as soon as self.count.max
        projects have been seen.

        :param url_suffix: to add to _BASE_URL
        """
        if not url_suffix.startswith('/'):
            url_suffix = '/' + url_suffix
        for page in range(1, self._MAX_PAGES + 1):
            page_link = '{base_url}{url_suffix}?p={page}'.format(base_url=self._BASE_URL,
                                                                 url_suffix=url_suffix,
                                                                 page=page)
            pop = requests.get(page_link)
            poppage = bs4.BeautifulSoup(pop.text, 'html.parser')
            for link in poppage.find_all('a', class_='im-usage'):
                # <a class="im-usage" href="/artifact/junit/junit/usages"><b>56,752</b> usages</a>
                artifact = link.get('href')[0:-len('/usages')]
                artifact_link = '{url}{a}'.format(url=self._BASE_URL, a=artifact)
                art = requests.get(artifact_link)
                artpage = bs4.BeautifulSoup(art.text, 'html.parser')
                # "groupId:artifactId" form, e.g. "junit:junit".
                name = artifact[len('/artifact/'):].replace('/', ':')
                all_versions = self._find_versions(artpage, self.nversions == 1)
                if name not in self.projects and all_versions:
                    versions = all_versions[:self.nversions]
                    self.log.debug("Scheduling #%d. (number versions: %d)",
                                   self.nprojects, self.nversions)
                    self.projects[name] = versions
                    self.nprojects += 1
                    for version in versions:
                        # TODO: this can be unrolled
                        if self.count.min <= self.nprojects <= self.count.max:
                            self.analyses_selinon_flow(name, version)
                        else:
                            self.log.debug("Skipping scheduling for #%d. (min=%d, max=%d, "
                                           "name=%s, version=%s)",
                                           self.nprojects, self.count.min, self.count.max, name,
                                           version)
                    if self.nprojects >= self.count.max:
                        return

    def _top_projects(self):
        """Scrape Top Projects page @ http://mvnrepository.com/popular."""
        self.log.debug('Scraping Top Projects page http://mvnrepository.com/popular')
        self._projects_from('/popular')

    def _top_categories_projects(self):
        """Scrape Top Categories page @ http://mvnrepository.com/open-source."""
        for page in range(1, self._MAX_PAGES + 1):
            page_link = '{url}/open-source?p={page}'.format(url=self._BASE_URL, page=page)
            self.log.debug('Scraping Top Categories page %s' % page_link)
            cat = requests.get(page_link)
            catpage = bs4.BeautifulSoup(cat.text, 'html.parser')
            # [<a href="/open-source/testing-frameworks">more...</a>]
            for link in catpage.find_all('a', text='more...'):
                category = link.get('href')
                self._projects_from(category)
                if self.nprojects >= self.count.max:
                    return

    def _top_tags_projects(self):
        """Scrape Popular Tags page @ http://mvnrepository.com/tags."""
        page_link = '{url}/tags'.format(url=self._BASE_URL)
        self.log.debug('Scraping Popular Tags page %s' % page_link)
        tags_page = requests.get(page_link)
        tagspage = bs4.BeautifulSoup(tags_page.text, 'html.parser')
        # Tag weight classes t1..t9 encode popularity.
        tags_a = tagspage.find_all('a', class_=re.compile('t[1-9]'))
        # [<a class="t4" href="/tags/accumulo">accumulo</a>,
        # <a class="t7" href="/tags/actor">actor</a>]
        tags_a = sorted(tags_a, key=lambda x: x.get('class'), reverse=True)
        for link in tags_a:
            tag = link.get('href')
            self._projects_from(tag)
            if self.nprojects >= self.count.max:
                return

    def _use_maven_index_checker(self):
        """Schedule analyses from the Maven central index (non-popular path).

        Runs the maven-index-checker jar over a cached central index
        (persisted in S3) and schedules one version per unanalysed
        package in the requested range.
        """
        maven_index_checker_dir = os.getenv('MAVEN_INDEX_CHECKER_PATH')
        maven_index_checker_data_dir = os.environ.get('MAVEN_INDEX_CHECKER_DATA_PATH',
                                                      '/tmp/index-checker')
        os.makedirs(maven_index_checker_data_dir, exist_ok=True)
        central_index_dir = os.path.join(maven_index_checker_data_dir, 'central-index')
        timestamp_path = os.path.join(central_index_dir, 'timestamp')
        s3 = StoragePool.get_connected_storage('S3MavenIndex')
        self.log.info('Fetching pre-built maven index from S3, if available.')
        s3.retrieve_index_if_exists(maven_index_checker_data_dir)
        old_timestamp = 0
        try:
            old_timestamp = int(os.stat(timestamp_path).st_mtime)
        except OSError:
            self.log.info('Timestamp is missing, we will probably need to build the index '
                          'from scratch.')
            pass
        java_temp_dir = tempfile.mkdtemp(prefix='tmp-', dir=os.environ.get('PV_DIR', '/tmp'))
        index_range = '{}-{}'.format(self.count.min, self.count.max)
        command = ['java', '-Xmx768m',
                   '-Djava.io.tmpdir={}'.format(java_temp_dir),
                   '-DcentralIndexDir={}'.format(central_index_dir),
                   '-jar', 'maven-index-checker.jar', '-r', index_range]
        if self.nversions == 1:
            # -l limits the checker to the latest version of each package.
            command.append('-l')
        with cwd(maven_index_checker_dir):
            try:
                output = TimedCommand.get_command_output(command, is_json=True, graceful=False,
                                                         timeout=1200)
                # The jar refreshed the index; push it back only if changed.
                new_timestamp = int(os.stat(timestamp_path).st_mtime)
                if old_timestamp != new_timestamp:
                    self.log.info('Storing pre-built maven index to S3...')
                    s3.store_index(maven_index_checker_data_dir)
                    self.log.debug('Stored. Index in S3 is up-to-date.')
                else:
                    self.log.info('Index in S3 is up-to-date.')
            except TaskError as e:
                self.log.exception(e)
                raise
            finally:
                rmtree(central_index_dir)
                self.log.debug('central-index/ deleted')
                rmtree(java_temp_dir)
        s3data = StoragePool.get_connected_storage('S3Data')
        bucket = s3data._s3.Bucket(s3data.bucket_name)
        for idx, release in enumerate(output):
            name = '{}:{}'.format(release['groupId'], release['artifactId'])
            version = release['version']
            # For now (can change in future) we want to analyze only ONE version of each package
            try:
                next(iter(bucket.objects.filter(Prefix='{e}/{p}/'.format(
                    e=self.ecosystem, p=name)).limit(1)))
                self.log.info("Analysis of some version of %s has already been scheduled, "
                              "skipping version %s", name, version)
                continue
            except StopIteration:
                self.log.info("Scheduling #%d.", self.count.min + idx)
                self.analyses_selinon_flow(name, version)

    def do_execute(self, popular=True):
        """Run core analyse on maven projects.

        :param popular: boolean, sort index by popularity
        """
        if popular:
            self._top_projects()
            if self.nprojects < self.count.max:
                # There's only 100 projects on Top Projects page, look at top categories
                self._top_categories_projects()
            if self.nprojects < self.count.max:
                # Still not enough ? Ok, let's try popular tags
                self._top_tags_projects()
            if self.nprojects < self.count.max:
                self.log.warning("No more sources of popular projects. "
                                 "%d will be scheduled instead of requested %d" % (self.nprojects,
                                                                                   self.count.max))
        else:
            self._use_maven_index_checker()
| {
"repo_name": "fabric8-analytics/fabric8-analytics-jobs",
"path": "f8a_jobs/handlers/maven_popular_analyses.py",
"copies": "1",
"size": "10306",
"license": "apache-2.0",
"hash": -9137589073530232000,
"line_mean": 46.9348837209,
"line_max": 100,
"alpha_frac": 0.5356103241,
"autogenerated": false,
"ratio": 4.035238841033673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070849165133673,
"avg_score": null,
"num_lines": null
} |
"""Analyse top npm popular packages."""
import bs4
import requests
from .base import AnalysesBaseHandler
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
class PythonPopularAnalyses(AnalysesBaseHandler):
    """Analyse top PyPI popular packages."""

    # NOTE(review): third-party ranking site; availability not guaranteed.
    _URL = 'http://pypi-ranking.info'
    _PACKAGES_PER_PAGE = 50

    @staticmethod
    def _parse_version_stats(html_version_stats, sort_by_popularity=True):
        """Parse version statistics from HTML definition.

        :param html_version_stats: tr-like representation of version statistics
        :param sort_by_popularity: whether or not to return versions sorted by popularity
        :return: list of (version, downloads) tuples, ordered by download
                 count (descending) when sort_by_popularity is set
        """
        result = []
        for version_definition in html_version_stats:
            # Access nested td
            version_name = version_definition.text.split('\n')[1]
            version_downloads = version_definition.text.split('\n')[4]
            # There are numbers with comma, get rid of it
            result.append((version_name, int(version_downloads.replace(',', ''))))
        if sort_by_popularity:
            return sorted(result, key=lambda x: x[1], reverse=True)
        return result

    def _use_pypi_xml_rpc(self):
        """Schedule analyses of packages based on PyPI index using XML-RPC.

        https://wiki.python.org/moin/PyPIXmlRpc
        """
        client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
        # get a list of package names
        packages = sorted(client.list_packages())
        for idx, package in enumerate(packages[self.count.min:self.count.max]):
            releases = client.package_releases(package, True)  # True for show_hidden arg
            self.log.debug("Scheduling #%d. (number versions: %d)",
                           self.count.min + idx, self.nversions)
            for version in releases[:self.nversions]:
                self.analyses_selinon_flow(package, version)

    def _use_pypi_ranking(self):
        """Schedule analyses of packages based on PyPI ranking.

        Walks the "alltime" listing page by page, skipping packages below
        self.count.min and stopping once the requested range is covered.
        """
        to_schedule_count = self.count.max - self.count.min
        packages_count = 0
        page = int((self.count.min / self._PACKAGES_PER_PAGE) + 1)
        # Entries to skip on the first page before count.min is reached.
        page_offset = self.count.min % self._PACKAGES_PER_PAGE
        while True:
            pop = requests.get('{url}/alltime?page={page}'.format(url=self._URL, page=page))
            pop.raise_for_status()
            poppage = bs4.BeautifulSoup(pop.text, 'html.parser')
            page += 1
            for package_name in poppage.find_all('span', class_='list_title'):
                if page_offset:
                    page_offset -= 1
                    continue
                packages_count += 1
                if packages_count > to_schedule_count:
                    return
                pop = requests.get('{url}/module/{pkg}'.format(url=self._URL,
                                                               pkg=package_name.text))
                poppage = bs4.BeautifulSoup(pop.text, 'html.parser')
                table = poppage.find('table', id='release_list')
                if table is None:
                    self.log.warning('No releases in %s', pop.url)
                    continue
                versions = self._parse_version_stats(table.find_all('tr'),
                                                     sort_by_popularity=self.nversions > 1)
                self.log.debug("Scheduling #%d. (number versions: %d)",
                               self.count.min + packages_count, self.nversions)
                for version in versions[:self.nversions]:
                    self.analyses_selinon_flow(package_name.text, version[0])

    def do_execute(self, popular=True):
        """Run core analyse on Python packages.

        :param popular: boolean, sort index by popularity
        """
        if popular:
            self._use_pypi_ranking()
        else:
            self._use_pypi_xml_rpc()
| {
"repo_name": "fabric8-analytics/fabric8-analytics-jobs",
"path": "f8a_jobs/handlers/python_popular_analyses.py",
"copies": "1",
"size": "4111",
"license": "apache-2.0",
"hash": 8720407663141008000,
"line_mean": 38.5288461538,
"line_max": 92,
"alpha_frac": 0.5789345658,
"autogenerated": false,
"ratio": 4.27783558792924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.535677015372924,
"avg_score": null,
"num_lines": null
} |
"""Analysis and modification of structural data exported from GeoModeller
All structural data from an entire GeoModeller project can be exported into ASCII
files using the function in the GUI:
Export -> 3D Structural Data
This method generates files for defined geological parameters:
"Points" (i.e. formation contact points) and
"Foliations" (i.e. orientations/ potential field gradients).
Exported parameters include all those defined in sections as well as 3D data points.
This package contains methods to check, visualise, and extract/modify parts of these
exported data sets, for example to import them into a different Geomodeller project.
"""
# import os, sys
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
class Struct3DPoints():
    """Class container for 3D structural points data sets.

    Points are stored in a structured numpy record array (``self.points``)
    with fields x, y, z (float32) and formation (32-character string).
    """

    def __init__(self, **kwds):
        """Structural points data set.

        **Optional keywords**:
            - *filename* = string : filename of csv file with exported points to load
        """
        # store point information in purpose defined numpy record
        self.ptype = np.dtype([('x', np.float32),
                               ('y', np.float32),
                               ('z', np.float32),
                               ('formation', np.str_, 32)])
        # BUGFIX: dict.has_key() was removed in Python 3 -> use the `in` operator
        if "filename" in kwds:
            self.filename = kwds['filename']
            # read data
            self.parse()
            self.get_formation_names()
            self.get_range()

    def parse(self):
        """Parse filename and load data into numpy record.

        The point information is stored in a purpose defined numpy record
        ``self.points``.
        """
        # `with` guarantees the file handle is closed (the original leaked it)
        with open(self.filename, "r") as f:
            lines = f.readlines()
        self.header = lines[0]
        # determine position of elements in header (for extension to foliations, etc.)
        h_elem = np.array(self.header.rstrip().split(','))
        # BUGFIX: np.where returns an index *array*; take the scalar index so
        # that indexing the plain Python list below works on modern numpy
        x_id = np.where(h_elem == 'X')[0][0]
        y_id = np.where(h_elem == 'Y')[0][0]
        z_id = np.where(h_elem == 'Z')[0][0]
        form_id = np.where(h_elem == 'formation')[0][0]
        # create numpy record array for points
        self.len = len(lines) - 1
        self.points = np.ndarray(self.len, dtype=self.ptype)
        for i, line in enumerate(lines[1:]):
            l = line.rstrip().split(',')
            self.points[i]['x'] = float(l[x_id])
            self.points[i]['y'] = float(l[y_id])
            self.points[i]['z'] = float(l[z_id])
            self.points[i]['formation'] = l[form_id]

    def get_formation_names(self):
        """Get names of all formations that have a point in this data set.

        Stored in ``self.formation_names``.
        """
        self.formation_names = np.unique(self.points[:]['formation'])

    def get_range(self):
        """Update min, max for all coordinate axes.

        Stored in ``self.xmin``, ``self.xmax``, ...
        """
        self.xmin = np.min(self.points['x'])
        self.ymin = np.min(self.points['y'])
        self.zmin = np.min(self.points['z'])
        self.xmax = np.max(self.points['x'])
        self.ymax = np.max(self.points['y'])
        self.zmax = np.max(self.points['z'])

    def _formation_mask(self, formation_names):
        """Return a boolean mask selecting points of the given formation(s).

        ``formation_names`` may be a single name or a list of names.
        """
        mask = np.zeros(self.len, dtype=bool)
        # idiom fix: isinstance instead of type(...) == list
        if isinstance(formation_names, list):
            for formation in formation_names:
                mask[self.points['formation'] == formation] = True
        else:
            mask[self.points['formation'] == formation_names] = True
        return mask

    def create_formation_subset(self, formation_names):
        """Create a subset (as another Struct3DPoints object) with specified formations only.

        **Arguments**:
            - *formation_names* : list of formation names (or a single name)

        **Returns**:
            Struct3DPoints object with subset of points
        """
        # reference to own class type for consistency with Struct3DFoliations
        pts_subset = self.__class__()
        ids = self._formation_mask(formation_names)
        # new length is the number of True elements in the mask
        pts_subset.len = np.sum(ids)
        pts_subset.points = self.points[ids]
        pts_subset.get_range()
        pts_subset.get_formation_names()
        # keep the csv header from the original so the subset can be saved
        pts_subset.header = self.header
        return pts_subset

    def remove_formations(self, formation_names):
        """Remove points for specified formations from the point set (in place).

        Useful, for example, to remove one formation, perform a thinning
        operation, and then add it back in with the `combine_with` function.

        **Arguments**:
            - *formation_names* = list of formations to be removed (or a single
              string to remove only one formation)
        """
        keep = ~self._formation_mask(formation_names)
        self.len = np.sum(keep)
        self.points = self.points[keep]
        self.get_range()
        self.get_formation_names()

    def rename_formations(self, rename_dict):
        """Rename formations according to assignments in dictionary.

        Mapping in dictionary is of the form:
            old_name_1 : new_name_1, old_name_2 : new_name_2, ...
        """
        for old_name, new_name in rename_dict.items():
            print("Change name from %s to %s" % (old_name, new_name))
            # vectorised in-place rename of all matching points
            self.points['formation'][self.points['formation'] == old_name] = new_name
        self.get_formation_names()

    def extract_range(self, **kwds):
        """Extract subset for defined coordinate ranges.

        Pass ranges as keywords: from_x, to_x, from_y, to_y, from_z, to_z.
        All undefined ranges default to the current data extent.

        **Returns**:
            pts_subset : Struct3DPoints data subset
        """
        from_x = kwds.get("from_x", self.xmin)
        from_y = kwds.get("from_y", self.ymin)
        from_z = kwds.get("from_z", self.zmin)
        to_x = kwds.get("to_x", self.xmax)
        to_y = kwds.get("to_y", self.ymax)
        to_z = kwds.get("to_z", self.zmax)
        pts_subset = self.__class__()
        ids = ((self.points['x'] >= from_x) &
               (self.points['y'] >= from_y) &
               (self.points['z'] >= from_z) &
               (self.points['x'] <= to_x) &
               (self.points['y'] <= to_y) &
               (self.points['z'] <= to_z))
        pts_subset.len = np.sum(ids)
        pts_subset.points = self.points[ids]
        pts_subset.get_range()
        pts_subset.get_formation_names()
        pts_subset.header = self.header
        return pts_subset

    def thin(self, nx, ny, nz, **kwds):
        """Thin data on a grid with defined number of cells and return as subset.

        **Arguments**:
            - *nx*, *ny*, *nz* = int : number of cells in each direction for thinning grid

        The thinning is performed on a raster and not 'formation-aware',
        following this simple procedure:
        (1) Iterate through grid
        (2) If multiple points for formation in this cell: thin
        (3a) If thin: select one point in cell at random and keep it
        (3b) else: if one point in range, keep it

        Note: Thinning is performed for all formations, so make sure to create
        a subset for a single formation first!

        **Returns**:
            pts_subset = Struct3DPoints : subset with thinned data
        """
        # DEVNOTE: this would be an easy function to parallelise per-cell
        pts_subset = self.__class__()
        # cell boundaries of the thinning grid
        bound_x = self.xmin + np.cumsum(np.ones(nx) * (self.xmax - self.xmin) / nx)
        bound_y = self.ymin + np.cumsum(np.ones(ny) * (self.ymax - self.ymin) / ny)
        bound_z = self.zmin + np.cumsum(np.ones(nz) * (self.zmax - self.zmin) / nz)
        # NOTE(review): the cell between the axis minimum and the first boundary
        # is never visited (loops start at bound[0]); kept as-is to preserve the
        # original grid behaviour -- confirm whether this is intended.
        ids_to_keep = []
        for i in range(nx - 1):
            for j in range(ny - 1):
                for k in range(nz - 1):
                    # determine indices of points in this cell
                    in_cell = ((self.points['x'] > bound_x[i]) &
                               (self.points['y'] > bound_y[j]) &
                               (self.points['z'] > bound_z[k]) &
                               (self.points['x'] < bound_x[i + 1]) &
                               (self.points['y'] < bound_y[j + 1]) &
                               (self.points['z'] < bound_z[k + 1]))
                    cell_ids = np.where(in_cell)[0]
                    if len(cell_ids) > 1:
                        # thinning required: keep one point at random
                        ids_to_keep.append(np.random.choice(cell_ids))
                    elif len(cell_ids) == 1:
                        # BUGFIX: the original appended the boolean mask element
                        # ids[0] instead of the point's index, which kept the
                        # wrong point; append the actual index instead.
                        ids_to_keep.append(cell_ids[0])
        # dtype=int so that an empty selection yields a valid (empty) index array
        pts_subset.points = self.points[np.array(ids_to_keep, dtype=int)]
        # ROBUSTNESS: min/max of an empty array would raise
        if len(pts_subset.points):
            pts_subset.get_range()
        pts_subset.len = len(pts_subset.points)
        pts_subset.get_formation_names()
        pts_subset.header = self.header
        return pts_subset

    def combine_with(self, pts_set):
        """Combine this point set with another point set (in place).

        **Arguments**:
            - *pts_set* = Struct3DPoints : points set to combine
        """
        self.points = np.concatenate((self.points, pts_set.points))
        # update range and derived attributes
        self.get_range()
        self.get_formation_names()
        self.len = len(self.points)

    def plot_plane(self, plane=('x', 'y'), **kwds):
        """Create 2-D plots for point distribution.

        **Arguments**:
            - *plane* = tuple of plane axes directions, e.g. ('x','y') (default)

        **Optional Keywords**:
            - *ax* = matplotlib axis object: if provided, plot is attached to this axis
            - *color* = matplotlib color (default 'b')
            - *formation_names* = list of formations : plot only points for specific formations
        """
        color = kwds.get("color", 'b')
        # BUGFIX: has_key -> `in` (Python 3)
        if "ax" in kwds:
            # axis is provided, attach here
            ax = kwds['ax']
        else:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        if "formation_names" in kwds:
            pts_subset = self.create_formation_subset(kwds['formation_names'])
            ax.plot(pts_subset.points[:][plane[0]], pts_subset.points[:][plane[1]], '.', color=color)
        else:
            ax.plot(self.points[:][plane[0]], self.points[:][plane[1]], '.', color=color)

    def plot_3D(self, **kwds):
        """Create a plot of points in 3-D.

        **Optional keywords**:
            - *ax* = matplotlib axis object: if provided, plot is attached to this axis
            - *formation_names* = list of formations : plot only points for specific formations
        """
        # BUGFIX: has_key -> `in` (Python 3)
        if "ax" in kwds:
            # axis is provided, attach here
            ax = kwds['ax']
        else:
            fig = plt.figure(figsize=(10, 8))
            ax = fig.add_subplot(111, projection='3d')
        if "formation_names" in kwds:
            # create a subset with specified formations only
            pts_subset = self.create_formation_subset(kwds['formation_names'])
            pts_subset.plot_3D(ax=ax)
        else:
            # plot all points
            ax.scatter(self.points['x'], self.points['y'], self.points['z'])

    def save(self, filename):
        """Save points set to file.

        **Arguments**:
            - *filename* = string : name of new file
        """
        # `with` guarantees the file handle is closed (the original leaked it)
        with open(filename, 'w') as f:
            f.write(self.header)
            for point in self.points:
                f.write("%.2f,%.2f,%.3f,%s\n" % (point['x'], point['y'], point['z'], point['formation']))
class Struct3DFoliations(Struct3DPoints):
    """Class container for foliations (i.e. orientations) exported from GeoModeller.

    Mainly based on Struct3DPoints as most required functionality for location
    of elements is inherited; ``parse`` and ``save`` are overridden to handle
    the extra azimuth/dip/polarity fields.

    Further methods might be added or adapted in the future, for example:
    - downsampling according to (eigen)vector methods
    - plotting of orientations in 2-D and 3-D
    """

    def __init__(self, **kwds):
        """Structural foliations data set.

        **Optional keywords**:
            - *filename* = string : filename of csv file with exported points to load
        """
        # store point information in purpose defined numpy record;
        # BUGFIX: np.int was removed in numpy >= 1.24 -> use an explicit width
        self.ftype = np.dtype([('x', np.float32),
                               ('y', np.float32),
                               ('z', np.float32),
                               ('azimuth', np.float32),
                               ('dip', np.float32),
                               ('polarity', np.int32),
                               ('formation', np.str_, 32)])
        # BUGFIX: dict.has_key() was removed in Python 3 -> use the `in` operator
        if "filename" in kwds:
            self.filename = kwds['filename']
            # read data
            self.parse()
            self.get_formation_names()
            self.get_range()

    def parse(self):
        """Parse filename and load data into numpy record.

        The orientation information is stored in a purpose defined numpy
        record ``self.points``.
        """
        # `with` guarantees the file handle is closed (the original leaked it)
        with open(self.filename, "r") as f:
            lines = f.readlines()
        self.header = lines[0]
        # determine position of elements in header
        h_elem = np.array(self.header.rstrip().split(','))
        # BUGFIX: np.where returns an index *array*; take the scalar index so
        # that indexing the plain Python list below works on modern numpy
        x_id = np.where(h_elem == 'X')[0][0]
        y_id = np.where(h_elem == 'Y')[0][0]
        z_id = np.where(h_elem == 'Z')[0][0]
        azi_id = np.where(h_elem == 'azimuth')[0][0]
        dip_id = np.where(h_elem == 'dip')[0][0]
        pol_id = np.where(h_elem == 'polarity')[0][0]
        form_id = np.where(h_elem == 'formation')[0][0]
        # create numpy record array for orientation points
        self.len = len(lines) - 1
        self.points = np.ndarray(self.len, dtype=self.ftype)
        for i, line in enumerate(lines[1:]):
            l = line.rstrip().split(',')
            self.points[i]['x'] = float(l[x_id])
            self.points[i]['y'] = float(l[y_id])
            self.points[i]['z'] = float(l[z_id])
            self.points[i]['azimuth'] = float(l[azi_id])
            self.points[i]['dip'] = float(l[dip_id])
            self.points[i]['polarity'] = float(l[pol_id])
            self.points[i]['formation'] = l[form_id]

    def save(self, filename):
        """Save foliations set to file.

        **Arguments**:
            - *filename* = string : name of new file
        """
        # `with` guarantees the file handle is closed (the original leaked it)
        with open(filename, 'w') as f:
            f.write(self.header)
            for point in self.points:
                f.write("%.2f,%.2f,%.3f,%.3f,%.3f,%d,%s\n" % (
                    point['x'], point['y'], point['z'],
                    point['azimuth'], point['dip'], point['polarity'],
                    point['formation']))
# No command-line behaviour: this module is intended to be used as a library.
if __name__ == '__main__':
    pass
| {
"repo_name": "Leguark/pygeomod",
"path": "pygeomod/struct_data.py",
"copies": "3",
"size": "17593",
"license": "mit",
"hash": 3902953201581329000,
"line_mean": 36.6723768737,
"line_max": 115,
"alpha_frac": 0.5279372478,
"autogenerated": false,
"ratio": 4.061172668513389,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02731904303858272,
"num_lines": 467
} |
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import math
from matplotlib import rc
# Configure matplotlib to render all text with LaTeX using a Palatino serif font.
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def his():
    """Read rod counts from ``dataplot.dat`` and save distribution histograms.

    Column mapping of ``dataplot.dat`` (whitespace separated): column 2 holds
    the vertical-rod count, column 3 the horizontal-rod count and column 4 the
    up-rod count.  Six PNG histogram files are written to the working
    directory (names and styles unchanged from the original script).
    """
    n_ver = []  # vertical rods
    n_hor = []  # horizontal rods
    n_up = []   # up rods
    n_tot = []  # total number per record
    with open("dataplot.dat", "r") as data_file:
        for line in data_file:
            words = line.split()
            v, h, u = float(words[2]), float(words[3]), float(words[4])
            n_ver.append(v)
            n_hor.append(h)
            n_up.append(u)
            n_tot.append(v + h + u)

    num_bins = 100

    def _hist_figure(series, title, ylabel, fname, log=False):
        """Render one histogram figure; series = [(data, color, alpha, label)]."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title(title)
        ax.set_xlabel('Numbers')
        ax.set_ylabel(ylabel)
        for data, color, alpha, label in series:
            ax.hist(data, num_bins, color=color, alpha=alpha, label=label, log=log)
        leg = ax.legend()
        leg.get_frame().set_alpha(0.5)
        fig.savefig(fname, dpi=180, bbox_inches='tight')

    # Decomposition: the original repeated the same figure boilerplate six
    # times; each call below reproduces one of the original figures exactly.
    _hist_figure([(n_ver, 'blue', 0.8, 'Vertical Rods')],
                 "Number Distribution for Vertical Rods", 'Frequency',
                 'N1_#distribution.png')
    _hist_figure([(n_hor, 'red', 0.8, 'Horizontal Rods')],
                 "Number Distribution for Horizontal Rods", 'Frequency',
                 'N2_#distribution.png')
    _hist_figure([(n_up, 'green', 0.8, 'Up Rods')],
                 "Number Distribution for Up Rods", 'Frequency',
                 'N3_#distribution.png')
    _hist_figure([(n_ver, 'blue', 0.6, 'Vertical Rods'),
                  (n_hor, 'red', 0.6, 'Horizontal Rods'),
                  (n_up, 'green', 0.6, 'Up Rods')],
                 "Number Distribution for All", 'Frequency',
                 'All_#distribution.png')
    _hist_figure([(n_tot, 'yellow', 0.8, 'Total Rods')],
                 "Total Number Distribution", 'Frequency',
                 'Ntot_#distribution.png')
    _hist_figure([(n_tot, 'pink', 0.8, 'Log (Total Rods)')],
                 "Log (Total Number Distribution)", 'Log(Frequency)',
                 'LogNtot_#distribution.png', log=True)


his()
# def main():
# print "#=====================================#"
# print "# Welcome to the Distribution factory #"
# print "#=====================================#"
# check = True
# while (check):
# N = raw_input("Please tell me which specie distribution do you want for this time? \nType 'N1','N2','N3' or 'all'\n")
# his(N)
# again = raw_input("Do you want to checkout other distribution? y/n\n")
# if again == "n":
# check = False
# print "Thanks for trying on our Drawing tool!\nSee you soon! LOL"
# main() | {
"repo_name": "Aieener/SUS_3D",
"path": "DATA/GCMC_data_one_specie_model/1E11L_8_64_9.65_2nd_coex/his.py",
"copies": "1",
"size": "4276",
"license": "mit",
"hash": -562595815348245000,
"line_mean": 29.1197183099,
"line_max": 121,
"alpha_frac": 0.6426566885,
"autogenerated": false,
"ratio": 2.4267877412031784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35694444297031785,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
def his():
    """Read rod counts from ``dataplot.dat`` and save distribution histograms.

    Column mapping of ``dataplot.dat`` (whitespace separated): column 2 holds
    the vertical-rod count and column 3 the horizontal-rod count.  Five PNG
    histogram files are written to the working directory (names and styles
    unchanged from the original script).
    """
    n_ver = []  # vertical rods
    n_hor = []  # horizontal rods
    n_tot = []  # total number per record
    with open("dataplot.dat", "r") as data_file:
        for line in data_file:
            words = line.split()
            v, h = float(words[2]), float(words[3])
            n_ver.append(v)
            n_hor.append(h)
            n_tot.append(v + h)

    num_bins = 100

    def _hist_figure(series, title, ylabel, fname, log=False):
        """Render one histogram figure; series = [(data, color, alpha, label)]."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title(title)
        ax.set_xlabel('Numbers')
        ax.set_ylabel(ylabel)
        for data, color, alpha, label in series:
            ax.hist(data, num_bins, color=color, alpha=alpha, label=label, log=log)
        leg = ax.legend()
        leg.get_frame().set_alpha(0.5)
        fig.savefig(fname, dpi=180, bbox_inches='tight')

    # Decomposition: the original repeated the same figure boilerplate five
    # times; each call below reproduces one of the original figures exactly.
    _hist_figure([(n_ver, 'blue', 0.8, 'Vertical Rods')],
                 "Number Distribution for Vertical Rods", 'Frequency',
                 'N1_#distribution.png')
    _hist_figure([(n_hor, 'red', 0.8, 'Horizontal Rods')],
                 "Number Distribution for Horizontal Rods", 'Frequency',
                 'N2_#distribution.png')
    _hist_figure([(n_ver, 'blue', 0.6, 'Vertical Rods'),
                  (n_hor, 'red', 0.6, 'Horizontal Rods')],
                 "Number Distribution for All", 'Frequency',
                 'All_#distribution.png')
    _hist_figure([(n_tot, 'yellow', 0.8, 'Total Rods')],
                 "Total Number Distribution", 'Frequency',
                 'Ntot_#distribution.png')
    _hist_figure([(n_tot, 'pink', 0.8, 'Log (Total Rods)')],
                 "Log (Total Number Distribution)", 'Log(Frequency)',
                 'LogNtot_#distribution.png', log=True)


his()
| {
"repo_name": "Aieener/HRE",
"path": "his.py",
"copies": "1",
"size": "2398",
"license": "mit",
"hash": -3927022510344414700,
"line_mean": 26.8837209302,
"line_max": 86,
"alpha_frac": 0.6743119266,
"autogenerated": false,
"ratio": 2.436991869918699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3611303796518699,
"avg_score": null,
"num_lines": null
} |
"""Analysis engine service."""
import dacite
import pandas as pd
from math import log, pi
from CoolProp.CoolProp import PropsSI
from CoolProp.HumidAirProp import HAPropsSI
from scipy.stats import chi2
from uncertainties import ufloat
from coimbra_chamber.access.experiment.service import ExperimentAccess
from coimbra_chamber.access.experiment.contracts import FitSpec
from coimbra_chamber.utility.io.contracts import Prompt
from coimbra_chamber.utility.io.service import IOUtility
from coimbra_chamber.utility.plot.contracts import Axis, DataSeries, Layout, Plot
from coimbra_chamber.utility.plot.service import PlotUtility
class AnalysisEngine(object):
"""Encapsulate all aspects of analysis."""
def __init__(self, experiment_id):  # noqa: D107
    self._experiment_id = experiment_id
    self._exp_acc = ExperimentAccess()
    self._io_util = IOUtility()
    self._plot_util = PlotUtility()
    # presumably the maximum acceptable relative error for a fit -- TODO confirm
    self._error = 0.01
    # accumulated accepted fits; filled by process_fits/_get_fits
    self._fits = []
    # cursor into the observation index while scanning for fits
    self._idx = 1
    self._steps = 1
    self._bounds = (None, None)
    # IR sensor calibration (offset a, gain b, with 1-sigma uncertainties)
    self._a = ufloat(-2.34, 0.07)
    self._b = ufloat(1.0445, 0.0022)
    # Tube radius (m) and derived cross-sectional area (m^2)
    self._R = ufloat(0.015, 0.0001)
    self._A = pi*self._R**2
    # Molar masses: M1 water, M2 dry air (g/mol)
    self._M1 = 18.015
    self._M2 = 28.964
    # Stefan-Boltzmann constant (W m^-2 K^-4)
    self._SIGMA = 5.67036713e-8
    # Emissivities: chamber wall and water surface
    self._eps_chamber = 0.1
    self._eps_h20 = 0.99
    # Chamber geometry: radius and length (m), total interior surface area (m^2)
    self._R_chamber = 0.3
    self._L_chamber = 0.7
    self._A_chamber = (
        2*pi*self._R_chamber**2 + 2*pi*self._R_chamber*self._L_chamber
    )
    # Grey-body radiation exchange factor for the chamber/water-surface pair
    # (two-surface enclosure resistance network scaled by the tube area)
    self._RAD_FACT = (
        self._A * (
            (1-self._eps_chamber)/(self._eps_chamber*self._A_chamber)
            + 1/self._A
            + (1-self._eps_h20)/(self._eps_h20*self._A)
        )
    )
    # Standard gravitational acceleration (m s^-2)
    self._ACC_G = 9.81
# ------------------------------------------------------------------------
# Public methods: included in the API
def process_fits(self, data):
    """
    Process and persist all fits extracted from experimental data.

    Side effects: populates ``self._fits`` and writes each accepted fit
    to the experiment store via ``ExperimentAccess``.

    Parameters
    ----------
    data : coimbra_chamber.access.experiment.contracts.DataSpec
    """
    self._data = data
    self._get_observations()
    self._get_fits()
    self._persist_fits()
# ------------------------------------------------------------------------
# Internal methods: not included in the API
def _get_observations(self):
    """Build ``self._observations`` from the raw experiment data.

    Each physical column is wrapped in ``ufloat`` values carrying the
    instrument uncertainty; equipment-status columns stay boolean.  The
    resulting DataFrame is indexed by elapsed time starting at zero.
    """
    observations = self._data.observations
    start_idx = observations[0].idx

    def _mean_temp(obs):
        # average the thermocouple readings, propagating the 0.2 K sigma
        temps = obs.temperatures
        return sum(ufloat(t.temperature, 0.2) for t in temps) / len(temps)

    columns = dict(
        Tdp=[ufloat(o.dew_point, 0.2) for o in observations],
        m=[ufloat(o.mass, 1e-7) for o in observations],
        Jref=[ufloat(o.pow_ref, abs(float(o.pow_ref)) * 0.05)
              for o in observations],
        P=[ufloat(o.pressure, int(o.pressure * 0.0015))
           for o in observations],
        Te=[_mean_temp(o) for o in observations],
        Ts=[ufloat(o.surface_temp, 0.5) for o in observations],
        Tic=[ufloat(o.ic_temp, 0.2) for o in observations],
        cap_man=[o.cap_man_ok for o in observations],
        optidew=[o.optidew_ok for o in observations],
    )
    # shift the index so that time starts at zero
    elapsed = [o.idx - start_idx for o in observations]
    self._observations = pd.DataFrame(index=elapsed, data=columns)
def _layout_observations(self):
    """Assemble the plotting Layout for the observation DataFrame.

    Builds DataSeries, Axis and Plot contracts from ``self._observations``
    and stores the finished Layout in ``self._layout``.
    """
    obs = self._observations

    # DataSeries ---------------------------------------------------------
    series = dict(
        t=dacite.from_dict(DataSeries, dict(values=obs.index.tolist())))
    # ufloat-valued columns: split into nominal values and 1-sigma errors
    for label in ('Tdp', 'm', 'Jref', 'P', 'Te', 'Ts', 'Tic'):
        column = obs[label]
        payload = dict(
            values=[value.nominal_value for value in column],
            sigma=[value.std_dev for value in column],
            label=label)
        series[label] = dacite.from_dict(DataSeries, payload)
    # boolean status columns carry no uncertainty
    for label in ('cap_man', 'optidew'):
        payload = dict(values=obs[label].tolist(), label=label)
        series[label] = dacite.from_dict(DataSeries, payload)

    # Axes ---------------------------------------------------------------
    axes = dict(
        mass=dacite.from_dict(Axis, dict(
            data=[series['m']], y_label='mass, [kg]',
            error_type='continuous')),
        temp=dacite.from_dict(Axis, dict(
            data=[series['Tdp'], series['Te'], series['Ts'],
                  series['Tic']],
            y_label='temperature, [K]', error_type='continuous')),
        pressure=dacite.from_dict(Axis, dict(
            data=[series['P']], y_label='pressure, [Pa]',
            error_type='continuous')),
        Jref=dacite.from_dict(Axis, dict(
            data=[series['Jref']], y_label='Ref power, [W]',
            error_type='continuous')),
        status=dacite.from_dict(Axis, dict(
            data=[series['cap_man'], series['optidew']],
            y_label='status')),
    )

    # Plots --------------------------------------------------------------
    plots = [
        dacite.from_dict(Plot, dict(
            abscissa=series['t'],
            axes=[axes['mass'], axes['temp']],
            x_label='index')),
        dacite.from_dict(Plot, dict(
            abscissa=series['t'],
            axes=[axes['pressure']],
            x_label='index')),
        dacite.from_dict(Plot, dict(
            abscissa=series['t'],
            axes=[axes['Jref'], axes['status']],
            x_label='index')),
    ]

    # Layout -------------------------------------------------------------
    self._layout = dacite.from_dict(
        Layout, dict(plots=plots, style='seaborn-darkgrid'))
def _get_fits(self):
    """Scan the observations and collect all acceptable local fits.

    Walks ``self._idx`` through the observation index, at each position
    taking the largest window centred on the index and asking
    ``_get_best_local_fit`` for a fit that meets the error threshold.
    Accepted fits are annotated (experimental state, properties,
    non-dimensional groups) and appended to ``self._fits``.
    """
    # len - 2 because we want to make sure we never end up at the last
    # index and can't take a max slice
    while self._idx < len(self._observations) - 2:
        # Get a new sample centered at the self._idx that is as large as
        # possible (symmetric about the centre, clipped by series length).
        left = (2 * self._idx) - len(self._observations) + 1
        right = 2 * self._idx
        self._sample = self._observations.loc[left:right, :]
        # Then search for the best fit in self._sample
        self._get_best_local_fit()
        if self._this_fit:  # We got a fit that met the error threshold
            self._evaluate_fit()
            self._set_local_exp_state()
            self._set_local_properties()
            self._set_nondim_groups()
            self._fits.append(self._this_fit)
            # Length of the best fit is the degrees of freedom plus 2 for
            # a linear fit: advance the cursor past the fitted window.
            self._idx += self._this_fit['nu_chi'] + 2
        else:  # _get_best_local_fit returned None
            # no acceptable fit in this window: skip the whole sample
            self._idx += len(self._sample)
def _persist_fits(self):
    """Write every collected fit to the experiment store.

    Returns the number of fits persisted.
    """
    for fit_data in self._fits:
        spec = dacite.from_dict(FitSpec, fit_data)
        self._exp_acc.add_fit(spec, self._experiment_id)
    # every fit was written, so the count equals the list length
    return len(self._fits)
# Properties .............................................................
def _set_local_exp_state(self):
    """Average the current sample into a single experimental state.

    Applies the IR-sensor calibration (offset ``_a``, gain ``_b``) to the
    surface temperature and stores the averaged state in
    ``self._experimental_state``.
    """
    sample = self._this_sample
    count = len(sample)
    kelvin_offset = 273.15

    def column_mean(column):
        return sum(column) / count

    # calibrate the infrared surface-temperature reading in Celsius space
    surface_celsius = column_mean(sample.Ts) - kelvin_offset
    surface_celsius = self._a + self._b * surface_celsius
    self._experimental_state = dict(
        Te=column_mean(sample.Te),
        Tdp=column_mean(sample.Tdp),
        Ts=surface_celsius + kelvin_offset,
        P=column_mean(sample.P),
    )
    def _set_local_properties(self):
        """Evaluate thermophysical properties at the local experimental state.

        Uses the averaged state set by ``_set_local_exp_state`` and the slope
        of the accepted fit to compute the mass flux and the film properties
        (compositions, densities, enthalpies, transport properties, and the
        driving forces).  Each quantity is a ``ufloat``; uncertainties for the
        CoolProp calls are propagated manually by re-evaluating the property
        at the +1-sigma state and differencing.  Nominal values and standard
        deviations are copied onto ``self._this_fit`` at the end.
        """
        # Internal mapper ----------------------------------------------------
        # Convert a mole fraction (x1) to a mass fraction (m1) via the molar
        # masses self._M1 and self._M2.
        def x1_2_m1(self, x1):
            num = self._M1 * x1
            den = num + (self._M2 * (1 - x1))
            return num/den
        Ts = self._experimental_state['Ts']
        P = self._experimental_state['P']
        Te = self._experimental_state['Te']
        Tdp = self._experimental_state['Tdp']
        # mddp ---------------------------------------------------------------
        # Mass flux: negated fit slope (mass decreases over time) per area.
        mdot = ufloat(-self._this_fit['b'], self._this_fit['sig_b'])
        mddp = mdot/self._A
        # x1 -----------------------------------------------------------------
        # s-state (saturated at the surface temperature)
        x1s_nv = HAPropsSI(
            'psi_w',
            'T', Ts.nominal_value,
            'P', P.nominal_value,
            'RH', 1)
        x1s_sig = x1s_nv - HAPropsSI(
            'psi_w',
            'T', Ts.nominal_value + Ts.std_dev,
            'P', P.nominal_value,
            'RH', 1)
        x1s = ufloat(x1s_nv, abs(x1s_sig))
        # e-state (ambient, fixed by the dew point)
        x1e_nv = HAPropsSI(
            'psi_w',
            'T', Te.nominal_value,
            'P', P.nominal_value,
            'Tdp', Tdp.nominal_value)
        x1e_sig = x1e_nv - HAPropsSI(
            'psi_w',
            'T', Te.nominal_value + Te.std_dev,
            'P', P.nominal_value,
            'Tdp', Tdp.nominal_value + Tdp.std_dev)
        x1e = ufloat(x1e_nv, abs(x1e_sig))
        # film (simple arithmetic mean of the two states)
        x1 = (x1s+x1e) / 2
        # m1 -----------------------------------------------------------------
        # s-state
        m1s = x1_2_m1(self, x1s)
        # e-state
        m1e = x1_2_m1(self, x1e)
        # film
        m1 = (m1s+m1e) / 2
        # rho ---------------------------------------------------------------
        # s-state (density is the reciprocal of the humid-air volume)
        rhos_nv = 1 / HAPropsSI(
            'Vha',
            'T', Ts.nominal_value,
            'P', P.nominal_value,
            'Y', x1s_nv)
        rhos_sig = rhos_nv - (
            1 / HAPropsSI(
                'Vha',
                'T', Ts.nominal_value + Ts.std_dev,
                'P', P.nominal_value,
                'Y', x1s_nv)
            )
        rhos = ufloat(rhos_nv, abs(rhos_sig))
        # e-state
        rhoe_nv = 1 / HAPropsSI(
            'Vha',
            'T', Te.nominal_value,
            'P', P.nominal_value,
            'Y', x1e_nv)
        rhoe_sig = rhoe_nv - (
            1 / HAPropsSI(
                'Vha',
                'T', Te.nominal_value + Te.std_dev,
                'P', P.nominal_value,
                'Y', x1e_nv)
            )
        rhoe = ufloat(rhoe_nv, abs(rhoe_sig))
        # film
        rho = (rhos+rhoe) / 2
        # Bm1 ----------------------------------------------------------------
        # Mass-fraction driving force.
        Bm1 = (m1s - m1e)/(1-m1s)
        # T ------------------------------------------------------------------
        T = (Te+Ts) / 2
        # D12 ----------------------------------------------------------------
        # Binary diffusion coefficient; presumably an empirical correlation
        # in T and P — TODO confirm the 1.97e-5 / 256 / 1.685 constants.
        D12 = 1.97e-5 * (101325/P) * pow(T/256, 1.685)
        # hfg -----------------------------------------------------------------
        # hg (saturated vapor enthalpy at the surface temperature)
        hg_nv = PropsSI(
            'H',
            'T', Ts.nominal_value,
            'Q', 1,
            'water')
        hg_sig = hg_nv - PropsSI(
            'H',
            'T', Ts.nominal_value + Ts.std_dev,
            'Q', 1,
            'water')
        hg = ufloat(hg_nv, abs(hg_sig))
        # hf (saturated liquid enthalpy at the surface temperature)
        hf_nv = PropsSI(
            'H',
            'T', Ts.nominal_value,
            'Q', 0,
            'water')
        hf_sig = hf_nv - PropsSI(
            'H',
            'T', Ts.nominal_value + Ts.std_dev,
            'Q', 0,
            'water')
        hf = ufloat(hf_nv, abs(hf_sig))
        # hfg (latent heat of vaporization)
        hfg = hg - hf
        # hu -----------------------------------------------------------------
        hu = -hfg
        # hs -----------------------------------------------------------------
        # Enthalpy datum at the s-state.
        hs = ufloat(0, 0)
        # cpv ----------------------------------------------------------------
        cpv_nv = HAPropsSI(
            'cp_ha',
            'P', P.nominal_value,
            'T', T.nominal_value,
            'Y', x1.nominal_value,
            )
        cpv_sig = cpv_nv - HAPropsSI(
            'cp_ha',
            'P', P.nominal_value,
            'T', T.nominal_value + T.std_dev,
            'Y', x1.nominal_value,
            )
        cpv = ufloat(cpv_nv, abs(cpv_sig))
        # he -----------------------------------------------------------------
        he = cpv * (Te - Ts)
        # cpl ----------------------------------------------------------------
        cpl_nv = PropsSI(
            'Cpmass',
            'T', T.nominal_value,
            'Q', 0,
            'water')
        cpl_sig = cpl_nv - PropsSI(
            'Cpmass',
            'T', T.nominal_value + T.std_dev,
            'Q', 0,
            'water')
        cpl = ufloat(cpl_nv, abs(cpl_sig))
        # hT -----------------------------------------------------------------
        hT = cpl * (Te - Ts)
        # qcu ----------------------------------------------------------------
        # Heat flux carried by the evaporating mass.
        qcu = mddp * (hT - hu)
        # Ebe ----------------------------------------------------------------
        # Blackbody emissive power at the e-state.
        Ebe = self._SIGMA*Te**4
        # Ebs ----------------------------------------------------------------
        Ebs = self._SIGMA*Ts**4
        # qrs ----------------------------------------------------------------
        # Net radiative flux to the surface.
        qrs = (Ebe - Ebs)/self._RAD_FACT
        # kv -----------------------------------------------------------------
        kv_nv = HAPropsSI(
            'k',
            'P', P.nominal_value,
            'T', T.nominal_value,
            'Y', x1.nominal_value,
            )
        kv_sig = kv_nv - HAPropsSI(
            'k',
            'P', P.nominal_value,
            'T', T.nominal_value + T.std_dev,
            'Y', x1.nominal_value,
            )
        kv = ufloat(kv_nv, abs(kv_sig))
        # alpha --------------------------------------------------------------
        # Thermal diffusivity of the film.
        alpha = kv / (rho*cpv)
        # Bh -----------------------------------------------------------------
        # Enthalpy driving force.
        Bh = (hs-he) / (hu + (qcu+qrs)/mddp - hs)
        # M ------------------------------------------------------------------
        # Mixture molar mass (mass-fraction weighted).
        M = (m1 * self._M1) + ((1 - m1) * self._M2)
        # gamma1 -------------------------------------------------------------
        gamma1 = (1/rho) * (M/self._M1 - 1)
        # gamma2 -------------------------------------------------------------
        gamma2 = (1/rho) * (M/self._M2 - 1)
        # beta ---------------------------------------------------------------
        # Ideal-gas thermal expansion coefficient.
        beta = 1/T
        # Delta_m ------------------------------------------------------------
        Delta_m = m1s - m1e
        # Delta_T ------------------------------------------------------------
        Delta_T = Ts - Te
        # mu -----------------------------------------------------------------
        mu_nv = HAPropsSI(
            'mu',
            'P', P.nominal_value,
            'T', T.nominal_value,
            'Y', x1.nominal_value,
            )
        mu_sig = mu_nv - HAPropsSI(
            'mu',
            'P', P.nominal_value,
            'T', T.nominal_value + T.std_dev,
            'Y', x1.nominal_value,
            )
        mu = ufloat(mu_nv, abs(mu_sig))
        # nu -----------------------------------------------------------------
        # Kinematic viscosity.
        nu = mu/rho
        # set properties
        self._properties = dict(
            mddp=mddp,
            x1s=x1s,
            x1e=x1e,
            x1=x1,
            m1s=m1s,
            m1e=m1e,
            m1=m1,
            rhos=rhos,
            rhoe=rhoe,
            rho=rho,
            Bm1=Bm1,
            T=T,
            D12=D12,
            hfg=hfg,
            hu=hu,
            hs=hs,
            cpv=cpv,
            he=he,
            cpl=cpl,
            hT=hT,
            qcu=qcu,
            Ebe=Ebe,
            Ebs=Ebs,
            qrs=qrs,
            kv=kv,
            alpha=alpha,
            Bh=Bh,
            M=M,
            gamma1=gamma1,
            gamma2=gamma2,
            beta=beta,
            Delta_m=Delta_m,
            Delta_T=Delta_T,
            mu=mu,
            nu=nu,
            Ts=Ts,
            )
        # Update this fit: flatten each ufloat into value + sig_<name> pairs.
        for key, value in self._properties.items():
            self._this_fit[key] = value.nominal_value
            self._this_fit[f'sig_{key}'] = value.std_dev
    def _set_nondim_groups(self):
        """Compute the nondimensional groups (ShR, NuR, Le, GrR) for this fit.

        Reads the quantities set by ``_set_local_properties`` and writes the
        nominal value and standard deviation of each group onto
        ``self._this_fit``.
        """
        Bm1 = self._properties['Bm1']
        mddp = self._properties['mddp']
        R = self._R
        rho = self._properties['rho']
        D12 = self._properties['D12']
        alpha = self._properties['alpha']
        Bh = self._properties['Bh']
        g = self._ACC_G
        nu = self._properties['nu']
        beta = self._properties['beta']
        Delta_T = self._properties['Delta_T']
        gamma1 = self._properties['gamma1']
        Delta_m = self._properties['Delta_m']
        mu = self._properties['mu']
        rhoe = self._properties['rhoe']
        rhos = self._properties['rhos']
        # Manual natural log error propagation -------------------------------
        # log() does not accept ufloats, so propagate the sigma by differencing
        # at the +1-sigma value.
        # Bm1
        ln_Bm1_nv = log(1 + Bm1.nominal_value)
        ln_Bm1_sig = ln_Bm1_nv - log(1 + Bm1.nominal_value + Bm1.std_dev)
        ln_Bm1 = ufloat(ln_Bm1_nv, abs(ln_Bm1_sig))
        # Bh
        ln_Bh_nv = log(1 + Bh.nominal_value)
        ln_Bh_sig = ln_Bh_nv - log(1 + Bh.nominal_value + Bh.std_dev)
        ln_Bh = ufloat(ln_Bh_nv, abs(ln_Bh_sig))
        # ShR ----------------------------------------------------------------
        # Sherwood number based on the characteristic length R.
        ShR = (mddp * R) / (ln_Bm1 * rho * D12)
        # NuR ----------------------------------------------------------------
        NuR = (mddp * R) / (ln_Bh * rho * alpha)
        # Le -----------------------------------------------------------------
        # Lewis number: mass diffusivity over thermal diffusivity.
        Le = D12/alpha
        # GrR_binary ---------------------------------------------------------
        GrR_binary = (g * R**3 / nu**2) * (beta*Delta_T + gamma1*rho*Delta_m)
        # GrR_primary --------------------------------------------------------
        GrR_primary = (g * R**3 / mu**2) * (rho * (rhos - rhoe))
        self._nondim_groups = dict(
            ShR=ShR,
            NuR=NuR,
            Le=Le,
            GrR_binary=GrR_binary,
            GrR_primary=GrR_primary,
            )
        # Update this fit: flatten each ufloat into value + sig_<name> pairs.
        for key, value in self._nondim_groups.items():
            self._this_fit[key] = value.nominal_value
            self._this_fit[f'sig_{key}'] = value.std_dev
# ------------------------------------------------------------------------
# Class helpers: internal use only
def _ols_fit(self):
sample = self._this_sample['m'].tolist()
# Prepare the data
y = [i.nominal_value for i in sample]
sig = [i.std_dev for i in sample]
x = list(range(len(y))) # Always indexed at zero
# Determine fit components
S = sum(1/sig[i]**2 for i in range(len(x)))
Sx = sum(x[i]/sig[i]**2 for i in range(len(x)))
Sy = sum(y[i]/sig[i]**2 for i in range(len(x)))
Sxx = sum(x[i]**2/sig[i]**2 for i in range(len(x)))
Sxy = sum(x[i]*y[i]/sig[i]**2 for i in range(len(x)))
Delta = S*Sxx - Sx**2
# Now calculate model parameters: y = a + bx
a = (Sxx*Sy - Sx*Sxy) / Delta
sig_a = (Sxx/Delta)**0.5
b = (S*Sxy - Sx*Sy) / Delta
sig_b = (S/Delta)**0.5
return dict(
a=a,
sig_a=sig_a,
b=b,
sig_b=sig_b,
)
def _get_best_local_fit(self):
# self._sample always has an odd length, so we use integer division.
center = len(self._sample) // 2
steps = int(self._steps) # Explicitly make a copy
delta = int(steps) # Explicityly make a copy
while center + steps + 1 <= len(self._sample):
self._this_sample = (
self._sample.iloc[center - steps: center + steps + 1, :]
)
fit = self._ols_fit()
# With small sample sizes, b is sometimes zero.
# If this is the case we want to continue.
if fit['b'] == 0:
steps += delta
continue
elif fit['sig_b']/abs(fit['b']) <= self._error:
self._this_fit = fit
return
else:
steps += delta
# We did not find a fit
self._this_fit = None
def _evaluate_fit(self):
# Prepare the data
y = [i.nominal_value for i in self._this_sample['m']]
sig = [i.std_dev for i in self._this_sample['m']]
x = list(range(len(y))) # Always indexed at zero
# Fit parameters
a = self._this_fit['a']
b = self._this_fit['b']
# Calculate R^2
predicted = [a + b*i for i in x]
y_bar = sum(y)/len(y)
SSres = sum((y[i] - predicted[i])**2 for i in range(len(x)))
SStot = sum((y[i] - y_bar)**2 for i in range(len(x)))
R2 = 1 - SSres/SStot
# Now for the merit function; i.e. chi^2
merit_value = sum(((y[i] - a - b*x[i])/sig[i])**2 for i in range(len(x)))
# And the goodness of fit; i.e. Q from Numerical Recipes
Q = chi2.sf(merit_value, len(x)-2)
# update this fit
self._this_fit['r2'] = R2
self._this_fit['q'] = Q
self._this_fit['chi2'] = merit_value
self._this_fit['nu_chi'] = len(x) - 2
self._this_fit['exp_id'] = self._experiment_id
self._this_fit['idx'] = self._idx
| {
"repo_name": "rinman24/ucsd_ch",
"path": "coimbra_chamber/engine/analysis/service.py",
"copies": "1",
"size": "24723",
"license": "mit",
"hash": -7547856592103409000,
"line_mean": 32.5,
"line_max": 82,
"alpha_frac": 0.4255551511,
"autogenerated": false,
"ratio": 3.636804942630185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4562360093730185,
"avg_score": null,
"num_lines": null
} |
# Analysis Example
# Minimum, maximum, and average
# Get the minimum, maximum, and the average value of the variable temperature from your device,
# and save these values in new variables
# Instructions
# To run this analysis you need to add a device token to the environment variables.
# To do that, go to your device, then to "Token", and copy your token.
# Then go to the analysis, then "Environment Variables",
# type device_token as the key, and paste your token as the value.
import sys
sys.path.append('..')
import functools
from tago import Device
from tago import Analysis
# The function myAnalysis will run when you execute your analysis
def myAnalysis(context, scope):
    """Compute the min, max, and average temperature over the last day.

    Reads the device token from the analysis environment variables, queries
    the ``temperature`` variable on the device, and stores
    ``temperature_minimum``, ``temperature_maximum``, and
    ``temperature_average`` back on the device.

    context: TagoIO analysis context (environment variables and logger).
    scope: data that triggered the analysis (unused here).
    """
    # Reads the value of device_token from the environment variables.
    # Guard before indexing [0]: the original indexed first and would raise
    # IndexError when the variable was missing.
    tokens = [env for env in context.environment
              if env['key'] == 'device_token']
    if not tokens or not tokens[0]['value']:
        return context.log("Missing device_token Environment Variable.")
    my_device = Device(tokens[0]['value'])
    # Minimum ----------------------------------------------------------------
    # BUG FIX: request the actual minimum over the last day ('query': 'min');
    # the original filter only fetched the single latest reading.
    min_filter = {
        'variable': 'temperature',
        'query': 'min',
        'start_date': '1 day',
    }
    min_result = my_device.find(min_filter)
    if min_result['status'] is True and len(min_result['result']):
        minimum = min_result['result'][0]
        result = my_device.insert({
            'variable': 'temperature_minimum',
            'value': minimum['value'],
            'unit': 'F',
        })
        if result['status'] is True:
            context.log('Temperature Minimum Updated')
        else:
            # Log the failure instead of silently aborting the analysis.
            context.log(result['result'])
    else:
        context.log('Minimum value not found')
    # Maximum ----------------------------------------------------------------
    max_filter = {
        'variable': 'temperature',
        'query': 'max',
        'start_date': '1 day',
    }
    # BUG FIX: query with max_filter (the original passed minFilter here, so
    # the "maximum" was never actually requested).
    max_result = my_device.find(max_filter)
    if max_result['status'] is True and len(max_result['result']):
        maximum = max_result['result'][0]
        result = my_device.insert({
            'variable': 'temperature_maximum',
            'value': maximum['value'],
            'unit': 'F',
        })
        if result['status'] is True:
            context.log('Temperature Maximum Updated')
        else:
            context.log(result['result'])
    else:
        context.log('Maximum value not found')
    # Average ----------------------------------------------------------------
    avg_filter = {
        'variable': 'temperature',
        'qty': 1000,
        'start_date': '1 day',
    }
    # BUG FIX: use my_device (the original referenced an undefined name
    # `device`, raising NameError).
    avg_result = my_device.find(avg_filter)
    if avg_result['status'] is True and len(avg_result['result']):
        readings = avg_result['result']
        # BUG FIX: sum the values directly — the original reduce() had no
        # initializer, so the first accumulator was a dict (TypeError) — and
        # divide by the number of readings, not len() of the response dict.
        average = sum(int(item['value']) for item in readings) / len(readings)
        context.log(average)
        result = my_device.insert({
            'variable': 'temperature_average',
            'value': average,
            'unit': 'F',
        })
        if result['status'] is True:
            context.log('Temperature Average Updated')
        else:
            context.log(result['result'])
    else:
        context.log('No result found for the avg calculation')
# The analysis token is only necessary when running the analysis outside
# TagoIO; replace the placeholder below with your own analysis token.
Analysis('Your-analysis-token').init(myAnalysis)
| {
"repo_name": "tago-io/tago-python",
"path": "!example/min_max_avg.py",
"copies": "1",
"size": "3616",
"license": "mit",
"hash": -7336758297467804000,
"line_mean": 30.1724137931,
"line_max": 110,
"alpha_frac": 0.6725663717,
"autogenerated": false,
"ratio": 3.842720510095643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5015286881795643,
"avg_score": null,
"num_lines": null
} |
#analysis files
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
from analysis_gui import Ui_Analysis
import numpy as np
import matplotlib,math,csv
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MyMplCanvas(FigureCanvas):
    """Matplotlib canvas embedded as a Qt widget.

    Subclasses override :meth:`compute_initial_figure` to draw their first
    frame; the base canvas starts empty.
    """
    def __init__(self, parent=None):
        figure = Figure()
        self.axes = figure.add_subplot(111)
        self.compute_initial_figure()
        FigureCanvas.__init__(self, figure)
        self.setParent(parent)
        # Let the canvas grow with its container in both directions.
        FigureCanvas.setSizePolicy(
            self,
            QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)

    def compute_initial_figure(self):
        # Hook for subclasses; intentionally a no-op here.
        pass
class Radar(FigureCanvas):
    """Radar (spider) chart on a polar matplotlib canvas.

    One stacked polar axes is created per title so each spoke can carry its
    own radial grid; all plotting happens on the primary axes.
    """
    def __init__(self, titles, rect=None, parent=None):
        figure = Figure()
        if rect is None:
            rect = [0.05, 0.05, 0.8, 0.8]
        self.n = len(titles)
        # Spoke angles: evenly spaced, starting at 90 degrees, wrapped to
        # [0, 360).
        self.angles = np.arange(90, 90 + 360, 360.0 / self.n)
        self.angles = [angle % 360 for angle in self.angles]
        self.axes = [
            figure.add_axes(rect, projection="polar", label="axes%d" % index)
            for index in range(self.n)
        ]
        self.ax = self.axes[0]
        self.ax.set_thetagrids(self.angles, labels=titles, fontsize=14)
        # Hide everything but the radial grid on the secondary axes.
        for overlay in self.axes[1:]:
            overlay.patch.set_visible(False)
            overlay.grid("off")
            overlay.xaxis.set_visible(False)
        for axis, angle in zip(self.axes, self.angles):
            axis.set_rgrids([0.2, 0.4, 0.6, 0.8, 1.0], angle=angle)
            axis.spines["polar"].set_visible(False)
            axis.set_ylim(auto=True)
            axis.set_xlim(auto=True)
        FigureCanvas.__init__(self, figure)
        self.setParent(parent)

    def plot(self, values, *args, **kw):
        """Close the polygon and draw it on the primary axes."""
        angle = np.deg2rad(np.r_[self.angles, self.angles[0]])
        values = np.r_[values, values[0]]
        self.ax.plot(angle, values, *args, **kw)
class Analysis(QtWidgets.QMainWindow, Ui_Analysis):
    def __init__(self,parent = None):
        """Build the main analysis window: canvases, toolbars, menu wiring.

        Creates one matplotlib canvas (with navigation toolbar) for the XY
        and Box tabs, connects the menu actions to their handlers, and
        initializes empty containers for the 18 sensor channels.
        """
        super(Analysis,self).__init__(parent)
        self.setupUi(self)
        # One host widget per tab page.
        self.XY_widget = QtWidgets.QWidget(self.tab_XY)
        self.Radar_widget = QtWidgets.QWidget(self.tab_Radar)
        self.Box_widget = QtWidgets.QWidget(self.tab_Box)
        self.Table_widget = QtWidgets.QWidget(self.tab_Table)
        # XY tab: canvas plus matplotlib navigation toolbar.
        self.XY_Layout = QtWidgets.QVBoxLayout(self.XY_widget)
        self.XY = MyMplCanvas(self.XY_widget)
        self.XY_Layout.addWidget(self.XY)
        self.mpl_toolbar = NavigationToolbar(self.XY, self.XY_widget)
        self.XY_Layout.addWidget(self.mpl_toolbar)
        # Box tab: same canvas + toolbar arrangement.
        self.Box_Layout = QtWidgets.QVBoxLayout(self.Box_widget)
        self.box = MyMplCanvas(self.Box_widget)
        self.Box_Layout.addWidget(self.box)
        self.box_toolbar = NavigationToolbar(self.box, self.Box_widget)
        self.Box_Layout.addWidget(self.box_toolbar)
        #self.tabWidget.setFocus()
        #self.setCentralWidget(self.tabWidget)
        #self.XY_widget.setFocus()
        #self.Radar_widget.setFocus()
        #self.Box_widget.setFocus()
        #self.tabWidget.setFocus()
        #self.setCentralWidget(self.tabWidget)
        # Menu actions -> analysis handlers.
        self.actionOpen.triggered.connect(self.open)
        self.actionMax_min.triggered.connect(self.max_min)
        self.actionStandardization_M_0_S_1.triggered.connect(self.standardization)
        self.actionBaseline_Correction.triggered.connect(self.baseline)
        self.actionPeak_Detection.triggered.connect(self.peak_detection)
        self.actionFWHM.triggered.connect(self.FWHM)
        self.actionRise_Time.triggered.connect(self.rise_time)
        self.actionFall_Time.triggered.connect(self.fall_time)
        # Per-file state: sensor metadata, time axis, 18 raw channel lists.
        self.sensor_name = []
        self.sensor_sn = []
        self.time = []
        self.s1, self.s2, self.s3, self.s4, self.s5 = [], [], [], [], []
        self.s6, self.s7, self.s8, self.s9, self.s10 = [], [], [], [], []
        self.s11, self.s12, self.s13, self.s14, self.s15 = [], [], [], [], []
        self.s16, self.s17, self.s18 = [], [], []
def open(self):
self.data = []
self.sensor_name = []
self.sensor_sn = []
self.time = []
self.s1, self.s2, self.s3, self.s4, self.s5 = [], [], [], [], []
self.s6, self.s7, self.s8, self.s9, self.s10 = [], [], [], [], []
self.s11, self.s12, self.s13, self.s14, self.s15 = [], [], [], [], []
self.s16, self.s17, self.s18 = [], [], []
self.s1_normalized = []
self.s2_normalized = []
self.s3_normalized = []
self.s4_normalized = []
self.s5_normalized = []
self.s6_normalized = []
self.s7_normalized = []
self.s8_normalized = []
self.s9_normalized = []
self.s10_normalized = []
self.s11_normalized = []
self.s12_normalized = []
self.s13_normalized = []
self.s14_normalized = []
self.s15_normalized = []
self.s16_normalized = []
self.s17_normalized = []
self.s18_normalized = []
filename = QFileDialog.getOpenFileName(self, 'Open',filter="CSV Files (*.csv);;FOX Files (*.txt)",
initialFilter= "CSV Files (*.csv)")
if filename[0]=='':
print("Cancel")
elif filename[1]=='FOX Files (*.txt)':
file = open(filename[0])
lines = file.readlines()
for i in range(len(lines)):
if lines[i].startswith("[SENSOR NAME]"):
i += 1
self.sensor_name = lines[i].split()
if lines[i].startswith("[SENSOR SN]"):
i += 1
self.sensor_sn = lines[i].split()
if lines[i].startswith("[SENSOR DATA]"):
j = i + 1
self.data = []
for i in range(121):
self.data.append(lines[j].split())
j += 1
print(self.sensor_name)
print(self.sensor_sn)
print(self.data)
for i in range(len(self.data)):
for j in range(19):
if j==0:
self.time.append(self.data[i][j])
if j==1:
self.s1.append(float(self.data[i][j]))
if j==2:
self.s2.append(float(self.data[i][j]))
if j==3:
self.s3.append(float(self.data[i][j]))
if j==4:
self.s4.append(float(self.data[i][j]))
if j==5:
self.s5.append(float(self.data[i][j]))
if j==6:
self.s6.append(float(self.data[i][j]))
if j==7:
self.s7.append(float(self.data[i][j]))
if j==8:
self.s8.append(float(self.data[i][j]))
if j==9:
self.s9.append(float(self.data[i][j]))
if j==10:
self.s10.append(float(self.data[i][j]))
if j==11:
self.s11.append(float(self.data[i][j]))
if j==12:
self.s12.append(float(self.data[i][j]))
if j==13:
self.s13.append(float(self.data[i][j]))
if j==14:
self.s14.append(float(self.data[i][j]))
if j==15:
self.s15.append(float(self.data[i][j]))
if j==16:
self.s16.append(float(self.data[i][j]))
if j==17:
self.s17.append(float(self.data[i][j]))
if j==18:
self.s18.append(float(self.data[i][j]))
self.XY.axes.cla()
self.XY.axes.plot(self.time, self.s1,label=self.sensor_name[0])
self.XY.axes.plot(self.time, self.s2,label=self.sensor_name[1])
self.XY.axes.plot(self.time, self.s3,label=self.sensor_name[2])
self.XY.axes.plot(self.time, self.s4,label=self.sensor_name[3])
self.XY.axes.plot(self.time, self.s5,label=self.sensor_name[4])
self.XY.axes.plot(self.time, self.s6,label=self.sensor_name[5])
self.XY.axes.plot(self.time, self.s7,label=self.sensor_name[6])
self.XY.axes.plot(self.time, self.s8,label=self.sensor_name[7])
self.XY.axes.plot(self.time, self.s9,label=self.sensor_name[8])
self.XY.axes.plot(self.time, self.s10,label=self.sensor_name[9])
self.XY.axes.plot(self.time, self.s11,label=self.sensor_name[10])
self.XY.axes.plot(self.time, self.s12,label=self.sensor_name[11])
self.XY.axes.plot(self.time, self.s13,label=self.sensor_name[12])
self.XY.axes.plot(self.time, self.s14,label=self.sensor_name[13])
self.XY.axes.plot(self.time, self.s15,label=self.sensor_name[14])
self.XY.axes.plot(self.time, self.s16,label=self.sensor_name[15])
self.XY.axes.plot(self.time, self.s17,label=self.sensor_name[16])
self.XY.axes.plot(self.time, self.s18,label=self.sensor_name[17])
self.XY.axes.set_xlabel("Time")
self.XY.axes.set_ylabel("Impedance")
self.XY.axes.legend(loc='best')
self.XY.draw()
self.menuNormalization.setEnabled(True)
for item in self.s1:
self.s1_normalized.append((item - min(self.s1)) / (max(self.s1) - min(self.s1)))
for item in self.s2:
self.s2_normalized.append((item - min(self.s2)) / (max(self.s2) - min(self.s2)))
for item in self.s3:
self.s3_normalized.append((item - min(self.s3)) / (max(self.s3) - min(self.s3)))
for item in self.s4:
self.s4_normalized.append((item - min(self.s4)) / (max(self.s4) - min(self.s4)))
for item in self.s5:
self.s5_normalized.append((item - min(self.s5)) / (max(self.s5) - min(self.s5)))
for item in self.s6:
self.s6_normalized.append((item - min(self.s6)) / (max(self.s6) - min(self.s6)))
for item in self.s7:
self.s7_normalized.append((item - min(self.s7)) / (max(self.s7) - min(self.s7)))
for item in self.s8:
self.s8_normalized.append((item - min(self.s8)) / (max(self.s8) - min(self.s8)))
for item in self.s9:
self.s9_normalized.append((item - min(self.s9)) / (max(self.s9) - min(self.s9)))
for item in self.s10:
self.s10_normalized.append((item - min(self.s10)) / (max(self.s10) - min(self.s10)))
for item in self.s11:
self.s11_normalized.append((item - min(self.s11)) / (max(self.s11) - min(self.s11)))
for item in self.s12:
self.s12_normalized.append((item - min(self.s12)) / (max(self.s12) - min(self.s12)))
for item in self.s13:
self.s13_normalized.append((item - min(self.s13)) / (max(self.s13) - min(self.s13)))
for item in self.s14:
self.s14_normalized.append((item - min(self.s14)) / (max(self.s14) - min(self.s14)))
for item in self.s15:
self.s15_normalized.append((item - min(self.s15)) / (max(self.s15) - min(self.s15)))
for item in self.s16:
self.s16_normalized.append((item - min(self.s16)) / (max(self.s16) - min(self.s16)))
for item in self.s17:
self.s17_normalized.append((item - min(self.s17)) / (max(self.s17) - min(self.s17)))
for item in self.s18:
self.s18_normalized.append((item - min(self.s18)) / (max(self.s18) - min(self.s18)))
self.radar_plot()
self.box_plot()
elif filename[1] == "CSV Files (*.csv)":
with open(filename[0], 'r') as csvfile:
lines = csv.reader(csvfile)
data = list(lines)
self.tableWidget.setRowCount(len(data))
self.tableWidget.setColumnCount(64)
for i in range(3):
for j in range(2):
self.tableWidget.setItem(i,j,QtWidgets.QTableWidgetItem(data[i][j]))
for i in range(3,len(data)):
for j in range(64):
self.tableWidget.setItem(i, j, QtWidgets.QTableWidgetItem(data[i][j]))
def max_min(self):
self.XY.axes.cla()
self.XY.axes.plot(self.time, self.s1_normalized, label=self.sensor_name[0])
'''
self.sc.axes.plot(self.time, self.s2_normalized, label=self.sensor_name[1])
self.sc.axes.plot(self.time, self.s3_normalized, label=self.sensor_name[2])
self.sc.axes.plot(self.time, self.s4_normalized, label=self.sensor_name[3])
self.sc.axes.plot(self.time, self.s5_normalized, label=self.sensor_name[4])
self.sc.axes.plot(self.time, self.s6_normalized, label=self.sensor_name[5])
self.sc.axes.plot(self.time, self.s7_normalized, label=self.sensor_name[6])
self.sc.axes.plot(self.time, self.s8_normalized, label=self.sensor_name[7])
self.sc.axes.plot(self.time, self.s9_normalized, label=self.sensor_name[8])
self.sc.axes.plot(self.time, self.s10_normalized, label=self.sensor_name[9])
self.sc.axes.plot(self.time, self.s11_normalized, label=self.sensor_name[10])
self.sc.axes.plot(self.time, self.s12_normalized, label=self.sensor_name[11])
self.sc.axes.plot(self.time, self.s13_normalized, label=self.sensor_name[12])
self.sc.axes.plot(self.time, self.s14_normalized, label=self.sensor_name[13])
self.sc.axes.plot(self.time, self.s15_normalized, label=self.sensor_name[14])
self.sc.axes.plot(self.time, self.s16_normalized, label=self.sensor_name[15])
self.sc.axes.plot(self.time, self.s17_normalized, label=self.sensor_name[16])
self.sc.axes.plot(self.time, self.s18_normalized, label=self.sensor_name[17])
'''
self.XY.axes.set_xlabel("Time")
self.XY.axes.set_ylabel("Impedance")
self.XY.axes.legend(loc='best')
self.XY.draw()
self.actionPeak_Detection.setEnabled(True)
self.actionRise_Time.setEnabled(True)
self.actionFall_Time.setEnabled(True)
self.actionFWHM.setEnabled(True)
def standardization(self):
z1,z2,z3,z4,z5,z6,z7,z8,z9,z10,z11,z12,z13,z14,z15,z16,z17,z18 = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
m1 = sum(self.s1) / len(self.s1)
m2 = sum(self.s2) / len(self.s2)
m3 = sum(self.s3) / len(self.s3)
m4 = sum(self.s4) / len(self.s4)
m5 = sum(self.s5) / len(self.s5)
m6 = sum(self.s6) / len(self.s6)
m7 = sum(self.s7) / len(self.s7)
m8 = sum(self.s8) / len(self.s8)
m9 = sum(self.s9) / len(self.s9)
m10 = sum(self.s10) / len(self.s10)
m11 = sum(self.s11) / len(self.s11)
m12 = sum(self.s12) / len(self.s12)
m13 = sum(self.s13) / len(self.s13)
m14 = sum(self.s14) / len(self.s14)
m15 = sum(self.s15) / len(self.s15)
m16 = sum(self.s16) / len(self.s16)
m17 = sum(self.s17) / len(self.s17)
m18 = sum(self.s18) / len(self.s18)
sd1 = self.calculate_sd(self.s1, m1)
sd2 = self.calculate_sd(self.s2, m2)
sd3 = self.calculate_sd(self.s3, m3)
sd4 = self.calculate_sd(self.s4, m4)
sd5 = self.calculate_sd(self.s5, m5)
sd6 = self.calculate_sd(self.s6, m6)
sd7 = self.calculate_sd(self.s7, m7)
sd8 = self.calculate_sd(self.s8, m8)
sd9 = self.calculate_sd(self.s9, m9)
sd10 = self.calculate_sd(self.s10, m10)
sd11 = self.calculate_sd(self.s11, m11)
sd12 = self.calculate_sd(self.s12, m12)
sd13 = self.calculate_sd(self.s13, m13)
sd14 = self.calculate_sd(self.s14, m14)
sd15 = self.calculate_sd(self.s15, m15)
sd16 = self.calculate_sd(self.s16, m16)
sd17 = self.calculate_sd(self.s17, m17)
sd18 = self.calculate_sd(self.s18, m18)
for item in self.s1:
z1.append((item-m1)/sd1)
for item in self.s2:
z2.append((item-m2)/sd2)
for item in self.s3:
z3.append((item-m3)/sd3)
for item in self.s4:
z4.append((item-m4)/sd4)
for item in self.s5:
z5.append((item-m5)/sd5)
for item in self.s6:
z6.append((item-m6)/sd6)
for item in self.s7:
z7.append((item-m7)/sd7)
for item in self.s8:
z8.append((item-m8)/sd8)
for item in self.s9:
z9.append((item-m9)/sd9)
for item in self.s10:
z10.append((item-m10)/sd10)
for item in self.s11:
z11.append((item-m11)/sd11)
for item in self.s12:
z12.append((item-m12)/sd12)
for item in self.s13:
z13.append((item-m13)/sd13)
for item in self.s14:
z14.append((item-m14)/sd14)
for item in self.s15:
z15.append((item-m15)/sd15)
for item in self.s16:
z16.append((item-m16)/sd16)
for item in self.s17:
z17.append((item-m17)/sd17)
for item in self.s18:
z18.append((item-m18)/sd18)
'''
mz1 = sum(z1) / len(z1)
mz2 = sum(z2) / len(z2)
mz3 = sum(z3) / len(z3)
mz4 = sum(z4) / len(z4)
mz5 = sum(z5) / len(z5)
mz6 = sum(z6) / len(z6)
mz7 = sum(z7) / len(z7)
mz8 = sum(z8) / len(z8)
mz9 = sum(z9) / len(z9)
mz10 = sum(z10) / len(z10)
mz11 = sum(z11) / len(z11)
mz12 = sum(z12) / len(z12)
mz13 = sum(z13) / len(z13)
mz14 = sum(z14) / len(z14)
mz15 = sum(z15) / len(z15)
mz16 = sum(z16) / len(z16)
mz17 = sum(z17) / len(z17)
mz18 = sum(z18) / len(z18)
sdz1 = self.calculate_sd(z1, mz1)
sdz2 = self.calculate_sd(z2, mz2)
sdz3 = self.calculate_sd(z3, mz3)
sdz4 = self.calculate_sd(z4, mz4)
sdz5 = self.calculate_sd(z5, mz5)
sdz6 = self.calculate_sd(z6, mz6)
sdz7 = self.calculate_sd(z7, mz7)
sdz8 = self.calculate_sd(z8, mz8)
sdz9 = self.calculate_sd(z9, mz9)
sdz10 = self.calculate_sd(z10, mz10)
sdz11 = self.calculate_sd(z11, mz11)
sdz12 = self.calculate_sd(z12, mz12)
sdz13 = self.calculate_sd(z13, mz13)
sdz14 = self.calculate_sd(z14, mz14)
sdz15 = self.calculate_sd(z15, mz15)
sdz16 = self.calculate_sd(z16, mz16)
sdz17 = self.calculate_sd(z17, mz17)
sdz18 = self.calculate_sd(z18, mz18)
print(mz1,sdz1)
print(mz2, sdz2)
print(mz3, sdz3)
print(mz4, sdz4)
print(mz5, sdz5)
print(mz6, sdz6)
print(mz7, sdz7)
print(mz8, sdz8)
print(mz9, sdz9)
print(mz10, sdz10)
print(mz11, sdz11)
print(mz12, sdz12)
print(mz13, sdz13)
print(mz14, sdz14)
print(mz15, sdz15)
print(mz16, sdz16)
print(mz17, sdz17)
print(mz18, sdz18)
'''
self.XY.axes.cla()
self.XY.axes.plot(self.time, z1, label=self.sensor_name[0])
'''
self.sc.axes.plot(self.time, z2, label=self.sensor_name[1])
self.sc.axes.plot(self.time, z3, label=self.sensor_name[2])
self.sc.axes.plot(self.time, z4, label=self.sensor_name[3])
self.sc.axes.plot(self.time, z5, label=self.sensor_name[4])
self.sc.axes.plot(self.time, z6, label=self.sensor_name[5])
self.sc.axes.plot(self.time, z7, label=self.sensor_name[6])
self.sc.axes.plot(self.time, z8, label=self.sensor_name[7])
self.sc.axes.plot(self.time, z9, label=self.sensor_name[8])
self.sc.axes.plot(self.time, z10, label=self.sensor_name[9])
self.sc.axes.plot(self.time, z11, label=self.sensor_name[10])
self.sc.axes.plot(self.time, z12, label=self.sensor_name[11])
self.sc.axes.plot(self.time, z13, label=self.sensor_name[12])
self.sc.axes.plot(self.time, z14, label=self.sensor_name[13])
self.sc.axes.plot(self.time, z15, label=self.sensor_name[14])
self.sc.axes.plot(self.time, z16, label=self.sensor_name[15])
self.sc.axes.plot(self.time, z17, label=self.sensor_name[16])
self.sc.axes.plot(self.time, z18, label=self.sensor_name[17])
'''
self.XY.axes.set_xlabel("Time")
self.XY.axes.set_ylabel("Impedance")
self.XY.axes.legend(loc='best')
self.XY.draw()
def calculate_sd(self,list,mean):
sd = 0.0
for item in list:
sd += (item-mean) ** 2
sd = sd/(len(list)-1)
sd = sd ** (1/2)
return sd
    def baseline(self):
        # Baseline correction is currently disabled: the implementation below
        # was turned into a docstring because it depends on the third-party
        # `peakutils` package.  The menu action stays wired but is a no-op.
        '''
        s1 = np.array(self.s1)
        base = peakutils.baseline(s1, deg=3, max_it=100, tol=0.001)
        #self.sc.axes.cla()
        self.sc.axes.plot(self.time, base, label="baseline",c='red')
        self.sc.axes.legend(loc='best')
        self.sc.draw()
        '''
def peak_detection(self):
s1_diff = []
self.s1_indexes = []
for i in range(len(self.s1_normalized)-1):
s1_diff.append(self.s1_normalized[i+1]-self.s1_normalized[i])
print("diff=" + str(s1_diff))
print(len(s1_diff))
for i in range(len(s1_diff)-1):
if s1_diff[i]>0 and s1_diff[i+1]<0:
self.s1_indexes.append(i+1)
print(self.s1_indexes)
for i in range(len(self.s1_indexes)-1):
if self.s1_normalized[self.s1_indexes[i]]>0.5 and (self.s1_indexes[i+1]-self.s1_indexes[i])>=5:
self.XY.axes.scatter(self.time[self.s1_indexes[i]], self.s1_normalized[self.s1_indexes[i]],c='red')
self.XY.draw()
self.actionRise_Time.setEnabled(True)
def rise_time(self):
upper_limit = 0
lower_limit = 0
max_index = 0
rel_tol = 0.05
abs_tol = 0.1
peak_values = []
#for i in range(len(self.s1_indexes)):
#peak_values.append(self.s1_normalized[self.s1_indexes[i]])
for i in range(len(self.s1_normalized)):
if self.s1_normalized[i]==max(self.s1_normalized):
max_index = i
print("max index=" + str(max_index))
for i in range(max_index):
#if math.isclose(self.s1_normalized[i],0.9*self.s1_normalized[peak_index],rel_tol=0.05):
if abs(self.s1_normalized[i]-0.9*max(self.s1_normalized)) <= abs_tol:
upper_limit = i
#if math.isclose(self.s1_normalized[i], 0.1*self.s1_normalized[peak_index], rel_tol=0.05):
if abs(self.s1_normalized[i]-0.1*max(self.s1_normalized)) <= abs_tol:
lower_limit = i
print(upper_limit)
print(lower_limit)
self.XY.axes.text(100,0.9,"Rise Time = " + str(upper_limit-lower_limit)+'s')
self.XY.draw()
def fall_time(self):
upper_limit = 0
lower_limit = 0
max_index = 0
rel_tol = 0.05
abs_tol = 0.1
for i in range(len(self.s1_normalized)):
if self.s1_normalized[i]==max(self.s1_normalized):
max_index = i
print("max index="+ str(max_index))
for i in range(max_index,len(self.s1_normalized)):
if abs(self.s1_normalized[i] - 0.9 * max(self.s1_normalized)) <= abs_tol:
lower_limit = i
if abs(self.s1_normalized[i] - 0.1 * max(self.s1_normalized)) <= abs_tol:
upper_limit = i
break
print(upper_limit)
print(lower_limit)
self.XY.axes.text(100,0.8,"Fall Time = " + str(upper_limit - lower_limit) + 's')
self.XY.draw()
def FWHM(self):
upper_limit = 0
lower_limit = 0
max_index = 0
rel_tol = 0.15
abs_tol = 0.1
for i in range(len(self.s1_normalized)):
if self.s1_normalized[i] == max(self.s1_normalized):
max_index = i
print("max index=" + str(max_index))
for i in range(max_index):
if abs(self.s1_normalized[i] - 0.5 * max(self.s1_normalized)) <= abs_tol:
lower_limit = i
for i in range(max_index, len(self.s1_normalized)):
if abs(self.s1_normalized[i] - 0.5 * max(self.s1_normalized)) <= abs_tol:
upper_limit = i
break
print(upper_limit)
print(lower_limit)
x = [lower_limit,upper_limit]
y = [self.s1_normalized[lower_limit],self.s1_normalized[upper_limit]]
self.XY.axes.plot(x,y,c='red')
self.XY.axes.text(100,0.7, "FWHM = " + str(upper_limit - lower_limit) + 's')
self.XY.draw()
def radar_plot(self):
titles = self.sensor_name
self.Radar_Layout = QtWidgets.QVBoxLayout(self.Radar_widget)
self.radar = Radar(titles, rect=None, parent=self.Radar_widget)
self.Radar_Layout.addWidget(self.radar)
self.radar_toolbar = NavigationToolbar(self.radar, self.Radar_widget)
self.Radar_Layout.addWidget(self.radar_toolbar)
for i in range(121):
self.radar.plot([self.s1_normalized[i],self.s2_normalized[i],self.s3_normalized[i],self.s4_normalized[i],self.s5_normalized[i],self.s6_normalized[i],self.s7_normalized[i],self.s8_normalized[i],self.s9_normalized[i],self.s10_normalized[i],self.s11_normalized[i],self.s12_normalized[i],self.s13_normalized[i],self.s14_normalized[i],self.s15_normalized[i],self.s16_normalized[i],self.s17_normalized[i],self.s18_normalized[i]])
self.radar.draw()
self.actionRadar_Plot.setEnabled(False)
def box_plot(self):
labels = self.sensor_name
data = [self.s1_normalized,self.s2_normalized,self.s3_normalized,self.s4_normalized,self.s5_normalized,self.s6_normalized,self.s7_normalized,self.s8_normalized,self.s9_normalized,self.s10_normalized,self.s11_normalized,self.s12_normalized,self.s13_normalized,self.s14_normalized,self.s15_normalized,self.s16_normalized,self.s17_normalized,self.s18_normalized]
self.box.axes.cla()
self.box.axes.boxplot(data,labels=labels)
self.box.axes.set_ylabel("Impedance")
self.box.draw()
| {
"repo_name": "ElectronicNose/Electronic-Nose",
"path": "analysis.py",
"copies": "1",
"size": "27688",
"license": "mit",
"hash": -6801433839231590000,
"line_mean": 42.3717948718,
"line_max": 435,
"alpha_frac": 0.5419676394,
"autogenerated": false,
"ratio": 3.1103122893731747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.912056617846218,
"avg_score": 0.0063427500621988315,
"num_lines": 624
} |
"""Analysis functions for place field data."""
import numpy as np
import math
import pandas as pd
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
try:
from bottleneck import nanmean
except ImportError:
from numpy import nanmean
from pycircstat.descriptive import _complex_mean
from cmath import polar
import itertools as it
import warnings as wa
import cPickle as pickle
from copy import deepcopy
from random import sample
from collections import defaultdict
from scipy.stats import spearmanr, pearsonr, linregress
import lab
from ..classes.place_cell_classes import pcExperimentGroup
import behavior_analysis as ba
import imaging_analysis as ia
from calc_activity import calc_activity
import filters
from .. import plotting
from ..misc import stats, memoize
from .. import misc
from ..classes import exceptions as exc
from ..misc.analysis_helpers import rewards_by_condition
@memoize
def sensitivity(
        exptGrp, roi_filter=None, includeFrames='running_only'):
    """
    Fraction of complete forward passes through the place field that trigger
    a significant calcium transient.

    Parameters
    ----------
    exptGrp : pcExperimentGroup
        Group whose experiments/trials are analyzed.
    roi_filter : callable, optional
        Restricts which ROIs are included.
    includeFrames : str
        Only 'running_only' is supported; any other value raises ValueError.

    Returns
    -------
    pd.DataFrame
        One row per (trial, roi), 'value' = hits / passes. NaN when the ROI
        has no place fields, or when all its fields were wider than half
        the belt (those fields are skipped with a warning).
    """
    pfs_n = exptGrp.pfs_n(roi_filter=roi_filter)
    data_list = []
    # Counts place fields skipped because they span more than half the belt.
    n_wide_pfs = 0
    for expt in exptGrp:
        pfs = pfs_n[expt]
        if includeFrames == 'running_only':
            imaging_label = exptGrp.args['imaging_label']
            if imaging_label is None:
                imaging_label = expt.most_recent_key(
                    channel=exptGrp.args['channel'])
        for trial_idx, trial in enumerate(expt.findall('trial')):
            position = trial.behaviorData(imageSync=True)['treadmillPosition']
            transients = trial.transientsData(
                roi_filter=roi_filter, channel=exptGrp.args['channel'],
                label=exptGrp.args['imaging_label'],
                demixed=exptGrp.args['demixed'])
            if includeFrames == 'running_only':
                # Reuse the running-interval parameters that were used when
                # the place fields were originally computed.
                with open(expt.placeFieldsFilePath(), 'rb') as f:
                    p = pickle.load(f)
                running_kwargs = p[imaging_label][
                    'demixed' if exptGrp.args['demixed'] else 'undemixed'][
                    'running_kwargs']
                running_frames = ba.runningIntervals(
                    trial, returnBoolList=True, **running_kwargs)
            else:
                raise ValueError
            rois = expt.rois(
                roi_filter=roi_filter, channel=exptGrp.args['channel'],
                label=exptGrp.args['imaging_label'])
            assert len(rois) == len(pfs)
            assert len(rois) == len(transients)
            for roi_transients, roi_pfs, roi in it.izip(transients, pfs, rois):
                onsets = roi_transients['start_indices'].tolist()
                # Keep only onsets that occur while the animal is running.
                onsets = [onset for onset in onsets if running_frames[onset]]
                # At the moment, pfs wider than 0.5 will not be accurately
                # counted. This could potentially be fixed if needed.
                for pf in reversed(roi_pfs):
                    pf_len = pf[1] - pf[0]
                    if pf_len < 0:
                        pf_len += 1.
                    if pf_len > 0.5:
                        n_wide_pfs += 1
                        roi_pfs.remove(pf)
                if not len(roi_pfs):
                    # Sensitivity will be nan if roi has no place fields,
                    # or if all place fields were wider than 0.5
                    data_dict = {
                        'trial': trial, 'roi': roi, 'value': np.nan}
                    data_list.append(data_dict)
                    continue
                passes = 0
                hits = 0.
                for pf in roi_pfs:
                    # Scan the trial frame-by-frame for complete forward
                    # passes through this field. Boolean masks are combined
                    # with * (AND) for non-wrapping fields and + (OR) for
                    # fields that wrap past the end of the belt.
                    current_frame = 0
                    while current_frame < expt.num_frames():
                        if pf[0] < pf[1]:
                            entries = np.argwhere(
                                (position[current_frame:] >= pf[0]) *
                                (position[current_frame:] < pf[1]))
                        else:
                            entries = np.argwhere(
                                (position[current_frame:] >= pf[0]) +
                                (position[current_frame:] < pf[1]))
                        if not len(entries):
                            break
                        next_entry = current_frame + entries[0, 0]
                        if pf[0] < pf[1]:
                            exits = np.argwhere(
                                (position[next_entry + 1:] >= pf[1]) +
                                (position[next_entry + 1:] < pf[0]))
                        else:
                            exits = np.argwhere(
                                (position[next_entry + 1:] >= pf[1]) *
                                (position[next_entry + 1:] < pf[0]))
                        if not len(exits):
                            # NOTE: a trial ending within the placefield does
                            # not count as a pass
                            break
                        next_exit = next_entry + 1 + exits[0, 0]
                        # if not good entry, continue
                        # if not (position[next_entry - 1] - pf[0] < 0 or
                        #         position[next_entry - 1] - pf[0] > 0.5):
                        #     current_frame = next_exit + 1
                        #     continue
                        # Reject the pass unless the field was entered from
                        # just before its start (forward travel): skip if the
                        # previous frame was already at/past the field start,
                        # allowing for wraparound via the -0.5 term.
                        previous_position = position[next_entry - 1]
                        if (0 <= previous_position - pf[0] < 0.5) or \
                                previous_position - pf[0] < -0.5:
                            current_frame = next_exit
                            continue
                        # if not good exit, continue
                        # if not (position[next_exit + 1] - pf[1] > 0 or
                        #         position[next_exit + 1] - pf[1] < -0.5):
                        #     current_frame = next_exit + 1
                        #     continue
                        # Symmetric check on the exit side: the field must be
                        # left past its end, again allowing for wraparound.
                        next_position = position[next_exit]
                        if (-0.5 < next_position - pf[1] < 0.) or \
                                (next_position - pf[1] > 0.5):
                            current_frame = next_exit
                            continue
                        passes += 1
                        # A pass is a 'hit' if any transient onset falls
                        # within the [entry, exit) frame window.
                        for onset in onsets:
                            if next_entry <= onset < next_exit:
                                hits += 1
                                break
                        current_frame = next_exit
                assert passes
                data_dict = {'trial': trial,
                             'roi': roi,
                             'value': hits / passes}
                data_list.append(data_dict)
    if n_wide_pfs:
        with wa.catch_warnings():
            wa.simplefilter('always')
            wa.warn(
                'Sensitivity not calculated for pf width >0.5: ' +
                '{} pfs skipped'.format(n_wide_pfs))
    return pd.DataFrame(data_list, columns=['trial', 'roi', 'value'])
@memoize
def specificity(
        exptGrp, roi_filter=None, includeFrames='running_only'):
    """
    Fraction of transient onsets that occur in a place field.

    Parameters
    ----------
    exptGrp : pcExperimentGroup
        Group whose experiments/trials are analyzed.
    roi_filter : callable, optional
        Restricts which ROIs are included.
    includeFrames : str
        Only 'running_only' is supported; any other value raises
        ValueError (matching sensitivity()).

    Returns
    -------
    pd.DataFrame
        One row per (trial, roi) with 'value' = in-field onsets / running
        onsets, or NaN when the ROI had no running transients. ROIs with
        no place fields are omitted entirely.
    """
    pfs_n = exptGrp.pfs_n(roi_filter=roi_filter)
    data_list = []
    for expt in exptGrp:
        pfs = pfs_n[expt]
        for trial_idx, trial in enumerate(expt.findall('trial')):
            position = trial.behaviorData(imageSync=True)['treadmillPosition']
            transients = trial.transientsData(
                roi_filter=roi_filter, channel=exptGrp.args['channel'],
                label=exptGrp.args['imaging_label'],
                demixed=exptGrp.args['demixed'])
            if includeFrames == 'running_only':
                # Reuse the running-interval parameters used when the place
                # fields were originally computed.
                with open(expt.placeFieldsFilePath(), 'rb') as f:
                    p = pickle.load(f)
                running_kwargs = p[exptGrp.args['imaging_label']][
                    'demixed' if exptGrp.args['demixed'] else 'undemixed'][
                    'running_kwargs']
                running_frames = ba.runningIntervals(
                    trial, returnBoolList=True, **running_kwargs)
            else:
                # Bug fix: 'running_frames' was previously referenced below
                # even when it was never assigned, raising NameError for any
                # includeFrames value other than 'running_only'. Fail fast
                # instead, matching sensitivity().
                raise ValueError
            rois = expt.rois(
                roi_filter=roi_filter, channel=exptGrp.args['channel'],
                label=exptGrp.args['imaging_label'])
            assert len(rois) == len(pfs)
            assert len(rois) == len(transients)
            for roi_transients, roi_pfs, roi in it.izip(transients, pfs, rois):
                if not len(roi_pfs):
                    # ROIs without place fields produce no row at all.
                    continue
                onsets = roi_transients['start_indices'].tolist()
                # Keep only onsets that occur while the animal is running.
                onsets = [o for o in onsets if running_frames[o]]
                nTransients = 0
                hits = 0.
                for onset in onsets:
                    nTransients += 1
                    onset_position = position[onset]
                    for pf in roi_pfs:
                        if pf[0] < pf[1]:
                            if pf[0] < onset_position < pf[1]:
                                hits += 1
                                break
                        else:
                            # Field wraps past the end of the belt.
                            if onset_position > pf[0] or \
                                    onset_position < pf[1]:
                                hits += 1
                                break
                if not nTransients:
                    value = np.nan
                else:
                    value = hits / nTransients
                data_dict = {'trial': trial,
                             'roi': roi,
                             'value': value}
                data_list.append(data_dict)
    return pd.DataFrame(data_list, columns=['trial', 'roi', 'value'])
def plotPositionHeatmap(
exptGrp, roi_filter=None, ax=None, title='', plotting_order='all',
cbar_visible=True, cax=None, norm=None, rasterized=False, cmap=None,
show_belt=True, reward_in_middle=False):
"""
Plot a heatmap of ROI activity at each place bin
Keyword arguments:
exptGrp -- pcExperimentGroup containing data to plot
ax -- axis to plot on
title -- label for the axis
cbar_visible -- if False does not show a colorbar
norm -- one of None, 'individual', an np.array of imagingData to
determine normalization method
'individual' scales each ROI individually to the same range
np.array of imagingData normalizes to the raw imaging data by
subtracting off the mean and dividing by the std on a per ROI basis,
similar to a z-score
reward_in_middle : bool
If True, move reward to middle of plot
"""
if ax is None:
fig = plt.figure(figsize=(15, 8))
ax = fig.add_subplot(111, rasterized=rasterized)
else:
fig = ax.figure
divider = make_axes_locatable(ax)
if cbar_visible and cax is None:
cax = divider.append_axes("right", size="5%", pad=0.05)
if show_belt:
if reward_in_middle:
raise NotImplementedError
belt_ax = divider.append_axes("bottom", size="5%", pad=0.05)
exptGrp[0].belt().show(belt_ax, zeroOnLeft=True)
def roller(data, expt):
if not reward_in_middle:
return data
n_bins = exptGrp.args['nPositionBins']
mid_bin = int(np.around(n_bins / 2.))
reward_bin = int(np.around(
expt.rewardPositions(units='normalized')[0] * n_bins))
roll_factor = mid_bin - reward_bin
return np.roll(data, roll_factor, axis=1)
if plotting_order == 'all':
roi_data_to_plot = []
for expt in exptGrp:
roi_data_to_plot.extend(
roller(exptGrp.data(roi_filter=roi_filter)[expt], expt))
data_to_plot = np.array(roi_data_to_plot)
elif plotting_order == 'place_cells_first':
place_cells = []
for expt in exptGrp:
place_cells.extend(roller(exptGrp.data(
roi_filter=exptGrp.pcs_filter(roi_filter=roi_filter))[expt],
expt))
if len(place_cells) != 0:
place_cells_array = np.array(place_cells)
place_cells_array = place_cells_array[
np.argsort(np.argmax(place_cells_array, axis=1))]
empty_array = np.empty((1, place_cells_array.shape[1]))
empty_array.fill(np.nan)
place_cells_array = np.vstack((place_cells_array, empty_array))
non_place_cells = []
for expt in exptGrp:
expt_filter = misc.filter_intersection(
[misc.invert_filter(exptGrp.pcs_filter()), roi_filter])
non_place_cells.extend(roller(
exptGrp.data(roi_filter=expt_filter)[expt], expt))
non_place_cells_array = np.array(non_place_cells)
# non_place_cells_array = non_place_cells_array[
# np.argsort(np.argmax(non_place_cells_array, axis=1))]
if len(place_cells) != 0:
data_to_plot = np.vstack((place_cells_array, non_place_cells_array))
n_pcs = place_cells_array.shape[0] - 1
else:
data_to_plot = non_place_cells_array
n_pcs = 0
elif plotting_order == 'place_cells_only':
roi_data_to_plot = []
for expt in exptGrp:
roi_data_to_plot.extend(roller(exptGrp.data(
roi_filter=exptGrp.pcs_filter(roi_filter=roi_filter))[expt],
expt))
roi_data_to_plot = np.array(roi_data_to_plot)
data_to_plot = roi_data_to_plot[
np.argsort(np.argmax(roi_data_to_plot, axis=1))]
n_pcs = data_to_plot.shape[0]
elif plotting_order is not None:
allROIs = exptGrp.allROIs(channel=exptGrp.args['channel'],
label=exptGrp.args['imaging_label'],
roi_filter=None)
roi_data_to_plot = []
for roi_tuple in plotting_order:
(expt, roi_idx) = allROIs[roi_tuple][0]
roi_data_to_plot.append(roller(
exptGrp.data()[expt], expt)[roi_idx])
data_to_plot = np.array(roi_data_to_plot)
if norm is 'individual':
# Find the all zero rows, and just put back zeros there
all_zero_rows = np.where(np.all(data_to_plot == 0, axis=1))[0]
data_to_plot -= np.amin(data_to_plot, axis=1)[:, np.newaxis]
data_to_plot /= np.amax(data_to_plot, axis=1)[:, np.newaxis]
data_to_plot[all_zero_rows] = 0
elif norm is 'pc_individual_non_pc_grouped':
data_to_plot[:n_pcs] -= np.amin(
data_to_plot[:n_pcs], axis=1)[:, np.newaxis]
data_to_plot[:n_pcs] /= np.amax(
data_to_plot[:n_pcs], axis=1)[:, np.newaxis]
data_to_plot[n_pcs + 1:] -= np.amin(data_to_plot[n_pcs + 1:])
data_to_plot[n_pcs + 1:] /= np.amax(data_to_plot[n_pcs + 1:])
elif norm is 'all':
data_to_plot -= np.amin(data_to_plot[np.isfinite(data_to_plot)])
data_to_plot /= np.amax(data_to_plot[np.isfinite(data_to_plot)])
elif norm is not None:
try:
try_data = data_to_plot - np.nanmean(norm, axis=1)
try_data /= np.nanstd(norm, axis=1)
except:
print "Unable to normalize data"
else:
data_to_plot = try_data
# Set the color scale based on a percentile of the data
vmin = np.percentile(data_to_plot[np.isfinite(data_to_plot)], 40)
vmax = np.percentile(data_to_plot[np.isfinite(data_to_plot)], 99)
im = ax.imshow(
data_to_plot, vmin=vmin, vmax=vmax, interpolation='none',
aspect='auto', cmap=cmap)
if cbar_visible:
plt.colorbar(im, cax=cax, ticks=[vmin, vmax], format='%.2f')
ax.set_xlabel('Normalized position')
ax.set_xticks(np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 5))
ax.set_xticklabels(np.linspace(0, 1, 5))
ax.set_ylabel('Cell')
ax.set_title(title)
# If the cells are sorted, the y labels will be wrong
plt.setp(ax.get_yticklabels(), visible=False)
return fig
def plotTuningCurve(
        data, roi, ax=None, polar=False, placeField=None,
        placeFieldColor='red', xlabel_visible=True, ylabel_visible=True,
        error_bars=None, axis_title=None, two_cycles=False, rasterized=False):
    # TODO: WHATEVER CALLS THIS SHOULD JUST PASS IN 1D ARRAYS FOR DATA AND
    # ERROR BARS! NO NEED TO PASS IN WHOLE 2D ARRAYS?
    """Plot an ROI tuning curve on a normal or polar axis

    Keyword arguments:
    data -- 2D array of tuning curves, one row per ROI
    roi -- number of ROI to plot
    ax -- axis to plot on
    polar -- if True, will plot on polar plot
    placeField -- a single element from the normalized identifyPlaceField list,
        corresponding to the roi
    placeFieldColor -- color to shade the placefield
    xlabel_visible / ylabel_visible -- toggle axis labels
    error_bars -- if not None, plots error bounds on tuning curve, should be
        same shape as data
    axis_title -- title used to label the axis
    two_cycles -- if True, plots two identical cycles, ignored for polar plots

    """
    if ax is None and not polar:
        fig = plt.figure(figsize=(15, 8))
        ax = fig.add_subplot(111)
    elif ax is None and polar:
        fig = plt.figure(figsize=(15, 8))
        ax = fig.add_subplot(111, polar=True)
    if error_bars is not None:
        assert data.shape == error_bars.shape, \
            'Data and error bars shape mismatch'
    if not polar:
        if not two_cycles:
            x_range = np.linspace(0, 1, data.shape[1])
            ax.plot(x_range, data[roi], rasterized=rasterized)
        else:
            # Two full cycles: duplicate the tuning curve over x in [0, 2).
            x_range = np.linspace(0, 2, data.shape[1] * 2)
            double_data = np.ma.hstack([data[roi]] * 2)
            ax.plot(x_range, double_data, rasterized=rasterized)
        if error_bars is not None:
            if not two_cycles:
                ax.fill_between(
                    x_range, data[roi], data[roi] + error_bars[roi],
                    facecolor='gray', edgecolor='none', alpha=0.2,
                    rasterized=rasterized)
            else:
                double_err = np.ma.hstack([error_bars[roi]] * 2)
                ax.fill_between(
                    x_range, double_data, double_data + double_err,
                    facecolor='gray', edgecolor='none', alpha=0.2,
                    rasterized=rasterized)
        if placeField is not None:
            for field in placeField:
                # x_range % 1 maps both cycles back onto normalized
                # position; fields with start > stop wrap past the belt end
                # and are shaded with a logical OR.
                if field[0] <= field[1]:
                    ax.fill_between(
                        x_range, data[roi] if not two_cycles else double_data,
                        where=np.logical_and(
                            x_range % 1 >= field[0], x_range % 1 <= field[1]),
                        facecolor=placeFieldColor, alpha=0.5,
                        rasterized=rasterized)
                else:
                    ax.fill_between(
                        x_range, data[roi] if not two_cycles else double_data,
                        where=np.logical_or(
                            x_range % 1 >= field[0], x_range % 1 <= field[1]),
                        facecolor=placeFieldColor, alpha=0.5,
                        rasterized=rasterized)
        ax.set_xlim((x_range[0], x_range[-1]))
        ax.set_yticks([0, np.round(ax.get_ylim()[1], 2)])
    else:
        theta_range = np.linspace(0, 2 * np.pi, data.shape[1])
        ax.plot(theta_range, data[roi], rasterized=rasterized)
        if error_bars is not None:
            ax.fill_between(
                theta_range, data[roi], data[roi] + error_bars[roi],
                facecolor='gray', edgecolor='gray', alpha=0.2,
                rasterized=rasterized)
        ax.set_xticks([0, np.pi / 2., np.pi, 3 * np.pi / 2.])
        x_ticks = ax.get_xticks()
        # Label angular ticks in normalized position units (fraction of belt).
        x_tick_labels = [str(x / 2 / np.pi) for x in x_ticks]
        ax.set_xticklabels(x_tick_labels)
        if placeField:
            for field in placeField:
                # Convert to polar coordinates
                field = [x * 2 * np.pi for x in field]
                if field[0] <= field[1]:
                    ax.fill_between(
                        theta_range, data[roi], where=np.logical_and(
                            theta_range >= field[0], theta_range <= field[1]),
                        facecolor=placeFieldColor, alpha=0.5,
                        rasterized=rasterized)
                else:
                    ax.fill_between(
                        theta_range, data[roi], where=np.logical_or(
                            theta_range >= field[0], theta_range <= field[1]),
                        facecolor=placeFieldColor, alpha=0.5,
                        rasterized=rasterized)
        plt.setp(ax.get_yticklabels(), visible=False)
        # Annotate the radial maximum in the upper-right of the axes.
        ax.text(.86, .86, round(ax.get_ylim()[1], 3), transform=ax.transAxes,
                va='bottom', size=10)
    if not xlabel_visible:
        plt.setp(ax.get_xticklabels(), visible=False)
    else:
        ax.set_xlabel('Normalized position')
    if ylabel_visible:
        ax.set_ylabel(r'Average $\Delta$F/F')
    ax.set_title(axis_title)
def plotImagingData(
        roi_tSeries, ax=None, roi_transients=None, position=None,
        placeField=None, imaging_interval=1, xlabel_visible=True,
        ylabel_visible=True, right_label=False, placeFieldColor='red',
        transients_color='r', title='', rasterized=False, **plot_kwargs):
    """Plot a single ROI fluorescence time series, optionally highlighting
    transients and shading the frames spent inside place fields.

    Keyword arguments:
    roi_tSeries -- 1D fluorescence trace for one ROI
    roi_transients -- record with 'start_indices'/'end_indices' of
        transients to overplot in transients_color
    position -- imageSync'd normalized treadmill position (required when
        placeField is given)
    placeField -- list of [start, stop] normalized-position intervals
    imaging_interval -- seconds per frame, used to scale the x axis
    right_label -- if True, put the title to the right of the axis
    """
    if ax is None:
        fig = plt.figure(figsize=(15, 8))
        ax = fig.add_subplot(111, rasterized=rasterized)
    x_range = np.arange(len(roi_tSeries)) * imaging_interval
    ax.plot(x_range, roi_tSeries, rasterized=rasterized, **plot_kwargs)
    if roi_transients is not None:
        # Re-draw each transient segment on top of the trace in color.
        for start, stop in zip(roi_transients['start_indices'],
                               roi_transients['end_indices']):
            if not np.isnan(start) and not np.isnan(stop):
                ax.plot(x_range[start:stop + 1], roi_tSeries[start:stop + 1],
                        color=transients_color, rasterized=rasterized)
    if placeField:
        # if behav data recording is shorter than imaging recording:
        x_range = x_range[:position.shape[0]]
        position = position[:x_range.shape[0]]
        yl = ax.get_ylim()
        for interval in placeField:
            # Intervals with start > stop wrap past the belt end and are
            # shaded with a logical OR.
            if interval[0] <= interval[1]:
                ax.fill_between(
                    x_range, yl[0], yl[1],
                    where=np.logical_and(position >= interval[0],
                                         position <= interval[1]),
                    facecolor=placeFieldColor,
                    alpha=0.5,
                    rasterized=rasterized)
            else:
                ax.fill_between(
                    x_range, yl[0], yl[1],
                    where=np.logical_or(position >= interval[0],
                                        position <= interval[1]),
                    facecolor=placeFieldColor,
                    alpha=0.5,
                    rasterized=rasterized)
        ax.set_ylim(yl)
    if not xlabel_visible:
        plt.setp(ax.get_xticklabels(), visible=False)
    else:
        ax.set_xlabel('Time(s)')
    yl = ax.get_ylim()
    ax.set_yticks((0, yl[1]))
    ax.set_yticklabels(('', str(yl[1])))
    ax.tick_params(axis='y', direction='out')
    ax.set_ylim(yl)
    if not ylabel_visible:
        pass
        # plt.setp(ax.get_yticklabels(), visible=False)
    else:
        ax.set_ylabel(r'Mean $\Delta$F/F')
    if right_label:
        plotting.right_label(ax, title)
    else:
        ax.set_title(title)
def plotTransientVectors(exptGrp, roi_idx, ax, color='k', mean_color='r',
                         mean_zorder=1):
    """Plot running-related transients on a polar axis weighted by occupancy

    Accepts a single experiment pcExperimentGroup. Each transient is drawn
    as an arrow at its position bin's angle, with magnitude inversely
    weighted by the time spent in that bin; the occupancy-weighted circular
    mean is drawn in mean_color.
    """
    with open(exptGrp[0].placeFieldsFilePath(), 'rb') as f:
        pfs = pickle.load(f)
    demixed_key = 'demixed' if exptGrp.args['demixed'] else 'undemixed'
    imaging_label = exptGrp.args['imaging_label'] \
        if exptGrp.args['imaging_label'] is not None \
        else exptGrp[0].most_recent_key(channel=exptGrp.args['channel'])
    # Per-bin transient counts for this ROI from the place fields file.
    true_values = pfs[imaging_label][demixed_key]['true_values'][roi_idx]
    # true_counts = pfs[imaging_label][demixed_key]['true_counts'][roi_idx]
    # Occupancy is taken from the experiment rather than the saved counts.
    true_counts = exptGrp[0].positionOccupancy()
    # Bin centers in radians (100 bins over the full circle).
    bins = 2 * np.pi * np.arange(0, 1, .01)
    magnitudes = []
    angles = []
    # One arrow per transient: angle = bin angle, magnitude = 1 / occupancy.
    for pos_bin in xrange(len(true_values)):
        for v in xrange(int(true_values[pos_bin])):
            angles.append(bins[pos_bin])
            magnitudes.append(1. / true_counts[pos_bin])
    # Normalize so the longest arrow has unit length.
    magnitudes /= np.amax(magnitudes)
    for a, m in zip(angles, magnitudes):
        ax.arrow(a, 0, 0, m, length_includes_head=True, color=color,
                 head_width=np.amin([0.1, m]), head_length=np.amin([0.1, m]),
                 zorder=2)
    # Occupancy-weighted circular mean of the per-bin transient rates.
    p = polar(_complex_mean(bins, true_values / true_counts))
    mean_r = p[0]
    mean_angle = p[1]
    ax.arrow(mean_angle, 0, 0, mean_r, length_includes_head=True,
             color=mean_color, head_width=0, head_length=0, lw=1,
             zorder=mean_zorder)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
def plotPosition(
        trial, ax=None, placeFields=None, placeFieldColors=None, polar=False,
        trans_roi_filter=None, trans_marker_size=10, running_trans_only=False,
        rasterized=False, channel='Ch2', label=None, demixed=False,
        behaviorData=None, trans_kwargs=None, position_kwargs=None,
        nan_lap_transitions=True):
    """Plot position over time

    Keyword arguments:
    trial -- an Experiment/Trial object to extract data from, if an Experiment
        is passed in, analyze the first Trial
    ax -- axes to plot on
    placeFields -- output from identifyPlaceFields, shades placefields, not
        sorted, so sort beforehand if desired
    placeFieldColors -- colors to use for each shaded placefield, should have
        length equal to number of place cells
    polar -- plot on polar axis
    trans_roi_filter -- either None or an roi_filter to mark transient peak
        times on the plot. Filter must return 1 ROI!
    running_trans_only -- if True, only mark transients occurring during
        running intervals
    channel, label, demixed -- used to determine which transients to plot
    behaviorData -- optionally pass in imageSync'd behaviorData so it doesn't
        need to be reloaded
    nan_lap_transitions -- If True, NaN the last value before each reset.
        Should allow for plotting as a line instead of a scatter. Only applies
        to non-polar plots.
    """
    if ax is None and not polar:
        fig = plt.figure(figsize=(15, 8))
        ax = fig.add_subplot(111, rasterized=rasterized)
    elif ax is None and polar:
        fig = plt.figure(figsize=(15, 8))
        ax = fig.add_subplot(111, polar=True, rasterized=rasterized)
    if placeFields is not None:
        # Only ROIs that actually have place fields get a shaded band.
        roi_list = [x for x in range(len(placeFields)) if
                    placeFields[x] != []]
        # roi_list = identifyPlaceCells(placeFields, sort=False)
        if placeFieldColors is None:
            placeFieldColors = [plt.cm.Set1(i) for i in
                                np.linspace(0, .9, len(roi_list))]
    if behaviorData is None:
        bd = trial.behaviorData(imageSync=True)
    else:
        # Deep copy so in-place modifications below (polar scaling) do not
        # mutate the caller's data.
        bd = deepcopy(behaviorData)
    if 'treadmillPosition' not in bd:
        raise exc.MissingBehaviorData(' ''treadmillPosition'' not defined')
    position = bd['treadmillPosition']
    imagingInterval = trial.parent.frame_period()
    if trans_roi_filter is not None:
        try:
            trans_indices = trial.parent.transientsData(
                behaviorSync=True, roi_filter=trans_roi_filter,
                label=label)[
                trial.trialNum()][0]['start_indices']
        except TypeError:
            # No transients data available; silently disable markers.
            trans_roi_filter = None
        else:
            if running_trans_only:
                # Prefer the running-interval parameters saved with the
                # place fields; fall back to defaults if unavailable.
                try:
                    with open(trial.parent.placeFieldsFilePath(
                            channel=channel), 'r') as f:
                        place_fields = pickle.load(f)
                except (IOError, exc.NoSimaPath, pickle.UnpicklingError):
                    wa.warn('No place fields found')
                    running_intervals = ba.runningIntervals(
                        trial, returnBoolList=False)
                else:
                    if label is None:
                        label = trial.parent.most_recent_key(channel=channel)
                    demix_label = 'demixed' if demixed else 'undemixed'
                    running_kwargs = place_fields[label][demix_label][
                        'running_kwargs']
                    running_intervals = ba.runningIntervals(
                        trial, returnBoolList=False, **running_kwargs)
                running_frames = np.hstack([np.arange(start, end + 1) for
                                            start, end in running_intervals])
                trans_indices = list(set(trans_indices).intersection(
                    running_frames))
            if len(trans_indices) == 0:
                trans_roi_filter = None
    if not polar:
        if position_kwargs is None:
            if nan_lap_transitions:
                position_kwargs = {}
                position_kwargs['linestyle'] = '-'
            else:
                position_kwargs = {}
                position_kwargs['linestyle'] = 'None'
                position_kwargs['marker'] = '.'
        x_range = np.arange(len(position)) * imagingInterval
        pos_copy = position.copy()
        if nan_lap_transitions:
            # NaN the sample just before each lap reset (position jump
            # > 0.4) so the line plot doesn't draw a vertical "flyback".
            jumps = np.hstack([np.abs(np.diff(pos_copy)) > 0.4, False])
            pos_copy[jumps] = np.nan
        ax.plot(x_range, pos_copy, rasterized=rasterized, **position_kwargs)
        if placeFields is not None:
            # Each ROI's fields occupy a horizontal band of height
            # 1 / len(roi_list) on the normalized-position axis.
            for idx, roi in enumerate(roi_list):
                for interval in placeFields[roi]:
                    if interval[0] <= interval[1]:
                        ax.fill_between(
                            x_range, (idx / float(len(roi_list))),
                            ((idx + 1) / float(len(roi_list))),
                            where=np.logical_and(position >= interval[0],
                                                 position <= interval[1]),
                            facecolor=placeFieldColors[idx], alpha=0.5,
                            rasterized=rasterized)
                    else:
                        ax.fill_between(
                            x_range, (idx / float(len(roi_list))),
                            ((idx + 1) / float(len(roi_list))),
                            where=np.logical_or(position >= interval[0],
                                                position <= interval[1]),
                            facecolor=placeFieldColors[idx], alpha=0.5,
                            rasterized=rasterized)
        if trans_roi_filter is not None:
            ax.plot(x_range[trans_indices], position[trans_indices], 'r*',
                    markersize=trans_marker_size)
        y_ticks = ax.get_yticks()
        ax.set_yticks((y_ticks[0], y_ticks[-1]))
        ax.set_xlabel('Time(s)')
        ax.set_ylabel('Normalized position')
        ax.set_xlim((0, x_range[-1]))
    else:
        if position_kwargs is None:
            position_kwargs = {}
        # Angle encodes position; radius encodes elapsed time. Note this
        # scales the position array in place ('bd' is a fresh load or a
        # deepcopy, so the caller's array is unaffected).
        position *= 2 * np.pi
        r_range = np.linspace(0.2, 1, len(position))
        ax.plot(position, r_range, rasterized=rasterized, **position_kwargs)
        pos = np.linspace(0, 2 * np.pi, 100)
        if placeFields is not None:
            for idx, roi in enumerate(roi_list):
                for interval in placeFields[roi]:
                    interval = [x * 2 * np.pi for x in interval]
                    if interval[0] <= interval[1]:
                        ax.fill_between(
                            pos, r_range[-1],
                            where=np.logical_and(pos >= interval[0],
                                                 pos <= interval[1]),
                            facecolor=placeFieldColors[idx], alpha=0.5,
                            rasterized=rasterized)
                    else:
                        ax.fill_between(
                            pos, r_range[-1],
                            where=np.logical_or(pos >= interval[0],
                                                pos <= interval[1]),
                            facecolor=placeFieldColors[idx], alpha=0.5,
                            rasterized=rasterized)
        if trans_roi_filter is not None:
            if trans_kwargs is None:
                trans_kwargs = {}
                trans_kwargs['color'] = 'r'
                trans_kwargs['marker'] = '*'
                trans_kwargs['linestyle'] = 'None'
                trans_kwargs['markersize'] = 10
            ax.plot(position[trans_indices], r_range[trans_indices],
                    **trans_kwargs)
        x_ticks = ax.get_xticks()
        # Label angular ticks in normalized position units.
        x_tick_labels = [str(x / 2 / np.pi) for x in x_ticks]
        ax.set_xticklabels(x_tick_labels)
        y_ticks = ax.get_yticks()
        ax.set_yticks((y_ticks[0], y_ticks[-1]))
        ax.set_ylabel('Time(s)')
        ax.set_xlabel('Normalized position')
        ax.set_rmax(r_range[-1])
@memoize
def place_field_width(exptGrp, roi_filter=None, belt_length=200):
"""Calculate all place field widths.
Keyword arguments:
exptGrp -- pcExperimentGroup to analyze
belt_length -- length of the belt in cm
Output: Pandas DataFrame consisting of one value per observation of a
place field
"""
pfs_n = exptGrp.pfs_n(roi_filter=roi_filter)
data_list = []
for expt in exptGrp:
try:
belt_length = expt.belt().length()
except exc.NoBeltInfo:
print 'No belt information found for experiment {}.'.format(
str(expt))
print 'Using default belt length = {}'.format(str(belt_length))
rois = expt.rois(
roi_filter=roi_filter, channel=exptGrp.args['channel'],
label=exptGrp.args['imaging_label'])
assert len(rois) == len(pfs_n[expt])
for roi, pfs in zip(rois, pfs_n[expt]):
if not len(pfs):
continue
for pf_idx, pf in enumerate(pfs):
if pf[0] <= pf[1]:
value = (pf[1] - pf[0]) * belt_length
else:
value = (1 + pf[1] - pf[0]) * belt_length
data_dict = {'expt': expt,
'roi': roi,
'value': value}
data_list.append(data_dict)
return pd.DataFrame(data_list, columns=['expt', 'roi', 'value'])
@memoize
def population_activity(
        exptGrp, stat, roi_filter=None, interval='all', dF='from_file',
        running_only=False, non_running_only=False, running_kwargs=None):
    """Calculate population activity statistics per experiment.

    Keyword arguments:
    exptGrp -- pcExperimentGroup to analyze
    stat -- activity statistic to calculate, see calc_activity for details
    interval -- 'all' = all time, 'pf' = inside place field,
        'non pf' = outside place field; any other value is passed through
        to imaging_analysis.population_activity as the interval mask
    running_only -- If True, only include running intervals
    dF -- dF method to use on imaging data

    Output: pd.DataFrame, the concatenation of one
    imaging_analysis.population_activity result per experiment
    """
    # NOTE(review): running_kwargs is accepted but never used in this body;
    # confirm whether it should be forwarded to ia.population_activity.
    activity_dfs = []
    # NOTE(review): 'pf' in interval assumes interval is a string here;
    # a non-string interval would fail this membership test.
    if 'pf' in interval:
        pfs_n = exptGrp.pfs_n(roi_filter=roi_filter)
    for expt in exptGrp:
        (nROIs, nFrames, nTrials) = expt.imaging_shape(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        if interval == 'all':
            calc_intervals = None
        elif interval == 'pf' or interval == 'non pf':
            # Build a per-ROI boolean mask of frames spent inside (or
            # outside) that ROI's place fields.
            placeFields = pfs_n[expt]
            calc_intervals = np.zeros((nROIs, nFrames, nTrials), 'bool')
            for trial_idx, trial in enumerate(expt.findall('trial')):
                position = trial.behaviorData(
                    imageSync=True)['treadmillPosition']
                for roi_idx, roi in enumerate(placeFields):
                    for pf in roi:
                        # Fields with start > stop wrap past the belt end.
                        if pf[0] <= pf[1]:
                            calc_intervals[roi_idx, np.logical_and(
                                position >= pf[0], position <= pf[1]),
                                trial_idx] = True
                        else:
                            calc_intervals[roi_idx, np.logical_or(
                                position >= pf[0], position <= pf[1]),
                                trial_idx] = True
            if interval == 'non pf':
                calc_intervals = ~calc_intervals
        else:
            calc_intervals = interval
        activity_dfs.append(ia.population_activity(
            exptGrp.subGroup([expt]), stat=stat,
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'], roi_filter=roi_filter,
            interval=calc_intervals, dF=dF, running_only=running_only,
            non_running_only=non_running_only))
    return pd.concat(activity_dfs)
def place_field_bins(expt_grp, roi_filter=None, n_bins=None):
    """Tabulate, per ROI, which position bins each place field covers.

    Returns a DataFrame with one row ('expt', 'roi', 'bin', 'value' = 1)
    for every bin occupied by a place field; wrap-around fields
    (start > stop) contribute bins at both ends of the track.
    """
    if n_bins is None:
        n_bins = expt_grp.args['nPositionBins']
    pfs = expt_grp.pfs_n(roi_filter=roi_filter)
    rois = expt_grp.rois(
        roi_filter=roi_filter, channel=expt_grp.args['channel'],
        label=expt_grp.args['imaging_label'])

    def _add_rows(rows, expt, roi, lo, hi):
        # One row per occupied bin in [lo, hi). 'pos_bin' avoids shadowing
        # the builtin 'bin'.
        for pos_bin in range(lo, hi):
            rows.append(
                {'expt': expt, 'roi': roi, 'bin': pos_bin, 'value': 1})

    rows = []
    for expt in expt_grp:
        assert len(rois[expt]) == len(pfs[expt])
        for roi, roi_pfs in zip(rois[expt], pfs[expt]):
            for start, stop in roi_pfs:
                if start < stop:
                    _add_rows(rows, expt, roi, int(start * n_bins),
                              int(stop * n_bins) + 1)
                else:
                    # Wrap-around field: [0, stop] plus [start, track end].
                    _add_rows(rows, expt, roi, 0, int(stop * n_bins) + 1)
                    _add_rows(rows, expt, roi, int(start * n_bins), n_bins)
    return pd.DataFrame(rows, columns=['expt', 'roi', 'bin', 'value'])
def place_field_distribution(
        exptGrp, roi_filter=None, ax=None, normed=False, showBelt=False,
        nBins=None, label=None, color=None):
    """Calculate (and optionally plot) the density of place fields on the belt.

    Counts, per position bin, how many place fields cover that bin across
    all experiments in the group. If `normed` is True the counts are
    converted to a density summing to 1. If `ax` is given, the resulting
    curve is plotted against normalized position (0-1).

    Returns the per-bin count (or density) as a numpy array of length nBins.
    """
    if showBelt:
        # Draw the belt schematic under the curve (requires ax).
        belt = exptGrp[0].belt()
        belt.addToAxis(ax)
    if nBins is None:
        nBins = exptGrp.args['nPositionBins']
    nBins = int(nBins)
    result = np.zeros(nBins, 'int')
    for expt in exptGrp:
        # pfs_n returns normalized (0-1) place-field intervals per ROI.
        for roi in exptGrp.pfs_n(roi_filter=roi_filter)[expt]:
            for field in roi:
                if field[0] <= field[1]:
                    result[int(field[0] * nBins):int(field[1] * nBins + 1)] \
                        += 1
                else:
                    # Wrap-around field: increment both belt-end segments.
                    result[int(field[0] * nBins):] += 1
                    result[:int(field[1] * nBins + 1)] += 1
    if normed:
        result = result.astype('float') / float(np.sum(result))
    if ax:
        if color is None:
            color = lab.plotting.color_cycle().next()
        ax.plot(np.linspace(0, 1, nBins), result, label=label, color=color)
        ax.set_xlabel('Position')
        ax.set_title('Place field distribution')
        if normed:
            ax.set_ylabel('Normalized place field density')
        else:
            ax.set_ylabel('Number of place fields')
        ax.legend(frameon=False, loc='best')
    return result
@memoize
def recurrence_probability(exptGrp, roi_filter=None, circ_var_pcs=False):
    """Calculate the probability of place field recurrence on
    subsequent days.

    For each imaged experiment pair, the observed recurrence is the fraction
    of day-1 place cells (restricted to ROIs shared across both days) that
    are also place cells on day 2. The chance level for the same pair is the
    fraction of all shared ROIs that are place cells on day 2.

    Keyword arguments:
    exptGrp -- pcExperimentGroup of experiments to analyze
    circ_var_pcs -- If True, uses circular variance method for identifying
        place cells.

    Returns a (data_df, shuffle_df) tuple of DataFrames, one row per
    experiment pair; shuffle_df holds the chance-level values.

    Note: roi_filter filters the rois on the first day, but not the second
    """
    data_list = []
    shuffle_list = []
    for e1, e2 in exptGrp.genImagedExptPairs():
        td = e2 - e1  # elapsed time between the two experiments
        shared_rois = exptGrp.subGroup([e1, e2]).sharedROIs(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        shared_rois = set(shared_rois)
        shared_filter = lambda x: x.id in shared_rois
        if len(shared_rois) == 0:
            continue
        shared_pcs = exptGrp.pcs_filter(
            roi_filter=shared_filter, circ_var=circ_var_pcs)
        # Day-1 place cells are additionally restricted by the caller's
        # roi_filter; day-2 place cells are not (see docstring note).
        shared_pcs1 = set(e1.roi_ids(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'],
            roi_filter=misc.filter_intersection([shared_pcs, roi_filter])))
        shared_pcs2 = set(e2.roi_ids(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'], roi_filter=shared_pcs))
        # The shuffle probability that a pc on day 1 is also a pc on day 2 is
        # just equal to the probability that a cell is a pc on day 2
        if len(shared_pcs1) < 1:
            continue
        num_recur = len(shared_pcs1.intersection(shared_pcs2))
        num_pcs_first_expt = len(shared_pcs1)
        num_pcs_second_expt = len(shared_pcs2)
        # num_recur_shuffle = len(shared_pcs2)
        # num_start_shuffle = len(shared_rois)
        num_shared_rois = len(shared_rois)
        data_dict = {'num_recur': num_recur,
                     'num_pcs_first_expt': num_pcs_first_expt,
                     'num_pcs_second_expt': num_pcs_second_expt,
                     'value': float(num_recur) / num_pcs_first_expt,
                     'time_diff': td,
                     'first_expt': e1,
                     'second_expt': e2}
        data_list.append(data_dict)
        shuffle_dict = {'num_recur': num_pcs_second_expt,
                        'num_shared_rois': num_shared_rois,
                        'value': float(num_pcs_second_expt) / num_shared_rois,
                        'time_diff': td,
                        'first_expt': e1,
                        'second_expt': e2}
        shuffle_list.append(shuffle_dict)
    return (
        pd.DataFrame(data_list, columns=[
            'num_recur', 'num_pcs_first_expt', 'num_pcs_second_expt', 'value',
            'time_diff', 'first_expt', 'second_expt']),
        pd.DataFrame(shuffle_list, columns=[
            'num_recur', 'num_shared_rois', 'value', 'time_diff', 'first_expt',
            'second_expt']))
@memoize
def recurrence_above_chance(expt_grp, roi_filter=None, circ_var_pcs=False):
    """Recurrence probability relative to chance -- unimplemented stub.

    Intended to normalize the output of ``recurrence_probability`` by its
    per-pair shuffle (chance) distribution.

    Raises
    ------
    NotImplementedError
        Always. NotImplementedError subclasses Exception, so any caller
        that caught the previous generic ``Exception('Code incomplete')``
        still works.
    """
    raise NotImplementedError('Code incomplete')
@memoize
def circular_variance(exptGrp, roi_filter=None, min_transients=0):
    """Build a DataFrame of per-ROI circular variance values.

    When min_transients > 0, only ROIs with strictly more than that many
    running transients are included.
    """
    cv_by_expt = exptGrp.circular_variance(roi_filter=roi_filter)
    transient_counts = {}
    if min_transients:
        for expt in exptGrp:
            transient_counts[expt] = calc_activity(
                expt, 'n transients', roi_filter=roi_filter,
                interval='running',
                running_kwargs=exptGrp.running_kwargs())[:, 0]
    rows = []
    for expt in exptGrp:
        expt_rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        assert len(expt_rois) == len(cv_by_expt[expt])
        if min_transients:
            rows.extend(
                {'expt': expt, 'roi': roi, 'value': var}
                for roi, n, var in it.izip(
                    expt_rois, transient_counts[expt], cv_by_expt[expt])
                if n > min_transients)
        else:
            rows.extend(
                {'expt': expt, 'roi': roi, 'value': var}
                for roi, var in it.izip(expt_rois, cv_by_expt[expt]))
    return pd.DataFrame(rows, columns=['expt', 'roi', 'value'])
@memoize
def circular_variance_p(exptGrp, roi_filter=None):
    """Per-ROI circular variance p-values as a long-format DataFrame."""
    p_by_expt = exptGrp.circular_variance_p(roi_filter=roi_filter)
    rows = []
    for expt in exptGrp:
        expt_rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        assert len(expt_rois) == len(p_by_expt[expt])
        rows.extend({'expt': expt, 'roi': roi, 'value': p}
                    for roi, p in it.izip(expt_rois, p_by_expt[expt]))
    return pd.DataFrame(rows, columns=['expt', 'roi', 'value'])
@memoize
def spatial_information_p(exptGrp, roi_filter=None):
    """Per-ROI spatial information p-values as a long-format DataFrame."""
    p_by_expt = exptGrp.spatial_information_p(
        roi_filter=roi_filter)
    rows = []
    for expt in exptGrp:
        expt_rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        assert len(expt_rois) == len(p_by_expt[expt])
        rows.extend({'expt': expt, 'roi': roi, 'value': p}
                    for roi, p in it.izip(expt_rois, p_by_expt[expt]))
    return pd.DataFrame(rows, columns=['expt', 'roi', 'value'])
@memoize
def spatial_information(exptGrp, roi_filter=None):
    """Per-ROI spatial information values as a long-format DataFrame."""
    si_by_expt = exptGrp.spatial_information(
        roi_filter=roi_filter)
    rows = []
    for expt in exptGrp:
        expt_rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        assert len(expt_rois) == len(si_by_expt[expt])
        rows.extend({'expt': expt, 'roi': roi, 'value': si}
                    for roi, si in it.izip(expt_rois, si_by_expt[expt]))
    return pd.DataFrame(rows, columns=['expt', 'roi', 'value'])
@memoize
def calcCentroids(data, pfs, returnAll=False, return_pfs=False):
    """Compute the center of mass of each place field's tuning curve.

    Input:
        data (df/f by position)
        pfs (not-normalized place field output from identifyPlaceFields)
        returnAll
            -False --> for cells with >1 place field, take the one with bigger
                peak
            -True --> return centroids for each place field ordered by peak
                df/f in the pf
        return_pfs : If True, returns pfs matching centroids
    Output:
        centroids: nROIs length list, each element is either an empty list or a
            list containing the bin (not rounded) of the centroid
    """
    # if np.all(np.array(list(flatten(pfs)))<1):
    #     raise TypeError('Invalid argument, must pass in non-normalized pfs')
    centroids = [[] for x in range(len(pfs))]
    pfs_out = [[] for x in range(len(pfs))]
    # peaks_out = [[] for x in range(len(pfs))]
    for pfIdx, roi, pfList in it.izip(it.count(), data, pfs):
        if len(pfList) > 0:
            peaks = []
            roi_centroids = []
            for pf in pfList:
                if pf[0] < pf[1]:
                    # Simple field: contiguous slice of the tuning curve.
                    pf_data = roi[pf[0]:pf[1] + 1]
                    peaks.append(np.amax(pf_data))
                    # Weighted mean of bin index, offset back to track coords.
                    roi_centroids.append(pf[0] + np.sum(
                        pf_data * np.arange(len(pf_data))) / np.sum(pf_data))
                else:
                    # Wrap-around field: stitch the two belt-end segments
                    # together, then wrap the centroid back with modulo.
                    pf_data = np.hstack([roi[pf[0]:], roi[:pf[1] + 1]])
                    peaks.append(np.amax(pf_data))
                    roi_centroids.append((pf[0] + np.sum(
                        pf_data * np.arange(len(pf_data))) / np.sum(pf_data))
                        % data.shape[1])
            # sort the pf peaks in descending order
            order = np.argsort(peaks)[::-1]
            if returnAll:
                centroids[pfIdx] = [roi_centroids[x] for x in order]
                pfs_out[pfIdx] = [pfList[x] for x in order]
                # peaks_out[pfIdx] = [peaks[x] for x in order]
            else:
                # Keep only the field with the largest peak df/f.
                centroids[pfIdx] = [roi_centroids[order[0]]]
                pfs_out[pfIdx] = [pfList[order[0]]]
                # peaks_out[pfIdx] = [peaks[order[0]]]
            assert not np.any(np.isnan(centroids[pfIdx]))
    if return_pfs:
        return centroids, pfs_out  # , peaks_out
    return centroids
@memoize
def calc_activity_centroids(exptGrp, roi_filter=None):
    """Complex-valued activity centroid of every ROI's tuning curve.

    Position bins are mapped onto angles around the circle and the raw
    tuning curve is averaged as a complex vector; non-finite bins are
    excluded from the mean.

    Output:
        dict mapping expt -> list of complex centroids, one per ROI
    """
    angles = 2 * np.pi * np.arange(0, 1, 1. / exptGrp.args['nPositionBins'])
    centroids = {}
    for expt in exptGrp:
        per_roi = []
        for curve in exptGrp.data_raw(roi_filter=roi_filter)[expt]:
            valid = np.where(np.isfinite(curve))[0]
            per_roi.append(_complex_mean(angles[valid], curve[valid]))
        centroids[expt] = per_roi
    return centroids
@memoize
def activity_centroid(exptGrp, roi_filter=None):
    """Per-ROI activity centroids as a DataFrame.

    The 'value' column holds the complex centroid; 'angle' and 'length'
    are its polar decomposition.
    """
    centroids = calc_activity_centroids(exptGrp, roi_filter=roi_filter)
    rows = []
    for expt in exptGrp:
        expt_rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        assert len(expt_rois) == len(centroids[expt])
        for roi, centroid in zip(expt_rois, centroids[expt]):
            rows.append({'roi': roi, 'expt': expt, 'value': centroid})
    df = pd.DataFrame(rows, columns=['expt', 'roi', 'value'])
    df['angle'] = df['value'].apply(np.angle)
    df['length'] = df['value'].apply(np.absolute)
    return df
@memoize
def centroid_shift(exptGrp, roi_filter=None, return_abs=False, shuffle=True):
    """Calculate the shift of place field centers over days

    Determines the center by calculating the center of 'mass' of the calcium
    signal. ROIs with more than one place field are excluded. Shifts are
    expressed as a fraction of the belt in (-0.5, 0.5].

    Keyword arguments:
    exptGrp -- pcExperimentGroup of experiments to analyze
    return_abs -- If True, return absolute value of centroid shift
    shuffle -- If True, also build a shuffle distribution by pairing
        centroids of non-matching ROIs across the same experiment pair

    Returns a (data_df, shuffle_df) tuple; shuffle_df is None when
    shuffle is False.
    """
    N_SHUFFLES = 10000
    nBins = exptGrp.args['nPositionBins']
    pcs_filter = exptGrp.pcs_filter(roi_filter=roi_filter)
    pfs = exptGrp.pfs(roi_filter=roi_filter)
    data = exptGrp.data(roi_filter=roi_filter)
    # Fast lookup tables: ROI object by id, and id order per experiment.
    rois_by_id = {
        expt: {roi.id: roi for roi in expt.rois(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])} for expt in exptGrp}
    all_roi_ids = exptGrp.roi_ids(
        channel=exptGrp.args['channel'], label=exptGrp.args['imaging_label'],
        roi_filter=roi_filter)
    data_list = []
    shuffle_dicts = []
    centroids = {}
    for expt in exptGrp:
        # Calculate centroids and then discard all placeFields for an ROI
        # that has more than 1
        centroids[expt] = calcCentroids(data[expt], pfs[expt])
        centroids[expt] = [centroids[expt][idx] if len(pfs[expt][idx])
                           <= 1 else [] for idx in range(len(centroids[expt]))]
    for (e1, e2) in exptGrp.genImagedExptPairs():
        shared_pcs = exptGrp.subGroup([e1, e2]).sharedROIs(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'], roi_filter=pcs_filter)
        if len(shared_pcs) == 0:
            continue
        e1_pairs, e2_pairs = [], []
        for pc in shared_pcs:
            centroid1 = centroids[e1][all_roi_ids[e1].index(pc)]
            centroid2 = centroids[e2][all_roi_ids[e2].index(pc)]
            # Skip ROIs that had no (or multiple) place fields on either day.
            if not len(centroid1) or not len(centroid2):
                continue
            first_roi = rois_by_id[e1][pc]
            second_roi = rois_by_id[e2][pc]
            # Normalized shift, wrapped into (-0.5, 0.5].
            shift = (centroid2[0] - centroid1[0]) / nBins
            shift = shift - 1 if shift > 0.5 else \
                shift + 1 if shift <= -0.5 else shift
            data_dict = {'value': shift,
                         'first_expt': e1,
                         'second_expt': e2,
                         'first_roi': first_roi,
                         'second_roi': second_roi}
            data_list.append(data_dict)
            if shuffle:
                e1_pairs.append((first_roi, centroid1[0]))
                e2_pairs.append((second_roi, centroid2[0]))
        if shuffle:
            # All cross-ROI pairings within this experiment pair form the
            # shuffle pool.
            shuffle_dicts.extend([
                {'expts': (e1, e2), 'rois': (r1, r2), 'data': (d1, d2)}
                for (r1, d1), (r2, d2) in it.product(e1_pairs, e2_pairs)])
    data_df = pd.DataFrame(data_list, columns=[
        'first_expt', 'second_expt', 'first_roi', 'second_roi', 'value'])
    if shuffle:
        # Subsample the shuffle pool to cap computation.
        if len(shuffle_dicts) < N_SHUFFLES:
            shuffler = shuffle_dicts
        else:
            shuffler = sample(shuffle_dicts, N_SHUFFLES)
        shuffle_list = []
        for pair in shuffler:
            shift = (pair['data'][1] - pair['data'][0]) / nBins
            shift = shift - 1 if shift > 0.5 else \
                shift + 1 if shift <= -0.5 else shift
            shuffle_dict = {'value': shift,
                            'first_expt': pair['expts'][0],
                            'second_expt': pair['expts'][1],
                            'first_roi': pair['rois'][0],
                            'second_roi': pair['rois'][1]}
            shuffle_list.append(shuffle_dict)
        shuffle_df = pd.DataFrame(shuffle_list, columns=[
            'first_expt', 'second_expt', 'first_roi', 'second_roi', 'value'])
    else:
        shuffle_df = None
    if return_abs:
        data_df['value'] = data_df['value'].abs()
        if shuffle:
            shuffle_df['value'] = shuffle_df['value'].abs()
    return data_df, shuffle_df
@memoize
def activity_centroid_shift(
        exptGrp, roi_filter=None, activity_filter='pc_either',
        circ_var_pcs=True, shuffle=True, units='rad'):
    """Calculate the angle between activity centroids.

    Parameters
    ----------
    activity_filter : {'pc_either', 'pc_both', 'active_either', 'active_both'}
        Determines which cells to include on a per-expt-pair basis. May also
        be a callable roi_filter or None.
    circ_var_pcs : bool
        If True, use circular variance place cell threshold instead of spatial
        information.
    shuffle : bool
        If True, calculate shuffle distributions by pairing centroids of
        non-matching ROIs within each experiment pair.
    units : {'rad', 'norm', 'cm'}
        Determine the units of the returned result.

    Returns
    -------
    data_df : pandas.DataFrame
    shuffle_df : {pandas.DataFrame, None}
    """
    if units not in ['rad', 'norm', 'cm']:
        raise ValueError("Unrecognized 'units' parameter value.")

    def dotproduct(v1, v2):
        return sum((a * b) for a, b in zip(v1, v2))

    def length(v):
        return math.sqrt(dotproduct(v, v))

    def angle(v1, v2):
        # Round to keep the acos argument inside [-1, 1] despite floating
        # point noise.
        return math.acos(
            np.round(dotproduct(v1, v2) / (length(v1) * length(v2)), 3))

    N_SHUFFLES = 10000
    data_list = []
    shuffle_dicts = []
    # Pre-calc and store things for speed. Centroids are computed without
    # the roi_filter so indexing via all_roi_ids (also unfiltered) stays
    # aligned.
    centroids = calc_activity_centroids(exptGrp, roi_filter=None)
    # Guard with callable() so that passing a filter function does not crash
    # the string membership tests (matches population_vector_correlation).
    if activity_filter is not None and not callable(activity_filter):
        if 'pc' in activity_filter:
            pcs_filter = exptGrp.pcs_filter(circ_var=circ_var_pcs)
        elif 'active' in activity_filter:
            active_filter = filters.active_roi_filter(
                exptGrp, min_transients=1, channel=exptGrp.args['channel'],
                label=exptGrp.args['imaging_label'], roi_filter=roi_filter)
    rois_by_id = {
        expt: {roi.id: roi for roi in expt.rois(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])} for expt in exptGrp}
    all_roi_ids = exptGrp.roi_ids(
        channel=exptGrp.args['channel'], label=exptGrp.args['imaging_label'],
        roi_filter=None)
    for (e1, e2) in exptGrp.genImagedExptPairs():
        e1_pairs, e2_pairs = [], []
        shared_rois = exptGrp.subGroup([e1, e2]).sharedROIs(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'], roi_filter=roi_filter)
        if activity_filter == 'pc_either':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_pcs).union(e2_pcs)))
        elif activity_filter == 'pc_both':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                e1_pcs).intersection(e2_pcs))
        elif activity_filter == 'active_either':
            e1_active = e1.roi_ids(roi_filter=active_filter)
            e2_active = e2.roi_ids(roi_filter=active_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_active).union(e2_active)))
        elif activity_filter == 'active_both':
            e1_active = e1.roi_ids(roi_filter=active_filter)
            e2_active = e2.roi_ids(roi_filter=active_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_active).intersection(e2_active)))
        elif activity_filter:
            # Custom callable filter: keep shared ROIs passing it on either
            # experiment.
            e1_rois = e1.roi_ids(roi_filter=activity_filter)
            e2_rois = e2.roi_ids(roi_filter=activity_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_rois).union(set(e2_rois))))
        else:
            roi_ids = shared_rois
        for roi_id in roi_ids:
            c1 = centroids[e1][all_roi_ids[e1].index(roi_id)]
            c2 = centroids[e2][all_roi_ids[e2].index(roi_id)]
            roi1 = rois_by_id[e1][roi_id]
            roi2 = rois_by_id[e2][roi_id]
            value = angle([c1.real, c1.imag], [c2.real, c2.imag])
            if units == 'norm' or units == 'cm':
                value /= 2 * np.pi
            if units == 'cm':
                belt_length = np.mean([
                    e1.belt().length(units='cm'),
                    e2.belt().length(units='cm')])
                value *= belt_length
            data_dict = {'value': value,
                         'first_expt': e1,
                         'second_expt': e2,
                         'first_roi': roi1,
                         'second_roi': roi2,
                         'first_centroid': c1,
                         'second_centroid': c2}
            data_list.append(data_dict)
            if shuffle:
                e1_pairs.append((roi1, c1))
                e2_pairs.append((roi2, c2))
        if shuffle:
            shuffle_dicts.extend([
                {'expts': (e1, e2), 'rois': (r1, r2), 'data': (d1, d2)}
                for (r1, d1), (r2, d2) in it.product(e1_pairs, e2_pairs)])
    if shuffle:
        if len(shuffle_dicts) < N_SHUFFLES:
            shuffler = shuffle_dicts
        else:
            shuffler = sample(shuffle_dicts, N_SHUFFLES)
        shuffle_list = []
        for pair in shuffler:
            value = angle([pair['data'][0].real, pair['data'][0].imag],
                          [pair['data'][1].real, pair['data'][1].imag])
            if units == 'norm' or units == 'cm':
                value /= 2 * np.pi
            if units == 'cm':
                # BUGFIX: use the experiments belonging to *this* shuffle
                # pair; the previous code referenced e1/e2 left over from the
                # final iteration of the experiment-pair loop above.
                belt_length = np.mean([
                    pair['expts'][0].belt().length(units='cm'),
                    pair['expts'][1].belt().length(units='cm')])
                value *= belt_length
            shuffle_dict = {'value': value,
                            'first_expt': pair['expts'][0],
                            'second_expt': pair['expts'][1],
                            'first_roi': pair['rois'][0],
                            'second_roi': pair['rois'][1],
                            'first_centroid': pair['data'][0],
                            'second_centroid': pair['data'][1]}
            shuffle_list.append(shuffle_dict)
    data_df = pd.DataFrame(data_list, columns=[
        'first_expt', 'second_expt', 'first_roi', 'second_roi',
        'first_centroid', 'second_centroid', 'value'])
    shuffle_df = pd.DataFrame(shuffle_list, columns=[
        'first_expt', 'second_expt', 'first_roi', 'second_roi',
        'first_centroid', 'second_centroid', 'value']) \
        if shuffle else None
    return data_df, shuffle_df
@memoize
def sparsity(exptGrp, roi_filter=None):
    """Calculate single-cell sparsity index (equivalently
    'lifetime sparseness') as defined in Ahmed and Mehta (Trends in
    Neuroscience, 2009)

    Computed per ROI as (mean tuning)^2 / mean(tuning^2) over position bins.
    """
    tuning = exptGrp.data(roi_filter=roi_filter)
    n_bins = float(exptGrp.args['nPositionBins'])
    rows = []
    for expt in exptGrp:
        expt_rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        assert len(expt_rois) == len(tuning[expt])
        for roi, curve in zip(expt_rois, tuning[expt]):
            numerator = (np.sum(curve) / n_bins) ** 2
            denominator = np.sum(np.square(curve)) / n_bins
            rows.append({'expt': expt,
                         'roi': roi,
                         'value': numerator / denominator})
    return pd.DataFrame(rows, columns=['expt', 'roi', 'value'])
@memoize
def place_field_correlation(
        exptGrp, roi_filter=None, activity_filter='pc_either', shuffle=True):
    """Calculate the correlation in spatial tuning over time.

    For each pair of experiments in the experiment group, find common place
    cells and then calculate the correlation in spatial tuning curves between
    the two experiments, one value per shared cell.

    Keyword arguments:
    activity_filter -- determines how to filter the cells for each expt pair.
        Can be None, 'pc_either', 'pc_both', 'pc_first', 'pc_second', or a
        callable roi_filter.
    shuffle -- if True, also build a shuffle distribution from cross-ROI
        pairings within each experiment pair.

    Returns a (data_df, shuffle_df) tuple; shuffle_df is None when shuffle
    is False.
    """
    N_SHUFFLES = 10000
    data = exptGrp.data(roi_filter=roi_filter)
    pcs_filter = exptGrp.pcs_filter(roi_filter=roi_filter)
    # Fast lookup tables: ROI object by id, and id order per experiment.
    rois_by_id = {
        expt: {roi.id: roi for roi in expt.rois(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])} for expt in exptGrp}
    all_roi_ids = exptGrp.roi_ids(
        channel=exptGrp.args['channel'], label=exptGrp.args['imaging_label'],
        roi_filter=roi_filter)
    data_list = []
    shuffle_dicts = []
    for e1, e2 in exptGrp.genImagedExptPairs():
        shared_rois = exptGrp.subGroup([e1, e2]).sharedROIs(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'],
            demixed=exptGrp.args['demixed'], roi_filter=roi_filter)
        if activity_filter is None:
            roi_ids = shared_rois
        elif activity_filter == 'pc_either':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_pcs).union(e2_pcs)))
        elif activity_filter == 'pc_both':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_pcs).intersection(e2_pcs)))
        elif activity_filter == 'pc_first':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(set(e1_pcs)))
        elif activity_filter == 'pc_second':
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(set(e2_pcs)))
        else:
            # Assume a callable roi_filter; anything else is rejected.
            try:
                e1_rois = e1.roi_ids(label=exptGrp.args['imaging_label'],
                                     roi_filter=activity_filter)
                e2_rois = e2.roi_ids(label=exptGrp.args['imaging_label'],
                                     roi_filter=activity_filter)
                roi_ids = list(set(shared_rois).intersection(
                    set(e1_rois).union(set(e2_rois))))
            # Previously a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                raise ValueError('Unrecognized activity filter')
        if len(roi_ids) == 0:
            continue
        e1_pairs, e2_pairs = [], []
        for roi in roi_ids:
            tuning1 = data[e1][all_roi_ids[e1].index(roi)]
            tuning2 = data[e2][all_roi_ids[e2].index(roi)]
            tuning_corr = np.corrcoef(tuning1, tuning2)[0, 1]
            first_roi = rois_by_id[e1][roi]
            second_roi = rois_by_id[e2][roi]
            data_dict = {'value': tuning_corr,
                         'first_expt': e1,
                         'second_expt': e2,
                         'first_roi': first_roi,
                         'second_roi': second_roi}
            data_list.append(data_dict)
            if shuffle:
                e1_pairs.append((first_roi, tuning1))
                e2_pairs.append((second_roi, tuning2))
        if shuffle:
            shuffle_dicts.extend(
                [{'expts': (e1, e2), 'rois': (r1, r2), 'data': (d1, d2)}
                 for (r1, d1), (r2, d2) in it.product(e1_pairs, e2_pairs)])
    if shuffle:
        # Subsample the shuffle pool to cap computation.
        if len(shuffle_dicts) < N_SHUFFLES:
            shuffler = shuffle_dicts
        else:
            shuffler = sample(shuffle_dicts, N_SHUFFLES)
        shuffle_list = []
        for pair in shuffler:
            tuning_corr = np.corrcoef(*pair['data'])[0, 1]
            shuffle_dict = {'value': tuning_corr,
                            'first_expt': pair['expts'][0],
                            'second_expt': pair['expts'][1],
                            'first_roi': pair['rois'][0],
                            'second_roi': pair['rois'][1]}
            shuffle_list.append(shuffle_dict)
    data_df = pd.DataFrame(data_list, columns=[
        'first_expt', 'second_expt', 'first_roi', 'second_roi', 'value'])
    shuffle_df = pd.DataFrame(shuffle_list, columns=[
        'first_expt', 'second_expt', 'first_roi', 'second_roi', 'value']) \
        if shuffle else None
    return data_df, shuffle_df
@memoize
def rank_order_correlation(
        exptGrp, roi_filter=None, method='centroids', min_shared_rois=1,
        shuffle=True, return_abs=False):
    """Calculate the rank order correlation between pairs of experiments.

    For each pair of experiments in the experiment group, find common place
    cells and then calculate the Spearman rank order correlation in the order
    of their place fields. The circular order is tested at every rotation and
    the best (lowest p) is kept, so a significant p-value will be returned if
    the order is either preserved or reversed.

    Keyword arguments:
    method -- 'centroids' (place field center of mass) or 'tuning_vectors'
        (angle of the complex activity centroid)
    min_shared_rois -- skip pairs with fewer usable shared place cells
    return_abs -- if True, report |rho| instead of rho
    """
    N_SHUFFLES = 10000
    if method == 'centroids':
        pcs_filter = exptGrp.pcs_filter(roi_filter=roi_filter)
        data = exptGrp.data(roi_filter=pcs_filter)
        pfs = exptGrp.pfs(roi_filter=pcs_filter)
        centroids = {}
        for expt in exptGrp:
            # Calculate centroids and then discard all placeFields for an ROI
            # that has more than 1
            centroids[expt] = calcCentroids(
                data[expt], pfs[expt], returnAll=True)
            centroids[expt] = [
                centroids[expt][idx] if len(pfs[expt][idx]) <= 1 else []
                for idx in range(len(centroids[expt]))]
    elif method == 'tuning_vectors':
        pcs_filter = exptGrp.pcs_filter(roi_filter=roi_filter, circ_var=True)
        centroids = calc_activity_centroids(exptGrp, roi_filter=pcs_filter)
        # Convert complex centroids to normalized (0-1) positions.
        for key in centroids.iterkeys():
            complex_angles = centroids[key]
            centroids[key] = (np.angle(complex_angles) % (2 * np.pi)) / \
                (2 * np.pi)
            centroids[key] = [[x] for x in centroids[key]]
    else:
        # BUGFIX: was "raise('Not a valid method')", i.e. raising a plain
        # string, which itself raises a TypeError rather than reporting the
        # bad argument.
        raise ValueError('Not a valid method')
    all_roi_ids = exptGrp.roi_ids(
        channel=exptGrp.args['channel'], label=exptGrp.args['imaging_label'],
        roi_filter=pcs_filter)
    data_list = []
    for e1, e2 in exptGrp.genImagedExptPairs():
        shared_pcs = exptGrp.subGroup([e1, e2]).sharedROIs(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'], roi_filter=pcs_filter)
        # Iterate over a reversed copy so removal during iteration is safe.
        for pc in shared_pcs[::-1]:
            if not len(centroids[e1][all_roi_ids[e1].index(pc)]) or \
                    not len(centroids[e2][all_roi_ids[e2].index(pc)]):
                shared_pcs.remove(pc)
        if len(shared_pcs) < min_shared_rois:
            continue
        centroids1 = []
        centroids2 = []
        for pc in shared_pcs:
            c1 = centroids[e1][all_roi_ids[e1].index(pc)]
            c2 = centroids[e2][all_roi_ids[e2].index(pc)]
            centroids1.append(c1[0])
            centroids2.append(c2[0])
        template_order = np.argsort(np.array(centroids1))
        target_order = np.argsort(np.array(centroids2))
        template_order_ids = [shared_pcs[x] for x in template_order]
        target_order_ids = [shared_pcs[x] for x in target_order]
        template = np.arange(len(centroids1))
        target = np.array(
            [template_order_ids.index(x) for x in target_order_ids])
        # Test every circular rotation of the target ordering; keep the best.
        p_val = 1.
        rho = 0.
        for shift in xrange(len(template)):
            r, p = spearmanr(template, np.roll(target, shift))
            if p < p_val:
                p_val = p
                rho = r
        data_list.append({'first_expt': e1,
                          'second_expt': e2,
                          'value': rho if not return_abs else np.abs(rho),
                          'p': p_val,
                          'n_shared_rois': len(shared_pcs)})
    return pd.DataFrame(data_list, columns=[
        'first_expt', 'second_expt', 'value', 'p', 'n_shared_rois'])
@memoize
def place_cell_percentage(exptGrp, roi_filter=None, circ_var=False):
    """Calculate the fraction of filtered ROIs classified as place cells,
    one row per experiment."""
    pcs_filter = exptGrp.pcs_filter(roi_filter=roi_filter, circ_var=circ_var)
    rows = []
    for expt in exptGrp:
        channel = exptGrp.args['channel']
        label = exptGrp.args['imaging_label']
        n_rois = len(expt.rois(
            roi_filter=roi_filter, channel=channel, label=label))
        n_pcs = len(expt.rois(
            roi_filter=pcs_filter, channel=channel, label=label))
        # NaN rather than ZeroDivisionError when no ROIs pass the filter.
        fraction = np.nan if n_rois == 0 else float(n_pcs) / n_rois
        rows.append({'expt': expt, 'value': fraction})
    return pd.DataFrame(rows, columns=['expt', 'value'])
@memoize
def n_place_fields(
        exptGrp, roi_filter=None, per_mouse_fractions=False,
        max_n_place_fields=None):
    """Calculate the number of place fields for each place cell.

    ROIs with zero place fields are omitted.

    Keyword arguments:
    per_mouse_fractions -- if True, instead return per-mouse fractions of
        cells having each place field count.
    max_n_place_fields -- with per_mouse_fractions, lump counts >= this
        value into a single '<max>+' category.
    """
    data_list = []
    pfs = exptGrp.pfs(roi_filter=roi_filter)
    for expt in exptGrp:
        rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        assert len(rois) == len(pfs[expt])
        for roi, pc in zip(rois, pfs[expt]):
            n_place_fields = len(pc)
            if n_place_fields:
                data_dict = {'expt': expt,
                             'roi': roi,
                             'value': n_place_fields}
                data_list.append(data_dict)
    result = pd.DataFrame(data_list, columns=['expt', 'roi', 'value'])
    if per_mouse_fractions:
        new_data_list = []
        plotting.prepare_dataframe(result, include_columns=['mouse'])
        for mouse, mouse_df in result.groupby('mouse'):
            n_total_rois = len(mouse_df)
            mouse_counts = mouse_df.groupby('value', as_index=False).count()
            for n_pfs in np.arange(mouse_counts['value'].max()) + 1:
                if max_n_place_fields is None or n_pfs < max_n_place_fields:
                    n_rois = mouse_counts.ix[
                        mouse_counts['value'] == n_pfs, 'mouse'].sum()
                    number = n_pfs
                elif n_pfs == max_n_place_fields:
                    # Collapse all counts at/above the cap into one bucket.
                    n_rois = mouse_counts.ix[
                        mouse_counts['value'] >= max_n_place_fields,
                        'mouse'].sum()
                    number = str(n_pfs) + '+'
                else:
                    break
                # BUGFIX: store the computed 'number' (which carries the '+'
                # cap label); previously n_pfs was stored, so the
                # max_n_place_fields labeling never appeared in the output.
                data_dict = {'mouse': mouse,
                             'number': number,
                             'n_rois': n_rois,
                             'n_total_rois': n_total_rois,
                             'value': n_rois / float(n_total_rois)}
                new_data_list.append(data_dict)
        result = pd.DataFrame(
            new_data_list, columns=[
                'mouse', 'number', 'n_rois', 'n_total_rois', 'value'])
    return result
def n_sessions_place_cell(
        exptGrp, roi_filter=None, ax=None, title_visible=True,
        minimum_observations=0, plotShuffle=True, color=None):
    """For each tracked cell, count the sessions in which it was a place cell.

    Optionally plots the histogram of counts on `ax` along with a chance
    distribution derived from per-session place cell fractions (Poisson
    binomial). Returns the list of per-cell session counts.

    NOTE(review): `minimum_observations` is accepted but not used in the
    body below -- confirm against callers.
    """
    pcs_filter = exptGrp.pcs_filter(roi_filter=roi_filter)
    pfs = exptGrp.pfs()
    # Per-experiment place cell fraction; used as the per-trial success
    # probability of the chance (Poisson binomial) distribution.
    placeCellPercentages = {}
    for e in exptGrp:
        nPCs = len(e.rois(roi_filter=pcs_filter))
        nCells = len(e.rois(roi_filter=roi_filter))
        placeCellPercentages[e] = float(nPCs) / nCells
    allROIs = exptGrp.allROIs(channel=exptGrp.args['channel'],
                              label=exptGrp.args['imaging_label'],
                              roi_filter=roi_filter)
    nSessionsPlaceCell = []
    shuffles = []
    for roi in allROIs.itervalues():
        shuffle_probabilities = []
        nSessions = 0
        for (expt, roi_idx) in roi:
            # A non-empty place field list marks this session as "place cell".
            if len(pfs[expt][roi_idx]):
                nSessions += 1
            shuffle_probabilities.append(placeCellPercentages[expt])
        nSessionsPlaceCell.append(nSessions)
        shuffles.append(
            stats.poisson_binomial_distribution(shuffle_probabilities))
    # Pad the per-cell chance distributions to a common length with NaN,
    # then average across cells.
    shuffle_dist = np.empty(
        (len(allROIs), np.amax([len(x) for x in shuffles])))
    shuffle_dist.fill(np.nan)
    for x, dist in zip(shuffles, shuffle_dist):
        dist[:len(x)] = x
    shuffle_dist = nanmean(shuffle_dist, axis=0)
    if ax:
        if color is None:
            color = lab.plotting.color_cycle().next()
        if len(nSessionsPlaceCell):
            plotting.histogram(
                ax, nSessionsPlaceCell, bins=len(shuffle_dist),
                range=(0, len(shuffle_dist)), normed=True, plot_mean=True,
                label=exptGrp.label(), color=color)
        ax.set_ylabel('Normalized density')
        ax.set_xlabel('Number of sessions')
        if title_visible:
            ax.set_title('Number of sessions as place cell')
        ax.legend(frameon=False, loc='best')
        if plotShuffle:
            ax.step(np.arange(len(shuffle_dist)), shuffle_dist, where='post',
                    color='k', linestyle='dashed')
    return nSessionsPlaceCell
def n_sessions_imaged(exptGrp, roi_filter=None, ax=None, title_visible=True):
    """Of all cells in the group, how many days was each imaged?

    Returns a DataFrame with one row per tracked cell; metadata (mouseID,
    roi_id, location) are taken from the first session the cell appears in.
    The `ax` and `title_visible` arguments are accepted for interface
    compatibility but are not used here.
    """
    # dict by experiment
    roi_ids = exptGrp.roi_ids(channel=exptGrp.args['channel'],
                              label=exptGrp.args['imaging_label'],
                              roi_filter=roi_filter)
    rows = []
    for tracked_roi in exptGrp.allROIs(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'],
            roi_filter=roi_filter).itervalues():
        first_expt, first_idx = tracked_roi[0]
        rows.append({'mouseID': first_expt.parent.get('mouseID'),
                     'roi_id': roi_ids[first_expt][first_idx],
                     'location': first_expt.get('uniqueLocationKey'),
                     'value': len(tracked_roi)})
    return pd.DataFrame(rows, columns=[
        'mouseID', 'roi_id', 'location', 'value'])
def is_ever_place_cell(
        expt_grps, roi_filters=None, ax=None, colors=None, groupby=None,
        **plot_kwargs):
    """For each cell, track whether it has ever been a place cell as of each
    session.

    For every (mouse, location, roi) triple, sessions are ordered and a
    running maximum of the place-cell indicator is taken, so 'value' flips
    to 1 at the first place-cell session and stays 1 afterwards. Returns one
    DataFrame per experiment group; optionally plots grouped bars on `ax`.
    """
    if roi_filters is None:
        roi_filters = [None] * len(expt_grps)
    dfs = []
    for expt_grp, roi_filter in zip(expt_grps, roi_filters):
        # Boolean per (roi, session): does the ROI pass the place cell filter?
        df = lab.ExperimentGroup.filtered_rois(
            expt_grp, roi_filter=expt_grp.pcs_filter(),
            include_roi_filter=roi_filter, channel=expt_grp.args['channel'],
            label=expt_grp.args['imaging_label'])
        plotting.prepare_dataframe(
            df, include_columns=['mouseID', 'uniqueLocationKey', 'roi_id',
                                 'session_number_in_df'])
        data = []
        for key, group in df.groupby(
                ['mouseID', 'uniqueLocationKey', 'roi_id']):
            values = np.array(group['value'])
            order = np.argsort(group['session_number_in_df']).tolist()
            # Running max over chronologically ordered sessions:
            # once a place cell, flagged as such for all later sessions.
            for i, val in enumerate(np.maximum.accumulate(values[order])):
                data.append(
                    {'mouseID': key[0], 'uniqueLocationKey': key[1],
                     'roi_id': key[2], 'session_number': i, 'value': val})
        dfs.append(pd.DataFrame(data))
    if ax is not None:
        plotting.plot_dataframe(
            ax, dfs, labels=[expt_grp.label() for expt_grp in expt_grps],
            plotby=['session_number'], groupby=groupby,
            plot_method='grouped_bar', colors=colors,
            activity_label='Has ever been PC', **plot_kwargs)
        if groupby is None:
            # Strip error-bar artists that plot_dataframe adds by default.
            ax.collections = []
    return dfs
@memoize
def population_vector_correlation(
        exptGrp, roi_filter=None, method='angle', activity_filter='pc_either',
        min_pf_density=0, shuffle=True, circ_var_pcs=False, reward_at_zero=False):
    """Calculates the population similarity score across experiment pairs.

    Keyword arguments:
    method -- similarity method to use, either 'corr' for correlation
        or 'angle' for cosine of angle between the pop vectors
    activity_filter -- determines how to filter the cells for each expt pair.
        One of None, 'pc_either', 'pc_both', 'pc_first', 'pc_second',
        'active_either', 'active_both', or a callable ROI filter applied to
        both experiments of each pair.
    min_pf_density -- only include position bins that have a minimum fraction
        of place cells with nonzero tuning curves at that position
    shuffle -- if True, also build a shuffle distribution from random
        (position, position) pairings across the two experiments
        (capped at N_SHUFFLES samples)
    circ_var_pcs -- forwarded to pcs_filter
    reward_at_zero -- if True, re-index position bins to [-nBins/2, nBins/2)
        with the reward location at 0

    Returns (data_df, shuffle_df); shuffle_df is None when shuffle is False.
    Both DataFrames have columns
    ['first_expt', 'second_expt', 'position_bin_index', 'value'].
    """
    N_SHUFFLES = 10000
    # Pre-compute the appropriate ROI filter for string-valued
    # activity_filter arguments; callables are applied per-experiment below.
    if activity_filter and not callable(activity_filter):
        if 'pc' in activity_filter:
            pcs_filter = exptGrp.pcs_filter(
                roi_filter=roi_filter, circ_var=circ_var_pcs)
        elif 'active' in activity_filter:
            active_filter = filters.active_roi_filter(
                exptGrp, min_transients=1, channel=exptGrp.args['channel'],
                label=exptGrp.args['imaging_label'], roi_filter=roi_filter)
    data = exptGrp.data(roi_filter=roi_filter)
    all_roi_ids = exptGrp.roi_ids(
        channel=exptGrp.args['channel'], label=exptGrp.args['imaging_label'],
        roi_filter=roi_filter)
    data_list = []
    shuffle_dicts = []
    for e1, e2 in exptGrp.genImagedExptPairs():
        grp = exptGrp.subGroup([e1, e2])
        shared_rois = grp.sharedROIs(channel=grp.args['channel'],
                                     label=grp.args['imaging_label'],
                                     demixed=grp.args['demixed'],
                                     roi_filter=roi_filter)
        if len(shared_rois) == 0:
            continue
        # Restrict the shared ROIs according to activity_filter.
        if activity_filter is None:
            roi_ids = shared_rois
        elif activity_filter == 'pc_either':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_pcs).union(e2_pcs)))
        elif activity_filter == 'pc_both':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_pcs).intersection(e2_pcs)))
        elif activity_filter == 'pc_first':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(set(e1_pcs)))
        elif activity_filter == 'pc_second':
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(set(e2_pcs)))
        elif activity_filter == 'active_either':
            e1_active = e1.roi_ids(roi_filter=active_filter)
            e2_active = e2.roi_ids(roi_filter=active_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_active).union(e2_active)))
        elif activity_filter == 'active_both':
            e1_active = e1.roi_ids(roi_filter=active_filter)
            e2_active = e2.roi_ids(roi_filter=active_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_active).intersection(e2_active)))
        else:
            # A callable activity_filter is applied directly as an ROI
            # filter.  Narrowed from a bare 'except:' so that
            # KeyboardInterrupt/SystemExit still propagate.
            try:
                e1_rois = e1.roi_ids(label=exptGrp.args['imaging_label'], roi_filter=activity_filter)
                e2_rois = e2.roi_ids(label=exptGrp.args['imaging_label'], roi_filter=activity_filter)
                roi_ids = list(set(shared_rois).intersection(
                    set(e1_rois).intersection(set(e2_rois))))
            except Exception:
                raise ValueError("Unrecognized value for 'activity_filter'")
        if len(roi_ids) == 0:
            continue
        e1_rois = np.array([all_roi_ids[e1].index(x) for x in roi_ids])
        e2_rois = np.array([all_roi_ids[e2].index(x) for x in roi_ids])
        # shape = (position, rois)
        e1_data = data[e1][e1_rois].swapaxes(0, 1)
        e2_data = data[e2][e2_rois].swapaxes(0, 1)
        if reward_at_zero:
            # Define mapping from [0, 99) to [-50, 50) with reward at center
            n_bins = exptGrp.args['nPositionBins']
            reward = e1.rewardPositions(units='normalized')[0]
            reward *= n_bins
            reward = int(reward)
            new_pos_bins = np.arange(0, n_bins) - reward
            for ix, x in enumerate(new_pos_bins):
                if x >= n_bins / 2:
                    new_pos_bins[ix] -= n_bins
                if x < -1 * (n_bins / 2):
                    new_pos_bins[ix] += n_bins
        # Iterate over positions
        for pos_bin, vect1, vect2 in it.izip(it.count(), e1_data, e2_data):
            # impose min_pf_density threshold
            if min_pf_density:
                if not (float(len(np.nonzero(vect1)[0])) / len(vect1) >
                        min_pf_density and
                        float(len(np.nonzero(vect2)[0])) / len(vect2) >
                        min_pf_density):
                    continue
            if method == 'corr':
                value = np.corrcoef(vect1, vect2)[0, 1]
            elif method == 'angle':
                # Cosine similarity between the two population vectors.
                value = np.dot(vect1, vect2) / np.linalg.norm(vect1) / \
                    np.linalg.norm(vect2)
            else:
                raise Exception('Unrecognized similarity method')
            position_bin = new_pos_bins[pos_bin] if reward_at_zero else pos_bin
            data_dict = {'value': value,
                         'first_expt': e1,
                         'second_expt': e2,
                         'position_bin_index': position_bin}
            data_list.append(data_dict)
        if shuffle:
            # All cross-position pairings form the shuffle pool; a random
            # subset is scored below.
            shuffle_dicts.extend([
                {'expts': (e1, e2), 'position_bin_indices': (b1, b2),
                 'data': (d1, d2)} for (b1, d1), (b2, d2) in it.product(
                    it.izip(it.count(), e1_data),
                    it.izip(it.count(), e2_data))])
    if shuffle:
        if len(shuffle_dicts) < N_SHUFFLES:
            shuffler = shuffle_dicts
        else:
            shuffler = sample(shuffle_dicts, N_SHUFFLES)
        shuffle_list = []
        for pair in shuffler:
            if method == 'corr':
                value = np.corrcoef(*pair['data'])[0, 1]
            elif method == 'angle':
                value = np.dot(*pair['data']) / np.linalg.norm(
                    pair['data'][0]) / np.linalg.norm(pair['data'][1])
            else:
                raise ValueError('Unrecognized similarity method')
            shuffle_dict = {
                'value': value, 'first_expt': pair['expts'][0],
                'second_expt': pair['expts'][1],
                'position_bin_index': pair['position_bin_indices'][0]}
            shuffle_list.append(shuffle_dict)
    data_df = pd.DataFrame(data_list, columns=[
        'first_expt', 'second_expt', 'position_bin_index', 'value'])
    shuffle_df = pd.DataFrame(shuffle_list, columns=[
        'first_expt', 'second_expt', 'position_bin_index', 'value']) \
        if shuffle else None
    return data_df, shuffle_df
@memoize
def overlap(
        exptGrp, roi_filter=None, activity_method='frequency',
        running_only=True, activity_filter='pc_either', shuffle=True,
        circ_var_pcs=False, **activity_filter_kwargs):
    """Score activity overlap across experiment pairs for shared ROIs.

    For each ROI imaged in both experiments of a pair, the trial-averaged
    activity is computed per experiment and the overlap score is
    min(a1, a2) / max(a1, a2), i.e. 1 for identical activity levels.

    Keyword arguments:
    activity_method -- metric argument forwarded to calc_activity
    running_only -- if True, restrict activity to running intervals
    activity_filter -- same options as population_vector_correlation:
        None, 'pc_either'/'pc_both'/'pc_first'/'pc_second',
        'active_either'/'active_both', or a callable ROI filter
    shuffle -- if True, also build a shuffle distribution from random
        cross-pairings of ROIs (capped at N_SHUFFLES samples)
    circ_var_pcs -- forwarded to pcs_filter
    **activity_filter_kwargs -- forwarded to filters.active_roi_filter

    Returns (data_df, shuffle_df); shuffle_df is None when shuffle is False.
    Both DataFrames have columns
    ['first_expt', 'second_expt', 'first_roi', 'second_roi', 'value'].
    """
    N_SHUFFLES = 10000
    if activity_filter:
        if 'pc' in activity_filter:
            pcs_filter = exptGrp.pcs_filter(
                roi_filter=roi_filter, circ_var=circ_var_pcs)
        elif 'active' in activity_filter:
            active_filter = filters.active_roi_filter(
                exptGrp, channel=exptGrp.args['channel'],
                label=exptGrp.args['imaging_label'], roi_filter=roi_filter,
                **activity_filter_kwargs)
    rois_by_id = {
        expt: {roi.id: roi for roi in expt.rois(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])} for expt in exptGrp}
    all_roi_ids = exptGrp.roi_ids(
        channel=exptGrp.args['channel'], label=exptGrp.args['imaging_label'],
        roi_filter=roi_filter)
    # Trial-averaged activity per ROI, per experiment.
    activity = {}
    for expt in exptGrp:
        act = calc_activity(
            expt, interval='running' if running_only else None,
            method=activity_method, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'], roi_filter=roi_filter,
            demixed=exptGrp.args['demixed'],
            running_kwargs=exptGrp.running_kwargs() if running_only else None)
        act = act.mean(1)
        activity[expt] = act
    data_list = []
    shuffle_dicts = []
    for e1, e2 in exptGrp.genImagedExptPairs():
        grp = exptGrp.subGroup([e1, e2])
        shared_rois = grp.sharedROIs(
            channel=grp.args['channel'], label=grp.args['imaging_label'],
            demixed=grp.args['demixed'], roi_filter=roi_filter)
        # Restrict the shared ROIs according to activity_filter.
        if activity_filter is None:
            roi_ids = shared_rois
        elif activity_filter == 'pc_either':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_pcs).union(e2_pcs)))
        elif activity_filter == 'pc_both':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_pcs).intersection(e2_pcs)))
        elif activity_filter == 'pc_first':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(set(e1_pcs)))
        elif activity_filter == 'pc_second':
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(set(e2_pcs)))
        elif activity_filter == 'active_either':
            e1_active = e1.roi_ids(roi_filter=active_filter)
            e2_active = e2.roi_ids(roi_filter=active_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_active).union(e2_active)))
        elif activity_filter == 'active_both':
            e1_active = e1.roi_ids(roi_filter=active_filter)
            e2_active = e2.roi_ids(roi_filter=active_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_active).intersection(e2_active)))
        else:
            # NOTE(review): this fallback takes the union of the two
            # experiments' filtered ROIs, whereas the equivalent branch in
            # population_vector_correlation takes the intersection --
            # confirm which is intended.
            # Narrowed from a bare 'except:' so that
            # KeyboardInterrupt/SystemExit still propagate.
            try:
                e1_rois = e1.roi_ids(roi_filter=activity_filter)
                e2_rois = e2.roi_ids(roi_filter=activity_filter)
                roi_ids = list(set(shared_rois).intersection(
                    set(e1_rois).union(set(e2_rois))))
            except Exception:
                raise ValueError('Unrecognized activity filter')
        e1_pairs, e2_pairs = [], []
        for roi in roi_ids:
            a1 = activity[e1][all_roi_ids[e1].index(roi)]
            a2 = activity[e2][all_roi_ids[e2].index(roi)]
            roi1 = rois_by_id[e1][roi]
            roi2 = rois_by_id[e2][roi]
            # Overlap ratio in [0, 1]; NOTE(review): if both activities are
            # 0 this is 0/0 -- verify upstream filters exclude that case.
            value = min(a1, a2) / max(a1, a2)
            data_dict = {'value': value,
                         'value1': a1,
                         'value2': a2,
                         'first_expt': e1,
                         'second_expt': e2,
                         'first_roi': roi1,
                         'second_roi': roi2}
            data_list.append(data_dict)
            e1_pairs.append((a1, roi1))
            e2_pairs.append((a2, roi2))
        if shuffle:
            shuffle_dicts.extend([
                {'expts': (e1, e2), 'rois': (r1, r2), 'data': (d1, d2)}
                for (d1, r1), (d2, r2) in it.product(e1_pairs, e2_pairs)])
    if shuffle:
        if len(shuffle_dicts) < N_SHUFFLES:
            shuffler = shuffle_dicts
        else:
            shuffler = sample(shuffle_dicts, N_SHUFFLES)
        shuffle_list = []
        for pair in shuffler:
            value = min(*pair['data']) / max(*pair['data'])
            shuffle_dict = {'value': value,
                            'first_expt': pair['expts'][0],
                            'second_expt': pair['expts'][1],
                            'first_roi': pair['rois'][0],
                            'second_roi': pair['rois'][1]}
            shuffle_list.append(shuffle_dict)
    data_df = pd.DataFrame(data_list, columns=[
        'first_expt', 'second_expt', 'first_roi', 'second_roi', 'value'])
    shuffle_df = pd.DataFrame(shuffle_list, columns=[
        'first_expt', 'second_expt', 'first_roi', 'second_roi', 'value']) \
        if shuffle else None
    return data_df, shuffle_df
def plot_activity_stability_correlation(
        ax, exptGrp, activity_metric, stability_metric, stability_kwargs=None,
        activity_combine_method='mean', activity_filter='pc_either',
        roi_filter=None, z_score=True):
    """Scatter plots the activity of an ROI against its stability.

    Arguments:
    activity_metric: any metric argument to calc_activity
    stability_metric: any metric argument to place_field_stability_df,
        except similarity.  Must return a per-cell DataFrame with
        'first_expt'/'second_expt'/'first_roi'/'second_roi'/'value' columns.
    stability_kwargs: extra keyword arguments forwarded to stability_metric
    activity_combine_method: 'mean' to average the two experiments'
        activity values for each ROI pair, 'first' to use only the first
    z_score: if True, z-score both axes before plotting

    Returns (activity_to_plot, stability_to_plot) as numpy arrays.

    Note:
    To look at subsets of ROI pairs, pass in a paired_pcExperimentGroup
    """
    if stability_metric == population_vector_correlation:
        raise ValueError('Stability metric must be a per-cell metric')
    if stability_kwargs is None:
        stability_kwargs = {}
    activity = {}
    rois = {}
    for expt in exptGrp:
        # Calculate the desired activity metric and average across trials
        activity[expt] = calc_activity(
            expt, activity_metric, dF='from_file',
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'], roi_filter=roi_filter).mean(1)
        rois[expt] = expt.rois(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'], roi_filter=roi_filter)
    # Per-cell stability scores; the shuffle distribution is discarded.
    data, _ = stability_metric(exptGrp, roi_filter, **stability_kwargs)
    activity_to_plot = []
    stability_to_plot = []
    for index, row in data.iterrows():
        e1 = row['first_expt']
        e2 = row['second_expt']
        r1 = row['first_roi']
        r2 = row['second_roi']
        a1 = activity[e1][rois[e1].index(r1)]
        a2 = activity[e2][rois[e2].index(r2)]
        if activity_combine_method == 'mean':
            row_activity = np.mean([a1, a2])
        elif activity_combine_method == 'first':
            row_activity = a1
        else:
            raise ValueError('Unrecognized method argument')
        # Drop pairs with NaN/inf in either the activity or stability value.
        if np.isfinite(row_activity) and np.isfinite(row['value']):
            activity_to_plot.append(row_activity)
            stability_to_plot.append(row['value'])
    activity_to_plot = np.array(activity_to_plot)
    stability_to_plot = np.array(stability_to_plot)
    if z_score:
        activity_to_plot -= np.mean(activity_to_plot)
        activity_to_plot /= np.std(activity_to_plot)
        stability_to_plot -= np.mean(stability_to_plot)
        stability_to_plot /= np.std(stability_to_plot)
    # Determine stability label
    if stability_metric == overlap:
        stability_label = \
            stability_kwargs.get('activity_method', '?') + ' overlap: '
    else:
        stability_label = stability_metric.__name__ + ': '
    if stability_metric == centroid_shift:
        stability_label += 'pc_both'
    elif activity_filter is None:
        stability_label += 'all_cells'
    else:
        stability_label += activity_filter
    if z_score:
        stability_label += ' (z-score)'
    plotting.scatterPlot(
        ax, [activity_to_plot, stability_to_plot],
        [activity_metric, stability_label], plotEqualLine=True,
        print_stats=True)
    return activity_to_plot, stability_to_plot
def plot_recur_vs_non_recur_activity(
        ax, exptGrp, metric_fn, fn_kwargs=None, roi_filter=None,
        groupby=None, plotby=('pair_condition',),
        orderby='pair_condition_order', z_score=False, circ_var_pcs=False,
        **plot_kwargs):
    """Compare the stability of cells that are/become place cells versus those
    that are not.

    For every imaged experiment pair (e1, e2), the metric values of e2's
    cells are split by whether each cell was/wasn't a place cell in e1
    ('Previous'), and e1's cells by their place-cell status in e2 ('Next');
    all current-session place-cell values form 'Current'.

    Arguments:
    metric_fn: per-cell metric returning a DataFrame with 'expt', 'roi_id'
        and 'value' columns; computed on place cells only (pcs_filter)
    fn_kwargs: extra keyword arguments forwarded to metric_fn
    z_score: if True, z-score values within each pair condition
    **plot_kwargs: forwarded to plotting.plot_dataframe

    Returns (pc_df, n_pc_df) DataFrames tagged with 'pair_condition',
    'pair_condition_order', and 'is_place_cell' columns.
    """
    # TODO: pairing is not necessarily consecutive days...
    if fn_kwargs is None:
        fn_kwargs = {}
    day_minus1 = {}
    day_minus1['non_pcs'] = []
    day_minus1['pcs'] = []
    day_zero = {}
    day_zero['pcs'] = []
    day_plus1 = {}
    day_plus1['non_pcs'] = []
    day_plus1['pcs'] = []
    pcs_filter = exptGrp.pcs_filter(
        roi_filter=roi_filter, circ_var=circ_var_pcs)
    # Metric is evaluated on place cells only; rows are selected per pair
    # below by the partner experiment's PC/non-PC membership.
    data = metric_fn(exptGrp, roi_filter=pcs_filter, **fn_kwargs)
    plotting.prepare_dataframe(data, include_columns=['expt', 'roi_id'])
    for e1, e2 in exptGrp.genImagedExptPairs():
        # Removed shared filer so: m1_npc is NOT:
        # roi.id not in e2_pc_ids_set for roi in e1_roi_ids
        e1_pc_ids_set = set(e1.roi_ids(roi_filter=pcs_filter))
        e2_pc_ids_set = set(e2.roi_ids(roi_filter=pcs_filter))
        e1_pc_id_filter = lambda roi_id: roi_id in e1_pc_ids_set
        e2_pc_id_filter = lambda roi_id: roi_id in e2_pc_ids_set
        # Non-PCs are all filtered ROIs minus that session's place cells.
        e1_npc_ids_set = set(
            e1.roi_ids(roi_filter=roi_filter)).difference(e1_pc_ids_set)
        e2_npc_ids_set = set(
            e2.roi_ids(roi_filter=roi_filter)).difference(e2_pc_ids_set)
        e1_npc_id_filter = lambda roi_id: roi_id in e1_npc_ids_set
        e2_npc_id_filter = lambda roi_id: roi_id in e2_npc_ids_set
        # 'Previous': e2 values split by PC status in e1;
        # 'Next': e1 values split by PC status in e2.
        m1_pc = data[(data['expt'] == e2) &
                     (data['roi_id'].apply(e1_pc_id_filter))]
        m1_npc = data[(data['expt'] == e2) &
                      (data['roi_id'].apply(e1_npc_id_filter))]
        p1_pc = data[(data['expt'] == e1) &
                     (data['roi_id'].apply(e2_pc_id_filter))]
        p1_npc = data[(data['expt'] == e1) &
                      (data['roi_id'].apply(e2_npc_id_filter))]
        if z_score:
            # Z-score using the combined PC + non-PC distribution per pair.
            # NOTE(review): these assignments mutate boolean-indexed copies
            # of 'data', not 'data' itself.
            m1_mean = np.nanmean(np.hstack([m1_pc['value'], m1_npc['value']]))
            p1_mean = np.nanmean(np.hstack([p1_pc['value'], p1_npc['value']]))
            m1_std = np.nanstd(np.hstack([m1_pc['value'], m1_npc['value']]))
            p1_std = np.nanstd(np.hstack([p1_pc['value'], p1_npc['value']]))
            m1_pc['value'] = m1_pc['value'] - m1_mean
            p1_pc['value'] = p1_pc['value'] - p1_mean
            m1_pc['value'] = m1_pc['value'] / m1_std
            p1_pc['value'] = p1_pc['value'] / p1_std
            m1_npc['value'] = m1_npc['value'] - m1_mean
            p1_npc['value'] = p1_npc['value'] - p1_mean
            m1_npc['value'] = m1_npc['value'] / m1_std
            p1_npc['value'] = p1_npc['value'] / p1_std
        day_minus1['pcs'].append(m1_pc)
        day_minus1['non_pcs'].append(m1_npc)
        day_plus1['pcs'].append(p1_pc)
        day_plus1['non_pcs'].append(p1_npc)
    if z_score:
        d0_mean = np.nanmean(data['value'])
        d0_std = np.nanstd(data['value'])
        data['value'] = data['value'] - d0_mean
        data['value'] = data['value'] / d0_std
    day_zero['pcs'] = data
    # Tag each condition for plotting/grouping.
    day_minus1_pc_df = pd.concat(day_minus1['pcs'])
    day_minus1_pc_df['pair_condition'] = 'Previous'
    day_minus1_pc_df['pair_condition_order'] = -1
    day_minus1_pc_df['is_place_cell'] = 'place cell'
    day_minus1_npc_df = pd.concat(day_minus1['non_pcs'])
    day_minus1_npc_df['pair_condition'] = 'Previous'
    day_minus1_npc_df['pair_condition_order'] = -1
    day_minus1_npc_df['is_place_cell'] = 'not a place cell'
    day_plus1_pc_df = pd.concat(day_plus1['pcs'])
    day_plus1_pc_df['pair_condition'] = 'Next'
    day_plus1_pc_df['pair_condition_order'] = 1
    day_plus1_pc_df['is_place_cell'] = 'place cell'
    day_plus1_npc_df = pd.concat(day_plus1['non_pcs'])
    day_plus1_npc_df['pair_condition'] = 'Next'
    day_plus1_npc_df['pair_condition_order'] = 1
    day_plus1_npc_df['is_place_cell'] = 'not a place cell'
    day_0_pc_df = day_zero['pcs']
    day_0_pc_df['pair_condition'] = 'Current'
    day_0_pc_df['pair_condition_order'] = 0
    day_0_pc_df['is_place_cell'] = 'place cell'
    pc_df = pd.concat([day_minus1_pc_df, day_plus1_pc_df, day_0_pc_df])
    n_pc_df = pd.concat([day_minus1_npc_df, day_plus1_npc_df])
    if ax is not None:
        plotting.plot_dataframe(
            ax, [pc_df, n_pc_df], labels=['place cell', 'not a place cell'],
            groupby=groupby, plotby=plotby, orderby=orderby,
            plot_method='grouped_bar', **plot_kwargs)
    return pc_df, n_pc_df
def plot_activity_versus_place_coding(
        ax, exptGrp, metric_fn, fn_kwargs=None, roi_filter=None,
        z_score=True, circ_var_pcs=False, **plot_kwargs):
    """Compare the stability of cells that are/become place cells versus those
    that are not.

    Determines if various metrics can predict future place cells: for each
    imaged pair (e1, e2), 'Previous' holds e1's values split by PC status in
    e2 (future PCs vs. future non-PCs), 'Next' holds e2's values split by PC
    status in e1, and 'Current' holds each session's values split by its own
    PC status.

    Arguments:
    metric_fn: per-cell metric returning a DataFrame with 'expt', 'roi_id'
        and 'value' columns; computed across all ROIs in roi_filter
    fn_kwargs: extra keyword arguments forwarded to metric_fn
    z_score: if True, z-score values within each condition
    **plot_kwargs: forwarded to plotting.grouped_bar

    Returns a nested list: [[non-PC prev, cur, next], [PC prev, cur, next]].
    """
    # TODO: pairing is not necessarily consecutive days...
    if fn_kwargs is None:
        fn_kwargs = {}
    day_minus1 = {}
    day_minus1['non_pcs'] = []
    day_minus1['pcs'] = []
    day_zero = {}
    day_zero['non_pcs'] = []
    day_zero['pcs'] = []
    day_plus1 = {}
    day_plus1['non_pcs'] = []
    day_plus1['pcs'] = []
    pcs_filter = exptGrp.pcs_filter(
        roi_filter=roi_filter, circ_var=circ_var_pcs)
    data = metric_fn(exptGrp, roi_filter=roi_filter, **fn_kwargs)
    plotting.prepare_dataframe(data, include_columns=['expt', 'roi_id'])
    for e1, e2 in exptGrp.genImagedExptPairs():
        # Removed shared filer so: m1_npc is NOT:
        # roi.id not in e2_pc_ids_set for roi in e1_roi_ids
        e1_pc_ids_set = set(e1.roi_ids(roi_filter=pcs_filter))
        e2_pc_ids_set = set(e2.roi_ids(roi_filter=pcs_filter))
        e1_pc_id_filter = lambda roi_id: roi_id in e1_pc_ids_set
        e2_pc_id_filter = lambda roi_id: roi_id in e2_pc_ids_set
        e1_npc_ids_set = set(
            e1.roi_ids(roi_filter=roi_filter)).difference(e1_pc_ids_set)
        e2_npc_ids_set = set(
            e2.roi_ids(roi_filter=roi_filter)).difference(e2_pc_ids_set)
        e1_npc_id_filter = lambda roi_id: roi_id in e1_npc_ids_set
        e2_npc_id_filter = lambda roi_id: roi_id in e2_npc_ids_set
        # 'Previous': e1 values split by future PC status (in e2);
        # 'Next': e2 values split by past PC status (in e1).
        m1_pc = data[(data['expt'] == e1) &
                     (data['roi_id'].apply(e2_pc_id_filter))]['value']
        m1_npc = data[(data['expt'] == e1) &
                      (data['roi_id'].apply(e2_npc_id_filter))]['value']
        p1_pc = data[(data['expt'] == e2) &
                     (data['roi_id'].apply(e1_pc_id_filter))]['value']
        p1_npc = data[(data['expt'] == e2) &
                      (data['roi_id'].apply(e1_npc_id_filter))]['value']
        if z_score:
            # Z-score using the combined PC + non-PC distribution per pair.
            m1_mean = np.nanmean(np.hstack([m1_pc, m1_npc]))
            p1_mean = np.nanmean(np.hstack([p1_pc, p1_npc]))
            m1_std = np.nanstd(np.hstack([m1_pc, m1_npc]))
            p1_std = np.nanstd(np.hstack([p1_pc, p1_npc]))
            m1_pc -= m1_mean
            p1_pc -= p1_mean
            m1_pc /= m1_std
            p1_pc /= p1_std
            m1_npc -= m1_mean
            p1_npc -= p1_mean
            m1_npc /= m1_std
            p1_npc /= p1_std
        day_minus1['pcs'].extend(m1_pc)
        day_minus1['non_pcs'].extend(m1_npc)
        day_plus1['pcs'].extend(p1_pc)
        day_plus1['non_pcs'].extend(p1_npc)
    for e in exptGrp:
        pcs = set(e.roi_ids(roi_filter=pcs_filter))
        pcs_id_filter = lambda roi_id: roi_id in pcs
        # BUG FIX: was e1.roi_ids(...), which used the variable left over
        # from the pair loop above rather than the current experiment 'e'.
        npcs = set(e.roi_ids(roi_filter=roi_filter)).difference(pcs)
        npcs_id_filter = lambda roi_id: roi_id in npcs
        d0_pc = data[(data['expt'] == e) &
                     (data['roi_id'].apply(pcs_id_filter))]['value']
        d0_npc = data[(data['expt'] == e) &
                      (data['roi_id'].apply(npcs_id_filter))]['value']
        if z_score:
            d0_mean = np.nanmean(np.hstack([d0_pc, d0_npc]))
            d0_std = np.nanstd(np.hstack([d0_pc, d0_npc]))
            d0_pc -= d0_mean
            d0_pc /= d0_std
            d0_npc -= d0_mean
            d0_npc /= d0_std
        day_zero['pcs'].extend(d0_pc)
        day_zero['non_pcs'].extend(d0_npc)
    values = [[day_minus1['non_pcs'],
               day_zero['non_pcs'],
               day_plus1['non_pcs']],
              [day_minus1['pcs'],
               day_zero['pcs'],
               day_plus1['pcs']]]
    plotting.grouped_bar(ax, values, ['non place cells', 'place cells'],
                         ['Previous', 'Current', 'Next'], **plot_kwargs)
    return values
def get_activity_label(metric_fn, activity_kwargs=None):
    """Map a metric function to a human-readable activity-axis label.

    'activity_kwargs' are the keyword arguments that were passed to the
    metric; some labels are refined based on them.  Returns None for
    unrecognized metric functions.
    """
    kwargs = activity_kwargs if activity_kwargs is not None else {}
    # 'running_only' frame selection changes the sensitivity/specificity
    # labels.
    run_infix = ' during running' if \
        kwargs.get('includeFrames', '') == 'running_only' else ''
    if metric_fn == sensitivity:
        return 'sensitivity{} by lap'.format(run_infix)
    if metric_fn == specificity:
        return 'specificity{} by lap'.format(run_infix)
    if metric_fn == population_activity:
        # This assumes that the default value for 'running_only' is False in
        # calc_activity_statistic
        run_tag = ': running only' if kwargs.get('running_only', False) else ''
        if 'interval' not in kwargs:
            where = 'across all frames'
        elif kwargs['interval'] == 'pf':
            where = 'in place fields'
        elif kwargs['interval'] == 'non pf':
            where = 'not in place fields'
        elif kwargs['interval'] == 'all':
            where = 'across all frames'
        else:
            where = 'in interval'
        return '{} {}{}'.format(kwargs['stat'], where, run_tag)
    if metric_fn == population_vector_correlation:
        method = kwargs.get('method', False)
        if method == 'angle':
            return 'PV correlation (angle)'
        if method == 'corr':
            return 'PV correlation (corr)'
        return 'PV correlation'
    if metric_fn == overlap:
        return 'overlap score ({})'.format(
            kwargs.get('activity_method', 'unknown'))
    # Remaining metrics map directly to a fixed label.
    fixed_labels = {
        place_field_width: 'place field width',
        spatial_information: 'spatial information',
        sparsity: 'single cell sparsity',
        n_place_fields: 'number of place fields',
        n_sessions_imaged: 'number of sessions imaged',
        place_cell_percentage: 'fraction place cells',
        place_field_correlation: 'PF correlation',
        recurrence_probability: 'recurrence probability',
        centroid_shift: 'centroid shift',
        activity_centroid_shift: 'activity centroid shift',
        circular_variance: 'circular variance',
    }
    return fixed_labels.get(metric_fn)
def plot_acute_remapping_metric(
        ax, exptGrps_list, metric_fn, plot_method, roi_filters_list=None,
        group_labels=None, groupby=None, plotby=None, orderby=None,
        colorby=None, plot_shuffle=False, shuffle_plotby=False,
        pool_shuffle=False, plot_abs=False, activity_kwargs=None,
        **plot_kwargs):
    """Plotting function for acute remapping experiments.

    ExptGrps should already be split in to separate sub groups. You can plotby
    the different conditions with the 'condition' plotby column argument.

    exptGrps_list -- a list of lists of exptGrps. First index is the group and
        the second are the separate acute conditions
    roi_filters_list -- a list of list of filters, matched in shape to
        exptGrps_list
    group_labels -- labels for the overall exptGrps (not the individual
        conditions)

    Returns a dict mapping each group label to its combined DataFrame.

    See plot_place_cell_metric for description of additional arguments.
    """
    # Ordering used to sort known condition labels; unknown labels sort last.
    condition_order = {'SameAll': 0, 'DiffCtxs': 1, 'DiffAll': 2}
    if roi_filters_list is None:
        roi_filters_list = [
            [None] * len(conditions) for conditions in exptGrps_list]
    if group_labels is None:
        group_labels = ['Group ' + str(x) for x in range(len(exptGrps_list))]
    if activity_kwargs is None:
        activity_kwargs = {}
    # Collect every dataframe column that any groupby/plotby/orderby needs;
    # 'condition'/'condition_order' are added manually below, so they are
    # removed from the set of columns prepare_dataframe must supply.
    if groupby is not None:
        include_columns = set(
            column for groupby_list in groupby for column in groupby_list)
    else:
        include_columns = set()
    if plotby is not None:
        include_columns.update(plotby)
    if orderby is not None:
        include_columns.update(orderby)
    try:
        include_columns.remove('condition')
    except KeyError:
        pass
    try:
        include_columns.remove('condition_order')
    except KeyError:
        pass
    dataframes, shuffles = [], []
    for exptGrps, roi_filters in it.izip(exptGrps_list, roi_filters_list):
        grp_data, grp_shuffle = [], []
        for condition_idx, condition_grp, roi_filter in it.izip(
                it.count(), exptGrps, roi_filters):
            condition_label = condition_grp.label() if condition_grp.label() \
                else "Condition " + str(condition_idx)
            # metric_fn returns (data, shuffle) DataFrames.
            data, shuffle = metric_fn(
                condition_grp, roi_filter=roi_filter, **activity_kwargs)
            # An unpreparable dataframe degrades to an empty one so the
            # remaining conditions can still be plotted.
            try:
                prepped_data = plotting.prepare_dataframe(
                    data, include_columns=include_columns)
            except plotting.InvalidDataFrame:
                prepped_data = pd.DataFrame()
            prepped_data['condition'] = condition_label
            prepped_data['condition_order'] = \
                condition_order.get(condition_label, 10)
            grp_data.append(prepped_data)
            if plot_shuffle:
                try:
                    prepped_shuffle = plotting.prepare_dataframe(
                        shuffle, include_columns=include_columns)
                except plotting.InvalidDataFrame:
                    prepped_shuffle = pd.DataFrame()
                prepped_shuffle['condition'] = condition_label
                prepped_shuffle['condition_order'] = \
                    condition_order.get(condition_label, 10)
                grp_shuffle.append(prepped_shuffle)
        dataframes.append(pd.concat(grp_data))
        if plot_shuffle:
            shuffles.append(pd.concat(grp_shuffle))
    try:
        plotting.plot_dataframe(
            ax, dataframes, shuffles if plot_shuffle else None,
            activity_label=get_activity_label(metric_fn, activity_kwargs),
            groupby=groupby, plotby=plotby, orderby=orderby, colorby=colorby,
            plot_method=plot_method, plot_shuffle=plot_shuffle,
            shuffle_plotby=shuffle_plotby, pool_shuffle=pool_shuffle,
            labels=group_labels, **plot_kwargs)
    except plotting.InvalidDataFrame:
        pass
    return {label: df for label, df in zip(group_labels, dataframes)}
def plot_spatial_tuning_overlay(
        ax, exptGrp, plane=0, roi_filter=None, labels_visible=True,
        cax=None, alpha=0.2, **kwargs):
    """Plot place cell spatial tuning for a single expt exptGrp.

    Overlays the place cells of exptGrp[0] on the anatomical prototype
    image, coloring each ROI by its (first) place-field centroid position
    normalized to [0, 1).  Returns None early if no ROI vertices are found.
    """
    pcs_filter = exptGrp.pcs_filter(roi_filter=roi_filter)
    centroids = calcCentroids(
        exptGrp.data(roi_filter=pcs_filter)[exptGrp[0]],
        exptGrp.pfs(roi_filter=pcs_filter)[exptGrp[0]])
    nPositionBins = exptGrp.args['nPositionBins']
    # Normalize centroid positions to [0, 1) for the color map.
    centroid_vals = np.array([x[0] for x in centroids]) / float(nPositionBins)
    background_figure = exptGrp[0].returnFinalPrototype(
        channel=exptGrp.args['channel'])[plane, ...]
    roiVerts = exptGrp[0].roiVertices(
        channel=exptGrp.args['channel'], label=exptGrp.args['imaging_label'],
        roi_filter=pcs_filter)
    if not len(roiVerts):
        return
    imaging_parameters = exptGrp[0].imagingParameters()
    aspect_ratio = imaging_parameters['pixelsPerLine'] \
        / imaging_parameters['linesPerFrame']
    # Keep only ROIs in the requested plane; the third coordinate of the
    # first vertex of the first polygon encodes the plane index.
    roi_inds = [i for i, v in enumerate(roiVerts) if v[0][0][2] == plane]
    plane_verts = np.array(roiVerts)[roi_inds].tolist()
    # Drop the plane coordinate, keeping only (x, y) for each polygon.
    twoD_verts = []
    for roi in plane_verts:
        roi_polys = []
        for poly in roi:
            roi_polys.append(np.array(poly)[:, :2])
        twoD_verts.append(roi_polys)
    if labels_visible:
        pcLabels = exptGrp.roi_ids(
            channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'],
            roi_filter=pcs_filter)[exptGrp[0]]
        pcLabels = np.array(pcLabels)[roi_inds].tolist()
    else:
        pcLabels = None
    plotting.roiDataImageOverlay(
        ax, background_figure, twoD_verts,
        values=centroid_vals, vmin=0, vmax=1, labels=pcLabels, cax=cax,
        alpha=alpha, aspect=aspect_ratio, **kwargs)
    ax.set_title('Spatial tuning of place cells\nPlane {}'.format(plane))
def place_imaging_animation(
        expt, ax, n_position_bins=100, running_kwargs=None, channel='Ch2',
        **plot_kwargs):
    """Creates an animation where time is position on the belt and each
    frame is the average activity across all running frames at that
    position.

    Implemented as a generator: yields once per position bin after
    updating the image on 'ax', so it can drive e.g. a matplotlib
    FuncAnimation.
    """
    if running_kwargs is None:
        running_kwargs = {}
    running_frames = expt.runningIntervals(
        imageSync=True, direction='forward', returnBoolList=True,
        **running_kwargs)
    imaging_dataset = expt.imaging_dataset()
    ch_idx = imaging_dataset.channel_names.index(channel)
    # Accumulate per-bin pixel sums and valid-pixel counts for averaging.
    position_sums = np.zeros(
        (n_position_bins, imaging_dataset.num_rows,
         imaging_dataset.num_columns))
    position_counts = np.zeros(
        (n_position_bins, imaging_dataset.num_rows,
         imaging_dataset.num_columns), dtype=int)
    for trial, cycle, cycle_running in it.izip(
            expt, imaging_dataset, running_frames):
        position = trial.behaviorData(imageSync=True)['treadmillPosition']
        # Only frames flagged as running are included in the average.
        for frame, pos in it.compress(
                it.izip(cycle, position), cycle_running):
            pos_bin = int(pos * n_position_bins)
            # NaN pixels contribute 0 to the sum and are excluded from the
            # count, so they don't bias the per-pixel average.
            non_nan_pixels = np.isfinite(frame[ch_idx])
            frame[ch_idx][np.isnan(frame[ch_idx])] = 0
            position_sums[pos_bin] += frame[ch_idx]
            position_counts[pos_bin] += non_nan_pixels.astype(int)
    # Pixels/bins never visited while running have count 0 and divide to
    # NaN/inf (with a runtime warning).
    position_average_movie = position_sums / position_counts
    imaging_parameters = expt.imagingParameters()
    aspect_ratio = imaging_parameters['pixelsPerLine'] \
        / imaging_parameters['linesPerFrame']
    image = ax.imshow(
        position_average_movie[0], cmap='gray', interpolation='none',
        aspect=aspect_ratio, **plot_kwargs)
    ax.set_axis_off()
    for frame in position_average_movie:
        image.set_data(frame)
        yield
def place_cell_tuning_animation(
        expt, ax, channel='Ch2', label=None, roi_filter=None,
        n_position_bins=100, add_end_frame=False):
    """Animation over belt position showing the spatial tuning of all place cells.

    Implemented as a generator: yields once per position bin after updating
    the overlay image, with each ROI colored by its centroid position and
    its alpha scaled by the normalized tuning-curve magnitude at the
    current bin.

    add_end_frame -- If True, adds an extra frame at end of the movie showing
        all place_cells shaded
    """
    # NOTE(review): slices the prototype as [..., 0] here, while
    # plot_spatial_tuning_overlay uses [plane, ...] -- confirm which axis
    # layout returnFinalPrototype uses.
    background = expt.returnFinalPrototype(channel=channel)[..., 0]
    imaging_parameters = expt.imagingParameters()
    aspect_ratio = imaging_parameters['pixelsPerLine'] \
        / imaging_parameters['linesPerFrame']
    ax.imshow(
        background, cmap='gray', interpolation='none', aspect=aspect_ratio,
        zorder=0)
    exptGrp = pcExperimentGroup(
        [expt], channel=channel, imaging_label=label,
        nPositionBins=n_position_bins)
    pcs_filter = exptGrp.pcs_filter(roi_filter=roi_filter)
    data = exptGrp.data(roi_filter=pcs_filter)[expt]
    centroids, pfs = calcCentroids(
        data, exptGrp.pfs(roi_filter=pcs_filter)[expt], return_pfs=True)
    # Normalize each tuning curve to a peak of 1.
    data /= np.amax(data, axis=1)[:, None]
    # Zero the tuning curve outside each cell's (first) place field,
    # handling fields that wrap around the belt end.
    pf_mask = np.empty_like(data)
    pf_mask.fill(False)
    for pf_idx, pf in enumerate(pfs):
        if pf[0][0] < pf[0][1]:
            pf_mask[pf_idx, pf[0][0]:pf[0][1] + 1] = True
        else:
            pf_mask[pf_idx, pf[0][0]:] = True
            pf_mask[pf_idx, :pf[0][1] + 1] = True
    data *= pf_mask
    # Gather all the roi masks and color them
    # Probably would have been easier to keep the mask and color separate
    # until plotting, but this works
    roi_masks = []
    rois = expt.rois(
        roi_filter=pcs_filter, channel=exptGrp.args['channel'],
        label=exptGrp.args['imaging_label'])
    assert len(rois) == len(centroids)
    for roi, centroid in it.izip(rois, centroids):
        roi_mask = np.array(roi.mask[0].todense())
        # Color each ROI by its centroid position via the hsv colormap.
        color = np.array(plt.cm.hsv(centroid[0] / float(n_position_bins)))
        roi_masks.append(roi_mask[..., None] * color[None, None, :])
    roi_masks = np.array(roi_masks)
    rois = np.array(expt.roiVertices(roi_filter=pcs_filter))
    pf_image = ax.imshow(
        np.ma.masked_all_like(background), interpolation='none',
        aspect=aspect_ratio, zorder=1)
    ax.set_axis_off()
    for bin in data.T:
        # Only ROIs with nonzero tuning at this bin are drawn; alpha is
        # scaled by the tuning magnitude.
        bin_masks = roi_masks[bin > 0]
        if bin_masks.shape[0]:
            bin_masks_copy = bin_masks.copy()
            for mask_idx, roi, mask_mag in it.izip(
                    it.count(), rois[bin > 0], bin[bin > 0]):
                bin_masks_copy[mask_idx, :, :, 3] *= mask_mag
                # If this is the peak, add an ROI polygon
                if mask_mag == 1:
                    for poly in roi:
                        xlim = ax.get_xlim()
                        ylim = ax.get_ylim()
                        ax.plot(
                            poly[:, 0] - 0.5, poly[:, 1] - 0.5,
                            color=bin_masks[mask_idx].max(axis=0).max(axis=0),
                            zorder=2)
                        ax.set_xlim(xlim)
                        ax.set_ylim(ylim)
            bin_mask = bin_masks_copy.sum(axis=0)
            # Fully transparent pixels (no ROI coverage) stay masked so the
            # background image shows through.
            masked_pixels = ~np.any(bin_mask, axis=2)
            masked_pixels = np.tile(masked_pixels[..., None], (1, 1, 4))
            pf_image.set_data(np.ma.masked_where(masked_pixels, bin_mask))
        yield
    if add_end_frame:
        all_masks = roi_masks.sum(axis=0)
        all_masks[..., 3] *= 0.7  # Add some alpha
        masked_pixels = ~np.any(all_masks, axis=2)
        masked_pixels = np.tile(masked_pixels[..., None], (1, 1, 4))
        pf_image.set_data(np.ma.masked_where(masked_pixels, all_masks))
        yield
@memoize
def centroid_to_position_distance(
        exptGrp, positions, roi_filter=None, multiple_fields='closest',
        multiple_positions='closest', return_abs=False):
    """Calculates the distance from the centroid of each place field to any
    positions. Values < 0 denote the pf preceded the reward, > 0 followed.

    Parameters
    ----------
    positions : array or 'reward' or 'A', 'B', 'C',...
        Positions to calculate distance from. Either a list of positions
        (normalized) or 'reward' to use the reward locations. If a single
        character is passed, use the reward position corresponding to
        that condition (calc'd on a per mouse basis and assuming 1 reward
        position per condition per mouse)
    multiple_fields : ['closest', 'largest']
        Determines how to handle a PC with multiple fields, either return the
        closest field or the largest field.
    multiple_positions : ['closest']
        Determines how to handle multiple positions. Currently only 'closest'
        is implemented, where the closest position to the place field is
        considered.
    return_abs : boolean
        If True, returns absolute value of the distance

    Returns
    -------
    pd.DataFrame
        Columns 'expt', 'roi', 'value'; one row per ROI with at least one
        place field. 'value' is the signed distance in normalized track
        units, wrapped to [-0.5, 0.5) (absolute value if ``return_abs``).
    """
    n_position_bins = float(exptGrp.args['nPositionBins'])
    # Resolve 'positions' into a per-experiment array of target positions.
    if isinstance(positions, basestring):
        if positions == 'reward':
            rewards_by_expt = {
                expt: expt.rewardPositions(units='normalized')
                for expt in exptGrp}
        else:
            # Single-character condition label (e.g. 'A')
            rewards_by_expt = rewards_by_condition(
                exptGrp, positions, condition_column='condition')
    else:
        # Same explicit positions for every experiment
        rewards_by_expt = defaultdict(
            lambda: np.array(positions).astype(float))
    exptGrp_pfs = exptGrp.pfs(roi_filter=roi_filter)
    exptGrp_data = exptGrp.data(roi_filter=roi_filter)
    data_list = []
    for expt in exptGrp:
        calc_positions = rewards_by_expt[expt].copy()
        if calc_positions is None:
            continue
        # If any positions are >=1, they should be tick counts
        if np.any(calc_positions >= 1.):
            track_length = np.mean(
                [trial.behaviorData()['trackLength']
                 for trial in expt.findall('trial')])
            calc_positions /= track_length
        rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        data = exptGrp_data[expt]
        pfs = exptGrp_pfs[expt]
        # One centroid per place field (in position bins)
        centroids = calcCentroids(data, pfs, returnAll=True)
        assert len(rois) == len(centroids)
        for roi, roi_centroids in it.izip(rois, centroids):
            if not len(roi_centroids):
                continue
            centroid_distances = []
            for centroid in roi_centroids:
                centroid /= n_position_bins
                diffs = centroid - calc_positions
                # Wrap distances on the circular track to [-0.5, 0.5)
                diffs[diffs >= 0.5] -= 1.
                diffs[diffs < -0.5] += 1.
                if multiple_positions == 'closest':
                    centroid_distances.append(
                        diffs[np.argmin(np.abs(diffs))])
                else:
                    raise ValueError
                if multiple_fields == 'largest':
                    # calcCentroids should return pfs sorted by peak, so only
                    # consider the first pf for each ROI
                    break
            if multiple_fields == 'closest':
                distance = centroid_distances[
                    np.argmin(np.abs(centroid_distances))]
            elif multiple_fields == 'largest':
                distance = centroid_distances[0]
            else:
                raise ValueError
            data_list.append({'expt': expt, 'roi': roi, 'value': distance})
    dataframe = pd.DataFrame(data_list, columns=['expt', 'roi', 'value'])
    if return_abs:
        dataframe['value'] = dataframe['value'].abs()
    return dataframe
@memoize
def centroid_to_position_threshold(
        exptGrp, positions, threshold, method='centroid', **kwargs):
    """Classify each place field as near (1) or far (0) from the positions.

    Computes an absolute per-ROI distance with the chosen method, keeps it in
    a 'distance' column, and sets 'value' to 1 when the distance is strictly
    below ``threshold``, else 0.
    """
    if method == 'centroid':
        distances = centroid_to_position_distance(
            exptGrp, positions=positions, return_abs=True, **kwargs)
    elif method == 'resultant_vector':
        distances = mean_resultant_vector_to_position_angle(
            exptGrp, positions=positions, **kwargs)
    else:
        raise ValueError('Unrecognized centroid method')
    # The threshold test only makes sense for non-negative distances
    assert distances['value'].min() >= 0

    def _within(dist):
        return int(dist < threshold)
    distances['distance'] = distances['value']
    distances['value'] = distances['distance'].apply(_within)
    return distances
@memoize
def mean_resultant_vector_to_position_angle(
        exptGrp, positions, roi_filter=None, multiple_positions='closest',
        pcs_only=False, circ_var_pcs=True, method='vector_angle'):
    """Calculates the angle between the mean resultant vector and any
    positions.

    Parameters
    ----------
    positions : array or 'reward' or 'A', 'B', 'C',...
        Positions to calculate distance from. Either a list of positions
        (normalized) or 'reward' to use the reward locations. If a single
        character is passed, use the reward position corresponding to
        that condition (calc'd on a per mouse basis and assuming 1 reward
        position per condition per mouse)
    multiple_positions : {'closest', 'mean'}
        Determines how to handle multiple positions 'closest' takes the closest
        position and 'mean' takes the average.
    pcs_only : bool
        If True, only return place cells (as determined by circular variance)
    circ_var_pcs : bool
        If True and pcs_only is True, use circular variance PC criteria
    method : {'vector_angle', 'angle_difference'}
        Method for determining the distance from activity to reward. Either the
        angle between firing and reward, on [0, pi), or difference between the
        angles, on [-pi, pi). Positive angle and vector is in front of
        positions, negative is behind.

    Returns
    -------
    pd.DataFrame
        Columns 'expt', 'roi', 'closest_position_idx', 'value'.
    """
    # Small 2-d vector helpers; 'angle' is the unsigned angle between vectors
    def dotproduct(v1, v2):
        return sum((a * b) for a, b in zip(v1, v2))

    def length(v):
        return math.sqrt(dotproduct(v, v))

    def angle(v1, v2):
        # Rounding keeps the cosine within [-1, 1] despite float error,
        # so math.acos cannot raise a domain error.
        return math.acos(
            np.round(dotproduct(v1, v2) / (length(v1) * length(v2)), 3))
    if pcs_only:
        roi_filter = exptGrp.pcs_filter(
            roi_filter=roi_filter, circ_var=circ_var_pcs)
    # One mean resultant vector (complex number) per ROI per experiment
    vectors = calc_activity_centroids(exptGrp, roi_filter=roi_filter)
    # Resolve 'positions' into per-experiment positions (same convention as
    # centroid_to_position_distance)
    if isinstance(positions, basestring):
        if positions == 'reward':
            rewards_by_expt = {
                expt: expt.rewardPositions(units='normalized')
                for expt in exptGrp}
        else:
            rewards_by_expt = rewards_by_condition(
                exptGrp, positions, condition_column='condition')
    else:
        rewards_by_expt = defaultdict(
            lambda: np.array(positions).astype(float))
    data_list = []
    for expt in exptGrp:
        expt_positions = rewards_by_expt[expt].copy()
        if expt_positions is None:
            continue
        # If any positions are >=1, they should be tick counts
        if np.any(expt_positions >= 1.):
            track_length = np.mean(
                [trial.behaviorData()['trackLength']
                 for trial in expt.findall('trial')])
            expt_positions /= track_length
        # Map normalized positions to unit vectors on the circular track
        expt_positions *= 2 * np.pi
        expt_positions = [np.complex(x, y) for x, y in zip(
            np.cos(expt_positions), np.sin(expt_positions))]
        expt_angles = np.angle(expt_positions)
        rois = expt.rois(
            roi_filter=roi_filter, channel=exptGrp.args['channel'],
            label=exptGrp.args['imaging_label'])
        assert len(rois) == len(vectors[expt])
        for roi, roi_resultant in it.izip(rois, vectors[expt]):
            if method == 'vector_angle':
                # Unsigned angle between the activity vector and each position
                angles = [angle([roi_resultant.real, roi_resultant.imag],
                                [pos.real, pos.imag])
                          for pos in expt_positions]
            elif method == 'angle_difference':
                # Signed angular difference, wrapped to [-pi, pi)
                angles = np.array(
                    [np.angle(roi_resultant) - ang for ang in expt_angles])
                angles[angles < -np.pi] += 2 * np.pi
                angles[angles >= np.pi] -= 2 * np.pi
            if multiple_positions == 'closest':
                closest_position = np.argmin(np.abs(angles))
                min_angle = angles[closest_position]
                data_list.append(
                    {'expt': expt, 'roi': roi,
                     'closest_position_idx': closest_position,
                     'value': min_angle})
            elif multiple_positions == 'mean':
                raise NotImplementedError
            else:
                raise ValueError
    return pd.DataFrame(data_list, columns=[
        'expt', 'roi', 'closest_position_idx', 'value'])
@memoize
def distance_to_position_shift(
        exptGrp, roi_filter=None, method='resultant', **kwargs):
    """Change in activity-to-position distance across imaged experiment pairs.

    For every pair of imaged experiments, ROIs are matched by 'roi_id' and
    'value' is the second-experiment distance minus the first-experiment
    distance, as computed by the selected distance metric.
    """
    metric_fns = {'resultant': mean_resultant_vector_to_position_angle,
                  'centroid': centroid_to_position_distance}
    if method not in metric_fns:
        raise ValueError('Unrecognized method argument: {}'.format(method))
    distances = metric_fns[method](exptGrp, roi_filter=roi_filter, **kwargs)
    plotting.prepare_dataframe(distances, include_columns=['expt', 'roi_id'])
    pair_frames = []
    for first_expt, second_expt in exptGrp.genImagedExptPairs():
        pair = pd.merge(
            distances[distances['expt'] == first_expt],
            distances[distances['expt'] == second_expt],
            on=['roi_id'], suffixes=('_e1', '_e2'))
        pair['value'] = pair['value_e2'] - pair['value_e1']
        # Copy the matched columns to their canonical pair-wise names
        for new_col, old_col in (('first_roi', 'roi_e1'),
                                 ('second_roi', 'roi_e2'),
                                 ('first_expt', 'expt_e1'),
                                 ('second_expt', 'expt_e2')):
            pair[new_col] = pair[old_col]
        pair = pair.drop(
            ['roi_e1', 'roi_e2', 'expt_e1', 'expt_e2', 'value_e1', 'value_e2',
             'roi_id'], axis=1)
        pair_frames.append(pair)
    return pd.concat(pair_frames, ignore_index=True)
def metric_correlation(
        expt_grp, first_metric_fn, second_metric_fn, correlate_by,
        groupby=None, roi_filter=None, first_metric_kwargs=None,
        second_metric_kwargs=None, method='pearson_r'):
    """Correlate two per-ROI/per-experiment metrics against each other.

    Parameters
    ----------
    first_metric_fn, second_metric_fn : callables
        Metric functions taking ``expt_grp`` (and optionally ``roi_filter``)
        and returning a DataFrame with a 'value' column, or a
        ``(data_df, shuffle_df)`` tuple.
    correlate_by : list of str
        Columns that define each correlation group; one correlation value is
        returned per group.
    groupby : list of list of str | None
        Successive groupby levels; 'value' is averaged within each level
        before correlating. ``None`` means a single pass with no grouping.
    method : str
        'pearson_r' or 'spearman_r'; append '_squared' to square the result.

    Returns
    -------
    pd.DataFrame
        One row per ``correlate_by`` group with the correlation in 'value'.
    """
    if groupby is None:
        groupby = [[]]
    if first_metric_kwargs is None:
        first_metric_kwargs = {}
    if second_metric_kwargs is None:
        second_metric_kwargs = {}
    # Metric functions may or may not accept roi_filter; retry without it.
    try:
        first_metric_data = first_metric_fn(
            expt_grp, roi_filter=roi_filter, **first_metric_kwargs)
    except TypeError:
        first_metric_data = first_metric_fn(expt_grp, **first_metric_kwargs)
    # Some metric functions return (data_df, shuffle_df); keep the data only.
    if not isinstance(first_metric_data, pd.DataFrame) and \
            (len(first_metric_data) == 2 and
             isinstance(first_metric_data[0], pd.DataFrame) and
             (isinstance(first_metric_data[1], pd.DataFrame) or
              first_metric_data[1] is None)):
        first_metric_data = first_metric_data[0]
    try:
        second_metric_data = second_metric_fn(
            expt_grp, roi_filter=roi_filter, **second_metric_kwargs)
    except TypeError:
        second_metric_data = second_metric_fn(expt_grp, **second_metric_kwargs)
    if not isinstance(second_metric_data, pd.DataFrame) and \
            (len(second_metric_data) == 2 and
             isinstance(second_metric_data[0], pd.DataFrame) and
             (isinstance(second_metric_data[1], pd.DataFrame) or
              second_metric_data[1] is None)):
        second_metric_data = second_metric_data[0]
    grouped_dfs = []
    for df in (first_metric_data, second_metric_data):
        # Apply each groupby level in turn, averaging 'value' within groups
        for groupby_list in groupby:
            plotting.prepare_dataframe(df, include_columns=groupby_list)
            df = df.groupby(groupby_list, as_index=False).mean()
        plotting.prepare_dataframe(df, include_columns=correlate_by)
        grouped_dfs.append(df)
    # Pair the two metrics on the final grouping plus the correlate_by keys
    merge_on = tuple(groupby[-1]) + tuple(correlate_by)
    merged_df = pd.merge(
        grouped_dfs[0], grouped_dfs[1], how='inner', on=merge_on,
        suffixes=('_1', '_2'))
    result_dicts = []
    for key, group in merged_df.groupby(correlate_by):
        if method.startswith('pearson_r'):
            corr, _ = pearsonr(group['value_1'], group['value_2'])
        elif method.startswith('spearman_r'):
            corr, _ = spearmanr(group['value_1'], group['value_2'])
        if method.endswith('_squared'):
            corr **= 2
        # NOTE(review): 'key' is assumed to be a tuple; with a single
        # correlate_by column some pandas versions yield a scalar key, which
        # would break this zip -- verify against the pinned pandas version.
        group_dict = {c: v for c, v in zip(correlate_by, key)}
        group_dict['value'] = corr
        result_dicts.append(group_dict)
    return pd.DataFrame(result_dicts)
@memoize
def cue_cell_remapping(
        expt_grp, roi_filter=None, near_threshold=0.05 * 2 * np.pi,
        activity_filter=None, circ_var_pcs=False, shuffle=True):
    """Measure how closely cue-associated activity follows a belt cue across
    pairs of imaged experiments.

    ROIs whose activity centroid in the first experiment lies within
    ``near_threshold`` radians of a belt cue are selected; 'value' is the
    angular distance (radians, on [0, pi]) between the same ROI's centroid in
    the second experiment and that cue's position in the second experiment.

    Parameters
    ----------
    near_threshold : float
        Maximum first-experiment centroid-to-cue angle (radians); the
        default corresponds to 5% of the circular track.
    activity_filter : None | str | filter
        'pc_either'/'pc_both'/'active_either'/'active_both', a custom roi
        filter, or None for no additional filtering.
    shuffle : bool
        If True, also build a shuffle distribution by re-pairing sampled
        rows with random ROIs from the same experiment pair.

    Returns
    -------
    result_df, shuffle_df : pd.DataFrame, pd.DataFrame or None
        ``shuffle_df`` is None when ``shuffle`` is False.
    """
    N_SHUFFLES = 10000
    # NOTE(review): roi_filter=None looks deliberate -- the centroid lists
    # are indexed below with e.roi_ids().index(roi), which assumes the full,
    # unfiltered ROI list. Confirm intended.
    centroids = calc_activity_centroids(expt_grp, roi_filter=None)
    if activity_filter is not None:
        # Pre-build the filter referenced by the matching branches below
        if 'pc' in activity_filter:
            pcs_filter = expt_grp.pcs_filter(circ_var=circ_var_pcs)
        elif 'active' in activity_filter:
            active_filter = filters.active_roi_filter(
                expt_grp, min_transients=1, channel=expt_grp.args['channel'],
                label=expt_grp.args['imaging_label'], roi_filter=roi_filter)

    # Small 2-d vector helpers; 'angle' is the unsigned angle between vectors
    def dotproduct(v1, v2):
        return sum((a * b) for a, b in zip(v1, v2))

    def length(v):
        return math.sqrt(dotproduct(v, v))

    def angle(v1, v2):
        # Rounding keeps the cosine within [-1, 1] so acos cannot fail
        return math.acos(
            np.round(dotproduct(v1, v2) / (length(v1) * length(v2)), 3))

    def complex_from_angle(angle):
        return np.complex(np.cos(angle), np.sin(angle))

    def expt_cues(expt):
        # Cue midpoints, both normalized ('pos') and as unit complex numbers
        cues = expt.belt().cues(normalized=True)
        assert np.all(cues.start < cues.stop)
        cues['pos'] = np.array([cues.start, cues.stop]).mean(axis=0)
        cues['pos_complex'] = (cues['pos'] * 2 * np.pi).apply(complex_from_angle)
        return cues
    data_list = []
    for e1, e2 in expt_grp.genImagedExptPairs():
        shared_rois = expt_grp.subGroup([e1, e2]).sharedROIs(
            channel=expt_grp.args['channel'],
            label=expt_grp.args['imaging_label'], roi_filter=roi_filter)
        # Restrict the shared ROIs by the requested activity criterion
        if activity_filter == 'pc_either':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_pcs).union(e2_pcs)))
        elif activity_filter == 'pc_both':
            e1_pcs = e1.roi_ids(roi_filter=pcs_filter)
            e2_pcs = e2.roi_ids(roi_filter=pcs_filter)
            roi_ids = list(set(shared_rois).intersection(
                e1_pcs).intersection(e2_pcs))
        elif activity_filter == 'active_either':
            e1_active = e1.roi_ids(roi_filter=active_filter)
            e2_active = e2.roi_ids(roi_filter=active_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_active).union(e2_active)))
        elif activity_filter == 'active_both':
            e1_active = e1.roi_ids(roi_filter=active_filter)
            e2_active = e2.roi_ids(roi_filter=active_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_active).intersection(e2_active)))
        elif activity_filter:
            # Any other truthy value is treated as a custom roi filter
            e1_rois = e1.roi_ids(roi_filter=activity_filter)
            e2_rois = e2.roi_ids(roi_filter=activity_filter)
            roi_ids = list(set(shared_rois).intersection(
                set(e1_rois).union(set(e2_rois))))
        else:
            roi_ids = shared_rois
        for roi in roi_ids:
            c1 = centroids[e1][e1.roi_ids().index(roi)]
            c2 = centroids[e2][e2.roi_ids().index(roi)]
            # Activity centroid will be NaN if there was no transients
            if np.isnan(c1) or np.isnan(c2):
                continue
            cue_positions = expt_cues(e1)['pos_complex']
            if not len(cue_positions):
                continue
            cue_distances = [
                angle((pos.real, pos.imag),
                      (c1.real, c1.imag)) for pos in cue_positions]
            assert all(dist <= np.pi for dist in cue_distances)
            closest_cue_idx = np.argmin(cue_distances)
            closest_cue = expt_cues(e1)['cue'][closest_cue_idx]
            e1_distance = cue_distances[closest_cue_idx]
            # Only keep ROIs that started out near a cue
            if e1_distance > near_threshold:
                continue
            # The cue must still exist in the second experiment
            if closest_cue not in set(expt_cues(e2)['cue']):
                continue
            # NOTE(review): .ix is a deprecated pandas indexer
            cue2_position = expt_cues(e2).ix[
                expt_cues(e2).cue == closest_cue, 'pos_complex'].values[0]
            e2_distance = angle(
                [c2.real, c2.imag], [cue2_position.real, cue2_position.imag])
            data_dict = {'first_expt': e1,
                         'second_expt': e2,
                         'first_centroid': c1,
                         'second_centroid': c2,
                         'roi_id': roi,
                         'cue': closest_cue,
                         'value': e2_distance}
            data_list.append(data_dict)
    result_df = pd.DataFrame(
        data_list, columns=['first_expt', 'second_expt', 'first_centroid',
                            'second_centroid', 'roi_id', 'cue', 'value'])
    if shuffle:
        shuffle_list = []
        # Null distribution: sample rows with replacement and swap in a
        # random ROI from the same experiment pair
        for _, row in result_df.sample(
                N_SHUFFLES, replace=True, axis=0).iterrows():
            same_expts_df = result_df.loc[
                (result_df.first_expt == row.first_expt) &
                (result_df.second_expt == row.second_expt)]
            # Instead of matching roi_id, choose a random ROI
            random_row = same_expts_df.sample(1).iloc[0]
            # How far is the random ROI in the second expt from the cue
            # preceding the original ROI in the first expt?
            c2 = random_row.second_centroid
            e2_cues = expt_cues(row.second_expt)
            e2_cue_pos = e2_cues.ix[e2_cues.cue == row.cue, 'pos_complex'].values[0]
            e2_distance = angle(
                [c2.real, c2.imag], [e2_cue_pos.real, e2_cue_pos.imag])
            shuffle_list.append(
                {'value': e2_distance,
                 'first_expt': row.first_expt,
                 'second_expt': row.second_expt,
                 'first_roi_id': row.roi_id,
                 'second_roi_id': random_row.roi_id,
                 'first_centroid': row.first_centroid,
                 'second_centroid': c2,
                 'cue': row.cue})
        shuffle_df = pd.DataFrame(shuffle_list, columns=[
            'first_expt', 'second_expt', 'first_roi_id', 'second_roi_id',
            'first_centroid', 'second_centroid', 'cue', 'value'])
    else:
        shuffle_df = None
    return result_df, shuffle_df
@memoize
def place_field_centroid(
        expt_grp, roi_filter=None, normalized=False,
        drop_multi_peaked_pfs=False):
    """Return the position of the centroid of each place field.

    Parameters
    ----------
    expt_grp : lab.classes.pcExperimentGroup
    roi_filter : filter function
    normalized : bool
        If True, centroids are expressed as normalized belt positions on
        [0, 1); otherwise they stay in position-bin units.
    drop_multi_peaked_pfs : bool
        If True, ROIs with more than one place field are omitted entirely;
        otherwise each field contributes its own row.

    Returns
    -------
    pd.DataFrame
        Columns 'expt' (lab.Experiment), 'roi' (sima.ROI.ROI), 'idx'
        (field index, ordered by place-field peak size), and 'value'
        (centroid position).
    """
    pfs = expt_grp.pfs(roi_filter=roi_filter)
    tuning = expt_grp.data(roi_filter=roi_filter)
    rois = expt_grp.rois(
        channel=expt_grp.args['channel'], label=expt_grp.args['imaging_label'],
        roi_filter=roi_filter)
    rows = []
    for expt in expt_grp:
        all_centroids = calcCentroids(tuning[expt], pfs[expt], returnAll=True)
        assert len(all_centroids) == len(rois[expt])
        for roi, centroid_list in zip(rois[expt], all_centroids):
            if drop_multi_peaked_pfs and len(centroid_list) > 1:
                continue
            rows.extend(
                {'expt': expt, 'roi': roi, 'idx': field_idx, 'value': pos}
                for field_idx, pos in enumerate(centroid_list))
    result = pd.DataFrame(rows, columns=['expt', 'roi', 'idx', 'value'])
    if normalized:
        result['value'] /= expt_grp.args['nPositionBins']
    return result
@memoize
def place_field_gain(expt_grp, roi_filter=None):
    """Return the gain of each place cell.

    The 'gain' of an ROI is the peak-to-trough span of its spatial tuning
    curve: ``max(tuning_curve) - min(tuning_curve)``.

    Returns
    -------
    pd.DataFrame
        Columns 'expt', 'roi', 'value'; one row per ROI.
    """
    rois = expt_grp.rois(
        channel=expt_grp.args['channel'], label=expt_grp.args['imaging_label'],
        roi_filter=roi_filter)
    curves = expt_grp.data(roi_filter=roi_filter)
    rows = [{'expt': expt, 'roi': roi, 'value': curve.max() - curve.min()}
            for expt in expt_grp
            for roi, curve in zip(rois[expt], curves[expt])]
    return pd.DataFrame(rows, columns=['expt', 'roi', 'value'])
| {
"repo_name": "losonczylab/Zaremba_NatNeurosci_2017",
"path": "losonczy_analysis_bundle/lab/analysis/place_cell_analysis.py",
"copies": "1",
"size": "137809",
"license": "mit",
"hash": 5799012853106272000,
"line_mean": 37.7975788288,
"line_max": 119,
"alpha_frac": 0.5528739052,
"autogenerated": false,
"ratio": 3.5248874565172907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4577761361717291,
"avg_score": null,
"num_lines": null
} |
"""Analysis functions (mostly for psychophysics data).
"""
from collections import namedtuple
import warnings
import numpy as np
import scipy.stats as ss
from scipy.optimize import curve_fit
from .._utils import string_types
def press_times_to_hmfc(presses, targets, foils, tmin, tmax,
                        return_type='counts'):
    """Convert press times to hits/misses/FA/CR and reaction times

    Parameters
    ----------
    presses : list
        List of press times (in seconds).
    targets : list
        List of target times.
    foils : list
        List of foil (distractor) times.
    tmin : float
        Minimum time after a target/foil to consider a press, exclusive.
    tmax : float
        Maximum time after a target/foil to consider a press, inclusive.
        The final bounds will be :math:`(t_{min}, t_{max}]`.

        .. note:: Do not rely on floating point arithmetic to get
                  exclusive/inclusive bounds right consistently. Such
                  exact equivalence in floating point arithmetic should
                  not be relied upon.
    return_type : str | list of str
        A list containing one or more of ``['counts', 'rts']`` to return
        a tuple of outputs (see below for description).
        Can also be a single string, in which case only the single
        requested type is returned (not within a tuple).

    Returns
    -------
    hmfco : tuple
        5-element tuple of hits, misses, false alarms, correct rejections,
        and other presses (not within the window for a target or a masker).
        Only returned if ``'counts'`` is in ``return_type``.
    rts : tuple
        2-element tuple of reaction times for hits and false alarms.
        Only returned if ``'rts'`` is in ``return_type``.

    Notes
    -----
    Multiple presses within a single "target window" (i.e., between ``tmin``
    and ``tmax`` of a target) or "masker window" get treated as a single
    press by this function. However, there is no such de-bouncing of responses
    to "other" times.
    """
    known_types = ['counts', 'rts']
    # Normalize return_type to a list, remembering whether to unwrap later
    if isinstance(return_type, string_types):
        singleton = True
        return_type = [return_type]
    else:
        singleton = False
    for r in return_type:
        if not isinstance(r, string_types) or r not in known_types:
            raise ValueError('r must be one of %s, got %s' % (known_types, r))
    # Sanity check that targets and foils don't overlap (due to tmin/tmax)
    targets = np.atleast_1d(targets)
    foils = np.atleast_1d(foils)
    presses = np.sort(np.atleast_1d(presses))
    assert targets.ndim == foils.ndim == presses.ndim == 1
    # Stack as targets, then foils; the +/- inf sentinels catch presses
    # before the first or after the last stimulus
    stim_times = np.concatenate(([-np.inf], targets, foils, [np.inf]))
    order = np.argsort(stim_times)
    stim_times = stim_times[order]
    if not np.all(stim_times[:-1] + tmax <= stim_times[1:] + tmin):
        raise ValueError('Analysis windows for targets and foils overlap')
    # figure out what targ/mask times our presses correspond to
    # (each press maps to the latest stimulus whose window could contain it)
    press_to_stim = np.searchsorted(stim_times, presses - tmin) - 1
    if len(press_to_stim) > 0:
        assert press_to_stim.max() < len(stim_times)  # True b/c of np.inf
        assert press_to_stim.min() >= 0
    stim_times = stim_times[press_to_stim]
    order = order[press_to_stim]
    assert (stim_times <= presses).all()
    # figure out which presses were valid (to target or masker)
    valid_mask = ((presses >= stim_times + tmin) &
                  (presses <= stim_times + tmax))
    n_other = np.sum(~valid_mask)
    press_to_stim = press_to_stim[valid_mask]
    presses = presses[valid_mask]
    stim_times = stim_times[valid_mask]
    order = order[valid_mask]
    del valid_mask
    # De-bounce: np.unique keeps only the first (earliest, since presses are
    # sorted) press within each stimulus window
    press_to_stim, used_map_idx = np.unique(press_to_stim, return_index=True)
    presses = presses[used_map_idx]
    stim_times = stim_times[used_map_idx]
    order = order[used_map_idx]
    assert len(presses) == len(stim_times)
    diffs = presses - stim_times  # reaction times
    del used_map_idx
    # figure out which valid presses were to target or masker
    # (indices 1..len(targets) of the pre-sort concatenation were targets)
    target_mask = (order <= len(targets))
    n_hit = np.sum(target_mask)
    n_fa = len(target_mask) - n_hit
    n_miss = len(targets) - n_hit
    n_cr = len(foils) - n_fa
    outs = dict(counts=(n_hit, n_miss, n_fa, n_cr, n_other),
                rts=(diffs[target_mask], diffs[~target_mask]))
    assert outs['counts'][:4:2] == tuple(map(len, outs['rts']))
    outs = tuple(outs[r] for r in return_type)
    if singleton:
        outs = outs[0]
    return outs
def logit(prop, max_events=None):
    """Convert proportion (expressed in the range [0, 1]) to logit.

    Parameters
    ----------
    prop : float | array-like
        the occurrence proportion.
    max_events : int | array-like | None
        the number of events used to calculate ``prop``. Used in a correction
        factor for cases when ``prop`` is 0 or 1, to prevent returning ``inf``.
        If ``None``, no correction is done, and ``inf`` or ``-inf`` may result.

    Returns
    -------
    lgt : ``numpy.ndarray``, with shape matching ``numpy.array(prop).shape``.

    See Also
    --------
    scipy.special.logit
    """
    prop = np.atleast_1d(prop).astype(float)
    if np.any(prop > 1) or np.any(prop < 0):
        raise ValueError('Proportions must be in the range [0, 1].')
    if max_events is not None:
        # Replace exact 0s and 1s with the proportion corresponding to half
        # an event, so the logit stays finite. Vectorized boolean-mask
        # assignment instead of per-element Python loops.
        max_events = np.atleast_1d(max_events) * np.ones_like(prop)
        corr_factor = 0.5 / max_events
        zeros = prop == 0
        ones = prop == 1
        prop[zeros] = corr_factor[zeros]
        prop[ones] = 1 - corr_factor[ones]
    return np.log(prop / (1. - prop))
def sigmoid(x, lower=0., upper=1., midpt=0., slope=1.):
    """Evaluate a four-parameter logistic (sigmoid) function.

    Parameters
    ----------
    x : array-like
        Points at which to evaluate the sigmoid.
    lower : float
        Lower y-asymptote.
    upper : float
        Upper y-asymptote.
    midpt : float
        x-value at which the curve is halfway between the two asymptotes.
    slope : float
        Steepness of the transition.

    Returns
    -------
    y : array
        Sigmoid values evaluated at ``x``.
    """
    x = np.asarray(x)
    span = float(upper) - float(lower)
    shifted = x - float(midpt)
    return span / (1 + np.exp(-float(slope) * shifted)) + float(lower)
def fit_sigmoid(x, y, p0=None, fixed=()):
    """Fit a sigmoid to summary data

    Given a set of average values ``y`` (e.g., response probabilities) as a
    function of a variable ``x`` (e.g., presented target level), this
    will estimate the underlying sigmoidal response. Note that the fitting
    function can be sensitive to the shape of the data, so always inspect
    your results.

    Parameters
    ----------
    x : array-like
        x-values along the sigmoid.
    y : array-like
        y-values at each location in the sigmoid.
    p0 : array-like | None
        Initial guesses for the fit. Can be None to estimate all parameters,
        or members of the array can be None to have these automatically
        estimated.
    fixed : list of str
        Which parameters should be fixed.

    Returns
    -------
    lower, upper, midpt, slope : floats
        See expyfun.analyze.sigmoid for descriptions.
    """
    # Initial estimates
    x = np.asarray(x)
    y = np.asarray(y)
    # Default slope guess: ~8 logistic units of rise across the x-range
    k = 2 * 4. / (np.max(x) - np.min(x))
    if p0 is None:
        p0 = [None] * 4
    p0 = list(p0)
    # Fill any unspecified p0 entries with data-driven guesses:
    # lower=min(y), upper=max(y), midpt=center of x-range, slope=k
    for ii, p in enumerate([np.min(y), np.max(y),
                            np.mean([np.max(x), np.min(x)]), k]):
        p0[ii] = p if p0[ii] is None else p0[ii]
    p0 = np.array(p0, dtype=np.float64)
    if p0.size != 4 or p0.ndim != 1:
        raise ValueError('p0 must have 4 elements, or be None')
    # Fixing values
    p_types = ('lower', 'upper', 'midpt', 'slope')
    for f in fixed:
        if f not in p_types:
            raise ValueError('fixed {0} not in parameter list {1}'
                             ''.format(f, p_types))
    fixed = np.array([(True if f in fixed else False) for f in p_types], bool)
    # Fixed parameters go straight into kwargs; the free ones (tracked in
    # keys/idx) are the ones curve_fit will optimize.
    kwargs = dict()
    idx = list()
    keys = list()
    for ii, key in enumerate(p_types):
        if fixed[ii]:
            kwargs[key] = p0[ii]
        else:
            keys.append(key)
            idx.append(ii)
    p0 = p0[idx]
    if len(idx) == 0:
        raise RuntimeError('cannot fit with all fixed values')

    def wrapper(*args):
        # curve_fit calls this as wrapper(x, *free_params); merge the free
        # parameters into kwargs (closure) and evaluate the sigmoid.
        assert len(args) == len(keys) + 1
        for key, arg in zip(keys, args[1:]):
            kwargs[key] = arg
        return sigmoid(args[0], **kwargs)
    out = curve_fit(wrapper, x, y, p0=p0)[0]
    assert len(idx) == len(out)
    # Write the fitted values back so kwargs holds the full parameter set
    for ii, o in zip(idx, out):
        kwargs[p_types[ii]] = o
    return namedtuple('params', p_types)(**kwargs)
def rt_chisq(x, axis=None, warn=True):
    """Summarize reaction times as the peak of a fitted chi-square PDF.

    The mode of a chi-square distribution fit with the location fixed at
    zero is a more robust summary of skewed reaction-time data than the
    mean.

    Parameters
    ----------
    x : array-like
        Reaction time data to fit; all values must be non-negative.
    axis : int | None
        Axis along which to fit. If None, ``x`` is flattened before fitting.
    warn : bool
        If True, warn when likely-bad values (outside the 1.5-IQR whiskers)
        are present.

    Returns
    -------
    peak : float | array-like
        Peak(s) of the fitted chi-square probability density function(s),
        i.e. ``max(df - 2, 0) * scale``.
    """
    x = np.asarray(x)
    if np.any(x < 0):  # save the user some pain
        raise ValueError('x cannot have negative values')
    if axis is None:
        df, _, scale = ss.chi2.fit(x, floc=0)
    else:
        def _fit_slice(vals):
            return np.array(ss.chi2.fit(vals, floc=0))
        params = np.apply_along_axis(_fit_slice, axis=axis, arr=x)
        # Move the parameter axis to the front so rows 0 and 2 are the
        # per-slice df and scale values
        perm = np.concatenate((np.atleast_1d(axis),
                               np.delete(np.arange(x.ndim), axis)))
        reordered = np.transpose(params, perm)
        df = reordered[0]
        scale = reordered[2]
    # Flag outliers using standard Tukey 1.5-IQR whiskers
    q1, q3 = np.percentile(x, (25, 75))
    iqr = q3 - q1
    low = q1 - 1.5 * iqr
    high = q3 + 1.5 * iqr
    n_bad = np.sum((x < low) | (x > high))
    if n_bad > 0 and warn:
        warnings.warn('{0} likely bad values in x (of {1})'
                      ''.format(n_bad, x.size))
    return np.maximum(0, (df - 2)) * scale
def dprime(hmfc, zero_correction=True):
    """Estimate d-prime from hit/miss/false-alarm/correct-rejection counts.

    Parameters
    ----------
    hmfc : array-like
        Hits, misses, false-alarms, and correct-rejections, in that order,
        along the last dimension (which must have size 4).
    zero_correction : bool
        If True, add a correction factor of 0.5 to each category so that
        empty or perfect categories do not produce infinite d-prime values.

    Returns
    -------
    dp : array-like
        d-prime values with shape ``hmfc.shape[:-1]``.

    Notes
    -----
    For two-alternative forced-choice tasks, it is recommended to enter
    correct trials as hits and incorrect trials as false alarms, with misses
    and correct rejections set to 0. ``dprime_2afc()`` wraps this function
    and does that assignment for you.
    """
    counts = _check_dprime_inputs(hmfc)
    corr = 0.5 if zero_correction else 0.0
    hit_rate = ((counts[..., 0] + corr) /
                (counts[..., 0] + counts[..., 1] + 2 * corr))
    fa_rate = ((counts[..., 2] + corr) /
               (counts[..., 2] + counts[..., 3] + 2 * corr))
    return ss.norm.ppf(hit_rate) - ss.norm.ppf(fa_rate)
def dprime_2afc(hm, zero_correction=True):
    """Estimate d-prime for a two-alternative forced-choice paradigm.

    Parameters
    ----------
    hm : array-like
        Correct trials (hits) and incorrect trials (misses), in that order,
        along the last dimension (which must have size 2).
    zero_correction : bool
        If True, add a correction factor of 0.5 to each category to prevent
        division-by-zero leading to infinite d-prime values.

    Returns
    -------
    dp : array-like
        d-prime values with shape ``hm.shape[:-1]``.
    """
    # Expand (hits, misses) to (H, 0, FA, 0) and defer to dprime()
    return dprime(_check_dprime_inputs(hm, True), zero_correction)
def _check_dprime_inputs(hmfc, tafc=False):
"""Formats input to dprime() and dprime_2afc().
Parameters
----------
hmfc : array-like
Hit, miss, false-alarm, correct-rejection; or hit, miss for 2AFC.
tafc : bool
Is this a 2AFC design?
"""
hmfc = np.asarray(hmfc)
if tafc:
if hmfc.shape[-1] != 2:
raise ValueError('Array must have last dimension 2.')
else:
if hmfc.shape[-1] != 4:
raise ValueError('Array must have last dimension 4')
if tafc:
z = np.zeros(hmfc.shape[:-1] + (4,), hmfc.dtype)
z[..., [0, 2]] = hmfc
hmfc = z
if hmfc.dtype not in (np.int64, np.int32):
warnings.warn('Argument (%s) to dprime() cast to np.int64; floating '
'point values will have been truncated.' % hmfc.dtype)
hmfc = hmfc.astype(np.int64)
return hmfc
| {
"repo_name": "rkmaddox/expyfun",
"path": "expyfun/analyze/_analyze.py",
"copies": "1",
"size": "13837",
"license": "bsd-3-clause",
"hash": -8320991507887767000,
"line_mean": 33.9419191919,
"line_max": 79,
"alpha_frac": 0.5913131459,
"autogenerated": false,
"ratio": 3.527147591129238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46184607370292374,
"avg_score": null,
"num_lines": null
} |
"""Analysis functions (mostly for psychophysics data).
"""
import numpy as np
from ..visual import FixationDot
from ..analyze import sigmoid
from .._utils import logger, verbose_dec
from ..stimuli import window_edges
def _check_pyeparse():
"""Helper to ensure package is available"""
try:
import pyeparse # noqa analysis:ignore
except ImportError:
raise ImportError('Cannot run, requires "pyeparse" package')
def _load_raw(el, fname):
    """Helper to load some pupil data

    Returns the parsed ``pyeparse.RawEDF`` (blink artifacts removed) and the
    'SYNCTIME' events marking trial onsets.
    """
    import pyeparse
    # Retrieve the file via the EyeLink controller; returns a local path
    # (presumably copied from the tracker host -- per the method name)
    fname = el.transfer_remote_file(fname)
    # Load and parse data
    logger.info('Pupillometry: Parsing local file "{0}"'.format(fname))
    raw = pyeparse.RawEDF(fname)
    raw.remove_blink_artifacts()
    # Trial onsets were tagged with 'SYNCTIME' messages (event id 1)
    events = raw.find_events('SYNCTIME', 1)
    return raw, events
@verbose_dec
def find_pupil_dynamic_range(ec, el, prompt=True, verbose=None):
    """Find pupil dynamic range

    Parameters
    ----------
    ec : instance of ExperimentController
        The experiment controller.
    el : instance of EyelinkController
        The Eyelink controller.
    prompt : bool
        If True, a standard prompt message will be displayed.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see expyfun.verbose).

    Returns
    -------
    bgcolor : array
        The background color that maximizes dynamic range.
    fcolor : array
        The corresponding fixation dot color.
    levels : array
        The levels shown.
    responses : array
        The average responses to each level.

    Notes
    -----
    If ``el.dummy_mode`` is on, the test will run at around 10x the speed.
    """
    _check_pyeparse()
    import pyeparse
    if el.recording:
        el.stop()
    el.calibrate()
    if prompt:
        ec.screen_prompt('We will now determine the dynamic '
                         'range of your pupil.\n\n'
                         'Press a button to continue.')
    # Gray levels to test: black plus powers of two (out of 255)
    levels = np.concatenate(([0.], 2 ** np.arange(8) / 255.))
    # Fixation dot kept slightly brighter than the background at each level
    fixs = levels + 0.2
    n_rep = 2
    # inter-rep interval (allow system to reset)
    iri = 10.0 if not el.dummy_mode else 1.0
    # amount of time between levels
    settle_time = 3.0 if not el.dummy_mode else 0.3
    fix = FixationDot(ec)
    fix.set_colors([fixs[0] * np.ones(3), 'k'])
    ec.set_background_color('k')
    fix.draw()
    ec.flip()
    for ri in range(n_rep):
        ec.wait_secs(iri)
        # Present each level once, letting the pupil settle at each
        for ii, (lev, fc) in enumerate(zip(levels, fixs)):
            ec.identify_trial(ec_id='FPDR_%02i' % (ii + 1),
                              el_id=[ii + 1], ttl_id=())
            bgcolor = np.ones(3) * lev
            fcolor = np.ones(3) * fc
            ec.set_background_color(bgcolor)
            fix.set_colors([fcolor, bgcolor])
            fix.draw()
            ec.start_stimulus()
            ec.wait_secs(settle_time)
            ec.check_force_quit()
            ec.stop()
            ec.trial_ok()
        # Return to black between repetitions
        ec.set_background_color('k')
        fix.set_colors([fixs[0] * np.ones(3), 'k'])
        fix.draw()
        ec.flip()
    el.stop()  # stop the recording
    ec.screen_prompt('Processing data, please wait...', max_wait=0,
                     clear_after=False)
    # now we need to parse the data
    if el.dummy_mode:
        # Fabricate a plausible sigmoidal pupil response plus uniform noise
        resp = sigmoid(np.tile(levels, n_rep), 1000, 3000, 0.01, -100)
        resp += np.random.rand(*resp.shape) * 500 - 250
    else:
        # Pull data locally
        assert len(el.file_list) >= 1
        raw, events = _load_raw(el, el.file_list[-1])
        assert len(events) == len(levels) * n_rep
        epochs = pyeparse.Epochs(raw, events, 1, -0.5, settle_time)
        assert len(epochs) == len(levels) * n_rep
        # Median pupil size over the second half of each epoch
        idx = epochs.n_times // 2
        resp = np.median(epochs.get_data('ps')[:, idx:], 1)
    # Mean response per level across repetitions (note: the 'bgcolor' name
    # is reused here before being overwritten with the chosen color below)
    bgcolor = np.mean(resp.reshape((n_rep, len(levels))), 0)
    # Choose the level just after the steepest drop in mean pupil size
    idx = np.argmin(np.diff(bgcolor)) + 1
    bgcolor = levels[idx] * np.ones(3)
    fcolor = fixs[idx] * np.ones(3)
    logger.info('Pupillometry: optimal background color {0}'.format(bgcolor))
    return bgcolor, fcolor, np.tile(levels, n_rep), resp
def find_pupil_tone_impulse_response(ec, el, bgcolor, fcolor, prompt=True,
                                     verbose=None, targ_is_fm=True):
    """Find pupil impulse response using responses to tones

    Presents a long sequence of short tones (a minority of which are
    "deviant" targets the subject must respond to) while recording pupil
    size, then averages the pupil traces around the non-target tones to
    estimate the pupil's impulse response to sound.

    Parameters
    ----------
    ec : instance of ExperimentController
        The experiment controller.
    el : instance of EyelinkController
        The Eyelink controller.
    bgcolor : color
        Background color to use.
    fcolor : color
        Fixation dot color to use.
    prompt : bool
        If True, a standard prompt message will be displayed.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see expyfun.verbose).
        NOTE(review): not referenced in the body below — presumably consumed
        by a decorator or kept for API consistency; confirm before removing.
    targ_is_fm : bool
        If ``True`` then use frequency modulated tones as the target and
        constant frequency tones as the non-target stimuli. Otherwise use
        constant frequency tones as targets and fm tones as non-targets.

    Returns
    -------
    srf : array
        The pupil response function to sound.
    t : array
        The time points for the response function.
    std_err : array
        The standard error as a function of time.

    Notes
    -----
    If ``el.dummy_mode`` is on, the test will run at around 10x the speed.
    """
    _check_pyeparse()
    import pyeparse
    if el.recording:
        el.stop()
    #
    # Determine parameters / randomization
    #
    n_stimuli = 300 if not el.dummy_mode else 10
    cal_stim = [0, 75, 150, 225]  # when to offer the subject a break
    delay_range = (3.0, 5.0) if not el.dummy_mode else (0.3, 0.5)
    delay_range = np.array(delay_range)
    targ_prop = 0.25
    stim_dur = 100e-3
    f0 = 1000.  # Hz
    # Fixed seed so the trial order is reproducible across sessions
    rng = np.random.RandomState(0)
    isis = np.linspace(*delay_range, num=n_stimuli)
    n_targs = int(targ_prop * n_stimuli)
    targs = np.zeros(n_stimuli, bool)
    # Spread targets evenly across the sequence before shuffling
    targs[np.linspace(0, n_stimuli - 1, n_targs + 2)[1:-1].astype(int)] = True
    while(True):  # ensure we randomize but don't start with a target
        idx = rng.permutation(np.arange(n_stimuli))
        isis = isis[idx]
        targs = targs[idx]
        if not targs[0]:
            break
    #
    # Generate stimuli
    #
    fs = ec.stim_fs
    n_samp = int(fs * stim_dur)
    t = np.arange(n_samp).astype(float) / fs
    # "steady": constant-frequency tone; "wobble": FM tone whose
    # instantaneous frequency swings +/-100 Hz over the stimulus duration
    steady = np.sin(2 * np.pi * f0 * t)
    wobble = np.sin(np.cumsum(f0 + 100 * np.sin(2 * np.pi * (1 / stim_dur) * t)
                              ) / fs * 2 * np.pi)
    std_stim, dev_stim = (steady, wobble) if targ_is_fm else (wobble, steady)
    # Scale to the controller's RMS convention and taper the edges
    std_stim = window_edges(std_stim * ec._stim_rms * np.sqrt(2), fs)
    dev_stim = window_edges(dev_stim * ec._stim_rms * np.sqrt(2), fs)
    #
    # Subject "Training"
    #
    ec.stop()
    ec.set_background_color(bgcolor)
    targstr, tonestr = ('wobble', 'beep') if targ_is_fm else ('beep', 'wobble')
    instr = ('Remember to press the button as quickly as possible following '
             'each "{}" sound.\n\nPress the response button to '
             'continue.'.format(targstr))
    if prompt:
        # Play one example of each stimulus type before the task starts
        notes = [('We will now determine the response of your pupil to sound '
                  'changes.\n\nYour job is to press the response button '
                  'as quickly as possible when you hear a "{1}" instead '
                  'of a "{0}".\n\nPress a button to hear the "{0}".'
                  ''.format(tonestr, targstr)),
                 ('Now press a button to hear the "{}".'.format(targstr))]
        for text, stim in zip(notes, (std_stim, dev_stim)):
            ec.screen_prompt(text)
            ec.load_buffer(stim)
            ec.wait_secs(0.5)
            ec.play()
            ec.wait_secs(0.5)
            ec.stop()
    ec.screen_prompt(instr)
    fix = FixationDot(ec, colors=[fcolor, bgcolor])
    flip_times = list()
    presses = list()
    assert 0 in cal_stim
    for ii, (isi, targ) in enumerate(zip(isis, targs)):
        if ii in cal_stim:
            # Break + recalibration block; each such block starts a new
            # Eyelink recording file (relied upon during parsing below)
            if ii != 0:
                el.stop()
                perc = round((100. * ii) / n_stimuli)
                ec.screen_prompt('Great work! You are {0}% done.\n\nFeel '
                                 'free to take a break, then press the '
                                 'button to continue.'.format(perc))
            el.calibrate()
            ec.screen_prompt(instr)
            # let's put the initial color up to allow the system to settle
            fix.draw()
            ec.flip()
            ec.wait_secs(10.0)  # let the pupil settle
        fix.draw()
        ec.load_buffer(dev_stim if targ else std_stim)
        ec.identify_trial(ec_id='TONE_{0}'.format(int(targ)),
                          el_id=[int(targ)], ttl_id=[int(targ)])
        flip_times.append(ec.start_stimulus())
        presses.append(ec.wait_for_presses(isi))
        ec.stop()
        ec.trial_ok()
    el.stop()  # stop the recording
    ec.screen_prompt('Processing data, please wait...', max_wait=0,
                     clear_after=False)
    flip_times = np.array(flip_times)
    tmin = -0.5
    if el.dummy_mode:
        # Synthesize a plausible pupil response instead of parsing real data
        pk = pyeparse.utils.pupil_kernel(el.fs, 3.0 - tmin)
        response = np.zeros(len(pk))
        offset = int(el.fs * 0.5)
        response[offset:] = pk[:-offset]
        std_err = np.ones_like(response) * 0.1 * response.max()
        std_err += np.random.rand(std_err.size) * 0.1 * response.max()
    else:
        raws = list()
        events = list()
        # One file per calibration block (see cal_stim above)
        assert len(el.file_list) >= 4
        for fname in el.file_list[-4:]:
            raw, event = _load_raw(el, fname)
            raws.append(raw)
            events.append(event)
        assert sum(len(event) for event in events) == n_stimuli
        epochs = pyeparse.Epochs(raws, events, 1,
                                 tmin=tmin, tmax=delay_range[0])
        response = epochs.pupil_zscores()
        assert response.shape[0] == n_stimuli
        # Average only non-target trials (no motor response contamination)
        std_err = np.std(response[~targs], axis=0)
        std_err /= np.sqrt(np.sum(~targs))
        response = np.mean(response[~targs], axis=0)
    t = np.arange(len(response)).astype(float) / el.fs + tmin
    return response, t, std_err
| {
"repo_name": "LABSN/expyfun",
"path": "expyfun/codeblocks/_pupillometry.py",
"copies": "2",
"size": "10207",
"license": "bsd-3-clause",
"hash": 7489609465371853000,
"line_mean": 34.6888111888,
"line_max": 79,
"alpha_frac": 0.5716665034,
"autogenerated": false,
"ratio": 3.4576558265582658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 286
} |
"""Analysis functions (mostly for psychophysics data).
"""
import warnings
import numpy as np
import scipy.stats as ss
from scipy.optimize import curve_fit
from functools import partial
from collections import namedtuple
def press_times_to_hmfc(presses, targets, foils, tmin, tmax,
                        return_type='counts'):
    """Convert press times to hits/misses/FA/CR

    Parameters
    ----------
    presses : list
        List of press times (in seconds).
    targets : list
        List of target times.
    foils : list | None
        List of foil (distractor) times. ``None`` is treated as no foils.
    tmin : float
        Minimum time after a target/foil to consider a press.
    tmax : float
        Maximum time after a target/foil to consider a press.
    return_type : str
        Currently only ``'counts'`` is supported. Eventually we will
        add reaction-time support as well.

    Returns
    -------
    hmfco : tuple
        Hits, misses, false alarms, correct rejections, and other presses
        (not within the window for a target or a masker).

    Raises
    ------
    ValueError
        If ``return_type`` is unsupported, or if the target/foil analysis
        windows overlap given ``tmin``/``tmax``.

    Notes
    -----
    Multiple presses within a single "target window" (i.e., between ``tmin``
    and ``tmax`` of a target) or "masker window" get treated as a single
    press by this function. However, there is no such de-bouncing of
    responses to "other" times.
    """
    # Validate the (previously silently-ignored) return_type argument
    if return_type != 'counts':
        raise ValueError('return_type must be "counts", got %r'
                         % (return_type,))
    if foils is None:  # documented as allowed; treat as "no foils"
        foils = []
    # Shift stimulus times by tmin so each response window is [time, time+dur)
    targets = np.atleast_1d(targets) + tmin
    foils = np.atleast_1d(foils) + tmin
    dur = float(tmax - tmin)
    assert dur > 0
    presses = np.sort(np.atleast_1d(presses))
    assert targets.ndim == foils.ndim == presses.ndim == 1
    # Sentinels at +/- inf keep searchsorted results strictly in bounds
    all_times = np.concatenate(([-np.inf], targets, foils, [np.inf]))
    order = np.argsort(all_times)
    inv_order = np.argsort(order)
    all_times = all_times[order]
    if not np.all(all_times[:-1] + dur <= all_times[1:]):
        raise ValueError('Analysis windows for targets and foils overlap')
    # Let's just loop (could probably be done with vector math, but it's
    # too hard and unlikely to be correct)
    locs = np.searchsorted(all_times, presses, 'right')
    if len(locs) > 0:
        assert locs.max() < len(all_times)  # should be True b/c of np.inf
        assert locs.min() >= 1
    # figure out which presses were to target or masker (valid_idx)
    in_window = (presses <= all_times[locs - 1] + dur)
    valid_idx = np.where(in_window)[0]
    n_other = np.sum(~in_window)
    # figure out which of valid presses were to target or masker;
    # unique() de-bounces multiple presses in the same window
    used = np.unique(locs[valid_idx])
    # map back to pre-sort positions; indices < len(targets) were targets
    orig_places = (inv_order[used - 1] - 1)
    n_hit = sum(orig_places < len(targets))
    n_fa = len(used) - n_hit
    n_miss = len(targets) - n_hit
    n_cr = len(foils) - n_fa
    return n_hit, n_miss, n_fa, n_cr, n_other
def logit(prop, max_events=None):
    """Convert proportion (expressed in the range [0, 1]) to logit.

    Parameters
    ----------
    prop : float | array-like
        the occurrence proportion.
    max_events : int | array-like | None
        the number of events used to calculate ``prop``. Used in a correction
        factor for cases when ``prop`` is 0 or 1, to prevent returning ``inf``.
        If ``None``, no correction is done, and ``inf`` or ``-inf`` may result.

    Returns
    -------
    lgt : ``numpy.ndarray``, with shape matching ``numpy.array(prop).shape``.
    """
    prop = np.atleast_1d(prop).astype(float)
    if np.any(prop < 0) or np.any(prop > 1):
        raise ValueError('Proportions must be in the range [0, 1].')
    if max_events is not None:
        # Nudge exact 0s up and exact 1s down by "half an event" so the
        # log-odds stay finite.
        max_events = np.atleast_1d(max_events) * np.ones_like(prop)
        half_event = 0.5 / max_events
        at_zero = prop == 0
        at_one = prop == 1
        prop[at_zero] = half_event[at_zero]
        prop[at_one] = 1 - half_event[at_one]
    return np.log(prop / (1. - prop))
def sigmoid(x, lower=0., upper=1., midpt=0., slope=1.):
    """Calculate sigmoidal values along the x-axis

    Parameters
    ----------
    x : array-like
        x-values to calculate the sigmoidal values from.
    lower : float
        The lower y-asymptote.
    upper : float
        The upper y-asymptote.
    midpt : float
        The x-value that obtains 50% between the lower and upper asymptote.
    slope : float
        The slope of the sigmoid.

    Returns
    -------
    y : array
        The y-values of the sigmoid evaluated at x.
    """
    x = np.asarray(x)
    lower = float(lower)
    span = float(upper) - lower
    # Standard logistic, scaled to [lower, upper] and shifted to midpt
    z = float(slope) * (x - float(midpt))
    return span / (1 + np.exp(-z)) + lower
def fit_sigmoid(x, y, p0=None, fixed=()):
    """Fit a sigmoid to summary data

    Given a set of average values ``y`` (e.g., response probabilities) as a
    function of a variable ``x`` (e.g., presented target level), this
    will estimate the underlying sigmoidal response. Note that the fitting
    function can be sensitive to the shape of the data, so always inspect
    your results.

    Parameters
    ----------
    x : array-like
        x-values along the sigmoid.
    y : array-like
        y-values at each location in the sigmoid.
    p0 : array-like | None
        Initial guesses for the fit. Can be None to estimate all parameters,
        or members of the array can be None to have these automatically
        estimated.
    fixed : list of str
        Which parameters should be fixed.

    Returns
    -------
    lower, upper, midpt, slope : floats
        See expyfun.analyze.sigmoid for descriptions.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    # Data-driven defaults: y extrema, x midpoint, slope ~ 8 / x-range
    slope_guess = 2 * 4. / (np.max(x) - np.min(x))
    defaults = (np.min(y), np.max(y), np.mean([np.max(x), np.min(x)]),
                slope_guess)
    if p0 is None:
        p0 = [None] * 4
    p0 = list(p0)
    # Fill in any missing initial guesses from the defaults
    for ii in range(4):
        if p0[ii] is None:
            p0[ii] = defaults[ii]
    p0 = np.array(p0, dtype=np.float64)
    if p0.size != 4 or p0.ndim != 1:
        raise ValueError('p0 must have 4 elements, or be None')
    # Validate the names of fixed parameters
    p_types = ('lower', 'upper', 'midpt', 'slope')
    for f in fixed:
        if f not in p_types:
            raise ValueError('fixed {0} not in parameter list {1}'
                             ''.format(f, p_types))
    is_fixed = np.array([name in fixed for name in p_types], bool)
    # Partition parameters: fixed ones pinned in kwargs, the rest fitted
    kwargs = dict()
    free_keys = list()
    free_idx = list()
    for ii, key in enumerate(p_types):
        if is_fixed[ii]:
            kwargs[key] = p0[ii]
        else:
            free_keys.append(key)
            free_idx.append(ii)
    p0 = p0[free_idx]
    if len(free_idx) == 0:
        raise RuntimeError('cannot fit with all fixed values')

    def wrapper(*args):
        # curve_fit calls with (x, *free_params); merge the free params
        # back into kwargs before evaluating the sigmoid
        assert len(args) == len(free_keys) + 1
        for key, arg in zip(free_keys, args[1:]):
            kwargs[key] = arg
        return sigmoid(args[0], **kwargs)

    best = curve_fit(wrapper, x, y, p0=p0)[0]
    assert len(free_idx) == len(best)
    for ii, value in zip(free_idx, best):
        kwargs[p_types[ii]] = value
    return namedtuple('params', p_types)(**kwargs)
def rt_chisq(x, axis=None):
    """Chi square fit for reaction times (a better summary statistic than mean)

    Parameters
    ----------
    x : array-like
        Reaction time data to fit.
    axis : int | None
        The axis along which to calculate the chi-square fit. If none, ``x``
        will be flattened before fitting.

    Returns
    -------
    peak : float | array-like
        The peak(s) of the fitted chi-square probability density function(s).

    Notes
    -----
    Verify that it worked by plotting pdf vs hist (for 1-dimensional x)::

        >>> import numpy as np
        >>> from scipy import stats as ss
        >>> import matplotlib.pyplot as plt
        >>> plt.ion()
        >>> x = np.abs(np.random.randn(10000) + 1)
        >>> lsp = np.linspace(np.floor(np.amin(x)), np.ceil(np.amax(x)), 100)
        >>> df, loc, scale = ss.chi2.fit(x, floc=0)
        >>> pdf = ss.chi2.pdf(lsp, df, scale=scale)
        >>> plt.plot(lsp, pdf)
        >>> plt.hist(x, normed=True)
    """
    x = np.asarray(x)
    if np.any(np.less(x, 0)):  # save the user some pain
        raise ValueError('x cannot have negative values')
    if axis is None:
        # Flattened fit (loc fixed at 0); loc output is discarded
        df, _, scale = ss.chi2.fit(x, floc=0)
    else:
        # Fit independently along the requested axis, then move the
        # parameter axis to the front to split df and scale out
        fit_fun = partial(ss.chi2.fit, floc=0)
        params = np.apply_along_axis(fit_fun, axis=axis, arr=x)  # df, loc, scale
        pmut = np.concatenate((np.atleast_1d(axis),
                               np.delete(np.arange(x.ndim), axis)))
        df = np.transpose(params, pmut)[0]
        scale = np.transpose(params, pmut)[2]
    # Tukey-style outlier screen: warn if values fall outside 1.5 * IQR
    quartiles = np.percentile(x, (25, 75))
    whiskers = quartiles + np.array((-1.5, 1.5)) * np.diff(quartiles)
    n_bad = np.sum((x < whiskers[0]) | (x > whiskers[1]))
    if n_bad > 0:
        warnings.warn('{0} likely bad values in x (of {1})'
                      ''.format(n_bad, x.size))
    # Mode of the chi-square pdf: (df - 2) * scale, clipped at 0
    return np.maximum(0, df - 2) * scale
def dprime(hmfc, zero_correction=True):
    """Estimates d-prime, with optional correction factor to avoid infinites.

    Parameters
    ----------
    hmfc : array-like
        Hits, misses, false-alarms, and correct-rejections, in that order, as
        array-like data with last dimension having size 4.
    zero_correction : bool
        Whether to add a correction factor of 0.5 to each category to prevent
        division-by-zero leading to infinite d-prime values.

    Returns
    -------
    dp : array-like
        Array of dprimes with shape ``hmfc.shape[:-1]``.

    Notes
    -----
    For two-alternative forced-choice tasks, it is recommended to enter correct
    trials as hits and incorrect trials as false alarms, and enter misses and
    correct rejections as 0. An alternative is to use ``dprime_2afc()``, which
    wraps to ``dprime()`` and does this assignment for you.
    """
    hmfc = _check_dprime_inputs(hmfc)
    a = 0.5 if zero_correction else 0.0
    hits, misses = hmfc[..., 0], hmfc[..., 1]
    fas, crs = hmfc[..., 2], hmfc[..., 3]
    # d' = z(hit rate) - z(false-alarm rate), each rate optionally
    # regularized by adding half a trial to numerator and one to denominator
    hit_rate = (hits + a) / (hits + misses + 2 * a)
    fa_rate = (fas + a) / (fas + crs + 2 * a)
    return ss.norm.ppf(hit_rate) - ss.norm.ppf(fa_rate)
def dprime_2afc(hm, zero_correction=True):
    """Estimates d-prime for two-alternative forced-choice paradigms.

    Parameters
    ----------
    hm : array-like
        Correct trials (hits) and incorrect trials (misses), in that order, as
        array-like data with last dimension having size 2.
    zero_correction : bool
        Whether to add a correction factor of 0.5 to each category to prevent
        division-by-zero leading to infinite d-prime values.

    Returns
    -------
    dp : array-like
        Array of dprimes with shape ``hm.shape[:-1]``.
    """
    # Expand (hits, misses) into (hits, 0, misses, 0) and defer to dprime()
    return dprime(_check_dprime_inputs(hm, True), zero_correction)
def _check_dprime_inputs(hmfc, tafc=False):
"""Formats input to dprime() and dprime_2afc().
Parameters
----------
hmfc : array-like
Hit, miss, false-alarm, correct-rejection; or hit, miss for 2AFC.
tafc : bool
Is this a 2AFC design?
"""
hmfc = np.asarray(hmfc)
if tafc:
if hmfc.shape[-1] != 2:
raise ValueError('Array must have last dimension 2.')
else:
if hmfc.shape[-1] != 4:
raise ValueError('Array must have last dimension 4')
if tafc:
z = np.zeros(hmfc.shape[:-1] + (4,), hmfc.dtype)
z[..., [0, 2]] = hmfc
hmfc = z
if hmfc.dtype not in (np.int64, np.int32):
warnings.warn('Argument (%s) to dprime() cast to np.int64; floating '
'point values will have been truncated.' % hmfc.dtype)
hmfc = hmfc.astype(np.int64)
return hmfc
| {
"repo_name": "lkishline/expyfun",
"path": "expyfun/analyze/_analyze.py",
"copies": "1",
"size": "11928",
"license": "bsd-3-clause",
"hash": -5782529164248100000,
"line_mean": 33.1776504298,
"line_max": 79,
"alpha_frac": 0.5885311871,
"autogenerated": false,
"ratio": 3.4795799299883314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4568111117088331,
"avg_score": null,
"num_lines": null
} |
"""analysis grouping
Revision ID: 591a97d42d42
Revises: 33af744196bc
Create Date: 2014-02-11 13:40:07.664778
"""
# revision identifiers, used by Alembic.
revision = '591a97d42d42'
down_revision = '33af744196bc'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the analysis-grouping tables."""
    # Parent table: one row per named analysis group.
    op.create_table('proc_AnalysisGroupTable',
                    sa.Column('id', sa.BigInteger, primary_key=True),
                    sa.Column('name', sa.String(80)),
                    # MySQL-specific auto-updating modification timestamp.
                    sa.Column('last_modified', sa.TIMESTAMP,
                              server_default=sa.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP')),
                    sa.Column('create_date', sa.TIMESTAMP,
                              default=sa.func.now()))
    # Association table linking analyses (and their type) to a group.
    op.create_table('proc_AnalysisGroupSetTable',
                    sa.Column('id', sa.Integer, primary_key=True),
                    sa.Column('group_id', sa.BigInteger, sa.ForeignKey('proc_AnalysisGroupTable.id')),
                    sa.Column('analysis_id', sa.Integer, sa.ForeignKey('meas_AnalysisTable.id')),
                    sa.Column('analysis_type_id', sa.Integer, sa.ForeignKey('gen_AnalysisTypeTable.id')), )
def downgrade():
    """Drop the analysis-grouping tables created by :func:`upgrade`.

    The child table is dropped first: ``proc_AnalysisGroupSetTable`` holds a
    foreign key into ``proc_AnalysisGroupTable``, so dropping the parent
    first fails on backends that enforce referential integrity during DDL.
    """
    op.drop_table('proc_AnalysisGroupSetTable')
    op.drop_table('proc_AnalysisGroupTable')
| {
"repo_name": "USGSDenverPychron/pychron",
"path": "migration/versions/591a97d42d42_analysis_grouping.py",
"copies": "1",
"size": "1267",
"license": "apache-2.0",
"hash": -423161281346383940,
"line_mean": 35.2,
"line_max": 107,
"alpha_frac": 0.6140489345,
"autogenerated": false,
"ratio": 3.7596439169139466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9855988224141432,
"avg_score": 0.0035409254545030423,
"num_lines": 35
} |
"""analysismanager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Needed so the static()/media() URL helpers below can serve the assets used by the bootstrap-based templates
from django.conf.urls.static import static
from django.conf import settings
# Route table: admin site plus the sequencer app. The static() helpers append
# serving routes for STATIC_URL and MEDIA_URL (development-style file serving).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^projects/', include('sequencer.urls', namespace="sequencer")),
    #~ url(r'^cnv/', include('protonprojects.urls')),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"repo_name": "CARPEM/GalaxyDocker",
"path": "data-manager-hegp/analysisManager/analysismanager/analysismanager/urls.py",
"copies": "1",
"size": "1158",
"license": "mit",
"hash": -2101165795094498600,
"line_mean": 40.3571428571,
"line_max": 130,
"alpha_frac": 0.7176165803,
"autogenerated": false,
"ratio": 3.574074074074074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9740544712760073,
"avg_score": 0.010229188322800216,
"num_lines": 28
} |
"""Analysis methods for TACA."""
import glob
import logging
import os
import subprocess
from shutil import copyfile
from taca.illumina.HiSeqX_Runs import HiSeqX_Run
from taca.illumina.HiSeq_Runs import HiSeq_Run
from taca.illumina.MiSeq_Runs import MiSeq_Run
from taca.illumina.NextSeq_Runs import NextSeq_Run
from taca.illumina.NovaSeq_Runs import NovaSeq_Run
from taca.utils.config import CONFIG
from taca.utils.transfer import RsyncAgent
from taca.utils import statusdb
from flowcell_parser.classes import RunParametersParser
from io import open
logger = logging.getLogger(__name__)
def get_runObj(run):
    """Tries to read runParameters.xml to parse the type of sequencer
    and then return the respective Run object (MiSeq, HiSeq..)

    :param run: run name identifier
    :type run: string
    :rtype: Object
    :returns: returns the sequencer type object,
        None if the sequencer type is unknown or there was an error
    """
    # The Illumina control software writes either spelling of the file name
    # depending on version; accept both.
    if os.path.exists(os.path.join(run, 'runParameters.xml')):
        run_parameters_file = 'runParameters.xml'
    elif os.path.exists(os.path.join(run, 'RunParameters.xml')):
        run_parameters_file = 'RunParameters.xml'
    else:
        logger.error('Cannot find RunParameters.xml or runParameters.xml in the run folder for run {}'.format(run))
        return
    rppath = os.path.join(run, run_parameters_file)
    try:
        rp = RunParametersParser(os.path.join(run, run_parameters_file))
    except OSError:
        # Parsing failed: implicitly returns None
        logger.warn('Problems parsing the runParameters.xml file at {}. '
                    'This is quite unexpected. please archive the run {} manually'.format(rppath, run))
    else:
        # Do a case by case test because there are so many versions of RunParameters that there is no real other way
        runtype = rp.data['RunParameters'].get('Application', rp.data['RunParameters'].get('ApplicationName', ''))
        if 'Setup' in rp.data['RunParameters']:
            # This is the HiSeq2500, MiSeq, and HiSeqX case
            try:
                # Works for recent control software
                runtype = rp.data['RunParameters']['Setup']['Flowcell']
            except KeyError:
                # Use this as second resource but print a warning in the logs
                logger.warn('Parsing runParameters to fetch instrument type, '
                            'not found Flowcell information in it. Using ApplicationName')
                # Here makes sense to use get with default value '' ->
                # so that it doesn't raise an exception in the next lines
                # (in case ApplicationName is not found, get returns None)
                runtype = rp.data['RunParameters']['Setup'].get('ApplicationName', '')
        # NOTE(review): order matters below -- 'HiSeq X' must be tested
        # before the plain 'HiSeq' substring match would swallow it.
        if 'HiSeq X' in runtype:
            return HiSeqX_Run(run, CONFIG['analysis']['HiSeqX'])
        elif 'HiSeq' in runtype or 'TruSeq' in runtype:
            return HiSeq_Run(run, CONFIG['analysis']['HiSeq'])
        elif 'MiSeq' in runtype:
            return MiSeq_Run(run, CONFIG['analysis']['MiSeq'])
        elif 'NextSeq' in runtype:
            return NextSeq_Run(run, CONFIG['analysis']['NextSeq'])
        elif 'NovaSeq' in runtype:
            return NovaSeq_Run(run, CONFIG['analysis']['NovaSeq'])
        else:
            logger.warn('Unrecognized run type {}, cannot archive the run {}. '
                        'Someone as likely bought a new sequencer without telling '
                        'it to the bioinfo team'.format(runtype, run))
            return None
def upload_to_statusdb(run_dir):
    """Upload run_dir information to statusDB directly from the click interface.

    :param run_dir: run name identifier
    :type run_dir: string
    :rtype: None
    """
    run_obj = get_runObj(run_dir)
    # get_runObj returns None for unrecognized sequencers; nothing to upload then
    if not run_obj:
        return
    _upload_to_statusdb(run_obj)
def _upload_to_statusdb(run):
    """Triggers the upload to statusdb using the dependency flowcell_parser.

    Patches the parsed demultiplexing stats for NoIndex lanes (where the
    real read counts live under 'Undetermined') before writing the document.

    :param Run run: the object run
    """
    couch_conf = CONFIG['statusdb']
    couch_connection = statusdb.StatusdbSession(couch_conf).connection
    db = couch_connection[couch_conf['xten_db']]
    parser = run.runParserObj
    # Check if I have NoIndex lanes
    for element in parser.obj['samplesheet_csv']:
        if 'NoIndex' in element['index'] or not element['index']:  # NoIndex in the case of HiSeq, empty in the case of HiSeqX
            lane = element['Lane']  # This is a lane with NoIndex
            # In this case PF Cluster is the number of undetermined reads
            try:
                PFclusters = parser.obj['Undetermined'][lane]['unknown']
            except KeyError:
                logger.error('While taking extra care of lane {} of NoIndex type ' \
                             'I found out that not all values were available'.format(lane))
                continue
            # In Lanes_stats fix the lane yield
            parser.obj['illumina']['Demultiplex_Stats']['Lanes_stats'][int(lane) - 1]['PF Clusters'] = str(PFclusters)
            # Now fix Barcode lane stats
            updated = 0  # Check that only one update is made
            for sample in parser.obj['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']:
                if lane in sample['Lane']:
                    updated += 1
                    sample['PF Clusters'] = str(PFclusters)
            if updated != 1:
                # NOTE(review): hard exit on inconsistent stats is deliberate;
                # continuing would upload a corrupted document.
                logger.error('While taking extra care of lane {} of NoIndex type '
                             'I updated more than once the barcode_lane. '
                             'This is too much to continue so I will fail.'.format(lane))
                os.sys.exit()
            # If I am here it means I changed the HTML representation to something
            # else to accomodate the wired things we do
            # someone told me that in such cases it is better to put a place holder for this
            parser.obj['illumina']['Demultiplex_Stats']['NotOriginal'] = 'True'
    # Update info about bcl2fastq tool
    if not parser.obj.get('DemultiplexConfig'):
        parser.obj['DemultiplexConfig'] = {'Setup': {'Software': run.CONFIG.get('bcl2fastq', {})}}
    statusdb.update_doc(db, parser.obj, over_write_db_entry=True)
def transfer_run(run_dir):
    """Interface for click to force a transfer of a run to uppmax.

    :param string run_dir: the run to transfer
    """
    runObj = get_runObj(run_dir)
    # Fetch recipients once (the original re-fetched them redundantly in
    # the error branch).
    mail_recipients = CONFIG.get('mail', {}).get('recipients')
    if runObj is None:
        logger.error('Trying to force a transfer of run {} but the sequencer was not recognized.'.format(run_dir))
    else:
        runObj.transfer_run(os.path.join('nosync', CONFIG['analysis']['status_dir'], 'transfer.tsv'), mail_recipients)
def transfer_runfolder(run_dir, pid, exclude_lane):
    """Transfer the entire run folder for a specified project and run to uppmax.

    :param string run_dir: the run to transfer
    :param string pid: the project to include in the SampleSheet
    :param string exclude_lane: lanes to exclude separated by comma
    """
    original_sample_sheet = os.path.join(run_dir, 'SampleSheet.csv')
    new_sample_sheet = os.path.join(run_dir, pid + '_SampleSheet.txt')
    # Write new sample sheet including only rows for the specified project
    try:
        with open(new_sample_sheet, 'w') as nss:
            nss.write(extract_project_samplesheet(original_sample_sheet, pid))
    except IOError as e:
        logger.error('An error occured while parsing the samplesheet. '
                     'Please check the sample sheet and try again.')
        raise e
    # Create a tar archive of the runfolder
    dir_name = os.path.basename(run_dir)
    archive = dir_name + '.tar.gz'
    run_dir_path = os.path.dirname(run_dir)
    # Prepare the options for excluding lanes: skip per-lane image and
    # intensity directories for every lane the caller listed
    if exclude_lane != '':
        dir_for_excluding_lane = []
        lane_to_exclude = exclude_lane.split(',')
        for lane in lane_to_exclude:
            if os.path.isdir('{}/{}/Thumbnail_Images/L00{}'.format(run_dir_path, dir_name, lane)):
                dir_for_excluding_lane.extend(['--exclude', 'Thumbnail_Images/L00{}'.format(lane)])
            if os.path.isdir('{}/{}/Images/Focus/L00{}'.format(run_dir_path, dir_name, lane)):
                dir_for_excluding_lane.extend(['--exclude', 'Images/Focus/L00{}'.format(lane)])
            if os.path.isdir('{}/{}/Data/Intensities/L00{}'.format(run_dir_path, dir_name, lane)):
                dir_for_excluding_lane.extend(['--exclude', 'Data/Intensities/L00{}'.format(lane)])
            if os.path.isdir('{}/{}/Data/Intensities/BaseCalls/L00{}'.format(run_dir_path, dir_name, lane)):
                dir_for_excluding_lane.extend(['--exclude', 'Data/Intensities/BaseCalls/L00{}'.format(lane)])
    try:
        exclude_options_for_tar = ['--exclude', 'Demultiplexing*',
                                   '--exclude', 'demux_*',
                                   '--exclude', 'rsync*',
                                   '--exclude', '*.csv']
        if exclude_lane != '':
            exclude_options_for_tar += dir_for_excluding_lane
        # check_call (not call): call() never raises CalledProcessError on a
        # non-zero exit, which made the except clause below dead code.
        subprocess.check_call(['tar'] + exclude_options_for_tar +
                              ['-cvzf', archive, '-C', run_dir_path, dir_name])
    except subprocess.CalledProcessError as e:
        logger.error('Error creating tar archive')
        raise e
    # Generate the md5sum; the context manager closes the handle even if
    # md5sum fails
    md5file = archive + '.md5'
    try:
        with open(md5file, 'w') as f:
            subprocess.check_call(['md5sum', archive], stdout=f)
    except subprocess.CalledProcessError as e:
        logger.error('Error creating md5 file')
        raise e
    # Rsync the files to irma
    destination = CONFIG['analysis']['deliver_runfolder'].get('destination')
    rsync_opts = {'-Lav': None,
                  '--no-o': None,
                  '--no-g': None,
                  '--chmod': 'g+rw'}
    connection_details = CONFIG['analysis']['deliver_runfolder'].get('analysis_server')
    archive_transfer = RsyncAgent(archive,
                                  dest_path=destination,
                                  remote_host=connection_details['host'],
                                  remote_user=connection_details['user'],
                                  validate=False,
                                  opts=rsync_opts)
    md5_transfer = RsyncAgent(md5file,
                              dest_path=destination,
                              remote_host=connection_details['host'],
                              remote_user=connection_details['user'],
                              validate=False,
                              opts=rsync_opts)
    archive_transfer.transfer()
    md5_transfer.transfer()
    # clean up the generated files
    try:
        os.remove(new_sample_sheet)
        os.remove(archive)
        os.remove(md5file)
    except IOError as e:
        logger.error('Was not able to delete all temporary files')
        raise e
    return
def extract_project_samplesheet(sample_sheet, pid):
    """Return samplesheet text containing the header row(s) plus only the
    entries belonging to project ``pid``.

    :param string sample_sheet: path to the original SampleSheet.csv
    :param string pid: project identifier to filter on
    """
    header_lines = []
    project_lines = []
    with open(sample_sheet) as handle:
        for row in handle:
            if row.split(',')[0] in ('Lane', 'FCID'):  # header row
                header_lines.append(row)
            elif pid in row:  # row belonging to the requested project
                project_lines.append(row)
    # Headers first, then project rows, each group in original file order
    return ''.join(header_lines) + ''.join(project_lines)
def run_preprocessing(run, force_trasfer=True, statusdb=True):
    """Run demultiplexing in all data directories.

    :param str run: Process a particular run instead of looking for runs
    :param bool force_trasfer: if set to True the FC is transferred also if fails QC
    :param bool statusdb: True if we want to upload info to statusdb
    """
    def _process(run, force_trasfer):
        """Process a run/flowcell and transfer to analysis server.

        NOTE(review): ``force_trasfer`` is accepted but never read in this
        body -- confirm whether it is still needed.

        :param taca.illumina.Run run: Run to be processed and transferred
        """
        logger.info('Checking run {}'.format(run.id))
        t_file = os.path.join(CONFIG['analysis']['status_dir'], 'transfer.tsv')
        if run.is_transferred(t_file):
            # In this case I am either processing a run that is in transfer
            # or that has been already transferred. Do nothing.
            # time to time this situation is due to runs that are copied back from NAS after a reboot.
            # This check avoid failures
            logger.info('Run {} already transferred to analysis server, skipping it'.format(run.id))
            return
        if run.get_run_status() == 'SEQUENCING':
            # Check status files and say i.e Run in second read, maybe something
            # even more specific like cycle or something
            logger.info('Run {} is not finished yet'.format(run.id))
            # Upload to statusDB if applies
            if 'statusdb' in CONFIG:
                _upload_to_statusdb(run)
        elif run.get_run_status() == 'TO_START':
            if run.get_run_type() == 'NON-NGI-RUN':
                # For now MiSeq specific case. Process only NGI-run, skip all the others (PhD student runs)
                logger.warn('Run {} marked as {}, '
                            'TACA will skip this and move the run to '
                            'no-sync directory'.format(run.id, run.get_run_type()))
                # Archive the run if indicated in the config file
                if 'storage' in CONFIG:
                    run.archive_run(CONFIG['storage']['archive_dirs'][run.sequencer_type])
                return
            # Otherwise it is fine, process it
            logger.info(('Starting BCL to FASTQ conversion and demultiplexing for run {}'.format(run.id)))
            # Upload to statusDB if applies
            if 'statusdb' in CONFIG:
                _upload_to_statusdb(run)
            run.demultiplex_run()
        elif run.get_run_status() == 'IN_PROGRESS':
            logger.info(('BCL conversion and demultiplexing process in '
                         'progress for run {}, skipping it'.format(run.id)))
            # Upload to statusDB if applies
            if 'statusdb' in CONFIG:
                _upload_to_statusdb(run)
            # This function checks if demux is done
            run.check_run_status()
        # Previous elif might change the status to COMPLETED, therefore to avoid skipping
        # a cycle take the last if out of the elif
        if run.get_run_status() == 'COMPLETED':
            logger.info(('Preprocessing of run {} is finished, transferring it'.format(run.id)))
            # Upload to statusDB if applies
            if 'statusdb' in CONFIG:
                _upload_to_statusdb(run)
            # Notify with a mail run completion and stats uploaded
            msg = """The run {run} has been demultiplexed.
            The Run will be transferred to Irma for further analysis.
            The run is available at : https://genomics-status.scilifelab.se/flowcells/{run}
            """.format(run=run.id)
            run.send_mail(msg, rcp=CONFIG['mail']['recipients'])
            # Copy demultiplex stats file to shared file system for LIMS purpose
            if 'mfs_path' in CONFIG['analysis']:
                try:
                    mfs_dest = os.path.join(CONFIG['analysis']['mfs_path'][run.sequencer_type.lower()],run.id)
                    logger.info('Copying demultiplex stats for run {} to {}'.format(run.id, mfs_dest))
                    if not os.path.exists(mfs_dest):
                        os.mkdir(mfs_dest)
                    demulti_stat_src = os.path.join(run.run_dir, run.demux_dir, 'Reports',
                                                    'html', run.flowcell_id, 'all', 'all', 'all', 'laneBarcode.html')
                    copyfile(demulti_stat_src, os.path.join(mfs_dest, 'laneBarcode.html'))
                except:
                    # NOTE(review): bare except kept as-is; the stats copy is
                    # best-effort and must never abort the transfer
                    logger.warn('Could not copy demultiplex stat file for run {}'.format(run.id))
            # Transfer to analysis server if flag is True
            if run.transfer_to_analysis_server:
                mail_recipients = CONFIG.get('mail', {}).get('recipients')
                logger.info('Transferring run {} to {} into {}'
                            .format(run.id,
                                    run.CONFIG['analysis_server']['host'],
                                    run.CONFIG['analysis_server']['sync']['data_archive']))
                run.transfer_run(t_file, mail_recipients)
            # Archive the run if indicated in the config file
            if 'storage' in CONFIG:
                run.archive_run(CONFIG['storage']['archive_dirs'][run.sequencer_type])

    if run:
        # Needs to guess what run type I have (HiSeq, MiSeq, HiSeqX, NextSeq)
        runObj = get_runObj(run)
        if not runObj:
            raise RuntimeError('Unrecognized instrument type or incorrect run folder {}'.format(run))
        else:
            _process(runObj, force_trasfer)
    else:
        # No explicit run given: scan every configured data directory
        data_dirs = CONFIG.get('analysis').get('data_dirs')
        for data_dir in data_dirs:
            # Run folder looks like DATE_*_*_*, the last section is the FC name.
            # See Courtesy information from illumina of 10 June 2016 (no more XX at the end of the FC)
            runs = glob.glob(os.path.join(data_dir, '[1-9]*_*_*_*'))
            for _run in runs:
                runObj = get_runObj(_run)
                if not runObj:
                    logger.warning('Unrecognized instrument type or incorrect run folder {}'.format(run))
                else:
                    try:
                        _process(runObj, force_trasfer)
                    except:
                        # This function might throw and exception,
                        # it is better to continue processing other runs
                        logger.warning('There was an error processing the run {}'.format(run))
                        pass
| {
"repo_name": "SciLifeLab/TACA",
"path": "taca/analysis/analysis.py",
"copies": "1",
"size": "18011",
"license": "mit",
"hash": -195847128657923900,
"line_mean": 46.9015957447,
"line_max": 125,
"alpha_frac": 0.5901393593,
"autogenerated": false,
"ratio": 4.081350555177884,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171489914477884,
"avg_score": null,
"num_lines": null
} |
"""Analysis module for Databench."""
from __future__ import absolute_import, unicode_literals, division
from . import utils
from .datastore import Datastore
import inspect
import logging
import random
import string
import tornado.gen
import wrapt
log = logging.getLogger(__name__)
class ActionHandler(object):
    """Wraps a single action callback, optionally bound to an instance.

    Instances behave like bound methods: accessing the handler through an
    object via the descriptor protocol yields a copy carrying that object.
    """

    def __init__(self, action, f, bound_instance=None):
        self.action = action
        self.f = f
        self.bound_instance = bound_instance

    @tornado.gen.coroutine
    def __call__(self, *args, **kwargs):
        # Dispatch to the wrapped callable, inserting the bound instance
        # as the implicit first argument when one is attached.
        target = self.f
        if self.bound_instance is None:
            return target(*args, **kwargs)
        return target(self.bound_instance, *args, **kwargs)

    def __get__(self, obj, objtype):
        # Descriptor protocol: attribute access on an instance returns a
        # handler bound to that instance (mirrors normal method binding).
        if obj is None:
            return self
        return ActionHandler(self.action, self.f, obj)

    def code(self):
        """Get the source code of the decorated function."""
        return inspect.getsource(self.f)
def on(f):
    """Decorator for action handlers.
    The action name is inferred from the function name.
    This also decorates the method with `tornado.gen.coroutine` so that
    `~tornado.concurrent.Future` can be yielded.
    """
    # Record the action name on the original function; wrapt proxies
    # attributes, so Meta.fill_action_handlers can read `.action` off the
    # returned wrapper as well.
    action = f.__name__
    f.action = action
    @wrapt.decorator
    @tornado.gen.coroutine
    def _execute(wrapped, instance, args, kwargs):
        # `wrapped` is already bound when called on an instance, so the
        # arguments pass straight through.
        return wrapped(*args, **kwargs)
    return _execute(f)
def on_action(action):
    """Decorator for action handlers.
    :param str action: explicit action name
    This also decorates the method with `tornado.gen.coroutine` so that
    `~tornado.concurrent.Future` can be yielded.
    """
    @wrapt.decorator
    @tornado.gen.coroutine
    def _execute(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)
    # Unlike `on`, the explicit action name is attached to the decorator
    # itself; wrapt exposes it on the decorated function.
    _execute.action = action
    return _execute
class Analysis(object):
    """Databench's analysis class.
    This contains the analysis code. Every browser connection corresponds to
    an instance of this class.
    **Initialization**: All initializations should be done in
    :meth:`.connected`. Instance variables (which should be avoided in favor
    of state) should be initialized in the constructor. Some cleanup
    can be done in :meth:`.disconnected`.
    **Arguments/Parameters**: Command line arguments are available
    at ``cli_args`` and the parameters of the HTTP GET request at
    ``request_args``. ``request_args`` is a dictionary of all
    arguments. Each value of the dictionary is a list of given values for this
    key even if this key only appeared once in the url
    (see `urllib.parse.parse_qs`).
    **Actions**: are captured by class method decorated
    with `databench.on`. To capture the action
    ``run`` that is emitted with the JavaScript code
    .. code-block:: js
        // on the JavaScript frontend
        d.emit('run', {my_param: 'helloworld'});
    use
    .. code-block:: python
        # in Python
        @databench.on
        def run(self, my_param):
            pass
    in Python. Lists are treated as positional arguments and objects as keyword
    arguments to the function call.
    If the message is neither of type `list` nor `dict` (for example a
    plain `string` or `float`), the function will be called with that
    as its first parameter.
    **Writing to a datastore**: By default, a :class:`Datastore`
    scoped to the current analysis instance is created at
    ``data``. You can write state updates to it with
    .. code-block:: python
        yield self.set_state(key1=value1)
    Similarly, there is a :class:`Datastore` instance at
    ``class_data`` which is
    scoped to all instances of this analysis by its class name and state
    updates are supported with :meth:`.set_class_state`.
    **Communicating with the frontend**: The default is to change state with
    :meth:`.set_state` or :meth:`.set_class_state` and let that
    change propagate to all frontends. Directly calling :meth:`.emit` is also
    possible.
    :ivar Datastore data: data scoped for this instance/connection
    :ivar Datastore class_data: data scoped across all instances
    :ivar list cli_args: command line arguments
    :ivar dict request_args: request arguments
    """
    # Marker attribute used elsewhere to recognize analysis classes.
    _databench_analysis = True
    def __init__(self):
        self.data = None
        self.class_data = None
        self.cli_args = []
        self.request_args = {}
    def init_databench(self, id_=None):
        # Called once per connection; assigns an id and wires up logging.
        self.id_ = id_ if id_ else Analysis.__create_id()
        # Placeholder emit function until set_emit_fn is called; logs an
        # error if an emit happens before setup completes.
        self.emit_to_frontend = (
            lambda s, pl:
            log.error('emit called before Analysis setup was complete')
        )
        self.log_frontend = logging.getLogger(__name__ + '.frontend')
        self.log_backend = logging.getLogger(__name__ + '.backend')
        self.init_datastores()
        return self
    def init_datastores(self):
        """Initialize datastores for this analysis instance.
        This creates instances of :class:`.Datastore` at ``data`` and
        ``class_data`` with the datastore domains being the current id
        and the class name of this analysis respectively.
        Overwrite this method to use other datastore backends.
        """
        # Every datastore change is forwarded to the frontend via emit.
        self.data = Datastore(self.id_)
        self.data.subscribe(lambda data: self.emit('data', data))
        self.class_data = Datastore(type(self).__name__)
        self.class_data.subscribe(lambda data: self.emit('class_data', data))
    @staticmethod
    def __create_id():
        # Random 8-character alphanumeric connection id.
        return ''.join(random.choice(string.ascii_letters + string.digits)
                       for _ in range(8))
    def set_emit_fn(self, emit_fn):
        """Set the function used to send messages to the frontend."""
        self.emit_to_frontend = emit_fn
        return self
    def emit(self, signal, message='__nomessagetoken__'):
        """Emit a signal to the frontend.
        :param str signal: name of the signal
        :param message: message to send
        :returns: return value from frontend emit function
        :rtype: tornado.concurrent.Future
        """
        # call pre-emit hooks: mirror log/warn/error signals into the
        # backend logger before forwarding them.
        if signal == 'log':
            self.log_backend.info(message)
        elif signal == 'warn':
            self.log_backend.warn(message)
        elif signal == 'error':
            self.log_backend.error(message)
        return self.emit_to_frontend(signal, message)
    """Events."""
    @on
    def connect(self):
        pass
    @on
    def args(self, cli_args, request_args):
        # Store command-line and HTTP GET arguments for later use.
        self.cli_args = cli_args
        self.request_args = request_args
    @on
    def log(self, *args, **kwargs):
        self.log_frontend.info(utils.to_string(*args, **kwargs))
    @on
    def warn(self, *args, **kwargs):
        self.log_frontend.warn(utils.to_string(*args, **kwargs))
    @on
    def error(self, *args, **kwargs):
        self.log_frontend.error(utils.to_string(*args, **kwargs))
    @on
    def connected(self):
        """Default handler for "connected" action.
        Overwrite to add behavior.
        """
        pass
    @on
    def disconnected(self):
        """Default handler for "disconnected" action.
        Overwrite to add behavior.
        """
        log.debug('on_disconnected called.')
    @on
    def set_state(self, updater=None, **kwargs):
        """Set state in Datastore."""
        yield self.data.set_state(updater, **kwargs)
    @on
    def set_class_state(self, updater=None, **kwargs):
        """Set state in class Datastore."""
        yield self.class_data.set_state(updater, **kwargs)
| {
"repo_name": "svenkreiss/databench",
"path": "databench/analysis.py",
"copies": "1",
"size": "7591",
"license": "mit",
"hash": -5552864817687256000,
"line_mean": 28.8858267717,
"line_max": 79,
"alpha_frac": 0.6346989856,
"autogenerated": false,
"ratio": 4.1255434782608695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5260242463860869,
"avg_score": null,
"num_lines": null
} |
"""Analysis module for Databench."""
from __future__ import absolute_import, unicode_literals, division
from . import __version__ as DATABENCH_VERSION
from .analysis import ActionHandler
from .readme import Readme
from .utils import json_encoder_default
from collections import defaultdict
import functools
import glob
import json
import logging
import os
import tornado.gen
import tornado.web
import tornado.websocket
try:
from urllib.parse import parse_qs # Python 3
except ImportError:
from urlparse import parse_qs # Python 2
PING_INTERVAL = 15000
log = logging.getLogger(__name__)
class Meta(object):
    """Meta class referencing an analysis.
    :param str name: Name of this analysis.
    :param databench.Analysis analysis_class:
        Object that should be instantiated for every new websocket connection.
    :param str analysis_path: Path of the analysis class.
    :param list extra_routes: [(route, handler, data), ...]
    :param list cli_args: Arguments from the command line.
    """
    def __init__(self, name, analysis_class, analysis_path, extra_routes=None,
                 cli_args=None, main_template='index.html', info=None):
        self.name = name
        self.analysis_class = analysis_class
        self.analysis_path = analysis_path
        self.cli_args = cli_args if cli_args is not None else []
        # detect whether a thumbnail image is present
        thumbnail = False
        thumbnails = glob.glob(os.path.join(self.analysis_path, 'thumbnail.*'))
        if len(thumbnails) >= 1:
            thumbnail = os.path.basename(thumbnails[0])
        # analysis readme
        readme = Readme(self.analysis_path)
        # Default metadata; overridable via the `info` parameter below.
        self.info = {
            'title': self.analysis_class.__name__,
            'readme': readme.html,
            'description': readme.text.strip(),
            'show_in_index': True,
            'thumbnail': thumbnail,
            'home_link': False,
            'version': '0.0.0',
        }
        if info is not None:
            self.info.update(info)
        self.fill_action_handlers(analysis_class)
        # Tornado URL routes for static files, the websocket endpoint and
        # template rendering; extra_routes are appended last.
        self.routes = [
            (r'static/(.+)', tornado.web.StaticFileHandler,
             {'path': os.path.join(self.analysis_path, 'static')}),
            (r'(analysis\.(?:js|css)).*', tornado.web.StaticFileHandler,
             {'path': self.analysis_path}),
            (r'(thumbnail\.(?:png|jpg|jpeg)).*', tornado.web.StaticFileHandler,
             {'path': self.analysis_path}),
            (r'ws', FrontendHandler,
             {'meta': self}),
            (r'(?P<template_name>.+\.html)', RenderTemplate,
             {'info': self.info, 'path': self.analysis_path}),
            (r'', RenderTemplate,
             {'template_name': main_template,
              'info': self.info, 'path': self.analysis_path}),
        ] + (extra_routes if extra_routes is not None else [])
    @staticmethod
    def fill_action_handlers(analysis_class):
        # Build the action -> [handlers] map on the analysis class.
        # Handlers are recognized three ways: ActionHandler instances,
        # anything carrying an `action` attribute (set by @on/@on_action),
        # or legacy `on_<name>` method naming.
        analysis_class._action_handlers = defaultdict(list)
        for attr_str in dir(analysis_class):
            attr = getattr(analysis_class, attr_str)
            action = None
            if isinstance(attr, ActionHandler):
                action = attr.action
            elif hasattr(attr, 'action'):
                action = attr.action
            elif attr_str.startswith('on_'):
                action = attr_str[3:]
            if action is None:
                continue
            analysis_class._action_handlers[action].append(attr)
    @staticmethod
    @tornado.gen.coroutine
    def run_process(analysis, action_name, message='__nomessagetoken__'):
        """Executes an action in the analysis with the given message.
        It also handles the start and stop signals in the case that message
        is a `dict` with a key ``__process_id``.
        :param str action_name: Name of the action to trigger.
        :param message: Message.
        :param callback:
            A callback function when done (e.g.
            :meth:`~tornado.testing.AsyncTestCase.stop` in tests).
        :rtype: tornado.concurrent.Future
        """
        if analysis is None:
            return
        # detect process_id
        process_id = None
        if isinstance(message, dict) and '__process_id' in message:
            process_id = message['__process_id']
            del message['__process_id']
        if process_id:
            yield analysis.emit('__process',
                                {'id': process_id, 'status': 'start'})
        # Handlers registered for the specific action plus any '*' catch-all.
        fns = [
            functools.partial(handler, analysis)
            for handler in (analysis._action_handlers.get(action_name, []) +
                            analysis._action_handlers.get('*', []))
        ]
        if fns:
            args, kwargs = [], {}
            # Check whether this is a list (positional arguments)
            # or a dictionary (keyword arguments).
            if isinstance(message, list):
                args = message
            elif isinstance(message, dict):
                kwargs = message
            elif message == '__nomessagetoken__':
                pass
            else:
                args = [message]
            for fn in fns:
                log.debug('calling {}'.format(fn))
                try:
                    yield tornado.gen.maybe_future(fn(*args, **kwargs))
                except Exception as e:
                    # Notify the frontend, then re-raise for the caller.
                    yield analysis.emit('error', 'an Exception occured')
                    raise e
        else:
            yield analysis.emit('warn',
                                'no handler for {}'.format(action_name))
        if process_id:
            yield analysis.emit('__process',
                                {'id': process_id, 'status': 'end'})
class FrontendHandler(tornado.websocket.WebSocketHandler):
    """Websocket handler connecting one browser to one Analysis instance."""
    def initialize(self, meta):
        self.meta = meta
        self.analysis = None
        # Periodic ping keeps the websocket alive through proxies.
        self.ping_callback = tornado.ioloop.PeriodicCallback(self.do_ping,
                                                             PING_INTERVAL)
        self.ping_callback.start()
        tornado.autoreload.add_reload_hook(self.on_close)
    def do_ping(self):
        # Stop pinging once the connection has gone away.
        if self.ws_connection is None:
            self.ping_callback.stop()
            return
        self.ping(b'ping')
    def open(self):
        log.debug('WebSocket connection opened.')
    @tornado.gen.coroutine
    def on_close(self):
        log.debug('WebSocket connection closed.')
        yield self.meta.run_process(self.analysis, 'disconnected')
    @tornado.gen.coroutine
    def on_message(self, message):
        """Dispatch an incoming frontend message to the analysis."""
        if message is None:
            log.debug('empty message received.')
            return
        msg = json.loads(message)
        # The first message from a frontend carries '__connect' and creates
        # the Analysis instance for this connection.
        if '__connect' in msg:
            if self.analysis is not None:
                log.error('Connection already has an analysis. Abort.')
                return
            requested_id = msg['__connect']
            log.debug('Instantiate analysis with id {}'.format(requested_id))
            self.analysis = self.meta.analysis_class()
            self.analysis.init_databench(requested_id)
            self.analysis.set_emit_fn(self.emit)
            log.info('Analysis {} instanciated.'.format(self.analysis.id_))
            yield self.emit('__connect', {
                'analysis_id': self.analysis.id_,
                'databench_backend_version': DATABENCH_VERSION,
                'analyses_version': self.meta.info['version'],
            })
            # Run the standard startup actions in order: connect, args,
            # connected.
            yield self.meta.run_process(self.analysis, 'connect')
            args = {'cli_args': self.meta.cli_args, 'request_args': {}}
            if '__request_args' in msg and msg['__request_args']:
                args['request_args'] = parse_qs(
                    msg['__request_args'].lstrip('?'))
            yield self.meta.run_process(self.analysis, 'args', args)
            yield self.meta.run_process(self.analysis, 'connected')
            log.info('Connected to analysis.')
            return
        if self.analysis is None:
            log.warning('no analysis connected. Abort.')
            return
        if 'signal' not in msg:
            log.info('message not processed: {}'.format(message))
            return
        # Forward the signal with or without a payload ('load').
        if 'load' not in msg:
            yield self.meta.run_process(self.analysis,
                                        msg['signal'])
        else:
            yield self.meta.run_process(self.analysis,
                                        msg['signal'], msg['load'])
    def emit(self, signal, message='__nomessagetoken__'):
        """Send a signal (and optional payload) to the frontend as JSON."""
        data = {'signal': signal}
        if message != '__nomessagetoken__':
            data['load'] = message
        try:
            return self.write_message(
                json.dumps(data, default=json_encoder_default).encode('utf-8'))
        except tornado.websocket.WebSocketClosedError:
            # Connection already gone; dropping the message is fine here.
            pass
class RenderTemplate(tornado.web.RequestHandler):
    """Renders an analysis template with Databench context variables."""

    def initialize(self, info, path, template_name=None):
        self.info = info
        self.path = path
        self.template_name = template_name

    def get(self, template_name=None):
        # Fall back to the template configured at route time when the URL
        # did not capture one.
        name = self.template_name if template_name is None else template_name
        self.render(os.path.join(self.path, name),
                    databench_version=DATABENCH_VERSION,
                    **self.info)

    def head(self):
        pass
| {
"repo_name": "svenkreiss/databench",
"path": "databench/meta.py",
"copies": "1",
"size": "9388",
"license": "mit",
"hash": 1934442513545567200,
"line_mean": 33.5147058824,
"line_max": 79,
"alpha_frac": 0.5597571368,
"autogenerated": false,
"ratio": 4.391019644527596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5450776781327595,
"avg_score": null,
"num_lines": null
} |
# ANALYSIS MODULE FOR NFL PREDICT
import nfldb, nfldbc, nflgame
import json
dbc = nfldbc.dbc
def get_team(team_name):
    """Fetch a single team row matching the given team_id."""
    query = 'SELECT * FROM team WHERE team_id = %s'
    with nfldb.Tx(dbc) as cursor:
        cursor.execute(query, [team_name])
        return cursor.fetchone()
def get_all_teams():
    """Return every row from the team table."""
    with nfldb.Tx(dbc) as cursor:
        cursor.execute('SELECT * FROM team;')
        rows = cursor.fetchall()
    return rows
def get_stats_categories():
    """Return the statistical categories defined by nfldb."""
    return nfldb.stat_categories
def fuzzy_search(name, limit=1, team=None, position=None):
    """Fuzzy-match players by name, optionally filtered by team/position.

    Delegates to nfldb.player_search; returns its result unchanged.
    """
    return nfldb.player_search(dbc, name, limit=limit, team=team, position=position)
def get_player(last_name, first_name, team):
    """Fetch a player row by name and team.

    Falls back to team 'UNK' (unknown) when no row matches the given team,
    mirroring how nfldb stores players without a current team.
    Returns the row, or None if no match is found either way.
    """
    # Single parameterized query reused for both lookups; the original
    # duplicated the SQL string and compared with `== None`.
    query = ('SELECT * FROM player '
             'WHERE last_name = %s AND first_name = %s AND team = %s')
    with nfldb.Tx(dbc) as cursor:
        cursor.execute(query, [last_name, first_name, team])
        player = cursor.fetchone()
        if player is None:
            # retry against the unknown-team placeholder
            cursor.execute(query, [last_name, first_name, 'UNK'])
            player = cursor.fetchone()
        return player
def get_player_all_time_stats(last_name, first_name, team):
    """Aggregate all-time play stats for the named player.

    NOTE(review): get_player may return None for an unknown player, in
    which case the subscript below raises — confirm callers guard this.
    """
    player = get_player(last_name, first_name, team)
    q = nfldb.Query(dbc)
    q.play_player(player_id=player['player_id'])
    return q.limit(1).as_aggregate()[0]
def get_team_roster(team):
    """Return all players with Active status on the given team."""
    query = nfldb.Query(dbc)
    return query.player(team=team, status='Active').as_players()
def ratio_of_snaps(last_name, first_name, team, year, week):
    # Scrapes a gamebook PDF from nflgsis.com via a headless browser.
    # NOTE(review): `webdriver` (selenium) and `requests` are used but never
    # imported in this file — this function raises NameError as written.
    # NOTE(review): the last_name/first_name/team/year/week parameters are
    # currently unused; the season ('2015'), week link ('5') and gamebook
    # URL are hard-coded below.
    browser = webdriver.PhantomJS()
    browser.get('http://nflgsis.com')
    print browser.title
    print dir(browser)
    # Log in with the public media credentials.
    name_input = browser.find_element_by_name('Name')
    password_input = browser.find_element_by_name('Password')
    login_button = browser.find_element_by_name('Login')
    print name_input
    print password_input
    print login_button
    name_input.send_keys('media')
    password_input.send_keys('media')
    login_button.click()
    accept_button = browser.find_element_by_name('btnAccept')
    print accept_button
    accept_button.click()
    print browser.title
    # Navigate the frameset to the desired season and week.
    browser.switch_to_frame('BodyNav')
    year_dropdown = browser.find_element_by_xpath("//select[@name='selectSeason']/option[text()='2015']")
    print year_dropdown
    year_dropdown.click()
    link = browser.find_elements_by_link_text('5')[1]
    link.click()
    # <a href="../2015/Reg/05/56577/Gamebook.pdf" target="_blank">PDF</a>
    browser.switch_to_default_content()
    browser.switch_to_frame('Body')
    gamebook_link = browser.find_element_by_xpath("//a[@href='../2015/Reg/05/56577/Gamebook.pdf']")
    # Reuse the browser's session cookies to download the PDF directly.
    session = requests.Session()
    cookies = browser.get_cookies()
    for cookie in cookies:
        session.cookies.set(cookie['name'], cookie['value'])
    response = session.get('http://nflgsis.com/2015/Reg/05/56577/Gamebook.pdf')
    print response
    newFileByteArray = bytearray(response.content)
    # NOTE(review): binary content written via mode 'w' and the file handle
    # is never closed — should be `with open('gamebook.pdf', 'wb') as f:`.
    f = open('gamebook.pdf', 'w')
    f.write(newFileByteArray)
    # gamebook_link.click()
    # print gamebook_link
    browser.quit()
| {
"repo_name": "strandx/nflpredict",
"path": "predict/nflanalyze.py",
"copies": "1",
"size": "3169",
"license": "mit",
"hash": 3040591709024404500,
"line_mean": 34.2111111111,
"line_max": 105,
"alpha_frac": 0.6481539918,
"autogenerated": false,
"ratio": 3.3498942917547567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4498048283554757,
"avg_score": null,
"num_lines": null
} |
""" Analysis Module for Pyneal Real-time Scan
These tools will set up and apply the specified analysis steps to incoming
volume data during a real-time scan
"""
import os
import sys
import logging
import importlib
import numpy as np
import nibabel as nib
class Analyzer:
    """ Analysis Class

    This is the main Analysis module that gets instantiated by Pyneal, and will
    handle executing the specific analyses throughout the scan. The specific
    analysis functions that get used will be based on the analyses specified
    in the settings dictionary that gets passed in.
    """
    def __init__(self, settings):
        """ Initialize the class

        Parameters
        ----------
        settings : dict
            dictionary that contains all of the pyneal settings for the current
            session. This dictionary is loaded/configured by the GUI once
            Pyneal is first launched
        """
        # set up logger
        self.logger = logging.getLogger('PynealLog')

        # create reference to settings dict
        self.settings = settings

        ### Format the mask. If the settings specify that the the mask should
        # be weighted, create separate vars for the weights and mask. Convert
        # mask to boolean array
        mask_img = nib.load(settings['maskFile'])
        if settings['maskIsWeighted'] is True:
            self.weightMask = True
            self.weights = mask_img.get_fdata().copy()
            self.mask = mask_img.get_fdata() > 0
        else:
            self.weightMask = False
            self.mask = mask_img.get_fdata() > 0

        ### Set the appropriate analysis function based on the settings
        if settings['analysisChoice'] == 'Average':
            self.analysisFunc = self.averageFromMask
        elif settings['analysisChoice'] == 'Median':
            self.analysisFunc = self.medianFromMask
        else:
            # must be a custom analysis script
            # get the path to the custom analysis file and import it
            customAnalysisDir, customAnalysisName = os.path.split(settings['analysisChoice'])
            sys.path.append(customAnalysisDir)
            customAnalysisModule = importlib.import_module(customAnalysisName.split('.')[0])

            # create instance of customAnalysis class, pass in mask reference
            customAnalysis = customAnalysisModule.CustomAnalysis(settings['maskFile'],
                                                                 settings['maskIsWeighted'],
                                                                 settings['numTimepts'])

            # define the analysis func for the custom analysis (should be 'compute'
            # method of the customAnaylsis template)
            self.analysisFunc = customAnalysis.compute

    def runAnalysis(self, vol, volIdx):
        """ Analyze the supplied volume

        This is a generic function that Pyneal can call in order to execute the
        unique analysis routines setup for this session. The specific analysis
        routines are contained in the `analysisFunc` function, and will be
        set up by the `__init__` method of this class.

        Every analysisFunc will have access to the volume data and the `volIdx`
        (0-based index). Not every `analysisFunc` will use the `volIdx` for
        anything (e.g. averageFromMask), but is included anyway so that any
        custom analysis scripts that need it have access to it

        Parameters
        ----------
        vol : numpy-array
            3D array of voxel data for the current volume
        volIdx : int
            0-based index indicating where, in time (4th dimension), the volume
            belongs

        Returns
        -------
        output : dict
            dictionary containing key:value pair(s) for analysis results
            specific to the current volume
        """
        self.logger.debug('started volIdx {}'.format(volIdx))

        # submit vol and volIdx to the specified analysis function
        output = self.analysisFunc(vol, volIdx)
        self.logger.info('analyzed volIdx {}'.format(volIdx))
        return output

    def averageFromMask(self, vol, volIdx):
        """ Compute the average voxel activation within the mask.

        Note: np.average has weights option, np.mean doesn't

        Parameters
        ----------
        vol : numpy-array
            3D array of voxel data for the current volume
        volIdx : int
            0-based index indicating where, in time (4th dimension), the volume
            belongs

        Returns
        -------
        dict
            {'weightedAverage': ####} or {'average': ####}
        """
        if self.weightMask:
            result = np.average(vol[self.mask], weights=self.weights[self.mask])
            return {'weightedAverage': np.round(result, decimals=2)}
        else:
            result = np.mean(vol[self.mask])
            return {'average': np.round(result, decimals=2)}

    def medianFromMask(self, vol, volIdx):
        """ Compute the median voxel activation within the mask

        Parameters
        ----------
        vol : numpy-array
            3D array of voxel data for the current volume
        volIdx : int
            0-based index indicating where, in time (4th dimension), the volume
            belongs

        Returns
        -------
        dict
            {'weightedMedian': ####} or {'median': ####}

        See Also
        --------
        Weighted median algorithm from: https://pypi.python.org/pypi/weightedstats/0.2
        """
        if self.weightMask:
            # Work on the masked (1-D, aligned) data and weights. The
            # previous version indexed the 1-D masked data with a boolean
            # array over the full 3-D weights volume (shape mismatch) and
            # returned bare values instead of the documented dict from the
            # two early-return paths.
            data = vol[self.mask]
            weights = self.weights[self.mask]
            sorted_data, sorted_weights = map(np.array, zip(*sorted(zip(data, weights))))
            midpoint = 0.5 * sum(sorted_weights)
            if np.any(weights > midpoint):
                # one weight dominates: the weighted median is the datum
                # carrying the maximum weight
                result = data[np.argmax(weights)]
            else:
                cumulative_weight = np.cumsum(sorted_weights)
                below_midpoint_index = np.where(cumulative_weight <= midpoint)[0][-1]
                if cumulative_weight[below_midpoint_index] == midpoint:
                    # midpoint falls exactly on a boundary: average the two
                    # neighboring data points
                    result = np.mean(sorted_data[below_midpoint_index:below_midpoint_index + 2])
                else:
                    result = sorted_data[below_midpoint_index + 1]
            return {'weightedMedian': np.round(result, decimals=2)}
        else:
            # take the median of the voxels in the mask
            result = np.median(vol[self.mask])
            return {'median': np.round(result, decimals=2)}
| {
"repo_name": "jeffmacinnes/pyneal",
"path": "src/pynealAnalysis.py",
"copies": "1",
"size": "6539",
"license": "mit",
"hash": -8028475021501976000,
"line_mean": 36.7976878613,
"line_max": 105,
"alpha_frac": 0.5996329714,
"autogenerated": false,
"ratio": 4.6607270135424095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.576035998494241,
"avg_score": null,
"num_lines": null
} |
"""analysis module."""
import datetime
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Sum
from apps.managers.log_mgr.models import MakahikiLog
from apps.managers.player_mgr import player_mgr
from apps.managers.player_mgr.models import Profile
from apps.managers.score_mgr.models import PointsTransaction
from apps.managers.team_mgr.models import Team
from apps.widgets.resource_goal.models import EnergyGoal
from apps.widgets.smartgrid.models import ActionMember, Action
MIN_SESSION = 60 # Assume user spends 60 seconds on a single page.
def _output(msg, outfile=None):
"""output the msg to outfile if outfile is specified, otherwise, return the msg."""
if outfile:
outfile.write(msg)
return ""
else:
return msg
def energy_goal_timestamps(date_start, date_end, outfile=None):
    """Emit a CSV of cumulative met energy goals per team per day."""
    output = _output('=== energy goal timestamps from %s to %s ===\n' % (
        date_start, date_end), outfile)
    if not date_start:
        output += _output("must specify date_start parameter.", outfile)
        return output
    date_end, date_start = _get_start_end_date(date_end, date_start)
    output += _output("%s,%s,%s\n" % ("timestamp", "team", "energy-goals"
                                      ), outfile)
    days = (date_end - date_start).days
    for day in range(days):
        for team in Team.objects.all():
            timestamp = date_start + datetime.timedelta(days=day)
            # Cumulative count of goals met from date_start up to this day.
            count = EnergyGoal.objects.filter(
                goal_status="Below the goal",
                team=team,
                date__gte=date_start,
                date__lte=timestamp).count()
            output += _output("%s,%s,%d\n" % (timestamp, team, count), outfile)
    return output
def user_point_timestamps(date_start, date_end, outfile=None):
    """Emit a CSV of cumulative points per user at hourly timestamps.

    Python 2 module: uses `print` statements for progress reporting.
    """
    output = _output('=== point timestamps from %s to %s ===\n' % (
        date_start, date_end), outfile)
    if not date_start:
        output += _output("must specify date_start parameter.", outfile)
        return output
    date_end, date_start = _get_start_end_date(date_end, date_start)
    output += _output("%s,%s,%s\n" % ("timestamp", "user", "points"
                                      ), outfile)
    # Distinct users with any transaction inside the window.
    users = PointsTransaction.objects.filter(
        transaction_date__gte=date_start,
        transaction_date__lt=date_end).values_list("user", flat=True).order_by("user").distinct()
    print "total %d users" % len(users)
    count = 0
    for user_id in users:
        user = User.objects.get(id=user_id)
        # Skip staff/admin accounts.
        if user.is_superuser or user.is_staff:
            continue
        timestamp = date_start
        while timestamp <= date_end:
            # Cumulative point total from date_start up to this hour.
            points = PointsTransaction.objects.filter(
                user=user,
                transaction_date__gte=date_start,
                transaction_date__lt=timestamp).aggregate(Sum('points'))["points__sum"]
            output += _output("%s,%s,%d\n" % (timestamp, user, points if points else 0), outfile)
            timestamp += datetime.timedelta(hours=1)
        count += 1
        if count % 10 == 0:
            print "process user #%d" % count
    print "process user #%d" % count
    return output
def _get_start_end_date(date_end, date_start):
"""return the start and end date in date object."""
date_start = datetime.datetime.strptime(date_start, "%Y-%m-%d")
if not date_end:
date_end = datetime.datetime.today()
else:
date_end = datetime.datetime.strptime(date_end, "%Y-%m-%d")
return date_end, date_start
def _process_post(log, outfile, output, p):
    """Append partner columns extracted from a logged POST body to output."""
    # A request may name a partner via a referral or a social email;
    # social_email takes precedence when both are present.
    partner = None
    for field in ("referrer_email", "social_email"):
        if field in log.post_content:
            partner = _get_post_content_value(log.post_content, field)
    if partner:
        user = player_mgr.get_user_by_email(partner)
        # Only count a real, distinct user who belongs to a team.
        if user and user != p.user:
            partner_p = user.get_profile()
            if partner_p.team:
                output += _output(",%s,%s,%s" % (
                    user, partner_p.team.group, _get_profile_room(partner_p)), outfile)
    return output
def user_timestamps(team, date_start, date_end, outfile=None):
    """display the timestamps for user interaction with the site and other players."""
    output = _output('=== user timestamps in team %s from %s to %s ===\n' % (
        team, date_start, date_end), outfile)
    if team:
        try:
            team = Team.objects.get(name=team)
        except ObjectDoesNotExist:
            output += _output("team does not exist.", outfile)
            return output
    if not date_start:
        output += _output("must specify date_start parameter.", outfile)
        return output
    date_end, date_start = _get_start_end_date(date_end, date_start)
    # Header has 10 columns; the last three (partner columns) are filled in
    # by _process_post only for rows whose POST body names a partner.
    output += _output("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % ("timestamp", "user", "last_name",
                                                           "group", "room",
                                                           "action", "action-url",
                                                           "partner", "partner-group", "partner-room"), outfile)
    logs = MakahikiLog.objects.filter(request_time__gte=date_start,
                                      request_time__lt=date_end).order_by("request_time")
    count = 0
    for log in logs:
        # Skip anonymous/admin traffic and impersonation usernames ('*').
        if log.remote_user in ("not-login", "AnonymousUser", "admin") or\
           log.remote_user.find("*") != -1:
            continue
        try:
            p = Profile.objects.get(user__username=log.remote_user)
        except ObjectDoesNotExist:
            continue
        # Restrict to players on a team (optionally the requested team).
        if not p.team or (team and p.team != team):
            continue
        output += _output("%s,%s,%s,%s,%s,%s,%s" % (log.request_time, p.user, p.user.last_name,
                                                    p.team.group, _get_profile_room(p),
                                                    _get_action_type(log.request_url), _get_action(log.request_url)), outfile)
        if log.post_content:
            output = _process_post(log, outfile, output, p)
        output += _output("\n", outfile)
        count += 1
        if count % 1000 == 0:
            print _output("process log entry #%d" % count)
    print _output("process log entry #%d" % count)
    return output
def _get_profile_room(profile):
"""get the profile's room from properties."""
if profile.properties:
props = profile.properties.split(";")
room_prop = props[0].split("=")
if len(room_prop) >= 1:
return room_prop[1]
else:
return None
def _get_post_content_value(post_content, key):
"""get the referral email from the post content."""
key = "'" + key + "': [u'"
pos_start = post_content.find(key)
if pos_start == -1:
return None
pos_start += len(key)
pos_end = post_content.find("']", pos_start)
return post_content[pos_start:pos_end]
def _get_action(url):
"""return the action short url."""
return url.split("?")[0]
def _get_action_type(url):
"""return the action type."""
url = _get_action(url)
if url.endswith("/login/"):
return "Login"
elif url.endswith("/referral/"):
return "Referral"
elif url.endswith("/setup/complete/"):
return "Setup"
elif url.endswith("/add/"):
return "Submission"
elif url.find("/video-") != -1:
return "Watch video"
else:
return "View"
def calculate_summary_stats():
    """Return CSV-style summary counts for submissions and referrals."""
    output = '=== Summary Stats ===\n'
    # NOTE(review): social_email__isnull=True counts submissions WITHOUT a
    # social email; for "social submission" this looks inverted — confirm
    # whether isnull=False was intended.
    output += "%s,%d\n" % (
        "Total number of social submission",
        ActionMember.objects.filter(social_email__isnull=True).count()
    )
    output += "%s,%d\n" % (
        "Total number of referrals",
        Profile.objects.filter(referring_user__isnull=False).count()
    )
    return output
def calculate_user_stats():
    """Return per-user CSV rows: time spent, submissions and points."""
    output = '=== User Stats ===\n'
    output += 'user id,total seconds spent,total hours spent,total submissions,total points\n'
    # Exclude superusers; staff accounts are still included here.
    for user in User.objects.filter(is_superuser=False):
        logs = MakahikiLog.objects.filter(remote_user=user.username).order_by(
            'request_time')
        seconds = _user_time_spent(logs)
        output += '%d,%d,%.2f,%d,%d\n' % (
            user.id, seconds, seconds / 3600.0,
            _user_submissions(user), _user_points(user))
    return output
def calculate_action_stats():
    """Return CSV rows counting grid actions by type."""
    # Only actions placed on the grid (level and category set) count.
    actions = Action.objects.filter(level__isnull=False, category__isnull=False)
    output = '=== Action Stats ===\n'
    output += 'action_type,total_actions\n'
    for action_type in ("activity", "event", "excursion", "commitment"):
        output += "%s,%d\n" % (
            action_type, actions.filter(type=action_type).count())
    return output
def _user_submissions(user):
    """
    user submissions.

    Returns the number of ActionMember rows recorded for this user.
    """
    return ActionMember.objects.filter(user=user).count()
def _user_points(user):
    """user points.

    Returns the user's current point total from their profile.
    """
    return user.get_profile().points()
def _user_time_spent(logs, start_date=None):
""" Iterate over the logs and track previous time and time spent."""
query = logs
if start_date:
query = query.filter(request_time__gt=start_date)
if query.count() > 0:
prev = query[0].request_time
cur_session = total = 0
for log in query[1:]:
current = log.request_time
diff = current - prev
# Start a new interval if 30 minutes have passed.
if diff.total_seconds() > (60 * 30):
if cur_session == 0:
total += MIN_SESSION
else:
total += cur_session
cur_session = 0
else:
cur_session += diff.total_seconds()
prev = current
# Append any session that was in progress.
total += cur_session
return total
return 0
| {
"repo_name": "justinslee/Wai-Not-Makahiki",
"path": "makahiki/apps/widgets/status/analysis.py",
"copies": "2",
"size": "10332",
"license": "mit",
"hash": 8380279694374474000,
"line_mean": 30.2145015106,
"line_max": 97,
"alpha_frac": 0.5857530004,
"autogenerated": false,
"ratio": 3.7448350851757883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010327988308666497,
"num_lines": 331
} |
"""analysis module."""
import datetime
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Sum, Q
from apps.managers.log_mgr.models import MakahikiLog
from apps.managers.player_mgr import player_mgr
from apps.managers.player_mgr.models import Profile
from apps.managers.score_mgr.models import PointsTransaction
from apps.managers.team_mgr.models import Team
from apps.widgets.resource_goal.models import EnergyGoal
from apps.widgets.smartgrid.models import ActionMember, Action
from apps.managers.challenge_mgr.models import RoundSetting
MIN_SESSION = 60 # Assume user spends 60 seconds on a single page.
def _output(msg, outfile=None):
"""output the msg to outfile if outfile is specified, otherwise, return the msg."""
if outfile:
outfile.write(msg)
return ""
else:
return msg
def energy_goal_timestamps(date_start, date_end, outfile=None):
    """Report, per day and per team, the cumulative count of
    'Below the goal' energy goals since date_start."""
    output = _output('=== energy goal timestamps from %s to %s ===\n' % (
        date_start, date_end), outfile)
    if not date_start:
        output += _output("must specify date_start parameter.", outfile)
        return output
    date_end, date_start = _get_start_end_date(date_end, date_start)
    output += _output("%s,%s,%s\n" % ("timestamp", "team", "energy-goals"
                                      ), outfile)
    days = (date_end - date_start).days
    for day in range(days):
        for team in Team.objects.all():
            timestamp = date_start + datetime.timedelta(days=day)
            # Cumulative count of goals met from date_start up to this day.
            count = EnergyGoal.objects.filter(
                goal_status="Below the goal",
                team=team,
                date__gte=date_start,
                date__lte=timestamp).count()
            output += _output("%s,%s,%d\n" % (timestamp, team, count), outfile)
    return output
def user_point_timestamps(date_start, date_end, outfile=None):
    """display the timestamps for user points.

    For every non-staff user with point transactions in the window,
    emits an hourly cumulative sum of points from date_start onward.
    (This module is Python 2 -- note the print statements.)
    """
    output = _output('=== point timestamps from %s to %s ===\n' % (
        date_start, date_end), outfile)
    if not date_start:
        output += _output("must specify date_start parameter.", outfile)
        return output
    date_end, date_start = _get_start_end_date(date_end, date_start)
    output += _output("%s,%s,%s\n" % ("timestamp", "user", "points"
                                      ), outfile)
    # Distinct user ids with at least one transaction inside the window.
    users = PointsTransaction.objects.filter(
        transaction_date__gte=date_start,
        transaction_date__lt=date_end).values_list("user", flat=True).order_by("user").distinct()
    print "total %d users" % len(users)
    count = 0
    for user_id in users:
        user = User.objects.get(id=user_id)
        if user.is_superuser or user.is_staff:
            continue
        timestamp = date_start
        while timestamp <= date_end:
            # Running total of points earned up to (but excluding) timestamp.
            points = PointsTransaction.objects.filter(
                user=user,
                transaction_date__gte=date_start,
                transaction_date__lt=timestamp).aggregate(Sum('points'))["points__sum"]
            output += _output("%s,%s,%d\n" % (timestamp, user, points if points else 0), outfile)
            timestamp += datetime.timedelta(hours=1)
        count += 1
        if count % 10 == 0:
            print "process user #%d" % count
    print "process user #%d" % count
    return output
def _get_start_end_date(date_end, date_start):
"""return the start and end date in date object."""
date_start = datetime.datetime.strptime(date_start, "%Y-%m-%d")
if not date_end:
date_end = datetime.datetime.today()
else:
date_end = datetime.datetime.strptime(date_end, "%Y-%m-%d")
return date_end, date_start
def _process_post(log, outfile, output, p):
    """process the post content.

    Looks for a referral/social partner email in the logged POST body;
    if it resolves to another team-assigned player, appends that
    partner's name, group and room to ``output``.  Returns the
    (possibly extended) output string.
    """
    partner = None
    if "referrer_email" in log.post_content:
        partner = _get_post_content_value(log.post_content, "referrer_email")
    if "social_email" in log.post_content:
        partner = _get_post_content_value(log.post_content, "social_email")
    if partner:
        user = player_mgr.get_user_by_email(partner)
        # Only count a partner who is a different player with a team.
        if user and user != p.user:
            partner_p = user.get_profile()
            if partner_p.team:
                output += _output(",%s,%s,%s" % (
                    user, partner_p.team.group, _get_profile_room(partner_p)), outfile)
    return output
def user_timestamps(team, date_start, date_end, outfile=None):
    """display the timestamps for user interaction with the site and other players.

    Emits one CSV row per relevant access-log entry: the player, their
    group/room, the classified action, and (for referral/social POSTs)
    the partner player's details.
    """
    output = _output('=== user timestamps in team %s from %s to %s ===\n' % (
        team, date_start, date_end), outfile)
    if team:
        try:
            team = Team.objects.get(name=team)
        except ObjectDoesNotExist:
            output += _output("team does not exist.", outfile)
            return output
    if not date_start:
        output += _output("must specify date_start parameter.", outfile)
        return output
    date_end, date_start = _get_start_end_date(date_end, date_start)
    output += _output("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % ("timestamp", "user", "last_name",
                                                           "group", "room",
                                                           "action", "action-url",
                                                           "partner", "partner-group",
                                                           "partner-room"), outfile)
    logs = MakahikiLog.objects.filter(request_time__gte=date_start,
                                      request_time__lt=date_end).order_by("request_time")
    count = 0
    for log in logs:
        # Skip anonymous/admin traffic and masked usernames.
        if log.remote_user in ("not-login", "AnonymousUser", "admin") or \
           log.remote_user.find("*") != -1:
            continue
        try:
            p = Profile.objects.get(user__username=log.remote_user)
        except ObjectDoesNotExist:
            continue
        # Only team-assigned players, optionally restricted to one team.
        if not p.team or (team and p.team != team):
            continue
        output += _output("%s,%s,%s,%s,%s,%s,%s" % (log.request_time, p.user, p.user.last_name,
                                                    p.team.group, _get_profile_room(p),
                                                    _get_action_type(log.request_url),
                                                    _get_action(log.request_url)), outfile)
        if log.post_content:
            output = _process_post(log, outfile, output, p)
        output += _output("\n", outfile)
        count += 1
        if count % 1000 == 0:
            print _output("process log entry #%d" % count)
    print _output("process log entry #%d" % count)
    return output
def _get_profile_room(profile):
"""get the profile's room from properties."""
if profile.properties:
props = profile.properties.split(";")
room_prop = props[0].split("=")
if len(room_prop) >= 1:
return room_prop[1]
else:
return None
def _get_post_content_value(post_content, key):
"""get the referral email from the post content."""
key = "'" + key + "': [u'"
pos_start = post_content.find(key)
if pos_start == -1:
return None
pos_start += len(key)
pos_end = post_content.find("']", pos_start)
return post_content[pos_start:pos_end]
def _get_action(url):
"""return the action short url."""
return url.split("?")[0]
def _get_action_type(url):
    """Classify the request ``url`` into a coarse action-type label."""
    url = _get_action(url)
    # Checked in order; first matching suffix wins.
    suffix_types = (
        ("/login/", "Login"),
        ("/referral/", "Referral"),
        ("/setup/complete/", "Setup"),
        ("/add/", "Submission"),
    )
    for suffix, action_type in suffix_types:
        if url.endswith(suffix):
            return action_type
    if "/video-" in url:
        return "Watch video"
    return "View"
def calculate_action_stats():
    """action stats

    Return a CSV-formatted string with the number of actions of each
    type (activity, event, excursion, commitment), over all actions.
    """
    output = '=== Action Stats ===\n'
    output += 'action_type,total_actions\n'
    actions = Action.objects.all()  # was only actions in the grid. need to address?
    output += "%s,%d\n" % (
        "activity", actions.filter(type="activity").count()
    )
    output += "%s,%d\n" % (
        "event", actions.filter(type="event").count()
    )
    output += "%s,%d\n" % (
        "excursion", actions.filter(type="excursion").count()
    )
    output += "%s,%d\n" % (
        "commitment", actions.filter(type="commitment").count()
    )
    return output
def calculate_summary_stats():
    """Calculate site-wide summary stats: number of social submissions
    and number of referrals, as a CSV-formatted string."""
    output = '=== Summary Stats ===\n'
    output += "%s,%d\n" % (
        "Total number of social submission",
        # A *social* submission is one that carries a social_email, so the
        # filter must be isnull=False; the previous isnull=True counted
        # the non-social submissions instead.
        ActionMember.objects.filter(social_email__isnull=False).count()
    )
    output += "%s,%d\n" % (
        "Total number of referrals",
        Profile.objects.filter(referring_user__isnull=False).count()
    )
    return output
def calculate_user_stats():
    """Calculate the user stats.

    Emits one CSV row per non-superuser: id, estimated seconds/hours
    spent on the site (from access logs), submission count, and point
    total.
    """
    output = '=== User Stats ===\n'
    users = User.objects.filter(is_superuser=False)
    output += 'user id,total seconds spent,total hours spent,total submissions,total points\n'
    for user in users:
        logs = MakahikiLog.objects.filter(remote_user=user.username).order_by(
            'request_time')
        total_time = _user_time_spent(logs)
        total_submission = _user_submissions(user)
        total_point = _user_points(user)
        output += '%d,%d,%.2f,%d,%d\n' % (
            user.id, total_time, total_time / 3600.0,
            total_submission, total_point)
    return output
def calculate_user_summary(user_list):
    """Calculate the user summary.

    ``user_list`` is a comma-separated list of profile names.  For each
    matching profile and each round, emits a CSV row with the total
    points for that round plus a per-category breakdown derived from the
    point-transaction messages.
    """
    output = '=== Summary for users in list: "%s" ===\n' % user_list
    user_list = user_list.split(",")
    profiles = Profile.objects.filter(name__in=user_list)
    rounds = RoundSetting.objects.all()
    # Point categories recognised by the *start* of the transaction message.
    point_types = (
        "Activity", "Commitment", "Event", "Excursion", "Referred", "Super Referred",
        "Mega Referred", "Badge",
        "Set up profile", "Bonus Points", "Team 50% participation",
        "Team 75% participation", "Team energy Goal")
    # Sub-categories matched anywhere *within* the message.
    sub_point_types = ("Provide feedback", "Social Bonus", "Sign up", "No Show")
    output += 'name, email, round, total points, '
    for t in point_types:
        output += "%s, " % t
    for t in sub_point_types:
        output += "%s, " % t
    output += "\n"
    for p in profiles:
        for rd in rounds:
            user = p.user
            # get user points and submissions from_action for each round and action type
            output += "%s, %s, %s, " % (user.first_name + " " + user.last_name,
                                        user.username, rd.name)
            query = PointsTransaction.objects.filter(
                transaction_date__lte=rd.end,
                transaction_date__gte=rd.start,
                user=user)
            total_points = query.aggregate(Sum("points"))["points__sum"]
            total_points = total_points if total_points else 0
            output += '%d, ' % (total_points if total_points else 0)
            all_points = 0
            for t in point_types:
                # Messages may carry a leading space; match both forms.
                type_points = query.filter(
                    Q(message__startswith=t) | Q(message__startswith=" " + t)).aggregate(
                    Sum("points"))["points__sum"]
                type_points = type_points if type_points else 0
                all_points += type_points
                output += '%d, ' % (type_points)
            for t in sub_point_types:
                sub_type_points = query.filter(
                    message__contains=t).aggregate(Sum("points"))["points__sum"]
                output += '%d, ' % (sub_type_points if sub_type_points else 0)
            output += "\n"
            # the total_points should be equals to all_points
            if total_points != all_points:
                output += "all points (%d) not added to total points.\n" % all_points
    return output
def _user_submissions(user):
    """
    user submissions.

    Return the number of ActionMember rows (action submissions) for the
    given user.
    """
    return ActionMember.objects.filter(user=user).count()
def _user_points(user):
    """user points.

    Return the user's current point total from their profile.
    """
    return user.get_profile().points()
def _user_time_spent(logs, start_date=None):
""" Iterate over the logs and track previous time and time spent."""
query = logs
if start_date:
query = query.filter(request_time__gt=start_date)
if query.count() > 0:
prev = query[0].request_time
cur_session = total = 0
for log in query[1:]:
current = log.request_time
diff = current - prev
# Start a new interval if 30 minutes have passed.
if diff.total_seconds() > (60 * 30):
if cur_session == 0:
total += MIN_SESSION
else:
total += cur_session
cur_session = 0
else:
cur_session += diff.total_seconds()
prev = current
# Append any session that was in progress.
total += cur_session
return total
return 0
| {
"repo_name": "jtakayama/makahiki-draft",
"path": "makahiki/apps/widgets/status/analysis.py",
"copies": "2",
"size": "13096",
"license": "mit",
"hash": -3547825688593700400,
"line_mean": 32.5794871795,
"line_max": 97,
"alpha_frac": 0.5633781307,
"autogenerated": false,
"ratio": 3.8699763593380614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009869068127653536,
"num_lines": 390
} |
""" Analysis module provides general functions used for examining the results
of running feature detection on a dataset of images.
"""
import functools
import logging
import re
import random
import numpy as np
import pandas as pd
from sklearn import manifold, preprocessing
from mia.features.blobs import blob_props
from mia.features.linear_structure import line_props
logger = logging.getLogger(__name__)
def _handle_data_frame(func):
"""Decorator to wrap the scikit-learn funcions so that they can handle
being directly passed a pandas DataFrame.
"""
@functools.wraps(func)
def inner(feature_matrix, **kwargs):
if isinstance(feature_matrix, pd.DataFrame):
fit_output = func(feature_matrix.as_matrix(), **kwargs)
return pd.DataFrame(fit_output, index=feature_matrix.index)
else:
return func(feature_matrix)
return inner
@_handle_data_frame
def tSNE(feature_matrix, **kwargs):
    """Run the t-SNE algorithm on the feature space of a collection of images.

    :param feature_matrix: matrix of features to embed with t-SNE
    :returns: 2darray -- lower dimensional mapping of the t-SNE algorithm
    """
    scaled = standard_scaler(feature_matrix)
    return manifold.TSNE(**kwargs).fit_transform(scaled)
@_handle_data_frame
def isomap(feature_matrix, **kwargs):
    """Run the Isomap algorithm on the feature space of a collection of images.

    :param feature_matrix: matrix of features to embed with Isomap
    :returns: 2darray -- lower dimensional mapping of the Isomap algorithm
    """
    scaled = standard_scaler(feature_matrix)
    return manifold.Isomap(**kwargs).fit_transform(scaled)
@_handle_data_frame
def lle(feature_matrix, **kwargs):
    """Run the Locally Linear Embedding algorithm on the feature space of a
    collection of images.

    :param feature_matrix: matrix of features to embed with LLE
    :returns: 2darray -- lower dimensional mapping of the LLE algorithm
    """
    scaled = standard_scaler(feature_matrix)
    return manifold.LocallyLinearEmbedding(**kwargs).fit_transform(scaled)
@_handle_data_frame
def standard_scaler(feature_matrix):
    """Remove the mean and scale each feature to unit variance.

    :param feature_matrix: matrix of features to run standard scaler on.
    :returns: 2darray -- data transformed using the standard scaler
    """
    return preprocessing.StandardScaler().fit_transform(feature_matrix)
@_handle_data_frame
def normalize_data_frame(feature_matrix):
    """Normalise the feature matrix with sklearn's ``preprocessing.normalize``.

    :param feature_matrix: matrix of features to perform normalisation on.
    :returns: 2darray -- normalised data
    """
    return preprocessing.normalize(feature_matrix)
def measure_closeness(data_frame, labels):
    """Average distance of each group's points from the group centroid.

    Points sharing a value in ``labels`` form one group; for every group
    the mean distance to its centroid is computed.

    :param data_frame: data frame of points to measure the distance between.
    :param labels: class label for each point; equal labels are neighbours.
    :returns: Series -- closeness measure indexed by the unique labels.
    """
    distances = [_cluster_measure(group)
                 for _, group in data_frame.groupby(labels)]
    return pd.Series(distances, index=labels.unique())
def _cluster_measure(group):
"""Measure the distance between all points in a group from the centroid.
:param group: the group of points all belonging to the same cluster.
:returns: float -- mean value of the group.
"""
points = group[[0, 1]]
centroid = points.sum() / group.size
distances = ((centroid - points)**2).sum(axis=1)
distances = distances.apply(np.sqrt)
return distances.mean()
def create_hologic_meta_data(df, meta_data_file):
    """Generate a data frame containing the meta data for the Hologic dataset.

    This uses the existing data frame of images for the index names.

    :param df: the data frame of features detected from a reduction.
    :param meta_data_file: name of the file containing the meta data for
        the dataset
    :returns: DataFrame -- containing the meta data for the Hologic data set
    """
    data = [_split_hologic_img_name(img_name) for img_name in df.index.values]
    columns = ['patient_id', 'side', 'view']
    name_meta_data = pd.DataFrame(data, columns=columns)
    name_meta_data.index = name_meta_data.patient_id
    name_meta_data['img_name'] = df.index.values
    # pd.read_csv with these arguments matches the behaviour of the
    # removed DataFrame.from_csv (first column as index, dates parsed).
    BIRADS_data = pd.read_csv(meta_data_file, index_col=0, parse_dates=True)
    meta_data = name_meta_data.join(BIRADS_data, how='left', rsuffix='_r')
    meta_data.drop('patient_id_r', axis=1, inplace=True)
    meta_data.index = df.index
    return meta_data
def _split_hologic_img_name(name):
"""Split the Hologic naming convention into several parts.
This function will parse the image name to extract the patient id, side
(left or right), and the view (CC or ML).
:param name: file name to parse
:returns: tuple -- containing the (id, view, side)
"""
img_regex = re.compile(r'p(\d{3}-\d{3}-\d{5})-([a-z])([a-z])\.png')
m = re.match(img_regex, name)
return int(m.group(1).replace('-', '')), m.group(2), m.group(3)
def create_synthetic_meta_data(df, meta_data_file):
    """Create a meta data DataFrame for the synthetic data set.

    This uses the existing data frame of images for the index names.

    :param df: the data frame of features detected from a reduction.
    :param meta_data_file: name of the file containing the meta data for
        the dataset
    :returns: DataFrame -- containing the meta data for the synthetic data set
    """
    indicies = [_split_sythentic_img_name(img_name)
                for img_name in df.index.values]
    # pd.read_csv with these arguments matches the behaviour of the
    # removed DataFrame.from_csv (first column as index, dates parsed).
    raw_md = pd.read_csv(meta_data_file, index_col=0, parse_dates=True)
    md = raw_md.loc[indicies].copy()
    md['phantom_name'] = md.index
    md.index = df.index
    return md
def _split_sythentic_img_name(name):
"""Split the synthetic naming convention into several parts.
This function will parse the image name to extract the group of the
synthetic.
:param name: file name to parse
:returns: string -- the group of the synthetic.
"""
group_regex = re.compile(r'(test_Mix_DPerc\d+_c)_\d+\.dcm')
img_regex = re.compile(r'(phMix\d+)_c_\d+\.dcm')
group_match = re.match(group_regex, name)
img_match = re.match(img_regex, name)
if group_match:
return group_match.group(1)
elif img_match:
return img_match.group(1)
def features_from_blobs(df):
    """ Create features from blobs detected over a image dataset

    :param df: DataFrame containing the raw detections.
    :returns: DataFrame with the descriptive statistics generated
        from the blobs.
    """
    # blob_props is applied per image (the index identifies the image);
    # the extra grouping level it introduces is then dropped.
    features = df.groupby(df.index).apply(blob_props)
    return features.reset_index(level=1, drop=True)
def features_from_lines(df):
    """ Create features from lines detected over a image dataset

    :param df: DataFrame containing the raw detections.
    :returns: DataFrame with the descriptive statistics generated
        from the lines.
    """
    # line_props is applied per image (the index identifies the image);
    # the extra grouping level it introduces is then dropped.
    features = df.groupby(df.index).apply(line_props)
    return features.reset_index(level=1, drop=True)
def remove_duplicate_index(df):
    """Drop rows whose index value has already been seen (keeping the first).

    :param df: DataFrame possibly containing duplicated index values.
    :returns: DataFrame with one row per unique index value.
    """
    index_name = df.index.name
    deduped = (df.reset_index()
                 .drop_duplicates(index_name)
                 .set_index(index_name))
    return deduped
def create_random_subset(data_frame, column_name):
    """Choose a random subset from a DataFrame.

    The column name determines which groups are sampled. Exactly one
    randomly chosen row from each group is returned.

    :param data_frame: DataFrame of multiple images to sample from.
    :param column_name: column defining the group an image belongs to.
    :returns: DataFrame -- DataFrame with one image from each group.
    """
    # GroupBy.sample replaces the long-removed ``.ix`` indexer the
    # original implementation relied on; it keeps the original row index.
    random_subset = data_frame.groupby(column_name).sample(n=1)
    return random_subset.drop(column_name, axis=1)
def group_by_scale_space(data_frame):
    """Group an image by scale space.

    This function takes a data frame of features detect from blobs and groups
    each one by the scale it was detected at.

    :param data_frame: containing features derived from each blob.
    :returns: DataFrame -- DataFrame with features grouped by scale.
    """
    # Average the blob features per (radius, image) pair.
    radius_groups = data_frame.groupby([data_frame.radius, data_frame.index])
    blobs_by_scale = radius_groups.apply(lambda x: x.mean())
    scale_groups = blobs_by_scale.groupby(level=0)
    features = pd.DataFrame(index=blobs_by_scale.index.levels[1])
    # Join one set of columns per scale, suffixed with the scale number.
    for scale_num, (i, x) in enumerate(scale_groups):
        x = x.reset_index(level=0, drop=True)
        x = x.drop(['radius'], axis=1)
        features = features.join(x, rsuffix='_%d' % scale_num)
    # Images missing a given scale get the column mean for that feature.
    features.fillna(features.mean(), inplace=True)
    return features
def sort_by_scale_space(data_frame, n_features):
    """Sort the features in a matrix into the correct order.

    This can be used to sort features so that each feature appears in scale
    order. (i.e. count_0, count_1, ... count_n)

    :param data_frame: containing features for each image.
    :param n_features: number of distinct features per scale.
    :returns: DataFrame -- DataFrame with features sorted by group and scale.
    """
    features = data_frame.copy()
    # Normalisation returns default column labels; restore the originals.
    features = normalize_data_frame(features)
    features.columns = data_frame.columns
    # Take every n_features-th column starting at each offset, then flatten,
    # so columns for the same feature end up adjacent, ordered by scale.
    cols = [features.columns[i::n_features] for i in range(n_features)]
    cols = [c for l in cols for c in l]
    features = features[cols]
    return features
| {
"repo_name": "samueljackson92/major-project",
"path": "src/mia/analysis.py",
"copies": "1",
"size": "10346",
"license": "mit",
"hash": 8828946646216008000,
"line_mean": 33.602006689,
"line_max": 80,
"alpha_frac": 0.694567949,
"autogenerated": false,
"ratio": 3.7814327485380117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49760006975380117,
"avg_score": null,
"num_lines": null
} |
"""analysis nature issues
Revision ID: 57f1d2b5c2b1
Revises: 3421025b5e5e
Create Date: 2015-08-12 18:02:34.537329
"""
# revision identifiers, used by Alembic.
revision = '57f1d2b5c2b1'
down_revision = '3421025b5e5e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the analysis_nature_issues join table and backfill it from
    the legacy issues.analysis_nature_id column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('analysis_nature_issues',
    sa.Column('analysis_nature_id', sa.Integer(), nullable=True),
    sa.Column('issue_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['analysis_nature_id'], ['analysis_natures.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['issue_id'], ['issues.id'], ondelete='CASCADE')
    )
    ### end Alembic commands ###
    # issues with an analysis nature
    op.execute("INSERT INTO analysis_nature_issues (analysis_nature_id, issue_id)" +
               " SELECT analysis_nature_id, id from issues where analysis_nature_id is not null")
    # issues for all analysis natures
    from dexter.models import AnalysisNature
    for nature in AnalysisNature.all():
        op.execute("INSERT INTO analysis_nature_issues (analysis_nature_id, issue_id)" +
                   " SELECT %d, id from issues where analysis_nature_id is null" % nature.id)
def downgrade():
    """Drop the analysis_nature_issues join table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('analysis_nature_issues')
    ### end Alembic commands ###
| {
"repo_name": "Code4SA/mma-dexter",
"path": "migrations/versions/57f1d2b5c2b1_analysis_nature_issues.py",
"copies": "1",
"size": "1418",
"license": "apache-2.0",
"hash": 2588920971233580500,
"line_mean": 33.5853658537,
"line_max": 97,
"alpha_frac": 0.6868829337,
"autogenerated": false,
"ratio": 3.466992665036675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9618845371560236,
"avg_score": 0.007006045435287703,
"num_lines": 41
} |
"""analysis nature topics
Revision ID: 44ec193d1661
Revises: 57f1d2b5c2b1
Create Date: 2015-08-13 13:07:26.025968
"""
# revision identifiers, used by Alembic.
revision = '44ec193d1661'
down_revision = '57f1d2b5c2b1'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create the analysis_nature_topics join table and backfill it from
    the legacy topics.analysis_nature_id column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('analysis_nature_topics',
    sa.Column('analysis_nature_id', sa.Integer(), nullable=True),
    sa.Column('topic_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['analysis_nature_id'], ['analysis_natures.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['topic_id'], ['topics.id'], ondelete='CASCADE')
    )
    ### end Alembic commands ###
    # topics with an analysis nature
    op.execute("INSERT INTO analysis_nature_topics (analysis_nature_id, topic_id)" +
               " SELECT analysis_nature_id, id from topics where analysis_nature_id is not null")
    # topics for all analysis natures
    from dexter.models import AnalysisNature
    for nature in AnalysisNature.all():
        op.execute("INSERT INTO analysis_nature_topics (analysis_nature_id, topic_id)" +
                   " SELECT %d, id from topics where analysis_nature_id is null" % nature.id)
def downgrade():
    """Drop the analysis_nature_topics join table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('analysis_nature_topics')
    ### end Alembic commands ###
| {
"repo_name": "Code4SA/mma-dexter",
"path": "migrations/versions/44ec193d1661_analysis_nature_topics.py",
"copies": "1",
"size": "1455",
"license": "apache-2.0",
"hash": -8611658522346166000,
"line_mean": 34.487804878,
"line_max": 97,
"alpha_frac": 0.6920962199,
"autogenerated": false,
"ratio": 3.5144927536231885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47065889735231886,
"avg_score": null,
"num_lines": null
} |
"""Analysis of an fMRI dataset with a Finite Impule Response (FIR) model
=====================================================================
FIR models are used to estimate the hemodyamic response non-parametrically.
The example below shows that they're good to do statistical inference
even on fast event-related fMRI datasets.
Here, we demonstrate the use of a FIR model with 3 lags, computing 4 contrasts from a single subject dataset from the "Neurospin Localizer". It is a fast event related design: During 5 minutes, 80 events of the following types are presented : ['calculaudio', 'calculvideo'', 'clicDvideo', 'clicGaudio', 'clicGvideo', 'damier_H', 'damier_V', 'phraseaudio', 'phrasevideo']
"""
#########################################################################
# At first, we grab the localizer data.
import pandas as pd
from nistats import datasets
data = datasets.fetch_localizer_first_level()
fmri_img = data.epi_img
t_r = 2.4
events_file = data['events']
events = pd.read_table(events_file)
#########################################################################
# Next solution is to try Finite Impulse Reponse (FIR) models: we just
# say that the hrf is an arbitrary function that lags behind the
# stimulus onset. In the present case, given that the numbers of
# conditions is high, we should use a simple FIR model.
#
# Concretely, we set `hrf_model` to 'fir' and `fir_delays` to [1, 2,
# 3] (scans) corresponding to a 3-step functions on the [1 * t_r, 4 *
# t_r] seconds interval.
#
from nistats.first_level_model import FirstLevelModel
from nistats.reporting import plot_design_matrix, plot_contrast_matrix
first_level_model = FirstLevelModel(t_r, hrf_model='fir', fir_delays=[1, 2, 3])
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]
plot_design_matrix(design_matrix)
#########################################################################
# We have to adapt contrast specification. We characterize the BOLD
# response by the sum across the three time lags. It's a bit hairy,
# sorry, but this is the price to pay for flexibility...
import numpy as np
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
for i, column in enumerate(design_matrix.columns)])
conditions = events.trial_type.unique()
for condition in conditions:
contrasts[condition] = np.sum(
[contrasts[name] for name in design_matrix.columns
if name[:len(condition)] == condition], 0)
contrasts['audio'] = np.sum([contrasts[name] for name in
['audio_right_hand_button_press',
'audio_left_hand_button_press',
'audio_computation',
'sentence_listening']], 0)
contrasts['video'] = np.sum(
[contrasts[name] for name in
['visual_right_hand_button_press',
'visual_left_hand_button_press',
'visual_computation',
'sentence_reading']], 0)
contrasts['computation'] = contrasts['audio_computation'] +\
contrasts['visual_computation']
contrasts['sentences'] = contrasts['sentence_listening'] +\
contrasts['sentence_reading']
contrasts = {
'left-right': (
contrasts['visual_left_hand_button_press'] +
contrasts['audio_left_hand_button_press'] -
contrasts['visual_right_hand_button_press'] -
contrasts['audio_right_hand_button_press']),
'H-V': (contrasts['horizontal_checkerboard'] -
contrasts['vertical_checkerboard']),
'audio-video': contrasts['audio'] - contrasts['video'],
'sentences-computation': (contrasts['sentences']-contrasts['computation'])
}
#########################################################################
# Take a look at the contrasts.
plot_contrast_matrix(contrasts['left-right'], design_matrix)
#########################################################################
# Take a breath.
#
# We can now proceed by estimating the contrasts and displaying them.
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
fig = plt.figure(figsize=(11, 3))
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
ax = plt.subplot(1, len(contrasts), 1 + index)
z_map = first_level_model.compute_contrast(
contrast_val, output_type='z_score')
plot_stat_map(
z_map, display_mode='z', threshold=3.0, title=contrast_id, axes=ax,
cut_coords=1)
plt.show()
#########################################################################
# The result is acceptable. Note that we're asking a lot of questions
# to a small dataset, yet with a relatively large number of experimental
# conditions.
#
| {
"repo_name": "nistats/nistats",
"path": "examples/02_first_level_models/plot_fir_model.py",
"copies": "1",
"size": "4795",
"license": "bsd-3-clause",
"hash": 8461724966383554000,
"line_mean": 40.6956521739,
"line_max": 370,
"alpha_frac": 0.6129301356,
"autogenerated": false,
"ratio": 3.8146380270485283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4927568162648528,
"avg_score": null,
"num_lines": null
} |
"""Analysis of calcium imaging time series data"""
import os
import warnings
import numpy as np
import scipy
from scipy.fftpack import fft, fftfreq
from scipy import corrcoef
from scipy.cluster import hierarchy
from scipy.stats import mode, chisquare, zscore
from scipy.spatial.distance import squareform
try:
from bottleneck import nanmean, nanstd
except ImportError:
warnings.warn("Install bottleneck to speed up some numpy functions")
from numpy import nanmean, nanstd
from matplotlib.mlab import demean
import pandas as pd
import seaborn.apionly as sns
import itertools as it
import cPickle as pickle
import shapely
try:
from bottleneck import nanmean
except ImportError:
warnings.warn("Install bottleneck to speed up some numpy functions")
from numpy import nanmean
import sima
import matplotlib.pyplot as plt
import behavior_analysis as ba
import filters
# from ..analysis import boutonAnalysis as bouton
import calc_activity as ca
from ..classes import exceptions as exc
# from ..classes.experiment import SalienceExperiment, FearConditioningExperiment
from ..misc.misc import timestamp
from ..misc import stats, memoize
from ..plotting import plotting as plotting
import filters
"""
Free functions
"""
def _responseIntegrals(timeSeries, stimIdx, maxIdx, dt):
baseline = timeSeries[:, :stimIdx, :].mean(axis=2).mean(axis=1)
for i in range(timeSeries.shape[0]):
timeSeries[i, :, :] -= baseline[i]
timeSeries[i, :, :] /= baseline[i]
return timeSeries[:, stimIdx:maxIdx, :].sum(axis=1) * dt
def _responseZScores(timeSeries, stimIdx, maxIdx, dt):
    """Per-ROI z-scores of the response integrals across axis-1 repeats."""
    integrals = _responseIntegrals(timeSeries, stimIdx, maxIdx, dt)
    n_repeats = integrals.shape[1]
    spread = np.sqrt(np.var(integrals, axis=1))
    return integrals.mean(axis=1) / spread * np.sqrt(n_repeats)
"""
Helper functions
"""
def offsetPCA(data):
"""Perform PCA, but rather than diagonalizing the cross-correlation matrix at zero
delay, diagonalize the symmetrized cross-correlation matrix at one timestep delays"""
meanSubtractedData = data - np.dot(
np.ones([data.shape[0], 1]), np.mean(data, axis=0).reshape([1, -1]))
corrOffset = np.dot(meanSubtractedData[0:-1, :].T, meanSubtractedData[1:, :])
corrOffset = 0.5 * (corrOffset + corrOffset.T)
eiVals, eiVects = np.linalg.eigh(corrOffset)
# sort the eigenvectors and eigenvalues
idx = np.argsort(-eiVals)
eiVals = eiVals[idx]
eiVects = eiVects[:, idx]
return eiVals, eiVects
def offsetCorrelation(data, pcaClean=True):
    """Symmetrized lag-one correlation matrix of the row signals.

    Each row is demeaned, then scaled by the square root of its own
    lag-one autocorrelation; rows whose scaling is not finite become NaN.
    Note the demeaned copy is modified in place by the scaling loop.

    :param data: 2D array, one signal per row.
    :param pcaClean: if True, first reconstruct the data via offset PCA.
    :returns: symmetric (rows x rows) correlation matrix.
    """
    if pcaClean:
        data = pcaCleanup(data, 1.)
    data = demean(data, axis=1)
    for i in range(data.shape[0]):
        data[i, :] /= np.sqrt((data[i, 1:] * data[i, :-1]).mean())
    data[np.logical_not(np.isfinite(data))] = np.nan
    corrMatrix = np.dot(data[:, 1:], data[:, :-1].T) / (data.shape[1] - 1)
    return 0.5 * (corrMatrix + corrMatrix.T)
def pcaCleanupMatrix(data, retainedVariance=1.):
    """Return a projection matrix onto the leading offset-PCA subspace.

    Parameters
    ----------
    data : array
        (n_signals, n_times) signals; oPCA is run on data.T.
    retainedVariance : float
        Fraction of total offset variance to retain.

    Returns
    -------
    array
        (n_signals, n_signals) matrix P = V V.T where V holds the
        retained offset principal components; np.dot(P, x) projects x
        onto that subspace.
    """
    variances, pcs = offsetPCA(data.T)
    variances = variances / sum(variances)
    # Keep the smallest number of leading oPCs whose cumulative share of
    # the offset variance reaches the requested fraction.
    numPCs = 0
    capturedVariance = 0.
    while numPCs < len(variances) and capturedVariance < retainedVariance:
        capturedVariance += variances[numPCs]
        numPCs += 1
    # Previously this also computed an unused coefficient time series and
    # zeroed the discarded columns before a full outer product; slicing
    # the retained columns gives the identical matrix directly.
    retained = pcs[:, :numPCs]
    return np.dot(retained, retained.T)
def pcaCleanup(data, retainedVariance=.99):
    """Denoise signals by projecting onto the leading offset-PCA subspace.

    Parameters
    ----------
    data : array
        (n_signals, n_times) signals; oPCA is run on data.T.
    retainedVariance : float
        Fraction of total offset variance to keep.

    Returns
    -------
    array
        (n_signals, n_times) reconstruction from the retained components.
    """
    variances, pcs = offsetPCA(data.T)
    variances = variances / sum(variances)
    coefficients = np.dot(pcs.T, data)
    # Count leading components until the requested variance is captured.
    numPCs = 0
    captured = 0.
    while numPCs < len(variances) and captured < retainedVariance:
        captured += variances[numPCs]
        numPCs += 1
    # Reconstruct from only the retained components.
    return np.dot(pcs[:, :numPCs], coefficients[:numPCs, :])
def principalAngles(A, B):
    """Return the principal angles (radians) between the column spaces
    of A and B.

    Assumes A and B have orthonormal columns, so the angles are the
    arccosines of the singular values of A.T B.
    """
    _, s, _ = scipy.linalg.svd(np.dot(A.T, B))
    # Roundoff can push singular values marginally outside [-1, 1],
    # which would make arccos return NaN; clip before inverting.
    return np.arccos(np.clip(s, -1., 1.))
"""
Experiment analysis
"""
def isActive(expt, conf_level=95, channel='Ch2', label=None,
             demixed=False, roi_filter=None):
    """Return a boolean array marking frames inside significant transients.

    Parameters
    ----------
    conf_level : int
        Confidence threshold passed to expt.transientsData.

    Returns
    -------
    array of bool
        Shape (n_rois, n_frames, n_cycles); True wherever the ROI is
        within a significant transient.
    """
    activity = np.zeros(expt.imaging_shape(channel=channel, label=label,
                                           roi_filter=roi_filter),
                        dtype='bool')
    transients = expt.transientsData(
        threshold=conf_level, channel=channel, demixed=demixed,
        label=label, roi_filter=roi_filter)
    if activity.ndim == 2:
        # A 2-D shape is missing the trailing cycle axis; append it so
        # the (roi, frame, cycle) indexing below works.  The previous
        # 'activity[::, np.newaxis]' inserted the new axis in the middle,
        # producing (n_rois, 1, n_frames) and breaking that indexing.
        activity = activity[:, :, np.newaxis]
    for cell_index, cell in enumerate(transients):
        for cycle_index, cycle in enumerate(cell):
            starts = cycle['start_indices']
            ends = cycle['end_indices']
            for start, end in zip(starts, ends):
                # NaN start/end marks a transient clipped at a recording
                # boundary.
                if np.isnan(start):
                    start = 0
                if np.isnan(end):
                    end = activity.shape[1] - 1
                # Indices may be stored as floats; cast before slicing.
                activity[cell_index, int(start):int(end) + 1,
                         cycle_index] = True
    return activity
def oPCA(expt, channel='Ch2', num_pcs=75):
    """Run (or reload) offset principal component analysis on the dataset.

    Results are cached by sima in the dataset's save directory; if the
    requested number of PCs was already computed, the cached file is
    reused.

    Returns
    -------
    oPC_vars : array
        Offset variance per oPC, shape (num_pcs,).
    oPCs : array
        Spatial oPCs, shape (num_rows, num_columns, num_pcs).
    oPC_signals : array
        Temporal oPCs, shape (num_times, num_pcs).
    """
    dataset = expt.imaging_dataset()
    channel_idx = dataset._resolve_channel(channel)
    # Cache file is keyed on the resolved channel index.
    cache_path = os.path.join(
        dataset.savedir, 'opca_' + str(channel_idx) + '.npz')
    return sima.segment.oPCA.dataset_opca(
        dataset, ch=channel_idx, num_pcs=num_pcs, path=cache_path)
def powerSpectra(
        expt, dFOverF='None', demixed=False, linearTransform=None,
        window_width=100, dFOverF_percentile=0.25, removeNanBoutons=False,
        channel='Ch2', label=None, roi_filter=None):
    """Return each ROI's power spectrum and the matching frequencies.

    Spectra are averaged across trials; only the positive-frequency half
    of the FFT is kept.  See expt.imagingData for the meaning of the
    signal-extraction keyword arguments.
    """
    imData = expt.imagingData(
        dFOverF=dFOverF, demixed=demixed,
        linearTransform=linearTransform, window_width=window_width,
        dFOverF_percentile=dFOverF_percentile,
        removeNanBoutons=removeNanBoutons, channel=channel, label=label,
        roi_filter=roi_filter)
    transformed = fft(imData, axis=1)
    power = np.real(2 * transformed * transformed.conjugate())
    half = transformed.shape[1] // 2
    # Keep positive frequencies and average across the trial axis.
    spectra = power[:, :half, :].mean(axis=2)
    frequencies = fftfreq(imData[0, :, 0].size,
                          expt.frame_period())[:half]
    return spectra, frequencies
def _psth(expt, stimulus, ax=None, pre_frames=10, post_frames=20,
          exclude=None, data=None, transients_conf_level=99,
          dFOverF='from_file', smoothing=None, window_length=5,
          plot_mean=True, shade_ste=None, channel='Ch2', label=None,
          roi_filter=None, return_full=False, return_starts=False, shuffle=False,
          plot='heatmap', deduplicate=False, duplicate_window=None,
          z_score=False):
    """Calculates a psth based on trigger times

    Keyword arguments:
    stimulus -- either a behaviorData key or frames to trigger the PSTH,
        nTrials length list of trigger times for each trial
    pre_frames, post_frames -- frames preceding and following the stim
        frame to include (None: derive from the stimulus time)
    exclude -- 'running' or nTrials x nFrames boolean array used to mask
        data
    data -- None (dF/F), 'trans' (boolean transients), 'raw', or a
        pre-computed imaging-data array
    smoothing -- window function to use, should be 'flat' for a moving
        average or np.'smoothing' (hamming, hanning, bartlett, etc.)
    window_length -- length of smoothing window function, should be odd
    shuffle -- if True, shuffle stim start times within each trial

    Returns:
    nRois x (pre_frames + 1 + post_frames) array of the average response
    to the stimulus (or the full per-trigger array with return_full)

    Note:
    If stimulus is empty, returns a NaN array of the same size.
    """
    # see if stimulus is a behaviorData key
    # NOTE(review): the outer condition is always True (nothing is both a
    # list and an ndarray); the inner isinstance checks do the real
    # dispatch, so lists/arrays simply fall through unchanged.
    if((not isinstance(stimulus, list)) or (not isinstance(stimulus, np.ndarray))):
        if isinstance(stimulus, basestring):
            stimulus = ba.stimStarts(expt, stimulus,
                                     deduplicate=deduplicate,
                                     duplicate_window=duplicate_window)
        elif isinstance(stimulus, int):
            frame = expt.imagingIndex(stimulus)
            stimulus = []
            stimulus.append(np.array([frame]))
    # Default window: everything before / after the stimulus time.
    if pre_frames is None:
        frame_period = expt.frame_period()
        pre_frames = int(expt.stimulusTime() / frame_period)
    if post_frames is None:
        frame_period = expt.frame_period()
        n_frames = expt.imaging_shape()[1]
        post_frames = n_frames - int(expt.stimulusTime() / frame_period) \
            - 1
    # Resolve which signal to trigger on.
    if data is None:
        imData = expt.imagingData(dFOverF=dFOverF, channel=channel,
                                  label=label, roi_filter=roi_filter)
    elif data == 'trans':
        imData = isActive(
            expt, conf_level=transients_conf_level, channel=channel,
            label=label, roi_filter=roi_filter)
    elif data == 'raw':
        imData = expt.imagingData(channel=channel, label=label,
                                  roi_filter=roi_filter)
    else:
        imData = data
    # (nROIs, nFrames, nTrials) -> (nTrials, nROIs, nFrames)
    imData = np.rollaxis(imData, 2, 0)
    (nTrials, nROIs, nFrames) = imData.shape
    if z_score:
        imData = zscore(imData, axis=2)
    nTriggers = int(np.sum([len(trial_stims) for trial_stims in stimulus]))
    if shuffle:
        # Null distribution: random trigger frames, same count per trial.
        stimulus = [np.random.randint(
            0, nFrames, len(trial_stims)) for trial_stims in stimulus]
    if exclude == 'running':
        exclude = expt.runningIntervals(
            imageSync=True, returnBoolList=True, end_padding=2.0)
        exclude = np.rollaxis(np.tile(exclude, (nROIs, 1, 1)), 1, 0)
    if exclude is not None:
        # Pad or trim the mask to match the imaging frame count.
        if exclude.shape[2] < nFrames:
            exclude = np.dstack(
                (exclude, np.zeros(
                    (nTrials, nROIs, nFrames - exclude.shape[2]),
                    dtype='bool')))
        elif exclude.shape[2] > nFrames:
            exclude = exclude[:, :, :nFrames]
        imData[exclude] = np.nan
    psth_data = np.empty([nROIs, pre_frames + post_frames + 1, nTriggers])
    psth_data.fill(np.nan)
    trig_idx = 0
    # NOTE: the loop variable 'data' shadows the 'data' parameter (no
    # longer needed at this point).
    for triggers, data in it.izip(stimulus, imData):
        for trig in triggers:
            # Check for running off the ends
            if trig - pre_frames >= 0:
                window_start = trig - pre_frames
                data_start = 0
            else:
                window_start = 0
                data_start = pre_frames - trig
            if trig + post_frames < data.shape[1]:
                window_end = trig + post_frames + 1
                data_end = psth_data.shape[1]
            else:
                window_end = data.shape[1]
                # Negative value, used as a negative slice index below:
                # it trims exactly the frames that run past the end.
                data_end = data.shape[1] - trig - post_frames - 1
            psth_data[:, data_start:data_end, trig_idx] = \
                data[:, window_start:window_end]
            trig_idx += 1
    if return_full:
        if(return_starts):
            return psth_data, stimulus
        return psth_data
    # Taking the nanmean over an all NaN axis raises a warning, but it
    # will still return NaN there which is the intended behavior
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        result = nanmean(psth_data, axis=2)
    if smoothing is not None:
        if smoothing == 'flat':  # moving average
            w = np.ones(window_length, 'd')
        else:
            # If 'smoothing' is not a valid method this will throw an AttributeError
            w = eval('np.' + smoothing + '(window_length)')
        for idx, row in enumerate(result):
            # Reflect the row at both ends so the 'valid' convolution
            # covers the full window.
            s = np.r_[row[window_length - 1:0:-1], row, row[-1:-window_length:-1]]
            tmp = np.convolve(w / w.sum(), s, mode='valid')
            # Trim away extra frames
            result[idx] = tmp[window_length / 2 - 1:-window_length / 2]
    if ax:
        framePeriod = expt.frame_period()
        xAxis = np.linspace(
            -pre_frames, post_frames, result.shape[1]) * framePeriod
        if shade_ste:
            if shade_ste == 'sem':
                ste_psth = np.std(result, axis=0) / np.sqrt(nROIs)
            elif shade_ste == 'std':
                ste_psth = np.std(result, axis=0)
            else:
                raise ValueError(
                    'Unrecognized error shading argument: {}'.format(
                        shade_ste))
            mean_psth = np.mean(result, axis=0)
            if plot == 'heatmap':
                ax.imshow(mean_psth, interpolation='none', aspect='auto')
            elif plot == 'line':
                ax.plot(xAxis, mean_psth, color='b', lw=1)
                ax.fill_between(
                    xAxis, mean_psth - ste_psth, mean_psth + ste_psth,
                    facecolor='r', alpha=0.4)
        else:
            # No shading: one line per ROI, optionally with the mean.
            for roi in result:
                ax.plot(xAxis, roi)
            if plot_mean:
                mean_psth = np.mean(result, axis=0)
                ax.plot(xAxis, mean_psth, color='k', lw=2)
        ax.axvline(0, linestyle='dashed', color='k')
        ax.set_xlim((-pre_frames * framePeriod, post_frames * framePeriod))
        ax.set_xlabel('Time (s)')
        ax.set_ylabel(r'Mean $\Delta$F/F')
    return result
"""
ExperimentGroup analysis functions
"""
@memoize
def population_activity_new(
        expt_grp, stat, channel='Ch2', label=None, roi_filter=None,
        interval=None, **imaging_kwargs):
    """Calculate an activity metric for every ROI/trial in the group.

    Parameters
    ----------
    stat : str
        Metric name, forwarded to lab.analysis.calc_activity.
    interval : dict of lab.classes.interval.Interval, optional
        Per-experiment Interval restricting which frames are included.
    **imaging_kwargs
        Forwarded to expt.imagingData.

    Returns
    -------
    pd.DataFrame
        Columns 'trial', 'roi', 'value'.
    """
    records = []
    for expt in expt_grp:
        expt_interval = None if interval is None else interval[expt]
        # Fall back to the group's imaging label when none was given.
        if label is None:
            try:
                label = expt_grp.args['imaging_label']
            except (KeyError, AttributeError):
                pass
        expt_activity = ca.calc_activity(
            expt, channel=channel, label=label, roi_filter=roi_filter,
            method=stat, interval=expt_interval, **imaging_kwargs)
        expt_rois = expt.rois(
            channel=channel, label=label, roi_filter=roi_filter)
        trials = expt.findall('trial')
        assert expt_activity.shape[0] == len(expt_rois)
        assert expt_activity.shape[1] == len(trials)
        # One record per (trial, roi) pair.
        for trial_idx, trial in enumerate(trials):
            for roi_activity, roi in it.izip(expt_activity, expt_rois):
                records.append(
                    {'trial': trial, 'roi': roi,
                     'value': roi_activity[trial_idx]})
    return pd.DataFrame(records, columns=['trial', 'roi', 'value'])
def population_activity(
        exptGrp, stat, channel='Ch2', label=None, roi_filter=None,
        interval=None, dF='from_file', running_only=False,
        non_running_only=False, running_kwargs=None):
    """Calculates the activity of a population of cells with various methods

    Keyword arguments:
    exptGrp -- pcExperimentGroup to analyze
    stat -- activity statistic to calculate, see calc_activity for details
    interval -- [(start1, stop1), ...] times to include, or boolean mask
    running_only/non_running_only -- If True, only include running/non-running
        intervals
    dF -- dF method to use on imaging data
    running_kwargs -- Optional, arguments for running intervals

    Returns:
    pd.DataFrame with 'trial', 'roi', and 'value' columns.
    """
    if running_kwargs is None:
        running_kwargs = {}
    if label is None:
        try:
            # BUG FIX: previously read 'expt_grp.args', but the parameter
            # here is named 'exptGrp' -- the resulting NameError was not
            # caught by the (KeyError, AttributeError) clause below.
            label = exptGrp.args['imaging_label']
        except (KeyError, AttributeError):
            pass
    activity = []
    for expt in exptGrp:
        (nROIs, nFrames, nTrials) = expt.imaging_shape(
            channel=channel, label=label, roi_filter=roi_filter)
        if interval is not None:
            if interval.shape == (nROIs, nFrames, nTrials):
                # Hackily allow passing in pre-calced intervals
                # No reason nROIs will be the same for all expts, so need a
                # better solution here
                expt_interval = interval
            else:
                # Convert (start, stop) time pairs to a boolean frame mask.
                frame_period = expt.frame_period()
                expt_interval = np.zeros(
                    (nROIs, nFrames, nTrials), dtype='bool')
                for inter in interval:
                    expt_interval[:,
                                  int(inter[0] / frame_period):
                                  int(inter[1] / frame_period) + 1, :] = True
        else:
            expt_interval = None
        if running_only:
            # Intersect the interval mask with running frames.
            running_intervals = np.array(expt.runningIntervals(
                returnBoolList=True, imageSync=True, **running_kwargs))
            running_intervals = np.tile(running_intervals.T, (nROIs, 1, 1))
            assert running_intervals.shape == (nROIs, nFrames, nTrials)
            if expt_interval is None:
                expt_interval = running_intervals
            else:
                expt_interval = np.logical_and(
                    expt_interval, running_intervals)
        elif non_running_only:
            # Intersect the interval mask with non-running frames.
            non_running_intervals = ~np.array(expt.runningIntervals(
                returnBoolList=True, imageSync=True, **running_kwargs))
            non_running_intervals = np.tile(
                non_running_intervals.T, (nROIs, 1, 1))
            assert non_running_intervals.shape == (nROIs, nFrames, nTrials)
            if expt_interval is None:
                expt_interval = non_running_intervals
            else:
                expt_interval = np.logical_and(
                    expt_interval, non_running_intervals)
        expt_activity = ca.calc_activity(
            expt, channel=channel, label=label, roi_filter=roi_filter,
            method=stat, interval=expt_interval, dF=dF)
        expt_rois = expt.rois(
            channel=channel, label=label, roi_filter=roi_filter)
        assert expt_activity.shape[0] == len(expt_rois)
        assert expt_activity.shape[1] == len(expt.findall('trial'))
        for trial_idx, trial in enumerate(expt.findall('trial')):
            for roi_activity, roi in it.izip(expt_activity, expt_rois):
                activity.append(
                    {'trial': trial, 'roi': roi,
                     'value': roi_activity[trial_idx]})
    return pd.DataFrame(activity, columns=['trial', 'roi', 'value'])
def trace_sigma(
        expt_grp, channel='Ch2', label=None, roi_filter=None, **trans_kwargs):
    """Standard deviation ('sigma') of each ROI's calcium trace.

    Parameters
    ----------
    **trans_kwargs
        Forwarded to expt.transientsData.

    Returns
    -------
    pd.DataFrame
        Columns 'trial', 'roi', 'value'.
    """
    rois = expt_grp.rois(channel=channel, label=label, roi_filter=roi_filter)
    records = []
    for expt in expt_grp:
        # Experiments without extracted transients contribute nothing.
        if not expt.hasTransientsFile(channel=channel):
            continue
        sigmas = expt.transientsData(
            channel=channel, label=label, roi_filter=roi_filter,
            **trans_kwargs)['sigma']
        trials = expt.findall('trial')
        assert len(sigmas) == len(rois[expt])
        for roi, roi_sigmas in zip(rois[expt], sigmas):
            assert len(roi_sigmas) == len(trials)
            records.extend(
                {'trial': trial, 'roi': roi, 'value': float(value)}
                for trial, value in zip(trials, roi_sigmas))
    return pd.DataFrame(records, columns=['trial', 'roi', 'value'])
@memoize
def mean_fluorescence(
        expt_grp, channel='Ch2', label=None, roi_filter=None):
    """Mean raw fluorescence (not dF/F) of each ROI's time average.

    Returns
    -------
    pd.DataFrame
        Columns 'expt', 'roi', 'value'.
    """
    rois = expt_grp.rois(channel=channel, label=label, roi_filter=roi_filter)
    records = []
    for expt in expt_grp:
        dset = expt.imaging_dataset()
        ch_idx = sima.misc.resolve_channels(channel, dset.channel_names)
        time_average = dset.time_averages[..., ch_idx]
        for roi in rois[expt]:
            mask = roi.mask
            assert len(mask) == time_average.shape[0]
            # Gather the time-averaged pixels under the mask plane by
            # plane, then average all of them together.
            plane_values = []
            for plane, plane_mask in zip(time_average, mask):
                assert plane_mask.shape == plane.shape
                plane_values.append(plane[np.array(plane_mask.todense())])
            records.append({'expt': expt, 'roi': roi,
                            'value': np.concatenate(plane_values).mean()})
    return pd.DataFrame(records, columns=['expt', 'roi', 'value'])
def running_modulation(
        exptGrp, roi_filter, stat, population_activity_kwargs=None,
        mode='difference'):
    """Per-ROI running modulation of a population-activity metric.

    Computes the metric separately for running and non-running periods
    and combines them per ROI: running - nonrunning for
    mode='difference', running / nonrunning for mode='ratio'.

    Returns
    -------
    pd.DataFrame
        Columns 'roi', 'trial_nonrunning', 'value', 'trial'.
    """
    kwargs = population_activity_kwargs or {}
    running_df = population_activity(
        exptGrp, stat, roi_filter=roi_filter, running_only=True, **kwargs)
    nonrunning_df = population_activity(
        exptGrp, stat, roi_filter=roi_filter, non_running_only=True,
        **kwargs)
    # Pair each ROI's running and non-running values.
    df = pd.merge(
        running_df, nonrunning_df, how='inner', on='roi',
        suffixes=['_running', '_nonrunning'])
    if mode == 'difference':
        df['value'] = df['value_running'] - df['value_nonrunning']
    if mode == 'ratio':
        df['value'] = df['value_running'] / df['value_nonrunning']
    df = df.dropna()
    df['trial'] = df['trial_running']
    df.drop(['value_running', 'value_nonrunning', 'trial_running',
             'trial_nonrunning'], axis=1, inplace=True)
    return df
def baselineActivityCorrelations(
        exptGrp, ax=None, cbarAx=None, includeStimResponse=False,
        offset=False, cluster=None, reordering=None, colorcode=None,
        channel='Ch2', label=None, roi_filter=None):
    """
    Return the correlation matrix for the activity across ROIs before the
    stimuli, frame-count-weighted across experiments, optionally plotted
    (clustered heatmap) on ax.

    NOTE(review): SalienceExperiment, corrcoef, squareform, hierarchy,
    and bouton are not imported in this chunk (several imports above are
    commented out) -- verify they are in scope at module level.
    """
    ROIs = exptGrp.sharedROIs(
        channel=channel, label=label, roi_filter=roi_filter)
    shared_filter = lambda x: x.id in ROIs
    corrs = np.zeros([len(ROIs), len(ROIs)])
    totalFrames = 0.
    for expt in exptGrp:
        imData = expt.imagingData(channel=channel,
                                  label=label, roi_filter=shared_filter)
        # Optionally restrict to pre-stimulus frames only.
        if not includeStimResponse and isinstance(expt, SalienceExperiment):
            stimIdx = expt.imagingIndex(expt.stimulusTime())
            imData = imData[:, 0:(stimIdx - 1), :]
        imData = imData.reshape([imData.shape[0], -1], order='F')
        # collapse trial dimension of imaging data and take the correlation
        # matrix; accumulate weighted by frame count for averaging below
        if offset:
            tmpCorr = offsetCorrelation(imData, pcaClean=True)
            # corrs = np.minimum(corrs, 1.)
        else:
            tmpCorr = corrcoef(imData)
        corrs += tmpCorr * imData.shape[1]
        totalFrames += imData.shape[1]
    corrs /= totalFrames
    if ax is not None:
        # Optional hierarchical clustering to reorder the matrix.
        if cluster == 'complete':
            distances = 1 - corrs
            condensedDistances = squareform(distances, checks=False)
            linkageMatrix = hierarchy.complete(condensedDistances)
        elif cluster is not None:
            linkageMatrix = hierarchy.ward(corrs)
        if cluster is not None:
            ordering = hierarchy.leaves_list(linkageMatrix)
            if reordering is not None:
                ordering = ordering[reordering]
            corrs = corrs[ordering, :]
            corrs = corrs[:, ordering]
            # plt.figure(); hierarchy.dendrogram(linkageMatrix, labels=ROIs)
            ROIs = [ROIs[i] for i in ordering]
        im = ax.imshow(corrs, interpolation='nearest', aspect=1.0,
                       vmin=-1.0, vmax=1.0, cmap='bwr')
        if cbarAx:
            cbar = cbarAx.figure.colorbar(im, ax=cbarAx, ticks=[-1, 1],
                                          fraction=0.05)
            cbar.set_label('correlation', labelpad=-5)
        ax.set_xticks([])
        ax.tick_params(axis='y', color='white', labelcolor='k')
        # If bouton grouping is unavailable (e.g. bouton not imported or
        # ROIs aren't boutons), fall back to plain ROI-id tick labels.
        try:
            roiGroups, roiGroupNames = bouton.BoutonSet(ROIs).boutonGroups()
        except:
            ax.set_yticks(range(len(ROIs)))
            ax.set_yticklabels(ROIs)
        else:
            if colorcode == "postSynaptic":
                for k, group in enumerate(roiGroups):
                    for roi in group:
                        # if roiGroupNames[k] != 'other':
                        ax.add_patch(plt.Rectangle(
                            (-2, ROIs.index(roi.id) - 0.5), 1, 1,
                            color=bouton.groupPointStyle(roiGroupNames[k])[0],
                            lw=0))
                ax.set_xlim(left=-2)
            elif colorcode == "axon":
                colors = ['b', 'r', 'c', 'm', 'y', 'g', 'b', 'r']
                for k, group in enumerate(bouton.BoutonSet(ROIs).axonGroups()):
                    for roi in group:
                        ax.add_patch(plt.Rectangle(
                            (-2, ROIs.index(roi.id) - 0.5), 1, 1,
                            color=colors[k], lw=0))
                ax.set_xlim(left=-2)
            ax.set_xlim(right=ax.get_xlim()[1])
        ax.set_axis_off()
    return corrs, ROIs
def autocorrelation(exptGrp, channel='Ch2', label=None, roi_filter=None):
    """Average autocorrelation of each shared ROI's dF/F trace.

    Correlations are accumulated across every trial of every experiment
    in the group, then the non-negative lags are kept and normalized so
    each ROI's autocorrelation is 1 at zero lag.

    Returns
    -------
    array
        (n_shared_rois, n_lags) normalized autocorrelations.
    """
    ROIs = exptGrp.sharedROIs(
        channel=channel, label=label, roi_filter=roi_filter)
    shared_filter = lambda x: x.id in ROIs
    # Accumulators start as 0-d zeros; the first addition broadcasts them
    # up to the length of the first trial's correlation.
    ac = []
    for roiIdx in xrange(len(ROIs)):
        ac.append(np.zeros([]))
    for expt in exptGrp:
        imData = expt.imagingData(dFOverF='mean',
                                  channel=channel, label=label,
                                  roi_filter=shared_filter)
        for roiIdx in xrange(len(ROIs)):
            for i in xrange(imData.shape[2]):
                trace = imData[roiIdx, :, i] - imData[roiIdx, :, i].mean()
                try:
                    ac[roiIdx] = ac[roiIdx] + np.correlate(
                        trace, trace, 'full')
                except ValueError:
                    # Experiments may differ in frame count; truncate the
                    # accumulator and the new correlation to the shorter
                    # length.  (Was a bare 'except:', which also silently
                    # swallowed unrelated errors.)
                    tmpCorr = np.correlate(trace, trace, 'full')
                    ac[roiIdx] = ac[roiIdx][:tmpCorr.size] + \
                        tmpCorr[:ac[roiIdx].size]
    for roiIdx in range(len(ROIs)):
        # Keep non-negative lags and normalize by the zero-lag value.
        ac[roiIdx] = ac[roiIdx][ac[roiIdx].size // 2:]
        ac[roiIdx] = ac[roiIdx] / ac[roiIdx][0]
    return np.array(ac, dtype=float)
def trialAverages(exptGrp, stimulus, duration=None, excludeRunning=False,
                  power=None, removeNanBoutons=False, **imaging_kwargs):
    """Average imaging data across matching trials of every experiment.

    For contextual fear conditioning experiments only the first trial is
    used; otherwise expt.trialIndices selects trials matching the
    stimulus/duration/power criteria.

    Returns
    -------
    array
        (n_rois, n_frames) trial-averaged traces.
    """
    selected = []
    for expt in exptGrp:
        imData = expt.imagingData(**imaging_kwargs)
        if expt.get('experimentType') == 'contextualFearConditioning':
            trialIndices = [0]
        else:
            trialIndices = expt.trialIndices(
                stimulus, duration, power=power,
                excludeRunning=excludeRunning)
        selected.append(imData[:, :, trialIndices])
    timeSeries = np.concatenate(selected, axis=2)
    if removeNanBoutons:
        # Drop ROIs with any non-finite sample in any trial.
        flattened = timeSeries.reshape(
            [timeSeries.shape[0], -1], order='F')
        finite_rois = np.nonzero(
            np.all(np.isfinite(flattened), axis=1))[0]
        timeSeries = timeSeries[finite_rois, :, :]
    return np.mean(timeSeries, axis=2)
def peakAverageResponses(exptGrp, stimulus, postStimDuration=1.5,
                         duration=None, excludeRunning=False, power=None,
                         removeNanBoutons=False, **kwargs):
    """Peak normalized response of each ROI's trial-averaged trace.

    Returns, per ROI, (max - baseline) / baseline where the max is taken
    over the post-stimulus window and the baseline is the pre-stimulus
    mean.
    """
    t = trialAverages(exptGrp, stimulus, duration=duration,
                      excludeRunning=excludeRunning, power=power,
                      removeNanBoutons=removeNanBoutons, **kwargs)
    if exptGrp[0].get('experimentType') == 'contextualFearConditioning' and \
            stimulus == 'context':
        # BUG FIX: contextInterval is called on an experiment at every
        # other call site in this module; calling it on the group object
        # raised AttributeError.
        stimIdx = exptGrp[0].imagingIndex(exptGrp[0].contextInterval()[0])
    else:
        # TODO: make robust to different stim times and imaging parameters
        stimIdx = exptGrp[0].imagingIndex(exptGrp[0].stimulusTime())
    maxIdx = stimIdx + exptGrp[0].imagingIndex(postStimDuration)
    baseline = nanmean(t[:, :stimIdx], axis=1)
    peaks = (np.nanmax(t[:, stimIdx:maxIdx], axis=1) - baseline) / baseline
    return peaks
def averageResponseIntegrals(
        exptGrp, stimulus, duration=None, ROIs=None, power=None,
        excludeRunning=False, demixed=False, dFOverF=None,
        postStimDuration=1.5, sharedBaseline=True, linearTransform=None,
        channel='Ch2', label=None, roi_filter=None):
    """
    Return the integral of the trial-averaged response of each roi to the
    stimulus.

    Thin wrapper around responseIntegrals that averages the per-trial
    integrals across trials.
    """
    # BUG FIX: 'channe=channel' was a typo (TypeError on every call);
    # also the 'power' argument was previously accepted but never
    # forwarded.
    return responseIntegrals(
        exptGrp, stimulus, postStimDuration=postStimDuration,
        duration=duration, power=power, excludeRunning=excludeRunning,
        demixed=demixed, dFOverF=dFOverF, sharedBaseline=sharedBaseline,
        linearTransform=linearTransform, channel=channel, label=label,
        roi_filter=roi_filter).mean(axis=1)
def responseIntegrals(exptGrp, stimuli, postStimDuration=1.5, duration=None,
                      power=None, excludeRunning=False, demixed=False,
                      dFOverF=None, sharedBaseline=False,
                      linearTransform=None, channel='Ch2', label=None,
                      roi_filter=None):
    """Per-trial response integrals of shared ROIs for the given stimuli.

    Returns
    -------
    array
        (n_shared_rois, n_trials) concatenated across all matching
        experiment/stimulus pairs; second axis is empty when nothing
        matched.
    """
    ROIs = exptGrp.sharedROIs(
        channel=channel, label=label, roi_filter=roi_filter)
    shared_filter = lambda x: x.id in ROIs
    if not isinstance(stimuli, list):
        stimuli = [stimuli]
    integrals = []
    for experiment in exptGrp:
        # Only salience and fear-conditioning experiments support the
        # per-stimulus responseIntegrals machinery.
        if not isinstance(experiment,
                          (SalienceExperiment, FearConditioningExperiment)):
            continue
        for stimulus in stimuli:
            is_fear = isinstance(experiment, FearConditioningExperiment)
            if is_fear or stimulus in experiment.stimuli():
                integrals.append(experiment.responseIntegrals(
                    stimulus, postStimDuration=postStimDuration,
                    linearTransform=linearTransform,
                    excludeRunning=excludeRunning, demixed=demixed,
                    duration=duration, power=power,
                    dFOverF=dFOverF,
                    sharedBaseline=sharedBaseline, channel=channel,
                    label=label, roi_filter=shared_filter))
    if len(integrals):
        return np.concatenate(integrals, axis=1)
    else:
        return np.zeros([len(ROIs), 0])
# TODO: Figure out what's up w/ this function, looks broken
def responseZScores(
        exptGrp, stimuli, postStimDuration=1.5, duration=None, power=None,
        excludeRunning=False, demixed=False, dFOverF=None,
        sharedBaseline=False, linearTransform=None, channel='Ch2',
        label=None, roi_filter=None):
    """Per-ROI z-scores of the response integrals to the given stimuli.

    NOTE(review): the stimuli == 'running' branch computes `modulations`
    but never uses it, and the final return then references
    `response_integrals`, which is undefined in that branch -- calling
    with stimuli='running' raises NameError.  Intended behavior for that
    case needs confirming before a fix.
    """
    if stimuli == 'running':
        modulations = runningModulation(
            exptGrp, channel=channel, label=label, roi_filter=roi_filter)
    else:
        if not isinstance(stimuli, list):
            stimuli = [stimuli]
        response_integrals = responseIntegrals(
            exptGrp, stimuli, duration=duration, power=power,
            excludeRunning=excludeRunning, demixed=demixed,
            dFOverF=dFOverF, postStimDuration=postStimDuration,
            sharedBaseline=sharedBaseline, linearTransform=linearTransform,
            channel=channel, label=label, roi_filter=roi_filter)
    # z = mean / (std / sqrt(n)): trial-mean integral scaled by the
    # standard error of the per-trial integrals (ddof=0).
    return response_integrals.mean(axis=1) / np.sqrt(
        np.var(response_integrals, axis=1)) * np.sqrt(
        response_integrals.shape[1])
# This function also looks broken (see NOTE(review) comments below)
def runningModulation(
        exptGrp, voidRange=None, returnFull=False, minRunningFrames=3,
        padding=0.5, pcaCleanup=False, linearTransform=None,
        signal='imaging_data', LFP_freq_range=(4, 8), channel='Ch2',
        label=None, roi_filter=None):
    """
    Evaluate how signals are modulated by running: each running epoch's
    signal is expressed relative to the non-running baseline
    ((run - baseline) / baseline).

    Inputs:
        signal: {'imaging_data', 'LFP'} -- determines whether to analyze imaging
            data or LFP void range: pre-set range to exclude

    Return:
        With returnFull: (modulations, run durations).  Otherwise a
        duration-weighted mean modulation per ROI (imaging branch only;
        the LFP branch raises 'Code incomplete').

    NOTE(review): the 'pcaCleanup' parameter shadows the module-level
    pcaCleanup function; the LFP branch's code after the raise is
    unreachable and references an undefined 'ROIs'; 'linePartition' is
    not imported in this chunk -- verify it is in scope.
    """
    if signal == 'LFP':
        modulations = []
        runDurations = []
        for experiment in exptGrp:
            blankData = []
            runData = []
            experimentModulations = []
            if experiment.get('experimentType') in ['salience', 'doubleStimulus', 'intensityRanges']:
                stimTime = experiment.stimulusTime()
                postStimTime = experiment.stimulusTime() + 5.
            for trial in experiment.findall('trial'):
                LFP_power, LFP_times = trial.freqBandPower(LFP_freq_range[0], LFP_freq_range[1])
                dt = LFP_times[1] - LFP_times[0]
                # Build the time ranges to exclude (stimulus response /
                # post-context-onset), unless the caller supplied one.
                if voidRange is None:
                    if experiment.get('experimentType') in ['salience', 'doubleStimulus', 'intensityRanges']:
                        tmpVoidRanges = [[stimTime, postStimTime]]
                    elif experiment.get('experimentType') == 'contextualFearConditioning':
                        tmpVoidRanges = [[experiment.contextInterval()[0], np.Inf]]
                    else:
                        tmpVoidRanges = []
                else:
                    tmpVoidRanges = [voidRange]
                tmpVoidRanges = linePartition.LinePartition(tmpVoidRanges)
                runIntervals = linePartition.LinePartition([list(x) for x in
                    ba.runningIntervals(trial, stationary_tolerance=2.2, imageSync=False, end_padding=padding)])
                # Running epochs outside the void ranges vs. everything
                # that is neither running nor voided (the baseline).
                validRunIntervals = linePartition.intersection(runIntervals,
                    linePartition.complement(tmpVoidRanges))
                nonRunningIntervals = linePartition.complement(linePartition.union(tmpVoidRanges, runIntervals))
                for interval in nonRunningIntervals:
                    startIdx = 0 if interval[0] == -np.Inf else int(interval[0] / dt)
                    endIdx = len(LFP_times) if interval[1] == np.Inf else int(interval[1] / dt)
                    blankData.append(LFP_power[startIdx:endIdx])
                for interval in validRunIntervals:
                    startIdx = 0 if interval[0] == -np.Inf else int(interval[0] / dt)
                    endIdx = len(LFP_times) if interval[1] == np.Inf else int(interval[1] / dt)
                    runData.append(LFP_power[startIdx:endIdx])
                    runDurations.append(interval[1] - interval[0])
            # Normalize each running epoch against the baseline power.
            baseline = np.concatenate(blankData).mean()
            for runEpoch in runData:
                runEpoch /= baseline
                runEpoch -= 1
            modulations.extend(runData)
        if len(modulations):
            modulations = np.concatenate(modulations, axis=1)
        runDurations = np.array(runDurations)
        if returnFull:
            return modulations, runDurations
        else:
            raise Exception('Code incomplete')
            # NOTE(review): dead code -- never reached, and 'ROIs' is
            # undefined in this branch.
            meanModulations = np.zeros(len(ROIs))
            for i, d in enumerate(runDurations):
                meanModulations += modulations[:, i] * d
            meanModulations /= runDurations.sum()
            return meanModulations
    if signal == 'imaging_data':
        # find shared ROIs and pca cleanup matrix
        ROIs = exptGrp.sharedROIs(
            channel=channel, label=label, roi_filter=roi_filter)
        shared_filter = lambda x: x.id in ROIs
        if not len(ROIs):
            raise exc.NoSharedROIs
        if pcaCleanup:
            assert linearTransform is None
            linearTransform = exptGrp.pcaCleanupMatrix(ROIs)
        modulations = []
        runDurations = []
        for experiment in exptGrp:
            blankData = []
            runData = []
            if signal == 'imaging_data':
                if not len(experiment.findall('trial')):
                    raise exc.MissingTrial
                imData = experiment.imagingData(
                    dFOverF='mean', demixed=False,
                    linearTransform=linearTransform, channel=channel,
                    label=label, roi_filter=shared_filter)
                # Shift so values are F/F0 rather than dF/F0.
                imData += 1.
                imageSync = True
            elif signal == 'LFP':
                imageSync = False
            runInts = experiment.runningIntervals(stationary_tolerance=2.2, imageSync=imageSync)
            if experiment.get('experimentType') in ['salience', 'doubleStimulus', 'intensityRanges']:
                stimIdx = experiment.imagingIndex(experiment.stimulusTime())
                maxIdx = experiment.imagingIndex(experiment.stimulusTime() + 5.)
            padIndices = experiment.imagingIndex(padding)
            for cycleIdx in range(min(imData.shape[2], len(experiment.findall('trial')))):
                # Frame indices to exclude for this cycle.
                if voidRange is None:
                    if experiment.get('experimentType') in ['salience', 'doubleStimulus', 'intensityRanges']:
                        if experiment.findall('trial')[cycleIdx].get('stimulus') == 'light':
                            tmpVoidRange = []
                        else:
                            tmpVoidRange = range(stimIdx, maxIdx)
                    elif experiment.get('experimentType') == 'contextualFearConditioning':
                        onsetIdx = experiment.imagingIndex(experiment.contextInterval()[0])
                        tmpVoidRange = range(onsetIdx, imData.shape[1])
                    else:
                        tmpVoidRange = []
                else:
                    tmpVoidRange = voidRange
                # Expand each running interval by the padding, clip to
                # valid frames, and drop voided frames.
                runningIndices = []
                for interval in runInts[cycleIdx]:
                    if interval[0] not in tmpVoidRange:
                        runningIndices.extend(
                            [x for x in range(interval[0] - padIndices,
                                              interval[1] + padIndices + 1)
                             if (x not in tmpVoidRange)
                             and x < imData.shape[1] and x >= 0])
                nonRunningIndices = [x for x in range(imData.shape[1])
                                     if x not in runningIndices
                                     and x not in tmpVoidRange]
                blankData.append(imData[:, nonRunningIndices, cycleIdx])
                if len(runningIndices) >= minRunningFrames:
                    # NOTE(review): hard-coded guard against a specific
                    # mouse ('al1') -- looks like leftover debugging.
                    assert exptGrp[0].parent.get('mouseID') != 'al1'
                    runData.append(imData[:, runningIndices, cycleIdx].mean(axis=1).reshape([-1, 1]))
                    runDurations.append(len(runningIndices))
            # Normalize each running epoch against the per-ROI baseline.
            baseline = np.concatenate(blankData, axis=1).mean(axis=1).reshape([-1, 1])
            for runEpoch in runData:
                runEpoch /= baseline
                runEpoch -= 1
            modulations.extend(runData)
        if len(modulations):
            modulations = np.concatenate(modulations, axis=1)
        runDurations = np.array(runDurations)
        if returnFull:
            return modulations, runDurations
        else:
            # Duration-weighted mean modulation per ROI.
            meanModulations = np.zeros(len(ROIs))
            for i, d in enumerate(runDurations):
                meanModulations += modulations[:, i] * d
            meanModulations /= runDurations.sum()
            return meanModulations
def lickingModulation(exptGrp, voidRange=None, returnFull=False,
                      minLickingFrames=3, padding=0., pcaCleanup=False,
                      linearTransform=None, excludeRunning=True,
                      channel='Ch2', label=None, roi_filter=None):
    """For each ROI, return the (L-B)/B, where L is the activity during licking intervals,
    and B is the baseline activity, i.e during non-licking intervals.

    With returnFull, returns (modulations, licking durations) instead of
    the duration-weighted per-ROI mean.
    """
    ROIs = exptGrp.sharedROIs(
        channel=channel, label=label, roi_filter=roi_filter)
    shared_filter = lambda x: x.id in ROIs
    if not len(ROIs):
        raise exc.NoSharedROIs
    if pcaCleanup:
        assert linearTransform is None
        linearTransform = exptGrp.pcaCleanupMatrix(
            channel=channel, label=label, roi_filter=shared_filter)
    modulations = []
    lickDurations = []
    for experiment in exptGrp:
        blankData = []
        lickData = []
        if not len(experiment.findall('trial')):
            raise exc.MissingTrial
        # BUG FIX: the ROI filter was previously passed as
        # 'shared_filter=shared_filter'; every other imagingData call in
        # this module uses the 'roi_filter' keyword.
        imData = experiment.imagingData(
            dFOverF='mean', demixed=False,
            linearTransform=linearTransform, channel=channel, label=label,
            roi_filter=shared_filter)
        # Shift so values are F/F0 rather than dF/F0.
        imData += 1.
        lickInts = experiment.lickingIntervals(imageSync=True, threshold=20 * experiment.frame_period())
        if experiment.get('experimentType') in ['salience', 'doubleStimulus', 'intensityRanges']:
            stimIdx = experiment.imagingIndex(experiment.stimulusTime())
            maxIdx = experiment.imagingIndex(experiment.stimulusTime() + 5.)
        padIndices = experiment.imagingIndex(padding)
        for cycleIdx in range(min(imData.shape[2],
                                  len(experiment.findall('trial')))):
            # determine range of data to be ignored
            if voidRange is None:
                if experiment.get('experimentType') in [
                        'salience', 'doubleStimulus', 'intensityRanges']:
                    if experiment.findall('trial')[cycleIdx].get(
                            'stimulus') == 'light':
                        tmpVoidRange = []
                    else:
                        tmpVoidRange = range(stimIdx, maxIdx)
                elif experiment.get('experimentType') \
                        == 'contextualFearConditioning':
                    onsetIdx = experiment.imagingIndex(experiment.contextInterval()[0])
                    tmpVoidRange = range(onsetIdx, imData.shape[1])
                else:
                    tmpVoidRange = []
            else:
                tmpVoidRange = voidRange
            # Expand each licking interval by the padding, clip to valid
            # frames, and drop voided frames.
            lickingIndices = []
            for interval in lickInts[cycleIdx]:
                if interval[0] not in tmpVoidRange:
                    lickingIndices.extend([x for x in range(interval[0] - padIndices,
                                                            interval[1] + padIndices + 1)
                                           if (x not in tmpVoidRange) and x < imData.shape[1] and x >= 0])
            nonLickingIndices = [x for x in range(imData.shape[1]) if (x not in lickingIndices)
                                 and x not in tmpVoidRange]
            if excludeRunning:
                # NOTE(review): uses trial 0's running intervals for
                # every cycle ([0]); confirm this is intended.
                runInts = experiment.runningIntervals(stationary_tolerance=2.2)[0]
                nonLickingIndices = [i for i in nonLickingIndices
                                     if not any([i <= M and i >= m for m, M in runInts])]
            blankData.append(imData[:, nonLickingIndices, cycleIdx])
            if len(lickingIndices) >= minLickingFrames:
                lickData.append(imData[:, lickingIndices, cycleIdx].mean(axis=1).reshape([-1, 1]))
                lickDurations.append(len(lickingIndices))
        # Normalize each licking epoch against the per-ROI baseline.
        baseline = np.concatenate(blankData, axis=1).mean(axis=1).reshape([-1, 1])
        for lickEpoch in lickData:
            lickEpoch /= baseline
            lickEpoch -= 1
        modulations.extend(lickData)
    if len(modulations):
        modulations = np.concatenate(modulations, axis=1)
    lickDurations = np.array(lickDurations)
    if returnFull:
        return modulations, lickDurations
    else:
        # Duration-weighted mean modulation per ROI.
        meanModulations = np.zeros(len(ROIs))
        for i, d in enumerate(lickDurations):
            meanModulations += modulations[:, i] * d
        meanModulations /= lickDurations.sum()
        return meanModulations
def runTriggeredTraces(exptGrp, linearTransform=None, prePad=15, postPad=35,
                       voidRange=None, postRunningTime=3., channel='Ch2',
                       label=None, roi_filter=None):
    """Collect running-onset-triggered imaging traces across an experiment group.

    For every running interval in every experiment, extracts a window of
    dF/F data starting ``prePad`` frames before the interval onset and
    extending up to ``prePad + postPad`` frames total.  Frames that fall
    before the recording start, inside ``voidRange`` (or the auto-derived
    stimulus/context window), or within a *different* running interval are
    NaN-ed out.

    Parameters
    ----------
    exptGrp : ExperimentGroup
        Group whose shared ROIs are analyzed.
    linearTransform : optional
        Passed through to ``imagingData`` (e.g. a demixing transform).
    prePad, postPad : int
        Frames kept before the run onset / maximum total window length.
    voidRange : sequence of int or None
        Frame indices to exclude; if None, derived per experiment type
        (stimulus window for salience-type experiments, context interval
        for contextual fear conditioning).
    postRunningTime : float
        Seconds of data kept after the running interval ends.

    Returns
    -------
    np.ndarray
        nROIs x (prePad + postPad) x nIntervals array, or an all-NaN
        nROIs x nFrames x 1 array if no valid intervals were found.

    Raises
    ------
    exc.NoSharedROIs
        If the group has no ROIs shared across experiments.
    """
    ROIs = exptGrp.sharedROIs(
        channel=channel, label=label, roi_filter=roi_filter)
    shared_filter = lambda x: x.id in ROIs
    if not len(ROIs):
        raise exc.NoSharedROIs
    runningTriggeredData = []
    for experiment in exptGrp:
        # Convert the post-run time window into a frame count.
        postRunIndices = experiment.imagingIndex(postRunningTime)
        imData = experiment.imagingData(
            dFOverF='mean', demixed=False,
            linearTransform=linearTransform, channel=channel, label=label,
            roi_filter=shared_filter)
        runInts = experiment.runningIntervals(stationary_tolerance=2.2)
        if experiment.get('experimentType') in ['salience',
                                                'doubleStimulus',
                                                'intensityRanges']:
            # Stimulus window: stim onset to 5 s after, in frame indices.
            stimIdx = experiment.imagingIndex(experiment.stimulusTime())
            maxIdx = experiment.imagingIndex(
                experiment.stimulusTime() + 5.)
        for cycleIdx in range(imData.shape[2]):
            # Determine per-trial frames to ignore when voidRange not given.
            if voidRange is None:
                if experiment.get('experimentType') in ['salience',
                                                        'doubleStimulus',
                                                        'intensityRanges']:
                    # Light trials are not excluded; all other stimuli
                    # exclude the stimulus window.
                    if experiment.findall('trial')[
                            cycleIdx].get('stimulus') == 'light':
                        tmpVoidRange = []
                    else:
                        tmpVoidRange = range(stimIdx, maxIdx)
                elif experiment.get('experimentType') == \
                        'contextualFearConditioning':
                    # Exclude everything from context onset onward.
                    onsetIdx = experiment.imagingIndex(
                        experiment.contextInterval()[0])
                    tmpVoidRange = range(onsetIdx, imData.shape[1])
                else:
                    tmpVoidRange = []
            else:
                tmpVoidRange = voidRange
            for interval in runInts[cycleIdx]:
                # Only intervals whose onset is valid and within the movie.
                if interval[0] not in tmpVoidRange and \
                        interval[0] < imData.shape[1]:
                    indices = np.arange(interval[0] - prePad,
                                        min(interval[1] + postRunIndices,
                                            imData.shape[1]))
                    m = min(prePad + postPad, len(indices))
                    indices = indices[:m]
                    assert interval[0] in list(indices)
                    if indices.size:
                        # Number of leading indices that fall before frame 0.
                        preClip = -min(indices.min(), 0)
                        runData = np.nan * np.ones([imData.shape[0],
                                                    prePad + postPad])
                        runData[:, preClip:len(indices)] = \
                            imData[:, indices[preClip:], cycleIdx]
                        # NaN out pre-onset frames contaminated by the void
                        # range or by a different running interval.
                        for i in range(preClip, prePad):
                            if indices[i] in tmpVoidRange or any(
                                    [indices[i] in I for I in runInts[cycleIdx]
                                     if all(I != interval)]):
                                runData[:, :(i + 1)] = np.nan
                        # NaN out post-onset frames once the void range starts.
                        for i in range(prePad, len(indices)):
                            if indices[i] in tmpVoidRange:
                                runData[:, i:] = np.nan
                        # The onset frame itself must have survived masking.
                        assert np.isfinite(runData[0, list(indices).index(
                            interval[0])])
                        runningTriggeredData.append(runData.reshape(
                            [runData.shape[0], runData.shape[1], 1]))
    if runningTriggeredData:
        return np.concatenate(runningTriggeredData, axis=2)
    # No intervals found: return an all-NaN placeholder.
    # NOTE(review): relies on imData from the last loop iteration; fails with
    # UnboundLocalError if exptGrp is empty -- confirm callers never pass an
    # empty group.
    return np.nan * np.ones([len(ROIs), imData.shape[1], 1])
def getSalienceTraces(expGroup):
    """Assemble per-trial salience imaging traces into a pandas DataFrame.

    Arguments:
        expGroup -- a lab.ExperimentGroup instance that contains only salience
            experiments

    Returns a pandas DataFrame with one row per (ROI, trial) containing the
    columns ROI, expt, trialNum, stimulus, dFOF, time, and stimTime.
    """
    columns = ("ROI", "expt", "trialNum", "stimulus", "dFOF", "time",
               "stimTime")
    data = {col: [] for col in columns}
    for exp in expGroup:
        # Reorder to trials x ROIs x frames for per-trial slicing.
        trial_traces = np.transpose(
            exp.imagingData(dFOverF="from_file"), (2, 0, 1))
        stimuli = [trial.get("stimulus") for trial in exp.findall("trial")]
        mouse = exp.parent
        # Replace the first tuple element with the Mouse object itself.
        rois = [(mouse, tup[1], tup[2]) for tup in exp.roi_tuples()]
        n_rois = exp.num_rois()
        # Time axes relative to stimulus onset, repeated once per ROI.
        rel_times = [exp.imagingTimes() - exp.stimulusTime()] * n_rois
        stim_times = [exp.stimulusTime()] * n_rois
        for trial_idx in range(len(stimuli)):
            data["ROI"].extend(rois)
            data["expt"].extend([exp] * n_rois)
            data["trialNum"].extend([trial_idx] * n_rois)
            data["stimulus"].extend([stimuli[trial_idx]] * n_rois)
            data["dFOF"].extend(list(trial_traces[trial_idx, :, :]))
            data["time"].extend(rel_times)
            data["stimTime"].extend(stim_times)
    return pd.DataFrame(data)
def PSTH(exptGrp, stimulus, ax=None, pre_time=5, post_time=5,
         channel='Ch2', label=None, roi_filter=None, plot_mean=True,
         shade_ste=None, return_full=False, return_df=False, exclude=None, data=None,
         gray_traces=False, color=None, **kwargs):
    """PSTH method for combining data within an ExperimentGroup.

    See expt.psth for details.

    stimulus, exclude, and data can also be a dictionary with experiments
    as keys that will be passed in to each call of Experiment.psth

    Returns an array n_total_rois x n_frames as well as an n_total_rois
    length list of (Mouse, location, roi_id) tuples that uniquely identify
    each ROI and an x_range for each ROI.
    Also doesn't assume the same sampling_rate.

    If return_df is True, a long-format pandas DataFrame of per-trace data
    is returned instead.  If return_full == 'norm', PSTHs interpolated onto
    a common (shortest) time axis are returned together with that axis.
    """
    # Normalize the per-experiment arguments to dicts keyed by expt tuple.
    if not isinstance(stimulus, dict):
        stimulus = {expt.totuple(): stimulus for expt in exptGrp}
    if not isinstance(exclude, dict):
        exclude = {expt.totuple(): exclude for expt in exptGrp}
    if not isinstance(data, dict):
        data = {expt.totuple(): data for expt in exptGrp}
    # Default windows: widest pre/post span over all experiments.
    if pre_time is None:
        pre_time = 0
        for expt in exptGrp:
            pre_time = np.amax([pre_time, expt.stimulusTime()])
    if post_time is None:
        post_time = 0
        for expt in exptGrp:
            post_time = np.amax(
                [post_time, expt.imagingTimes()[-1] - expt.stimulusTime()])
    psth_data = {}
    stimStarts = {}
    x_range = {}
    df = dict()
    if(return_df):
        df["expt"] = []
        df["stimulus"] = []
        df["stimStart"] = []
        df["ROI"] = []
        df["activity"] = []
        df["time"] = []
    # Per-experiment PSTHs; frame counts derive from each expt's frame period.
    for expt in exptGrp:
        frame_period = expt.frame_period()
        pre_frames = int(pre_time / frame_period)
        post_frames = int(post_time / frame_period)
        psth_data[expt.totuple()], stimStarts[expt] = _psth(
            expt, stimulus[expt.totuple()], pre_frames=pre_frames,
            post_frames=post_frames, channel=channel, label=label,
            roi_filter=roi_filter, return_full=True, return_starts=True,
            exclude=exclude[expt.totuple()], data=data[expt.totuple()],
            duplicate_window=pre_time+post_time, **kwargs)
        # Time axis in seconds, 0 at stim onset.
        x_range[expt.totuple()] = np.linspace(
            -pre_frames, post_frames, psth_data[expt.totuple()].shape[1]) \
            * frame_period
        assert 0 in x_range[expt.totuple()]
    all_rois = exptGrp.allROIs(
        channel=channel, label=label, roi_filter=roi_filter)
    result = []
    rois = []
    x_ranges = []
    if return_full:
        all_stacked_data = []
    # Aggregate each unique ROI's traces across all experiments it appears in.
    for roi in all_rois:
        roi_data = []
        for roi_expt, roi_idx in all_rois[roi]:
            roi_data.append(psth_data[roi_expt.totuple()][roi_idx])
            if(return_df):
                psh = psth_data[roi_expt.totuple()][roi_idx]
                numPoints, numTraces = psh.shape
                df["stimulus"].extend([stimulus[roi_expt.totuple()]] * numTraces)
                df["stimStart"].extend([val for sublist in stimStarts[roi_expt] for val in sublist])
                df["ROI"].extend([roi] * numTraces)
                df["expt"].extend([roi_expt] * numTraces)
                df["activity"].extend(list(psth_data[roi_expt.totuple()][roi_idx].T))
                df["time"].extend([x_range[roi_expt.totuple()]] * numTraces)
        # If the experiments were all sampled at the same rate, just stack
        # the data, otherwise keep the most common frame rate, drop the rest
        try:
            stacked_data = np.hstack(roi_data)
        except ValueError:
            lengths = [r.shape[0] for r in roi_data]
            most_common = mode(lengths)[0][0]
            new_data = [r for r, length in zip(roi_data, lengths)
                        if length == most_common]
            stacked_data = np.hstack(new_data)
            warnings.warn(
                'ROI imaged at multiple frame rates, only keeping most ' +
                'common frame rate: mouse {}, loc {}, id {}'.format(
                    roi[0].get('mouseID'), roi[1], roi[2]))
            # Find an experiment that was sampled at the slowest rate
            roi_expt, _ = all_rois[roi][lengths.index(most_common)]
        if return_full:
            all_stacked_data.append(stacked_data)
        result.append(nanmean(stacked_data, axis=1))
        rois.append(roi)
        # roi_expt holds the last (or most-common-rate) experiment for this ROI.
        x_ranges.append(x_range[roi_expt.totuple()])
    # Interpolate every ROI's PSTH onto the shortest common time axis.
    # NOTE: the loop variable below shadows the x_range dict; the dict is not
    # used past this point.
    if (ax or return_full == 'norm') and len(result):
        min_x_range_idx = np.argmin([len(x) for x in x_ranges])
        min_x_range = x_ranges[min_x_range_idx]
        normalized_psths = []
        for x_range, psth in it.izip(x_ranges, result):
            normalized_psths.append(np.interp(min_x_range, x_range, psth))
        normalized_psths = np.array(normalized_psths)
    if ax and len(result):
        if color is not None:
            light_color = sns.light_palette(color)[1]
        if shade_ste:
            # Mean trace with shaded SEM or STD band.
            if shade_ste == 'sem':
                ste_psth = nanstd(normalized_psths, axis=0) / np.sqrt(
                    normalized_psths.shape[0])
            elif shade_ste == 'std':
                ste_psth = nanstd(normalized_psths, axis=0)
            else:
                raise ValueError(
                    'Unrecognized error shading argument: {}'.format(
                        shade_ste))
            mean_psth = nanmean(normalized_psths, axis=0)
            ax.plot(min_x_range, mean_psth,
                    color='b' if color is None else color, lw=1)
            ax.fill_between(
                min_x_range, mean_psth - ste_psth, mean_psth + ste_psth,
                facecolor='r' if color is None else color, alpha=0.4)
        else:
            # Individual ROI traces, optionally with the mean overlaid.
            for x_range, psth in it.izip(x_ranges, result):
                if gray_traces or color is not None:
                    ax.plot(x_range, psth,
                            color='0.8' if color is None else light_color)
                else:
                    ax.plot(x_range, psth)
            if plot_mean:
                mean_psth = nanmean(normalized_psths, axis=0)
                ax.plot(min_x_range, mean_psth,
                        color='k' if color is None else color, lw=2)
        ax.axvline(0, linestyle='dashed', color='k')
        ax.set_xlim(min_x_range[0], min_x_range[-1])
        ax.set_xlabel('Time (s)')
        ax.set_ylabel(r'Mean $\Delta$F/F')
    if(return_df):
        return pd.DataFrame(df)
    if return_full == 'norm':
        if not len(result):
            return np.array([]), np.array([])
        return normalized_psths, min_x_range
    if return_full:
        return all_stacked_data, rois, x_ranges
    return result, rois, x_ranges
def response_magnitudes(
        exptGrp, stimulus, method='responsiveness', return_full=False,
        z_score=False, return_df=False, **kwargs):
    """Determine response magnitudes for all ROIs across all experiments.

    stimulus : stim to calculate responsiveness
    method : method used to calculate responsiveness.
        'responsiveness': difference in mean activity after and before stim
        'peak': difference in peak activity after and before stim
    return_df : If True, returns a pandas DataFrame
    **kwargs : additional keyword arguments to pass to psth method
    """
    psths, rois, x_ranges = PSTH(exptGrp, stimulus, return_full=True, **kwargs)
    n_rois = len(psths)
    responses = np.empty(n_rois)
    response_stds = np.empty(n_rois)
    for idx, (psth, x_range) in enumerate(zip(psths, x_ranges)):
        post = psth[x_range > 0]
        pre = psth[x_range < 0]
        if method == 'responsiveness':
            roi_responses = nanmean(post, axis=0) - nanmean(pre, axis=0)
        elif method == 'peak':
            roi_responses = np.nanmax(post, axis=0) - np.nanmax(pre, axis=0)
        else:
            raise ValueError('Unrecognized method: {}'.format(method))
        responses[idx] = nanmean(roi_responses)
        if return_full:
            response_stds[idx] = nanstd(roi_responses)
    # ROIs responding to 'off' stimuli, decrease their activity at the
    # stim times, so flip the sign of the response
    if 'off' in stimulus:
        responses *= -1
    if z_score:
        responses -= nanmean(responses)
        responses /= nanstd(responses)
        # This might be possible to scale, but for now this is incomplete
        response_stds = None
    if return_df:
        assert len(responses) == len(rois)
        if not len(rois):
            rois = np.empty((0, 3))
        df = pd.DataFrame(
            rois, columns=['mouse', 'uniqueLocationKey', 'roi_id'])
        df['value'] = responses
        return df
    if return_full:
        return responses, response_stds, psths, rois, x_ranges
    return responses
def response_matrix(
        exptGrp, stimuli, z_score=True, return_full=False, **response_kwargs):
    """Build an n_rois x n_stimuli matrix of per-ROI response magnitudes.

    Entries are NaN for ROIs that have no response recorded for a given
    stimulus.  If return_full is True, also returns the list of ROI tuples
    indexing the rows.
    """
    stim_responses = {}
    stim_rois = {}
    for stim in stimuli:
        stim_responses[stim], _, _, stim_rois[stim], _ = response_magnitudes(
            exptGrp, stim, z_score=z_score, return_full=True,
            **response_kwargs)
    # Union of every ROI seen for any stimulus.
    all_rois = list(set(it.chain(*stim_rois.itervalues())))
    data = np.empty((len(all_rois), len(stimuli)))
    for roi_idx, roi in enumerate(all_rois):
        for stim_idx, stim in enumerate(stimuli):
            try:
                data[roi_idx, stim_idx] = stim_responses[stim][
                    stim_rois[stim].index(roi)]
            except ValueError:
                # ROI was never recorded for this stimulus.
                data[roi_idx, stim_idx] = np.nan
    if return_full:
        return data, all_rois
    return data
def _expt_grp_response_magnitudes_shuffler(inputs):
    """Run one shuffled response-magnitude calculation for multiprocessing.

    Unpacks a single argument tuple (required by pool.map) and dispatches to
    response_magnitudes with shuffle=True on the module-level global
    ``expt_grp``, which the pool initializer sets in each worker.
    """
    global expt_grp
    (stimulus, method, pre_time, post_time, exclude, data, channel,
     label, roi_filter) = inputs
    call_kwargs = dict(
        method=method, pre_time=pre_time, post_time=post_time,
        exclude=exclude, data=data, channel=channel, label=label,
        roi_filter=roi_filter, shuffle=True)
    return response_magnitudes(expt_grp, stimulus, **call_kwargs)
# TODO: SHOULD THIS BE MOVED TO FILTERS?
def identify_stim_responsive_cells(
        exptGrp, stimulus, ax=None, method='responsiveness', data=None,
        pre_time=None, post_time=None, conf_level=95, sig_tail='upper',
        transients_conf_level=99, exclude=None, shuffle_exclude='exclude',
        plot_mean=True, shade_ste=None, channel='Ch2', label=None,
        roi_filter=None, n_bootstraps=10000, dFOverF='from_file',
        save_to_expt=True, ignore_saved=False, n_processes=1,
        return_roi_tuple_filter=False, return_set=False):
    """Identifies cells that significantly respond to a given stimulus
    See Experiment.psth for most of the arguments.
    Parameters
    ----------
    method : str
        'responsiveness', 'peak'. Method to determine responsive rois
    data : str
        None, 'trans' for normal imaging or isActive methods respectively
    sig_tail : str
        'upper', 'lower', 'both'
        Choose if you want to look for responses in the upper tail, lower
        tail or either tail of the bootstrap distribution.
    exclude : str or bool array
        nTrials x nFrames array or 'running'
        Frames to exclude from the response calculation
    shuffle_exclude : str
        Frames to exclude from the shuffles, 'running' to
        exclude running intervals, 'all' to exclude both stims and running,
        or 'exclude' to match the 'exclude' argument
    transients_conf_level : int
        95 or 99, only used if 'data' is 'trans'
    save_to_expt: bool
        If True, saves responsive rois for the given set of
        parameters for each experiment
    ignore_saved : bool
        If True, don't check for saved responsive rois
    n_processes : int
        If > 1, farms out the shuffling over multiple cores.
    return_roi_tuple_filter : bool
        The default return will filter on ROI objects present in the current
        ExperimentGroup. Alternatively, return a filter on ROI
        (mouse, location, id) tuples that will also filter ROIs not in the
        current ExperimentGroup.
    return_set : bool
        Unused in the visible implementation.
    Returns
    -------
    responsive_filter: roi_filter
        An roi_filter for the responsive ROIs.
    """
    # Pools pickle the shared group via a module global; an roi_filter
    # (usually a closure/lambda) cannot be pickled, so fall back to 1 core.
    if roi_filter and n_processes > 1:
        warnings.warn('Currently unable to use an roi_filter with pools.' +
                      ' This will run with 1 core, re-run with no filter' +
                      ' to re-enable pools.')
        n_processes = 1
    # Seconds of data excluded after each stim when shuffle_exclude == 'all'.
    STIM_INTERVAL = 10.
    if stimulus == 'air':
        stimulus = 'airpuff'
    if shuffle_exclude == 'exclude':
        shuffle_exclude = exclude
    # Default windows: widest pre/post span over all experiments.
    if pre_time is None:
        pre_time = 0
        for expt in exptGrp:
            pre_time = np.amax([pre_time, expt.stimulusTime()])
    if post_time is None:
        post_time = 0
        for expt in exptGrp:
            post_time = np.amax(
                [post_time, expt.imagingTimes()[-1] - expt.stimulusTime()])
    pre_filtered_expts = []
    responsive_rois = set()
    if not ignore_saved:
        # See if all the experiments already have a saved filter
        for expt in exptGrp:
            frame_period = expt.frame_period()
            pre_frames = int(pre_time / frame_period)
            post_frames = int(post_time / frame_period)
            # Collect all trials from same-mouse/same-field experiments;
            # the saved-filter cache is keyed on this trial set.
            trials = []
            for ee in exptGrp:
                if ee.parent.get('mouseID') == expt.parent.get('mouseID') \
                        and ee.get('uniqueLocationKey') == \
                        expt.get('uniqueLocationKey'):
                    trials.extend(ee.findall('trial'))
            try:
                expt_filter = filters.responsive_roi_filter(
                    expt, stimulus, trials, method=method,
                    pre_frames=pre_frames, post_frames=post_frames,
                    channel=channel, label=label, roi_filter=roi_filter,
                    data=data, conf_level=conf_level, sig_tail=sig_tail,
                    transients_conf_level=transients_conf_level,
                    exclude=exclude, shuffle_exclude=shuffle_exclude,
                    n_bootstraps=n_bootstraps, dFOverF=dFOverF)
            except ValueError:
                # No saved filter for this parameter set; stop checking.
                break
            else:
                pre_filtered_expts.append(expt)
                responsive_rois = responsive_rois.union(expt.rois(
                    channel=channel, label=label, roi_filter=expt_filter))
    # Find experiments that still need to be processed
    not_done_expts = set(exptGrp).difference(pre_filtered_expts)
    fields_to_run = set([expt.field_tuple() for expt in not_done_expts])
    # Re-run every experiment in any field that has an unprocessed member.
    expts_to_run = [expt for expt in exptGrp
                    if expt.field_tuple() in fields_to_run]
    new_grp = exptGrp.subGroup(expts_to_run)
    # Pre-compute the per-experiment arguments so each bootstrap shuffle
    # does not have to recompute stim starts / imaging data.
    if isinstance(stimulus, basestring):
        stimulus_arg = {
            expt.totuple(): ba.stimStarts(expt, stimulus) for expt in new_grp}
    else:
        stimulus_arg = stimulus
    if data is None:
        data_arg = {expt.totuple(): expt.imagingData(
            dFOverF=dFOverF, channel=channel, label=label,
            roi_filter=roi_filter) for expt in new_grp}
    elif data == 'trans':
        data_arg = {expt.totuple(): isActive(
            expt, conf_level=transients_conf_level, channel=channel,
            label=label, roi_filter=roi_filter) for expt in new_grp}
    else:
        data_arg = data
    if shuffle_exclude == 'running':
        # Build an nTrials x nROIs x nFrames boolean mask of running frames.
        shuffle_exclude_arg = {}
        for expt in new_grp:
            nROIs = expt.imaging_shape(
                channel=channel, label=label, roi_filter=roi_filter)[0]
            expt_exclude = expt.runningIntervals(
                imageSync=True, returnBoolList=True, end_padding=2.0)
            expt_exclude = np.rollaxis(
                np.tile(expt_exclude, (nROIs, 1, 1)), 1, 0)
            shuffle_exclude_arg[expt.totuple()] = expt_exclude
    elif shuffle_exclude == 'all':
        # Exclude running frames plus STIM_INTERVAL seconds after every stim.
        shuffle_exclude_arg = {}
        for expt in new_grp:
            nROIs, nFrames, _ = expt.imaging_shape(
                channel=channel, label=label, roi_filter=roi_filter)
            expt_exclude = expt.runningIntervals(
                imageSync=True, returnBoolList=True, end_padding=2.0)
            expt_exclude = np.rollaxis(
                np.tile(expt_exclude, (nROIs, 1, 1)), 1, 0)
            frame_period = expt.frame_period()
            stim_starts = ba.stimStarts(expt, 'all', imageSync=True)
            for trial_idx, trial in enumerate(stim_starts):
                trial_mask = np.zeros(nFrames, dtype='bool')
                for stim in trial:
                    trial_mask[stim:
                               stim + STIM_INTERVAL / frame_period] = True
                expt_exclude[trial_idx, ...] = np.logical_or(
                    expt_exclude[trial_idx, ...], trial_mask)
            shuffle_exclude_arg[expt.totuple()] = expt_exclude
    else:
        shuffle_exclude_arg = shuffle_exclude
    inputs = (stimulus_arg, method, pre_time, post_time,
              shuffle_exclude_arg, data_arg, channel, label, roi_filter)
    # Workers read the group from the module-level global (see expt_grp below).
    global expt_grp
    expt_grp = new_grp
    # Run through one shuffle to pre-load a few things
    _expt_grp_response_magnitudes_shuffler(inputs)
    if n_processes > 1:
        def init_grp(grp):
            global expt_grp
            expt_grp = grp
        from multiprocessing import Pool
        pool = Pool(
            processes=n_processes, initializer=init_grp, initargs=[new_grp])
        # import multiprocessing.util as util
        # util.log_to_stderr(util.SUBDEBUG)
        bootstrap_results = pool.map(
            _expt_grp_response_magnitudes_shuffler,
            it.repeat(inputs, n_bootstraps))
        pool.close()
        pool.join()
    else:
        bootstrap_results = map(
            _expt_grp_response_magnitudes_shuffler,
            it.repeat(inputs, n_bootstraps))
    # n_bootstraps x n_rois matrix of shuffled response magnitudes.
    bootstrap_results = np.vstack(bootstrap_results)
    trueDiff, _, _, true_rois, _ = response_magnitudes(
        new_grp, stimulus, method=method, pre_time=pre_time,
        post_time=post_time, data=data_arg, exclude=exclude, channel=channel,
        label=label, roi_filter=roi_filter, return_full=True)
    # Compare the true response against the bootstrap distribution per ROI.
    if sig_tail == 'upper':
        upper_threshold = np.percentile(
            bootstrap_results, conf_level, axis=0)
        responsiveCells = trueDiff > upper_threshold
    elif sig_tail == 'lower':
        lower_threshold = np.percentile(
            bootstrap_results, 100 - conf_level, axis=0)
        responsiveCells = trueDiff < lower_threshold
    elif sig_tail == 'both':
        half_conf_level = (100 - conf_level) / 2.
        upper_threshold = np.percentile(
            bootstrap_results, 100 - half_conf_level, axis=0)
        lower_threshold = np.percentile(
            bootstrap_results, half_conf_level, axis=0)
        responsiveCells = np.bitwise_or(
            trueDiff > upper_threshold, trueDiff < lower_threshold)
    else:
        raise ValueError('Unrecognized sig_tail value')
    # Map significant (mouse, location, id) tuples back to ROI objects.
    for roi_mouse, roi_location, roi_id in np.array(
            true_rois)[responsiveCells]:
        for expt in roi_mouse.findall('experiment'):
            if expt in new_grp \
                    and expt.get('uniqueLocationKey') == roi_location:
                responsive_rois = responsive_rois.union(expt.rois(
                    channel=channel, label=label,
                    roi_filter=lambda x: x.id == roi_id))
    if return_roi_tuple_filter:
        responsive_roi_tuples = set(
            [(roi.expt.parent.get('mouseID'),
              roi.expt.get('uniqueLocationKey'),
              roi.id) for roi in responsive_rois])
        def responsive_roi_tuple_filter(roi):
            return (roi.expt.parent.get('mouseID'),
                    roi.expt.get('uniqueLocationKey'),
                    roi.id) in responsive_roi_tuples
    else:
        def responsive_filter(roi):
            return roi in responsive_rois
    # Persist results per experiment so future calls hit the saved-filter
    # path above (only valid without an roi_filter).
    if roi_filter is None and save_to_expt:
        for expt in new_grp:
            trials = []
            for ee in new_grp:
                if ee.parent.get('mouseID') == expt.parent.get('mouseID') \
                        and ee.get('uniqueLocationKey') == \
                        expt.get('uniqueLocationKey'):
                    trials.extend([trial.get('time') for trial
                                   in ee.findall('trial')])
            responsive_rois_path = os.path.join(
                expt.sima_path(), 'responsive_rois.pkl')
            frame_period = expt.frame_period()
            pre_frames = int(pre_time / frame_period)
            post_frames = int(post_time / frame_period)
            if label is None:
                expt_label = expt.most_recent_key(channel=channel)
            else:
                expt_label = label
            # Cache key encodes every parameter that affects the result.
            key_tuple = (''.join(sorted(trials)), method, pre_frames,
                         post_frames, channel, expt_label, data,
                         conf_level, sig_tail, transients_conf_level,
                         exclude, shuffle_exclude, n_bootstraps, dFOverF)
            # NOTE(review): responsive_filter is only defined when
            # return_roi_tuple_filter is False -- this path (and the PSTH
            # call below) would raise NameError otherwise; confirm callers
            # never combine return_roi_tuple_filter with save_to_expt/ax.
            roi_ids = expt.roi_ids(
                channel=channel, label=label, roi_filter=responsive_filter)
            try:
                with open(responsive_rois_path, 'r') as f:
                    responsive_dict = pickle.load(f)
            except (IOError, pickle.UnpicklingError):
                responsive_dict = {}
            if stimulus not in responsive_dict:
                responsive_dict[stimulus] = {}
            responsive_dict[stimulus][key_tuple] = {}
            responsive_dict[stimulus][key_tuple]['roi_ids'] = roi_ids
            responsive_dict[stimulus][key_tuple]['timestamp'] = timestamp()
            with open(responsive_rois_path, 'w') as f:
                pickle.dump(responsive_dict, f, pickle.HIGHEST_PROTOCOL)
    if ax:
        # Plot the PSTH restricted to the responsive ROIs.
        PSTH(exptGrp, stimulus, ax=ax, pre_time=pre_time, post_time=post_time,
             exclude=exclude, data=data, shade_ste=shade_ste,
             plot_mean=plot_mean, channel=channel, label=label,
             roi_filter=responsive_filter)
    if return_roi_tuple_filter:
        return responsive_roi_tuple_filter
    return responsive_filter
# Module-level global used to share the ExperimentGroup with multiprocessing
# workers in identify_stim_responsive_cells (set before the pool is created
# and by the pool initializer in each worker).
expt_grp = None
# TODO: REMOVE THIS? IT'S NEVER USED IN THE REPO
def identify_stim_responsive_trials(
        exptGrp, stimulus, pre_time=None, post_time=None, conf_level=95,
        data=None, sig_tail='upper', exclude=None, channel='Ch2',
        shuffle_exclude='exclude', label=None, roi_filter=None,
        n_bootstraps=10000, transients_conf_level=99, dFOverF='from_file',
        save_to_expt=True):
    """Identifies trials that are significantly responsive.

    For every ROI, builds a null distribution of single-trial responses by
    placing a fake stimulus at every possible frame, then flags each real
    trial whose response exceeds the chosen percentile of that distribution.

    Returns a list (one entry per ROI) of boolean arrays over trials.

    NOTE(review): this function is marked as unused in the repo; it contains
    an unfinished exclude == 'stim' branch and a stimulus_arg dict keyed on
    expt objects but read with expt.totuple() -- confirm before use.
    """
    if stimulus == 'air':
        stimulus = 'airpuff'
    if stimulus == 'running_stop_off':
        # Running-stop 'off' responses are decreases; test the lower tail.
        sig_tail = 'lower'
    if isinstance(stimulus, basestring):
        # NOTE(review): keys here are expt objects, but the bootstrap loop
        # below indexes stimulus_arg[expt.totuple()] -- likely a KeyError
        # for string stimuli; verify before relying on this path.
        stimulus_arg = {
            expt: ba.stimStarts(expt, stimulus) for expt in exptGrp}
    else:
        stimulus_arg = stimulus
    # Default windows: widest pre/post span over all experiments.
    if pre_time is None:
        pre_time = 0
        for expt in exptGrp:
            pre_time = np.amax([pre_time, expt.stimulusTime()])
    if post_time is None:
        post_time = 0
        for expt in exptGrp:
            post_time = np.amax(
                [post_time, expt.imagingTimes()[-1] - expt.stimulusTime()])
    if data is None:
        data_arg = {expt.totuple(): expt.imagingData(
            dFOverF=dFOverF, channel=channel, label=label,
            roi_filter=roi_filter) for expt in exptGrp}
    elif data == 'trans':
        data_arg = {expt.totuple(): isActive(
            expt, conf_level=transients_conf_level, channel=channel,
            label=label, roi_filter=roi_filter) for expt in exptGrp}
    else:
        data_arg = data
    if exclude == 'running':
        # nTrials x nROIs x nFrames boolean mask of running frames.
        exclude_arg = {}
        for expt in exptGrp:
            nROIs = expt.imaging_shape(
                channel=channel, label=label, roi_filter=roi_filter)[0]
            expt_exclude = expt.runningIntervals(
                imageSync=True, returnBoolList=True, end_padding=2.0)
            expt_exclude = np.rollaxis(
                np.tile(expt_exclude, (nROIs, 1, 1)), 1, 0)
            exclude_arg[expt.totuple()] = expt_exclude
    elif exclude == 'stim':
        # NOTE(review): unfinished stub -- exclude_arg is left empty.
        exclude_arg = {}
        for expt in exptGrp:
            pass
    else:
        exclude_arg = exclude
    # Build the null distribution: one pass per candidate stimulus frame.
    max_frames = max([expt.imaging_shape()[1] for expt in exptGrp])
    for bootstrap_idx in xrange(max_frames):
        bootstrap_stim_arg = {}
        for expt in exptGrp:
            expt_frames = expt.imaging_shape()[1]
            # Place a fake stim at bootstrap_idx on every trial that had a
            # real stim and for which the frame exists.
            bootstrap_stim_arg[expt.totuple()] = [
                np.array([bootstrap_idx]) if len(trial_stim)
                and bootstrap_idx < expt_frames
                else np.array([]) for trial_stim
                in stimulus_arg[expt.totuple()]]
        psths, _, x_ranges = PSTH(
            exptGrp, bootstrap_stim_arg, pre_time=pre_time,
            post_time=post_time, exclude=exclude_arg, data=data_arg,
            channel=channel, label=label, roi_filter=roi_filter,
            return_full=True, shuffle=False)
        # Initialize output array
        if bootstrap_idx == 0:
            bootstrap_results = [[] for _ in x_ranges]
        for psth_idx, psth, x_range in it.izip(
                it.count(), psths, x_ranges):
            # Single-trial post-minus-pre responses; drop NaN/inf trials.
            responses = nanmean(psth[x_range > 0, :], axis=0) - \
                nanmean(psth[x_range < 0, :], axis=0)
            responses = [response for response in responses
                         if np.isfinite(response)]
            bootstrap_results[psth_idx].extend(responses)
    # True single-trial responses at the real stimulus times.
    true_psths, true_rois, true_x_ranges = PSTH(
        exptGrp, stimulus, pre_time=pre_time, post_time=post_time,
        data=data_arg, exclude=exclude, channel=channel, label=label,
        roi_filter=roi_filter, return_full=True)
    trueDiffs = []
    for psth, x_range in it.izip(true_psths, true_x_ranges):
        trueDiffs.append(
            nanmean(psth[x_range > 0, :], axis=0) -
            nanmean(psth[x_range < 0, :], axis=0))
    # Flag trials beyond the requested tail(s) of each ROI's null.
    responsive_trials = []
    if sig_tail == 'upper':
        for responses, roi_bootstrap_results in it.izip(
                trueDiffs, bootstrap_results):
            upper_threshold = np.percentile(
                roi_bootstrap_results, conf_level)
            responsive_trials.append(responses > upper_threshold)
    elif sig_tail == 'lower':
        for responses, roi_bootstrap_results in it.izip(
                trueDiffs, bootstrap_results):
            lower_threshold = np.percentile(
                roi_bootstrap_results, 100 - conf_level)
            responsive_trials.append(responses < lower_threshold)
    elif sig_tail == 'both':
        half_conf_level = (100 - conf_level) / 2.
        for responses, roi_bootstrap_results in it.izip(
                trueDiffs, bootstrap_results):
            upper_threshold = np.percentile(
                roi_bootstrap_results, 100 - half_conf_level)
            lower_threshold = np.percentile(
                roi_bootstrap_results, half_conf_level)
            responsive_trials.append(np.bitwise_or(
                responses > upper_threshold, responses < lower_threshold))
    else:
        raise ValueError('Unrecognized sig_tail value')
    return responsive_trials
def compare_run_response_by_running_duration(
        exptGrp, ax, run_intervals='running', response_method='responsiveness',
        plot_method='scatter', pre_time=None, post_time=None,
        channel='Ch2', label=None, roi_filter=None,
        responsive_method=None, **psth_kwargs):
    """Compare the running response of ROIs by scattering single trial
    responses against running interval duration

    run_intervals -- any argument to stimStarts that returns a subset
        of running intervals
    reponse_method -- how to calculate the running response on a per-trial
        basis. 'responsiveness' is the same metric used to determine
        responsive rois, 'mean' is the mean during the running bout
    pre/post_time -- time to include in psth before and after stim
    responsive_method -- None, to include all cells, or a method used to
        identify stim responsive cells
    psth_kwargs -- any other arguments will be passed to expt.psth
    """
    # Default windows: widest pre/post span over all experiments.
    if pre_time is None:
        pre_time = 0
        for expt in exptGrp:
            pre_time = max([pre_time, expt.stimulusTime()])
    if post_time is None:
        post_time = 0
        for expt in exptGrp:
            post_time = max(
                [post_time, expt.imagingTimes()[-1] - expt.stimulusTime()])
    if responsive_method:
        # Restrict analysis to cells responsive to the running intervals.
        roi_filter = identify_stim_responsive_cells(
            exptGrp, stimulus=run_intervals, method=responsive_method,
            pre_time=pre_time, post_time=post_time, channel=channel,
            label=label, roi_filter=roi_filter, **psth_kwargs)
    durations = []
    responses = []
    for expt in exptGrp:
        if response_method == 'responsiveness':
            frame_period = expt.frame_period()
            pre_frames = int(pre_time / frame_period)
            post_frames = int(post_time / frame_period)
            # psths is nROIs x nFrames x nIntervals
            psths = _psth(
                expt, run_intervals, pre_frames=pre_frames,
                post_frames=post_frames, return_full=True, channel=channel,
                label=label, roi_filter=roi_filter, **psth_kwargs)
            post = nanmean(psths[:, -post_frames:, :], axis=1)
            pre = nanmean(psths[:, :pre_frames, :], axis=1)
            # responses is now nROIs x nIntervals
            expt_responses = post - pre
            # Need to determine which running intervals were included in
            # psths, match filtered stim starts with all running intervals
            starts = ba.stimStarts(expt, run_intervals, imageSync=True)
            starts = list(it.chain(*starts))
            running_intervals = expt.runningIntervals(imageSync=True)
            response_idx = 0
            for interval in it.chain(*running_intervals):
                if response_idx >= expt_responses.shape[1]:
                    break
                # 'stop' intervals are keyed on their end frame; otherwise
                # match on the start frame.
                if 'stop' in run_intervals:
                    if interval[1] != starts[response_idx]:
                        continue
                else:
                    if interval[0] != starts[response_idx]:
                        continue
                interval_responses = expt_responses[:, response_idx]
                # Duration in seconds (interval bounds are inclusive).
                interval_duration = \
                    (interval[1] - interval[0] + 1) * frame_period
                responses.extend(interval_responses.tolist())
                durations.extend(
                    [interval_duration] * len(interval_responses))
                response_idx += 1
        elif response_method == 'mean':
            frame_period = expt.frame_period()
            imaging_data = expt.imagingData(
                channel=channel, label=label, roi_filter=roi_filter,
                dFOverF='from_file')
            starts = ba.stimStarts(expt, run_intervals, imageSync=True)
            running_intervals = expt.runningIntervals(imageSync=True)
            for trial_idx, trial_starts, trial_intervals in it.izip(
                    it.count(), starts, running_intervals):
                for start in trial_starts:
                    # Locate the full running interval matching this start
                    # (end frame for 'stop' intervals, start frame otherwise).
                    if 'stop' in run_intervals:
                        if start not in trial_intervals[:, 1]:
                            continue
                        interval_idx = np.nonzero(
                            trial_intervals == start)[0][0]
                        run_start = trial_intervals[interval_idx, 0]
                        run_stop = start + 1
                    else:
                        if start not in trial_intervals[:, 0]:
                            continue
                        interval_idx = np.nonzero(
                            trial_intervals == start)[0][0]
                        run_start = start
                        run_stop = trial_intervals[interval_idx, 1] + 1
                    # Mean dF/F per ROI over the running bout.
                    running_imaging = imaging_data[:, run_start:run_stop,
                                                   trial_idx]
                    mean_responses = nanmean(
                        running_imaging, axis=1).tolist()
                    interval_duration = \
                        (run_stop - run_start) * frame_period
                    responses.extend(mean_responses)
                    durations.extend(
                        [interval_duration] * len(mean_responses))
        else:
            raise NotImplementedError
    if not len(durations):
        warnings.warn('No running intervals found')
        return
    if plot_method == 'scatter':
        plotting.scatterPlot(
            ax, np.vstack([durations, responses]),
            ['Running duration (s)', response_method],
            plotEqualLine=False, print_stats=True, s=1)
        ax.set_title(run_intervals)
    else:
        raise NotImplementedError
def plot_number_of_stims_responsive(
        exptGrp, ax, stimuli, method='responsiveness', pre_time=None,
        post_time=None, exclude=None, channel='Ch2', label=None,
        roi_filter=None, plot_mean=True, n_processes=1,
        n_bootstraps=10000):
    """Plot a histogram of the number of stims each ROI is responsive to
    and compare to the distribution if all ROIs were equally likely to
    respond to each stim and all the stims were independent.

    Returns the observed histogram counts and the expected counts under
    the independence (Poisson-binomial) model; a chi-square p-value
    comparing them is printed on the axes.
    """
    # Only include ROIs that were actually exposed to all stims
    # (running/licking events occur in every experiment, so skip them).
    stims_to_check = set(filter(
        lambda stim: 'running' not in stim and 'licking' not in stim,
        stimuli))
    # One responsiveness filter per stimulus.
    responsive_cells = {}
    for stim in stimuli:
        responsive_cells[stim] = identify_stim_responsive_cells(
            exptGrp, stimulus=stim, method=method, pre_time=pre_time,
            post_time=post_time, sig_tail='upper', exclude=exclude,
            channel=channel, label=label, roi_filter=roi_filter,
            n_bootstraps=n_bootstraps, save_to_expt=True,
            n_processes=n_processes)
    all_rois = exptGrp.allROIs(
        channel=channel, label=label, roi_filter=roi_filter)
    roi_counts = []
    stim_counts = {stimulus: 0 for stimulus in stimuli}
    for roi in all_rois:
        # Skip ROIs that were not exposed to all stims
        roi_stims = set(it.chain(
            *[expt.stimuli() for expt, _ in all_rois[roi]]))
        if len(stims_to_check.difference(roi_stims)) > 0:
            continue
        # Use one representative ROI object to query the filters.
        roi_id = roi[2]
        roi_expt = all_rois[roi][0][0]
        first_roi = roi_expt.rois(
            channel=channel, label=label,
            roi_filter=lambda r: r.id == roi_id)[0]
        roi_count = np.sum([responsive_cells[stimulus](first_roi)
                            for stimulus in stimuli])
        roi_counts.append(roi_count)
        for stimulus in stimuli:
            stim_counts[stimulus] += int(
                responsive_cells[stimulus](first_roi))
    counts, edges, _ = plotting.histogram(
        ax, roi_counts, range(len(stimuli) + 2), color='m',
        plot_mean=plot_mean)
    # Per-stimulus response probabilities for the independence model.
    probs = np.array([stim_counts[stimulus] for stimulus in stim_counts]) \
        / float(len(roi_counts))
    dist = stats.poisson_binomial_distribution(probs)
    # Scale the expected distribution to the observed ROI count.
    dist_counts = np.array([val * sum(counts) for val in dist])
    ax.step(range(len(dist_counts)), dist_counts, where='post', color='c')
    # Not sure that ddof should be 0, but 0 is the most conservative
    _, p_val = chisquare(counts[dist_counts > 0],
                         dist_counts[dist_counts > 0], ddof=0)
    ax.set_xticks((edges[:-1] + edges[1:]) / 2.0)
    ax.set_xticklabels(range(0, len(stimuli) + 1))
    ax.set_xlabel('Number of stims responsive')
    ax.set_ylabel('Number of ROIs')
    plotting.stackedText(
        ax, ['actual', 'expected', 'p={:.5f}'.format(p_val)],
        colors=['m', 'c', 'k'])
    return counts, dist_counts
@memoize
def roi_area(expt_grp, channel='Ch2', label=None, roi_filter=None):
    """Calculate the area of each ROI (in um^2).

    Pixel coordinates are scaled to microns using each experiment's
    micronsPerPixel calibration before computing polygon areas.

    Parameters
    ----------
    channel : string
    label : string or None
    roi_filter : filter_function or None

    Returns
    -------
    pd.DataFrame
        One row per ROI with columns 'expt', 'roi', and 'value' (area).
    """
    rois = expt_grp.rois(channel=channel, label=label, roi_filter=roi_filter)
    records = []
    for expt in expt_grp:
        microns = expt.imagingParameters()['micronsPerPixel']
        scale_x = microns['XAxis']
        scale_y = microns['YAxis']
        for roi in rois[expt]:
            scaled_polys = []
            for poly in roi.polygons:
                pts = np.array(poly.exterior)
                # Convert pixel coordinates to microns.
                pts[:, 0] *= scale_x
                pts[:, 1] *= scale_y
                scaled_polys.append(shapely.geometry.Polygon(pts))
            area = shapely.geometry.MultiPolygon(scaled_polys).area
            records.append({'expt': expt, 'roi': roi, 'value': area})
    return pd.DataFrame(records, columns=['expt', 'roi', 'value'])
@memoize
def transients(
        expt_grp, key=None, interval=None, invert_interval=False, **transient_kwargs):
    """Return a DataFrame of all transients.

    Parameters
    ----------
    key : None or str
        If not None, drop all columns other than trial, roi, and 'key'.
        Also renames column 'key' to 'value'.
    interval : lab.classes.new_interval.Interval
        An interval object used to filter out events. Filters on the
        'start_frame' of the transient.
    **transient_kwargs : dict
        All other keyword arguments are passed to the per-experiment
        transients data method.

    Returns
    -------
    pd.DataFrame
    """
    # Concatenate per-experiment transients into one frame
    frames = pd.concat(
        [expt.transientsData(dataframe=True, **transient_kwargs)
         for expt in expt_grp],
        ignore_index=True)
    if interval is not None:
        resampled = interval.resample()
        # Convert each transient's start frame to seconds so it can be
        # compared against the interval boundaries
        frames['_start_time'] = frames[['trial', 'start_frame']].apply(
            lambda row: row[0].parent.frame_period() * row[1],
            axis=1)
        frames = resampled.filter_events(
            frames, key='_start_time', invert=invert_interval)
        del frames['_start_time']
    if key is not None:
        frames = frames[['trial', 'roi', key]].rename(columns={key: 'value'})
    return frames
| {
"repo_name": "losonczylab/Zaremba_NatNeurosci_2017",
"path": "losonczy_analysis_bundle/lab/analysis/imaging_analysis.py",
"copies": "1",
"size": "88430",
"license": "mit",
"hash": -2544790426250544000,
"line_mean": 39.0861287398,
"line_max": 116,
"alpha_frac": 0.5775076332,
"autogenerated": false,
"ratio": 3.808518885395581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4886026518595581,
"avg_score": null,
"num_lines": null
} |
"""Analysis of centroid residuals for determining suitable refinement and
outlier rejection parameters automatically"""
import math
from scitbx.math.periodogram import Periodogram
from dials.array_family import flex
RAD2DEG = 180.0 / math.pi  # conversion factor: radians -> degrees
class CentroidAnalyser:
    """Analyse centroid residuals per experiment to determine suitable block
    sizes, and (via periodogram analysis) interval widths for scan-varying
    refinement and outlier rejection."""

    def __init__(self, reflections, av_callback=flex.mean, debug=False):
        """Filter the reflections, ensure residual columns exist, and choose
        a per-experiment block size with enough reflections per block.

        Parameters
        ----------
        reflections : reflection table with centroid data
        av_callback : callable used to average residuals within a block
        debug : if True, write the filtered reflections to disk
        """
        # flags to indicate at what level the analysis has been performed
        self._average_residuals = False
        self._spectral_analysis = False
        self._av_callback = av_callback
        # Remove invalid reflections
        reflections = reflections.select(~(reflections["miller_index"] == (0, 0, 0)))
        x, y, z = reflections["xyzcal.mm"].parts()
        sel = (x == 0) & (y == 0)
        reflections = reflections.select(~sel)
        self._nexp = flex.max(reflections["id"]) + 1
        # Ensure required keys are present
        if not all(k in reflections for k in ["x_resid", "y_resid", "phi_resid"]):
            x_obs, y_obs, phi_obs = reflections["xyzobs.mm.value"].parts()
            x_cal, y_cal, phi_cal = reflections["xyzcal.mm"].parts()
            # do not wrap around multiples of 2*pi; keep the full rotation
            # from zero to differentiate repeat observations.
            TWO_PI = 2.0 * math.pi
            resid = phi_cal - (flex.fmod_positive(phi_obs, TWO_PI))
            # ensure this is the smaller of two possibilities
            resid = flex.fmod_positive((resid + math.pi), TWO_PI) - math.pi
            phi_cal = phi_obs + resid
            reflections["x_resid"] = x_cal - x_obs
            reflections["y_resid"] = y_cal - y_obs
            reflections["phi_resid"] = phi_cal - phi_obs
        # create empty results list
        self._results = []
        # first, just determine a suitable block size for analysis
        for iexp in range(self._nexp):
            ref_this_exp = reflections.select(reflections["id"] == iexp)
            if len(ref_this_exp) == 0:
                # can't do anything, just keep an empty dictionary
                self._results.append({})
                continue
            phi_obs_deg = ref_this_exp["xyzobs.mm.value"].parts()[2] * RAD2DEG
            phi_range = flex.min(phi_obs_deg), flex.max(phi_obs_deg)
            phi_width = phi_range[1] - phi_range[0]
            ideal_block_size = 1.0
            old_nblocks = 0
            # Grow the block size until every block has enough reflections
            # (>= 50), or only a single block remains
            while True:
                nblocks = int(phi_width // ideal_block_size)
                if nblocks == old_nblocks:
                    nblocks -= 1
                nblocks = max(nblocks, 1)
                block_size = phi_width / nblocks
                nr = flex.int()
                for i in range(nblocks - 1):
                    blk_start = phi_range[0] + i * block_size
                    blk_end = blk_start + block_size
                    sel = (phi_obs_deg >= blk_start) & (phi_obs_deg < blk_end)
                    nref_in_block = sel.count(True)
                    nr.append(nref_in_block)
                # include max phi in the final block
                blk_start = phi_range[0] + (nblocks - 1) * block_size
                blk_end = phi_range[1]
                sel = (phi_obs_deg >= blk_start) & (phi_obs_deg <= blk_end)
                nref_in_block = sel.count(True)
                nr.append(nref_in_block)
                # Break if there are enough reflections, otherwise increase block size,
                # unless only one block remains
                if nblocks == 1:
                    break
                min_nr = flex.min(nr)
                if min_nr >= 50:
                    break
                if min_nr < 5:
                    fac = 2
                else:
                    fac = 50 / min_nr
                ideal_block_size *= fac
                old_nblocks = nblocks
            # collect the basic data for this experiment
            self._results.append(
                {
                    "block_size": block_size,
                    "nref_per_block": nr,
                    "nblocks": nblocks,
                    "phi_range": phi_range,
                }
            )
        # keep reflections for analysis
        self._reflections = reflections
        # for debugging, write out reflections used
        if debug:
            self._reflections.as_file("centroid_analysis.refl")

    def __call__(
        self, calc_average_residuals=True, calc_periodograms=True, spans=(4, 4)
    ):
        """Perform analysis and return the results as a list of dictionaries (one
        for each experiment)"""
        # if not doing further analysis, return the basic data
        if not calc_average_residuals and not calc_periodograms:
            return self._results
        # if we don't have average residuals already, calculate them
        if not self._average_residuals:
            for iexp in range(self._nexp):
                results_this_exp = self._results[iexp]
                block_size = results_this_exp.get("block_size")
                if block_size is None:
                    continue
                phi_range = results_this_exp["phi_range"]
                nblocks = results_this_exp["nblocks"]
                ref_this_exp = self._reflections.select(self._reflections["id"] == iexp)
                x_resid = ref_this_exp["x_resid"]
                y_resid = ref_this_exp["y_resid"]
                phi_resid = ref_this_exp["phi_resid"]
                phi_obs_deg = ref_this_exp["xyzobs.mm.value"].parts()[2] * RAD2DEG
                xr_per_blk = flex.double()
                yr_per_blk = flex.double()
                pr_per_blk = flex.double()
                # Average the residuals within each block
                for i in range(nblocks - 1):
                    blk_start = phi_range[0] + i * block_size
                    blk_end = blk_start + block_size
                    sel = (phi_obs_deg >= blk_start) & (phi_obs_deg < blk_end)
                    xr_per_blk.append(self._av_callback(x_resid.select(sel)))
                    yr_per_blk.append(self._av_callback(y_resid.select(sel)))
                    pr_per_blk.append(self._av_callback(phi_resid.select(sel)))
                # include max phi in the final block
                blk_start = phi_range[0] + (nblocks - 1) * block_size
                blk_end = phi_range[1]
                sel = (phi_obs_deg >= blk_start) & (phi_obs_deg <= blk_end)
                xr_per_blk.append(self._av_callback(x_resid.select(sel)))
                yr_per_blk.append(self._av_callback(y_resid.select(sel)))
                pr_per_blk.append(self._av_callback(phi_resid.select(sel)))
                # the first and last block of average residuals (especially those in
                # phi) are usually bad because rocking curves are truncated at the
                # edges of the scan. When we have enough blocks and they are narrow,
                # just replace the extreme values with their neighbours
                if nblocks > 2 and block_size < 3.0:
                    xr_per_blk[0] = xr_per_blk[1]
                    xr_per_blk[-1] = xr_per_blk[-2]
                    yr_per_blk[0] = yr_per_blk[1]
                    yr_per_blk[-1] = yr_per_blk[-2]
                    pr_per_blk[0] = pr_per_blk[1]
                    pr_per_blk[-1] = pr_per_blk[-2]
                results_this_exp["av_x_resid_per_block"] = xr_per_blk
                results_this_exp["av_y_resid_per_block"] = yr_per_blk
                results_this_exp["av_phi_resid_per_block"] = pr_per_blk
            self._average_residuals = True
        # Perform power spectrum analysis on the residuals, converted to microns
        # and mrad to avoid tiny numbers
        if calc_periodograms:
            if self._spectral_analysis:
                return self._results
            for exp_data in self._results:
                exp_data["x_periodogram"] = None
                exp_data["y_periodogram"] = None
                exp_data["phi_periodogram"] = None
                # too few blocks for a meaningful spectrum
                if exp_data["nblocks"] < 5:
                    continue
                for pname, data in zip(
                    ["x_periodogram", "y_periodogram", "phi_periodogram"],
                    [
                        exp_data["av_x_resid_per_block"],
                        exp_data["av_y_resid_per_block"],
                        exp_data["av_phi_resid_per_block"],
                    ],
                ):
                    # only analyse series with non-negligible variation
                    if (flex.max(data) - flex.min(data)) > 1.0e-8:
                        exp_data[pname] = Periodogram(1000.0 * data, spans=spans)
            self._spectral_analysis = True
            # extract further information from the power spectrum
            for exp_data in self._results:
                exp_data["x_interval"] = self._analyse_periodogram(
                    exp_data["x_periodogram"]
                )
                exp_data["y_interval"] = self._analyse_periodogram(
                    exp_data["y_periodogram"]
                )
                exp_data["phi_interval"] = self._analyse_periodogram(
                    exp_data["phi_periodogram"]
                )
        return self._results

    def _analyse_periodogram(self, pgram):
        """Use the periodogram pgram to suggest a suitable interval width for
        scan-varying refinement to account for the major variation in residuals"""
        if pgram is None:
            return None
        # determine a baseline from the high frequency noise
        bl = flex.median(pgram.spec.select(pgram.freq > 0.25))
        # look for peaks greater than 5 times this baseline. We expect one at
        # low frequency
        cutoff = 5 * bl
        peaks = pgram.spec > cutoff
        # find where this peak falls off below the cutoff and return the cycle
        # period at half that frequency (this is a heuristic that often seems to
        # give sensible results)
        pk_start = flex.first_index(peaks, True)
        if pk_start is None:
            return None
        peaks = peaks[pk_start:]
        # BUG FIX: first_index returns None when the peak never falls below the
        # cutoff (extends to the end of the spectrum); the original code
        # unconditionally did arithmetic on that result, raising TypeError.
        fall_off = flex.first_index(peaks, False)
        idx = None if fall_off is None else pk_start + fall_off - 1
        if idx is not None:
            f1 = pgram.freq[idx]
            s1 = pgram.spec[idx]
            try:
                # linearly interpolate to estimate the frequency at which the
                # spectrum crosses the cutoff
                f2 = pgram.freq[idx + 1]
                s2 = pgram.spec[idx + 1]
                ds = cutoff - s1
                df = (f2 - f1) * ds / (s2 - s1)
                freq = f1 + df
            except IndexError:
                freq = f1
            # period at half the fall-off frequency
            period = 2.0 * 1.0 / freq
        else:
            period = None
        return period
| {
"repo_name": "dials/dials",
"path": "algorithms/refinement/analysis/centroid_analysis.py",
"copies": "1",
"size": "10489",
"license": "bsd-3-clause",
"hash": 6015165620809982000,
"line_mean": 41.6382113821,
"line_max": 88,
"alpha_frac": 0.5170178282,
"autogenerated": false,
"ratio": 3.9196562032884903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.493667403148849,
"avg_score": null,
"num_lines": null
} |
# # Analysis of combined data sets: Counts vs. angle
#
# 7/18/2018
#
# Doing this in energy space because that is more accurate.
# Linear pipeline: load detector/histogram data, compute singles and doubles
# sums in an energy window, then plot W (doubles/singles^2) vs. detector-pair
# angle.
# Import packages ------------------------------
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import imageio
import pandas as pd
import seaborn as sns
sns.set(style='ticks')
# Custom scripts ---------------------------------
sys.path.append('../../scripts')
import bicorr as bicorr
import bicorr_e as bicorr_e
import bicorr_plot as bicorr_plot
import bicorr_sums as bicorr_sums
import bicorr_math as bicorr_math
# Specify energy range
# Window applied to both singles and doubles sums (presumably MeV — confirm
# against bicorr_e's bin-edge units)
e_min = 1
e_max = 4
# Load data- experimental setup --------------------
det_df = bicorr.load_det_df('../../meas_info/det_df_pairs_angles.csv')
chList, fcList, detList, num_dets, num_det_pairs = bicorr.build_ch_lists()
dict_pair_to_index, dict_index_to_pair, dict_pair_to_angle = bicorr.build_dict_det_pair(det_df)
# Load singles hist and bicorr histogram -----------
singles_hist_e_n, e_bin_edges, dict_det_to_index, dict_index_to_det = bicorr_e.load_singles_hist_both(filepath = 'datap/',plot_flag=True, save_flag=True)
bhm_e, e_bin_edges, note = bicorr_e.load_bhm_e('datap')
# Calculate bicorr hist plot -----------------------
# Collapse the bicorr histogram to one 2D (E1 x E2) plane per detector pair
bhp_e = np.zeros((len(det_df),len(e_bin_edges)-1,len(e_bin_edges)-1))
for index in det_df.index.values: # index is same as in `bhm`
    bhp_e[index,:,:] = bicorr_e.build_bhp_e(bhm_e,e_bin_edges,pair_is=[index])[0]
# Singles sums -------------------------------------
singles_e_df = bicorr_sums.init_singles_e_df(dict_index_to_det)
singles_e_df = bicorr_sums.fill_singles_e_df(dict_index_to_det, singles_hist_e_n, e_bin_edges, e_min, e_max)
bicorr_plot.Sd_vs_ch_all(singles_e_df, show_flag=False)
# Append sums to det_df ---------------------------------
det_df = bicorr_sums.init_det_df_sums(det_df)
det_df, energies_real = bicorr_sums.fill_det_df_doubles_e_sums(det_df, bhp_e, e_bin_edges, e_min, e_max, True)
det_df = bicorr_sums.fill_det_df_singles_sums(det_df, singles_e_df)
det_df = bicorr_sums.calc_det_df_W(det_df)
# Plot W vs angle --------------------------------------
# Exclude pairs involving the fission-chamber-adjacent channels
chIgnore = [1,17,33]
det_df_ignore = det_df[~det_df['d1'].isin(chIgnore) & ~det_df['d2'].isin(chIgnore)]
bicorr_plot.W_vs_angle_all(det_df_ignore, save_flag=True, show_flag=False)
# Group into angle bins --------------------------------
# 10-degree bins; 10.01 start avoids putting an edge exactly on a pair angle
angle_bin_edges = np.arange(10.01,181,10)
by_angle_df = bicorr_sums.condense_det_df_by_angle(det_df_ignore, angle_bin_edges)
bicorr_plot.W_vs_angle(det_df_ignore, by_angle_df, save_flag=True, show_flag=False)
# Store to datap ---------------------------------------
singles_e_df.to_csv('datap/singles_e_df_filled.csv')
det_df_ignore.to_csv(r'datap/det_df_e_ignorefc_filled.csv')
det_df.to_csv(r'datap/det_df_e_filled.csv')
by_angle_df.to_csv(r'datap/by_angle_e_df.csv')
| {
"repo_name": "pfschus/fission_bicorrelation",
"path": "scripts/plot_counts_vs_angle_E.py",
"copies": "1",
"size": "2825",
"license": "mit",
"hash": -8524715376864720000,
"line_mean": 38.2361111111,
"line_max": 153,
"alpha_frac": 0.6431858407,
"autogenerated": false,
"ratio": 2.5245755138516532,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8516980253387423,
"avg_score": 0.03015622023284604,
"num_lines": 72
} |
"""Analysis of current MOS temperature bias."""
import sys
import pytz
from pyiem.plot import MapPlot, get_cmap
from pyiem.util import get_dbconn, utc
def doit(now, model):
    """Generate MOS temperature bias maps for the given forecast time.

    Looks up the most recent model run that produced a forecast for `now`,
    computes forecast-minus-observed temperature differences at ASOS/AWOS
    sites, and produces both a midwest and a CONUS contour plot.

    Parameters
    ----------
    now : datetime
        Forecast valid time (tz-aware).
    model : str
        MOS model identifier (e.g. 'GFS', 'NAM').
    """
    mos_pgconn = get_dbconn("mos")
    iem_pgconn = get_dbconn("iem")
    mcursor = mos_pgconn.cursor()
    icursor = iem_pgconn.cursor()
    # Most recent model run that forecast for this valid time
    mcursor.execute(
        "SELECT max(runtime at time zone 'UTC') from alldata "
        "where station = 'KDSM' and ftime = %s and model = %s",
        (now, model),
    )
    row = mcursor.fetchone()
    runtime = row[0]
    if runtime is None:
        # No matching model run; nothing to plot
        sys.exit()
    runtime = runtime.replace(tzinfo=pytz.utc)
    # Load up the mos forecast for our given
    mcursor.execute(
        "SELECT station, tmp FROM alldata "
        "WHERE model = %s and runtime = %s and ftime = %s and tmp < 999",
        (model, runtime, now),
    )
    forecast = {}
    for row in mcursor:
        # Strip the leading 'K' so IDs match the IEM station table
        if row[0][0] == "K":
            forecast[row[0][1:]] = row[1]
    # Load up the currents!
    icursor.execute(
        """
    SELECT
      s.id, s.network, tmpf, ST_x(s.geom) as lon, ST_y(s.geom) as lat
    FROM
      current c, stations s
    WHERE
      c.iemid = s.iemid and
      (s.network ~* 'ASOS' or s.network = 'AWOS') and s.country = 'US' and
      valid + '60 minutes'::interval > now() and
      tmpf > -50
    """
    )
    lats = []
    lons = []
    vals = []
    for row in icursor:
        if row[0] not in forecast:
            continue
        diff = forecast[row[0]] - row[2]
        # Discard gross outliers (bad obs or station mismatch)
        if diff > 20 or diff < -20:
            continue
        lats.append(row[4])
        lons.append(row[3])
        vals.append(diff)
    cmap = get_cmap("RdYlBu_r")
    cmap.set_under("black")
    cmap.set_over("black")
    localnow = now.astimezone(pytz.timezone("America/Chicago"))
    mp = MapPlot(
        sector="midwest",
        title="%s MOS Temperature Bias " % (model,),
        subtitle=("Model Run: %s Forecast Time: %s")
        % (
            runtime.strftime("%d %b %Y %H %Z"),
            localnow.strftime("%d %b %Y %-I %p %Z"),
        ),
    )
    mp.contourf(lons, lats, vals, range(-10, 11, 2), units="F", cmap=cmap)
    pqstr = "plot ac %s00 %s_mos_T_bias.png %s_mos_T_bias_%s.png png" % (
        now.strftime("%Y%m%d%H"),
        model.lower(),
        model.lower(),
        now.strftime("%H"),
    )
    mp.postprocess(pqstr=pqstr, view=False)
    mp.close()
    mp = MapPlot(
        sector="conus",
        title="%s MOS Temperature Bias " % (model,),
        subtitle=("Model Run: %s Forecast Time: %s")
        % (
            runtime.strftime("%d %b %Y %H %Z"),
            localnow.strftime("%d %b %Y %-I %p %Z"),
        ),
    )
    mp.contourf(lons, lats, vals, range(-10, 11, 2), units="F", cmap=cmap)
    pqstr = (
        "plot ac %s00 conus_%s_mos_T_bias.png "
        "conus_%s_mos_T_bias_%s.png png"
    ) % (
        now.strftime("%Y%m%d%H"),
        model.lower(),
        model.lower(),
        now.strftime("%H"),
    )
    mp.postprocess(pqstr=pqstr, view=False)
    # BUG FIX: the conus MapPlot was never closed (the midwest one was),
    # leaking a matplotlib figure on each run.
    mp.close()
def main(argv):
    """Parse arguments and generate the bias plots.

    Usage:
        current_bias.py <model>
        current_bias.py <year> <month> <day> <hour> <model>

    With only a model given, the current (UTC) hour is used.
    """
    ts = utc()
    model = argv[1]
    if len(argv) == 6:
        ts = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
        # BUG FIX: read from the argv parameter, not sys.argv, so main()
        # behaves consistently for any caller-supplied argument list.
        model = argv[5]
    # truncate to the top of the hour
    ts = ts.replace(minute=0, second=0, microsecond=0)
    doit(ts, model)


if __name__ == "__main__":
    main(sys.argv)
| {
"repo_name": "akrherz/iem",
"path": "scripts/mos/current_bias.py",
"copies": "1",
"size": "3425",
"license": "mit",
"hash": 6647644696432409000,
"line_mean": 26.1825396825,
"line_max": 76,
"alpha_frac": 0.5258394161,
"autogenerated": false,
"ratio": 3.127853881278539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153693297378539,
"avg_score": 0,
"num_lines": 126
} |
"""Analysis of mouse behavior during in vivo calcium imaging"""
import numpy as np
from matplotlib import pyplot as plt
import itertools as it
from scipy.ndimage.filters import gaussian_filter1d
import warnings
from ..classes import exceptions as exc
from .. import plotting
# def infer_expt_pair_condition(expt1, expt2):
# same_belt = expt1.get('belt') == expt2.get('belt')
# same_context = expt1.get('environment') == expt2.get('environment')
# same_rewards = expt1.get('rewardPositions') == expt2.get('rewardPositions')
# if same_belt and same_context and same_rewards:
# return "SameAll"
# if same_belt and same_context and not same_rewards:
# return "SameAll_DiffRewards"
# if same_belt and not same_context and same_rewards:
# return "DiffCtxs"
# if not same_belt and same_context and same_rewards:
# return "DiffBelts"
# if not same_belt and not same_context:
# return 'DiffAll'
# return None
"""
Experiment behavior functions
"""
def calculateRewardedLickIntervals(expt, threshold=1.0, imageSync=False):
    """Separates lick intervals in to rewarded and unrewarded intervals.

    An interval is 'rewarded' if at least one water-reward onset falls within
    its [start, stop] bounds.

    Returns a pair (rewarded, unrewarded) of per-trial arrays; each element is
    an Nx2 array of interval start/stop indices (empty (0, 2) if none).
    """
    lick_intervals = expt.lickingIntervals(
        imageSync=imageSync, sampling_interval=None,
        threshold=threshold, returnBoolList=False)
    rewards = []
    for trial in expt.findall('trial'):
        if imageSync:
            sampling_interval = expt.frame_period()
        else:
            sampling_interval = trial.behavior_sampling_interval()
        # Convert reward times (seconds) to sample/frame indices so they are
        # comparable with the lick interval indices
        water = trial.behaviorData(imageSync=False)['water'] \
            / float(sampling_interval)
        # keep only the onset (first column) of each water event
        rewards.append(water[:, 0] if water.shape[0] > 0 else np.array([]))
    rewarded_intervals = []
    unrewarded_intervals = []
    for trial_idx, intervals_trial, rewards_trial in it.izip(
            it.count(), lick_intervals, rewards):
        rewarded_intervals.append([])
        unrewarded_intervals.append([])
        for interval in intervals_trial:
            # rewarded iff any reward onset lies within this interval
            if np.any((interval[0] <= rewards_trial)
                      & (interval[1] >= rewards_trial)):
                rewarded_intervals[trial_idx].append(interval)
            else:
                unrewarded_intervals[trial_idx].append(interval)
        # normalize each trial's list to an Nx2 array (empty -> shape (0, 2))
        rewarded_intervals[-1] = np.array(rewarded_intervals[-1]) \
            if len(rewarded_intervals[-1]) else np.empty((0, 2))
        unrewarded_intervals[-1] = np.array(unrewarded_intervals[-1]) \
            if len(unrewarded_intervals[-1]) else np.empty((0, 2))
    return np.array(rewarded_intervals), np.array(unrewarded_intervals)
"""
Trial behavior functions
"""
def runningIntervals(
        trial, imageSync=True, stationary_tolerance=2.0,
        returnBoolList=False, direction='both', min_duration=0,
        min_mean_speed=0, min_peak_speed=0, end_padding=0,
        preceding_still_time=0):
    """Return running interval start and stop times (Nx2 array).

    Parameters
    ----------
    imageSync : bool
        If True, returns data synced to image frames.
    stationary_tolerance : float
        Amount of time (s) where mouse is still that is allowed before starting
        a new interval.
    returnBoolList : bool
        If True, converts intervals to boolean array that is True when there is
        running.
    direction : {'both', forward', 'backwards'}
        Determines direction of running to include.
    min_duration : float, optional
        Minimum duration (in seconds) of a valid running interval.
    min_mean_speed : float, optional
        Minimum mean speed of a valid running interval.
    min_peak_speed : float, optional
        Minimum peak speed of a valid running interval.
    end_padding : float, optional
        Time (s) to add on to the end of every running interval.
    preceding_still_time : float, optional
        Minimum still time (s) required before a valid interval starts.

    Examples
    --------
    If running=[0 1 1 0 0 0 0 1 1 1 0] and stationary_tolerance=2,
    returns [[1, 2], [7, 9]].
    Using a smaller stationary_tolerance results in more granular intervals,
    while a larger stationary_tolerance leads to longer running intervals

    Note
    ----
    The last frame of each interval does not actually contain running activity
    They are formatted for easy array slicing

    See Also
    --------
    lickingIntervals
    """
    behaviorData = trial.behaviorData(imageSync=imageSync)
    # period: seconds per sample (frame period when image-synced)
    if imageSync:
        period = trial.parent.frame_period()
    else:
        period = trial.behavior_sampling_interval()
    if 'treadmillPosition' not in behaviorData:
        # Without quadrature data the direction of motion is unknown
        warnings.warn(
            'Quadrature data not available. Analyzing both directions of' +
            ' motion')
        direction = 'both'
        vel = None
    else:
        if imageSync:
            vel = velocity(trial, imageSync=True, sampling_interval=None)
        else:
            vel = velocity(trial, imageSync=False, sampling_interval='actual')
    # Determine the indices at which the mouse is moving (in the requested
    # direction). When velocity is unavailable, fall back to raw treadmill
    # tick times.
    if direction == 'both':
        if vel is not None:
            running_inds = np.where(vel != 0)[0]
        else:
            if imageSync:
                running_times = []
                for key in ['treadmillTimes', 'treadmillTimes2']:
                    try:
                        running_times.append(
                            np.where(behaviorData[key] != 0)[0])
                    except KeyError:
                        pass
                if len(running_times):
                    running_inds = np.hstack(running_times)
                    running_inds.sort()
                else:
                    running_inds = np.array([])
            else:
                running_times = []
                for key in ['treadmillTimes', 'treadmillTimes2']:
                    try:
                        # convert tick times (s) to sample indices
                        treadmill_times = behaviorData[key] / period
                    except KeyError:
                        pass
                    else:
                        if len(treadmill_times):
                            running_times.append(treadmill_times)
                if len(running_times):
                    running_inds = np.hstack(running_times)
                    running_inds.sort()
                else:
                    running_inds = np.array([])
    elif direction == 'forward':
        running_inds = np.where(vel > 0)[0]
    elif direction == 'backwards':
        running_inds = np.where(vel < 0)[0]
    else:
        raise ValueError(
            "Invalid direction, must be one of 'forward', 'backwards', " +
            "or 'both'")
    if running_inds.size:
        running_inds = running_inds.astype('uint32')
        # find end indices where the gap between running frames is above
        # stationary_tolerance
        ends = np.where(
            np.diff(running_inds) > stationary_tolerance / period + 1)[0]
        ends = ends.astype('uint32')
        starts = ends + 1
        # The first one is always a start, and the last is always an end
        ends = np.hstack([ends, running_inds.size - 1])
        starts = np.hstack([0, starts])
        good_inds = np.ones(len(starts), 'bool')
        # Check for various interval validity criteria
        if min_mean_speed or min_peak_speed or min_duration or \
                preceding_still_time:
            for idx, (start, end) in enumerate(zip(starts, ends)):
                if idx == 0:
                    previous_run_frame = 0
                else:
                    previous_run_frame = running_inds[ends[idx - 1]]
                # drop intervals not preceded by enough stillness
                if (running_inds[start] - previous_run_frame) * period < \
                        preceding_still_time:
                    good_inds[idx] = False
                # drop intervals that are too short
                if (running_inds[end] - running_inds[start] + 1) * period < \
                        min_duration:
                    good_inds[idx] = False
                if vel is not None:
                    # drop intervals that are too slow (mean or peak)
                    if np.mean(np.abs(
                            vel[running_inds[start]:running_inds[end] + 1])) \
                            < min_mean_speed or \
                            np.amax(np.abs(
                            vel[running_inds[start]:running_inds[end] + 1])) \
                            < min_peak_speed:
                        good_inds[idx] = False
                else:
                    warnings.warn(
                        'Unable to determine velocity, ignoring speed criteria')
        result = np.array(
            [running_inds[starts[good_inds]], running_inds[ends[good_inds]]]).T
        # pad frames to the ends of the running intervals
        if end_padding and len(result) > 0:
            if imageSync:
                result[:, 1] += int(end_padding / period)
                # clip padded intervals to the end of the recording
                result = result[
                    result[:, 1] < behaviorData['treadmillTimes'].shape[0]]
            else:
                result[:, 1] += int(end_padding)
                result = result[result[:, 1] <= behaviorData[
                    'recordingDuration']]
            # merge intervals that now overlap because of the padding
            padded_result = []
            interval_idx = 0
            while interval_idx < result.shape[0]:
                if interval_idx != result.shape[0] - 1 and result[
                        interval_idx, 1] >= result[interval_idx + 1, 0]:
                    padded_result.append(
                        [result[interval_idx, 0], result[interval_idx + 1, 1]])
                    interval_idx += 2
                else:
                    padded_result.append(
                        [result[interval_idx, 0], result[interval_idx, 1]])
                    interval_idx += 1
            result = np.array(padded_result)
    else:
        result = np.zeros([0, 2], 'uint32')
    if returnBoolList:
        # build a boolean mask the length of the recording
        if imageSync:
            try:
                boolList = np.zeros(
                    behaviorData['treadmillPosition'].shape[0], dtype='bool')
            except KeyError:
                boolList = np.zeros(
                    behaviorData['treadmillTimes'].shape[0], dtype='bool')
        else:
            boolList = np.zeros(
                int(behaviorData['recordingDuration'] /
                    behaviorData['samplingInterval']), dtype='bool')
        for interval in result:
            boolList[interval[0]:interval[1]] = True
        return boolList
    return result
def lickingIntervals(trial, imageSync=False, sampling_interval=None,
                     threshold=2.0, returnBoolList=False):
    """Return licking interval start and stop frames (Nx2 array).

    Parameters
    ----------
    imageSync : bool
        If True, synchronizes the output structure to imaging frames
    sampling_interval : float, optional
        In place of `imageSync`, you can set a particular rate to re-sample the
        data at.
        If left None and `imageSync=False`, defaults to sampling rate of the
        behavior data.
    threshold : float, optional
        Combines intervals separated by less than `threshold` (in seconds)
    returnBoolList : bool
        If True, returns data as a boolean mask of in/out of licking intervals.
        If False, returns interval start/stop frames.

    Examples
    --------
    If sampling_interval=0.5, licking=[0 1 1 0 0 0 0 1 1 1 0] and
    threshold=1 frame, this returns [[1, 2], [7, 9]].
    Using a smaller threshold results in more granular intervals, while a
    larger threshold leads to longer licking intervals

    Note
    ----
    The last frame of each interval does not actually contain licking activity.
    They are formatted for easy array slicing.

    See Also
    --------
    runningIntervals
    """
    if imageSync:
        behaviorData = trial.behaviorData(imageSync=True)
        sampling_interval = trial.parent.frame_period()
    else:
        if sampling_interval is None:
            # Default to behavior data sampling rate
            sampling_interval = trial.behavior_sampling_interval()
        behaviorData = trial.behaviorData(
            imageSync=False, sampling_interval=sampling_interval)
    lickingFrames = behaviorData['licking']
    licking_inds = np.where(lickingFrames != 0)[0]
    if licking_inds.size:
        licking_inds = licking_inds.astype('uint32')
        # end indices where the gap between licking frames is above threshold
        ends = np.where(np.diff(
            licking_inds) > threshold / float(sampling_interval))[0]
        ends = ends.astype('uint32')
        starts = ends + 1
        # The first one is always a start, and the last is always an end
        ends = np.hstack((ends, licking_inds.size - 1))
        starts = np.hstack((0, starts))
        # stop index is exclusive (+1) to support direct array slicing
        result = np.array([licking_inds[starts], licking_inds[ends] + 1]).T
    else:
        result = np.zeros([0, 2])
    if returnBoolList:
        boolList = np.zeros(len(lickingFrames), dtype='bool')
        for interval in result:
            boolList[interval[0]:interval[1] + 1] = True
        return boolList
    else:
        return result
def lickCount(trial, startTime=0, endTime=-1, duration_threshold=0.1):
    """Count licks that fall entirely within [startTime, endTime].

    Only licks lasting no more than duration_threshold seconds are counted.
    An endTime of -1 means the full recording duration. Returns -1 when no
    licking data is available.
    """
    bd = trial.behaviorData()
    if 'licking' not in bd:
        print("No licking data")
        return -1
    if endTime == -1:
        endTime = bd['recordingDuration']
    # licks entirely inside the window...
    in_window = [(start, stop) for start, stop in bd['licking']
                 if start >= startTime and stop <= endTime]
    # ...that are also short enough to count
    counted = [start for start, stop in in_window
               if (stop - start) <= duration_threshold]
    return len(counted)
def absolutePosition(trial, imageSync=True, sampling_interval=None):
    """Return the normalized absolute (cumulative) position of the mouse.

    Position increases by 1 for each completed lap, so
    absolutePosition % 1 = behaviorData()['treadmillPosition'].

    Parameters
    ----------
    imageSync : bool
        If True, syncs to imaging data. Mutually exclusive with
        `sampling_interval`.
    sampling_interval : optional
        Explicit sampling interval for the behavior data; required (or
        defaulted to 'actual') when imageSync is False.

    Raises
    ------
    exc.MissingBehaviorData
        If no treadmill position data is available.
    """
    assert not (imageSync and sampling_interval is not None)
    if not imageSync and sampling_interval is None:
        # BUG FIX: this used to be `raise(Exception, ...)`, which made the
        # fallback assignment below unreachable. The message text and the
        # parallel logic in velocity() show the intent was to warn and
        # default to the actual sampling interval.
        warnings.warn(
            "Should be either image sync'd or at an explicit sampling " +
            "interval, defaulting to 'actual' sampling interval")
        sampling_interval = 'actual'
    bd = trial.behaviorData(
        imageSync=imageSync, sampling_interval=sampling_interval)
    try:
        position = bd['treadmillPosition']
    except KeyError:
        raise exc.MissingBehaviorData(
            'No treadmillPosition, unable to calculate absolute position')
    # A large negative jump in normalized position marks a lap start; a large
    # positive jump marks a backwards step across the lap boundary.
    lap_starts = np.where(np.diff(position) < -0.5)[0]
    lap_back = np.where(np.diff(position) > 0.5)[0].tolist()
    lap_back.reverse()
    # Need to check for backwards steps around the lap start point
    if len(lap_back) > 0:
        next_back = lap_back.pop()
    else:
        next_back = np.inf
    for start in lap_starts:
        if next_back < start:
            # undo the backwards step before counting the lap
            position[next_back + 1:] -= 1
            position[start + 1:] += 1
            if len(lap_back) > 0:
                next_back = lap_back.pop()
            else:
                next_back = np.inf
        else:
            position[start + 1:] += 1
    return position
def velocity(trial, imageSync=True, sampling_interval=None, belt_length=200,
             smoothing=None, window_length=5, tick_count=None):
    """Return the velocity of the mouse.

    Parameters
    ----------
    imageSync : bool
        If True, syncs to imaging data.
    belt_length : float
        Length of belt, will return velocity in units/second. Overridden by
        the experiment's belt info when available.
    smoothing : {None, str}
        Window function to use, should be 'flat' for a moving average or
        the name of a numpy window function (hamming, hanning, bartlett,
        etc.).
    window_length : int
        Length of window function, should probably be odd.
    tick_count : float
        if not None velocity is calculated based on the treadmill
        times by counting ticks and dividing by the tick_count. i.e.
        tick_count should be in ticks/m (or ticks/cm) to get m/s (cm/s)
        returned.
    """
    assert not (imageSync and sampling_interval is not None)
    if not imageSync and sampling_interval is None:
        warnings.warn(
            "Should be either image sync'd or at an explicit sampling " +
            "interval, defaulting to 'actual' sampling interval")
        sampling_interval = 'actual'
    try:
        b = trial.parent.belt().length()
        assert b > 0
        belt_length = b
    except (exc.NoBeltInfo, AssertionError):
        warnings.warn(
            'No belt information found for experiment %s. \n'
            'Using default belt length = %f' % (str(trial.parent), belt_length))
    if tick_count is not None:
        # BUG FIX: scipy.interpolate was used below but never imported by this
        # module (only gaussian_filter1d is imported at the top), causing a
        # NameError whenever tick_count was supplied.
        import scipy.interpolate
        bd = trial.behaviorData(imageSync=imageSync)
        times = bd['treadmillTimes']
        duration = bd['recordingDuration']
        if imageSync:
            # convert frame indices back to seconds before binning
            times = np.where(times != 0)[0] * trial.parent.frame_period()
            bincounts = np.bincount(
                times.astype(int), minlength=duration)[:duration]
            bincounts = bincounts.astype(float) / tick_count
            # resample the per-second rates onto the imaging timebase
            interpFunc = scipy.interpolate.interp1d(
                range(len(bincounts)), bincounts)
            xnew = np.linspace(
                0, len(bincounts) - 1, len(bd['treadmillTimes']))
            return interpFunc(xnew)
        else:
            bincounts = np.bincount(
                times.astype(int), minlength=duration)[:duration]
            bincounts = bincounts.astype(float) / tick_count
            return bincounts
    # Position-based velocity: differentiate the absolute position
    try:
        position = absolutePosition(
            trial, imageSync=imageSync, sampling_interval=sampling_interval)
    except exc.MissingBehaviorData:
        raise exc.MissingBehaviorData(
            'Unable to calculate position based velocity')
    if imageSync:
        samp_int = trial.parent.frame_period()
    elif sampling_interval == 'actual':
        samp_int = trial.behavior_sampling_interval()
    else:
        samp_int = sampling_interval
    vel = np.hstack([0, np.diff(position)]) * belt_length / samp_int
    if smoothing is not None and np.any(vel != 0):
        if smoothing == 'flat':  # moving average
            w = np.ones(window_length, 'd')
        else:
            # Look the window function up on numpy; raises AttributeError for
            # an unknown name, as the previous eval-based lookup did.
            w = getattr(np, smoothing)(window_length)
        # reflect the signal at both ends to avoid edge artifacts
        s = np.r_[vel[window_length - 1:0:-1], vel, vel[-1:-window_length:-1]]
        vel = np.convolve(w / w.sum(), s, mode='valid')
        # Trim away extra frames (// preserves the Python 2 floor-division
        # semantics of the original slice arithmetic)
        vel = vel[window_length // 2 - 1:-window_length // 2]
    return vel
"""
ExperimentGroup functions
"""
def averageBehavior(exptGrp, ax=None, key='velocity', sampling_interval=1,
                    smooth_length=None, trim_length=None):
    """Plots the average behavior data over the course of the experiment
    For example, average lick rate over time since experiment start
    Keyword arguments:
    key -- behavior data to plot ('velocity', 'running', or any interval
        key present in trial.behaviorData())
    sampling_interval -- sampling interval of final output (in seconds)
    smooth_length -- smoothing window length in seconds
    trim_length -- length to trim the final average to (in seconds)
    Returns the averaged trace as a numpy array, or None if no usable
    trials were found.
    """
    # Running sum and per-timepoint trial count; grown lazily so trials of
    # different durations can be averaged together.
    data_sum = np.array([])
    data_count = np.array([])
    for expt in exptGrp:
        for trial in expt.findall('trial'):
            try:
                bd = trial.behaviorData(imageSync=False)
                # Probe for required keys up front so the trial is skipped
                # cleanly if either is missing.
                bd['samplingInterval']
                bd['recordingDuration']
            except (exc.MissingBehaviorData, KeyError):
                continue
            # Can't downsample behavior that was recorded more coarsely
            # than the requested output interval.
            if bd['samplingInterval'] > sampling_interval:
                warnings.warn(
                    "{}_{}: Sampling interval too low, skipping experiment.".format(
                        expt.parent.get('mouseID'), expt.get('startTime')))
                continue
            if key == 'velocity':
                try:
                    vel = velocity(
                        trial, imageSync=False, sampling_interval='actual',
                        smoothing=None)
                except exc.MissingBehaviorData:
                    warnings.warn(
                        "{}_{}: Unable to determine velocity, skipping experiment.".format(
                            expt.parent.get('mouseID'), expt.get('startTime')))
                    continue
                else:
                    # Bin the native-rate velocity trace down to the output
                    # sampling interval by averaging within each bin.
                    data = np.zeros(
                        int(bd['recordingDuration'] / sampling_interval))
                    starts, step = np.linspace(0, len(vel), num=len(data),
                                               retstep=True)
                    for idx, start in enumerate(starts):
                        data[idx] = np.mean(
                            vel[int(start):int(np.ceil(start + step))])
                    intervals = None
            elif key == 'running':
                # runningIntervals returns frame indices; scale to seconds.
                intervals = runningIntervals(trial, imageSync=False) * \
                    bd['samplingInterval']
            else:
                try:
                    intervals = bd[key]
                except KeyError:
                    continue
            if intervals is not None:
                # Rasterize (start, stop) intervals into a binary trace at
                # the output sampling interval. NaN start/stop marks an
                # interval running off the edge of the recording.
                data = np.zeros(
                    int(bd['recordingDuration'] / sampling_interval))
                for start, stop in intervals:
                    if np.isnan(start):
                        start = 0
                    if np.isnan(stop):
                        stop = bd['recordingDuration']
                    data[int(start / sampling_interval):
                         int(np.ceil(stop / sampling_interval))] = 1
            # Grow accumulators if this trial is longer than any seen so far.
            if len(data) > len(data_sum):
                data_sum = np.hstack(
                    [data_sum, np.zeros(len(data) - len(data_sum))])
                data_count = np.hstack(
                    [data_count, np.zeros(len(data) - len(data_count))])
            data_sum[:len(data)] += data
            data_count[:len(data)] += 1
    if np.sum(data_count) == 0:
        return None
    final_average = data_sum / data_count
    if trim_length is not None:
        # NOTE(review): relies on Python 2 integer division when both
        # trim_length and sampling_interval are ints; a float quotient here
        # would fail as a slice index -- confirm intended usage.
        final_average = final_average[:(trim_length / sampling_interval)]
    if smooth_length is not None:
        smooth = int(smooth_length / sampling_interval)
        # Sigma is forced to an even value before passing to the Gaussian
        # filter (presumably a window-length convention -- TODO confirm).
        final_average = gaussian_filter1d(
            final_average, smooth if smooth % 2 == 0 else smooth + 1)
    if ax is not None:
        ax.plot(
            np.linspace(0, len(final_average) * float(sampling_interval),
                        len(final_average)),
            final_average, label=exptGrp.label())
        ax.set_xlabel('Time (s)')
        if key == 'velocity':
            ax.set_ylabel('Average velocity')
        else:
            ax.set_ylabel('Average activity (% of trials)')
        ax.set_title('{} averaged across trials'.format(key))
    return final_average
def getBehaviorTraces(
        exptGrp, stimulus_key, data_key, pre_time=5, post_time=5,
        sampling_interval=0.01, imageSync=False, use_rebinning=False,
        deduplicate=False):
    """Grab behavior data traces triggered by a stimulus.
    Parameters
    ----------
    exptGrp : lab.ExperimentGroup
        contains the group of experiments to get the triggered traces.
    stimulus_key : {str, dict}
        stimulus for triggering the traces. If a string, it should be a key in
        behaviorData. If a dict, then the keys should be experiments. For each
        experiment key the values should be a list of start times.
    data_key : str
        the behavior data type, e.g., velocity.
    pre_time, post_time : float
        time (in seconds) before and after stimuli
    sampling_interval : float
        sampling interval to convert all behavior data to
    imageSync : bool
        if True, sync to the imaging interval, defaults to False.
    use_rebinning : bool
        if True, use average of points to resample, and not interpolation.
        Most useful for downsampling binary signals such as licking. Defaults
        to False.
    deduplicate : bool
        if True, then triggers within the pre_time+post_time window of the
        first trigger is ignored. Defaults to False.
    Returns
    -------
    pandas.DataFrame
        Each row is a behavior trace from a single triggered event.
    Notes
    -----
    Returned dataframe has columns:
    expt
        The experiment that this trace belongs to.
    stimulus
        The stimulus type that triggered the trace.
    dataKey
        The type of behavior trace that is stored, e.g., velocity.
    stimStart
        The frame # that triggered the trace.
    data
        The actual behavior trace.
    time
        The time values corresponding to the behavior trace, with the
        triggered event centered at 0.
    lapNum
        The lap # that the triggered event occured.
    """
    # Columns accumulated across all experiments; converted to a DataFrame
    # at the end.
    df = dict()
    df["expt"] = []
    df["stimulus"] = []
    df["dataKey"] = []
    df["stimStart"] = []
    df["data"] = []
    df["time"] = []
    df["lapNum"] = []
    for expt in exptGrp:
        data_raw = []
        data_lapNum = []
        # Stimulus starts either supplied explicitly per-experiment (dict)
        # or derived from the behavior record via stimStarts().
        if isinstance(stimulus_key, dict):
            try:
                starts = stimulus_key[expt]
            except KeyError:
                continue
        else:
            try:
                starts = stimStarts(expt, stimulus_key, imageSync=imageSync,
                                    deduplicate=deduplicate,
                                    duplicate_window=pre_time + post_time)
            except (exc.MissingBehaviorData, KeyError):
                continue
        if imageSync:
            # Image-synced starts are already frame indices.
            sampling_interval = expt.frame_period()
            stim_starts = starts
        else:
            # Convert start times (seconds) into sample indices.
            stim_starts = [np.around(trial / sampling_interval).astype(int)
                           for trial in starts]
        pre_frames = int(pre_time / sampling_interval)
        post_frames = int(post_time / sampling_interval)
        if data_key == 'velocity':
            if imageSync:
                behaviorData = [velocity(
                    trial, imageSync=imageSync)
                    for trial in expt.findall('trial')]
            else:
                behaviorData = [velocity(
                    trial, imageSync=imageSync, sampling_interval=sampling_interval)
                    for trial in expt.findall('trial')]
        else:
            try:
                if imageSync:
                    behaviorData = [trial.behaviorData(
                        imageSync=imageSync,
                        use_rebinning=use_rebinning)[data_key]
                        for trial in expt.findall('trial')]
                else:
                    behaviorData = [trial.behaviorData(
                        imageSync=imageSync, sampling_interval=sampling_interval,
                        use_rebinning=use_rebinning)[data_key]
                        for trial in expt.findall('trial')]
            except KeyError:
                continue
        # Integer lap number at every sample; None per trial if position
        # data is unavailable.
        try:
            if imageSync:
                lapNums = [np.array(absolutePosition(
                    trial, imageSync=imageSync)).astype("int32")
                    for trial in expt.findall("trial")]
            else:
                lapNums = [np.array(absolutePosition(
                    trial, imageSync=imageSync, sampling_interval=sampling_interval)).astype("int32")
                    for trial in expt.findall("trial")]
        except exc.MissingBehaviorData:
            lapNums = [None for trial in expt.findall('trial')]
        for stimFrames, data, lapNum in it.izip(stim_starts, behaviorData, lapNums):
            for stim in stimFrames:
                if np.isnan(stim):
                    stim = 0
                # Check for running off the ends
                if stim - pre_frames >= 0:
                    data_start = 0
                    start_frame = stim - pre_frames
                else:
                    data_start = pre_frames - stim
                    start_frame = 0
                if stim + post_frames < len(data):
                    data_end = pre_frames + post_frames + 1
                    stop_frame = stim + post_frames + 1
                else:
                    # Negative index: trims the unfilled tail of dataRow so
                    # the slice lengths on both sides match.
                    data_end = len(data) - stim - post_frames - 1
                    stop_frame = len(data)
                # Window padded with NaN where it runs past the recording.
                dataRow = np.empty(pre_frames + post_frames + 1)
                dataRow.fill(np.nan)
                dataRow[data_start:data_end] = data[start_frame:stop_frame]
                data_raw.append(dataRow)
                if lapNum is not None:
                    # Clamp triggers that fall past the last position sample.
                    if(stim >= lapNum.size):
                        stim = lapNum.size - 1
                    data_lapNum.append(lapNum[stim])
                else:
                    data_lapNum.append(np.nan)
        numTraces = len(data_raw)
        df["expt"].extend([expt] * numTraces)
        df["stimulus"].extend([stimulus_key] * numTraces)
        df["dataKey"].extend([data_key] * numTraces)
        df["stimStart"].extend([val for sublist in stim_starts for val in sublist])
        df["data"].extend(data_raw)
        df["time"].extend([np.r_[-(pre_frames * sampling_interval):
                                 ((post_frames + 1) * sampling_interval):sampling_interval]] * numTraces)
        df["lapNum"].extend(data_lapNum)
    return pd.DataFrame(df)
def behaviorPSTH(exptGrp, stimuli_key, data_key, pre_time=5,
                 post_time=10, sampling_interval=0.01, smoothing=None,
                 window_length=1):
    """calculates a PSTH of behavior data versus a stimuli
    Parameters
    ----------
    exptGrp: lab.ExperimentGroup
        contains the group of experiments to calculate the PSTH on
    stimuli_key: str
        stimulus to trigger the PSTH, should be a key in behaviorData or
        'running' which will be the start of running intervals
    data_key: str
        behaviorData key used to generate the histogram
    pre_time, post_time: float
        time (in seconds) before and after the stimuli
    sampling_interval: float
        sampling interval to convert all behavior data to
    smoothing: func
        window function to use, should be 'flat' for a moving average
        or np.'smoothing' (hamming, hanning, bartltett, etc.)
    window_length: float
        length of smoothing window function in seconds
    Returns
    -------
    numpy.ndarray
        the PSTH of the behavior data triggered on the stimulus for the exptGrp
    """
    # Convert all time arguments into sample counts; force an odd smoothing
    # window so it has a well-defined center.
    pre_frames = int(pre_time / sampling_interval)
    post_frames = int(post_time / sampling_interval)
    window_length = int(window_length / sampling_interval)
    if window_length % 2 == 0:
        window_length += 1
    data_sum = np.zeros(pre_frames + post_frames + 1)
    data_count = np.zeros(data_sum.shape)
    for expt in exptGrp:
        try:
            starts = stimStarts(expt, stimuli_key, imageSync=False)
        except (exc.MissingBehaviorData, KeyError):
            continue
        # Start times (seconds) -> sample indices.
        stim_starts = [np.around(trial / sampling_interval).astype(int) for
                       trial in starts]
        if data_key == 'velocity':
            behaviorData = [velocity(
                trial, imageSync=False, sampling_interval=sampling_interval)
                for trial in expt.findall('trial')]
        else:
            try:
                behaviorData = [trial.behaviorData(
                    sampling_interval=sampling_interval)[data_key]
                    for trial in expt.findall('trial')]
            except KeyError:
                continue
        for stimFrames, data in it.izip(stim_starts, behaviorData):
            for stim in stimFrames:
                if np.isnan(stim):
                    stim = 0
                # Check for running off the ends
                if stim - pre_frames >= 0:
                    data_start = 0
                    start_frame = stim - pre_frames
                else:
                    data_start = pre_frames - stim
                    start_frame = 0
                if stim + post_frames < len(data):
                    data_end = len(data_sum)
                    stop_frame = stim + post_frames + 1
                else:
                    # Negative index trims the accumulator slice to match
                    # the truncated data window.
                    data_end = len(data) - stim - post_frames - 1
                    stop_frame = len(data)
                data_sum[data_start:data_end] += data[start_frame:stop_frame]
                data_count[data_start:data_end] += 1
    result = data_sum / data_count
    if smoothing is not None:
        if smoothing == 'flat':  # moving average
            w = np.ones(window_length, 'd')
        else:
            # If 'smoothing' is not a valid method, will throw AttributeError
            # NOTE(review): eval on the smoothing argument -- only safe with
            # trusted callers; consider getattr(np, smoothing) instead.
            w = eval('np.' + smoothing + '(window_length)')
        # Reflect the trace at both ends before convolving to avoid edge
        # artifacts, then trim back to the original length.
        s = np.r_[result[window_length - 1:0:-1], result,
                  result[-1:-window_length:-1]]
        tmp = np.convolve(w / w.sum(), s, mode='valid')
        # Trim away extra frames
        # NOTE(review): relies on Python 2 integer division for the slice
        # indices; under Python 3 these would be floats -- confirm runtime.
        result = tmp[window_length / 2 - 1:-window_length / 2]
    return result
def plotBehaviorPSTH(exptGrp, stimulus_key, data_key, ax,
                     pre_time=5, post_time=10, color="b", **kwargs):
    """calculates and plots a PSTH of behavior data vs a stimuli.
    Parameters
    ----------
    exptGrp: lab.ExperimentGroup
        contains the group of experiments to plot the PSTH on
    stimulus_key: str
        stimulus to trigger the PSTH, should be a key in behaviorData or
        'running' which will be the start of running intervals
    data_key: str
        behaviorData key used to generate the histogram, e.g. 'velocity'
    ax: matplotlib.axes
        the PSTH will be plotted on the axes instance.
    pre_time, post_time: float
        time (in seconds) before and after the stimulus
    color:
        the color of the PSTH lines, use any convention accepted by matplotlib
    **kwargs: dict
        see BehaviorPSTH for other keyword arguments
    Returns
    -------
    None
    """
    result = behaviorPSTH(exptGrp, stimulus_key, data_key,
                          pre_time=pre_time, post_time=post_time, **kwargs)
    xAxis = np.linspace(-pre_time, post_time, len(result))
    ax.plot(xAxis, result, label=exptGrp.label(), color=color)
    # Dashed line marks the stimulus onset at t=0.
    ax.axvline(0, 0, 1, linestyle='dashed', color='k')
    ax.set_xlim((-pre_time, post_time))
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Mean data')
    ax.set_title('{} triggered {} PSTH'.format(stimulus_key, data_key))
def positionOccupancy(exptGrp, ax=None, nBins=100, normed=True, showBelt=True,
                      running_only=False, running_kwargs=None,
                      **plot_kwargs):
    """Calculate and plot the time spent at each position on the belt.

    Position is binned into nBins normalized-bins on [0, 1). Returns the
    per-bin occupancy (seconds, or fraction of total if normed). If ax is
    given, the occupancy curve is also plotted.
    """
    if not exptGrp.sameBelt():
        warnings.warn('Not all experiments recorded on same belt')
    binSize = 1.0 / nBins
    binStarts = np.arange(0, 1.0, binSize)
    # framePeriod will be equal to the slowest behavior data sampling
    # interval of all the trials
    framePeriod = 0
    for expt in exptGrp:
        for trial in expt.findall('trial'):
            framePeriod = np.amax(
                [framePeriod, trial.behavior_sampling_interval()])
    occupancy = np.zeros(nBins)
    for expt in exptGrp:
        for trial in expt.findall('trial'):
            treadmillPosition = trial.behaviorData(
                imageSync=False,
                sampling_interval=framePeriod)['treadmillPosition']
            if running_only:
                if running_kwargs:
                    running_intervals = runningIntervals(
                        trial, returnBoolList=True,
                        imageSync=False, **running_kwargs)
                else:
                    running_intervals = runningIntervals(
                        trial, returnBoolList=True,
                        imageSync=False)
                # Mark non-running samples with -1 so they fall outside
                # every position bin below.
                treadmillPosition[~running_intervals] = -1
            for bin_ind, bin_start in enumerate(binStarts):
                bins = np.logical_and(
                    treadmillPosition >= bin_start,
                    treadmillPosition < bin_start + binSize)
                # Each in-bin sample contributes one frame period of time.
                occupancy[bin_ind] += np.sum(bins) * framePeriod
    if normed:
        occupancy /= np.sum(occupancy)
    if ax:
        ax.plot(binStarts, occupancy, **plot_kwargs)
        ax.set_xlim(0, 1)
        ax.set_xlabel('Position')
        if normed:
            ax.set_ylabel('Time (percent)')
        else:
            ax.set_ylabel('Time (s)')
        if showBelt and exptGrp.sameBelt():
            exptGrp[0].belt().addToAxis(ax)
        ax.legend(frameon=False, loc='best')
    return occupancy
"""
ExperimentGroup compare functions
"""
def compareLickRate(exptGrps, ax=None):
    """Scatter-plot per-mouse lick rates (Hz) for each experiment group.

    One marker group per ExperimentGroup; rates are collected per mouse
    across all trials of all experiments in the group.
    """
    if ax is None:
        ax = plt.axes()
    bar_labels = []
    for idx, grp in enumerate(exptGrps):
        label = grp.label()
        bar_labels.append(
            label if label is not None else 'Group {}'.format(idx + 1))
    lickRates = []
    for grp in exptGrps:
        mice = set(expt.parent.get('mouseID') for expt in grp)
        rates_by_mouse = dict((mouse, []) for mouse in mice)
        for expt in grp:
            for trial in expt.findall('trial'):
                duration = trial.behaviorData()['recordingDuration']
                try:
                    rate = lickCount(trial) / duration
                except KeyError:
                    continue
                # If lick rate is exactly 0, assume recording did not work
                if rate > 0:
                    rates_by_mouse[expt.parent.get('mouseID')].append(rate)
        lickRates.append(rates_by_mouse)
    group_labels = [list(rates) for rates in lickRates]
    values = [[rates[mouse] for mouse in labels]
              for rates, labels in zip(lickRates, group_labels)]
    plotting.scatter_1d(ax, values=values, group_labels=group_labels,
                        bar_labels=bar_labels)
    ax.set_title('Lick rate compare')
    ax.set_ylabel('Lick rate (Hz)')
def compareLapRate(exptGrps, ax=None):
    """Scatter-plot per-mouse lap rates (laps/minute) for each group.

    One marker group per ExperimentGroup; rates are collected per mouse
    across all trials of all experiments in the group.
    """
    if ax is None:
        ax = plt.axes()
    bar_labels = []
    for idx, grp in enumerate(exptGrps):
        label = grp.label()
        bar_labels.append(
            label if label is not None else 'Group {}'.format(idx + 1))
    lapRates = []
    for grp in exptGrps:
        mice = set(expt.parent.get('mouseID') for expt in grp)
        rates_by_mouse = dict((mouse, []) for mouse in mice)
        for expt in grp:
            for trial in expt.findall('trial'):
                bd = trial.behaviorData()
                duration = bd['recordingDuration']
                try:
                    # Lap-counter rows with a 1 in column 1 mark completed laps.
                    n_laps = np.sum(bd['lapCounter'][:, 1] == 1)
                except KeyError:
                    continue
                rates_by_mouse[expt.parent.get('mouseID')].append(
                    n_laps / duration * 60)
        lapRates.append(rates_by_mouse)
    group_labels = [list(rates) for rates in lapRates]
    values = [[rates[mouse] for mouse in labels]
              for rates, labels in zip(lapRates, group_labels)]
    plotting.scatter_1d(ax, values=values, group_labels=group_labels,
                        bar_labels=bar_labels)
    ax.set_title('Lap rate compare')
    ax.set_ylabel('Lap rate (laps/minute)')
def compareBehaviorPSTH(exptGrps, stimuli_key, data_key, ax, pre_time=4,
                        post_time=10, **kwargs):
    """Compare behavior PSTHs. See exptGrp.behaviorPSTH for details.
    Parameters
    ----------
    exptGrps: iterable
        contains an iterable of lab.ExperimentGroup instances. Plot the
        BehaviorPSTH for each ExperimentGroup instance on the same axes.
    stimuli_key: str
        stimulus to trigger the PSTH, should be a key in behaviorData or
        'running' which will be the start of running intervals
    data_key: str
        behaviorData key used to generate the histogram, e.g. 'velocity'
    ax: matplotlib.axes
        the PSTHs will be plotted on the axes instance.
    pre_time, post_time: float
        time (in seconds) before and after the stimulus
    **kwargs: dict
        see BehaviorPSTH for other keyword arguments
    Returns
    -------
    None
    """
    for exptGrp in exptGrps:
        result = behaviorPSTH(
            exptGrp, stimuli_key, data_key, pre_time=pre_time,
            post_time=post_time, **kwargs)
        xAxis = np.linspace(-pre_time, post_time, len(result))
        ax.plot(xAxis, result, label=exptGrp.label())
    # Mark stimulus onset without letting the marker line rescale the axes.
    ylim = ax.get_ylim()
    ax.vlines(0, 0, 1, linestyles='dashed', color='k')
    ax.set_ylim(ylim)
    ax.set_xlim((-pre_time, post_time))
    ax.legend()
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Mean data')
    ax.set_title('{} triggered {} PSTH'.format(stimuli_key, data_key))
def stimStarts(expt, stimulus, exclude_paired_from_single=True,
               imageSync=True, deduplicate=False, duplicate_window=None):
    """Return stimulus start times, formatted for psth().
    Returns list of np.arrays, one item per trial and then an array of
    start frames if imageSync is True and start times if False
    If the stim is present at the start of the trial, does NOT return the first
    frame.

    The 'stimulus' argument selects one of many trigger definitions:
    running/licking interval starts and stops (optionally filtered by
    duration, reward proximity, or stimulus window), 'Paired ...' combined
    stimuli, position crossings ('position_<X>' / 'reward'), 'all', or any
    raw behaviorData interval key.
    """
    # Running/licking events within this many seconds after the stimulus
    # time count as "stim" events; water within this window of a run start
    # marks it as rewarded.
    POST_STIM_DELAY = 10.0
    REWARDED_RUN_INTERVAL = 3.0
    if stimulus == 'air':
        stimulus = 'airpuff'
    if stimulus == 'all':
        # Union of every stimulus the experiment defines, merged per trial.
        stimuli = expt.stimuli()
        all_starts = stimStarts(expt, stimuli[0], imageSync=imageSync)
        for stim in stimuli[1:]:
            starts = stimStarts(expt, stim, imageSync=imageSync)
            for trial_idx, trial_starts in enumerate(starts):
                all_starts[trial_idx] = np.sort(np.unique(np.hstack(
                    (all_starts[trial_idx], trial_starts))))
        return all_starts
    if stimulus == 'running' or stimulus == 'running_start':
        starts = [interval[:, 0] * trial.behavior_sampling_interval()
                  for interval, trial in zip(
                      expt.runningIntervals(imageSync=False),
                      expt.findall('trial'))]
    elif stimulus in ['running_start_5', 'running_start_5_5']:
        # Run starts with >= 5 s duration; '_5_5' also requires 5 s of
        # preceding stillness.
        starts = [interval[:, 0] * trial.behavior_sampling_interval()
                  for interval, trial in zip(
                      expt.runningIntervals(
                          imageSync=False, min_duration=5,
                          preceding_still_time=5 if '5_5' in stimulus else 0),
                      expt.findall('trial'))]
    elif stimulus in [
            'running_stop_5', 'running_stop_5_off', 'running_stop_5_5',
            'running_stop_5_5_off']:
        starts = [interval[:, 1] * trial.behavior_sampling_interval()
                  for interval, trial in zip(
                      expt.runningIntervals(
                          imageSync=False, min_duration=5,
                          preceding_still_time=5 if '5_5' in stimulus else 0),
                      expt.findall('trial'))]
    elif 'running_stop' in stimulus:
        starts = [interval[:, 1] * trial.behavior_sampling_interval()
                  for interval, trial in zip(
                      expt.runningIntervals(imageSync=False),
                      expt.findall('trial'))]
        if 'rewarded' in stimulus:
            # Keep only stops near (or, for 'unrewarded', away from) a
            # water delivery.
            water = [trial.behaviorData(imageSync=False)['water']
                     for trial in expt.findall('trial')]
            result = []
            for water_trial, run_trial in it.izip(water, starts):
                frame_diff = np.abs(run_trial.reshape((-1, 1)) -
                                    water_trial.reshape((1, -1)))
                rewarded = np.any(
                    frame_diff < REWARDED_RUN_INTERVAL, axis=1)
                if 'unrewarded' in stimulus:
                    result.append(run_trial[~rewarded])
                else:
                    result.append(run_trial[rewarded])
            starts = result
    elif stimulus == 'running_no_stim':
        # Run starts whose interval lies entirely outside the
        # [stim_time, stim_time + POST_STIM_DELAY] window.
        starts = []
        stim_time = expt.stimulusTime()
        for trial_running, trial in zip(
                expt.runningIntervals(imageSync=False),
                expt.findall('trial')):
            trial_interval = trial.behavior_sampling_interval()
            starts.append(np.array(
                [interval[0] * trial_interval
                 for interval in trial_running
                 if interval[1] * trial_interval < stim_time or
                 interval[0] * trial_interval > stim_time +
                 POST_STIM_DELAY]))
    elif stimulus == 'running_stim':
        # Run intervals fully contained in the post-stimulus window.
        starts = []
        stim_time = expt.stimulusTime()
        for trial_running, trial in zip(
                expt.runningIntervals(imageSync=False),
                expt.findall('trial')):
            trial_interval = trial.behavior_sampling_interval()
            starts.append(np.array(
                [interval[0] * trial_interval
                 for interval in trial_running
                 if interval[0] * trial_interval >= stim_time and
                 interval[1] * trial_interval < stim_time +
                 POST_STIM_DELAY]))
    elif stimulus == 'running_stim_no_pair':
        # Same as 'running_stim', skipping trials with a 'Paired' stimulus.
        starts = []
        stim_time = expt.stimulusTime()
        for trial_running, trial in zip(
                expt.runningIntervals(imageSync=False),
                expt.findall('trial')):
            if 'Paired' in trial.get('stimulus', ''):
                continue
            trial_interval = trial.behavior_sampling_interval()
            starts.append(np.array(
                [interval[0] * trial_interval
                 for interval in trial_running
                 if interval[0] * trial_interval >= stim_time and
                 interval[1] * trial_interval < stim_time +
                 POST_STIM_DELAY]))
    elif 'running_stim_' in stimulus:
        # 'running_stim_<name>': restrict to trials whose stimulus tag
        # exactly matches <name>.
        stim = stimulus[13:]
        starts = []
        stim_time = expt.stimulusTime()
        for trial_running, trial in zip(
                expt.runningIntervals(imageSync=False),
                expt.findall('trial')):
            if trial.get('stimulus', '') != stim:
                continue
            trial_interval = trial.behavior_sampling_interval()
            starts.append(np.array(
                [interval[0] * trial_interval
                 for interval in trial_running
                 if interval[0] * trial_interval >= stim_time and
                 interval[1] * trial_interval < stim_time +
                 POST_STIM_DELAY]))
    elif stimulus == 'licking':
        starts = [interval[:, 0] * trial.behavior_sampling_interval()
                  for interval, trial in zip(
                      expt.lickingIntervals(imageSync=False),
                      expt.findall('trial'))]
    elif stimulus == 'licking_no_stim':
        starts = []
        stim_time = expt.stimulusTime()
        for trial_running, trial in zip(
                expt.lickingIntervals(imageSync=False),
                expt.findall('trial')):
            trial_interval = trial.behavior_sampling_interval()
            starts.append(np.array(
                [interval[0] * trial_interval
                 for interval in trial_running
                 if interval[1] * trial_interval < stim_time or
                 interval[0] * trial_interval > stim_time +
                 POST_STIM_DELAY]))
    elif stimulus == 'licking_stim':
        # NOTE: unlike 'running_stim', only the interval *start* is
        # required to fall inside the post-stimulus window.
        starts = []
        stim_time = expt.stimulusTime()
        for trial_running, trial in zip(
                expt.lickingIntervals(imageSync=False),
                expt.findall('trial')):
            trial_interval = trial.behavior_sampling_interval()
            starts.append(np.array(
                [interval[0] * trial_interval
                 for interval in trial_running
                 if interval[0] * trial_interval >= stim_time and
                 interval[0] * trial_interval < stim_time +
                 POST_STIM_DELAY]))
    elif 'licking_stop' in stimulus:
        starts = [interval[:, 1] * trial.behavior_sampling_interval()
                  for interval, trial in zip(
                      expt.lickingIntervals(imageSync=False),
                      expt.findall('trial'))]
    elif 'licking_reward' in stimulus:
        rewarded_intervals, unrewarded_intervals = \
            calculateRewardedLickIntervals(expt, imageSync=False)
        starts = []
        # Loop over trials
        for trial, trial_intervals in it.izip(
                expt.findall('trial'), rewarded_intervals):
            sampling_interval = \
                trial.behavior_sampling_interval()
            if len(trial_intervals):
                intervals = trial_intervals[:, 0] * sampling_interval
            else:
                intervals = trial_intervals
            starts.append(intervals)
    elif 'licking_no_reward' in stimulus:
        rewarded_intervals, unrewarded_intervals = \
            calculateRewardedLickIntervals(expt, imageSync=False)
        starts = []
        # Loop over trials
        for trial, trial_intervals in it.izip(
                expt.findall('trial'), unrewarded_intervals):
            sampling_interval = \
                trial.behavior_sampling_interval()
            if len(trial_intervals):
                intervals = trial_intervals[:, 0] * sampling_interval
            else:
                intervals = trial_intervals
            starts.append(intervals)
    elif 'Paired' in stimulus:
        # 'Paired <stimA> <stimB> ...': keep times at which every listed
        # stimulus starts simultaneously, only on matching trials.
        starts = []
        stims = stimulus.split()[1:]
        for trial in expt.findall('trial'):
            trial_stim_times = []
            if trial.get('stimulus', '') == stimulus:
                try:
                    bd = trial.behaviorData(imageSync=False)
                except exc.MissingBehaviorData:
                    starts.append(np.array([]))
                    continue
                for stim in stims:
                    if stim == 'air':
                        stim = 'airpuff'
                    if bd[stim].shape[1] > 0:
                        if len(trial_stim_times):
                            trial_stim_times = np.intersect1d(
                                trial_stim_times, bd[stim][:, 0])
                        else:
                            trial_stim_times = bd[stim][:, 0]
                starts.append(trial_stim_times)
            else:
                starts.append(np.array([]))
    elif 'position_' in stimulus or stimulus == 'reward':
        # Finds the first frame where the mouse passed the goal position
        # each lap.
        # Running backwards and then forwards again will not trigger
        # multiple positions, so the max number of starts is the number of
        # laps (actually 1 more than the number of completed laps)
        if stimulus == 'reward':
            rewards = expt.rewardPositions(units=None)
            assert len(rewards) == 1  # Only works for a single reward for now
            goal = rewards[0]
        else:
            goal = int(stimulus[9:])
        starts = []
        for trial in expt.findall('trial'):
            trial_starts = []
            position = absolutePosition(
                trial, imageSync=False, sampling_interval='actual')
            trial_goal = goal / trial.behaviorData()['trackLength']
            if position[0] > trial_goal:
                position -= 1
            # Repeatedly find the first crossing, then shift positions
            # down one lap until no crossing remains.
            while True:
                position[position < 0] = np.nan
                pos_bins = np.where(position >= trial_goal)[0]
                if not len(pos_bins):
                    break
                trial_starts.append(pos_bins[0])
                position -= 1
            starts.append(
                np.array(trial_starts) *
                trial.behavior_sampling_interval())
    else:
        # Fallback: treat 'stimulus' as a raw behaviorData interval key.
        starts = []
        for trial in expt.findall('trial'):
            if exclude_paired_from_single \
                    and 'Paired' in trial.get('stimulus', ''):
                starts.append(np.array([]))
                continue
            try:
                bd = trial.behaviorData(imageSync=False)[stimulus]
            except exc.MissingBehaviorData:
                starts.append(np.array([]))
                continue
            if bd.shape[1] > 0:
                starts.append(bd[:, 0])
            else:
                starts.append(np.array([]))
    if(deduplicate):
        # Greedily keep the first trigger, then skip any trigger closer
        # than duplicate_window to the last kept one.
        assert(duplicate_window is not None)
        dedup = []
        for trialStarts in starts:
            if(trialStarts.size <= 0):
                continue
            dedupTrial = []
            curInd = 0
            while(True):
                dedupTrial.append(trialStarts[curInd])
                nextInds = np.nonzero((trialStarts - trialStarts[curInd]) > duplicate_window)[0]
                if(nextInds.size <= 0):
                    break
                curInd = nextInds[0]
            dedup.append(np.array(dedupTrial))
        starts = dedup
    # Drop NaN values, which correspond to a stim at the start of a trial.
    starts = [
        trial_starts[np.isfinite(trial_starts)] for trial_starts in starts]
    if imageSync:
        # Convert start times (seconds) into unique imaging frame indices.
        syncd_starts = []
        for trial_starts in starts:
            trial_starts /= expt.frame_period()
            # Make sure all the frames are unique
            trial_starts = np.sort(np.unique(trial_starts.astype('int')))
            # Drop frames acquired after imaging stopped
            trial_starts = trial_starts[trial_starts < expt.num_frames()]
            syncd_starts.append(trial_starts)
        starts = syncd_starts
    return starts
def total_absolute_position(
        expt_grp, imageSync=True, sampling_interval=None, by_condition=False):
    """Calculates the position (in laps) as the total laps run per mouse.

    Returns a dict mapping each trial to a position array in the same
    format as absolutePosition, offset so lap numbers accumulate over a
    mouse's trials (sorted order).
    """
    if by_condition:
        raise NotImplementedError
    positions = {}
    grouped = expt_grp.dataframe(
        expt_grp, include_columns=['mouse', 'trial']).groupby('mouse')
    for _, mouse_frame in grouped:
        lap_offset = 0
        for trial in sorted(mouse_frame['trial']):
            pos = absolutePosition(
                trial, imageSync=imageSync,
                sampling_interval=sampling_interval)
            positions[trial] = pos + lap_offset
            # Next trial starts one lap past this trial's last lap.
            lap_offset += int(pos.max()) + 1
    return positions
def licks_near_position(
        expt_grp, position, pre=None, post=None, nbins=100):
    """Return normalized licks near a specific position.

    Parameters
    ----------
    expt_grp : lab.ExperimentGroup
    position : {str, float}
        Position to center lick counts on. Argument is passed to expt.locate
        for each experiment.
    pre, post : float, optional
        If not None, filter the resulting dataframe to only include values
        within the interval [-pre, post], centered about 'position'. Should be
        in normalized belt units: [0, 1).
    nbins : int, optional
        Number of bins for resulting lick histogram.

    Returns
    -------
    pandas.DataFrame
        Columns 'expt', 'pos' (bin center relative to 'position'), 'value'
        (normalized lick count).
    """
    # Collect one frame per experiment and concatenate once at the end
    # (concatenating inside the loop is quadratic). The seed frame fixes
    # the column order and preserves the empty-group result.
    frames = [pd.DataFrame([], columns=['expt', 'pos', 'value'])]
    for expt in expt_grp:
        licks, bins = expt.licktogram(
            normed=True, nPositionBins=nbins)
        pos = expt.locate(position)
        pos_bin = np.argmin(np.abs(bins - pos))
        # Rotate the histogram so the bin nearest 'position' lands in the
        # center. Floor division: np.roll requires an integer shift (the
        # old `nbins / 2` becomes a float under true division).
        rolled_licks = np.roll(licks, nbins // 2 - pos_bin)
        frames.append(pd.DataFrame({
            'expt': [expt] * nbins,
            'pos': bins - 0.5,
            'value': rolled_licks}))
    result = pd.concat(frames, ignore_index=True)
    if pre is not None:
        result = result[result['pos'] >= -pre]
    if post is not None:
        result = result[result['pos'] <= post]
    return result
# TODO: REMOVE -- Temporary for compatibility
from ..classes import *
from ..misc import *
from imaging_analysis import *
| {
"repo_name": "losonczylab/Zaremba_NatNeurosci_2017",
"path": "losonczy_analysis_bundle/lab/analysis/behavior_analysis.py",
"copies": "1",
"size": "56162",
"license": "mit",
"hash": 7357348735125979000,
"line_mean": 37.1017639077,
"line_max": 138,
"alpha_frac": 0.5565863039,
"autogenerated": false,
"ratio": 4.176234384295062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5232820688195062,
"avg_score": null,
"num_lines": null
} |
#analysis of possible mirna sequences using frame sliding method
#cerceve kaydirma yontemi ile olasi mirna sekanslarinin bulunmasi
from StringIO import StringIO
import operator
def gen_sozluk(dosya_adi):
    """Parse a FASTA-like file into a {header: sequence} dictionary.

    Each record starts with a '>' header line. The record body is
    recovered by locating the '>'-separated chunk that contains the
    header text, removing the header and all newlines from it.

    Parameters:
        dosya_adi -- path of the input file.

    Returns a dict mapping header names to newline-free sequences.
    """
    # 'with' guarantees the handle is closed even on a read error
    # (the original left the file open on exceptions).
    with open(dosya_adi, "r") as dosya_f:
        icerik = dosya_f.read()
    sozluk = {}
    bolumler = icerik.split(">")
    for satir in icerik.splitlines():
        if satir.startswith(">"):
            gen_kodu = satir.rstrip().replace(">", "")
            # Substring match against every chunk; as in the original
            # lookup, the last matching chunk wins.
            for bolum in bolumler:
                if gen_kodu in bolum:
                    sozluk[gen_kodu] = bolum.replace(
                        gen_kodu, "").replace("\n", "")
    return sozluk
def intron_hesap(gen_kodu, gen):
    """Return the fragments of a gene left after masking annotated segments.

    Parameters:
        gen_kodu -- annotation string of the form
            'name|start|begin1;begin2;...|end1;end2;...|order1;order2;...'
            where the begin/end coordinates are absolute and `start`
            anchors them to index 0 of `gen` (ends are inclusive).
        gen -- the full gene sequence.

    Returns a list of the non-empty fragments that remain once every
    annotated segment has been replaced by a '*' mask.
    """
    # The original initialized `sozluk` twice and kept unused locals
    # (genAdi, intron_liste); both removed here.
    alanlar = gen_kodu.split("|")
    baslangic = int(alanlar[1])
    bas_listesi = alanlar[2].split(";")
    son_listesi = alanlar[3].split(";")
    sira_listesi = alanlar[4].split(";")
    parcalar = {}
    for i in range(len(sira_listesi)):
        # Convert absolute coordinates to slice indices (end inclusive).
        parcalar[sira_listesi[i]] = gen[
            int(bas_listesi[i]) - baslangic:
            int(son_listesi[i]) - baslangic + 1]
    maskeli = gen
    for parca in parcalar.values():
        if parca in gen:
            # Mask every occurrence so the remainder splits cleanly.
            maskeli = maskeli.replace(parca, "*******")
    return [parca for parca in maskeli.split("*") if parca != ""]
# --- Script entry point (Python 2) --------------------------------------
# Build the {header: sequence} dictionary from the exon-position file,
# mask the annotated segments of every gene, then slide a 20-nt window
# (step 1) over each leftover fragment to enumerate candidate miRNA
# sequences.
sozluk= gen_sozluk("vitisViniferaExonPositions_test_.txt")
intronlar_sozluk={}
for item in sozluk:
    gen_kodu= item
    gen=sozluk[item]
    intronlar_sozluk[item]=intron_hesap(gen_kodu,gen)
olasi_mirna_listesi=[]
for item in intronlar_sozluk:
    # Debug output: number of fragments per gene.
    print len(intronlar_sozluk[item])
    for a in intronlar_sozluk[item]:
        # 20-nt sliding window over each fragment.
        for i in range (len(a)-20):
            olasi_mirna_listesi.append(a[i:i+20])
print"********"
print olasi_mirna_listesi
# Persist the candidate list as a single comma-separated line.
fo=open("olasi_mirna_listesi.txt",'wb')
fo.write(','.join(olasi_mirna_listesi))
fo.close()
| {
"repo_name": "hanakamer/bioinformatics",
"path": "mirna_hesap_cerceve_kaydirma.py",
"copies": "1",
"size": "2086",
"license": "apache-2.0",
"hash": -1062074594924687500,
"line_mean": 28.6764705882,
"line_max": 152,
"alpha_frac": 0.5915627996,
"autogenerated": false,
"ratio": 2.6042446941323347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3695807493732335,
"avg_score": null,
"num_lines": null
} |
# Analysis of scraped file
import time
import praw
import datetime
import pickle
import requests
import json
import pprint
from wordcloud import WordCloud, STOPWORDS
WIDTH = 1280
HEIGHT = 720
NUM_OF_WORDS = 250
def sentiAnalysis(isUrl, dataToAnalyse):
    """
    Sentiment analysis of a URL or a text snippet via the Mashape
    "loudelement" NLP web service.

    Parameters:
        isUrl -- True to analyse the page at `dataToAnalyse`;
                 False to analyse `dataToAnalyse` as raw text.
        dataToAnalyse -- the URL or the text to score.

    Returns a '<sentiment-text>,<sentiment-score>' string, or '' when the
    request or response parsing fails.
    """
    print ('Entering...\n')
    # SECURITY: the API key is hard-coded in source; it should be loaded
    # from an environment variable or a config file instead of shipping
    # in the repository.
    header = {"X-Mashape-Key": "euAnPjoRGMmshFM35j8LaStTkTLwp1cvGc9jsnfgvjFO2KRD7h", "Accept": "application/json"}
    # if/else instead of the original duplicated `if isUrl / if not isUrl`.
    if isUrl:
        postURL = 'https://loudelement-free-natural-language-processing-service.p.mashape.com/nlp-url/?url='
    else:
        postURL = 'https://loudelement-free-natural-language-processing-service.p.mashape.com/nlp-text/?text='
    # NOTE(review): dataToAnalyse is appended without URL-escaping;
    # consider percent-encoding text payloads.
    url_params = postURL + dataToAnalyse
    try:
        response = requests.get(url_params, headers=header)
        tempDict = response.json()
        print ('Leaving...' + '\n')
        return (str(tempDict['sentiment-text']) + ',' + str(tempDict['sentiment-score']))
    except Exception as e:
        # Network or JSON-shape failures are reported, not raised.
        print ('Error in sentimental analysis...\n')
        print ('Exception: ' + str(e) + '\n' + 'Leaving..' + '\n')
        return ''
def easyTime(timestamp):
    """Format a POSIX timestamp as a human-readable UTC string."""
    return datetime.datetime.utcfromtimestamp(timestamp).strftime(
        "%b %d %Y %H:%M:%S")
def processSubmissions(content, outFile):
    """
    Function to process submissions and write necessary stuff onto a .csv file

    Iterates `content` (presumably praw submission objects -- TODO confirm)
    in reverse order and writes one 'ups,downs' line per submission.
    Commas/newlines are scrubbed from URL and title fields before any
    write. On any failure the loop sleeps 30 s and moves on (rate-limit
    recovery). Always returns True.
    """
    submissions = reversed(content)
    for submission in submissions:
        try:
            # Pre-processing of text
            sub_url = str(submission.url)
            sub_url = sub_url.replace("\n", "")
            sub_url = sub_url.replace(",", "")
            sub_title = str(submission.title)
            sub_title = sub_title.replace("\n", "")
            # Titles keep commas as ';' so the CSV stays well-formed.
            sub_title = sub_title.replace(",", ";")
            #sub_sentiAnalysis = sentiAnalysis(False, sub_title)
            # All writes
            """
            outFile.write(str(submission.author) + ',')
            outFile.write(str(submission.num_comments) + ',')
            outFile.write(str(submission.score) + ',')
            outFile.write(str(submission.num_reports) + ',')
            outFile.write(str(easyTime(submission.created_utc)) + ',')
            outFile.write(str(submission.is_self) + ',')
            outFile.write(str(submission.over_18) + ',')
            outFile.write(str(submission.gilded) + ',')
            outFile.write(str(submission.link_flair_text) + ',')
            outFile.write(sub_url + ',')
            outFile.write(str(submission.fullname) + ',')
            outFile.write(str(submission.permalink) + ',')
            outFile.write(str(sub_title) + ',')
            outFile.write(str(sub_sentiAnalysis) + '\n')
            """
            # Only the vote counts are currently persisted.
            outFile.write(str(submission.ups) + ',')
            outFile.write(str(submission.downs) + '\n')
        except Exception as e:
            # Broad catch: any API hiccup pauses the scrape briefly.
            print ('Something wrong with Reddit\n')
            print ('Sleeping for 30 seconds . . .\n')
            time.sleep(30)
    return True;
def getSubmissionTextAsSingleString(content):
    """Concatenate the titles of all link (non-self) submissions.

    Iterates *content* in reverse order and joins each qualifying title
    followed by a single space, so the result has a trailing space.
    """
    parts = []
    for entry in reversed(content):
        # Deliberately an identity test: anything other than the exact
        # True singleton counts as a link submission.
        if entry.is_self is not True:
            parts.append(entry.title)
            parts.append(' ')
    return ''.join(parts)
def makeCloud(text, imgFile, words):
    """Render a word cloud from *text* and save it as '<imgFile>.jpeg'.

    Args:
        text: corpus to visualize (single string).
        imgFile: output file name without extension.
        words: extra stop-words to exclude, on top of wordcloud's STOPWORDS.

    Side effects: opens the rendered image in an external viewer and writes
    the jpeg to the current directory.
    """
    excludewords = STOPWORDS.copy()
    for word in words:
        excludewords.add(word)
    # WIDTH / HEIGHT / NUM_OF_WORDS are module-level rendering constants.
    wordcloud = WordCloud(max_words=NUM_OF_WORDS, width=WIDTH, height=HEIGHT, stopwords=excludewords).generate(text)
    image = wordcloud.to_image()
    image.show()
    image.save(imgFile + '.jpeg')
def writeFreq(text, outFile, words):
    """Write per-word frequencies of *text* to *outFile* as 'word,count' lines.

    Args:
        text: corpus to tokenize.
        outFile: open writable file object.
        words: extra stop-words to exclude, on top of wordcloud's STOPWORDS.
    """
    excludewords = STOPWORDS.copy()
    for word in words:
        excludewords.add(word)
    wordcloud = WordCloud(max_words=NUM_OF_WORDS, stopwords=excludewords)
    # NOTE(review): older wordcloud releases return a list of (word, count)
    # tuples from process_text, as assumed by the loop below; newer releases
    # return a dict -- confirm the pinned version.
    freqList = wordcloud.process_text(text)
    for item in freqList:
        outFile.write(item[0] + ',' + str(item[1]) + '\n')
def fetchAndProcessComments(content, outFile):
    """Fetch every comment of each submission and append one CSV row per
    comment (comment fields, a blank column, then the parent submission's
    fields) to *outFile*.

    Submissions newer than the hard-coded UTC cutoff 1418545890 (mid-December
    2014 -- presumably the point where a previous run stopped; TODO confirm)
    are skipped.  Network failures retry forever with a 60 s back-off.

    Returns True when all submissions have been processed.
    """
    submissions = reversed(content)
    r = praw.Reddit('/r/india scraping by /u/kashre001')
    #sub_sentiAnalysis = sentiAnalysis(False, sub_title)
    for submission in submissions:
        # Skip anything created after the resume cutoff.
        if (int(submission.created_utc) > 1418545890):
            continue
        # Pre-processing of text: strip newlines/commas so fields don't
        # break the comma-separated layout.
        sub_url = str(submission.url)
        sub_url = sub_url.replace("\n", "")
        sub_url = sub_url.replace(",", "")
        sub_title = str(submission.title)
        sub_title = sub_title.replace("\n", "")
        sub_title = sub_title.replace(",", ";")
        # Get all comments for this submission; retry indefinitely on error.
        Done = True
        while Done:
            try:
                new_submission = r.get_submission(submission_id = submission.id)
                new_submission.replace_more_comments(limit=None, threshold=0)
                all_flat_comments = praw.helpers.flatten_tree(new_submission.comments)
                break
            except Exception as e:
                print ('Something went wrong...\n Sleeping for 60 seconds...\n')
                time.sleep(60)
        for comment in all_flat_comments:
            # Pre-processing of comment body (same CSV sanitization).
            comment_body = str(comment.body)
            comment_body = comment_body.replace("\n", "")
            comment_body = comment_body.replace(",", ";")
            # Rebuild a direct permalink: submission permalink minus its
            # trailing slug (assumed 17 chars -- TODO confirm) + comment id.
            comment_permalink = str(submission.permalink[:-17]) + str(comment.id)
            # All writes: comment columns first...
            outFile.write(str(comment.author) + ',')
            outFile.write(str(easyTime(comment.created_utc)) + ',')
            outFile.write(str(comment.score) + ',')
            outFile.write(str(comment.controversiality) + ',')
            outFile.write(str(comment.gilded) + ',')
            outFile.write(str(comment.id) + ',')
            outFile.write(comment_permalink + ',')
            outFile.write(str(comment.parent_id) + ',')
            outFile.write(str(comment.distinguished) + ',')
            outFile.write(str(comment_body) + ',')
            # ...then an empty separator column, then the submission columns.
            outFile.write(',')
            outFile.write(str(submission.author) + ',')
            outFile.write(str(submission.num_comments) + ',')
            outFile.write(str(submission.score) + ',')
            outFile.write(str(submission.num_reports) + ',')
            outFile.write(str(easyTime(submission.created_utc)) + ',')
            outFile.write(str(submission.is_self) + ',')
            outFile.write(str(submission.over_18) + ',')
            outFile.write(str(submission.gilded) + ',')
            outFile.write(str(submission.link_flair_text) + ',')
            outFile.write(str(submission.domain) + ',')
            outFile.write(str(submission.fullname) + ',')
            outFile.write(str(submission.permalink) + ',')
            outFile.write(str(sub_title) + '\n')
    return True
def main():
    """Load pickled submissions and dump all of their comments to a CSV file.

    Reads 'submissions.p' (a pickled list of praw submissions) from the
    current directory and writes 'RandiaComments5.csv'.
    """
    inFile = open('submissions.p','rb')
    #outFile = open('RandiaxRand.csv','w',encoding='utf-8')
    #freqFile = open('RandiaFreq.csv','w',encoding='utf-8')
    txtFile = open('RandiaComments5.csv','w',encoding='utf-8')
    imgFile = 'randialinkcloud'
    words_to_be_excluded = ['p','np','r','s','thread','say','will','need','india\'','t','u','modi\'','k','e','go',\
                            'see','x','still','vs','says','may','.']
    content = pickle.load(inFile)
    print (len(content))
    #processSubmissions(content, outFile)
    #text = getSubmissionTextAsSingleString(content)
    #makeCloud(text, imgFile, words_to_be_excluded)
    #writeFreq(text, freqFile, words_to_be_excluded)
    if(fetchAndProcessComments(content, txtFile)):
        print ("SUCESS BRO")
    print ('Total no. of submissions: ' + str(len(content)))
    # BUG FIX: the original called outFile.close(), but outFile's creation is
    # commented out above, so that line raised NameError. Close the file that
    # was actually opened.
    txtFile.close()
    inFile.close()
# Script entry point: runs the full scrape-and-dump on import/execution.
main()
| {
"repo_name": "KaushikR/SubredditStats",
"path": "RandiaAnalysis.py",
"copies": "1",
"size": "8767",
"license": "bsd-3-clause",
"hash": -3416135192729312000,
"line_mean": 33.5157480315,
"line_max": 140,
"alpha_frac": 0.5664423406,
"autogenerated": false,
"ratio": 3.7132570944515035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9687278875421873,
"avg_score": 0.018484111925926132,
"num_lines": 254
} |
"""Analysis of text input into executable blocks.
The main class in this module, :class:`InputSplitter`, is designed to break
input from either interactive, line-by-line environments or block-based ones,
into standalone blocks that can be executed by Python as 'single' statements
(thus triggering sys.displayhook).
A companion, :class:`IPythonInputSplitter`, provides the same functionality but
with full support for the extended IPython syntax (magics, system calls, etc).
For more details, see the class docstring below.
Syntax Transformations
----------------------
One of the main jobs of the code in this file is to apply all syntax
transformations that make up 'the IPython language', i.e. magics, shell
escapes, etc. All transformations should be implemented as *fully stateless*
entities, that simply take one line as their input and return a line.
Internally for implementation purposes they may be a normal function or a
callable object, but the only input they receive will be a single line and they
should only return a line, without holding any data-dependent state between
calls.
As an example, the EscapedTransformer is a class so we can more clearly group
together the functionality of dispatching to individual functions based on the
starting escape character, but the only method for public use is its call
method.
ToDo
----
- Should we make push() actually raise an exception once push_accepts_more()
returns False?
- Naming cleanups. The tr_* names aren't the most elegant, though now they are
at least just attributes of a class so not really very exposed.
- Think about the best way to support dynamic things: automagic, autocall,
macros, etc.
- Think of a better heuristic for the application of the transforms in
IPythonInputSplitter.push() than looking at the buffer ending in ':'. Idea:
track indentation change events (indent, dedent, nothing) and apply them only
if the indentation went up, but not otherwise.
- Think of the cleanest way for supporting user-specified transformations (the
user prefilters we had before).
Authors
-------
* Fernando Perez
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import ast
import codeop
import re
import sys
# IPython modules
from IPython.utils.py3compat import cast_unicode
from IPython.core.inputtransformer import (leading_indent,
classic_prompt,
ipy_prompt,
strip_encoding_cookie,
cellmagic,
assemble_logical_lines,
help_end,
escaped_commands,
assign_from_magic,
assign_from_system,
assemble_python_lines,
)
# These are available in this module for backwards compatibility.
from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# FIXME: These are general-purpose utilities that later can be moved to the
# general ward. Kept here for now because we're being very strict about test
# coverage with this code, and this lets us ensure that we keep 100% coverage
# while developing.
# compiled regexps for autoindent management
dedent_re = re.compile('|'.join([
    r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
    r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
    r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
    r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
    r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
    r'^\s+break\s*$', # break (optionally followed by trailing spaces)
    r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
    ]))
# leading run of horizontal whitespace (spaces/tabs, but not newlines)
ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
# before pure comments
# BUG FIX: raw string literal -- '\#' in a plain string is an invalid escape
# sequence (DeprecationWarning since Python 3.6, an error in the future).
comment_line_re = re.compile(r'^\s*\#')
def num_ini_spaces(s):
    """Return the number of initial spaces in a string.

    Note that tabs are counted as a single space.  For now, we do *not*
    support mixing of tabs and spaces in the user's input.

    Parameters
    ----------
    s : string

    Returns
    -------
    n : int
    """
    ini_spaces = ini_spaces_re.match(s)
    if ini_spaces:
        return ini_spaces.end()
    else:
        return 0
def last_blank(src):
    """Determine if the input source ends in a blank.

    A blank is either a newline or a line consisting of whitespace.

    Parameters
    ----------
    src : string
      A single or multiline string.
    """
    if not src:
        return False
    tail = src.splitlines()[-1]
    return tail == '' or tail.isspace()
last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
def last_two_blanks(src):
    """Determine if the input source ends in two blanks.

    A blank is either a newline or a line consisting of whitespace.

    Parameters
    ----------
    src : string
      A single or multiline string.
    """
    if not src:
        return False
    # A single regexp over the raw source proved too fragile, so normalize
    # first: keep only the final two lines and prepend '###\n' as a stand-in
    # for everything above them, then test that reduced text with the two
    # anchored patterns.  If you change this logic, re-run the whole test
    # suite first!
    reduced = '\n'.join(['###\n'] + src.splitlines()[-2:])
    return bool(last_two_blanks_re.match(reduced) or
                last_two_blanks_re2.match(reduced))
def remove_comments(src):
    """Strip everything from each '#' to end-of-line in *src*.

    Note: comments are NOT recognized inside of strings!

    Parameters
    ----------
    src : string
      A single or multiline input string.

    Returns
    -------
    String with all Python comments removed.
    """
    cleaned = re.sub('#.*', '', src)
    return cleaned
def get_input_encoding():
    """Return the default standard input encoding.

    If sys.stdin has no encoding, 'ascii' is returned."""
    # Some environments (pipes, frozen apps, test harnesses) leave
    # sys.stdin.encoding missing or None; fall back to a safe default.
    encoding = getattr(sys.stdin, 'encoding', None)
    return 'ascii' if encoding is None else encoding
#-----------------------------------------------------------------------------
# Classes and functions for normal Python syntax handling
#-----------------------------------------------------------------------------
class InputSplitter(object):
    """An object that can accumulate lines of Python source before execution.

    This object is designed to be fed python source line-by-line, using
    :meth:`push`. It will return on each push whether the currently pushed
    code could be executed already. In addition, it provides a method called
    :meth:`push_accepts_more` that can be used to query whether more input
    can be pushed into a single interactive block.

    This is a simple example of how an interactive terminal-based client can use
    this tool::

        isp = InputSplitter()
        while isp.push_accepts_more():
            indent = ' '*isp.indent_spaces
            prompt = '>>> ' + indent
            line = indent + raw_input(prompt)
            isp.push(line)
        print 'Input source was:\n', isp.source_reset(),
    """
    # Number of spaces of indentation computed from input that has been pushed
    # so far. This is the attributes callers should query to get the current
    # indentation level, in order to provide auto-indent facilities.
    indent_spaces = 0
    # String, indicating the default input encoding. It is computed by default
    # at initialization time via get_input_encoding(), but it can be reset by a
    # client with specific knowledge of the encoding.
    encoding = ''
    # String where the current full source input is stored, properly encoded.
    # Reading this attribute is the normal way of querying the currently pushed
    # source code, that has been properly encoded.
    source = ''
    # Code object corresponding to the current source. It is automatically
    # synced to the source, so it can be queried at any time to obtain the code
    # object; it will be None if the source doesn't compile to valid Python.
    code = None
    # Private attributes
    # List with lines of input accumulated so far
    _buffer = None
    # Command compiler
    _compile = None
    # Mark when input has changed indentation all the way back to flush-left
    _full_dedent = False
    # Boolean indicating whether the current block is complete
    _is_complete = None
    def __init__(self):
        """Create a new InputSplitter instance.
        """
        self._buffer = []
        self._compile = codeop.CommandCompiler()
        self.encoding = get_input_encoding()
    def reset(self):
        """Reset the input buffer and associated state."""
        self.indent_spaces = 0
        # In-place clear so external references to the buffer stay valid.
        self._buffer[:] = []
        self.source = ''
        self.code = None
        self._is_complete = False
        self._full_dedent = False
    def source_reset(self):
        """Return the input source and perform a full reset.
        """
        out = self.source
        self.reset()
        return out
    def push(self, lines):
        """Push one or more lines of input.

        This stores the given lines and returns a status code indicating
        whether the code forms a complete Python block or not.

        Any exceptions generated in compilation are swallowed, but if an
        exception was produced, the method returns True.

        Parameters
        ----------
        lines : string
          One or more lines of Python input.

        Returns
        -------
        is_complete : boolean
          True if the current input source (the result of the current input
          plus prior inputs) forms a complete Python execution block. Note that
          this value is also stored as a private attribute (``_is_complete``), so it
          can be queried at any time.
        """
        self._store(lines)
        source = self.source
        # Before calling _compile(), reset the code object to None so that if an
        # exception is raised in compilation, we don't mislead by having
        # inconsistent code/source attributes.
        self.code, self._is_complete = None, None
        # Honor termination lines properly: an explicit backslash continuation
        # means the statement cannot be complete yet.
        if source.endswith('\\\n'):
            return False
        self._update_indent(lines)
        try:
            self.code = self._compile(source, symbol="exec")
        # Invalid syntax can produce any of a number of different errors from
        # inside the compiler, so we have to catch them all. Syntax errors
        # immediately produce a 'ready' block, so the invalid Python can be
        # sent to the kernel for evaluation with possible ipython
        # special-syntax conversion.
        except (SyntaxError, OverflowError, ValueError, TypeError,
                MemoryError):
            self._is_complete = True
        else:
            # Compilation didn't produce any exceptions (though it may not have
            # given a complete code object)
            self._is_complete = self.code is not None
        return self._is_complete
    def push_accepts_more(self):
        """Return whether a block of interactive input can accept more input.

        This method is meant to be used by line-oriented frontends, who need to
        guess whether a block is complete or not based solely on prior and
        current input lines.  The InputSplitter considers it has a complete
        interactive block and will not accept more input when either:

        * A SyntaxError is raised
        * The code is complete and consists of a single line or a single
          non-compound statement
        * The code is complete and has a blank line at the end

        If the current input produces a syntax error, this method immediately
        returns False but does *not* raise the syntax error exception, as
        typically clients will want to send invalid syntax to an execution
        backend which might convert the invalid syntax into valid Python via
        one of the dynamic IPython mechanisms.
        """
        # With incomplete input, unconditionally accept more
        # A syntax error also sets _is_complete to True - see push()
        if not self._is_complete:
            #print("Not complete")  # debug
            return True
        # The user can make any (complete) input execute by leaving a blank line
        last_line = self.source.splitlines()[-1]
        if (not last_line) or last_line.isspace():
            #print("Blank line")  # debug
            return False
        # If there's just a single line or AST node, and we're flush left, as is
        # the case after a simple statement such as 'a=1', we want to execute it
        # straight away.
        if self.indent_spaces==0:
            if len(self.source.splitlines()) <= 1:
                return False
            try:
                code_ast = ast.parse(u''.join(self._buffer))
            except Exception:
                #print("Can't parse AST")  # debug
                return False
            else:
                if len(code_ast.body) == 1 and \
                                    not hasattr(code_ast.body[0], 'body'):
                    #print("Simple statement")  # debug
                    return False
        # General fallback - accept more code
        return True
    #------------------------------------------------------------------------
    # Private interface
    #------------------------------------------------------------------------
    def _find_indent(self, line):
        """Compute the new indentation level for a single line.

        Parameters
        ----------
        line : str
          A single new line of non-whitespace, non-comment Python input.

        Returns
        -------
        indent_spaces : int
          New value for the indent level (it may be equal to self.indent_spaces
          if indentation doesn't change.

        full_dedent : boolean
          Whether the new line causes a full flush-left dedent.
        """
        indent_spaces = self.indent_spaces
        full_dedent = self._full_dedent
        inisp = num_ini_spaces(line)
        if inisp < indent_spaces:
            indent_spaces = inisp
            if indent_spaces <= 0:
                #print 'Full dedent in text',self.source # dbg
                full_dedent = True
        # A trailing colon opens a new block (+4); dedent keywords (return,
        # pass, break, ...) close one (-4).
        if line.rstrip()[-1] == ':':
            indent_spaces += 4
        elif dedent_re.match(line):
            indent_spaces -= 4
            if indent_spaces <= 0:
                full_dedent = True
        # Safety
        if indent_spaces < 0:
            indent_spaces = 0
            #print 'safety' # dbg
        return indent_spaces, full_dedent
    def _update_indent(self, lines):
        # Only non-blank, non-comment lines influence the indent level.
        for line in remove_comments(lines).splitlines():
            if line and not line.isspace():
                self.indent_spaces, self._full_dedent = self._find_indent(line)
    def _store(self, lines, buffer=None, store='source'):
        """Store one or more lines of input.

        If input lines are not newline-terminated, a newline is automatically
        appended."""
        if buffer is None:
            buffer = self._buffer
        if lines.endswith('\n'):
            buffer.append(lines)
        else:
            buffer.append(lines+'\n')
        # Keep the named attribute (default: self.source) in sync with buffer.
        setattr(self, store, self._set_source(buffer))
    def _set_source(self, buffer):
        return u''.join(buffer)
class IPythonInputSplitter(InputSplitter):
    """An input splitter that recognizes all of IPython's special syntax."""
    # String with raw, untransformed input.
    source_raw = ''
    # Flag to track when a transformer has stored input that it hasn't given
    # back yet.
    transformer_accumulating = False
    # Flag to track when assemble_python_lines has stored input that it hasn't
    # given back yet.
    within_python_line = False
    # Private attributes
    # List with lines of raw input accumulated so far.
    _buffer_raw = None
    def __init__(self, line_input_checker=True, physical_line_transforms=None,
                 logical_line_transforms=None, python_line_transforms=None):
        """Build the three-stage transformer pipeline.

        Each stage is a list of stateful transformer objects applied in
        order; callers may replace any stage wholesale via the keyword
        arguments.
        """
        super(IPythonInputSplitter, self).__init__()
        self._buffer_raw = []
        self._validate = True
        if physical_line_transforms is not None:
            self.physical_line_transforms = physical_line_transforms
        else:
            self.physical_line_transforms = [leading_indent(),
                                             classic_prompt(),
                                             ipy_prompt(),
                                             strip_encoding_cookie(),
                                             cellmagic(end_on_blank_line=line_input_checker),
                                             ]
        self.assemble_logical_lines = assemble_logical_lines()
        if logical_line_transforms is not None:
            self.logical_line_transforms = logical_line_transforms
        else:
            self.logical_line_transforms = [help_end(),
                                            escaped_commands(),
                                            assign_from_magic(),
                                            assign_from_system(),
                                            ]
        self.assemble_python_lines = assemble_python_lines()
        if python_line_transforms is not None:
            self.python_line_transforms = python_line_transforms
        else:
            # We don't use any of these at present
            self.python_line_transforms = []
    @property
    def transforms(self):
        "Quick access to all transformers."
        return self.physical_line_transforms + \
            [self.assemble_logical_lines] + self.logical_line_transforms + \
            [self.assemble_python_lines] + self.python_line_transforms
    @property
    def transforms_in_use(self):
        """Transformers, excluding logical line transformers if we're in a
        Python line."""
        t = self.physical_line_transforms[:]
        if not self.within_python_line:
            t += [self.assemble_logical_lines] + self.logical_line_transforms
        return t + [self.assemble_python_lines] + self.python_line_transforms
    def reset(self):
        """Reset the input buffer and associated state."""
        super(IPythonInputSplitter, self).reset()
        self._buffer_raw[:] = []
        self.source_raw = ''
        self.transformer_accumulating = False
        self.within_python_line = False
        for t in self.transforms:
            t.reset()
    def flush_transformers(self):
        # Drain input still buffered inside the transformer pipeline and
        # append whatever it yields to self.source.
        def _flush(transform, out):
            # transform.reset() returns any pending transformed text (or None).
            if out is not None:
                tmp = transform.push(out)
                return tmp or transform.reset() or None
            else:
                return transform.reset() or None
        out = None
        for t in self.transforms_in_use:
            out = _flush(t, out)
        if out is not None:
            self._store(out)
    def source_raw_reset(self):
        """Return input and raw source and perform a full reset.
        """
        self.flush_transformers()
        out = self.source
        out_r = self.source_raw
        self.reset()
        return out, out_r
    def source_reset(self):
        """Return the transformed source and perform a full reset."""
        self.flush_transformers()
        return super(IPythonInputSplitter, self).source_reset()
    def push_accepts_more(self):
        # While a transformer holds buffered input, the block cannot be done.
        if self.transformer_accumulating:
            return True
        else:
            return super(IPythonInputSplitter, self).push_accepts_more()
    def transform_cell(self, cell):
        """Process and translate a cell of input.
        """
        self.reset()
        self.push(cell)
        return self.source_reset()
    def push(self, lines):
        """Push one or more lines of IPython input.

        This stores the given lines and returns a status code indicating
        whether the code forms a complete Python block or not, after processing
        all input lines for special IPython syntax.

        Any exceptions generated in compilation are swallowed, but if an
        exception was produced, the method returns True.

        Parameters
        ----------
        lines : string
          One or more lines of Python input.

        Returns
        -------
        is_complete : boolean
          True if the current input source (the result of the current input
          plus prior inputs) forms a complete Python execution block. Note that
          this value is also stored as a private attribute (_is_complete), so it
          can be queried at any time.
        """
        # We must ensure all input is pure unicode
        lines = cast_unicode(lines, self.encoding)
        # ''.splitlines() --> [], but we need to push the empty line to transformers
        lines_list = lines.splitlines()
        if not lines_list:
            lines_list = ['']
        # Store raw source before applying any transformations to it. Note
        # that this must be done *after* the reset() call that would otherwise
        # flush the buffer.
        self._store(lines, self._buffer_raw, 'source_raw')
        for line in lines_list:
            out = self.push_line(line)
        return out
    def push_line(self, line):
        """Run one physical line through the pipeline; returns the completeness
        status (False while any transformer is still accumulating input)."""
        buf = self._buffer
        def _accumulating(dbg):
            #print(dbg)
            self.transformer_accumulating = True
            return False
        for transformer in self.physical_line_transforms:
            line = transformer.push(line)
            if line is None:
                return _accumulating(transformer)
        if not self.within_python_line:
            line = self.assemble_logical_lines.push(line)
            if line is None:
                return _accumulating('acc logical line')
            for transformer in self.logical_line_transforms:
                line = transformer.push(line)
                if line is None:
                    return _accumulating(transformer)
        line = self.assemble_python_lines.push(line)
        if line is None:
            self.within_python_line = True
            return _accumulating('acc python line')
        else:
            self.within_python_line = False
        for transformer in self.python_line_transforms:
            line = transformer.push(line)
            if line is None:
                return _accumulating(transformer)
        #print("transformers clear") #debug
        self.transformer_accumulating = False
        return super(IPythonInputSplitter, self).push(line)
| {
"repo_name": "noslenfa/tdjangorest",
"path": "uw/lib/python2.7/site-packages/IPython/core/inputsplitter.py",
"copies": "2",
"size": "24271",
"license": "apache-2.0",
"hash": -6093731128193159000,
"line_mean": 35.7186081694,
"line_max": 93,
"alpha_frac": 0.5823822669,
"autogenerated": false,
"ratio": 4.713730821518742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006152851171614502,
"num_lines": 661
} |
#imports
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import cm as cm
import seaborn as sns
from sklearn import linear_model
# Global seaborn styling for all plots below.
sns.set(style='white')
# NASA airfoil self-noise dataset (UCI repository); path is relative to the
# working directory -- assumes the script runs from the project root (TODO confirm).
airfoil = pd.read_csv('./data/airfoil_self_noise.csv')
# printing the first head of the airfoil dataset
print(airfoil.head())
# check if any missing or NaN values in the dataset
print(airfoil.isnull().sum())
# finding correlation between data set
print(airfoil.corr())
#plotting correlation matrix between dataset
def correlation_df(df):
    """Display the dataframe's correlation matrix as a labelled image plot.

    Args:
        df (pandas.DataFrame): dataset with the six airfoil columns named in
            `labels` below (note: 'Frquency(Hz)' typo kept from the original
            labels on purpose -- fix alongside any saved-figure consumers).

    Side effects: opens an interactive matplotlib window.
    """
    fig = plt.figure(figsize=(10, 60))
    ax1 = fig.add_subplot(111)
    cmap = cm.get_cmap('jet', 30)
    cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
    ax1.grid(True)
    plt.title('Airfoil UCI Regression Correlation Chart')
    labels=['Frquency(Hz)','Angle_of_Attack','Chord_Length','Free_stream_velocity','Displacement','Sound_pressure_level']
    # BUG FIX: without pinning tick positions first, matplotlib's auto-locator
    # may place a different number of ticks than labels, so the labels land on
    # the wrong matrix cells. Pin one tick per row/column before labelling.
    ax1.set_xticks(range(len(labels)))
    ax1.set_yticks(range(len(labels)))
    ax1.set_xticklabels(labels,fontsize=6)
    ax1.set_yticklabels(labels,fontsize=6)
    # Add colorbar, to specify tick locations to match desired ticklabels
    fig.colorbar(cax, ticks=[-1.0,0,.75,.8,.85,.90,.95,1])
    plt.show()
#correlation_df(airfoil)
#print(airfoil.columns)
def correlation_df_seabrn(df):
    """Display the dataframe's correlation matrix as a seaborn heatmap.

    Args:
        df (pandas.DataFrame): dataset whose pairwise correlations to show.

    Side effects: opens an interactive matplotlib window.
    """
    c = df.corr()
    # BUG FIX: `sns.plt` was an undocumented alias removed in seaborn 0.9+;
    # use the directly imported matplotlib.pyplot instead (same behavior).
    plt.title('Airfoil UCI Regression Correlation Chart - Heatmap')
    sns.heatmap(c)
    plt.yticks(rotation=0)
    plt.show()
correlation_df_seabrn(airfoil)
| {
"repo_name": "rishuatgithub/MLPy",
"path": "airfoil_uci_regression.py",
"copies": "1",
"size": "1510",
"license": "apache-2.0",
"hash": -7804168195865149000,
"line_mean": 28.6078431373,
"line_max": 121,
"alpha_frac": 0.721192053,
"autogenerated": false,
"ratio": 3.0382293762575454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4259421429257545,
"avg_score": null,
"num_lines": null
} |
"""Analysis output generation, common for all model/pipeline variants
"""
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from os import path
import io
import jinja2
import numpy as np
from matplotlib import pyplot as plt
import seaborn
from ozelot import client, config
# global jinja2 environment; templates are loaded from the 'templates'
# directory next to this file
jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(path.join(path.dirname(__file__), 'templates')))
# global output directory, default is directory containing this file
out_dir = path.dirname(__file__)
def fig_to_svg(fig):
    """Helper function to convert matplotlib figure to SVG string

    Args:
        fig: a matplotlib figure (anything with a ``savefig`` method)

    Returns:
        str: figure as SVG string
    """
    svg_buffer = io.StringIO()
    fig.savefig(svg_buffer, format='svg')
    svg_buffer.seek(0)
    return svg_buffer.getvalue()
def pixels_to_inches(size):
    """Helper function: compute figure size in inches @ 72 dpi

    Args:
        size (tuple(int, int)): figure size in pixels (width, height)

    Returns:
        tuple(float, float): figure size in inches
    """
    return (size[0] / 72.0, size[1] / 72.0)
def plots_html_page(query_module):
    """Generate analysis output as html page

    Renders the 'analysis.html' template with SVG plots embedded in the
    template context, and writes the result next to this module (out_dir).

    Args:
        query_module (module): module to use for querying data for the
            desired model/pipeline variant, e.g. leonardo.standard.queries
    """
    # page template
    template = jenv.get_template("analysis.html")
    # container for template context
    context = dict(extended=config.EXTENDED)
    # a database client/session to run queries in
    cl = client.get_client()
    session = cl.create_session()
    # general styling
    seaborn.set_style('whitegrid')
    #
    # plot: painting area by decade, with linear regression
    #
    decade_df = query_module.decade_query()
    pix_size = pixels_to_inches((600, 400))
    ax = seaborn.lmplot(x='decade', y='area', data=decade_df,
                        size=pix_size[1], aspect=pix_size[0] / pix_size[1],
                        scatter_kws={"s": 30, "alpha": 0.3})
    ax.set(xlabel='Decade', ylabel='Area, m^2')
    # embed current figure as inline SVG, then release it
    context['area_by_decade_svg'] = fig_to_svg(plt.gcf())
    plt.close('all')
    #
    # plot: painting area by gender, with logistic regression
    # (only available in the extended pipeline variant)
    #
    if config.EXTENDED:
        gender_df = query_module.gender_query()
        pix_size = pixels_to_inches((600, 400))
        g = seaborn.FacetGrid(gender_df, hue="gender", margin_titles=True,
                              size=pix_size[1], aspect=pix_size[0] / pix_size[1])
        bins = np.linspace(0, 5, 30)
        g.map(plt.hist, "area", bins=bins, lw=0, alpha=0.5, normed=True)
        g.axes[0, 0].set_xlabel('Area, m^2')
        g.axes[0, 0].set_ylabel('Percentage of paintings')
        context['area_by_gender_svg'] = fig_to_svg(plt.gcf())
        plt.close('all')
    #
    # render template
    #
    out_file = path.join(out_dir, "analysis.html")
    html_content = template.render(**context)
    with open(out_file, 'w') as f:
        f.write(html_content)
    # done, clean up
    plt.close('all')
    session.close()
| {
"repo_name": "trycs/ozelot",
"path": "examples/leonardo/leonardo/common/analysis.py",
"copies": "1",
"size": "3069",
"license": "mit",
"hash": -2048404598603588400,
"line_mean": 26.1592920354,
"line_max": 105,
"alpha_frac": 0.6301726947,
"autogenerated": false,
"ratio": 3.4677966101694917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9579438566241576,
"avg_score": 0.003706147725583065,
"num_lines": 113
} |
# analysis over all patients
# miscellaneous
def zeros(n):
    """Return a list of n integer zeros (empty list for n <= 0).

    Args:
        n (int): desired length.
    """
    # Idiomatic replacement for the original element-by-element append loop.
    return [0] * n
# initializing -
# function takes no args (opens patient CSV files).
# Function returns a list containing one list per patient. Each patient list contains all unique tuples that are found in the patient's CSV file.
def initialize():
    """Read patient01.csv .. patient10.csv from the working directory.

    Each file is carriage-return separated with a header line; each data line
    is '<breathing_rate> <activity_code>'.

    Returns:
        A list of 10 per-patient lists containing only the [rate, activity]
        pairs where either value changed from the previous line (runs of
        identical readings are collapsed).

    Note: the original also accumulated an unfiltered `data` structure that
    was never returned; it is kept for behavioral parity of side effects on
    parse errors, but only `output` is exposed.
    """
    data = []
    output = []
    for i in range(1, 11):
        if i == 10:
            patient = "patient10.csv"
        else:
            patient = "patient" + "0" + str(i) + ".csv"
        # BUG FIX: the original wrote `f.close` (no parentheses), which never
        # closed the file; a `with` block guarantees closure.
        with open(patient) as f:
            data1 = f.read()
        data.append([])
        lines = data1.split("\r")
        lines.pop(0)  # drop header line
        output1 = []
        prevact = '12'  # string sentinel that never equals an int activity
        prevbr = 0
        for line in lines:
            token = line.split()
            act = int(token[1])
            br = float(token[0].strip())
            token = [br, act]
            # BUG FIX: '<>' is Python 2-only syntax; use '!='.
            if prevact != act or prevbr != br:
                output1.append(token)
                prevact = act
                prevbr = br
            data[i-1].append(token)
        output.append(output1)
    return output
# print initialize()
# Rank by exercise
# Function ranks patients per exercise and gives scores per exercise. Scoring is as outlined below the function.
# ASSUMPTIONS - if there is no data or breathing rate is zero, it is assumed that the patient was not able to carry out the required exercise. 0 points are awarded in this case.
def RankByPoints(data):
    """Rank the ten patients by per-exercise scores.

    For each exercise, patients are ranked by average breathing rate
    (lowest is best): 1st place earns 10 points, 2nd earns 9, and so on.
    Patients with no data or a zero breathing rate for an exercise are
    assumed not to have performed it and earn 0 points for it.

    data -- list of per-patient lists of [breathing_rate, activity] pairs,
            as produced by initialize()
    Returns the patient numbers ordered from highest to lowest total score.
    (Rewrite replaces the Python-2-only '<>' operator with '!=' and stops
    shadowing the builtins 'sum' and 'tuple'.)
    """
    patients = range(1, 11)
    scores = [[patient, 0] for patient in patients]
    for exercise in range(1, 11):
        averages = []
        not_participated = []
        for patient in patients:
            readings = [pair[0] for pair in data[patient - 1]
                        if pair[1] == exercise]
            total = sum(readings)
            if not readings or total == 0:
                not_participated.append([patient, 0])
            else:
                averages.append([patient, total / len(readings)])
        # Lowest average first; non-participants go to the end with 0.
        averages = sorted(averages, key=lambda entry: entry[1])
        averages.extend(not_participated)
        for rank in range(10):
            if averages[rank][1] != 0:
                scores[averages[rank][0] - 1][1] += 10 - rank
    scores = sorted(scores, key=lambda entry: entry[1], reverse=True)
    return [entry[0] for entry in scores]
# scoring is done as follows
# 0 br or no data => 0 points
# otherwise, patients are ranked from lowest to highest breathing rate
# 1st place => 10 points
# 2nd place => 9 points
#.. and so on.
# Module-level smoke run (Python 2 print statement); this reads the patient
# CSV files from the working directory whenever the script is executed.
print RankByPoints(initialize())
# Not finished
def RankbyAvg(data):
    """Stub: rank patients by average breathing rate (incomplete).

    Currently only sets up the exercise/patient lists and a zeroed score
    table; it performs no ranking and returns None.
    """
    exercises = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    patients = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    scores = [[patient, 0] for patient in patients]
| {
"repo_name": "easyCZ/SLIP-A-2015",
"path": "respiratory/Processed Datasets/Processed Datasets/test2.py",
"copies": "1",
"size": "2620",
"license": "mit",
"hash": -8105964971317075000,
"line_mean": 24.9405940594,
"line_max": 177,
"alpha_frac": 0.6541984733,
"autogenerated": false,
"ratio": 2.817204301075269,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8556327355750999,
"avg_score": 0.08301508372485408,
"num_lines": 101
} |
# analysis.py - load, calculate, save
"""Load an analysis config file, calculate it, and save the results."""
import collections
import logging
import yaml
from . import calculation
from . import features
from . import readjustments
from . import rules
from . import tools
from . import types
from . import vis
__all__ = ['Analysis']
log = logging.getLogger()
class Analysis(object):
    """Distributed Morphology (DM) analysis loaded from a YAML config file."""

    # Component factories; subclasses may override these to swap
    # implementations.
    Features = features.FeatureSystem

    Vis = vis.VocabularyItems

    Rules = rules.Rules

    Readjustments = readjustments.Readjustments

    Calculator = calculation.Calculator

    def __init__(self, filename, *, directory=None, encoding='utf-8'):
        """Parse the YAML config at *filename* and build the calculator.

        directory -- optional directory for the derived results file
        encoding -- text encoding used to read the config file
        """
        self.filename = filename
        # e.g. 'analysis.yaml' -> 'analysis-results.yaml'
        # (optionally placed in *directory*).
        self.results = tools.derive_filename(filename,
                                             suffix='-results',
                                             extension='yaml',
                                             directory=directory)
        log.info(f'{self!r}')
        with open(self.filename, encoding=encoding) as fd:
            cfg = yaml.safe_load(fd)
        # NOTE(review): 'Anomymous' and 'DM-Analyis' look like typos, but
        # they are runtime defaults that end up in saved results files --
        # confirm before correcting.
        self.author = cfg.get('author', 'Anomymous')
        self.title = cfg.get('title', 'DM-Analyis')
        self.features = self.Features(cfg['features'], always_bag=cfg.get('multisets'))
        self.vis = self.Vis(cfg['vis'])
        self.rules = self.Rules(cfg.get('rules', []))
        self.readjustments = self.Readjustments(cfg.get('readjustments', []))
        # Normalize each paradigm block, preserving key order for the
        # YAML dump in save().
        self.paradigms = [collections.OrderedDict([
            ('name', p['name']),
            ('headers', types.FlowList(p['headers'])),
            ('inputs', list(map(types.FlowList, p['inputs']))),
            ('spellouts_expected', types.List(p.get('spellouts_expected', []))),
        ]) for p in cfg['paradigms']]
        # Flatten all paradigm inputs into SlotList instances.
        inputs = (i for p in self.paradigms for i in p['inputs'])
        self.inputs = list(map(SlotList.from_heads, inputs))
        self.calculator = self.Calculator(cfg.get('insertion', 'cyclic'),
                                          self.inputs, self.rules, self.vis, self.readjustments)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.filename!r})'

    def calculate(self):
        """Run the calculator, storing worklog, outputs, and spellouts."""
        log.info('\tcalculate..')
        self.worklog, self.outputs, self.spellouts = self.calculator()

    def save(self, *, encoding='utf-8', newline=''):
        """Dump the configuration plus the worklog to the results YAML file."""
        log.info(f'\tsave to {self.results!r}..')
        data = collections.OrderedDict([
            ('author', self.author),
            ('title', self.title),
            ('insertion', self.calculator.insertion.kind),
            ('multisets', self.features.always_bag),
            ('features', self.features),
            ('vis', self.vis),
            ('rules', self.rules),
            ('readjustments', self.readjustments),
            ('paradigms', self.paradigms),
            ('worklog', self.worklog),
        ])
        with open(self.results, 'w', encoding=encoding, newline=newline) as fd:
            yaml.dump(data, fd)
class SlotList(types.FlowList):
    """Hierarchy of potentially fused heads represented as sequence."""

    @classmethod
    def from_heads(cls, heads):
        """Wrap each head in its own single-head Slot."""
        return cls([Slot([Head(head)]) for head in heads])

    def __str__(self):
        return ' '.join(str(slot) for slot in self)
class Slot(types.List):
    """Sequence of heads that have been fused."""

    def __str__(self):
        inner = ' '.join(str(head) for head in self)
        return f'#{inner}#'
class Head(features.FeatureSet):
    """Head (morpheme, lexical item) as a (multi)set of morphosyntactic features."""

    def __str__(self):
        return f'[{super().__str__()}]'
| {
"repo_name": "xflr6/dmengine",
"path": "dmengine/analysis.py",
"copies": "1",
"size": "3629",
"license": "mit",
"hash": 7405669180204131000,
"line_mean": 28.7459016393,
"line_max": 87,
"alpha_frac": 0.5822540645,
"autogenerated": false,
"ratio": 3.889603429796356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4971857494296356,
"avg_score": null,
"num_lines": null
} |
# analysis.py
# -----------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
######################
# ANALYSIS QUESTIONS #
######################
# Change these default values to obtain the specified policies through
# value iteration.
def question2a():
    """Return the (discount, noise, living reward) triple for question 2a."""
    return 0.9, 0.2, 0.0
# If the requested policy is unattainable, return 'NOT POSSIBLE' instead.
def question2b():
    """Return the (discount, noise, living reward) triple for question 2b."""
    return 0.9, 0.2, 0.0
# If the requested policy is unattainable, return 'NOT POSSIBLE' instead.
def question2c():
    """Return the (discount, noise, living reward) triple for question 2c."""
    return 0.9, 0.2, 0.0
# If the requested policy is unattainable, return 'NOT POSSIBLE' instead.
def question2d():
    """Return the (discount, noise, living reward) triple for question 2d."""
    return 0.9, 0.2, 0.0
# If the requested policy is unattainable, return 'NOT POSSIBLE' instead.
def question2e():
    """Return the (discount, noise, living reward) triple for question 2e."""
    return 0.9, 0.2, 0.0
# If the requested policy is unattainable, return 'NOT POSSIBLE' instead.
if __name__ == '__main__':
    # Print every question*() answer when run as a script; requires this
    # module to be importable under the name 'analysis'.
    print('Answers to analysis questions:')
    import analysis
    for q in [q for q in dir(analysis) if q.startswith('question')]:
        response = getattr(analysis, q)()
        print('  Question %s:\t%s' % (q, str(response)))
| {
"repo_name": "yandexdataschool/Practical_RL",
"path": "week03_model_free/crawler_and_pacman/seminar_py3/analysis.py",
"copies": "1",
"size": "1834",
"license": "unlicense",
"hash": 9139489044553953000,
"line_mean": 28.5806451613,
"line_max": 78,
"alpha_frac": 0.685387132,
"autogenerated": false,
"ratio": 3.4669187145557654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46523058465557654,
"avg_score": null,
"num_lines": null
} |
# analysis.py
# -----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
######################
# ANALYSIS QUESTIONS #
######################
# Set the given parameters to obtain the specified policies through
# value iteration.
def question2():
    """Return the (discount, noise) pair for question 2."""
    return 0.9, 0.0
def question3a():
    """Return (discount, noise, living reward) for question 3a."""
    return 0.1, 0.0, 0.5
# Return 'NOT POSSIBLE' if no parameter combination works.
def question3b():
    """Return (discount, noise, living reward) for question 3b."""
    return 0.3, 0.2, 0.0
# Return 'NOT POSSIBLE' if no parameter combination works.
def question3c():
    """Return (discount, noise, living reward) for question 3c."""
    return 0.9, 0.0, 0.0
# Return 'NOT POSSIBLE' if no parameter combination works.
def question3d():
    """Return (discount, noise, living reward) for question 3d."""
    return 0.9, 0.5, 0.0
# Return 'NOT POSSIBLE' if no parameter combination works.
def question3e():
    """Return (discount, noise, living reward) for question 3e."""
    return 0.0, 0.5, 0.1
# Return 'NOT POSSIBLE' if no parameter combination works.
def question6():
    """Question 6: report that no (epsilon, learning rate) pair suffices."""
    # The original assigned answerEpsilon/answerLearningRate and had a
    # second return after this one -- all unreachable dead code, removed.
    return 'NOT POSSIBLE'
# Return 'NOT POSSIBLE' if no parameter combination works.
if __name__ == '__main__':
    # Script entry point (Python 2 print statements); prints every
    # question*() answer. Requires the module to be importable as 'analysis'.
    print 'Answers to analysis questions:'
    import analysis
    for q in [q for q in dir(analysis) if q.startswith('question')]:
        response = getattr(analysis, q)()
        print '  Question %s:\t%s' % (q, str(response))
| {
"repo_name": "PiscesDream/Ideas",
"path": "reinforcement/tutorial/analysis.py",
"copies": "1",
"size": "2372",
"license": "apache-2.0",
"hash": 978874903977520100,
"line_mean": 31.0540540541,
"line_max": 80,
"alpha_frac": 0.6922428331,
"autogenerated": false,
"ratio": 3.4985250737463125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9610876264507953,
"avg_score": 0.015978328467672033,
"num_lines": 74
} |
# analysis.py
# -----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
######################
# ANALYSIS QUESTIONS #
######################
# Set the given parameters to obtain the specified policies through
# value iteration.
def question2():
    """Return the (discount, noise) pair for question 2."""
    return 0.9, 0
def question3a():
    """Return (discount, noise, living reward) for question 3a."""
    return 0.1, 0, 0.5
# Return 'NOT POSSIBLE' if no parameter combination works.
def question3b():
    """Return (discount, noise, living reward) for question 3b."""
    return 0.2, 0.2, 0.2
# Return 'NOT POSSIBLE' if no parameter combination works.
def question3c():
    """Return (discount, noise, living reward) for question 3c."""
    return 0.8, 0, 0.5
# Return 'NOT POSSIBLE' if no parameter combination works.
def question3d():
    """Return (discount, noise, living reward) for question 3d."""
    return 0.9, 0.2, 0.2
# Return 'NOT POSSIBLE' if no parameter combination works.
def question3e():
    """Return (discount, noise, living reward) for question 3e."""
    return 1, 0, 1
# Return 'NOT POSSIBLE' if no parameter combination works.
def question6():
    """Question 6: report that no (epsilon, learning rate) pair suffices."""
    # Removed the unused answerEpsilon/answerLearningRate locals that the
    # original assigned before unconditionally returning the sentinel.
    return 'NOT POSSIBLE'
# Return 'NOT POSSIBLE' if no parameter combination works.
if __name__ == '__main__':
    # Script entry point (Python 2 print statements); prints every
    # question*() answer. Requires the module to be importable as 'analysis'.
    print 'Answers to analysis questions:'
    import analysis
    for q in [q for q in dir(analysis) if q.startswith('question')]:
        response = getattr(analysis, q)()
        print '  Question %s:\t%s' % (q, str(response))
| {
"repo_name": "pfwangthu/edX-Artificial-Intelligence",
"path": "reinforcement/analysis.py",
"copies": "1",
"size": "2341",
"license": "mpl-2.0",
"hash": -789916919059710200,
"line_mean": 30.6351351351,
"line_max": 80,
"alpha_frac": 0.694574968,
"autogenerated": false,
"ratio": 3.4528023598820057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4647377327882006,
"avg_score": null,
"num_lines": null
} |
"""Analysis-specific plotting methods"""
import warnings
import numpy as np
import scipy as sp
import itertools as it
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import datetime
import lab
from ..classes.classes import ExperimentGroup
import plotting as plotting
import plotting_helpers as plotting_helpers
from lab.misc import signalsmooth
from ..analysis import behavior_analysis as ba
from ..analysis import place_cell_analysis as place
from ..analysis import imaging_analysis as ia
from ..analysis import calc_activity as calc_activity
from ..classes import exceptions as exc
def activityPlot(
        trial, ax, dFOverF='median', demixed=False,
        yOffsets=None, linearTransform=None, window_width=100,
        dFOverF_percentile=8, timeInterval=None, removeNanBoutons=False,
        colorbarAx=None, smoothSize=0, resampling=None, style='color',
        colorcode=None, markerDuration=5, colorRange=[-0.2, 1],
        label_x_axis=False, channel='Ch2', label=None, roi_filter=None):
    """Plot the activity of all boutons at each time as a heatmap.

    style -- 'color' draws a time-by-ROI heatmap with behavior intervals
        below it; 'traces' draws per-ROI line traces instead.
    colorbarAx -- axis to attach the dF/F colorbar to ('color' style only)
    timeInterval -- optional (start, stop) in seconds restricting the plot
    smoothSize -- if nonzero, hanning-smooth each ROI trace with this window
    resampling -- if set, decimate the data by this factor along time
    """
    # NOTE(review): colorRange is a mutable default argument; see the
    # in-place ticks.append(0) below -- fragile across repeated calls.
    times = trial.parent.imagingTimes()
    # Select this trial's plane of the imaging array.
    imData = trial.parent.imagingData(
        dFOverF=dFOverF, demixed=demixed,
        linearTransform=linearTransform, window_width=window_width,
        dFOverF_percentile=dFOverF_percentile,
        removeNanBoutons=removeNanBoutons, channel=channel, label=label,
        roi_filter=roi_filter)[:, :, trial.trialNum()]
    if timeInterval is not None:
        # Clip both the data and the time axis to the requested interval.
        imData = imData[:, trial.parent.imagingIndex(
            timeInterval[0]):trial.parent.imagingIndex(timeInterval[1])]
        times = np.array(times)[trial.parent.imagingIndex(timeInterval[0]):
                                trial.parent.imagingIndex(timeInterval[1])]
    if smoothSize:
        for roiIdx in range(imData.shape[0]):
            imData[roiIdx] = signalsmooth.smooth(
                imData[roiIdx], window_len=smoothSize, window='hanning')
        # imData = imData[:,int(smoothSize/2):-int(smoothSize/2)]
        # times = times[:-(2*int(smoothSize/2))]
    if resampling is not None:
        imData = sp.signal.decimate(imData, resampling, axis=1)
        times = times[::resampling]
    if style == 'color':
        # Cell-edge coordinates for pcolor: one more edge than ROI rows.
        roiNums = np.arange(0, imData.shape[0] + 1) + 0.5
        TIMES, ROI_NUMS = np.meshgrid(times, roiNums)
        im = ax.pcolor(TIMES, ROI_NUMS, imData, vmin=colorRange[0],
                       vmax=colorRange[1], rasterized=True)
        if colorbarAx is not None:
            ticks = colorRange
            # Add a tick at zero when the range straddles it; this mutates
            # the colorRange list in place (see NOTE above).
            if 0 > ticks[0] and 0 < ticks[1]:
                ticks.append(0)
            if not colorbarAx == ax:
                cbar = colorbarAx.figure.colorbar(
                    im, ax=colorbarAx, ticks=ticks, fraction=1)
            else:
                cbar = colorbarAx.figure.colorbar(
                    im, ax=colorbarAx, ticks=ticks)
            cbar.set_label(r'$\Delta$F/F', labelpad=-10)
        """ Label the ROIs """
        ROIs = [roi.id for roi in trial.rois(channel=channel, label=label)
                if roi_filter(roi)]
        try:
            # NOTE(review): 'bouton' is not among this module's visible
            # imports; if it is undefined the bare except below silently
            # falls back to labeling rows with raw ROI ids -- confirm.
            roiGroups, roiGroupNames = bouton.BoutonSet(ROIs).boutonGroups()
        except:
            ax.set_yticks(range(len(ROIs)))
            ax.set_yticklabels(ROIs)
        else:
            if colorcode == 'postSynaptic':
                # Mark each ROI row with its bouton group's color.
                for k, group in enumerate(roiGroups):
                    for roi in group:
                        # if roiGroupNames[k] != 'other':
                        ax.add_patch(plt.Rectangle(
                            (-2, ROIs.index(roi.name) + 0.5), 1, 1,
                            color=bouton.groupPointStyle(roiGroupNames[k])[0],
                            lw=0))
        """ Plot the behavior data beneath the plot """
        framePeriod = trial.parent.frame_period()
        # Green bars: running intervals (frame units scaled to seconds).
        for interval in ba.runningIntervals(trial) * framePeriod:
            ax.add_patch(plt.Rectangle(
                (interval[0], -1), interval[1] - interval[0], 1.3,
                color='g', lw=0))
        height = -1
        # One extra row below the heatmap per recorded behavior channel.
        for key, color in [('air', 'r'), ('airpuff', 'r'),
                           ('licking', 'b'), ('odorA', 'c'),
                           ('odorB', 'm')]:
            try:
                intervals = trial.behaviorData()[key]
            except KeyError:
                pass
            else:
                height -= 1
                for interval in intervals:
                    ax.add_patch(Rectangle(
                        (interval[0], height), interval[1] - interval[0],
                        1, facecolor=color, lw=0))
        ax.set_xlim([-2, times[-1]])
        ax.spines['left'].set_bounds(1, len(roiNums) - 1)
        ax.spines['left'].set_position(('outward', 2))
        for side in ['right', 'top', 'bottom']:
            ax.spines[side].set_color('none')
        ax.set_yticks([1, len(roiNums) - 1])
        if label_x_axis:
            ax.set_xlabel('time (s)')
        else:
            ax.set_xticks([])
        ax.set_ylabel('ROI #', labelpad=-9)
        ax.yaxis.set_ticks_position('left')
        ax.tick_params(axis='y', direction='out')
        ax.set_ylim([height, len(roiNums) - 0.5])
    elif style == 'traces':
        # Wrap as a single-trial 3-D array, the shape tracePlot consumes.
        data = [imData.reshape([imData.shape[0], imData.shape[1], 1])]
        # NOTE(review): 'ROIs' is only assigned in the 'color' branch above;
        # style='traces' would raise NameError here -- confirm.
        plotting.tracePlot(
            ax, data, times, ROIs, stimulusDurations=None, shading=None,
            yOffsets=yOffsets, markerDuration=markerDuration)
        framePeriod = trial.parent.frame_period()
        yMin = ax.get_ylim()[0]
        for interval in ba.runningIntervals(trial) * framePeriod:
            ax.add_patch(plt.Rectangle(
                (interval[0], yMin - 1), interval[1] - interval[0], 1,
                color='g', lw=0))
        # ax.set_xlim([-2, times[-1]])
        ax.set_ylim(bottom=yMin - 1)
# ADDED SUPPORT FOR LASER PLOT
def behaviorPlot(
        trial, ax, keys=['velocity', 'running', 'position', 'licking', 'tone', 'light',
                         'water', 'reward', 'airpuff', 'motion', 'laser'],
        colors=None, include_empty=False, y_start=-1):
    """Plot behavior data over time

    Keyword arguments:
    ax -- axis to plot on
    keys -- behavior data to plot, if data is missing it is skipped
    colors -- colors list to use, will be iterated over
    include_empty -- if True, plot data that has no intervals, if false,
        exclude those rows
    y_start -- bottom of first plot, decreases by one for each successive plot
    """
    try:
        bd = trial.behaviorData()
    except exc.MissingBehaviorData:
        return
    if colors is None:
        colors = lab.plotting.color_cycle()
    else:
        colors = iter(colors)
    next_y = y_start
    labels = []
    label_colors = []
    for key in keys:
        if key == 'velocity':
            try:
                velocity = ba.velocity(
                    trial, imageSync=False, sampling_interval='actual',
                    smoothing='hanning', window_length=71)
                # Probe raises KeyError when the duration is unknown.
                bd['recordingDuration']
            except (exc.MissingBehaviorData, KeyError):
                # Fix: the original had a duplicated (unreachable) except
                # clause and re-indexed bd inside the first handler, which
                # could itself raise an uncaught KeyError. Just skip the row.
                pass
            else:
                labels.append('velocity')
                # next() works on Python 2 and 3 (original used .next()).
                next_color = next(colors)
                label_colors.append(next_color)
                # Normalize into [0, 0.9] and shift into this row's band.
                velocity -= np.amin(velocity)
                velocity /= np.amax(velocity) / 0.9
                velocity += next_y + 0.05
                ax.plot(np.linspace(0, bd['recordingDuration'],
                                    len(velocity)), velocity,
                        color=next_color)
                next_y -= 1
        elif key == 'position':
            try:
                position = bd['treadmillPosition']
                bd['recordingDuration']
                bd['samplingInterval']
            except KeyError:
                pass
            else:
                labels.append('position')
                next_color = next(colors)
                label_colors.append(next_color)
                # Expand sparse (time, position) pairs into a dense trace.
                full_position = np.empty(int(np.ceil(
                    bd['recordingDuration'] / bd['samplingInterval'])))
                for t, pos in position:
                    full_position[int(t / bd['samplingInterval']):] = pos
                full_position *= 0.9
                full_position += next_y + 0.05
                ax.plot(np.linspace(0, bd['recordingDuration'],
                                    len(full_position)), full_position,
                        color=next_color)
                next_y -= 1
        else:
            try:
                if key == 'running':
                    data = ba.runningIntervals(trial, imageSync=False) *\
                        bd['samplingInterval']
                else:
                    data = bd[key]
            except KeyError:
                pass
            else:
                if include_empty or len(data) > 0:
                    labels.append(key)
                    next_color = next(colors)
                    label_colors.append(next_color)
                    # One rectangle per on/off interval in this row's band.
                    for interval in data:
                        ax.add_patch(Rectangle(
                            (interval[0], next_y),
                            interval[1] - interval[0], 1,
                            facecolor=next_color, lw=0))
                    next_y -= 1
    if next_y == y_start:
        # Nothing was plotted; leave the axis untouched.
        return
    ax.set_yticks(np.arange(-0.5, next_y + 0.5, -1))
    ax.set_yticklabels(labels)
    for tick, c in zip(ax.get_yticklabels(), label_colors):
        tick.set_color(c)
    try:
        ax.set_xlim([0, int(bd['recordingDuration'])])
    except KeyError:
        pass
    ax.set_ylim([next_y + 1, 0])
    ax.set_xlabel('Time (s)')
    ax.set_title('{0}:{1}'.format(trial.parent.parent.get('mouseID'),
                                  trial.get('time')))
def plot_imaging_and_behavior(
        trial, ax, start_time=0, stop_time=None, channel='Ch2', label=None,
        roi_filter=None, label_rois=False,
        keys=['running', 'licking', 'water', 'airpuff', 'tone', 'light'],
        colors=None, include_empty=False, dFOverF='from_file'):
    """Plot imaging data for all ROIs with behavior data underneath.

    start_time/stop_time -- optional window (seconds) to plot; stop_time
        defaults to the end of the imaging record
    label_rois -- if True, write each ROI id to the left of its trace
    keys/colors/include_empty -- forwarded to behaviorPlot for the
        behavior rows drawn below the traces
    """
    # Select this trial's plane of the imaging array.
    imaging_data = trial.parent.imagingData(
        channel=channel, label=label, roi_filter=roi_filter,
        dFOverF=dFOverF)[..., trial.parent.findall('trial').index(trial)]
    if not imaging_data.shape[0]:
        # No ROIs passed the filter; nothing to draw.
        return
    frame_period = trial.parent.frame_period()
    start_frame = int(start_time / frame_period)
    if stop_time is None:
        stop_frame = imaging_data.shape[1]
    else:
        stop_frame = int(stop_time / frame_period)
    # Second check is deliberate: stop_frame must be resolved from the data
    # shape before stop_time is replaced by the last imaging timestamp.
    if stop_time is None:
        stop_time = trial.parent.imagingTimes(channel=channel)[-1]
    imaging_data = imaging_data[:, start_frame:stop_frame]
    t_range = np.linspace(start_time, stop_time, imaging_data.shape[1])
    max_F = np.nanmax(imaging_data)
    # Normalize and re-scale so they can all be plotted on top of each other
    imaging_data /= max_F
    imaging_data += np.arange(imaging_data.shape[0]).reshape((-1, 1)) + 0.5
    ax.plot(t_range, imaging_data.T)
    # Behavior rows are drawn below y=0 by behaviorPlot.
    behaviorPlot(
        trial, ax, keys=keys, colors=colors, include_empty=include_empty)
    if label_rois:
        roi_ids = trial.parent.roi_ids(
            channel=channel, label=label, roi_filter=roi_filter)
        x_range = ax.get_xlim()[1]
        for idx, roi_id in enumerate(roi_ids):
            # Place each id just left of the axis, aligned with its trace.
            ax.text(x_range * -0.01, idx + 0.5, roi_id, ha='right')
    ax.set_ylim(top=imaging_data.shape[0] + 0.5)
    # Scale bar of 0.5 dF/F in the normalized units used above.
    plotting_helpers.add_scalebar(
        ax, matchx=False, matchy=False, hidex=False, hidey=False,
        sizey=0.5 / max_F, labely='0.5', bar_thickness=0, loc=1,
        borderpad=0.5)
def responsePairPlot(exptGrp, ax, stim1, stim2, stimuliLabels=None,
                     excludeRunning=True, boutonGroupLabeling=False,
                     linearTransform=None, axesCenter=True, channel='Ch2',
                     label=None, roi_filter=None):
    """Ellipse plot comparing per-ROI responses to two stimuli.

    Each ROI is drawn at (mean response to stim1, mean response to stim2)
    with 2-SEM extents on each axis. 'running' and 'licking' are treated
    specially via their dedicated modulation metrics.
    """
    # Accept single stimuli or lists of stimuli.
    if not isinstance(stim1, list):
        stim1 = [stim1]
    if not isinstance(stim2, list):
        stim2 = [stim2]
    if stimuliLabels is None:
        stimuliLabels = [stim1[0], stim2[0]]
    # Restrict to GABAergic ROIs shared across the group's experiments.
    ROIs = exptGrp.sharedROIs(
        roiType='GABAergic', channel=channel, label=label,
        roi_filter=roi_filter)
    shared_filter = lambda x: x.id in ROIs
    rIntegrals = []
    for stim in [stim1, stim2]:
        if stim == ['running']:
            rIntegrals.append(ia.runningModulation(
                exptGrp, linearTransform=linearTransform, channel=channel,
                label=label, roi_filter=shared_filter).reshape([-1, 1]))
        elif stim == ['licking']:
            rIntegrals.append(ia.lickingModulation(
                exptGrp, linearTransform=linearTransform, channel=channel,
                label=label, roi_filter=shared_filter).reshape([-1, 1]))
        else:
            rIntegrals.append(ia.responseIntegrals(
                exptGrp, stim, excludeRunning=excludeRunning,
                sharedBaseline=True, linearTransform=linearTransform,
                dFOverF='mean', channel=channel, label=label,
                roi_filter=shared_filter))
    if not boutonGroupLabeling:
        # ellipsePlot takes the ROI list only when group labeling is on.
        ROIs = None
    # 2 * sqrt(var / n) = two standard errors of the mean per axis.
    plotting.ellipsePlot(
        ax, rIntegrals[0].mean(axis=1), rIntegrals[1].mean(axis=1),
        2 * np.sqrt(rIntegrals[0].var(axis=1) / rIntegrals[0].shape[1]),
        2 * np.sqrt(rIntegrals[1].var(axis=1) / rIntegrals[1].shape[1]),
        boutonGroupLabeling=ROIs, color='k', axesCenter=axesCenter)
    ax.set_xlabel(stimuliLabels[0], labelpad=1)
    ax.set_ylabel(stimuliLabels[1], labelpad=1)
def plotLickRate(exptGrp, ax=None, minTrialDuration=0):
    """Plot the lick rate for each trial in this ExperimentGroup.

    Keyword arguments:
    ax -- axis to plot on, created if 'None'
    minTrialDuration -- minimum length of trial (in seconds) to be included
        in the analysis

    Returns the figure containing the plot, or None if no trial had
    usable licking data.
    """
    lickrates = []
    dates = []
    for experiment in exptGrp:
        for trial in experiment.findall('trial'):
            try:
                bd = trial.behaviorData()
            except exc.MissingBehaviorData:
                continue
            if 'licking' in bd.keys() and \
                    'recordingDuration' in bd.keys() and \
                    bd['recordingDuration'] >= minTrialDuration:
                # Licks per second over the whole recording.
                lickrates.append(bd['licking'].shape[0] /
                                 bd['recordingDuration'])
                dates.append(trial.attrib['time'])
    if len(lickrates) > 0:
        if ax is None:
            fig = plt.figure(figsize=(11, 8))
            ax = fig.add_subplot(111)
        else:
            # Fix: 'fig' was previously undefined when an axis was passed
            # in, raising UnboundLocalError at the return below.
            fig = ax.get_figure()
        ax.bar(np.arange(len(lickrates)), lickrates, 0.5)
        ax.set_ylabel('Lick rate (Hz)')
        ax.set_title('lick rate per trial')
        ax.set_xticks(np.arange(len(lickrates)) + 0.25)
        ax.set_xticklabels(
            dates, ha='right', rotation_mode='anchor', rotation=30)
        return fig
    return None
def plotLapRate(exptGrp, ax=None, minTrialDuration=0):
    """Plot the number of laps completed per minute for each trial.

    Keyword arguments:
    ax -- axis to plot on, created if 'None'
    minTrialDuration -- minimum length of trial (in seconds) to be included
        in the analysis

    Returns the figure containing the plot, or None if no trial had
    usable lap-counter data.
    """
    laprates = []
    dates = []
    for experiment in exptGrp:
        for trial in experiment.findall('trial'):
            try:
                bd = trial.behaviorData()
            except exc.MissingBehaviorData:
                continue
            if 'lapCounter' in bd.keys() and \
                    len(bd['lapCounter']) > 0 and \
                    'recordingDuration' in bd.keys() and \
                    bd['recordingDuration'] >= minTrialDuration:
                # Laps per minute over the whole recording.
                laprates.append(sum(bd['lapCounter'][:, 1] == 1) /
                                bd['recordingDuration'] * 60.0)
                dates.append(trial.attrib['time'])
    if len(laprates) > 0:
        if ax is None:
            fig = plt.figure(figsize=(11, 8))
            ax = fig.add_subplot(111)
        else:
            # Fix: 'fig' was previously undefined when an axis was passed
            # in, raising UnboundLocalError at the return below.
            fig = ax.get_figure()
        ax.bar(np.arange(len(laprates)), laprates, 0.5)
        ax.set_ylabel('Lap rate (laps/minute)')
        ax.set_title('lap rate per trial')
        ax.set_xticks(np.arange(len(laprates)) + 0.25)
        ax.set_xticklabels(
            dates, ha='right', rotation_mode='anchor', rotation=15)
        return fig
    return None
def plotLapRateByDays(exptGrp, ax=None, color=None):
    """Plot lap rate (laps/minute) against days of belt training.

    ax -- axis to plot on, created if 'None'
    color -- point/line color; defaults to the first color-cycle entry
    """
    if ax is None:
        ax = plt.axes()
    if color is None:
        # next() works on Python 2 and 3; the original used the
        # Python-2-only .next() iterator method.
        color = next(lab.plotting.color_cycle())
    training_days = exptGrp.priorDaysOfExposure(ignoreBelt=True)
    lap_rates = {}
    for expt in exptGrp:
        for trial in expt.findall('trial'):
            try:
                bd = trial.behaviorData()
            except exc.MissingBehaviorData:
                continue
            if len(bd.get('lapCounter', [])) > 0 \
                    and 'recordingDuration' in bd:
                lap_rates.setdefault(training_days[expt], []).append(
                    np.sum(bd['lapCounter'][:, 1] == 1) /
                    bd['recordingDuration'] * 60.0)
    if len(lap_rates) > 0:
        # sorted() replaces the Python-2-only keys()/sort() pair
        # (dict.keys() is a view, not a sortable list, on Python 3).
        days = sorted(lap_rates)
        day_means = []
        for day in days:
            # Jitter x position so overlapping points stay visible.
            x = (np.random.rand(len(lap_rates[day])) * 0.2) - 0.1 + day
            ax.plot(x, lap_rates[day], '.', color=color, markersize=7)
            day_means.append(np.mean(lap_rates[day]))
        ax.plot(days, day_means, '-', label=exptGrp.label(), color=color)
    ax.set_ylabel('Lap rate (laps/minute)')
    ax.set_xlabel('Days of training')
    ax.set_title('Average running by days of belt exposure')
def activityComparisonPlot(exptGrp, method, ax=None, mask1=None, mask2=None,
                           label1=None, label2=None, roiNamesToLabel=None,
                           normalize=False, rasterized=False,
                           dF='from_file', channel='Ch2', label=None,
                           roi_filter=None, demixed=False):
    """Back-to-back bar plot of per-ROI activity for two experiments.

    The first experiment's activity is plotted upward and the second's
    downward (negated), one bar pair per shared ROI, ordered by the
    second experiment's activity from high to low.

    method -- activity statistic name, forwarded to calc_activity
    mask1/mask2 -- optional intervals restricting each experiment's data
    roiNamesToLabel -- ROI ids to mark with dashed vertical lines
    normalize -- if True, scale each experiment's activity to its maximum
    Returns the figure containing the plot.
    """
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    else:
        fig = ax.get_figure()
    # Only the first two experiments are compared.
    if len(exptGrp) != 2:
        warnings.warn(
            'activityComparisonPlot requires an experimentGroup of 2 ' +
            'experiments. Using the first two elements of {}'.format(exptGrp))
        grp = exptGrp[:2]
    else:
        grp = exptGrp
    # ROIs present in both experiments, and each one's index per experiment.
    ROIs = grp.sharedROIs(channel=channel, label=label,
                          roi_filter=roi_filter)
    shared_filter = lambda x: x.id in ROIs
    exp1ROIs = grp[0].roi_ids(channel=channel, label=label,
                              roi_filter=shared_filter)
    exp2ROIs = grp[1].roi_ids(channel=channel, label=label,
                              roi_filter=shared_filter)
    order1 = np.array([exp1ROIs.index(x) for x in ROIs])
    order2 = np.array([exp2ROIs.index(x) for x in ROIs])
    # inds of the roiNamesToLabel (in terms of exp1 indices)
    if roiNamesToLabel:
        order3 = np.array(
            [exp1ROIs.index(x) for x in roiNamesToLabel if x in ROIs])
    # NOTE(review): calc_activity is imported at module level as a module
    # ('import ... as calc_activity') yet called like a function here --
    # possibly calc_activity.calc_activity was intended; confirm.
    activity1 = calc_activity(
        grp[0], method=method, interval=mask1, dF=dF, channel=channel,
        label=label, roi_filter=roi_filter, demixed=demixed)
    activity2 = calc_activity(
        grp[1], method=method, interval=mask2, dF=dF, channel=channel,
        label=label, roi_filter=roi_filter, demixed=demixed)
    # ordering corresponds to sharedROIs() ordering
    activity1 = np.array([activity1[x] for x in order1]).flatten()
    activity2 = np.array([activity2[x] for x in order2]).flatten()
    if normalize:
        activity1 = activity1 / float(np.amax(activity1))
        activity2 = activity2 / float(np.amax(activity2))
    # -1 flips sort so it's actually high to low and also puts NaNs at the end
    order = np.argsort(-1 * activity2)
    bar_lefts = np.arange(len(ROIs))
    width = 1
    if not label1:
        label1 = grp[0].get('startTime')
    if not label2:
        label2 = grp[1].get('startTime')
    ax.bar(np.array(bar_lefts), activity1[order], width, color='b',
           alpha=0.5, label=label1, rasterized=rasterized)
    ax.bar(np.array(bar_lefts), np.negative(activity2)[order], width,
           color='r', alpha=0.5, label=label2, rasterized=rasterized)
    # Symmetric y limits so the two halves are visually comparable.
    max_y = np.amax(np.abs(ax.get_ylim()))
    ax.set_ylim(-max_y, max_y)
    ax.set_xlim(right=len(ROIs))
    # roiIndsToIndicate = [np.argwhere(order1[order]==roi)[0][0] for roi in exp1RoisToIndicate if roi in order1[order]]
    if roiNamesToLabel:
        # ylim = ax.get_ylim()
        roiIndsToIndicate = [
            np.argwhere(order1[order] == x)[0][0] for x in order3]
        for idx in roiIndsToIndicate:
            ax.axvline(
                idx + 0.5, linestyle='dashed', color='k',
                rasterized=rasterized)
        # ax.vlines(np.array(roiIndsToIndicate)+0.5, ylim[0], ylim[1], linestyles='dashed', color='k')
        # ax.set_ylim(ylim)
    # make all y-axis labels positive
    ax.set_yticklabels(np.abs(ax.get_yticks()))
    ax.set_xlabel('ROI index')
    ax.set_ylabel('Activity = {}'.format(method))
    ax.legend()
    return fig
def activityByExposure(exptGrp, ax=None, stat='mean',
                       combineTimes=datetime.timedelta(hours=12),
                       ignoreContext=False, **kwargs):
    """Plots cdf of activity of ROIs by days of context exposure.

    Keyword arguments:
    stat -- statistic to plot, see calc_activity.py for details
    combineTimes -- experiments within this timedelta of each other are
        considered the same day for determining exposure
    ignoreContext -- if True, ignores context for determining exposure
    **kwargs -- any additional arguments will be passed in to
        place.calc_activity_statistic
    """
    if ax is None:
        _, ax = plt.subplots()
    exptsByExposure = ExperimentGroup.dictByExposure(
        exptGrp, combineTimes=combineTimes, ignoreBelt=ignoreContext,
        ignoreContext=ignoreContext)
    colors = lab.plotting.color_cycle()
    # One CDF per exposure level, drawn in increasing exposure order.
    for exposure in sorted(exptsByExposure):
        exgrp = ExperimentGroup(
            exptsByExposure[exposure],
            label='1 day' if exposure == 0 else str(exposure + 1) +
            ' days')
        # next() works on Python 2 and 3; the original used the
        # Python-2-only .next() iterator method.
        place.calc_activity_statistic(
            exgrp, ax=ax, stat=stat, plot_method='cdf',
            label=exgrp.label(), c=next(colors), **kwargs)
    ax.legend(loc='lower right')
    ax.set_title('{} by exposure - {}'.format(
        stat,
        'ignoring context' if ignoreContext else 'including context'))
def compare_bouton_responses(
        exptGrp, ax, stimuli, comp_method='angle', plot_method='cdf',
        channel='Ch2', label=None, roi_filter=None, **response_kwargs):
    """Compare various pairs of boutons, based on several conventions:

    'bouton' in label of bouton ROIs

    boutons targeting a cell soma are tagged with the cell number they are
    targeting, i.e. 'cell1', 'cell2', etc.

    boutons on an axons are tagged with the fiber number they are on,
    i.e. 'fiber1', 'fiber2', etc.

    boutons with no tags have no information about their axon or target

    Returns a dict of pairwise similarity values keyed by pairing category:
    'same fiber', 'same soma', 'bouton_with_fiber', 'fiber_with_not',
    'soma_with_not', and 'diff all'.
    """
    # Per-ROI response vectors (one value per stimulus), plus ROI identity
    # tuples; each element of ``rois`` unpacks as (mouse, location, label).
    response_matrix, rois = ia.response_matrix(
        exptGrp, stimuli, channel=channel, label=label, roi_filter=roi_filter,
        return_full=True, **response_kwargs)
    data = {}
    data['mouse'] = [roi[0] for roi in rois]
    data['loc'] = [roi[1] for roi in rois]
    data['label'] = [roi[2] for roi in rois]
    # Gather each ROI's tags by matching it back to the experiments it
    # appears in.  NOTE(review): Python 2 idioms here (it.izip; and
    # colors.next() below) — this module predates Python 3.
    tags = []
    for mouse, loc, name in it.izip(
            data['mouse'], data['loc'], data['label']):
        roi_tags = set()
        for expt in exptGrp:
            if expt.parent == mouse \
                    and expt.get('uniqueLocationKey') == loc:
                for roi in expt.rois(
                        channel=channel, label=label,
                        roi_filter=roi_filter):
                    if roi.label == name:
                        # NOTE: Taking the union of all tags,
                        # so mis-matched tags will just be combined
                        roi_tags = roi_tags.union(roi.tags)
        tags.append(roi_tags)
    data['tags'] = tags
    data['responses'] = [response for response in response_matrix]
    df = pd.DataFrame(data)
    # Select the pairwise similarity metric; each branch also sets the
    # x-axis label to match.
    if comp_method == 'angle':
        ax.set_xlabel('Response similarity (angle)')

        def compare(roi1, roi2):
            # Cosine of the angle between the two response vectors.
            return np.dot(roi1, roi2) / np.linalg.norm(roi1) \
                / np.linalg.norm(roi2)
    elif comp_method == 'abs angle':
        ax.set_xlabel('Response similarity (abs angle)')

        def compare(roi1, roi2):
            return np.abs(np.dot(roi1, roi2) / np.linalg.norm(roi1)
                          / np.linalg.norm(roi2))
    elif comp_method == 'corr':
        ax.set_xlabel('Response similarity (corr)')

        def compare(roi1, roi2):
            return np.corrcoef(roi1, roi2)[0, 1]
    elif comp_method == 'abs corr':
        ax.set_xlabel('Response similarity (abs corr)')

        def compare(roi1, roi2):
            return np.abs(np.corrcoef(roi1, roi2)[0, 1])
    elif comp_method == 'mean diff':
        ax.set_xlabel('Response similarity (mean diff)')

        def compare(roi1, roi2):
            return np.abs(roi1 - roi2).mean()
    else:
        raise ValueError('Unrecognized compare method argument')
    # Accumulators for each pairing category.
    same_fiber = []
    fiber_with_not = []
    same_soma = []
    soma_with_not = []
    bouton_with_fiber = []
    diff_all = []
    # Only ROIs imaged in the same mouse and location are compared.
    for name, group in df.groupby(['mouse', 'loc']):
        for roi1, roi2 in it.combinations(group.iterrows(), 2):
            r1_responses = roi1[1]['responses']
            r2_responses = roi2[1]['responses']
            # Compare only stimuli with finite responses in both ROIs.
            non_nan = np.isfinite(r1_responses) & np.isfinite(r2_responses)
            comp = compare(r1_responses[non_nan], r2_responses[non_nan])
            if np.isnan(comp):
                continue
            fiber1 = set(
                [tag for tag in roi1[1]['tags'] if 'fiber' in tag])
            fiber2 = set(
                [tag for tag in roi2[1]['tags'] if 'fiber' in tag])
            cell1 = set([tag for tag in roi1[1]['tags'] if 'cell' in tag])
            cell2 = set([tag for tag in roi2[1]['tags'] if 'cell' in tag])
            if len(fiber1.intersection(fiber2)):
                same_fiber.append(comp)
            elif len(fiber1) or len(fiber2):
                fiber_with_not.append(comp)
            if len(cell1.intersection(cell2)):
                same_soma.append(comp)
            elif len(cell1) or len(cell2):
                soma_with_not.append(comp)
            # A bouton paired with its own fiber ROI: one ROI's fiber tag
            # names the other ROI's label (per the tagging convention above).
            if len(fiber1) and roi2[1]['label'] in fiber1 \
                    or len(fiber2) and roi1[1]['label'] in fiber2:
                bouton_with_fiber.append(comp)
            elif not len(fiber1.intersection(fiber2)) \
                    and not len(cell1.intersection(cell2)):
                diff_all.append(comp)
    # Plot the similarity distribution for each pairing category.
    if plot_method == 'cdf':
        plotting.cdf(
            ax, same_fiber, bins='exact', label='same fiber')
        plotting.cdf(
            ax, same_soma, bins='exact', label='same soma')
        plotting.cdf(
            ax, bouton_with_fiber, bins='exact', label='bouton with fiber')
        plotting.cdf(
            ax, fiber_with_not, bins='exact', label='fiber with not')
        plotting.cdf(
            ax, soma_with_not, bins='exact', label='soma with not')
        plotting.cdf(
            ax, diff_all, bins='exact', label='diff all')
    elif plot_method == 'hist':
        colors = lab.plotting.color_cycle()
        plotting.histogram(
            ax, same_fiber, bins=50, color=colors.next(), normed=True,
            label='same fiber')
        plotting.histogram(
            ax, same_soma, bins=50, color=colors.next(), normed=True,
            label='same soma')
        plotting.histogram(
            ax, bouton_with_fiber, bins=50, color=colors.next(),
            normed=True, label='bouton with fiber')
        plotting.histogram(
            ax, fiber_with_not, bins=50, color=colors.next(), normed=True,
            label='fiber with not')
        plotting.histogram(
            ax, soma_with_not, bins=50, color=colors.next(), normed=True,
            label='soma with not')
        plotting.histogram(
            ax, diff_all, bins=50, color=colors.next(), normed=True,
            label='diff all')
    # ax.legend()
    return {'same fiber': same_fiber, 'same soma': same_soma,
            'bouton_with_fiber': bouton_with_fiber,
            'fiber_with_not': fiber_with_not,
            'soma_with_not': soma_with_not, 'diff all': diff_all}
def stim_response_heatmap(
        exptGrp, ax, stims, sort_by=None, method='responsiveness',
        z_score=True, aspect_ratio=0.25, **response_kwargs):
    """Plot a heatmap of per-ROI responses to each stimulus.

    Rows are ROIs and columns are stimuli. When ``sort_by`` is a single
    stimulus, rows are ordered by descending response to it; when it is a
    list of stimuli, rows are ordered by the descending mean response over
    the listed stimuli. Rows with a non-finite sort key are dropped.
    """
    data = ia.response_matrix(
        exptGrp, stims, method=method, z_score=z_score, **response_kwargs)
    if sort_by is not None:
        # Build one sort key per row: mean over several stimuli, or the
        # response to a single stimulus.
        if isinstance(sort_by, list):
            cols = [stims.index(stim) for stim in sort_by]
            sort_key = data[:, cols].mean(1)
        else:
            sort_key = data[:, stims.index(sort_by)]
        # Discard rows whose key is NaN/inf, then sort descending.
        finite = np.isfinite(sort_key)
        data = data[finite]
        data = data[sort_key[finite].argsort()[::-1]]
    ax.imshow(data, interpolation='none', aspect=aspect_ratio)
    # Stimulus labels go along the top; all tick marks are hidden.
    ax.xaxis.tick_top()
    ax.set_xticks(np.arange(len(stims)))
    ax.set_xticklabels(stims)
    ax.tick_params(labelbottom=False, bottom=False, left=False, top=False,
                   right=False)
| {
"repo_name": "losonczylab/Zaremba_NatNeurosci_2017",
"path": "losonczy_analysis_bundle/lab/plotting/analysis_plotting.py",
"copies": "1",
"size": "30484",
"license": "mit",
"hash": -8089800671851079000,
"line_mean": 37.5873417722,
"line_max": 119,
"alpha_frac": 0.5649193019,
"autogenerated": false,
"ratio": 3.6630617639990386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47279810658990384,
"avg_score": null,
"num_lines": null
} |
"""Analysis the start2 equipments data"""
__all__ = ['main']
import json
from collections import OrderedDict
from urllib.request import urlopen
from utils import python_data_to_lua_table
# Endpoint serving the decoded api_start2 master data.
START2_URL = 'https://acc.kcwiki.org/start2'
# Network timeout for the download, in seconds.
TIMEOUT_IN_SECOND = 10
# Local cache file for the downloaded start2 data.
START2_JSON = 'data/start2.json'
# Japanese -> Chinese name translation table.
JA_ZH_JSON = 'data/ja_zh.json'
# Equipment with api_id at or above this value is treated as shinkai
# equipment (see shinkai_parse_equips).
SINKAI_EQUIP_ID_BASE = 501
# Output paths for the generated Lua table and JSON dumps.
EQUIPS_LUA = 'lua/equips2.lua'
EQUIPS_JSON = 'json/equips2.json'
EQUIPS_HR_JSON = 'json/equips2_human_readable.json'
def shinkai_parse_equips(start2):
    """Get shinkai equipments stored by python OrderedDict.

    Builds an OrderedDict keyed by stringified api_id; each value is an
    OrderedDict of display fields (names, type, rarity, non-zero stats,
    and range) taken from the start2 master data.
    """
    shinkai_items = [item for item in start2['api_mst_slotitem']
                     if item['api_id'] >= SINKAI_EQUIP_ID_BASE]
    with open(JA_ZH_JSON, 'r', encoding='utf-8') as json_fp:
        ja_zh_table = json.load(json_fp)
    # (field label, api key) pairs; zero-valued stats are omitted below.
    stat_fields = [
        ('火力', 'api_houg'),
        ('雷装', 'api_raig'),
        ('爆装', 'api_baku'),
        ('对空', 'api_tyku'),
        ('装甲', 'api_souk'),
        ('对潜', 'api_tais'),
        ('命中', 'api_houm'),
        ('索敌', 'api_saku'),
        ('回避', 'api_houk')]
    range_names = ['无', '短', '中', '长', '超长', '超超长']
    result = OrderedDict()
    for item in shinkai_items:
        entry = OrderedDict()
        entry['日文名'] = item['api_name']
        # Chinese name from the translation table; empty string when missing.
        entry['中文名'] = ja_zh_table.get(item['api_name'], '')
        if not entry['中文名']:
            print('[{}] {} not found in file {}'.format(item['api_id'],
                                                        item['api_name'],
                                                        JA_ZH_JSON))
        # api_type = [大分類, 図鑑表示, カテゴリ, アイコンID, 航空機カテゴリ]
        # entry['类别'] = item['api_type'][2]
        # entry['图鉴'] = item['api_type'][3]
        entry['类型'] = item['api_type']
        entry['稀有度'] = item['api_rare']
        for label, api_key in stat_fields:
            if item[api_key] > 0:
                entry[label] = item[api_key]
        entry['射程'] = range_names[item['api_leng']]
        result[str(item['api_id'])] = entry
    return result
def shinkai_generate_equips_json(start2):
    """Generate shinkai equipment json.

    Writes two files: a compact machine-readable dump (EQUIPS_JSON) and an
    indented, non-ASCII-escaped human-readable dump (EQUIPS_HR_JSON).
    """
    shinkai_items = [item for item in start2['api_mst_slotitem']
                     if item['api_id'] >= SINKAI_EQUIP_ID_BASE]
    # Attach the Simplified Chinese name to every item (empty when unknown).
    with open(JA_ZH_JSON, 'r', encoding='utf-8') as json_fp:
        ja_zh_table = json.load(json_fp)
    for item in shinkai_items:
        item['api_zh_cn_name'] = ja_zh_table.get(item['api_name'], '')
        if not item['api_zh_cn_name']:
            print('[{}] {} not found in file {}'.format(item['api_id'],
                                                        item['api_name'],
                                                        JA_ZH_JSON))
    with open(EQUIPS_JSON, 'w', encoding='utf8') as json_fp:
        json.dump(shinkai_items, json_fp)
    with open(EQUIPS_HR_JSON, 'w', encoding='utf8') as json_fp:
        json.dump(shinkai_items, json_fp, ensure_ascii=False, indent='    ')
def shinkai_generate_equips_lua(start2):
    """Generate KcWiki shinkai equipment Lua table.

    Renders the parsed equipment dict as a Lua module assigned to
    ``d.equipDataTable`` and writes it to EQUIPS_LUA.
    """
    table_body, _ = python_data_to_lua_table(
        shinkai_parse_equips(start2), level=1)
    with open(EQUIPS_LUA, 'w', encoding='utf8') as lua_fp:
        # Module preamble, the rendered table body, then the module return.
        lua_fp.write('local d = {}\n\n'
                     + 'd.equipDataTable = {\n')
        lua_fp.write(table_body)
        lua_fp.write('\n}\n\nreturn d\n')
def load_start2_json(json_file):
    """Download the start2 master data, cache it, and return the parsed dict.

    Parameters
    ----------
    json_file : str
        Path where the raw downloaded JSON text is written as a cache.

    Returns
    -------
    dict
        The decoded start2 data.
    """
    # FIX: report the actual destination (the parameter), not the
    # module-level constant, in case a different path is passed in.
    print('Download start2 original file to {}'.format(json_file))
    with urlopen(url=START2_URL, timeout=TIMEOUT_IN_SECOND) as url_fp:
        data = url_fp.read().decode('utf8')
    # Keep a copy on disk so the raw download can be inspected or reused.
    with open(json_file, 'w', encoding='utf8') as json_fp:
        json_fp.write(data)
    # FIX: parse the text we already hold instead of re-opening and
    # re-reading the cache file we just wrote.
    return json.loads(data)
def main():
    """Main process: interactive menu for generating equipment outputs."""
    start2 = load_start2_json(START2_JSON)
    # Menu text is built once; the loop re-prints it every iteration.
    menu = ('[1] Generate Shinkai equipment Lua table\n'
            + '[2] Generate Shinkai equipment Json\n'
            + '\n[0] Exit')
    while True:
        print('== Equip ==')
        print(menu)
        choice = input('> ')
        if choice == '0':
            break
        if choice == '1':
            print('Generate Shinkai equipments Lua table')
            shinkai_generate_equips_lua(start2)
            print('Done')
        elif choice == '2':
            print('Generate Shinkai equipment Json')
            shinkai_generate_equips_json(start2)
            print('Done')
        else:
            print('Unknown choice: {}'.format(choice))
# Run the interactive menu only when executed as a script.
if __name__ == '__main__':
    main()
| {
"repo_name": "kcwikizh/kancolle-shinkai-db",
"path": "equip.py",
"copies": "1",
"size": "4802",
"license": "mit",
"hash": -7797976888027811000,
"line_mean": 34.8769230769,
"line_max": 74,
"alpha_frac": 0.5319468268,
"autogenerated": false,
"ratio": 2.9388783868935096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39708252136935096,
"avg_score": null,
"num_lines": null
} |
# Recognized sequencing analysis types for a case.
ANALYSIS_TYPES = ("wgs", "wes", "mixed", "unknown", "panel", "external")

# Keys identifying optional custom report files that can be attached to a case.
CUSTOM_CASE_REPORTS = [
    "multiqc",
    "cnv_report",
    "coverage_qc_report",
    "gene_fusion_report",
    "gene_fusion_report_research",
]

# Numeric/str sex codes -> human-readable sex; 0 and "other" map to "unknown".
SEX_MAP = {
    1: "male",
    2: "female",
    "other": "unknown",
    0: "unknown",
    "1": "male",
    "2": "female",
    "0": "unknown",
}
# Reverse lookup: human-readable sex -> sex code (as a string).
REV_SEX_MAP = {"male": "1", "female": "2", "unknown": "0"}

# Numeric phenotype codes (-9/0/1/2) -> affection status (rare disease cases).
PHENOTYPE_MAP = {1: "unaffected", 2: "affected", 0: "unknown", -9: "unknown"}
# Same codes interpreted for cancer cases (normal vs tumor sample).
CANCER_PHENOTYPE_MAP = {1: "normal", 2: "tumor", 0: "unknown", -9: "unknown"}
# Reverse lookup built from PHENOTYPE_MAP.  NOTE: "unknown" appears twice in
# PHENOTYPE_MAP, so the last code wins and "unknown" maps back to -9.
REV_PHENOTYPE_MAP = {value: key for key, value in PHENOTYPE_MAP.items()}

# Workflow states a case can be in.
CASE_STATUSES = ("prioritized", "inactive", "active", "solved", "archived")

# Event verb -> past-tense phrase used when rendering case activity/events.
VERBS_MAP = {
    "assign": "was assigned to",
    "unassign": "was unassigned from",
    "status": "updated the status for",
    "comment": "commented on",
    "comment_update": "updated a comment for",
    "synopsis": "updated synopsis for",
    "pin": "pinned variant",
    "unpin": "removed pinned variant",
    "sanger": "ordered sanger sequencing for",
    "cancel_sanger": "cancelled sanger order for",
    "archive": "archived",
    "open_research": "opened research mode for",
    "mark_causative": "marked causative for",
    "unmark_causative": "unmarked causative for",
    "mark_partial_causative": "mark partial causative for",
    "unmark_partial_causative": "unmarked partial causative for",
    "manual_rank": "updated manual rank for",
    "cancer_tier": "updated cancer tier for",
    "add_phenotype": "added HPO term for",
    "remove_phenotype": "removed HPO term for",
    "remove_variants": "removed variants for",
    "add_case": "added case",
    "update_case": "updated case",
    "update_individual": "updated individuals for",
    "check_case": "marked case as",
    "share": "shared case with",
    "unshare": "revoked access for",
    "rerun": "requested rerun of",
    "validate": "marked validation status for",
    "update_diagnosis": "updated diagnosis for",
    "add_cohort": "updated cohort for",
    "remove_cohort": "removed cohort for",
    "acmg": "updated ACMG classification for",
    "dismiss_variant": "dismissed variant for",
    "reset_dismiss_variant": "reset dismissed variant status for",
    "reset_dismiss_all_variants": "reset all dismissed variants for",
    "mosaic_tags": "updated mosaic tags for",
    "update_default_panels": "updated default panels for",
    "update_clinical_filter_hpo": "updated clinical filter HPO status for",
    "mme_add": "exported to MatchMaker patient",
    "mme_remove": "removed from MatchMaker patient",
    "filter_stash": "stored a filter for",
    "filter_audit": "marked case audited with filter",
    "update_sample": "updated sample data for",
    "update_case_group_ids": "updated case group ids for",
}

# Tissue types for rare disease samples and controls
SOURCES = [
    "blood",
    "bone marrow",
    "buccal swab",
    "cell line",
    "cell-free DNA",
    "cytology (FFPE)",
    "cytology (not fixed/fresh)",
    "muscle",
    "nail",
    "saliva",
    "skin",
    "tissue (FFPE)",
    "tissue (fresh frozen)",
    "CVB",
    "AC",
    "other fetal tissue",
    "other",
    "unknown",
]
# Integer index -> tissue type (order follows SOURCES).
SAMPLE_SOURCE = dict((i, el) for i, el in enumerate(SOURCES))

# Case-search fields: display label and the query prefix used for each.
CASE_SEARCH_TERMS = {
    "case": {"label": "Case or individual name", "prefix": "case:"},
    "exact_pheno": {
        "label": "HPO term",
        "prefix": "exact_pheno:",
    },
    "synopsis": {
        "label": "Search synopsis",
        "prefix": "synopsis:",
    },
    "panel": {"label": "Gene panel", "prefix": "panel:"},
    "status": {"label": "Case status", "prefix": "status:"},
    "track": {"label": "Analysis track", "prefix": "track:"},
    "pheno_group": {
        "label": "Phenotype group",
        "prefix": "pheno_group:",
    },
    "cohort": {"label": "Patient cohort", "prefix": "cohort:"},
    "Similar case": {
        "label": "Similar case",
        "prefix": "similar_case:",
    },
    "similar_pheno": {
        "label": "Similar phenotype",
        "prefix": "similar_pheno:",
    },
    "pinned": {"label": "Pinned gene", "prefix": "pinned:"},
    "causative": {"label": "Causative gene", "prefix": "causative:"},
    "user": {"label": "Assigned user", "prefix": "user:"},
}
| {
"repo_name": "Clinical-Genomics/scout",
"path": "scout/constants/case_tags.py",
"copies": "1",
"size": "4296",
"license": "bsd-3-clause",
"hash": 4120250516523967000,
"line_mean": 32.0461538462,
"line_max": 77,
"alpha_frac": 0.5996275605,
"autogenerated": false,
"ratio": 3.1869436201780417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42865711806780415,
"avg_score": null,
"num_lines": null
} |
"""Analysis visualization functions
"""
import numpy as np
from itertools import chain
from .._utils import string_types
def format_pval(pval, latex=True, scheme='default'):
    """Format a p-value using one of several schemes.

    Parameters
    ----------
    pval : float | array-like
        The raw p-value(s).
    latex : bool
        Whether to use LaTeX wrappers suitable for use with matplotlib.
    scheme : str
        A keyword indicating the formatting scheme. Currently supports "stars",
        "ross", and "default"; any other string will yield the same as
        "default".

    Returns
    -------
    pv : str | np.objectarray
        A string or array of strings of formatted p-values. If a list output is
        preferred, users may call ``.tolist()`` on the output of the function.
    """
    single_value = False
    if np.array(pval).shape == ():
        single_value = True
    pval = np.atleast_1d(np.asanyarray(pval))
    # add a tiny amount to handle cases where p is exactly a power of ten
    pval = pval + np.finfo(pval.dtype).eps
    expon = np.trunc(np.log10(pval)).astype(int)  # exponents
    pv = np.zeros_like(pval, dtype=object)
    if latex:
        wrap = '$'
        brk_l = '{{'
        brk_r = '}}'
    else:
        wrap = ''
        brk_l = ''
        brk_r = ''
    if scheme == 'ross':  # (exact value up to 4 decimal places)
        # BUGFIX: the comprehension must use the same mask (>=) as the
        # left-hand assignment; the original used ``pval > 0.0001`` here,
        # which could produce a length-mismatch ValueError for a value
        # landing exactly on the threshold.
        pv[pval >= 0.0001] = [wrap + 'p = {:.4f}'.format(x) + wrap
                              for x in pval[pval >= 0.0001]]
        pv[pval < 0.0001] = [wrap + 'p < 10^' + brk_l + '{}'.format(x) +
                             brk_r + wrap for x in expon[pval < 0.0001]]
    elif scheme == 'stars':
        star = '{*}' if latex else '*'
        pv[pval >= 0.05] = wrap + '' + wrap
        pv[pval < 0.05] = wrap + star + wrap
        pv[pval < 0.01] = wrap + star * 2 + wrap
        pv[pval < 0.001] = wrap + star * 3 + wrap
    else:  # scheme == 'default'
        pv[pval >= 0.05] = wrap + 'n.s.' + wrap
        pv[pval < 0.05] = wrap + 'p < 0.05' + wrap
        pv[pval < 0.01] = wrap + 'p < 0.01' + wrap
        pv[pval < 0.001] = wrap + 'p < 0.001' + wrap
        pv[pval < 0.0001] = [wrap + 'p < 10^' + brk_l + '{}'.format(x) +
                             brk_r + wrap for x in expon[pval < 0.0001]]
    if single_value:
        pv = pv[0]
    return(pv)
def _instantiate(obj, typ):
"""Returns obj if obj is not None, else returns new instance of typ
obj : an object
An object (most likely one that a user passed into a function) that,
if ``None``, should be initiated as an empty object of some other type.
typ : an object type
Expected values are list, dict, int, bool, etc.
"""
return typ() if obj is None else obj
def barplot(h, axis=-1, ylim=None, err_bars=None, lines=False,
            groups=None, eq_group_widths=False, gap_size=0.2,
            brackets=None, bracket_text=None, bracket_inline=False,
            bracket_group_lines=False, bar_names=None, group_names=None,
            bar_kwargs=None, err_kwargs=None, line_kwargs=None,
            bracket_kwargs=None, pval_kwargs=None, figure_kwargs=None,
            smart_defaults=True, fname=None, ax=None):
    """Makes barplots w/ optional line overlays, grouping, & signif. brackets.

    Parameters
    ----------
    h : array-like
        If `h` is 2-dimensional, heights will be calculated as means along
        the axis given by `axis`. If `h` is of lower dimension, it is
        treated as raw height values. If `h` is a `pandas.DataFrame` and
        `bar_names` is ``None``, `bar_names` will be inferred from the
        DataFrame's `column` labels (if ``axis=0``) or `index` labels.
    axis : int
        The axis along which to calculate mean values to determine bar heights.
        Ignored if `h` is 0- or 1-dimensional.
    ylim : tuple | None
        y-axis limits passed to `matplotlib.pyplot.subplot.set_ylim`.
    err_bars : str | array-like | None
        Type of error bars to be added to the barplot. Possible values are
        ``'sd'`` for sample standard deviation, ``'se'`` for standard error of
        the mean, or ``'ci'`` for 95% confidence interval. If ``None``, no
        error bars will be plotted. Custom error bar heights are possible by
        passing an array-like object; in such cases `err_bars` must have the
        same dimensionality and shape as `h`.
    lines : bool
        Whether to plot within-subject data as lines overlaid on the barplot.
    groups : list | None
        List of lists containing the integers in ``range(num_bars)``, with
        sub-lists indicating the desired grouping. For example, if `h` has
        has shape (10, 4) and ``axis = -1`` then "num_bars" is 4; if you want
        the first bar isolated and the remaining three grouped, then specify
        ``groups=[[0], [1, 2, 3]]``.
    eq_group_widths : bool
        Should all groups have the same width? If ``False``, all bars will have
        the same width. Ignored if `groups` is ``None``, since the bar/group
        distinction is meaningless in that case.
    gap_size : float
        Width of the gap between groups (if `eq_group_width` is ``True``) or
        between bars, expressed as a proportion [0,1) of group or bar width.
        Half the width of `gap_size` will be added between the outermost bars
        and the plot edges.
    brackets : list of tuples | None
        Location of significance brackets. Scheme is similar to the
        specification of `groups`; a bracket between the first and second bar
        and another between the third and fourth bars would be specified as
        ``brackets=[(0, 1), (2, 3)]``. Brackets between groups of bars instead
        of individual bars are specified as lists within the tuple:
        ``brackets=[([0, 1], [2, 3])]`` draws a single bracket between group
        ``[0, 1]`` and group ``[2, 3]``. For best results, pairs of adjacent
        bars should come earlier in the list than non-adjacent pairs.
    bracket_text : str | list | None
        Text to display above brackets.
    bracket_inline : bool
        If ``True``, bracket text will be vertically centered along a broken
        bracket line. If ``False``, text will be above the line.
    bracket_group_lines : bool
        When drawing brackets between groups rather than single bars, should a
        horizontal line be drawn at each foot of the bracket to indicate this?
    bar_names : array-like | None
        Optional axis labels for each bar.
    group_names : array-like | None
        Optional axis labels for each group.
    bar_kwargs : dict
        Arguments passed to ``matplotlib.pyplot.bar()`` (ex: color, linewidth).
    err_kwargs : dict
        Arguments passed to ``matplotlib.pyplot.bar(error_kw)`` (ex: ecolor,
        capsize).
    line_kwargs : dict
        Arguments passed to ``matplotlib.pyplot.plot()`` (e.g., color, marker,
        linestyle).
    bracket_kwargs : dict
        arguments passed to ``matplotlib.pyplot.plot()`` (e.g., color, marker,
        linestyle).
    pval_kwargs : dict
        Arguments passed to ``matplotlib.pyplot.annotate()`` when drawing
        bracket labels.
    figure_kwargs : dict
        arguments passed to ``matplotlib.pyplot.figure()`` (e.g., figsize, dpi,
        frameon).
    smart_defaults : bool
        Whether to use pyplot default colors (``False``), or something more
        pleasing to the eye (``True``).
    fname : str | None
        Path and name of output file. File type is inferred from the file
        extension of `fname` and should work for any of the types supported by
        pyplot (pdf, eps, svg, png, raw).
    ax : matplotlib.pyplot.axes | None
        A ``matplotlib.pyplot.axes`` instance. If ``None``, a new figure with
        a single subplot will be created.

    Returns
    -------
    p : handle for the ``matplotlib.pyplot.subplot`` instance.
    b : handle for the ``matplotlib.pyplot.bar`` instance.

    Notes
    -----
    Known limitations:
    1 Bracket heights don't get properly set when generating multiple
      subplots with ``sharey=True`` (matplotlib seems to temporarily force
      the ``ylim`` to +/- 0.6 in this case). Work around is to use
      ``sharey=False`` and manually set ``ylim`` for each subplot.
    2 Brackets that span groups cannot span partial groups. For example,
      if ``groups=[[0, 1, 2], [3, 4]]`` it is impossible to have a bracket
      at ``[(0, 1), (3, 4)]``... it is only possible to do, e.g.,
      ``[0, (3, 4)]`` (single bar vs group) or ``[(0, 1, 2), (3, 4)]``
      (full group vs full group).
    3 Bracket drawing is much better when adjacent pairs of bars are
      specified before non-adjacent pairs of bars.

    Smart defaults sets the following parameters:
    bar color: light gray (70%)
    error bar color: black
    line color: black
    bracket color: dark gray (30%)
    """
    # Imported lazily so merely importing this module does not require
    # matplotlib (or pandas, which is optional).
    from matplotlib import pyplot as plt, rcParams
    try:
        from pandas.core.frame import DataFrame
    except Exception:
        DataFrame = None
    # be nice to pandas
    if DataFrame is not None:
        if isinstance(h, DataFrame) and bar_names is None:
            bar_names = h.columns.tolist() if axis == 0 else h.index.tolist()
    # check arg errors
    if gap_size < 0 or gap_size >= 1:
        raise ValueError('Barplot argument "gap_size" must be in the range '
                         '[0, 1).')
    if err_bars is not None:
        if isinstance(err_bars, string_types) and \
                err_bars not in ['sd', 'se', 'ci']:
            raise ValueError('err_bars must be "sd", "se", or "ci" (or an '
                             'array of error bar magnitudes).')
    if brackets is not None:
        if any([len(x) != 2 for x in brackets]):
            raise ValueError('Each top-level element of brackets must have '
                             'length 2.')
        if not len(brackets) == len(bracket_text):
            raise ValueError('Mismatch between number of brackets and bracket '
                             'labels.')
    # handle single-element args
    if isinstance(bracket_text, string_types):
        bracket_text = [bracket_text]
    if isinstance(group_names, string_types):
        group_names = [group_names]
    # arg defaults: if arg is None, instantiate as given type
    brackets = _instantiate(brackets, list)
    bar_kwargs = _instantiate(bar_kwargs, dict)
    err_kwargs = _instantiate(err_kwargs, dict)
    line_kwargs = _instantiate(line_kwargs, dict)
    pval_kwargs = _instantiate(pval_kwargs, dict)
    figure_kwargs = _instantiate(figure_kwargs, dict)
    bracket_kwargs = _instantiate(bracket_kwargs, dict)
    # user-supplied Axes
    if ax is not None:
        bar_kwargs['axes'] = ax
    # smart defaults
    if smart_defaults:
        if 'color' not in bar_kwargs.keys():
            bar_kwargs['color'] = '0.7'
        if 'color' not in line_kwargs.keys():
            line_kwargs['color'] = 'k'
        if 'ecolor' not in err_kwargs.keys():
            err_kwargs['ecolor'] = 'k'
        if 'color' not in bracket_kwargs.keys():
            bracket_kwargs['color'] = '0.3'
    # fix bar alignment (defaults to 'center' in more recent versions of MPL)
    if 'align' not in bar_kwargs.keys():
        bar_kwargs['align'] = 'edge'
    # parse heights
    h = np.array(h)
    if len(h.shape) > 2:
        raise ValueError('Barplot "h" must have 2 or fewer dimensions.')
    heights = np.atleast_1d(h) if h.ndim < 2 else h.mean(axis=axis)
    # grouping
    num_bars = len(heights)
    if groups is None:
        groups = [[x] for x in range(num_bars)]
    groups = [list(x) for x in groups]  # forgive list/tuple mix-ups
    # calculate bar positions (in units where one group/bar is width 1,
    # minus the requested gap proportion)
    non_gap = 1 - gap_size
    offset = gap_size / 2.
    if eq_group_widths:
        # every group occupies the same total width; bars within a group
        # split that width evenly
        group_sizes = np.array([float(len(_grp)) for _grp in groups], int)
        group_widths = [non_gap for _ in groups]
        group_edges = [offset + _ix for _ix in range(len(groups))]
        group_ixs = list(chain.from_iterable([range(x) for x in group_sizes]))
        bar_widths = np.repeat(np.array(group_widths) / group_sizes,
                               group_sizes).tolist()
        bar_edges = (np.repeat(group_edges, group_sizes) +
                     bar_widths * np.array(group_ixs)).tolist()
    else:
        # every bar has the same width; groups are separated by gap_size
        bar_widths = [[non_gap for _ in _grp] for _grp in groups]
        # next line: offset + cumul. gap widths + cumul. bar widths
        bar_edges = [[offset + _ix * gap_size + _bar * non_gap
                      for _bar in _grp] for _ix, _grp in enumerate(groups)]
        group_widths = [np.sum(_width) for _width in bar_widths]
        group_edges = [_edge[0] for _edge in bar_edges]
        bar_edges = list(chain.from_iterable(bar_edges))
        bar_widths = list(chain.from_iterable(bar_widths))
    bar_centers = np.array(bar_edges) + np.array(bar_widths) / 2.
    group_centers = np.array(group_edges) + np.array(group_widths) / 2.
    # calculate error bars
    err = np.zeros(num_bars)  # default if no err_bars
    if err_bars is not None:
        if h.ndim == 2:
            if err_bars == 'sd':  # sample standard deviation
                err = h.std(axis)
            elif err_bars == 'se':  # standard error
                err = h.std(axis) / np.sqrt(h.shape[axis])
            else:  # 95% conf int
                err = 1.96 * h.std(axis) / np.sqrt(h.shape[axis])
        else:  # h.ndim == 1
            if isinstance(err_bars, string_types):
                raise ValueError('string arguments to "err_bars" ignored when '
                                 '"h" has fewer than 2 dimensions.')
            elif not h.shape == np.array(err_bars).shape:
                raise ValueError('When "err_bars" is array-like it must have '
                                 'the same shape as "h".')
            err = np.atleast_1d(err_bars)
        bar_kwargs['yerr'] = err
    # plot (bars and error bars)
    if ax is None:
        plt.figure(**figure_kwargs)
        p = plt.subplot(111)
    else:
        p = ax
    b = p.bar(bar_edges, heights, bar_widths, error_kw=err_kwargs,
              **bar_kwargs)
    # plot within-subject lines
    if lines:
        _h = h if axis == 0 else h.T
        xy = [(bar_centers, hts) for hts in _h]
        for subj in xy:
            p.plot(subj[0], subj[1], **line_kwargs)
    # draw significance brackets
    if len(brackets):
        brackets = [tuple(x) for x in brackets]  # forgive list/tuple mix-ups
        brk_offset = np.diff(p.get_ylim()) * 0.025
        brk_min_h = np.diff(p.get_ylim()) * 0.05
        # temporarily plot a textbox to get its height, then remove it; the
        # height (in data coords) is used to stack brackets without overlap
        t = plt.annotate(bracket_text[0], (0, 0), **pval_kwargs)
        t.set_bbox(dict(boxstyle='round, pad=0.25'))
        plt.draw()
        bb = t.get_bbox_patch().get_window_extent()
        txth = np.diff(p.transData.inverted().transform(bb),
                       axis=0).ravel()[-1]
        if bracket_inline:
            txth = txth / 2.
        t.remove()
        # find highest points
        if lines and h.ndim == 2:  # brackets must be above lines & error bars
            apex = np.amax(np.r_[np.atleast_2d(heights + err),
                                 np.atleast_2d(np.amax(h, axis))], axis=0)
        else:
            apex = np.atleast_1d(heights + err)
        apex = np.maximum(apex, 0)  # for negative-going bars
        apex = apex + brk_offset
        gr_apex = np.array([np.amax(apex[_g]) for _g in groups])
        # boolean for whether each half of a bracket is a group
        is_group = [[hasattr(_b, 'append') for _b in _br] for _br in brackets]
        # bracket left & right coords
        brk_lr = [[group_centers[groups.index(_ix)] if _g
                   else bar_centers[_ix] for _ix, _g in zip(_brk, _isg)]
                  for _brk, _isg in zip(brackets, is_group)]
        # bracket L/R midpoints (label position)
        brk_c = [np.mean(_lr) for _lr in brk_lr]
        # bracket bottom coords (first pass)
        brk_b = [[gr_apex[groups.index(_ix)] if _g else apex[_ix]
                  for _ix, _g in zip(_brk, _isg)]
                 for _brk, _isg in zip(brackets, is_group)]
        # main bracket positioning loop: brackets are placed in order,
        # each one pushing the running "apex" of its spanned bars upward so
        # later brackets clear earlier ones and their labels
        brk_t = []
        for _ix, (_brk, _isg) in enumerate(zip(brackets, is_group)):
            # which bars does this bracket span?
            spanned_bars = list(chain.from_iterable(
                [_b if hasattr(_b, 'append') else [_b] for _b in _brk]))
            spanned_bars = range(min(spanned_bars), max(spanned_bars) + 1)
            # raise apex a bit extra if prev bracket label centered on bar
            prev_label_pos = brk_c[_ix - 1] if _ix else -1
            label_bar_ix = np.where(np.isclose(bar_centers, prev_label_pos))[0]
            if any(np.array_equal(label_bar_ix, x) for x in _brk):
                apex[label_bar_ix] += txth
            elif any(_isg):
                label_bar_less = np.where(bar_centers < prev_label_pos)[0]
                label_bar_more = np.where(bar_centers > prev_label_pos)[0]
                if len(label_bar_less) and len(label_bar_more):
                    apex[label_bar_less] += txth
                    apex[label_bar_more] += txth
            gr_apex = np.array([np.amax(apex[_g]) for _g in groups])
            # recalc lower tips of bracket: apex / gr_apex may have changed
            brk_b[_ix] = [gr_apex[groups.index(_b)] if _g else apex[_b]
                          for _b, _g in zip(_brk, _isg)]
            # calculate top span position
            _min_t = max(apex[spanned_bars]) + brk_min_h
            brk_t.append(_min_t)
            # raise apex on spanned bars to account for bracket
            apex[spanned_bars] = np.maximum(apex[spanned_bars],
                                            _min_t) + brk_offset
            gr_apex = np.array([np.amax(apex[_g]) for _g in groups])
        # draw horz line spanning groups if desired
        if bracket_group_lines:
            for _brk, _isg, _blr in zip(brackets, is_group, brk_b):
                for _bk, _g, _b in zip(_brk, _isg, _blr):
                    if _g:
                        _lr = [bar_centers[_ix]
                               for _ix in groups[groups.index(_bk)]]
                        _lr = (min(_lr), max(_lr))
                        p.plot(_lr, (_b, _b), **bracket_kwargs)
        # draw (left, right, bottom-left, bottom-right, top, center, string)
        for ((_l, _r), (_bl, _br), _t, _c, _s) in zip(brk_lr, brk_b, brk_t,
                                                      brk_c, bracket_text):
            # bracket text
            _t = float(_t)  # on newer Pandas it can be shape (1,)
            defaults = dict(ha='center', annotation_clip=False,
                            textcoords='offset points')
            for k, v in defaults.items():
                if k not in pval_kwargs.keys():
                    pval_kwargs[k] = v
            if 'va' not in pval_kwargs.keys():
                pval_kwargs['va'] = 'center' if bracket_inline else 'baseline'
            if 'xytext' not in pval_kwargs.keys():
                pval_kwargs['xytext'] = (0, 0) if bracket_inline else (0, 2)
            txt = p.annotate(_s, (_c, _t), **pval_kwargs)
            txt.set_bbox(dict(facecolor='w', alpha=0,
                              boxstyle='round, pad=0.2'))
            plt.draw()
            # bracket lines
            lline = ((_l, _l), (_bl, _t))
            rline = ((_r, _r), (_br, _t))
            tline = ((_l, _r), (_t, _t))
            if bracket_inline:
                # break the top line around the (centered) label text
                bb = txt.get_bbox_patch().get_window_extent()
                txtw = np.diff(p.transData.inverted().transform(bb),
                               axis=0).ravel()[0]
                _m = _c - txtw / 2.
                _n = _c + txtw / 2.
                tline = [((_l, _m), (_t, _t)), ((_n, _r), (_t, _t))]
            else:
                tline = [((_l, _r), (_t, _t))]
            for x, y in [lline, rline] + tline:
                p.plot(x, y, **bracket_kwargs)
            # boost ymax if needed
            ybnd = p.get_ybound()
            if ybnd[-1] < _t + txth:
                p.set_ybound(ybnd[0], _t + txth)
    # annotation
    box_off(p)
    p.tick_params(axis='x', length=0, pad=12)
    p.xaxis.set_ticks(bar_centers)
    if bar_names is not None:
        p.xaxis.set_ticklabels(bar_names, va='baseline')
    if group_names is not None:
        ymin = ylim[0] if ylim is not None else p.get_ylim()[0]
        yoffset = -2.5 * rcParams['font.size']
        for gn, gp in zip(group_names, group_centers):
            p.annotate(gn, xy=(gp, ymin), xytext=(0, yoffset),
                       xycoords='data', textcoords='offset points',
                       ha='center', va='baseline')
    # axis limits
    p.set_xlim(0, bar_edges[-1] + bar_widths[-1] + gap_size / 2)
    if ylim is not None:
        p.set_ylim(ylim)
    # output file
    if fname is not None:
        from os.path import splitext
        fmt = splitext(fname)[-1][1:]
        plt.savefig(fname, format=fmt, transparent=True)
    # return handles for subplot and barplot instances
    plt.draw()
    return (p, b)
def box_off(ax):
    """Remove the top and right edges of a plot frame, and point ticks outward.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        A matplotlib plot or subplot object.
    """
    # Keep tick marks only on the bottom and left axes.
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    # Point all remaining ticks away from the plot area.
    for which_axis in ('x', 'y'):
        ax.tick_params(axis=which_axis, direction='out')
    # Hide the top and right frame edges.
    for side in ('right', 'top'):
        ax.spines[side].set_color('none')
def plot_screen(screen, ax=None):
    """Plot a captured screenshot

    Parameters
    ----------
    screen : array
        The N x M x 3 (or 4) array of screen pixel values.
    ax : matplotlib Axes | None
        If provided, the axes will be plotted to and cleared of ticks.
        If None, a figure will be created.

    Returns
    -------
    ax : matplotlib Axes
        The axes used to plot the image.
    """
    import matplotlib.pyplot as plt
    pixels = np.array(screen)
    # Require an RGB or RGBA image (last dimension of 3 or 4 channels).
    valid = pixels.ndim == 3 and pixels.shape[2] in (3, 4)
    if not valid:
        raise ValueError('screen must be a 3D array with 3 or 4 channels')
    if ax is None:
        plt.figure()
        ax = plt.axes([0, 0, 1, 1])
    ax.imshow(pixels)
    ax.axis('off')
    return ax
| {
"repo_name": "LABSN/expyfun",
"path": "expyfun/analyze/_viz.py",
"copies": "2",
"size": "22433",
"license": "bsd-3-clause",
"hash": -4701717880586258000,
"line_mean": 43.7764471058,
"line_max": 79,
"alpha_frac": 0.5615388044,
"autogenerated": false,
"ratio": 3.606011895193699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 501
} |
"""Analysis visualization functions
"""
import numpy as np
from itertools import chain
try:
import matplotlib.pyplot as plt
from matplotlib import rcParams
except ImportError:
plt = None
try:
from pandas.core.frame import DataFrame
except ImportError:
DataFrame = None
from .._utils import string_types
def format_pval(pval, latex=True, scheme='default'):
    """Format a p-value using one of several schemes.

    Parameters
    ----------
    pval : float | array-like
        The raw p-value(s).
    latex : bool
        Whether to use LaTeX wrappers suitable for use with matplotlib.
    scheme : str
        A keyword indicating the formatting scheme. Currently supports "stars",
        "ross", and "default"; any other string will yield the same as
        "default".

    Returns
    -------
    pv : str | np.objectarray
        A string or array of strings of formatted p-values. If a list output is
        preferred, users may call ``.tolist()`` on the output of the function.
    """
    single_value = False
    if np.array(pval).shape == ():
        single_value = True
    pval = np.atleast_1d(np.asanyarray(pval))
    # np.finfo only exists for float dtypes; promote ints so integer input
    # (e.g. format_pval(1)) does not raise
    if not np.issubdtype(pval.dtype, np.floating):
        pval = pval.astype(np.float64)
    # add a tiny amount to handle cases where p is exactly a power of ten
    pval = pval + np.finfo(pval.dtype).eps
    expon = np.trunc(np.log10(pval)).astype(int)  # exponents
    pv = np.zeros_like(pval, dtype=object)
    if latex:
        wrap = '$'
        brac = '{{'
        brak = '}}'
    else:
        wrap = ''
        brac = ''
        brak = ''
    if scheme == 'ross':  # (exact value up to 4 decimal places)
        # NOTE: LHS mask and RHS comprehension must select the same elements
        # (upstream used ``> 0.0001`` on the right, which could raise a
        # length-mismatch error on boundary values)
        pv[pval >= 0.0001] = [wrap + 'p = {:.4f}'.format(x) + wrap
                              for x in pval[pval >= 0.0001]]
        pv[pval < 0.0001] = [wrap + 'p < 10^' + brac + '{}'.format(x) + brak +
                             wrap for x in expon[pval < 0.0001]]
    elif scheme == 'stars':
        star = '{*}' if latex else '*'
        # later assignments overwrite earlier ones, so masks may overlap
        pv[pval >= 0.05] = wrap + '' + wrap
        pv[pval < 0.05] = wrap + star + wrap
        pv[pval < 0.01] = wrap + star * 2 + wrap
        pv[pval < 0.001] = wrap + star * 3 + wrap
    else:  # scheme == 'default'
        pv[pval >= 0.05] = wrap + 'n.s.' + wrap
        pv[pval < 0.05] = wrap + 'p < 0.05' + wrap
        pv[pval < 0.01] = wrap + 'p < 0.01' + wrap
        pv[pval < 0.001] = wrap + 'p < 0.001' + wrap
        pv[pval < 0.0001] = [wrap + 'p < 10^' + brac + '{}'.format(x) + brak +
                             wrap for x in expon[pval < 0.0001]]
    if single_value:
        pv = pv[0]
    return(pv)
def barplot(h, axis=-1, ylim=None, err_bars=None, lines=False,
            groups=None, eq_group_widths=False, gap_size=0.2,
            brackets=None, bracket_text=None, bracket_group_lines=False,
            bar_names=None, group_names=None, bar_kwargs=None,
            err_kwargs=None, line_kwargs=None, bracket_kwargs=None,
            figure_kwargs=None, smart_defaults=True, fname=None, ax=None):
    """Makes barplots w/ optional line overlays, grouping, & signif. brackets.
    Parameters
    ----------
    h : array-like
        If ``h`` is 2-dimensional, heights will be calculated as means along
        the axis given by ``axis``. If ``h`` is of lower dimension, it is
        treated as raw height values. If ``h`` is a pandas ``DataFrame`` and
        ``bar_names`` is None, ``bar_names`` will be inferred from the
        ``DataFrame``'s ``column`` labels (if ``axis=0``) or ``index`` labels.
    axis : int
        The axis along which to calculate mean values to determine bar heights.
        Ignored if ``h`` is 0- or 1-dimensional.
    ylim : tuple | None
        y-axis limits passed to ``matplotlib.pyplot.subplot.set_ylim()``.
    err_bars : str | array-like | None
        Type of error bars to be added to the barplot. Possible values are
        ``'sd'`` for sample standard deviation, ``'se'`` for standard error of
        the mean, or ``'ci'`` for 95% confidence interval. If ``None``, no
        error bars will be plotted. Custom error bar heights are possible by
        passing an array-like object; in such cases ``err_bars`` must have the
        same dimensionality and shape as ``h``.
    lines : bool
        Whether to plot within-subject data as lines overlaid on the barplot.
    groups : list | None
        List of lists containing the integers in ``range(num_bars)``, with
        sub-lists indicating the desired grouping. For example, if ``h`` has
        has shape (10, 4) and ``axis = -1`` then "num_bars" is 4; if you want
        the first bar isolated and the remaining three grouped, then specify
        ``groups=[[0], [1, 2, 3]]``.
    eq_group_widths : bool
        Should all groups have the same width? If ``False``, all bars will have
        the same width. Ignored if ``groups=None``, since the bar/group
        distinction is meaningless in that case.
    gap_size : float
        Width of the gap between groups (if ``eq_group_width = True``) or
        between bars, expressed as a proportion [0,1) of group or bar width.
    brackets : list of tuples | None
        Location of significance brackets. Scheme is similar to ``grouping``;
        if you want a bracket between the first and second bar and another
        between the third and fourth bars, specify as [(0, 1), (2, 3)]. If you
        want brackets between groups of bars instead of between bars, indicate
        the groups as lists within the tuple: [([0, 1], [2, 3])].
        For best results, pairs of adjacent bars should come earlier in the
        list than non-adjacent pairs.
    bracket_text : str | list | None
        Text to display above brackets.
    bracket_group_lines : bool
        When drawing brackets between groups rather than single bars, should a
        horizontal line be drawn at each foot of the bracket to indicate this?
    bar_names : array-like | None
        Optional axis labels for each bar.
    group_names : array-like | None
        Optional axis labels for each group.
    bar_kwargs : dict
        Arguments passed to ``matplotlib.pyplot.bar()`` (ex: color, linewidth).
    err_kwargs : dict
        Arguments passed to ``matplotlib.pyplot.bar(error_kw)`` (ex: ecolor,
        capsize).
    line_kwargs : dict
        Arguments passed to ``matplotlib.pyplot.plot()`` (e.g., color, marker,
        linestyle).
    bracket_kwargs : dict
        arguments passed to ``matplotlib.pyplot.plot()`` (e.g., color, marker,
        linestyle).
    figure_kwargs : dict
        arguments passed to ``matplotlib.pyplot.figure()`` (e.g., figsize, dpi,
        frameon).
    smart_defaults : bool
        Whether to use pyplot default colors (``False``), or something more
        pleasing to the eye (``True``).
    fname : str | None
        Path and name of output file. Type is inferred from ``fname`` and
        should work for any of the types supported by pyplot (pdf, eps,
        svg, png, raw).
    ax : matplotlib.pyplot.axes | None
        A ``matplotlib.pyplot.axes`` instance. If none, a new figure with a
        single subplot will be created.
    Returns
    -------
    p : handle for the ``matplotlib.pyplot.subplot`` instance.
    b : handle for the ``matplotlib.pyplot.bar`` instance.
    Notes
    -----
    Known limitations:
    1 Bracket heights don't get properly set when generating multiple
      subplots with ``sharey=True`` (matplotlib seems to temporarily force
      the ``ylim`` to +/- 0.6 in this case). Work around is to use
      ``sharey=False`` and manually set ``ylim`` for each subplot.
    2 Brackets that span groups cannot span partial groups. For example,
      if ``groups=[[0, 1, 2], [3, 4]]`` it is impossible to have a bracket
      at ``[(0, 1), (3, 4)]``... it is only possible to do, e.g.,
      ``[0, (3, 4)]`` (single bar vs group) or ``[(0, 1, 2), (3, 4)]``
      (full group vs full group).
    3 Bracket drawing is much better when adjacent pairs of bars are
      specified before non-adjacent pairs of bars.
    Smart defaults sets the following parameters:
    bar color: light gray (70%)
    error bar color: black
    line color: black
    bracket color: dark gray (30%)
    """
    # check matplotlib (module-level import may have failed; plt is None then)
    if plt is None:
        raise ImportError('Barplot requires matplotlib.pyplot.')
    # be nice to pandas: infer bar labels from a DataFrame's column/index
    if DataFrame is not None:
        if isinstance(h, DataFrame) and bar_names is None:
            if axis == 0:
                bar_names = h.columns.tolist()
            else:
                bar_names = h.index.tolist()
    # check arg errors
    if gap_size < 0 or gap_size >= 1:
        raise ValueError('Barplot argument "gap_size" must be in the range '
                         '[0, 1).')
    if err_bars is not None:
        if isinstance(err_bars, string_types) and \
                err_bars not in ['sd', 'se', 'ci']:
            raise ValueError('err_bars must be "sd", "se", or "ci" (or an '
                             'array of error bar magnitudes).')
    # handle single-element args: promote bare strings to one-element lists
    if isinstance(bracket_text, string_types):
        bracket_text = [bracket_text]
    if isinstance(group_names, string_types):
        group_names = [group_names]
    # arg defaults: empty kwargs dicts when the caller supplied none
    if bar_kwargs is None:
        bar_kwargs = dict()
    if err_kwargs is None:
        err_kwargs = dict()
    if line_kwargs is None:
        line_kwargs = dict()
    if bracket_kwargs is None:
        bracket_kwargs = dict()
    if figure_kwargs is None:
        figure_kwargs = dict()
    # user-supplied Axes: route the bar call onto the given axes
    if ax is not None:
        bar_kwargs['axes'] = ax
    # smart defaults (only fill in colors the caller did not specify)
    if smart_defaults:
        if 'color' not in bar_kwargs.keys():
            bar_kwargs['color'] = '0.7'
        if 'color' not in line_kwargs.keys():
            line_kwargs['color'] = 'k'
        if 'ecolor' not in err_kwargs.keys():
            err_kwargs['ecolor'] = 'k'
        if 'color' not in bracket_kwargs.keys():
            bracket_kwargs['color'] = '0.3'
    # parse heights: 2-D input is averaged along `axis`; lower dims are raw
    h = np.array(h)
    if len(h.shape) > 2:
        raise ValueError('Barplot "h" must have 2 or fewer dimensions.')
    elif len(h.shape) < 2:
        heights = np.atleast_1d(h)
    else:
        heights = h.mean(axis=axis)
    # grouping: default is each bar in its own group
    num_bars = len(heights)
    if groups is None:
        groups = [[x] for x in range(num_bars)]
    groups = [list(x) for x in groups]  # forgive list/tuple mix-ups
    num_groups = len(groups)
    # compute per-bar and per-group geometry (edges/widths in data coords)
    if eq_group_widths:
        group_widths = [1. - gap_size for _ in range(num_groups)]
        group_edges = [x + gap_size / 2. for x in range(num_groups)]
        bar_widths = [[(1. - gap_size) / len(x) for _ in enumerate(x)]
                      for x in groups]
        bar_edges = [[gap_size / 2. + grp + (1. - gap_size) * bar / len(x) for
                      bar, _ in enumerate(x)] for grp, x in enumerate(groups)]
    else:
        bar_widths = [[1. - gap_size for _ in x] for x in groups]
        bar_edges = [[gap_size / 2. + grp * gap_size + (1. - gap_size) * bar
                      for bar in x] for grp, x in enumerate(groups)]
        group_widths = [np.sum(x) for x in bar_widths]
        group_edges = [x[0] for x in bar_edges]
    # flatten the nested per-group lists into flat per-bar lists
    bar_edges = list(chain.from_iterable(bar_edges))
    bar_widths = list(chain.from_iterable(bar_widths))
    bar_centers = np.array(bar_edges) + np.array(bar_widths) / 2.
    group_centers = np.array(group_edges) + np.array(group_widths) / 2.
    # calculate error bars
    err = np.zeros(num_bars)  # default if no err_bars
    if err_bars is not None:
        if len(h.shape) == 2:
            if err_bars == 'sd':  # sample standard deviation
                err = h.std(axis)
            elif err_bars == 'se':  # standard error
                err = h.std(axis) / np.sqrt(h.shape[axis])
            else:  # 95% conf int
                err = 1.96 * h.std(axis) / np.sqrt(h.shape[axis])
        else:  # len(h.shape) == 1
            if isinstance(err_bars, string_types):
                raise ValueError('string arguments to "err_bars" ignored when '
                                 '"h" has fewer than 2 dimensions.')
            elif not h.shape == np.array(err_bars).shape:
                raise ValueError('When "err_bars" is array-like it must have '
                                 'the same shape as "h".')
            err = np.atleast_1d(err_bars)
        bar_kwargs['yerr'] = err
    # plot (bars and error bars)
    if ax is None:
        plt.figure(**figure_kwargs)
        p = plt.subplot(1, 1, 1)
    else:
        p = ax
    b = p.bar(bar_edges, heights, bar_widths, error_kw=err_kwargs,
              **bar_kwargs)
    # plot within-subject lines (one line per row/column of h across bars)
    if lines:
        if axis == 0:
            xy = [(bar_centers, hts) for hts in h]
        else:
            xy = [(bar_centers, hts) for hts in h.T]
        for subj in xy:
            p.plot(subj[0], subj[1], **line_kwargs)
    # draw significance brackets
    if brackets is not None:
        brackets = [tuple(x) for x in brackets]  # forgive list/tuple mix-ups
        if not len(brackets) == len(bracket_text):
            raise ValueError('Mismatch between number of brackets and bracket '
                             'labels.')
        # vertical padding/height expressed as fractions of the y-range
        brk_offset = np.diff(p.get_ylim()) * 0.025
        brk_height = np.diff(p.get_ylim()) * 0.05
        # prelim: calculate text height by drawing a throwaway text object
        t = plt.text(0.5, 0.5, bracket_text[0])
        t.set_bbox(dict(boxstyle='round, pad=0'))
        plt.draw()
        bb = t.get_bbox_patch().get_window_extent()
        txth = np.diff(p.transData.inverted().transform(bb),
                       axis=0).ravel()[-1]  # + brk_offset / 2.
        t.remove()
        # find highest points (bar top + error bar, and overlaid lines)
        if lines and len(h.shape) == 2:  # brackets must be above lines
            apex = np.max(np.r_[np.atleast_2d(heights + err),
                                np.atleast_2d(np.max(h, axis))], axis=0)
        else:
            apex = np.atleast_1d(heights + err)
        apex = np.maximum(apex, 0)  # for negative-going bars
        gr_apex = np.array([np.max(apex[x]) for x in groups])
        # calculate bracket coords; these lists grow one entry per bracket
        brk_lrx = []
        brk_lry = []
        brk_top = []
        brk_txt = []
        for pair in brackets:
            lr = []  # x
            ll = []  # y lower
            hh = []  # y upper
            ed = 0
            for br in pair:
                if hasattr(br, 'append'):  # group
                    bri = groups.index(br)
                    curc = [bar_centers[x] for x in groups[bri]]
                    curx = group_centers[bri]
                    cury = float(gr_apex[bri] + brk_offset)
                else:  # single bar
                    curc = []
                    curx = bar_centers[br]
                    cury = float(apex[br] + brk_offset)
                # adjust as necessary to avoid overlap with earlier brackets
                # (NOTE(review): depends on processing order — adjacent pairs
                # first, per the docstring's known limitation #3)
                allx = np.array(brk_lrx).ravel().tolist()
                if curx in brk_txt:
                    count = brk_txt.count(curx)
                    mustclear = brk_top[brk_txt.index(curx)] + \
                        count * (txth + brk_offset) - brk_offset
                    for x in curc:
                        ix = len(allx) - allx[::-1].index(x) - 1
                        mustclear = max(mustclear, brk_top[ix // 2])
                    cury = mustclear + brk_offset
                elif curx in allx:
                    #count = allx.count(curx)
                    ix = len(allx) - allx[::-1].index(curx) - 1
                    cury = brk_top[ix // 2] + brk_offset  # * count
                # ed counts earlier brackets this one's upright must clear
                for l, r in brk_lrx:
                    if l < curx < r and cury < max(brk_top):
                        ed += 1
                # draw horiz line spanning groups if desired
                if hasattr(br, 'append') and bracket_group_lines:
                    gbr = [bar_centers[x] for x in groups[bri]]
                    gbr = (min(gbr), max(gbr))
                    p.plot(gbr, (cury, cury), **bracket_kwargs)
                # store adjusted values
                lr.append(curx)
                ll.append(cury)
                hh.append(cury + brk_height + ed * txth)
            brk_lrx.append(tuple(lr))
            brk_lry.append(tuple(ll))
            brk_top.append(np.max(hh))
            brk_txt.append(np.mean(lr))  # text x
        # plot brackets: two uprights plus a horizontal crossbar each
        for ((xl, xr), (yl, yr), yh, tx, st) in zip(brk_lrx, brk_lry, brk_top,
                                                    brk_txt, bracket_text):
            # bracket lines
            lline = ((xl, xl), (yl, yh))
            rline = ((xr, xr), (yr, yh))
            hline = ((xl, xr), (yh, yh))
            for x, y in [lline, rline, hline]:
                p.plot(x, y, **bracket_kwargs)
            # bracket text
            txt = p.annotate(st, (tx, yh), xytext=(0, 2),
                             textcoords='offset points', ha='center',
                             va='baseline', annotation_clip=False)
            txt.set_bbox(dict(facecolor='w', alpha=0, boxstyle='round, pad=1'))
            # boost ymax if needed so the bracket text stays inside the axes
            ybnd = p.get_ybound()
            if ybnd[-1] < yh + txth:
                p.set_ybound(ybnd[0], yh + txth)
    # annotation
    box_off(p)
    p.tick_params(axis='x', length=0, pad=12)
    p.xaxis.set_ticks(bar_centers)
    if bar_names is not None:
        p.xaxis.set_ticklabels(bar_names, va='baseline')
    if group_names is not None:
        # group labels drawn below the bar labels via a negative point offset
        ymin = ylim[0] if ylim is not None else p.get_ylim()[0]
        yoffset = -2 * rcParams['font.size']
        for gn, gp in zip(group_names, group_centers):
            p.annotate(gn, xy=(gp, ymin), xytext=(0, yoffset),
                       xycoords='data', textcoords='offset points',
                       ha='center', va='baseline')
    # axis limits
    p.set_xlim(0, bar_edges[-1] + bar_widths[-1] + gap_size / 2)
    if ylim is not None:
        p.set_ylim(ylim)
    # output file
    if fname is not None:
        from os.path import splitext
        fmt = splitext(fname)[-1][1:]
        plt.savefig(fname, format=fmt, transparent=True)
    # return handles for subplot and barplot instances
    plt.draw()
    return (p, b)
def box_off(ax):
    """Remove the top and right frame edges; make ticks point outward.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        A matplotlib plot or subplot object.
    """
    # bottom/left are the only sides that keep tick marks
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    # flip tick direction so marks sit outside the plot area
    for which in ('x', 'y'):
        ax.tick_params(axis=which, direction='out')
    # erase the remaining two spines
    for edge in ('right', 'top'):
        ax.spines[edge].set_color('none')
def plot_screen(screen, ax=None):
    """Plot a captured screenshot

    Parameters
    ----------
    screen : array
        The N x M x 3 (or 4) array of screen pixel values.
    ax : matplotlib Axes | None
        If provided, the axes will be plotted to and cleared of ticks.
        If None, a figure will be created.

    Returns
    -------
    ax : matplotlib Axes
        The axes used to plot the image.
    """
    screen = np.array(screen)
    if screen.ndim != 3 or screen.shape[2] not in [3, 4]:
        raise ValueError('screen must be a 3D array with 3 or 4 channels')
    if ax is None:
        if plt is None:  # module-level import may have failed
            raise ImportError('plot_screen requires matplotlib.pyplot when '
                              'no axes are provided')
        plt.figure()
        ax = plt.axes([0, 0, 1, 1])
    ax.imshow(screen)
    ax.set_xticks([])
    ax.set_yticks([])
    # Hide the frame on *this* axes. The previous ``plt.box('off')`` was
    # doubly wrong: pyplot.box expects a bool (the truthy string 'off'
    # actually turned the frame ON) and it acts on pyplot's current axes,
    # not necessarily the ``ax`` passed in.
    ax.set_frame_on(False)
    return ax
| {
"repo_name": "lkishline/expyfun",
"path": "expyfun/analyze/_viz.py",
"copies": "1",
"size": "19369",
"license": "bsd-3-clause",
"hash": 7391822108945943000,
"line_mean": 40.8336933045,
"line_max": 79,
"alpha_frac": 0.5503639837,
"autogenerated": false,
"ratio": 3.649020346646571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4699384330346571,
"avg_score": null,
"num_lines": null
} |
"""analyst URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from articles.views import HomeViews as homeviews
from articles.views import single_view as singleview
from articles.views import UpdateArticle as update_article
from articles.views import article_form_view as article_form_view
from articles.views import ArticleDeletion as delete_article
from articles.views import delete_own_comment as delete_comment
from articles.views import VoteFormView as vote_view
from articles.views import tag_page as tag_page
from articles.views import about as about
from mysites.views import featured_news as featured_news
from mysites.views import politics as politics
from accounts.views import UserProfileDetailView as user_profile
from accounts.views import UserProfileEditView as user_edit
from django.contrib.auth.decorators import login_required as auth
from haystack.query import SearchQuerySet
from haystack.views import SearchView
# Shared search queryset for the homepage search view, newest articles first.
sqs = SearchQuerySet().order_by('-date')
# NOTE: Django resolves URLs top-down; keep more specific patterns (e.g.
# article/update/...) above the generic article/<slug>/ catch-all.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    #--------------------------------homepage --------------------------------
    # NOTE(review): stray space after ``url`` below — harmless but
    # unconventional.
    url (r'^logs/$', homeviews.as_view(), name='logs'),
    # Site root is the haystack search page backed by ``sqs``.
    url(r'^$', SearchView(
        load_all=False,
        searchqueryset=sqs,
        ),
        name='haystack_search',
        ),
    url(r'^article/submission/$', article_form_view, name="article_form_view"),
    url(r'^article/(?P<slug>[\w-]+)/$', singleview, name="single_view"),
    url(r'^article/update/(?P<slug>[\w-]+)/$', update_article.as_view(), name="update_article"),
    url(r'^article/delete/(?P<slug>[\w-]+)/$', delete_article.as_view(), name="delete_article"),
    url(r'^comments/delete_own/(?P<id>.*)/$', delete_comment, name='delete_comment'),
    url(r'^article/tag/(?P<tag>[\w-]+)/$', tag_page, name="tag_page"),
    # Voting requires an authenticated user (login_required wrapper).
    url(r'^vote/$', auth(vote_view.as_view()), name="vote"),
    url(r'^about/$',about, name="about"),
    #-------------------------------Account handling --------------------------
    url(r'^accounts/', include('registration.backends.hmac.urls')),
    url(r'^users/(?P<slug>[\w-]+)/$', user_profile.as_view(), name="profile"),
    url(r'^edit_profile/$', auth(user_edit.as_view()), name="edit_profile"),
    #------------------------------Commenting Section---------------------------
    url(r'^comments/', include('django_comments.urls')),
    #----------------------------------News views --------------------------------
    url(r'^news/featured-news/$', featured_news, name='featured_news'),
    url(r'^news/politics/$', politics, name='politics'),
    #----------------------------------Django in apps-----------------------------
    url(r'^markdownx/', include('markdownx.urls')),
    #----------------------------------production Deletables ---------------------
    ]
| {
"repo_name": "cmwaura/Newspade",
"path": "analyst/urls.py",
"copies": "1",
"size": "3598",
"license": "mit",
"hash": 5929150469381640000,
"line_mean": 37.688172043,
"line_max": 96,
"alpha_frac": 0.597832129,
"autogenerated": false,
"ratio": 4.051801801801802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149633930801802,
"avg_score": null,
"num_lines": null
} |
# analyte documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'analyte'
copyright = """2017, Boris"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'analytedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index',
     'analyte.tex',
     'analyte Documentation',
     """Boris""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'analyte', 'analyte Documentation',
     ["""Boris"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'analyte', 'analyte Documentation',
     """Boris""", 'analyte',
     """Analyte""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| {
"repo_name": "bio-boris/social_media_scanner",
"path": "docs/conf.py",
"copies": "1",
"size": "7719",
"license": "mit",
"hash": 2490750170071320000,
"line_mean": 30.7654320988,
"line_max": 80,
"alpha_frac": 0.6940018137,
"autogenerated": false,
"ratio": 3.825074331020813,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004572473708276177,
"num_lines": 243
} |
# AnalyticAgent.py
# -*- coding: utf-8 -*-
"""
Code for managing RESTful queries with the ALMA Analytic API. This
includes the AnalyticAgent object class and the QueryType enumeration.
QueryType
An enumeration characterizing the three types of queries that
AnalyticAgent can perform:
PAGE Return only a 'limit' number of results, where limit is
a parameter in the query such that 25 <= limit <= 1000.
REPORT Return multiple pages up to reaching the upper bound of
AnalyticAgent.OBIEE_MAX (currently 65001) records.
ALL Return multiple reports such that all records are pulled
from the analytic. Requires a non-simple RequestObject
as the queries must be specifically structured in order
to bypass the hard-coded OBIEE_MAX limit
AnalyticAgent
An object class that uses RequestObject and QueryType to perform
queries. Importantly, this class provides basic functionality for
performing queries but also provides entry points (key functions)
for further customization through class extensions.
"""
##########################################################################
# Copyright (c) 2014 Katherine Deibel
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
##########################################################################
from __future__ import print_function
from enum import Enum
import bidict
import codecs # for unicode read/write
import datetime
import os
import re
import string
import sys
from multiprocessing import Queue
try:
from lxml import etree
except ImportError: # No lxml installed
import xml.etree.cElementTree as etree
from random import randint, uniform
from string import rjust
from time import sleep, strftime
from urllib2 import Request, urlopen, HTTPError
from urllib import urlencode, quote_plus
from CustomExceptions import AnalyticServerError, ZeroResultsError
from RequestObject import RequestObject
class QueryType(Enum):
    """Kinds of queries an AnalyticAgent can perform against the Analytic API.

    PAGE    Return only a 'limit' number of results, where limit is a
            parameter in the query such that 25 <= limit <= 1000.
    REPORT  Return multiple pages up to reaching the upper bound of
            AnalyticAgent.OBIEE_MAX (currently 65001) records.
    ALL     Return multiple reports such that all records are pulled
            from the analytic. Requires a non-simple RequestObject
            as the queries must be specifically structured in order
            to bypass the hard-coded OBIEE_MAX limit.
    """
    PAGE = 1
    REPORT = 2
    ALL = 3
class AnalyticAgent(object):
"""
Class object that performs a RESTful GET request to the Analytic API
according to a provided RequestObject. This class presents a base
class from which Analytic queries can be made. In particular, several
methods are provided as hooks to allow future customization of the
class through inheritance. These hooks primarily influence how data
is transformed and outputted into its final format.
Class Attributes:
OBIEE_MAX
The maximum number of requests that the OBIEE behind the Analytic
will contain. Typically hardset by the database manager.
LIMIT_LOWER, LIMIT_UPPER
Integer values describing the lower and upper bounds (inclusive) for
the 'limit' parameter in the RESTful GET request.
Object Attributes:
Request
The RequestObject containing the information for the query. This
agent requires that Request.Simple be False.
FailedRequestTolerance, ZeroResultTolerance
A request can fail in two ways: returning a server error (Failed...)
or by returning zero results when there are results to find
(Zero...). The latter is a current bug in the Alma API. These two
tolerance values indicate how many times the agent will sent requests
before giving up when faced with these problems.
StillLoadingTolerance
Sometimes, the analytic takes time to load before it can send data. In
general, you wait a bit before resending the request with the provided
resume token. However, the resume token eventually expires. When the
number of tries reaches this value, the current token is discarded and
a new one is grabbed on the next request.
ErrorSleep
The number of seconds the agent should wait before resending a
request after experiencing one of the aforementioned errors.
StillLoadingSleep
The number of seconds the agent waits before sending another request
if the analytic is still loading the data.
SleepNoise
Adds a little random noise to the time the agent sleeps. Useful for
preventing conflicts when running parallel agents.
Protected Object Attributes
The following are attributes used during queries by the agent for
tracking and coordinating the progress of the agent.
_SeenIDs A dictionary that keeps track of what uniqueIDs
have been seen by the agent in the returned XML.
This is used only when working around the
OBIEE_MAX return limit.
_jobName String ID for the Agent
_writer File object for writing the collected data
_logger List of file or Queue objects for conveying
progress messages / logging info
_allowZeros Boolean indicating if an analytic query can
legitimately contain zero results
_path Path to the analytic currently being queried
_apikey API key currently being used for the query
_limit Current number of records to be returned by a
call to _query_page()
_isFinished Boolean indicating that _query_page() can return
more rows
_resumeToken Identifier for continuing a sequence of
_query_page() calls in order to download an
entire report
_filterStart The lower (>=) bound if filtering is being used
_filterStop The upper (<) bound if filtering is being used
_filterStopReached Boolean indicating if _filterStop has been
reached (essentially _filterValue >= _filterStop)
_filterValue Most recently collected datum from the record
field (sortedBy) being used for filtering
_page_RowCount Number of rows processed by the most recent
call to _query_page()
_page_RecordCount Number of records collected by the most recent
call to _query_page
_report_RowCount Number of rows processed by the most recent call
to _query_record ()
_report_RecordCount Number of records collected by the most recent
call to _query_record()
_all_RowCount Number of rows processed by the most recent call
to _query_all()
_all_RecordCount Number of records collected by the most recent
call to _query_all()
"""
# The maximum number of requests that the OBIEE behind the Analytic
# will contain. Typically hardset by the database manager.
OBIEE_MAX = 65001
# Bounds (inclusive) for the limit parameter
LIMIT_LOWER, LIMIT_UPPER = 25, 1000
    def __init__(self):
        """
        Basic constructor for initializing the AnalyticAgent.

        Sets all tolerance/sleep tuning knobs to their defaults and clears
        the protected per-query state (see class docstring for the meaning
        of each attribute).
        """
        # no RequestObject attached yet; must be set before run()
        self.Request = None
        # retry tolerances for the three known failure modes
        self.FailedRequestTolerance = 3
        self.ZeroResultTolerance = 4
        self.StillLoadingTolerance = 6
        # sleep durations (seconds) between retries, plus random jitter range
        self.ErrorSleep = 15
        self.StillLoadingSleep = 5
        self.SleepNoise = 2
        # internal variables used when conducting queries
        self._SeenIDs = {}          # uniqueID -> times seen (duplicate filter)
        self._jobName = ""
        self._writer = None
        self._logger = None
        self._path = None
        self._apikey = None
        self._limit = '1000'        # kept as a string for URL encoding
        self._page_RowCount = 0
        self._page_RecordCount = 0
        self._report_RowCount = 0
        self._report_RecordCount = 0
        self._all_RowCount = 0
        self._all_RecordCount = 0
        self._isFinished = False
        self._resumeToken = None
        self._filterStart = None
        self._filterStop = None
        self._filterStopReached = False
        self._filterValue = None
        self._allowZeros = False
    # end init
@classmethod
def loadRequest(cls,req):
"""
Helper class method for creating an AnalyticAgent from a provided
RequestObject.
"""
agt = cls()
agt.Request = req
return agt
#end load request
@staticmethod
def data_filename(stem=u'results',extension=u'xml',id=None,digits=None,leading='0'):
"""
HOOK: Static method that returns a filename for the type of data
that this agent will produce.
Parameters:
stem The filestem to be used
extension The file extension
id An identification number/symbol
digits The number of 'digits' to expand the id to with leading
leading What character to use to expand the id to digits length
Returns (assuming extension is xml):
If id is None: stem.xml
If id='5' and digits is none: stem-5.xml
If id='5', digits=3, and leading='x': stem-xx5.xml
"""
filename=''
if id is None:
filename = stem
elif digits is None:
filename = stem + u'-' + unicode(id)
else:
filename = stem + u'-' + rjust(unicode(id),digits,leading)
return filename + u'.' + extension
    def _increment_RowCounts(self):
        """
        Protected method for incrementing all RowCounts simultaneously.

        Called once for every <Row> element processed, whether or not the
        row is ultimately kept as a record.
        """
        self._page_RowCount += 1
        self._report_RowCount += 1
        self._all_RowCount += 1
    def _increment_RecordCounts(self):
        """
        Protected method for incrementing all RecordCounts simultaneously.

        Called only for rows that pass duplicate filtering and are accepted
        by row_process().
        """
        self._page_RecordCount += 1
        self._report_RecordCount += 1
        self._all_RecordCount += 1
def _reset_internal_query_vars(self):
"""
Protected method for resetting all protected attributes to the
same default values as when an Agent is created by the
constructor.
"""
self._jobName = ""
self._writer = None
self._logger = None
self._path = None
self._apikey = None
self._limit = '1000'
self._page_RowCount = 0
self._page_RecordCount = 0
self._report_RowCount = 0
self._report_RecordCount = 0
self._all_RowCount = 0
self._all_RecordCount = 0
self._isFinished = False
self._resumeToken = None
self._filterStart = None
self._filterStop = None
self._filterStopReached = False
self._filterValue = None
self._allowZeros = False
self._SeenIDs = []
def isLessThanFilterStop(self, textValue, filterStop):
"""
Performs a proper less than comparison between textValue and
filterStop by first converting textValue to Request.sortedByType.
Note that all data returned by Analytic API are originally in
text form.
Currently provides support for decimal and string types. date and
datetime are to be implemented at a later date.
Parameters:
textValue The text of the sortedBy field in the current
row of data being queried
filterStop The sentinel value (already typed) that is never
to be exceeded by the query
Returns:
If filterStop is None: True
Else: returns appropriately typed (textValue < filterStop)
"""
if filterStop is None:
return True
if self.Request.sortedByType == "string":
return (textValue < filterStop)
elif self.Request.sortedByType == "decimal":
return (float(textValue) < filterStop)
#for date
#for dateTime = datetime.datetime.strptime(t,"%m/%d/%Y %I:%M:%S %p")
#end isLessThan...
    def pre_process(self):
        """
        HOOK: This method is called by self.run(...) before sending any
        queries. It should contain functionality that prepares (if
        necessary) any structure for the output (which is typically sent
        to self._writer).
        Note that extensions of this and other process() methods can add
        attributes/variables to the AnalyticAgent class.
        This base version of pre_process() create an XML structure (root
        is stored in self.outXML) into which the analytic data will be
        inserted. Included in this structure is the <Description> section
        that records information about this current analytic download.
        """
        self.outXML = etree.Element(u'Analytic')
        self.outXML.set(u'encoding', 'UTF-8')
        # <Description>: provenance metadata for this download
        about = etree.SubElement(self.outXML, u'Description')
        node = etree.SubElement(about, u'Path')
        node.text = self._path
        node = etree.SubElement(about, u'Apikey')
        node.text = self._apikey
        if not self.Request.Simple:
            # non-simple requests also record the fields used to work
            # around the OBIEE_MAX limit, plus any filter bounds in force
            node = etree.SubElement(about, u'UniqueField')
            node.text = self.Request.uniqueID
            node = etree.SubElement(about, u'SortField')
            node.text = self.Request.sortedBy
            node.set(u'datatype', self.Request.sortedByType)
            node.set(u'obiee_field', self.Request.sortedByOBIEE)
            if self._filterStart is not None:
                node = etree.SubElement(about, u'GreaterOrEqual')
                node.text = self._filterStart
            if self._filterStop is not None:
                node = etree.SubElement(about, u'LessThan')
                node.text = self._filterStop
        #end if complex request
        node = etree.SubElement(about, u'Started')
        node.text = strftime(u'%H:%M:%S %Z %Y-%m-%d')
        # empty <Results> container; row_process() appends <Record>s to it
        etree.SubElement(self.outXML, u'Results')
    #end pre_process
def row_process(self, data):
"""
HOOK: This method is called by self.run(...) for each row of
data in the analytic. This method is where the row data can
be transformed and inserted into the output.
The data parameter is a dictionary in which the keys are the
preferred names from Request.ColumnMap or 'Column#'.
Since the row data is only processed and outputted here,
further filtering of the data can be conducted in this method.
To ensure that record counts are accurate, this method should
return a boolean value to indicate if the data was included in
the output.
This base version of row_process() creates an XML <Record>
element, fills it with sub-elements based on what is in data,
and inserts it into self.outXML's <Results> sub-element.
"""
results = self.outXML.find(u'Results')
record = etree.SubElement(results,u'Record')
if len(self.Request.NamesOrder) > 0:
for _name in self.Request.NamesOrder:
etree.SubElement(record, _name).text = unicode(data.get(_name, None))
else:
keys = data.keys()
keys.sort()
for k in keys:
etree.SubElement(record, k).text = unicode(data.get(k))
return True
    def post_process(self):
        """
        HOOK: This method is called by self.run(...) after the query has
        completed. This method is useful for any final edits, summaries,
        outputting, etc. of the collected data.
        This base method adds the completion time to the <Description>
        element in self.outXML and then sends the string version of
        the XML to self._writer.
        """
        about = self.outXML.find(u'Description')
        etree.SubElement(about, u'Completed').text = strftime(u'%H:%M:%S %Z %Y-%m-%d')
        etree.SubElement(about, u'RecordCount').text = unicode(self._all_RecordCount)
        # pretty_print requires lxml; with the cElementTree fallback this
        # keyword would fail -- NOTE(review): confirm lxml is always present
        self._writer.write(etree.tostring(self.outXML, pretty_print=True))
    def log(self, msg):
        """
        Helper function for pushing logging messages to the correct place.
        If self._logger is None, no logging occurs and msg is discarded.
        Otherwise, it attempts to push the message out to what is in
        self._logger.
        For each message, the current time (HH:MM:SS) is prepended.
        Parameter:
            msg     String to be added to logs in self._logger
        Throws:
            TypeError if self._logger is not a list, or an element of it
            supports neither put(...) nor write(...)
        """
        if self._logger is None:
            return
        msg = strftime("%H:%M:%S") + " : " + msg.strip()
        if not isinstance(self._logger, list):
            raise TypeError(u"AnalyticAgent.log(): self._logger must be a list object")
        for out in self._logger:
            # duck-typing: Queue-like objects get put(), file-like get write()
            if hasattr(out, "put"):
                out.put(msg)
            elif hasattr(out, "write"):
                out.write(msg + u"\n")
            else:
                raise TypeError(u"AnalyticAgent: self._logger element: '" + unicode(out)
                                + "u' does not have a write(...) or put(...)method" )
        #end for out in ...
    #end log(...)
def _duplicate_check(self, newID):
"""
Helper method for determining if newID has already been processed
by this analytic by using self._SeenIDs.
Parameter:
newID The value of uniqueID from a row of data
Returns:
True: If self.seenIDs[newID] exists
False: If self.seenIDs[newID] does not
"""
if self._SeenIDs is None:
self._SeenIDs = {}
if newID in self._SeenIDs:
self._SeenIDs[newID] += 1
return True
else:
self._SeenIDs[newID] = 1
return False
#end duplicate_check
def noisy_sleep(self, t, delta):
"""
Call for this thread to sleep for a random number of seconds
from the distribution [t-delta, t+delta].
Parameters:
t A positive number of seconds to sleep
delta An error range (0 <= delta <= t)
Throws:
ValueError on incompatible parameters
"""
if t <= 0:
raise ValueError(u"noisy_sleep: t must be positive")
if delta < 0:
raise ValueError(u"noisy_sleep: delta must be non-negative")
if delta > t:
raise ValueError(u"noisy_sleep: delta must be <= t")
sleep( t + uniform(-delta,delta) )
#end noisy_sleep
    def generate_request(self, path='', apikey='', limit='1000',
                         resume=None, filter=None):
        """
        Function that generates (but does not call) and %-encodes
        the HTTP GET request to the Analytic ALMA API using data
        from self.Request and the parameters.
        Parameters:
            path        The relative path to the analytic to be queried
            apikey      The apikey to use in the query
            limit       The number of results (25 - 1000) to be returned
            resume      A resumption token (optional)
            filter      A filter statement (optional)
        Returns:
            A properly percent-encoded / utf-encoded urllib2 Request
            object ready for sending
        """
        # utf-8 encode every parameter before percent-encoding
        params = {}
        params['path'] = path.encode('utf-8')
        params['apikey'] = apikey.encode('utf-8')
        params['limit'] = limit.encode('utf-8')
        if filter is not None:
            params['filter'] = filter.encode('utf-8')
        if resume is not None:
            params['token'] = resume.encode('utf-8')
        req = Request( self.Request.URL + '?' + urlencode(params) )
        # force the HTTP verb to GET
        req.get_method = lambda: 'GET'
        return req
    #end generate_request
    def output_names(self, data_filename):
        """
        Returns a list of additional file object names that could be
        produced by this agent. For optimal usage with QueryFactory,
        these additional file object names should be based off the string
        in data_filename.
        Parameters:
            data_filename   The file name (string) of where data will
                            be outputted
        Output:
            A list (including data_filename) of filenames
        """
        # the base agent produces only the single data file; subclasses
        # may override to add companion files
        return [data_filename]
def create_filter(self, tag):
"""
Creates a sawx filter for the analytic to return values from
the self.Request.sortedBy field of type self.Request.sortedByType
such that the values are greater than or equal to the value in
tag.
Notes:
Lone, whitespace-bounded & are converted to &
To avoid processing issues, any apostrophes in tag are
replaced with '.
As only decimal and string types are currently supported,
there is no need to transform tag into a proper format.
Supporting date and datetime types may require some
transformation.
Parameters:
tag The text that needs to be inserted into the filter
Returns:
A sawx filter for self.Request.sortedBy field for records
greater than or equal to tag
"""
filter = u'<sawx:expr xsi:type="sawx:comparison" op="greaterOrEqual" xmlns:saw="com.siebel.analytics.web/report/v1.1" xmlns:sawx="com.siebel.analytics.web/expression/v1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema"><sawx:expr xsi:type="sawx:sqlExpression">"#FIELD#"</sawx:expr><sawx:expr xsi:type="xsd:#TYPE#">#TAG#</sawx:expr></sawx:expr>'
filter = filter.encode('utf-8')
filterField = self.Request.sortedByOBIEE
filterField = u'"."'.join(filterField.split(u'.')).encode('utf-8')
filterType = self.Request.sortedByType.encode('utf-8')
# take care of apostrophes
tag = unicode(tag)
cleanTag = tag.replace(u"'",u"'")
# take care of non-entity &
pattern = re.compile(u'&(\W)', re.UNICODE|re.MULTILINE)
cleanTag = re.sub(pattern, u'&\\1', cleanTag)
pattern = re.compile(u'&$', re.UNICODE|re.MULTILINE)
cleanTag = re.sub(pattern, u'&', cleanTag)
filter = filter.replace(u'#FIELD#', filterField)
filter = filter.replace(u'#TYPE#', filterType)
filter = filter.replace(u'#TAG#', cleanTag)
return filter
#end create_filter
    def run(self, jobName="", writer=sys.stdout, logger=None, allowZeros=False,
            path=None, apikey=None, limit='1000',
            filterStart=None, filterStop=None, queryType=QueryType.ALL):
        """
        This is the function that is used to make the Agent run an
        Analytic Query. It initializes / clears out the protected internal
        query attributes and coordinates the calling of the pre_process,
        _query_*, and post_process methods.
        Parameters:
            jobName     String ID for the Agent
            writer      File object for writing the collected data
            logger      List of file or Queue objects for conveying
                        progress messages / logging info
            allowZeros  Boolean indicating if an analytic query can
                        legitimately contain zero results
            path        Path to the analytic currently being queried.
                        Defaults to self.Request.Paths[0] if omitted.
            apikey      API key currently being used for the query.
                        Defaults to self.Request.Keys[0] if omitted.
            limit       Number of records to be returned by a single
                        call to _query_page()
            filterStart The lower (>=) bound if filtering is being used
            filterStop  The upper (<) bound if filtering is being used
            queryType   Value from enum QueryType indicating what type
                        of query to perform (ALL, REPORT, PAGE).
        Returns:
            The number of collected records from this call to run()
        Throws:
            ValueError if parameters or the agent's RequestObject are
            invalid for the type of call to run
        """
        # do basic parameter checking first
        if queryType not in QueryType:
            raise ValueError(u"Unrecognized QueryType passed into queryType parameter.")
        if queryType is QueryType.ALL and self.Request.Simple:
            # ALL needs uniqueID/sortedBy metadata to bypass OBIEE_MAX
            raise ValueError(u"query_all() requires the agent to have a non-simple RequestObject")
        if not (AnalyticAgent.LIMIT_LOWER <= int(limit) <= AnalyticAgent.LIMIT_UPPER):
            raise ValueError(u"limit must in the range ["
                             + unicode(AnalyticAgent.LIMIT_LOWER)
                             + u", "
                             + unicode(AnalyticAgent.LIMIT_UPPER)
                             + u"]")
        # fill in path and apikey if not provided
        if path is None:
            path = self.Request.Paths[0]
        if apikey is None:
            apikey = self.Request.Keys[0]
        # initialize the internal query vars
        self._jobName = jobName
        self._writer = writer
        self._logger = logger
        self._path = path
        self._apikey = apikey
        self._resumeToken = None
        self._limit = unicode(limit)
        self._filterStart = filterStart
        self._filterStop = filterStop
        self._allowZeros = allowZeros
        self._page_RowCount = 0
        self._page_RecordCount = 0
        self._report_RowCount = 0
        self._report_RecordCount = 0
        self._all_RowCount = 0
        self._all_RecordCount = 0
        self._SeenIDs = {}
        self._isFinished = False
        self._filterStopReached = False
        self._filterValue = None
        # run initial processing step
        self.pre_process()
        # run the appropriate query call
        if queryType is QueryType.ALL:
            self._query_all()
            totalRecordCount = self._all_RecordCount
        elif queryType is QueryType.REPORT:
            self._query_report()
            totalRecordCount = self._report_RecordCount
            # hitting exactly OBIEE_MAX rows means the data was likely cut off
            if self._report_RowCount == AnalyticAgent.OBIEE_MAX:
                print(u"\nWARNING: Analytic may contain more data.\n",
                      file=sys.stderr)
        elif queryType is QueryType.PAGE:
            self._query_page()
            totalRecordCount = self._page_RecordCount
            if not self._isFinished and not self._filterStopReached:
                print(u"\nWARNING!!! Analytic contains more data.\n",
                      file=sys.stderr)
        # wrap up the processing
        self.post_process()
        # reset the internal query vars
        self._reset_internal_query_vars()
        # return the number of records found
        return totalRecordCount
    #end run(...)
    def _query_all(self):
        """
        Internal method for downloading all results from an analytic.
        Coordinates multiple calls to _query_report() using the
        protected, internal query attributes.

        After each report, the filter's lower bound is advanced to the
        last sortedBy value seen, so the next report resumes where the
        previous one stopped (this is how OBIEE_MAX is bypassed).
        """
        if self.Request.Simple:
            raise ValueError(u"query_all() requires the agent to have a non-simple RequestObject")
        self._all_RowCount = 0
        self._all_RecordCount = 0
        moreRecords = True
        while moreRecords:
            self._query_report()
            # we need to change the filterStart as the next report needs to start
            # where the previous one left off
            self._filterStart = self._filterValue
            # another report is needed only if the stop bound was not reached
            # AND the last report filled up to the OBIEE hard limit
            moreRecords = (not self._filterStopReached) and \
                          (self._report_RowCount == AnalyticAgent.OBIEE_MAX)
        #end while moreRecords
        self.log(self._jobName
                 + u"> has finished collecting all "
                 + unicode(self._all_RecordCount)
                 + u" record(s)")
        # adjust values of receipt to return
        return
    #end query_all
    def _query_report(self):
        """
        Internal method for downloading a report (OBIEE_MAX rows) from
        an analytic. Coordinates multiple calls to _query_page() using
        the protected, internal query attributes.

        Pages are pulled until the server reports IsFinished (or the
        filter stop bound is reached inside _query_page()).
        """
        self._report_RowCount = 0
        self._report_RecordCount = 0
        self._isFinished = False
        while not self._isFinished:
            self._query_page()
        #end while not isFinished
        self.log(self._jobName
                 + u"> collected a report of "
                 + unicode(self._report_RecordCount)
                 + u" record(s) ("
                 + unicode(self._all_RecordCount)
                 + u" total)")
        # reset resumeToken to None as we just exhausted it
        self._resumeToken = None
        return
    #end query_report
    def _query_page(self):
        """
        Internal method for downloading a page of results (self._limit)
        by using the protected, internal query attributes. This is the
        only method that actually performs HTTP GET requests.
        Throws:
            AnalyticServerError and ZeroResultsError
        """
        self._page_RowCount = 0
        self._page_RecordCount = 0
        self._filterValue = self._filterStart
        filterParam = None
        # put in a filter if the request if not simple
        if not self.Request.Simple and self._filterValue is not None:
            filterParam = unicode(self.create_filter(self._filterValue))
        row_namespace = None
        self._isFinished = False
        zeroTries = 0
        isLoadingTries = 0
        # keep cycling until we either get data (triggers a break)
        # or we throw either AnalyticServerException or ZeroResultsException
        while True:
            _request = self.generate_request(path=self._path,
                                             apikey=self._apikey,
                                             limit=self._limit,
                                             resume=self._resumeToken,
                                             filter=filterParam)
            # keep trying until we get a valid HTTP response
            requestTries = 0
            loopFlag = True
            while loopFlag:
                try: # hopefully we won't get a server error
                    requestTries = requestTries + 1
                    response = urlopen(_request).read()
                    loopFlag = False
                except HTTPError as e:
                    # if we do, sleep and try again until tolerance exceeded
                    if requestTries < self.FailedRequestTolerance:
                        loopFlag = True
                        msg = u"Returned data:" + u"\n"
                        msg = msg + unicode(e.read()) + u"\n"
                        self.log(self._jobName
                                 + u" received error status " \
                                 + unicode(e.code) + u":\n" \
                                 + msg + u"\n" \
                                 + u"Trying again...")
                        self.noisy_sleep(self.ErrorSleep, self.SleepNoise)
                    else:
                        # throw a custom exception with the HTTPError
                        msg = u"Error Code: " + unicode(e.code) + u"\n"
                        msg = msg + u"Returned data:" + u"\n"
                        msg = msg + unicode(e.read()) + u"\n"
                        raise AnalyticServerError(msg)
                #end try/except
            #end while loopFlag
            # If here, HTTP Status is 200 and xml has been returned
            data_xml = etree.fromstring(response)
            token = data_xml.findtext(".//ResumptionToken")
            if token is not None:
                # grab the ResumptionToken that you will use from now on
                self._resumeToken = token
            # sometimes, while the analytic is loading, it returns empty results
            # in that there is nothing in the ResultXml tag, not even the xsd info
            # solution: sleep for a bit and try again using the resume token
            if (len(data_xml.find(".//ResultXml")) == 0):
                self.log(self._jobName + u"> Analytic is still loading...")
                isLoadingTries = isLoadingTries + 1
                if isLoadingTries == self.StillLoadingTolerance:
                    # reset token and isLoadingTries
                    # (the resume token may have expired; start fresh)
                    isLoadingTries = 0
                    self._resumeToken = None
                self.noisy_sleep(self.StillLoadingSleep, self.SleepNoise)
                continue
            # grab the namespace of the rowset
            # (the default namespace of the first child of ResultXml)
            if row_namespace is None:
                row_namespace = '{' + data_xml.find(".//ResultXml")[0].nsmap[None] + '}'
            # check for the zero results but is finished bug
            if not self._allowZeros and len(data_xml.findall(".//" + row_namespace + "Row")) == 0:
                self._resumeToken = None
                zeroTries = zeroTries + 1
                if zeroTries < self.ZeroResultTolerance:
                    self.log(self._jobName
                             + u"> Analytic returned zero results. Trying again...")
                    self.noisy_sleep(self.ErrorSleep, self.SleepNoise)
                else:
                    raise ZeroResultsError()
            else:
                # we have data, so break
                break
        #end while true
        # grab the isFinished flag as part of the return values
        self._isFinished = (data_xml.findtext(".//IsFinished").lower() == 'true')
        self._filterStopReached = False
        # iterate over each row
        for row in data_xml.findall(".//" + row_namespace + "Row"):
            self._increment_RowCounts()
            # load the data from each column and save into a dictionary
            row_data = {}
            if len(self.Request.NamesOrder) > 0:
                # columnmap exists
                for _name in self.Request.NamesOrder:
                    _column = self.Request.ColumnMap.names.get(_name)
                    _value = row.findtext(row_namespace + _column, default=None)
                    row_data[_name] = _value
            else:
                # grab all columns save 0
                # (Column0 is an OBIEE bookkeeping column, not data)
                for _col in row:
                    _tag = re.search("Column[0-9]+",_col.tag).group(0)
                    if _tag == "Column0":
                        continue
                    row_data[_tag] = _col.text
            if not self.Request.Simple:
                # grab the next filterValue
                self._filterValue = row_data.get(self.Request.sortedBy)
                if not self.isLessThanFilterStop(self._filterValue, self._filterStop):
                    self._isFinished = True # do not repeat a query with the token
                    self._filterStopReached = True
                    break # end 'for row in data_xml...'
            # check for duplicates
            if self._duplicate_check(row_data.get(self.Request.uniqueID)):
                # duplicate found, so skip and continue
                continue
            row_accepted = self.row_process(data=row_data)
            if row_accepted:
                self._increment_RecordCounts()
        #end for row in data_xml...
        self.log(self._jobName
                 + u"> collected a page of "
                 + unicode(self._page_RecordCount) + u" / "
                 + unicode(self._page_RowCount) + u" records ("
                 + unicode(self._all_RecordCount) + u" total)")
        return
    #end query_page
#end class AnalyticAgent
| {
"repo_name": "NEU-Libraries/alma-analytic-tools",
"path": "AnalyticAgent.py",
"copies": "1",
"size": "37487",
"license": "isc",
"hash": 5469089994348050000,
"line_mean": 38.171368861,
"line_max": 404,
"alpha_frac": 0.5817483394,
"autogenerated": false,
"ratio": 4.66604431167538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5747792651075381,
"avg_score": null,
"num_lines": null
} |
"""Analytical computation of Solar System bodies
"""
import numpy as np
from ..constants import Earth, Moon, Sun
from ..errors import UnknownBodyError
from ..orbits import Orbit
from ..utils.units import AU
from ..propagators.base import AnalyticalPropagator
def get_body(name):
    """Retrieve a given body orbits and parameters

    Args:
        name (str): Object name
    Return:
        Body:
    """
    try:
        body, propagator = _bodies[name.lower()]
    except KeyError as err:
        raise UnknownBodyError(err.args[0])
    # attach a propagator to the object
    body.propagate = propagator.propagate
    return body
class EarthPropagator(AnalyticalPropagator):
    """Dummy propagator for Earth position"""

    orbit = None

    @classmethod
    def propagate(cls, date):
        # Earth is the origin of the EME2000 frame: null state vector
        return Orbit([0] * 6, date, "cartesian", "EME2000", cls())
class MoonPropagator(AnalyticalPropagator):
    """Dummy propagator for moon position"""

    orbit = None

    @classmethod
    def propagate(cls, date):
        """Compute the Moon position at a given date

        Args:
            date (~beyond.utils.date.Date)
        Return:
            ~beyond.orbits.orbit.Orbit: Position of the Moon in EME2000 frame
        Example:
            .. code-block:: python

                from beyond.utils.date import Date
                MoonPropagator.propagate(Date(1994, 4, 28))
                # Orbit =
                #   date = 1994-04-28T00:00:00 UTC
                #   form = Cartesian
                #   frame = EME2000
                #   propag = MoonPropagator
                #   coord =
                #     x = -134181157.317
                #     y = -311598171.54
                #     z = -126699062.437
                #     vx = 0.0
                #     vy = 0.0
                #     vz = 0.0
        """
        # the trigonometric series below are evaluated in Barycentric
        # Dynamical Time, expressed in Julian centuries
        # NOTE(review): coefficients look like a low-precision analytical
        # lunar ephemeris series -- confirm against the original reference
        date = date.change_scale("TDB")
        t_tdb = date.julian_century

        def cos(angle):
            """cosine in degrees"""
            return np.cos(np.radians(angle))

        def sin(angle):
            """sine in degrees"""
            return np.sin(np.radians(angle))

        # ecliptic longitude (degrees)
        lambda_el = (
            218.32
            + 481267.8813 * t_tdb
            + 6.29 * sin(134.9 + 477198.85 * t_tdb)
            - 1.27 * sin(259.2 - 413335.38 * t_tdb)
            + 0.66 * sin(235.7 + 890534.23 * t_tdb)
            + 0.21 * sin(269.9 + 954397.7 * t_tdb)
            - 0.19 * sin(357.5 + 35999.05 * t_tdb)
            - 0.11 * sin(186.6 + 966404.05 * t_tdb)
        )
        # ecliptic latitude (degrees)
        phi_el = (
            5.13 * sin(93.3 + 483202.03 * t_tdb)
            + 0.28 * sin(228.2 + 960400.87 * t_tdb)
            - 0.28 * sin(318.3 + 6003.18 * t_tdb)
            - 0.17 * sin(217.6 - 407332.2 * t_tdb)
        )
        # horizontal parallax (degrees)
        p = (
            0.9508
            + 0.0518 * cos(134.9 + 477198.85 * t_tdb)
            + 0.0095 * cos(259.2 - 413335.38 * t_tdb)
            + 0.0078 * cos(235.7 + 890534.23 * t_tdb)
            + 0.0028 * cos(269.9 + 954397.70 * t_tdb)
        )
        # obliquity of the ecliptic (degrees)
        e_bar = (
            23.439291 - 0.0130042 * t_tdb - 1.64e-7 * t_tdb ** 2 + 5.04e-7 * t_tdb ** 3
        )
        # Earth-Moon distance from the parallax
        r_moon = Earth.r / sin(p)
        # rotate ecliptic spherical coordinates into equatorial cartesian;
        # velocity is left at zero (position-only ephemeris)
        state_vector = r_moon * np.array(
            [
                cos(phi_el) * cos(lambda_el),
                cos(e_bar) * cos(phi_el) * sin(lambda_el) - sin(e_bar) * sin(phi_el),
                sin(e_bar) * cos(phi_el) * sin(lambda_el) + cos(e_bar) * sin(phi_el),
                0,
                0,
                0,
            ]
        )
        return Orbit(state_vector, date, "cartesian", "EME2000", cls())
class SunPropagator(AnalyticalPropagator):
    """Dummy propagator for Sun position"""

    orbit = None

    @classmethod
    def propagate(cls, date):
        """Compute the position of the sun at a given date

        Args:
            date (~beyond.utils.date.Date)
        Return:
            ~beyond.orbits.orbit.Orbit: Position of the sun in MOD frame
        Example:
            .. code-block:: python

                from beyond.utils.date import Date
                SunPropagator.propagate(Date(2006, 4, 2))
                # Orbit =
                #   date = 2006-04-02T00:00:00 UTC
                #   form = Cartesian
                #   frame = MOD
                #   propag = SunPropagator
                #   coord =
                #     x = 146186235644.0
                #     y = 28789144480.5
                #     z = 12481136552.3
                #     vx = 0.0
                #     vy = 0.0
                #     vz = 0.0
        """
        # series evaluated in UT1, expressed in Julian centuries
        date = date.change_scale("UT1")
        t_ut1 = date.julian_century
        # mean ecliptic longitude of the Sun (degrees)
        lambda_M = 280.460 + 36000.771 * t_ut1
        # mean anomaly (radians)
        M = np.radians(357.5291092 + 35999.05034 * t_ut1)
        # ecliptic longitude corrected by the equation of center (radians)
        lambda_el = np.radians(
            lambda_M + 1.914666471 * np.sin(M) + 0.019994643 * np.sin(2 * M)
        )
        # Sun-Earth distance in astronomical units
        r = 1.000140612 - 0.016708617 * np.cos(M) - 0.000139589 * np.cos(2 * M)
        # obliquity of the ecliptic (radians)
        eps = np.radians(23.439291 - 0.0130042 * t_ut1)
        # rotate from ecliptic to equatorial, scale AU -> meters;
        # velocity is left at zero (position-only ephemeris)
        pv = (
            r
            * np.array(
                [
                    np.cos(lambda_el),
                    np.cos(eps) * np.sin(lambda_el),
                    np.sin(eps) * np.sin(lambda_el),
                    0,
                    0,
                    0,
                ]
            )
            * AU
        )
        return Orbit(pv, date, "cartesian", "MOD", cls())
# Registry used by get_body(): lower-case body name ->
# (body constants object, propagator class)
_bodies = {
    "moon": (Moon, MoonPropagator),
    "sun": (Sun, SunPropagator),
    "earth": (Earth, EarthPropagator),
}
| {
"repo_name": "galactics/beyond",
"path": "beyond/env/solarsystem.py",
"copies": "2",
"size": "5486",
"license": "mit",
"hash": 6459409260315617000,
"line_mean": 26.2935323383,
"line_max": 87,
"alpha_frac": 0.4637258476,
"autogenerated": false,
"ratio": 3.316807738814994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9779672406058846,
"avg_score": 0.00017223607122947632,
"num_lines": 201
} |
"""Analytical discrete ordinates method."""
from . import base
from . import iso
from . import ani
__all__ = ['ado']
def ado(n, N, bc, xf, c, L=False, Q=False, x0=0):
    """
    Determine the radiation density using analytical discrete ordinates
    method.

    Parameters
    ----------
    n : int
        Number of points to determine the radiation density; n > 0.
    N : int
        Number of nodes and weights of the Gauss-Legendre quadrature
        scheme; N > 0, even.
    bc : array_like
        Boundary conditions; an array_like of size 2, with each element
        being a scalar or an 1-D array_like of size 'N'.
    xf : scalar
        Final position.
    c : float
        Abledo; 0 < c < 1.
    L : int | optional
        Anisotropy degree; L > 0.
    Q : scalar | optional
        External sources.
    x0 : scalar | optional
        Inicial position.

    Returns
    -------
    rho: ndarray
        Radiation density; 1-D ndarray of size 'n'.
    """
    # particular solution only exists when an external source is present
    ps = base.cal_ps(c, Q) if Q else False
    # dispatch on anisotropy: L > 0 selects the anisotropic solver
    if L:
        return ani.solve(n, N, bc, xf, c, L, ps, x0)
    return iso.solve(n, N, bc, xf, c, ps, x0)
| {
"repo_name": "olivertso/ado",
"path": "python/ado.py",
"copies": "1",
"size": "1171",
"license": "mit",
"hash": 7264428352133746000,
"line_mean": 23.3958333333,
"line_max": 71,
"alpha_frac": 0.561058924,
"autogenerated": false,
"ratio": 3.4747774480712166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4535836372071217,
"avg_score": null,
"num_lines": null
} |
""" Analytical expressions of information theoretical quantities. """
from scipy.linalg import det, inv
from numpy import log, prod, absolute, exp, pi, trace, dot, cumsum, \
hstack, ix_, sqrt, eye, diag, array
from ite.shared import compute_h2
def analytical_value_h_shannon(distr, par):
    """ Analytical value of the Shannon entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    par : dictionary
        Parameters of the distribution. If distr = 'uniform': par["a"],
        par["b"], par["l"] <- lxU[a,b]. If distr = 'normal' : par["cov"]
        is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Shannon entropy.
    """
    if distr == 'uniform':
        # h(l x U[a,b]) = log(prod(b-a)) + log|det(l)|
        return log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
    if distr == 'normal':
        # h(N(m,C)) = 1/2 * log((2*pi*e)^d * det(C))
        dim = par["cov"].shape[0]  # = par["cov"].shape[1]
        return log((2 * pi * exp(1)) ** dim * det(par["cov"])) / 2
    raise Exception('Distribution=?')
def analytical_value_c_cross_entropy(distr1, distr2, par1, par2):
    """Closed-form cross-entropy of two distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Distribution names; only 'normal'/'normal' is supported.
    par1, par2 : dictionary-s
        Each with keys "mean" (mean vector) and "cov" (covariance
        matrix) of the respective Gaussian.

    Returns
    -------
    float
        Analytical cross-entropy.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    mean1, cov1 = par1['mean'], par1['cov']
    mean2, cov2 = par2['mean'], par2['cov']
    dim = len(mean1)
    prec2 = inv(cov2)  # precision matrix of the second Gaussian
    dm = mean1 - mean2
    quad = dot(dm, dot(prec2, dm))  # Mahalanobis-type quadratic term
    return (dim * log(2 * pi) + log(det(cov2)) +
            trace(dot(prec2, cov1)) + quad) / 2
def analytical_value_d_kullback_leibler(distr1, distr2, par1, par2):
    """Closed-form Kullback-Leibler divergence D(f1 || f2).

    Parameters
    ----------
    distr1, distr2 : str-s
        Distribution names; only 'normal'/'normal' is supported.
    par1, par2 : dictionary-s
        Each with keys "mean" and "cov" of the respective Gaussian.

    Returns
    -------
    float
        Analytical KL divergence.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    mean1, cov1 = par1['mean'], par1['cov']
    mean2, cov2 = par2['mean'], par2['cov']
    dim = len(mean1)
    prec2 = inv(cov2)
    dm = mean1 - mean2
    quad = dot(dm, dot(prec2, dm))
    return (log(det(cov2) / det(cov1)) + trace(dot(prec2, cov1)) +
            quad - dim) / 2
def analytical_value_i_shannon(distr, par):
    """Closed-form Shannon mutual information between subspaces.

    Parameters
    ----------
    distr : str
        Must be 'normal'.
    par : dictionary
        par["cov"] is the joint covariance matrix, par["ds"] the
        vector of subspace dimensions.

    Returns
    -------
    float
        Analytical Shannon mutual information.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')
    c, ds = par["cov"], par["ds"]
    # starting index of each subspace: 0, d_1, d_1+d_2, ...
    starts = cumsum(hstack((0, ds[:-1])))
    det_product = 1
    for start, d in zip(starts, ds):
        block = range(start, start + d)
        det_product *= det(c[ix_(block, block)])
    # I = (sum_m log det(C_mm) - log det(C)) / 2
    return log(det_product / det(c)) / 2
def analytical_value_h_renyi(distr, alpha, par):
    """Closed-form Renyi entropy of order alpha (alpha != 1).

    Parameters
    ----------
    distr : str
        'uniform' (par: "a", "b", "l" <- l x U[a,b]) or
        'normal'  (par: "cov" = covariance matrix).
    alpha : float, alpha != 1
        Order of the Renyi entropy.
    par : dictionary
        Distribution parameters, see above.

    Returns
    -------
    float
        Analytical Renyi entropy.

    References
    ----------
    Kai-Sheng Song. Renyi information, loglikelihood and an intrinsic
    distribution measure. Journal of Statistical Planning and
    Inference 93: 51-69, 2001.
    """
    if distr == 'normal':
        dim = par["cov"].shape[0]
        return (log((2 * pi) ** (dim / 2) *
                    sqrt(absolute(det(par["cov"])))) -
                dim * log(alpha) / (2 * (1 - alpha)))
    if distr == 'uniform':
        # For a uniform the Renyi entropy coincides with the Shannon
        # one; the second term is the log-Jacobian of the linear map l
        # (transformation rule of the Renyi entropy):
        return log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
    raise Exception('Distribution=?')
def analytical_value_h_tsallis(distr, alpha, par):
    """Closed-form Tsallis entropy of order alpha (alpha != 1).

    Obtained from the Renyi entropy h_R of the same distribution via
    h_T = (exp((1 - alpha) * h_R) - 1) / (1 - alpha); the supported
    distributions and their parameters are those of
    analytical_value_h_renyi.

    Returns
    -------
    float
        Analytical Tsallis entropy.
    """
    renyi_h = analytical_value_h_renyi(distr, alpha, par)
    return (exp((1 - alpha) * renyi_h) - 1) / (1 - alpha)
def analytical_value_k_prob_product(distr1, distr2, rho, par1, par2):
    """Closed-form probability product kernel (parameter rho > 0).

    Parameters
    ----------
    distr1, distr2 : str
        Distribution names; only 'normal'/'normal' is supported.
    rho : float, > 0
        Kernel parameter.
    par1, par2 : dictionary-s
        Each with keys "mean" and "cov" of the respective Gaussian.

    Returns
    -------
    float
        Analytical probability product kernel value.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    m1, c1 = par1['mean'], par1['cov']
    m2, c2 = par2['mean'], par2['cov']
    dim = len(m1)
    prec1, prec2 = inv(c1), inv(c2)
    c12 = inv(prec1 + prec2)
    m12 = dot(prec1, m1) + dot(prec2, m2)
    quad = (dot(m1, dot(prec1, m1)) + dot(m2, dot(prec2, m2)) -
            dot(m12, dot(c12, m12)))
    return ((2 * pi) ** ((1 - 2 * rho) * dim / 2) * rho ** (-dim / 2) *
            sqrt(absolute(det(c12))) *
            absolute(det(c1)) ** (-rho / 2) *
            absolute(det(c2)) ** (-rho / 2) *
            exp(-rho / 2 * quad))
def analytical_value_k_expected(distr1, distr2, sigma, par1, par2):
    """Closed-form expected kernel (Gaussian kernel std sigma > 0).

    Parameters
    ----------
    distr1, distr2 : str
        Distribution names; only 'normal'/'normal' is supported.
    sigma : float, > 0
        Std parameter of the Gaussian kernel.
    par1, par2 : dictionary-s
        Each with keys "mean" and "cov" of the respective Gaussian.

    Returns
    -------
    float
        Analytical expected kernel value.

    References
    ----------
    Krikamol Muandet, Kenji Fukumizu, Francesco Dinuzzo, and Bernhard
    Scholkopf. Learning from distributions via support measure
    machines. NIPS, pages 10-18, 2011.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    mean1, cov1 = par1['mean'], par1['cov']
    mean2, cov2 = par2['mean'], par2['cov']
    dim = len(mean1)
    gam = 1 / sigma ** 2
    dm = mean1 - mean2
    quad = dot(dot(dm, inv(cov1 + cov2 + eye(dim) / gam)), dm)
    return exp(-quad / 2) / \
        sqrt(absolute(det(gam * cov1 + gam * cov2 + eye(dim))))
def analytical_value_h_sharma_mittal(distr, alpha, beta, par):
    """Closed-form Sharma-Mittal entropy (0 < alpha != 1, beta != 1).

    Parameters
    ----------
    distr : str
        Must be 'normal'.
    alpha : float, 0 < alpha != 1
        First parameter of the Sharma-Mittal entropy.
    beta : float, beta != 1
        Second parameter of the Sharma-Mittal entropy.
    par : dictionary
        par["cov"] = covariance matrix.

    Returns
    -------
    float
        Analytical Sharma-Mittal entropy.

    References
    ----------
    Frank Nielsen and Richard Nock. A closed-form expression for the
    Sharma-Mittal entropy of exponential families. Journal of Physics
    A: Mathematical and Theoretical, 45:032003, 2012.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')
    cov = par['cov']
    dim = cov.shape[0]
    # normalization constant of the Gaussian:
    base = (2 * pi) ** (dim / 2) * sqrt(absolute(det(cov)))
    return (base ** (1 - beta) /
            alpha ** (dim * (1 - beta) / (2 * (1 - alpha))) - 1) / \
        (1 - beta)
def analytical_value_h_phi(distr, par, c):
    """Closed-form Phi-entropy with phi(x) = x**c (c >= 1).

    Parameters
    ----------
    distr : str
        Must be 'uniform'.
    par : dictionary
        par['a'], par['b'] define U[a, b].
    c : float, >= 1
        Exponent of the Phi-entropy: phi = lambda x: x**c.

    Returns
    -------
    float
        Analytical Phi entropy.
    """
    if distr != 'uniform':
        raise Exception('Distribution=?')
    width = par['b'] - par['a']
    # the density of U[a,b] is the constant 1/(b-a), hence
    # E[phi(p)] = (b-a)^(-c):
    return width ** (-c)
def analytical_value_d_chi_square(distr1, distr2, par1, par2):
    """Closed-form (Pearson) chi^2 divergence.

    Parameters
    ----------
    distr1, distr2 : str-s
        Supported pairs: ('uniform', 'uniform') with distr_i =
        U[0, a_i], a_i = par_i['a']; and ('normalI', 'normalI') with
        distr_i = N(m_i, I), m_i = par_i['mean'].
    par1, par2 : dictionary-s
        Distribution parameters, see above.

    Returns
    -------
    float
        Analytical chi^2 divergence.

    References
    ----------
    Frank Nielsen and Richard Nock. On the chi square and higher-order
    chi distances for approximating f-divergence. IEEE Signal
    Processing Letters, 2:10-13, 2014.
    """
    if distr1 == 'uniform' and distr2 == 'uniform':
        return prod(par2['a']) / prod(par1['a']) - 1
    if distr1 == 'normalI' and distr2 == 'normalI':
        dm = par2['mean'] - par1['mean']
        return exp(dot(dm, dm)) - 1
    raise Exception('Distribution=?')
def analytical_value_d_l2(distr1, distr2, par1, par2):
    """Closed-form L2 divergence of U[0, a] and U[0, b].

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'uniform'/'uniform' is supported.
    par1, par2 : dictionary-s
        par1['a'] and par2['a'] hold the vectors a and b.

    Returns
    -------
    float
        Analytical L2 divergence.
    """
    if not (distr1 == 'uniform' and distr2 == 'uniform'):
        raise Exception('Distribution=?')
    vol_a = prod(par1['a'])
    vol_b = prod(par2['a'])
    # the radicand is nonnegative when U[0,b] is contained in U[0,a]:
    return sqrt(1 / vol_b - 1 / vol_a)
def analytical_value_d_renyi(distr1, distr2, alpha, par1, par2):
    """Closed-form Renyi divergence of order alpha (alpha != 1).

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'normal'/'normal' is supported.
    alpha : float, != 1
        Order of the Renyi divergence.
    par1, par2 : dictionary-s
        Each with keys "mean" and "cov" of the respective Gaussian.

    Returns
    -------
    float
        Analytical Renyi divergence.

    References
    ----------
    Manuel Gil. On Renyi Divergence Measures for Continuous Alphabet
    Sources. PhD thesis, Queen's University, 2011.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    cov1, mean1 = par1['cov'], par1['mean']
    cov2, mean2 = par2['cov'], par2['mean']
    mix = alpha * cov2 + (1 - alpha) * cov1  # interpolated covariance
    dm = mean1 - mean2
    quad = dot(dot(dm, inv(mix)), dm) / 2
    log_term = log(absolute(det(mix)) /
                   (det(cov1) ** (1 - alpha) * det(cov2) ** alpha))
    return alpha * (quad - log_term / (2 * alpha * (alpha - 1)))
def analytical_value_d_tsallis(distr1, distr2, alpha, par1, par2):
    """Closed-form Tsallis divergence of order alpha (alpha != 1).

    Derived from the Renyi divergence d_R via
    d_T = (exp((alpha - 1) * d_R) - 1) / (alpha - 1). Only
    distr1 = distr2 = 'normal' is supported; see
    analytical_value_d_renyi for the parameter dictionaries.

    Returns
    -------
    float
        Analytical Tsallis divergence.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    renyi_d = analytical_value_d_renyi(distr1, distr2, alpha, par1, par2)
    return (exp((alpha - 1) * renyi_d) - 1) / (alpha - 1)
def analytical_value_d_sharma_mittal(distr1, distr2, alpha, beta, par1,
                                     par2):
    """Closed-form Sharma-Mittal divergence (0 < alpha != 1, beta != 1).

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'normal'/'normal' is supported.
    alpha : float, 0 < alpha != 1
        First parameter of the Sharma-Mittal divergence.
    beta : float, beta != 1
        Second parameter of the Sharma-Mittal divergence.
    par1, par2 : dictionary-s
        Each with keys "mean" and "cov" of the respective Gaussian.

    Returns
    -------
    float
        Analytical Sharma-Mittal divergence.

    References
    ----------
    Frank Nielsen and Richard Nock. A closed-form expression for the
    Sharma-Mittal entropy of exponential families. Journal of Physics
    A: Mathematical and Theoretical, 45:032003, 2012.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    cov1, mean1 = par1['cov'], par1['mean']
    cov2, mean2 = par2['cov'], par2['mean']
    cov_mix = inv(alpha * inv(cov1) + (1 - alpha) * inv(cov2))
    dm = mean1 - mean2
    # Jensen difference divergence:
    j = (log(absolute(det(cov1)) ** alpha *
             absolute(det(cov2)) ** (1 - alpha) /
             absolute(det(cov_mix))) +
         alpha * (1 - alpha) * dot(dot(dm, inv(cov_mix)), dm)) / 2
    factor = exp(-j)
    return (factor ** ((1 - beta) / (1 - alpha)) - 1) / (beta - 1)
def analytical_value_d_bregman(distr1, distr2, alpha, par1, par2):
    """Closed-form Bregman divergence (alpha != 1) of U[0,a], U[0,b].

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'uniform'/'uniform' is supported.
    alpha : float, != 1
        Parameter of the Bregman divergence.
    par1, par2 : dictionary-s
        par1['a'] and par2['a'] hold the vectors a and b.

    Returns
    -------
    float
        Analytical Bregman divergence.
    """
    if not (distr1 == 'uniform' and distr2 == 'uniform'):
        raise Exception('Distribution=?')
    vol_a = prod(par1['a'])
    vol_b = prod(par2['a'])
    return (vol_a ** (1 - alpha) - vol_b ** (1 - alpha)) / (alpha - 1)
def analytical_value_d_jensen_renyi(distr1, distr2, w, par1, par2):
    """Closed-form Jensen-Renyi divergence (alpha = 2 fixed).

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'normal'/'normal' is supported: distr_i = N(m_i, s_i^2 I)
        with m_i = par_i['mean'], s_i = par_i['std'].
    w : vector, w[i] > 0, sum(w) = 1
        Mixture weights.
    par1, par2 : dictionary-s
        Distribution parameters, see above.

    Returns
    -------
    float
        Analytical Jensen-Renyi divergence.

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture
    of Gaussians and Applications to Group-Wise Shape Registration.
    MICCAI, 12: 648-655, 2009.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    # H2(w1 y1 + w2 y2) - [w1 H2(y1) + w2 H2(y2)], where H2 denotes
    # the quadratic Renyi entropy:
    h_mixture = compute_h2(w, (m1, m2), (s1, s2))
    h_members = w[0] * compute_h2((1,), (m1,), (s1,)) + \
        w[1] * compute_h2((1,), (m2,), (s2,))
    return h_mixture - h_members
def analytical_value_i_renyi(distr, alpha, par):
    """Closed-form Renyi mutual information.

    Parameters
    ----------
    distr : str
        Must be 'normal'.
    alpha : float
        Order of the Renyi mutual information.
    par : dictionary
        par["cov"] is the (joint) covariance matrix.

    Returns
    -------
    float
        Analytical Renyi mutual information.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')
    c = par["cov"]
    marginal_vars = diag(c)
    t_joint = -alpha / 2 * log(det(c))
    t_marginal = -(1 - alpha) / 2 * log(prod(marginal_vars))
    t_mix = log(det(alpha * inv(c) +
                    (1 - alpha) * diag(1 / marginal_vars))) / 2
    return (t_joint + t_marginal - t_mix) / (alpha - 1)
def analytical_value_k_ejr1(distr1, distr2, u, par1, par2):
    """Closed-form Jensen-Renyi kernel-1 (alpha = 2 fixed, u > 0).

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'normal'/'normal' is supported: distr_i = N(m_i, s_i^2 I)
        with m_i = par_i['mean'], s_i = par_i['std'].
    u : float, > 0
        Kernel parameter.
    par1, par2 : dictionary-s
        Distribution parameters, see above.

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture
    of Gaussians and Applications to Group-Wise Shape Registration.
    MICCAI, 12: 648-655, 2009.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    weights = array([1/2, 1/2])
    # quadratic Renyi entropy of the equal-weight mixture:
    h_mix = compute_h2(weights, (m1, m2), (s1, s2))
    return exp(-u * h_mix)
def analytical_value_k_ejr2(distr1, distr2, u, par1, par2):
    """Closed-form Jensen-Renyi kernel-2 (alpha = 2 fixed, u > 0).

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'normal'/'normal' is supported: distr_i = N(m_i, s_i^2 I)
        with m_i = par_i['mean'], s_i = par_i['std'].
    u : float, > 0
        Kernel parameter.
    par1, par2 : dictionary-s
        Distribution parameters, see above.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    weights = array([1/2, 1/2])
    jr_div = analytical_value_d_jensen_renyi(distr1, distr2, weights,
                                             par1, par2)
    return exp(-u * jr_div)
def analytical_value_k_ejt1(distr1, distr2, u, par1, par2):
    """Closed-form Jensen-Tsallis kernel-1 (alpha = 2 fixed, u > 0).

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'normal'/'normal' is supported: distr_i = N(m_i, s_i^2 I)
        with m_i = par_i['mean'], s_i = par_i['std'].
    u : float, > 0
        Kernel parameter.
    par1, par2 : dictionary-s
        Distribution parameters, see above.

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture
    of Gaussians and Applications to Group-Wise Shape Registration.
    MICCAI, 12: 648-655, 2009. (Renyi entropy)
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    weights = array([1/2, 1/2])
    h_renyi2 = compute_h2(weights, (m1, m2), (s1, s2))
    # quadratic Renyi entropy -> quadratic Tsallis entropy:
    h_tsallis2 = 1 - exp(-h_renyi2)
    return exp(-u * h_tsallis2)
def analytical_value_k_ejt2(distr1, distr2, u, par1, par2):
    """Closed-form Jensen-Tsallis kernel-2 (alpha = 2 fixed, u > 0).

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'normal'/'normal' is supported: distr_i = N(m_i, s_i^2 I)
        with m_i = par_i['mean'], s_i = par_i['std'].
    u : float, > 0
        Kernel parameter.
    par1, par2 : dictionary-s
        Distribution parameters, see above.

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture
    of Gaussians and Applications to Group-Wise Shape Registration.
    MICCAI, 12: 648-655, 2009. (analytical Jensen-Renyi divergence)
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    weights = array([1/2, 1/2])
    # quadratic Tsallis entropy obtained from the quadratic Renyi
    # entropy H2 as 1 - exp(-H2):
    term_mix = 1 - exp(-compute_h2(weights, (m1, m2), (s1, s2)))
    term_members = \
        weights[0] * (1 - exp(-compute_h2((1,), (m1,), (s1,)))) + \
        weights[1] * (1 - exp(-compute_h2((1,), (m2,), (s2,))))
    # Jensen-Tsallis divergence of the equal-weight mixture:
    jt_div = term_mix - term_members
    return exp(-u * jt_div)
def analytical_value_d_hellinger(distr1, distr2, par1, par2):
    """Closed-form Hellinger distance of two Gaussians.

    Parameters
    ----------
    distr1, distr2 : str-s
        Only 'normal'/'normal' is supported.
    par1, par2 : dictionary-s
        Each with keys "mean" and "cov" of the respective Gaussian.

    Returns
    -------
    float
        Analytical Hellinger distance.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')
    cov1, mean1 = par1['cov'], par1['mean']
    cov2, mean2 = par2['cov'], par2['mean']
    dm = mean1 - mean2
    cov_avg = (cov1 + cov2) / 2
    # Bhattacharyya coefficient of the two Gaussians; see the
    # 'Examples' section of the Wikipedia 'Hellinger distance' page:
    bc = det(cov1) ** (1/4) * det(cov2) ** (1/4) / sqrt(det(cov_avg)) * \
        exp(-dot(dm, dot(inv(cov_avg), dm)) / 8)
    # D^2 = 1 - BC:
    return sqrt(1 - bc)
def analytical_value_cond_h_shannon(distr, par):
    """Closed-form conditional Shannon entropy H(y1 | y2).

    Parameters
    ----------
    distr : str
        Must be 'normal'.
    par : dictionary
        par["cov"] is the joint covariance of (y1, y2); par["dim1"] is
        the dimension of y1.

    Returns
    -------
    float
        Analytical conditional Shannon entropy.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')
    # H(y1|y2) = H(y1, y2) - H(y2):
    h_joint = analytical_value_h_shannon(distr, par)
    cov, dim1 = par['cov'], par['dim1']
    # entropy of the conditioning block y2 = y[dim1:]:
    h_conditioning = analytical_value_h_shannon(
        distr, {"cov": cov[dim1:, dim1:]})
    return h_joint - h_conditioning
def analytical_value_cond_i_shannon(distr, par):
    """Closed-form conditional Shannon mutual information.

    Parameters
    ----------
    distr : str
        Must be 'normal'.
    par : dictionary
        par["cov"] is the joint covariance matrix; par["ds"] the
        vector of subspace dimensions (the last subspace is the
        conditioning one).

    Returns
    -------
    float
        Analytical conditional Shannon mutual information.
    """
    ds = par['ds']
    num_sub = len(ds)
    # starting index of each subspace: 0, d_1, d_1+d_2, ...
    starts = cumsum(hstack((0, ds[:-1])))
    idx_cond = range(starts[num_sub - 1],
                     starts[num_sub - 1] + ds[num_sub - 1])
    if distr != 'normal':
        raise Exception('Distribution=?')
    c = par['cov']
    h_joint = analytical_value_h_shannon(distr, par)
    # entropies of each non-conditioning subspace paired with the
    # conditioning one:
    h_cross = 0
    for m in range(num_sub - 1):
        idx_m = range(starts[m], starts[m] + ds[m])
        idx_pair = hstack((idx_m, idx_cond))
        h_cross += analytical_value_h_shannon(
            distr, {"cov": c[ix_(idx_pair, idx_pair)]})
    h_cond = analytical_value_h_shannon(
        distr, {"cov": c[ix_(idx_cond, idx_cond)]})
    return h_cross - h_joint - (num_sub - 2) * h_cond
| {
"repo_name": "gdikov/vae-playground",
"path": "third_party/ite/cost/x_analytical_values.py",
"copies": "1",
"size": "30969",
"license": "mit",
"hash": 1570425064661072100,
"line_mean": 30.3350202429,
"line_max": 76,
"alpha_frac": 0.5220452857,
"autogenerated": false,
"ratio": 3.523273016956868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45453183026568683,
"avg_score": null,
"num_lines": null
} |
# Analytical solutions for problems that can be solved with the two Shen basis.
###
# Problem one is
#
# -u`` = f in [-1, 1] with u(-1) = u(1) = 0 for f which is
# g on [-1, 0) and h on [0, 1]
#
###
# Problem two is
#
# u```` = f in [-1, 1] with u(-1) = u(1) = 0, u`(-1) = u`(1) = 0 for f which is
# g on [-1, 0) and h on [0, 1]
from __future__ import division
from sympy import Symbol, integrate
import numpy as np
def solve_poisson(g, h, alpha=1, beta=1):
    '''Solve the Poisson problem with f defined by g, h.

    Symbolically solves -alpha*u`` = g on [-1, 0) and -beta*u`` = h on
    [0, 1], with u(-1) = u(1) = 0 and continuity of u and u` at x = 0.
    g and h are sympy expressions in x; alpha, beta are scalar
    coefficients. Returns the pair (u0, u1) of sympy expressions valid
    on [-1, 0] and [0, 1] respectively.
    '''
    x = Symbol('x')
    # Primitive functions of g (G = -int g/alpha, GG its primitive)
    G = integrate(-g/alpha, x)
    GG = integrate(G, x)
    # Primitive functions of h
    H = integrate(-h/beta, x)
    HH = integrate(H, x)
    # The solution is GG + a0*x + b0 on [-1, 0] and HH + a1*x + b1 on [0, 1]
    # Build the lin sys for the coefficients. The system reflects bcs and
    # continuity of u and u` in 0. Rows are, in order: u0(-1)=0,
    # u1(1)=0, u0(0)=u1(0), u0`(0)=u1`(0); unknowns are (a0, b0, a1, b1).
    A = np.array([[-1., 1., 0., 0.],
                  [0., 0., 1., 1.],
                  [0., 1., 0., -1.],
                  [1., 0., -1., 0.]])
    b = np.array([-GG.subs(x, -1),
                  -HH.subs(x, 1),
                  HH.subs(x, 0) - GG.subs(x, 0),
                  H.subs(x, 0) - G.subs(x, 0)])
    [a0, b0, a1, b1] = np.linalg.solve(A, b)
    u0 = GG + a0*x + b0
    u1 = HH + a1*x + b1
    # Run the sanity checks on the computed solution
    # Boundary conditions
    bcl = u0.subs(x, -1)
    bcr = u1.subs(x, 1)
    # Continuity of solution and the derivative
    u_cont = u0.subs(x, 0) - u1.subs(x, 0)
    du_cont = u0.diff(x, 1).subs(x, 0) - u1.diff(x, 1).subs(x, 0)
    # That it in fact solves the laplacian (residual in the L2 sense)
    u0_lap = integrate((alpha*u0.diff(x, 2) + g)**2, (x, -1, 0))
    u1_lap = integrate((beta*u1.diff(x, 2) + h)**2, (x, 0, 1))
    conds = [bcl, bcr, u_cont, du_cont, u0_lap, u1_lap]
    assert all(map(lambda v: abs(v) < 1E-13, conds))
    return u0, u1
def solve_biharmonic(g, h):
    '''Solve the biharmonic problem with f defined by g, h.

    Symbolically solves u```` = g on [-1, 0) and u```` = h on [0, 1],
    with clamped boundary conditions u(-1) = u(1) = 0 and
    u`(-1) = u`(1) = 0, and continuity of u, u`, u``, u``` at x = 0.
    g and h are sympy expressions in x. Returns the pair (u0, u1) of
    sympy expressions valid on [-1, 0] and [0, 1] respectively.
    '''
    x = Symbol('x')
    # Primitive functions of g (repeated integration up to 4th order)
    G = integrate(g, x)
    GG = integrate(G, x)
    GGG = integrate(GG, x)
    GGGG = integrate(GGG, x)
    # Primitive functions of h
    H = integrate(h, x)
    HH = integrate(H, x)
    HHH = integrate(HH, x)
    HHHH = integrate(HHH, x)
    # The solution now needs to match bcs and continuity. The ansatz on
    # each side is the particular solution plus a cubic; rows are, in
    # order: u0(-1)=0, u1(1)=0, u0`(-1)=0, u1`(1)=0, then continuity of
    # u, u`, u``, u``` at 0; unknowns are (a0..a3, b0..b3).
    A = np.array([[-1./6, 1./2, -1., 1., 0., 0., 0., 0.],
                  [0, 0, 0, 0, 1/6., 1/2., 1., 1.],
                  [1/2., -1, 1, 0, 0, 0, 0, 0.],
                  [0, 0, 0, 0, 1/2., 1, 1, 0],
                  [0, 0, 0, 1, 0, 0, 0, -1],
                  [0, 0, 1, 0, 0, 0, -1, 0],
                  [0, 1, 0, 0, 0, -1, 0, 0],
                  [1, 0, 0, 0, -1, 0, 0, 0]])
    b = np.array([-GGGG.subs(x, -1),
                  -HHHH.subs(x, 1),
                  -GGG.subs(x, -1),
                  -HHH.subs(x, 1),
                  HHHH.subs(x, 0) - GGGG.subs(x, 0),
                  HHH.subs(x, 0) - GGG.subs(x, 0),
                  HH.subs(x, 0) - GG.subs(x, 0),
                  H.subs(x, 0) - G.subs(x, 0)])
    [a0, a1, a2, a3, b0, b1, b2, b3] = np.linalg.solve(A, b)
    u0 = GGGG + a0*x**3/6 + a1*x**2/2 + a2*x + a3
    u1 = HHHH + b0*x**3/6 + b1*x**2/2 + b2*x + b3
    # Run the sanity checks on the computed solution
    checks = []
    # Boundary conditions (value and first derivative at both ends)
    checks.append(u0.subs(x, -1))
    checks.append(u1.subs(x, 1))
    checks.append(u0.diff(x, 1).subs(x, -1))
    checks.append(u1.diff(x, 1).subs(x, 1))
    # Continuity of solution and the derivatives up to third order
    checks.append(u0.subs(x, 0) - u1.subs(x, 0))
    checks.append(u0.diff(x, 1).subs(x, 0) - u1.diff(x, 1).subs(x, 0))
    checks.append(u0.diff(x, 2).subs(x, 0) - u1.diff(x, 2).subs(x, 0))
    checks.append(u0.diff(x, 3).subs(x, 0) - u1.diff(x, 3).subs(x, 0))
    # That it in fact solves the biharmonic equation (L2 residual)
    checks.append(integrate((u0.diff(x, 4) - g)**2, (x, -1, 0)))
    checks.append(integrate((u1.diff(x, 4) - h)**2, (x, 0, 1)))
    assert all(map(lambda v: abs(v) < 1E-13, checks))
    return u0, u1
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Demo driver: solve one of the two problems with a piecewise
    # constant right-hand side and plot the solution together with f.
    # NOTE: this script uses Python 2 print statements; run with python2.
    from sympy import S, nsimplify
    from sympy.plotting import plot
    x = Symbol('x')
    # f is g on [-1, 0) and h on [0, 1]
    g, h = S(1), S(3)
    problem = 'poisson'
    k = 0  # order of the derivative of u to plot
    if problem == 'poisson':
        u0, u1 = solve_poisson(g, h, alpha=1, beta=2)
        # solution (red) and right-hand side (blue) on each subdomain
        p0 = plot(u0.diff(x, k), (x, -1, 0), show=False)
        p1 = plot(u1.diff(x, k), (x, 0, 1), show=False)
        p2 = plot(g, (x, -1, 0), show=False)
        p3 = plot(h, (x, 0, 1), show=False)
        p4 = plot(u0.diff(x, 1), (x, -1, 0), show=False)
        p5 = plot(u1.diff(x, 1), (x, 0, 1), show=False)
        p0[0].line_color='red'
        p1[0].line_color='red'
        p2[0].line_color='blue'
        p3[0].line_color='blue'
        # p4[0].line_color='green'
        # p5[0].line_color='green'
        p0.append(p1[0])
        p0.append(p2[0])
        p0.append(p3[0])
        # p0.append(p4[0])
        # p0.append(p5[0])
    if problem == 'biharmonic':
        u0, u1 = solve_biharmonic(g, h)
        # Sol
        p0 = plot(u0, (x, -1, 0), show=False)
        p1 = plot(u1, (x, 0, 1), show=False)
        # Du
        # p2 = plot(u0.diff(x, 1), (x, -1, 0), show=False)
        # p3 = plot(u1.diff(x, 1), (x, 0, 1), show=False)
        # DDu
        # p4 = plot(u0.diff(x, 2), (x, -1, 0), show=False)
        # p5 = plot(u1.diff(x, 2), (x, 0, 1), show=False)
        # DDDu
        # p6 = plot(u0.diff(x, 3), (x, -1, 0), show=False)
        # p7 = plot(u1.diff(x, 3), (x, 0, 1), show=False)
        p0[0].line_color='red'
        p1[0].line_color='red'
        # p2[0].line_color='blue'
        # p3[0].line_color='blue'
        # p4[0].line_color='green'
        # p5[0].line_color='green'
        # p6[0].line_color='black'
        # p7[0].line_color='black'
        p0.append(p1[0])
        # [p0.append(p[0]) for p in (p1, p2, p3, p4, p5, p6, p7)]
    # print the solution in simplified (rationalized) form
    print 'u0', nsimplify(u0)
    print 'u1', nsimplify(u1)
    p0.show()
| {
"repo_name": "MiroK/lega",
"path": "sandbox/bendpy/discont/dg_shen.py",
"copies": "1",
"size": "6035",
"license": "mit",
"hash": -6869612549207594000,
"line_mean": 31.1010638298,
"line_max": 79,
"alpha_frac": 0.4724109362,
"autogenerated": false,
"ratio": 2.5208855472013365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34932964834013364,
"avg_score": null,
"num_lines": null
} |
# Analytical solutions for problems that can be solved with the two sine basis.
###
# Problem one is
#
# -u`` = f in [0, pi] with u(0) = u(pi) = 0 for f which is
# g on [0, pi/2) and h on [pi/2, pi]
#
###
# Problem two is
#
# u```` = f in [0, pi] with u(0) = u(pi) = 0, u`(0) = u`(pi) = 0 for f which is
# g on [0, pi/2) and h on [pi/2, pi]
from __future__ import division
from sympy import Symbol, integrate
import numpy as np
from math import pi
def solve_poisson(g, h):
    '''Solve the Poisson problem with f defined by g, h.

    Symbolically solves -u`` = g on [0, pi/2) and -u`` = h on
    [pi/2, pi], with u(0) = u(pi) = 0 and continuity of u and u` at
    x = pi/2. g and h are sympy expressions in x. Returns the pair
    (u0, u1) of sympy expressions valid on [0, pi/2] and [pi/2, pi].
    '''
    x = Symbol('x')
    # Primitive functions of g
    G = integrate(-g, x)
    GG = integrate(G, x)
    # Primitive functions of h
    H = integrate(-h, x)
    HH = integrate(H, x)
    # The solution is GG + a0*x + b0 on [0, pi/2] and HH + a1*x + b1 on
    # [pi/2, pi].
    # Build the lin sys for the coefficients. The system reflects bcs and
    # continuity of u and u` in pi/2. Rows are, in order: u0(0)=0,
    # u1(pi)=0, u0(pi/2)=u1(pi/2), u0`(pi/2)=u1`(pi/2).
    A = np.array([[0, 1., 0., 0.],
                  [0., 0., pi, 1.],
                  [pi/2., 1., -pi/2, -1.],
                  [1., 0., -1., 0.]])
    b = np.array([-GG.subs(x, 0),
                  -HH.subs(x, pi),
                  HH.subs(x, pi/2) - GG.subs(x, pi/2),
                  H.subs(x, pi/2) - G.subs(x, pi/2)])
    [a0, b0, a1, b1] = np.linalg.solve(A, b)
    u0 = GG + a0*x + b0
    u1 = HH + a1*x + b1
    # Run the sanity checks on the computed solution
    # Boundary conditions
    bcl = u0.subs(x, 0)
    bcr = u1.subs(x, pi)
    # Continuity of solution and the derivative
    u_cont = u0.subs(x, pi/2) - u1.subs(x, pi/2)
    du_cont = u0.diff(x, 1).subs(x, pi/2) - u1.diff(x, 1).subs(x, pi/2)
    # That it in fact solves the laplacian (L2 residual)
    u0_lap = integrate((u0.diff(x, 2) + g)**2, (x, 0, pi/2))
    u1_lap = integrate((u1.diff(x, 2) + h)**2, (x, pi/2, pi))
    conds = [bcl, bcr, u_cont, du_cont, u0_lap, u1_lap]
    # NOTE(review): unlike solve_biharmonic below (and dg_shen.py), the
    # checks here are printed rather than asserted -- confirm whether
    # that is intentional before relying on this routine.
    for i, c in enumerate(conds):
        print i, c, abs(c) < 1E-13
    return u0, u1
def solve_biharmonic(g, h):
    '''Solve the biharmonic problem with f defined by g, h.

    Symbolically solves u```` = g / u```` = h on the two halves of the
    reference interval [-1, 1], with u(-1) = u(1) = 0 and
    u``(-1) = u``(1) = 0 (Navier conditions -- see the boundary rows
    below), plus continuity of u, u`, u``, u``` at x = 0.
    NOTE(review): the module docstring advertises the domain [0, pi]
    with u` = 0 at the ends, but the system below is assembled on
    [-1, 1] with u`` = 0; the __main__ block maps x -> 2/pi*x - 1.
    Confirm which contract is intended.
    '''
    x = Symbol('x')
    # Primitive functions of g (repeated integration up to 4th order)
    G = integrate(g, x)
    GG = integrate(G, x)
    GGG = integrate(GG, x)
    GGGG = integrate(GGG, x)
    # Primitive functions of h
    H = integrate(h, x)
    HH = integrate(H, x)
    HHH = integrate(HH, x)
    HHHH = integrate(HHH, x)
    # The solution now needs to match bcs and continuity. Rows are, in
    # order: u0(-1)=0, u1(1)=0, u0``(-1)=0, u1``(1)=0, then continuity
    # of u, u`, u``, u``` at 0; unknowns are (a0..a3, b0..b3).
    A = np.array([[-1./6, 1./2, -1., 1., 0., 0., 0., 0.],
                  [0, 0, 0, 0, 1/6., 1/2., 1., 1.],
                  [-1., 1, 0, 0, 0, 0, 0, 0.],
                  [0, 0, 0, 0, 1., 1., 0, 0],
                  [0, 0, 0, 1, 0, 0, 0, -1],
                  [0, 0, 1, 0, 0, 0, -1, 0],
                  [0, 1, 0, 0, 0, -1, 0, 0],
                  [1, 0, 0, 0, -1, 0, 0, 0]])
    b = np.array([-GGGG.subs(x, -1),
                  -HHHH.subs(x, 1),
                  -GG.subs(x, -1),
                  -HH.subs(x, 1),
                  HHHH.subs(x, 0) - GGGG.subs(x, 0),
                  HHH.subs(x, 0) - GGG.subs(x, 0),
                  HH.subs(x, 0) - GG.subs(x, 0),
                  H.subs(x, 0) - G.subs(x, 0)])
    [a0, a1, a2, a3, b0, b1, b2, b3] = np.linalg.solve(A, b)
    u0 = GGGG + a0*x**3/6 + a1*x**2/2 + a2*x + a3
    u1 = HHHH + b0*x**3/6 + b1*x**2/2 + b2*x + b3
    # Run the sanity checks on the computed solution
    checks = []
    # Boundary conditions (value and second derivative at both ends)
    checks.append(u0.subs(x, -1))
    checks.append(u1.subs(x, 1))
    checks.append(u0.diff(x, 2).subs(x, -1))
    checks.append(u1.diff(x, 2).subs(x, 1))
    # Continuity of solution and the derivatives up to third order
    checks.append(u0.subs(x, 0) - u1.subs(x, 0))
    checks.append(u0.diff(x, 1).subs(x, 0) - u1.diff(x, 1).subs(x, 0))
    checks.append(u0.diff(x, 2).subs(x, 0) - u1.diff(x, 2).subs(x, 0))
    checks.append(u0.diff(x, 3).subs(x, 0) - u1.diff(x, 3).subs(x, 0))
    # That it in fact solves the biharmonic equation (L2 residual)
    checks.append(integrate((u0.diff(x, 4) - g)**2, (x, -1, 0)))
    checks.append(integrate((u1.diff(x, 4) - h)**2, (x, 0, 1)))
    assert all(map(lambda v: abs(v) < 1E-13, checks))
    return u0, u1
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Demo driver: solve one of the two problems and plot a derivative
    # of the solution.
    from sympy import S
    from sympy.plotting import plot
    x = Symbol('x')
    # f is g on the left half and h on the right half of the domain
    g, h = S(1), x
    problem = 'biharmonic'
    if problem == 'poisson':
        # solution (red) plotted over [0, pi]
        u0, u1 = solve_poisson(g, h)
        p0 = plot(u0, (x, 0, pi/2), show=False)
        p1 = plot(u1, (x, pi/2, pi), show=False)
        p2 = plot(g, (x, 0, pi/2), show=False)
        p3 = plot(h, (x, pi/2, pi), show=False)
        p4 = plot(u0.diff(x, 1), (x, 0, pi/2), show=False)
        p5 = plot(u1.diff(x, 1), (x, pi/2, pi), show=False)
        p0[0].line_color='red'
        p1[0].line_color='red'
        # p2[0].line_color='blue'
        # p3[0].line_color='blue'
        # p4[0].line_color='green'
        # p5[0].line_color='green'
        p0.append(p1[0])
        # p0.append(p2[0])
        # p0.append(p3[0])
        # p0.append(p4[0])
        # p0.append(p5[0])
    if problem == 'biharmonic':
        u0, u1 = solve_biharmonic(g, h)
        # NOTE(review): the result of the substitution below is
        # discarded (u0/u1 are not reassigned) -- presumably this was
        # meant to map the [-1, 1] solution onto [0, pi]; confirm.
        u0.subs(x, 2/pi*x-1), u1.subs(x, 2/pi*x-1)
        # Sol: plot the k-th derivative of the solution (red)
        k = 3
        p0 = plot(u0.diff(x, k), (x, 0, pi/2), show=False)
        p1 = plot(u1.diff(x, k), (x, pi/2, pi), show=False)
        p0[0].line_color='red'
        p1[0].line_color='red'
        p0.append(p1[0])
    p0.show()
| {
"repo_name": "MiroK/lega",
"path": "sandbox/bendpy/discont/dg_sine.py",
"copies": "1",
"size": "5398",
"license": "mit",
"hash": -5752548179703789000,
"line_mean": 30.2023121387,
"line_max": 79,
"alpha_frac": 0.4772137829,
"autogenerated": false,
"ratio": 2.537846732487071,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3515060515387071,
"avg_score": null,
"num_lines": null
} |
" analytical test problem to validate 2D and 3D solvers "
import math
from collections import OrderedDict
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
from nanopores.geometries.curved import Cylinder
# --- define parameters ---
# add_params registers these as module-level parameters collected in PARAMS
# (nanopores convention).
add_params(
    bV = -0.1, # [V] applied bias voltage over the channel
    rho = -0.05, # [C/m**2] surface charge density on the pore wall
    h2D = .5, # characteristic mesh size of the 2D geometry
    h3D = .5, # characteristic mesh size of the 3D geometry
    nmax2D = 1e5, # cell limit for adaptive refinement in 2D
    nmax3D = 1e4, # cell limit for adaptive refinement in 3D
    damp = 1., # damping factor handed to the coupled solver
    bulkcon = 300., # bulk ion concentration
    iterative = True, # iterative linear solvers (used by the 3D setup below)
)
print PARAMS
set_log_level(100)  # silence most FEniCS logging
# --- create 2D geometry ---
Rz = 2. # [nm] length in z direction of channel part
R = 1. # [nm] pore radius
hcross = .5 # [nm] height of crossection
# (r, z) rectangle [0, R] x [-Rz, Rz]; a thin "cross" strip above z=0 is a
# separate subdomain so the current through it can be evaluated both as a
# facet integral and as a volume average.
domain2D = Box([0., -Rz], [R, Rz])
cross = Box([0., 0.], [R, hcross])
domain2D.addsubdomains(
    main = domain2D - cross,
    cross = cross,
)
domain2D.addboundaries(
    lowerb = domain2D.boundary("bottom"),
    upperb = domain2D.boundary("top"),
    wall = domain2D.boundary("right"),
    cross = cross.boundary("bottom"),
    #center = domain2D.boundary("left")
)
domain2D.params["lscale"] = 1e9  # geometry given in nanometers
domain2D.synonymes = dict(
    fluid = {"main", "cross"},
    pore = "fluid",
    #chargedmembraneb = "wall",
    noslip = "wall",
    # NOTE(review): "center" is referenced here but its boundary definition
    # above is commented out -- confirm this is intended.
    nopressure = "center",
    bulk = {"upperb", "lowerb"},
    #nocbc = {"lowerb"},
)
geo2D = domain2D.create_geometry(lc=h2D)
print "Number of cells (2D):", geo2D.mesh.num_cells()
#domain2D.plot()
# --- create 3D geometry by rotating ---
domain3D = rotate_z(domain2D)
geo3D = domain3D.create_geometry(lc=h3D)
print "Number of cells (3D):", geo3D.mesh.num_cells()
#domain3D.plot()
# define cylinder
cyl = Cylinder(R=R, L=2.*Rz)
# this causes geo to automatically snap boundaries when adapting
geo3D.curved = dict(wall = cyl.snap)
# --- create geometry for 1D crossection ---
# TODO: it would be cool if the 1D domain could be extracted from the 2D one
# (should be pretty easy)
domain1D = Interval(0., R)
domain1D.addsubdomain(domain1D, "fluid")
domain1D.addboundaries(
    wall = domain1D.boundary("right"),
    center = domain1D.boundary("left")
)
domain1D.params["lscale"] = 1e9
domain1D.synonymes = dict(
    pore = "fluid",
    chargedmembraneb = "wall",
)
geo1D = domain1D.create_geometry(lc=.001)
# --- define physical parameters for 1D problem ---
phys_params = dict(
    Membraneqs = rho, # wall surface charge [C/m**2]
    bulkcon = bulkcon,
    v0 = {}
)
phys = Physics("pore", geo1D, **phys_params)
# --- solve 1D problem for "exact" solution ---
# Radial Poisson-Boltzmann problem; its solution serves as the reference
# profile for validating the 2D/3D PNP-Stokes results below.
pb = solve_pde(SimplePBProblem, geo1D, phys, cyl=True, iterative=False, tolnewton=1e-10)
# define expressions for interpolation into 2D/3D
phi = pb.solution  # radial equilibrium potential phi(r)
UT = phys.UT  # thermal voltage (presumably kT/e -- used in Boltzmann factors)
c0 = phys.bulkcon
D = phys.DPore
lscale = phys.lscale
E0 = -lscale*bV/(2.*Rz)  # constant axial field [V/m] implied by bV
eps = phys.permittivity["water"]
eta = phys.eta
print "Diffusion constant in pore:", D*1e9, "[nm**2/ns]"
print "Constant electric field:", E0*1e-9, "[V/nm]"
print "Debye length:", phys.debye*1e9, "[nm]"
def cpPB(x):
    '''Equilibrium positive-ion concentration c0*exp(-phi/UT) at radius x.'''
    return c0*math.exp(-phi(x)/UT)
def cmPB(x):
    '''Equilibrium negative-ion concentration c0*exp(+phi/UT) at radius x.'''
    return c0*math.exp(phi(x)/UT)
def pPB(x):
    '''PB pressure profile at radius x, relative to its centerline value.'''
    return -2.*c0*cFarad*UT*(math.cosh(phi(x)/UT) - math.cosh(phi(0.)/UT))
def uPB(x):
    '''Electroosmotic axial velocity profile; vanishes at the wall r = R.'''
    return eps*E0/eta*(phi(x) - phi(R))
def r(x): # radius for 2D AND 3D:
    '''Radial coordinate: Euclidean norm of x with its last (axial) entry dropped.'''
    radial_part = x[:-1]
    return sqrt(sum(c*c for c in radial_part))
class vPB(Expression):
    '''Reference potential: linear axial drop bV plus the radial PB profile.'''
    def eval(self, value, x):
        value[0] = bV*x[-1]/(2.*Rz) + phi(r(x))
class JpPB(Expression):
    '''Axial positive-ion flux density (drift + convection) of the PB reference.'''
    def eval(self, value, x):
        value[0] = (+D/UT*E0 + uPB(r(x)))*cpPB(r(x))
class JmPB(Expression):
    '''Axial negative-ion flux density (drift + convection) of the PB reference.'''
    def eval(self, value, x):
        value[0] = (-D/UT*E0 + uPB(r(x)))*cmPB(r(x))
class cpPBEx(Expression):
    '''Radial PB positive-ion concentration wrapped as a dolfin Expression.'''
    def eval(self, value, x):
        value[0] = cpPB(r(x))
class cmPBEx(Expression):
    '''Radial PB negative-ion concentration wrapped as a dolfin Expression.'''
    def eval(self, value, x):
        value[0] = cmPB(r(x))
class pPBEx(Expression):
    '''Radial PB pressure profile wrapped as a dolfin Expression.'''
    def eval(self, value, x):
        value[0] = pPB(r(x))
# compute "exact" current
# Reference current through a crossection from the 1D PB profile:
# electromigration part J_el and convective (electroosmotic) part J_u,
# integrated over the circular crossection with the weight 2*pi*r.
r2pi = Expression("2*pi*x[0]")
u_PB = Constant(eps/eta)*(phi - Constant(phi(R)))
J_el = Constant(D/UT)*(exp(-phi/UT) + exp(phi/UT))
J_u = u_PB*(exp(-phi/UT) - exp(phi/UT))
J_PB_el = assemble(Constant(cFarad*c0*E0/lscale**2)*J_el*r2pi*dx)
J_PB_u = assemble(Constant(cFarad*c0*E0/lscale**2)*J_u*r2pi*dx)
J_PB = J_PB_el + J_PB_u
print "J (PB): %s [A]" % J_PB
print " J_el: %s [A]" % J_PB_el
print " J_u : %s [A]" % J_PB_u
# --- define physical parameters and customized BCs of 2D problem ---
# constant Dirichlet BCs for v, cp, cm on wall,
# non-zero flux BCs on top/bottom
# non-standard pressure BC
# FIXME something is truly fishy with the Expressions
# NOTE: lscale is rebound here from a float to a dolfin Constant.
lscale = Constant(phys.lscale)
phys_params.update(
    cp0 = dict(
        wall = cpPBEx(), #c0*exp(-phi(R)/UT),
        #bulk = cpPBEx(),
    ),
    cm0 = dict(
        wall = cmPBEx(), #c0*exp(+phi(R)/UT),
        #bulk = cmPBEx(),
    ),
    v0 = dict(wall = vPB()),
    cpflux = dict(upperb = +JpPB(),
                  lowerb = -JpPB(),),
    cmflux = dict(upperb = +JmPB(),
                  lowerb = -JmPB(),),
    pressure = dict(bulk = pPBEx()),
    surfcharge = dict(
        wall = rho,
        upperb = lscale*eps*bV/(2.*Rz),
        lowerb = -lscale*eps*bV/(2.*Rz),)
)
phys2D = Physics("pore", geo2D, **phys_params)
phys3D = Physics("pore", geo3D, **phys_params)
# --- define goal functional: current through crosssection ---
grad = phys2D.grad
def J_PNP(U, geo):
    '''Current goal functionals for a pure PNP solution U = (v, cp, cm).

    Returns dict with the current through the "cross" section computed two
    ways: Jsurf (facet integral) and Jvol (average over the thin strip).'''
    v, cp, cm = U
    dim = geo.physics.dim
    coeff = Constant(1.) if dim==3 else r2pi  # axisymmetric weight in 2D
    Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v))
    Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v))
    Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[dim-1] * coeff) * geo.dS("cross")
    Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[dim-1] * coeff * geo.dx("cross")
    return dict(Jsurf=Jsurf, Jvol=Jvol)
def J(U, geo):
    '''Current goal functionals for the coupled PNP-Stokes solution
    U = (v, cp, cm, u, p); ion fluxes include the convective terms cp*u, cm*u.'''
    v, cp, cm, u, p = U
    #u, p, v, cp, cm = U
    dim = geo.physics.dim
    coeff = Constant(1.) if dim==3 else r2pi
    Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v)) + cp*u
    Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v)) + cm*u
    Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[dim-1] * coeff) * geo.dS("cross")
    Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[dim-1] * coeff * geo.dx("cross")
    return dict(Jsurf=Jsurf, Jvol=Jvol)
"""
def saveJ(self):
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
"""
"""
def saveJ(self):
#i = self.geo.mesh.num_vertices()
i = len(self.functionals["Jvol"].values)
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB), N=i)
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB), N=i)
"""
def saveJ(self):
i = self.geo.mesh.num_vertices()
#i = len(self.functionals["Jvol"].values)
self.save_estimate("(J*h - J)/J" + Dstr, abs((self.functionals["Jsurf"].evaluate()-J_PB)/J_PB), N=i)
self.save_estimate("(Jh - J)/J" + Dstr, abs((self.functionals["Jvol"].evaluate()-J_PB)/J_PB), N=i)
#print " rel. error Jv:", abs((self.functionals["Jvol"].value()-J_PB)/J_PB)
#print " rel. error Js:", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB)
# --- set up PNP+Stokes problem ---
# The OrderedDict fixes the order in which the subproblems are treated.
problems = OrderedDict([
    ("pnp", SimplePNPProblem),
    ("stokes", SimpleStokesProblem),])
def couple_pnp(ustokes):
    '''Hand the velocity component of the Stokes solution to the PNP problem.'''
    velocity = ustokes.sub(0)
    return {"ustokes": velocity}
def couple_stokes(upnp, phys):
    '''Build the electric body force f = -F*(cp - cm)*grad(v) that the PNP
    solution exerts on the fluid, for the Stokes subproblem.'''
    potential, c_plus, c_minus = upnp.split()
    charge = c_plus - c_minus
    return {"f": -phys.cFarad*charge*grad(potential)}
couplers = dict(
    pnp = couple_pnp,
    stokes = couple_stokes
)
# --- solve 2D problem ---
Dstr = " (2D)"  # tag appended to estimator names by saveJ
problem = CoupledProblem(problems, couplers, geo2D, phys2D, cyl=True, conservative=False, ku=1, beta=10.)
pnps2D = CoupledSolver(problem, goals=[J], damp=damp, inewton=1, ipicard=30, tolnewton=1e-2, verbose=False)
#pnps2D.uniform_refinement = True
pnps2D.marking_fraction = .25
pnps2D.maxcells = nmax2D
pnps2D.solve(refinement=True, inside_loop=saveJ)
(v0, cp0, cm0, u0, p0) = pnps2D.solutions()
pnps2D.visualize()
"""
# --- solve 3D problem ---
Dstr = " (3D)"
problem = CoupledProblem(problems, couplers, geo3D, phys3D, cyl=False, conservative=False, ku=1, beta=1.)
problem.problems["pnp"].method["iterative"] = iterative
problem.problems["stokes"].method["iterative"] = iterative
pnps3D = CoupledSolver(problem, goals=[J], damp=damp, inewton=1, ipicard=30, tolnewton=1e-2, verbose=False)
pnps3D.uniform_refinement = True
pnps3D.maxcells = nmax3D
pnps3D.solve(refinement=True, inside_loop=saveJ)
"""
# --- visualization ---
#pnps3D.visualize()
#(v, cp, cm, u, p) = pnps3D.solutions()
#plot1D({"phi PB":phi}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "potential [V]"))
#plot1D({"phi (2D)": v0}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "potential [V]"), newfig=False)
#plot1D({"phi (3D)": v}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "potential [V]"), newfig=False)
"""
plot1D({"c+ PB":cpPB, "c- PB":cmPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "concentration [mol/m**3]"))
plot1D({"c+ (2D)": cp0, "c- (2D)": cm0}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "concentration [mol/m**3]"), newfig=False)
plot1D({"c+ (3D)": cp, "c- (3D)": cm}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "concentration [mol/m**3]"), newfig=False, legend="upper left")
"""
plot1D({
"c+ (2D)": cp0,
"c- (2D)": cm0,
"c+ PB":lambda x: cpPB(0.),
"c- PB":lambda x: cmPB(0.)},
(-Rz, Rz, 101), "y", origin=(.0, 0.), dim=2, axlabels=("z [nm]", "concentration [mol/m**3]"))
#plot1D({"c+ (2D)": cp, "c- PNP (2D)": cm},
# (-Rz, Rz, 101), "z", origin=(.0, 0., 0.), dim=3, axlabels=("z [nm]", "concentration [mol/m**3]"), newfig=False)
"""
plot1D({"uz PB":uPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
plot1D({"uz (2D)":u0[1]}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
plot1D({"uz (3D)":u[2]}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
plot1D({"ur PB":lambda x:0.}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
plot1D({"ur (2D)":u0[0]}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
plot1D({"ur (3D)":u[0]}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
plot1D({"p PB":pPB}, (0., R, 101), "x", dim=1)
plot1D({"p (2D)":p0}, (0., R, 101), "x", dim=2, newfig=False)
plot1D({"p (3D)":p}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "pressure [Pa s]"), newfig=False)
"""
#pnps2D.estimators["(Jh - J)/J (2D)"].newtonplot()
#pnps3D.estimators["(Jh - J)/J (3D)"].newtonplot(fig=False)
pnps2D.estimators["(Jh - J)/J (2D)"].plot(rate=-1.)
#pnps3D.estimators["(Jh - J)/J (3D)"].plot(rate=-2./3)
pnps2D.estimators["(J*h - J)/J (2D)"].plot(rate=-1.)
#pnps3D.estimators["(J*h - J)/J (3D)"].plot(rate=-2./3)
#pnps.estimators["(Jsing_h - J)/J"].newtonplot()
#pnp.estimators["(Jsing_h - J)/J"].newtonplot()
#pnp.estimators["(J_h - J)/J"].newtonplot(fig=False)
#saveplots("anaPNPSrefine", meta=PARAMS)
showplots()
| {
"repo_name": "mitschabaude/nanopores",
"path": "scripts/numerics/anaPNPS_refinement_2D.py",
"copies": "1",
"size": "11035",
"license": "mit",
"hash": -3809633973085056000,
"line_mean": 33.592476489,
"line_max": 148,
"alpha_frac": 0.6068871772,
"autogenerated": false,
"ratio": 2.4495005549389566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35563877321389564,
"avg_score": null,
"num_lines": null
} |
" analytical test problem to validate 2D and 3D solvers "
import math
from collections import OrderedDict
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
# --- define parameters ---
add_params(
bV = -0.1, # [V]
rho = -0.05, # [C/m**2]
h2D = .05,
Nmax = 1e5,
damp = 1.,
bulkcon = 300.,
)
# --- create 2D geometry ---
Rz = 2. # [nm] length in z direction of channel part
R = 1. # [nm] pore radius
hcross = .2
domain2D = Box([0., -Rz], [R, Rz])
cross = Box([0., 0.], [R, hcross])
domain2D.addsubdomains(
main = domain2D - cross,
cross = cross,
)
domain2D.addboundaries(
lowerb = domain2D.boundary("bottom"),
upperb = domain2D.boundary("top"),
wall = domain2D.boundary("right"),
cross = cross.boundary("bottom"),
center = domain2D.boundary("left")
)
domain2D.params["lscale"] = 1e9
domain2D.synonymes = dict(
fluid = {"main", "cross"},
pore = "fluid",
chargedmembraneb = "wall",
noslip = "wall",
nopressure = "center",
bulk = {"upperb", "lowerb"},
#nocbc = {"lowerb"},
)
geo2D = domain2D.create_geometry(lc=h2D)
print "Number of cells:", geo2D.mesh.num_cells()
#mesh = geo2D.mesh
#boundary = MeshFunction("size_t", mesh, 1)
#boundary.set_all(0)
#DomainBoundary().mark(boundary, 2)
#plot(boundary)
#domain2D.plot()
# --- create geometry for 1D crossection ---
# TODO: it would be cool if the 1D domain could be extracted from the 2D one
# (should be pretty easy)
domain1D = Interval(0., R)
domain1D.addsubdomain(domain1D, "fluid")
domain1D.addboundaries(
wall = domain1D.boundary("right"),
center = domain1D.boundary("left")
)
domain1D.params["lscale"] = 1e9
domain1D.synonymes = dict(
pore = "fluid",
chargedmembraneb = "wall",
)
geo1D = domain1D.create_geometry(lc=.001)
# --- define physical parameters for 1D problem ---
phys_params = dict(
Membraneqs = rho,
bulkcon = bulkcon,
v0 = {}
)
phys = Physics("pore", geo1D, **phys_params)
# --- solve 1D problem for "exact" solution ---
pb = solve_pde(SimplePBProblem, geo1D, phys, cyl=True, iterative=False, tolnewton=1e-10)
# define expressions for interpolation into 2D
phi = pb.solution
UT = phys.UT
c0 = phys.bulkcon
D = phys.DPore
lscale = phys.lscale
E0 = -lscale*bV/(2.*Rz)
eps = phys.permittivity["water"]
eta = phys.eta
print "Diffusion constant in pore:", D*1e9, "[nm**2/ns]"
print "Constant electric field:", E0, "[V/m]"
def cpPB(x):
    '''Equilibrium positive-ion concentration c0*exp(-phi/UT) at radius x.
    NOTE(review): exp here comes from the dolfin star import -- confirm it is
    intended (the refinement variant of this script uses math.exp).'''
    return c0*exp(-phi(x)/UT)
def cmPB(x):
    '''Equilibrium negative-ion concentration c0*exp(+phi/UT) at radius x.'''
    return c0*exp(phi(x)/UT)
def pPB(x):
    '''PB pressure profile at radius x, relative to its centerline value.'''
    return -2.*c0*cFarad*UT*(math.cosh(phi(x)/UT) - math.cosh(phi(0.)/UT))
def uPB(x):
    '''Electroosmotic axial velocity profile; vanishes at the wall r = R.'''
    return eps*E0/eta*(phi(x) - phi(R))
class vPB(Expression):
    '''Reference 2D potential: radial PB profile plus linear axial drop bV.'''
    def eval(self, value, x):
        value[0] = bV*x[1]/(2.*Rz) + phi(x[0])
class JpPB(Expression):
    '''Axial positive-ion flux density (drift + convection) of the PB reference.'''
    def eval(self, value, x):
        value[0] = (+D/UT*E0 + uPB(x[0]))*cpPB(x[0])
class JmPB(Expression):
    '''Axial negative-ion flux density (drift + convection) of the PB reference.'''
    def eval(self, value, x):
        value[0] = (-D/UT*E0 + uPB(x[0]))*cmPB(x[0])
# compute "exact" current
r2pi = Expression("2*pi*x[0]")
u_PB = Constant(eps/eta)*(phi - Constant(phi(R)))
J_el = Constant(D/UT)*(exp(-phi/UT) + exp(phi/UT))
J_u = u_PB*(exp(-phi/UT) - exp(phi/UT))
J_PB_el = assemble(Constant(cFarad*c0*E0/lscale**2)*J_el*r2pi*dx)
J_PB_u = assemble(Constant(cFarad*c0*E0/lscale**2)*J_u*r2pi*dx)
J_PB = J_PB_el + J_PB_u
print "J (PB): %s [A]" % J_PB
print " J_el: %s [A]" % J_PB_el
print " J_u : %s [A]" % J_PB_u
# --- define physical parameters and customized BCs of 2D problem ---
# constant Dirichlet BCs for v, cp, cm on wall,
# non-zero flux BCs on top/bottom
n = FacetNormal(geo2D.mesh)
lscale = Constant(phys.lscale)
phys_params.update(
cp0 = dict(wall = c0*exp(-phi(R)/UT)),
cm0 = dict(wall = c0*exp(+phi(R)/UT)),
v0 = dict(wall = vPB()),
cpflux = dict(bulk = JpPB()*n[1]),
cmflux = dict(bulk = JmPB()*n[1]),
)
phys = Physics("pore", geo2D, **phys_params)
phys.surfcharge.update(
upperb = lscale*eps*bV/(2.*Rz),
lowerb = -lscale*eps*bV/(2.*Rz),
)
# --- solve 2D PNP+Stokes problem ---
# the goal functional: current through crosssection
grad = phys.grad
def J_PNP(U, geo):
    '''Current goal functionals for a pure PNP solution U = (v, cp, cm) in 2D.

    Returns dict with the current through the "cross" section computed two
    ways: Jsurf (facet integral) and Jvol (average over the thin strip).'''
    v, cp, cm = U
    Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v))
    Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v))
    Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[1] * r2pi) * geo.dS("cross")
    Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[1] * r2pi * geo.dx("cross")
    return dict(Jsurf=Jsurf, Jvol=Jvol)
def J(U, geo):
    '''Current goal functionals for the coupled PNP-Stokes solution
    U = (v, cp, cm, u, p); ion fluxes include the convective terms cp*u, cm*u.'''
    v, cp, cm, u, p = U
    Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v)) + cp*u
    Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v)) + cm*u
    Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[1] * r2pi) * geo.dS("cross")
    Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[1] * r2pi * geo.dx("cross")
    return dict(Jsurf=Jsurf, Jvol=Jvol)
"""
def saveJ(self):
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
"""
"""
def saveJ(self):
#i = self.geo.mesh.num_vertices()
i = len(self.functionals["Jvol"].values)
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB), N=i)
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB), N=i)
"""
def saveJ(self):
    # Record relative errors of both current functionals against the 1D PB
    # reference J_PB; passed as inside_loop to the coupled solve below.
    #i = self.geo.mesh.num_vertices()
    i = len(self.functionals["Jvol"].values)
    self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].evaluate()-J_PB)/J_PB), N=i)
    self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].evaluate()-J_PB)/J_PB), N=i)
    print " rel. error Jv:", abs((self.functionals["Jvol"].value()-J_PB)/J_PB)
    print " rel. error Js:", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB)
# solve
#pnp = solve_pde(SimplePNPProblem, geo2D, phys, cyl=True, newtondamp=1., goals=[J_PNP], inside_loop=saveJ,
# refinement=False, marking_fraction=.5, maxcells=Nmax, iterative=False, verbose=False)
#v, cp, cm = pnp.solutions()
#stokes = solve_pde(SimpleStokesProblem, geo2D, phys, cyl=True, conservative=False, f=-cFarad*(cp-cm)*grad(v), ku=1, beta=10.)
problems = OrderedDict([
("pnp", SimplePNPProblem),
("stokes", SimpleStokesProblem)])
def couple_pnp(ustokes):
    '''Hand the velocity component of the Stokes solution to the PNP problem.'''
    velocity = ustokes.sub(0)
    return {"ustokes": velocity}
def couple_stokes(upnp, phys):
    '''Build the electric body force f = -F*(cp - cm)*grad(v) that the PNP
    solution exerts on the fluid, for the Stokes subproblem.'''
    potential, c_plus, c_minus = upnp.split()
    charge = c_plus - c_minus
    return {"f": -phys.cFarad*charge*grad(potential)}
couplers = dict(
pnp = couple_pnp,
stokes = couple_stokes
)
problem = CoupledProblem(problems, couplers, geo2D, phys, cyl=True, conservative=False, ku=1, beta=10.)
pnps = CoupledSolver(problem, goals=[J], damp=damp, inewton=1, ipicard=30, tolnewton=1e-2)
pnps.single_solve(inside_loop=saveJ)
# --- visualization ---
(v, cp, cm, u, p) = pnps.solutions()
#plot(-cFarad*(cp-cm)*grad(v)[1]/(lscale**2*eta), title="electroosmotic forcing [m/s]")
#pnps.visualize()
#plot1D({"phi PB":phi}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "potential [V]"))
#plot1D({"phi PNP (2D)": v}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "potential [V]"), newfig=False)
plot1D({"c+ PB":cpPB, "c- PB":cmPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "concentration [mol/m**3]"))
plot1D({"c+ PNP (2D)": cp, "c- PNP (2D)": cm}, (0., R, 101), "x", origin=(0.,-Rz), dim=2, axlabels=("r [nm]", "concentration [mol/m**3]"), newfig=False)
#plot1D({"c+ PNP (2D)": cp, "c- PNP (2D)": cm, "c+ PB":lambda x: cpPB(0.), "c- PB":lambda x: cmPB(0.)},
# (-Rz, Rz, 101), "y", origin=(.0*R, 0.), dim=2, axlabels=("z [nm]", "concentration [mol/m**3]"))
plot1D({"uz PB":uPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
plot1D({"uz PNP (2D)":u[1]}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
#plot1D({"ur PB":lambda x:0.}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
#plot1D({"ur PNP (2D)":u[0]}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
plot1D({"p PB":pPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
plot1D({"p PNP (2D)":p}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
pnps.estimators["(J_h - J)/J"].newtonplot()
pnps.estimators["(Jsing_h - J)/J"].newtonplot()
#pnp.estimators["(Jsing_h - J)/J"].newtonplot()
#pnp.estimators["(J_h - J)/J"].newtonplot(fig=False)
saveplots("anaPNPS_2D", meta=PARAMS)
showplots()
| {
"repo_name": "mitschabaude/nanopores",
"path": "scripts/numerics/analyticalPNPS_2D.py",
"copies": "1",
"size": "8529",
"license": "mit",
"hash": -2299403639243705900,
"line_mean": 34.6861924686,
"line_max": 152,
"alpha_frac": 0.6150779693,
"autogenerated": false,
"ratio": 2.4375535867390683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8394336770477304,
"avg_score": 0.03165895711235292,
"num_lines": 239
} |
" analytical test problem to validate 2D and 3D solvers "
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from collections import OrderedDict
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
# --- define parameters ---
add_params(
bV = -0.1, # [V]
rho = -0.05, # [C/m**2]
h2D = .2,
h3D = .2,
Nmax = 1e5,
damp = 1.,
bulkcon = 300.,
iterative = True,
)
print PARAMS
# --- create 2D geometry ---
Rz = 2. # [nm] length in z direction of channel part
R = 1. # [nm] pore radius
hcross = .2 # [nm] height of crossection
domain2D = Box([0., -Rz], [R, Rz])
cross = Box([0., 0.], [R, hcross])
domain2D.addsubdomains(
main = domain2D - cross,
cross = cross,
)
domain2D.addboundaries(
lowerb = domain2D.boundary("bottom"),
upperb = domain2D.boundary("top"),
wall = domain2D.boundary("right"),
cross = cross.boundary("bottom"),
#center = domain2D.boundary("left")
)
domain2D.params["lscale"] = 1e9
domain2D.synonymes = dict(
fluid = {"main", "cross"},
pore = "fluid",
#chargedmembraneb = "wall",
noslip = "wall",
nopressure = "center",
bulk = {"upperb", "lowerb"},
#nocbc = {"lowerb"},
)
geo2D = domain2D.create_geometry(lc=h2D)
print "Number of cells (2D):", geo2D.mesh.num_cells()
#domain2D.plot()
# --- create 3D geometry by rotating ---
domain3D = rotate_z(domain2D)
geo3D = domain3D.create_geometry(lc=h3D)
print "Number of cells (3D):", geo3D.mesh.num_cells()
#domain3D.plot()
# --- create geometry for 1D crossection ---
# TODO: it would be cool if the 1D domain could be extracted from the 2D one
# (should be pretty easy)
domain1D = Interval(0., R)
domain1D.addsubdomain(domain1D, "fluid")
domain1D.addboundaries(
wall = domain1D.boundary("right"),
center = domain1D.boundary("left")
)
domain1D.params["lscale"] = 1e9
domain1D.synonymes = dict(
pore = "fluid",
chargedmembraneb = "wall",
)
geo1D = domain1D.create_geometry(lc=.001)
# --- define physical parameters for 1D problem ---
phys_params = dict(
Membraneqs = rho,
bulkcon = bulkcon,
v0 = {}
)
phys = Physics("pore", geo1D, **phys_params)
# --- solve 1D problem for "exact" solution ---
pb = solve_pde(SimplePBProblem, geo1D, phys, cyl=True, iterative=False, tolnewton=1e-10)
# define expressions for interpolation into 2D/3D
phi = pb.solution
UT = phys.UT
c0 = phys.bulkcon
D = phys.DPore
lscale = phys.lscale
E0 = -lscale*bV/(2.*Rz)
eps = phys.permittivity["water"]
eta = phys.eta
print "Diffusion constant in pore:", D*1e9, "[nm**2/ns]"
print "Constant electric field:", E0*1e-9, "[V/nm]"
print "Debye length:", phys.debye*1e9, "[nm]"
def cpPB(x):
    '''Equilibrium positive-ion concentration c0*exp(-phi/UT) at radius x.'''
    return c0*exp(-phi(x)/UT)
def cmPB(x):
    '''Equilibrium negative-ion concentration c0*exp(+phi/UT) at radius x.'''
    return c0*exp(phi(x)/UT)
def pPB(x):
    '''PB pressure profile at radius x, relative to its centerline value.'''
    return -2.*c0*cFarad*UT*(math.cosh(phi(x)/UT) - math.cosh(phi(0.)/UT))
def uPB(x):
    '''Electroosmotic axial velocity profile; vanishes at the wall r = R.'''
    return eps*E0/eta*(phi(x) - phi(R))
def r(x): # radius for 2D AND 3D:
    '''Radial coordinate: Euclidean norm of x with its last (axial) entry dropped.'''
    radial_part = x[:-1]
    return sqrt(sum(c*c for c in radial_part))
class vPB(Expression):
    '''Reference potential: linear axial drop bV plus the radial PB profile.'''
    def eval(self, value, x):
        value[0] = bV*x[-1]/(2.*Rz) + phi(r(x))
class JpPB(Expression):
    '''Axial positive-ion flux density (drift + convection) of the PB reference.'''
    def eval(self, value, x):
        value[0] = (+D/UT*E0 + uPB(r(x)))*cpPB(r(x))
class JmPB(Expression):
    '''Axial negative-ion flux density (drift + convection) of the PB reference.'''
    def eval(self, value, x):
        value[0] = (-D/UT*E0 + uPB(r(x)))*cmPB(r(x))
class cpPBEx(Expression):
    '''Radial PB positive-ion concentration wrapped as a dolfin Expression.'''
    def eval(self, value, x):
        value[0] = cpPB(r(x))
class cmPBEx(Expression):
    '''Radial PB negative-ion concentration wrapped as a dolfin Expression.'''
    def eval(self, value, x):
        value[0] = cmPB(r(x))
class pPBEx(Expression):
    '''Radial PB pressure profile wrapped as a dolfin Expression.'''
    def eval(self, value, x):
        value[0] = pPB(r(x))
# compute "exact" current
r2pi = Expression("2*pi*x[0]")
u_PB = Constant(eps/eta)*(phi - Constant(phi(R)))
J_el = Constant(D/UT)*(exp(-phi/UT) + exp(phi/UT))
J_u = u_PB*(exp(-phi/UT) - exp(phi/UT))
J_PB_el = assemble(Constant(cFarad*c0*E0/lscale**2)*J_el*r2pi*dx)
J_PB_u = assemble(Constant(cFarad*c0*E0/lscale**2)*J_u*r2pi*dx)
J_PB = J_PB_el + J_PB_u
print "J (PB): %s [A]" % J_PB
print " J_el: %s [A]" % J_PB_el
print " J_u : %s [A]" % J_PB_u
# --- define physical parameters and customized BCs of 2D problem ---
# constant Dirichlet BCs for v, cp, cm on wall,
# non-zero flux BCs on top/bottom
# non-standard pressure BC
lscale = Constant(phys.lscale)
phys_params.update(
cp0 = dict(
wall = c0*exp(-phi(R)/UT),
bulk = cpPBEx()),
cm0 = dict(
wall = c0*exp(+phi(R)/UT),
bulk = cmPBEx()),
v0 = dict(wall = vPB()),
#cpflux = dict(bulk = JpPB()*n3D[2]),
#cmflux = dict(bulk = JmPB()*n3D[2]),
pressure = dict(bulk = pPBEx()),
surfcharge = dict(
wall = rho,
upperb = lscale*eps*bV/(2.*Rz),
lowerb = -lscale*eps*bV/(2.*Rz),)
)
phys2D = Physics("pore", geo2D, **phys_params)
phys3D = Physics("pore", geo3D, **phys_params)
# --- define goal functional: current through crosssection ---
grad = phys2D.grad
def J_PNP(U, geo):
    '''Current goal functionals for a pure PNP solution U = (v, cp, cm).

    Returns dict with the current through the "cross" section computed two
    ways: Jsurf (facet integral) and Jvol (average over the thin strip).'''
    v, cp, cm = U
    dim = geo.physics.dim
    coeff = Constant(1.) if dim==3 else r2pi  # axisymmetric weight in 2D
    Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v))
    Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v))
    Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[dim-1] * coeff) * geo.dS("cross")
    Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[dim-1] * coeff * geo.dx("cross")
    return dict(Jsurf=Jsurf, Jvol=Jvol)
def J(U, geo):
    '''Current goal functionals for the coupled PNP-Stokes solution
    U = (v, cp, cm, u, p); ion fluxes include the convective terms cp*u, cm*u.'''
    v, cp, cm, u, p = U
    dim = geo.physics.dim
    coeff = Constant(1.) if dim==3 else r2pi
    Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v)) + cp*u
    Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v)) + cm*u
    Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[dim-1] * coeff) * geo.dS("cross")
    Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[dim-1] * coeff * geo.dx("cross")
    return dict(Jsurf=Jsurf, Jvol=Jvol)
"""
def saveJ(self):
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
"""
"""
def saveJ(self):
#i = self.geo.mesh.num_vertices()
i = len(self.functionals["Jvol"].values)
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB), N=i)
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB), N=i)
"""
def saveJ(self):
    # Record relative errors of both current functionals against the 1D PB
    # reference J_PB; called once per fixed-point iteration below.
    # Dstr is a module-level tag (" (2D)"/" (3D)") set before each solve.
    #i = self.geo.mesh.num_vertices()
    i = len(self.functionals["Jvol"].values) + 1
    self.save_estimate("(Jsing_h - J)/J" + Dstr, abs((self.functionals["Jsurf"].evaluate()-J_PB)/J_PB), N=i)
    self.save_estimate(r"$|J_h - J|/J$" + Dstr, abs((self.functionals["Jvol"].evaluate()-J_PB)/J_PB), N=i)
    print " rel. error Jv:", abs((self.functionals["Jvol"].value()-J_PB)/J_PB)
    print " rel. error Js:", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB)
# --- set up PNP+Stokes problem ---
problems = OrderedDict([
("pnp", SimplePNPProblem),
("stokes", SimpleStokesProblem)])
def couple_pnp(ustokes):
    '''Hand the velocity component of the Stokes solution to the PNP problem.'''
    velocity = ustokes.sub(0)
    return {"ustokes": velocity}
def couple_stokes(upnp, phys):
    '''Build the electric body force f = -F*(cp - cm)*grad(v) that the PNP
    solution exerts on the fluid, for the Stokes subproblem.'''
    potential, c_plus, c_minus = upnp.split()
    charge = c_plus - c_minus
    return {"f": -phys.cFarad*charge*grad(potential)}
couplers = dict(
pnp = couple_pnp,
stokes = couple_stokes
)
# --- solve 2D problem ---
Dstr = " (2D)"
problem = CoupledProblem(problems, couplers, geo2D, phys2D, cyl=True, conservative=False, ku=1, beta=10.)
pnps2D = CoupledSolver(problem, goals=[J], damp=damp, inewton=1, ipicard=30, tolnewton=1e-2)
#pnps2D.single_solve(inside_loop=saveJ)
for i in pnps2D.fixedpoint():
saveJ(pnps2D)
# --- solve 3D problem ---
Dstr = " (3D)"
problem = CoupledProblem(problems, couplers, geo3D, phys3D, cyl=False, conservative=False, ku=1, beta=1.)
problem.problems["pnp"].method["iterative"] = iterative
problem.problems["stokes"].method["iterative"] = iterative
pnps3D = CoupledSolver(problem, goals=[J], damp=damp, inewton=1, ipicard=30, tolnewton=1e-2)
#pnps3D.single_solve(inside_loop=saveJ)
for i in pnps3D.fixedpoint():
saveJ(pnps3D)
# --- visualization ---
#pnps2D.visualize()
#pnps3D.visualize()
(v0, cp0, cm0, u0, p0) = pnps2D.solutions()
(v, cp, cm, u, p) = pnps3D.solutions()
#plot1D({"phi PB":phi}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "potential [V]"))
#plot1D({"phi (2D)": v0}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "potential [V]"), newfig=False)
#plot1D({"phi (3D)": v}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "potential [V]"), newfig=False)
figsize = 5*0.8, 4*0.8
plt.figure("concentrations", figsize=figsize)
params = dict(axis="x", axlabels=("r [nm]", "concentration [mol/m$^3$]"), newfig=False)
plot1D({r"$c^+$ PB":cpPB, r"$c^-$ PB":cmPB},
(0., R, 101), dim=1, style="b-", **params)
plot1D({r"$c^+$ 2D": cp0, r"$c^-$ 2D": cm0},
(0., R, 11), dim=2, style="gs", **params)
plot1D({r"$c^+$ 3D": cp, r"$c^-$ 3D": cm},
(0.05*R, 0.95*R, 10), dim=3, style="ro", **params)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
"""
plot1D({
"c+ (2D)": cp0,
"c- (2D)": cm0,
"c+ PB":lambda x: cpPB(0.),
"c- PB":lambda x: cmPB(0.)},
(-Rz, Rz, 101), "y", origin=(.0, 0.), dim=2, axlabels=("z [nm]", "concentration [mol/m**3]"))
plot1D({"c+ (2D)": cp, "c- PNP (2D)": cm},
(-Rz, Rz, 101), "z", origin=(.0, 0., 0.), dim=3, axlabels=("z [nm]", "concentration [mol/m**3]"), newfig=False)
"""
plt.figure("velocity", figsize=figsize)
params = dict(axis="x", axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
plot1D({r"$u_z$ PB":uPB},
(0., R, 101), dim=1, style="b-", **params)
plot1D({r"$u_z$ 2D":u0[1]},
(0., R, 11), dim=2, style="gs", **params)
plot1D({r"$u_z$ 3D":u[2]},
(0.05*R, 0.95*R, 10), dim=3, style="ro", **params)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plot1D({"ur PB":lambda x:0.}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
#plot1D({"ur (2D)":u0[0]}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
#plot1D({"ur (3D)":u[0]}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
#plot1D({"p PB":pPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
#plot1D({"p (2D)":p0}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
#plot1D({"p (3D)":p}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
fig = plt.figure("hybrid", figsize=figsize)
pnps2D.estimators[r"$|J_h - J|/J$" + " (2D)"].newtonplot(fig=False)
pnps3D.estimators[r"$|J_h - J|/J$" + " (3D)"].newtonplot(fig=False)
fig.axes[0].xaxis.set_major_locator(ticker.MultipleLocator(1))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
from folders import FIGDIR
savefigs("anaPNPS2", FIGDIR)
#pnps.estimators["(Jsing_h - J)/J"].newtonplot()
#pnp.estimators["(Jsing_h - J)/J"].newtonplot()
#pnp.estimators["(J_h - J)/J"].newtonplot(fig=False)
#saveplots("anaPNPS", meta=PARAMS)
#showplots()
| {
"repo_name": "mitschabaude/nanopores",
"path": "scripts/numerics/analyticalPNPS.py",
"copies": "1",
"size": "10917",
"license": "mit",
"hash": 7245422797871124000,
"line_mean": 33.4384858044,
"line_max": 115,
"alpha_frac": 0.6080425025,
"autogenerated": false,
"ratio": 2.441189624329159,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35492321268291593,
"avg_score": null,
"num_lines": null
} |
" analytical test problem to validate 2D solver "
import math
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
# --- define parameters ---
bV = -0.5 # [V]
rho = -0.025 # [C/m**2]
# --- create 2D geometry ---
Rz = 2. # [nm] length in z direction of channel part
R = 2. # [nm] pore radius
domain2D = Box([0., -Rz], [R, Rz])
cross = Box([0., 0.], [R, 0.])
domain2D.addsubdomains(fluid = domain2D)
domain2D.addboundaries(
lowerb = domain2D.boundary("bottom"),
upperb = domain2D.boundary("top"),
wall = domain2D.boundary("right"),
cross = cross,
)
domain2D.params["lscale"] = 1e9
domain2D.synonymes = dict(
pore = "fluid",
chargedmembraneb = "wall",
)
geo2D = domain2D.create_geometry(lc=.1)
domain2D.plot()
# --- create geometry for 1D crossection ---
# TODO: it would be cool if the 1D domain could be extracted from the 2D one
# (should be pretty easy)
domain1D = Interval(0., R)
domain1D.addsubdomain(domain1D, "fluid")
domain1D.addboundaries(
wall = domain1D.boundary("right"),
center = domain1D.boundary("left")
)
domain1D.params["lscale"] = 1e9
domain1D.synonymes = dict(
pore = "fluid",
chargedmembraneb = "wall",
)
geo1D = domain1D.create_geometry(lc=.01)
# --- define physical parameters for 1D problem ---
phys_params = dict(
Membraneqs = rho,
bulkcon = 300,
v0 = {}
)
phys = Physics("pore", geo1D, **phys_params)
# --- solve 1D problem for "exact" solution and interpolate onto 2D mesh ---
pb = solve_pde(SimplePBProblem, geo1D, phys, cyl=True, iterative=False, tolnewton=1e-10)
phi = pb.solution
UT = phys.UT
c0 = phys.bulkcon
D = phys.DPore
lscale = phys.lscale
E0 = -lscale*bV/(2.*Rz)
print "Diffusion constant in pore:",D
print "Constant electric field:",E0
def cpPB(x):
return c0*exp(-phi(x)/UT)
def cmPB(x):
return c0*exp(phi(x)/UT)
class Potential(Expression):
def eval(self, value, x):
value[0] = phi(x[0]) + bV*x[1]/(2.*Rz)
class Jp(Expression):
def eval(self, value, x):
value[0] = D/UT*E0*cpPB(x[0])
class Jm(Expression):
def eval(self, value, x):
value[0] = -D/UT*E0*cmPB(x[0])
"""
phi1 = Function(FunctionSpace(geo2D.mesh, 'CG', 2))
phi1.interpolate(Potential())
plot(phi1, interactive=True)
phi1.interpolate(Jp())
plot(phi1, interactive=True)
phi1.interpolate(Jm())
plot(phi1, interactive=True)
"""
# --- define physical parameters and non-standard BCs for 2D problem ---
#v0 = dict(wall = phi(R))
v0ex = Potential()
v0 = dict(
upperb = v0ex,
lowerb = v0ex,
wall = v0ex,
)
cp0 = dict(wall = c0*exp(-phi(R)/UT))
cm0 = dict(wall = c0*exp(+phi(R)/UT))
phys_params.update(
cp0 = cp0,
cm0 = cm0,
v0 = v0,
)
phys = Physics("pore", geo2D, **phys_params)
V = SimplePNPProblem.space(geo2D.mesh)
bcs = geo2D.pwBC(V.sub(0), "v0")
bcs = bcs + geo2D.pwconstBC(V.sub(1), "cp0")
bcs = bcs + geo2D.pwconstBC(V.sub(2), "cm0")
# --- create customized problem with non-zero flux BCs for nernst-planck ---
problem = SimplePNPProblem(geo2D, phys, cyl=True, newtondamp=0.8, bcs=bcs)
w, dp, dm = split(problem.a.arguments()[0])
r2pi = Expression("2*pi*x[0]")
lscale = Constant(phys.lscale)
Lp = -lscale*Jp()*dp*r2pi*geo2D.ds("upperb") + lscale*Jp()*dp*r2pi*geo2D.ds("lowerb")
Lm = -lscale*Jm()*dm*r2pi*geo2D.ds("upperb") + lscale*Jm()*dm*r2pi*geo2D.ds("lowerb")
problem.addRHS(- Lp - Lm)
# --- solve 2D problem
pnps = solve_problem(problem, geo2D)
pnps.visualize()
(v, cp, cm) = pnps.solutions()
#print geo2D
#domain2D.plot()
#print geo1D
fig = plot1D({"PB":phi}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "potential [V]"))
plot1D({"PNP (2D)": v}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "potential [V]"), fig=fig)
fig = plot1D({"c+ PB":cpPB, "c- PB":cmPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "concentration [mol/m**3]"))
plot1D({"c+ PNP (2D)": cp, "c- PNP (2D)": cm}, (0., R, 101), "x", origin=(0.,-Rz), dim=2, axlabels=("r [nm]", "concentration [mol/m**3]"), fig=fig)
showplots()
| {
"repo_name": "mitschabaude/nanopores",
"path": "scripts/analytical_test_case.py",
"copies": "1",
"size": "3984",
"license": "mit",
"hash": 6575314854384698000,
"line_mean": 26.6666666667,
"line_max": 147,
"alpha_frac": 0.641064257,
"autogenerated": false,
"ratio": 2.4745341614906833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36155984184906836,
"avg_score": null,
"num_lines": null
} |
'''Analytic decoding failure bound and inactivations estimate
'''
import math
import numpy as np
from pynumeric import nchoosek_log
from functools import lru_cache
from scipy.special import comb as nchoosek
from .. import Soliton
# @profile
@lru_cache(maxsize=2048)
def _vartheta_log(i=None, weight=None, degree=None, num_intermediate_symbols=None):
    '''decoding_failure_upper helper function.

    Log-domain hypergeometric-style term:
    log[ C(weight, i) * C(K - weight, degree - i) / C(K, degree) ]
    with K = num_intermediate_symbols. nchoosek_log raises ValueError when a
    binomial coefficient would be zero; callers rely on that.
    '''
    assert i is not None
    assert weight > 0 and weight % 1 == 0
    assert degree > 0 and degree % 1 == 0
    assert num_intermediate_symbols > 0
    assert num_intermediate_symbols % 1 == 0
    numerator = nchoosek_log(weight, i)
    numerator += nchoosek_log(num_intermediate_symbols - weight, degree - i)
    return numerator - nchoosek_log(num_intermediate_symbols, degree)
def _phi(i=None, field_size=None):
'''decoding_failure_upper helper function '''
if i % 2 == 0:
result = 1
else:
result = -1
result /= pow(field_size-1, i-1)
result += 1
result /= field_size
return result
# @profile
@lru_cache(maxsize=2048)
def _pi(max_degree=None, distribution=None, weight=None,
        field_size=None, num_intermediate_symbols=None):
    '''decoding_failure_upper helper function.

    Averages, over the degree distribution, the per-degree sums of
    exp(_vartheta_log + log(_phi)) terms for a codeword of the given Hamming
    weight. Fixed: removed a leftover debug print that wrote to stdout on
    every (cache-missing) call.
    '''
    result = 0
    for degree in range(1, max_degree+1):
        degree_probability = distribution.pdf(degree)
        # degrees with zero probability contribute nothing
        if not degree_probability:
            continue
        tmp = 0
        for i in range(min(degree, weight)+1):
            try:
                v = _vartheta_log(
                    i=i,
                    weight=weight,
                    degree=degree,
                    num_intermediate_symbols=num_intermediate_symbols,
                )
            # raised when the binomial coefficients would be zero.
            except ValueError:
                continue
            p = _phi(i=i, field_size=field_size)
            # p may be zero. in this case we should not count this term.
            if not p:
                continue
            # if p is nonzero we move to the log domain and add it with v
            p = math.log(p)
            tmp += math.exp(v+p)
        result += degree_probability * tmp
    return result
@lru_cache(maxsize=2048)
def _weight_enumerator_log(weight=None, num_inputs=None, field_size=None):
    '''Log of the weight enumerator of a rateless code.

    Used to estimate the decoding failure probability.

    args:

    weight: hamming weight to compute the weight enumerator for.
    '''
    assert num_inputs > 0 and num_inputs % 1 == 0
    assert field_size > 0 and field_size % 1 == 0
    # C(n, w) codewords of weight w, each symbol taking field_size-1 values
    return nchoosek_log(num_inputs, weight) + weight * math.log(field_size - 1)
def decoding_failure_upper(distribution=None, num_inputs=None,
                           overhead=None, field_size=None):
    '''upper bound the decoding failure probability of an LT code under ML
    decoding. implemented in Matlab by Lázaro Francisco and converted to Python
    by Albin Severinson.

    distribution: degree distribution exposing a pdf(degree) method.

    num_inputs: number of input (source) symbols.

    overhead: relative reception overhead, i.e. received / num_inputs.

    field_size: number of elements in the finite field that the code is defined
    over.

    Source: [Blasco2017, chapter 5, theorem 2]

    Fixed: removed a leftover per-iteration debug print and an unused local.
    '''
    num_received = num_inputs * overhead
    # the bound is adapted from one for Raptor codes. for LT codes there is no
    # precode and the number of intermediate symbols is thus equal to the
    # number of input symbols.
    num_intermediate_symbols = num_inputs
    result = 0
    for weight in range(1, num_intermediate_symbols+1):
        # log-domain weight enumerator A_w for this Hamming weight
        _A = _weight_enumerator_log(
            weight=weight,
            num_inputs=num_inputs,
            field_size=field_size,
        )
        _pi_value = _pi(
            weight=weight,
            max_degree=num_inputs,
            distribution=distribution,
            num_intermediate_symbols=num_intermediate_symbols,
            field_size=field_size,
        )
        # a zero term contributes nothing and cannot be moved to the log domain
        if not _pi_value:
            continue
        _pi_value = math.log(_pi_value)
        _pi_value *= num_received
        result += math.exp(_A + _pi_value - math.log(field_size-1))
    return result
def decoding_failure_simple_lower(num_inputs=None, overhead=None, field_size=None):
    '''lower bound the decoding failure probability of an LT code under ML
    decoding. this bound is very loose for small failure probability.

    field_size: number of elements in the finite field that the code is defined
    over.
    '''
    assert num_inputs > 0 and num_inputs % 1 == 0
    assert overhead > 1
    assert field_size > 0 and field_size % 1 == 0
    # symbols received beyond the minimum num_inputs needed for decoding
    absolute_overhead = num_inputs * (overhead - 1)
    # each excess symbol cuts the failure probability by at most 1/q
    return pow(field_size, -absolute_overhead)
def decoding_failure_lower(num_inputs=None, distribution=None, overhead=None):
    '''Lower bound on the decoding failure probability via an
    inclusion-exclusion sum evaluated in the log domain.'''
    num_received = num_inputs * overhead
    total = 0
    for i in range(1, num_inputs + 1):
        inner_sum = 0
        for degree in range(1, num_inputs + 1):
            term_log = math.log(distribution.pdf(degree))
            try:
                term_log += nchoosek_log(num_inputs - i, degree)
            except ValueError:
                # binomial coefficient is zero for this (i, degree) pair
                continue
            term_log -= nchoosek_log(num_inputs, degree)
            inner_sum += math.exp(term_log)
        if not inner_sum:
            continue
        outer = math.exp(math.log(inner_sum) * num_received
                         + nchoosek_log(num_inputs, i))
        # alternating signs of the inclusion-exclusion expansion
        total += -outer if i % 2 == 0 else outer
    return total
def decoding_failure_prob_estimate_reference(soliton=None, num_inputs=None,
                                             overhead=None, field_size=2):
    '''estimate the decoding failure probability.

    reference implementation of [Blasco2017, eq3.5]. this implementation does
    not work for large num_inputs due to the nchoosek expressions. we use this
    implementation only to verify the correctness of the improved
    implementation below.
    '''
    # assert isinstance(soliton, Soliton)
    assert num_inputs is not None
    assert overhead is not None
    # total number of received coded symbols
    num_received = num_inputs * overhead
    # seed the inner terms with the degree distribution pdf; they are then
    # updated recursively on every outer iteration
    pdf_terms = np.fromiter(
        (soliton.pdf(d) for d in range(1, num_inputs+1)),
        dtype=float,
    )
    # descending counters num_inputs-1, ..., 0 used by the recursive update
    countdown = np.arange(num_inputs-1, -1, -1)
    # terms of the outer sum alternate in sign
    term_sign = 1
    # values this close to zero terminate the sum
    smallest = np.finfo(dtype=float).tiny
    estimate = 0
    for i in range(1, num_inputs+1):
        # recursive inner-term update exploiting the structure of the terms
        pdf_terms[:num_inputs-i+1] *= countdown[i-1:] / (num_inputs - i + 1)
        log_outer = math.log(nchoosek(num_inputs, i, exact=True))
        inner_term = np.power(pdf_terms.sum(), num_received)
        # stop once the inner terms are too close to zero
        if inner_term < smallest:
            break
        log_outer += np.log(inner_term)
        estimate += term_sign * math.exp(log_outer)
        term_sign *= -1
    # verify that the estimate converged
    if np.isnan(estimate) or not 0 < estimate < 1:
        raise ValueError('estimate did not converge: %f' % estimate)
    return estimate
def decoding_failure_prob_estimate(soliton=None, num_inputs=None,
                                   overhead=None, field_size=2):
    '''estimate the decoding failure probability.

    robust implementation of [Blasco2017, eq3.5]. this method works well even
    for large num_inputs. independent of field size.
    '''
    # assert isinstance(soliton, Soliton)
    assert num_inputs is not None
    assert overhead is not None
    # vector of possible output symbol degrees
    degrees = np.arange(1, num_inputs+1)
    # total number of received coded symbols
    num_received = num_inputs * overhead
    # values this close to zero terminate the sum
    smallest = np.finfo(dtype=float).tiny
    # alternating-sign outer coefficients (-1)**(i+1) * C(num_inputs, i)
    outer_terms = np.power(-np.ones(num_inputs), np.arange(2, num_inputs+2))
    outer_terms *= nchoosek(num_inputs, degrees)
    # zero out the infinite values. this is fine for two reasons: 1) Each
    # positive term is accompanied by a negative term. 2) The inner term will
    # be very small for these values, meaning that the difference between the
    # corresponding positive and negative terms will be negligible.
    outer_terms[np.isinf(outer_terms)] = 0
    # seed the inner terms with the degree distribution pdf
    pdf_terms = np.fromiter(
        (soliton.pdf(d) for d in range(1, num_inputs+1)),
        dtype=float,
    )
    # descending counters num_inputs-1, ..., 0 used by the recursive update
    countdown = np.arange(num_inputs-1, -1, -1)
    estimate = 0
    for i in range(1, num_inputs+1):
        # recursive inner-term update exploiting the structure of the terms
        pdf_terms[:num_inputs-i+1] *= countdown[i-1:] / (num_inputs - i + 1)
        inner_term = np.power(pdf_terms.sum(), num_received)
        # stop once the inner terms are too close to zero
        if inner_term < smallest:
            break
        estimate += outer_terms[i-1] * inner_term
    # the estimate does not converge for mode=1. check for this and for nan.
    if np.isnan(estimate) or not 0 < estimate < 1:
        raise ValueError('estimate did not converge: %f' % estimate)
    return estimate
def inactivations_estimate(soliton=None, num_inputs=None, overhead=None):
    '''estimate the number of inactivations during decoding [Blasco2017]

    soliton: degree distribution; must be a Soliton instance.

    num_inputs: number of input (source) symbols.

    overhead: relative reception overhead; received = num_inputs * overhead.
    '''
    assert isinstance(soliton, Soliton)
    assert num_inputs is not None
    assert overhead is not None
    # track the expected fraction of symbols with degree d for all d.
    state = np.zeros(num_inputs+1, dtype=float)
    # the initial values (before peeling) is exactly the Soliton
    # distribution pdf. we allocate an additional element with value 0
    # to simplify the recursive state update computation.
    state[:-1] += np.fromiter(
        (soliton.pdf(d) for d in range(1, num_inputs+1)),
        dtype=float
    )
    # allocate a temporary variable to use when updating state.
    tmp = np.zeros(num_inputs+1, dtype=float)
    # pre-allocate array of degrees to speed up the computation
    degrees = np.arange(1, num_inputs+2)
    # compute the total number of received symbols
    received = num_inputs * overhead
    # we track the number of inactivation throughout the decoding process
    inactivations = 0
    # simulate the peeling process as a finite state machine; u is the number
    # of intermediate symbols still unresolved
    for u in range(num_inputs, 0, -1):
        # an inactivation is expected when no degree-1 symbol is available
        inactivations += np.power(1 - state[0], received)
        # update state according to [Blasco2017, pp73]
        tmp[:-1] = (1 - degrees[:-1] / u) * state[:-1]
        tmp[:-1] += degrees[1:] / u * state[1:]
        # update state for d = 1 according to [Blasco2017, pp74]
        tmp[0] -= (1 - 1/u) / received * (1 - np.power(1 - state[0], received))
        # swap tmp and state to prepare for the next iteration
        tmp, state = state, tmp
    return inactivations
| {
"repo_name": "severinson/pyrateless",
"path": "pyrateless/optimize/analytic.py",
"copies": "1",
"size": "11900",
"license": "apache-2.0",
"hash": -8411216390108471000,
"line_mean": 32.4241573034,
"line_max": 83,
"alpha_frac": 0.6292965795,
"autogenerated": false,
"ratio": 3.8570502431118316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9982770419783058,
"avg_score": 0.0007152805657547815,
"num_lines": 356
} |
''' analytic ik module by mmkim'''
import numpy as np
import sys
if '..' not in sys.path:
sys.path.append('..')
import hmath.mm_math as mm
# if parent_joint_axis is None: assume 3dof parent joint
# if not: use parent_joint_axis as rotV
# parent_joint_axis is a local direction
def ik_analytic(posture, joint_name_or_index, new_position, parent_joint_axis=None):
    """Two-bone analytic IK: rotate the grandparent and parent joints so the
    end joint reaches new_position (global coordinates). Mutates and returns
    the given posture.

    joint_name_or_index: end joint, by skeleton index or by name.
    parent_joint_axis: optional local hinge axis for the parent joint; when
    None the bend axis is taken from the current limb plane.
    """
    if isinstance(joint_name_or_index, int):
        joint = joint_name_or_index
    else:
        joint = posture.skeleton.getJointIndex(joint_name_or_index)
    joint_parent = posture.skeleton.getParentJointIndex(joint)
    joint_parent_parent = posture.skeleton.getParentJointIndex(joint_parent)
    # global positions: B = end joint, C = mid joint, A = root of the chain
    B = posture.getJointPositionGlobal(joint)
    C = posture.getJointPositionGlobal(joint_parent)
    A = posture.getJointPositionGlobal(joint_parent_parent)
    L = B - A
    N = B - C
    M = C - A
    l = mm.length(L)
    n = mm.length(N)
    m = mm.length(M)
    # law of cosines: current interior angles of triangle (A, C, B)
    a = mm.ACOS((l*l + n*n - m*m) / (2*l*n))
    b = mm.ACOS((l*l + m*m - n*n) / (2*l*m))
    B_new = new_position
    L_new = B_new - A
    l_ = mm.length(L_new)
    # law of cosines: interior angles after moving the end joint to B_new
    a_ = mm.ACOS((l_*l_ + n*n - m*m) / (2*l_*n))
    b_ = mm.ACOS((l_*l_ + m*m - n*n) / (2*l_*m))
    # rotate joint in plane
    rotV = mm.normalize2(np.cross(M, L))
    if parent_joint_axis is not None:
        rotV = np.dot(posture.getJointOrientationGlobal(joint_parent), mm.normalize(parent_joint_axis))
        print(mm.length(rotV))
    if mm.length(rotV) <= 1e-9:
        # degenerate (collinear) limb: fall back to the parent's local z axis
        z_axis = np.array([0, 0, 1], float)
        rotV = np.dot(posture.getJointOrientationGlobal(joint_parent), z_axis)
        print("mm_analytic_ik.py: ik_analytic: length of rotV is 0. check the orientations of results of ik")
    rotb = b - b_
    rota = a_ - a - rotb
    posture.mulJointOrientationGlobal(joint_parent_parent, mm.exp(rotV, rotb))
    # NOTE(review): single-argument mm.exp(rotV * rota) here vs two-argument
    # mm.exp(rotV, rotb) above -- presumably equivalent overloads; confirm
    posture.mulJointOrientationGlobal(joint_parent, mm.exp(rotV * rota))
    # rotate plane
    rotV2 = mm.normalize2(np.cross(L, L_new))
    l_new = mm.length(L_new)
    l_diff = mm.length(L_new - L)
    # angle between L and L_new, again via the law of cosines
    rot2 = mm.ACOS((l_new * l_new + l * l - l_diff * l_diff) / (2 * l_new * l))
    posture.mulJointOrientationGlobal(joint_parent_parent, mm.exp(rotV2, rot2))
    return posture
if __name__ == '__main__':
    # demo: run the analytic IK on a sample walking motion and visualise it
    from fltk import *
    import copy
    import Resource.ysMotionLoader as yf
    import GUI.ysSimpleViewer as ysv
    import Renderer.ysRenderer as yr
    def test_ik_analytic():
        """Pin the left foot of every frame to a fixed target and show both
        the original and the IK-adjusted motion."""
        bvhFilePath = '../samples/wd2_WalkSameSame00.bvh'
        jointMotion = yf.readBvhFile(bvhFilePath, .01)
        # single fixed IK target position
        ik_target = [(0, .3, -.3)]
        jointMotion2 = copy.deepcopy(jointMotion)
        for i in range(len(jointMotion2)):
            ik_analytic(jointMotion2[i], 'LeftFoot', ik_target[0])
        viewer = ysv.SimpleViewer()
        viewer.record(False)
        # render original motion, IK-adjusted copy, and the target point
        viewer.doc.addRenderer('jointMotion', yr.JointMotionRenderer(jointMotion, (0,150,255)))
        viewer.doc.addObject('jointMotion', jointMotion)
        viewer.doc.addRenderer('jointMotion2', yr.JointMotionRenderer(jointMotion2, (0,255,0)))
        viewer.doc.addObject('jointMotion2', jointMotion2)
        viewer.doc.addRenderer('ik_target', yr.PointsRenderer(ik_target, (255,0,0)))
        viewer.startTimer(1/30.)
        viewer.show()
        Fl.run()
        pass
    test_ik_analytic()
| {
"repo_name": "queid7/hma",
"path": "modules/ys_motion/mm_analytic_ik.py",
"copies": "1",
"size": "3343",
"license": "mit",
"hash": -7684022205829600000,
"line_mean": 32.099009901,
"line_max": 109,
"alpha_frac": 0.6269817529,
"autogenerated": false,
"ratio": 3.0008976660682225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4127879418968222,
"avg_score": null,
"num_lines": null
} |
""" analytic_reference script """
from common import info, info_split, info_cyan, info_error
from postprocess import get_step_and_info, rank, compute_norms
import dolfin as df
import os
from utilities.plot import plot_any_field
import importlib
def description(ts, **kwargs):
    """Print a short description of this analysis script."""
    message = """Compare to analytic reference expression at given timestep.
This is done by importing the function "reference" in the problem module."""
    info(message)
def method(ts, time=None, step=0, show=False,
           save_fig=False, **kwargs):
    """ Compare to analytic reference expression at given timestep.
    This is done by importing the function "reference" in the problem module.

    ts: timeseries object providing mesh, functions and parameters.
    time, step: select which saved timestep to compare against.
    show, save_fig: display and/or save per-field error plots.
    """
    info_cyan("Comparing to analytic reference at given time or step.")
    step, time = get_step_and_info(ts, time, step)
    parameters = ts.get_parameters(time=time)
    problem = parameters.get("problem", "intrusion_bulk")
    try:
        module = importlib.import_module("problems.{}".format(problem))
        reference = module.reference
    # narrowed from a bare except: only "module missing" or "module has no
    # reference attribute" mean there is no analytic reference; other errors
    # (e.g. bugs inside the problem module) now propagate.
    except (ImportError, AttributeError):
        info_error("No analytic reference available.")
    ref_exprs = reference(t=time, **parameters)
    info("Comparing to analytic solution.")
    info_split("Problem:", "{}".format(problem))
    info_split("Time:", "{}".format(time))
    f = ts.functions(ref_exprs.keys())
    err = dict()
    f_int = dict()
    f_ref = dict()
    # build higher-order (degree+3) spaces so interpolation error does not
    # dominate the measured discretization error
    for field in ref_exprs.keys():
        el = f[field].function_space().ufl_element()
        degree = el.degree()
        if bool(el.value_size() != 1):
            W = df.VectorFunctionSpace(ts.mesh, "CG", degree+3)
        else:
            W = df.FunctionSpace(ts.mesh, "CG", degree+3)
        err[field] = df.Function(W)
        f_int[field] = df.Function(W)
        f_ref[field] = df.Function(W)
    for field, ref_expr in ref_exprs.items():
        ref_expr.t = time
        # Update numerical solution f
        ts.update(f[field], field, step)
        # Interpolate f to higher space
        f_int[field].assign(df.interpolate(
            f[field], f_int[field].function_space()))
        # Interpolate f_ref to higher space
        f_ref[field].assign(df.interpolate(
            ref_expr, f_ref[field].function_space()))
        # pointwise nodal error between numerical and analytic solutions
        err[field].vector()[:] = (f_int[field].vector().get_local() -
                                  f_ref[field].vector().get_local())
        if show or save_fig:
            # Interpolate the error to low order space for visualisation.
            err_int = df.interpolate(err[field], f[field].function_space())
            err_arr = ts.nodal_values(err_int)
            label = "Error in " + field
            # only the root MPI rank plots/saves
            if rank == 0:
                save_fig_file = None
                if save_fig:
                    save_fig_file = os.path.join(
                        ts.plots_folder, "error_{}_time{}_analytic.png".format(
                            field, time))
                plot_any_field(ts.nodes, ts.elems, err_arr,
                               save=save_fig_file, show=show, label=label)
    save_file = os.path.join(ts.analysis_folder,
                             "errornorms_time{}_analytic.dat".format(
                                 time))
    compute_norms(err, save=save_file)
| {
"repo_name": "gautelinga/BERNAISE",
"path": "analysis_scripts/analytic_reference.py",
"copies": "1",
"size": "3206",
"license": "mit",
"hash": -1425048543762784500,
"line_mean": 36.2790697674,
"line_max": 79,
"alpha_frac": 0.5845290081,
"autogenerated": false,
"ratio": 3.8395209580838325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4924049966183832,
"avg_score": null,
"num_lines": null
} |
'''Analytic routines for debris disks.'''
import numpy as np
from . import photometry
from . import filter
from . import utils
class BB_Disk(object):
    '''A blackbody disk class.

    Takes multiple temperatures, the purpose being for use to show
    disk properties in parameter spaces such as fractional luminosity
    vs. temperature.

    Parameters
    ----------
    lstar : float
        Stellar luminosity in Solar units.
    tstar : float
        Stellar effective temperature in Kelvin.
    distance : float
        Stellar distance in parsec.
    wavelengths : 1-D array, optional
        Vector of wavelengths.
    temperatures : 1-D array, optional
        Vector of temperatures.

    .. todo:: distance not actually needed for calibration limited, fix.

    .. todo:: don't use a for loop over temperatures,
        fix utils.bnu_wav_micron instead.
    '''

    def __init__(self,wavelengths=None,temperatures=None,
                 lstar=None,tstar=None,distance=None):
        '''Initialise; default wavelength/temperature grids are log-spaced.'''
        if wavelengths is None:
            self.wavelengths = 10**np.linspace(-1,4,1000)
        else:
            self.wavelengths = wavelengths
        if temperatures is None:
            self.temperatures = 10**np.linspace(1,3,1000)
        else:
            self.temperatures = temperatures
        self.lstar = lstar
        self.tstar = tstar
        self.distance = distance

    def blackbody_radii(self):
        '''Return the blackbody radii (au) for each temperature.'''
        return (278.3/self.temperatures)**2 * self.lstar**0.5

    def radiance(self):
        '''Return radiance, in W / m^2 / sr.'''
        return 5.67e-8 * self.temperatures**4 / np.pi

    def f_limits(self,lim_waves,flux_limits=None,r_limits=None,
                 stellar_flux=None,fwhm=None,lstar_1pc=None):
        '''Return fractional luminosity limits.

        This routine implements Wyatt (2008) equations 8 and 11.

        Parameters
        ----------
        lim_waves : numpy.ndarray
            Array of wavelengths at which limits apply.
        flux_limits : numpy.ndarray, optional
            Array of flux limits.
        r_limits : numpy.ndarray, optional
            Array of calibration limits (F_disk/F_star).
        stellar_flux : numpy.ndarray, optional
            Array of stellar fluxes at lim_waves.
        fwhm : numpy.ndarray, optional
            Array of spatial resolutions at lim_waves, affects flux
            limited observations if disk is resolved.
        lstar_1pc : float
            L_star at 1pc, used for flux limits when distance unknown.

        One of flux_limits or r_limits must be given. If both, they must
        have the same length, and correspond to the wavelengths given.
        Likewise for stellar_flux and fwhm.
        '''
        if flux_limits is not None and r_limits is not None:
            if len(flux_limits) != len(r_limits):
                raise RuntimeError(
                    'flux_limits must be same length as r_limits')

        # sensitivity limit
        if flux_limits is not None:
            slims = np.zeros((len(self.temperatures),len(flux_limits)))
            for i,temp in enumerate(self.temperatures):
                if self.distance is not None:
                    slims[i,:] = 3.4e9 * flux_limits * self.distance**2 / \
                        self.blackbody_radii()[i]**2 / \
                        utils.bnu_wav_micron(lim_waves,temp)
                else:
                    # distance independent calculation, 2487305. is
                    # pc^2/Lsun, haven't tracked down the 4 yet
                    ldisk_1pc = 4 * 5.6704e-8 * flux_limits * 2487305. * \
                        temp**4 / utils.bnu_wav_micron(lim_waves,temp)
                    slims[i,:] = ldisk_1pc / lstar_1pc

                # apply correction for resolved disks
                if self.distance is not None and fwhm is not None:
                    fwhm_fact = 2 * self.blackbody_radii()[i] / self.distance / fwhm
                    resolved = fwhm_fact > 1.0
                    slims[i,resolved] *= fwhm_fact[resolved]

        # calibration limit, use actual stellar flux if given
        if r_limits is not None:
            if stellar_flux is not None:
                if len(stellar_flux) != len(r_limits):
                    raise RuntimeError(
                        'Stellar flux ({}) must have same '
                        'length as r_limits ({})'.format(
                            len(stellar_flux),
                            len(r_limits)
                        )
                    )
                fstar = stellar_flux
            else:
                # Rayleigh-Jeans-ish stellar photosphere estimate
                fstar = 1.77 * utils.bnu_wav_micron(lim_waves,self.tstar) * \
                    self.lstar / self.tstar**4 / self.distance**2
            clims = np.zeros((len(self.temperatures),len(r_limits)))
            for i,temp in enumerate(self.temperatures):
                clims[i,:] = 6e9/1.77 * r_limits * fstar / \
                    utils.bnu_wav_micron(lim_waves,temp) * \
                    (self.distance/self.blackbody_radii()[i])**2

        # return the tighter of the two limits where both were computed
        if flux_limits is not None and r_limits is not None:
            return np.minimum(slims,clims)
        elif flux_limits is not None:
            return slims
        elif r_limits is not None:
            return clims
        else:
            raise RuntimeError('Need to pass flux_limits or r_limits')

    def f_limits_from_result(self,r,min_wavelength=8.0, sn=3,
                             x=None, x_det=None,
                             skip_filters=None,keep_filters=None):
        '''Derive fractional luminosity limits from an sdf result object.

        Also derive fractional luminosities and signal to noise of excess
        detections. Return low and high limits, expect to plot these
        with pyplot.fill_between and something like:

        ax.fill_between(temps, det_lo[:,i], det_hi[:,i],
                        where=(det_lo[:,i]<det_hi[:,i]), alpha=0.25)

        Account for long wavelength grain inefficiency with X factor,
        used per filter, e.g. {'WAV850':4}.

        Rather than worry about flux vs. calibration limited, just do
        the calculation assuming flux limited by calculating the flux
        limit for each observed filter (whether it was an upper limit
        or not).

        Parameters
        ----------
        r : sdf.result.Result
            Result object with photometry.
        min_wavelength : float, optional
            Exclude filters with a mean wavelength shorter than this.
        sn : float, optional
            S/N at which detection significant, used only for detections.
        x : dict, optional
            X factor to increase limits by: {filter,X}
        x_det : dict, optional
            X factor to increase upper detection limit by: {filter,X}
        skip_filters : list, optional
            List of filters to skip.
        keep_filters : list, optional
            List of filters to keep, applied after skip_filters.
        '''
        # avoid mutable default arguments; treat None as "empty"
        x = {} if x is None else x
        x_det = {} if x_det is None else x_det
        skip_filters = [] if skip_filters is None else skip_filters

        waves = np.array([])
        filters = np.array([])
        f_lim = np.array([])
        f_det = np.array([])
        e_det = np.array([])
        f_star = np.array([])

        # get stellar luminosity at 1pc if no distance
        lstar = None
        if self.distance is None:
            lstar = 0.0
            if hasattr(r,'star'):
                for s in r.star:
                    lstar += s['lstar_1pc']
            if lstar == 0.0:
                raise utils.SdfError('dont have lstar_1pc or distance')

        for p in r.obs:
            if not isinstance(p,photometry.Photometry):
                continue
            ok = np.invert(p.ignore)

            # loop to grab correct stellar photometry
            for i,filt in enumerate(p.filters[ok]):
                new_wave = p.mean_wavelength()[ok][i]
                if (filter.iscolour(filt) or
                        new_wave < min_wavelength or
                        filt in skip_filters):
                    continue
                if keep_filters is not None:
                    if filt not in keep_filters:
                        continue
                waves = np.append(waves,new_wave)
                filters = np.append(filters,filt)
                filt_i = np.where(filt == np.array(r.all_filters))[0]
                f_star = np.append(f_star,r.all_star_phot[filt_i])
                fac = 1
                if filt in x.keys():
                    fac = x[filt]
                if p.upperlim[ok][i]:
                    f_lim = np.append(f_lim,p.fnujy[ok][i]*fac)
                    f_det = np.append(f_det, 0)
                    e_det = np.append(e_det, 0)
                else:
                    # 1sigma uncertainty, observed and star in quadrature
                    unc = np.sqrt(
                        p.e_fnujy[ok][i]**2 + \
                        0.25*(r.all_star_phot_1sig_lo[filt_i] + r.all_star_phot_1sig_hi[filt_i])**2
                    )
                    f_lim = np.append(f_lim,3*unc*fac)
                    f_det = np.append(f_det, p.fnujy[ok][i] - f_star[-1])
                    e_det = np.append(e_det, unc)

        lims = self.f_limits(waves,flux_limits=f_lim,
                             stellar_flux=f_star,lstar_1pc=lstar)
        dets = self.f_limits(waves,flux_limits=f_det,
                             stellar_flux=f_star,lstar_1pc=lstar)
        ok = e_det > 0
        sn_dets = np.zeros(lims.shape[1])
        sn_dets[ok] = f_det[ok] / e_det[ok]

        # now compute limit ranges for detections, first get ranges
        det_lo = np.zeros(lims.shape)
        det_hi = lims.copy()
        both_hi = lims.copy()
        for i in range(lims.shape[1]):
            if sn_dets[i]>sn:
                fac = 1
                if filters[i] in x_det.keys():
                    fac = x_det[filters[i]]
                det_lo[:,i] = dets[:,i]*(1-sn/sn_dets[i])
                det_hi[:,i] = dets[:,i]*(fac+sn/sn_dets[i])
                both_hi[:,i] = np.max([[det_hi[:,i]],[lims[:,i]]], axis=0)

        # now adjust high limit based on other limits
        for i in range(lims.shape[1]):
            other = np.arange(lims.shape[1]) != i
            det_hi[:,i] = np.min( np.hstack((both_hi[:,other],det_hi[:,i].reshape((-1,1)))), axis=1 )

        return lims, det_lo, det_hi, sn_dets, filters

    def f_limits_togrid(self, lims, f=None):
        '''Return boolean grid in f - r/T space indicating detectability.

        Sum multiple of these to get the grid that shows how many of the
        systems it was possible to detect a disk for.

        Parameters
        ----------
        lims : array
            Array of f limits (i.e. n_temperatures x n_lim).
        f : array, optional
            Array of f to use in grid.
        '''
        if f is None:
            f = 10**np.linspace(-7,-1,100)
        fs, _ = np.meshgrid(f, self.temperatures)
        # bug fix: was `np.min(lim, axis=1)` (NameError on undefined `lim`);
        # also broadcast the per-temperature minimum limit across the f axis
        # so the comparison works for any grid shape
        return fs > np.min(lims, axis=1)[:, np.newaxis], f
| {
"repo_name": "drgmk/sdf",
"path": "sdf/analytics.py",
"copies": "1",
"size": "11395",
"license": "mit",
"hash": -1743949656810416400,
"line_mean": 36.8571428571,
"line_max": 101,
"alpha_frac": 0.5106625713,
"autogenerated": false,
"ratio": 3.9456371191135733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9888848019181347,
"avg_score": 0.013490334246445398,
"num_lines": 301
} |
"""Analytics helper class for the analytics integration."""
import asyncio
import uuid
import aiohttp
import async_timeout
from homeassistant.components import hassio
from homeassistant.components.api import ATTR_INSTALLATION_TYPE
from homeassistant.components.automation.const import DOMAIN as AUTOMATION_DOMAIN
from homeassistant.const import ATTR_DOMAIN, __version__ as HA_VERSION
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.storage import Store
from homeassistant.helpers.system_info import async_get_system_info
from homeassistant.loader import IntegrationNotFound, async_get_integration
from homeassistant.setup import async_get_loaded_integrations
from .const import (
ANALYTICS_ENDPOINT_URL,
ANALYTICS_ENDPOINT_URL_DEV,
ATTR_ADDON_COUNT,
ATTR_ADDONS,
ATTR_ARCH,
ATTR_AUTO_UPDATE,
ATTR_AUTOMATION_COUNT,
ATTR_BASE,
ATTR_BOARD,
ATTR_CUSTOM_INTEGRATIONS,
ATTR_DIAGNOSTICS,
ATTR_HEALTHY,
ATTR_INTEGRATION_COUNT,
ATTR_INTEGRATIONS,
ATTR_ONBOARDED,
ATTR_OPERATING_SYSTEM,
ATTR_PREFERENCES,
ATTR_PROTECTED,
ATTR_SLUG,
ATTR_STATE_COUNT,
ATTR_STATISTICS,
ATTR_SUPERVISOR,
ATTR_SUPPORTED,
ATTR_USAGE,
ATTR_USER_COUNT,
ATTR_UUID,
ATTR_VERSION,
LOGGER,
PREFERENCE_SCHEMA,
STORAGE_KEY,
STORAGE_VERSION,
)
class Analytics:
"""Analytics helper class for the analytics integration."""
    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize the Analytics class."""
        self.hass: HomeAssistant = hass
        self.session = async_get_clientsession(hass)
        # in-memory state; persisted via self._store and replaced in load()
        self._data = {ATTR_PREFERENCES: {}, ATTR_ONBOARDED: False, ATTR_UUID: None}
        self._store: Store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    @property
    def preferences(self) -> dict:
        """Return the current active preferences."""
        preferences = self._data[ATTR_PREFERENCES]
        # missing keys default to False, so nothing is reported until opted in
        return {
            ATTR_BASE: preferences.get(ATTR_BASE, False),
            ATTR_DIAGNOSTICS: preferences.get(ATTR_DIAGNOSTICS, False),
            ATTR_USAGE: preferences.get(ATTR_USAGE, False),
            ATTR_STATISTICS: preferences.get(ATTR_STATISTICS, False),
        }
    @property
    def onboarded(self) -> bool:
        """Return bool if the user has made a choice."""
        # set to True the first time save_preferences runs
        return self._data[ATTR_ONBOARDED]
    @property
    def uuid(self) -> str | None:
        """Return the uuid for the analytics integration."""
        # annotation corrected from `bool`: value is None until the first
        # payload is sent, then a uuid4 hex string
        return self._data[ATTR_UUID]
    @property
    def endpoint(self) -> str:
        """Return the endpoint that will receive the payload."""
        if HA_VERSION.endswith("0.dev0"):
            # dev installations will contact the dev analytics environment
            return ANALYTICS_ENDPOINT_URL_DEV
        # release builds report to the production endpoint
        return ANALYTICS_ENDPOINT_URL
@property
def supervisor(self) -> bool:
"""Return bool if a supervisor is present."""
return hassio.is_hassio(self.hass)
async def load(self) -> None:
"""Load preferences."""
stored = await self._store.async_load()
if stored:
self._data = stored
if self.supervisor:
supervisor_info = hassio.get_supervisor_info(self.hass)
if not self.onboarded:
# User have not configured analytics, get this setting from the supervisor
if supervisor_info[ATTR_DIAGNOSTICS] and not self.preferences.get(
ATTR_DIAGNOSTICS, False
):
self._data[ATTR_PREFERENCES][ATTR_DIAGNOSTICS] = True
elif not supervisor_info[ATTR_DIAGNOSTICS] and self.preferences.get(
ATTR_DIAGNOSTICS, False
):
self._data[ATTR_PREFERENCES][ATTR_DIAGNOSTICS] = False
async def save_preferences(self, preferences: dict) -> None:
"""Save preferences."""
preferences = PREFERENCE_SCHEMA(preferences)
self._data[ATTR_PREFERENCES].update(preferences)
self._data[ATTR_ONBOARDED] = True
await self._store.async_save(self._data)
if self.supervisor:
await hassio.async_update_diagnostics(
self.hass, self.preferences.get(ATTR_DIAGNOSTICS, False)
)
async def send_analytics(self, _=None) -> None:
"""Send analytics."""
supervisor_info = None
operating_system_info = {}
if not self.onboarded or not self.preferences.get(ATTR_BASE, False):
LOGGER.debug("Nothing to submit")
return
if self._data.get(ATTR_UUID) is None:
self._data[ATTR_UUID] = uuid.uuid4().hex
await self._store.async_save(self._data)
if self.supervisor:
supervisor_info = hassio.get_supervisor_info(self.hass)
operating_system_info = hassio.get_os_info(self.hass)
system_info = await async_get_system_info(self.hass)
integrations = []
custom_integrations = []
addons = []
payload: dict = {
ATTR_UUID: self.uuid,
ATTR_VERSION: HA_VERSION,
ATTR_INSTALLATION_TYPE: system_info[ATTR_INSTALLATION_TYPE],
}
if supervisor_info is not None:
payload[ATTR_SUPERVISOR] = {
ATTR_HEALTHY: supervisor_info[ATTR_HEALTHY],
ATTR_SUPPORTED: supervisor_info[ATTR_SUPPORTED],
ATTR_ARCH: supervisor_info[ATTR_ARCH],
}
if operating_system_info.get(ATTR_BOARD) is not None:
payload[ATTR_OPERATING_SYSTEM] = {
ATTR_BOARD: operating_system_info[ATTR_BOARD],
ATTR_VERSION: operating_system_info[ATTR_VERSION],
}
if self.preferences.get(ATTR_USAGE, False) or self.preferences.get(
ATTR_STATISTICS, False
):
configured_integrations = await asyncio.gather(
*[
async_get_integration(self.hass, domain)
for domain in async_get_loaded_integrations(self.hass)
],
return_exceptions=True,
)
for integration in configured_integrations:
if isinstance(integration, IntegrationNotFound):
continue
if isinstance(integration, BaseException):
raise integration
if integration.disabled:
continue
if not integration.is_built_in:
custom_integrations.append(
{
ATTR_DOMAIN: integration.domain,
ATTR_VERSION: integration.version,
}
)
continue
integrations.append(integration.domain)
if supervisor_info is not None:
installed_addons = await asyncio.gather(
*[
hassio.async_get_addon_info(self.hass, addon[ATTR_SLUG])
for addon in supervisor_info[ATTR_ADDONS]
]
)
for addon in installed_addons:
addons.append(
{
ATTR_SLUG: addon[ATTR_SLUG],
ATTR_PROTECTED: addon[ATTR_PROTECTED],
ATTR_VERSION: addon[ATTR_VERSION],
ATTR_AUTO_UPDATE: addon[ATTR_AUTO_UPDATE],
}
)
if self.preferences.get(ATTR_USAGE, False):
payload[ATTR_INTEGRATIONS] = integrations
payload[ATTR_CUSTOM_INTEGRATIONS] = custom_integrations
if supervisor_info is not None:
payload[ATTR_ADDONS] = addons
if self.preferences.get(ATTR_STATISTICS, False):
payload[ATTR_STATE_COUNT] = len(self.hass.states.async_all())
payload[ATTR_AUTOMATION_COUNT] = len(
self.hass.states.async_all(AUTOMATION_DOMAIN)
)
payload[ATTR_INTEGRATION_COUNT] = len(integrations)
if supervisor_info is not None:
payload[ATTR_ADDON_COUNT] = len(addons)
payload[ATTR_USER_COUNT] = len(
[
user
for user in await self.hass.auth.async_get_users()
if not user.system_generated
]
)
try:
with async_timeout.timeout(30):
response = await self.session.post(self.endpoint, json=payload)
if response.status == 200:
LOGGER.info(
(
"Submitted analytics to Home Assistant servers. "
"Information submitted includes %s"
),
payload,
)
else:
LOGGER.warning(
"Sending analytics failed with statuscode %s from %s",
response.status,
self.endpoint,
)
except asyncio.TimeoutError:
LOGGER.error("Timeout sending analytics to %s", ANALYTICS_ENDPOINT_URL)
except aiohttp.ClientError as err:
LOGGER.error(
"Error sending analytics to %s: %r", ANALYTICS_ENDPOINT_URL, err
)
| {
"repo_name": "kennedyshead/home-assistant",
"path": "homeassistant/components/analytics/analytics.py",
"copies": "1",
"size": "9573",
"license": "apache-2.0",
"hash": 4094008070077530600,
"line_mean": 35.3992395437,
"line_max": 90,
"alpha_frac": 0.5688916745,
"autogenerated": false,
"ratio": 4.379231473010064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004073225110442096,
"num_lines": 263
} |
"""Analytics helper class for the analytics integration."""
import asyncio
import aiohttp
import async_timeout
from homeassistant.components import hassio
from homeassistant.components.api import ATTR_INSTALLATION_TYPE
from homeassistant.components.automation.const import DOMAIN as AUTOMATION_DOMAIN
from homeassistant.const import __version__ as HA_VERSION
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.storage import Store
from homeassistant.helpers.system_info import async_get_system_info
from homeassistant.loader import IntegrationNotFound, async_get_integration
from homeassistant.setup import async_get_loaded_integrations
from .const import (
ANALYTICS_ENDPOINT_URL,
ATTR_ADDON_COUNT,
ATTR_ADDONS,
ATTR_AUTO_UPDATE,
ATTR_AUTOMATION_COUNT,
ATTR_BASE,
ATTR_DIAGNOSTICS,
ATTR_HEALTHY,
ATTR_HUUID,
ATTR_INTEGRATION_COUNT,
ATTR_INTEGRATIONS,
ATTR_ONBOARDED,
ATTR_PREFERENCES,
ATTR_PROTECTED,
ATTR_SLUG,
ATTR_STATE_COUNT,
ATTR_STATISTICS,
ATTR_SUPERVISOR,
ATTR_SUPPORTED,
ATTR_USAGE,
ATTR_USER_COUNT,
ATTR_VERSION,
LOGGER,
PREFERENCE_SCHEMA,
STORAGE_KEY,
STORAGE_VERSION,
)
class Analytics:
    """Analytics helper class for the analytics integration.

    Holds the user's analytics preferences (persisted via a Store) and
    builds/submits the analytics payload to the Home Assistant endpoint.
    """
    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize the Analytics class."""
        self.hass: HomeAssistant = hass
        self.session = async_get_clientsession(hass)
        # In-memory copy of the persisted preference/onboarding state.
        self._data = {ATTR_PREFERENCES: {}, ATTR_ONBOARDED: False}
        self._store: Store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    @property
    def preferences(self) -> dict:
        """Return the current active preferences."""
        preferences = self._data[ATTR_PREFERENCES]
        # Always expose all four known preference keys, defaulting to False.
        return {
            ATTR_BASE: preferences.get(ATTR_BASE, False),
            ATTR_DIAGNOSTICS: preferences.get(ATTR_DIAGNOSTICS, False),
            ATTR_USAGE: preferences.get(ATTR_USAGE, False),
            ATTR_STATISTICS: preferences.get(ATTR_STATISTICS, False),
        }
    @property
    def onboarded(self) -> bool:
        """Return bool if the user has made a choice."""
        return self._data[ATTR_ONBOARDED]
    @property
    def supervisor(self) -> bool:
        """Return bool if a supervisor is present."""
        return hassio.is_hassio(self.hass)
    async def load(self) -> None:
        """Load preferences."""
        stored = await self._store.async_load()
        if stored:
            self._data = stored
        if self.supervisor:
            supervisor_info = hassio.get_supervisor_info(self.hass)
            if not self.onboarded:
                # User have not configured analytics, get this setting from the supervisor
                if supervisor_info[ATTR_DIAGNOSTICS] and not self.preferences.get(
                    ATTR_DIAGNOSTICS, False
                ):
                    self._data[ATTR_PREFERENCES][ATTR_DIAGNOSTICS] = True
                elif not supervisor_info[ATTR_DIAGNOSTICS] and self.preferences.get(
                    ATTR_DIAGNOSTICS, False
                ):
                    self._data[ATTR_PREFERENCES][ATTR_DIAGNOSTICS] = False
    async def save_preferences(self, preferences: dict) -> None:
        """Save preferences."""
        # Validate the incoming dict against the schema before merging.
        preferences = PREFERENCE_SCHEMA(preferences)
        self._data[ATTR_PREFERENCES].update(preferences)
        self._data[ATTR_ONBOARDED] = True
        await self._store.async_save(self._data)
        if self.supervisor:
            # Keep the supervisor's diagnostics setting in sync with ours.
            await hassio.async_update_diagnostics(
                self.hass, self.preferences.get(ATTR_DIAGNOSTICS, False)
            )
    async def send_analytics(self, _=None) -> None:
        """Send analytics."""
        supervisor_info = None
        if not self.onboarded or not self.preferences.get(ATTR_BASE, False):
            LOGGER.debug("Nothing to submit")
            return
        # Installation-unique id used to identify this instance's submissions.
        huuid = await self.hass.helpers.instance_id.async_get()
        if self.supervisor:
            supervisor_info = hassio.get_supervisor_info(self.hass)
        system_info = await async_get_system_info(self.hass)
        integrations = []
        addons = []
        # Base payload: always included once the ATTR_BASE preference is set.
        payload: dict = {
            ATTR_HUUID: huuid,
            ATTR_VERSION: HA_VERSION,
            ATTR_INSTALLATION_TYPE: system_info[ATTR_INSTALLATION_TYPE],
        }
        if supervisor_info is not None:
            payload[ATTR_SUPERVISOR] = {
                ATTR_HEALTHY: supervisor_info[ATTR_HEALTHY],
                ATTR_SUPPORTED: supervisor_info[ATTR_SUPPORTED],
            }
        if self.preferences.get(ATTR_USAGE, False) or self.preferences.get(
            ATTR_STATISTICS, False
        ):
            # Resolve all loaded integrations concurrently; gather() returns
            # failures as exception objects instead of raising here.
            configured_integrations = await asyncio.gather(
                *[
                    async_get_integration(self.hass, domain)
                    for domain in async_get_loaded_integrations(self.hass)
                ],
                return_exceptions=True,
            )
            for integration in configured_integrations:
                if isinstance(integration, IntegrationNotFound):
                    continue
                if isinstance(integration, BaseException):
                    raise integration
                # Only built-in, enabled integrations are reported.
                if integration.disabled or not integration.is_built_in:
                    continue
                integrations.append(integration.domain)
            if supervisor_info is not None:
                installed_addons = await asyncio.gather(
                    *[
                        hassio.async_get_addon_info(self.hass, addon[ATTR_SLUG])
                        for addon in supervisor_info[ATTR_ADDONS]
                    ]
                )
                for addon in installed_addons:
                    addons.append(
                        {
                            ATTR_SLUG: addon[ATTR_SLUG],
                            ATTR_PROTECTED: addon[ATTR_PROTECTED],
                            ATTR_VERSION: addon[ATTR_VERSION],
                            ATTR_AUTO_UPDATE: addon[ATTR_AUTO_UPDATE],
                        }
                    )
        if self.preferences.get(ATTR_USAGE, False):
            payload[ATTR_INTEGRATIONS] = integrations
            if supervisor_info is not None:
                payload[ATTR_ADDONS] = addons
        if self.preferences.get(ATTR_STATISTICS, False):
            payload[ATTR_STATE_COUNT] = len(self.hass.states.async_all())
            payload[ATTR_AUTOMATION_COUNT] = len(
                self.hass.states.async_all(AUTOMATION_DOMAIN)
            )
            payload[ATTR_INTEGRATION_COUNT] = len(integrations)
            if supervisor_info is not None:
                payload[ATTR_ADDON_COUNT] = len(addons)
            # Only count real user accounts, not system-generated ones.
            payload[ATTR_USER_COUNT] = len(
                [
                    user
                    for user in await self.hass.auth.async_get_users()
                    if not user.system_generated
                ]
            )
        try:
            with async_timeout.timeout(30):
                response = await self.session.post(ANALYTICS_ENDPOINT_URL, json=payload)
                if response.status == 200:
                    LOGGER.info(
                        (
                            "Submitted analytics to Home Assistant servers. "
                            "Information submitted includes %s"
                        ),
                        payload,
                    )
                else:
                    LOGGER.warning(
                        "Sending analytics failed with statuscode %s", response.status
                    )
        except asyncio.TimeoutError:
            LOGGER.error("Timeout sending analytics to %s", ANALYTICS_ENDPOINT_URL)
        except aiohttp.ClientError as err:
            LOGGER.error(
                "Error sending analytics to %s: %r", ANALYTICS_ENDPOINT_URL, err
            )
| {
"repo_name": "sander76/home-assistant",
"path": "homeassistant/components/analytics/analytics.py",
"copies": "2",
"size": "7999",
"license": "apache-2.0",
"hash": 8759668593589128000,
"line_mean": 35.6926605505,
"line_max": 90,
"alpha_frac": 0.5758219777,
"autogenerated": false,
"ratio": 4.361504907306434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5937326885006433,
"avg_score": null,
"num_lines": null
} |
"""Analytics modeling to help understand the projects on Read the Docs."""
import datetime
from django.db import models
from django.db.models import Sum
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
def _last_30_days_iter():
    """Return an iterator over the previous 30 days plus today (31 dates)."""
    one_day = timezone.timedelta(days=1)
    start = timezone.now().date() - timezone.timedelta(days=30)
    # 31 dates total: the start day and each following day through today.
    return iter([start + offset * one_day for offset in range(31)])
class PageView(models.Model):
    """Daily page-view counts, keyed by project, version, path, and date."""
    project = models.ForeignKey(
        Project,
        related_name='page_views',
        on_delete=models.CASCADE,
    )
    version = models.ForeignKey(
        Version,
        verbose_name=_('Version'),
        related_name='page_views',
        on_delete=models.CASCADE,
    )
    path = models.CharField(max_length=4096)
    view_count = models.PositiveIntegerField(default=0)
    date = models.DateField(default=datetime.date.today, db_index=True)
    class Meta:
        unique_together = ("project", "version", "path", "date")
    def __str__(self):
        return f'PageView: [{self.project.slug}:{self.version.slug}] - {self.path} for {self.date}'
    @classmethod
    def top_viewed_pages(cls, project, since=None, limit=10):
        """Return the most-viewed pages for *project*, graph-ready.

        Sample::

            {
                'pages': ['index', 'config-file/v1', 'intro/import-guide'],
                'view_counts': [150, 120, 100]
            }

        ``since`` defaults to 30 days ago; ``limit`` caps the result size.
        """
        if since is None:
            since = timezone.now().date() - timezone.timedelta(days=30)
        queryset = (
            cls.objects
            .filter(project=project, date__gte=since)
            .values_list('path')
            .annotate(total_views=Sum('view_count'))
            .values_list('path', 'total_views')
            .order_by('-total_views')[:limit]
        )
        rows = list(queryset.iterator())
        return {
            'pages': [path for path, _ in rows],
            'view_counts': [total for _, total in rows],
        }
    @classmethod
    def page_views_by_date(cls, project_slug, since=None):
        """Return total daily page views for the last 30 days, graph-ready.

        Sample::

            {
                'labels': ['01 Jul', '02 Jul', '03 Jul'],
                'int_data': [150, 200, 143]
            }

        NOTE(review): the labels always cover the fixed last-30-days window
        even when a custom ``since`` is passed -- confirm that is intended.
        """
        if since is None:
            since = timezone.now().date() - timezone.timedelta(days=30)
        queryset = cls.objects.filter(
            project__slug=project_slug,
            date__gte=since,
        ).values('date').annotate(total_views=Sum('view_count')).order_by('date')
        per_day = dict(
            queryset.order_by('date').values_list('date', 'total_views')
        )
        # Fill zero for any day without recorded views so the series always
        # has a full 31 points, and label each point like "16 Jul".
        return {
            'labels': [
                timezone.datetime.strftime(date, '%d %b')
                for date in _last_30_days_iter()
            ],
            'int_data': [per_day.get(date) or 0 for date in _last_30_days_iter()],
        }
| {
"repo_name": "rtfd/readthedocs.org",
"path": "readthedocs/analytics/models.py",
"copies": "1",
"size": "4168",
"license": "mit",
"hash": -4579146996766342700,
"line_mean": 31.0615384615,
"line_max": 99,
"alpha_frac": 0.5885316699,
"autogenerated": false,
"ratio": 3.9847036328871894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5073235302787189,
"avg_score": null,
"num_lines": null
} |
# analytics/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
from django.db.models import Q
from django.utils.timezone import localtime, now
from datetime import timedelta
from election.models import Election
from exception.models import print_to_log
from follow.models import FollowOrganizationList
from organization.models import Organization
import wevote_functions.admin
from wevote_functions.functions import convert_date_as_integer_to_date, convert_date_to_date_as_integer, \
convert_to_int, positive_value_exists
from wevote_settings.models import WeVoteSetting, WeVoteSettingsManager
# Action constants stored in AnalyticsAction.action_constant. Values are
# persisted in the analytics database, so existing values must never change;
# new actions always take the next unused integer.
ACTION_VOTER_GUIDE_VISIT = 1
ACTION_VOTER_GUIDE_ENTRY = 2  # DEPRECATED: Now we use ACTION_VOTER_GUIDE_VISIT + first_visit
ACTION_ORGANIZATION_FOLLOW = 3
ACTION_ORGANIZATION_AUTO_FOLLOW = 4
ACTION_ISSUE_FOLLOW = 5
ACTION_BALLOT_VISIT = 6
ACTION_POSITION_TAKEN = 7
ACTION_VOTER_TWITTER_AUTH = 8
ACTION_VOTER_FACEBOOK_AUTH = 9
ACTION_WELCOME_ENTRY = 10
ACTION_FRIEND_ENTRY = 11
ACTION_WELCOME_VISIT = 12
ACTION_ORGANIZATION_FOLLOW_IGNORE = 13
ACTION_ORGANIZATION_STOP_FOLLOWING = 14
ACTION_ISSUE_FOLLOW_IGNORE = 15
ACTION_ISSUE_STOP_FOLLOWING = 16
ACTION_MODAL_ISSUES = 17
ACTION_MODAL_ORGANIZATIONS = 18
ACTION_MODAL_POSITIONS = 19
ACTION_MODAL_FRIENDS = 20
ACTION_MODAL_SHARE = 21
ACTION_MODAL_VOTE = 22
ACTION_NETWORK = 23
ACTION_FACEBOOK_INVITABLE_FRIENDS = 24
ACTION_DONATE_VISIT = 25
ACTION_ACCOUNT_PAGE = 26
ACTION_INVITE_BY_EMAIL = 27
ACTION_ABOUT_GETTING_STARTED = 28
ACTION_ABOUT_VISION = 29
ACTION_ABOUT_ORGANIZATION = 30
ACTION_ABOUT_TEAM = 31
ACTION_ABOUT_MOBILE = 32
ACTION_OFFICE = 33
ACTION_CANDIDATE = 34
ACTION_VOTER_GUIDE_GET_STARTED = 35
ACTION_FACEBOOK_AUTHENTICATION_EXISTS = 36
ACTION_GOOGLE_AUTHENTICATION_EXISTS = 37
ACTION_TWITTER_AUTHENTICATION_EXISTS = 38
ACTION_EMAIL_AUTHENTICATION_EXISTS = 39
ACTION_ELECTIONS = 40
ACTION_ORGANIZATION_STOP_IGNORING = 41
ACTION_MODAL_VOTER_PLAN = 42
ACTION_READY_VISIT = 43
ACTION_SELECT_BALLOT_MODAL = 44
ACTION_SHARE_BUTTON_COPY = 45
ACTION_SHARE_BUTTON_EMAIL = 46
ACTION_SHARE_BUTTON_FACEBOOK = 47
ACTION_SHARE_BUTTON_FRIENDS = 48
ACTION_SHARE_BUTTON_TWITTER = 49
ACTION_SHARE_BALLOT = 50
ACTION_SHARE_BALLOT_ALL_OPINIONS = 51
ACTION_SHARE_CANDIDATE = 52
ACTION_SHARE_CANDIDATE_ALL_OPINIONS = 53
ACTION_SHARE_MEASURE = 54
ACTION_SHARE_MEASURE_ALL_OPINIONS = 55
ACTION_SHARE_OFFICE = 56
ACTION_SHARE_OFFICE_ALL_OPINIONS = 57
ACTION_SHARE_READY = 58
ACTION_SHARE_READY_ALL_OPINIONS = 59
ACTION_VIEW_SHARED_BALLOT = 60
ACTION_VIEW_SHARED_BALLOT_ALL_OPINIONS = 61
ACTION_VIEW_SHARED_CANDIDATE = 62
ACTION_VIEW_SHARED_CANDIDATE_ALL_OPINIONS = 63
ACTION_VIEW_SHARED_MEASURE = 64
ACTION_VIEW_SHARED_MEASURE_ALL_OPINIONS = 65
ACTION_VIEW_SHARED_OFFICE = 66
ACTION_VIEW_SHARED_OFFICE_ALL_OPINIONS = 67
ACTION_VIEW_SHARED_READY = 68
ACTION_VIEW_SHARED_READY_ALL_OPINIONS = 69
ACTION_SEARCH_OPINIONS = 70
ACTION_UNSUBSCRIBE_EMAIL_PAGE = 71
ACTION_UNSUBSCRIBE_SMS_PAGE = 72
ACTION_MEASURE = 73
ACTION_NEWS = 74
ACTION_SHARE_ORGANIZATION = 75
ACTION_SHARE_ORGANIZATION_ALL_OPINIONS = 76
ACTION_VIEW_SHARED_ORGANIZATION = 77
# BUGFIX: this was previously also 77, colliding with
# ACTION_VIEW_SHARED_ORGANIZATION and making the two actions
# indistinguishable in stored analytics. 78 was the next unused value.
ACTION_VIEW_SHARED_ORGANIZATION_ALL_OPINIONS = 78
# Actions that only make sense when an organization id accompanies them.
ACTIONS_THAT_REQUIRE_ORGANIZATION_IDS = \
    [ACTION_ORGANIZATION_AUTO_FOLLOW,
     ACTION_ORGANIZATION_FOLLOW, ACTION_ORGANIZATION_FOLLOW_IGNORE, ACTION_ORGANIZATION_STOP_FOLLOWING,
     ACTION_ORGANIZATION_STOP_IGNORING, ACTION_VOTER_GUIDE_VISIT]
logger = wevote_functions.admin.get_logger(__name__)
class AnalyticsAction(models.Model):
    """
    This is an incoming action we want to track

    One row per tracked voter action; action_constant identifies which
    ACTION_* event occurred.
    """
    action_constant = models.PositiveSmallIntegerField(
        verbose_name="constant representing action", null=True, unique=False, db_index=True)
    exact_time = models.DateTimeField(verbose_name='date and time of action', null=False, auto_now_add=True)
    # We store YYYYMMDD as an integer for very fast lookup (ex/ "20170901" for September, 1, 2017)
    date_as_integer = models.PositiveIntegerField(
        verbose_name="YYYYMMDD of the action", null=True, unique=False, db_index=True)
    # We store both
    voter_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=False,
        db_index=True)
    voter_id = models.PositiveIntegerField(verbose_name="voter internal id", null=True, unique=False)
    # This voter is linked to a sign in account (Facebook, Twitter, Google, etc.)
    is_signed_in = models.BooleanField(verbose_name='', default=False)
    state_code = models.CharField(
        verbose_name="state_code", max_length=255, null=True, blank=True, unique=False)
    organization_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False, db_index=True)
    organization_id = models.PositiveIntegerField(null=True, blank=True)
    ballot_item_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
    # The unique ID of this election. (Provided by Google Civic)
    google_civic_election_id = models.PositiveIntegerField(
        verbose_name="google civic election id", null=True, unique=False, db_index=True)
    # This entry was the first entry on this day, used for tracking direct links to We Vote
    first_visit_today = models.BooleanField(verbose_name='', default=False)
    # We only want to store voter_device_id if we haven't verified the session yet. Set to null once verified.
    voter_device_id = models.CharField(
        verbose_name="voter_device_id of initiating voter", max_length=255, null=True, blank=True, unique=False)
    # When analytics comes into Analytics Application server, we need to authenticate the request. We authenticate
    # the voter_device_id against a read-only database server, which might run seconds behind the master. Because of
    # this, if a voter_device_id is not found the first time, we want to try again minutes later. BUT if that
    # fails we want to invalidate the analytics.
    authentication_failed_twice = models.BooleanField(verbose_name='', default=False)
    user_agent = models.CharField(verbose_name="https request user agent", max_length=255, null=True, blank=True,
                                  unique=False)
    is_bot = models.BooleanField(verbose_name="request came from web-bots or spider", default=False)
    is_mobile = models.BooleanField(verbose_name="request came from mobile device", default=False)
    is_desktop = models.BooleanField(verbose_name="request came from desktop device", default=False)
    is_tablet = models.BooleanField(verbose_name="request came from tablet device", default=False)
    # We override the save function to auto-generate date_as_integer
    def save(self, *args, **kwargs):
        """Persist the row, normalizing and back-filling date_as_integer."""
        if self.date_as_integer:
            self.date_as_integer = convert_to_int(self.date_as_integer)
        # NOTE(review): the "" comparison can only match if convert_to_int
        # returns "" for unparseable input -- confirm that helper's contract.
        if self.date_as_integer == "" or self.date_as_integer is None:  # If there isn't a value...
            self.generate_date_as_integer()
        super(AnalyticsAction, self).save(*args, **kwargs)
    def display_action_constant_human_readable(self):
        """Return the human-readable label for this row's action_constant."""
        return display_action_constant_human_readable(self.action_constant)
    def generate_date_as_integer(self):
        """Set date_as_integer to today's date encoded as YYYYMMDD."""
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.date_as_integer = convert_to_int(day_as_string)
        return
    def election(self):
        """Return the Election for this row's google_civic_election_id, or None."""
        if not self.google_civic_election_id:
            return
        try:
            # Read replica is sufficient -- this is a display-time lookup.
            election = Election.objects.using('readonly').get(google_civic_election_id=self.google_civic_election_id)
        except Election.MultipleObjectsReturned as e:
            logger.error("position.election Found multiple")
            return
        except Election.DoesNotExist:
            return
        return election
    def organization(self):
        """Return the Organization for this row's organization_we_vote_id, or None."""
        if not self.organization_we_vote_id:
            return
        try:
            # Read replica is sufficient -- this is a display-time lookup.
            organization = Organization.objects.using('readonly').get(we_vote_id=self.organization_we_vote_id)
        except Organization.MultipleObjectsReturned as e:
            logger.error("analytics.organization Found multiple")
            return
        except Organization.DoesNotExist:
            logger.error("analytics.organization did not find")
            return
        return organization
class AnalyticsCountManager(models.Manager):
def fetch_ballot_views(self, google_civic_election_id=0, limit_to_one_date_as_integer=0):
"""
Count the number of voters that viewed at least one ballot
:param google_civic_election_id:
:param limit_to_one_date_as_integer:
:return:
"""
count_result = None
try:
count_query = AnalyticsAction.objects.using('analytics').all()
count_query = count_query.filter(action_constant=ACTION_BALLOT_VISIT)
if positive_value_exists(google_civic_election_id):
count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
if positive_value_exists(limit_to_one_date_as_integer):
count_query = count_query.filter(date_as_integer=limit_to_one_date_as_integer)
count_query = count_query.values('voter_we_vote_id').distinct()
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_organization_entrants_list(self, organization_we_vote_id, google_civic_election_id=0):
"""
:param organization_we_vote_id:
:param google_civic_election_id:
:return:
"""
voters_who_visited_organization_first_simple_list = []
try:
first_visit_query = AnalyticsAction.objects.using('analytics').all()
first_visit_query = first_visit_query.filter(Q(action_constant=ACTION_VOTER_GUIDE_VISIT) |
Q(action_constant=ACTION_ORGANIZATION_AUTO_FOLLOW))
first_visit_query = first_visit_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
if positive_value_exists(google_civic_election_id):
first_visit_query = first_visit_query.filter(google_civic_election_id=google_civic_election_id)
first_visit_query = first_visit_query.filter(first_visit_today=True)
first_visit_query = first_visit_query.values('voter_we_vote_id').distinct()
voters_who_visited_organization_first = list(first_visit_query)
for voter_dict in voters_who_visited_organization_first:
if positive_value_exists(voter_dict['voter_we_vote_id']):
voters_who_visited_organization_first_simple_list.append(voter_dict['voter_we_vote_id'])
except Exception as e:
pass
return voters_who_visited_organization_first_simple_list
def fetch_organization_entrants_took_position(
self, organization_we_vote_id, google_civic_election_id=0):
"""
Count the voters who entered on an organization's voter guide, and then took a position
:param organization_we_vote_id:
:param google_civic_election_id:
:return:
"""
voters_who_visited_organization_first_simple_list = \
self.fetch_organization_entrants_list(organization_we_vote_id, google_civic_election_id)
if not len(voters_who_visited_organization_first_simple_list):
return 0
count_result = None
try:
count_query = AnalyticsAction.objects.using('analytics').all()
count_query = count_query.filter(action_constant=ACTION_POSITION_TAKEN)
if positive_value_exists(google_civic_election_id):
count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
count_query = count_query.filter(voter_we_vote_id__in=voters_who_visited_organization_first_simple_list)
count_query = count_query.values('voter_we_vote_id').distinct()
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_organization_entrants_visited_ballot(
self, organization_we_vote_id, google_civic_election_id=0):
"""
Count the voters who entered on an organization's voter guide, and then who proceeded to ballot
:param organization_we_vote_id:
:param google_civic_election_id:
:return:
"""
voters_who_visited_organization_first_simple_list = \
self.fetch_organization_entrants_list(organization_we_vote_id, google_civic_election_id)
if not len(voters_who_visited_organization_first_simple_list):
return 0
count_result = None
try:
count_query = AnalyticsAction.objects.using('analytics').all()
count_query = count_query.filter(action_constant=ACTION_BALLOT_VISIT)
if positive_value_exists(google_civic_election_id):
count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
count_query = count_query.filter(voter_we_vote_id__in=voters_who_visited_organization_first_simple_list)
count_query = count_query.values('voter_we_vote_id').distinct()
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_organization_followers_took_position(self, organization_we_vote_id, google_civic_election_id=0):
follow_organization_list = FollowOrganizationList()
return_voter_we_vote_id = True
voter_we_vote_ids_of_organization_followers = \
follow_organization_list.fetch_followers_list_by_organization_we_vote_id(
organization_we_vote_id, return_voter_we_vote_id)
count_result = None
try:
count_query = AnalyticsAction.objects.using('analytics').all()
count_query = count_query.filter(action_constant=ACTION_POSITION_TAKEN)
if positive_value_exists(google_civic_election_id):
count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
count_query = count_query.filter(voter_we_vote_id__in=voter_we_vote_ids_of_organization_followers)
count_query = count_query.values('voter_we_vote_id').distinct()
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_organization_followers_visited_ballot(self, organization_we_vote_id, google_civic_election_id=0):
follow_organization_list = FollowOrganizationList()
return_voter_we_vote_id = True
voter_we_vote_ids_of_organization_followers = \
follow_organization_list.fetch_followers_list_by_organization_we_vote_id(
organization_we_vote_id, return_voter_we_vote_id)
count_result = None
try:
count_query = AnalyticsAction.objects.using('analytics').all()
count_query = count_query.filter(action_constant=ACTION_BALLOT_VISIT)
if positive_value_exists(google_civic_election_id):
count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
count_query = count_query.filter(voter_we_vote_id__in=voter_we_vote_ids_of_organization_followers)
count_query = count_query.values('voter_we_vote_id').distinct()
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_visitors(self, google_civic_election_id=0, organization_we_vote_id='',
limit_to_one_date_as_integer=0, count_through_this_date_as_integer=0,
limit_to_authenticated=False):
count_result = None
try:
count_query = AnalyticsAction.objects.using('analytics').all()
if positive_value_exists(google_civic_election_id):
count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
if positive_value_exists(organization_we_vote_id):
count_query = count_query.filter(action_constant=ACTION_VOTER_GUIDE_VISIT)
count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
if positive_value_exists(limit_to_one_date_as_integer):
count_query = count_query.filter(date_as_integer=limit_to_one_date_as_integer)
elif positive_value_exists(count_through_this_date_as_integer):
count_query = count_query.filter(date_as_integer__lte=count_through_this_date_as_integer)
if limit_to_authenticated:
count_query = count_query.filter(is_signed_in=True)
count_query = count_query.values('voter_we_vote_id').distinct()
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_visitors_first_visit_to_organization_in_election(self, organization_we_vote_id, google_civic_election_id):
"""
Entries are marked "first_visit_today" if it is the first visit in one day
:param organization_we_vote_id:
:param google_civic_election_id:
:return:
"""
count_result = None
try:
count_query = AnalyticsAction.objects.using('analytics').all()
count_query = count_query.filter(Q(action_constant=ACTION_VOTER_GUIDE_VISIT) |
Q(action_constant=ACTION_ORGANIZATION_AUTO_FOLLOW))
count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
count_query = count_query.filter(first_visit_today=True)
count_query = count_query.values('voter_we_vote_id').distinct()
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_new_followers_in_election(self, google_civic_election_id, organization_we_vote_id=""):
"""
:param organization_we_vote_id:
:param google_civic_election_id:
:return:
"""
count_result = None
try:
count_query = AnalyticsAction.objects.using('analytics').all()
count_query = count_query.filter(Q(action_constant=ACTION_ORGANIZATION_FOLLOW) |
Q(action_constant=ACTION_ORGANIZATION_AUTO_FOLLOW))
if positive_value_exists(organization_we_vote_id):
count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
count_query = count_query.values('voter_we_vote_id').distinct()
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_new_auto_followers_in_election(self, google_civic_election_id, organization_we_vote_id=""):
    """
    Count distinct voters who auto-followed an organization during one election,
    optionally limited to a single organization.
    :param google_civic_election_id: election to limit the count to
    :param organization_we_vote_id: optional organization filter
    :return: distinct voter count, or None if the query raised
    """
    distinct_voter_count = None
    try:
        queryset = AnalyticsAction.objects.using('analytics').filter(
            action_constant=ACTION_ORGANIZATION_AUTO_FOLLOW)
        if positive_value_exists(organization_we_vote_id):
            queryset = queryset.filter(organization_we_vote_id__iexact=organization_we_vote_id)
        queryset = queryset.filter(google_civic_election_id=google_civic_election_id)
        distinct_voter_count = queryset.values('voter_we_vote_id').distinct().count()
    except Exception:
        # Best-effort metric: swallow DB errors and report "unknown" as None
        pass
    return distinct_voter_count
def fetch_voter_action_count(self, voter_we_vote_id):
    """Count every AnalyticsAction entry recorded for one voter.

    :return: total entry count, or None if the query raised
    """
    action_count = None
    try:
        action_count = AnalyticsAction.objects.using('analytics').filter(
            voter_we_vote_id__iexact=voter_we_vote_id).count()
    except Exception:
        # Best-effort metric: swallow DB errors and report "unknown" as None
        pass
    return action_count
def fetch_voter_ballot_visited(self, voter_we_vote_id, google_civic_election_id=0, organization_we_vote_id=''):
    """Count one voter's ballot visits, optionally limited to an election and/or organization.

    :return: visit count, or None if the query raised
    """
    visit_count = None
    try:
        queryset = AnalyticsAction.objects.using('analytics').filter(
            voter_we_vote_id__iexact=voter_we_vote_id,
            action_constant=ACTION_BALLOT_VISIT)
        if positive_value_exists(google_civic_election_id):
            queryset = queryset.filter(google_civic_election_id=google_civic_election_id)
        if positive_value_exists(organization_we_vote_id):
            queryset = queryset.filter(organization_we_vote_id__iexact=organization_we_vote_id)
        visit_count = queryset.count()
    except Exception:
        # Best-effort metric: swallow DB errors and report "unknown" as None
        pass
    return visit_count
def fetch_voter_welcome_visited(self, voter_we_vote_id):
    """Count how many times one voter visited the welcome page.

    :return: visit count, or None if the query raised
    """
    visit_count = None
    try:
        visit_count = AnalyticsAction.objects.using('analytics').filter(
            voter_we_vote_id__iexact=voter_we_vote_id,
            action_constant=ACTION_WELCOME_VISIT).count()
    except Exception:
        # Best-effort metric: swallow DB errors and report "unknown" as None
        pass
    return visit_count
def fetch_voter_days_visited(self, voter_we_vote_id):
    """Count the distinct days (date_as_integer values) on which one voter took any action.

    :return: distinct day count, or None if the query raised
    """
    day_count = None
    try:
        queryset = AnalyticsAction.objects.using('analytics').filter(
            voter_we_vote_id__iexact=voter_we_vote_id)
        day_count = queryset.values('date_as_integer').distinct().count()
    except Exception:
        # Best-effort metric: swallow DB errors and report "unknown" as None
        pass
    return day_count
def fetch_voter_last_action_date(self, voter_we_vote_id):
    """Return the exact_time of the voter's most recent AnalyticsAction entry.

    :return: datetime of the latest entry (highest id), or None if no entry exists
        or the query raised
    """
    last_action_date = None
    try:
        # Highest id is treated as the most recent entry
        newest_entries = list(
            AnalyticsAction.objects.using('analytics').filter(
                voter_we_vote_id__iexact=voter_we_vote_id).order_by('-id')[:1])
        if newest_entries:
            last_action_date = newest_entries[0].exact_time
    except Exception:
        # Best-effort: swallow DB errors and report "unknown" as None
        pass
    return last_action_date
def fetch_voter_voter_guides_viewed(self, voter_we_vote_id):
    """Count the distinct organizations whose voter guides one voter has visited.

    :return: distinct organization count, or 0 if the query raised
    """
    guides_viewed = 0
    try:
        queryset = AnalyticsAction.objects.using('analytics').filter(
            voter_we_vote_id__iexact=voter_we_vote_id,
            action_constant=ACTION_VOTER_GUIDE_VISIT)
        guides_viewed = queryset.values('organization_we_vote_id').distinct().count()
    except Exception:
        # Best-effort metric: swallow DB errors and report 0
        pass
    return guides_viewed
def fetch_voter_guides_viewed(
        self, google_civic_election_id=0, limit_to_one_date_as_integer=0, count_through_this_date_as_integer=0):
    """Count distinct organizations whose voter guides were visited sitewide.

    :param google_civic_election_id: optional election filter
    :param limit_to_one_date_as_integer: if set, count only this one day
    :param count_through_this_date_as_integer: if set (and no single-day limit),
        count all days up to and including this one
    :return: distinct organization count, or 0 if the query raised
    """
    guides_viewed = 0
    try:
        queryset = AnalyticsAction.objects.using('analytics').all()
        if positive_value_exists(google_civic_election_id):
            queryset = queryset.filter(google_civic_election_id=google_civic_election_id)
        queryset = queryset.filter(action_constant=ACTION_VOTER_GUIDE_VISIT)
        # The single-day limit takes precedence over the through-date limit
        if positive_value_exists(limit_to_one_date_as_integer):
            queryset = queryset.filter(date_as_integer=limit_to_one_date_as_integer)
        elif positive_value_exists(count_through_this_date_as_integer):
            queryset = queryset.filter(date_as_integer__lte=count_through_this_date_as_integer)
        guides_viewed = queryset.values('organization_we_vote_id').distinct().count()
    except Exception:
        # Best-effort metric: swallow DB errors and report 0
        pass
    return guides_viewed
class AnalyticsManager(models.Manager):
def create_action_type1(
        self, action_constant, voter_we_vote_id, voter_id, is_signed_in, state_code,
        organization_we_vote_id, organization_id, google_civic_election_id,
        user_agent_string, is_bot, is_mobile, is_desktop, is_tablet,
        ballot_item_we_vote_id="", voter_device_id=None):
    """
    Save one AnalyticsAction entry for an action type that requires organization
    identifiers (in addition to an action constant and a voter).
    Returns a results dict with 'success', 'status', 'action_saved' and 'action'.
    """
    status = "ACTION_CONSTANT:" + display_action_constant_human_readable(action_constant) + " "
    success = True
    action_saved = False
    action = AnalyticsAction()
    # Validate all required identifiers up front, collecting every missing one
    required_values = (
        (action_constant, 'MISSING_ACTION_CONSTANT '),
        (voter_we_vote_id, 'MISSING_VOTER_WE_VOTE_ID '),
        (organization_we_vote_id, 'MISSING_ORGANIZATION_WE_VOTE_ID '),
    )
    missing_required_variable = False
    for value, missing_message in required_values:
        if not value:
            missing_required_variable = True
            status += missing_message
    if not missing_required_variable:
        try:
            action = AnalyticsAction.objects.using('analytics').create(
                action_constant=action_constant,
                voter_we_vote_id=voter_we_vote_id,
                voter_id=voter_id,
                is_signed_in=is_signed_in,
                state_code=state_code,
                organization_we_vote_id=organization_we_vote_id,
                organization_id=organization_id,
                google_civic_election_id=google_civic_election_id,
                ballot_item_we_vote_id=ballot_item_we_vote_id,
                user_agent=user_agent_string,
                is_bot=is_bot,
                is_mobile=is_mobile,
                is_desktop=is_desktop,
                is_tablet=is_tablet
            )
            action_saved = True
            status += 'ACTION_TYPE1_SAVED '
        except Exception as e:
            success = False
            status += 'COULD_NOT_SAVE_ACTION_TYPE1 ' + str(e) + ' '
    return {
        'success': success,
        'status': status,
        'action_saved': action_saved,
        'action': action,
    }
def create_action_type2(
        self, action_constant, voter_we_vote_id, voter_id, is_signed_in, state_code,
        organization_we_vote_id, google_civic_election_id,
        user_agent_string, is_bot, is_mobile, is_desktop, is_tablet,
        ballot_item_we_vote_id, voter_device_id=None):
    """
    Save one AnalyticsAction entry for an action type that does NOT require
    organization identifiers (only an action constant and a voter).
    Returns a results dict with 'success', 'status', 'action_saved' and 'action'.
    """
    status = "ACTION_CONSTANT:" + display_action_constant_human_readable(action_constant) + " "
    success = True
    action_saved = False
    action = AnalyticsAction()
    # Validate the required identifiers up front, collecting every missing one
    required_values = (
        (action_constant, 'MISSING_ACTION_CONSTANT '),
        (voter_we_vote_id, 'MISSING_VOTER_WE_VOTE_ID '),
    )
    missing_required_variable = False
    for value, missing_message in required_values:
        if not value:
            missing_required_variable = True
            status += missing_message
    if not missing_required_variable:
        try:
            action = AnalyticsAction.objects.using('analytics').create(
                action_constant=action_constant,
                voter_we_vote_id=voter_we_vote_id,
                voter_id=voter_id,
                is_signed_in=is_signed_in,
                state_code=state_code,
                organization_we_vote_id=organization_we_vote_id,
                google_civic_election_id=google_civic_election_id,
                ballot_item_we_vote_id=ballot_item_we_vote_id,
                user_agent=user_agent_string,
                is_bot=is_bot,
                is_mobile=is_mobile,
                is_desktop=is_desktop,
                is_tablet=is_tablet
            )
            action_saved = True
            status += 'ACTION_TYPE2_SAVED '
        except Exception as e:
            success = False
            status += 'COULD_NOT_SAVE_ACTION_TYPE2 ' + str(e) + ' '
    return {
        'success': success,
        'status': status,
        'action_saved': action_saved,
        'action': action,
    }
def retrieve_analytics_action_list(self, voter_we_vote_id='', voter_we_vote_id_list=[], google_civic_election_id=0,
                                   organization_we_vote_id='', action_constant='', distinct_for_members=False,
                                   state_code=''):
    """
    Retrieve AnalyticsAction entries matching any combination of the given filters.
    A single voter_we_vote_id takes precedence over voter_we_vote_id_list.
    When distinct_for_members is set, collapse to one entry per
    (election, organization, voter) triple.
    Returns a results dict with 'success', 'status', 'analytics_action_list'
    and 'analytics_action_list_found'.
    """
    status = ""
    success = True
    analytics_action_list = []
    analytics_action_list_found = False
    try:
        queryset = AnalyticsAction.objects.using('analytics').all()
        if positive_value_exists(voter_we_vote_id):
            queryset = queryset.filter(voter_we_vote_id__iexact=voter_we_vote_id)
        elif len(voter_we_vote_id_list):
            queryset = queryset.filter(voter_we_vote_id__in=voter_we_vote_id_list)
        if positive_value_exists(google_civic_election_id):
            queryset = queryset.filter(google_civic_election_id=google_civic_election_id)
        if positive_value_exists(organization_we_vote_id):
            queryset = queryset.filter(organization_we_vote_id__iexact=organization_we_vote_id)
        if positive_value_exists(action_constant):
            queryset = queryset.filter(action_constant=action_constant)
        if positive_value_exists(state_code):
            queryset = queryset.filter(state_code__iexact=state_code)
        if positive_value_exists(distinct_for_members):
            queryset = queryset.distinct(
                'google_civic_election_id', 'organization_we_vote_id', 'voter_we_vote_id')
        analytics_action_list = list(queryset)
        analytics_action_list_found = positive_value_exists(len(analytics_action_list))
    except Exception as e:
        status += "ANALYTICS_ACTION_LIST_ERROR: " + str(e) + " "
        success = False
    return {
        'success': success,
        'status': status,
        'analytics_action_list': analytics_action_list,
        'analytics_action_list_found': analytics_action_list_found,
    }
def retrieve_analytics_processed_list(
        self, analytics_date_as_integer=0, voter_we_vote_id='', voter_we_vote_id_list=[],
        google_civic_election_id=0, organization_we_vote_id='', kind_of_process='',
        batch_process_id=0, batch_process_analytics_chunk_id=0, analytics_date_as_integer_more_recent_than=0):
    """
    Retrieve AnalyticsProcessed entries matching the given filters, plus the
    distinct voter_we_vote_id values present in that same result set.

    analytics_date_as_integer_more_recent_than (>= filter) takes precedence over
    analytics_date_as_integer (exact match); a single voter_we_vote_id takes
    precedence over voter_we_vote_id_list.
    Returns a results dict with 'success', 'status', 'analytics_processed_list',
    'analytics_processed_list_found', 'retrieved_voter_we_vote_id_list' and
    'retrieved_voter_we_vote_id_list_found'.
    """
    success = True
    status = ""
    analytics_processed_list = []
    analytics_processed_list_found = False
    retrieved_voter_we_vote_id_list = []
    retrieved_voter_we_vote_id_list_found = False
    voter_list_query = None
    try:
        list_query = AnalyticsProcessed.objects.using('analytics').all()
        if positive_value_exists(batch_process_id):
            list_query = list_query.filter(batch_process_id=batch_process_id)
        if positive_value_exists(batch_process_analytics_chunk_id):
            list_query = list_query.filter(batch_process_analytics_chunk_id=batch_process_analytics_chunk_id)
        if positive_value_exists(analytics_date_as_integer_more_recent_than):
            # Bug fix: this previously filtered with analytics_date_as_integer,
            # ignoring the more_recent_than value that triggered the branch
            list_query = list_query.filter(
                analytics_date_as_integer__gte=analytics_date_as_integer_more_recent_than)
        elif positive_value_exists(analytics_date_as_integer):
            list_query = list_query.filter(analytics_date_as_integer=analytics_date_as_integer)
        if positive_value_exists(voter_we_vote_id):
            list_query = list_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
        elif len(voter_we_vote_id_list):
            list_query = list_query.filter(voter_we_vote_id__in=voter_we_vote_id_list)
        if positive_value_exists(google_civic_election_id):
            list_query = list_query.filter(google_civic_election_id=google_civic_election_id)
        if positive_value_exists(organization_we_vote_id):
            list_query = list_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
        if positive_value_exists(kind_of_process):
            list_query = list_query.filter(kind_of_process__iexact=kind_of_process)
        # Keep an un-evaluated copy of the queryset for the distinct-voter pass below
        voter_list_query = list_query
        analytics_processed_list = list(list_query)
        analytics_processed_list_found = True
    except Exception as e:
        analytics_processed_list_found = False
        status += "ANALYTICS_PROCESSED_LIST_ERROR: " + str(e) + " "
        success = False
    # Bug fix: only run the distinct-voter query when the first query succeeded;
    # previously voter_list_query could be referenced while unbound
    if voter_list_query is not None:
        try:
            retrieved_voter_we_vote_id_list = \
                voter_list_query.values_list('voter_we_vote_id', flat=True).distinct()
            retrieved_voter_we_vote_id_list_found = True
        except Exception as e:
            retrieved_voter_we_vote_id_list_found = False
            status += "ANALYTICS_PROCESSED_LIST_VOTER_WE_VOTE_ID_ERROR: " + str(e) + " "
            success = False
    results = {
        'success': success,
        'status': status,
        'analytics_processed_list': analytics_processed_list,
        'analytics_processed_list_found': analytics_processed_list_found,
        'retrieved_voter_we_vote_id_list': retrieved_voter_we_vote_id_list,
        'retrieved_voter_we_vote_id_list_found': retrieved_voter_we_vote_id_list_found,
    }
    return results
def delete_analytics_processed_list(
        self, analytics_date_as_integer=0, voter_we_vote_id='', voter_we_vote_id_list=[],
        google_civic_election_id=0, organization_we_vote_id='', kind_of_process=''):
    """
    Delete AnalyticsProcessed entries for one analytics date, further narrowed by
    any of the optional filters. A single voter_we_vote_id takes precedence over
    voter_we_vote_id_list.
    Returns a results dict with 'success', 'status' and
    'analytics_processed_list_deleted'.
    """
    success = True
    status = ""
    try:
        queryset = AnalyticsProcessed.objects.using('analytics').filter(
            analytics_date_as_integer=analytics_date_as_integer)
        if positive_value_exists(voter_we_vote_id):
            queryset = queryset.filter(voter_we_vote_id__iexact=voter_we_vote_id)
        elif len(voter_we_vote_id_list):
            queryset = queryset.filter(voter_we_vote_id__in=voter_we_vote_id_list)
        if positive_value_exists(google_civic_election_id):
            queryset = queryset.filter(google_civic_election_id=google_civic_election_id)
        if positive_value_exists(organization_we_vote_id):
            queryset = queryset.filter(organization_we_vote_id__iexact=organization_we_vote_id)
        if positive_value_exists(kind_of_process):
            queryset = queryset.filter(kind_of_process__iexact=kind_of_process)
        queryset.delete()
        analytics_processed_list_deleted = True
    except Exception as e:
        analytics_processed_list_deleted = False
        status += "ANALYTICS_PROCESSED_LIST_ERROR: " + str(e) + " "
        success = False
    return {
        'success': success,
        'status': status,
        'analytics_processed_list_deleted': analytics_processed_list_deleted,
    }
def save_analytics_processed(self, analytics_date_as_integer, voter_we_vote_id, defaults):
    """
    Create or update one AnalyticsProcessed entry, keyed on
    (analytics_date_as_integer, voter_we_vote_id, kind_of_process).
    The kind_of_process key is taken from defaults; remaining defaults are the
    values to store.
    Returns a results dict with 'success', 'status', 'analytics_processed_saved'
    and 'analytics_processed'.
    """
    status = ""
    success = True
    analytics_processed = None
    analytics_processed_saved = False
    try:
        analytics_processed, created = AnalyticsProcessed.objects.using('analytics').\
            update_or_create(
                analytics_date_as_integer=analytics_date_as_integer,
                voter_we_vote_id=voter_we_vote_id,
                kind_of_process=defaults['kind_of_process'],
                defaults=defaults
            )
        analytics_processed_saved = True
    except Exception as e:
        success = False
        status += 'SAVE_ANALYTICS_PROCESSED_PROBLEM: ' + str(e) + ' '
    return {
        'success': success,
        'status': status,
        'analytics_processed_saved': analytics_processed_saved,
        'analytics_processed': analytics_processed,
    }
def save_analytics_processing_status(self, analytics_date_as_integer, defaults):
    """
    Create or update the AnalyticsProcessingStatus entry for one analytics date,
    applying defaults as the values to store.
    Returns a results dict with 'success', 'status',
    'analytics_processing_status_saved' and 'analytics_processing_status'.
    """
    status = ""
    success = True
    analytics_processing_status = None
    analytics_processing_status_saved = False
    try:
        analytics_processing_status, created = AnalyticsProcessingStatus.objects.using('analytics').\
            update_or_create(
                analytics_date_as_integer=analytics_date_as_integer,
                defaults=defaults
            )
        analytics_processing_status_saved = True
    except Exception as e:
        success = False
        status += 'SAVE_ANALYTICS_PROCESSING_STATUS_PROBLEM: ' + str(e) + ' '
    return {
        'success': success,
        'status': status,
        'analytics_processing_status_saved': analytics_processing_status_saved,
        'analytics_processing_status': analytics_processing_status,
    }
def retrieve_analytics_processing_status_by_date_as_integer(self, analytics_date_as_integer):
    """
    Retrieve the AnalyticsProcessingStatus entry for one analytics date.

    Bug fix: success was initialized False and never updated, so callers always
    saw failure even when the entry was found. Now success stays True unless the
    query itself raises; "not found" (DoesNotExist) is not treated as a failure.
    Returns a results dict with 'success', 'status', 'analytics_processing_status'
    and 'analytics_processing_status_found'.
    """
    success = True
    status = ""
    analytics_processing_status = None
    analytics_processing_status_found = False
    try:
        analytics_processing_status = AnalyticsProcessingStatus.objects.using('analytics').get(
            analytics_date_as_integer=analytics_date_as_integer)
        analytics_processing_status_found = True
    except AnalyticsProcessingStatus.DoesNotExist:
        # Missing entry is a normal outcome, not an error
        status += "ANALYTICS_PROCESSING_STATUS_NOT_FOUND "
    except Exception as e:
        success = False
        analytics_processing_status = None
        status += "RETRIEVE_ANALYTICS_PROCESSING_STATUS: " + str(e) + " "
    results = {
        'success': success,
        'status': status,
        'analytics_processing_status': analytics_processing_status,
        'analytics_processing_status_found': analytics_processing_status_found,
    }
    return results
def find_next_date_with_analytics_to_process(self, last_analytics_date_as_integer):
    """
    Walk forward one day at a time from last_analytics_date_as_integer until a day
    with AnalyticsAction entries is found, and return that day as an integer.

    Stops early (found=False) when the candidate day reaches today, when a query
    error occurs, or after 500 iterations (safety valve against an endless loop).
    :param last_analytics_date_as_integer: day to start searching AFTER (YYYYMMDD int)
    :return: results dict with 'success', 'status', 'new_analytics_date_as_integer'
        and 'new_analytics_date_as_integer_found'
    """
    status = ""
    success = True
    times_tried = 0
    still_looking_for_next_date = True
    prior_analytics_date_as_integer = last_analytics_date_as_integer
    new_analytics_date_as_integer = 0
    new_analytics_date_as_integer_found = False
    # If here, these are all finished and we need to analyze the next day
    date_now = now()
    date_now_as_integer = convert_date_to_date_as_integer(date_now)
    while still_looking_for_next_date:
        times_tried += 1
        if times_tried > 500:
            # Safety valve: give up rather than loop forever
            still_looking_for_next_date = False
            continue
        # Make sure the last date_as_integer analyzed isn't past today
        prior_analytics_date = convert_date_as_integer_to_date(prior_analytics_date_as_integer)
        one_day = timedelta(days=1)
        new_analytics_date = prior_analytics_date + one_day
        new_analytics_date_as_integer = convert_date_to_date_as_integer(new_analytics_date)
        if new_analytics_date_as_integer >= date_now_as_integer:
            # We never process today (or the future): that data is still accumulating
            new_analytics_date_as_integer_found = False
            status += "NEXT_ANALYTICS_DATE_IS_TODAY "
            results = {
                'success': success,
                'status': status,
                'new_analytics_date_as_integer': new_analytics_date_as_integer,
                'new_analytics_date_as_integer_found': new_analytics_date_as_integer_found,
            }
            return results
        # Now see if we have analytics on that date
        try:
            voter_history_query = AnalyticsAction.objects.using('analytics').all()
            voter_history_query = voter_history_query.filter(date_as_integer=new_analytics_date_as_integer)
            analytics_count = voter_history_query.count()
            if positive_value_exists(analytics_count):
                # Found a day with data: stop and report it
                still_looking_for_next_date = False
                new_analytics_date_as_integer_found = True
            else:
                # Empty day: advance the cursor and keep looking
                prior_analytics_date_as_integer = new_analytics_date_as_integer
        except Exception as e:
            # NOTE(review): success stays True on a query error; only status records it
            status += "COULD_NOT_RETRIEVE_ANALYTICS: " + str(e) + " "
            still_looking_for_next_date = False
    results = {
        'success': success,
        'status': status,
        'new_analytics_date_as_integer': new_analytics_date_as_integer,
        'new_analytics_date_as_integer_found': new_analytics_date_as_integer_found,
    }
    return results
def does_analytics_processing_status_exist_for_one_date(self, analytics_date_as_integer_last_processed):
    """
    Look up the AnalyticsProcessingStatus entry for one analytics date, if any.
    When no date is given, the earliest entry by analytics_date_as_integer is used.
    Returns a results dict with 'success', 'status', 'analytics_processing_status'
    and 'analytics_processing_status_found'.
    """
    status = ""
    success = True
    queryset = AnalyticsProcessingStatus.objects.using('analytics').order_by('analytics_date_as_integer')
    if positive_value_exists(analytics_date_as_integer_last_processed):
        # If there is a start date, force this query to only search before that date.
        # Go back and find the next date with analytics to process
        queryset = queryset.filter(analytics_date_as_integer=analytics_date_as_integer_last_processed)
    matches = list(queryset[:1])
    if matches:
        # We have found one to work on
        analytics_processing_status = matches[0]
        analytics_processing_status_found = True
    else:
        analytics_processing_status = None
        analytics_processing_status_found = False
    return {
        'success': success,
        'status': status,
        'analytics_processing_status': analytics_processing_status,
        'analytics_processing_status_found': analytics_processing_status_found,
    }
def request_unfinished_analytics_processing_status_for_one_date(self, analytics_date_as_integer_last_processed):
    """
    Find an AnalyticsProcessingStatus entry (for one date, or the earliest date
    when none is given) that still has at least one processing step unfinished.

    Only these steps are currently considered (the organization daily/election
    metric steps are intentionally excluded):
    - augment analytics action with election id
    - augment analytics action with first visit
    - calculate sitewide daily metrics
    - calculate sitewide voter metrics
    Returns a results dict with 'success', 'status', 'analytics_processing_status'
    and 'analytics_processing_status_found'.
    """
    status = ""
    success = True
    queryset = AnalyticsProcessingStatus.objects.using('analytics').order_by('analytics_date_as_integer')
    if positive_value_exists(analytics_date_as_integer_last_processed):
        # If there is a start date, force this query to only search before that date.
        # Go back and find the next date with analytics to process
        queryset = queryset.filter(analytics_date_as_integer=analytics_date_as_integer_last_processed)
    # OR together the "still unfinished" flags for each active processing step
    unfinished_flags = [
        'finished_augment_analytics_action_with_election_id',
        'finished_augment_analytics_action_with_first_visit',
        'finished_calculate_sitewide_daily_metrics',
        'finished_calculate_sitewide_voter_metrics',
    ]
    combined_filter = Q(**{unfinished_flags[0]: False})
    for flag_name in unfinished_flags[1:]:
        combined_filter |= Q(**{flag_name: False})
    queryset = queryset.filter(combined_filter)
    matches = list(queryset[:1])
    if matches:
        # We have found one to work on
        analytics_processing_status = matches[0]
        analytics_processing_status_found = True
    else:
        analytics_processing_status = None
        analytics_processing_status_found = False
    return {
        'success': success,
        'status': status,
        'analytics_processing_status': analytics_processing_status,
        'analytics_processing_status_found': analytics_processing_status_found,
    }
def retrieve_or_create_next_analytics_processing_status(self):
    """
    Find the highest (most recent) date_as_integer. If all elements have been completed,
    then create an entry for the next day. If not, return the latest entry.

    Flow: read analytics_date_as_integer_last_processed from WeVoteSettings (fail
    if missing); ensure a status entry exists for that date; if that date still has
    unfinished steps return it; otherwise advance to the next date with analytics
    data, create a status entry for it, and update the stored setting.
    :return: results dict with 'success', 'status', 'analytics_processing_status'
        and 'analytics_processing_status_found'
    """
    success = True
    status = ""
    analytics_processing_status = None
    analytics_processing_status_found = False
    create_new_status_entry = False
    we_vote_settings_manager = WeVoteSettingsManager()
    results = we_vote_settings_manager.fetch_setting_results('analytics_date_as_integer_last_processed')
    analytics_date_as_integer_last_processed = 0
    if results['we_vote_setting_found']:
        analytics_date_as_integer_last_processed = convert_to_int(results['setting_value'])
    if not positive_value_exists(analytics_date_as_integer_last_processed):
        # Without a stored starting date we cannot proceed
        status += "analytics_date_as_integer_last_processed-MISSING "
        success = False
        results = {
            'success': success,
            'status': status,
            'analytics_processing_status': analytics_processing_status,
            'analytics_processing_status_found': analytics_processing_status_found,
        }
        return results
    try:
        # Is there an analytics_processing_status for the date we care about?
        results = self.does_analytics_processing_status_exist_for_one_date(analytics_date_as_integer_last_processed)
        if not results['analytics_processing_status_found']:
            # Create the missing entry for the stored date
            # NOTE(review): analytics_processing_status_found is not set True here,
            # so callers see found=False even though the entry was just created — confirm intended
            defaults = {}
            analytics_processing_status, created = AnalyticsProcessingStatus.objects.using('analytics').\
                update_or_create(
                    analytics_date_as_integer=analytics_date_as_integer_last_processed,
                    defaults=defaults
                )
        else:
            # Are there any tasks remaining on this date?
            results = self.request_unfinished_analytics_processing_status_for_one_date(
                analytics_date_as_integer_last_processed)
            if results['analytics_processing_status_found']:
                # We have found one to work on
                analytics_processing_status = results['analytics_processing_status']
                analytics_processing_status_found = True
            else:
                if positive_value_exists(analytics_date_as_integer_last_processed):
                    # All steps finished for this date: advance to the next date with data
                    results = self.find_next_date_with_analytics_to_process(
                        last_analytics_date_as_integer=analytics_date_as_integer_last_processed)
                    if results['new_analytics_date_as_integer_found']:
                        new_analytics_date_as_integer = results['new_analytics_date_as_integer']
                        create_new_status_entry = True
                else:
                    # NOTE(review): this branch appears unreachable — the early return
                    # above guarantees analytics_date_as_integer_last_processed is positive
                    # Find last day processed with all calculations finished, so we advance to next available day
                    status += "NO_ANALYTICS_PROCESSING_STATUS_FOUND "
                    queryset = AnalyticsProcessingStatus.objects.using('analytics').\
                        order_by('-analytics_date_as_integer')
                    analytics_processing_status_list = queryset[:1]
                    if len(analytics_processing_status_list):
                        # We have found one to work on
                        analytics_processing_status = analytics_processing_status_list[0]
                        new_analytics_date_as_integer = analytics_processing_status.analytics_date_as_integer
    except Exception as e:
        analytics_processing_status_found = False
        status += "ANALYTICS_PROCESSING_STATUS_ERROR: " + str(e) + " "
        success = False
    # If here, we need to create a new entry
    if create_new_status_entry and positive_value_exists(new_analytics_date_as_integer) and success:
        try:
            defaults = {}
            analytics_processing_status, created = AnalyticsProcessingStatus.objects.using('analytics').\
                update_or_create(
                    analytics_date_as_integer=new_analytics_date_as_integer,
                    defaults=defaults
                )
            analytics_processing_status_found = True
            status += "ANALYTICS_PROCESSING_STATUS_CREATED "
            if positive_value_exists(new_analytics_date_as_integer):
                # Update this value in the settings table: analytics_date_as_integer_last_processed
                # ...to new_analytics_date_as_integer
                results = we_vote_settings_manager.save_setting(
                    setting_name="analytics_date_as_integer_last_processed",
                    setting_value=new_analytics_date_as_integer,
                    value_type=WeVoteSetting.INTEGER)
        except Exception as e:
            success = False
            status += 'CREATE_ANALYTICS_PROCESSING_STATUS_ERROR: ' + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'analytics_processing_status': analytics_processing_status,
        'analytics_processing_status_found': analytics_processing_status_found,
    }
    return results
def retrieve_organization_election_metrics_list(self, google_civic_election_id=0):
    """
    Retrieve OrganizationElectionMetrics entries, optionally limited to one election.

    Bug fix: success was initialized False and never set True, so callers always
    saw failure even when the query succeeded.
    Returns a results dict with 'success', 'status',
    'organization_election_metrics_list' and 'organization_election_metrics_list_found'.
    """
    success = True
    status = ""
    organization_election_metrics_list = []
    try:
        list_query = OrganizationElectionMetrics.objects.using('analytics').all()
        if positive_value_exists(google_civic_election_id):
            list_query = list_query.filter(google_civic_election_id=google_civic_election_id)
        organization_election_metrics_list = list(list_query)
        organization_election_metrics_list_found = True
    except Exception as e:
        organization_election_metrics_list_found = False
        success = False
        status += 'ORGANIZATION_ELECTION_METRICS_ERROR: ' + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'organization_election_metrics_list': organization_election_metrics_list,
        'organization_election_metrics_list_found': organization_election_metrics_list_found,
    }
    return results
def retrieve_sitewide_election_metrics_list(self, google_civic_election_id=0):
    """
    Retrieve SitewideElectionMetrics entries, optionally limited to one election.

    Bug fix: the exception handler discarded the error without recording it in
    status; it is now appended so callers can see why the query failed.
    Returns a results dict with 'success', 'status',
    'sitewide_election_metrics_list' and 'sitewide_election_metrics_list_found'.
    """
    success = False
    status = ""
    sitewide_election_metrics_list = []
    try:
        list_query = SitewideElectionMetrics.objects.using('analytics').all()
        if positive_value_exists(google_civic_election_id):
            list_query = list_query.filter(google_civic_election_id=google_civic_election_id)
        sitewide_election_metrics_list = list(list_query)
        success = True
        sitewide_election_metrics_list_found = True
    except Exception as e:
        sitewide_election_metrics_list_found = False
        status += 'SITEWIDE_ELECTION_METRICS_ERROR: ' + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'sitewide_election_metrics_list': sitewide_election_metrics_list,
        'sitewide_election_metrics_list_found': sitewide_election_metrics_list_found,
    }
    return results
def retrieve_list_of_dates_with_actions(self, date_as_integer, through_date_as_integer=0):
    """
    List the distinct date_as_integer values that have AnalyticsAction entries,
    starting at date_as_integer and (optionally) ending at through_date_as_integer.

    Bug fixes: success was never set True on the success path, and the exception
    was discarded without recording it in status.
    Returns a results dict with 'success', 'status', 'date_as_integer_list'
    and 'date_as_integer_list_found'.
    """
    success = False
    status = ""
    date_list = []
    try:
        date_list_query = AnalyticsAction.objects.using('analytics').all()
        date_list_query = date_list_query.filter(date_as_integer__gte=date_as_integer)
        if positive_value_exists(through_date_as_integer):
            date_list_query = date_list_query.filter(date_as_integer__lte=through_date_as_integer)
        date_list_query = date_list_query.values('date_as_integer').distinct()
        date_list = list(date_list_query)
        date_list_found = True
        success = True
    except Exception as e:
        date_list_found = False
        status += 'LIST_OF_DATES_WITH_ACTIONS_ERROR: ' + str(e) + ' '
    # Flatten the values() dicts to plain integers, dropping empty values
    modified_date_list = []
    for date_as_integer_dict in date_list:
        if positive_value_exists(date_as_integer_dict['date_as_integer']):
            modified_date_list.append(date_as_integer_dict['date_as_integer'])
    results = {
        'success': success,
        'status': status,
        'date_as_integer_list': modified_date_list,
        'date_as_integer_list_found': date_list_found,
    }
    return results
def retrieve_organization_list_with_election_activity(self, google_civic_election_id):
    """
    List the distinct organization_we_vote_id values with any AnalyticsAction
    activity in one election.

    Bug fixes: success was never set True on the success path, and the exception
    was discarded without recording it in status.
    Returns a results dict with 'success', 'status', 'organization_we_vote_id_list'
    and 'organization_we_vote_id_list_found'.
    """
    success = False
    status = ""
    organization_list = []
    try:
        organization_list_query = AnalyticsAction.objects.using('analytics').all()
        organization_list_query = organization_list_query.filter(google_civic_election_id=google_civic_election_id)
        organization_list_query = organization_list_query.values('organization_we_vote_id').distinct()
        organization_list = list(organization_list_query)
        organization_list_found = True
        success = True
    except Exception as e:
        organization_list_found = False
        status += 'ORGANIZATION_LIST_WITH_ELECTION_ACTIVITY_ERROR: ' + str(e) + ' '
    # Flatten the values() dicts to plain we_vote_ids, dropping empty values
    modified_organization_list = []
    for organization_dict in organization_list:
        if positive_value_exists(organization_dict['organization_we_vote_id']):
            modified_organization_list.append(organization_dict['organization_we_vote_id'])
    results = {
        'success': success,
        'status': status,
        'organization_we_vote_id_list': modified_organization_list,
        'organization_we_vote_id_list_found': organization_list_found,
    }
    return results
def retrieve_voter_we_vote_id_list_with_changes_since(self, date_as_integer, through_date_as_integer):
    """
    List the distinct voter_we_vote_id values with AnalyticsAction activity in the
    inclusive date range [date_as_integer, through_date_as_integer].
    Returns a results dict with 'success', 'status', 'voter_we_vote_id_list'
    and 'voter_we_vote_id_list_found'.
    """
    success = True
    status = ""
    voter_rows = []
    try:
        queryset = AnalyticsAction.objects.using('analytics').filter(
            date_as_integer__gte=date_as_integer,
            date_as_integer__lte=through_date_as_integer,
        ).values('voter_we_vote_id').distinct()
        # queryset = queryset[:5]  # TEMP limit to 5
        voter_rows = list(queryset)
        voter_list_found = True
    except Exception:
        success = False
        voter_list_found = False
    # Flatten the values() dicts to plain we_vote_ids, dropping empty values
    voter_we_vote_id_list = [
        row['voter_we_vote_id'] for row in voter_rows
        if positive_value_exists(row['voter_we_vote_id'])
    ]
    return {
        'success': success,
        'status': status,
        'voter_we_vote_id_list': voter_we_vote_id_list,
        'voter_we_vote_id_list_found': voter_list_found,
    }
def save_action(self, action_constant="",
                voter_we_vote_id="", voter_id=0, is_signed_in=False, state_code="",
                organization_we_vote_id="", organization_id=0, google_civic_election_id=0,
                user_agent_string="", is_bot=False, is_mobile=False, is_desktop=False, is_tablet=False,
                ballot_item_we_vote_id="", voter_device_id=None):
    """
    Dispatch to the correct AnalyticsAction save path for this action constant.

    Actions listed in ACTIONS_THAT_REQUIRE_ORGANIZATION_IDS are stored with the
    organization identifiers (type 1); all others are stored without requiring
    them (type 2).
    If a voter_device_id is passed in, it is because this action may be coming from
    https://analytics.wevoteusa.org and hasn't been authenticated yet.
    Confirm that we have a valid voter_device_id. If not, store the action with the
    voter_device_id so we can look up later.
    If either voter identifier comes in, make sure we have both.
    If either organization identifier comes in, make sure we have both.
    """
    # In the future we could reduce clutter in the AnalyticsAction table by only
    # storing one entry per day
    requires_organization_ids = action_constant in ACTIONS_THAT_REQUIRE_ORGANIZATION_IDS
    if requires_organization_ids:
        return self.create_action_type1(action_constant, voter_we_vote_id, voter_id, is_signed_in, state_code,
                                        organization_we_vote_id, organization_id, google_civic_election_id,
                                        user_agent_string, is_bot, is_mobile, is_desktop, is_tablet,
                                        ballot_item_we_vote_id, voter_device_id)
    return self.create_action_type2(action_constant, voter_we_vote_id, voter_id, is_signed_in, state_code,
                                    organization_we_vote_id, google_civic_election_id,
                                    user_agent_string, is_bot, is_mobile, is_desktop, is_tablet,
                                    ballot_item_we_vote_id, voter_device_id)
def save_organization_daily_metrics_values(self, organization_daily_metrics_values):
    """
    Create or update one OrganizationDailyMetrics entry, keyed on
    (organization_we_vote_id, date_as_integer); both keys must be present in
    organization_daily_metrics_values.

    Bug fixes: success was never set True on the success path, and the tuple
    returned by update_or_create was unpacked into metrics_saved (binding the
    model object, not a bool) while the returned 'metrics' stayed an empty
    placeholder. metrics_saved is now a proper bool and 'metrics' carries the
    saved entry.
    Returns a results dict with 'success', 'status', 'metrics_saved' and 'metrics'.
    """
    success = False
    status = ""
    metrics_saved = False
    metrics = OrganizationDailyMetrics()
    missing_required_variables = False
    date_as_integer = 0
    organization_we_vote_id = ''
    if positive_value_exists(organization_daily_metrics_values['organization_we_vote_id']):
        organization_we_vote_id = organization_daily_metrics_values['organization_we_vote_id']
    else:
        missing_required_variables = True
    if positive_value_exists(organization_daily_metrics_values['date_as_integer']):
        date_as_integer = organization_daily_metrics_values['date_as_integer']
    else:
        missing_required_variables = True
    if not missing_required_variables:
        try:
            metrics, created = OrganizationDailyMetrics.objects.using('analytics').update_or_create(
                organization_we_vote_id=organization_we_vote_id,
                date_as_integer=date_as_integer,
                defaults=organization_daily_metrics_values
            )
            metrics_saved = True
            success = True
        except Exception as e:
            success = False
            status += 'ORGANIZATION_DAILY_METRICS_UPDATE_OR_CREATE_FAILED ' + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'metrics_saved': metrics_saved,
        'metrics': metrics,
    }
    return results
def save_organization_election_metrics_values(self, organization_election_metrics_values):
success = False
status = ""
metrics_saved = False
metrics = OrganizationElectionMetrics()
missing_required_variables = False
google_civic_election_id = 0
organization_we_vote_id = ''
if positive_value_exists(organization_election_metrics_values['google_civic_election_id']):
google_civic_election_id = organization_election_metrics_values['google_civic_election_id']
else:
missing_required_variables = True
if positive_value_exists(organization_election_metrics_values['organization_we_vote_id']):
organization_we_vote_id = organization_election_metrics_values['organization_we_vote_id']
else:
missing_required_variables = True
if not missing_required_variables:
try:
metrics_saved, created = OrganizationElectionMetrics.objects.using('analytics').update_or_create(
google_civic_election_id=google_civic_election_id,
organization_we_vote_id__iexact=organization_we_vote_id,
defaults=organization_election_metrics_values
)
except Exception as e:
success = False
status += 'ORGANIZATION_ELECTION_METRICS_UPDATE_OR_CREATE_FAILED ' + str(e) + ' '
results = {
'success': success,
'status': status,
'metrics_saved': metrics_saved,
'metrics': metrics,
}
return results
def save_sitewide_daily_metrics_values(self, sitewide_daily_metrics_values):
success = True
status = ""
sitewide_daily_metrics_saved = False
sitewide_daily_metrics = SitewideDailyMetrics()
if positive_value_exists(sitewide_daily_metrics_values['date_as_integer']):
date_as_integer = sitewide_daily_metrics_values['date_as_integer']
try:
sitewide_daily_metrics, created = SitewideDailyMetrics.objects.using('analytics').update_or_create(
date_as_integer=date_as_integer,
defaults=sitewide_daily_metrics_values
)
sitewide_daily_metrics_saved = True
except Exception as e:
success = False
status += 'SITEWIDE_DAILY_METRICS_UPDATE_OR_CREATE_FAILED ' \
'(' + str(date_as_integer) + '): ' + str(e) + ' '
else:
status += "SITEWIDE_DAILY_METRICS-MISSING_DATE_AS_INTEGER "
results = {
'success': success,
'status': status,
'sitewide_daily_metrics_saved': sitewide_daily_metrics_saved,
'sitewide_daily_metrics': sitewide_daily_metrics,
}
return results
def save_sitewide_election_metrics_values(self, sitewide_election_metrics_values):
success = False
status = ""
metrics_saved = False
metrics = SitewideElectionMetrics()
if positive_value_exists(sitewide_election_metrics_values['google_civic_election_id']):
google_civic_election_id = sitewide_election_metrics_values['google_civic_election_id']
try:
metrics_saved, created = SitewideElectionMetrics.objects.using('analytics').update_or_create(
google_civic_election_id=google_civic_election_id,
defaults=sitewide_election_metrics_values
)
except Exception as e:
success = False
status += 'SITEWIDE_ELECTION_METRICS_UPDATE_OR_CREATE_FAILED ' + str(e) + ' '
results = {
'success': success,
'status': status,
'metrics_saved': metrics_saved,
'metrics': metrics,
}
return results
def save_sitewide_voter_metrics_values_for_one_voter(self, sitewide_voter_metrics_values):
success = False
status = ""
metrics_saved = False
if positive_value_exists(sitewide_voter_metrics_values['voter_we_vote_id']):
voter_we_vote_id = sitewide_voter_metrics_values['voter_we_vote_id']
try:
metrics_saved, created = SitewideVoterMetrics.objects.using('analytics').update_or_create(
voter_we_vote_id__iexact=voter_we_vote_id,
defaults=sitewide_voter_metrics_values
)
success = True
except Exception as e:
success = False
status += 'SITEWIDE_VOTER_METRICS_UPDATE_OR_CREATE_FAILED ' + str(e) + ' '
results = {
'success': success,
'status': status,
'metrics_saved': metrics_saved,
}
return results
else:
status += "SITEWIDE_VOTER_METRICS_SAVE-MISSING_VOTER_WE_VOTE_ID "
results = {
'success': success,
'status': status,
'metrics_saved': metrics_saved,
}
return results
def sitewide_voter_metrics_for_this_voter_updated_this_date(self, voter_we_vote_id, updated_date_integer):
updated_on_date_query = SitewideVoterMetrics.objects.using('analytics').filter(
voter_we_vote_id__iexact=voter_we_vote_id,
last_calculated_date_as_integer=updated_date_integer
)
return positive_value_exists(updated_on_date_query.count())
    def update_first_visit_today_for_all_voters_since_date(self, date_as_integer, through_date_as_integer):
        """For each day in the range, flag each voter's oldest AnalyticsAction entry
        of that day with first_visit_today=True.

        :param date_as_integer: YYYYMMDD start of the range (inclusive)
        :param through_date_as_integer: YYYYMMDD end of the range (inclusive)
        :return: results dict with 'success', 'status' and 'first_visit_today_count'
            (number of entries newly flagged)
        """
        success = True
        status = ""
        distinct_days_list = []
        first_visit_today_count = 0
        # Get distinct days
        try:
            distinct_days_query = AnalyticsAction.objects.using('analytics').all()
            distinct_days_query = distinct_days_query.filter(date_as_integer__gte=date_as_integer)
            distinct_days_query = distinct_days_query.filter(date_as_integer__lte=through_date_as_integer)
            distinct_days_query = distinct_days_query.values('date_as_integer').distinct()
            # distinct_days_query = distinct_days_query[:5] # TEMP limit to 5
            distinct_days_list = list(distinct_days_query)
            distinct_days_found = True
        except Exception as e:
            success = False
            status += "UPDATE_FIRST_VISIT_TODAY-DISTINCT_DAY_QUERY_ERROR " + str(e) + ' '
            distinct_days_found = False
        # Flatten [{'date_as_integer': N}, ...] to [N, ...], dropping empty values
        simple_distinct_days_list = []
        for day_dict in distinct_days_list:
            if positive_value_exists(day_dict['date_as_integer']):
                simple_distinct_days_list.append(day_dict['date_as_integer'])
        # Loop through each day
        for one_date_as_integer in simple_distinct_days_list:
            # Get distinct voters on that day
            if not positive_value_exists(one_date_as_integer):
                continue
            voter_list = []
            try:
                voter_list_query = AnalyticsAction.objects.using('analytics').all()
                voter_list_query = voter_list_query.filter(date_as_integer=one_date_as_integer)
                voter_list_query = voter_list_query.values('voter_we_vote_id').distinct()
                # voter_list_query = voter_list_query[:5] # TEMP limit to 5
                voter_list = list(voter_list_query)
                voter_list_found = True
            except Exception as e:
                success = False
                status += "UPDATE_FIRST_VISIT_TODAY-DISTINCT_VOTER_QUERY_ERROR " + str(e) + ' '
                voter_list_found = False
            simple_voter_list = []
            for voter_dict in voter_list:
                if positive_value_exists(voter_dict['voter_we_vote_id']) and \
                        voter_dict['voter_we_vote_id'] not in simple_voter_list:
                    simple_voter_list.append(voter_dict['voter_we_vote_id'])
            # If the voter query failed, skip this day (errors already recorded above)
            if not voter_list_found:
                continue
            # Loop through each voter per day, and update the first entry for that day with "first_visit_today=True"
            for voter_we_vote_id in simple_voter_list:
                if not positive_value_exists(voter_we_vote_id):
                    continue
                try:
                    first_visit_query = AnalyticsAction.objects.using('analytics').all()
                    first_visit_query = first_visit_query.order_by("id")  # order by oldest first
                    first_visit_query = first_visit_query.filter(date_as_integer=one_date_as_integer)
                    first_visit_query = first_visit_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
                    analytics_action = first_visit_query.first()
                    # NOTE(review): analytics_action can be None here; the resulting
                    # AttributeError would be caught and logged by the except below.
                    if not analytics_action.first_visit_today:
                        analytics_action.first_visit_today = True
                        analytics_action.save()
                        # NOTE(review): first_visit_saved and first_visit_found are
                        # assigned but never read in this method.
                        first_visit_saved = True
                        first_visit_today_count += 1
                except Exception as e:
                    success = False
                    status += "UPDATE_FIRST_VISIT_TODAY-VOTER_ON_DATE_QUERY_ERROR " + str(e) + ' '
                    print_to_log(logger=logger, exception_message_optional=status)
                    first_visit_found = False
        results = {
            'success': success,
            'status': status,
            'first_visit_today_count': first_visit_today_count,
        }
        return results
def update_first_visit_today_for_one_voter(self, voter_we_vote_id):
success = False
status = ""
distinct_days_list = []
first_visit_today_count = 0
# Get distinct days
try:
distinct_days_query = AnalyticsAction.objects.using('analytics').all()
distinct_days_query = distinct_days_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
distinct_days_query = distinct_days_query.values('date_as_integer').distinct()
distinct_days_list = list(distinct_days_query)
except Exception as e:
pass
simple_distinct_days_list = []
for day_dict in distinct_days_list:
if positive_value_exists(day_dict['date_as_integer']):
simple_distinct_days_list.append(day_dict['date_as_integer'])
# Loop through each day
for one_date_as_integer in simple_distinct_days_list:
try:
first_visit_query = AnalyticsAction.objects.using('analytics').all()
first_visit_query = first_visit_query.order_by("id") # order by oldest first
first_visit_query = first_visit_query.filter(date_as_integer=one_date_as_integer)
first_visit_query = first_visit_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
analytics_action = first_visit_query.first()
analytics_action.first_visit_today = True
analytics_action.save()
first_visit_today_count += 1
except Exception as e:
pass
results = {
'success': success,
'status': status,
'first_visit_today_count': first_visit_today_count,
}
return results
class AnalyticsProcessingStatus(models.Model):
    """
    One row per analytics day (YYYYMMDD), tracking which analytics calculation
    steps have finished for that day. Each finished_* flag corresponds to the
    kind of processing named in the comment above it.
    """
    # The day these flags refer to; exactly one row per day (unique=True)
    analytics_date_as_integer = models.PositiveIntegerField(verbose_name="YYYYMMDD", null=False, unique=True)
    # AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID
    finished_augment_analytics_action_with_election_id = models.BooleanField(default=False)
    # AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT
    finished_augment_analytics_action_with_first_visit = models.BooleanField(default=False)
    # CALCULATE_ORGANIZATION_DAILY_METRICS
    finished_calculate_organization_daily_metrics = models.BooleanField(default=False)
    # CALCULATE_ORGANIZATION_ELECTION_METRICS
    finished_calculate_organization_election_metrics = models.BooleanField(default=False)
    # CALCULATE_SITEWIDE_DAILY_METRICS
    finished_calculate_sitewide_daily_metrics = models.BooleanField(default=False)
    # CALCULATE_SITEWIDE_ELECTION_METRICS
    finished_calculate_sitewide_election_metrics = models.BooleanField(default=False)
    # CALCULATE_SITEWIDE_VOTER_METRICS
    finished_calculate_sitewide_voter_metrics = models.BooleanField(default=False)
class AnalyticsProcessed(models.Model):
    """
    When we have finished analyzing one element of the analytics data for a day, store our completion here.
    Unlike AnalyticsProcessingStatus, analytics_date_as_integer is not unique here,
    so one day can have many rows (one per completed unit of work).
    """
    analytics_date_as_integer = models.PositiveIntegerField(verbose_name="YYYYMMDD", null=False, unique=False)
    # Ties this completion record back to the batch machinery that produced it
    batch_process_id = models.PositiveIntegerField(null=True, unique=False)
    batch_process_analytics_chunk_id = models.PositiveIntegerField(null=True, unique=False)
    # The entity this unit of work covered (whichever of these apply)
    organization_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False)
    google_civic_election_id = models.PositiveIntegerField(null=True, unique=False)
    voter_we_vote_id = models.CharField(max_length=255, null=True, unique=False)
    # Presumably one of the CALCULATE_*/AUGMENT_* process names — TODO confirm against callers
    kind_of_process = models.CharField(max_length=50, null=True, unique=False)
class OrganizationDailyMetrics(models.Model):
    """
    This is a summary of the organization activity on one day.
    One row per (organization_we_vote_id, date_as_integer) pair; rows are written
    by save_organization_daily_metrics_values.
    """
    # We store YYYYMMDD as an integer for very fast lookup (ex/ "20170901" for September, 1, 2017)
    date_as_integer = models.PositiveIntegerField(verbose_name="YYYYMMDD of the action",
                                                  null=True, unique=False, db_index=True)
    organization_we_vote_id = models.CharField(verbose_name="we vote permanent id",
                                               max_length=255, null=True, blank=True, unique=False)
    # Visitor counts: *_total are cumulative through this day, *_today cover this day only
    visitors_total = models.PositiveIntegerField(verbose_name="number of visitors, all time", null=True, unique=False)
    authenticated_visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    visitors_today = models.PositiveIntegerField(verbose_name="number of visitors, today", null=True, unique=False)
    authenticated_visitors_today = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    new_visitors_today = models.PositiveIntegerField(verbose_name="new visitors, today", null=True, unique=False)
    voter_guide_entrants_today = models.PositiveIntegerField(verbose_name="first touch, voter guide",
                                                             null=True, unique=False)
    voter_guide_entrants = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_visiting_ballot = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_visiting_ballot = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    # Follower counts for this organization
    followers_total = models.PositiveIntegerField(verbose_name="all time",
                                                  null=True, unique=False)
    new_followers_today = models.PositiveIntegerField(verbose_name="today",
                                                      null=True, unique=False)
    auto_followers_total = models.PositiveIntegerField(verbose_name="all",
                                                       null=True, unique=False)
    new_auto_followers_today = models.PositiveIntegerField(verbose_name="today",
                                                           null=True, unique=False)
    issues_linked_total = models.PositiveIntegerField(verbose_name="organization classifications, all time",
                                                      null=True, unique=False)
    organization_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                null=True, unique=False)

    def generate_date_as_integer(self):
        """Set self.date_as_integer to today's date (Pacific Time) as a YYYYMMDD integer."""
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.date_as_integer = convert_to_int(day_as_string)
        return
class OrganizationElectionMetrics(models.Model):
    """
    A summary of one organization's activity for one election.
    One row per (google_civic_election_id, organization_we_vote_id) pair; rows are
    written by save_organization_election_metrics_values.
    """
    # The unique ID of this election. (Provided by Google Civic)
    google_civic_election_id = models.PositiveIntegerField(
        verbose_name="google civic election id", null=True, unique=False)
    organization_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
    election_day_text = models.CharField(verbose_name="election day", max_length=255, null=True, blank=True)
    visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    authenticated_visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    voter_guide_entrants = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_at_time_of_election = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    new_followers = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    new_auto_followers = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_visited_ballot = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_visited_ballot = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    # Position counts, split by entrants vs followers and by public vs friends-only
    entrants_took_position = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_public_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_public_positions_with_comments = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_friends_only_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_friends_only_positions_with_comments = models.PositiveIntegerField(
        verbose_name="", null=True, unique=False)
    followers_took_position = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_public_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_public_positions_with_comments = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_friends_only_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_friends_only_positions_with_comments = models.PositiveIntegerField(
        verbose_name="", null=True, unique=False)

    def election(self):
        """Return the Election for google_civic_election_id, or None when the id
        is missing, the election is not found, or multiple matches exist."""
        if not self.google_civic_election_id:
            return
        try:
            # We retrieve this from the read-only database (as opposed to the analytics database)
            election = Election.objects.using('readonly').get(google_civic_election_id=self.google_civic_election_id)
        except Election.MultipleObjectsReturned as e:
            return
        except Election.DoesNotExist:
            return
        return election

    def organization(self):
        """Return the Organization for organization_we_vote_id.

        Returns None on lookup errors, but an empty Organization() when
        organization_we_vote_id is blank.
        NOTE(review): that asymmetry (None vs empty object) looks unintentional —
        confirm callers before changing it.
        """
        if positive_value_exists(self.organization_we_vote_id):
            try:
                organization = Organization.objects.using('readonly').get(we_vote_id=self.organization_we_vote_id)
            except Organization.MultipleObjectsReturned as e:
                logger.error("analytics.organization Found multiple")
                return
            except Organization.DoesNotExist:
                logger.error("analytics.organization did not find")
                return
            return organization
        else:
            return Organization()
class SitewideDailyMetrics(models.Model):
    """
    This is a summary of the sitewide activity on one day.
    One row per day, keyed by date_as_integer; rows are written by
    save_sitewide_daily_metrics_values.
    """
    # We store YYYYMMDD as an integer for very fast lookup (ex/ "20170901" for September, 1, 2017)
    date_as_integer = models.PositiveIntegerField(verbose_name="YYYYMMDD of the action",
                                                  null=True, unique=False, db_index=True)
    # Visitor counts: *_total are cumulative through this day, *_today cover this day only
    visitors_total = models.PositiveIntegerField(verbose_name="number of visitors, all time", null=True, unique=False)
    visitors_today = models.PositiveIntegerField(verbose_name="number of visitors, today", null=True, unique=False)
    new_visitors_today = models.PositiveIntegerField(verbose_name="new visitors, today", null=True, unique=False)
    # Entry points: which page was a visitor's first touch
    voter_guide_entrants_today = models.PositiveIntegerField(verbose_name="first touch, voter guide",
                                                             null=True, unique=False)
    welcome_page_entrants_today = models.PositiveIntegerField(verbose_name="first touch, welcome page",
                                                              null=True, unique=False)
    friend_entrants_today = models.PositiveIntegerField(verbose_name="first touch, response to friend",
                                                        null=True, unique=False)
    authenticated_visitors_total = models.PositiveIntegerField(verbose_name="number of visitors, all time",
                                                               null=True, unique=False)
    authenticated_visitors_today = models.PositiveIntegerField(verbose_name="number of visitors, today",
                                                               null=True, unique=False)
    ballot_views_today = models.PositiveIntegerField(verbose_name="number of voters who viewed a ballot today",
                                                     null=True, unique=False)
    voter_guides_viewed_total = models.PositiveIntegerField(verbose_name="number of voter guides viewed, all time",
                                                            null=True, unique=False)
    voter_guides_viewed_today = models.PositiveIntegerField(verbose_name="number of voter guides viewed, today",
                                                            null=True, unique=False)
    issues_followed_total = models.PositiveIntegerField(verbose_name="number of issues followed, all time",
                                                        null=True, unique=False)
    issues_followed_today = models.PositiveIntegerField(verbose_name="issues followed today, today",
                                                        null=True, unique=False)
    issue_follows_total = models.PositiveIntegerField(verbose_name="one follow for one issue, all time",
                                                      null=True, unique=False)
    issue_follows_today = models.PositiveIntegerField(verbose_name="one follow for one issue, today",
                                                      null=True, unique=False)
    organizations_followed_total = models.PositiveIntegerField(verbose_name="voter follow organizations, all time",
                                                               null=True, unique=False)
    organizations_followed_today = models.PositiveIntegerField(verbose_name="voter follow organizations, today",
                                                               null=True, unique=False)
    organizations_auto_followed_total = models.PositiveIntegerField(verbose_name="auto_follow organizations, all",
                                                                    null=True, unique=False)
    organizations_auto_followed_today = models.PositiveIntegerField(verbose_name="auto_follow organizations, today",
                                                                    null=True, unique=False)
    organizations_with_linked_issues = models.PositiveIntegerField(verbose_name="organizations linked to issues, all",
                                                                   null=True, unique=False)
    issues_linked_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    issues_linked_today = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    organizations_signed_in_total = models.PositiveIntegerField(verbose_name="organizations signed in, all",
                                                                null=True, unique=False)
    # Position counts, split by organizations vs individuals and by visibility
    organizations_with_positions = models.PositiveIntegerField(verbose_name="all",
                                                               null=True, unique=False)
    organizations_with_new_positions_today = models.PositiveIntegerField(verbose_name="today",
                                                                         null=True, unique=False)
    organization_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                null=True, unique=False)
    individuals_with_positions = models.PositiveIntegerField(verbose_name="all",
                                                             null=True, unique=False)
    individuals_with_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                    null=True, unique=False)
    individuals_with_friends_only_positions = models.PositiveIntegerField(verbose_name="all",
                                                                          null=True, unique=False)
    friends_only_positions = models.PositiveIntegerField(verbose_name="all",
                                                         null=True, unique=False)
    entered_full_address = models.PositiveIntegerField(verbose_name="all",
                                                       null=True, unique=False)

    def generate_date_as_integer(self):
        """Set self.date_as_integer to today's date (Pacific Time) as a YYYYMMDD integer."""
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.date_as_integer = convert_to_int(day_as_string)
        return
class SitewideElectionMetrics(models.Model):
    """
    This is a summary of the sitewide activity for one election.
    One row per google_civic_election_id; rows are written by
    save_sitewide_election_metrics_values.
    """
    # The unique ID of this election. (Provided by Google Civic)
    google_civic_election_id = models.PositiveIntegerField(
        verbose_name="google civic election id", null=True, unique=False)
    election_day_text = models.CharField(verbose_name="election day", max_length=255, null=True, blank=True)
    visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    authenticated_visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    voter_guide_entries = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    voter_guide_views = models.PositiveIntegerField(verbose_name="one person viewed one voter guide, this election",
                                                    null=True, unique=False)
    voter_guides_viewed = models.PositiveIntegerField(verbose_name="one org, seen at least once, this election",
                                                      null=True, unique=False)
    issues_followed = models.PositiveIntegerField(verbose_name="follow issue connections, all time",
                                                  null=True, unique=False)
    unique_voters_that_followed_organizations = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    unique_voters_that_auto_followed_organizations = models.PositiveIntegerField(verbose_name="",
                                                                                 null=True, unique=False)
    organizations_followed = models.PositiveIntegerField(verbose_name="voter follow organizations, today",
                                                         null=True, unique=False)
    organizations_auto_followed = models.PositiveIntegerField(verbose_name="auto_follow organizations, today",
                                                              null=True, unique=False)
    organizations_signed_in = models.PositiveIntegerField(verbose_name="organizations signed in, all",
                                                          null=True, unique=False)
    # Position counts, split by organizations vs individuals and by visibility
    organizations_with_positions = models.PositiveIntegerField(verbose_name="all",
                                                               null=True, unique=False)
    organization_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                null=True, unique=False)
    individuals_with_positions = models.PositiveIntegerField(verbose_name="all",
                                                             null=True, unique=False)
    individuals_with_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                    null=True, unique=False)
    individuals_with_friends_only_positions = models.PositiveIntegerField(verbose_name="all",
                                                                          null=True, unique=False)
    public_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    public_positions_with_comments = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    friends_only_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    friends_only_positions_with_comments = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entered_full_address = models.PositiveIntegerField(verbose_name="", null=True, unique=False)

    def election(self):
        """Return the Election for google_civic_election_id, or None when the id
        is missing, the election is not found, or multiple matches exist."""
        if not self.google_civic_election_id:
            return
        try:
            # We retrieve this from the read-only database (as opposed to the analytics database)
            election = Election.objects.using('readonly').get(google_civic_election_id=self.google_civic_election_id)
        except Election.MultipleObjectsReturned as e:
            return
        except Election.DoesNotExist:
            return
        return election

    def generate_date_as_integer(self):
        """Set self.date_as_integer to today's date (Pacific Time) as a YYYYMMDD integer.

        NOTE(review): this model declares no date_as_integer field, so this sets a
        plain instance attribute that is never persisted — confirm whether this
        method is still used.
        """
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.date_as_integer = convert_to_int(day_as_string)
        return
class SitewideVoterMetrics(models.Model):
    """
    A single entry per voter summarizing all activity ever done on We Vote.
    Rows are written by save_sitewide_voter_metrics_values_for_one_voter.
    """
    voter_we_vote_id = models.CharField(
        verbose_name="we vote permanent id",
        max_length=255, default=None, null=True, blank=True, unique=False, db_index=True)
    # Lifetime activity counts for this voter
    actions_count = models.PositiveIntegerField(verbose_name="all", null=True, unique=False, db_index=True)
    elections_viewed = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    voter_guides_viewed = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    ballot_visited = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    welcome_visited = models.PositiveIntegerField(verbose_name="all", null=True, unique=False, db_index=True)
    entered_full_address = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    issues_followed = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    organizations_followed = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    time_until_sign_in = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    # Positions/comments split by visibility
    positions_entered_friends_only = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    positions_entered_public = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    comments_entered_friends_only = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    comments_entered_public = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    # Which authentication methods this voter has used
    signed_in_twitter = models.BooleanField(verbose_name='', default=False)
    signed_in_facebook = models.BooleanField(verbose_name='', default=False)
    signed_in_with_email = models.BooleanField(verbose_name='', default=False)
    signed_in_with_sms_phone_number = models.BooleanField(verbose_name='', default=False)
    seconds_on_site = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    days_visited = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    last_action_date = models.DateTimeField(verbose_name='last action date and time', null=True, db_index=True)
    # When these stats were last recalculated (YYYYMMDD); checked by
    # sitewide_voter_metrics_for_this_voter_updated_this_date to skip fresh rows
    last_calculated_date_as_integer = models.PositiveIntegerField(
        verbose_name="YYYYMMDD of the last time stats calculated", null=True, unique=False, db_index=True)

    def generate_last_calculated_date_as_integer(self):
        """Set self.last_calculated_date_as_integer to today (Pacific Time) as YYYYMMDD."""
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.last_calculated_date_as_integer = convert_to_int(day_as_string)
        return
def display_action_constant_human_readable(action_constant):
    """Return the human-readable name (e.g. "BALLOT_VISIT") for an ACTION_* constant.

    Unknown values fall through to "ACTION_CONSTANT:<value>" so callers always get
    a displayable string.

    :param action_constant: one of the ACTION_* analytics constants
    :return: the constant's name without the "ACTION_" prefix, or the fallback string
    """
    # Dict dispatch replaces the previous ~80-branch if-chain: one hash lookup
    # instead of a linear scan. The mapping is built at call time so ACTION_*
    # names resolve exactly when the original if-chain would have resolved them.
    # Assumes the ACTION_* constants hold distinct values (the if-chain's
    # first-match order only mattered if two were equal).
    action_constant_to_name = {
        ACTION_ABOUT_GETTING_STARTED: "ABOUT_GETTING_STARTED",
        ACTION_ABOUT_MOBILE: "ABOUT_MOBILE",
        ACTION_ABOUT_ORGANIZATION: "ABOUT_ORGANIZATION",
        ACTION_ABOUT_TEAM: "ABOUT_TEAM",
        ACTION_ABOUT_VISION: "ABOUT_VISION",
        ACTION_ACCOUNT_PAGE: "ACCOUNT_PAGE",
        ACTION_BALLOT_VISIT: "BALLOT_VISIT",
        ACTION_CANDIDATE: "CANDIDATE",
        ACTION_DONATE_VISIT: "DONATE_VISIT",
        ACTION_ELECTIONS: "ELECTIONS",
        ACTION_EMAIL_AUTHENTICATION_EXISTS: "EMAIL_AUTHENTICATION_EXISTS",
        ACTION_FACEBOOK_AUTHENTICATION_EXISTS: "FACEBOOK_AUTHENTICATION_EXISTS",
        ACTION_FACEBOOK_INVITABLE_FRIENDS: "FACEBOOK_INVITABLE_FRIENDS",
        ACTION_FRIEND_ENTRY: "FRIEND_ENTRY",
        ACTION_GOOGLE_AUTHENTICATION_EXISTS: "GOOGLE_AUTHENTICATION_EXISTS",
        ACTION_INVITE_BY_EMAIL: "INVITE_BY_EMAIL",
        ACTION_ISSUE_FOLLOW: "ISSUE_FOLLOW",
        ACTION_ISSUE_FOLLOW_IGNORE: "ISSUE_FOLLOW_IGNORE",
        ACTION_ISSUE_STOP_FOLLOWING: "ISSUE_STOP_FOLLOWING",
        ACTION_MEASURE: "MEASURE",
        ACTION_MODAL_ISSUES: "MODAL_ISSUES",
        ACTION_MODAL_ORGANIZATIONS: "MODAL_ORGANIZATIONS",
        ACTION_MODAL_POSITIONS: "MODAL_POSITIONS",
        ACTION_MODAL_FRIENDS: "MODAL_FRIENDS",
        ACTION_MODAL_SHARE: "MODAL_SHARE",
        ACTION_MODAL_VOTE: "MODAL_VOTE",
        ACTION_MODAL_VOTER_PLAN: "MODAL_VOTER_PLAN",
        ACTION_NETWORK: "NETWORK",
        ACTION_NEWS: "NEWS",
        ACTION_OFFICE: "OFFICE",
        ACTION_ORGANIZATION_AUTO_FOLLOW: "ORGANIZATION_AUTO_FOLLOW",
        ACTION_ORGANIZATION_FOLLOW: "ORGANIZATION_FOLLOW",
        ACTION_ORGANIZATION_FOLLOW_IGNORE: "ORGANIZATION_FOLLOW_IGNORE",
        ACTION_ORGANIZATION_STOP_FOLLOWING: "ORGANIZATION_STOP_FOLLOWING",
        ACTION_ORGANIZATION_STOP_IGNORING: "ORGANIZATION_STOP_IGNORING",
        ACTION_POSITION_TAKEN: "POSITION_TAKEN",
        ACTION_READY_VISIT: "READY_VISIT",
        ACTION_SEARCH_OPINIONS: "SEARCH_OPINIONS",
        ACTION_SELECT_BALLOT_MODAL: "SELECT_BALLOT_MODAL",
        ACTION_SHARE_BUTTON_COPY: "SHARE_BUTTON_COPY",
        ACTION_SHARE_BUTTON_EMAIL: "SHARE_BUTTON_EMAIL",
        ACTION_SHARE_BUTTON_FACEBOOK: "SHARE_BUTTON_FACEBOOK",
        ACTION_SHARE_BUTTON_FRIENDS: "SHARE_BUTTON_FRIENDS",
        ACTION_SHARE_BUTTON_TWITTER: "SHARE_BUTTON_TWITTER",
        ACTION_SHARE_BALLOT: "SHARE_BALLOT",
        ACTION_SHARE_BALLOT_ALL_OPINIONS: "SHARE_BALLOT_ALL_OPINIONS",
        ACTION_SHARE_CANDIDATE: "SHARE_CANDIDATE",
        ACTION_SHARE_CANDIDATE_ALL_OPINIONS: "SHARE_CANDIDATE_ALL_OPINIONS",
        ACTION_SHARE_MEASURE: "SHARE_MEASURE",
        ACTION_SHARE_MEASURE_ALL_OPINIONS: "SHARE_MEASURE_ALL_OPINIONS",
        ACTION_SHARE_OFFICE: "SHARE_OFFICE",
        ACTION_SHARE_OFFICE_ALL_OPINIONS: "SHARE_OFFICE_ALL_OPINIONS",
        ACTION_SHARE_ORGANIZATION: "SHARE_ORGANIZATION",
        ACTION_SHARE_ORGANIZATION_ALL_OPINIONS: "SHARE_ORGANIZATION_ALL_OPINIONS",
        ACTION_SHARE_READY: "SHARE_READY",
        ACTION_SHARE_READY_ALL_OPINIONS: "SHARE_READY_ALL_OPINIONS",
        ACTION_TWITTER_AUTHENTICATION_EXISTS: "TWITTER_AUTHENTICATION_EXISTS",
        ACTION_UNSUBSCRIBE_EMAIL_PAGE: "UNSUBSCRIBE_EMAIL_PAGE",
        ACTION_UNSUBSCRIBE_SMS_PAGE: "UNSUBSCRIBE_SMS_PAGE",
        ACTION_VIEW_SHARED_BALLOT: "VIEW_SHARED_BALLOT",
        ACTION_VIEW_SHARED_BALLOT_ALL_OPINIONS: "VIEW_SHARED_BALLOT_ALL_OPINIONS",
        ACTION_VIEW_SHARED_CANDIDATE: "VIEW_SHARED_CANDIDATE",
        ACTION_VIEW_SHARED_CANDIDATE_ALL_OPINIONS: "VIEW_SHARED_CANDIDATE_ALL_OPINIONS",
        ACTION_VIEW_SHARED_MEASURE: "VIEW_SHARED_MEASURE",
        ACTION_VIEW_SHARED_MEASURE_ALL_OPINIONS: "VIEW_SHARED_MEASURE_ALL_OPINIONS",
        ACTION_VIEW_SHARED_OFFICE: "VIEW_SHARED_OFFICE",
        ACTION_VIEW_SHARED_OFFICE_ALL_OPINIONS: "VIEW_SHARED_OFFICE_ALL_OPINIONS",
        ACTION_VIEW_SHARED_ORGANIZATION: "VIEW_SHARED_ORGANIZATION",
        ACTION_VIEW_SHARED_ORGANIZATION_ALL_OPINIONS: "VIEW_SHARED_ORGANIZATION_ALL_OPINIONS",
        ACTION_VIEW_SHARED_READY: "VIEW_SHARED_READY",
        ACTION_VIEW_SHARED_READY_ALL_OPINIONS: "VIEW_SHARED_READY_ALL_OPINIONS",
        ACTION_VOTER_FACEBOOK_AUTH: "VOTER_FACEBOOK_AUTH",
        ACTION_VOTER_GUIDE_ENTRY: "VOTER_GUIDE_ENTRY",
        ACTION_VOTER_GUIDE_GET_STARTED: "VOTER_GUIDE_GET_STARTED",
        ACTION_VOTER_GUIDE_VISIT: "VOTER_GUIDE_VISIT",
        ACTION_VOTER_TWITTER_AUTH: "VOTER_TWITTER_AUTH",
        ACTION_WELCOME_ENTRY: "WELCOME_ENTRY",
        ACTION_WELCOME_VISIT: "WELCOME_VISIT",
    }
    if action_constant in action_constant_to_name:
        return action_constant_to_name[action_constant]
    return "ACTION_CONSTANT:" + str(action_constant)
def fetch_action_constant_number_from_constant_string(action_constant_string):
    """
    Map an action constant name (e.g. 'ACTION_BALLOT_VISIT') to its integer value.

    Bug fix: the previous implementation tested
    ``action_constant_string in 'ACTION_...'`` for every constant, i.e. a
    substring-membership check.  Any substring of a constant name matched the
    first constant containing it, and the empty string matched everything and
    incorrectly returned 1.  This version does an exact dictionary lookup.
    For backward compatibility, the comparison stays case-insensitive and a
    missing 'ACTION_' prefix is tolerated (e.g. 'BALLOT_VISIT' still maps to 6,
    as it did under the old substring check).

    :param action_constant_string: name of the constant, case-insensitive,
        with or without the leading 'ACTION_'
    :return: the matching integer constant, or 0 when unknown or empty
    """
    action_constant_by_name = {
        'ACTION_VOTER_GUIDE_VISIT': 1,
        'ACTION_VOTER_GUIDE_ENTRY': 2,
        'ACTION_ORGANIZATION_FOLLOW': 3,
        'ACTION_ORGANIZATION_AUTO_FOLLOW': 4,
        'ACTION_ISSUE_FOLLOW': 5,
        'ACTION_BALLOT_VISIT': 6,
        'ACTION_POSITION_TAKEN': 7,
        'ACTION_VOTER_TWITTER_AUTH': 8,
        'ACTION_VOTER_FACEBOOK_AUTH': 9,
        'ACTION_WELCOME_ENTRY': 10,
        'ACTION_FRIEND_ENTRY': 11,
        'ACTION_WELCOME_VISIT': 12,
        'ACTION_ORGANIZATION_FOLLOW_IGNORE': 13,
        'ACTION_ORGANIZATION_STOP_FOLLOWING': 14,
        'ACTION_ISSUE_FOLLOW_IGNORE': 15,
        'ACTION_ISSUE_STOP_FOLLOWING': 16,
        'ACTION_MODAL_ISSUES': 17,
        'ACTION_MODAL_ORGANIZATIONS': 18,
        'ACTION_MODAL_POSITIONS': 19,
        'ACTION_MODAL_FRIENDS': 20,
        'ACTION_MODAL_SHARE': 21,
        'ACTION_MODAL_VOTE': 22,
        'ACTION_NETWORK': 23,
        'ACTION_FACEBOOK_INVITABLE_FRIENDS': 24,
        'ACTION_DONATE_VISIT': 25,
        'ACTION_ACCOUNT_PAGE': 26,
        'ACTION_INVITE_BY_EMAIL': 27,
        'ACTION_ABOUT_GETTING_STARTED': 28,
        'ACTION_ABOUT_VISION': 29,
        'ACTION_ABOUT_ORGANIZATION': 30,
        'ACTION_ABOUT_TEAM': 31,
        'ACTION_ABOUT_MOBILE': 32,
        'ACTION_OFFICE': 33,
        'ACTION_CANDIDATE': 34,
        'ACTION_VOTER_GUIDE_GET_STARTED': 35,
        'ACTION_FACEBOOK_AUTHENTICATION_EXISTS': 36,
        'ACTION_GOOGLE_AUTHENTICATION_EXISTS': 37,
        'ACTION_TWITTER_AUTHENTICATION_EXISTS': 38,
        'ACTION_EMAIL_AUTHENTICATION_EXISTS': 39,
        'ACTION_ELECTIONS': 40,
        'ACTION_ORGANIZATION_STOP_IGNORING': 41,
        'ACTION_MODAL_VOTER_PLAN': 42,
        'ACTION_READY_VISIT': 43,
        'ACTION_SELECT_BALLOT_MODAL': 44,
        'ACTION_SHARE_BUTTON_COPY': 45,
        'ACTION_SHARE_BUTTON_EMAIL': 46,
        'ACTION_SHARE_BUTTON_FACEBOOK': 47,
        'ACTION_SHARE_BUTTON_FRIENDS': 48,
        'ACTION_SHARE_BUTTON_TWITTER': 49,
        'ACTION_SHARE_BALLOT': 50,
        'ACTION_SHARE_BALLOT_ALL_OPINIONS': 51,
        'ACTION_SHARE_CANDIDATE': 52,
        'ACTION_SHARE_CANDIDATE_ALL_OPINIONS': 53,
        'ACTION_SHARE_MEASURE': 54,
        'ACTION_SHARE_MEASURE_ALL_OPINIONS': 55,
        'ACTION_SHARE_OFFICE': 56,
        'ACTION_SHARE_OFFICE_ALL_OPINIONS': 57,
        'ACTION_SHARE_READY': 58,
        'ACTION_SHARE_READY_ALL_OPINIONS': 59,
        'ACTION_VIEW_SHARED_BALLOT': 60,
        'ACTION_VIEW_SHARED_BALLOT_ALL_OPINIONS': 61,
        'ACTION_VIEW_SHARED_CANDIDATE': 62,
        'ACTION_VIEW_SHARED_CANDIDATE_ALL_OPINIONS': 63,
        'ACTION_VIEW_SHARED_MEASURE': 64,
        'ACTION_VIEW_SHARED_MEASURE_ALL_OPINIONS': 65,
        'ACTION_VIEW_SHARED_OFFICE': 66,
        'ACTION_VIEW_SHARED_OFFICE_ALL_OPINIONS': 67,
        'ACTION_VIEW_SHARED_READY': 68,
        'ACTION_VIEW_SHARED_READY_ALL_OPINIONS': 69,
        'ACTION_SEARCH_OPINIONS': 70,
        'ACTION_UNSUBSCRIBE_EMAIL_PAGE': 71,
        'ACTION_UNSUBSCRIBE_SMS_PAGE': 72,
        'ACTION_MEASURE': 73,
        'ACTION_NEWS': 74,
        'ACTION_SHARE_ORGANIZATION': 75,
        'ACTION_SHARE_ORGANIZATION_ALL_OPINIONS': 76,
        'ACTION_VIEW_SHARED_ORGANIZATION': 77,
        'ACTION_VIEW_SHARED_ORGANIZATION_ALL_OPINIONS': 78,
    }
    if not action_constant_string:
        # Previously '' matched every constant via substring check and returned 1
        return 0
    lookup_key = action_constant_string.upper()
    if not lookup_key.startswith('ACTION_'):
        # Tolerate callers that pass the name without the 'ACTION_' prefix
        lookup_key = 'ACTION_' + lookup_key
    return action_constant_by_name.get(lookup_key, 0)
| {
"repo_name": "wevote/WeVoteServer",
"path": "analytics/models.py",
"copies": "1",
"size": "108975",
"license": "mit",
"hash": -8721606415581617000,
"line_mean": 48.1986455982,
"line_max": 120,
"alpha_frac": 0.6223996329,
"autogenerated": false,
"ratio": 4.005550246269205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022737885825165607,
"num_lines": 2215
} |
# analytics/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
from django.db.models import Q
from django.utils.timezone import localtime, now
from election.models import Election
from exception.models import print_to_log
from follow.models import FollowOrganizationList
from organization.models import Organization
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
# Integer constants stored in AnalyticsAction.action_constant, one per kind of
# tracked user action.  These values are persisted in the analytics database,
# so never renumber an existing constant -- only append new ones.  Keep them in
# sync with display_action_constant_human_readable and
# fetch_action_constant_number_from_constant_string below.
ACTION_VOTER_GUIDE_VISIT = 1
ACTION_VOTER_GUIDE_ENTRY = 2 # DEPRECATED: Now we use ACTION_VOTER_GUIDE_VISIT + first_visit
ACTION_ORGANIZATION_FOLLOW = 3
ACTION_ORGANIZATION_AUTO_FOLLOW = 4
ACTION_ISSUE_FOLLOW = 5
ACTION_BALLOT_VISIT = 6
ACTION_POSITION_TAKEN = 7
ACTION_VOTER_TWITTER_AUTH = 8
ACTION_VOTER_FACEBOOK_AUTH = 9
ACTION_WELCOME_ENTRY = 10
ACTION_FRIEND_ENTRY = 11
ACTION_WELCOME_VISIT = 12
ACTION_ORGANIZATION_FOLLOW_IGNORE = 13
ACTION_ORGANIZATION_STOP_FOLLOWING = 14
ACTION_ISSUE_FOLLOW_IGNORE = 15
ACTION_ISSUE_STOP_FOLLOWING = 16
ACTION_MODAL_ISSUES = 17
ACTION_MODAL_ORGANIZATIONS = 18
ACTION_MODAL_POSITIONS = 19
ACTION_MODAL_FRIENDS = 20
ACTION_MODAL_SHARE = 21
ACTION_MODAL_VOTE = 22
ACTION_NETWORK = 23
ACTION_FACEBOOK_INVITABLE_FRIENDS = 24
ACTION_DONATE_VISIT = 25
ACTION_ACCOUNT_PAGE = 26
ACTION_INVITE_BY_EMAIL = 27
ACTION_ABOUT_GETTING_STARTED = 28
ACTION_ABOUT_VISION = 29
ACTION_ABOUT_ORGANIZATION = 30
ACTION_ABOUT_TEAM = 31
ACTION_ABOUT_MOBILE = 32
ACTION_OFFICE = 33
ACTION_CANDIDATE = 34
ACTION_VOTER_GUIDE_GET_STARTED = 35
ACTION_FACEBOOK_AUTHENTICATION_EXISTS = 36
ACTION_GOOGLE_AUTHENTICATION_EXISTS = 37
ACTION_TWITTER_AUTHENTICATION_EXISTS = 38
ACTION_EMAIL_AUTHENTICATION_EXISTS = 39
ACTION_ELECTIONS = 40
# Actions that are meaningless without an attached organization id
# (see AnalyticsManager.create_action_type1, which requires organization_we_vote_id)
ACTIONS_THAT_REQUIRE_ORGANIZATION_IDS = \
    [ACTION_ORGANIZATION_AUTO_FOLLOW,
     ACTION_ORGANIZATION_FOLLOW, ACTION_ORGANIZATION_FOLLOW_IGNORE, ACTION_ORGANIZATION_STOP_FOLLOWING,
     ACTION_VOTER_GUIDE_VISIT]
logger = wevote_functions.admin.get_logger(__name__)
class AnalyticsAction(models.Model):
    """
    One incoming user action we want to track, stored in the 'analytics' database.
    Each row records which voter did what (action_constant), when (exact_time /
    date_as_integer) and, where relevant, against which organization, ballot item
    and election.
    """
    action_constant = models.PositiveSmallIntegerField(
        verbose_name="constant representing action", null=True, unique=False, db_index=True)
    exact_time = models.DateTimeField(verbose_name='date and time of action', null=False, auto_now_add=True)
    # We store YYYYMMDD as an integer for very fast lookup (ex/ "20170901" for September, 1, 2017)
    date_as_integer = models.PositiveIntegerField(
        verbose_name="YYYYMMDD of the action", null=True, unique=False, db_index=True)
    # We store both the we_vote_id and the internal integer id of the voter
    voter_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=False,
        db_index=True)
    voter_id = models.PositiveIntegerField(verbose_name="voter internal id", null=True, unique=False)
    # This voter is linked to a sign in account (Facebook, Twitter, Google, etc.)
    is_signed_in = models.BooleanField(verbose_name='', default=False)
    state_code = models.CharField(
        verbose_name="state_code", max_length=255, null=True, blank=True, unique=False)
    organization_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False, db_index=True)
    organization_id = models.PositiveIntegerField(null=True, blank=True)
    ballot_item_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
    # The unique ID of this election. (Provided by Google Civic)
    google_civic_election_id = models.PositiveIntegerField(
        verbose_name="google civic election id", null=True, unique=False, db_index=True)
    # This entry was the first entry on this day, used for tracking direct links to We Vote
    first_visit_today = models.BooleanField(verbose_name='', default=False)
    # We only want to store voter_device_id if we haven't verified the session yet. Set to null once verified.
    voter_device_id = models.CharField(
        verbose_name="voter_device_id of initiating voter", max_length=255, null=True, blank=True, unique=False)
    # When analytics comes into Analytics Application server, we need to authenticate the request. We authenticate
    # the voter_device_id against a read-only database server, which might run seconds behind the master. Because of
    # this, if a voter_device_id is not found the first time, we want to try again minutes later. BUT if that
    # fails we want to invalidate the analytics.
    authentication_failed_twice = models.BooleanField(verbose_name='', default=False)
    user_agent = models.CharField(verbose_name="https request user agent", max_length=255, null=True, blank=True,
                                  unique=False)
    is_bot = models.BooleanField(verbose_name="request came from web-bots or spider", default=False)
    is_mobile = models.BooleanField(verbose_name="request came from mobile device", default=False)
    is_desktop = models.BooleanField(verbose_name="request came from desktop device", default=False)
    is_tablet = models.BooleanField(verbose_name="request came from tablet device", default=False)
    # We override the save function to auto-generate date_as_integer
    def save(self, *args, **kwargs):
        """Normalize date_as_integer, generating it from today's date if missing."""
        if self.date_as_integer:
            self.date_as_integer = convert_to_int(self.date_as_integer)
        if self.date_as_integer == "" or self.date_as_integer is None:  # If there isn't a value...
            self.generate_date_as_integer()
        super(AnalyticsAction, self).save(*args, **kwargs)
    def display_action_constant_human_readable(self):
        """Return the human-readable name for this row's action_constant."""
        return display_action_constant_human_readable(self.action_constant)
    def generate_date_as_integer(self):
        """Set date_as_integer to today's date encoded as YYYYMMDD."""
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.date_as_integer = convert_to_int(day_as_string)
        return
    def election(self):
        """Return the related Election (from the readonly db), or None if missing/ambiguous."""
        if not self.google_civic_election_id:
            return
        try:
            election = Election.objects.using('readonly').get(google_civic_election_id=self.google_civic_election_id)
        except Election.MultipleObjectsReturned as e:
            logger.error("position.election Found multiple")
            return
        except Election.DoesNotExist:
            return
        return election
    def organization(self):
        """Return the related Organization (from the readonly db), or None if missing/ambiguous."""
        if not self.organization_we_vote_id:
            return
        try:
            organization = Organization.objects.using('readonly').get(we_vote_id=self.organization_we_vote_id)
        except Organization.MultipleObjectsReturned as e:
            logger.error("analytics.organization Found multiple")
            return
        except Organization.DoesNotExist:
            logger.error("analytics.organization did not find")
            return
        return organization
class AnalyticsCountManager(models.Model):
    """
    Read-only helpers that compute aggregate counts over the AnalyticsAction
    table (always queried through the 'analytics' database connection).
    All fetch_* methods swallow query exceptions and return the initial value
    of count_result (None for most methods, 0 for the last two) on failure.
    NOTE(review): this class subclasses models.Model but declares no fields;
    it appears to be used purely as a namespace for these helpers -- confirm
    a database table is actually intended here.
    """
    def fetch_ballot_views(self, google_civic_election_id=0, limit_to_one_date_as_integer=0):
        """
        Count the number of voters that viewed at least one ballot
        :param google_civic_election_id: optional election filter
        :param limit_to_one_date_as_integer: optional YYYYMMDD filter for a single day
        :return: distinct voter count, or None if the query failed
        """
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(action_constant=ACTION_BALLOT_VISIT)
            if positive_value_exists(google_civic_election_id):
                count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            if positive_value_exists(limit_to_one_date_as_integer):
                count_query = count_query.filter(date_as_integer=limit_to_one_date_as_integer)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_organization_entrants_list(self, organization_we_vote_id, google_civic_election_id=0):
        """
        List the voter_we_vote_ids of voters whose first visit of the day was to
        this organization's voter guide (or an auto-follow of it).
        :param organization_we_vote_id: organization whose entrants we want
        :param google_civic_election_id: optional election filter
        :return: list of voter_we_vote_id strings (empty on failure)
        """
        voters_who_visited_organization_first_simple_list = []
        try:
            first_visit_query = AnalyticsAction.objects.using('analytics').all()
            first_visit_query = first_visit_query.filter(Q(action_constant=ACTION_VOTER_GUIDE_VISIT) |
                                                         Q(action_constant=ACTION_ORGANIZATION_AUTO_FOLLOW))
            first_visit_query = first_visit_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
            if positive_value_exists(google_civic_election_id):
                first_visit_query = first_visit_query.filter(google_civic_election_id=google_civic_election_id)
            first_visit_query = first_visit_query.filter(first_visit_today=True)
            first_visit_query = first_visit_query.values('voter_we_vote_id').distinct()
            voters_who_visited_organization_first = list(first_visit_query)
            for voter_dict in voters_who_visited_organization_first:
                if positive_value_exists(voter_dict['voter_we_vote_id']):
                    voters_who_visited_organization_first_simple_list.append(voter_dict['voter_we_vote_id'])
        except Exception as e:
            pass
        return voters_who_visited_organization_first_simple_list
    def fetch_organization_entrants_took_position(
            self, organization_we_vote_id, google_civic_election_id=0):
        """
        Count the voters who entered on an organization's voter guide, and then took a position
        :param organization_we_vote_id: organization whose entrants we want
        :param google_civic_election_id: optional election filter
        :return: distinct voter count, 0 if there were no entrants, or None if the query failed
        """
        voters_who_visited_organization_first_simple_list = \
            self.fetch_organization_entrants_list(organization_we_vote_id, google_civic_election_id)
        if not len(voters_who_visited_organization_first_simple_list):
            return 0
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(action_constant=ACTION_POSITION_TAKEN)
            if positive_value_exists(google_civic_election_id):
                count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            count_query = count_query.filter(voter_we_vote_id__in=voters_who_visited_organization_first_simple_list)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_organization_entrants_visited_ballot(
            self, organization_we_vote_id, google_civic_election_id=0):
        """
        Count the voters who entered on an organization's voter guide, and then who proceeded to ballot
        :param organization_we_vote_id: organization whose entrants we want
        :param google_civic_election_id: optional election filter
        :return: distinct voter count, 0 if there were no entrants, or None if the query failed
        """
        voters_who_visited_organization_first_simple_list = \
            self.fetch_organization_entrants_list(organization_we_vote_id, google_civic_election_id)
        if not len(voters_who_visited_organization_first_simple_list):
            return 0
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(action_constant=ACTION_BALLOT_VISIT)
            if positive_value_exists(google_civic_election_id):
                count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            count_query = count_query.filter(voter_we_vote_id__in=voters_who_visited_organization_first_simple_list)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_organization_followers_took_position(self, organization_we_vote_id, google_civic_election_id=0):
        """Count followers of the organization who took at least one position."""
        follow_organization_list = FollowOrganizationList()
        return_voter_we_vote_id = True
        voter_we_vote_ids_of_organization_followers = \
            follow_organization_list.fetch_followers_list_by_organization_we_vote_id(
                organization_we_vote_id, return_voter_we_vote_id)
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(action_constant=ACTION_POSITION_TAKEN)
            if positive_value_exists(google_civic_election_id):
                count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            count_query = count_query.filter(voter_we_vote_id__in=voter_we_vote_ids_of_organization_followers)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_organization_followers_visited_ballot(self, organization_we_vote_id, google_civic_election_id=0):
        """Count followers of the organization who visited a ballot."""
        follow_organization_list = FollowOrganizationList()
        return_voter_we_vote_id = True
        voter_we_vote_ids_of_organization_followers = \
            follow_organization_list.fetch_followers_list_by_organization_we_vote_id(
                organization_we_vote_id, return_voter_we_vote_id)
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(action_constant=ACTION_BALLOT_VISIT)
            if positive_value_exists(google_civic_election_id):
                count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            count_query = count_query.filter(voter_we_vote_id__in=voter_we_vote_ids_of_organization_followers)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_visitors(self, google_civic_election_id=0, organization_we_vote_id='',
                       limit_to_one_date_as_integer=0, count_through_this_date_as_integer=0,
                       limit_to_authenticated=False):
        """
        Count distinct visitors, optionally restricted to an election, an
        organization's voter-guide visits, a single day or a cumulative
        through-date, and/or signed-in voters only.
        """
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            if positive_value_exists(google_civic_election_id):
                count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            if positive_value_exists(organization_we_vote_id):
                count_query = count_query.filter(action_constant=ACTION_VOTER_GUIDE_VISIT)
                count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
            if positive_value_exists(limit_to_one_date_as_integer):
                count_query = count_query.filter(date_as_integer=limit_to_one_date_as_integer)
            elif positive_value_exists(count_through_this_date_as_integer):
                count_query = count_query.filter(date_as_integer__lte=count_through_this_date_as_integer)
            if limit_to_authenticated:
                count_query = count_query.filter(is_signed_in=True)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_visitors_first_visit_to_organization_in_election(self, organization_we_vote_id, google_civic_election_id):
        """
        Entries are marked "first_visit_today" if it is the first visit in one day
        :param organization_we_vote_id: organization whose visitors we want
        :param google_civic_election_id: election filter (required)
        :return: distinct voter count, or None if the query failed
        """
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(Q(action_constant=ACTION_VOTER_GUIDE_VISIT) |
                                             Q(action_constant=ACTION_ORGANIZATION_AUTO_FOLLOW))
            count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
            count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            count_query = count_query.filter(first_visit_today=True)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_new_followers_in_election(self, google_civic_election_id, organization_we_vote_id=""):
        """
        Count distinct voters who followed (manually or automatically) during this election.
        :param organization_we_vote_id: optional organization filter
        :param google_civic_election_id: election filter (required)
        :return: distinct voter count, or None if the query failed
        """
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(Q(action_constant=ACTION_ORGANIZATION_FOLLOW) |
                                             Q(action_constant=ACTION_ORGANIZATION_AUTO_FOLLOW))
            if positive_value_exists(organization_we_vote_id):
                count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
            count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_new_auto_followers_in_election(self, google_civic_election_id, organization_we_vote_id=""):
        """
        Count distinct voters who auto-followed during this election.
        :param organization_we_vote_id: optional organization filter
        :param google_civic_election_id: election filter (required)
        :return: distinct voter count, or None if the query failed
        """
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(action_constant=ACTION_ORGANIZATION_AUTO_FOLLOW)
            if positive_value_exists(organization_we_vote_id):
                count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
            count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            count_query = count_query.values('voter_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_voter_action_count(self, voter_we_vote_id):
        """Count every AnalyticsAction row recorded for this voter."""
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_voter_ballot_visited(self, voter_we_vote_id):
        """Count ballot visits recorded for this voter."""
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            count_query = count_query.filter(action_constant=ACTION_BALLOT_VISIT)
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_voter_welcome_visited(self, voter_we_vote_id):
        """Count welcome-page visits recorded for this voter."""
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            count_query = count_query.filter(action_constant=ACTION_WELCOME_VISIT)
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_voter_days_visited(self, voter_we_vote_id):
        """Count the distinct days (date_as_integer values) on which this voter acted."""
        count_result = None
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            count_query = count_query.values('date_as_integer').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_voter_last_action_date(self, voter_we_vote_id):
        """Return the exact_time of this voter's most recent action, or None."""
        last_action_date = None
        try:
            fetch_query = AnalyticsAction.objects.using('analytics').all()
            fetch_query = fetch_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            fetch_query = fetch_query.order_by('-id')
            fetch_query = fetch_query[:1]
            fetch_result = list(fetch_query)
            analytics_action = fetch_result.pop()
            last_action_date = analytics_action.exact_time
        except Exception as e:
            pass
        return last_action_date
    def fetch_voter_voter_guides_viewed(self, voter_we_vote_id):
        """Count the distinct organizations whose voter guides this voter viewed."""
        count_result = 0
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            count_query = count_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            count_query = count_query.filter(action_constant=ACTION_VOTER_GUIDE_VISIT)
            count_query = count_query.values('organization_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
    def fetch_voter_guides_viewed(
            self, google_civic_election_id=0, limit_to_one_date_as_integer=0, count_through_this_date_as_integer=0):
        """Count the distinct organizations whose voter guides were viewed, with optional election/date filters."""
        count_result = 0
        try:
            count_query = AnalyticsAction.objects.using('analytics').all()
            if positive_value_exists(google_civic_election_id):
                count_query = count_query.filter(google_civic_election_id=google_civic_election_id)
            count_query = count_query.filter(action_constant=ACTION_VOTER_GUIDE_VISIT)
            if positive_value_exists(limit_to_one_date_as_integer):
                count_query = count_query.filter(date_as_integer=limit_to_one_date_as_integer)
            elif positive_value_exists(count_through_this_date_as_integer):
                count_query = count_query.filter(date_as_integer__lte=count_through_this_date_as_integer)
            count_query = count_query.values('organization_we_vote_id').distinct()
            count_result = count_query.count()
        except Exception as e:
            pass
        return count_result
class AnalyticsManager(models.Model):
def create_action_type1(
self, action_constant, voter_we_vote_id, voter_id, is_signed_in, state_code,
organization_we_vote_id, organization_id, google_civic_election_id,
user_agent_string, is_bot, is_mobile, is_desktop, is_tablet,
ballot_item_we_vote_id="", voter_device_id=None):
"""
Create AnalyticsAction data
"""
success = True
status = "ACTION_CONSTANT:" + display_action_constant_human_readable(action_constant) + " "
action_saved = False
action = AnalyticsAction()
missing_required_variable = False
if not action_constant:
missing_required_variable = True
status += 'MISSING_ACTION_CONSTANT '
if not voter_we_vote_id:
missing_required_variable = True
status += 'MISSING_VOTER_WE_VOTE_ID '
if not organization_we_vote_id:
missing_required_variable = True
status += 'MISSING_ORGANIZATION_WE_VOTE_ID '
if missing_required_variable:
results = {
'success': success,
'status': status,
'action_saved': action_saved,
'action': action,
}
return results
try:
action = AnalyticsAction.objects.using('analytics').create(
action_constant=action_constant,
voter_we_vote_id=voter_we_vote_id,
voter_id=voter_id,
is_signed_in=is_signed_in,
state_code=state_code,
organization_we_vote_id=organization_we_vote_id,
organization_id=organization_id,
google_civic_election_id=google_civic_election_id,
ballot_item_we_vote_id=ballot_item_we_vote_id,
user_agent=user_agent_string,
is_bot=is_bot,
is_mobile=is_mobile,
is_desktop=is_desktop,
is_tablet=is_tablet
)
success = True
action_saved = True
status += 'ACTION_TYPE1_SAVED '
except Exception as e:
success = False
status += 'COULD_NOT_SAVE_ACTION_TYPE1 '
results = {
'success': success,
'status': status,
'action_saved': action_saved,
'action': action,
}
return results
def create_action_type2(
self, action_constant, voter_we_vote_id, voter_id, is_signed_in, state_code, google_civic_election_id,
user_agent_string, is_bot, is_mobile, is_desktop, is_tablet,
ballot_item_we_vote_id, voter_device_id=None):
"""
Create AnalyticsAction data
"""
success = True
status = "ACTION_CONSTANT:" + display_action_constant_human_readable(action_constant) + " "
action_saved = False
action = AnalyticsAction()
missing_required_variable = False
if not action_constant:
missing_required_variable = True
status += 'MISSING_ACTION_CONSTANT '
if not voter_we_vote_id:
missing_required_variable = True
status += 'MISSING_VOTER_WE_VOTE_ID '
if missing_required_variable:
results = {
'success': success,
'status': status,
'action_saved': action_saved,
'action': action,
}
return results
try:
action = AnalyticsAction.objects.using('analytics').create(
action_constant=action_constant,
voter_we_vote_id=voter_we_vote_id,
voter_id=voter_id,
is_signed_in=is_signed_in,
state_code=state_code,
google_civic_election_id=google_civic_election_id,
ballot_item_we_vote_id=ballot_item_we_vote_id,
user_agent=user_agent_string,
is_bot=is_bot,
is_mobile=is_mobile,
is_desktop=is_desktop,
is_tablet=is_tablet
)
success = True
action_saved = True
status += 'ACTION_TYPE2_SAVED '
except Exception as e:
success = False
status += 'COULD_NOT_SAVE_ACTION_TYPE2 '
results = {
'success': success,
'status': status,
'action_saved': action_saved,
'action': action,
}
return results
def retrieve_analytics_action_list(self, voter_we_vote_id='', google_civic_election_id=0):
success = False
status = ""
analytics_action_list = []
try:
list_query = AnalyticsAction.objects.using('analytics').all()
if positive_value_exists(voter_we_vote_id):
list_query = list_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
if positive_value_exists(google_civic_election_id):
list_query = list_query.filter(google_civic_election_id=google_civic_election_id)
analytics_action_list = list(list_query)
analytics_action_list_found = True
except Exception as e:
analytics_action_list_found = False
results = {
'success': success,
'status': status,
'analytics_action_list': analytics_action_list,
'analytics_action_list_found': analytics_action_list_found,
}
return results
def retrieve_organization_election_metrics_list(self, google_civic_election_id=0):
success = False
status = ""
organization_election_metrics_list = []
try:
list_query = OrganizationElectionMetrics.objects.using('analytics').all()
if positive_value_exists(google_civic_election_id):
list_query = list_query.filter(google_civic_election_id=google_civic_election_id)
organization_election_metrics_list = list(list_query)
organization_election_metrics_list_found = True
except Exception as e:
organization_election_metrics_list_found = False
results = {
'success': success,
'status': status,
'organization_election_metrics_list': organization_election_metrics_list,
'organization_election_metrics_list_found': organization_election_metrics_list_found,
}
return results
def retrieve_sitewide_election_metrics_list(self, google_civic_election_id=0):
success = False
status = ""
sitewide_election_metrics_list = []
try:
list_query = SitewideElectionMetrics.objects.using('analytics').all()
if positive_value_exists(google_civic_election_id):
list_query = list_query.filter(google_civic_election_id=google_civic_election_id)
sitewide_election_metrics_list = list(list_query)
success = True
sitewide_election_metrics_list_found = True
except Exception as e:
sitewide_election_metrics_list_found = False
results = {
'success': success,
'status': status,
'sitewide_election_metrics_list': sitewide_election_metrics_list,
'sitewide_election_metrics_list_found': sitewide_election_metrics_list_found,
}
return results
def retrieve_list_of_dates_with_actions(self, date_as_integer, through_date_as_integer=0):
success = False
status = ""
date_list = []
try:
date_list_query = AnalyticsAction.objects.using('analytics').all()
date_list_query = date_list_query.filter(date_as_integer__gte=date_as_integer)
if positive_value_exists(through_date_as_integer):
date_list_query = date_list_query.filter(date_as_integer__lte=through_date_as_integer)
date_list_query = date_list_query.values('date_as_integer').distinct()
date_list = list(date_list_query)
date_list_found = True
except Exception as e:
date_list_found = False
modified_date_list = []
for date_as_integer_dict in date_list:
if positive_value_exists(date_as_integer_dict['date_as_integer']):
modified_date_list.append(date_as_integer_dict['date_as_integer'])
results = {
'success': success,
'status': status,
'date_as_integer_list': modified_date_list,
'date_as_integer_list_found': date_list_found,
}
return results
def retrieve_organization_list_with_election_activity(self, google_civic_election_id):
    """
    Return the distinct organization_we_vote_ids that have AnalyticsAction entries
    for one election.

    :param google_civic_election_id: the election to inspect
    :return: results dict with 'success', 'status',
        'organization_we_vote_id_list' and 'organization_we_vote_id_list_found'
    """
    success = False
    status = ""
    organization_list = []
    organization_list_found = False
    try:
        organization_list_query = AnalyticsAction.objects.using('analytics').all()
        organization_list_query = organization_list_query.filter(
            google_civic_election_id=google_civic_election_id)
        organization_list_query = organization_list_query.values('organization_we_vote_id').distinct()
        organization_list = list(organization_list_query)
        organization_list_found = True
        # Bug fix: success was previously never set to True, so callers always saw failure
        success = True
    except Exception as e:
        status += "RETRIEVE_ORGANIZATION_LIST_WITH_ELECTION_ACTIVITY_ERROR " + str(e) + ' '
        organization_list_found = False

    # Flatten the values() dicts into plain we_vote_id strings, dropping empty values
    modified_organization_list = []
    for organization_dict in organization_list:
        if positive_value_exists(organization_dict['organization_we_vote_id']):
            modified_organization_list.append(organization_dict['organization_we_vote_id'])

    results = {
        'success': success,
        'status': status,
        'organization_we_vote_id_list': modified_organization_list,
        'organization_we_vote_id_list_found': organization_list_found,
    }
    return results
def retrieve_voter_we_vote_id_list_with_changes_since(self, date_as_integer, through_date_as_integer):
    """
    Return the distinct voter_we_vote_ids that have AnalyticsAction entries within
    the inclusive [date_as_integer, through_date_as_integer] window.

    :return: results dict with 'success', 'status',
        'voter_we_vote_id_list' and 'voter_we_vote_id_list_found'
    """
    status = ""
    success = True
    voter_dicts = []
    voter_list_found = False
    try:
        voter_query = AnalyticsAction.objects.using('analytics').all() \
            .filter(date_as_integer__gte=date_as_integer) \
            .filter(date_as_integer__lte=through_date_as_integer) \
            .values('voter_we_vote_id').distinct()
        voter_dicts = list(voter_query)
        voter_list_found = True
    except Exception as e:
        success = False
        voter_list_found = False
    # Flatten the values() dicts into plain we_vote_id strings, dropping empty values
    voter_we_vote_id_list = [entry['voter_we_vote_id'] for entry in voter_dicts
                             if positive_value_exists(entry['voter_we_vote_id'])]
    return {
        'success': success,
        'status': status,
        'voter_we_vote_id_list': voter_we_vote_id_list,
        'voter_we_vote_id_list_found': voter_list_found,
    }
def save_action(self, action_constant, voter_we_vote_id, voter_id, is_signed_in=False, state_code="",
                organization_we_vote_id="", organization_id=0, google_civic_election_id=0,
                user_agent_string="", is_bot=False, is_mobile=False, is_desktop=False, is_tablet=False,
                ballot_item_we_vote_id="", voter_device_id=None):
    """
    Record one analytics action, dispatching to the correct storage routine.

    If a voter_device_id is passed in, it is because this action may be coming from
    https://analytics.wevoteusa.org and hasn't been authenticated yet. We store the
    action with the voter_device_id so it can be looked up later.
    If either voter identifier comes in, make sure we have both; same for the
    organization identifiers.
    """
    # Arguments shared verbatim by both storage routines, in their common trailing order
    shared_tail = (user_agent_string, is_bot, is_mobile, is_desktop, is_tablet,
                   ballot_item_we_vote_id, voter_device_id)
    if action_constant in ACTIONS_THAT_REQUIRE_ORGANIZATION_IDS:
        # In the future we could reduce clutter in the AnalyticsAction table by only
        # storing one entry per day
        return self.create_action_type1(action_constant, voter_we_vote_id, voter_id, is_signed_in,
                                        state_code, organization_we_vote_id, organization_id,
                                        google_civic_election_id, *shared_tail)
    return self.create_action_type2(action_constant, voter_we_vote_id, voter_id, is_signed_in,
                                    state_code, google_civic_election_id, *shared_tail)
def save_organization_daily_metrics_values(self, organization_daily_metrics_values):
    """
    Create or update the OrganizationDailyMetrics row keyed by
    (organization_we_vote_id, date_as_integer).

    :param organization_daily_metrics_values: dict of field values; must contain
        positive 'organization_we_vote_id' and 'date_as_integer'
    :return: results dict with 'success', 'status', 'metrics_saved' and 'metrics'
    """
    success = False
    status = ""
    metrics_saved = False
    metrics = OrganizationDailyMetrics()
    missing_required_variables = False
    date_as_integer = 0
    organization_we_vote_id = ''
    if positive_value_exists(organization_daily_metrics_values['organization_we_vote_id']):
        organization_we_vote_id = organization_daily_metrics_values['organization_we_vote_id']
    else:
        missing_required_variables = True
        status += 'ORGANIZATION_DAILY_METRICS-MISSING_ORGANIZATION_WE_VOTE_ID '
    if positive_value_exists(organization_daily_metrics_values['date_as_integer']):
        date_as_integer = organization_daily_metrics_values['date_as_integer']
    else:
        missing_required_variables = True
        status += 'ORGANIZATION_DAILY_METRICS-MISSING_DATE_AS_INTEGER '
    if not missing_required_variables:
        try:
            # Bug fix: the saved model instance was previously bound to metrics_saved
            # (leaving 'metrics' as a blank instance); bind it to metrics and use
            # metrics_saved as the boolean flag it is everywhere else.
            metrics, created = OrganizationDailyMetrics.objects.using('analytics').update_or_create(
                organization_we_vote_id=organization_we_vote_id,
                date_as_integer=date_as_integer,
                defaults=organization_daily_metrics_values
            )
            metrics_saved = True
            # Bug fix: success was previously never set to True on the happy path
            success = True
        except Exception as e:
            success = False
            status += 'ORGANIZATION_DAILY_METRICS_UPDATE_OR_CREATE_FAILED ' + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'metrics_saved': metrics_saved,
        'metrics': metrics,
    }
    return results
def save_organization_election_metrics_values(self, organization_election_metrics_values):
    """
    Create or update the OrganizationElectionMetrics row keyed by
    (google_civic_election_id, organization_we_vote_id).

    :param organization_election_metrics_values: dict of field values; must contain
        positive 'google_civic_election_id' and 'organization_we_vote_id'
    :return: results dict with 'success', 'status', 'metrics_saved' and 'metrics'
    """
    success = False
    status = ""
    metrics_saved = False
    metrics = OrganizationElectionMetrics()
    missing_required_variables = False
    google_civic_election_id = 0
    organization_we_vote_id = ''
    if positive_value_exists(organization_election_metrics_values['google_civic_election_id']):
        google_civic_election_id = organization_election_metrics_values['google_civic_election_id']
    else:
        missing_required_variables = True
        status += 'ORGANIZATION_ELECTION_METRICS-MISSING_GOOGLE_CIVIC_ELECTION_ID '
    if positive_value_exists(organization_election_metrics_values['organization_we_vote_id']):
        organization_we_vote_id = organization_election_metrics_values['organization_we_vote_id']
    else:
        missing_required_variables = True
        status += 'ORGANIZATION_ELECTION_METRICS-MISSING_ORGANIZATION_WE_VOTE_ID '
    if not missing_required_variables:
        try:
            # Bug fix: update_or_create was called with organization_we_vote_id__iexact=...;
            # a "__iexact" lookup kwarg cannot populate the field when the row is created.
            # Use a plain field kwarg so both the lookup and the create path work.
            metrics, created = OrganizationElectionMetrics.objects.using('analytics').update_or_create(
                google_civic_election_id=google_civic_election_id,
                organization_we_vote_id=organization_we_vote_id,
                defaults=organization_election_metrics_values
            )
            metrics_saved = True
            # Bug fix: success was previously never set to True on the happy path
            success = True
        except Exception as e:
            success = False
            status += 'ORGANIZATION_ELECTION_METRICS_UPDATE_OR_CREATE_FAILED ' + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'metrics_saved': metrics_saved,
        'metrics': metrics,
    }
    return results
def save_sitewide_daily_metrics_values(self, sitewide_daily_metrics_values):
    """
    Create or update the SitewideDailyMetrics row keyed by date_as_integer.

    :param sitewide_daily_metrics_values: dict of field values; must contain a
        positive 'date_as_integer'
    :return: results dict with 'success', 'status', 'sitewide_daily_metrics_saved'
        and 'sitewide_daily_metrics'
    """
    status = ""
    success = True
    sitewide_daily_metrics = SitewideDailyMetrics()
    sitewide_daily_metrics_saved = False
    if not positive_value_exists(sitewide_daily_metrics_values['date_as_integer']):
        # NOTE(review): success intentionally stays True here, matching the
        # long-standing behavior when date_as_integer is missing
        status += "SITEWIDE_DAILY_METRICS-MISSING_DATE_AS_INTEGER "
    else:
        date_as_integer = sitewide_daily_metrics_values['date_as_integer']
        try:
            sitewide_daily_metrics, created = SitewideDailyMetrics.objects.using('analytics').update_or_create(
                date_as_integer=date_as_integer,
                defaults=sitewide_daily_metrics_values
            )
            sitewide_daily_metrics_saved = True
        except Exception as e:
            success = False
            status += 'SITEWIDE_DAILY_METRICS_UPDATE_OR_CREATE_FAILED '
    return {
        'success': success,
        'status': status,
        'sitewide_daily_metrics_saved': sitewide_daily_metrics_saved,
        'sitewide_daily_metrics': sitewide_daily_metrics,
    }
def save_sitewide_election_metrics_values(self, sitewide_election_metrics_values):
    """
    Create or update the SitewideElectionMetrics row keyed by google_civic_election_id.

    :param sitewide_election_metrics_values: dict of field values; must contain a
        positive 'google_civic_election_id'
    :return: results dict with 'success', 'status', 'metrics_saved' and 'metrics'
    """
    success = False
    status = ""
    metrics_saved = False
    metrics = SitewideElectionMetrics()
    if positive_value_exists(sitewide_election_metrics_values['google_civic_election_id']):
        google_civic_election_id = sitewide_election_metrics_values['google_civic_election_id']
        try:
            # Bug fix: the saved model instance was previously bound to metrics_saved;
            # bind it to metrics and keep metrics_saved as a boolean flag.
            metrics, created = SitewideElectionMetrics.objects.using('analytics').update_or_create(
                google_civic_election_id=google_civic_election_id,
                defaults=sitewide_election_metrics_values
            )
            metrics_saved = True
            # Bug fix: success was previously never set to True on the happy path
            success = True
        except Exception as e:
            success = False
            status += 'SITEWIDE_ELECTION_METRICS_UPDATE_OR_CREATE_FAILED ' + str(e) + ' '
    else:
        # Bug fix: a missing election id previously returned silently with no status
        status += 'SITEWIDE_ELECTION_METRICS-MISSING_GOOGLE_CIVIC_ELECTION_ID '
    results = {
        'success': success,
        'status': status,
        'metrics_saved': metrics_saved,
        'metrics': metrics,
    }
    return results
def save_sitewide_voter_metrics_values_for_one_voter(self, sitewide_voter_metrics_values):
    """
    Create or update the SitewideVoterMetrics row keyed by voter_we_vote_id.

    :param sitewide_voter_metrics_values: dict of field values; must contain a
        positive 'voter_we_vote_id'
    :return: results dict with 'success', 'status' and 'metrics_saved'
    """
    success = False
    status = ""
    metrics_saved = False
    if positive_value_exists(sitewide_voter_metrics_values['voter_we_vote_id']):
        voter_we_vote_id = sitewide_voter_metrics_values['voter_we_vote_id']
        try:
            # Bug fix: update_or_create was called with voter_we_vote_id__iexact=...;
            # a "__iexact" lookup kwarg cannot populate the field when the row is
            # created. Use a plain field kwarg instead.
            metrics, created = SitewideVoterMetrics.objects.using('analytics').update_or_create(
                voter_we_vote_id=voter_we_vote_id,
                defaults=sitewide_voter_metrics_values
            )
            metrics_saved = True
            success = True
        except Exception as e:
            success = False
            status += 'SITEWIDE_VOTER_METRICS_UPDATE_OR_CREATE_FAILED ' + str(e) + ' '
    else:
        status += "SITEWIDE_VOTER_METRICS_SAVE-MISSING_VOTER_WE_VOTE_ID "
    # Single exit point instead of the duplicated results dict in both branches
    results = {
        'success': success,
        'status': status,
        'metrics_saved': metrics_saved,
    }
    return results
def sitewide_voter_metrics_for_this_voter_updated_this_date(self, voter_we_vote_id, updated_date_integer):
    """
    Return True if this voter's SitewideVoterMetrics row was already recalculated
    on the given date (YYYYMMDD integer), else False.
    """
    updated_on_date_query = SitewideVoterMetrics.objects.using('analytics').filter(
        voter_we_vote_id__iexact=voter_we_vote_id,
        last_calculated_date_as_integer=updated_date_integer
    )
    # .exists() lets the database stop at the first matching row instead of
    # counting all of them, and already returns the boolean we need
    return updated_on_date_query.exists()
def update_first_visit_today_for_all_voters_since_date(self, date_as_integer, through_date_as_integer):
    """
    For every day in the inclusive [date_as_integer, through_date_as_integer] window,
    mark each voter's oldest AnalyticsAction entry on that day with
    first_visit_today=True.

    :param date_as_integer: lower bound (inclusive), YYYYMMDD integer
    :param through_date_as_integer: upper bound (inclusive), YYYYMMDD integer
    :return: results dict with 'success', 'status' and 'first_visit_today_count'
        (number of entries flipped from False to True)
    """
    success = True
    status = ""
    distinct_days_list = []
    first_visit_today_count = 0
    # Get distinct days
    try:
        distinct_days_query = AnalyticsAction.objects.using('analytics').all()
        distinct_days_query = distinct_days_query.filter(date_as_integer__gte=date_as_integer)
        distinct_days_query = distinct_days_query.filter(date_as_integer__lte=through_date_as_integer)
        distinct_days_query = distinct_days_query.values('date_as_integer').distinct()
        # distinct_days_query = distinct_days_query[:5]  # TEMP limit to 5
        distinct_days_list = list(distinct_days_query)
        distinct_days_found = True  # NOTE(review): write-only flag, never read afterwards
    except Exception as e:
        success = False
        status += "UPDATE_FIRST_VISIT_TODAY-DISTINCT_DAY_QUERY_ERROR "
        distinct_days_found = False
    # Flatten the values() dicts into plain YYYYMMDD ints, dropping empty values
    simple_distinct_days_list = []
    for day_dict in distinct_days_list:
        if positive_value_exists(day_dict['date_as_integer']):
            simple_distinct_days_list.append(day_dict['date_as_integer'])
    # Loop through each day
    for one_date_as_integer in simple_distinct_days_list:
        # Get distinct voters on that day
        if not positive_value_exists(one_date_as_integer):
            continue
        voter_list = []
        try:
            voter_list_query = AnalyticsAction.objects.using('analytics').all()
            voter_list_query = voter_list_query.filter(date_as_integer=one_date_as_integer)
            voter_list_query = voter_list_query.values('voter_we_vote_id').distinct()
            # voter_list_query = voter_list_query[:5]  # TEMP limit to 5
            voter_list = list(voter_list_query)
            voter_list_found = True
        except Exception as e:
            success = False
            status += "UPDATE_FIRST_VISIT_TODAY-DISTINCT_VOTER_QUERY_ERROR "
            voter_list_found = False
        # Flatten to unique we_vote_ids (the "not in" test de-duplicates defensively)
        simple_voter_list = []
        for voter_dict in voter_list:
            if positive_value_exists(voter_dict['voter_we_vote_id']) and \
                    voter_dict['voter_we_vote_id'] not in simple_voter_list:
                simple_voter_list.append(voter_dict['voter_we_vote_id'])
        if not voter_list_found:
            continue
        # Loop through each voter per day, and update the first entry for that day
        # with "first_visit_today=True"
        for voter_we_vote_id in simple_voter_list:
            if not positive_value_exists(voter_we_vote_id):
                continue
            try:
                first_visit_query = AnalyticsAction.objects.using('analytics').all()
                first_visit_query = first_visit_query.order_by("id")  # order by oldest first
                first_visit_query = first_visit_query.filter(date_as_integer=one_date_as_integer)
                first_visit_query = first_visit_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
                analytics_action = first_visit_query.first()
                # Only save (and count) when the flag actually changes; a .first()
                # returning None raises AttributeError and lands in the except below
                if not analytics_action.first_visit_today:
                    analytics_action.first_visit_today = True
                    analytics_action.save()
                    first_visit_saved = True  # NOTE(review): write-only flag, never read afterwards
                    first_visit_today_count += 1
            except Exception as e:
                success = False
                status += "UPDATE_FIRST_VISIT_TODAY-VOTER_ON_DATE_QUERY_ERROR "
                print_to_log(logger=logger, exception_message_optional=status)
                first_visit_found = False  # NOTE(review): write-only flag, never read afterwards
    results = {
        'success': success,
        'status': status,
        'first_visit_today_count': first_visit_today_count,
    }
    return results
def update_first_visit_today_for_one_voter(self, voter_we_vote_id):
    """
    For every day this voter has activity, mark that voter's oldest
    AnalyticsAction entry of the day with first_visit_today=True.

    :param voter_we_vote_id: the voter to update
    :return: results dict with 'success', 'status' and 'first_visit_today_count'
    """
    # Bug fix: success was initialized to False and never set to True, and every
    # exception was silently swallowed with "pass". Track success/status properly.
    success = True
    status = ""
    distinct_days_list = []
    first_visit_today_count = 0
    # Get the distinct days on which this voter has any recorded action
    try:
        distinct_days_query = AnalyticsAction.objects.using('analytics').all()
        distinct_days_query = distinct_days_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
        distinct_days_query = distinct_days_query.values('date_as_integer').distinct()
        distinct_days_list = list(distinct_days_query)
    except Exception as e:
        success = False
        status += "UPDATE_FIRST_VISIT_FOR_ONE_VOTER-DISTINCT_DAY_QUERY_ERROR " + str(e) + ' '
    # Flatten the values() dicts into plain YYYYMMDD ints, dropping empty values
    simple_distinct_days_list = [day_dict['date_as_integer'] for day_dict in distinct_days_list
                                 if positive_value_exists(day_dict['date_as_integer'])]
    # For each day, flag the oldest entry as the first visit of that day
    for one_date_as_integer in simple_distinct_days_list:
        try:
            first_visit_query = AnalyticsAction.objects.using('analytics').all()
            first_visit_query = first_visit_query.order_by("id")  # order by oldest first
            first_visit_query = first_visit_query.filter(date_as_integer=one_date_as_integer)
            first_visit_query = first_visit_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            analytics_action = first_visit_query.first()
            if analytics_action is not None:  # guard against a race where the rows vanished
                analytics_action.first_visit_today = True
                analytics_action.save()
                first_visit_today_count += 1
        except Exception as e:
            success = False
            status += "UPDATE_FIRST_VISIT_FOR_ONE_VOTER-SAVE_ERROR " + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'first_visit_today_count': first_visit_today_count,
    }
    return results
class OrganizationDailyMetrics(models.Model):
    """
    This is a summary of the organization activity on one day.
    One row per (organization_we_vote_id, date_as_integer) pair; all counter
    fields are nullable so partial snapshots can be stored.
    """
    # We store YYYYMMDD as an integer for very fast lookup (ex/ "20170901" for September, 1, 2017)
    date_as_integer = models.PositiveIntegerField(verbose_name="YYYYMMDD of the action",
                                                  null=True, unique=False, db_index=True)
    # The organization this daily snapshot belongs to
    organization_we_vote_id = models.CharField(verbose_name="we vote permanent id",
                                               max_length=255, null=True, blank=True, unique=False)
    # Visitor counters: cumulative ("total") vs this-day ("today") variants
    visitors_total = models.PositiveIntegerField(verbose_name="number of visitors, all time", null=True, unique=False)
    authenticated_visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    visitors_today = models.PositiveIntegerField(verbose_name="number of visitors, today", null=True, unique=False)
    authenticated_visitors_today = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    new_visitors_today = models.PositiveIntegerField(verbose_name="new visitors, today", null=True, unique=False)
    # Voter-guide entry and ballot-visit counters
    voter_guide_entrants_today = models.PositiveIntegerField(verbose_name="first touch, voter guide",
                                                             null=True, unique=False)
    voter_guide_entrants = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_visiting_ballot = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_visiting_ballot = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    # Follower counters (explicit follows vs auto-follows)
    followers_total = models.PositiveIntegerField(verbose_name="all time",
                                                  null=True, unique=False)
    new_followers_today = models.PositiveIntegerField(verbose_name="today",
                                                      null=True, unique=False)
    auto_followers_total = models.PositiveIntegerField(verbose_name="all",
                                                       null=True, unique=False)
    new_auto_followers_today = models.PositiveIntegerField(verbose_name="today",
                                                           null=True, unique=False)
    # Issue links and public positions published by this organization
    issues_linked_total = models.PositiveIntegerField(verbose_name="organization classifications, all time",
                                                      null=True, unique=False)
    organization_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                null=True, unique=False)

    def generate_date_as_integer(self):
        """Set self.date_as_integer to today's date as a YYYYMMDD integer."""
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.date_as_integer = convert_to_int(day_as_string)
        return
class OrganizationElectionMetrics(models.Model):
    """
    A summary of one organization's activity for one election.
    One row per (google_civic_election_id, organization_we_vote_id) pair.
    """
    # The unique ID of this election. (Provided by Google Civic)
    google_civic_election_id = models.PositiveIntegerField(
        verbose_name="google civic election id", null=True, unique=False)
    organization_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
    election_day_text = models.CharField(verbose_name="election day", max_length=255, null=True, blank=True)
    # Visitor / follower counters for this election
    visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    authenticated_visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    voter_guide_entrants = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_at_time_of_election = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    new_followers = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    new_auto_followers = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    # "entrants_*" = people who arrived via this org's voter guide;
    # "followers_*" = people already following this org
    entrants_visited_ballot = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_visited_ballot = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_took_position = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_public_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_public_positions_with_comments = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_friends_only_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entrants_friends_only_positions_with_comments = models.PositiveIntegerField(
        verbose_name="", null=True, unique=False)
    followers_took_position = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_public_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_public_positions_with_comments = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_friends_only_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    followers_friends_only_positions_with_comments = models.PositiveIntegerField(
        verbose_name="", null=True, unique=False)

    def election(self):
        """Return the matching Election from the read-only db, or None when missing/ambiguous."""
        if not self.google_civic_election_id:
            return
        try:
            # We retrieve this from the read-only database (as opposed to the analytics database)
            election = Election.objects.using('readonly').get(google_civic_election_id=self.google_civic_election_id)
        except Election.MultipleObjectsReturned as e:
            return
        except Election.DoesNotExist:
            return
        return election

    def organization(self):
        """Return the matching Organization from the read-only db.

        Returns None on lookup failure, or an empty Organization() when this row
        has no organization_we_vote_id (pre-existing asymmetry).
        """
        if positive_value_exists(self.organization_we_vote_id):
            try:
                organization = Organization.objects.using('readonly').get(we_vote_id=self.organization_we_vote_id)
            except Organization.MultipleObjectsReturned as e:
                logger.error("analytics.organization Found multiple")
                return
            except Organization.DoesNotExist:
                logger.error("analytics.organization did not find")
                return
            return organization
        else:
            return Organization()
class SitewideDailyMetrics(models.Model):
    """
    This is a summary of the sitewide activity on one day.
    One row per date_as_integer; all counters are nullable so partial
    snapshots can be stored.
    """
    # We store YYYYMMDD as an integer for very fast lookup (ex/ "20170901" for September, 1, 2017)
    date_as_integer = models.PositiveIntegerField(verbose_name="YYYYMMDD of the action",
                                                  null=True, unique=False, db_index=True)
    # Visitor counters: cumulative ("total") vs this-day ("today") variants
    visitors_total = models.PositiveIntegerField(verbose_name="number of visitors, all time", null=True, unique=False)
    visitors_today = models.PositiveIntegerField(verbose_name="number of visitors, today", null=True, unique=False)
    new_visitors_today = models.PositiveIntegerField(verbose_name="new visitors, today", null=True, unique=False)
    # "first touch" counters, split by where the visitor entered the site
    voter_guide_entrants_today = models.PositiveIntegerField(verbose_name="first touch, voter guide",
                                                             null=True, unique=False)
    welcome_page_entrants_today = models.PositiveIntegerField(verbose_name="first touch, welcome page",
                                                              null=True, unique=False)
    friend_entrants_today = models.PositiveIntegerField(verbose_name="first touch, response to friend",
                                                        null=True, unique=False)
    authenticated_visitors_total = models.PositiveIntegerField(verbose_name="number of visitors, all time",
                                                               null=True, unique=False)
    authenticated_visitors_today = models.PositiveIntegerField(verbose_name="number of visitors, today",
                                                               null=True, unique=False)
    ballot_views_today = models.PositiveIntegerField(verbose_name="number of voters who viewed a ballot today",
                                                     null=True, unique=False)
    # Voter guide and issue engagement counters
    voter_guides_viewed_total = models.PositiveIntegerField(verbose_name="number of voter guides viewed, all time",
                                                            null=True, unique=False)
    voter_guides_viewed_today = models.PositiveIntegerField(verbose_name="number of voter guides viewed, today",
                                                            null=True, unique=False)
    issues_followed_total = models.PositiveIntegerField(verbose_name="number of issues followed, all time",
                                                        null=True, unique=False)
    issues_followed_today = models.PositiveIntegerField(verbose_name="issues followed today, today",
                                                        null=True, unique=False)
    issue_follows_total = models.PositiveIntegerField(verbose_name="one follow for one issue, all time",
                                                      null=True, unique=False)
    issue_follows_today = models.PositiveIntegerField(verbose_name="one follow for one issue, today",
                                                      null=True, unique=False)
    # Organization follow counters (explicit follows vs auto-follows)
    organizations_followed_total = models.PositiveIntegerField(verbose_name="voter follow organizations, all time",
                                                               null=True, unique=False)
    organizations_followed_today = models.PositiveIntegerField(verbose_name="voter follow organizations, today",
                                                               null=True, unique=False)
    organizations_auto_followed_total = models.PositiveIntegerField(verbose_name="auto_follow organizations, all",
                                                                    null=True, unique=False)
    organizations_auto_followed_today = models.PositiveIntegerField(verbose_name="auto_follow organizations, today",
                                                                    null=True, unique=False)
    organizations_with_linked_issues = models.PositiveIntegerField(verbose_name="organizations linked to issues, all",
                                                                   null=True, unique=False)
    issues_linked_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    issues_linked_today = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    organizations_signed_in_total = models.PositiveIntegerField(verbose_name="organizations signed in, all",
                                                                null=True, unique=False)
    # Position / opinion counters, split by organizations vs individuals
    organizations_with_positions = models.PositiveIntegerField(verbose_name="all",
                                                               null=True, unique=False)
    organizations_with_new_positions_today = models.PositiveIntegerField(verbose_name="today",
                                                                         null=True, unique=False)
    organization_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                null=True, unique=False)
    individuals_with_positions = models.PositiveIntegerField(verbose_name="all",
                                                             null=True, unique=False)
    individuals_with_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                    null=True, unique=False)
    individuals_with_friends_only_positions = models.PositiveIntegerField(verbose_name="all",
                                                                          null=True, unique=False)
    friends_only_positions = models.PositiveIntegerField(verbose_name="all",
                                                         null=True, unique=False)
    entered_full_address = models.PositiveIntegerField(verbose_name="all",
                                                       null=True, unique=False)

    def generate_date_as_integer(self):
        """Set self.date_as_integer to today's date as a YYYYMMDD integer."""
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.date_as_integer = convert_to_int(day_as_string)
        return
class SitewideElectionMetrics(models.Model):
    """
    This is a summary of the sitewide activity for one election.
    One row per google_civic_election_id; all counters are nullable.
    """
    # The unique ID of this election. (Provided by Google Civic)
    google_civic_election_id = models.PositiveIntegerField(
        verbose_name="google civic election id", null=True, unique=False)
    election_day_text = models.CharField(verbose_name="election day", max_length=255, null=True, blank=True)
    # Visitor and voter-guide engagement counters for this election
    visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    authenticated_visitors_total = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    voter_guide_entries = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    voter_guide_views = models.PositiveIntegerField(verbose_name="one person viewed one voter guide, this election",
                                                    null=True, unique=False)
    voter_guides_viewed = models.PositiveIntegerField(verbose_name="one org, seen at least once, this election",
                                                      null=True, unique=False)
    issues_followed = models.PositiveIntegerField(verbose_name="follow issue connections, all time",
                                                  null=True, unique=False)
    # Follow counters (explicit follows vs auto-follows)
    unique_voters_that_followed_organizations = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    unique_voters_that_auto_followed_organizations = models.PositiveIntegerField(verbose_name="",
                                                                                 null=True, unique=False)
    organizations_followed = models.PositiveIntegerField(verbose_name="voter follow organizations, today",
                                                         null=True, unique=False)
    organizations_auto_followed = models.PositiveIntegerField(verbose_name="auto_follow organizations, today",
                                                              null=True, unique=False)
    organizations_signed_in = models.PositiveIntegerField(verbose_name="organizations signed in, all",
                                                          null=True, unique=False)
    # Position / opinion counters, split by organizations vs individuals
    organizations_with_positions = models.PositiveIntegerField(verbose_name="all",
                                                               null=True, unique=False)
    organization_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                null=True, unique=False)
    individuals_with_positions = models.PositiveIntegerField(verbose_name="all",
                                                             null=True, unique=False)
    individuals_with_public_positions = models.PositiveIntegerField(verbose_name="all",
                                                                    null=True, unique=False)
    individuals_with_friends_only_positions = models.PositiveIntegerField(verbose_name="all",
                                                                          null=True, unique=False)
    public_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    public_positions_with_comments = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    friends_only_positions = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    friends_only_positions_with_comments = models.PositiveIntegerField(verbose_name="", null=True, unique=False)
    entered_full_address = models.PositiveIntegerField(verbose_name="", null=True, unique=False)

    def election(self):
        """Return the matching Election from the read-only db, or None when missing/ambiguous."""
        if not self.google_civic_election_id:
            return
        try:
            # We retrieve this from the read-only database (as opposed to the analytics database)
            election = Election.objects.using('readonly').get(google_civic_election_id=self.google_civic_election_id)
        except Election.MultipleObjectsReturned as e:
            return
        except Election.DoesNotExist:
            return
        return election

    def generate_date_as_integer(self):
        """Set self.date_as_integer to today's date as a YYYYMMDD integer.

        NOTE(review): this model declares no date_as_integer field in the visible
        code, so this setter appears to be copied from the daily-metrics models —
        confirm whether it is still needed.
        """
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.date_as_integer = convert_to_int(day_as_string)
        return
class SitewideVoterMetrics(models.Model):
    """
    A single entry per voter summarizing all activity every done on We Vote
    (one row per voter_we_vote_id, refreshed periodically — see
    last_calculated_date_as_integer).
    """
    voter_we_vote_id = models.CharField(
        verbose_name="we vote permanent id",
        max_length=255, default=None, null=True, blank=True, unique=False, db_index=True)
    # Lifetime activity counters for this voter
    actions_count = models.PositiveIntegerField(verbose_name="all", null=True, unique=False, db_index=True)
    elections_viewed = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    voter_guides_viewed = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    ballot_visited = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    welcome_visited = models.PositiveIntegerField(verbose_name="all", null=True, unique=False, db_index=True)
    entered_full_address = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    issues_followed = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    organizations_followed = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    time_until_sign_in = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    # Positions and comments, split by visibility
    positions_entered_friends_only = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    positions_entered_public = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    comments_entered_friends_only = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    comments_entered_public = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    # Which authentication methods this voter has used
    signed_in_twitter = models.BooleanField(verbose_name='', default=False)
    signed_in_facebook = models.BooleanField(verbose_name='', default=False)
    signed_in_with_email = models.BooleanField(verbose_name='', default=False)
    seconds_on_site = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    days_visited = models.PositiveIntegerField(verbose_name="all", null=True, unique=False)
    last_action_date = models.DateTimeField(verbose_name='last action date and time', null=True, db_index=True)
    # YYYYMMDD of the last refresh; used to skip voters already updated today
    last_calculated_date_as_integer = models.PositiveIntegerField(
        verbose_name="YYYYMMDD of the last time stats calculated", null=True, unique=False, db_index=True)

    def generate_last_calculated_date_as_integer(self):
        """Set self.last_calculated_date_as_integer to today's date as a YYYYMMDD integer."""
        # We want to store the day as an integer for extremely quick database indexing and lookup
        datetime_now = localtime(now()).date()  # We Vote uses Pacific Time for TIME_ZONE
        day_as_string = "{:d}{:02d}{:02d}".format(
            datetime_now.year,
            datetime_now.month,
            datetime_now.day,
        )
        self.last_calculated_date_as_integer = convert_to_int(day_as_string)
        return
def display_action_constant_human_readable(action_constant):
    """
    Return the human-readable label for an analytics ACTION_* constant, or
    "ACTION_CONSTANT:<value>" when the constant is unknown.
    """
    action_labels = {
        ACTION_ABOUT_GETTING_STARTED: "ABOUT_GETTING_STARTED",
        ACTION_ABOUT_MOBILE: "ABOUT_MOBILE",
        ACTION_ABOUT_ORGANIZATION: "ABOUT_ORGANIZATION",
        ACTION_ABOUT_TEAM: "ABOUT_TEAM",
        ACTION_ABOUT_VISION: "ABOUT_VISION",
        ACTION_ACCOUNT_PAGE: "ACCOUNT_PAGE",
        ACTION_BALLOT_VISIT: "BALLOT_VISIT",
        ACTION_CANDIDATE: "CANDIDATE",
        ACTION_DONATE_VISIT: "DONATE_VISIT",
        ACTION_ELECTIONS: "ELECTIONS",
        ACTION_EMAIL_AUTHENTICATION_EXISTS: "EMAIL_AUTHENTICATION_EXISTS",
        ACTION_FACEBOOK_AUTHENTICATION_EXISTS: "FACEBOOK_AUTHENTICATION_EXISTS",
        ACTION_FACEBOOK_INVITABLE_FRIENDS: "FACEBOOK_INVITABLE_FRIENDS",
        ACTION_FRIEND_ENTRY: "FRIEND_ENTRY",
        ACTION_GOOGLE_AUTHENTICATION_EXISTS: "GOOGLE_AUTHENTICATION_EXISTS",
        ACTION_INVITE_BY_EMAIL: "INVITE_BY_EMAIL",
        ACTION_ISSUE_FOLLOW: "ISSUE_FOLLOW",
        ACTION_ISSUE_FOLLOW_IGNORE: "ISSUE_FOLLOW_IGNORE",
        ACTION_ISSUE_STOP_FOLLOWING: "ISSUE_STOP_FOLLOWING",
        ACTION_MODAL_ISSUES: "MODAL_ISSUES",
        ACTION_MODAL_ORGANIZATIONS: "MODAL_ORGANIZATIONS",
        ACTION_MODAL_POSITIONS: "MODAL_POSITIONS",
        ACTION_MODAL_FRIENDS: "MODAL_FRIENDS",
        ACTION_MODAL_SHARE: "MODAL_SHARE",
        ACTION_MODAL_VOTE: "MODAL_VOTE",
        ACTION_NETWORK: "NETWORK",
        ACTION_OFFICE: "OFFICE",
        ACTION_ORGANIZATION_AUTO_FOLLOW: "ORGANIZATION_AUTO_FOLLOW",
        ACTION_ORGANIZATION_FOLLOW: "ORGANIZATION_FOLLOW",
        ACTION_ORGANIZATION_FOLLOW_IGNORE: "ORGANIZATION_FOLLOW_IGNORE",
        ACTION_ORGANIZATION_STOP_FOLLOWING: "ORGANIZATION_STOP_FOLLOWING",
        ACTION_POSITION_TAKEN: "POSITION_TAKEN",
        ACTION_TWITTER_AUTHENTICATION_EXISTS: "TWITTER_AUTHENTICATION_EXISTS",
        ACTION_VOTER_FACEBOOK_AUTH: "VOTER_FACEBOOK_AUTH",
        ACTION_VOTER_GUIDE_ENTRY: "VOTER_GUIDE_ENTRY",
        ACTION_VOTER_GUIDE_GET_STARTED: "VOTER_GUIDE_GET_STARTED",
        ACTION_VOTER_GUIDE_VISIT: "VOTER_GUIDE_VISIT",
        ACTION_VOTER_TWITTER_AUTH: "VOTER_TWITTER_AUTH",
        ACTION_WELCOME_ENTRY: "WELCOME_ENTRY",
        ACTION_WELCOME_VISIT: "WELCOME_VISIT",
    }
    return action_labels.get(action_constant, "ACTION_CONSTANT:" + str(action_constant))
| {
"repo_name": "jainanisha90/WeVoteServer",
"path": "analytics/models.py",
"copies": "1",
"size": "73059",
"license": "mit",
"hash": -3677543344842123000,
"line_mean": 48.7,
"line_max": 120,
"alpha_frac": 0.6179389261,
"autogenerated": false,
"ratio": 4.066740885054272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0025773687276925065,
"num_lines": 1470
} |
# ANALYTICS :)
# Streams tweets matching "congress", scores each with TextBlob sentiment
# analysis, and indexes the result into a local Elasticsearch instance.
#
# Bug fixes: removed the stray "def sentiment()" line (a syntax error — it was
# missing its colon, and the module-level code below it never belonged inside a
# function), and converted Python 2 "print x" statements to print() calls.
import json

from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from textblob import TextBlob
from elasticsearch import Elasticsearch

# import twitter keys and tokens
from config import *

# create instance of elasticsearch
es = Elasticsearch()


class TweetStreamListener(StreamListener):

    # on success
    def on_data(self, data):
        """Score one incoming tweet and index it into the "sentiment" index."""
        # decode json
        dict_data = json.loads(data)

        # pass tweet into TextBlob
        tweet = TextBlob(dict_data["text"])

        # output sentiment polarity
        print(tweet.sentiment.polarity)

        # determine if sentiment is positive, negative, or neutral
        if tweet.sentiment.polarity < 0:
            sentiment = "negative"
        elif tweet.sentiment.polarity == 0:
            sentiment = "neutral"
        else:
            sentiment = "positive"

        # output sentiment
        print(sentiment)

        # add text and sentiment info to elasticsearch
        es.index(index="sentiment",
                 doc_type="test-type",
                 body={"author": dict_data["user"]["screen_name"],
                       "date": dict_data["created_at"],
                       "message": dict_data["text"],
                       "polarity": tweet.sentiment.polarity,
                       "subjectivity": tweet.sentiment.subjectivity,
                       "sentiment": sentiment})
        return True

    # on failure
    def on_error(self, status):
        print(status)


if __name__ == '__main__':
    # create instance of the tweepy tweet stream listener
    listener = TweetStreamListener()

    # set twitter keys/tokens
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)

    # create instance of the tweepy stream
    stream = Stream(auth, listener)

    # search twitter for "congress" keyword
    stream.filter(track=['congress'])
| {
"repo_name": "maketwittergreatagain/maketwittergreatagain",
"path": "analytics.py",
"copies": "1",
"size": "1970",
"license": "mit",
"hash": -6989649563856612000,
"line_mean": 26.3611111111,
"line_max": 68,
"alpha_frac": 0.6203045685,
"autogenerated": false,
"ratio": 4.095634095634096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5215938664134095,
"avg_score": null,
"num_lines": null
} |
# Analytic solution of EM fields due to a plane wave
import numpy as np, SimPEG as simpeg
def getEHfields(m1d,sigma,freq,zd,scaleUD=True):
    '''Analytic solution for MT 1D layered earth. Returns E and H fields.

    :param SimPEG.mesh, object m1d: Mesh object with the 1D spatial information.
    :param numpy.array, vector sigma: Physical property of conductivity corresponding with the mesh.
    :param float, freq: Frequency to calculate data at.
    :param numpy array, vector zd: location to calculate EH fields at
    :param boolean, scaleUD: scales the output to be 1 at the top, increases numerical stability.

    Assumes a halfspace with the same conductivity as the last cell below.

    :returns: (Ed, Eu, Hd, Hu) -- down- and up-going E and H field components
        evaluated at the locations in zd, each a complex numpy array of
        zd's shape.
    '''
    # Note add an error check for the mesh and sigma are the same size.

    # Constants: Assume constant (uniform permeability/permittivity per layer)
    mu = 4*np.pi*1e-7*np.ones((m1d.nC+1))
    eps = 8.85*1e-12*np.ones((m1d.nC+1))
    # Angular freq
    w = 2*np.pi*freq

    # Add the halfspace value to the property
    # (index 0 of sig/k/mu/eps refers to the basement halfspace)
    sig = np.concatenate((np.array([sigma[0]]),sigma))

    # Calculate the wave number
    k = np.sqrt(eps*mu*w**2-1j*mu*sig*w)

    # Initiate the propagation matrix, in the order down up.
    UDp = np.zeros((2,m1d.nC+1),dtype=complex)
    UDp[1,0] = 1.  # Set the wave amplitude as 1 into the half-space at the bottom of the mesh
    # Loop over all the layers, starting at the bottom layer
    for lnr, h in enumerate(m1d.hx):  # lnr-number of layer, h-thickness of the layer
        # Calculate
        yp1 = k[lnr]/(w*mu[lnr])  # Admittance of the layer below the current layer
        zp = (w*mu[lnr+1])/k[lnr+1]  # Impedance in the current layer
        # Build the propagation matrix
        # Convert fields to down/up going components in layer below current layer
        Pj1 = np.array([[1,1],[yp1,-yp1]])
        # Convert fields to down/up going components in current layer
        Pjinv = 1./2*np.array([[1,zp],[1,-zp]])
        # Propagate down and up components through the current layer
        elamh = np.array([[np.exp(-1j*k[lnr+1]*h),0],[0,np.exp(1j*k[lnr+1]*h)]])
        # The down and up component in current layer.
        UDp[:,lnr+1] = elamh.dot(Pjinv.dot(Pj1)).dot(UDp[:,lnr])
        if scaleUD:
            # Normalise by the up-going amplitude of the current layer to keep
            # the numbers bounded (the reversed slice rescales all layers
            # computed so far, preserving their ratios).
            UDp[:,lnr+1::-1] = UDp[:,lnr+1::-1]/UDp[1,lnr+1]
    # Calculate the fields
    Ed = np.empty((zd.size,),dtype=complex)
    Eu = np.empty((zd.size,),dtype=complex)
    Hd = np.empty((zd.size,),dtype=complex)
    Hu = np.empty((zd.size,),dtype=complex)
    # Loop over the layers and calculate the fields
    # In the halfspace below the mesh
    dup = m1d.vectorNx[0]
    dind = dup >= zd
    Ed[dind] = UDp[1,0]*np.exp(-1j*k[0]*(dup-zd[dind]))
    Eu[dind] = UDp[0,0]*np.exp(1j*k[0]*(dup-zd[dind]))
    Hd[dind] = (k[0]/(w*mu[0]))*UDp[1,0]*np.exp(-1j*k[0]*(dup-zd[dind]))
    Hu[dind] = -(k[0]/(w*mu[0]))*UDp[0,0]*np.exp(1j*k[0]*(dup-zd[dind]))
    # In each mesh cell: evaluate the fields from that layer's down/up amplitudes
    for ki,mui,epsi,dlow,dup,Up,Dp in zip(k[1::],mu[1::],eps[1::],m1d.vectorNx[:-1],m1d.vectorNx[1::],UDp[0,1::],UDp[1,1::]):
        dind = np.logical_and(dup >= zd, zd > dlow)
        Ed[dind] = Dp*np.exp(-1j*ki*(dup-zd[dind]))
        Eu[dind] = Up*np.exp(1j*ki*(dup-zd[dind]))
        Hd[dind] = (ki/(w*mui))*Dp*np.exp(-1j*ki*(dup-zd[dind]))
        Hu[dind] = -(ki/(w*mui))*Up*np.exp(1j*ki*(dup-zd[dind]))
    # Return the fields
    return Ed, Eu, Hd, Hu
def getImpedance(m1d,sigma,freq):
"""Analytic solution for MT 1D layered earth. Returns the impedance at the surface.
:param SimPEG.mesh, object m1d: Mesh object with the 1D spatial information.
:param numpy.array, vector sigma: Physical property corresponding with the mesh.
:param numpy.array, vector freq: Frequencies to calculate data at.
"""
# Define constants
mu0 = 4*np.pi*1e-7
eps0 = 8.85e-12
# Initiate the impedances
Z1d = np.empty(len(freq) , dtype='complex')
h = m1d.hx #vectorNx[:-1]
# Start the process
for nrFr, fr in enumerate(freq):
om = 2*np.pi*fr
Zall = np.empty(len(h)+1,dtype='complex')
# Calculate the impedance for the bottom layer
Zall[0] = (mu0*om)/np.sqrt(mu0*eps0*(om)**2 - 1j*mu0*sigma[0]*om)
for nr,hi in enumerate(h):
# Calculate the wave number
# print nr,sigma[nr]
k = np.sqrt(mu0*eps0*om**2 - 1j*mu0*sigma[nr]*om)
Z = (mu0*om)/k
Zall[nr+1] = Z *((Zall[nr] + Z*np.tanh(1j*k*hi))/(Z + Zall[nr]*np.tanh(1j*k*hi)))
#pdb.set_trace()
Z1d[nrFr] = Zall[-1]
return Z1d
| {
"repo_name": "simpeg/simpegmt",
"path": "simpegMT/Utils/MT1Danalytic.py",
"copies": "1",
"size": "4510",
"license": "mit",
"hash": -5696215553528173000,
"line_mean": 39.6306306306,
"line_max": 125,
"alpha_frac": 0.6141906874,
"autogenerated": false,
"ratio": 2.7483241925655086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8809335586067693,
"avg_score": 0.010635858779563238,
"num_lines": 111
} |
"""analyticsolution.py - Analytic solutions for the second order Klein-Gordon equation
"""
#Author: Ian Huston
#For license and copyright information see LICENSE.txt which was distributed with this file.
from __future__ import division
import numpy as np
import scipy
from generalsolution import GeneralSolution
#Change to Fortran-style names for compatibility
# Aliases so the machine-generated expressions below (Sqrt, Log, ArcTan, ...)
# can be used verbatim.
# NOTE(review): these rely on scipy re-exporting numpy ufuncs (scipy.log,
# scipy.sqrt, ...); those re-exports were removed in later SciPy releases —
# confirm the pinned SciPy version, or alias the numpy functions directly.
Log = scipy.log
Sqrt = scipy.sqrt
ArcTan = scipy.arctan
Pi = scipy.pi
ArcSinh = scipy.arcsinh
ArcTanh = scipy.arctanh
class AnalyticSolution(GeneralSolution):
    """Analytic Solution base class.

    Subclasses fill ``self.J_terms`` with callables, each evaluating one
    analytic integral term; ``full_source_from_model`` sums them to build
    the full source term S.
    """
    def __init__(self, *args, **kwargs):
        """Given a fixture and a cosmomodels model instance, initialises an AnalyticSolution class instance.
        """
        super(AnalyticSolution, self).__init__(*args, **kwargs)
        # List of J term callables; populated by subclasses.
        self.J_terms = []

    def full_source_from_model(self, m, nix, **kwargs):
        """Use the data from a model at a timestep nix to calculate the full source term S.

        :param m: cosmomodels model instance providing yresult/tresult/potentials.
        :param int nix: index of the timestep to use.
        :param kwargs: forwarded to each J term (e.g. alpha, beta).
        :raises AttributeError: if background values are NaN at this timestep.
        """
        #Get background values
        bgvars = m.yresult[nix, 0:3, 0]
        a = m.ainit*np.exp(m.tresult[nix])
        if np.any(np.isnan(bgvars[0])):
            raise AttributeError("Background values not available for this timestep.")
        k = self.srceqns.k
        #Get potentials
        potentials = m.potentials(np.array([bgvars[0]]), m.pot_params)
        Cterms = self.calculate_Cterms(bgvars, a, potentials)
        # Seed the accumulator at extended precision, then add the rest.
        results = np.complex256(self.J_terms[0](k, Cterms, **kwargs))
        #Get component integrals
        for term in self.J_terms[1:]:
            results += term(k, Cterms, **kwargs)
        src = 1 / ((2*np.pi)**2) * results
        return src
class NoPhaseBunchDaviesSolution(AnalyticSolution):
    r"""Analytic solution using the Bunch Davies initial conditions as the first order
    solution and with no phase information.

    .. math::
        \delta\varphi_1 = \alpha/\sqrt(k)

        \delta\varphi^\dagger_1 = -\alpha/\sqrt(k) - \alpha/\beta \sqrt(k) i

    The J_* methods below are closed-form results of the convolution
    integrals (machine-generated expressions using the Sqrt/Log/ArcTan/Pi
    aliases defined at module level); they should not be edited by hand.
    """
    def __init__(self, *args, **kwargs):
        super(NoPhaseBunchDaviesSolution, self).__init__(*args, **kwargs)
        # The four integral terms summed by full_source_from_model.
        self.J_terms = [self.J_A, self.J_B, self.J_C, self.J_D]

    def J_A(self, k, Cterms, **kwargs):
        """Solution for J_A which is the integral for A in terms of constants C1 and C2.

        Requires kwargs "alpha" and "beta" (set by full_source_from_model).
        """
        #Set limits from k
        kmin = k[0]
        kmax = k[-1]
        alpha = kwargs["alpha"]
        beta = kwargs["beta"]
        C1 = Cterms[0]
        C2 = Cterms[1]
        J_A = ((alpha ** 2 * (-(Sqrt(kmax * (-k + kmax)) *
            (80 * C1 * (3 * k ** 2 - 14 * k * kmax + 8 * kmax ** 2) +
            3 * C2 * (15 * k ** 4 + 10 * k ** 3 * kmax + 8 * k ** 2 * kmax ** 2 - 176 * k * kmax ** 3 + 128 * kmax ** 4))) +
            Sqrt(kmax * (k + kmax)) * (80 * C1 * (3 * k ** 2 + 14 * k * kmax + 8 * kmax ** 2) +
            3 * C2 * (15 * k ** 4 - 10 * k ** 3 * kmax + 8 * k ** 2 * kmax ** 2 + 176 * k * kmax ** 3 + 128 * kmax ** 4)) -
            Sqrt((k - kmin) * kmin) * (80 * C1 * (3 * k ** 2 - 14 * k * kmin + 8 * kmin ** 2) +
            3 * C2 * (15 * k ** 4 + 10 * k ** 3 * kmin + 8 * k ** 2 * kmin ** 2 - 176 * k * kmin ** 3 + 128 * kmin ** 4)) -
            Sqrt(kmin) * Sqrt(k + kmin) * (80 * C1 * (3 * k ** 2 + 14 * k * kmin + 8 * kmin ** 2) +
            3 * C2 * (15 * k ** 4 - 10 * k ** 3 * kmin + 8 * k ** 2 * kmin ** 2 + 176 * k * kmin ** 3 + 128 * kmin ** 4)) -
            (15 * k ** 3 * (16 * C1 + 3 * C2 * k ** 2) * Pi) / 2. +
            15 * k ** 3 * (16 * C1 + 3 * C2 * k ** 2) * ArcTan(Sqrt(kmin / (k - kmin))) +
            15 * k ** 3 * (16 * C1 + 3 * C2 * k ** 2) * Log(2 * Sqrt(k)) -
            15 * k ** 3 * (16 * C1 + 3 * C2 * k ** 2) * Log(2 * (Sqrt(kmax) + Sqrt(-k + kmax))) -
            15 * k ** 3 * (16 * C1 + 3 * C2 * k ** 2) * Log(2 * (Sqrt(kmax) + Sqrt(k + kmax))) +
            15 * k ** 3 * (16 * C1 + 3 * C2 * k ** 2) * Log(2 * (Sqrt(kmin) + Sqrt(k + kmin))))) / (2880. * k))
        return J_A

    def J_B(self, k, Cterms, **kwargs):
        """Solution for J_B which is the integral for B in terms of constants C3 and C4.

        Requires kwargs "alpha" and "beta" (set by full_source_from_model).
        """
        kmax = k[-1]
        kmin = k[0]
        alpha = kwargs["alpha"]
        beta = kwargs["beta"]
        C3 = Cterms[2]
        C4 = Cterms[3]
        J_B = ((alpha ** 2 * (Sqrt(kmax * (k + kmax)) * (112 * C3 *
            (105 * k ** 4 + 250 * k ** 3 * kmax - 104 * k ** 2 * kmax ** 2 - 48 * k * kmax ** 3 + 96 * kmax ** 4) +
            3 * C4 * (945 * k ** 6 - 630 * k ** 5 * kmax + 504 * k ** 4 * kmax ** 2 + 4688 * k ** 3 * kmax ** 3 - 2176 * k ** 2 * kmax ** 4 -
            1280 * k * kmax ** 5 + 2560 * kmax ** 6)) -
            Sqrt(kmax * (-k + kmax)) * (112 * C3 * (105 * k ** 4 - 250 * k ** 3 * kmax - 104 * k ** 2 * kmax ** 2 + 48 * k * kmax ** 3 +
            96 * kmax ** 4) + 3 * C4 * (945 * k ** 6 + 630 * k ** 5 * kmax + 504 * k ** 4 * kmax ** 2 - 4688 * k ** 3 * kmax ** 3 -
            2176 * k ** 2 * kmax ** 4 + 1280 * k * kmax ** 5 + 2560 * kmax ** 6)) -
            Sqrt(kmin) * Sqrt(k + kmin) * (112 * C3 *
            (105 * k ** 4 + 250 * k ** 3 * kmin - 104 * k ** 2 * kmin ** 2 - 48 * k * kmin ** 3 + 96 * kmin ** 4) +
            3 * C4 * (945 * k ** 6 - 630 * k ** 5 * kmin + 504 * k ** 4 * kmin ** 2 + 4688 * k ** 3 * kmin ** 3 - 2176 * k ** 2 * kmin ** 4 -
            1280 * k * kmin ** 5 + 2560 * kmin ** 6)) -
            Sqrt((k - kmin) * kmin) * (112 * C3 * (105 * k ** 4 - 250 * k ** 3 * kmin - 104 * k ** 2 * kmin ** 2 + 48 * k * kmin ** 3 +
            96 * kmin ** 4) + 3 * C4 * (945 * k ** 6 + 630 * k ** 5 * kmin + 504 * k ** 4 * kmin ** 2 - 4688 * k ** 3 * kmin ** 3 -
            2176 * k ** 2 * kmin ** 4 + 1280 * k * kmin ** 5 + 2560 * kmin ** 6)) -
            (105 * k ** 5 * (112 * C3 + 27 * C4 * k ** 2) * Pi) / 2. +
            105 * k ** 5 * (112 * C3 + 27 * C4 * k ** 2) * ArcTan(Sqrt(kmin / (k - kmin))) +
            105 * k ** 5 * (112 * C3 + 27 * C4 * k ** 2) * Log(2 * Sqrt(k)) -
            105 * k ** 5 * (112 * C3 + 27 * C4 * k ** 2) * Log(2 * (Sqrt(kmax) + Sqrt(-k + kmax))) -
            105 * k ** 5 * (112 * C3 + 27 * C4 * k ** 2) * Log(2 * (Sqrt(kmax) + Sqrt(k + kmax))) +
            105 * k ** 5 * (112 * C3 + 27 * C4 * k ** 2) * Log(2 * (Sqrt(kmin) + Sqrt(k + kmin))))) / (282240. * k ** 2))
        return J_B

    def J_C(self, k, Cterms, **kwargs):
        """Second method for J_C.

        Integral for C in terms of constant C5; complex-valued (carries
        explicit 1j factors with beta). Requires kwargs "alpha" and "beta".
        """
        kmax = k[-1]
        kmin = k[0]
        alpha = kwargs["alpha"]
        beta = kwargs["beta"]
        C5 = Cterms[4]
        J_C = ((alpha**2*C5*(-(Sqrt(2)*k**3*(-10000*beta**2 - (0+15360*1j)*beta*k + 6363*k**2)) -
            Sqrt(kmax*(-k + kmax))*((0+3840*1j)*beta*(k - kmax)**2*kmax + 400*beta**2*(3*k**2 - 14*k*kmax + 8*kmax**2) +
            9*(15*k**4 + 10*k**3*kmax - 248*k**2*kmax**2 + 336*k*kmax**3 - 128*kmax**4)) +
            Sqrt(kmax*(k + kmax))*((0+3840*1j)*beta*kmax*(k + kmax)**2 + 400*beta**2*(3*k**2 + 14*k*kmax + 8*kmax**2) -
            9*(-15*k**4 + 10*k**3*kmax + 248*k**2*kmax**2 + 336*k*kmax**3 + 128*kmax**4)) +
            Sqrt((k - kmin)*kmin)*(-400*beta**2*(3*k**2 - 14*k*kmin + 8*kmin**2) -
            (0+60*1j)*beta*(15*k**3 - 54*k**2*kmin + 8*k*kmin**2 + 16*kmin**3) +
            9*(15*k**4 + 10*k**3*kmin - 248*k**2*kmin**2 + 336*k*kmin**3 - 128*kmin**4)) +
            Sqrt(kmin)*Sqrt(k + kmin)*((-3840*1j)*beta*kmin*(k + kmin)**2 -
            400*beta**2*(3*k**2 + 14*k*kmin + 8*kmin**2) +
            9*(-15*k**4 + 10*k**3*kmin + 248*k**2*kmin**2 + 336*k*kmin**3 + 128*kmin**4)) +
            (15*k**3*(-80*beta**2 - (0+60*1j)*beta*k + 9*k**2)*Pi)/2. +
            15*k**3*(80*beta**2 + (0+60*1j)*beta*k - 9*k**2)*ArcTan(Sqrt(kmin/(k - kmin))) -
            15*k**3*(80*beta**2 + 9*k**2)*Log(2*(1 + Sqrt(2))*Sqrt(k)) +
            k**3*(Sqrt(2)*(-10000*beta**2 - (0+15360*1j)*beta*k + 6363*k**2) + 15*(80*beta**2 + 9*k**2)*Log(2*Sqrt(k)) +
            15*(80*beta**2 + 9*k**2)*Log(2*(1 + Sqrt(2))*Sqrt(k))) -
            15*k**3*(80*beta**2 + 9*k**2)*Log(2*(Sqrt(kmax) + Sqrt(-k + kmax))) -
            15*k**3*(80*beta**2 + 9*k**2)*Log(2*(Sqrt(kmax) + Sqrt(k + kmax))) +
            15*k**3*(80*beta**2 + 9*k**2)*Log(2*(Sqrt(kmin) + Sqrt(k + kmin)))))/(14400.*beta**2*k))
        return J_C

    def J_D(self, k, Cterms, **kwargs):
        """Solution for J_D which is the integral for D in terms of constants C6 and C7.

        The result is assembled from three partial sums j1 + j2 + j3
        (real part, 1/beta**2 part and imaginary 1/beta part).
        Requires kwargs "alpha" and "beta".
        """
        kmax = k[-1]
        kmin = k[0]
        alpha = kwargs["alpha"]
        beta = kwargs["beta"]
        C6 = Cterms[5]
        C7 = Cterms[6]
        j1 = ((alpha ** 2 * (-240 * Sqrt((k + kmax) / kmax) *
            (40 * C6 * (24 * k ** 3 + 9 * k ** 2 * kmax + 2 * k * kmax ** 2 - 4 * kmax ** 3) +
            C7 * kmax * (-105 * k ** 4 - 250 * k ** 3 * kmax + 104 * k ** 2 * kmax ** 2 + 48 * k * kmax ** 3 -
            96 * kmax ** 4)) - 240 * Sqrt(1 - k / kmax) *
            (40 * C6 * (24 * k ** 3 - 9 * k ** 2 * kmax + 2 * k * kmax ** 2 + 4 * kmax ** 3) +
            C7 * kmax * (105 * k ** 4 - 250 * k ** 3 * kmax - 104 * k ** 2 * kmax ** 2 + 48 * k * kmax ** 3 +
            96 * kmax ** 4)) + 240 * Sqrt((k + kmin) / kmin) *
            (40 * C6 * (24 * k ** 3 + 9 * k ** 2 * kmin + 2 * k * kmin ** 2 - 4 * kmin ** 3) +
            C7 * kmin * (-105 * k ** 4 - 250 * k ** 3 * kmin + 104 * k ** 2 * kmin ** 2 + 48 * k * kmin ** 3 -
            96 * kmin ** 4)) - 240 * Sqrt(-1 + k / kmin) *
            (40 * C6 * (24 * k ** 3 - 9 * k ** 2 * kmin + 2 * k * kmin ** 2 + 4 * kmin ** 3) +
            C7 * kmin * (105 * k ** 4 - 250 * k ** 3 * kmin - 104 * k ** 2 * kmin ** 2 + 48 * k * kmin ** 3 +
            96 * kmin ** 4)) + 12600 * k ** 3 * (8 * C6 - C7 * k ** 2) * Pi -
            25200 * k ** 3 * (8 * C6 - C7 * k ** 2) * ArcTan(Sqrt(kmin / (k - kmin))) -
            25200 * k ** 3 * (8 * C6 - C7 * k ** 2) * Log(2 * Sqrt(k)) +
            25200 * k ** 3 * (8 * C6 - C7 * k ** 2) * Log(2 * (Sqrt(kmax) + Sqrt(-k + kmax))) +
            25200 * k ** 3 * (8 * C6 - C7 * k ** 2) * Log(2 * (Sqrt(kmax) + Sqrt(k + kmax))) -
            25200 * k ** 3 * (8 * C6 - C7 * k ** 2) * Log(2 * (Sqrt(kmin) + Sqrt(k + kmin))))) / (604800. * k ** 2))
        j2 = ((alpha ** 2 * (-3 * kmax * Sqrt((k + kmax) / kmax) *
            (112 * C6 * (185 * k ** 4 - 70 * k ** 3 * kmax - 168 * k ** 2 * kmax ** 2 - 16 * k * kmax ** 3 +
            32 * kmax ** 4) + C7 * (-945 * k ** 6 + 630 * k ** 5 * kmax + 6664 * k ** 4 * kmax ** 2 -
            3152 * k ** 3 * kmax ** 3 - 11136 * k ** 2 * kmax ** 4 - 1280 * k * kmax ** 5 + 2560 * kmax ** 6)) +
            3 * Sqrt(1 - k / kmax) * kmax * (112 * C6 *
            (185 * k ** 4 + 70 * k ** 3 * kmax - 168 * k ** 2 * kmax ** 2 + 16 * k * kmax ** 3 + 32 * kmax ** 4) +
            C7 * (-945 * k ** 6 - 630 * k ** 5 * kmax + 6664 * k ** 4 * kmax ** 2 + 3152 * k ** 3 * kmax ** 3 -
            11136 * k ** 2 * kmax ** 4 + 1280 * k * kmax ** 5 + 2560 * kmax ** 6)) +
            3 * kmin * Sqrt((k + kmin) / kmin) *
            (112 * C6 * (185 * k ** 4 - 70 * k ** 3 * kmin - 168 * k ** 2 * kmin ** 2 - 16 * k * kmin ** 3 +
            32 * kmin ** 4) + C7 * (-945 * k ** 6 + 630 * k ** 5 * kmin + 6664 * k ** 4 * kmin ** 2 -
            3152 * k ** 3 * kmin ** 3 - 11136 * k ** 2 * kmin ** 4 - 1280 * k * kmin ** 5 + 2560 * kmin ** 6)) -
            3 * Sqrt(-1 + k / kmin) * kmin * (112 * C6 *
            (185 * k ** 4 + 70 * k ** 3 * kmin - 168 * k ** 2 * kmin ** 2 + 16 * k * kmin ** 3 + 32 * kmin ** 4) +
            C7 * (-945 * k ** 6 - 630 * k ** 5 * kmin + 6664 * k ** 4 * kmin ** 2 + 3152 * k ** 3 * kmin ** 3 -
            11136 * k ** 2 * kmin ** 4 + 1280 * k * kmin ** 5 + 2560 * kmin ** 6)) +
            (2835 * k ** 5 * (16 * C6 + C7 * k ** 2) * Pi) / 2. -
            2835 * k ** 5 * (16 * C6 + C7 * k ** 2) * ArcTan(Sqrt(kmin / (k - kmin))) +
            2835 * k ** 5 * (16 * C6 + C7 * k ** 2) * Log(2 * Sqrt(k)) -
            2835 * k ** 5 * (16 * C6 + C7 * k ** 2) * Log(2 * (Sqrt(kmax) + Sqrt(-k + kmax))) -
            2835 * k ** 5 * (16 * C6 + C7 * k ** 2) * Log(2 * (Sqrt(kmax) + Sqrt(k + kmax))) +
            2835 * k ** 5 * (16 * C6 + C7 * k ** 2) * Log(2 * (Sqrt(kmin) + Sqrt(k + kmin))))) /
            (604800. * beta ** 2 * k ** 2))
        j3 = ((alpha ** 2 *
            (-10 * 1j * Sqrt((k + kmax) / kmax) *
            (24 * C6 * (448 * k ** 4 - 239 * k ** 3 * kmax + 522 * k ** 2 * kmax ** 2 + 88 * k * kmax ** 3 -
            176 * kmax ** 4) + C7 * kmax *
            (315 * k ** 5 - 3794 * k ** 4 * kmax - 2648 * k ** 3 * kmax ** 2 + 6000 * k ** 2 * kmax ** 3 +
            1408 * k * kmax ** 4 - 2816 * kmax ** 5)) +
            10 * 1j * Sqrt(1 - k / kmax) * (24 * C6 *
            (448 * k ** 4 + 239 * k ** 3 * kmax + 522 * k ** 2 * kmax ** 2 - 88 * k * kmax ** 3 - 176 * kmax ** 4) -
            C7 * kmax * (315 * k ** 5 + 3794 * k ** 4 * kmax - 2648 * k ** 3 * kmax ** 2 - 6000 * k ** 2 * kmax ** 3 +
            1408 * k * kmax ** 4 + 2816 * kmax ** 5)) +
            10 * 1j * Sqrt((k + kmin) / kmin) *
            (24 * C6 * (448 * k ** 4 - 239 * k ** 3 * kmin + 522 * k ** 2 * kmin ** 2 + 88 * k * kmin ** 3 -
            176 * kmin ** 4) + C7 * kmin *
            (315 * k ** 5 - 3794 * k ** 4 * kmin - 2648 * k ** 3 * kmin ** 2 + 6000 * k ** 2 * kmin ** 3 +
            1408 * k * kmin ** 4 - 2816 * kmin ** 5)) -
            20 * 1j * Sqrt(-1 + k / kmin) * (384 * C6 * (k - kmin) ** 2 * (14 * k ** 2 + 5 * k * kmin + 2 * kmin ** 2) +
            C7 * kmin * (945 * k ** 5 - 1162 * k ** 4 * kmin - 2696 * k ** 3 * kmin ** 2 + 1200 * k ** 2 * kmin ** 3 +
            256 * k * kmin ** 4 + 512 * kmin ** 5)) - 9450 * 1j * C7 * k ** 6 * Pi +
            18900 * 1j * C7 * k ** 6 * ArcTan(Sqrt(kmin / (k - kmin))) +
            3150 * 1j * k ** 3 * (72 * C6 * k + C7 * k ** 3) * Log(2 * Sqrt(k)) -
            3150 * 1j * k ** 3 * (72 * C6 * k + C7 * k ** 3) * Log(2 * (Sqrt(kmax) + Sqrt(-k + kmax))) +
            3150 * 1j * k ** 3 * (72 * C6 * k + C7 * k ** 3) * Log(2 * (Sqrt(kmax) + Sqrt(k + kmax))) -
            3150 * 1j * k ** 3 * (72 * C6 * k + C7 * k ** 3) * Log(2 * (Sqrt(kmin) + Sqrt(k + kmin))))) /
            (604800. * beta * k ** 2))
        return j1 + j2 + j3

    def calculate_Cterms(self, bgvars, a, potentials):
        """
        Calculate the constant terms needed for source integration.

        :param bgvars: (phi, phidot, H) background values at one timestep.
        :param a: scale factor at that timestep.
        :param potentials: (V, Vp, Vpp, Vppp) potential and derivatives.
        :returns: list [C1, ..., C7] of (possibly array-valued) constants.
        """
        k = self.srceqns.k
        phi, phidot, H = bgvars
        #Set ones array with same shape as self.k
        onekshape = np.ones(k.shape)
        V, Vp, Vpp, Vppp = potentials
        a2 = a**2
        H2 = H**2
        aH2 = a2*H2
        k2 = k**2
        #Set C_i values
        C1 = 1/H2 * (Vppp + 3 * phidot * Vpp + 2 * phidot * k2 /a2 )
        C2 = 3.5 * phidot /(aH2) * onekshape
        C3 = -4.5 * phidot * k / (aH2)
        C4 = -phidot / (aH2 * k)
        C5 = -1.5 * phidot * onekshape
        C6 = 2 * phidot * k
        C7 = - phidot / k
        Cterms = [C1, C2, C3, C4, C5, C6, C7]
        return Cterms

    def full_source_from_model(self, m, nix):
        """Use the data from a model at a timestep nix to calculate the full source term S.

        Computes the Bunch-Davies alpha and beta factors for the base class.
        """
        #Get background values
        bgvars = m.yresult[nix, 0:3, 0]
        a = m.ainit*np.exp(m.tresult[nix])
        #Set alpha and beta
        alpha = 1/(a*np.sqrt(2))
        beta = a*bgvars[2]
        return super(NoPhaseBunchDaviesSolution, self).full_source_from_model(m, nix, alpha=alpha, beta=beta)
class SimpleInverseSolution(AnalyticSolution):
    r"""Analytic solution using a simple inverse solution as the first order
    solution and with no phase information.

    .. math::
        \delta\varphi_1 = 1/k

        \delta\varphi^\dagger_1 = 1/k
    """
    def __init__(self, *args, **kwargs):
        super(SimpleInverseSolution, self).__init__(*args, **kwargs)
        # Integral terms summed by AnalyticSolution.full_source_from_model.
        self.J_terms = [self.J_A, self.J_B, self.J_C, self.J_D]
        self.calculate_Cterms = self.srceqns.calculate_Cterms

    def J_general_Atype(self, k, C, n):
        """Generic A-type integral of a C*q**n integrand (n == 1 is the
        logarithmic special case)."""
        k_low = k[0]
        k_high = k[-1]
        if n == 1:
            return 2*C*(1/n * k**(n-1) - np.log(k) + np.log(k_high) - k_low**n/(k*n))
        return 2*C*(-1/(n*(n-1))*k**(n-1) + k_high**(n-1)/(n-1) - k_low**n/(k*n))

    def J_general_Btype(self, k, C, n):
        """Generic B-type integral of a C*q**n integrand (n == 2 is the
        logarithmic special case)."""
        k_low = k[0]
        k_high = k[-1]
        if n == 2:
            return 2/3*C*(1/(k**2*(n-1)) * (k**(n+1) - k_low**(n+1)) + k*np.log(k_high/k))
        return 2/3*C*(-3/((n+1)*(n-2))*k**(n-1) + k*k_high**(n-2)/(n-2) - k_low**(n+1)/(k**2*(n+1)))

    def J_A(self, k, Cterms, **kwargs):
        """Solution for J_A: A-type terms with constants C1 (q**2) and C2 (q**4)."""
        return (self.J_general_Atype(k, Cterms[0], 2)
                + self.J_general_Atype(k, Cterms[1], 4))

    def J_B(self, k, Cterms, **kwargs):
        """Solution for J_B: B-type terms with constants C3 (q**3) and C4 (q**5)."""
        return (self.J_general_Btype(k, Cterms[2], 3)
                + self.J_general_Btype(k, Cterms[3], 5))

    def J_C(self, k, Cterms, **kwargs):
        """Solution for J_C: a single A-type term with constant C5 (q**2)."""
        return self.J_general_Atype(k, Cterms[4], 2)

    def J_D(self, k, Cterms, **kwargs):
        """Solution for J_D: B-type terms with constants C6 (q) and C7 (q**3)."""
        return (self.J_general_Btype(k, Cterms[5], 1)
                + self.J_general_Btype(k, Cterms[6], 3))
class ImaginaryInverseSolution(AnalyticSolution):
    r"""Analytic solution using an imaginary inverse solution as the first order
    solution and with no phase information.

    .. math::
        \delta\varphi_1 = 1/k i

        \delta\varphi^\dagger_1 = 1/k i

    where :math:`i=\sqrt(-1)`

    The integral terms are the negatives of those in SimpleInverseSolution
    (the -2 prefactors below versus +2 there).
    """
    def __init__(self, *args, **kwargs):
        super(ImaginaryInverseSolution, self).__init__(*args, **kwargs)
        # Integral terms summed by AnalyticSolution.full_source_from_model.
        self.J_terms = [self.J_A, self.J_B, self.J_C, self.J_D]
        self.calculate_Cterms = self.srceqns.calculate_Cterms

    def J_general_Atype(self, k, C, n):
        """Generic A-type integral for a C*q**n integrand (n == 1 is the
        logarithmic special case); negated relative to the real-solution class."""
        kmin = k[0]
        kmax = k[-1]
        if n == 1:
            J_general = -2*C*(1/n * k**(n-1) - np.log(k) + np.log(kmax) - kmin**n/(k*n))
        else:
            J_general = -2*C*(-1/(n*(n-1))*k**(n-1) + kmax**(n-1)/(n-1) - kmin**n/(k*n))
        return J_general

    def J_general_Btype(self, k, C, n):
        """Generic B-type integral for a C*q**n integrand (n == 2 is the
        logarithmic special case); negated relative to the real-solution class."""
        kmin = k[0]
        kmax = k[-1]
        if n == 2:
            J_general = -2/3*C*(1/(k**2*(n-1)) * (k**(n+1) - kmin**(n+1)) + k*np.log(kmax/k))
        else:
            J_general = -2/3*C*(-3/((n+1)*(n-2))*k**(n-1) + k*kmax**(n-2)/(n-2) - kmin**(n+1)/(k**2*(n+1)))
        return J_general

    def J_A(self, k, Cterms, **kwargs):
        """Solution for J_A which is the integral for A in terms of constants C1 and C2."""
        C1 = Cterms[0]
        C2 = Cterms[1]
        J_A = self.J_general_Atype(k, C1, 2) + self.J_general_Atype(k, C2, 4)
        return J_A

    def J_B(self, k, Cterms, **kwargs):
        """Solution for J_B which is the integral for B in terms of constants C3 and C4."""
        C3 = Cterms[2] #multiplies q**3
        C4 = Cterms[3] #multiplies q**5
        J_B = self.J_general_Btype(k, C3, 3) + self.J_general_Btype(k, C4, 5)
        return J_B

    def J_C(self, k, Cterms, **kwargs):
        """Solution for J_C which is the integral for C in terms of constant C5."""
        C5 = Cterms[4]
        J_C = self.J_general_Atype(k, C5, 2)
        return J_C

    def J_D(self, k, Cterms, **kwargs):
        """Solution for J_D which is the integral for D in terms of constants C6 and C7."""
        C6 = Cterms[5]
        C7 = Cterms[6]
        J_D = self.J_general_Btype(k, C6, 1) + self.J_general_Btype(k, C7, 3)
        return J_D
class OldSimpleInverseFull(AnalyticSolution):
    r"""Analytic solution using a simple inverse solution as the first order
    solution and with no phase information. This uses the solutions of the old equations
    and is not reliable. Should not be used in production.

    .. math::
        \delta\varphi_1 = 1/k

        \dN{\delta\varphi_1} = 1/k
    """
    def __init__(self, *args, **kwargs):
        super(OldSimpleInverseFull, self).__init__(*args, **kwargs)
        # All fourteen terms of the (old) full source equation.
        self.J_terms = [self.J_A1, self.J_A2, self.J_B1, self.J_B2,
                        self.J_C1, self.J_C2, self.J_D1, self.J_D2,
                        self.J_E1, self.J_E2, self.J_F1, self.J_F2,
                        self.J_G1, self.J_G2]
        self.calculate_Cterms = self.srceqns.calculate_Cterms

    def J_general_Atype(self, k, C, n):
        """Generic A-type integral for a C*q**n integrand (n == 1 is the
        logarithmic special case)."""
        kmin = k[0]
        kmax = k[-1]
        if n == 1:
            J_general = 2*C*(1/n * k**(n-1) - np.log(k) + np.log(kmax) - kmin**n/(k*n))
        else:
            J_general = 2*C*(-1/(n*(n-1))*k**(n-1) + kmax**(n-1)/(n-1) - kmin**n/(k*n))
        return J_general

    def J_general_Btype(self, k, C, n):
        """Generic B-type integral for a C*q**n integrand (n == 2 is the
        logarithmic special case)."""
        kmin = k[0]
        kmax = k[-1]
        if n == 2:
            J_general = 2/3*C*(1/(k**2*(n-1)) * (k**(n+1) - kmin**(n+1)) + k*np.log(kmax/k))
        else:
            J_general = 2/3*C*(-3/((n+1)*(n-2))*k**(n-1) + k*kmax**(n-2)/(n-2) - kmin**(n+1)/(k**2*(n+1)))
        return J_general

    def J_general_Etype(self, k, C, n):
        """Generic E-type integral (special cases at n == 1 and n == 3)."""
        kmin = k[0]
        kmax = k[-1]
        if n == 1:
            J_general = 2/3 * C * (23/15 + np.log(kmax/k) - kmin/k
                                   -2/5 * (k/kmax)**2 + 1/3 * (kmin/k)**3)
        elif n == 3:
            J_general = 2/3 * C * (-13/150*k**2 + 0.5*kmax**2 - 1/3*kmin**3/k
                                   + 2/5*(k**2*np.log(kmax/k) - 0.2*kmin**5/k**3))
        else:
            J_general = 2/3 * C * (-k**(n-1)*(1/(n*(n-1)) + 2/((n+2)*(n-3)))
                                   + kmax**(n-1)/(n-1) - kmin**n/(k*n)
                                   + 2/5*(k**2*kmax**(n-3)/(n-3) - kmin**(n+2)/(k**3*(n+2))))
        return J_general

    def J_general_Ftype(self, k, C, n):
        """Generic F-type integral (special case at n == 3)."""
        kmin = k[0]
        kmax = k[-1]
        if n == 3:
            J_general = 4/3 * C * (1/3 - 1/3 * (kmin/k)**3 + np.log(kmax/k))
        else:
            J_general = 4/3 * C * (k**(n-3)*3/(n*(3-n))
                                   + kmax**(n-3)/(n-3) - kmin**n/(n*k**3))
        return J_general

    def J_A1(self, k, Cterms, **kwargs):
        """Solution for the first A integral in terms of constants C1 and C2."""
        C1 = Cterms[0]
        C2 = Cterms[1]
        J_A1 = self.J_general_Atype(k, C1, 2) + self.J_general_Atype(k, C2, 4)
        return J_A1

    def J_A2(self, k, Cterms, **kwargs):
        """Solution for the second A integral in terms of constants C17 and C18."""
        C17 = Cterms[16]
        C18 = Cterms[17]
        J_A2 = self.J_general_Atype(k, C17, 2) + self.J_general_Atype(k, C18, 4)
        return J_A2

    def J_B1(self, k, Cterms, **kwargs):
        """Solution for the first B integral in terms of constants C3 and C4."""
        C3 = Cterms[2] #multiplies q**3
        C4 = Cterms[3] #multiplies q**5
        J_B1 = self.J_general_Btype(k, C3, 3) + self.J_general_Btype(k, C4, 5)
        return J_B1

    def J_B2(self, k, Cterms, **kwargs):
        """Solution for the second B integral in terms of constant C19."""
        C19 = Cterms[18] #multiplies q**3
        J_B2 = self.J_general_Btype(k, C19, 3)
        return J_B2

    def J_C1(self, k, Cterms, **kwargs):
        """Solution for the first C integral in terms of constant C5."""
        C5 = Cterms[4]
        J_C1 = self.J_general_Atype(k, C5, 2)
        return J_C1

    def J_C2(self, k, Cterms, **kwargs):
        """Solution for the second C integral in terms of constant C20."""
        C20 = Cterms[19]
        J_C2 = self.J_general_Atype(k, C20, 2)
        return J_C2

    def J_D1(self, k, Cterms, **kwargs):
        """Solution for the first D integral in terms of constants C6 and C7."""
        C6 = Cterms[5]
        C7 = Cterms[6]
        J_D1 = self.J_general_Btype(k, C6, 1) + self.J_general_Btype(k, C7, 3)
        return J_D1

    def J_D2(self, k, Cterms, **kwargs):
        """Solution for the second D integral in terms of constant C21."""
        C21 = Cterms[20]
        J_D2 = self.J_general_Btype(k, C21, 1)
        return J_D2

    def J_E1(self, k, Cterms, **kwargs):
        """Solution for the first E integral in terms of constants C8 and C9."""
        C8 = Cterms[7]
        C9 = Cterms[8]
        J_E1 = self.J_general_Etype(k, C8, 2) + self.J_general_Etype(k, C9, 4)
        return J_E1

    def J_E2(self, k, Cterms, **kwargs):
        """Solution for the second E integral in terms of constant C10."""
        C10 = Cterms[9]
        J_E2 = self.J_general_Etype(k, C10, 2)
        return J_E2

    def J_F1(self, k, Cterms, **kwargs):
        """Solution for the first F integral in terms of constants C11 and C12."""
        C11 = Cterms[10]
        C12 = Cterms[11]
        J_F1 = self.J_general_Ftype(k, C11, 2) + self.J_general_Ftype(k, C12, 4)
        return J_F1

    def J_F2(self, k, Cterms, **kwargs):
        """Solution for the second F integral in terms of constant C13."""
        C13 = Cterms[12]
        J_F2 = self.J_general_Ftype(k, C13, 2)
        return J_F2

    def J_G1(self, k, Cterms, **kwargs):
        """Solution for the first G integral in terms of constants C14 and C15."""
        C14 = Cterms[13]
        C15 = Cterms[14]
        J_G1 = self.J_general_Ftype(k, C14, 2) + self.J_general_Ftype(k, C15, 4)
        return J_G1

    def J_G2(self, k, Cterms, **kwargs):
        """Solution for the second G integral in terms of constant C16."""
        C16 = Cterms[15]
        J_G2 = self.J_general_Ftype(k, C16, 2)
        return J_G2
class SimpleInverseFull(AnalyticSolution):
    r"""Analytic solution using a simple inverse solution as the first order
    solution and with no phase information.

    .. math::
        \delta\varphi_1 = 1/k

        \delta\varphi^\dagger_1 = 1/k

    Unlike SimpleInverseSolution, the J terms here are generated dynamically
    from ``srceqns.J_params``: each entry's "pretermix" index selects one of
    the generic integral solutions below and its "n" gives the power of q.
    """

    # Maps the "pretermix" index from J_params to the generic solution
    # method (indices 0-3 are A/B types, 4 is E type, 5-6 are F types),
    # replacing the previous if/elif chain which left J_func unbound for
    # an unknown index.
    _JFUNC_NAMES = {
        0: "J_general_Atype",
        1: "J_general_Btype",
        2: "J_general_Atype",
        3: "J_general_Btype",
        4: "J_general_Etype",
        5: "J_general_Ftype",
        6: "J_general_Ftype",
    }

    def __init__(self, *args, **kwargs):
        super(SimpleInverseFull, self).__init__(*args, **kwargs)
        self.calculate_Cterms = self.srceqns.calculate_Cterms
        self.J_params = self.srceqns.J_params
        # Iterating the dict directly works on both Python 2 and 3
        # (the previous dict.iterkeys() call is Python-2-only).
        self.J_terms = dict((Jkey, self.J_factory(Jkey)) for Jkey in self.J_params)

    def J_factory(self, Jkey):
        """Return the J-term callable for `Jkey`, dispatching on its
        "pretermix" index.

        :raises ValueError: if the pretermix index is not recognised
            (previously this failed later with an obscure NameError).
        """
        pretermix = self.J_params[Jkey]["pretermix"]
        try:
            J_func = getattr(self, self._JFUNC_NAMES[pretermix])
        except KeyError:
            raise ValueError("Unknown pretermix index %r for J term %r"
                             % (pretermix, Jkey))

        def newJfunc(k, Cterms, **kwargs):
            # Look up the constant for this term and apply the generic solution.
            return J_func(k, Cterms[Jkey], self.J_params[Jkey]["n"])
        return newJfunc

    def J_general_Atype(self, k, C, n):
        """Generic A-type integral for a C*q**n integrand (n == 1 is the
        logarithmic special case)."""
        kmin = k[0]
        kmax = k[-1]
        if n == 1:
            J_general = 2*C*(1/n * k**(n-1) - np.log(k) + np.log(kmax) - kmin**n/(k*n))
        else:
            J_general = 2*C*(-1/(n*(n-1))*k**(n-1) + kmax**(n-1)/(n-1) - kmin**n/(k*n))
        return J_general

    def J_general_Btype(self, k, C, n):
        """Generic B-type integral for a C*q**n integrand (n == 2 is the
        logarithmic special case)."""
        kmin = k[0]
        kmax = k[-1]
        if n == 2:
            J_general = 2/3*C*(1/(k**2*(n-1)) * (k**(n+1) - kmin**(n+1)) + k*np.log(kmax/k))
        else:
            J_general = 2/3*C*(-3/((n+1)*(n-2))*k**(n-1) + k*kmax**(n-2)/(n-2) - kmin**(n+1)/(k**2*(n+1)))
        return J_general

    def J_general_Etype(self, k, C, n):
        """Generic E-type integral (special cases at n == 1 and n == 3)."""
        kmin = k[0]
        kmax = k[-1]
        if n == 1:
            J_general = 2/3 * C * (23/15 + np.log(kmax/k) - kmin/k
                                   -2/5 * (k/kmax)**2 + 1/3 * (kmin/k)**3)
        elif n == 3:
            J_general = 2/3 * C * (-13/150*k**2 + 0.5*kmax**2 - 1/3*kmin**3/k
                                   + 2/5*(k**2*np.log(kmax/k) - 0.2*kmin**5/k**3))
        else:
            J_general = 2/3 * C * (-k**(n-1)*(1/(n*(n-1)) + 2/((n+2)*(n-3)))
                                   + kmax**(n-1)/(n-1) - kmin**n/(k*n)
                                   + 2/5*(k**2*kmax**(n-3)/(n-3) - kmin**(n+2)/(k**3*(n+2))))
        return J_general

    def J_general_Ftype(self, k, C, n):
        """Generic F-type integral with a k**2 prefactor (special case at
        n == 3); note the extra k**2 factor relative to OldSimpleInverseFull."""
        kmin = k[0]
        kmax = k[-1]
        if n == 3:
            J_general = 4/3 * k**2 * C * (1/3 - 1/3 * (kmin/k)**3 + np.log(kmax/k))
        else:
            J_general = 4/3 * k**2 * C * (k**(n-3)*3/(n*(3-n))
                                          + kmax**(n-3)/(n-3) - kmin**n/(n*k**3))
        return J_general
| {
"repo_name": "ihuston/pyflation",
"path": "pyflation/solutions/analyticsolution.py",
"copies": "1",
"size": "30177",
"license": "bsd-3-clause",
"hash": 3001533420712452600,
"line_mean": 41.9274537696,
"line_max": 144,
"alpha_frac": 0.4353315439,
"autogenerated": false,
"ratio": 2.7220819051055387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3657413449005539,
"avg_score": null,
"num_lines": null
} |
"""Analytics relying on IVRE's data.
IVRE is an open-source network recon framework. See
<https://ivre.rocks/> to learn more about it.
Currently, this analytics provides:
- Estimated geographic location and Autonomous System (AS) of IP
addresses (based on MaxMind data, see
<https://dev.maxmind.com/geoip/geoip2/geolite2/>).
- DNS responses seen: links are created from IP addresses to
hostnames and vice versa, aka your own private Passive DNS
service.
- X509 certificates seen in TLS traffic: links are created:
- from IP addresses to certificates.
- from certificates to hostnames and IP addresses (via Subject
and Subject Alternative Names).
- from certificates to subjects and issuers (as a dedicated
observable type: CertificateSubject, via Subject and Issuer).
- certificate subjects to (other) certificates (same issuer or
subject).
- HTTP headers: links are created from IP addresses to hostnames
(and vice versa) based on Host: headers, and from IP addresses to
User-Agent and Server header values.
"""
import itertools
import logging
from pprint import pformat
from ivre.db import db
from ivre.utils import encode_b64
from core.analytics import InlineAnalytics, OneShotAnalytics
from core.errors import ObservableValidationError
from core.observables import (
AutonomousSystem,
Certificate,
CertificateSubject,
Email,
Hostname,
Ip,
Text,
Url,
)
LOG = logging.getLogger("Yeti-Ivre")
def _try_link(
    links,
    base_obs,
    obs_type,
    value,
    description,
    source,
    first_seen=None,
    last_seen=None,
):
    """Link `base_obs` to a new-or-existing observable of type `obs_type`.

    The target observable is looked up (or created) from `value` and the
    resulting link objects are added to the `links` set.  When `value`
    does not validate as an `obs_type` (ObservableValidationError), the
    link is silently skipped -- handy for fields that may contain either
    a hostname or an IP address, such as the commonName field of X509
    certificate subjects.
    """
    try:
        target = obs_type.get_or_create(value=value)
    except ObservableValidationError:
        # Not a valid observable of this type: skip quietly.
        return
    links.update(
        base_obs.link_to(
            target, description, source, first_seen=first_seen, last_seen=last_seen
        )
    )
class IvreMaxMind(InlineAnalytics, OneShotAnalytics):
    """Perform lookups in MaxMind databases using IVRE.

    It creates links from an `Ip` observable to an `AutonomousSystem`
    observable, and fills the `geoip` attribute of `Ip` observables with
    `country`, `region` and `city` fields.

    You can fetch or update the local MaxMind databases by running `ivre
    ipdata --download`.
    """

    default_values = {
        "name": "IVRE - MaxMind",
        "description": __doc__,
    }

    # Observable types this analytics accepts.
    ACTS_ON = ["Ip"]

    @classmethod
    def analyze(cls, observable, results):
        """Dispatch `observable` to the type-specific analyzer (Ip only)."""
        LOG.debug(
            "%s: begin analyze %r (type %s)",
            cls.__name__,
            observable,
            observable.__class__.__name__,
        )
        if isinstance(observable, Ip):
            return cls.analyze_ip(observable, results)
        LOG.warning(
            "%s: cannot analyze, unknown observable %r (type %s)",
            cls.__name__,
            observable,
            observable.__class__.__name__,
        )
        return []

    @classmethod
    def each(cls, observable):
        # InlineAnalytics entry point: no `results` container is available,
        # so pass None (analyze_ip guards against it).
        return cls.analyze(observable, None)

    @staticmethod
    def analyze_ip(ip, results):
        """Specific analyzer for Ip observables."""
        links = set()
        result = db.data.infos_byip(ip.value)
        if result is None:
            # No MaxMind data known for this address.
            return []
        if results is not None:
            results.update(raw=pformat(result))
        if "as_name" in result:
            asn = AutonomousSystem.get_or_create(
                value=result["as_name"],
                as_num=result["as_num"],
            )
            links.update(ip.active_link_to(asn, "asn#", "IVRE - MaxMind"))
        if "country_code" in result:
            # region/city are only meaningful when a country is known,
            # hence the nesting under the country_code check.
            ip.geoip = {"country": result["country_code"]}
            if "region_code" in result:
                ip.geoip["region"] = " / ".join(result["region_code"])
            if "city" in result:
                ip.geoip["city"] = result["city"]
            ip.save()
        # Only add context once per source.
        if all(context["source"] != "ivre_maxmind" for context in ip.context):
            result["source"] = "ivre_maxmind"
            ip.add_context(result)
        return list(links)
def _handle_cert(dbase, rec, links):
    """Process a passive record holding an X509 certificate.

    Creates the `Certificate` observable, links it to its subject and
    issuer (as `CertificateSubject` observables) and to every usable
    value found in the subject commonName and the subjectAltName
    extension.  Returns the `Certificate` observable.
    """
    raw_data = dbase.from_binary(rec["value"])
    cert = Certificate.from_data(raw_data, hash_sha256=rec["infos"]["sha256"])
    # Replace the binary value with its base64 representation in the record.
    rec["value"] = encode_b64(raw_data).decode()
    links.update(
        cert.link_to(
            CertificateSubject.get_or_create(value=rec["infos"]["subject_text"]),
            "cert-subject",
            "IVRE - X509 subject",
        )
    )
    links.update(
        cert.link_to(
            CertificateSubject.get_or_create(value=rec["infos"]["issuer_text"]),
            "cert-issuer",
            "IVRE - X509 issuer",
        )
    )
    commonname = rec["infos"]["subject"]["commonName"]
    if commonname:
        # Drop any leading wildcard label(s) ("*.") before linking.
        while commonname.startswith("*."):
            commonname = commonname[2:]
        if commonname:
            _try_link(
                links,
                cert,
                Hostname,
                commonname,
                "cert-commonname",
                "IVRE - X509 Subject commonName",
            )
    # subjectAltName entries are dispatched on their prefix; DNS entries
    # additionally get wildcard labels stripped.
    san_handlers = (
        ("DNS:", Hostname, True),
        ("IP Address:", Ip, False),
        ("email:", Email, False),
        ("URI:", Url, False),
    )
    for san in rec["infos"].get("san", []):
        for prefix, obs_type, strip_wildcard in san_handlers:
            if san.startswith(prefix):
                value = san[len(prefix):]
                if strip_wildcard:
                    while value.startswith("*."):
                        value = value[2:]
                if value:
                    _try_link(
                        links, cert, obs_type, value, "cert-san",
                        "IVRE - X509 subjectAltName"
                    )
                break
        else:
            LOG.debug("_handle_rec: cannot handle subjectAltName: %r", san)
    return cert
class IvrePassive(OneShotAnalytics):
    """Perform lookups in IVRE's passive database for records created with
    Zeek (formerly known as Bro, see <https://www.zeek.org/>) and the
    passiverecon script.

    It creates links from an `Ip` observable to:

    - `Certificate` and `CertificateSubject` observables, based on X509
      certificates seen in TLS traffic.
    - from those, to `Hostname`, `Ip`, `Email` and `Url` observables,
      based on the subject commonName and the subjectAltName values
    - `Hostname` observables, based on DNS answers and HTTP Host: headers

    From `Hostname` observables to:

    - `Ip` and other `Hostname` observables, based on DNS answers
    - `Ip`, based on HTTP Host: headers

    From `CertificateSubject` observables to:

    - (other) `Certificate` observables, based on X509 certificates seen
      in TLS traffic.

    Please refer to IVRE's documentation on how to collect passive data.
    """

    default_values = {
        "name": "IVRE - Passive",
        "description": __doc__,
    }

    # Observable types this analytics accepts.
    ACTS_ON = ["Ip", "Hostname", "CertificateSubject"]

    @classmethod
    def analyze(cls, observable, results):
        """Dispatch `observable` to the analyzer matching its type."""
        LOG.debug(
            "Begin analyze %s for %r (type %s)",
            cls.__name__,
            observable,
            observable.__class__.__name__,
        )
        if isinstance(observable, Ip):
            return cls.analyze_ip(observable, results)
        if isinstance(observable, Hostname):
            return cls.analyze_hostname(observable, results)
        if isinstance(observable, CertificateSubject):
            return cls.analyze_certsubj(observable, results)
        LOG.warning(
            "%s: cannot analyze, unknown observable %r (type %s)",
            cls.__name__,
            observable,
            observable.__class__.__name__,
        )
        return []

    @classmethod
    def analyze_ip(cls, ip, results):
        """Specific analyzer for Ip observables.

        Walks every passive record involving the address and creates
        links for DNS answers, HTTP Host:/Server:/User-Agent: headers
        and TLS server certificates.
        """
        links = set()
        # result maps a record category to the set of values seen; it is
        # stored as context on the Ip observable.
        result = {}
        for rec in db.passive.get(db.passive.searchhost(ip.value)):
            LOG.debug("%s.analyze_ip: record %r", cls.__name__, rec)
            if rec["recontype"] == "DNS_ANSWER":
                value = rec["value"]
                hostname = Hostname.get_or_create(value=value)
                # e.g. source "A-ANSWER" -> rec_type "dns-A"
                rec_type = "dns-%s" % rec["source"].split("-", 1)[0]
                result.setdefault(rec_type, set()).add(value)
                links.update(
                    ip.link_to(
                        hostname,
                        rec_type,
                        "IVRE - DNS-%s" % rec["source"],
                        first_seen=rec["firstseen"],
                        last_seen=rec["lastseen"],
                    )
                )
            elif rec["recontype"] == "HTTP_CLIENT_HEADER_SERVER":
                if rec["source"] == "HOST":
                    value = rec["value"]
                    result.setdefault("http-host", set()).add(value)
                    # Host: headers may carry an IP address rather than a
                    # hostname; _try_link() silently skips invalid values.
                    _try_link(
                        links,
                        ip,
                        Hostname,
                        value,
                        "http-host",
                        "IVRE - HTTP Host: header",
                        first_seen=rec["firstseen"],
                        last_seen=rec["lastseen"],
                    )
                else:
                    continue
            elif rec["recontype"] == "HTTP_SERVER_HEADER":
                if rec["source"] == "SERVER":
                    value = rec["value"]
                    result.setdefault("http-server", set()).add(value)
                    links.update(
                        ip.link_to(
                            Text.get_or_create(value=value),
                            "http-server",
                            "IVRE - HTTP Server: header",
                            first_seen=rec["firstseen"],
                            last_seen=rec["lastseen"],
                        )
                    )
                else:
                    continue
            elif rec["recontype"] == "HTTP_CLIENT_HEADER":
                if rec["source"] == "USER-AGENT":
                    value = rec["value"]
                    result.setdefault("http-user-agent", set()).add(value)
                    links.update(
                        ip.link_to(
                            Text.get_or_create(value=value),
                            # BUGFIX: this link type used to read
                            # "http-server" (copy-paste from the Server:
                            # header branch above); use "http-user-agent"
                            # to match the result key and the description.
                            "http-user-agent",
                            "IVRE - HTTP User-Agent: header",
                            first_seen=rec["firstseen"],
                            last_seen=rec["lastseen"],
                        )
                    )
                else:
                    continue
            elif rec["recontype"] == "SSL_SERVER":
                if rec["source"] == "cert":
                    cert = _handle_cert(db.passive, rec, links)
                    result.setdefault("ssl-cert", set()).add(cert.value)
                    links.update(
                        ip.link_to(
                            cert,
                            "ssl-cert",
                            "IVRE - SSL X509 certificate",
                            first_seen=rec["firstseen"],
                            last_seen=rec["lastseen"],
                        )
                    )
                else:
                    continue
            else:
                continue
        if result:
            results.update(
                raw=pformat({key: list(value) for key, value in result.items()})
            )
        # Only add context once per source.
        if all(context["source"] != "ivre_passive" for context in ip.context):
            ip.add_context({"source": "ivre_passive", "results": result})
        return list(links)

    @classmethod
    def analyze_hostname(cls, hostname, results):
        """Specific analyzer for Hostname observables.

        Looks the name up both as a DNS question (forward, with
        subdomains) and as a DNS answer (reverse) and links addresses
        and target names accordingly.
        """
        links = set()
        result = []
        for rec in itertools.chain(
            db.passive.get(db.passive.searchdns(hostname.value, subdomains=True)),
            db.passive.get(
                db.passive.searchdns(hostname.value, reverse=True, subdomains=True)
            ),
        ):
            LOG.debug("%s.analyze_hostname: record %r", cls.__name__, rec)
            host = Hostname.get_or_create(value=rec["value"])
            if "addr" in rec:
                # A/AAAA-style record: link the address to the name.
                links.update(
                    Ip.get_or_create(value=rec["addr"]).link_to(
                        host,
                        "dns-%s" % rec["source"].split("-", 1)[0],
                        "IVRE - DNS-%s" % rec["source"],
                        first_seen=rec["firstseen"],
                        last_seen=rec["lastseen"],
                    )
                )
            else:
                # CNAME/NS/MX-style record: link name to target name.
                links.update(
                    host.link_to(
                        Hostname.get_or_create(value=rec["targetval"]),
                        "dns-%s" % rec["source"].split("-", 1)[0],
                        "IVRE - DNS-%s" % rec["source"],
                        first_seen=rec["firstseen"],
                        last_seen=rec["lastseen"],
                    )
                )
            result.append(rec)
        results.update(raw=pformat(result))
        return list(links)

    @classmethod
    def analyze_certsubj(cls, subject, results):
        """Specific analyzer for CertificateSubject observables.

        Finds every certificate whose subject OR issuer matches, links
        the certificate to its fields via _handle_cert(), and links the
        server address that presented it.
        """
        links = set()
        result = []
        for rec in itertools.chain(
            db.passive.get(db.passive.searchcertsubject(subject.value)),
            db.passive.get(db.passive.searchcertissuer(subject.value)),
        ):
            LOG.debug("%s.analyze_certsubj: record %r", cls.__name__, rec)
            cert = _handle_cert(db.passive, rec, links)
            links.update(
                Ip.get_or_create(value=rec["addr"]).link_to(
                    cert,
                    "ssl-cert",
                    "IVRE - SSL X509 certificate",
                    first_seen=rec["firstseen"],
                    last_seen=rec["lastseen"],
                )
            )
            result.append(rec)
        results.update(raw=pformat(result))
        return list(links)
| {
"repo_name": "yeti-platform/yeti",
"path": "contrib/analytics/ivre_api/ivre_api.py",
"copies": "1",
"size": "14984",
"license": "apache-2.0",
"hash": -8328794815094073000,
"line_mean": 32.2977777778,
"line_max": 88,
"alpha_frac": 0.5073411639,
"autogenerated": false,
"ratio": 4.345707656612529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5353048820512528,
"avg_score": null,
"num_lines": null
} |
# analytics/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from . import views_admin
from django.conf.urls import re_path
urlpatterns = [
    # views_admin
    # Dashboard index and its form-processing endpoint.
    re_path(r'^$', views_admin.analytics_index_view, name='analytics_index',),
    re_path(r'^analytics_index_process/$',
        views_admin.analytics_index_process_view, name='analytics_index_process'),
    # Raw AnalyticsAction listings, filtered by voter or organization
    # we_vote_id.  NOTE: pattern order matters here -- the unfiltered
    # catch-all variant must stay last so the specific ones match first.
    re_path(r'^analytics_action_list/(?P<voter_we_vote_id>wv[\w]{2}voter[\w]+)/$',
        views_admin.analytics_action_list_view, name='analytics_action_list'),
    re_path(r'^analytics_action_list/(?P<organization_we_vote_id>wv[\w]{2}org[\w]+)/$',
        views_admin.analytics_action_list_view, name='analytics_action_list'),
    re_path(r'^analytics_action_list/(?P<incorrect_integer>[0-9]+)/$',
        views_admin.analytics_action_list_view, name='analytics_action_list'),  # Needed for bug with bad data
    re_path(r'^analytics_action_list/$',
        views_admin.analytics_action_list_view, name='analytics_action_list'),
    # Per-voter analytics augmentation.
    re_path(r'^augment_voter_analytics_process/(?P<voter_we_vote_id>wv[\w]{2}voter[\w]+)/$',
        views_admin.augment_voter_analytics_process_view, name='augment_voter_analytics_process'),
    # Organization-scoped metrics views and their processing endpoints.
    re_path(r'^organization_analytics_index/$',
        views_admin.organization_analytics_index_view, name='organization_analytics_index',),
    re_path(r'^organization_daily_metrics/$',
        views_admin.organization_daily_metrics_view, name='organization_daily_metrics'),
    re_path(r'^organization_daily_metrics_process/$',
        views_admin.organization_daily_metrics_process_view, name='organization_daily_metrics_process'),
    re_path(r'^organization_election_metrics/$',
        views_admin.organization_election_metrics_view, name='organization_election_metrics'),
    re_path(r'^organization_election_metrics_process/$',
        views_admin.organization_election_metrics_process_view, name='organization_election_metrics_process'),
    # Sitewide metrics views and their processing endpoints.
    re_path(r'^sitewide_daily_metrics/$', views_admin.sitewide_daily_metrics_view, name='sitewide_daily_metrics'),
    re_path(r'^sitewide_daily_metrics_process/$',
        views_admin.sitewide_daily_metrics_process_view, name='sitewide_daily_metrics_process'),
    re_path(r'^sitewide_election_metrics/$', views_admin.sitewide_election_metrics_view, name='sitewide_election_metrics'),
    re_path(r'^sitewide_election_metrics_process/$',
        views_admin.sitewide_election_metrics_process_view, name='sitewide_election_metrics_process'),
    re_path(r'^sitewide_voter_metrics/$', views_admin.sitewide_voter_metrics_view, name='sitewide_voter_metrics'),
    re_path(r'^sitewide_voter_metrics_process/$',
        views_admin.sitewide_voter_metrics_process_view, name='sitewide_voter_metrics_process'),
]
| {
"repo_name": "wevote/WeVoteServer",
"path": "analytics/urls.py",
"copies": "1",
"size": "2764",
"license": "mit",
"hash": 4496144601076813000,
"line_mean": 63.2790697674,
"line_max": 123,
"alpha_frac": 0.7094790159,
"autogenerated": false,
"ratio": 3.3997539975399755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4609233013439975,
"avg_score": null,
"num_lines": null
} |
# analytics/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from . import views_admin
from django.conf.urls import url
urlpatterns = [
    # views_admin
    # Dashboard index.
    url(r'^$', views_admin.analytics_index_view, name='analytics_index',),
    # Raw AnalyticsAction listings, filtered by voter or organization
    # we_vote_id.  NOTE: pattern order matters here -- the unfiltered
    # catch-all variant must stay last so the specific ones match first.
    url(r'^analytics_action_list/(?P<voter_we_vote_id>wv[\w]{2}voter[\w]+)/$',
        views_admin.analytics_action_list_view, name='analytics_action_list'),
    url(r'^analytics_action_list/(?P<organization_we_vote_id>wv[\w]{2}org[\w]+)/$',
        views_admin.analytics_action_list_view, name='analytics_action_list'),
    url(r'^analytics_action_list/(?P<incorrect_integer>[0-9]+)/$',
        views_admin.analytics_action_list_view, name='analytics_action_list'),  # Needed for bug with bad data
    url(r'^analytics_action_list/$',
        views_admin.analytics_action_list_view, name='analytics_action_list'),
    # Per-voter analytics augmentation.
    url(r'^augment_voter_analytics_process/(?P<voter_we_vote_id>wv[\w]{2}voter[\w]+)/$',
        views_admin.augment_voter_analytics_process_view, name='augment_voter_analytics_process'),
    # Organization-scoped metrics views and their processing endpoints.
    url(r'^organization_analytics_index/$',
        views_admin.organization_analytics_index_view, name='organization_analytics_index',),
    url(r'^organization_daily_metrics/$',
        views_admin.organization_daily_metrics_view, name='organization_daily_metrics'),
    url(r'^organization_daily_metrics_process/$',
        views_admin.organization_daily_metrics_process_view, name='organization_daily_metrics_process'),
    url(r'^organization_election_metrics/$',
        views_admin.organization_election_metrics_view, name='organization_election_metrics'),
    url(r'^organization_election_metrics_process/$',
        views_admin.organization_election_metrics_process_view, name='organization_election_metrics_process'),
    # Sitewide metrics views and their processing endpoints.
    url(r'^sitewide_daily_metrics/$', views_admin.sitewide_daily_metrics_view, name='sitewide_daily_metrics'),
    url(r'^sitewide_daily_metrics_process/$',
        views_admin.sitewide_daily_metrics_process_view, name='sitewide_daily_metrics_process'),
    url(r'^sitewide_election_metrics/$', views_admin.sitewide_election_metrics_view, name='sitewide_election_metrics'),
    url(r'^sitewide_election_metrics_process/$',
        views_admin.sitewide_election_metrics_process_view, name='sitewide_election_metrics_process'),
    url(r'^sitewide_voter_metrics/$', views_admin.sitewide_voter_metrics_view, name='sitewide_voter_metrics'),
    url(r'^sitewide_voter_metrics_process/$',
        views_admin.sitewide_voter_metrics_process_view, name='sitewide_voter_metrics_process'),
]
| {
"repo_name": "jainanisha90/WeVoteServer",
"path": "analytics/urls.py",
"copies": "1",
"size": "2566",
"license": "mit",
"hash": -1529149770348114700,
"line_mean": 61.5853658537,
"line_max": 119,
"alpha_frac": 0.7088854248,
"autogenerated": false,
"ratio": 3.4535666218034993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9644176099135422,
"avg_score": 0.003655189493615443,
"num_lines": 41
} |
# analytics/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import augment_one_voter_analytics_action_entries_without_election_id, \
augment_voter_analytics_action_entries_without_election_id, \
save_organization_daily_metrics, save_organization_election_metrics, \
save_sitewide_daily_metrics, save_sitewide_election_metrics, save_sitewide_voter_metrics
from .models import ACTION_WELCOME_VISIT, AnalyticsAction, AnalyticsManager, display_action_constant_human_readable, \
fetch_action_constant_number_from_constant_string, OrganizationDailyMetrics, OrganizationElectionMetrics, \
SitewideDailyMetrics, SitewideElectionMetrics, SitewideVoterMetrics
from admin_tools.views import redirect_to_sign_in_page
from config.base import get_environment_variable
import csv
from datetime import date, datetime, timedelta
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.db.models import Q
from django.shortcuts import render
from django.utils.timezone import now
from election.models import Election, ElectionManager
from exception.models import print_to_log
import json
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_date_as_integer_to_date, convert_date_to_date_as_integer, \
convert_date_to_we_vote_date_string, convert_to_int, positive_value_exists
from wevote_settings.models import WeVoteSetting, WeVoteSettingsManager
logger = wevote_functions.admin.get_logger(__name__)

# Endpoints on the We Vote master API server exposing the analytics
# "sync out" feeds (counterparts of the *_sync_out_view functions in this
# module).  NOTE(review): these constants are not referenced in the code
# visible here -- presumably imported and used by sync callers elsewhere;
# confirm before removing or renaming.
ANALYTICS_ACTION_SYNC_URL = "https://api.wevoteusa.org/apis/v1/analyticsActionSyncOut/"
ORGANIZATION_ELECTION_METRICS_SYNC_URL = "https://api.wevoteusa.org/apis/v1/organizationElectionMetricsSyncOut/"
SITEWIDE_DAILY_METRICS_SYNC_URL = "https://api.wevoteusa.org/apis/v1/sitewideDailyMetricsSyncOut/"
SITEWIDE_ELECTION_METRICS_SYNC_URL = "https://api.wevoteusa.org/apis/v1/sitewideElectionMetricsSyncOut/"
SITEWIDE_VOTER_METRICS_SYNC_URL = "https://api.wevoteusa.org/apis/v1/sitewideVoterMetricsSyncOut/"
# Root URL of the public web application, from environment configuration.
WEB_APP_ROOT_URL = get_environment_variable("WEB_APP_ROOT_URL")
def analytics_action_sync_out_view(request):  # analyticsActionSyncOut
    """Export AnalyticsAction rows as JSON or CSV.

    Requires 'analytics_admin' authority.  GET parameters:
      starting_date_as_integer -- earliest date_as_integer to include;
        defaults to 30 days ago when missing or zero
      ending_date_as_integer -- latest date_as_integer to include; optional
      return_csv_format -- when truthy, return a CSV attachment instead of JSON
    """
    status = ''
    success = True
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'analytics_admin'}
    if not voter_has_authority(request, authority_required):
        json_data = {
            'success': False,
            'status': 'ANALYTICS_ACTION_SYNC_OUT-NOT_ANALYTICS_ADMIN '
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    starting_date_as_integer = convert_to_int(request.GET.get('starting_date_as_integer', 0))
    ending_date_as_integer = convert_to_int(request.GET.get('ending_date_as_integer', 0))
    return_csv_format = positive_value_exists(request.GET.get('return_csv_format', False))
    generated_starting_date_as_integer = 0
    try:
        analytics_action_query = AnalyticsAction.objects.all().order_by('-id')
        if positive_value_exists(starting_date_as_integer):
            analytics_action_query = analytics_action_query.filter(date_as_integer__gte=starting_date_as_integer)
        else:
            # No start date supplied: default the window to the last 30 days.
            one_month_ago = now() - timedelta(days=30)
            generated_starting_date_as_integer = convert_date_to_date_as_integer(one_month_ago)
            analytics_action_query = analytics_action_query.filter(
                date_as_integer__gte=generated_starting_date_as_integer)
        if positive_value_exists(ending_date_as_integer):
            analytics_action_query = analytics_action_query.filter(date_as_integer__lte=ending_date_as_integer)
        # else:
        #     # By default only return up to two days ago, so we are sure that the post-processing is done
        #     yesterday = now() - timedelta(days=1)
        #     generated_ending_date_as_integer = convert_date_to_date_as_integer(yesterday)
        #     analytics_action_query = analytics_action_query.filter(
        #         date_as_integer__lte=generated_ending_date_as_integer)
        # Render exact_time as a formatted string at the database level.
        analytics_action_query = analytics_action_query.extra(
            select={'exact_time': "to_char(exact_time, 'YYYY-MM-DD HH24:MI:SS')"})
        analytics_action_list_dict = analytics_action_query.values(
            'id', 'action_constant', 'authentication_failed_twice',
            'ballot_item_we_vote_id', 'date_as_integer',
            'exact_time', 'first_visit_today', 'google_civic_election_id',
            'is_bot', 'is_desktop', 'is_mobile', 'is_signed_in', 'is_tablet',
            'organization_we_vote_id', 'state_code', 'user_agent', 'voter_we_vote_id')
        if analytics_action_list_dict:
            analytics_action_list_raw = list(analytics_action_list_dict)
            if return_csv_format:
                # Create the HttpResponse object with the appropriate CSV header.
                filename = "analyticsActionSyncOut"
                if positive_value_exists(starting_date_as_integer):
                    filename += "-" + str(starting_date_as_integer)
                elif positive_value_exists(generated_starting_date_as_integer):
                    filename += "-" + str(generated_starting_date_as_integer)
                if positive_value_exists(ending_date_as_integer):
                    filename += "-" + str(ending_date_as_integer)
                filename += ".csv"
                response = HttpResponse(content_type='text/csv')
                response['Content-Disposition'] = 'attachment; filename="' + filename + '"'
                writer = csv.writer(response)
                # NOTE(review): this header lists 'exact_time' first while the
                # .values() call above declares it in the middle.  The data
                # rows below are built from dict values() order, which for
                # extra(select=...) columns depends on Django internals --
                # confirm the columns actually line up with this header.
                writer.writerow(['exact_time', 'id', 'action_constant', 'authentication_failed_twice',
                                 'ballot_item_we_vote_id', 'date_as_integer',
                                 'first_visit_today', 'google_civic_election_id',
                                 'is_bot', 'is_desktop', 'is_mobile', 'is_signed_in', 'is_tablet',
                                 'organization_we_vote_id', 'state_code', 'user_agent', 'voter_we_vote_id',
                                 'action_constant_text'])
                for one_dict in analytics_action_list_raw:
                    one_row = list(one_dict.values())
                    # Append the human-readable name of the action constant.
                    one_row.append(display_action_constant_human_readable(one_dict['action_constant']))
                    writer.writerow(one_row)
                return response
            else:
                analytics_action_list_json = []
                for one_dict in analytics_action_list_raw:
                    one_dict['action_constant_text'] = display_action_constant_human_readable(
                        one_dict['action_constant'])
                    analytics_action_list_json.append(one_dict)
                return HttpResponse(json.dumps(analytics_action_list_json), content_type='application/json')
    except Exception as e:
        status += 'QUERY_FAILURE: ' + str(e) + ' '
        success = False

    # Reached when the query raised or returned no rows.
    status += 'ANALYTICS_ACTION_LIST_EMPTY '
    json_data = {
        'success': success,
        'status': status,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def organization_daily_metrics_sync_out_view(request):  # organizationDailyMetricsSyncOut
    """Export OrganizationDailyMetrics rows as JSON or CSV.

    Requires 'analytics_admin' authority.  GET parameters:
      starting_date_as_integer -- earliest date_as_integer to include;
        defaults to 30 days ago when missing or zero
      ending_date_as_integer -- latest date_as_integer to include; optional
      return_csv_format -- when truthy, return a CSV attachment instead of JSON
    """
    status = ''
    success = True
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'analytics_admin'}
    if not voter_has_authority(request, authority_required):
        json_data = {
            'success': False,
            'status': 'ORGANIZATION_DAILY_METRICS_SYNC_OUT-NOT_ANALYTICS_ADMIN '
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    starting_date_as_integer = convert_to_int(request.GET.get('starting_date_as_integer', 0))
    ending_date_as_integer = convert_to_int(request.GET.get('ending_date_as_integer', 0))
    return_csv_format = positive_value_exists(request.GET.get('return_csv_format', False))
    generated_starting_date_as_integer = 0
    # Single source of truth for both the query field list and the CSV
    # header, so the two can never drift apart again.
    metrics_fields = (
        'id', 'authenticated_visitors_today', 'authenticated_visitors_total', 'auto_followers_total',
        'date_as_integer', 'entrants_visiting_ballot',
        'followers_total', 'followers_visiting_ballot',
        'issues_linked_total', 'new_auto_followers_today', 'new_followers_today', 'new_visitors_today',
        'organization_public_positions', 'organization_we_vote_id',
        'visitors_today', 'visitors_total', 'voter_guide_entrants', 'voter_guide_entrants_today'
    )
    try:
        metrics_query = OrganizationDailyMetrics.objects.all().order_by('-id')
        if positive_value_exists(starting_date_as_integer):
            metrics_query = metrics_query.filter(date_as_integer__gte=starting_date_as_integer)
        else:
            # No start date supplied: default the window to the last 30 days.
            one_month_ago = now() - timedelta(days=30)
            generated_starting_date_as_integer = convert_date_to_date_as_integer(one_month_ago)
            metrics_query = metrics_query.filter(
                date_as_integer__gte=generated_starting_date_as_integer)
        if positive_value_exists(ending_date_as_integer):
            metrics_query = metrics_query.filter(date_as_integer__lte=ending_date_as_integer)
        metrics_list_dict = metrics_query.values(*metrics_fields)
        if metrics_list_dict:
            metrics_list_raw = list(metrics_list_dict)
            if return_csv_format:
                # Create the HttpResponse object with the appropriate CSV header.
                filename = "organizationDailyMetricsSyncOut"
                if positive_value_exists(starting_date_as_integer):
                    filename += "-" + str(starting_date_as_integer)
                elif positive_value_exists(generated_starting_date_as_integer):
                    filename += "-" + str(generated_starting_date_as_integer)
                if positive_value_exists(ending_date_as_integer):
                    filename += "-" + str(ending_date_as_integer)
                filename += ".csv"
                response = HttpResponse(content_type='text/csv')
                response['Content-Disposition'] = 'attachment; filename="' + filename + '"'
                writer = csv.writer(response)
                # BUGFIX: the header previously contained an extra 'exact_time'
                # column that is not among the queried fields, which shifted
                # every column after 'entrants_visiting_ballot' by one.  The
                # header now mirrors metrics_fields exactly.
                writer.writerow(metrics_fields)
                for one_dict in metrics_list_raw:
                    writer.writerow(list(one_dict.values()))
                return response
            else:
                return HttpResponse(json.dumps(metrics_list_raw), content_type='application/json')
    except Exception as e:
        status += 'QUERY_FAILURE: ' + str(e) + ' '
        success = False

    # Reached when the query raised or returned no rows.
    status += 'ORGANIZATION_DAILY_METRICS_LIST_EMPTY '
    json_data = {
        'success': success,
        'status': status,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def organization_election_metrics_sync_out_view(request):  # organizationElectionMetricsSyncOut
    """Export OrganizationElectionMetrics rows as JSON or CSV.

    Unlike the daily-metrics views, the date window is applied to the
    elections themselves (via ElectionManager) and the metrics are then
    filtered by the google_civic_election_ids of those elections.
    Requires 'analytics_admin' authority.  GET parameters:
      starting_date_as_integer -- earliest election date (defaults to 30 days ago)
      ending_date_as_integer -- latest election date (defaults to today)
      return_csv_format -- when truthy, return a CSV attachment instead of JSON
    """
    status = ''
    success = True
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'analytics_admin'}
    if not voter_has_authority(request, authority_required):
        json_data = {
            'success': False,
            'status': 'ORGANIZATION_ELECTION_METRICS_SYNC_OUT-NOT_ANALYTICS_ADMIN '
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    starting_date_as_integer = convert_to_int(request.GET.get('starting_date_as_integer', 0))
    ending_date_as_integer = convert_to_int(request.GET.get('ending_date_as_integer', 0))
    return_csv_format = positive_value_exists(request.GET.get('return_csv_format', False))
    if not positive_value_exists(starting_date_as_integer):
        # Default window start: 30 days ago.
        one_month_ago = now() - timedelta(days=30)
        starting_date_as_integer = convert_date_to_date_as_integer(one_month_ago)
    if not positive_value_exists(ending_date_as_integer):
        # Default window end: today.
        time_now = now()
        ending_date_as_integer = convert_date_to_date_as_integer(time_now)
    # Collect the ids of all elections inside the date window.
    election_manager = ElectionManager()
    results = election_manager.retrieve_elections_between_dates(
        starting_date_as_integer=starting_date_as_integer,
        ending_date_as_integer=ending_date_as_integer
    )
    election_list = results['election_list']
    google_civic_election_id_list = []
    for one_election in election_list:
        google_civic_election_id_list.append(one_election.google_civic_election_id)

    try:
        metrics_query = OrganizationElectionMetrics.objects.all().order_by('-id')
        metrics_query = metrics_query.filter(google_civic_election_id__in=google_civic_election_id_list)
        metrics_list_dict = metrics_query.values(
            'id', 'authenticated_visitors_total', 'election_day_text',
            'entrants_friends_only_positions', 'entrants_friends_only_positions_with_comments',
            'entrants_public_positions', 'entrants_public_positions_with_comments',
            'entrants_took_position', 'entrants_visited_ballot',
            'followers_at_time_of_election', 'followers_friends_only_positions',
            'followers_friends_only_positions_with_comments', 'followers_public_positions',
            'followers_public_positions_with_comments', 'followers_took_position',
            'followers_visited_ballot', 'google_civic_election_id', 'new_auto_followers', 'new_followers',
            'organization_we_vote_id', 'visitors_total', 'voter_guide_entrants'
        )
        if metrics_list_dict:
            metrics_list_raw = list(metrics_list_dict)
            if return_csv_format:
                # Create the HttpResponse object with the appropriate CSV header.
                filename = "organizationElectionMetricsSyncOut"
                filename += "-" + str(starting_date_as_integer)
                filename += "-" + str(ending_date_as_integer)
                filename += ".csv"
                response = HttpResponse(content_type='text/csv')
                response['Content-Disposition'] = 'attachment; filename="' + filename + '"'
                writer = csv.writer(response)
                # This header matches the .values() field order above, one to
                # one -- keep the two lists in sync.
                writer.writerow(
                    [
                        'id', 'authenticated_visitors_total', 'election_day_text',
                        'entrants_friends_only_positions', 'entrants_friends_only_positions_with_comments',
                        'entrants_public_positions', 'entrants_public_positions_with_comments',
                        'entrants_took_position', 'entrants_visited_ballot',
                        'followers_at_time_of_election', 'followers_friends_only_positions',
                        'followers_friends_only_positions_with_comments', 'followers_public_positions',
                        'followers_public_positions_with_comments', 'followers_took_position',
                        'followers_visited_ballot', 'google_civic_election_id', 'new_auto_followers', 'new_followers',
                        'organization_we_vote_id', 'visitors_total', 'voter_guide_entrants'
                    ])
                for one_dict in metrics_list_raw:
                    one_row = list(one_dict.values())
                    writer.writerow(one_row)
                return response
            else:
                analytics_action_list_json = []
                for one_dict in metrics_list_raw:
                    analytics_action_list_json.append(one_dict)
                return HttpResponse(json.dumps(analytics_action_list_json), content_type='application/json')
    except Exception as e:
        status += 'QUERY_FAILURE: ' + str(e) + ' '
        success = False

    # Reached when the query raised or returned no rows.
    status += 'ORGANIZATION_ELECTION_METRICS_LIST_EMPTY '
    json_data = {
        'success': success,
        'status': status,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def sitewide_daily_metrics_sync_out_view(request):  # sitewideDailyMetricsSyncOut
    """
    Export SitewideDailyMetrics rows as CSV or JSON.

    Requires the 'analytics_admin' authority. Optional GET parameters:
    starting_date_as_integer / ending_date_as_integer bound the export
    (defaults to the last 30 days when no start is given), and
    return_csv_format switches the response from JSON to a CSV download.
    """
    status = ''
    success = True
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'analytics_admin'}
    if not voter_has_authority(request, authority_required):
        json_data = {
            'success': False,
            'status': 'SITEWIDE_DAILY_METRICS_SYNC_OUT-NOT_ANALYTICS_ADMIN '
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    starting_date_as_integer = convert_to_int(request.GET.get('starting_date_as_integer', 0))
    ending_date_as_integer = convert_to_int(request.GET.get('ending_date_as_integer', 0))
    return_csv_format = positive_value_exists(request.GET.get('return_csv_format', False))
    generated_starting_date_as_integer = 0

    # Single source of truth for the exported columns: used both for the
    # values() query and the CSV header row, so they cannot drift apart.
    field_names = [
        'id', 'authenticated_visitors_today', 'authenticated_visitors_total',
        'ballot_views_today', 'date_as_integer', 'entered_full_address',
        'friend_entrants_today', 'friends_only_positions',
        'individuals_with_friends_only_positions', 'individuals_with_positions',
        'individuals_with_public_positions',
        'issue_follows_today', 'issue_follows_total',
        'issues_followed_today', 'issues_followed_total',
        'issues_linked_today', 'issues_linked_total',
        'new_visitors_today', 'organization_public_positions',
        'organizations_auto_followed_today', 'organizations_auto_followed_total',
        'organizations_followed_today', 'organizations_followed_total',
        'organizations_signed_in_total', 'organizations_with_linked_issues',
        'organizations_with_new_positions_today', 'organizations_with_positions',
        'visitors_today', 'visitors_total',
        'voter_guide_entrants_today', 'voter_guides_viewed_today',
        'voter_guides_viewed_total', 'welcome_page_entrants_today',
    ]

    try:
        metrics_query = SitewideDailyMetrics.objects.all().order_by('-id')
        if positive_value_exists(starting_date_as_integer):
            metrics_query = metrics_query.filter(date_as_integer__gte=starting_date_as_integer)
        else:
            # Default to the last 30 days when no starting date was provided
            one_month_ago = now() - timedelta(days=30)
            generated_starting_date_as_integer = convert_date_to_date_as_integer(one_month_ago)
            metrics_query = metrics_query.filter(
                date_as_integer__gte=generated_starting_date_as_integer)
        if positive_value_exists(ending_date_as_integer):
            metrics_query = metrics_query.filter(date_as_integer__lte=ending_date_as_integer)
        metrics_list_dict = metrics_query.values(*field_names)
        if metrics_list_dict:
            metrics_list_raw = list(metrics_list_dict)
            if return_csv_format:
                # Create the HttpResponse object with the appropriate CSV header.
                filename = "sitewideDailyMetricsSyncOut"
                if positive_value_exists(starting_date_as_integer):
                    filename += "-" + str(starting_date_as_integer)
                elif positive_value_exists(generated_starting_date_as_integer):
                    filename += "-" + str(generated_starting_date_as_integer)
                if positive_value_exists(ending_date_as_integer):
                    filename += "-" + str(ending_date_as_integer)
                filename += ".csv"
                response = HttpResponse(content_type='text/csv')
                response['Content-Disposition'] = 'attachment; filename="' + filename + '"'
                writer = csv.writer(response)
                writer.writerow(field_names)
                for one_dict in metrics_list_raw:
                    # values() preserves the field order requested above
                    writer.writerow(list(one_dict.values()))
                return response
            else:
                return HttpResponse(json.dumps(metrics_list_raw), content_type='application/json')
    except Exception as e:
        status += 'QUERY_FAILURE: ' + str(e) + ' '
        success = False

    status += 'SITEWIDE_DAILY_METRICS_LIST_EMPTY '
    json_data = {
        'success': success,
        'status': status,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def sitewide_election_metrics_sync_out_view(request):  # sitewideElectionMetricsSyncOut
    """
    Export SitewideElectionMetrics rows as CSV or JSON, limited to elections
    held between starting_date_as_integer and ending_date_as_integer
    (defaults: 30 days ago through today). Requires the 'analytics_admin'
    authority; return_csv_format=1 switches the response from JSON to CSV.
    """
    status = ''
    success = True
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'analytics_admin'}
    if not voter_has_authority(request, authority_required):
        json_data = {
            'success': False,
            'status': 'SITEWIDE_ELECTION_METRICS_SYNC_OUT-NOT_ANALYTICS_ADMIN '
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    starting_date_as_integer = convert_to_int(request.GET.get('starting_date_as_integer', 0))
    ending_date_as_integer = convert_to_int(request.GET.get('ending_date_as_integer', 0))
    return_csv_format = positive_value_exists(request.GET.get('return_csv_format', False))
    if not positive_value_exists(starting_date_as_integer):
        one_month_ago = now() - timedelta(days=30)
        starting_date_as_integer = convert_date_to_date_as_integer(one_month_ago)
    if not positive_value_exists(ending_date_as_integer):
        time_now = now()
        ending_date_as_integer = convert_date_to_date_as_integer(time_now)

    # Only export metrics for elections held inside the requested window
    election_manager = ElectionManager()
    results = election_manager.retrieve_elections_between_dates(
        starting_date_as_integer=starting_date_as_integer,
        ending_date_as_integer=ending_date_as_integer
    )
    google_civic_election_id_list = \
        [one_election.google_civic_election_id for one_election in results['election_list']]

    # Single source of truth for the exported columns: used both for the
    # values() query and the CSV header row, so they cannot drift apart.
    field_names = [
        'id', 'authenticated_visitors_total',
        'election_day_text', 'entered_full_address',
        'friends_only_positions', 'friends_only_positions_with_comments', 'google_civic_election_id',
        'individuals_with_friends_only_positions', 'individuals_with_positions',
        'individuals_with_public_positions',
        'issues_followed',
        'organization_public_positions', 'organizations_auto_followed', 'organizations_followed',
        'organizations_signed_in', 'organizations_with_positions',
        'public_positions', 'public_positions_with_comments',
        'unique_voters_that_auto_followed_organizations', 'unique_voters_that_followed_organizations',
        'visitors_total', 'voter_guide_entries',
        'voter_guide_views', 'voter_guides_viewed',
    ]

    try:
        metrics_query = SitewideElectionMetrics.objects.all().order_by('-id')
        metrics_query = metrics_query.filter(google_civic_election_id__in=google_civic_election_id_list)
        metrics_list_dict = metrics_query.values(*field_names)
        if metrics_list_dict:
            metrics_list_raw = list(metrics_list_dict)
            if return_csv_format:
                # Create the HttpResponse object with the appropriate CSV header.
                filename = "sitewideElectionMetricsSyncOut"
                filename += "-" + str(starting_date_as_integer)
                filename += "-" + str(ending_date_as_integer)
                filename += ".csv"
                response = HttpResponse(content_type='text/csv')
                response['Content-Disposition'] = 'attachment; filename="' + filename + '"'
                writer = csv.writer(response)
                writer.writerow(field_names)
                for one_dict in metrics_list_raw:
                    # values() preserves the field order requested above
                    writer.writerow(list(one_dict.values()))
                return response
            else:
                return HttpResponse(json.dumps(metrics_list_raw), content_type='application/json')
    except Exception as e:
        status += 'QUERY_FAILURE: ' + str(e) + ' '
        success = False

    status += 'SITEWIDE_ELECTION_METRICS_LIST_EMPTY '
    json_data = {
        'success': success,
        'status': status,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def sitewide_voter_metrics_sync_out_view(request):  # sitewideVoterMetricsSyncOut
    """
    Export SitewideVoterMetrics rows whose last_action_date falls between
    starting_date_as_integer and ending_date_as_integer (defaults: 30 days ago
    through now) as CSV or JSON. Requires the 'analytics_admin' authority.
    """
    status = ''
    success = True
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'analytics_admin'}
    if not voter_has_authority(request, authority_required):
        json_data = {
            'success': False,
            'status': 'SITEWIDE_VOTER_METRICS_SYNC_OUT-NOT_ANALYTICS_ADMIN '
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    starting_date_as_integer = convert_to_int(request.GET.get('starting_date_as_integer', 0))
    ending_date_as_integer = convert_to_int(request.GET.get('ending_date_as_integer', 0))
    return_csv_format = positive_value_exists(request.GET.get('return_csv_format', False))
    if positive_value_exists(starting_date_as_integer):
        starting_date = convert_date_as_integer_to_date(starting_date_as_integer)
    else:
        starting_date = now() - timedelta(days=30)
        starting_date_as_integer = convert_date_to_date_as_integer(starting_date)
    if positive_value_exists(ending_date_as_integer):
        ending_date = convert_date_as_integer_to_date(ending_date_as_integer)
    else:
        ending_date = now()
        ending_date_as_integer = convert_date_to_date_as_integer(ending_date)

    # Single source of truth for the exported columns: used both for the
    # values() query and the CSV header row, so they cannot drift apart.
    field_names = [
        'id', 'actions_count', 'ballot_visited',
        'comments_entered_friends_only', 'comments_entered_public',
        'days_visited', 'elections_viewed',
        'entered_full_address', 'issues_followed',
        'last_action_date', 'last_calculated_date_as_integer',
        'organizations_followed', 'positions_entered_friends_only', 'positions_entered_public',
        'seconds_on_site', 'signed_in_facebook', 'signed_in_twitter', 'signed_in_with_email',
        'signed_in_with_sms_phone_number',
        'time_until_sign_in', 'voter_guides_viewed',
        'voter_we_vote_id', 'welcome_visited',
    ]

    try:
        metrics_query = SitewideVoterMetrics.objects.all().order_by('-id')
        metrics_query = metrics_query.filter(last_action_date__gte=starting_date)
        metrics_query = metrics_query.filter(last_action_date__lte=ending_date)
        # Render last_action_date as text so it serializes cleanly to CSV/JSON
        metrics_query = metrics_query.extra(
            select={'last_action_date': "to_char(last_action_date, 'YYYY-MM-DD HH24:MI:SS')"})
        metrics_list_dict = metrics_query.values(*field_names)
        if metrics_list_dict:
            metrics_list_raw = list(metrics_list_dict)
            if return_csv_format:
                # Create the HttpResponse object with the appropriate CSV header.
                filename = "sitewideVoterMetricsSyncOut"
                filename += "-" + str(starting_date_as_integer)
                filename += "-" + str(ending_date_as_integer)
                filename += ".csv"
                response = HttpResponse(content_type='text/csv')
                response['Content-Disposition'] = 'attachment; filename="' + filename + '"'
                writer = csv.writer(response)
                writer.writerow(field_names)
                for one_dict in metrics_list_raw:
                    # values() preserves the field order requested above
                    writer.writerow(list(one_dict.values()))
                return response
            else:
                return HttpResponse(json.dumps(metrics_list_raw), content_type='application/json')
    except Exception as e:
        status += 'QUERY_FAILURE: ' + str(e) + ' '
        success = False

    status += 'SITEWIDE_VOTER_METRICS_LIST_EMPTY '
    json_data = {
        'success': success,
        'status': status,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
@login_required
def analytics_index_view(request):
    """
    Analytics dashboard landing page.

    Shows the three most recent SitewideElectionMetrics, SitewideDailyMetrics,
    OrganizationElectionMetrics and SitewideVoterMetrics rows (all read from
    the 'analytics' database), plus the date analytics were last processed.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    voter_allowed_to_see_organization_analytics = True
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    date_to_process = convert_to_int(request.GET.get('date_to_process', 0))
    analytics_date_as_integer_last_processed = \
        convert_to_int(request.GET.get('analytics_date_as_integer_last_processed', 0))
    # Three most recent election-level rollups
    sitewide_election_metrics_list = []
    try:
        sitewide_election_metrics_query = SitewideElectionMetrics.objects.using('analytics')\
            .order_by('-election_day_text')
        sitewide_election_metrics_query = sitewide_election_metrics_query[:3]
        sitewide_election_metrics_list = list(sitewide_election_metrics_query)
    except SitewideElectionMetrics.DoesNotExist:
        # This is fine
        pass
    # Three most recent daily rollups
    sitewide_daily_metrics_list = []
    try:
        sitewide_daily_metrics_query = SitewideDailyMetrics.objects.using('analytics').order_by('-date_as_integer')
        sitewide_daily_metrics_query = sitewide_daily_metrics_query[:3]
        sitewide_daily_metrics_list = list(sitewide_daily_metrics_query)
    except SitewideDailyMetrics.DoesNotExist:
        # This is fine
        pass
    # Top three organizations by followers who visited their ballot
    organization_election_metrics_list = []
    try:
        organization_election_metrics_query = OrganizationElectionMetrics.objects.using('analytics').\
            order_by('-followers_visited_ballot')
        organization_election_metrics_query = organization_election_metrics_query[:3]
        organization_election_metrics_list = list(organization_election_metrics_query)
    except OrganizationElectionMetrics.DoesNotExist:
        # This is fine
        pass
    # Three most recently active voters, excluding single-action welcome bounces
    sitewide_voter_metrics_list = []
    try:
        sitewide_voter_metrics_query = SitewideVoterMetrics.objects.using('analytics').order_by('-last_action_date')
        # Don't return the welcome page bounces
        sitewide_voter_metrics_query = sitewide_voter_metrics_query.exclude(welcome_visited=1, actions_count=1)
        sitewide_voter_metrics_query = sitewide_voter_metrics_query[:3]
        sitewide_voter_metrics_list = list(sitewide_voter_metrics_query)
    except SitewideVoterMetrics.DoesNotExist:
        # This is fine
        pass
    election_list = Election.objects.order_by('-election_day_text')
    # The stored setting (when present) overrides the URL parameter above
    we_vote_settings_manager = WeVoteSettingsManager()
    results = we_vote_settings_manager.fetch_setting_results('analytics_date_as_integer_last_processed')
    if results['we_vote_setting_found']:
        analytics_date_as_integer_last_processed = convert_to_int(results['setting_value'])
    analytics_date_last_processed = None
    if positive_value_exists(analytics_date_as_integer_last_processed):
        analytics_date_last_processed = convert_date_as_integer_to_date(analytics_date_as_integer_last_processed)
    messages_on_stage = get_messages(request)
    template_values = {
        'analytics_date_as_integer_last_processed': analytics_date_as_integer_last_processed,
        'analytics_date_last_processed': analytics_date_last_processed,
        'messages_on_stage': messages_on_stage,
        'WEB_APP_ROOT_URL': WEB_APP_ROOT_URL,
        'sitewide_election_metrics_list': sitewide_election_metrics_list,
        'sitewide_daily_metrics_list': sitewide_daily_metrics_list,
        'sitewide_voter_metrics_list': sitewide_voter_metrics_list,
        'organization_election_metrics_list': organization_election_metrics_list,
        'voter_allowed_to_see_organization_analytics': voter_allowed_to_see_organization_analytics,
        'state_code': state_code,
        'google_civic_election_id': google_civic_election_id,
        'election_list': election_list,
        'date_to_process': date_to_process,
    }
    return render(request, 'analytics/index.html', template_values)
@login_required
def analytics_index_process_view(request):
    """
    Persist the 'analytics_date_as_integer_last_processed' setting when a
    positive value is supplied, then redirect back to the analytics dashboard
    preserving the election and state filters.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    analytics_date_as_integer_last_processed = \
        convert_to_int(request.GET.get('analytics_date_as_integer_last_processed', 0))

    we_vote_settings_manager = WeVoteSettingsManager()
    if positive_value_exists(analytics_date_as_integer_last_processed):
        # Update this value in the settings table: analytics_date_as_integer_last_processed.
        # NOTE(review): the save_setting result was never inspected by the original
        # code either; the success message is shown unconditionally.
        we_vote_settings_manager.save_setting(
            setting_name="analytics_date_as_integer_last_processed",
            setting_value=analytics_date_as_integer_last_processed,
            value_type=WeVoteSetting.INTEGER)
        messages.add_message(request, messages.INFO, 'Analytics processing date updated.')

    return HttpResponseRedirect(reverse('analytics:analytics_index', args=()) +
                                "?google_civic_election_id=" + str(google_civic_election_id) +
                                "&state_code=" + str(state_code))
@login_required
def organization_analytics_index_view(request):
    """
    Show the three most recent per-election and per-day analytics rollups
    for a single organization (read from the 'analytics' database).
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    organization_we_vote_id = request.GET.get('organization_we_vote_id', '')

    election_metrics = []
    try:
        election_query = OrganizationElectionMetrics.objects.using('analytics') \
            .order_by('-election_day_text') \
            .filter(organization_we_vote_id__iexact=organization_we_vote_id)
        election_metrics = list(election_query[:3])
    except OrganizationElectionMetrics.DoesNotExist:
        pass  # No election metrics for this organization yet

    daily_metrics = []
    try:
        daily_query = OrganizationDailyMetrics.objects.using('analytics') \
            .order_by('-date_as_integer') \
            .filter(organization_we_vote_id__iexact=organization_we_vote_id)
        daily_metrics = list(daily_query[:3])
    except OrganizationDailyMetrics.DoesNotExist:
        pass  # No daily metrics for this organization yet

    voter_allowed_to_see_organization_analytics = False  # To be implemented
    template_values = {
        'messages_on_stage': get_messages(request),
        'organization_election_metrics_list': election_metrics,
        'organization_daily_metrics_list': daily_metrics,
        'voter_allowed_to_see_organization_analytics': voter_allowed_to_see_organization_analytics,
        'state_code': state_code,
        'google_civic_election_id': google_civic_election_id,
        'organization_we_vote_id': organization_we_vote_id,
    }
    return render(request, 'analytics/organization_analytics_index.html', template_values)
@login_required
def organization_daily_metrics_process_view(request):
    """
    Recalculate OrganizationDailyMetrics for one organization since
    date_as_integer, then redirect back to the daily metrics page.
    :param request:
    :return:
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'political_data_manager'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = request.GET.get('google_civic_election_id', 0)
    organization_we_vote_id = request.GET.get('organization_we_vote_id', '')
    state_code = request.GET.get('state_code', '')
    changes_since_this_date_as_integer = convert_to_int(request.GET.get('date_as_integer', 0))
    through_date_as_integer = convert_to_int(request.GET.get('through_date_as_integer', 0))

    # A starting date is mandatory; bounce back with an error when absent
    if not positive_value_exists(changes_since_this_date_as_integer):
        messages.add_message(request, messages.ERROR, 'date_as_integer required.')
        return HttpResponseRedirect(reverse('analytics:organization_daily_metrics', args=()) +
                                    "?google_civic_election_id=" + str(google_civic_election_id) +
                                    "&state_code=" + str(state_code))

    # Called for its side effects; the result was never inspected
    save_organization_daily_metrics(organization_we_vote_id, changes_since_this_date_as_integer)

    return HttpResponseRedirect(reverse('analytics:organization_daily_metrics', args=()) +
                                "?google_civic_election_id=" + str(google_civic_election_id) +
                                "&state_code=" + str(state_code) +
                                "&date_as_integer=" + str(changes_since_this_date_as_integer) +
                                "&through_date_as_integer=" + str(through_date_as_integer)
                                )
@login_required
def analytics_action_list_view(request, voter_we_vote_id=False, organization_we_vote_id=False, incorrect_integer=0):
    """
    List raw AnalyticsAction entries, optionally filtered by voter,
    organization, election, a date window, and a free-form search string.
    :param request:
    :param voter_we_vote_id: restrict the list to one voter's actions
    :param organization_we_vote_id: restrict the list to one organization's actions
    :param incorrect_integer: unused; retained for URL-pattern compatibility
    :return:
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    analytics_action_search = request.GET.get('analytics_action_search', '')
    show_user_agent = request.GET.get('show_user_agent', '')
    date_as_integer = convert_to_int(request.GET.get('date_as_integer', 0))
    through_date_as_integer = convert_to_int(request.GET.get('through_date_as_integer', date_as_integer))
    start_date = None
    if positive_value_exists(date_as_integer):
        start_date = convert_date_as_integer_to_date(date_as_integer)
    through_date = None
    if positive_value_exists(through_date_as_integer):
        # +1 so the displayed through-date covers the whole day; this can yield an
        # invalid date integer (e.g. a "32nd"), hence the fallback below
        through_date_as_integer_modified = through_date_as_integer + 1
        try:
            through_date = convert_date_as_integer_to_date(through_date_as_integer_modified)
        except Exception as e:
            through_date = convert_date_as_integer_to_date(through_date_as_integer)
    analytics_action_list = []
    messages_on_stage = get_messages(request)
    try:
        analytics_action_query = AnalyticsAction.objects.using('analytics').order_by('-id')
        if positive_value_exists(date_as_integer):
            analytics_action_query = analytics_action_query.filter(date_as_integer__gte=date_as_integer)
        if positive_value_exists(through_date_as_integer):
            analytics_action_query = analytics_action_query.filter(
                date_as_integer__lte=through_date_as_integer)
        if positive_value_exists(voter_we_vote_id):
            analytics_action_query = analytics_action_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
        if positive_value_exists(google_civic_election_id):
            analytics_action_query = analytics_action_query.filter(google_civic_election_id=google_civic_election_id)
        if positive_value_exists(organization_we_vote_id):
            analytics_action_query = analytics_action_query.filter(
                organization_we_vote_id__iexact=organization_we_vote_id)
        if positive_value_exists(analytics_action_search):
            # Each word in the search string is OR-ed across all searchable columns
            search_words = analytics_action_search.split()
            for one_word in search_words:
                one_word_integer = convert_to_int(one_word)
                action_constant_integer = fetch_action_constant_number_from_constant_string(one_word)
                filters = []
                if positive_value_exists(action_constant_integer):
                    new_filter = Q(action_constant=action_constant_integer)
                    filters.append(new_filter)
                new_filter = Q(ballot_item_we_vote_id__iexact=one_word)
                filters.append(new_filter)
                if positive_value_exists(one_word_integer):
                    new_filter = Q(date_as_integer=one_word_integer)
                    filters.append(new_filter)
                if positive_value_exists(one_word_integer):
                    new_filter = Q(google_civic_election_id=one_word_integer)
                    filters.append(new_filter)
                if positive_value_exists(one_word_integer):
                    new_filter = Q(id=one_word_integer)
                    filters.append(new_filter)
                new_filter = Q(organization_we_vote_id__iexact=one_word)
                filters.append(new_filter)
                new_filter = Q(state_code__iexact=one_word)
                filters.append(new_filter)
                new_filter = Q(voter_we_vote_id__iexact=one_word)
                filters.append(new_filter)
                # Add the first query
                if len(filters):
                    final_filters = filters.pop()
                    # ...and "OR" the remaining items in the list
                    for item in filters:
                        final_filters |= item
                    analytics_action_query = analytics_action_query.filter(final_filters)
        # Allow a larger page when the query is already narrowed down
        if positive_value_exists(voter_we_vote_id) or positive_value_exists(organization_we_vote_id) \
                or positive_value_exists(date_as_integer) or positive_value_exists(through_date_as_integer):
            analytics_action_query = analytics_action_query[:500]
        else:
            analytics_action_query = analytics_action_query[:200]
        analytics_action_list = list(analytics_action_query)
    except AnalyticsAction.DoesNotExist:
        # This is fine. (Was OrganizationDailyMetrics.DoesNotExist — a copy/paste
        # mistake; this query is against AnalyticsAction.)
        pass
    template_values = {
        'messages_on_stage': messages_on_stage,
        'analytics_action_list': analytics_action_list,
        'analytics_action_search': analytics_action_search,
        'google_civic_election_id': google_civic_election_id,
        'state_code': state_code,
        'organization_we_vote_id': organization_we_vote_id,
        'voter_we_vote_id': voter_we_vote_id,
        'show_user_agent': show_user_agent,
        'date_as_integer': date_as_integer,
        'start_date': start_date,
        'through_date_as_integer': through_date_as_integer,
        'through_date': through_date,
    }
    return render(request, 'analytics/analytics_action_list.html', template_values)
@login_required
def augment_voter_analytics_process_view(request, voter_we_vote_id):
    """
    Backfill analytics data for one voter: refresh the first-visit-today flags
    and fill in missing google_civic_election_id values on their
    AnalyticsAction entries, then redirect back to that voter's action list.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'political_data_manager'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    changes_since_this_date_as_integer = convert_to_int(request.GET.get('date_as_integer', 0))

    analytics_manager = AnalyticsManager()
    # Called for its side effects; the result was never inspected
    analytics_manager.update_first_visit_today_for_one_voter(voter_we_vote_id)
    results = augment_one_voter_analytics_action_entries_without_election_id(voter_we_vote_id)
    messages.add_message(request, messages.INFO,
                         str(results['analytics_updated_count']) + ' analytics entries updated.<br />')

    return HttpResponseRedirect(reverse('analytics:analytics_action_list', args=(voter_we_vote_id,)) +
                                "?google_civic_election_id=" + str(google_civic_election_id) +
                                "&state_code=" + str(state_code) +
                                "&date_as_integer=" + str(changes_since_this_date_as_integer)
                                )
@login_required
def organization_daily_metrics_view(request):
    """
    Display every daily metrics row for one organization, newest first.
    :param request:
    :return:
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    organization_we_vote_id = request.GET.get('organization_we_vote_id', '')

    daily_metrics = []
    try:
        daily_query = OrganizationDailyMetrics.objects.using('analytics') \
            .order_by('-date_as_integer') \
            .filter(organization_we_vote_id__iexact=organization_we_vote_id)
        daily_metrics = list(daily_query)
    except OrganizationDailyMetrics.DoesNotExist:
        pass  # No rows yet for this organization

    template_values = {
        'messages_on_stage': get_messages(request),
        'organization_daily_metrics_list': daily_metrics,
        'google_civic_election_id': google_civic_election_id,
        'state_code': state_code,
    }
    return render(request, 'analytics/organization_daily_metrics.html', template_values)
@login_required
def organization_election_metrics_process_view(request):
    """
    Recalculate OrganizationElectionMetrics for one organization — or for every
    organization with activity in the given election — then redirect back.
    :param request:
    :return:
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'political_data_manager'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    organization_we_vote_id = request.GET.get('organization_we_vote_id', '')

    # Every exit path returns to the same listing with the same filters
    redirect_url = (reverse('analytics:organization_election_metrics', args=()) +
                    "?google_civic_election_id=" + str(google_civic_election_id) +
                    "&state_code=" + str(state_code))

    if not positive_value_exists(google_civic_election_id):
        messages.add_message(request, messages.ERROR, 'google_civic_election_id required.')
        return HttpResponseRedirect(redirect_url)

    analytics_manager = AnalyticsManager()
    if positive_value_exists(organization_we_vote_id):
        # Recalculate for just the organization that was requested
        one_organization_results = save_organization_election_metrics(google_civic_election_id, organization_we_vote_id)
        messages.add_message(request, messages.INFO, one_organization_results['status'])
    else:
        # Recalculate for every organization with activity in this election
        results = analytics_manager.retrieve_organization_list_with_election_activity(google_civic_election_id)
        if results['organization_we_vote_id_list_found']:
            for one_organization_we_vote_id in results['organization_we_vote_id_list']:
                save_organization_election_metrics(google_civic_election_id, one_organization_we_vote_id)

    return HttpResponseRedirect(redirect_url)
@login_required
def organization_election_metrics_view(request):
    """
    Show OrganizationElectionMetrics rows, filterable by election and
    organization, ordered by how many followers visited their ballot.
    :param request:
    :return:
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    organization_we_vote_id = request.GET.get('organization_we_vote_id', '')

    metrics_list = []
    try:
        metrics_query = OrganizationElectionMetrics.objects.using('analytics') \
            .order_by('-followers_visited_ballot')
        if positive_value_exists(google_civic_election_id):
            metrics_query = metrics_query.filter(google_civic_election_id=google_civic_election_id)
        if positive_value_exists(organization_we_vote_id):
            metrics_query = metrics_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
        metrics_list = list(metrics_query)
    except OrganizationElectionMetrics.DoesNotExist:
        pass  # Nothing recorded yet

    election_list = Election.objects.order_by('-election_day_text')
    template_values = {
        'messages_on_stage': get_messages(request),
        'WEB_APP_ROOT_URL': WEB_APP_ROOT_URL,
        'organization_election_metrics_list': metrics_list,
        'google_civic_election_id': google_civic_election_id,
        'organization_we_vote_id': organization_we_vote_id,
        'election_list': election_list,
        'state_code': state_code,
    }
    return render(request, 'analytics/organization_election_metrics.html', template_values)
@login_required
def sitewide_daily_metrics_process_view(request):
    """
    Recalculate SitewideDailyMetrics for the window from date_as_integer
    through through_date_as_integer (defaults to just the starting day), then
    redirect back to the sitewide daily metrics listing with the same window.
    :param request:
    :return:
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'political_data_manager'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    changes_since_this_date_as_integer = convert_to_int(request.GET.get('date_as_integer', 0))
    # When no through-date is given, process only the single starting day
    through_date_as_integer = convert_to_int(request.GET.get('through_date_as_integer',
                                                             changes_since_this_date_as_integer))
    # NOTE(review): the first-visit refresh and election-id augmentation steps
    # below were disabled at some point; only the daily rollup still runs.
    # analytics_manager = AnalyticsManager()
    # first_visit_today_results = \
    #     analytics_manager.update_first_visit_today_for_all_voters_since_date(
    #         changes_since_this_date_as_integer, through_date_as_integer)
    #
    # augment_results = augment_voter_analytics_action_entries_without_election_id(
    #     changes_since_this_date_as_integer, through_date_as_integer)
    results = save_sitewide_daily_metrics(changes_since_this_date_as_integer, through_date_as_integer)
    # messages.add_message(
    #     request, messages.INFO,
    #     str(first_visit_today_results['first_visit_today_count']) + ' first visit updates.<br />' +
    #     'augment-analytics_updated_count: ' + str(augment_results['analytics_updated_count']) + '<br />' +
    #     'sitewide_daily_metrics_saved_count: ' + str(results['sitewide_daily_metrics_saved_count']) + '')
    messages.add_message(
        request, messages.INFO,
        'sitewide_daily_metrics_saved_count: ' + str(results['sitewide_daily_metrics_saved_count']) + '')
    return HttpResponseRedirect(reverse('analytics:sitewide_daily_metrics', args=()) +
                                "?google_civic_election_id=" + str(google_civic_election_id) +
                                "&state_code=" + str(state_code) +
                                "&date_as_integer=" + str(changes_since_this_date_as_integer) +
                                "&through_date_as_integer=" + str(through_date_as_integer)
                                )
@login_required
def sitewide_daily_metrics_view(request):
    """List stored sitewide daily metrics, optionally bounded by dates.

    ``date_as_integer`` and ``through_date_as_integer`` (YYYYMMDD) bound
    the rows shown; at most 180 rows (roughly six months) are rendered.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    date_as_integer = convert_to_int(request.GET.get('date_as_integer', 0))
    through_date_as_integer = convert_to_int(request.GET.get('through_date_as_integer', date_as_integer))
    start_date = None
    if positive_value_exists(date_as_integer):
        start_date = convert_date_as_integer_to_date(date_as_integer)
    # Show the day after the through date so the displayed range reads inclusively
    through_date = None
    if positive_value_exists(through_date_as_integer):
        through_date = convert_date_as_integer_to_date(through_date_as_integer + 1)
    daily_metrics = []
    try:
        daily_query = SitewideDailyMetrics.objects.using('analytics').order_by('-date_as_integer')
        if positive_value_exists(date_as_integer):
            daily_query = daily_query.filter(date_as_integer__gte=date_as_integer)
        if positive_value_exists(through_date_as_integer):
            daily_query = daily_query.filter(date_as_integer__lte=through_date_as_integer)
        daily_metrics = daily_query[:180]  # Limit to no more than 6 months
    except SitewideDailyMetrics.DoesNotExist:
        pass  # Nothing stored yet
    template_values = {
        'messages_on_stage': get_messages(request),
        'sitewide_daily_metrics_list': daily_metrics,
        'google_civic_election_id': google_civic_election_id,
        'state_code': state_code,
        'date_as_integer': date_as_integer,
        'through_date_as_integer': through_date_as_integer,
        'start_date': start_date,
        'through_date': through_date,
    }
    return render(request, 'analytics/sitewide_daily_metrics.html', template_values)
@login_required
def sitewide_election_metrics_process_view(request):
    """Recalculate and store sitewide metrics for a single election.

    Requires ``google_civic_election_id``; with or without it, redirects
    back to the sitewide_election_metrics page.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'political_data_manager'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    # Both the error and the success path land on the same page
    destination = (reverse('analytics:sitewide_election_metrics', args=()) +
                   "?google_civic_election_id=" + str(google_civic_election_id) +
                   "&state_code=" + str(state_code))
    if not positive_value_exists(google_civic_election_id):
        messages.add_message(request, messages.ERROR, 'google_civic_election_id required.')
        return HttpResponseRedirect(destination)
    save_sitewide_election_metrics(google_civic_election_id)
    messages.add_message(request, messages.INFO,
                         ' NEED TO UPGRADE TO INCLUDE NATIONAL ELECTION TO INCLUDE STATE')
    return HttpResponseRedirect(destination)
@login_required
def sitewide_election_metrics_view(request):
    """List sitewide metrics for every election on record.

    NOTE: ``google_civic_election_id`` and ``state_code`` are echoed back
    to the template but are not applied to the query -- every stored
    election's metrics row is listed.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    metrics_list = []
    try:
        metrics_list = list(
            SitewideElectionMetrics.objects.using('analytics').order_by('-election_day_text'))
    except SitewideElectionMetrics.DoesNotExist:
        pass  # No metrics stored yet
    template_values = {
        'messages_on_stage': get_messages(request),
        'sitewide_election_metrics_list': metrics_list,
        'google_civic_election_id': google_civic_election_id,
        'state_code': state_code,
        'election_list': Election.objects.order_by('-election_day_text'),
    }
    return render(request, 'analytics/sitewide_election_metrics.html', template_values)
@login_required
def sitewide_voter_metrics_process_view(request):
    """Regenerate per-voter sitewide metrics for a date range.

    GET parameters:
        augment_voter_data: when truthy, first run
            ``update_first_visit_today_for_all_voters_since_date`` and
            ``augment_voter_analytics_action_entries_without_election_id``
            over the window.
        erase_existing_voter_metrics_data: when truthy, skip the save step
            (the erase itself is not implemented yet -- see placeholder below).
        date_as_integer / through_date_as_integer: window bounds; the end
            defaults to the start.
        google_civic_election_id / state_code: only passed through to the
            redirect URL.
    Redirects back to the sitewide_voter_metrics page with a summary message.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'political_data_manager'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    augment_voter_data = request.GET.get('augment_voter_data', '')
    # NOTE: GET values arrive as strings, so any supplied value (even "0") is
    # truthy-checked by positive_value_exists below; False is only the
    # absent-parameter default.
    erase_existing_voter_metrics_data = request.GET.get('erase_existing_voter_metrics_data', False)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    changes_since_this_date_as_integer = convert_to_int(request.GET.get('date_as_integer', 0))
    through_date_as_integer = convert_to_int(request.GET.get('through_date_as_integer',
                                                             changes_since_this_date_as_integer))
    first_visit_today_count = 0
    sitewide_voter_metrics_updated = 0
    if positive_value_exists(augment_voter_data):
        # Step 1: flag each voter's first visit of the day within the window
        message = "[sitewide_voter_metrics_process_view, start: " + str(changes_since_this_date_as_integer) + "" \
                  ", end: " + str(through_date_as_integer) + ", " \
                  "STARTING update_first_visit_today_for_all_voters_since_date]"
        print_to_log(logger=logger, exception_message_optional=message)
        analytics_manager = AnalyticsManager()
        first_visit_today_results = analytics_manager.update_first_visit_today_for_all_voters_since_date(
            changes_since_this_date_as_integer, through_date_as_integer)
        first_visit_today_count = first_visit_today_results['first_visit_today_count']
        # Step 2: backfill analytics entries that lack an election id
        message = "[sitewide_voter_metrics_process_view, STARTING " \
                  "augment_voter_analytics_action_entries_without_election_id]"
        print_to_log(logger=logger, exception_message_optional=message)
        results = augment_voter_analytics_action_entries_without_election_id(
            changes_since_this_date_as_integer, through_date_as_integer)
    if positive_value_exists(erase_existing_voter_metrics_data):
        # Add code here to erase data for all of the voters who otherwise would be updated between
        # the dates: changes_since_this_date_as_integer and through_date_as_integer
        pass
    else:
        # Step 3: recompute and store the per-voter metrics for the window
        message = "[sitewide_voter_metrics_process_view, STARTING " \
                  "save_sitewide_voter_metrics]"
        print_to_log(logger=logger, exception_message_optional=message)
        results = save_sitewide_voter_metrics(changes_since_this_date_as_integer, through_date_as_integer)
        sitewide_voter_metrics_updated = results['sitewide_voter_metrics_updated']
        message = "[sitewide_voter_metrics_process_view, FINISHED " \
                  "save_sitewide_voter_metrics]"
        print_to_log(logger=logger, exception_message_optional=message)
    messages.add_message(request, messages.INFO,
                         str(first_visit_today_count) + ' first visit updates.<br />' +
                         'voters with updated metrics: ' + str(sitewide_voter_metrics_updated) + '')
    return HttpResponseRedirect(reverse('analytics:sitewide_voter_metrics', args=()) +
                                "?google_civic_election_id=" + str(google_civic_election_id) +
                                "&state_code=" + str(state_code) +
                                "&augment_voter_data=" + str(augment_voter_data) +
                                "&date_as_integer=" + str(changes_since_this_date_as_integer) +
                                "&through_date_as_integer=" + str(through_date_as_integer)
                                )
@login_required
def sitewide_voter_metrics_view(request):
    """Display per-voter sitewide metrics, excluding welcome-page bounces.

    GET parameters:
        google_civic_election_id, state_code: echoed back to the template.
        date_as_integer / through_date_as_integer: optional bounds applied
            to each voter's last_action_date (through date is inclusive).
    A voter whose only recorded action is a single welcome-page visit
    (welcome_visited=1, actions_count=1) is treated as a bounce: excluded
    from the list but counted for the bounce-rate message. At most 400
    rows are rendered.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    date_as_integer = convert_to_int(request.GET.get('date_as_integer', 0))
    through_date_as_integer = convert_to_int(request.GET.get('through_date_as_integer', date_as_integer))
    start_date = None
    if positive_value_exists(date_as_integer):
        start_date = convert_date_as_integer_to_date(date_as_integer)
    through_date = None
    if positive_value_exists(through_date_as_integer):
        # Add a day so the through date bound is inclusive
        through_date = convert_date_as_integer_to_date(through_date_as_integer + 1)
    sitewide_voter_metrics_list_short = []
    try:
        sitewide_voter_metrics_query = SitewideVoterMetrics.objects.using('analytics').order_by('-last_action_date')
        # Don't return the welcome page bounces
        sitewide_voter_metrics_query = sitewide_voter_metrics_query.exclude(welcome_visited=1, actions_count=1)
        if positive_value_exists(date_as_integer):
            sitewide_voter_metrics_query = sitewide_voter_metrics_query.filter(last_action_date__gte=start_date)
        if positive_value_exists(through_date_as_integer):
            sitewide_voter_metrics_query = sitewide_voter_metrics_query.filter(
                last_action_date__lte=through_date)
        sitewide_voter_metrics_list = list(sitewide_voter_metrics_query)
        # Count how many welcome page bounces are being excluded
        bounce_query = SitewideVoterMetrics.objects.using('analytics').all()
        bounce_query = bounce_query.filter(welcome_visited=1, actions_count=1)
        if positive_value_exists(date_as_integer):
            bounce_query = bounce_query.filter(last_action_date__gte=start_date)
        if positive_value_exists(through_date_as_integer):
            bounce_query = bounce_query.filter(last_action_date__lte=through_date)
        bounce_count = bounce_query.count()
        # And the total we found
        total_number_of_voters_without_bounce = len(sitewide_voter_metrics_list)
        number_of_voters_to_show = 400
        sitewide_voter_metrics_list_short = sitewide_voter_metrics_list[:number_of_voters_to_show]
        # Bounce rate, as a percentage of all voters seen in the window.
        # (Bug fix: the ratio was previously reported as a raw fraction,
        # so "0.35%" was shown where "35.0%" was meant.)
        total_voters = total_number_of_voters_without_bounce + bounce_count
        if positive_value_exists(bounce_count) and positive_value_exists(total_voters):
            voter_bounce_rate = 100.0 * bounce_count / total_voters
        else:
            voter_bounce_rate = 0
        messages.add_message(request, messages.INFO,
                             str(total_number_of_voters_without_bounce) + ' voters with statistics. ' +
                             str(bounce_count) + ' welcome page bounces not shown. ' +
                             '{:.1f}'.format(voter_bounce_rate) + '% visitors bounced (left with only one view).')
    except SitewideVoterMetrics.DoesNotExist:
        # This is fine -- no metrics generated yet
        pass
    messages_on_stage = get_messages(request)
    template_values = {
        'messages_on_stage': messages_on_stage,
        'sitewide_voter_metrics_list': sitewide_voter_metrics_list_short,
        'google_civic_election_id': google_civic_election_id,
        'state_code': state_code,
        'date_as_integer': date_as_integer,
        'start_date': start_date,
        'through_date_as_integer': through_date_as_integer,
        'through_date': through_date,
    }
    return render(request, 'analytics/sitewide_voter_metrics.html', template_values)
| {
"repo_name": "wevote/WeVoteServer",
"path": "analytics/views_admin.py",
"copies": "1",
"size": "71740",
"license": "mit",
"hash": 3344935132245164000,
"line_mean": 50.8352601156,
"line_max": 120,
"alpha_frac": 0.6360886535,
"autogenerated": false,
"ratio": 3.829809950886184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9953755929852456,
"avg_score": 0.002428534906745595,
"num_lines": 1384
} |
"""Analytics views that are served from the same domain as the docs."""
from functools import lru_cache
from django.db.models import F
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework.response import Response
from rest_framework.views import APIView
from readthedocs.analytics.models import PageView
from readthedocs.api.v2.permissions import IsAuthorizedToViewVersion
from readthedocs.core.unresolver import unresolve_from_request
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.projects.models import Project
class BaseAnalyticsView(APIView):

    """
    Track page views.

    Query parameters:

    - project: slug used to look up the Project
    - version: slug of a version within that project
    - absolute_uri: Full path with domain.
    """

    # GET only; the permission class runs before get() and gates access to
    # the requested version.
    http_method_names = ['get']
    permission_classes = [IsAuthorizedToViewVersion]

    @lru_cache(maxsize=1)
    def _get_project(self):
        """Return the Project named by the ``project`` query param, or 404."""
        # NOTE(review): lru_cache on an instance method keys on ``self`` and
        # keeps the instance alive for the cache's lifetime (ruff B019).
        # Harmless only if a fresh view instance serves each request -- confirm.
        project_slug = self.request.GET.get('project')
        project = get_object_or_404(Project, slug=project_slug)
        return project

    @lru_cache(maxsize=1)
    def _get_version(self):
        """Return the Version of ``_get_project()`` named by ``version``, or 404."""
        version_slug = self.request.GET.get('version')
        project = self._get_project()
        version = get_object_or_404(
            project.versions.all(),
            slug=version_slug,
        )
        return version

    # pylint: disable=unused-argument
    def get(self, request, *args, **kwargs):
        """Record one page view for the requested project/version/URI."""
        project = self._get_project()
        version = self._get_version()
        absolute_uri = self.request.GET.get('absolute_uri')
        self.increase_page_view_count(
            request=request,
            project=project,
            version=version,
            absolute_uri=absolute_uri,
        )
        return Response(status=200)

    # pylint: disable=no-self-use
    def increase_page_view_count(self, request, project, version, absolute_uri):
        """Increase the page view count for the given project."""
        # Map the absolute URI back to a filename inside the version; bail
        # out silently when the URL does not resolve to anything trackable.
        unresolved = unresolve_from_request(request, absolute_uri)
        if not unresolved:
            return
        path = unresolved.filename
        # One PageView row per (project, version, path, calendar day).
        fields = dict(
            project=project,
            version=version,
            path=path,
            date=timezone.now().date(),
        )
        page_view = PageView.objects.filter(**fields).first()
        if page_view:
            # F() expression makes the increment happen on the database side.
            page_view.view_count = F('view_count') + 1
            page_view.save(update_fields=['view_count'])
        else:
            # NOTE(review): filter-then-create is not atomic; two concurrent
            # first views of the same page could race and create duplicate
            # rows -- consider get_or_create. Verify against DB constraints.
            PageView.objects.create(**fields, view_count=1)
class AnalyticsView(SettingsOverrideObject):
    """Swappable entry point: deployments may override the analytics view
    via SettingsOverrideObject; defaults to :class:`BaseAnalyticsView`."""
    _default_class = BaseAnalyticsView
| {
"repo_name": "rtfd/readthedocs.org",
"path": "readthedocs/analytics/proxied_api.py",
"copies": "1",
"size": "2612",
"license": "mit",
"hash": 8050106452556271000,
"line_mean": 29.0229885057,
"line_max": 80,
"alpha_frac": 0.6496937213,
"autogenerated": false,
"ratio": 4.08125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.52309437213,
"avg_score": null,
"num_lines": null
} |
"""Analyze a game"""
import argparse
import json
import sys
import numpy as np
from numpy import linalg
from gameanalysis import dominance
from gameanalysis import gameio
from gameanalysis import nash
from gameanalysis import reduction
from gameanalysis import regret
from gameanalysis import subgame
def add_parser(subparsers):
    """Register the ``analyze`` sub-command and return its parser."""
    parser = subparsers.add_parser(
        'analyze', help="""Analyze games""", description="""Perform game
        analysis.""")
    # Bind the bound method once; every option below is registered through it.
    add = parser.add_argument
    add(
        '--input', '-i', metavar='<input-file>', default=sys.stdin,
        type=argparse.FileType('r'), help="""Input file for script. (default:
        stdin)""")
    add(
        '--output', '-o', metavar='<output-file>', default=sys.stdout,
        type=argparse.FileType('w'), help="""Output file for script. (default:
        stdout)""")
    add(
        '--dist-thresh', metavar='<distance-threshold>', type=float,
        default=1e-3, help="""L2 norm threshold, inside of which, equilibria
        are considered identical. (default: %(default)g)""")
    add(
        '--regret-thresh', '-r', metavar='<regret-threshold>', type=float,
        default=1e-3, help="""Maximum regret to consider an equilibrium
        confirmed. (default: %(default)g)""")
    add(
        '--supp-thresh', '-t', metavar='<support-threshold>', type=float,
        default=1e-3, help="""Maximum probability to consider a strategy in
        support. (default: %(default)g)""")
    add(
        '--rand-restarts', metavar='<random-restarts>', type=int, default=0,
        help="""The number of random points to add to nash equilibrium finding.
        (default: %(default)d)""")
    add(
        '--max-iters', '-m', metavar='<maximum-iterations>', type=int,
        default=10000, help="""The maximum number of iterations to run through
        replicator dynamics. (default: %(default)d)""")
    add(
        '--converge-thresh', '-c', metavar='<convergence-threshold>',
        type=float, default=1e-8, help="""The convergence threshold for
        replicator dynamics. (default: %(default)g)""")
    add(
        '--processes', '-p', metavar='<num-procs>', type=int, help="""Number of
        processes to use to run nash finding. (default: number of cores)""")
    add(
        '--dpr', nargs='+', metavar='<role> <count>', help="""Apply a DPR
        reduction to the game, with reduced counts per role specified.""")
    add(
        '--dominance', '-d', action='store_true', help="""Remove dominated
        strategies.""")
    add(
        '--subgames', '-s', action='store_true', help="""Extract maximal
        subgames, and analyze each individually instead of considering the game
        as a whole.""")
    return parser
def main(args):
    """Analyze a game and write a human-readable report plus json data.

    Reads a game from ``args.input``; optionally applies a DPR reduction
    (``--dpr``) and iterated elimination of strictly dominated strategies
    (``--dominance``); finds mixed Nash equilibria, either on the whole
    game or per maximal complete subgame (``--subgames``); then writes a
    sectioned text report followed by a json payload of the equilibria
    found to ``args.output``.
    """
    game, serial = gameio.read_game(json.load(args.input))

    # Player reductions leave the strategy sets intact, so the original
    # serializer stays valid for the reduced game.  (Bug fix: ``redserial``
    # was previously assigned only in the else branch, so running with
    # --dpr raised a NameError at its first later use.)
    redserial = serial
    if args.dpr:
        red_players = serial.from_role_json(dict(zip(
            args.dpr[::2], map(int, args.dpr[1::2]))))
        red = reduction.DeviationPreserving(game.num_strategies,
                                            game.num_players, red_players)
        redgame = red.reduce_game(game, True)
    else:
        redgame = game

    if args.dominance:
        domsub = dominance.iterated_elimination(redgame, 'strictdom')
        redgame = subgame.subgame(redgame, domsub)
        redserial = subgame.subserializer(redserial, domsub)

    if args.subgames:
        subgames = subgame.maximal_subgames(redgame)
    else:
        # Single mask covering every strategy: analyze the full game.
        subgames = np.ones(redgame.num_role_strats, bool)[None]

    methods = {
        'replicator': {
            'max_iters': args.max_iters,
            'converge_thresh': args.converge_thresh},
        'optimize': {}}

    # Collect candidate equilibria from each (sub)game, deduplicating any
    # mixture within dist_thresh (L2 norm) of one already collected.
    noeq_subgames = []
    candidates = []
    for submask in subgames:
        subg = subgame.subgame(redgame, submask)
        subeqa = nash.mixed_nash(
            subg, regret_thresh=args.regret_thresh,
            dist_thresh=args.dist_thresh, processes=args.processes, **methods)
        eqa = subgame.translate(subg.trim_mixture_support(
            subeqa, supp_thresh=args.supp_thresh), submask)
        if eqa.size:
            for eqm in eqa:
                if not any(linalg.norm(eqm - eq) < args.dist_thresh
                           for eq in candidates):
                    candidates.append(eqm)
        else:
            noeq_subgames.append(submask)  # pragma: no cover

    # Classify each candidate by its deviation gains in the reduced game:
    # confirmed equilibrium, unconfirmed (missing data), or pointing at an
    # unexplored best-response subgame.
    equilibria = []
    unconfirmed = []
    unexplored = []
    for eqm in candidates:
        support = eqm > 0
        gains = regret.mixture_deviation_gains(redgame, eqm)
        role_gains = redgame.role_reduce(gains, ufunc=np.fmax)
        gain = np.nanmax(role_gains)
        if np.isnan(gains).any() and gain <= args.regret_thresh:
            # Not fully explored but might be good
            unconfirmed.append((eqm, gain))
        elif np.any(role_gains > args.regret_thresh):
            # There are deviations, did we explore them?
            dev_inds = ([np.argmax(gs == mg) for gs, mg
                         in zip(redgame.role_split(gains), role_gains)] +
                        redgame.role_starts)[role_gains > args.regret_thresh]
            for dind in dev_inds:
                devsupp = support.copy()
                devsupp[dind] = True
                if not np.all(devsupp <= subgames, -1).any():
                    unexplored.append((devsupp, dind, gains[dind], eqm))
        else:
            # Equilibrium!
            equilibria.append((eqm, np.max(gains)))

    # Output Game
    args.output.write('Game Analysis\n')
    args.output.write('=============\n')
    args.output.write(serial.to_game_printstr(game))
    args.output.write('\n\n')
    if args.dpr is not None:
        args.output.write('With DPR reduction: ')
        args.output.write(' '.join(args.dpr))
        args.output.write('\n\n')
    if args.dominance:
        num = np.sum(~domsub)
        if num:
            args.output.write('Found {:d} dominated strateg{}\n'.format(
                num, 'y' if num == 1 else 'ies'))
            args.output.write(serial.to_subgame_printstr(~domsub))
            args.output.write('\n')
        else:
            args.output.write('Found no dominated strategies\n\n')
    if args.subgames:
        num = subgames.shape[0]
        if num:
            args.output.write(
                'Found {:d} maximal complete subgame{}\n\n'.format(
                    num, '' if num == 1 else 's'))
        else:
            args.output.write('Found no complete subgames\n\n')
    args.output.write('\n')

    # Output social welfare (computed on the unreduced game)
    args.output.write('Social Welfare\n')
    args.output.write('--------------\n')
    welfare, profile = regret.max_pure_social_welfare(game)
    if profile is None:
        args.output.write('There was no profile with complete payoff data\n\n')
    else:
        args.output.write('\nMaximum social welfare profile:\n')
        args.output.write(serial.to_prof_printstr(profile))
        args.output.write('Welfare: {:.4f}\n\n'.format(welfare))
        if game.num_roles > 1:
            for role, welfare, profile in zip(
                    serial.role_names,
                    *regret.max_pure_social_welfare(game, True)):
                args.output.write('Maximum "{}" welfare profile:\n'.format(
                    role))
                args.output.write(serial.to_prof_printstr(profile))
                args.output.write('Welfare: {:.4f}\n\n'.format(welfare))
    args.output.write('\n')

    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')
    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(redserial.to_mix_printstr(eqm))
            args.output.write('Regret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')  # pragma: no cover
    args.output.write('\n')

    # Output No-equilibria Subgames
    args.output.write('No-equilibria Subgames\n')
    args.output.write('----------------------\n')
    if noeq_subgames:  # pragma: no cover
        args.output.write('Found {:d} no-equilibria subgame{}\n\n'.format(
            len(noeq_subgames), '' if len(noeq_subgames) == 1 else 's'))
        noeq_subgames.sort(key=lambda x: x.sum())
        for i, subg in enumerate(noeq_subgames, 1):
            args.output.write('No-equilibria subgame {:d}:\n'.format(i))
            args.output.write(redserial.to_subgame_printstr(subg))
            args.output.write('\n')
    else:
        args.output.write('Found no no-equilibria subgames\n\n')
    args.output.write('\n')

    # Output Unconfirmed Candidates
    args.output.write('Unconfirmed Candidate Equilibria\n')
    args.output.write('--------------------------------\n')
    if unconfirmed:
        args.output.write('Found {:d} unconfirmed candidate{}\n\n'.format(
            len(unconfirmed), '' if len(unconfirmed) == 1 else 's'))
        # Sort by support size, then by regret lower bound
        unconfirmed.sort(key=lambda x: ((x[0] > 0).sum(), x[1]))
        for i, (eqm, reg_bound) in enumerate(unconfirmed, 1):
            args.output.write('Unconfirmed candidate {:d}:\n'.format(i))
            args.output.write(redserial.to_mix_printstr(eqm))
            args.output.write('Regret at least: {:.4f}\n\n'.format(reg_bound))
    else:
        args.output.write('Found no unconfirmed candidate equilibria\n\n')
    args.output.write('\n')

    # Output Unexplored Subgames
    args.output.write('Unexplored Best-response Subgames\n')
    args.output.write('---------------------------------\n')
    if unexplored:
        min_supp = min(supp.sum() for supp, _, _, _ in unexplored)
        args.output.write(
            'Found {:d} unexplored best-response subgame{}\n'.format(
                len(unexplored), '' if len(unexplored) == 1 else 's'))
        args.output.write(
            'Smallest unexplored subgame has support {:d}\n\n'.format(
                min_supp))
        # Sort by support size, then by descending deviation gain
        unexplored.sort(key=lambda x: (x[0].sum(), -x[2]))
        for i, (sub, dev, gain, eqm) in enumerate(unexplored, 1):
            args.output.write('Unexplored subgame {:d}:\n'.format(i))
            args.output.write(redserial.to_subgame_printstr(sub))
            args.output.write('{:.4f} for deviating to {} from:\n'.format(
                gain, redserial.strat_name(dev)))
            args.output.write(redserial.to_mix_printstr(eqm))
            args.output.write('\n')
    else:
        args.output.write('Found no unexplored best-response subgames\n\n')
    args.output.write('\n')

    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [redserial.to_mix_json(eqm) for eqm, _ in equilibria]}
    json.dump(json_data, args.output)
    args.output.write('\n')
| {
"repo_name": "yackj/GameAnalysis",
"path": "gameanalysis/script/analyze.py",
"copies": "1",
"size": "11106",
"license": "apache-2.0",
"hash": 2134976008702140000,
"line_mean": 40.7518796992,
"line_max": 79,
"alpha_frac": 0.5881505493,
"autogenerated": false,
"ratio": 3.5167827739075364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4604933323207536,
"avg_score": null,
"num_lines": null
} |
"""Analyze a game"""
import argparse
import json
import sys
import numpy as np
from gameanalysis import collect
from gameanalysis import dominance
from gameanalysis import gamereader
from gameanalysis import nash
from gameanalysis import reduction
from gameanalysis import regret
from gameanalysis import restrict
def add_parser(subparsers):
    """Register the ``analyze`` sub-command and return its parser."""
    parser = subparsers.add_parser(
        'analyze', help="""Analyze games""", description="""Perform game
        analysis.""")
    # Bind the bound method once; every plain option goes through it.
    add = parser.add_argument
    add(
        '--input', '-i', metavar='<input-file>', default=sys.stdin,
        type=argparse.FileType('r'), help="""Input file for script. (default:
        stdin)""")
    add(
        '--output', '-o', metavar='<output-file>', default=sys.stdout,
        type=argparse.FileType('w'), help="""Output file for script. (default:
        stdout)""")
    add(
        '--dist-thresh', metavar='<distance-threshold>', type=float,
        default=0.1, help="""Average normalized per-role L2-norm threshold,
        inside of which, equilibria are considered identical. Valid in [0, 1].
        (default: %(default)g)""")
    add(
        '--regret-thresh', '-r', metavar='<regret-threshold>', type=float,
        default=1e-3, help="""Maximum regret to consider an equilibrium
        confirmed. (default: %(default)g)""")
    add(
        '--support', '-t', metavar='<support-threshold>', type=float,
        default=1e-3, help="""Maximum probability to consider a strategy in
        support. (default: %(default)g)""")
    add(
        '--processes', '-p', metavar='<num-procs>', type=int, help="""Number of
        processes to use to run nash finding. (default: number of cores)""")
    add(
        '--dominance', '-d', action='store_true', help="""Remove dominated
        strategies.""")
    add(
        '--restrictions', '-s', action='store_true', help="""Extract maximal
        restricted games, and analyze each individually instead of considering
        the game as a whole.""")
    add(
        '--style', default='best',
        choices=['fast', 'fast*', 'more', 'more*', 'best', 'best*', 'one'],
        help="""The `style` of mixed equilibrium finding. `fast` runs the
        fastest algorithms that should find an equilibrium. `more` will try
        slower ones until it finds one. `best` is more but will do an
        exhaustive search with a timeout of a half hour. `one` is the same as
        best with no timeout. The starred* versions do the same, but will
        return the minimum regret mixture if no equilibria were found.
        (default: %(default)s)""")
    # --dpr and --hr are alternatives: at most one reduction may be applied.
    reductions = parser.add_mutually_exclusive_group()
    reductions.add_argument(
        '--dpr', metavar='<role:count;role:count,...>', help="""Specify a
        deviation preserving reduction.""")
    reductions.add_argument(
        '--hr', metavar='<role:count;role:count,...>', help="""Specify a
        hierarchical reduction.""")
    return parser
def main(args): # pylint: disable=too-many-statements,too-many-branches,too-many-locals
    """Entry point for analysis.

    Loads a game, optionally applies a player reduction and removes
    dominated strategies, searches for mixed equilibria (per maximal
    restricted game if requested), classifies candidates as confirmed /
    unconfirmed / pointing at unexplored restricted games, and writes a
    human-readable report followed by a json payload to ``args.output``.
    """
    game = gamereader.load(args.input)
    # Optionally replace the game with a reduced-player version first
    if args.dpr is not None:
        red_players = game.role_from_repr(args.dpr, dtype=int)
        game = reduction.deviation_preserving.reduce_game(game, red_players)
    elif args.hr is not None:
        red_players = game.role_from_repr(args.hr, dtype=int)
        game = reduction.hierarchical.reduce_game(game, red_players)
    if args.dominance:
        domsub = dominance.iterated_elimination(game, 'strictdom')
        game = game.restrict(domsub)
    if args.restrictions:
        restrictions = restrict.maximal_restrictions(game)
    else:
        # A single "restriction" containing every strategy, i.e. the full game
        restrictions = np.ones((1, game.num_strats), bool)
    noeq_restrictions = []
    candidates = []
    for rest in restrictions:
        rgame = game.restrict(rest)
        reqa = nash.mixed_equilibria(
            rgame, style=args.style, regret_thresh=args.regret_thresh,
            dist_thresh=args.dist_thresh, processes=args.processes)
        # Trim tiny support probabilities, then map back into full-game indices
        eqa = restrict.translate(rgame.trim_mixture_support(
            reqa, thresh=args.support), rest)
        if eqa.size:
            candidates.extend(eqa)
        else:
            noeq_restrictions.append(rest)
    # Per the --dist-thresh help text, mixtures within the (role-scaled)
    # threshold are considered identical and collapse into one entry
    equilibria = collect.mcces(args.dist_thresh * np.sqrt(2 * game.num_roles))
    unconfirmed = collect.mcces(args.dist_thresh * np.sqrt(2 * game.num_roles))
    unexplored = {}
    for eqm in candidates:
        support = eqm > 0
        # FIXME This treats trimming support differently than quiesce does,
        # which means quiesce could find an equilibria, and this would fail to
        # find it.
        gains = regret.mixture_deviation_gains(game, eqm)
        role_gains = np.fmax.reduceat(gains, game.role_starts)
        gain = np.nanmax(role_gains)
        if np.isnan(gains).any() and gain <= args.regret_thresh:
            # Not fully explored but might be good
            unconfirmed.add(eqm, gain)
        elif np.any(role_gains > args.regret_thresh):
            # There are deviations, did we explore them?
            dev_inds = ([np.argmax(gs == mg) for gs, mg
                         in zip(np.split(gains, game.role_starts[1:]),
                                role_gains)] +
                        game.role_starts)[role_gains > args.regret_thresh]
            for dind in dev_inds:
                devsupp = support.copy()
                devsupp[dind] = True
                if not np.all(devsupp <= restrictions, -1).any():
                    ind = restrict.to_id(game, devsupp)
                    old_info = unexplored.get(ind, (0, 0, 0, None))
                    # Tuple max keeps the entry with the largest deviation gain
                    new_info = (gains[dind], dind, old_info[2] + 1, eqm)
                    unexplored[ind] = max(new_info, old_info)
        else:
            # Equilibrium!
            equilibria.add(eqm, np.max(gains))
    # Output Game
    args.output.write('Game Analysis\n')
    args.output.write('=============\n')
    args.output.write(str(game))
    args.output.write('\n\n')
    if args.dpr is not None:
        args.output.write('With deviation preserving reduction: ')
        args.output.write(args.dpr.replace(';', ' '))
        args.output.write('\n\n')
    elif args.hr is not None:
        args.output.write('With hierarchical reduction: ')
        args.output.write(args.hr.replace(';', ' '))
        args.output.write('\n\n')
    if args.dominance:
        num = np.sum(~domsub)
        if num:
            args.output.write('Found {:d} dominated strateg{}\n'.format(
                num, 'y' if num == 1 else 'ies'))
            args.output.write(game.restriction_to_str(~domsub))
            args.output.write('\n\n')
        else:
            args.output.write('Found no dominated strategies\n\n')
    if args.restrictions:
        num = restrictions.shape[0]
        if num:
            args.output.write(
                'Found {:d} maximal complete restricted game{}\n\n'.format(
                    num, '' if num == 1 else 's'))
        else:
            args.output.write('Found no complete restricted games\n\n')
    args.output.write('\n')
    # Output social welfare
    args.output.write('Social Welfare\n')
    args.output.write('--------------\n')
    welfare, profile = regret.max_pure_social_welfare(game)
    if profile is None:
        args.output.write('There was no profile with complete payoff data\n\n')
    else:
        args.output.write('\nMaximum social welfare profile:\n')
        args.output.write(game.profile_to_str(profile))
        args.output.write('\nWelfare: {:.4f}\n\n'.format(welfare))
        if game.num_roles > 1:
            # Also report the per-role maxima when multiple roles exist
            for role, welfare, profile in zip(
                    game.role_names,
                    *regret.max_pure_social_welfare(game, by_role=True)):
                args.output.write('Maximum "{}" welfare profile:\n'.format(
                    role))
                args.output.write(game.profile_to_str(profile))
                args.output.write('\nWelfare: {:.4f}\n\n'.format(welfare))
    args.output.write('\n')
    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')
    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\nRegret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')
    args.output.write('\n')
    # Output No-equilibria Subgames
    args.output.write('No-equilibria Subgames\n')
    args.output.write('----------------------\n')
    if noeq_restrictions:
        args.output.write(
            'Found {:d} no-equilibria restricted game{}\n\n'.format(
                len(noeq_restrictions),
                '' if len(noeq_restrictions) == 1 else 's'))
        # Smallest restricted games first
        noeq_restrictions.sort(key=lambda x: x.sum())
        for i, subg in enumerate(noeq_restrictions, 1):
            args.output.write(
                'No-equilibria restricted game {:d}:\n'.format(i))
            args.output.write(game.restriction_to_str(subg))
            args.output.write('\n\n')
    else:
        args.output.write('Found no no-equilibria restricted games\n\n')
    args.output.write('\n')
    # Output Unconfirmed Candidates
    args.output.write('Unconfirmed Candidate Equilibria\n')
    args.output.write('--------------------------------\n')
    if unconfirmed:
        args.output.write('Found {:d} unconfirmed candidate{}\n\n'.format(
            len(unconfirmed), '' if len(unconfirmed) == 1 else 's'))
        # Sort by support size, then regret bound
        ordered = sorted(
            (sum(e > 0 for e in m), r, m) for m, r in unconfirmed)
        for i, (_, reg_bound, eqm) in enumerate(ordered, 1):
            args.output.write('Unconfirmed candidate {:d}:\n'.format(i))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\nRegret at least: {:.4f}\n\n'.format(
                reg_bound))
    else:
        args.output.write('Found no unconfirmed candidate equilibria\n\n')
    args.output.write('\n')
    # Output Unexplored Subgames
    args.output.write('Unexplored Best-response Subgames\n')
    args.output.write('---------------------------------\n')
    if unexplored:
        min_supp = min(restrict.from_id(game, sid).sum() for sid in unexplored)
        args.output.write(
            'Found {:d} unexplored best-response restricted game{}\n'.format(
                len(unexplored), '' if len(unexplored) == 1 else 's'))
        args.output.write(
            'Smallest unexplored restricted game has support {:d}\n\n'.format(
                min_supp))
        # Sort by support size, then by descending deviation gain
        ordered = sorted((
            restrict.from_id(game, sind).sum(),
            -gain, dev,
            restrict.from_id(game, sind),
            eqm,
        ) for sind, (gain, dev, _, eqm) in unexplored.items())
        for i, (_, ngain, dev, sub, eqm) in enumerate(ordered, 1):
            args.output.write('Unexplored restricted game {:d}:\n'.format(i))
            args.output.write(game.restriction_to_str(sub))
            args.output.write('\n{:.4f} for deviating to {} from:\n'.format(
                -ngain, game.strat_name(dev)))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\n\n')
    else:
        args.output.write(
            'Found no unexplored best-response restricted games\n\n')
    args.output.write('\n')
    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [game.mixture_to_json(eqm) for eqm, _ in equilibria]}
    json.dump(json_data, args.output)
    args.output.write('\n')
| {
"repo_name": "egtaonline/GameAnalysis",
"path": "gameanalysis/script/analyze.py",
"copies": "1",
"size": "11965",
"license": "apache-2.0",
"hash": -1247817724389972000,
"line_mean": 41.5800711744,
"line_max": 87,
"alpha_frac": 0.5908900961,
"autogenerated": false,
"ratio": 3.585555888522625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46764459846226253,
"avg_score": null,
"num_lines": null
} |
"""Analyze a game using gp learn"""
import argparse
import json
import sys
import warnings
from gameanalysis import learning
from gameanalysis import gamereader
from gameanalysis import nash
from gameanalysis import regret
def add_parser(subparsers):
    """Register the `learning` subcommand and declare all of its options."""
    parser = subparsers.add_parser(
        'learning', help="""Analyze game using learning""",
        description="""Perform game analysis with learned model""")
    add = parser.add_argument
    # input / output streams
    add('--input', '-i', metavar='<input-file>', default=sys.stdin,
        type=argparse.FileType('r'), help="""Input file for script. (default:
        stdin)""")
    add('--output', '-o', metavar='<output-file>', default=sys.stdout,
        type=argparse.FileType('w'), help="""Output file for script. (default:
        stdout)""")
    # equilibrium identification thresholds
    add('--dist-thresh', metavar='<distance-threshold>', type=float,
        default=1e-3, help="""L2 norm threshold, inside of which, equilibria
        are considered identical. (default: %(default)g)""")
    add('--regret-thresh', '-r', metavar='<regret-threshold>', type=float,
        default=1e-3, help="""Maximum regret to consider an equilibrium
        confirmed. (default: %(default)g)""")
    add('--supp-thresh', '-t', metavar='<support-threshold>', type=float,
        default=1e-3, help="""Maximum probability to consider a strategy in
        support. (default: %(default)g)""")
    # nash-finding controls
    add('--rand-restarts', metavar='<random-restarts>', type=int, default=0,
        help="""The number of random points to add to nash equilibrium finding.
        (default: %(default)d)""")
    add('--max-iters', '-m', metavar='<maximum-iterations>', type=int,
        default=10000, help="""The maximum number of iterations to run through
        replicator dynamics. (default: %(default)d)""")
    add('--converge-thresh', '-c', metavar='<convergence-threshold>',
        type=float, default=1e-8, help="""The convergence threshold for
        replicator dynamics. (default: %(default)g)""")
    add('--processes', '-p', metavar='<num-procs>', type=int, help="""Number of
        processes to use to run nash finding. (default: number of cores)""")
    add('--one', action='store_true', help="""If specified, run a potentially
        expensive algorithm to guarantee an approximate equilibrium, if none
        are found via other methods.""")
    return parser
def main(args):
    """Entry point for learning script.

    Trains an RBF Gaussian-process model of the input game, finds mixed
    equilibria of the learned game, and writes a text report followed by a
    json payload to ``args.output``.
    """
    # Record warnings during training so the poor-fit warning can be
    # surfaced in the report below
    with warnings.catch_warnings(record=True) as warns:
        game = learning.rbfgame_train(gamereader.load(args.input))
    methods = {'replicator': {'max_iters': args.max_iters,
                              'converge_thresh': args.converge_thresh},
               'optimize': {}}
    mixed_equilibria = game.trim_mixture_support(
        nash.mixed_nash(game, regret_thresh=args.regret_thresh,
                        dist_thresh=args.dist_thresh, processes=args.processes,
                        at_least_one=args.one, **methods),
        thresh=args.supp_thresh)
    # Pair each equilibrium with its regret in the learned game
    equilibria = [(eqm, regret.mixture_regret(game, eqm))
                  for eqm in mixed_equilibria]
    # Output game
    args.output.write('Game Learning\n')
    args.output.write('=============\n')
    args.output.write(str(game))
    args.output.write('\n\n')
    # Surface the specific "lengths at their bounds" training warning, which
    # indicates the learned model may represent the game poorly
    if any(w.category == UserWarning and
           w.message.args[0] == (
               'some lengths were at their bounds, this may indicate a poor '
               'fit') for w in warns):
        args.output.write('Warning\n')
        args.output.write('=======\n')
        args.output.write(
            'Some length scales were at their limit. This is a strong\n'
            'indication that a good representation was not found.\n')
        args.output.write('\n\n')
    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')
    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\nRegret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')
    args.output.write('\n')
    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [game.mixture_to_json(eqm) for eqm, _ in equilibria]}
    json.dump(json_data, args.output)
    args.output.write('\n')
| {
"repo_name": "egtaonline/GameAnalysis",
"path": "gameanalysis/script/learning.py",
"copies": "1",
"size": "4824",
"license": "apache-2.0",
"hash": -1918946579374399700,
"line_mean": 40.947826087,
"line_max": 79,
"alpha_frac": 0.6127694859,
"autogenerated": false,
"ratio": 3.6993865030674846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4812155988967484,
"avg_score": null,
"num_lines": null
} |
"""Analyze a game using gp learn"""
import argparse
import json
import sys
from gameanalysis import gameio
from gameanalysis import nash
from gameanalysis import regret
from gameanalysis import gpgame
def add_parser(subparsers):
    """Register the `learning` subcommand and declare all of its options."""
    parser = subparsers.add_parser(
        'learning', help="""Analyze game using learning""",
        description="""Perform game analysis""")
    add = parser.add_argument
    # input / output streams
    add('--input', '-i', metavar='<input-file>', default=sys.stdin,
        type=argparse.FileType('r'), help="""Input file for script. (default:
        stdin)""")
    add('--output', '-o', metavar='<output-file>', default=sys.stdout,
        type=argparse.FileType('w'), help="""Output file for script. (default:
        stdout)""")
    # equilibrium identification thresholds
    add('--dist-thresh', metavar='<distance-threshold>', type=float,
        default=1e-3, help="""L2 norm threshold, inside of which, equilibria
        are considered identical. (default: %(default)f)""")
    add('--regret-thresh', '-r', metavar='<regret-threshold>', type=float,
        default=1e-3, help="""Maximum regret to consider an equilibrium
        confirmed. (default: %(default)f)""")
    add('--supp-thresh', '-t', metavar='<support-threshold>', type=float,
        default=1e-3, help="""Maximum probability to consider a strategy in
        support. (default: %(default)f)""")
    # nash-finding controls
    add('--rand-restarts', metavar='<random-restarts>', type=int, default=0,
        help="""The number of random points to add to nash equilibrium finding.
        (default: %(default)d)""")
    add('--max-iters', '-m', metavar='<maximum-iterations>', type=int,
        default=10000, help="""The maximum number of iterations to run through
        replicator dynamics. (default: %(default)d)""")
    add('--converge-thresh', '-c', metavar='<convergence-threshold>',
        type=float, default=1e-8, help="""The convergence threshold for
        replicator dynamics. (default: %(default)f)""")
    add('--processes', '-p', metavar='<num-procs>', type=int, help="""Number of
        processes to use to run nash finding. (default: number of cores)""")
    return parser
def main(args):
    """Entry point: learn a GP model of the game and report its equilibria.

    Reads a game (plus serializer) from json input, wraps it in a
    PointGPGame, finds mixed equilibria via replicator dynamics, and writes
    a text report followed by a json payload to ``args.output``.
    """
    game, serial = gameio.read_game(json.load(args.input))
    # create gpgame
    lgame = gpgame.PointGPGame(game)
    # mixed strategy nash equilibria search
    methods = {
        'replicator': {
            'max_iters': args.max_iters,
            'converge_thresh': args.converge_thresh}}
    mixed_equilibria = game.trim_mixture_support(
        nash.mixed_nash(lgame, regret_thresh=args.regret_thresh,
                        dist_thresh=args.dist_thresh, processes=args.processes,
                        at_least_one=True, **methods),
        args.supp_thresh)
    # Pair each equilibrium with its regret in the learned game
    equilibria = [(eqm, regret.mixture_regret(lgame, eqm))
                  for eqm in mixed_equilibria]
    # Output game
    args.output.write('Game Learning\n')
    args.output.write('=============\n')
    args.output.write(serial.to_game_printstr(game))
    args.output.write('\n\n')
    # Output social welfare
    args.output.write('Social Welfare\n')
    args.output.write('--------------\n')
    welfare, profile = regret.max_pure_social_welfare(game)
    args.output.write('\nMaximum social welfare profile:\n')
    args.output.write(serial.to_prof_printstr(profile))
    args.output.write('Welfare: {:.4f}\n\n'.format(welfare))
    if game.num_roles > 1:
        # Also report the per-role maxima when multiple roles exist
        for role, welfare, profile in zip(
                serial.role_names,
                *regret.max_pure_social_welfare(game, True)):
            args.output.write('Maximum "{}" welfare profile:\n'.format(
                role))
            args.output.write(serial.to_prof_printstr(profile))
            args.output.write('Welfare: {:.4f}\n\n'.format(welfare))
    args.output.write('\n')
    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')
    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(serial.to_mix_printstr(eqm))
            args.output.write('Regret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')  # pragma: no cover
    args.output.write('\n')
    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [serial.to_mix_json(eqm) for eqm, _ in equilibria]}
    json.dump(json_data, args.output)
    args.output.write('\n')
| {
"repo_name": "yackj/GameAnalysis",
"path": "gameanalysis/script/learning.py",
"copies": "1",
"size": "4827",
"license": "apache-2.0",
"hash": -4990934483481361000,
"line_mean": 38.5655737705,
"line_max": 79,
"alpha_frac": 0.6138388233,
"autogenerated": false,
"ratio": 3.5079941860465116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4621833009346511,
"avg_score": null,
"num_lines": null
} |
""" Analyze and create superdarks for COS data
"""
try: from astropy.io import fits as pyfits
except ImportError: import pyfits
import numpy as np
import glob
import os
from superdark import SuperDark
# Directory holding the COS dark corrtag FITS exposures to analyze
data_dir = '/grp/hst/cos/Monitors/dark_2/data/'
def lightcurve( filename, step=1 ):
    """Quick lightcurve for a dark corrtag: event counts binned by time.

    Parameters
    ----------
    filename : str
        path to a corrtag FITS file with TIMELINE and EVENTS extensions
    step : int
        subsampling step applied to the timeline time array used as bin edges

    Returns
    -------
    counts : numpy.ndarray
        number of events falling in each timeline time bin
    """
    # Bug fixes: the FITS handle was never closed, and an MJD array
    # (SECOND_PER_MJD / mjd) was computed but never used -- dead code removed.
    hdu = pyfits.open( filename )
    try:
        times = hdu['timeline'].data['time'][::step].copy()
        counts = np.histogram( hdu['events'].data['time'], bins=times )[0]
    finally:
        hdu.close()
    return counts
def screen_darks( dark_list ):
""" split darks into baseline and variant"""
print 'Screening Darks'
variant_file = open('variant.txt', 'w')
normal_file = open('normal.txt', 'w')
empty_file = open('empty.txt', 'w')
for darkfile in dark_list:
print darkfile
counts = lightcurve( darkfile, step=25 )
if not len(counts):
empty_file.write('{}\n'.format(darkfile) )
elif counts.std()/counts.mean() > .3:
variant_file.write( '{}\n'.format(darkfile) )
else:
normal_file.write( '{}\n'.format(darkfile) )
variant_file.close()
normal_file.close()
def create_superdarks( file_list ):
print 'Creating superdarks'
all_mjd = [pyfits.getval(item, 'EXPSTART', ext=1 ) for item in file_list ]
file_list = np.array( file_list )
all_mjd = np.array( all_mjd )
step = 30
first_mjd = int(all_mjd.min())
end_mjd = int(all_mjd.max())
print 'Running from ', first_mjd, end_mjd
for start in range( first_mjd, end_mjd, step)[:-1]:
print start, '-->', start+step
file_index = np.where( (all_mjd > start) &
(all_mjd < start+step) )[0]
if not len( file_index ):
print 'WARNING, no files found for interval'
dark_reffile = SuperDark( file_list[ file_index ] )
output = '{}_{}_drk.fits'.format( start, start+step )
dark_reffile.write( outname=output )
pyfits.setval( output, 'DATASTRT', ext=0, value=start )
pyfits.setval( output, 'DATAEND', ext=0, value=start+step )
if __name__ == "__main__":
    # Screen every corrtag dark under data_dir, then build superdarks from
    # the files classified as "normal" (written to normal.txt by screen_darks)
    all_darks = glob.glob( os.path.join( data_dir, '*corrtag*.fits' ) )
    screen_darks( all_darks )
    baseline_darks = np.genfromtxt('normal.txt', dtype=None )
    create_superdarks( baseline_darks )
| {
"repo_name": "justincely/cosdark",
"path": "monitor.py",
"copies": "1",
"size": "2533",
"license": "bsd-3-clause",
"hash": 4944339630230761000,
"line_mean": 28.4534883721,
"line_max": 96,
"alpha_frac": 0.5973154362,
"autogenerated": false,
"ratio": 3.214467005076142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4311782441276142,
"avg_score": null,
"num_lines": null
} |
# analyze androcov result
# giving the instrumentation.json generated by androcov and the logcat generated at runtime
import os
import re
import json
import argparse
from datetime import datetime
# logcat regex, which will match the log message generated by `adb logcat -v threadtime`
# Raw strings fix the invalid escape sequences (\S, \s, ...) in the original
# non-raw literals.
LOGCAT_THREADTIME_RE = re.compile(r'^(?P<date>\S+)\s+(?P<time>\S+)\s+(?P<pid>[0-9]+)\s+(?P<tid>[0-9]+)\s+'
                                  r'(?P<level>[VDIWEFS])\s+(?P<tag>[^:]*):\s+(?P<content>.*)$')
class Androcov(object):
    """Computes method coverage for an androcov-instrumented APK.

    Reads ``instrumentation.json`` produced by androcov (the set of all
    instrumented methods) and compares it against the methods actually
    reached at runtime, as recorded in a logcat capture.
    """

    def __init__(self, androcov_dir):
        """Load instrumentation details from the androcov output directory.

        :param androcov_dir: directory containing ``instrumentation.json``
        """
        self.androcov_dir = androcov_dir
        instrumentation_file_path = os.path.join(self.androcov_dir, "instrumentation.json")
        # Bug fix: close the file deterministically instead of leaking the
        # handle returned by the bare open() call.
        with open(instrumentation_file_path) as instrumentation_file:
            self.instrumentation_detail = json.load(instrumentation_file)
        self.all_methods = set(self.instrumentation_detail['allMethods'])
        self.apk_path = self.instrumentation_detail['outputAPK']

    def gen_androcov_report(self, logcat_path):
        """
        generate a coverage report
        :param logcat_path: path to an `adb logcat -v threadtime` capture
        :return: dict with reached/unreached/total counts, coverage
                 percentage strings, and a cumulative per-second count of
                 reached methods
        """
        reached_methods, reached_timestamps = Androcov._parse_reached_methods(logcat_path)
        unreached_methods = self.all_methods - reached_methods
        androcov_report = {'reached_methods_count': len(reached_methods),
                           'unreached_methods_count': len(unreached_methods),
                           'all_methods_count': len(self.all_methods),
                           'coverage': "%.0f%%" % (100.0 * len(reached_methods) / len(self.all_methods)),
                           'uncoverage': "%.0f%%" % (100.0 * len(unreached_methods) / len(self.all_methods))}
        # NOTE(review): raises IndexError when no instrumented method was
        # reached (reached_timestamps empty) -- callers should ensure the
        # logcat actually contains androcov messages.
        first_timestamp = reached_timestamps[0]
        time_scale = int((reached_timestamps[-1] - first_timestamp).total_seconds()) + 2
        timestamp_count = {}
        for timestamp in range(0, time_scale):
            timestamp_count[timestamp] = 0
        for reached_timestamp in reached_timestamps:
            delta_time = int((reached_timestamp - first_timestamp).total_seconds()) + 1
            timestamp_count[delta_time] += 1
        # Convert per-second counts into a cumulative sum over time
        for timestamp in range(1, time_scale):
            timestamp_count[timestamp] += timestamp_count[timestamp - 1]
        androcov_report['timestamp_count'] = timestamp_count
        return androcov_report

    @staticmethod
    def _parse_reached_methods(logcat_path):
        """Return (set of reached method signatures, first-reach datetimes).

        The timestamp list records the time each method was first reached,
        in discovery order.
        """
        reached_methods = set()
        reached_timestamps = []
        # Bug fix: close the logcat file instead of leaking the handle.
        with open(logcat_path) as logcat_file:
            log_msgs = logcat_file.readlines()
        androcov_log_re = re.compile(r'^\[androcov\] reach \d+: (<.+>)$')
        for log_msg in log_msgs:
            log_data = Androcov.parse_log(log_msg)
            if log_data is None:
                continue
            log_content = log_data['content']
            m = re.match(androcov_log_re, log_content)
            if not m:
                continue
            reached_method = m.group(1)
            if reached_method in reached_methods:
                continue
            reached_methods.add(reached_method)
            reached_timestamps.append(log_data['datetime'])
        return reached_methods, reached_timestamps

    @staticmethod
    def parse_log(log_msg):
        """
        parse a logcat message
        the log should be in threadtime format
        @param log_msg: one line of logcat output
        @return: dict of parsed fields, or None if the line does not match
        """
        m = LOGCAT_THREADTIME_RE.match(log_msg)
        if not m:
            return None
        log_dict = {}
        date = m.group('date').strip()
        time = m.group('time').strip()
        log_dict['pid'] = m.group('pid').strip()
        log_dict['tid'] = m.group('tid').strip()
        log_dict['level'] = m.group('level').strip()
        log_dict['tag'] = m.group('tag').strip()
        log_dict['content'] = m.group('content').strip()
        # logcat lines carry no year; the current year is assumed when
        # reconstructing a full datetime.
        datetime_str = "%s-%s %s" % (datetime.today().year, date, time)
        log_dict['datetime'] = datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S.%f")
        return log_dict
def parse_args():
    """Parse the command line and return the resulting options namespace."""
    description = "Generate a report of coverage measured by androcov."
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.RawTextHelpFormatter)
    add = parser.add_argument
    add("-androcov", action="store", dest="androcov_dir", required=True,
        help="path to androcov directory")
    add("-logcat", action="store", dest="logcat_path", required=True,
        help="path to logcat file")
    return parser.parse_args()
if __name__ == "__main__":
    # Build the coverage report from the command-line inputs and print it
    # as pretty-printed json
    opts = parse_args()
    androcov = Androcov(androcov_dir=opts.androcov_dir)
    report = androcov.gen_androcov_report(opts.logcat_path)
    print json.dumps(report, indent=2)
| {
"repo_name": "ylimit/androcov",
"path": "res/androcov_report.py",
"copies": "1",
"size": "4885",
"license": "mit",
"hash": 2170784050766296300,
"line_mean": 41.1120689655,
"line_max": 109,
"alpha_frac": 0.5893551689,
"autogenerated": false,
"ratio": 3.7838884585592565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9862842170636156,
"avg_score": 0.002080291364620017,
"num_lines": 116
} |
# Analyze Color of Object
import os
import cv2
import numpy as np
from . import print_image
from . import plot_image
from . import fatal_error
from . import plot_colorbar
def _pseudocolored_image(device, histogram, bins, img, mask, background, channel, filename, resolution,
                         analysis_images, debug):
    """Pseudocolor image.
    Inputs:
    device          = pipeline step counter (used in debug output filenames)
    histogram       = a normalized histogram of color values from one color channel
    bins            = number of color bins the channel is divided into
    img             = input image
    mask            = binary mask image
    background      = what background image?: channel image (img) or white
    channel         = color channel name
    filename        = input image filename
    resolution      = output image resolution
    analysis_images = list of analysis image filenames
    debug           = print or plot. Print = save to file, Plot = print to screen.
    Returns:
    analysis_images = list of analysis image filenames
    :param device: int
    :param histogram: list
    :param bins: int
    :param img: numpy array
    :param mask: numpy array
    :param background: str
    :param channel: str
    :param filename: str
    :param resolution: int
    :param analysis_images: list
    :param debug: str
    :return analysis_images: list
    """
    # NOTE(review): `resolution` is accepted but never used in this body --
    # confirm whether it can be dropped from the signature.
    mask_inv = cv2.bitwise_not(mask)
    # colormap=2 applies OpenCV's predefined pseudocolor map to the histogram
    # image, then the mask restricts the coloring to the object pixels
    cplant = cv2.applyColorMap(histogram, colormap=2)
    cplant1 = cv2.bitwise_and(cplant, cplant, mask=mask)
    output_imgs = {"pseudo_on_img": {"background": "img", "img": None},
                   "pseudo_on_white": {"background": "white", "img": None}}
    if background == 'img' or background == 'both':
        # mask the background and color the plant with color scheme 'jet'
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_back = cv2.bitwise_and(img_gray, img_gray, mask=mask_inv)
        img_back3 = np.dstack((img_back, img_back, img_back))
        output_imgs["pseudo_on_img"]["img"] = cv2.add(cplant1, img_back3)
    if background == 'white' or background == 'both':
        # Get the image size
        if np.shape(img)[2] == 3:
            ix, iy, iz = np.shape(img)
        else:
            ix, iy = np.shape(img)
        size = ix, iy
        # Build an all-white 3-channel background the same size as img
        back = np.zeros(size, dtype=np.uint8)
        w_back = back + 255
        w_back3 = np.dstack((w_back, w_back, w_back))
        img_back3 = cv2.bitwise_and(w_back3, w_back3, mask=mask_inv)
        output_imgs["pseudo_on_white"]["img"] = cv2.add(cplant1, img_back3)
    if filename:
        for key in output_imgs:
            if output_imgs[key]["img"] is not None:
                fig_name_pseudo = str(filename[0:-4]) + '_' + str(channel) + '_pseudo_on_' + \
                                  output_imgs[key]["background"] + '.jpg'
                path = os.path.dirname(filename)
                print_image(output_imgs[key]["img"], fig_name_pseudo)
                analysis_images.append(['IMAGE', 'pseudo', fig_name_pseudo])
    else:
        path = "."
    # NOTE(review): if `filename` is truthy but neither output image was
    # produced, `path` is never assigned and the debug=='print' branch below
    # would raise UnboundLocalError -- confirm callers always pass a valid
    # `background`.
    if debug is not None:
        if debug == 'print':
            for key in output_imgs:
                if output_imgs[key]["img"] is not None:
                    print_image(output_imgs[key]["img"], (str(device) + "_" + output_imgs[key]["background"] +
                                                          '_pseudocolor.jpg'))
            fig_name = 'VIS_pseudocolor_colorbar_' + str(channel) + '_channel.svg'
            # Only write the colorbar once per output directory
            if not os.path.isfile(os.path.join(path, fig_name)):
                plot_colorbar(path, fig_name, bins)
        elif debug == 'plot':
            for key in output_imgs:
                if output_imgs[key]["img"] is not None:
                    plot_image(output_imgs[key]["img"])
    return analysis_images
def analyze_color(img, imgname, mask, bins, device, debug=None, hist_plot_type=None, pseudo_channel='v',
                  pseudo_bkg='img', resolution=300, filename=False):
    """Analyze the color properties of an image object
    Inputs:
    img            = image
    imgname        = name of input image
    mask           = mask made from selected contours
    bins           = number of color bins each channel is divided into
    device         = device number. Used to count steps in the pipeline
    debug          = None, print, or plot. Print = save to file, Plot = print to screen.
    hist_plot_type = 'None', 'all', 'rgb','lab' or 'hsv'
    color_slice_type = 'None', 'rgb', 'hsv' or 'lab'
    pseudo_channel = 'None', 'l', 'm' (green-magenta), 'y' (blue-yellow), h','s', or 'v', creates pseduocolored image
                     based on the specified channel
    pseudo_bkg     = 'img' => channel image, 'white' => white background image, 'both' => both img and white options
    filename       = False or image name. If defined print image
    Returns:
    device          = device number
    hist_header     = color histogram data table headers
    hist_data       = color histogram data table values
    analysis_images = list of output images
    :param img: numpy array
    :param imgname: str
    :param mask: numpy array
    :param bins: int
    :param device: int
    :param debug: str
    :param hist_plot_type: str
    :param pseudo_channel: str
    :param pseudo_bkg: str
    :param resolution: int
    :param filename: str
    :return device: int
    :return hist_header: list
    :return hist_data: list
    :return analysis_images: list
    """
    device += 1
    # Restrict analysis to the masked (object) pixels and split into the
    # nine single-channel images across BGR, LAB and HSV color spaces
    masked = cv2.bitwise_and(img, img, mask=mask)
    b, g, r = cv2.split(masked)
    lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
    l, m, y = cv2.split(lab)
    hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # Color channel dictionary
    # NOTE(review): the `256 / bins` scaling appears to rely on Python 2
    # integer division; under Python 3 it yields float arrays -- confirm the
    # intended binning behavior before porting.
    norm_channels = {"b": b / (256 / bins),
                     "g": g / (256 / bins),
                     "r": r / (256 / bins),
                     "l": l / (256 / bins),
                     "m": m / (256 / bins),
                     "y": y / (256 / bins),
                     "h": h / (256 / bins),
                     "s": s / (256 / bins),
                     "v": v / (256 / bins)
                     }
    # Histogram plot types
    hist_types = {"all": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
                  "rgb": ("b", "g", "r"),
                  "lab": ("l", "m", "y"),
                  "hsv": ("h", "s", "v")}
    # If the user-input pseudo_channel is not None and is not found in the list of accepted channels, exit
    if pseudo_channel is not None and pseudo_channel not in norm_channels:
        fatal_error("Pseudocolor channel was " + str(pseudo_channel) +
                    ', but can only be one of the following: None, "l", "m", "y", "h", "s" or "v"!')
    # If the user-input pseudocolored image background is not in the accepted input list, exit
    if pseudo_bkg not in ["white", "img", "both"]:
        fatal_error("The pseudocolored image background was " + str(pseudo_bkg) +
                    ', but can only be one of the following: "white", "img", or "both"!')
    # If the user-input histogram color-channel plot type is not in the list of accepted channels, exit
    if hist_plot_type is not None and hist_plot_type not in hist_types:
        fatal_error("The histogram plot type was " + str(hist_plot_type) +
                    ', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!')
    # Per-channel histograms over the masked pixels, each with a display
    # label and matplotlib line color for the optional plot below
    histograms = {
        "b": {"label": "blue", "graph_color": "blue",
              "hist": cv2.calcHist([norm_channels["b"]], [0], mask, [bins], [0, (bins - 1)])},
        "g": {"label": "green", "graph_color": "forestgreen",
              "hist": cv2.calcHist([norm_channels["g"]], [0], mask, [bins], [0, (bins - 1)])},
        "r": {"label": "red", "graph_color": "red",
              "hist": cv2.calcHist([norm_channels["r"]], [0], mask, [bins], [0, (bins - 1)])},
        "l": {"label": "lightness", "graph_color": "dimgray",
              "hist": cv2.calcHist([norm_channels["l"]], [0], mask, [bins], [0, (bins - 1)])},
        "m": {"label": "green-magenta", "graph_color": "magenta",
              "hist": cv2.calcHist([norm_channels["m"]], [0], mask, [bins], [0, (bins - 1)])},
        "y": {"label": "blue-yellow", "graph_color": "yellow",
              "hist": cv2.calcHist([norm_channels["y"]], [0], mask, [bins], [0, (bins - 1)])},
        "h": {"label": "hue", "graph_color": "blueviolet",
              "hist": cv2.calcHist([norm_channels["h"]], [0], mask, [bins], [0, (bins - 1)])},
        "s": {"label": "saturation", "graph_color": "cyan",
              "hist": cv2.calcHist([norm_channels["s"]], [0], mask, [bins], [0, (bins - 1)])},
        "v": {"label": "value", "graph_color": "orange",
              "hist": cv2.calcHist([norm_channels["v"]], [0], mask, [bins], [0, (bins - 1)])}
    }
    # Flatten calcHist's Nx1 column vectors into plain per-bin lists
    hist_data_b = [l[0] for l in histograms["b"]["hist"]]
    hist_data_g = [l[0] for l in histograms["g"]["hist"]]
    hist_data_r = [l[0] for l in histograms["r"]["hist"]]
    hist_data_l = [l[0] for l in histograms["l"]["hist"]]
    hist_data_m = [l[0] for l in histograms["m"]["hist"]]
    hist_data_y = [l[0] for l in histograms["y"]["hist"]]
    hist_data_h = [l[0] for l in histograms["h"]["hist"]]
    hist_data_s = [l[0] for l in histograms["s"]["hist"]]
    hist_data_v = [l[0] for l in histograms["v"]["hist"]]
    binval = np.arange(0, bins)
    bin_values = [l for l in binval]
    # Store Color Histogram Data
    hist_header = [
        'HEADER_HISTOGRAM',
        'bin-number',
        'bin-values',
        'blue',
        'green',
        'red',
        'lightness',
        'green-magenta',
        'blue-yellow',
        'hue',
        'saturation',
        'value'
    ]
    hist_data = [
        'HISTOGRAM_DATA',
        bins,
        bin_values,
        hist_data_b,
        hist_data_g,
        hist_data_r,
        hist_data_l,
        hist_data_m,
        hist_data_y,
        hist_data_h,
        hist_data_s,
        hist_data_v
    ]
    analysis_images = []
    if pseudo_channel is not None:
        analysis_images = _pseudocolored_image(device, norm_channels[pseudo_channel], bins, img, mask, pseudo_bkg,
                                               pseudo_channel, filename, resolution, analysis_images, debug)
    if hist_plot_type is not None and filename:
        import matplotlib
        # Use the non-interactive Agg backend since the plot is only saved
        matplotlib.use('Agg')
        from matplotlib import pyplot as plt
        # Create Histogram Plot
        for channel in hist_types[hist_plot_type]:
            plt.plot(histograms[channel]["hist"], color=histograms[channel]["graph_color"],
                     label=histograms[channel]["label"])
            plt.xlim([0, bins - 1])
            plt.legend()
        # Print plot
        fig_name = (str(filename[0:-4]) + '_' + str(hist_plot_type) + '_hist.svg')
        plt.savefig(fig_name)
        analysis_images.append(['IMAGE', 'hist', fig_name])
        if debug == 'print':
            fig_name = (str(device) + '_' + str(hist_plot_type) + '_hist.svg')
            plt.savefig(fig_name)
        plt.clf()
    return device, hist_header, hist_data, analysis_images
| {
"repo_name": "AntonSax/plantcv",
"path": "plantcv/analyze_color.py",
"copies": "2",
"size": "11048",
"license": "mit",
"hash": 4074787350292248600,
"line_mean": 39.9185185185,
"line_max": 119,
"alpha_frac": 0.5419985518,
"autogenerated": false,
"ratio": 3.5218361491871213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5063834700987122,
"avg_score": null,
"num_lines": null
} |
'''analyze columns in a transaction3 csv file
INPUT FILE: specified on command line via --in
INPUT/transactions3-al-g-sfr.csv
OUTPUT FILE: specified on command line via --out
'''
import numpy as np
import pandas as pd
import pdb
from pprint import pprint
import sys
from Bunch import Bunch
from directory import directory
from Logger import Logger
from Report import Report
import parse_command_line
def usage(msg=None):
    """Print usage help (optionally preceded by an error message) and exit.

    msg: optional error string printed before the usage text.
    Always terminates the process with exit status 1.
    """
    if msg is not None:
        print msg
    print 'usage : python transactions3-analysis.py --csv RELPATH --txt RELPATH [--test]'
    print ' --csv RELPATH: path to input file, a transaction3-format csv file'
    print ' --txt RELPATH: path to output file, which will contain a text report'
    print ' --test : run in test mode'
    print 'where'
    print ' RELPATH is a path relative to the WORKING directory'
    sys.exit(1)
def make_control(argv):
    """Parse the command line and return a Bunch of run-time settings.

    argv: the full sys.argv list (argv[0] is the script name).
    Calls usage() (which exits) when the argument count is wrong or a
    required flag is missing.
    """
    # return a Bunch
    print argv
    if len(argv) not in (5, 6):
        usage('invalid number of arguments')

    pcl = parse_command_line.ParseCommandLine(argv)
    arg = Bunch(
        base_name=argv[0].split('.')[0],  # script name without extension
        csv=pcl.get_arg('--csv'),
        test=pcl.has_arg('--test'),
        txt=pcl.get_arg('--txt'),
    )

    if arg.csv is None:
        usage('missing --csv')
    if arg.txt is None:
        usage('missing --txt')

    debug = False

    # path_in/path_out are resolved relative to the WORKING directory
    return Bunch(
        arg=arg,
        debug=debug,
        path_in=directory('working') + arg.csv,
        path_out=directory('working') + arg.txt,
        test=arg.test,
    )
def analyze(transactions, column_name, report):
    """Append a describe() summary of one DataFrame column to the report.

    transactions: pandas DataFrame read from the transactions3 csv.
    column_name: name of the column to summarize.
    report: Report object collecting output lines.
    """
    values = transactions[column_name]
    if values.dtype not in (np.dtype('int64'), np.dtype('float64'), np.dtype('object')):
        # Unexpected dtype: show it and drop into the debugger so the new
        # dtype can be inspected interactively (deliberate debugging hook).
        print column_name, type(values), values.dtype
        pdb.set_trace()
    report.append(' ')
    report.append(column_name)
    ndframe = values.describe()
    report.append(ndframe)
def main(argv):
control = make_control(argv)
sys.stdout = Logger(base_name=control.arg.base_name)
print control
transactions = pd.read_csv(control.path_in,
nrows=100 if control.arg.test else None,
)
report = Report()
report.append('Analysis of transactions3 file: ' + control.arg.csv)
report.append('shape is ' + str(transactions.shape))
report.append(' ')
column_names = sorted(transactions.columns)
for column_name in column_names:
analyze(transactions, column_name, report)
pdb.set_trace()
report.write(control.path_out)
print control
if control.test:
print 'DISCARD OUTPUT: test'
print 'done'
return
if __name__ == '__main__':
    if False:
        # avoid pyflakes warnings
        # (never executed; references otherwise "unused" imports so the
        # linter does not flag them)
        pdb.set_trace()
        pprint()
        parse_command_line()
        pd.DataFrame()
        np.array()

    main(sys.argv)
| {
"repo_name": "rlowrance/re-local-linear",
"path": "transactions3-analysis.py",
"copies": "1",
"size": "2868",
"license": "mit",
"hash": -2445592685329778700,
"line_mean": 25.0727272727,
"line_max": 90,
"alpha_frac": 0.6241283124,
"autogenerated": false,
"ratio": 3.7102199223803365,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9832783685325954,
"avg_score": 0.00031290989087645707,
"num_lines": 110
} |
# Analyze distribution of RGZ counterparts in WISE color-color space
#
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
paper_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/radiogalaxyzoo/paper'
from astropy.io import fits
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
from numpy import ma
from scipy.ndimage.filters import gaussian_filter
wise_snr = 5.0
def compare_density():
# WISE All-sky sample
#
filenames = ['%s/%s.fits' % (rgz_dir,x) for x in ('wise_allsky_2M','gurkan/gurkan_all','rgz_75_wise')]
labels = ('WISE all-sky sources','Gurkan+14 radio galaxies','RGZ 75% radio galaxies')
print ''
for fname,label in zip(filenames,labels):
with fits.open(fname) as f:
d = f[1].data
if label == 'RGZ 75% radio galaxies':
d = d[d['ratio']>=0.75]
# SNR cut
snr_w1 = d['snr1'] >= wise_snr
snr_w2 = d['snr2'] >= wise_snr
snr_w3 = d['snr3'] >= wise_snr
rgz = d[rgz75 & snr_w1 & snr_w2 & snr_w3]
w1 = d['w1mpro']
w2 = d['w2mpro']
w3 = d['w3mpro']
w4 = d['w4mpro']
x = w2-w3
y = w1-w2
# AGN wedge is INCORRECTLY cited in Gurkan+14; check original Mateos+12 for numbers
#
wedge_lims = (y > -3.172*x + 7.624) & (y > (0.315*x - 0.222)) & (y < (0.315*x + 0.796))
#
# Very rough loci from Wright et al. (2010)
stars_lims = (x > 0) & (x < 1) & (y > 0.1) & (y < 0.4)
el_lims = (x > 0.5) & (x < 1.3) & (y > 0.) & (y < 0.2)
sp_lims = (x > 1.5) & (x < 3.0) & (y > 0.1) & (y < 0.4)
agn_frac = wedge_lims.sum()/float(len(d))
stars_frac = stars_lims.sum()/float(len(d))
el_frac = el_lims.sum()/float(len(d))
sp_frac = sp_lims.sum()/float(len(d))
print 'Fraction of %25s in AGN wedge: %4.1f percent' % (label,agn_frac*100)
print 'Fraction of %25s in stars locus: %4.1f percent' % (label,stars_frac*100)
print 'Fraction of %25s in elliptical locus: %4.1f percent' % (label,el_frac*100)
print 'Fraction of %25s in spiral locus: %4.1f percent' % (label,sp_frac*100)
print ''
print ''
'''
# Make empty arrays for TOPCAT
#
xb,yb = 2.250,0.487
xt,yt = 1.958,1.413
xab = np.linspace(xb,6,100)
xat = np.linspace(xt,6,100)
xal = np.linspace(xb,xt,100)
yab = 0.315*xab - 0.222
yat = 0.315*xat + 0.796
yal =-3.172*xal + 7.624
xall = np.append(xab,np.append(xat,xal))
yall = np.append(yab,np.append(yat,yal))
with open('%s/csv/agn_wedge.csv' % rgz_dir,'w') as f:
for x,y in zip(xall,yall):
print >> f,x,y
'''
# Bin data and look at differences?
#
with fits.open(filenames[0]) as f:
wise = f[1].data
with fits.open(filenames[2]) as f:
rgz = f[1].data
bins_w2w3 = np.linspace(-1,7,25)
bins_w1w2 = np.linspace(-0.5,3,25)
hw,xedges,yedges = np.histogram2d(wise['w2mpro']-wise['w3mpro'],wise['w1mpro']-wise['w2mpro'],bins=(bins_w2w3,bins_w1w2))
hr,xedges,yedges = np.histogram2d(rgz['w2mpro']-rgz['w3mpro'],rgz['w1mpro']-rgz['w2mpro'],bins=(bins_w2w3,bins_w1w2))
from matplotlib import pyplot as plt
from matplotlib import cm
fig = plt.figure(1,(10,5))
fig.clf()
hw_norm = hw/float(np.max(hw))
hr_norm = hr/float(np.max(hr))
from numpy import ma
hw_norm_masked = ma.masked_array(hw_norm,mask=(hw <= 10))
hr_norm_masked = ma.masked_array(hr_norm,mask=(hr <= 10))
extent = [bins_w2w3[0],bins_w2w3[-1],bins_w1w2[0],bins_w1w2[-1]]
ax1 = fig.add_subplot(121)
cmap = cm.jet
cmap.set_bad('w')
im1 = ax1.imshow(hw_norm_masked.T, alpha=1.0, extent=extent, vmin = 0., vmax = 1., interpolation='nearest', origin='lower')
ax1.set_title('WISE All-Sky')
ax1.set_xlabel('(W2-W3)')
ax1.set_ylabel('(W1-W2)')
ax1.set_aspect('auto')
ax2 = fig.add_subplot(122)
cmap = cm.jet
im2 = ax2.imshow(hr_norm_masked.T, alpha=1.0, extent=extent,vmin = 0., vmax = 1., interpolation='nearest', origin='lower')
ax2.set_title('RGZ 75%')
ax2.set_xlabel('(W2-W3)')
ax2.set_aspect('auto')
position=fig.add_axes([0.92,0.1,0.02,0.80])
cb = plt.colorbar(im2,cax=position,orientation='vertical')
cb.set_label('Normalized ratio',fontsize=16)
'''
ax3 = fig.add_subplot(133)
cmap = cm.jet
im3 = ax3.imshow((np.log10(hr_norm/hw_norm)).T, alpha=1.0, extent=extent,interpolation='nearest', origin='lower')
ax3.set_title('RGZ/WISE ratio')
ax3.set_aspect('auto')
position=fig.add_axes([0.92,0.1,0.02,0.80])
cb = plt.colorbar(im3,cax=position,orientation='vertical')
cb.set_label('log(ratio)',fontsize=16)
'''
#plt.show()
fig.savefig('%s/wise_rgz_fractions.png' % rgz_dir)
return None
def wise_rgz_gurkan():
    """Plot RGZ 75% and Gurkan+14 radio galaxies over the WISE all-sky
    color-color distribution (SNR >= wise_snr in W1/W2/W3).

    Prints AGN-wedge and Wright+10 locus fractions for each sample, draws
    the Mateos+12 AGN wedge, counts RGZ sources in three empirically chosen
    loci, and saves the figure to the paper directory.
    """
    plt.ion()

    # WISE All-sky sample
    filenames = ['%s/fits/%s.fits' % (rgz_dir,x) for x in ('wise_allsky_2M','gurkan_all','rgz_75_wise')]
    labels = ('WISE all-sky sources','Gurkan+14 radio galaxies','RGZ 75% radio galaxies')

    print ''
    for fname,label in zip(filenames,labels):
        with fits.open(fname) as f:
            d = f[1].data

        # Restrict the RGZ-WISE matches to 75% consensus
        if label == 'RGZ 75% radio galaxies':
            # NOTE(review): re-reads f[1].data after the `with` block has
            # closed the file; presumably works via astropy's lazy loading
            # or memmap — confirm before refactoring.
            d = f[1].data
            rgz75 = d['ratio'] >= 0.75
            snr_w1 = d['snr1'] >= wise_snr
            snr_w2 = d['snr2'] >= wise_snr
            snr_w3 = d['snr3'] >= wise_snr
            d = d[rgz75 & snr_w1 & snr_w2 & snr_w3]

        w1 = d['w1mpro']
        w2 = d['w2mpro']
        w3 = d['w3mpro']
        w4 = d['w4mpro']

        # Color-color coordinates
        x = w2-w3
        y = w1-w2

        # AGN wedge is INCORRECTLY cited in Gurkan+14; check original
        # Mateos+12 for numbers
        wedge_lims = (y > -3.172*x + 7.624) & (y > (0.315*x - 0.222)) & (y < (0.315*x + 0.796))

        # Very rough loci from Wright et al. (2010)
        stars_lims = (x > 0) & (x < 1) & (y > 0.1) & (y < 0.4)
        el_lims = (x > 0.5) & (x < 1.3) & (y > 0.) & (y < 0.2)
        sp_lims = (x > 1.5) & (x < 3.0) & (y > 0.1) & (y < 0.4)

        agn_frac = wedge_lims.sum()/float(len(d))
        stars_frac = stars_lims.sum()/float(len(d))
        el_frac = el_lims.sum()/float(len(d))
        sp_frac = sp_lims.sum()/float(len(d))

        print 'Fraction of %25s in AGN wedge: %4.1f percent' % (label,agn_frac*100)
        print 'Fraction of %25s in stars locus: %4.1f percent' % (label,stars_frac*100)
        print 'Fraction of %25s in elliptical locus: %4.1f percent' % (label,el_frac*100)
        print 'Fraction of %25s in spiral locus: %4.1f percent' % (label,sp_frac*100)
        print ''
    print ''

    # Bin data and look at differences?
    with fits.open(filenames[0]) as f:
        d = f[1].data
        # SNR cut for the all-sky sample (column names 'w1snr' etc. here,
        # unlike 'snr1' used for the RGZ file below)
        maglim_w1 = d['w1snr'] > wise_snr
        maglim_w2 = d['w2snr'] > wise_snr
        maglim_w3 = d['w3snr'] > wise_snr
        wise = d[maglim_w1 & maglim_w2 & maglim_w3]

    with fits.open(filenames[2]) as f:
        d = f[1].data
        rgz75 = d['ratio'] >= 0.75
        snr_w1 = d['snr1'] >= wise_snr
        snr_w2 = d['snr2'] >= wise_snr
        snr_w3 = d['snr3'] >= wise_snr
        rgz = d[rgz75 & snr_w1 & snr_w2 & snr_w3]

    # 2-D histograms on a common color-color grid
    xmin,xmax = -1,6
    ymin,ymax = -0.5,3
    bins_w2w3 = np.linspace(xmin,xmax,40)
    bins_w1w2 = np.linspace(ymin,ymax,40)

    hw,xedges,yedges = np.histogram2d(wise['w2mpro']-wise['w3mpro'],wise['w1mpro']-wise['w2mpro'],bins=(bins_w2w3,bins_w1w2))
    hr,xedges,yedges = np.histogram2d(rgz['w2mpro']-rgz['w3mpro'],rgz['w1mpro']-rgz['w2mpro'],bins=(bins_w2w3,bins_w1w2))

    fig = plt.figure(1,(9,8))
    fig.clf()

    hw_norm = hw/float(np.max(hw))
    hr_norm = hr/float(np.max(hr))

    # NOTE(review): the all-sky image is built from raw counts `hw` (shown
    # with LogNorm), not from `hw_norm`; `hw_norm` ends up unused.
    hw_norm_masked = ma.masked_array(hw,mask=(hw < 10))
    hr_norm_masked = ma.masked_array(hr_norm,mask=(hr <= 10))

    extent = [bins_w2w3[0],bins_w2w3[-1],bins_w1w2[0],bins_w1w2[-1]]

    ax1 = fig.add_subplot(111,position=(0.10,0.10,0.75,0.85))

    # WISE all-sky
    cmap = cm.cubehelix_r
    cmap.set_bad('w')
    Z = hw_norm_masked
    im1 = ax1.imshow(Z.T, cmap=cmap, alpha=1.0, extent=extent, interpolation='nearest', origin='lower', norm=LogNorm())

    '''
    fi = gaussian_filter(hw.T,0.5)
    levels=np.linspace(10,20000,10)
    CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='r',linewidths=1)
    '''

    # RGZ 75% catalog, shown as smoothed contours
    fi = gaussian_filter(hr.T,0.5)
    levels=np.linspace(3,50,8)
    CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='k',linewidths=1.5)
    CS.collections[0].set_label('RGZ 75%')

    # Gurkan
    with fits.open(filenames[1]) as f:
        gurkan = f[1].data
    ax1.scatter(gurkan['w2mpro']-gurkan['w3mpro'],gurkan['w1mpro']-gurkan['w2mpro'],color='g',s=10,label='PRGs (Gurkan+14)')

    # Mateos+12 AGN wedge boundary (three line segments)
    xb,yb = 2.250,0.487
    xt,yt = 1.958,1.413
    xab = np.linspace(xb,6,100)
    xat = np.linspace(xt,6,100)
    xal = np.linspace(xb,xt,100)
    yab = 0.315*xab - 0.222
    yat = 0.315*xat + 0.796
    yal =-3.172*xal + 7.624
    ax1.plot(xab,yab,color='r',linestyle='--',label='AGN "wedge"')
    ax1.plot(xat,yat,color='r',linestyle='--')
    ax1.plot(xal,yal,color='r',linestyle='--')

    ax1.set_xlabel(r'$(W2-W3)$',fontsize=20)
    ax1.set_ylabel(r'$(W1-W2)$',fontsize=20)
    ax1.set_xlim(xmin,xmax)
    ax1.set_ylim(ymin,ymax)
    ax1.set_aspect('auto')

    cb_position=fig.add_axes([0.88,0.1,0.02,0.85])
    cb = plt.colorbar(im1,cax=cb_position,orientation='vertical')
    cb.set_label('WISE all-sky sources',fontsize=16)

    h,l = ax1.get_legend_handles_labels()
    ax1.legend(h,l,loc='upper left',scatterpoints=2)

    plt.show()

    # Measure number of galaxies in the new loci
    locus1 = ((rgz['w1mpro'] - rgz['w2mpro']) > -0.2) & ((rgz['w1mpro'] - rgz['w2mpro']) < 0.3) & ((rgz['w2mpro'] - rgz['w3mpro']) > -0.2) & ((rgz['w2mpro'] - rgz['w3mpro']) < 1.0)
    locus2 = ((rgz['w1mpro'] - rgz['w2mpro']) > 0.1) & ((rgz['w1mpro'] - rgz['w2mpro']) < 0.5) & ((rgz['w2mpro'] - rgz['w3mpro']) > 3.5) & ((rgz['w2mpro'] - rgz['w3mpro']) < 4.8)
    locus3 = ((rgz['w1mpro'] - rgz['w2mpro']) > 0.8) & ((rgz['w1mpro'] - rgz['w2mpro']) < 1.5) & ((rgz['w2mpro'] - rgz['w3mpro']) > 2.2) & ((rgz['w2mpro'] - rgz['w3mpro']) < 3.6)

    print 'Locus 1 (stars): %i, %.1f' % (locus1.sum(),locus1.sum() / float(len(rgz))*100)
    print 'Locus 2 (LIRGs): %i, %.1f' % (locus2.sum(),locus2.sum() / float(len(rgz))*100)
    print 'Locus 3 (QSOs): %i, %.1f' % (locus3.sum(),locus3.sum() / float(len(rgz))*100)

    fig.savefig('%s/figures/wise_colorcolor_sn5.eps' % paper_dir)

    return None
def wise_rgz_gurkan_lowsn():
    """Same comparison as wise_rgz_gurkan(), but for the *low-SNR*
    complement: RGZ sources failing the (75% consensus AND high-SNR) cut,
    and a W3 SNR below-threshold selection for the binned samples.

    Saves the figure to the paper directory as wise_colorcolor_lowsn.eps.
    """
    plt.ion()

    # WISE All-sky sample
    filenames = ['%s/%s.fits' % (rgz_dir,x) for x in ('wise_allsky_2M','gurkan/gurkan_all','rgz_75_wise_16jan')]
    labels = ('WISE all-sky sources','Gurkan+14 radio galaxies','RGZ 75% radio galaxies')

    print ''
    for fname,label in zip(filenames,labels):
        with fits.open(fname) as f:
            d = f[1].data

        # Restrict the RGZ-WISE matches to 75% consensus
        if label == 'RGZ 75% radio galaxies':
            rgz75 = d['ratio'] >= 0.75
            snr_w1 = d['snr1'] >= wise_snr
            snr_w2 = d['snr2'] >= wise_snr
            snr_w3 = d['snr3'] >= wise_snr
            # Keep the COMPLEMENT: everything failing the combined cut
            d = d[np.logical_not(rgz75 & snr_w1 & snr_w2 & snr_w3)]

        w1 = d['w1mpro']
        w2 = d['w2mpro']
        w3 = d['w3mpro']
        w4 = d['w4mpro']

        # Color-color coordinates
        x = w2-w3
        y = w1-w2

        # AGN wedge is INCORRECTLY cited in Gurkan+14; check original
        # Mateos+12 for numbers
        wedge_lims = (y > -3.172*x + 7.624) & (y > (0.315*x - 0.222)) & (y < (0.315*x + 0.796))

        # Very rough loci from Wright et al. (2010)
        stars_lims = (x > 0) & (x < 1) & (y > 0.1) & (y < 0.4)
        el_lims = (x > 0.5) & (x < 1.3) & (y > 0.) & (y < 0.2)
        sp_lims = (x > 1.5) & (x < 3.0) & (y > 0.1) & (y < 0.4)

        agn_frac = wedge_lims.sum()/float(len(d))
        stars_frac = stars_lims.sum()/float(len(d))
        el_frac = el_lims.sum()/float(len(d))
        sp_frac = sp_lims.sum()/float(len(d))

        print 'Fraction of %25s in AGN wedge: %4.1f percent' % (label,agn_frac*100)
        print 'Fraction of %25s in stars locus: %4.1f percent' % (label,stars_frac*100)
        print 'Fraction of %25s in elliptical locus: %4.1f percent' % (label,el_frac*100)
        print 'Fraction of %25s in spiral locus: %4.1f percent' % (label,sp_frac*100)
        print ''
    print ''

    # Bin data and look at differences?
    with fits.open(filenames[0]) as f:
        d = f[1].data
        # NOTE(review): uses columns 'snr1'/'snr2'/'snr3' for the all-sky
        # file, while wise_rgz_gurkan() reads 'w1snr'/'w2snr'/'w3snr' from
        # the same file — one of the two is presumably wrong; verify against
        # the FITS table schema. W3 is deliberately selected BELOW threshold.
        maglim_w1 = d['snr1'] > wise_snr
        maglim_w2 = d['snr2'] > wise_snr
        maglim_w3 = d['snr3'] < wise_snr
        wise = d[maglim_w1 & maglim_w2 & maglim_w3]

    with fits.open(filenames[2]) as f:
        d = f[1].data
        rgz75 = d['ratio'] >= 0.75
        snr_w1 = d['snr1'] >= wise_snr
        snr_w2 = d['snr2'] >= wise_snr
        snr_w3 = d['snr3'] <= wise_snr   # low-SNR in W3 only
        rgz = d[rgz75 & snr_w1 & snr_w2 & snr_w3]

    # 2-D histograms on a common color-color grid
    xmin,xmax = -1,6
    ymin,ymax = -0.5,3
    bins_w2w3 = np.linspace(xmin,xmax,40)
    bins_w1w2 = np.linspace(ymin,ymax,40)

    hw,xedges,yedges = np.histogram2d(wise['w2mpro']-wise['w3mpro'],wise['w1mpro']-wise['w2mpro'],bins=(bins_w2w3,bins_w1w2))
    hr,xedges,yedges = np.histogram2d(rgz['w2mpro']-rgz['w3mpro'],rgz['w1mpro']-rgz['w2mpro'],bins=(bins_w2w3,bins_w1w2))

    fig = plt.figure(1,(9,8))
    fig.clf()

    hw_norm = hw/float(np.max(hw))
    hr_norm = hr/float(np.max(hr))

    # NOTE(review): all-sky image uses raw counts `hw`; `hw_norm` is unused.
    hw_norm_masked = ma.masked_array(hw,mask=(hw < 10))
    hr_norm_masked = ma.masked_array(hr_norm,mask=(hr <= 10))

    extent = [bins_w2w3[0],bins_w2w3[-1],bins_w1w2[0],bins_w1w2[-1]]

    ax1 = fig.add_subplot(111,position=(0.10,0.10,0.75,0.85))

    # WISE all-sky
    cmap = cm.YlOrRd
    cmap.set_bad('w')
    Z = hw_norm_masked
    im1 = ax1.imshow(Z.T, cmap=cmap, alpha=1.0, extent=extent, interpolation='nearest', origin='lower')

    '''
    fi = gaussian_filter(hw.T,0.5)
    levels=np.linspace(10,20000,10)
    CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='r',linewidths=1)
    '''

    # RGZ 75% catalog, shown as smoothed contours
    fi = gaussian_filter(hr.T,0.5)
    levels=np.linspace(3,hr.max(),10)
    CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='b',linewidths=1.5)
    CS.collections[0].set_label('RGZ 75%')

    # Gurkan
    with fits.open(filenames[1]) as f:
        gurkan = f[1].data
    ax1.scatter(gurkan['w2mpro']-gurkan['w3mpro'],gurkan['w1mpro']-gurkan['w2mpro'],color='g',s=10,label='PRGs (Gurkan+14)')

    # Mateos+12 AGN wedge boundary (three line segments)
    xb,yb = 2.250,0.487
    xt,yt = 1.958,1.413
    xab = np.linspace(xb,6,100)
    xat = np.linspace(xt,6,100)
    xal = np.linspace(xb,xt,100)
    yab = 0.315*xab - 0.222
    yat = 0.315*xat + 0.796
    yal =-3.172*xal + 7.624
    ax1.plot(xab,yab,color='k',linestyle='--',label='AGN "wedge"')
    ax1.plot(xat,yat,color='k',linestyle='--')
    ax1.plot(xal,yal,color='k',linestyle='--')

    ax1.set_xlabel(r'$(W2-W3)$',fontsize=20)
    ax1.set_ylabel(r'$(W1-W2)$',fontsize=20)
    ax1.set_xlim(xmin,xmax)
    ax1.set_ylim(ymin,ymax)
    ax1.set_aspect('auto')

    cb_position=fig.add_axes([0.88,0.1,0.02,0.85])
    cb = plt.colorbar(im1,cax=cb_position,orientation='vertical')
    cb.set_label('WISE all-sky sources',fontsize=16)

    h,l = ax1.get_legend_handles_labels()
    ax1.legend(h,l,loc='upper left',scatterpoints=2)

    plt.show()

    fig.savefig('%s/figures/wise_colorcolor_lowsn.eps' % paper_dir)

    return None
wise_rgz_gurkan()
| {
"repo_name": "afgaron/rgz-analysis",
"path": "python/wise_colorcolor.py",
"copies": "2",
"size": "16023",
"license": "mit",
"hash": 800573951823259600,
"line_mean": 33.5323275862,
"line_max": 180,
"alpha_frac": 0.5542033327,
"autogenerated": false,
"ratio": 2.4973503740648377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40515537067648383,
"avg_score": null,
"num_lines": null
} |
""" analyze_errors.py
Usage: analyze_errors.py <pred_out>
"""
from collections import Counter
import json
from text.dataset import Example
""" wanted format
in may 0000 , after finishing the 5-year-term of president of the republic of [macedonia]_2 , [branko crvenkovski]_1 returned to the sdum and was reelected leader of the party .
branko crvenkovski PERSON
macedonia LOCATION
per:countries_of_residence
PATH=7
LOCATION **PAD** LOCATION
republic nmod1 LOCATION
president nmod1 O
5-year-term nmod1 DURATION
finishing dobj1 O
returned advcl1 O
PERSON nsubj2 PERSON
"""
def safe_encode(lines):
    """Join *lines* with newlines, UTF-8-encode the result, and strip
    surrounding whitespace from the encoded bytes."""
    joined = u"\n".join(lines)
    return joined.encode('utf-8').strip()
def print_example(ex, fout):
    """Write a human-readable dump of one mispredicted example to *fout*.

    Emits the debug sentence, subject/object with NER tags, gold and
    predicted relation, path length, and one "word dep ner" line per token,
    followed by a blank line.
    """
    header = [
        ' '.join(ex.debug),
        ex.subject + ' ' + ex.subject_ner,
        ex.object + ' ' + ex.object_ner,
        ex.relation + ' ' + ex.predicted_relation,
        'PATH = ' + str(len(ex.words)),
    ]
    token_lines = [' '.join([word, dep, ner])
                   for word, dep, ner in zip(ex.words, ex.parse, ex.ner)]
    fout.write(safe_encode(header + token_lines) + "\n\n")
if __name__ == '__main__':
    from docopt import docopt
    args = docopt(__doc__)

    # Tally examples and mispredictions by dependency-path length
    error_by_length = Counter()
    length_count = Counter()
    with open(args['<pred_out>']) as fin, open(args['<pred_out>'] + '.analysis', 'wb') as fout:
        for line in fin:
            # one JSON-encoded Example per line
            ex = Example(**json.loads(line))
            length_count[len(ex.parse)] += 1
            if ex.relation != ex.predicted_relation:
                # dump every mispredicted example for manual inspection
                print_example(ex, fout)
                error_by_length[len(ex.parse)] += 1
        # summary table: error rate broken down by path length
        print >> fout, "length\tcount\tnum_error\tpercent_error"
        for length, count in length_count.most_common():
            num_error = error_by_length[length]
            print >> fout, "\t".join([str(e) for e in [length, count, num_error, num_error/float(count)]])
| {
"repo_name": "vzhong/sent2rel",
"path": "analyze_errors.py",
"copies": "1",
"size": "1846",
"license": "mit",
"hash": 7515135010268994000,
"line_mean": 30.8275862069,
"line_max": 177,
"alpha_frac": 0.6175514626,
"autogenerated": false,
"ratio": 3.1772805507745265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42948320133745266,
"avg_score": null,
"num_lines": null
} |
"""Analyze how well a system can be reduced by POD methods.
Evaluate pod.py and how well it fits a particular problem. This file contains
helper functions to compare reductions, create plots and creat TeX tables.
Notes
-----
This file should also take care of profiling in the future.
"""
from __future__ import division, print_function
import random
import math
import numpy as np
from scipy import linalg
from matplotlib.pyplot import plot, subplot, legend, figure, semilogy
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import example2sys as e2s
import pod
import time
font_options = {}
def _systemsToReduce(k_bal_trunc, k_cont_trunc):
red_sys = []
for k in k_bal_trunc:
if k:
with_k_str = "\nwith k = {}".format(k)
else:
with_k_str = ""
red_sys.append({"name": "balanced truncation" + with_k_str,
"shortname": "BT",
"reduction": "truncation_square_root_trans_matrix",
"k": k})
for k in k_cont_trunc:
with_k_str = "\nwith k = {}".format(k)
red_sys.append({"name": "controllability truncation" + with_k_str,
"shortname": "CT",
"reduction": "controllability_truncation",
"k": k})
return red_sys
def _relativeErrors(Y, Yhat, min_error=0.):
diff = Y - Yhat
Y_above_min = np.where(abs(diff) <= min_error,
np.copysign(np.inf, diff),
np.copysign(Y, diff))
err = diff / Y_above_min
return diff, err
def reducedAnalysis1D(unred_sys, k=10, k2=28,
                      T0=0., T=1., number_of_steps=100):
    """Reduce a system, simulate all variants over [T0, T], and compare.

    unred_sys: one-element list of dicts; unred_sys[0]["sys"] is the full
        system (callable on a list of time steps).
    k, k2: reduced orders for the truncation methods.
    Mutates each system dict in place, adding "Y", "diff", "rel_eps" and
    "eps", draws three comparison figures, and returns the combined list of
    unreduced + reduced system dicts.
    """
    print("REDUCTIONS\n--------------")
    k_bal_trunc = [None, k]
    # include k2 only when given; always include k
    k_cont_trunc = [k2] * (k2 is not None) + [k]
    red_sys = _systemsToReduce(k_bal_trunc, k_cont_trunc)
    red_sys = reduce(unred_sys[0]["sys"], red_sys)

    print("===============\nEVALUATIONS\n===============")
    timeSteps = list(np.linspace(T0, T, number_of_steps))
    systems = unred_sys + red_sys
    for system in systems:
        print(system["name"])
        with Timer():
            system["Y"] = system["sys"](timeSteps)

    print("===============\nERRORS\n===============")
    norm_order = np.inf
    # systems[0] is the unreduced reference solution
    Y = systems[0]["Y"]
    for system in systems:
        print(system["name"], "has order", system["sys"].order)
        system["diff"], system["rel_eps"] = \
            zip(*[_relativeErrors(y, yhat, system.get("error_bound", 0.))
                  for y, yhat in zip(Y, system["Y"])])
        system["eps"] = [linalg.norm(diff, ord=norm_order)
                         for diff in system["diff"]]
        print("and a maximal error of {}".format(max(system["eps"])))
        print("and an error at t=T of {}".format(system["eps"][-1]))

    print("==============\nPLOTS\n==============")
    # Figure 1: all outputs over time
    figure(1)
    for system in systems:
        plot(timeSteps, system["Y"], label=system["name"])
    legend(loc="lower right")

    # Figure 2: error curves, split across two subplots
    figure(2)
    for system in systems[1:4]:
        subplot(1, 2, 1)
        plot(timeSteps, system["eps"], label=system["name"])
        legend(loc="upper left")
    for system in systems[4:]:
        subplot(1, 2, 2)
        plot(timeSteps, system["eps"], label=system["name"])
        legend(loc="upper left")

    # Figure 3: Hankel singular values of each reduced system
    markers = ['o', 'v', '*', 'x', 'd']
    figure(3)
    for system, marker in zip(systems[1:], markers):
        sv = list(system["sys"].hsv)
        semilogy(range(len(sv)), sv,
                 marker=marker, label=system["name"])
    legend(loc="lower left")

    return systems
def reducedAnalysis2D(unred_sys, control, k=10, k2=None,
                      T0=0., T=1., L=1., number_of_steps=100,
                      picture_destination=
                      "../plots/plot_{}_t{:.2f}_azim_{}.png"):
    """Reduce, simulate and compare a system whose output is a spatial
    profile, with optional interactive 3-D surface plots.

    control: label used in the saved picture filename.
    L: spatial length of the domain (y axis of the surface plots).
    Prompts the user (raw_input — Python 2 only) before drawing/saving.
    Returns the combined list of unreduced + reduced system dicts.
    """
    print("REDUCTIONS\n--------------")
    k_bal_trunc = [None, k]
    # include k2 only when given; always include k
    k_cont_trunc = [k2] * (k2 is not None) + [k]
    red_sys = _systemsToReduce(k_bal_trunc, k_cont_trunc)
    red_sys = reduce(unred_sys[0]["sys"], red_sys)

    print("============\nEVALUATIONS\n===============")
    timeSteps = list(np.linspace(T0, T, number_of_steps))
    systems = unred_sys + red_sys
    for system in systems:
        print(system["name"])
        with Timer():
            system["Y"] = system["sys"](timeSteps)

    print("===============\nERRORS\n===============")
    norm_order = np.inf
    # systems[0] is the unreduced reference solution
    Y = systems[0]["Y"]
    for system in systems:
        print(system["name"], "has order", system["sys"].order)
        system["diff"], system["rel_eps"] = \
            zip(*[_relativeErrors(y, yhat, system.get("error_bound", 0.))
                  for y, yhat in zip(Y, system["Y"])])
        system["eps"] = [linalg.norm(diff, ord=norm_order)
                         for diff in system["diff"]]
        print("and a maximal error of {}".format(max(system["eps"])))
        print("and an error at t=T of {}".format(system["eps"][-1]))

    print("==============\nPLOTS\n==============")
    draw_figures = raw_input("Do you want to draw figures? (y/N) ")
    if draw_figures == "y":
        figure(2)
        for system in systems[1:]:
            plot(timeSteps, system["eps"], label=system["name"])
        legend(loc="upper left")

        fig = figure()
        number_of_outputs = len(Y[0])
        # Build the (time, space) mesh; note Y is rebound here from the
        # reference outputs to the mesh's second coordinate.
        X, Y = [], []
        for i in range(len(timeSteps)):
            X.append([timeSteps[i] for _ in range(number_of_outputs)])
            Y.append([j*L/(number_of_outputs-1)
                      for j in range(number_of_outputs)])
        axes = []
        for system in range(len(systems)):
            # 2x2 grid normally; shift to a wider layout for >4 systems
            axes.append(fig.add_subplot(221+system+10*(len(systems) > 4),
                                        projection='3d'))
            Z = []
            for i in range(len(timeSteps)):
                Z.append(list(systems[system]["Y"][i]))
            axes[-1].plot_surface(X, Y, Z, rstride=1, cstride=1,
                                  cmap=cm.coolwarm,
                                  linewidth=0, antialiased=False)
            axes[-1].set_title(systems[system]["name"])
            axes[-1].set_xlabel("t")
            axes[-1].set_ylabel("l")
            axes[-1].set_zlabel("temperature")

        save_figures = raw_input("Do you want to save the figures? (y/N) ")
        if save_figures == "y":
            # Rotate all subplots through azimuth angles and save one
            # picture per angle.
            for ii in xrange(360, 0, -10):
                for ax in axes:
                    ax.azim = ii
                fig.savefig(picture_destination.format(control, T,
                                                       axes[0].azim))

    return systems
def controllableHeatSystemComparison(N=1000, k=None, k2=None,
                                     r=0.05, T0=0., T=1., L=1.,
                                     number_of_steps=100,
                                     control="sin",
                                     integrator="dopri5",
                                     integrator_options={}):
    """Run the 2-D reduction analysis on the controllable heat equation.

    N: spatial discretization size; k defaults to max(1, N/50) when omitted.
    control: name of the input signal, also embedded in the plot filenames.
    """
    if k is None:
        k = max(1, int(N/50))

    print("SETUP\n====================")
    system_spec = {"name": "Controllable heat equation"}
    print(system_spec["name"])
    with Timer():
        system_spec["sys"] = e2s.controllableHeatSystem(N=N, L=L,
                                                        control=control)
    system_spec["sys"].integrator = integrator
    system_spec["sys"].integrator_options = integrator_options

    pic_path = "../plots/controllable_heat_{}_t{:.2f}_azim_{}.png"
    reducedAnalysis2D([system_spec], control, k, k2, T0, T, L,
                      number_of_steps,
                      picture_destination=pic_path)
def optionPricingComparison(N=1000, k=None,
                            option="put", r=0.05, T=1., K=10., L=None,
                            integrator="dopri5", integrator_options={}):
    """Compare reductions of the heat-equation formulation of option pricing.

    N: discretization size; k defaults to max(1, N/50); L defaults to 3*K.
    option: "put"/"call" label forwarded to e2s.optionPricing and used in
        the saved figure name.
    Simulates the full and reduced systems over [0, T], prints errors,
    draws 3-D price surfaces and an error plot, and saves the figure.
    """
    if k is None:
        k = max(1, int(N/50))
    if L is None:
        L = 3 * K

    print("SETUP\n====================")
    unred_sys = [{"name": ("Heat equation for {} option pricing" +
                           " with n = {}").format(option, N)}]
    print(unred_sys[0]["name"])
    with Timer():
        unred_sys[0]["sys"] = e2s.optionPricing(N=N, option=option,
                                                r=r, T=T, K=K, L=L)
    unred_sys[0]["sys"].integrator = integrator
    unred_sys[0]["sys"].integrator_options = integrator_options

    print("REDUCTIONS\n--------------")
    k_bal_trunc = [None, k]
    k_cont_trunc = [k]
    red_sys = _systemsToReduce(k_bal_trunc, k_cont_trunc)
    red_sys = reduce(unred_sys[0]["sys"], red_sys)

    print("============\nEVALUATIONS\n===============")
    timeSteps = list(np.linspace(0, T, 30))
    systems = unred_sys + red_sys
    for system in systems:
        print(system["name"])
        with Timer():
            system["Y"] = system["sys"](timeSteps)

    print("===============\nERRORS\n===============")
    norm_order = np.inf
    # systems[0] is the unreduced reference; skip t=0 (identical by
    # construction) and prepend a zero error for it.
    Y = systems[0]["Y"]
    for system in systems:
        print(system["name"], "has order", system["sys"].order)
        system["eps"] = [0.] + [linalg.norm(y-yhat, ord=norm_order)
                                for y, yhat in zip(Y[1:], system["Y"][1:])]
        print("and a maximal error of", max(system["eps"]))

    print("==============\nPLOTS\n==============")
    fig = figure(1)
    # Only plot the region of interest around the strike price
    N2 = int(1.5*K*N/L)
    X, Y = [], []
    for i in range(len(timeSteps)):
        X.append([timeSteps[i] for _ in range(N2)])
        Y.append([j*L/N for j in range(N2)])
    axes = []
    # BUG FIX: this loop was `for system in range(6)`, which indexed past
    # the 4 systems actually built (1 unreduced + 3 reduced) and raised an
    # IndexError; iterate over the systems that exist instead.
    for system in range(len(systems)):
        axes.append(fig.add_subplot(221+system, projection='3d'))
        Z = []
        for i in range(len(timeSteps)):
            Z.append(list(systems[system]["Y"][i])[:N2])
        axes[-1].plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
                              linewidth=0, antialiased=False)
        axes[-1].set_title(systems[system]["name"], **font_options)
        axes[-1].set_xlabel("t", **font_options)
        axes[-1].set_ylabel("K", **font_options)
        axes[-1].set_zlabel("Lambda(K)", **font_options)
    # Use a common viewing angle for all subplots before saving
    for ax in axes:
        ax.azim = 26
    fig.savefig("../plots/{}_option_azim_{}.png".format(option, axes[0].azim))

    figure(2)
    for system in systems[1:]:
        plot(timeSteps, system["eps"], label=system["name"])
    legend(loc="upper left")
def thermalRCNetworkComparison(R=1e90, C=1e87, n=100, k=10, k2=28, r=3,
                               T0=0., T=1., omega=math.pi, number_of_steps=100,
                               control="sin", input_scale=1.,
                               integrator="dopri5",
                               integrator_options={}):
    """Run the 1-D reduction analysis on a thermal RC ladder network.

    R, C: total resistance and capacitance of the network; n: network size.
    control/omega pick the scalar input signal u(t).
    """
    def u(t, x=None):
        return np.array([e2s.simple_functions[control](omega * t)])

    print("===============\nSETUP\n===============")
    network = {"name": "Thermal RC Netwok with n = {}".format(n)}
    print(network["name"])
    with Timer():
        C0, network["sys"] = e2s.thermalRCNetwork(R, C, n, r, u,
                                                  input_scale=input_scale)
    network["sys"].integrator = integrator
    network["sys"].integrator_options = integrator_options

    reducedAnalysis1D([network], k, k2, T0, T, number_of_steps)
def loadHeat(k=10, k2=28, T0=0., T=1., number_of_steps=100,
             control="sin", omega=math.pi, control_scale=1.,
             all_state_vars=False,
             integrator="dopri5",
             integrator_options={}):
    """Load the SLICOT heat equation benchmark as a one-element system list.

    control/omega/control_scale define the scalar input u(t).
    all_state_vars: when True, replace the output matrix C with the
        identity so every state variable is observed.
    NOTE(review): k, k2, T0, T and number_of_steps are accepted only for
    signature compatibility with the compare* callers — they are unused here.
    """
    u = lambda t, x=None: np.array(
        [e2s.simple_functions[control](omega * t) * control_scale])

    unred_sys = [{"name": "Heat equation from\nthe SLICOT benchmarks",
                  "shortname": "Heat eq"}]
    print(unred_sys[0]["name"])
    with Timer():
        unred_sys[0]["sys"] = e2s.example2sys("heat-cont.mat")

    heat_sys = unred_sys[0]["sys"]
    heat_sys.control = u
    heat_sys.integrator = integrator
    heat_sys.integrator_options = integrator_options
    if all_state_vars:
        # observe the full state vector instead of the benchmark output
        heat_sys.C = np.eye(heat_sys.order)

    return unred_sys
def compareHeat(k=10, k2=28, T0=0., T=10., number_of_steps=300,
                control="sin", omega=math.pi, control_scale=1.,
                integrator="dopri5",
                integrator_options={}):
    """Run the 1-D reduction analysis on the SLICOT heat benchmark output.

    Thin wrapper: loads the benchmark (benchmark output, not full state)
    and delegates to reducedAnalysis1D. Returns the analyzed system list.
    """
    unreduced = loadHeat(k, k2, T0, T, number_of_steps,
                         control, omega, control_scale,
                         False,
                         integrator, integrator_options)
    return reducedAnalysis1D(unreduced, k, k2, T0, T, number_of_steps)
def compareHeatStates(k=10, k2=37, T0=0., T=10., number_of_steps=300,
                      control="sin", omega=math.pi, control_scale=1.,
                      integrator="dopri5",
                      integrator_options={}):
    """Run the 2-D reduction analysis on the SLICOT heat benchmark with the
    FULL state vector observed (C replaced by the identity).

    Returns the analyzed system list from reducedAnalysis2D.
    """
    unreduced = loadHeat(k, k2, T0, T, number_of_steps,
                         control, omega, control_scale,
                         True,
                         integrator, integrator_options)

    length = 1.
    destination = "../plots/slicot_heat_{}_t{:.2f}_azim_{}.png"
    return reducedAnalysis2D(unreduced, control, k, k2, T0, T, length,
                             number_of_steps,
                             picture_destination=destination)
def reduce(sys, red_sys):
    """Compute one reduced model per specification in red_sys.

    sys: the full system to reduce.
    red_sys: list of spec dicts from _systemsToReduce(); each gains a
        "sys" (the reduced pod.lss model) and an "error_bound" entry.
    Returns red_sys (mutated in place).

    NOTE(review): the function name shadows the builtin reduce() and the
    parameter `sys` shadows the sys module within this scope; renaming
    either would break existing callers, so it is only flagged here.
    """
    for system in red_sys:
        print(system["name"])
        with Timer():
            system["sys"] = \
                pod.lss(sys,
                        reduction=system.get("reduction", None),
                        k=system.get("k", None))
        # Smallest meaningful tolerance: largest Hankel singular value
        # scaled by machine epsilon and the original system order.
        system["error_bound"] = system["sys"].hsv[0] * \
            np.finfo(float).eps * sys.order
    return red_sys
def tableFormat(systems, eps=True, rel_eps=False, solving_time=False,
                hankel_norm=False, min_tol=False):
    """Build, print and return a LaTeX table summarizing reduction results.

    Parameters
    ----------
    systems : list of dict
        One dict per (possibly reduced) system.  Required key: ``"sys"``
        (object with an ``order`` attribute and, when `hankel_norm` is
        requested, an ``hsv`` sequence).  Optional keys: ``"shortname"``,
        ``"eps"``, ``"rel_eps"``, ``"error_bound"``.
    eps, rel_eps : bool
        Include absolute / relative error columns (max over time and at
        the final time).  Missing data is rendered as 0.
    solving_time : bool
        Include a solving-time column (currently always rendered as 0).
    hankel_norm : bool
        Include the Hankel norm (largest Hankel singular value).
    min_tol : bool
        Include the minimal-tolerance (error-bound) column.

    Returns
    -------
    str
        The formatted table.  It is also printed to stdout, preserving the
        historical behavior of this function.
    """
    th = ("Reduction"
          " & Order")
    # "\\\\\\midrule" encodes three literal backslashes + "midrule"; the
    # previous spelling relied on the deprecated invalid escape "\m".
    tb_template = ("\\\\\\midrule\n{:7s}"
                   " & \\num{{{:3d}}}")
    if (eps + rel_eps) == 2:
        th += (" & \\multicolumn{2}{r|}{\\specialcell[r]{Max. \\& Rel. Error"
               "\\\\at $0\\leq t \\leq T$}}"
               " & \\multicolumn{2}{r|}{\\specialcell[r]{Max. \\& Rel. Error"
               "\\\\at $t=T$}}")
        tb_template += (" & \\num{{{:15.10e}}} & \\num{{{:15.10e}}}"
                        " & \\num{{{:15.10e}}} & \\num{{{:15.10e}}}")
    elif eps:
        th += (" & \\specialcell[r]{Max. Error\\\\at $0\\leq t \\leq T$}"
               " & \\specialcell[r]{Max. Error\\\\at $t=T$}")
        tb_template += " & \\num{{{:15.10e}}} & \\num{{{:15.10e}}}"
    elif rel_eps:
        th += (" & \\specialcell[r]{Rel. Error\\\\at $0\\leq t \\leq T$}"
               " & \\specialcell[r]{Rel. Error\\\\at $t=T$}")
        tb_template += " & \\num{{{:15.10e}}} & \\num{{{:15.10e}}}"
    if solving_time:
        th += " & \\specialcell[r]{Solving Time}"
        tb_template += " & \\SI{{{:8.3e}}}{{\\second}}"
    if hankel_norm:
        th += " & Hankel Norm"
        tb_template += " & \\num{{{:15.10e}}}"
    if min_tol:
        th += " & Minimal Tolerance"
        tb_template += " & \\num{{{:15.10e}}}"
    tb = []
    for system in systems:
        results = [
            system.get("shortname", "Original"),
            system["sys"].order
        ]
        # Missing measurement keys render as 0. (try/except keeps the exact
        # historical behavior, including a ValueError on present-but-empty
        # sequences).
        if eps:
            try:
                results.append(max(system["eps"]))
            except KeyError:
                results.append(0.)
        if rel_eps:
            try:
                results.append(max(map(max, system["rel_eps"])))
            except KeyError:
                results.append(0.)
        if eps:
            try:
                results.append(system["eps"][-1])
            except KeyError:
                results.append(0.)
        if rel_eps:
            try:
                results.append(max(system["rel_eps"][-1]))
            except KeyError:
                results.append(0.)
        if solving_time:
            # TODO: solving time is never measured here; always rendered as 0.
            results.append(0.)
        if hankel_norm:
            results.append(system["sys"].hsv[0])
        if min_tol:
            try:
                results.append(system["error_bound"])
            except KeyError:
                results.append(0.)
        tb.append(tb_template.format(*results))
    # join() instead of repeated += (which is quadratic in the worst case).
    table = th + "".join(tb)
    print(table)
    return table
| {
"repo_name": "johannes-scharlach/pod-control",
"path": "src/analysis.py",
"copies": "1",
"size": "16379",
"license": "mit",
"hash": 8327374377665420000,
"line_mean": 31.4336633663,
"line_max": 79,
"alpha_frac": 0.4938030405,
"autogenerated": false,
"ratio": 3.4930688846235873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4486871925123587,
"avg_score": null,
"num_lines": null
} |
""" Analyze libraries in trees
Analyze library dependencies in paths and wheel files
"""
import os
from os.path import basename, join as pjoin, realpath
import warnings
from .tools import (get_install_names, zip2dir, get_rpaths,
get_environment_variable_paths)
from .tmpdirs import TemporaryDirectory
def tree_libs(start_path, filt_func=None):
    """ Return analysis of library dependencies within `start_path`

    Walks the tree under `start_path` and records, for every inspected
    file, which libraries it links against.

    Parameters
    ----------
    start_path : str
        root path of tree to search for libraries depending on other
        libraries.
    filt_func : None or callable, optional
        If None, inspect all files for library dependencies. If callable,
        accepts filename as argument, returns True if we should inspect
        the file, False otherwise.

    Returns
    -------
    lib_dict : dict
        (``libpath``, ``dependings_dict``) pairs.  ``libpath`` is the
        canonical (``os.path.realpath``) filename of a depended-on
        library, or a library name starting with '@rpath', '@loader_path'
        or '@executable_path'.  ``dependings_dict`` maps the canonical
        path of each depending library to the install name by which it
        refers to ``libpath``.

    Notes
    -----
    See:

    * https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html  # noqa: E501
    * http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
    """
    lib_dict = {}
    env_var_paths = get_environment_variable_paths()
    for dirpath, _, basenames in os.walk(start_path):
        for base in basenames:
            depender = realpath(pjoin(dirpath, base))
            if filt_func is not None and not filt_func(depender):
                continue
            # rpaths recorded in the binary take precedence over the
            # environment-variable search paths.
            search_paths = get_rpaths(depender) + env_var_paths
            for install_name in get_install_names(depender):
                if install_name.startswith('@rpath'):
                    # Try to resolve '@rpath/...' against the search paths.
                    lib_path = resolve_rpath(install_name, search_paths)
                elif install_name.startswith('@'):
                    # Leave other '@'-relative names untouched.
                    lib_path = install_name
                else:
                    # Plain names are searched via the DYLD_* variables.
                    lib_path = search_environment_for_lib(install_name)
                lib_dict.setdefault(lib_path, {})[depender] = install_name
    return lib_dict
def resolve_rpath(lib_path, rpaths):
    """ Return `lib_path` with a leading `@rpath` substituted, if possible

    A path that does not begin with ``@rpath/`` is returned unchanged.
    Otherwise each entry of `rpaths` is tried in order, and the first
    combination that exists on disk is returned.  When no combination
    exists, a detailed warning is emitted and the unresolved `lib_path`
    is returned as is.

    Parameters
    ----------
    lib_path : str
        The path to a library file, which may or may not start with
        `@rpath`.
    rpaths : sequence of str
        A sequence of search paths, usually gotten from a call to
        `get_rpaths`.

    Returns
    -------
    lib_path : str
        A str with the resolved libraries realpath.
    """
    if not lib_path.startswith('@rpath/'):
        return lib_path
    relative = lib_path.split('/', 1)[1]
    for search_dir in rpaths:
        candidate = realpath(pjoin(search_dir, relative))
        if os.path.exists(candidate):
            return candidate
    warnings.warn(
        "Couldn't find {0} on paths:\n\t{1}".format(
            lib_path,
            '\n\t'.join(realpath(path) for path in rpaths),
        )
    )
    return lib_path
def search_environment_for_lib(lib_path):
    """ Search common environment variables for `lib_path`

    Candidates are tried in the order Apple documents for "searching for
    a library that has a directory name in it":

    1. the library's basename on DYLD_LIBRARY_PATH;
    2. ``realpath(lib_path)`` itself;
    3. the basename on DYLD_FALLBACK_LIBRARY_PATH.

    See:
    https://developer.apple.com/library/archive/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryUsageGuidelines.html#//apple_ref/doc/uid/TP40001928-SW10

    and "testing_osx_rpath_env_variables.sh" in tests/data for a more
    in-depth explanation.  The LD_LIBRARY_PATH case is a narrow subset of
    the above and is deliberately ignored to keep things simple.

    Parameters
    ----------
    lib_path : str
        Name of the library to search for

    Returns
    -------
    lib_path : str
        Full path of ``basename(lib_path)``'s location, if it can be
        found, or ``realpath(lib_path)`` if it cannot.
    """
    lib_basename = basename(lib_path)
    resolved = realpath(lib_path)
    candidates = _paths_from_var('DYLD_LIBRARY_PATH', lib_basename)
    candidates.append(resolved)
    candidates.extend(
        _paths_from_var('DYLD_FALLBACK_LIBRARY_PATH', lib_basename))
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return resolved
def get_prefix_stripper(strip_prefix):
    """ Return function to strip `strip_prefix` prefix from string if present

    Parameters
    ----------
    strip_prefix : str
        Prefix to strip from the beginning of string if present

    Returns
    -------
    stripper : func
        function such that ``stripper(a_string)`` will strip `prefix` from
        ``a_string`` if present, otherwise pass ``a_string`` unmodified
    """
    prefix_len = len(strip_prefix)

    def stripper(path):
        # Drop the prefix characters only when they actually lead the string.
        if path.startswith(strip_prefix):
            return path[prefix_len:]
        return path

    return stripper
def get_rp_stripper(strip_path):
    """ Return function to strip ``realpath`` of `strip_path` from string

    Parameters
    ----------
    strip_path : str
        path to strip from beginning of strings. Processed to
        ``strip_prefix`` by ``realpath(strip_path) + os.path.sep``.

    Returns
    -------
    stripper : func
        function such that ``stripper(a_string)`` will strip
        ``strip_prefix`` from ``a_string`` if present, otherwise pass
        ``a_string`` unmodified
    """
    # Canonicalize and terminate with a separator so only whole directory
    # prefixes are stripped.
    prefix = realpath(strip_path) + os.path.sep
    return get_prefix_stripper(prefix)
def stripped_lib_dict(lib_dict, strip_prefix):
    """ Return `lib_dict` with `strip_prefix` removed from start of paths

    Use to give form of `lib_dict` that appears relative to some base path
    given by `strip_prefix`. Particularly useful for analyzing wheels
    where we unpack to a temporary path before analyzing.

    Parameters
    ----------
    lib_dict : dict
        See :func:`tree_libs` for definition. All depending and depended
        paths are canonical (therefore absolute)
    strip_prefix : str
        Prefix to remove (if present) from all depended and depending
        library paths in `lib_dict`

    Returns
    -------
    relative_dict : dict
        `lib_dict` with `strip_prefix` removed from beginning of all
        depended and depending library paths.
    """
    stripper = get_prefix_stripper(strip_prefix)
    # Strip both levels: the depended-on paths (keys) and each depending
    # path inside the nested dicts; install names are left untouched.
    return {
        stripper(lib_path): {
            stripper(depending): install_name
            for depending, install_name in dependings.items()
        }
        for lib_path, dependings in lib_dict.items()
    }
def wheel_libs(wheel_fname, filt_func=None):
    """ Return analysis of library dependencies within a Python wheel

    Use this routine for a dump of the dependency tree.

    Parameters
    ----------
    wheel_fname : str
        Filename of wheel
    filt_func : None or callable, optional
        If None, inspect all files for library dependencies. If callable,
        accepts filename as argument, returns True if we should inspect
        the file, False otherwise.

    Returns
    -------
    lib_dict : dict
        dictionary with (key, value) pairs of (``libpath``,
        ``dependings_dict``). ``libpath`` is library being depended on,
        relative to wheel root path if within wheel tree.
        ``dependings_dict`` is (key, value) of (``depending_lib_path``,
        ``install_name``). Again, ``depending_lib_path`` is library
        relative to wheel root path, if within wheel tree.
    """
    # Unpack into a throwaway directory, analyze, then rewrite the paths
    # relative to the (canonicalized) unpack root.
    with TemporaryDirectory() as unpack_dir:
        zip2dir(wheel_fname, unpack_dir)
        raw_libs = tree_libs(unpack_dir, filt_func)
        root_prefix = realpath(unpack_dir) + os.path.sep
        return stripped_lib_dict(raw_libs, root_prefix)
def _paths_from_var(varname, lib_basename):
var = os.environ.get(varname)
if var is None:
return []
return [pjoin(path, lib_basename) for path in var.split(':')]
| {
"repo_name": "matthew-brett/delocate",
"path": "delocate/libsana.py",
"copies": "1",
"size": "9559",
"license": "bsd-2-clause",
"hash": -177858993633683740,
"line_mean": 33.8868613139,
"line_max": 188,
"alpha_frac": 0.6324929386,
"autogenerated": false,
"ratio": 4.0130142737195635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 274
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.