#!/usr/bin/env python
"""
Produce evaluation plots comparing a source-finder catalogue against a
reference catalogue: flux and size ratios, positional offsets, and
completeness/reliability as functions of flux and major axis.
"""
import askap.analysis.evaluation
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import *
import os
from astropy.io import fits
from askap.analysis.evaluation.readData import *
from askap.analysis.evaluation.distributionPlotsNew import *
from askap.analysis.evaluation.distributionPlots import *
from askap.analysis.evaluation.sourceSelection import *
from optparse import OptionParser
import askap.parset as parset
from askap import logging

#############

if __name__ == '__main__':

    parser = OptionParser()
    parser.add_option("-c", "--config", dest="inputfile", default="",
                      help="Input parameter file [default: %default]")
    (options, args) = parser.parse_args()

    if options.inputfile == '':
        inputPars = parset.ParameterSet()
    elif not os.path.exists(options.inputfile):
        logging.warning("Config file %s does not exist! Using default parameter values." % options.inputfile)
        inputPars = parset.ParameterSet()
    else:
        inputPars = parset.ParameterSet(options.inputfile).Eval

    sourceCatFile = inputPars.get_value('sourceCatalogue', '')
    if sourceCatFile == '':
        logging.error('Eval.sourceCatalogue not provided. Doing no evaluation.')
        exit(0)
    if not os.access(sourceCatFile, os.F_OK):
        logging.error("Eval.sourceCatalogue %s does not exist. Doing no evaluation." % sourceCatFile)
        exit(0)
    sourceCatType = inputPars.get_value('sourceCatalogueType', 'Selavy')
    sourceCat = readCat(sourceCatFile, sourceCatType)
    sourceFluxScale = inputPars.get_value('sourceFluxScale', 1.0)

    refCatFile = inputPars.get_value('refCatalogue', '')
    if refCatFile == '':
        logging.error('Eval.refCatalogue not provided. Doing no evaluation.')
        exit(0)
    if not os.access(refCatFile, os.F_OK):
        logging.error("Eval.refCatalogue %s does not exist. Doing no evaluation." % refCatFile)
        exit(0)
    refCatType = inputPars.get_value('refCatalogueType', 'Selavy')
    refCat = readCat(refCatFile, refCatType)
    refFluxScale = inputPars.get_value('refFluxScale', 1.0)

    matchfile = inputPars.get_value('matchfile', 'matches.txt')
    if not os.access(matchfile, os.F_OK):
        logging.error("Match file %s does not exist. Doing no evaluation." % matchfile)
        exit(0)
    matchlist = readMatches(matchfile, sourceCat, refCat)

    missfile = inputPars.get_value('missfile', "misses.txt")
    if not os.access(missfile, os.F_OK):
        logging.error("Miss file %s does not exist. Doing no evaluation." % missfile)
        exit(0)
    srcmisslist = readMisses(missfile, sourceCat, 'S')
    refmisslist = readMisses(missfile, refCat, 'R')

    # 'all' produces both the single summary page and the individual plots.
    plotType = inputPars.get_value('plotType', 'all')
    if plotType == 'all':
        plotTypeArray = [True, False]
    elif plotType == 'single':
        plotTypeArray = [True]
    elif plotType == 'individual':
        plotTypeArray = [False]
    else:
        plotTypeArray = [True, False]

    imageName = inputPars.get_value('image', 'image.i.clean.taylor.0.restored.fits')
    haveBeam = os.path.exists(imageName)
    if haveBeam:
        # Read the restoring beam from the image header (degrees -> arcsec).
        image = fits.open(imageName)
        imhead = image[0].header
        bmaj = imhead.get('bmaj') * 3600.
        bmin = imhead.get('bmin') * 3600.
        bpa = imhead.get('bpa')
    else:
        print("Image file %s does not exist. Not showing beam size." % imageName)

    outputType = inputPars.get_value('outputType', 'png')

    # Tool used to determine whether a given missed reference source should be included
    selector = sourceSelector(inputPars)

    ############################
    # Arrays needed for plotting
    refFlux = []
    refMaj = []
    fluxratio = []
    fluxdiff = []
    majratio = []
    srcAxialRatio = []
    refAxialRatio = []
    paDiff = []
    dra = []
    ddec = []
    for m in matchlist:
        rflux = m.ref.flux() * refFluxScale
        sflux = m.src.flux() * sourceFluxScale
        refFlux.append(rflux)
        refMaj.append(m.ref.maj)
        fluxratio.append(sflux / rflux)
        fluxdiff.append(sflux - rflux)
        majratio.append(m.src.maj / m.ref.maj)
        srcAxialRatio.append(m.src.min / m.src.maj)
        refAxialRatio.append(m.ref.min / m.ref.maj)
        paDiff.append(m.src.pa - m.ref.pa)
        dra.append((m.src.ra - m.ref.ra) * cos(m.ref.dec * pi / 180.) * 3600.)
        ddec.append((m.src.dec - m.ref.dec) * 3600.)
    refFlux = np.array(refFlux, dtype=float)
    refMaj = np.array(refMaj, dtype=float)
    fluxratio = np.array(fluxratio, dtype=float)
    fluxdiff = np.array(fluxdiff, dtype=float)
    majratio = np.array(majratio, dtype=float)
    srcAxialRatio = np.array(srcAxialRatio, dtype=float)
    refAxialRatio = np.array(refAxialRatio, dtype=float)
    paDiff = np.array(paDiff, dtype=float)
    dra = np.array(dra, dtype=float)
    ddec = np.array(ddec, dtype=float)
    dpos = np.sqrt(dra**2 + ddec**2)

    for doSinglePlot in plotTypeArray:

        if doSinglePlot:
            print("Producing a single plot")
            dot = 'k,'
        else:
            print("Producing individual plots")
            dot = 'k.'

        ############################
        # Flux ratio plot, with guide lines showing noise and search limit
        imageNoise = inputPars.get_value('imageNoise', '')
        if imageNoise == '':
            logging.error('Eval.imageNoise not provided. Doing no evaluation.')
            exit(0)
        else:
            imageNoise = float(imageNoise)
        ratioMin = inputPars.get_value('ratioMin', 0.)
        ratioMax = inputPars.get_value('ratioMax', 2.5)
        fluxMin = inputPars.get_value('fluxMin', -1.)
        fluxMax = inputPars.get_value('fluxMax', -1.)

        if doSinglePlot:
            plt.figure(num=3, figsize=(12, 9), dpi=72)
            plt.subplots_adjust(wspace=0.5, hspace=0.5)
        else:
            plt.figure(num=2, figsize=(8, 8), dpi=72)
            print("Flux ratio vs flux")
        if doSinglePlot:
            plt.subplot(3, 4, 1)
        plt.semilogx()
        plt.plot(refFlux, fluxratio, dot)
        themin, themax = plt.xlim()
        if fluxMin > 0:
            themin = fluxMin
        if fluxMax > 0:
            themax = fluxMax
        plt.xlim(themin, themax)
        plt.ylim(ratioMin, ratioMax)
        # Guide lines: 1-sigma and 3-sigma noise envelopes around unity, plus
        # the 5-sigma search limit.
        x = 10**np.linspace(log10(themin), log10(themax), 1000)
        plt.plot(x, (x + imageNoise) / x, 'k-')
        plt.plot(x, (x - imageNoise) / x, 'k-')
        plt.plot(x, np.ones(x.size), 'k-')
        plt.plot(x, (x + imageNoise * 3) / x, 'k--')
        plt.plot(x, (x - imageNoise * 3) / x, 'k--')
        plt.plot(x, (imageNoise * 5) / x, 'k:')
        if doSinglePlot:
            plt.xlabel('Flux')
            plt.ylabel('Ratio(Flux)')
        else:
            plt.xlabel('Reference flux')
            plt.ylabel('Source flux / Reference flux')
            plt.title(sourceCatFile)
            plt.savefig('fluxRatio.%s' % outputType)

        ############################
        # Flux difference vs reference flux
        # Only for individual plots
        if not doSinglePlot:
            print("Flux diff vs flux")
            plt.cla()
            plt.semilogx()
            plt.plot(refFlux, fluxdiff, dot)
            plt.xlim(themin, themax)
            plt.xlabel('Reference flux')
            plt.ylabel('Source flux - Reference flux')
            plt.title(sourceCatFile)
            plt.savefig('fluxDiff.%s' % outputType)

        ############################
        # Relative flux difference vs reference flux
        # Only for individual plots
        if not doSinglePlot:
            print("Relative Flux diff vs flux")
            plt.cla()
            plt.semilogx()
            plt.plot(refFlux, fluxdiff / refFlux, dot)
            plt.xlim(themin, themax)
            plt.xlabel('Reference flux')
            plt.ylabel('(Source flux - Reference flux)/Reference flux')
            plt.title(sourceCatFile)
            plt.savefig('fluxDiffRelative.%s' % outputType)

        ############################
        # Major axis ratio vs flux plot
        if doSinglePlot:
            plt.subplot(3, 4, 2)
        else:
            plt.cla()
            print("Major axis ratio vs flux")
        plt.semilogx()
        plt.plot(refFlux, majratio, dot)
        plt.xlim(themin, themax)
        plt.ylim(ratioMin, ratioMax)
        if doSinglePlot:
            plt.xlabel('Flux')
            plt.ylabel('Ratio(Major Axis)')
        else:
            plt.xlabel('Reference flux')
            plt.ylabel('Source Major Axis / Reference Major Axis')
            plt.title(sourceCatFile)
            plt.savefig('majorAxisRatio_flux.%s' % outputType)

        ############################
        # Major axis ratio vs major axis plot
        if doSinglePlot:
            plt.subplot(3, 4, 3)
        else:
            plt.cla()
            print("Major axis ratio vs major axis")
        plt.plot(refMaj, majratio, dot)
        majmin, majmax = plt.xlim()
        if haveBeam:
            # Mark the restoring beam size, and the locus traced by sources
            # whose fitted size equals the beam.
            plt.axvline(bmaj, color='r')
            x = np.linspace(majmin, majmax, 101)
            plt.plot(x, bmaj / x, 'r-')
        plt.plot(refMaj, majratio, dot)
        plt.ylim(ratioMin, ratioMax)
        if doSinglePlot:
            plt.xlabel('Major axis')
            plt.ylabel('Ratio(Major Axis)')
        else:
            plt.xlabel('Reference major axis')
            plt.ylabel('Source Major Axis / Reference Major Axis')
            plt.title(sourceCatFile)
            plt.savefig('majorAxisRatio_majorAxis.%s' % outputType)

        ############################
        # Major axis ratio vs flux ratio plot
        if doSinglePlot:
            plt.subplot(3, 4, 4)
        else:
            plt.cla()
            print("Flux ratio vs major axis ratio")
        plt.plot(fluxratio, majratio, dot)
        plt.xlim(ratioMin, ratioMax)
        plt.ylim(ratioMin, ratioMax)
        if doSinglePlot:
            plt.xlabel('Ratio(Flux)')
            plt.ylabel('Ratio(Major Axis)')
        else:
            plt.xlabel('Source flux / Reference flux')
            plt.ylabel('Source Major Axis / Reference Major Axis')
            plt.title(sourceCatFile)
            plt.savefig('majorAxisRatio_fluxRatio.%s' % outputType)

        ############################
        # Major axis vs flux diff plot
        if not doSinglePlot:
            print("Flux diff vs major axis")
            plt.cla()
            plt.plot(refMaj, fluxdiff, dot)
            if haveBeam:
                plt.axvline(bmaj, color='r')
            plt.ylabel('Source flux - Reference flux')
            plt.xlabel('Reference Major Axis')
            plt.title(sourceCatFile)
            plt.savefig('majorAxis_fluxdiff.%s' % outputType)

        ############################
        # Major axis ratio vs flux diff plot
        if not doSinglePlot:
            print("Major axis ratio vs flux diff")
            plt.cla()
            plt.plot(fluxdiff, majratio, dot)
            plt.ylim(ratioMin, ratioMax)
            plt.xlabel('Source flux - Reference flux')
            plt.ylabel('Source Major Axis / Reference Major Axis')
            plt.title(sourceCatFile)
            plt.savefig('majorAxisRatio_fluxdiff.%s' % outputType)

        ############################
        # Positional offset vs flux diff plot
        if not doSinglePlot:
            print("Flux diff vs positional offset")
            plt.cla()
            plt.plot(dpos, fluxdiff, dot)
            plt.ylabel('Source flux - Reference flux')
            plt.xlabel('Positional offset [arcsec]')
            plt.title(sourceCatFile)
            plt.savefig('posoffset_fluxdiff.%s' % outputType)

        ############################
        # Positional offset vs flux ratio plot
        if not doSinglePlot:
            print("Flux ratio vs positional offset")
            plt.cla()
            plt.plot(dpos, fluxratio, dot)
            plt.ylabel('Source flux / Reference flux')
            plt.xlabel('Positional offset [arcsec]')
            plt.title(sourceCatFile)
            plt.savefig('posoffset_fluxratio.%s' % outputType)

        ############################
        # Axial ratio change, vs flux
        # *** DO NOT INCLUDE IN THE SINGLE PLOT ***
        if not doSinglePlot:
            print("Axial ratio change vs flux")
            plt.cla()
            plt.semilogx()
            plt.plot(refFlux, srcAxialRatio / refAxialRatio, dot)
            plt.xlim(themin, themax)
            plt.ylim(ratioMin, ratioMax)
            plt.xlabel('Reference flux')
            plt.ylabel('Source Axial Ratio / Reference Axial Ratio')
            plt.title(sourceCatFile)
            plt.savefig('axialRatioChange_flux.%s' % outputType)

        ############################
        # Axial ratio change, vs major axis
        # *** DO NOT INCLUDE IN THE SINGLE PLOT ***
        if not doSinglePlot:
            print("Axial ratio change vs major axis")
            plt.cla()
            #plt.semilogx()
            plt.plot(refMaj, srcAxialRatio / refAxialRatio, dot)
            #plt.xlim(themin,themax)
            plt.ylim(ratioMin, ratioMax)
            plt.xlabel('Reference major axis')
            plt.ylabel('Source Axial Ratio / Reference Axial Ratio')
            plt.title(sourceCatFile)
            plt.savefig('axialRatioChange_majoraxis.%s' % outputType)

        ############################
        # Position angle change, vs flux
        if doSinglePlot:
            plt.subplot(3, 4, 6)
        else:
            plt.cla()
            print("Position angle change vs flux")
        plt.semilogx()
        plt.plot(refFlux, paDiff, dot)
        plt.xlim(themin, themax)
        # plt.ylim(ratioMin,ratioMax)
        if doSinglePlot:
            plt.xlabel('Reference flux')
            plt.ylabel('Diff(Position Angle)')
        else:
            plt.xlabel('Reference flux')
            plt.ylabel('Source Position Angle - Reference Position Angle [deg]')
            plt.title(sourceCatFile)
            plt.savefig('posangDiff_flux.%s' % outputType)

        ############################
        # Position angle change, vs major axis
        if doSinglePlot:
            plt.subplot(3, 4, 7)
        else:
            plt.cla()
            print("Position angle change vs major axis")
        plt.semilogx()
        plt.plot(refMaj, paDiff, dot)
        if doSinglePlot:
            plt.xlabel('Major Axis')
            plt.ylabel('Diff(Position Angle)')
        else:
            plt.xlabel('Major Axis')
            plt.ylabel('Source Position Angle - Reference Position Angle [deg]')
            plt.title(sourceCatFile)
            plt.savefig('posangDiff_majorAxis.%s' % outputType)

        ############################
        # Positional offsets
        if doSinglePlot:
            plt.subplot(3, 4, 5)
        else:
            plt.cla()
            print("Positional offsets")
        plt.plot(dra, ddec, dot)
        plt.axis('equal')
        # Plot the error ellipse: 1-sigma in each coordinate, centred on the mean offset.
        angle = linspace(0, 2 * pi, 100)
        plt.plot(dra.std() * cos(angle) + dra.mean(), ddec.std() * sin(angle) + ddec.mean(), 'r-')
        if doSinglePlot:
            plt.xlabel(r'$\Delta$RA $\cos\delta$ [arcsec]')
            plt.ylabel(r'$\Delta$Dec [arcsec]')
        else:
            plt.xlabel('(Source RA - Reference RA) * cos(ref.Dec) [arcsec]')
            plt.ylabel('Source Dec - Reference Dec [arcsec]')
            plt.title(sourceCatFile)
            plt.savefig('posOffsets.%s' % outputType)

        ##################################
        # Completeness & Reliability plots
        # Gather all fluxes (matches plus misses) to define the binning range.
        f = []
        for m in matchlist:
            f.append(m.ref.flux() * refFluxScale)
            f.append(m.src.flux() * sourceFluxScale)
        for s in srcmisslist:
            f.append(s.flux() * sourceFluxScale)
        for r in refmisslist:
            if selector.isGood(r):
                f.append(r.flux() * refFluxScale)
        f = np.array(f, dtype=float)
        minFlux = floor(log10(f.min()) * 2.) / 2.
        maxFlux = ceil(log10(f.max()) * 2.) / 2.
        # Count matches and misses in 0.1-dex flux bins.
        numMatchBinnedByFlux = np.zeros(int((maxFlux - minFlux) * 10))
        numMissSrcBinnedByFlux = np.zeros(int((maxFlux - minFlux) * 10))
        numMissRefBinnedByFlux = np.zeros(int((maxFlux - minFlux) * 10))
        for m in matchlist:
            binNumber = int((log10(m.src.flux() * sourceFluxScale) - minFlux) * 10)
            numMatchBinnedByFlux[binNumber] += 1
        for s in srcmisslist:
            binNumber = int((log10(s.flux() * sourceFluxScale) - minFlux) * 10)
            numMissSrcBinnedByFlux[binNumber] += 1
        for r in refmisslist:
            if selector.isGood(r):
                binNumber = int((log10(r.flux() * refFluxScale) - minFlux) * 10)
                numMissRefBinnedByFlux[binNumber] += 1
        numSrcBinnedByFlux = numMatchBinnedByFlux + numMissSrcBinnedByFlux
        numRefBinnedByFlux = numMatchBinnedByFlux + numMissRefBinnedByFlux
        # Have the additional +3 here to match the extension down below (clist)
        fluxBin = 10**(minFlux - 0.1 + arange((maxFlux - minFlux) * 10 + 3) / 10.)
        fluxBinPlot = 10**(minFlux - 0.1 + arange((maxFlux - minFlux) * 10) / 10.)
        # Completeness = matches / reference counts; reliability = matches /
        # source counts. Empty bins are flagged with -1.
        completenessBinnedByFlux = np.zeros(numMatchBinnedByFlux.shape)
        completenessBinnedByFlux[numRefBinnedByFlux > 0] = numMatchBinnedByFlux[numRefBinnedByFlux > 0] / numRefBinnedByFlux[numRefBinnedByFlux > 0]
        completenessBinnedByFlux[numRefBinnedByFlux == 0] = -1
        reliabilityBinnedByFlux = np.zeros(numMatchBinnedByFlux.shape)
        reliabilityBinnedByFlux[numSrcBinnedByFlux > 0] = numMatchBinnedByFlux[numSrcBinnedByFlux > 0] / numSrcBinnedByFlux[numSrcBinnedByFlux > 0]
        reliabilityBinnedByFlux[numSrcBinnedByFlux == 0] = -1
        completenessReliability = np.zeros(numMatchBinnedByFlux.shape)
        completenessReliability[(numSrcBinnedByFlux > 0) & (numRefBinnedByFlux > 0)] = completenessBinnedByFlux[(numSrcBinnedByFlux > 0) & (numRefBinnedByFlux > 0)] * reliabilityBinnedByFlux[(numSrcBinnedByFlux > 0) & (numRefBinnedByFlux > 0)]
        # Pad the curves so the first and last steps are drawn down to zero.
        clist = [0]
        clist.extend(completenessBinnedByFlux)
        clist.append(completenessBinnedByFlux[-1])
        clist.append(0.)
        completenessBinnedByFlux = np.array(clist)
        rlist = [0]
        rlist.extend(reliabilityBinnedByFlux)
        rlist.append(reliabilityBinnedByFlux[-1])
        rlist.append(0.)
        reliabilityBinnedByFlux = np.array(rlist)
        jointValid = (numRefBinnedByFlux > 0) * (numSrcBinnedByFlux > 0)
        reliabilityBinnedByFluxJoint = numMatchBinnedByFlux[jointValid] / numSrcBinnedByFlux[jointValid]
        completenessBinnedByFluxJoint = numMatchBinnedByFlux[jointValid] / numRefBinnedByFlux[jointValid]

        if doSinglePlot:
            plt.subplot(3, 4, 9)
            crossSize = 3.
        else:
            plt.cla()
            print("Completeness")
            crossSize = 10.
        plt.semilogx()
        plt.axis('normal')
        plt.step(fluxBin, completenessBinnedByFlux, where='post')
        # Mark bins with no reference sources (flagged -1) with a cross.
        for i in range(len(fluxBin)):
            if completenessBinnedByFlux[i] < 0.:
                plt.plot(fluxBin[i] * 10**0.05, -0.01, 'kx', markersize=crossSize)
        plt.ylim(-0.05, 1.05)
        plt.xlim(10**minFlux, 10**(maxFlux + 0.2))
        plt.xlabel('Flux')
        plt.ylabel('Completeness')
        if not doSinglePlot:
            plt.title(sourceCatFile)
            plt.savefig('completeness.%s' % outputType)

        if doSinglePlot:
            plt.subplot(3, 4, 10)
        else:
            plt.cla()
            print("Reliability")
        plt.semilogx()
        plt.step(fluxBin, reliabilityBinnedByFlux, where='post')
        for i in range(len(fluxBin)):
            if reliabilityBinnedByFlux[i] < 0.:
                plt.plot(fluxBin[i] * 10**0.05, -0.01, 'kx', markersize=crossSize)
        plt.ylim(-0.05, 1.05)
        plt.xlim(10**(minFlux - 0.2), 10**(maxFlux + 0.2))
        plt.xlabel('Flux')
        plt.ylabel('Reliability')
        if not doSinglePlot:
            plt.title(sourceCatFile)
            plt.savefig('reliability.%s' % outputType)

        if doSinglePlot:
            plt.subplot(3, 4, 8)
        else:
            plt.cla()
            print("Completeness vs Reliability")
        # Trace the completeness-reliability locus over the flux bins; the
        # red point is the faintest valid bin, the green point the brightest.
        plt.plot(reliabilityBinnedByFluxJoint, completenessBinnedByFluxJoint, 'bo')
        plt.plot(reliabilityBinnedByFluxJoint, completenessBinnedByFluxJoint, 'b-')
        plt.plot(reliabilityBinnedByFluxJoint[0], completenessBinnedByFluxJoint[0], 'ro')
        plt.plot(reliabilityBinnedByFluxJoint[-1], completenessBinnedByFluxJoint[-1], 'go')
        plt.ylim(0, 1.1)
        plt.xlim(0, 1.1)
        plt.xlabel('Reliability')
        plt.ylabel('Completeness')
        if not doSinglePlot:
            plt.title(sourceCatFile)
            plt.savefig('completeness_reliability.%s' % outputType)

        if not doSinglePlot:
            plt.cla()
            print("Completeness x Reliability")
            # plt.semilogx()
            # plt.step(fluxBinPlot,completenessReliability,where='post')
            plt.plot(fluxBinPlot, completenessReliability)
            plt.ylim(-0.05, 1.05)
            # plt.xlim(10**(minFlux-0.2),10**(maxFlux+0.2))
            plt.xlabel('Flux')
            plt.ylabel('Completeness x Reliability')
            plt.title(sourceCatFile)
            plt.savefig('completeness_x_reliability.%s' % outputType)

        #############################
        # 2D completeness and reliability, binned by flux and major axis
        f = []
        a = []
        for m in matchlist:
            f.append(m.ref.flux() * refFluxScale)
            f.append(m.src.flux() * sourceFluxScale)
            a.append(m.src.maj)
            a.append(m.ref.maj)
        for s in srcmisslist:
            f.append(s.flux() * sourceFluxScale)
            a.append(s.maj)
        for r in refmisslist:
            if selector.isGood(r):
                f.append(r.flux() * refFluxScale)
                a.append(r.maj)
        f = np.array(f, dtype=float)
        minFlux = floor(log10(f.min()) * 2.) / 2.
        maxFlux = ceil(log10(f.max()) * 2.) / 2.
        a = np.array(a, dtype=float)
        amin = floor(a.min() / 5.) * 5
        amax = ceil(a.max() / 5.) * 5
        # 2D bins: width 5 in major axis, 0.1 dex in flux.
        nmatch2d = np.zeros((int((amax - amin) / 5), int((maxFlux - minFlux) * 10)))
        nmissSrc2d = np.zeros((int((amax - amin) / 5), int((maxFlux - minFlux) * 10)))
        nmissRef2d = np.zeros((int((amax - amin) / 5), int((maxFlux - minFlux) * 10)))
        for m in matchlist:
            abin = int((m.ref.maj - amin) / 5.)
            fbin = int((log10(m.src.flux() * sourceFluxScale) - minFlux) * 10)
            nmatch2d[abin][fbin] += 1
        for s in srcmisslist:
            abin = int((s.maj - amin) / 5.)
            fbin = int((log10(s.flux() * sourceFluxScale) - minFlux) * 10)
            nmissSrc2d[abin][fbin] += 1
        for r in refmisslist:
            if selector.isGood(r):
                abin = int((r.maj - amin) / 5.)
                fbin = int((log10(r.flux() * refFluxScale) - minFlux) * 10)
                nmissRef2d[abin][fbin] += 1
        nSrc2d = nmatch2d + nmissSrc2d
        nRef2d = nmatch2d + nmissRef2d
        comp2d = np.zeros(nmatch2d.shape)
        rel2d = np.zeros(nmatch2d.shape)
        comp2d[nRef2d > 0] = nmatch2d[nRef2d > 0] / nRef2d[nRef2d > 0]
        comp2d[nRef2d == 0] = nan
        rel2d[nSrc2d > 0] = nmatch2d[nSrc2d > 0] / nSrc2d[nSrc2d > 0]
        rel2d[nSrc2d == 0] = nan

        if doSinglePlot:
            plt.subplot(3, 4, 11)
        else:
            plt.cla()
            print("Completeness by flux and major axis")
        extent = (minFlux, maxFlux, amin, amax)
        plt.imshow(comp2d, cmap='rainbow', interpolation='nearest', origin='lower', extent=extent)
        plt.axis('normal')
        plt.ylim(amin, amax)
        plt.xlabel('log10(Flux)')
        if doSinglePlot:
            plt.xticks(rotation=45)
        plt.ylabel('Major axis')
        if doSinglePlot:
            plt.title('Completeness', fontsize='small')
        else:
            plt.title(sourceCatFile)
            plt.savefig('completeness_by_flux_majoraxis.%s' % outputType)

        if doSinglePlot:
            plt.subplot(3, 4, 12)
        else:
            plt.cla()
            print("Reliability by flux and major axis")
        extent = (minFlux, maxFlux, amin, amax)
        plt.imshow(rel2d, cmap='rainbow', interpolation='nearest', origin='lower', extent=extent)
        plt.axis('normal')
        plt.ylim(amin, amax)
        plt.xlabel('log10(Flux)')
        if doSinglePlot:
            plt.xticks(rotation=45)
        plt.ylabel('Major axis')
        if doSinglePlot:
            plt.title('Reliability', fontsize='small')
        else:
            plt.title(sourceCatFile)
            plt.savefig('reliability_by_flux_majoraxis.%s' % outputType)

        #############################
        # Save the assembled single-page summary plot.
        if doSinglePlot:
            plt.suptitle(sourceCatFile, y=0.95)
            plt.savefig('finderEval.%s' % outputType)
        plt.close()
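
# ---------------------------------------------------------------------------
# For reference, a hypothetical minimal config parset for this script. The key
# names are taken from the get_value() calls above, and the 'Eval.' prefix
# matches the prefix used in the error messages; the file names and the noise
# value are placeholders, not values from any real run.
#
#   Eval.sourceCatalogue     = selavy-results.txt
#   Eval.sourceCatalogueType = Selavy
#   Eval.refCatalogue        = reference-catalogue.txt
#   Eval.refCatalogueType    = Selavy
#   Eval.matchfile           = matches.txt
#   Eval.missfile            = misses.txt
#   Eval.imageNoise          = 0.0005
#   Eval.image               = image.i.clean.taylor.0.restored.fits
#   Eval.plotType            = all
#   Eval.outputType          = png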
Source: Code/Components/Analysis/evaluation/current/scripts/finderEval.py (repo: rtobar/askapsoft)
# Data Management
import pandas
# External Interfaces
import glob
import kaggle
import os
from zipfile import ZipFile
# Evaluation
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
# Processing
import numpy
import scipy
from scipy.stats import chi2
# Modeling
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn import svm
from sklearn.svm import OneClassSVM
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest

X = pandas.read_pickle('../data/refined-cicids2017.pkl')
Y = pandas.read_pickle('../data/simplified-labels.pkl')

# Grid search over the number of estimators, from 100 to 500 in steps of 100
best_predictions = []
best_targets = []
best_roc_auc_score = 0
best_n_estimators = 0

print('Starting grid search')
for n_estimators in range(100, 600, 100):
    print('Testing number of estimators : ' + str(n_estimators))

    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.8, test_size=0.2, shuffle=True)

    isoforest = IsolationForest(n_estimators=n_estimators, verbose=1, warm_start=False)
    isoforest.fit(X_train)

    # Note: IsolationForest.predict() returns +1 for inliers and -1 for
    # outliers, so the label encoding in Y must match this convention for
    # the AUC score to be meaningful.
    predictions = isoforest.predict(X_test)
    score = roc_auc_score(Y_test, predictions)
    print('Score : ' + str(score))
    print()

    if score > best_roc_auc_score:
        best_roc_auc_score = score
        best_n_estimators = n_estimators
        best_predictions = predictions
        # Keep the targets from the same split as the best predictions, so the
        # saved predictions and targets stay aligned.
        best_targets = Y_test

print('Grid search complete')
print('Best score : ' + str(best_roc_auc_score))
print('Best number of estimators : ' + str(best_n_estimators))

numpy.save('../data/isoforest-predictions.npy', best_predictions)
numpy.save('../data/isoforest-targets.npy', best_targets)
numpy.save('../data/isoforest-score.npy', best_roc_auc_score)
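
# ---------------------------------------------------------------------------
# A minimal follow-up sketch (not part of the original run): reload the saved
# arrays and compute the precision/recall metrics imported above. Mapping the
# -1/+1 predictions to binary labels assumes Y encodes attacks as 1, which is
# an assumption about the label file, not something established here; the
# *_loaded names are hypothetical.
#
# predictions_loaded = numpy.load('../data/isoforest-predictions.npy')
# targets_loaded = numpy.load('../data/isoforest-targets.npy', allow_pickle=True)
# binary_predictions = (predictions_loaded == -1).astype(int)
# print('Precision :', precision_score(targets_loaded, binary_predictions))
# print('Recall    :', recall_score(targets_loaded, binary_predictions))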
Source: scripts/ex2_isoforest.py (repo: christian-westbrook/intrusion-detection)
```python
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import json
import sympy
%matplotlib inline
f = open('./exerc_phyton.txt')
```
```python
V=np.genfromtxt(f,skip_header=6,delimiter='')
```
```python
t=V[:,0]
print(t)
```
[0. 0.0201 0.0402 0.0603 0.0804 0.1005 0.1206 0.1407 0.1608 0.1809
0.201 0.2211 0.2412 0.2613 0.2814 0.3015 0.3216 0.3417 0.3618 0.3819
0.402 0.4221 0.4422 0.4623 0.4824 0.5025 0.5226 0.5427 0.5628 0.5829
0.603 0.6231 0.6432 0.6633 0.6834 0.7035 0.7236 0.7437 0.7638 0.7839
0.804 0.8241 0.8442 0.8643 0.8844 0.9045 0.9246 0.9447 0.9648 0.9849
1.005 1.0251 1.0452 1.0653 1.0854 1.1055 1.1256 1.1457 1.1658 1.1859
1.206 1.2261 1.2462 1.2663 1.2864 1.3065 1.3266 1.3467 1.3668 1.3869
1.407 1.4271 1.4472 1.4673 1.4874 1.5075 1.5276 1.5477 1.5678 1.5879
1.608 1.6281 1.6482 1.6683 1.6884 1.7085 1.7286 1.7487 1.7688 1.7889
1.809 1.8291 1.8492 1.8693 1.8894 1.9095 1.9296 1.9497 1.9698 1.9899
2.01 2.0301 2.0502 2.0703 2.0904 2.1105 2.1306 2.1507 2.1708 2.1909
2.211 2.2311 2.2512 2.2713 2.2914 2.3115 2.3316 2.3517 2.3718 2.3919
2.412 2.4321 2.4522 2.4723 2.4924 2.5125 2.5326 2.5527 2.5728 2.5929
2.613 2.6331 2.6532 2.6733 2.6934 2.7135 2.7336 2.7537 2.7738 2.7939
2.814 2.8341]
```python
Raw=V[:,1]
print(Raw)
```
[0.1517 0.1492 0.1524 0.1576 0.1635 0.1744 0.1937 0.2126 0.2288 0.2552
0.2833 0.3125 0.3463 0.3879 0.4324 0.4825 0.5358 0.5928 0.6446 0.6966
0.753 0.8116 0.8639 0.9251 0.9865 1.0494 1.1175 1.1893 1.261 1.3333
1.4078 1.4778 1.5505 1.6178 1.6855 1.7481 1.8074 1.8658 1.9245 1.9792
2.0339 2.0724 2.1126 2.1452 2.1701 2.1844 2.2012 2.2047 2.2021 2.194
2.1824 2.1634 2.1433 2.1116 2.0795 2.0427 1.9985 1.9519 1.9 1.8442
1.781 1.7189 1.6515 1.5813 1.5132 1.4401 1.371 1.2987 1.2265 1.1594
1.0909 1.0255 0.963 0.9033 0.8488 0.7928 0.7359 0.6853 0.637 0.5913
0.5533 0.5207 0.4856 0.4531 0.4276 0.4055 0.3856 0.3698 0.3665 0.3648
0.3835 0.4091 0.4622 0.5367 0.6447 0.7663 0.9091 1.068 1.2216 1.3753
1.5169 1.6317 1.7246 1.7819 1.8011 1.7825 1.7405 1.675 1.5954 1.509
1.4121 1.3088 1.2 1.0853 0.9715 0.8626 0.7664 0.6798 0.6014 0.5392
0.4865 0.4359 0.396 0.3587 0.3254 0.3015 0.28 0.2676 0.2499 0.2421
0.2323 0.2267 0.2226 0.214 0.208 0.1993 0.1953 0.1811 0.1695 0.1565
0.1438 0.1356]
```python
Noisy=V[:,2]
print(Noisy)
```
[0.1458 0.1509 0.1575 0.1427 0.1714 0.1731 0.193 0.2097 0.2356 0.2637
0.2816 0.3121 0.348 0.3904 0.4242 0.4815 0.5402 0.5954 0.637 0.6885
0.7427 0.805 0.8624 0.9323 0.9806 1.0501 1.1193 1.1841 1.2583 1.3459
1.4078 1.4811 1.5445 1.6084 1.6825 1.743 1.8056 1.8578 1.9353 1.98
2.0374 2.0685 2.1042 2.1454 2.1682 2.1889 2.2148 2.2083 2.2075 2.1963
2.1792 2.1676 2.1447 2.1102 2.0811 2.0507 2.0074 1.9517 1.8948 1.838
1.7831 1.7264 1.6599 1.5845 1.5161 1.451 1.3744 1.298 1.2351 1.1523
1.0863 1.0287 0.9668 0.9019 0.8474 0.7965 0.7364 0.6893 0.6387 0.5947
0.5533 0.5216 0.4897 0.4536 0.425 0.4077 0.3842 0.3629 0.3695 0.3671
0.3671 0.4053 0.4659 0.546 0.6375 0.7571 0.914 1.0704 1.2246 1.3675
1.5081 1.6388 1.7404 1.7738 1.8075 1.7801 1.7457 1.6798 1.5943 1.517
1.4107 1.318 1.2067 1.0783 0.9658 0.8627 0.7659 0.67 0.5805 0.5361
0.4845 0.4381 0.4025 0.3486 0.3274 0.3067 0.2782 0.2703 0.242 0.2471
0.2435 0.2299 0.2175 0.2157 0.2061 0.2006 0.1869 0.1873 0.1518 0.1583
0.1389 0.127 ]
```python
Acell=V[:,3]
```
```python
deltat=t[1]-t[0]
print (deltat)
```
0.0201
```python
from numpy import diff
velocity=diff(Raw)/deltat
print(velocity)
```
[-0.12437811 0.15920398 0.25870647 0.29353234 0.54228856 0.960199
0.94029851 0.80597015 1.31343284 1.39800995 1.45273632 1.68159204
2.06965174 2.21393035 2.49253731 2.65174129 2.8358209 2.57711443
2.58706468 2.80597015 2.91542289 2.60199005 3.04477612 3.05472637
3.12935323 3.3880597 3.5721393 3.56716418 3.59701493 3.70646766
3.48258706 3.61691542 3.34825871 3.3681592 3.11442786 2.95024876
2.90547264 2.92039801 2.72139303 2.72139303 1.91542289 2.
1.62189055 1.23880597 0.71144279 0.8358209 0.17412935 -0.12935323
-0.40298507 -0.57711443 -0.94527363 -1. -1.57711443 -1.59701493
-1.83084577 -2.19900498 -2.31840796 -2.58208955 -2.7761194 -3.14427861
-3.08955224 -3.35323383 -3.49253731 -3.3880597 -3.63681592 -3.43781095
-3.59701493 -3.5920398 -3.33830846 -3.4079602 -3.25373134 -3.10945274
-2.97014925 -2.71144279 -2.78606965 -2.83084577 -2.51741294 -2.40298507
-2.27363184 -1.89054726 -1.62189055 -1.74626866 -1.61691542 -1.26865672
-1.09950249 -0.99004975 -0.78606965 -0.1641791 -0.08457711 0.93034826
1.27363184 2.64179104 3.70646766 5.37313433 6.04975124 7.10447761
7.90547264 7.64179104 7.64676617 7.04477612 5.71144279 4.62189055
2.85074627 0.95522388 -0.92537313 -2.08955224 -3.25870647 -3.960199
-4.29850746 -4.82089552 -5.13930348 -5.41293532 -5.70646766 -5.66169154
-5.41791045 -4.78606965 -4.30845771 -3.90049751 -3.09452736 -2.62189055
-2.51741294 -1.98507463 -1.85572139 -1.65671642 -1.18905473 -1.06965174
-0.61691542 -0.88059701 -0.3880597 -0.48756219 -0.27860697 -0.2039801
-0.4278607 -0.29850746 -0.43283582 -0.19900498 -0.70646766 -0.57711443
-0.64676617 -0.6318408 -0.4079602 ]
```python
dacell2=diff(velocity)/deltat
print(dacell2)
```
[ 14.10856167 4.95037252 1.73263038 12.37593129 20.79156457
-0.9900745 -6.6830029 25.24689983 4.20781664 2.72270488
11.38585679 19.30645281 7.17804015 13.86104304 7.92059602
9.15818915 -12.87096854 0.49503725 10.89081953 5.44540977
-15.59367342 22.02915769 0.49503725 3.71277939 12.87096854
9.15818915 -0.24751863 1.48511175 5.44540977 -11.13833816
6.6830029 -13.36600579 0.9900745 -12.62344991 -8.16811465
-2.22766763 0.74255588 -9.90074503 0. -40.09801738
4.20781664 -18.81141556 -19.05893418 -26.23697433 6.18796564
-32.91997723 -15.09863617 -13.61352442 -8.6631519 -18.31637831
-2.72270488 -28.71216059 -0.9900745 -11.63337541 -18.31637831
-5.94044702 -13.11848717 -9.65322641 -18.31637831 2.72270488
-13.11848717 -6.93052152 5.19789114 -12.37593129 9.90074503
-7.92059602 0.24751863 12.62344991 -3.46526076 7.6730774
7.17804015 6.93052152 12.87096854 -3.71277939 -2.22766763
15.59367342 5.69292839 6.43548427 19.05893418 13.36600579
-6.18796564 6.43548427 17.3263038 8.41563328 5.44540977
10.14826366 30.93982822 3.96029801 50.49379966 17.07878518
68.06762209 52.96898592 82.91873964 33.66253311 52.47394866
39.85049875 -13.11848717 0.24751863 -29.94975372 -66.33499171
-54.20657905 -88.11663078 -94.30459642 -93.56204054 -57.91935843
-58.16687706 -34.90012623 -16.83126655 -25.98945571 -15.84119205
-13.61352442 -14.60359892 2.22766763 12.12841266 31.43486547
23.76178807 20.29652731 40.09801738 23.51426945 5.19789114
26.48449296 6.43548427 9.90074503 23.26675082 5.94044702
22.52419495 -13.11848717 24.50434395 -4.95037252 10.39578228
3.71277939 -11.13833816 6.43548427 -6.6830029 11.63337541
-25.24689983 6.43548427 -3.46526076 0.74255588 11.13833816]
```python
tamanhodacell2=np.size(dacell2)
```
```python
novo_tempo=t[0:tamanhodacell2]
novo_aceleracao_medida=Acell[0:tamanhodacell2]
```
```python
hfig,hax=plt.subplots(1,1,sharex = True, squeeze=True, figsize=(9,5))
plt.plot(t,Acell, label='Aceleração medida')
plt.plot(novo_tempo,dacell2,label='Aceleração calculada')
hax.legend(frameon=False)
hax.set_ylabel('Amplitude [m/$s^2$]')
hax.set_xlabel('Time[s]')
```
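As an aside (not part of the original exercise), `numpy.gradient` computes centered differences and returns an array of the same length as its input, which avoids the manual truncation of the time axis done above. A minimal sketch using the variables already defined in this notebook:
```python
# Central differences with numpy.gradient: the result has the same length as
# the input arrays, so t and Acell can be plotted against it without slicing.
velocity_c = np.gradient(Raw, deltat)      # first derivative of the raw signal
accel_c = np.gradient(velocity_c, deltat)  # second derivative
hfig, hax = plt.subplots(1, 1, figsize=(9, 5))
plt.plot(t, Acell, label='measured acceleration')
plt.plot(t, accel_c, label='central-difference acceleration')
hax.legend(frameon=False)
hax.set_ylabel('Amplitude [m/$s^2$]')
hax.set_xlabel('Time[s]')
```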
```python
velocidadeNoisy=diff(Noisy)/deltat
Aceleracaonoisy2=diff(velocidadeNoisy)/deltat
```
```python
hfig,hax=plt.subplots(1,1,sharex = True, squeeze=True, figsize=(9,5))
plt.plot(t,Acell, label='Aceleração medida')
plt.plot(novo_tempo,Aceleracaonoisy2,label='Aceleração calculada Noisy')
hax.legend(frameon=False)
hax.set_ylabel('Amplitude [m/$s^2$]')
hax.set_xlabel('Time[s]')
```
```python
hfig,hax=plt.subplots(1,1,sharex = True, squeeze=True, figsize=(9,5))
plt.plot(novo_tempo,dacell2,label='Aceleração calculada')
plt.plot(novo_tempo,Aceleracaonoisy2,label='Aceleração calculada Noisy')
hax.legend(frameon=False)
hax.set_ylabel('Amplitude [m/$s^2$]')
hax.set_xlabel('Time[s]')
```
Source: exercicio_aula_5-checkpoint.ipynb (repo: regifukuchi/bmc-1)
Debates of the Senate (Hansard)
1st Session, 36th Parliament,
Volume 137, Issue 157
Monday, September 13, 1999
The Honourable Gildas L. Molgat, Speaker
Update on the Proposed New Canadian War Museum
Response to a Request for Flag Pins
The Bloc's Vision of Quebec Identity
The Supreme Court of Canada
Announcement by the Chief Justice of Intention to Resign-Notice of Inquiry
The Policy of Flying Civilians Aboard CF-18 Aircraft
Progress on the Proposed New War Museum
Manitoba-Seizure of a Taxpayer's Property for Non-payment of Taxes
Canadian Environmental Protection Act, 1999
Third Reading-Vote on Motion in Amendment Deferred-Point of Order
Public Sector Pension Investment Board Bill
Third Reading-Debate Continued
Canadian Environmental Protection Act, 1999
Third Reading-Motion in Amendment-Vote Deferred
Time Allocation Motion Adopted on Division
Third Reading-Motions in Amendment
Public Sector Pension Investment Board Bill
Time Allocation for Debate-Notice of Motion
Business of the Senate
Monday, September 13, 1999
The Senate met at 4 p.m., the Speaker in the Chair.
Update on the Proposed New Canadian War Museum
This question raises another, more fundamental one.
The museum's director, Mr. Jack Granatstein, stated the following:
That is certainly what the veterans want.
My colleague, Senator Balfour, is chairman of the Subcommittee on Veterans Affairs.
That prospect should give past and future witnesses pause.
The Senate should not have to hold hearings.
Together they celebrated our language and their shared aspirations and dreams.
Response to a Request for Flag Pins
Hon. Thelma J. Chalifoux:
He says that we senators listen to ordinary Canadians.
They provide all kinds of basic services to those crews.
The response has been tremendous.
Hon. Lois M. Wilson:
Three other delegates represented civil society.
We hope to see an integrated strategy on this very soon.
The Bloc's Vision of Quebec Identity
Hon. Jean-Robert Gauthier:
To me, a Quebecer is a person who lives in Quebec.
To them, it is something else.
On September 11, the daily La Presse ran an article headlined:
The Bloc buries the French-Canadian nation.
In the Bloc's document on Quebec identity, one could read, and I quote again:
The French-Canadian nation no longer exists on Quebec territory ...
As far as I know, Quebec is still part of Canada.
The French-Canadian nation is far from dead.
The Supreme Court of Canada
Announcement by the Chief Justice of Intention to Resign-Notice of Inquiry
Hon. Anne C. Cools:
Hon. Noel A. Kinsella (Deputy Leader of the Opposition):
Does the minister have anything new to tell us on this subject?
Hon. B. Alasdair Graham (Leader of the Government):
Yes, honourable senators, I will table the document by tomorrow at the latest.
Hon. Noel A. Kinsella (Deputy Leader of the Opposition):
Honourable senators, I thank the honourable senator for the question.
I thank the honourable senator for the answer.
The Policy of Flying Civilians Aboard CF-18 Aircraft
Hon. Noel A. Kinsella (Deputy Leader of the Opposition):
I know that my colleague Senator Atkins would love to do so.
Hon. B. Alasdair Graham (Leader of the Government):
I see Senator Robertson smiling.
She must also have enjoyed a flight of that kind.
On Saturday afternoon, I was at the Shearwater air base with my grandchildren.
Cormorant was paying.
People could also take a test ride in the Bison land vehicles.
I am not certain that observation is accurate.
Progress on the Proposed New War Museum
I have a few questions about the Canadian War Museum.
Have requests for proposals been issued?
What exactly is the government's plan at this time?
Hon. B. Alasdair Graham (Leader of the Government):
Is that an established principle of policy?
Hon. B. Alasdair Graham (Leader of the Government):
As far as the government is concerned, the project can go ahead.
Manitoba-Seizure of a Taxpayer's Property for Non-payment of Taxes
This incident shows what happens when government institutions have too much power.
Hon. B. Alasdair Graham (Leader of the Government):
Yes, I will do my best.
Hon. A. Raynell Andreychuk:
Hon. B. Alasdair Graham (Leader of the Government):
That falls within the medical requirements.
I therefore come back to my first question.
However, that is another question, to be examined another day.
Are we ready to play that role?
Honourable senators, the answer to that question is not simple.
I myself have seen our peacekeepers at work in Namibia.
I have seen them in Nicaragua and in other countries.
Hon. Pierre Claude Nolin:
Honourable senators, I want to understand clearly what answer is being given to the honourable senator.
I will inquire, honourable senators.
I do not know whether that has already been determined, honourable senators.
However, I will inquire as soon as possible.
Hon. Gerald J. Comeau:
Hon. B. Alasdair Graham (Leader of the Government):
Canadian Environmental Protection Act, 1999
Third Reading-Vote on Motion in Amendment Deferred-Point of Order
Hon. John Lynch-Staunton (Leader of the Opposition):
The Hon. the Speaker:
I thank the Honourable Senator Lynch-Staunton for raising the matter.
I will look into it immediately.
Hon. Noel A. Kinsella (Deputy Leader of the Opposition):
Rule 23(4) of our Rules reads as follows:
A motion of referral falls within the category of dilatory motions.
Hon. Anne C. Cools:
Honourable senators, I am trying to understand what is being asked.
The Hon. the Speaker:
Public Sector Pension Investment Board Bill
Third Reading-Debate Continued
I have difficulty seeing the logic of that argument.
Ask the fishers on the east coast and the west coast.
Their retirement benefits are non-existent.
The question here is how that money will be used.
The surplus could indeed be used to meet such a need.
However, paying additional benefits poses a danger.
Senator Lawson's remarks were certainly very apt.
When I was a child, I lived for 15 years in an isolated post.
My father was the only police officer.
He was on duty 24 hours a day.
In those 15 years, I never saw him out of uniform.
The courts will determine who owns the surplus.
That is another battle I fully intend to fight.
A stillborn bill accomplishes nothing.
Honourable senators, I congratulate Senator Christensen on her maiden speech in the Senate.
Honourable senators, I will be pleased to answer.
My father retired from the RCMP in 1949.
I was in grade seven or eight at the time.
Hon. Pierre Claude Nolin:
In doing so, you alluded to my speech of Friday morning.
What do you think of that?
I find that hard to swallow.
The judges of the court of appeal stated...
Maiden speeches in the Senate are not to be challenged.
We did not criticize your maiden speech in the Senate.
(On motion of Senator Andreychuk, debate adjourned.)
Canadian Environmental Protection Act, 1999
Third Reading-Motion in Amendment-Vote Deferred
Hon. Thelma J. Chalifoux:
Honourable senators, I have my own opinion on things.
Honourable senators, I see this bill as a living document.
The bill before us makes no mention of it.
The definition of aboriginal land needs to be revisited.
Hon. Pierre Claude Nolin:
Does the honourable senator agree with what Senator Hays is proposing?
I discussed this bill this morning with Gerald Morin.
I explained everything to him, and he was of the same opinion.
Has she read it?
Tomorrow I will deliver a speech in which I will quote extensively from that judgment.
The Hon. the Speaker:
Honourable senators, I regret that I must interrupt the debate.
The Hon. the Speaker:
The Honourable Senator Ghitter, seconded by the Honourable Senator Cochrane, moved the following motion:
I rule that the motion in amendment is in order.
Hon. John Lynch-Staunton (Leader of the Opposition):
The bells must be rung.
So let the bells ring.
The Hon. the Speaker:
We have three minutes.
The Hon. the Speaker:
I refer the honourable senator to Beauchesne.
A motion of referral constitutes an amendment and is therefore debatable.
The Hon. the Speaker:
Honourable senators, I do not know where that information came from.
It is a constant problem for me and for the clerks at the Table.
However, it is not for me to intervene.
It is a matter the Senate could look into.
That said, is it up to us to intervene?
That is the question the Senate must decide for itself.
While we are at it, let us go past the 15-minute limit.
Let us forget the Rules altogether.
The Hon. the Speaker:
The Hon. the Speaker:
(Motion in amendment negatived.)
The Hon. the Speaker:
The motion now before the Senate is the motion for third reading.
I am not alone in this view.
I want to stress that point.
That is the only option open to us.
The Senate could have rendered a signal service to the country.
It did nothing of the kind.
In their excellent presentation, they put it in these terms:
And what about the Inuit Tapirisat of Canada?
Could it be mere exaggeration?
This chart contains 33 boxes.
The Canadian Institute of Child Health made this very clear:
The environment is one of those factors at the very top of the list...
Yet this is not a matter of emphasis, honourable senators.
This issue was paramount in their eyes.
They told us so.
They were very honest.
Consequently, clause 65.1 serves no purpose.
I will not repeat what it says.
The red book says the following, and we agree:
We learned that in Saskatchewan there is a zero-effluent plant.
It attracts European customers because of its closed-loop process.
We must stop using them and producing them as by-products.
There are not many of them.
Only a dozen or so have been identified.
It has developed less harmful replacement chemicals.
We have already seen the effect of that change.
It is no more complicated than that.
I would also like to speak...
The Hon. the Speaker:
Are you asking leave to continue?
The Hon. the Speaker:
I want to explain what is at stake.
This is a new kind of biotechnology.
It is no longer the genetic improvement we have been practising for years.
This operation crosses the species barrier.
Governments have made major investments in biotechnology.
Of course, that would be very bad for Third World countries.
The debate over biotechnology has been intense in Europe.
We all know, for example, that transgenic products can be toxic.
If Cabinet decides that another regulation is equivalent, that will be sufficient.
This is different, because biotechnology can harm biodiversity.
It is now Cabinet that has that power.
There is really no agreement on that.
The relationship between the regulator and the regulated body...
The Hon. the Speaker:
Honourable senators, it is now 6 p.m.
Hon. Sharon Carstairs (Deputy Leader of the Government):
The Hon. the Speaker:
The government acted prematurely.
This is a most unusual procedure.
Health Canada will assess the health effects.
The Canadian Food Inspection Agency reports to the Department of Agriculture.
Bill C-32 is an extraordinary piece of legislation.
The bill has implications for many treaties.
Honourable senators, is it necessary for me to read each amendment?
The Hon. the Speaker:
Honourable senators, do you agree that the amendments not be read?
Hon. Sharon Carstairs (Deputy Leader of the Government):
The Hon. the Speaker:
The Honourable Senator Spivak, seconded by the Honourable Senator Cochrane, moves:
The Hon. the Speaker:
The Hon. the Speaker:
Is it your pleasure, honourable senators, to adopt the motion?
Hon. Noel A. Kinsella (Deputy Leader of the Opposition):
(On motion of Senator Kinsella, debate adjourned.)
Time Allocation Motion Adopted on Division
Hon. Sharon Carstairs (Deputy Leader of the Government):
The Hon. the Speaker:
Is leave granted, honourable senators?
The Hon. the Speaker:
Agreed, honourable senators?
Hon. Marcel Prud'homme:
Hon. Noel A. Kinsella (Deputy Leader of the Opposition):
The government has therefore informed us that it is prepared to do so.
I believe that Senators Cochrane and Buchanan, among others, wish to speak.
The Hon. the Speaker:
Honourable senators, I saw Senator Prud'homme rise.
Honourable senators, three hours of debate have just been agreed to.
I do not object to the agreement that was reached.
Senator Carstairs informed me of it.
The Hon. the Speaker:
They should be back shortly.
We must make enough copies for all senators.
The Hon. the Speaker:
Honourable senators, I move the motion standing in my name.
The Hon. the Speaker:
That, pursuant to rule 39, no more than six hours of debate...
The Hon. the Speaker:
Is it your pleasure, honourable senators, to adopt the motion?
The Hon. the Speaker:
Will those senators in favour of the motion please say "yea."
The Hon. the Speaker:
Will those senators opposed to the motion please say "nay."
The Hon. the Speaker:
In my opinion, the "yeas" have it.
(Motion agreed to, on division.)
Third Reading-Motions in Amendment
(a) in the preamble,
(B) by deleting lines 7 to 11;
(b) in clause 2,
(ii) by deleting lines 23 to 26;
(d) in clause 77, on page 49,
(i) by replacing lines 5 and 6 with the following:
(4), its virtual elimination.,
(ii) by replacing lines 25 to 27 with the following:
(e) in clause 79,
amended is the virtual elimination of the substance, the Minister shall, in the statement,
proposed with a view to the virtual elimination of the substance in relation to;
(f) in clause 91, on page 64,
(i) by replacing lines 5 to 7 with the following:
in which the measure provided for is the virtual elimination of the substance, shall,
(ii) by replacing lines 21 to 23 with the following:
in relation to the virtual elimination of the substance and summarizing the reasons for;
(g) in clause 106, on page 79,
(i) by replacing lines 18 and 19 with the following:
(ii) by replacing lines 23 to 28 with the following:
(a) prior notice before the manufacture, import and sale of the living organism;
(i) in clause 347,
The Hon. the Speaker:
Honourable senators, we now resume debate on Bill C-32.
Do any other senators wish to speak?
Hon. John Lynch-Staunton (Leader of the Opposition):
You are quite right, Your Honour; we must be rewriting the Rules.
The notice of motion was adopted.
I believe Senator Murray is quite right.
What have we adopted?
The Hon. the Speaker:
The time allocation motion has now been adopted.
That is my interpretation of the situation.
How long will the debate last, Your Honour?
The Hon. the Speaker:
After six hours of debate, will all the questions be put to a vote?
The Hon. the Speaker:
I understand there could be a good number of amendments.
Each of them would have to be dealt with at that time.
Is that perfectly clear, honourable senators?
Hon. Marcel Prud'homme:
Will the six hours of debate take place this evening?
Hon. Sharon Carstairs (Deputy Leader of the Government):
Is that what we are to understand?
It would be preferable for these agreements to be better explained.
I am speaking as though I were a new senator.
Everyone wants to know what is going on.
The Hon. the Speaker:
Yes, the entire debate will take place this evening.
When will the vote be held?
The Hon. the Speaker:
However, the debate cannot last more than six hours.
I defer to you, Your Honour.
I am trying to be understanding and cooperative.
The Hon. the Speaker:
I have no idea which other senators might wish to speak.
That is all I can tell the senator.
In any event, the debate cannot exceed six hours.
Haste has never produced good legislation.
That is something we must avoid.
That is all I ask.
Honourable senators, perhaps this matter could be clarified.
I admit it is very confusing.
Senator Spivak's amendments are now being distributed.
I understand that other senators may have amendments to move.
It is the agreement of the Senate that should be sought.
The Hon. the Speaker:
I cannot do so until I have received them.
It is up to each senator who has amendments to move to ask for it.
I cannot do it for them.
Perhaps the senators who wish to move amendments will accept that solution.
The majority of senators supported that position.
That is the situation.
That is my view.
She should not have specified September 14.
The Hon. the Speaker:
It is rule 39(7), which says:
I will therefore need the leave of the Senate to proceed with amendments.
I am warning you right now.
Your Honour, that is quite right.
Under the Rules of this House, there can be no amendments.
The Hon. the Speaker:
Honourable senators, I would like to make a few remarks on this bill.
The committee hearings on this bill were cut short.
Honourable senators have set out these shortcomings to you in detail.
I therefore implore honourable senators to improve it.
In her testimony before our committee, she said:
We are talking here about dangers that often cannot even be seen or identified.
It is inevitable that they then swallow a little water.
We were told appalling stories about this.
Their children are sick; they are condemned to a premature death.
They spend the week in schools located right next to all this contamination.
Everywhere, families do what they can to protect their children.
Not all the dangers are known, and some effects are invisible.
Therefore, honourable senators, I move:
Children's Environmental Health Protection Commission
(6) The mandate of the commission includes:
(b) ensuring that current scientific information is reliable and well disseminated;
Honourable senators, I further move:
Hon. Fernand Robichaud (The Hon. the Acting Speaker):
Hon. Pierre Claude Nolin:
Here are the main provisions of this bill that concern aboriginal people.
According to Michael Anderson, a researcher for the Manitoba Keewatinowi Okimakanak group, and I quote:
They also apply in determining which community will be eligible to sit on the committee.
Under the act, these regimes will be harmonized with the federal and provincial environmental regimes.
You will conclude, as I do, that 14 First Nations is not many.
Under the new act, the provisions apply at the level of aboriginal lands.
Parliament cannot afford to forget them.
According to Jody Pierce, of the Metis Council of British Columbia, and I quote:
I will proceed with this brief review.
Moreover, the court added, and I quote:
The Hon. the Speaker:
Do you give him leave to continue, honourable senators?
This is not a small problem.
I do not agree.
The Supreme Court highlighted the rights and the obligations imposed on the federal government.
Parliament must respect those rights.
That is what we must do.
It seems quite obvious to me that the bill suffers from a gap.
And in French, it says:
It seems to me that this is glaring.
The committee heard from a legal linguist, which is quite rare.
The English version does not mean the same thing.
The French version is much broader and much more complete than the English version.
Hon. Pierre Claude Nolin:
(d) one representative for the Metis, chosen by the Metis National Council.
In the other official language, and I quote:
You will notice that in English there is an additional amendment.
The amendment will speak for itself.
each of the provinces;; and
d) one representative for all Metis selected by the Metis National Council..
I will read it to you in English.
postponing effective measures to prevent.
The Hon. the Speaker:
It is moved by Senator Nolin, seconded by Senator Spivak:
That Bill C-32 be not now read a third time...
Honourable senators, I wish to ask Senator Nolin a question.
Are you able to confirm the contradiction in the French version?
In the French text, the measures would not necessarily be cost-effective.
In fact, they could cost more than other measures while still being effective.
I will repeat what I said in French, trying to be clear.
In the English version, all measures taken must be cost-effective.
Mr. Perez was the witness representing the Canadian Petroleum Products Institute.
That answer speaks volumes.
Industry wanted to include positive, cost-effective measures.
No one paid attention to the French version except the Senate.
It is incumbent on us to make the necessary correction.
Honourable senators, I have another question for Senator Nolin.
Cost-effectiveness can prevent the application of the precautionary principle.
My question is hypothetical.
That definition could entail much higher costs.
The best way to make laws is to say as little as possible.
We must avoid saying too much.
We want the measures to be "efficaces," not "effectives."
"Efficace" is the right word, because "effectif" means something that produces an effect.
Hon. Noel A. Kinsella (Deputy Leader of the Opposition):
Honourable senators, is there a connection with the famous Rio Declaration?
The Hon. the Speaker:
Honourable senators, is leave granted to Senator Spivak?
That has already happened in two cases.
Thirty years ago, smoking was not directly linked to cancer.
There was also the case of lead poisoning.
They will not vote to amend the bill.
So why are we here?
I will tell you.
Honourable senators, we have before us a flawed bill.
A flawed bill must be corrected in order to be improved.
I would point out to him that I observed the work of that committee.
In my view, there are three aspects of the situation that are problematic.
First, it is a bad piece of legislation.
I will explain in a moment why that is so.
Second, the witnesses who appeared were misled.
I will come back to that a little later.
What makes a bill good, bad or middling?
The government completely mutilated the committee's report.
A letter from another large company, Alcan, was made public after the vote.
Yes, you may be right.
What did the government do?
They did not know about the letter Alcan had sent to the Prime Minister.
That is why they supported the bill.
I look at the front bench here and I see former ministers.
I have dealt with many ministers.
What was the result?
You may laugh, but you know it is the truth.
Even Senator Kenny knows it is the truth!
The witnesses' views were not taken into account.
Is she a friend of yours?
Her mother is a good friend of mine.
That issue will be examined now because I am going to appear before the Senate committee.
Did she ever vote for you, Senator Buchanan?
You knew the answer to that question anyway.
Politically, do you know what is at stake here?
The credibility of the Senate.
Senators will listen to us.
He had been in office only five days!
In other words, he acknowledges that it is a bad piece of legislation.
It was not in a letter that he made those remarks.
He also wrote a letter.
After realizing he had blundered, perhaps.
But it was too late.
He knows it is a bad measure.
He respects Senator Spivak's opinion!
He says the current Bill C-88 is better.
He adds that we can live with it.
What does that mean, Senator Taylor?
He says we can live with it.
He says that in the meantime we will make do with the one in force.
That is enlightened reasoning on the minister's part.
In other words, the minister is inviting us not to pass a bad bill.
The term "consensus" is always an excellent word.
The use of that word is entirely appropriate in this case.
What happens to the political process in this case?
They can act this way because they have the majority.
We had barely begun hearing witnesses.
He had not yet heard even one.
That is how ridiculous the situation is.
It is a bad bill.
The political process has taken a hard blow; worse, it has been outright torpedoed.
We have not yet been destroyed, however; I can assure you of that.
That is why we are here.
The Hon. the Speaker:
A normal minute or a Buchanan minute?
As usual, I am at your mercy.
The Hon. the Speaker:
Is it agreed, honourable senators?
That man has always been and remains an honourable person.
That is not true.
What were we forced to do in that case?
Here is what John A. Macdonald said about it:
That is what he said.
I ask you just one question.
What is the great hurry?
The minister told us: Well, I want to get this over with.
I want this matter settled.
It has been dragging on in the House of Commons for years.
What is the great hurry?
Why muzzle the Senate?
Because they want to prorogue.
A good bill is more important.
I do not want to hear you say: But that will take years more.
However, the government has already decided.
Let us improve this bill.
Hon. Marcel Prud'homme:
I, for one, think I have understood.
Then I await an answer.
If none comes, then I no longer have any doubt.
J'ai eu l'honneur et le plaisir de travailler avec Charles Caccia.
Je tiens a lui rendre hommage.
Il est opiniatre et entete.
Il a preside pendant un certain temps l'Union parlementaire internationale.
Je ne tiens pas a prononcer un long discours passionne.
Je vois que la fin arrive et je le regrette.
Il est tres respecte.
Le Senat a prefere ne pas poursuivre le debat.
Certains des nouveaux senateurs constatent probablement deja que cette enceinte est partisane.
J'espere qu'un jour nous n'hesiterons pas a prendre nos responsabilites.
Le Parlement comprend deux Chambres: la Chambre des communes et le Senat.
Je veux que les nouveaux senateurs comprennent que nous sommes des parlementaires.
Il faut retablir la verite des faits.
Je constate a regret que les senateurs veulent aller de l'avant.
Nous avions la une bonne occasion de ne pas voter selon la discipline de parti.
Comme ce ne sera pas le cas, il faudra donc attendre une prochaine fois.
Son Honneur le President :
Il y a trois groupes d'amendements.
Le dernier a ete propose par le senateur Nolin, avec l'appui du senateur Spivak.
Le groupe precedent avait ete propose par le senateur Cochrane, appuye par le senateur Robertson.
Le premier groupe etait une proposition du senateur Spivak, avec l'appui du senateur Cochrane.
Honorables senateurs, je suis sur que vous avez lu tous les amendements.
Si vous en etes convaincu, nous pouvons proceder de la sorte.
Son Honneur le President :
Senateur Prud'homme, j'aimerais pouvoir vous repondre.
Nous sommes d'accord.
Pour d'autres, il ne sera pas necessaire de recourir a cette procedure.
Si tous sont d'accord, la sonnerie d'appel retentira pendant une demi-heure.
Son Honneur le President :
La procedure dans laquelle nous sommes engages est totalement irreguliere.
C'est donc ainsi que nous allons proceder.
Je passe a la premiere question dont le Senat est saisi.
L'honorable senateur Nolin, appuye par l'honorable senateur Spivak, propose:
Son Honneur le President :
Que les senateurs qui sont en faveur de l'amendement veuillent bien dire oui.
Son Honneur le President :
Que les senateurs qui sont contre veuillent bien dire non.
Son Honneur le President :
Je declare que les non l'emportent.
L'amendement est rejete.
Et deux senateurs s'etant leves:
Son Honneur le President :
Son Honneur le President :
J'ai commence a lire vos amendements et on m'a dit: Suffit!
Je n'ai pas donc lu les deux amendements.
Votre Honneur, deux senateurs se sont leves.
Nous demandons un vote par assis et leves.
Son Honneur le President :
Son Honneur le President :
On demande un vote par assis et debout.
Son Honneur le President :
Honorables senateurs, la permission est-elle accordee?
Je parle evidemment en mon nom personnel.
Ce serait arrogant de ma part de presenter les choses autrement.
Son Honneur le President :
Est-ce d'accord, honorables senateurs?
Son Honneur le President :
Son Honneur le President :
Le vote aura lieu a 20 h 35.
Son Honneur le President :
Puis-je etre dispense de lire les amendements?
(Les motions d'amendement (senateur Nolin), mises aux voix, sont rejetees.)
Son Honneur le President :
Il y a trois amendements differents.
On m'a demande de les traiter separement.
a ) dans le preambule, a la page 2...
Son Honneur le President :
Vous plait-il, honorables senateurs, d'adopter la motion d'amendement?
Son Honneur le President :
Que les senateurs qui sont en faveur de la motion veuillent bien dire oui.
Son Honneur le President :
Que les senateurs qui sont contre veuillent bien dire non.
Son Honneur le President :
A mon avis, les non l'emportent.
Je declare la motion d'amendement rejetee.
Son Honneur le President :
Son Honneur le President :
Avec le consentement des senateurs, nous pouvons faire ce que nous voulons.
L'autre possibilite serait de tenir le vote immediatement.
Nous pourrions tenir ce vote mercredi.
Je crois que nous avions decide que le vote devait se faire sans delai.
Les senateurs le savaient au moment de la premiere sonnerie d'appel de trente minutes.
Je ne veux pas me montrer difficile.
Je m'y oppose energiquement.
Je suis d'avis que la sonnerie doit retentir pendant au moins cinq minutes.
Nous nous sommes toutefois entendus pour permettre quatre amendements.
Je serais d'accord avec la suggestion de Son Honneur.
Honorables senateurs, je pense que le gouvernement obtiendra ce qu'il desire ce soir.
Nous ne devrions pas nous enteter pour cinq, six ou sept minutes.
Le senateur Murray a raison d'intervenir.
Il n'est meme pas 21 heures.
Le gouvernement reussira probablement a faire adopter le projet de loi avant 21 heures.
Son Honneur le President :
On est donc d'accord pour que le timbre retentisse pendant cinq minutes.
La est le probleme.
Et deux honorables senateurs s'etant leves:
Son Honneur le President :
Son Honneur le President :
Puis-je me dispenser de lire les amendements?
(Les motions d'amendement, mises aux voix, sont rejetees.)
Son Honneur le President :
Suis-je exempte de la lecture des amendements?
Son Honneur le President :
Que ceux qui sont en faveur de la motion d'amendement veuillent bien dire oui.
Son Honneur le President :
Que ceux qui sont contre la motion d'amendement veuillent bien dire non.
Son Honneur le President :
A mon avis, les non l'emportent.
Et deux honorables senateurs s'etant leves.
Son Honneur le President :
Le vote aura lieu a 21 heures.
a ) a l'article 44, a la page 28...
(La motion, mise aux voix, est rejetee.)
Son Honneur le President :
Son Honneur le President :
Son Honneur le President :
Son Honneur le President :
A mon avis, les non l'emportent.
Et deux honorables senateurs s'etant leves:
Son Honneur le President :
Le vote aura lieu a 21 h 10.
Son Honneur le President :
(La motion d'amendement du senateur Spivak, mise aux voix, est rejetee.)
Son Honneur le President :
Plait-il aux honorables senateurs d'adopter la motion?
Son Honneur le President :
Que les senateurs qui sont en faveur de la motion veuillent bien dire oui.
Son Honneur le President :
Que les senateurs qui sont contre la motion veuillent bien dire non.
Son Honneur le President :
A mon avis, les non l'emportent.
Et deux senateurs s'etant leves:
Son Honneur le President :
Il y aura un vote par appel nominal a 21 h 20.
L'honorable Sharon Carstairs (leader adjoint du gouvernement) :
Malheureusement, nous n'avons pu aboutir a un accord mutuellement satisfaisant.
Par consequent, je donne avis que, le 14 septembre 1999, je proposerai:
Les travaux du Senat
L'honorable Sharon Carstairs (leader adjoint du gouvernement) :
Son Honneur le President :
Etes-vous d'accord, honorables senateurs?
Permission ayant ete accordee de revenir aux avis de motion du gouvernement:
L'honorable Sharon Carstairs (leader adjoint du gouvernement) :
L'honorable Noel A. Kinsella (chef adjoint de l'opposition) :
Je pose tout simplement une question a des fins d'eclaircissement.
Oui, honorables senateurs, c'est ce qui est entendu.
Son Honneur le President :
Plait-il aux honorables senateurs d'adopter la motion?
(Le Senat s'ajourne au mardi 14 septembre 1999, a 9 heures.)
|
{"hexsha": "afe27ee80fc00e226e3cb09a39c4510cdf72e00d", "size": 35326, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "data/Hansard/Training/hansard.36.1.senate.debates.1999-09-13.157.f", "max_stars_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_stars_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/Hansard/Training/hansard.36.1.senate.debates.1999-09-13.157.f", "max_issues_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_issues_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/Hansard/Training/hansard.36.1.senate.debates.1999-09-13.157.f", "max_forks_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_forks_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.6467958271, "max_line_length": 105, "alphanum_fraction": 0.7785766857, "num_tokens": 9993}
|
from __future__ import absolute_import
from numpy import *
def medianboxfilter2d(x, y, values, scale):
    """Replace each value by the median of the values whose (x, y) positions
    fall inside an axis-aligned box of side `scale` centred on that point."""
    assert len(x) == len(y) == len(values)
    values_filtered = zeros_like(values)
    for i in range(len(x)):
        xi = x[i]
        yi = y[i]
        # select every point inside the box around (xi, yi); point i itself is always included
        mask = (x > xi - scale/2) & (x < xi + scale/2) & \
               (y > yi - scale/2) & (y < yi + scale/2)
        values_filtered[i] = median(values[mask])
    return values_filtered
def mediancirclefilter2d(x, y, values, scale):
    """Replace each value by the median of the values whose (x, y) positions
    lie within a circle of radius `scale` centred on that point."""
    assert len(x) == len(y) == len(values)
    values_filtered = zeros_like(values)
    for i in range(len(x)):
        xi = x[i]
        yi = y[i]
        # distance from point i to every point in the set
        r = sqrt((xi-x)**2 + (yi-y)**2)
        mask = r < scale
        values_filtered[i] = median(values[mask])
    return values_filtered
class MedianBoxFilter2d(object):
    """Wrapper around medianboxfilter2d with a fixed scale."""
    def __init__(self, scale):
        self.scale = scale
    def filter(self, x, y, values):
        return medianboxfilter2d(x, y, values, self.scale)
class MedianCircleFilter2d(object):
    """Wrapper around mediancirclefilter2d with a fixed scale."""
    def __init__(self, scale):
        self.scale = scale
    def filter(self, x, y, values):
        return mediancirclefilter2d(x, y, values, self.scale)
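# Minimal usage sketch (not part of the original module); the sample data
# below is made up for illustration only.
if __name__ == '__main__':
    from numpy.random import normal, uniform
    x = uniform(0, 10, 500)
    y = uniform(0, 10, 500)
    values = x + y + normal(0, 1, 500)  # smooth trend plus noise
    smoothed_box = MedianBoxFilter2d(scale=2.0).filter(x, y, values)
    smoothed_circle = MedianCircleFilter2d(scale=1.0).filter(x, y, values)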
|
{"hexsha": "3e45b116fe85cd628526f4ec5eda1b2ca4eda22d", "size": 1044, "ext": "py", "lang": "Python", "max_stars_repo_path": "mab/utils/scatterfilters.py", "max_stars_repo_name": "maartenbreddels/mab", "max_stars_repo_head_hexsha": "112dcfbc4a74b07aff13d489b3776bca58fe9bdf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-12-01T04:10:34.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-01T04:10:34.000Z", "max_issues_repo_path": "mab/utils/scatterfilters.py", "max_issues_repo_name": "maartenbreddels/mab", "max_issues_repo_head_hexsha": "112dcfbc4a74b07aff13d489b3776bca58fe9bdf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mab/utils/scatterfilters.py", "max_forks_repo_name": "maartenbreddels/mab", "max_forks_repo_head_hexsha": "112dcfbc4a74b07aff13d489b3776bca58fe9bdf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1, "max_line_length": 55, "alphanum_fraction": 0.6647509579, "include": true, "reason": "from numpy", "num_tokens": 312}
|
SUBROUTINE ALG02
C
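C     READS AND ECHO-PRINTS THE ALG MODULE INPUT DATA - CONTROL
C     INTEGERS, ANNULUS/COMPUTING STATION GEOMETRY, STATION
C     CALCULATION DATA, LOSS/D-FACTOR AND LOSS DISTRIBUTION CURVES,
C     BLOCKAGE FACTORS, AND ESTIMATED STREAMLINE COORDINATES.
C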
LOGICAL DEBUG
REAL LOSS,LAMI,LAMIP1,LAMIM1
DIMENSION II(21,30),JJ(21,30),IDATA(24),RDATA(6),NAME(2)
COMMON /UD3PRT/ IPRTC
COMMON /UDSIGN/ NSIGN
COMMON /UPAGE / LIMIT,LQ
COMMON /UD300C/ NSTNS,NSTRMS,NMAX,NFORCE,NBL,NCASE,NSPLIT,NREAD,
1 NPUNCH,NPAGE,NSET1,NSET2,ISTAG,ICASE,IFAILO,IPASS,
2 I,IVFAIL,IFFAIL,NMIX,NTRANS,NPLOT,ILOSS,LNCT,ITUB,
3 IMID,IFAIL,ITER,LOG1,LOG2,LOG3,LOG4,LOG5,LOG6,
4 IPRINT,NMANY,NSTPLT,NEQN,NSPEC(30),NWORK(30),
5 NLOSS(30),NDATA(30),NTERP(30),NMACH(30),NL1(30),
6 NL2(30),NDIMEN(30),IS1(30),IS2(30),IS3(30),
7 NEVAL(30),NDIFF(4),NDEL(30),NLITER(30),NM(2),
8 NRAD(2),NCURVE(30),NWHICH(30),NOUT1(30),NOUT2(30),
9 NOUT3(30),NBLADE(30),DM(11,5,2),WFRAC(11,5,2),
O R(21,30),XL(21,30),X(21,30),H(21,30),S(21,30),
1 VM(21,30),VW(21,30),TBETA(21,30),DIFF(15,4),
2 FDHUB(15,4),FDMID(15,4),FDTIP(15,4),TERAD(5,2),
3 DATAC(100),DATA1(100),DATA2(100),DATA3(100),
4 DATA4(100),DATA5(100),DATA6(100),DATA7(100),
5 DATA8(100),DATA9(100),FLOW(10),SPEED(30),
6 SPDFAC(10),BBLOCK(30),BDIST(30),WBLOCK(30),
7 WWBL(30),XSTN(150),RSTN(150),DELF(30),DELC(100),
8 DELTA(100),TITLE(18),DRDM2(30),RIM1(30),XIM1(30)
COMMON /UD300C/ WORK(21),LOSS(21),TANEPS(21),XI(21),VV(21),
1 DELW(21),LAMI(21),LAMIM1(21),LAMIP1(21),PHI(21),
2 CR(21),GAMA(21),SPPG(21),CPPG(21),HKEEP(21),
3 SKEEP(21),VWKEEP(21),DELH(30),DELT(30),VISK,SHAPE,
4 SCLFAC,EJ,G,TOLNCE,XSCALE,PSCALE,PLOW,RLOW,XMMAX,
5 RCONST,FM2,HMIN,C1,PI,CONTR,CONMX
EQUIVALENCE (H(1,1),II(1,1)),(S(1,1),JJ(1,1))
DATA NAME / 4HALG0, 4H2 /
C
DEBUG = .FALSE.
CALL SSWTCH (20,J)
IF (J .EQ. 1) DEBUG =.TRUE.
NEVAL(1) = 0
CALL FREAD (LOG1,TITLE,18,1)
IF (IPRTC .EQ. 1) WRITE (LOG2,110) TITLE
110 FORMAT (10X,10HINPUT DATA, /10X,10(1H*), //10X,5HTITLE,34X,2H= ,
1 18A4)
LNCT = LNCT + 4
CALL ALG1 (LNCT)
CALL FREAD (LOG1,IDATA,21,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',111,IDATA,21)
NSTNS = IDATA( 1)
NSTRMS = IDATA( 2)
NMAX = IDATA( 3)
NFORCE = IDATA( 4)
NBL = IDATA( 5)
NCASE = IDATA( 6)
NSPLIT = IDATA( 7)
NSET1 = IDATA( 8)
NSET2 = IDATA( 9)
NREAD = IDATA(10)
NPUNCH = IDATA(11)
NPLOT = IDATA(12)
NPAGE = IDATA(13)
NTRANS = IDATA(14)
NMIX = IDATA(15)
NMANY = IDATA(16)
NSTPLT = IDATA(17)
NEQN = IDATA(18)
NLE = IDATA(19)
NTE = IDATA(20)
NSIGN = IDATA(21)
IF (NSTRMS .EQ. 0) NSTRMS = 11
IF (NMAX .EQ. 0) NMAX = 40
IF (NFORCE .EQ. 0) NFORCE = 10
IF (NCASE .EQ. 0) NCASE = 1
IF (NPAGE .EQ. 0) NPAGE = 60
LQ = LOG2
LIMIT = NPAGE
CALL ALG03 (LNCT,19)
IF (IPRTC .EQ. 1) WRITE (LOG2,130) NSTNS,NSTRMS,NMAX,NFORCE,NBL,
1 NCASE,NSPLIT,NSET1,NSET2,NREAD,NPUNCH,NPLOT,NPAGE,NTRANS,
2 NMIX,NMANY,NSTPLT,NEQN,NLE,NTE,NSIGN
130 FORMAT (//10X,'NUMBER OF STATIONS',21X,1H=,I3, /10X,'NUMBER OF ',
1 'STREAMLINES',18X,1H=,I3, /10X,20HMAX NUMBER OF PASSES,19X,
2 1H=,I3, /10X,30HMAX NUMBER OF ARBITRARY PASSES,9X,1H=,I3,
3 /10X,29HBOUNDARY LAYER CALC INDICATOR,10X,1H=,I3, /10X,
4 24HNUMBER OF RUNNING POINTS,15X,1H=,I3, /10X,
5 33HSTREAMLINE DISTRIBUTION INDICATOR,6X,1H=,I3, /10X,
6 34HNUMBER OF LOSS/D-FACTOR CURVE SETS,5X,1H=,I3, /10X,
7 34HNUMBER OF LOSS/T.E.LOSS CURVE SETS,5X,1H=,I3, /10X,
8 26HSTREAMLINE INPUT INDICATOR,13X,1H=,I3, /10X,
9 27HSTREAMLINE OUTPUT INDICATOR,12X,1H=,I3, /10X,
O 24HPRECISION PLOT INDICATOR,15X,1H=,I3, /10X,
1 24HMAX NUMBER OF LINES/PAGE,15X,1H=,I3, /10X,
2 29HWAKE TRANSPORT CALC INDICATOR,10X,1H=,I3, /10X,
3 32HMAINSTREAM MIXING CALC INDICATOR,7X,1H=,I3, /10X,
4 33HNO OF STATIONS FROM ANALYTIC SECN,6X,1H=,I3, /10X,
5 27HLINE-PRINTER PLOT INDICATOR,12X,1H=,I3, /10X,
6 32HMOMENTUM EQUATION FORM INDICATOR,7X,1H=,I3, /10X,
7 30HSTATION NUMBER AT LEADING EDGE,9X,1H=,I3, /10X,
8 31HSTATION NUMBER AT TRAILING EDGE,8X,1H=,I3, /10X,
9 37HCOMPRESSOR DIR. OF ROTATION INDICATOR,2X,1H=,I3)
ITUB = NSTRMS - 1
IMID = NSTRMS/2 + 1
IF (NMANY .EQ. 0) GO TO 136
CALL FREAD (LOG1,NWHICH,NMANY,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',132,NWHICH,NMANY)
CALL ALG03 (LNCT,2)
IF (IPRTC .EQ. 1) WRITE (LOG2,134) (NWHICH(I),I=1,NMANY)
134 FORMAT (//10X,'GEOMETRY COMES FROM ANALYTIC SECTION FOR STATIONS',
1 23I3)
136 CALL ALG03 (LNCT,7)
CALL FREAD (LOG1,RDATA,6,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',136,RDATA,6)
G = RDATA(1)
EJ = RDATA(2)
SCLFAC = RDATA(3)
TOLNCE = RDATA(4)
VISK = RDATA(5)
SHAPE = RDATA(6)
IF (G .EQ. 0.0) G = 32.174
IF (EJ .EQ. 0.0) EJ = 778.16
IF (SCLFAC .EQ. 0.) SCLFAC = 12.0
IF (TOLNCE .EQ. 0.) TOLNCE = 0.001
IF (VISK .EQ. 0.0) VISK = 0.00018
IF (SHAPE.EQ. 0.0) SHAPE = 0.7
IF (IPRTC .EQ. 1) WRITE (LOG2,150) G,EJ,SCLFAC,TOLNCE,VISK,SHAPE
150 FORMAT (//10X,22HGRAVITATIONAL CONSTANT,17X,1H=,F8.4, /10X,
1 17HJOULES EQUIVALENT,22X,1H=,F8.3, /10X,
2 29HLINEAR DIMENSION SCALE FACTOR,10X,1H=,F8.4, /10X,
3 15HBASIC TOLERANCE,24X,1H=,F8.5, /10X,
4 19HKINEMATIC VISCOSITY,20X,1H=,F8.5, /10X,
5 17HB.L. SHAPE FACTOR,22X,1H=,F8.5)
CALL ALG03 (LNCT,7)
CALL FREAD (LOG1,RDATA,6,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',151,RDATA,6)
XSCALE = RDATA(1)
PSCALE = RDATA(2)
RLOW = RDATA(3)
PLOW = RDATA(4)
XMMAX = RDATA(5)
RCONST = RDATA(6)
IF (XMMAX .EQ.0.0) XMMAX = 0.6
IF (RCONST.EQ.0.0) RCONST = 6.0
IF (IPRTC .EQ. 1) WRITE (LOG2,160) XSCALE,PSCALE,RLOW,PLOW,XMMAX,
1 RCONST
160 FORMAT (//10X,29HPLOTTING SCALE FOR DIMENSIONS,10X,1H=,F7.3, /10X,
1 28HPLOTTING SCALE FOR PRESSURES,11X,1H=,F7.3, /10X,
2 22HMINIMUM RADIUS ON PLOT,17X,1H=,F7.3, /10X,
3 24HMINIMUM PRESSURE ON PLOT,15X,1H=,F7.3, /10X,
4 40HMAXIMUM M-SQUARED IN RELAXATION FACTOR =,F8.4, /10X,
5 29HCONSTANT IN RELAXATION FACTOR,10X,1H=,F8.4)
CALL ALG03 (LNCT,3)
CALL FREAD (LOG1,RDATA,2,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',162,RDATA,2)
CONTR = RDATA(1)
CONMX = RDATA(2)
IF (IPRTC .EQ. 1) WRITE (LOG2,164) CONTR,CONMX
164 FORMAT (//10X,22HWAKE TRANSFER CONSTANT,17X,1H=,F8.5, /10X,
1 25HTURBULENT MIXING CONSTANT,14X,1H=,F8.5)
CALL ALG03 (LNCT,5+NCASE)
DO 168 K = 1,NCASE
CALL FREAD (LOG1,FLOW(K),1,0)
168 CALL FREAD (LOG1,SPDFAC(K),1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',171,FLOW,NCASE)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',172,SPDFAC,NCASE)
IF (IPRTC .EQ. 1) WRITE(LOG2,180) (K,FLOW(K),SPDFAC(K),K=1,NCASE)
180 FORMAT (//10X,21HPOINTS TO BE COMPUTED, //10X,2HNO,6X,8HFLOWRATE,
1 4X,12HSPEED FACTOR, //,(10X,I2,F13.3,F14.3))
CALL FREAD (LOG1,L1,1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',180,L1,1)
DO 185 K = 1,L1
CALL FREAD (LOG1,XSTN(K),1,0)
185 CALL FREAD (LOG1,RSTN(K),1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',191,XSTN,L1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',192,RSTN,L1)
ISTAG = 0
IF (RSTN(1) .EQ. 0.0) ISTAG = 1
NSPEC(1) = L1
CALL ALG03 (LNCT,7+L1)
IF (IPRTC .EQ. 1) WRITE (LOG2,200) L1,(XSTN(K),RSTN(K),K=1,L1)
200 FORMAT (//10X,'ANNULUS / COMPUTING STATION GEOMETRY', //10X,
1 24HSTATION 1 SPECIFIED BY,I3,7H POINTS, //17X,4HXSTN,8X,
2 4HRSTN,//,(F22.4,F12.4))
IS1(1) = 1
LAST = L1
DO 220 I = 2,NSTNS
CALL FREAD (LOG1,L1,1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',210,L1,1)
NEXT = LAST + 1
LAST = LAST + L1
IF (LAST .GT. 150) GO TO 550
DO 215 K = NEXT,LAST
CALL FREAD (LOG1,XSTN(K),1,0)
215 CALL FREAD (LOG1,RSTN(K),1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',215,XSTN(NEXT),LAST-NEXT+1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',216,RSTN(NEXT),LAST-NEXT+1)
IF (RSTN(NEXT) .EQ. 0.0) ISTAG = I
CALL ALG03 (LNCT,5+L1)
IS1(I) = NEXT
NSPEC(I) = L1
220 IF (IPRTC .EQ. 1) WRITE (LOG2,230) I,L1,(XSTN(K),RSTN(K),
1 K=NEXT,LAST)
230 FORMAT (//10X,7HSTATION,I3,14H SPECIFIED BY,I3,7H POINTS, //17X,
1 4HXSTN,8X,4HRSTN, //,(F22.4,F12.4))
SPEED(1) = 0.0
CALL FREAD (LOG1,IDATA,4,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',233,IDATA,4)
L1 = IDATA(1)
NTERP(1) = IDATA(2)
NDIMEN(1) = IDATA(3)
NMACH(1) = IDATA(4)
DO 335 K = 1,L1
CALL FREAD (LOG1,RDATA,4,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',234,RDATA,4)
DATAC(K) = RDATA(1)
DATA1(K) = RDATA(2)
DATA2(K) = RDATA(3)
335 DATA3(K) = RDATA(4)
CALL ALG03 (LNCT,7+L1)
IS2(1) = 1
NDATA(1) = L1
LAST = L1
IF (IPRTC .EQ. 1) WRITE (LOG2,250) L1,NTERP(1),NDIMEN(1),NMACH(1),
1 (DATAC(K),DATA1(K),DATA2(K),DATA3(K),K=1,L1)
250 FORMAT (//10X,24HSTATION CALCULATION DATA, //7X,
1 18HSTATION 1 NDATA=,I3,7H NTERP=,I2,8H NDIMEN=,I2,
2 7H NMACH=,I2, //11X,5HDATAC,6X,14HTOTAL PRESSURE,4X,
3 17HTOTAL TEMPERATURE,4X,11HWHIRL ANGLE, //,
4 (5X,F12.4,F15.4,F19.3,F18.3))
DO 252 K = 1,L1
252 DATA1(K) = DATA1(K)*SCLFAC**2
LASTD = 0
NOUT1(1) = 0
NOUT2(1) = 0
DO 320 I = 2,NSTNS
LOGN = LOG1
IF (NMANY .EQ. 0) GO TO 258
DO 254 L1 = 1,NMANY
IF (NWHICH(L1) .EQ. I) GO TO 256
254 CONTINUE
GO TO 258
256 LOGN = LOG5
258 CALL FREAD (LOGN,IDATA,16,1)
CWKBD IF (DEBUG .AND. LOGN.EQ.LOG1) CALL BUG1 ('ALG02 ',258,IDATA,16)
NDATA(I) = IDATA(1)
NTERP(I) = IDATA(2)
NDIMEN(I) = IDATA(3)
NMACH(I) = IDATA(4)
NWORK(I) = IDATA(5)
NLOSS(I) = IDATA(6)
NL1(I) = IDATA(7)
NL2(I) = IDATA(8)
NEVAL(I) = IDATA(9)
NCURVE(I) = IDATA(10)
NLITER(I) = IDATA(11)
NDEL(I) = IDATA(12)
NOUT1(I) = IDATA(13)
NOUT2(I) = IDATA(14)
NOUT3(I) = IDATA(15)
NBLADE(I) = IDATA(16)
L1 = 3
IF (NDATA(I) .NE. 0) L1 = L1 + 5 + NDATA(I)
IF (NDEL(I) .NE. 0) L1 = L1 + 3 + NDEL(I)
CALL ALG03 (LNCT,L1)
IF (IPRTC .EQ. 1) WRITE (LOG2,270) I,NDATA(I),NTERP(I),NDIMEN(I),
1 NMACH(I),NWORK(I),NLOSS(I),NL1(I),NL2(I),NEVAL(I),NCURVE(I)
2, NLITER(I),NDEL(I),NOUT1(I),NOUT2(I),NOUT3(I),NBLADE(I)
270 FORMAT (//7X,7HSTATION,I3, 8H NDATA=,I3,7H NTERP=,I2,8H NDIMEN=,
1 I2,7H NMACH=,I2,7H NWORK=,I2,7H NLOSS=,I2,5H NL1=,I3,
2 5H NL2=,I3,7H NEVAL=,I2,8H NCURVE=,I2,8H NLITER=,I3,
3 6H NDEL=,I3, /19X,6HNOUT1=,I2,7H NOUT2=,I2,7H NOUT3=,I2,
4 8H NBLADE=,I3)
SPEED(I) = 0.0
IF (NDATA(I) .EQ. 0) GO TO 320
NEXT = LAST + 1
LAST = LAST + NDATA(I)
IS2(I) = NEXT
IF (LAST .GT. 100) GO TO 550
CALL FREAD (LOGN,SPEED(I),1,1)
CWKBD IF (DEBUG .AND.LOGN.EQ.LOG1) CALL BUG1 ('ALG02 ',271,SPEED(I),1)
DO 275 K = NEXT,LAST
CALL FREAD (LOGN,RDATA,6,1)
CWKBD IF (DEBUG .AND. LOGN.EQ.LOG1) CALL BUG1 ('ALG02 ',272,RDATA,6)
DATAC(K) = RDATA(1)
DATA1(K) = RDATA(2)
DATA2(K) = RDATA(3)
DATA3(K) = RDATA(4)
DATA4(K) = RDATA(5)
DATA5(K) = RDATA(6)
CALL FREAD (LOGN,RDATA,4,1)
CWKBD IF (DEBUG .AND. LOGN.EQ.LOG1) CALL BUG1 ('ALG02 ',273,RDATA,4)
DATA6(K) = RDATA(1)
DATA7(K) = RDATA(2)
DATA8(K) = RDATA(3)
275 DATA9(K) = RDATA(4)
IF (IPRTC .EQ. 1) WRITE (LOG2,290) SPEED(I),(DATAC(K),DATA1(K),
1 DATA2(K),DATA3(K),DATA4(K),DATA5(K),DATA6(K),DATA7(K),
2 DATA8(K),DATA9(K),K=NEXT,LAST)
290 FORMAT (//10X,7HSPEED =,F9.2, //13X,5HDATAC,7X,5HDATA1,7X,5HDATA2,
1 7X,5HDATA3,7X,5HDATA4,7X,5HDATA5,7X,5HDATA6,7X,5HDATA7,7X,
2 5HDATA8,7X,5HDATA9, //,
3 (10X,F9.4,F12.3,F13.6,F11.4,F12.5,F12.5,4F12.4))
IF (NWORK(I) .NE. 1) GO TO 296
DO 294 K = NEXT,LAST
294 DATA1(K) = DATA1(K)*SCLFAC**2
296 IF (NEVAL(I).GT.0 .AND. NSTRMS.GT.NDATA(I)) LAST = LAST + NSTRMS -
1 NDATA(I)
IF (NDEL(I) .EQ. 0) GO TO 320
NEXT = LASTD + 1
LASTD = LASTD + NDEL(I)
IS3(I) = NEXT
IF (LASTD .GT. 100) GO TO 550
DO 298 K = NEXT,LASTD
CALL FREAD (LOG1,DELC(K), 1,0)
298 CALL FREAD (LOG1,DELTA(K),1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',298,DELC(NEXT),LASTD-NEXT+1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',299,DELTA(NEXT),LASTD-NEXT+1)
IF (IPRTC .EQ. 1) WRITE(LOG2,310)(DELC(K),DELTA(K),K=NEXT,LASTD)
310 FORMAT (//13X,4HDELC,8X,5HDELTA, //,(10X,F9.4,F12.4))
320 CONTINUE
CALL ALG03 (LNCT,5+NSTNS)
DO 325 I = 1,NSTNS
CALL FREAD (LOG1,RDATA,3,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',323,RDATA,3)
WBLOCK(I) = RDATA(1)
BBLOCK(I) = RDATA(2)
325 BDIST(I) = RDATA(3)
IF (IPRTC .EQ. 1) WRITE (LOG2,340) (I,WBLOCK(I),BBLOCK(I),
1 BDIST(I),I=1,NSTNS)
340 FORMAT (//10X,'BLOCKAGE FACTOR SPECIFICATIONS', //10X,'STATION ',
1 ' WALL BLOCKAGE WAKE BLOCKAGE WAKE DISTRIBUTION FACTOR',
2 //,(10X,I4,F16.5,F16.5,F19.3))
IF (NSET1 .EQ. 0) GO TO 380
DO 370 K = 1,NSET1
CALL FREAD (LOG1,L1,1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',342,L1,1)
DO 345 J = 1,L1
CALL FREAD (LOG1,RDATA,4,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',343,RDATA,4)
DIFF(J,K) = RDATA(1)
FDHUB(J,K) = RDATA(2)
FDMID(J,K) = RDATA(3)
345 FDTIP(J,K) = RDATA(4)
CALL ALG03 (LNCT,6+L1)
IF (IPRTC .EQ. 1) WRITE (LOG2,360) K,L1,(DIFF(J,K),FDHUB(J,K),
1 FDMID(J,K),FDTIP(J,K),J=1,L1)
360 FORMAT (//10X,'LOSS PARAMETER / DIFFUSION FACTOR CURVES FOR BLADE'
1, ' TYPE',I2,I5,' D-FACTORS GIVEN', //15X,9HDIFFUSION,5X,
2 'L O S S P A R A M E T E R S', /16X,7HFACTORS,8X,3HHUB,
3 9X,3HMID,8X,3HTIP,//,(15X,F8.3,F13.5,F12.5,F11.5))
370 NDIFF(K) = L1
380 IF (NSET2 .EQ. 0) GO TO 450
DO 440 K = 1,NSET2
CALL FREAD (LOG1,IDATA,2,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',385,IDATA,2)
L1 = IDATA(1)
L2 = IDATA(2)
CALL ALG03 (LNCT,7+L1)
NM(K) = L1
NRAD(K) = L2
CALL FREAD (LOG1,TERAD(1,K),1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',391,TERAD(1,K),1)
DO 398 J = 1,L1
CALL FREAD (LOG1,RDATA,2,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',398,RDATA,2)
DM(J,1,K) = RDATA(1)
398 WFRAC(J,1,K) = RDATA(2)
IF (IPRTC .EQ. 1) WRITE (LOG2,410) K,L1,L2,TERAD(1,K),(DM(J,1,K),
1 WFRAC(J,1,K),J=1,L1)
410 FORMAT (//10X,'FRACTIONAL LOSS DISTRIBUTION CURVES FOR BLADE ',
1 'CLASS',I2,I5,' POINTS GIVEN AT',I3,' RADIAL LOCATIONS', //
2 10X,'FRACTION OF COMPUTING STATION LENGTH AT BLADE EXIT =',
3 F7.4, //10X,'FRACTION OF MERIDIONAL CHORD',4X,
4 'LOSS/LOSS AT TRAILING EDGE', //,(15X,F11.4,20X,F11.4))
IF (L2 .EQ. 1) GO TO 440
DO 420 L = 2,L2
CALL ALG03 (LNCT,5+L1)
CALL FREAD (LOG1,TERAD(L,K),1,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',411,TERAD(L,K),1)
DO 415 J = 1,L1
CALL FREAD (LOG1,RDATA,2,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',412,RDATA,2)
DM(J,L,K) = RDATA(1)
415 WFRAC(J,L,K) = RDATA(2)
420 IF (IPRTC .EQ. 1) WRITE (LOG2,430) TERAD(L,K),(DM(J,L,K),
1 WFRAC(J,L,K),J=1,L1)
430 FORMAT (//10X,'FRACTION OF COMPUTING STATION LENGTH AT BLADE ',
1 'EXIT =',F7.4, //10X,'FRACTION OF MERIDIONAL CHORD',4X,
2 'LOSS/LOSSAT TRAILING EDGE', //,(15X,F11.4,20X,F11.4))
440 CONTINUE
450 IF (NSPLIT.EQ.0 .AND. NREAD.EQ.0) GO TO 570
DO 455 J = 1,NSTRMS,6
455 CALL FREAD (LOG1,DELF(J),6,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',455,DELF,NSTRMS)
L1 = 5
IF (NSTRMS .GE. 16) L1 = 8
CALL ALG03 (LNCT,L1)
IF (IPRTC .EQ. 1) WRITE (LOG2,470)
L1 = NSTRMS
IF (NSTRMS .GT. 15) L1 = 15
IF (IPRTC .EQ. 1) WRITE (LOG2,480) (J,J=1,L1)
480 FORMAT (//10X,'STREAMLINE',I5,14I7)
470 FORMAT (//10X,'PROPORTIONS OF TOTAL FLOW BETWEEN HUB AND EACH ',
1 'STREAMLINE ARE TO BE AS FOLLOWS')
IF (IPRTC .EQ. 1) WRITE(LOG2,490) (DELF(J),J=1,L1)
490 FORMAT (10X,4HFLOW,7X,15F7.4)
IF (NSTRMS .LE. 15) GO TO 500
L1 = L1 + 1
IF (IPRTC .EQ. 1) WRITE (LOG2,480) (J,J=L1,NSTRMS)
IF (IPRTC .EQ. 1) WRITE (LOG2,490) (DELF(J),J=L1,NSTRMS)
500 IF (NREAD .EQ. 0) GO TO 570
DO 505 I = 1,NSTNS
DO 505 J = 1,NSTRMS
CALL FREAD (LOG1,RDATA,3,0)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',501,RDATA,3)
R(J,I) = RDATA(1)
X(J,I) = RDATA(2)
XL(J,I) = RDATA(3)
CALL FREAD (LOG1,IDATA,2,1)
CWKBD IF (DEBUG) CALL BUG1 ('ALG02 ',502,IDATA,2)
II(J,I) = IDATA(1)
505 JJ(J,I) = IDATA(2)
CALL ALG03 (LNCT,5+NSTRMS)
IF (IPRTC .EQ. 1) WRITE (LOG2,520)
520 FORMAT (//10X,'ESTIMATED STREAMLINE COORDINATES')
DO 530 I = 1,NSTNS
IF (I .GT. 1) CALL ALG03 (LNCT,3+NSTRMS)
530 IF (IPRTC .EQ. 1) WRITE (LOG2,540) (I,J,R(J,I),X(J,I),XL(J,I),
1 II(J,I),JJ(J,I),J=1,NSTRMS)
540 FORMAT (//10X,'STATION STREAMLINE RADIUS AXIAL COORDINATE ',
1 'L -COORDINATE CHECKS- I J', //,
2 (3X,2I11,F14.4,F12.4,F16.4,I17,I5))
GO TO 570
550 WRITE (LOG2,560)
560 FORMAT (////10X,'JOB STOPPED - TOO MUCH INPUT DATA')
CALL MESAGE (-37,0,NAME)
570 RETURN
END
|
{"hexsha": "e88725799a2630eb345d0d9c814c80d05993322b", "size": 18589, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "mis/alg02.f", "max_stars_repo_name": "ldallolio/NASTRAN-95", "max_stars_repo_head_hexsha": "6d2c175f5b53ebaec4ba2b5186f7926ef9d0ed47", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-01-09T14:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T11:51:42.000Z", "max_issues_repo_path": "mis/alg02.f", "max_issues_repo_name": "gassive/NASTRAN95", "max_issues_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-01-17T07:30:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T19:37:44.000Z", "max_forks_repo_path": "mis/alg02.f", "max_forks_repo_name": "gassive/NASTRAN95", "max_forks_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-04-07T20:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T14:16:01.000Z", "avg_line_length": 42.4406392694, "max_line_length": 73, "alphanum_fraction": 0.539512615, "num_tokens": 8271}
|
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
from simpa.core.device_digital_twins import SlitIlluminationGeometry, LinearArrayDetectionGeometry, PhotoacousticDevice
from simpa import perform_k_wave_acoustic_forward_simulation
from simpa.core.simulation_modules.reconstruction_module.reconstruction_module_delay_and_sum_adapter import \
reconstruct_delay_and_sum_pytorch
from simpa import MCXAdapter, ModelBasedVolumeCreationAdapter, \
GaussianNoise
from simpa.utils import Tags, Settings, TISSUE_LIBRARY
from simpa.core.simulation import simulate
from simpa.io_handling import load_data_field
import numpy as np
from simpa.utils.path_manager import PathManager
from simpa_tests.manual_tests import ManualIntegrationTestClass
import matplotlib.pyplot as plt
# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class KWaveAcousticForwardConvenienceFunction(ManualIntegrationTestClass):
"""
    This class tests the convenience function for acoustic forward simulation.
    It first creates a volume and runs an optical forward simulation.
    Then the convenience function itself is tested.
    Lastly, the generated time series data is reconstructed to check that everything worked.
"""
def setup(self):
"""
Runs a pipeline consisting of volume creation and optical simulation. The resulting hdf5 file of the
simple test volume is saved at SAVE_PATH location defined in the path_config.env file.
"""
self.path_manager = PathManager()
self.VOLUME_TRANSDUCER_DIM_IN_MM = 75
self.VOLUME_PLANAR_DIM_IN_MM = 20
self.VOLUME_HEIGHT_IN_MM = 25
self.SPACING = 0.25
self.RANDOM_SEED = 4711
self.VOLUME_NAME = "TestKWaveAcousticForwardConvenienceFunction_" + str(self.RANDOM_SEED)
np.random.seed(self.RANDOM_SEED)
# These parameters set the general properties of the simulated volume
self.general_settings = {
Tags.RANDOM_SEED: self.RANDOM_SEED,
Tags.VOLUME_NAME: self.VOLUME_NAME,
Tags.SIMULATION_PATH: self.path_manager.get_hdf5_file_save_path(),
Tags.SPACING_MM: self.SPACING,
Tags.DIM_VOLUME_Z_MM: self.VOLUME_HEIGHT_IN_MM,
Tags.DIM_VOLUME_X_MM: self.VOLUME_TRANSDUCER_DIM_IN_MM,
Tags.DIM_VOLUME_Y_MM: self.VOLUME_PLANAR_DIM_IN_MM,
Tags.WAVELENGTHS: [700]
}
self.settings = Settings(self.general_settings)
self.settings.set_volume_creation_settings({
Tags.SIMULATE_DEFORMED_LAYERS: True,
Tags.STRUCTURES: self.create_example_tissue()
})
self.settings.set_optical_settings({
Tags.OPTICAL_MODEL_NUMBER_PHOTONS: 1e7,
Tags.OPTICAL_MODEL_BINARY_PATH: self.path_manager.get_mcx_binary_path(),
Tags.OPTICAL_MODEL: Tags.OPTICAL_MODEL_MCX,
Tags.ILLUMINATION_TYPE: Tags.ILLUMINATION_TYPE_PENCIL,
Tags.LASER_PULSE_ENERGY_IN_MILLIJOULE: 50,
Tags.MCX_ASSUMED_ANISOTROPY: 0.9
})
self.settings["noise_model"] = {
Tags.NOISE_MEAN: 0.0,
Tags.NOISE_STD: 0.4,
Tags.NOISE_MODE: Tags.NOISE_MODE_ADDITIVE,
Tags.DATA_FIELD: Tags.DATA_FIELD_INITIAL_PRESSURE,
Tags.NOISE_NON_NEGATIVITY_CONSTRAINT: True
}
self.device = PhotoacousticDevice(device_position_mm=np.array([self.VOLUME_TRANSDUCER_DIM_IN_MM/2,
self.VOLUME_PLANAR_DIM_IN_MM/2,
0]))
        self.device.set_detection_geometry(
            LinearArrayDetectionGeometry(device_position_mm=self.device.device_position_mm,
                                         pitch_mm=0.25,
                                         number_detector_elements=200))
self.device.add_illumination_geometry(SlitIlluminationGeometry(slit_vector_mm=[100, 0, 0]))
# run pipeline including volume creation and optical mcx simulation
self.pipeline = [
ModelBasedVolumeCreationAdapter(self.settings),
MCXAdapter(self.settings),
GaussianNoise(self.settings, "noise_model")
]
def teardown(self):
os.remove(self.settings[Tags.SIMPA_OUTPUT_PATH])
def perform_test(self):
simulate(self.pipeline, self.settings, self.device)
self.test_convenience_function()
def test_convenience_function(self):
# load initial pressure
initial_pressure = load_data_field(self.path_manager.get_hdf5_file_save_path() + "/" +
self.VOLUME_NAME + ".hdf5",
Tags.DATA_FIELD_INITIAL_PRESSURE, wavelength=700)
image_slice = np.s_[:, 40, :]
self.initial_pressure = np.rot90(initial_pressure[image_slice], -1)
# define acoustic settings and run simulation with convenience function
acoustic_settings = {
Tags.ACOUSTIC_SIMULATION_3D: True,
Tags.ACOUSTIC_MODEL_BINARY_PATH: self.path_manager.get_matlab_binary_path(),
Tags.KWAVE_PROPERTY_ALPHA_POWER: 0.00,
Tags.KWAVE_PROPERTY_SENSOR_RECORD: "p",
Tags.KWAVE_PROPERTY_PMLInside: False,
Tags.KWAVE_PROPERTY_PMLSize: [31, 32],
Tags.KWAVE_PROPERTY_PMLAlpha: 1.5,
Tags.KWAVE_PROPERTY_PlotPML: False,
Tags.RECORDMOVIE: False,
Tags.MOVIENAME: "visualization_log",
Tags.ACOUSTIC_LOG_SCALE: True,
Tags.MODEL_SENSOR_FREQUENCY_RESPONSE: False
}
        time_series_data = perform_k_wave_acoustic_forward_simulation(
            initial_pressure=self.initial_pressure,
            detection_geometry=self.device.get_detection_geometry(),
            speed_of_sound=1540, density=1000, alpha_coeff=0.0)
# reconstruct the time series data to compare it with initial pressure
self.settings.set_reconstruction_settings({
Tags.RECONSTRUCTION_MODE: Tags.RECONSTRUCTION_MODE_PRESSURE,
Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION: True,
Tags.RECONSTRUCTION_BMODE_METHOD: Tags.RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM,
Tags.DATA_FIELD_SPEED_OF_SOUND: 1540,
Tags.SPACING_MM: 0.25,
Tags.SENSOR_SAMPLING_RATE_MHZ: 40,
})
self.reconstructed = reconstruct_delay_and_sum_pytorch(
time_series_data.copy(), self.device.get_detection_geometry(), self.settings)
def visualise_result(self, show_figure_on_screen=True, save_path=None):
        """Plot initial pressure and reconstructed image volume for manual comparison."""
plt.subplot(2, 2, 1)
plt.title("Initial Pressure Pipeline")
plt.imshow(self.initial_pressure)
plt.subplot(2, 2, 2)
plt.title("Reconstructed Image Pipeline")
plt.imshow(np.rot90(self.reconstructed, -1))
plt.tight_layout()
if show_figure_on_screen:
plt.show()
else:
if save_path is None:
save_path = ""
            plt.savefig(save_path + "TestKWaveConvenienceFunction.png")
plt.close()
def create_example_tissue(self):
"""
This is a very simple example script of how to create a tissue definition.
It contains a muscular background, an epidermis layer on top of the muscles
and a blood vessel.
"""
background_dictionary = Settings()
background_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.constant(1e-10, 1e-10, 1.0)
background_dictionary[Tags.STRUCTURE_TYPE] = Tags.BACKGROUND
muscle_dictionary = Settings()
muscle_dictionary[Tags.PRIORITY] = 1
muscle_dictionary[Tags.STRUCTURE_START_MM] = [0, 0, 0]
muscle_dictionary[Tags.STRUCTURE_END_MM] = [0, 0, 100]
muscle_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.constant(0.05, 100, 0.9)
muscle_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
muscle_dictionary[Tags.ADHERE_TO_DEFORMATION] = True
muscle_dictionary[Tags.STRUCTURE_TYPE] = Tags.HORIZONTAL_LAYER_STRUCTURE
vessel_1_dictionary = Settings()
vessel_1_dictionary[Tags.PRIORITY] = 3
vessel_1_dictionary[Tags.STRUCTURE_START_MM] = [self.VOLUME_TRANSDUCER_DIM_IN_MM/2,
0, 10]
vessel_1_dictionary[Tags.STRUCTURE_END_MM] = [
self.VOLUME_TRANSDUCER_DIM_IN_MM/2, self.VOLUME_PLANAR_DIM_IN_MM, 10]
vessel_1_dictionary[Tags.STRUCTURE_RADIUS_MM] = 3
vessel_1_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.blood()
vessel_1_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
vessel_1_dictionary[Tags.ADHERE_TO_DEFORMATION] = False
vessel_1_dictionary[Tags.STRUCTURE_TYPE] = Tags.CIRCULAR_TUBULAR_STRUCTURE
vessel_2_dictionary = Settings()
vessel_2_dictionary[Tags.PRIORITY] = 3
vessel_2_dictionary[Tags.STRUCTURE_START_MM] = [self.VOLUME_TRANSDUCER_DIM_IN_MM/2 - 10,
0, 5]
vessel_2_dictionary[Tags.STRUCTURE_END_MM] = [
self.VOLUME_TRANSDUCER_DIM_IN_MM/2 - 10, self.VOLUME_PLANAR_DIM_IN_MM, 5]
vessel_2_dictionary[Tags.STRUCTURE_RADIUS_MM] = 2
vessel_2_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.blood()
vessel_2_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
vessel_2_dictionary[Tags.ADHERE_TO_DEFORMATION] = False
vessel_2_dictionary[Tags.STRUCTURE_TYPE] = Tags.CIRCULAR_TUBULAR_STRUCTURE
epidermis_dictionary = Settings()
epidermis_dictionary[Tags.PRIORITY] = 8
epidermis_dictionary[Tags.STRUCTURE_START_MM] = [0, 0, 1]
epidermis_dictionary[Tags.STRUCTURE_END_MM] = [0, 0, 1.1]
epidermis_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.epidermis()
epidermis_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
epidermis_dictionary[Tags.ADHERE_TO_DEFORMATION] = True
epidermis_dictionary[Tags.STRUCTURE_TYPE] = Tags.HORIZONTAL_LAYER_STRUCTURE
tissue_dict = Settings()
tissue_dict[Tags.BACKGROUND] = background_dictionary
tissue_dict["muscle"] = muscle_dictionary
tissue_dict["epidermis"] = epidermis_dictionary
tissue_dict["vessel_1"] = vessel_1_dictionary
tissue_dict["vessel_2"] = vessel_2_dictionary
return tissue_dict
if __name__ == '__main__':
test = KWaveAcousticForwardConvenienceFunction()
test.run_test(show_figure_on_screen=False)
|
{"hexsha": "f81e6f765fb2c951a1b3a358bc3ab07fe69f4752", "size": 11140, "ext": "py", "lang": "Python", "max_stars_repo_path": "simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py", "max_stars_repo_name": "IMSY-DKFZ/simpa", "max_stars_repo_head_hexsha": "b8bddcf43a4bff2564f0ec208dc511b82e49bfb4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-03-14T15:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T02:34:25.000Z", "max_issues_repo_path": "simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py", "max_issues_repo_name": "jgroehl/simpa", "max_issues_repo_head_hexsha": "e56f0802e5a8555ee8bb139dd4f776025e7e9267", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-03-18T07:19:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:15:19.000Z", "max_forks_repo_path": "simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py", "max_forks_repo_name": "IMSY-DKFZ/simpa", "max_forks_repo_head_hexsha": "b8bddcf43a4bff2564f0ec208dc511b82e49bfb4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.8596491228, "max_line_length": 119, "alphanum_fraction": 0.6682226212, "include": true, "reason": "import numpy", "num_tokens": 2469}
|
#include <sjc.h>
#include <boost/process.hpp>
namespace fs = boost::filesystem;
namespace po = boost::program_options;
namespace bp = boost::process;
#ifdef YYDEBUG
extern int yydebug;
#endif
void __fail(const char* s) {
printf("FAIL: %s\n", s);
exit(-1);
}
void createProject(string typeName) {
auto fullPath = fs::current_path();
auto workspaceFolderBasename = fullPath.filename().string();
printf("Creating new %s project for app %s\n", typeName.c_str(), workspaceFolderBasename.c_str());
fs::create_directory(".vscode");
ofstream streamTasks;
streamTasks.open(".vscode/tasks.json");
if (typeName == "ui") {
streamTasks <<
R"({
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "sjc",
"type": "shell",
"command": "../sj/sjc main.sj --no-lines",
"promptOnClose": true,
"group": "build",
"presentation": {
"echo": true,
"reveal": "always",
"focus": true,
"panel": "shared"
},
"problemMatcher": [
"$gcc"
]
},
{
"label": "gcc",
"type": "shell",
"command": "gcc -g main.c -I. -I/usr/local/include/freetype2 -I/usr/local/include -L/usr/local/lib -lSDL2 -lSDL2main -lpng16 -lfreetype -o ${workspaceFolderBasename} -framework OpenGL",
"windows": {
"command": "gcc -g main.c -I. -I/mingw64/include/freetype2 -I/mingw64/include/SDL2 -L/mingw64/lib -Dmain=SDL_main -DWIN32 -lmingw32 -lSDL2main -lSDL2 -llibpng16 -lopengl32 -lfreetype -lglew32 -o ${workspaceFolderBasename}.exe"
},
"dependsOn" : "sjc",
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": [
"$gcc"
]
},
{
"label": "emcc",
"type": "shell",
"command": "emcc -g main.c -o ${workspaceFolderBasename}.html -I. -s USE_SDL=2 -s USE_FREETYPE=1 -s USE_LIBPNG=1 -s USE_WEBGL2=1 --preload-file assets",
"dependsOn" : "sjc",
"group": "build",
"problemMatcher": [
"$gcc"
]
},
{
"label": "emrun",
"type": "shell",
"command": "emcc -g main.c -o ${workspaceFolderBasename}.html -I. -s USE_SDL=2 -s USE_FREETYPE=1 -s USE_LIBPNG=1 -s USE_WEBGL2=1 --preload-file assets --emrun && emrun ${workspaceFolderBasename}.html",
"dependsOn" : "sjc",
"group": "build",
"problemMatcher": [
"$gcc"
]
}
]
})";
} else {
streamTasks <<
R"({
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "sjc",
"type": "shell",
"command": "../sj/sjc main.sj --no-lines",
"promptOnClose": true,
"group": "build",
"presentation": {
"echo": true,
"reveal": "always",
"focus": true,
"panel": "shared"
},
"problemMatcher": [
"$gcc"
]
},
{
"label": "gcc",
"type": "shell",
"command": "gcc -g main.c -I. -o ${workspaceFolderBasename}",
"windows": {
"command": "gcc -g main.c -I. -o ${workspaceFolderBasename}.exe"
},
"dependsOn" : "sjc",
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": [
"$gcc"
]
}
]
})";
}
ofstream streamSettings;
streamSettings.open(".vscode/settings.json");
streamSettings <<
R"({
"terminal.integrated.shell.windows": "C:\\msys64\\usr\\bin\\bash.exe",
"terminal.integrated.shellArgs.windows": [
"--login",
"-i",
],
"terminal.integrated.env.windows": {
"CHERE_INVOKING": "1",
"MSYSTEM": "MINGW64",
},
})";
ofstream streamLaunch;
streamLaunch.open(".vscode/launch.json");
streamLaunch <<
R"({
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "(gdb) Launch",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/${workspaceFolderBasename}",
"windows": {
"program": "${workspaceFolder}/${workspaceFolderBasename}.exe",
"miDebuggerPath": "C:/msys64/mingw64/bin/gdb.exe"
},
"args": [],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": true,
"MIMode": "gdb",
"miDebuggerPath": "/usr/local/bin/gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
],
"preLaunchTask": "gcc"
}
]
})";
auto contents = string(R"(main.c
{workspaceFolderBasename}.exe
{workspaceFolderBasename}
{workspaceFolderBasename}.dSYM/
)");
boost::replace_all(contents, "{workspaceFolderBasename}", workspaceFolderBasename);
ofstream streamGitIgnore;
streamGitIgnore.open(".gitignore");
streamGitIgnore << contents;
ofstream streamMain;
streamMain.open("main.sj");
if (typeName == "ui") {
streamMain <<
R"(library "release-1.0:https://github.com/justinmann/sj-lib-ui.git"
root : textElement(
text : "Hello World"
)
runLoop())";
} else {
streamMain <<
R"(console.writeLine("hello world"))";
}
}
int main(int argc, char **argv) {
po::options_description generic_options("Generic options");
generic_options.add_options()
("help", "show helper")
;
po::options_description config_options("Configuration");
config_options.add_options()
("no-lines", "do not output #lineno directive")
("vs-errors", "output vs compatible error format")
("debug", "output debug files")
("debug-file", po::value<string>(), "filename for debug output")
("debug-leaks", "add extra debug logging to detect memory leaks")
("debug-no-free", "do not free any objects, only use this when debugging a leak")
("c-file", po::value<string>(), "filename for c output")
("error-file", po::value<string>(), "filename for error output")
("new-project", po::value<string>(), "ui or console")
("skip-library-pull", "skip updating the submodules for libraries")
("skip-library-copy", "skip copying assets from the libraries")
#ifdef YYDEBUG
("debug-parser", "add extra debug logging to detect memory leaks")
#endif
;
po::options_description hidden_options("Hidden options");
hidden_options.add_options()
("sj-file", "file to compile")
;
po::options_description cmdline_options;
cmdline_options.add(generic_options).add(config_options).add(hidden_options);
po::options_description config_file_options;
config_file_options.add(config_options).add(hidden_options);
po::options_description visible("Allowed options");
visible.add(generic_options).add(config_options);
po::positional_options_description p;
p.add("sj-file", -1);
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).
options(cmdline_options).positional(p).run(), vm);
po::notify(vm);
if (vm.count("help") || (!vm.count("sj-file") && !vm.count("new-project"))) {
cout << visible << "\n";
return 1;
}
auto libraryPull = vm.count("skip-library-pull") == 0;
auto libraryCopy = vm.count("skip-library-copy") == 0;
bool outputLines = vm.count("no-lines") == 0;
bool outputDebug = vm.count("debug");
bool outputVSErrors = vm.count("vs-errors");
bool outputDebugLeaks = vm.count("debug-leaks");
bool outputFree = vm.count("debug-no-free") == 0;
#ifdef YYDEBUG
yydebug = vm.count("debug-parser"); // use this to trigger the verbose debug output from bison
#endif
auto cFilename = vm.count("c-file") ? vm["c-file"].as<string>() : string();
auto debugFilename = vm.count("debug-file") ? vm["debug-file"].as<string>() : string();
auto errorFilename = vm.count("error-file") ? vm["error-file"].as<string>() : string();
auto sjFilename = vm.count("sj-file") ? vm["sj-file"].as<string>() : string();
auto newProject = vm.count("new-project") ? vm["new-project"].as<string>() : string();
if (sjFilename.size() > 0) {
auto path = fs::path(sjFilename);
if (cFilename.size() == 0) {
cFilename = fs::change_extension(path, ".cpp").string();
}
if (outputDebug) {
if (debugFilename.size() == 0) {
debugFilename = fs::change_extension(path, ".debug").string();
}
}
Compiler compiler(outputLines, outputVSErrors, outputDebugLeaks, outputFree, libraryPull, libraryCopy);
compiler.transpile(path.string(), cFilename, errorFilename, debugFilename);
}
if (newProject.size() > 0) {
createProject(newProject);
}
return 0;
}
|
{"hexsha": "452623f342d2de7d410fedb72b1a01a0c583c932", "size": 9922, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/main.cpp", "max_stars_repo_name": "justinmann/sj", "max_stars_repo_head_hexsha": "24d0a75723b024f17de6dab9070979a4f1bf1a60", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2017-01-04T02:27:10.000Z", "max_stars_repo_stars_event_max_datetime": "2017-01-22T05:36:41.000Z", "max_issues_repo_path": "src/main.cpp", "max_issues_repo_name": "justinmann/sj", "max_issues_repo_head_hexsha": "24d0a75723b024f17de6dab9070979a4f1bf1a60", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.cpp", "max_forks_repo_name": "justinmann/sj", "max_forks_repo_head_hexsha": "24d0a75723b024f17de6dab9070979a4f1bf1a60", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-06-15T12:17:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-15T12:17:26.000Z", "avg_line_length": 32.7458745875, "max_line_length": 258, "alphanum_fraction": 0.5446482564, "num_tokens": 2404}
|
# module for estimating pose by extended kalman filter
# initial pose is decided randomly
# global localization problem
using Distributions, LinearAlgebra, StatsBase
include(joinpath(split(@__FILE__, "src")[1], "src/model/map/map.jl"))
include(joinpath(split(@__FILE__, "src")[1], "src/common/covariance_ellipse/covariance_ellipse.jl"))
include(joinpath(split(@__FILE__, "src")[1], "src/common/state_transition/state_transition.jl"))
include(joinpath(split(@__FILE__, "src")[1], "src/common/observation_function/observation_function.jl"))
mutable struct GlobalKf
belief
motion_noise_stds
estimated_pose
estimated_cov
map
dist_dev
dir_dev
function GlobalKf(init_pose::Array;
motion_noise_stds::Dict=Dict("nn"=>0.20, "no"=>0.001, "on"=>0.11, "oo"=>0.20),
env_map=nothing, dist_dev_rate=0.14, dir_dev=0.05)
self = new()
self.belief = MvNormal([rand(Uniform(-5.0, 5.0)), rand(Uniform(-5.0, 5.0)), rand(Uniform(-pi, pi))],
diagm(0 => [1e+4, 1e+4, 1e+4]))
self.motion_noise_stds = motion_noise_stds
self.estimated_pose = self.belief.μ
self.estimated_cov = self.belief.Σ
self.map = env_map
self.dist_dev = dist_dev_rate
self.dir_dev = dir_dev
return self
end
end
function mat_M(speed, yaw_rate, time, stds)
return diagm(0 => [stds["nn"]^2*abs(speed)/time + stds["no"]^2*abs(yaw_rate)/time,
stds["on"]^2*abs(speed)/time + stds["oo"]^2*abs(yaw_rate)/time])
end
function mat_A(speed, yaw_rate, time, theta)
st, ct = sin(theta), cos(theta)
stw, ctw = sin(theta + yaw_rate * time), cos(theta + yaw_rate * time)
return [(stw - st)/yaw_rate -speed/(yaw_rate^2)*(stw - st) + speed/yaw_rate*time*ctw;
(-ctw + ct)/yaw_rate -speed/(yaw_rate^2)*(-ctw + ct) + speed/yaw_rate*time*stw;
0 time]
end
function mat_F(speed, yaw_rate, time, theta)
F = diagm(0 => [1.0, 1.0, 1.0])
F[1, 3] = speed / yaw_rate * (cos(theta + yaw_rate * time) - cos(theta))
F[2, 3] = speed / yaw_rate * (sin(theta + yaw_rate * time) - sin(theta))
return F
end
function mat_H(mu_pose, obj_pose)
obj_x, obj_y = obj_pose[1], obj_pose[2]
mu_x, mu_y = mu_pose[1], mu_pose[2]
mu_l = sqrt((mu_x - obj_x)^2 + (mu_y - obj_y)^2)
return [(mu_x - obj_x)/mu_l (mu_y - obj_y)/mu_l 0.0;
(obj_y - mu_y)/(mu_l^2) (mu_x - obj_x)/(mu_l^2) -1.0]
end
function mat_Q(dist_dev, dir_dev)
return [dist_dev^2 0.0;
0.0 dir_dev^2]
end
function motion_update(self::GlobalKf, speed,
yaw_rate, time)
if abs(yaw_rate) < 1e-5 # to prevent division by zero
yaw_rate = 1e-5
end
M = mat_M(speed, yaw_rate, time, self.motion_noise_stds)
A = mat_A(speed, yaw_rate, time, self.estimated_pose[3])
F = mat_F(speed, yaw_rate, time, self.estimated_pose[3])
self.estimated_cov = F * self.estimated_cov * F' + A * M * A'
self.estimated_pose = state_transition(speed, yaw_rate, time, self.estimated_pose)
end
function observation_update(self::GlobalKf, observation)
for obs in observation
z = obs[1] # [distance, direction]
id = obs[2]
H = mat_H(self.estimated_pose, self.map.objects[id].pose)
estimated_z = observation_function(self.estimated_pose,
self.map.objects[id].pose)
# observation noise
Q = mat_Q(estimated_z[1]*self.dist_dev, self.dir_dev)
# kalman gain
K = self.estimated_cov * H' * inv(Q + H*self.estimated_cov*H')
self.estimated_pose += K * (z - estimated_z)
self.estimated_cov = (Matrix{Float64}(I, 3, 3) - K*H) * self.estimated_cov
end
end
function draw!(self::GlobalKf)
draw_covariance_ellipse!(self.estimated_pose, self.estimated_cov, 3)
end
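# Minimal usage sketch (commented out; not part of the original module).
# Constructing the map depends on map.jl, so the setup below is an
# assumption for illustration only.
# estimator = GlobalKf([0.0, 0.0, 0.0], env_map=env_map)  # env_map built via map.jl
# motion_update(estimator, 0.2, 10.0 / 180 * pi, 0.1)     # speed [m/s], yaw rate [rad/s], dt [s]
# observation_update(estimator, [([2.0, pi/6], 1)])       # ([distance, direction], landmark id)
# draw!(estimator)                                        # draws the covariance ellipse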
|
{"hexsha": "67e7512c6cf618f97e8f08e37da35bc9374774a1", "size": 3742, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/localization/global_localization/global_kf.jl", "max_stars_repo_name": "ShisatoYano/JuliaAutonomy", "max_stars_repo_head_hexsha": "d1643add4ab9625996fafeac23fc03f25eedff12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-03-10T12:43:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T16:40:19.000Z", "max_issues_repo_path": "src/localization/global_localization/global_kf.jl", "max_issues_repo_name": "ShisatoYano/JuliaAutonomy", "max_issues_repo_head_hexsha": "d1643add4ab9625996fafeac23fc03f25eedff12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/localization/global_localization/global_kf.jl", "max_forks_repo_name": "ShisatoYano/JuliaAutonomy", "max_forks_repo_head_hexsha": "d1643add4ab9625996fafeac23fc03f25eedff12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-14T02:46:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-07T09:19:40.000Z", "avg_line_length": 36.3300970874, "max_line_length": 105, "alphanum_fraction": 0.653393907, "num_tokens": 1187}
|
from django.db import models
from django.db.models import JSONField
import requests
from pygbif import occurrences
from wikidataintegrator import wdi_core
import pandas as pd
import numpy as np
from ete3 import NCBITaxa
class ENAtoGBIF:
"""
input: ena_query, ena_accession (list)
output: ena2gbif (dict)
"""
all_sequence_return_fields = "accession,study_accession,sample_accession,tax_id,scientific_name,base_count,bio_material,cell_line,cell_type,collected_by,collection_date,country,cultivar,culture_collection,dataclass,description,dev_stage,ecotype,environmental_sample,first_public,germline,host,identified_by,isolate,isolation_source,keywords,lab_host,last_updated,location,mating_type,mol_type,organelle,serotype,serovar,sex,submitted_sex,specimen_voucher,strain,sub_species,sub_strain,tax_division,tissue_lib,tissue_type,topology,variety,altitude,haplotype,plasmid,sequence_md5,sequence_version,sequence_version"
base_url = "https://www.ebi.ac.uk/ena/portal/api/"
ena_accession = None
ena_query = None
ena_return = None
ena_query_param = {
"result": "sequence",
"fields": all_sequence_return_fields,
"format": "json",
"limit": 0
}
gbif_query = {
"institutionCode" : "",
"taxonKey" : ""
}
    def __init__(self, gbif_query: dict = None, ena_accession: list = None, ena_query: str = None):
        self.ena_accession = ena_accession  # accession candidates (i.e. from user/ PaperParser)
        self.ena_query = ena_query  # more flexible search "specimen_voucher=\"*BR)*\"", this will be placed directly in the api query string
        if self.ena_accession is not None and self.ena_query is not None:
            raise Exception("Only accept either one of these: ena_accession, ena_query. Not both.")
        if self.ena_accession is None and self.ena_query is None:
            raise Exception("At least one of these should be provided.")
        if gbif_query:
            #self.gbif_query.update(gbif_query)
            self.gbif_query = gbif_query
def get_ena_results(self):
# construct query strong from list of ena_accession
# FIXME: ena api refuse to process wrong accession, have to filter it before query
if not self.ena_query:
search_r = requests.get(f"{self.base_url}search?includeAccessions={','.join([str(s) for s in self.ena_accession])}", params=self.ena_query_param)
else:
search_r = requests.get(f"{self.base_url}search?query={self.ena_query}", params=self.ena_query_param)
print(search_r.status_code)
results = search_r.json()
        # Key the results by accession, e.g. {'AF123': {'sex': '', 'host': '', 'tax_id': '84861', ...}, ...},
        # and cache them on the instance.
        self.ena_return = {r['accession']: r for r in results}
        return self.ena_return
def get_gbif_results(self):
first = occurrences.search(**self.gbif_query)
results = first['results']
for offset in range(300, min(first['count'], 90000), 300):
args = {**self.gbif_query, **{'offset': offset}}
results += occurrences.search(**args)['results']
return {r['gbifID']: r for r in results}
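    # Paging note: pygbif's occurrences.search returns 300 records per page by
    # default, hence the offset step of 300; the 90000 cap keeps requests below
    # GBIF's deep-paging limit on occurrence searches (roughly the first 100k
    # records) -- an assumption worth re-checking against the current API docs.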
def get_wikidata_results(self, tax_ids:list=None):
if tax_ids is None:
assert (self.ena_return is not None) , "Empty ena API return"
tax_ids = []
            for a, d in self.ena_return.items():
tax_ids.append(d["tax_id"])
# TODO: if there is no match, go up to family level
query_template = """
SELECT ?taxon ?taxonLabel ?ncbi_taxonID ?gbifid WHERE {
VALUES ?ncbi_taxonID {%s}
?taxon wdt:P685 ?ncbi_taxonID.
OPTIONAL {?taxon wdt:P846 ?gbifid .}
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
"""
        result_frames = []
        for tax_ids_subset in np.array_split(list(tax_ids), 30):
            query = query_template % ('"' + '" "'.join(tax_ids_subset.tolist()) + '"')
            try:
                # result_df.shape[0] should match the number of ncbi_taxonID values
                result_df = wdi_core.WDFunctionsEngine.execute_sparql_query(query=query, as_dataframe=True)
                # TODO: check which cells in column gbifid are empty, compare df['ncbi_taxonID'] with the query list
                # TODO: filter the unmatched ncbi_taxonID values and go up to family level
                # query wikidata using the same query_template (ideally recursively, but can also stop if no matching order name is found)
                result_frames.append(result_df)
            except Exception as e:
                print(e)
        # DataFrame.append is not in-place (and is deprecated), so collect the
        # chunks and concatenate them once at the end.
        results = pd.concat(result_frames) if result_frames else pd.DataFrame()
        # Find the family name of them and put it to WHERE?
        return results.replace(np.nan, '').to_dict()
    # FIXME: maybe better to use the gbif API
    def ncbi_taxonomy_get_lineage(self, ncbi_taxonID: list):
        lineage_ls = []
        # http://etetoolkit.org/docs/latest/tutorial/tutorial_ncbitaxonomy.html
        if not hasattr(self, 'ncbi'):
            # self.ncbi is never set in __init__, so create the NCBITaxa helper lazily here
            self.ncbi = NCBITaxa()
        self.ncbi.update_taxonomy_database()  # this may take a long time; better to include the sqlite db (~300mb) in the image
for i in ncbi_taxonID:
lineage_ls.append(self.ncbi.get_lineage(int(i)))
return lineage_ls
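# Minimal usage sketch (hypothetical accessions; assumes network access to the
# ENA portal API):
#
#     mapper = ENAtoGBIF(ena_accession=["AF123456", "AF123457"])
#     ena_records = mapper.get_ena_results()
#     wikidata_hits = mapper.get_wikidata_results()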
|
{"hexsha": "35bc4eb1c0f6845932ab2459a75a9fe03710c164", "size": 5462, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/33/app/django/web/ena.py", "max_stars_repo_name": "elixir-europe/Biohackathon-projects-2020", "max_stars_repo_head_hexsha": "45afecf96bf33fe1015d8c23fd2c251a20274b59", "max_stars_repo_licenses": ["Unlicense", "MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2020-07-22T18:48:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T23:23:02.000Z", "max_issues_repo_path": "projects/33/app/django/web/ena.py", "max_issues_repo_name": "elixir-europe/Biohackathon-projects-2020", "max_issues_repo_head_hexsha": "45afecf96bf33fe1015d8c23fd2c251a20274b59", "max_issues_repo_licenses": ["Unlicense", "MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2020-06-24T16:48:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:51:48.000Z", "max_forks_repo_path": "projects/33/app/django/web/ena.py", "max_forks_repo_name": "elixir-europe/Biohackathon-projects-2020", "max_forks_repo_head_hexsha": "45afecf96bf33fe1015d8c23fd2c251a20274b59", "max_forks_repo_licenses": ["Unlicense", "MIT"], "max_forks_count": 36, "max_forks_repo_forks_event_min_datetime": "2020-06-27T18:35:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-15T09:19:53.000Z", "avg_line_length": 49.2072072072, "max_line_length": 616, "alphanum_fraction": 0.6453679971, "include": true, "reason": "import numpy", "num_tokens": 1353}
|
# -*- coding: utf-8 -*-
"""
@author: Bruno Dato
"""
import itertools
import matplotlib.pyplot as plt
import math
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold
from scipy.io.wavfile import read
from sklearn.neural_network import MLPClassifier
print(__doc__)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
    plt.ylabel('True classes')
    plt.xlabel('Predictions')
aa = np.zeros([100,1024])
ee = np.zeros([100,1024])
eh = np.zeros([100,1024])
ii = np.zeros([100,1024])
oe = np.zeros([100,1024])
oh = np.zeros([100,1024])
oo = np.zeros([100,1024])
uu = np.zeros([100,1024])
yy = np.zeros([100,1024])
# Read the wav files #
for i in range(100):
    suffix = '%02d' % i  # zero-padded index: aa00.wav ... aa99.wav
    aa[i] = read('data/aa'+suffix+'.wav','r')[1]
    ee[i] = read('data/ee'+suffix+'.wav','r')[1]
    eh[i] = read('data/eh'+suffix+'.wav','r')[1]
    ii[i] = read('data/ii'+suffix+'.wav','r')[1]
    oe[i] = read('data/oe'+suffix+'.wav','r')[1]
    oh[i] = read('data/oh'+suffix+'.wav','r')[1]
    oo[i] = read('data/oo'+suffix+'.wav','r')[1]
    uu[i] = read('data/uu'+suffix+'.wav','r')[1]
    yy[i] = read('data/yy'+suffix+'.wav','r')[1]
data = np.concatenate((aa,ee,eh,ii,oe,oh,oo,uu,yy))
# FFT and real ceptrum of sounds #
fft_dim = 32
voyelles_FFT=np.zeros([900,1024])
voyelles_FFT_reduit=np.zeros([900,fft_dim])
log_FFT=np.zeros([900,1024])
voyelles_CEPSTR=np.zeros([900,1024])
voyelles_CEPSTR_reduit=np.zeros([900,31])
for j in range(0,900,1):
voyelles_FFT[j] = abs(np.fft.fft(np.hamming(1024)*data[j],1024))
voyelles_FFT_reduit[j] = abs(np.fft.fft(np.hamming(1024)*data[j],fft_dim))
for j in range(0,900,1):
for k in range(0,1024,1):
log_FFT[j,k] = math.log(voyelles_FFT[j,k])
for j in range(0,900,1):
voyelles_CEPSTR[j] = abs(np.fft.ifft(log_FFT[j],1024))
voyelles_CEPSTR_reduit[j] = voyelles_CEPSTR[j,1:32]
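# A vectorized sketch of the same real-cepstrum pipeline as the loops above
# (x -> |FFT(hamming*x)| -> log -> |IFFT|), equivalent up to floating-point
# error; kept as a standalone helper so the loop-based computation stays as-is.
def real_cepstrum(frames, n=1024):
    spectra = np.abs(np.fft.fft(np.hamming(n) * frames, n, axis=1))
    return np.abs(np.fft.ifft(np.log(spectra), n, axis=1))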
# Target #
voyelles_target_names = np.array(["aa","ee","eh","ii","oe","oh","oo","uu","yy"], dtype='a2')
voyelles_target = (np.arange(900) // 100).astype('i')  # class label = block of 100 samples
# Preprocessing #
#voyelles_data_scaled = scale(voyelles_FFT_reduit)
voyelles_data_scaled = scale(voyelles_CEPSTR_reduit)
# PCA
voyelles_pca = PCA(n_components=len(np.unique(voyelles_target))).fit_transform(voyelles_data_scaled)
# LDA (scikit-learn caps n_components at n_classes - 1)
voyelles_lda = LinearDiscriminantAnalysis(n_components=len(np.unique(voyelles_target)) - 1)
voyelles_lda_data = voyelles_lda.fit(voyelles_data_scaled, voyelles_target).transform(voyelles_data_scaled)
# DATA USED #
voyelles_data = voyelles_lda_data
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(voyelles_data, voyelles_target)))
X_train = voyelles_data[train_index]
y_train = voyelles_target[train_index]
X_test = voyelles_data[test_index]
y_test = voyelles_target[test_index]
n_classes = len(np.unique(y_train))
mlp = MLPClassifier(hidden_layer_sizes=(64,64,64), max_iter=20, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Score sur apprentissage : %f " % mlp.score(X_train, y_train))
print("Score sur test: %f " % mlp.score(X_test, y_test))
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, mlp.predict(X_test))
np.set_printoptions(precision=2)
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=voyelles_target_names, normalize=True,
                      title='Normalized confusion matrix')
plt.show()
|
{"hexsha": "bbfc609288080466d77f6ead511117fc736d4be9", "size": 6082, "ext": "py", "lang": "Python", "max_stars_repo_path": "Multilayer_Perceptron.py", "max_stars_repo_name": "BrunoDatoMeneses/Pattern-Recognition-vowel-work", "max_stars_repo_head_hexsha": "9eed7db4fb8818880339341d9599fa3e1df61ec5", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Multilayer_Perceptron.py", "max_issues_repo_name": "BrunoDatoMeneses/Pattern-Recognition-vowel-work", "max_issues_repo_head_hexsha": "9eed7db4fb8818880339341d9599fa3e1df61ec5", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Multilayer_Perceptron.py", "max_forks_repo_name": "BrunoDatoMeneses/Pattern-Recognition-vowel-work", "max_forks_repo_head_hexsha": "9eed7db4fb8818880339341d9599fa3e1df61ec5", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5539906103, "max_line_length": 107, "alphanum_fraction": 0.6390989806, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1826}
|
import streamlit as st
import numpy as np
from keras import models
from keras.preprocessing.image import img_to_array
from PIL import Image
from concat import concat_imgs
MODEL_PATH = 'att_resnet_best_weights.34-0.5114'
def main():
st.title('InstaVis Checker')
st.subheader('Are you a guru of creativity or just another guy with boring photos?')
st.subheader('Let\'s find out!')
uploaded_files = st.file_uploader("Upload 9 images from your Instagram profile",
type=["jpg", "jpeg", "png"], accept_multiple_files=True)
err = st.text(' ')
if len(uploaded_files) > 0:
if len(uploaded_files) != 9:
            err.text('You should upload exactly 9 images in one of the following formats: jpg, jpeg or png')
else:
err.text(' ')
uploaded_imgs = []
for uploaded_file in uploaded_files:
uploaded_imgs.append(Image.open(uploaded_file))
print(uploaded_imgs)
merged_img = concat_imgs(uploaded_imgs)
st.subheader("That's how potential subscribers see your content")
st.image(merged_img, channels="RGB")
submit = st.button('Am I a genius creator?')
if submit:
st.text("A few seconds, please, and we'll find out!")
class_lbl = predict(merged_img)
if class_lbl == 8: # good_food category was predicted
st.subheader('This is incredibly delicious food. Good job! We are confident you will be successful! ❤️')
elif class_lbl == 7: # bad_food category was predicted
                    st.subheader(
                        'We can see that you have tried, but do not stop!💪 You can make your food look much more appetizing!')
                    st.subheader('Check out some examples for inspiration ❤️')
                    st.text('We are sure that you will succeed and gain many new subscribers!')
elif class_lbl == 5: # good_brand category was predicted
st.subheader(
'Even big brands can envy your visuals. Good job! We are confident that you will be successful!❤️')
elif class_lbl == 4 or class_lbl == 6: # bad_brand or bad_beauty_services category were predicted
st.subheader('You can do better!💪')
st.text('On social networks, we can\'t touch the goods, so we have to trust our eyes.')
st.text('Try to show your potential customers your product from different angles.')
st.subheader('Check out some examples for inspiration ❤️')
st.text('We are sure that you will succeed and new clients will not keep you waiting!')
elif class_lbl == 0: # bad_thematic category was predicted
st.subheader('You can do better!💪')
st.text('The quality of your content is equal to the quality of your services.')
st.text('Try to make your content more diverse')
st.subheader('Check out some examples for inspiration ❤️')
                    st.text('We are sure that you will succeed and gain many new subscribers!')
elif class_lbl == 1: # good_thematic category was predicted
st.subheader('You have a good thematic blog that can successfully compete with market leaders!❤️')
elif class_lbl == 3: # good_lifestyle category was predicted
st.subheader(
'We are delighted! You are a visual guru and perfectly combine objects and colors in the photo!❤️')
elif class_lbl == 2: # bad_lifestyle category was predicted
st.subheader('You can do better!💪')
st.text('Sorry, but your content seems a little boring and monotonous 🥱')
st.text('Try to limit the range of colors, add a variety of objects to the photo,')
st.text('and experiment with the angle. An unusual approach can lead you to success!')
st.subheader('Check out some examples for inspiration ❤️')
                    st.text('We are sure that you will succeed and gain many new subscribers!')
                st.subheader('Loading more examples for your inspiration')
if class_lbl == 0 or class_lbl == 1:
st.write('Great! Uploading good thematic blog example for you...')
best_merge = Image.open('good_examples/good_thematic.jpg')
best_merge = best_merge.resize((500, 500))
st.image(best_merge, channels="RGB")
elif class_lbl == 2 or class_lbl == 3:
st.write('Great! Uploading good lifestyle blog example for you...')
best_merge = Image.open('good_examples/good_lifestyle.jpg')
best_merge = best_merge.resize((500, 500))
st.image(best_merge, channels="RGB")
elif class_lbl == 4 or class_lbl == 5 or class_lbl == 6:
st.write('Great! Uploading good commercial blog example for you...')
best_merge = Image.open('good_examples/good_commerce.jpg')
best_merge = best_merge.resize((500, 500))
st.image(best_merge, channels="RGB")
else:
st.write('Great! Uploading good food blog example for you...')
best_merge = Image.open('good_examples/good_food.jpg')
best_merge = best_merge.resize((500, 500))
st.image(best_merge, channels="RGB")
@st.cache
def predict(merged_img: object) -> int:
img = prepare_img(merged_img)
    model = load_model(MODEL_PATH)
prediction = model.predict(img)
return np.argmax(prediction[0])
@st.cache(allow_output_mutation=True)
def load_model(model_path: str):
model = models.load_model(model_path)
return model
def prepare_img(img):
img = img.resize((160, 160))
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = img / 255.
return img
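# Note: prepare_img mirrors common Keras preprocessing -- resize to the
# model's expected 160x160 input, convert to an array, add a batch axis,
# and rescale pixel values to [0, 1].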
if __name__ == "__main__":
main()
|
{"hexsha": "9c5889946a49684b069891e18bdffcffa05c9efe", "size": 5755, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/instavis_check_app.py", "max_stars_repo_name": "nast1415/instavis-check", "max_stars_repo_head_hexsha": "28620b321bb47ea631bd558f5f62a35427701331", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-17T22:12:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-17T22:12:58.000Z", "max_issues_repo_path": "app/instavis_check_app.py", "max_issues_repo_name": "nast1415/instavis-check", "max_issues_repo_head_hexsha": "28620b321bb47ea631bd558f5f62a35427701331", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/instavis_check_app.py", "max_forks_repo_name": "nast1415/instavis-check", "max_forks_repo_head_hexsha": "28620b321bb47ea631bd558f5f62a35427701331", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2692307692, "max_line_length": 116, "alphanum_fraction": 0.6394439618, "include": true, "reason": "import numpy", "num_tokens": 1286}
|
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
import matplotlib.pyplot as plt
import matplotlib
from scipy.cluster import hierarchy
from matplotlib import cm
from adjustText import adjust_text
import scipy
import matplotlib.patheffects as path_effects
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import euclidean_distances
import os
def plotting(df,
stimulators,
inhibitors,
bar_df,
saveDir,
metric = 'euclidean',
linkageMethod = 'ward',
n_clusters = 10,
adjustText = True,
majorMetric = "Correlation",
suffix = ""):
'''df has all genes in rows, and receptors in columns, values are gene expression correlation'''
def addDendro(fig, dataGenes, M, coords, linewidth=0.25, adjustText = adjustText):
genesSubset = list(stimulators) + list(inhibitors)
ax = fig.add_axes(coords, frame_on=False)
Z = hierarchy.linkage(np.nan_to_num(M, nan=max(M)), method=linkageMethod, optimal_ordering=True)
origLineWidth = matplotlib.rcParams['lines.linewidth']
matplotlib.rcParams['lines.linewidth'] = linewidth
cmap = cm.gist_ncar(np.linspace(0, 0.5, n_clusters + 1))
hierarchy.set_link_color_palette([matplotlib.colors.rgb2hex(rgb[:3]) for rgb in cmap])
D = hierarchy.dendrogram(Z, ax=ax, color_threshold = (Z[-n_clusters,2] + Z[-n_clusters+1,2]) / 2, above_threshold_color='k', orientation='top')
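        # color_threshold sits midway between the merge heights of the last
        # n_clusters and n_clusters-1 merges, so the dendrogram is coloured
        # into exactly n_clusters flat clusters.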
hierarchy.set_link_color_palette(None)
matplotlib.rcParams['lines.linewidth'] = origLineWidth
reindexed = pd.Index(dataGenes[D['leaves']]).reindex(pd.Index(genesSubset).intersection(dataGenes))
genes = reindexed[0][reindexed[1] > -1].values
locations = reindexed[1][reindexed[1] > -1]
if True:
tickLabelsColors = np.array(['navy']*len(dataGenes), dtype=np.dtype('U20'))
xtickslabels = np.array(['']*len(dataGenes), dtype=np.dtype('U20'))
for gene, location in zip(genes, locations):
xtickslabels[location] = gene
tickLabelsColors[location] = 'green' if (gene in stimulators) else 'red'
ax.set_xticklabels(xtickslabels, fontsize=4)
ax.tick_params(axis='y', labelsize=4, width=0.25, length=1)
ax.set_yticklabels([])
ax.set_yticks([])
for xtick, color in zip(ax.get_xticklabels(), tickLabelsColors):
xtick.set_color(color)
texts = []
origPos = []
for xpos, xtext, color in zip(ax.get_xticks(), xtickslabels, tickLabelsColors):
if xtext != '':
texts.append(ax.text(xpos, -2., xtext, fontsize=6, rotation=90, va='top', ha='center', color=color))
origPos.append(xpos)
ticks_x = []
ticks_y = []
vdistance = -0.01 * ax.get_ylim()[1]
for tick in ax.get_xticks():
ticks_x.extend([tick, tick, None])
ticks_y.extend([0, vdistance, None])
ax.plot(ticks_x, ticks_y, color='k', lw=0.4, clip_on=False)
ax.set_xticklabels([])
if adjustText:
adjust_text(texts, va='top', ha='center', autoalign='x', lim=400, only_move={'text':'x'})
v = 0.04 * ax.get_ylim()[1]
for text, opos in zip(texts, origPos):
text._y = -v
ax.plot([text._x, opos], [text._y, 0.], color=text._color, lw=0.5, clip_on=False)
if True:
clusters = scipy.cluster.hierarchy.fcluster(Z, t=n_clusters, criterion='maxclust')[D['leaves']] - 1
clusterBoundaries = (np.where(clusters - np.roll(clusters, 1) != 0)[0]/ len(D['leaves'])) * ax.get_xlim()[1]
clusterBoundaries = np.append(clusterBoundaries, ax.get_xlim()[1])
clusterCenters = clusterBoundaries[:-1] + ((clusterBoundaries - np.roll(clusterBoundaries, 1))/2.)[1:]
vposition = (Z[-n_clusters,2] + Z[-n_clusters+1,2]) / 5
for cluster, position in zip(np.unique(clusters), clusterCenters):
ltext = ax.text(position, vposition, '#%s' % cluster, fontsize=7, color='white', va='center', ha='center')
ltext.set_path_effects([path_effects.Stroke(linewidth=1., foreground='k'), path_effects.Normal()])
return {'order': D['leaves'],
'M': squareform(M)[:, D['leaves']][D['leaves'], :],
'genes': genes,
'allGenes': dataGenes[D['leaves']],
'locations': locations,
'tickLabelsColors': tickLabelsColors,
'xtickslabels': xtickslabels,
'clusters': clusters,
'clusterBoundaries': clusterBoundaries / 10.,
'clusterCenters': clusterCenters / 10.}
def addHeatmap(fig, dataArgs, coords, adjustText = adjustText):
M = dataArgs['M']
order = dataArgs['order']
genes = dataArgs['genes']
locations = dataArgs['locations']
tickLabelsColors = dataArgs['tickLabelsColors']
tickslabels = dataArgs['xtickslabels']
clusters = dataArgs['clusters']
clusterBoundaries = dataArgs['clusterBoundaries']
clusterCenters = dataArgs['clusterCenters']
ax = fig.add_axes(coords, frame_on=False)
masked_M = np.ma.array(M, mask=np.isnan(M))
cmap = plt.cm.Greens_r
cmap.set_bad('red')
im = ax.imshow(masked_M, cmap=cmap, aspect='auto', interpolation='None', extent=(-0.5, M.shape[0] - 0.5, M.shape[1] - 0.5, -0.5))
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Selected x tick labels
if True:
ax.set_xticks(range(len(tickslabels)))
ax.set_xticklabels(tickslabels, fontsize=4)
for xtick, color in zip(ax.get_xticklabels(), tickLabelsColors):
xtick.set_color(color)
texts = []
origPos = []
for xpos, xtext, color in zip(ax.get_xticks(), tickslabels, tickLabelsColors):
if xtext != '':
texts.append(ax.text(xpos, 1.01*ax.get_ylim()[0], xtext, fontsize=6, rotation=90, va='top', ha='center', color=color))
origPos.append(xpos)
ax.set_xticklabels([])
ax.set_xticks([])
if adjustText:
adjust_text(texts, va='top', ha='center', autoalign='x', lim=400, only_move={'text':'x'})
v = ax.get_ylim()[0]
for text, opos in zip(texts, origPos):
text._y = 1.01 * v
ax.plot([text._x, opos], [text._y, v], color=text._color, lw=0.5, clip_on=False)
# Selected y tick labels
if True:
ax.set_yticks(range(len(tickslabels)))
ax.set_yticklabels(tickslabels, fontsize=4)
for ytick, color in zip(ax.get_yticklabels(), tickLabelsColors):
ytick.set_color(color)
texts = []
origPos = []
for ypos, xtext, color in zip(ax.get_yticks(), tickslabels, tickLabelsColors):
if xtext != '':
texts.append(ax.text(-0.01*ax.get_xlim()[1], ypos, xtext, fontsize=6, va='center', ha='right', color=color))
origPos.append(ypos)
ax.set_yticklabels([])
ax.set_yticks([])
if adjustText:
adjust_text(texts, va='center', ha='right', autoalign='y', lim=400, only_move={'text':'y'})
v = -0.01 * ax.get_xlim()[1]
for text, opos in zip(texts, origPos):
text._x = v
ax.plot([0., text._x], [opos, text._y], color=text._color, lw=0.5, clip_on=False)
# Clusters outline boxes
if True:
for cluster, position in zip(np.unique(clusters), clusterCenters):
ltext = ax.text(position, position, '#%s' % cluster, fontsize=7, color='white', va='center', ha='center')
ltext.set_path_effects([path_effects.Stroke(linewidth=1., foreground='k'), path_effects.Normal()])
clusterBoundaries -= 0.5
for i in range(len(np.unique(clusters))):
ax.plot([clusterBoundaries[i], clusterBoundaries[i+1], clusterBoundaries[i+1], clusterBoundaries[i], clusterBoundaries[i]],
[clusterBoundaries[i], clusterBoundaries[i], clusterBoundaries[i+1], clusterBoundaries[i+1], clusterBoundaries[i]],
'--', lw=0.75, color='k', clip_on=False)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# Colorbar
if True:
ax = fig.add_axes([0.85, 0.1, 0.025, 0.6], frame_on=False)
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
clb = fig.colorbar(im, ax=ax, fraction=0.4, label='Eucl. dist. of gene expr. %s dist.' % majorMetric)
clb.ax.tick_params(labelsize=6)
return
def addBar(fig, dataArgs, mode, coords):
M = dataArgs['M']
order = dataArgs['order']
genes = dataArgs['genes']
locations = dataArgs['locations']
tickLabelsColors = dataArgs['tickLabelsColors']
tickslabels = dataArgs['xtickslabels']
clusters = dataArgs['clusters']
clusterBoundaries = dataArgs['clusterBoundaries']
clusterCenters = dataArgs['clusterCenters']
allGenes = dataArgs['allGenes']
ax = fig.add_axes(coords, frame_on=True)
ax.set_xlim([min(clusterBoundaries), max(clusterBoundaries)])
if mode == 0:
ylabel='Binomial\nP-Val'
data =pd.read_hdf(bar_df, key='df')["BN"].reindex(allGenes).values
elif mode == 1:
ylabel='Upregulated\nP-Val'
data =pd.read_hdf(bar_df, key='df')["P-Val"].reindex(allGenes).values
elif mode == 2:
ylabel='Fold\nChange'
data =pd.read_hdf(bar_df, key='df')["LFC"].reindex(allGenes).values
elif mode == 3:
ylabel='Fraction'
data =pd.read_hdf(bar_df, key='df')["Fraction"].reindex(allGenes).values
elif mode == 4:
clust_df = pd.DataFrame(clusters,index = allGenes,columns = ["Clust"])
#clust_df = clust_df.sort_values("Order")
#clust_df["Clust"] = cluster
all_values = []
for c in set(clust_df["Clust"]):
cg = clust_df.loc[clust_df["Clust"] == c].index
euclid_df = euclidean_distances(df[cg].fillna(0).T)
euclid_df = pd.DataFrame(euclid_df,index= cg,columns=cg)
euclid_values = euclid_df.mean().values
all_values += list(euclid_values)
data = [max(all_values) - x for x in all_values]
ylabel = "Cluster\nCloseness"
elif mode == 5:
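            # NOTE: this mode relies on dendro_dist, which is only populated in
            # the "if False:" block near the end of plotting(); enable that
            # block before calling addBar with mode 5.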
clust_df = pd.DataFrame(clusters,index = allGenes,columns = ["Clust"])
#clust_df = clust_df.sort_values("Order")
#clust_df["Clust"] = cluster
all_values = []
euclid_df = euclidean_distances(df.loc[allGenes,allGenes].fillna(0).T)
euclid_df = pd.DataFrame(euclid_df,allGenes,allGenes)
all_values = []
for g in allGenes:
neigh = dendro_dist.sort_values(g).head(21).tail(20).index
if g == "KDR":
print(neigh)
all_values.append(euclid_df.loc[g,neigh].mean())
data = [max(all_values) - x for x in all_values]
ylabel = "Neighborhood\nCloseness"
elif mode == 6:
ylabel='Angiogenesis\nLiterature'
data =pd.read_hdf(bar_df, key='df')["angiogenesis"].reindex(allGenes).values
elif mode == 7:
ylabel='Endothelial\nLiterature'
data =pd.read_hdf(bar_df, key='df')["endothelial"].reindex(allGenes).values
elif mode == 8:
ylabel='Conservation'
data =pd.read_hdf(bar_df, key='df')["DD-Conservation"].reindex(allGenes).values
elif mode == 9:
ylabel='ED_Conservation'
data =pd.read_hdf(bar_df, key='df')["ED-Conservation"].reindex(allGenes).values
elif mode == 10:
ylabel='CC_Conservation'
data =pd.read_hdf(bar_df, key='df')["CC-Conservation"].reindex(allGenes).values
elif mode == 11:
            ylabel='All 4 Window\nAverage'
data =pd.read_hdf(bar_df, key='df')["All-4_WS21"].reindex(allGenes).values
elif mode == 12:
            ylabel='Ind 3 Window\nAverage'
data =pd.read_hdf(bar_df, key='df')["Independent-3_WS21"].reindex(allGenes).values
ax.bar(range(len(clusters)), data, width=ax.get_xlim()[1]/len(clusters), color=tickLabelsColors)
ax.set_xticks([])
ax.set_xticklabels([])
yticks = np.round(ax.get_ylim(), 1)
ax.set_yticks(yticks)
ax.set_yticklabels(yticks)
ax.tick_params(axis='y', labelsize=6, width=0.75, length=3)
if True:
ylim = ax.get_ylim()
for i in range(1, len(np.unique(clusters))):
ax.plot([clusterBoundaries[i] - 0.5]*2, [ylim[0], ylim[1]], '--', lw=0.5, color='k', clip_on=False)
ax.text(-0.01, 0.5, ylabel, fontsize=8, rotation=0, va='center', ha='right', transform=ax.transAxes)
return
mmin, mmax = np.nanmin(np.nanmin(df.values)), np.nanmax(np.nanmax(df.values))
"""
if majorMetric == 'correlation':
missingFillValue = 1.0
elif majorMetric == 'cosine':
missingFillValue = 1.0
elif majorMetric == 'euclidean':
missingFillValue = mmax
else:
missingFillValue = mmax
"""
missingFillValue = 0
    print('Filling missing values with:', missingFillValue, flush=True)
M = pdist(df.fillna(missingFillValue).values.T, metric=metric)
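    # pdist on the transposed, gap-filled matrix gives condensed pairwise
    # distances between receptors (the columns of df); addDendro expands it
    # with squareform for the heatmap.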
fig = plt.figure(figsize=(8, 12))
dataArgs = addDendro(fig, df.columns, M, [0.1, 0.8, 0.75, 0.165])
addHeatmap(fig, dataArgs, [0.1, 0.1, 0.75, 0.4])
print("Making Dendro Dist")
dendro_dist =[]
if False:
allGenes = dataArgs['allGenes']
print(len(allGenes))
for i in range(len(allGenes)):
dendro_dist.append([])
for j in range(len(allGenes)):
g1 = allGenes[i]
g2 = allGenes[j]
dendro_dist[i].append(abs(i-j))
dendro_dist = pd.DataFrame(dendro_dist,allGenes,allGenes)
print("Plotting")
st, delta = 0.52, 0.035
addBar(fig, dataArgs, 0, [0.1, st + 0*(delta + 0.015), 0.75, delta])
addBar(fig, dataArgs, 3, [0.1, st + 1*(delta + 0.015), 0.75, delta])
addBar(fig, dataArgs, 8, [0.1, st + 2*(delta + 0.015), 0.75, delta])
#addBar(fig, dataArgs, 11, [0.1, st + 3*(delta + 0.015), 0.75, delta])
addBar(fig, dataArgs, 12, [0.1, st + 3*(delta + 0.015), 0.75, delta])
addBar(fig, dataArgs, 11, [0.1, st + 4*(delta + 0.015), 0.75, delta])
print("plotting last")
#addBar(fig, dataArgs, 8, [0.1, st + 4*(delta + 0.015), 0.75, delta])
#print(dataArgs["clusters"])
#print(len(dataArgs["clusters"]))
#print(df.shape)
#print(dataArgs["allGenes"])
#print(dataArgs["order"])
fig.suptitle('Data: %s (%s receptors)' % (suffix, df.shape[1]), fontsize=12)
fig.savefig(os.path.join(saveDir, '%s dendrogram-heatmap-%s.png' % (suffix, majorMetric)), dpi=600)
plt.close(fig)
return
|
{"hexsha": "bae12fe5b5e25a7ad1586eac9e995250346177e4", "size": 15856, "ext": "py", "lang": "Python", "max_stars_repo_path": "validation/plotting_08_02_2020_from_SD.py", "max_stars_repo_name": "sdomanskyi/decneo", "max_stars_repo_head_hexsha": "c3b78d7cb24fbecde317850ea5068394029a7d03", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "validation/plotting_08_02_2020_from_SD.py", "max_issues_repo_name": "sdomanskyi/decneo", "max_issues_repo_head_hexsha": "c3b78d7cb24fbecde317850ea5068394029a7d03", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "validation/plotting_08_02_2020_from_SD.py", "max_forks_repo_name": "sdomanskyi/decneo", "max_forks_repo_head_hexsha": "c3b78d7cb24fbecde317850ea5068394029a7d03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5524296675, "max_line_length": 151, "alphanum_fraction": 0.5662209889, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 4208}
|
#! /usr/bin/env python3
# coding: utf-8
import logging
import numpy
from src.raw.rawmap import RawMap
class Waterfalls():
@property
def waterfalls(self):
"""Access the waterfalls property"""
return self._waterfalls
    def __init__(self, rawmap: RawMap, map_width: int, map_height: int):
        self._rawmap = rawmap
        self._map_width = map_width
        self._map_height = map_height
        self._waterfalls = None  # populated by calculate_waterfalls()
def calculate_waterfalls(self):
# Retrieve working variables
rivermap = self._rawmap.rivermap
cliffmap = self._rawmap.cliffs
map_width, map_height = self._map_width, self._map_height
# Init result
waterfalls = numpy.zeros((map_width, map_height), numpy.float64)
# Test each cliff if there is a river on it
for x in range(map_width):
for y in range(map_height):
if cliffmap[x, y] > 0 and rivermap[x, y] > 0:
waterfalls[x, y] = cliffmap[x, y]
# Set the waterfall result
self._waterfalls = waterfalls
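    # A vectorized sketch of the same computation (a hedged alternative,
    # not a tested drop-in replacement):
    #
    #     self._waterfalls = numpy.where(
    #         (self._rawmap.cliffs > 0) & (self._rawmap.rivermap > 0),
    #         self._rawmap.cliffs, 0.0)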
|
{"hexsha": "c90d474a1e23a534443ed4adf8b47bd174ba2f0f", "size": 1061, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/raw/waterfalls.py", "max_stars_repo_name": "Leikt/map_generator", "max_stars_repo_head_hexsha": "86c6359ed84056f32642cb7e23db855beba62923", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-09T10:02:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T10:02:04.000Z", "max_issues_repo_path": "src/raw/waterfalls.py", "max_issues_repo_name": "Leikt/map_generator", "max_issues_repo_head_hexsha": "86c6359ed84056f32642cb7e23db855beba62923", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/raw/waterfalls.py", "max_forks_repo_name": "Leikt/map_generator", "max_forks_repo_head_hexsha": "86c6359ed84056f32642cb7e23db855beba62923", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4722222222, "max_line_length": 72, "alphanum_fraction": 0.6248821866, "include": true, "reason": "import numpy", "num_tokens": 265}
|
"""
Collect functions related to the stereographic projection.
A stereographic projection is a mapping between a direction in 3D space and a
position in a 2D plane. The direction can be described in polar coordinates
by (theta,phi), where theta denotes the angle between the direction and
the z axis, and phi denotes the azimuthal angle. The point in the 2D
plane can be described by its Cartesian coordinates (x,y) or its polar
coordinates (r,phi). phi is the same for the direction and the point.
"""
import numpy as np
def stereographic_projection(theta, phi=None):
"""
Perform the stereographic projection (theta,phi) -> (r,phi).
:param theta: Polar angle (rad)
:param phi: Azimuthal angle (rad)
:return (r,phi): Polar coordinates in the plane
"""
r = 2 * np.tan(0.5*theta)
if phi is None:
return r
else:
return r, phi
def inverse_stereographic_projection(r, phi=None):
"""
Perform the inverse stereographic projection (r,phi) -> (theta,phi).
:param r: Distance from origin
:param phi: Azimuthal angle (rad)
:return (theta,phi): polar coordinates of the direction
"""
theta = 2 * np.arctan(0.5*r)
if phi is None:
return theta
else:
return theta, phi
def cartesian(r, phi):
"""
Convert polar to Cartesian coordinates.
:param r: Distance from origin
    :param phi: Azimuthal angle
:return (x,y): Cartesian coordinates
"""
x = r * np.cos(phi)
y = r * np.sin(phi)
return x, y
def polar(x, y):
"""
Convert Cartesian to polar coordinates.
:param x: Coordinate along the x axis
:param y: Coordinate along the y axis
:return (r,phi): (radius, azimuthal angle)
"""
r = np.hypot(x, y)
phi = np.arctan2(y, x)
return r, phi
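# Round-trip sanity check of the mappings above (illustrative values):
#
#     theta, phi = 0.7, 1.2
#     r, phi2 = stereographic_projection(theta, phi)
#     inverse_stereographic_projection(r)   # -> 0.7 (= theta)
#     polar(*cartesian(r, phi))             # -> (r, 1.2)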
# not sure where we need this:
def intersect_bounding_box(circle, bbox):
xc, yc, r = circle
xmin, ymin, xmax, ymax = bbox
intersections = []
# intersections with x=xmin and x=xmax
for x in (xmin, xmax):
discriminant = r**2 - (x-xc)**2
if discriminant >= 0:
for sign in (1, -1):
y = yc + sign * np.sqrt(discriminant)
if y >= ymin and y <= ymax:
intersections.append((x, y))
# intersections with y=ymin and y=ymax
for y in (ymin, ymax):
discriminant = r**2 - (y-yc)**2
if discriminant >= 0:
for sign in (1, -1):
x = xc + sign * np.sqrt(discriminant)
if x >= xmin and x <= xmax:
intersections.append((x, y))
# remove duplicates (could be in the corners)
intersections = tuple(set(intersections))
return intersections
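# Example (hypothetical values): the unit circle (0, 0, 1) inside the bounding
# box (-1, -1, 1, 1) touches all four sides, so the tangent points
# (+/-1, 0) and (0, +/-1) are returned.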
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # Minimal demo of the projection functions: rings of constant polar angle
    # map to concentric circles in the projection plane.
    phis = np.linspace(0., 2*np.pi, 100)
    for theta in (0.2, 0.6, 1.0):
        r, _ = stereographic_projection(np.full_like(phis, theta), phis)
        xs, ys = cartesian(r, phis)
        plt.plot(xs, ys)
    plt.gca().set_aspect('equal')
    plt.show()
|
{"hexsha": "cbf5af628a4ec5d10ee5d8b86af0029e8e5fe8c9", "size": 2937, "ext": "py", "lang": "Python", "max_stars_repo_path": "stereographic.py", "max_stars_repo_name": "hobler/chanmap", "max_stars_repo_head_hexsha": "6d1e2f7dd42a36cf0d127a421060e18d96888746", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stereographic.py", "max_issues_repo_name": "hobler/chanmap", "max_issues_repo_head_hexsha": "6d1e2f7dd42a36cf0d127a421060e18d96888746", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stereographic.py", "max_forks_repo_name": "hobler/chanmap", "max_forks_repo_head_hexsha": "6d1e2f7dd42a36cf0d127a421060e18d96888746", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4485981308, "max_line_length": 77, "alphanum_fraction": 0.6128702758, "include": true, "reason": "import numpy", "num_tokens": 776}
|
[STATEMENT]
lemma "\<exists>F::nat set set. finite F \<and> infinite (shattered_by F)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
let ?F = "{odd -` {True}, odd -` {False}}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
have 0: "finite ?F"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite {odd -` {True}, odd -` {False}}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
finite {odd -` {True}, odd -` {False}}
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
let ?f = "\<lambda>n::nat. {n}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
let ?N = "range ?f"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
have "inj (\<lambda>n. {n})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inj (\<lambda>n. {n})
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
inj (\<lambda>n. {n})
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
with infinite_iff_countable_subset[of ?N]
[PROOF STATE]
proof (chain)
picking this:
infinite (range (\<lambda>n. {n})) = (\<exists>f. inj f \<and> range f \<subseteq> range (\<lambda>n. {n}))
inj (\<lambda>n. {n})
[PROOF STEP]
have infinite_N: "infinite ?N"
[PROOF STATE]
proof (prove)
using this:
infinite (range (\<lambda>n. {n})) = (\<exists>f. inj f \<and> range f \<subseteq> range (\<lambda>n. {n}))
inj (\<lambda>n. {n})
goal (1 subgoal):
1. infinite (range (\<lambda>n. {n}))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
infinite (range (\<lambda>n. {n}))
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
have F_shatters_any_singleton: "?F shatters {n::nat}" for n
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {odd -` {True}, odd -` {False}} shatters {n}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. {odd -` {True}, odd -` {False}} shatters {n}
[PROOF STEP]
have Pow_n: "Pow {n} = {{n}, {}}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Pow {n} = {{n}, {}}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Pow {n} = {{n}, {}}
goal (1 subgoal):
1. {odd -` {True}, odd -` {False}} shatters {n}
[PROOF STEP]
have 1: "Pow {n} \<subseteq> ?F \<inter>* {n}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
proof (cases "odd n")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
2. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
odd n
goal (2 subgoals):
1. odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
2. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
from True
[PROOF STATE]
proof (chain)
picking this:
odd n
[PROOF STEP]
have "(odd -` {False}) \<inter> {n} = {}"
[PROOF STATE]
proof (prove)
using this:
odd n
goal (1 subgoal):
1. odd -` {False} \<inter> {n} = {}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
odd -` {False} \<inter> {n} = {}
goal (2 subgoals):
1. odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
2. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
hence 0: "{} \<in> ?F \<inter>* {n}"
[PROOF STATE]
proof (prove)
using this:
odd -` {False} \<inter> {n} = {}
goal (1 subgoal):
1. {} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
{} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
goal (2 subgoals):
1. odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
2. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
from True
[PROOF STATE]
proof (chain)
picking this:
odd n
[PROOF STEP]
have "(odd -` {True}) \<inter> {n} = {n}"
[PROOF STATE]
proof (prove)
using this:
odd n
goal (1 subgoal):
1. odd -` {True} \<inter> {n} = {n}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
odd -` {True} \<inter> {n} = {n}
goal (2 subgoals):
1. odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
2. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
hence 1: "{n} \<in> ?F \<inter>* {n}"
[PROOF STATE]
proof (prove)
using this:
odd -` {True} \<inter> {n} = {n}
goal (1 subgoal):
1. {n} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
{n} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
goal (2 subgoals):
1. odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
2. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
from 0 1 Pow_n
[PROOF STATE]
proof (chain)
picking this:
{} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
{n} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
Pow {n} = {{n}, {}}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
{} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
{n} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
Pow {n} = {{n}, {}}
goal (1 subgoal):
1. Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
goal (1 subgoal):
1. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> odd n
goal (1 subgoal):
1. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
from False
[PROOF STATE]
proof (chain)
picking this:
\<not> odd n
[PROOF STEP]
have "(odd -` {True}) \<inter> {n} = {}"
[PROOF STATE]
proof (prove)
using this:
\<not> odd n
goal (1 subgoal):
1. odd -` {True} \<inter> {n} = {}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
odd -` {True} \<inter> {n} = {}
goal (1 subgoal):
1. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
hence 0: "{} \<in> ?F \<inter>* {n}"
[PROOF STATE]
proof (prove)
using this:
odd -` {True} \<inter> {n} = {}
goal (1 subgoal):
1. {} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
{} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
goal (1 subgoal):
1. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
from False
[PROOF STATE]
proof (chain)
picking this:
\<not> odd n
[PROOF STEP]
have "(odd -` {False}) \<inter> {n} = {n}"
[PROOF STATE]
proof (prove)
using this:
\<not> odd n
goal (1 subgoal):
1. odd -` {False} \<inter> {n} = {n}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
odd -` {False} \<inter> {n} = {n}
goal (1 subgoal):
1. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
hence 1: "{n} \<in> ?F \<inter>* {n}"
[PROOF STATE]
proof (prove)
using this:
odd -` {False} \<inter> {n} = {n}
goal (1 subgoal):
1. {n} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
{n} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
goal (1 subgoal):
1. \<not> odd n \<Longrightarrow> Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
from 0 1 Pow_n
[PROOF STATE]
proof (chain)
picking this:
{} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
{n} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
Pow {n} = {{n}, {}}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
{} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
{n} \<in> {odd -` {True}, odd -` {False}} \<inter>* {n}
Pow {n} = {{n}, {}}
goal (1 subgoal):
1. Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
goal (1 subgoal):
1. {odd -` {True}, odd -` {False}} shatters {n}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
Pow {n} \<subseteq> {odd -` {True}, odd -` {False}} \<inter>* {n}
goal (1 subgoal):
1. {odd -` {True}, odd -` {False}} shatters {n}
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
{odd -` {True}, odd -` {False}} shatters {n}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
{odd -` {True}, odd -` {False}} shatters {?n3}
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
{odd -` {True}, odd -` {False}} shatters {?n3}
[PROOF STEP]
have "?N \<subseteq> shattered_by ?F"
[PROOF STATE]
proof (prove)
using this:
{odd -` {True}, odd -` {False}} shatters {?n3}
goal (1 subgoal):
1. range (\<lambda>n. {n}) \<subseteq> shattered_by {odd -` {True}, odd -` {False}}
[PROOF STEP]
unfolding shattered_by_def
[PROOF STATE]
proof (prove)
using this:
{odd -` {True}, odd -` {False}} shatters {?n3}
goal (1 subgoal):
1. range (\<lambda>n. {n}) \<subseteq> {A. {odd -` {True}, odd -` {False}} shatters A}
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
range (\<lambda>n. {n}) \<subseteq> shattered_by {odd -` {True}, odd -` {False}}
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
from 0 infinite_super[OF this infinite_N]
[PROOF STATE]
proof (chain)
picking this:
finite {odd -` {True}, odd -` {False}}
infinite (shattered_by {odd -` {True}, odd -` {False}})
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
finite {odd -` {True}, odd -` {False}}
infinite (shattered_by {odd -` {True}, odd -` {False}})
goal (1 subgoal):
1. \<exists>F. finite F \<and> infinite (shattered_by F)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>F. finite F \<and> infinite (shattered_by F)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4810, "file": "Sauer_Shelah_Lemma_Shattering", "length": 58}
|
import unittest
import numpy as np
import tensorflow as tf
import twodlearn as tdl
import twodlearn.convnet
import twodlearn.bayesnet.bayesnet
import twodlearn.bayesnet.gaussian_process
import twodlearn.templates.bayesnet
class ConvnetTest(unittest.TestCase):
def test_error(self):
layer1 = tdl.convnet.Conv2DLayer(kernel_size=[5, 5])
with self.assertRaises(tdl.core.exceptions.ArgumentNotProvided):
layer1.kernel.init()
def test_conv1x1(self):
layer = tdl.convnet.Conv1x1Proj(
units=3, activation=tf.keras.layers.ReLU())
input = np.random.normal(size=(32, 28, 28, 10)).astype(np.float32)
proj = layer(input)
assert proj.shape.as_list() == [32, 28, 28, 3]
assert (proj.shape.as_list() ==
layer.compute_output_shape(input.shape).as_list())
layer_t = layer.get_transpose()
tran = layer_t(proj)
assert input.shape == tuple(tran.shape.as_list())
assert tran.shape.as_list() == [32, 28, 28, 10]
assert (tran.shape.as_list() ==
layer_t.compute_output_shape(proj.shape).as_list())
assert ((set(tdl.core.get_trainable(layer)) &
set(tdl.core.get_trainable(layer_t))) ==
set([layer.kernel]))
def test_conv1x1_bias(self):
layer = tdl.convnet.Conv1x1Proj(
units=3, activation=tf.keras.layers.ReLU(),
use_bias=False)
assert not tdl.core.is_property_initialized(layer, 'bias')
input = np.random.normal(size=(32, 28, 28, 10)).astype(np.float32)
proj = layer(input)
assert layer.bias is None
layer2 = tdl.convnet.Conv1x1Proj(
units=3, activation=tf.keras.layers.ReLU(),
bias=None)
assert layer2.use_bias is False
def test_conv(self):
with tf.Session().as_default():
input = tf.convert_to_tensor(
np.random.normal(size=(32, 28, 28, 10)).astype(np.float32))
layer_tf = tf.keras.layers.Conv2D(
filters=15, kernel_size=[5, 5],
strides=[2, 3], padding='valid', dilation_rate=[1, 1])
output_tf = layer_tf(input)
layer_tdl = tdl.convnet.Conv2DLayer(
filters=15, kernel_size=[5, 5],
strides=[2, 3], padding='valid', dilation_rate=[1, 1],
kernel=layer_tf.kernel
)
output_tdl = layer_tdl(input)
tdl.core.initialize_variables(layer_tf)
tdl.core.initialize_variables(layer_tdl)
max_error = tf.reduce_max(tf.abs(output_tdl - output_tf)).eval()
assert max_error < 1e-10
def test_conv2(self):
with tf.Session().as_default():
input = tf.convert_to_tensor(
np.random.normal(size=(32, 28, 28, 10)).astype(np.float32))
layer_tf = tf.keras.layers.Conv2D(
filters=15, kernel_size=[5, 5],
strides=[2, 3], padding='valid', dilation_rate=[1, 1],
use_bias=False)
_ = layer_tf(input)
layer_tdl = tdl.convnet.Conv2DLayer(
filters=15, kernel_size=[5, 5],
strides=[2, 3], padding='valid', dilation_rate=[1, 1],
use_bias=False
)
assert not tdl.core.is_property_initialized(layer_tdl, 'bias')
_ = layer_tdl(input)
assert layer_tdl.bias is None
assert (layer_tf.kernel.shape.as_list() ==
layer_tdl.kernel.shape.as_list())
layer2 = tdl.convnet.Conv2DLayer(
filters=3, kernel_size=[5, 5],
strides=[2, 3], padding='valid', dilation_rate=[1, 1],
bias=None)
assert layer2.use_bias is False
def test_convtrans1(self):
with tf.Session().as_default():
input = tf.convert_to_tensor(
np.random.normal(size=(32, 8, 8, 10)).astype(np.float32))
layer_tf = tf.keras.layers.Conv2DTranspose(
filters=5,
kernel_size=[5, 5],
strides=(2, 2),
use_bias=True)
output_tf = layer_tf(input)
layer_tdl = tdl.convnet.Conv2DTranspose(
filters=5,
kernel_size=[5, 5],
strides=[2, 2],
use_bias=True)
output_tdl = layer_tdl(input)
assert (layer_tf.kernel.shape.as_list() ==
layer_tdl.kernel.shape.as_list())
assert (layer_tf.bias.shape.as_list() ==
layer_tdl.bias.shape.as_list())
assert (output_tf.shape.as_list() == output_tdl.shape.as_list())
def test_convtrans2(self):
with tf.Session().as_default():
input = tf.convert_to_tensor(
np.random.normal(size=(32, 8, 8, 10)).astype(np.float32))
layer_tf = tf.keras.layers.Conv2DTranspose(
filters=5,
kernel_size=[5, 5],
strides=(2, 2),
use_bias=True)
output_tf = layer_tf(input)
layer_tdl = tdl.convnet.Conv2DTranspose(
filters=5,
kernel_size=[5, 5],
kernel=layer_tf.kernel,
strides=[2, 2],
use_bias=True)
output_tdl = layer_tdl(input)
tdl.core.initialize_variables(layer_tf)
tdl.core.initialize_variables(layer_tdl)
max_error = tf.reduce_max(tf.abs(output_tdl - output_tf)).eval()
assert max_error < 1e-10
def test_convtrans3(self):
with tf.Session().as_default():
input = tf.placeholder(tf.float32, shape=[None, 8, 8, 10])
layer_tdl = tdl.convnet.Conv2DTranspose(
filters=5,
kernel_size=[5, 5],
strides=[2, 2],
use_bias=True)
output_tdl = layer_tdl(input)
tdl.core.initialize_variables(layer_tdl)
dynamic_shape = tf.shape(output_tdl).eval(
{input: np.random.normal(size=[32, 8, 8, 10])})
assert all(dynamic_shape[1:] == output_tdl.shape[1:].as_list())
assert output_tdl.shape.as_list() == [None, 19, 19, 5]
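            # With the Keras default padding='valid', the transpose-conv output
            # size follows out = (in - 1) * stride + kernel, i.e.
            # (8 - 1) * 2 + 5 = 19 along each spatial axis.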
    def test_lazy_init(self):
with tf.Session().as_default():
layer = tdl.convnet.Conv2DLayer(
filters=5, kernel_size=[5, 5], strides=[2, 2],
kernel={'trainable': False},
use_bias=True
)
assert not tdl.core.is_property_initialized(layer, 'kernel')
assert not tdl.core.is_property_initialized(layer, 'bias')
layer.build(input_shape=[None, 8, 8, 10])
assert tdl.core.is_property_initialized(layer, 'kernel')
assert tdl.core.is_property_initialized(layer, 'bias')
assert layer.kernel.trainable is False
    def test_lazy_init2(self):
with tf.Session().as_default():
layer = tdl.convnet.Conv2DLayer(
filters=5, kernel_size=[5, 5], strides=[2, 2],
kernel={'trainable': False}
)
assert not tdl.core.is_property_initialized(layer, 'kernel')
assert not tdl.core.is_property_initialized(layer, 'bias')
layer.build(input_shape=[None, 8, 8, 10])
assert tdl.core.is_property_initialized(layer, 'kernel')
assert tdl.core.is_property_initialized(layer, 'bias')
assert layer.kernel.trainable is False
def test_bias(self):
with tf.Session().as_default():
with self.assertRaises(ValueError):
layer = tdl.convnet.Conv2DLayer(
filters=5, kernel_size=[5, 5], strides=[2, 2],
kernel={'trainable': False},
bias={'trainable': True},
use_bias=True
)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "9efed4651173a9adbbedcddec8a2bbd4e92c50dd", "size": 8199, "ext": "py", "lang": "Python", "max_stars_repo_path": "twodlearn/tests/convnet_test.py", "max_stars_repo_name": "danmar3/twodlearn", "max_stars_repo_head_hexsha": "02b23bf07618d5288e338bd8f312cc38aa58c195", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "twodlearn/tests/convnet_test.py", "max_issues_repo_name": "danmar3/twodlearn", "max_issues_repo_head_hexsha": "02b23bf07618d5288e338bd8f312cc38aa58c195", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "twodlearn/tests/convnet_test.py", "max_forks_repo_name": "danmar3/twodlearn", "max_forks_repo_head_hexsha": "02b23bf07618d5288e338bd8f312cc38aa58c195", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2628865979, "max_line_length": 77, "alphanum_fraction": 0.5479936578, "include": true, "reason": "import numpy", "num_tokens": 1877}
|
from biom import load_table
import numpy as np
import pandas as pd
import os
import argparse
'''
This file does the following:
- breaks out the biom tables into subjects and
collection sites (stool, saliva, etc.).
- Adds taxonomy information to the files.
- Sorts the tables by collection date.
'''
def get_collection_days(table):
    '''
    Handle the study that records a collection day (an int stored as a
    string) instead of a timestamp. Empty strings are mapped to day 0.
    '''
str_vals = [m['collection_day'] for m in table.metadata()]
int_vals = [int(s) if s != '' else 0 for s in str_vals]
return int_vals
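# Example: get_collection_days maps ['3', '', '12'] -> [3, 0, 12]
# (missing collection days default to 0).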
def merge_dicts(*dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts. This file only
takes one biom file at a time becuase there is a chance of naming
collisions with doing multiple files.
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
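# Example: merge_dicts({'a': 1, 'b': 2}, {'b': 3}) -> {'a': 1, 'b': 3}
# (later dicts win on key collisions).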
def main():
# Read in our arguments
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--biom", type=str,
help="The BIOM file to handle.")
parser.add_argument("-t", "--taxonomy", type=str,
help="The file or directory of taxonomy data.")
parser.add_argument("-f", "--field", type=str, default='collection_timestamp',
help="The field to look in for timestamp info.")
args = parser.parse_args()
biom_name = args.biom
    biom_base = os.path.splitext(biom_name)[0]  # strip only the extension, keeping dots in the base name
tax_name = args.taxonomy
field = args.field
if field not in ['collection_timestamp', 'collection_day']:
raise ValueError('field must be in [collection_timestamp, collection_day]')
'''
Load in the taxonomy data.
'''
if os.path.isdir(tax_name):
tax_files = [os.path.join(tax_name, f) for f in os.listdir(tax_name) if f.endswith('.txt')]
tax_dicts = []
# Read in the files.
for file in tax_files:
tax_file = np.loadtxt(file, delimiter='\t', dtype=str)
tax_dicts.append(dict(zip(tax_file[:, 0], tax_file[:, 1])))
# Combine the dictionaries.
mapping = merge_dicts(*tax_dicts)
elif os.path.isfile(tax_name):
tax_file = np.loadtxt(tax_name, delimiter='\t', dtype=str)
mapping = dict(zip(tax_file[:, 0], tax_file[:, 1]))
else:
raise ValueError('Please check the file or directory being supplied '
'for the taxonomy.')
print('Finished loading taxonomy')
'''
Break out into each sample based on the metadata.
'''
table = load_table(biom_name)
output_tables = []
output_fnames = []
all_subjects = list(set([m['host_subject_id'] for m in table.metadata()]))
all_samples = list(set([m['sample_type'] for m in table.metadata()]))
all_subjects = [a for a in all_subjects if not a.lower().startswith('blank')]
print('Subjects:\n{}'.format(all_subjects))
print('Samples:\n{}'.format(all_samples))
# Subset each of the files.
for i, subject in enumerate(all_subjects):
subject_fxn = lambda val, id_, md: md['host_subject_id'] == '{}'.format(subject)
subject_sub = table.filter(subject_fxn, inplace=False)
for sample in all_samples:
sample_fxn = lambda val, id_, md: md['sample_type'] == '{}'.format(sample)
sample_sub = subject_sub.filter(sample_fxn, inplace=False)
# If it's non-empty then add it to our output.
if sample_sub.shape[1] > 0:
output_tables.append(sample_sub)
new_name = biom_base + '_{}_{}'.format(subject, sample)
output_fnames.append(new_name)
print('Finished {} of {} subjects'.format(i + 1, len(all_subjects)))
print(output_tables)
print(output_fnames)
'''
Add the taxonomy and sort
'''
for j, table in enumerate(output_tables):
# Tables with only a single sample error out when converting to a
# DataFrame, so skip them.
if table.shape[1] > 1:
df = pd.DataFrame(table.to_dataframe())
tv = df.values
tcols = df.columns
# Add taxonomy to each sample.
indices = list(df.index.values)
new_index = [mapping[i] for i in indices]
to_save = pd.DataFrame(tv, index=new_index, columns=tcols)
# Transpose because we will store the dates as a column and sort.
to_save = to_save.T
# If we want to sort the dates. Some samples don't have correct date
# information associated so this doesn't work.
if field == 'collection_timestamp':
dates = [m['collection_timestamp'] for m in table.metadata()]
to_save['date'] = pd.to_datetime(dates, infer_datetime_format=True,
errors='coerce')
else:
# For samples that don't have proper timestamps, fall back to
# integer collection days.
dates = get_collection_days(table)
print(dates)
to_save['date'] = dates
# Drop values with no timestamp.
to_save.dropna(subset=['date'], inplace=True)
# Sort by date.
to_save.sort_values(by=['date'], inplace=True)
# Capture the values to use as columns.
dates = to_save['date']
# Get rid of the column now that the values are sorted.
to_save.drop(['date'], axis=1, inplace=True)
# Transpose back to original shape.
to_save = to_save.T
# Reassign the time values as columns.
to_save.columns = dates
# Print relevant data and save the file.
print(to_save.shape)
output_fname = output_fnames[j] + '_sorted_tax.csv'
print(output_fname)
to_save.to_csv(output_fname)
if __name__ == '__main__':
main()
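# Example invocation (file names are placeholders; -f defaults to collection_timestamp):
# python host_site_separator_time_sorting.py -b study.biom -t taxonomy_dir/ -f collection_day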
|
{"hexsha": "4fc72ab8e9dd1ceeca482e9f81cc1c5d756d938d", "size": 6090, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_preprocessing/host_site_separator_time_sorting.py", "max_stars_repo_name": "michaelwiest/microbiome_rnn", "max_stars_repo_head_hexsha": "6109da20c49e3027f746257aee90cadc423cc75b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_preprocessing/host_site_separator_time_sorting.py", "max_issues_repo_name": "michaelwiest/microbiome_rnn", "max_issues_repo_head_hexsha": "6109da20c49e3027f746257aee90cadc423cc75b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_preprocessing/host_site_separator_time_sorting.py", "max_forks_repo_name": "michaelwiest/microbiome_rnn", "max_forks_repo_head_hexsha": "6109da20c49e3027f746257aee90cadc423cc75b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7898089172, "max_line_length": 99, "alphanum_fraction": 0.60591133, "include": true, "reason": "import numpy", "num_tokens": 1340}
|
function [inflMap, colXCoord, rowYCoord, mi] = getLSInfluenceMapFactorMovie(LS)
%"getLSInfluenceMap"
% Gets an image of the influence generated by the beam described in LS.
% Use getDICOMLeafPositions to generate LS.
%
%JRA&KZ 02/8/05
%
%Usage:
%   [inflMap, colXCoord, rowYCoord, mi] = getLSInfluenceMapFactorMovie(LS);
%
% Copyright 2010, Joseph O. Deasy, on behalf of the CERR development team.
%
% This file is part of The Computational Environment for Radiotherapy Research (CERR).
%
% CERR development has been led by: Aditya Apte, Divya Khullar, James Alaly, and Joseph O. Deasy.
%
% CERR has been financially supported by the US National Institutes of Health under multiple grants.
%
% CERR is distributed under the terms of the Lesser GNU Public License.
%
% This version of CERR is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% CERR is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
% without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
% See the GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with CERR. If not, see <http://www.gnu.org/licenses/>.
%Maximum precision of leaf position, in mm. Varian End and Side Accuracy 1.0 mm at
%isocenter. End and Side Repeatability 0.5 mm
precision = .5;
%Get x max, min and round to precision value.
%If X jaw limits don't exist in the DICOM, derive them from the leaf positions.
if ~isfield(LS,'xLimits')
xMax = ceil(max(vertcat(LS.xLeafPositions{:}),[],1) / precision) * precision;
xMin = floor(min(vertcat(LS.xLeafPositions{:}),[],1) / precision) * precision;
LS.xLimits{1}(1) = xMin;
LS.xLimits{1}(2) = xMax;
end
xMax = ceil(max(vertcat(LS.xLimits{:}),[],1) / precision) * precision;
xMin = floor(min(vertcat(LS.xLimits{:}),[],1) / precision) * precision;
fieldSize.x = max(xMax) - min(xMin);
fieldLim.x = [max(xMax) min(xMin)];
yMax = ceil(max(vertcat(LS.yLimits{:}),[],1) / precision) * precision;
yMin = floor(min(vertcat(LS.yLimits{:}),[],1) / precision) * precision;
fieldSize.y = max(yMax) - min(yMin);
fieldLim.y = [max(yMax) min(yMin)];
yRes = precision;
nyElements = ceil(fieldSize.y/yRes);
xRes = precision;
nxElements = ceil(fieldSize.x/xRes);
inflMap=zeros(nyElements, nxElements);
colDividerXCoord = linspace(fieldLim.x(2), fieldLim.x(1), nxElements+1);
rowDividerYCoord = linspace(fieldLim.y(2), fieldLim.y(1), nyElements+1);
if isfield(LS, 'yLeafPositions')
rowLeafPositions = round(interp1(rowDividerYCoord, 1:nyElements+1, LS.yLeafPositions,'linear', 'extrap'));
rowLeafPositions = clip(rowLeafPositions, 1, nyElements+1, 'limits');
leafBoundariesToKeep = [diff(rowLeafPositions)>0;true];
rowLeafPositions = rowLeafPositions(leafBoundariesToKeep);
leavesToKeep = leafBoundariesToKeep(1:end-1);
else
LS.xLeafPositions{1} = [xMin xMax];
LS.meterSetWeight = {1};
rowLeafPositions = [1 nyElements+1];
leavesToKeep = 1;
end
if length(LS.meterSetWeight) == 1
doses = LS.meterSetWeight{:};
else
doses = [0 diff([LS.meterSetWeight{:}])];
end
backupMap = inflMap;
%h = waitbar(0,['Generating Fluence Map From MLC Positions For Beam ',num2str(beamIndex)],'Name','Please wait...');
for i=1:length(LS.xLeafPositions)
mapMovie = backupMap;
inflMap = backupMap;
nLeaves = length(LS.xLeafPositions{i})/2;
if length(LS.xLimits) > 1
jpL = LS.xLimits{i}(1);
jpR = LS.xLimits{i}(2);
else
jpL = LS.xLimits{1}(1);
jpR = LS.xLimits{1}(2);
end
lpL = LS.xLeafPositions{i}(1:nLeaves);
lpR = LS.xLeafPositions{i}(nLeaves+1:end);
lpLK = lpL(leavesToKeep);
lpRK = lpR(leavesToKeep);
MLCopeningSize(:,i) = lpRK - lpLK;
lpLCols = interp1(colDividerXCoord, 1:nxElements+1, lpLK, 'linear', 'extrap');
lpRCols = interp1(colDividerXCoord, 1:nxElements+1, lpRK, 'linear', 'extrap');
%Column divider positions of jaws.
jpLCol = interp1(colDividerXCoord, 1:nxElements+1, jpL, 'linear', 'extrap');
jpRCol = interp1(colDividerXCoord, 1:nxElements+1, jpR, 'linear', 'extrap');
jpLCol = round(jpLCol);
jpRCol = round(jpRCol);
lpLCols = clip(lpLCols, jpLCol, jpRCol, 'limits');
lpRCols = clip(lpRCols, jpLCol, jpRCol, 'limits');
lpLCols = round(lpLCols);
lpRCols = round(lpRCols);
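%Head-scatter correction fit coefficients (output ratio for MLC fields, Zhu, Med Phys).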
A1 = 0.0013;
A2 = 0.078;
K = 1.5;
LAMDA = 7.69;
for j=1:length(lpLCols)
%HCF from output ratio for MLC fields Zhu, MedPhys
F_X = abs(lpLCols(j) - (lpRCols(j)-1))*precision/10;
F_Y = abs(rowLeafPositions(j+1) - rowLeafPositions(j))*precision/10;
F = (1+K)*F_X * F_Y/(K*F_X + F_Y);
HCF = (1+A1*F)*(1+A2*(erf(F/LAMDA))^2)/((1+A1*10)*(1+A2*(erf(10/LAMDA))^2));
inflMap(rowLeafPositions(j):rowLeafPositions(j+1)-1, lpLCols(j):lpRCols(j)-1) = inflMap(rowLeafPositions(j):rowLeafPositions(j+1)-1, lpLCols(j):lpRCols(j)-1) + HCF*doses(i);
inflMap(rowLeafPositions(j):rowLeafPositions(j+1)-1, jpLCol:lpLCols(j)-1) = inflMap(rowLeafPositions(j):rowLeafPositions(j+1)-1, jpLCol:lpLCols(j)-1);
inflMap(rowLeafPositions(j):rowLeafPositions(j+1)-1, lpRCols(j):jpRCol-1) = inflMap(rowLeafPositions(j):rowLeafPositions(j+1)-1, lpRCols(j):jpRCol-1);
end
%waitbar(i/length(LS.xLeafPositions));
% frame = inflMap;
%imagesc(inflMap);
%mi(:,:,i) = inflMap;
mapMovie = inflMap;
mapMovie(mapMovie == 0) = 1;
mapMovie(mapMovie ~= 1) = 2;
%colormap([0 0 0; 1 1 1]);
%mi(:,:,i) = inflMap;
mi(i) = im2frame(mapMovie, [0 0 1; 1 0 0]);
% drawnow;
% pause(.006);
end
%close(h);
colXCoord = colDividerXCoord(1:end-1) + precision/2;
rowYCoord = rowDividerYCoord(1:end-1) + precision/2;
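% Usage sketch (the argument to getDICOMLeafPositions is hypothetical):
%   LS = getDICOMLeafPositions(dcmBeam);
%   [inflMap, colX, rowY, mi] = getLSInfluenceMapFactorMovie(LS);
%   movie(mi)   % play the per-control-point fluence frames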
|
{"author": "cerr", "repo": "CERR", "sha": "d320754abad9dcb78508ab69f33ae9f644202114", "save_path": "github-repos/MATLAB/cerr-CERR", "path": "github-repos/MATLAB/cerr-CERR/CERR-d320754abad9dcb78508ab69f33ae9f644202114/IMRTP/recompDose/FFDC/getLSInfluenceMapFactorMovie.m"}
|
[STATEMENT]
lemma (in abelian_group) four_elem_comm:
assumes "a \<in> carrier G" and "b \<in> carrier G" and "c \<in> carrier G" and "d \<in> carrier G"
shows "a \<ominus> c \<oplus> b \<ominus> d = a \<oplus> b \<ominus> c \<ominus> d"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<ominus> c \<oplus> b \<ominus> d = a \<oplus> b \<ominus> c \<ominus> d
[PROOF STEP]
using assms a_assoc a_comm
[PROOF STATE]
proof (prove)
using this:
a \<in> carrier G
b \<in> carrier G
c \<in> carrier G
d \<in> carrier G
\<lbrakk>?x \<in> carrier G; ?y \<in> carrier G; ?z \<in> carrier G\<rbrakk> \<Longrightarrow> ?x \<oplus> ?y \<oplus> ?z = ?x \<oplus> (?y \<oplus> ?z)
\<lbrakk>?x \<in> carrier G; ?y \<in> carrier G\<rbrakk> \<Longrightarrow> ?x \<oplus> ?y = ?y \<oplus> ?x
goal (1 subgoal):
1. a \<ominus> c \<oplus> b \<ominus> d = a \<oplus> b \<ominus> c \<ominus> d
[PROOF STEP]
by (simp add: a_minus_def)
|
{"llama_tokens": 394, "file": "Localization_Ring_Localization", "length": 2}
|
module misc
use precision, only : i4k, i8k, r4k, r8k
implicit none
!! author: Ian Porter
!! date: 12/13/2017
!!
!! this module contains miscellaneous routines used to read/write to the .vtk file
!!
private
public :: interpret_string, def_len, to_uppercase, to_lowercase, char_dt, sleep_for, convert_to_string, trim_from_string
interface get_string_value
procedure :: get_string_char
procedure :: get_string_int
procedure :: get_string_real
end interface
interface convert_to_string
procedure :: convert_real32_to_string
procedure :: convert_real64_to_string
procedure :: convert_real64_array_to_string
procedure :: convert_int32_to_string
procedure :: convert_int64_to_string
procedure :: convert_logical_to_string
end interface convert_to_string
integer(i4k), parameter :: def_len = 1024 !! default character length for each line in file
type char_dt
!! character string dt
character(len=:), allocatable :: text
end type char_dt
interface
module subroutine interpret_string (line, datatype, ignore, separator, reals, ints, chars)
implicit none
!! interprets a string (typically read from an input file) into a user-defined # of character and/or integer inputs
character(len=*), intent(inout) :: line
character(len=*), intent(in), optional :: ignore
character(len=*), intent(in), optional :: separator
character(len=1), dimension(:), intent(in) :: datatype
integer(i4k), dimension(:), allocatable, optional :: ints
real(r8k), dimension(:), allocatable, optional :: reals
type(char_dt), dimension(:), allocatable, optional :: chars
end subroutine interpret_string
module subroutine reduce_string (string, sep)
implicit none
character(len=:), allocatable, intent(inout) :: string
character(len=*), intent(in) :: sep
end subroutine reduce_string
module subroutine get_string_char (string, sep, name)
implicit none
character(len=*), intent(in) :: string, sep
character(len=:), allocatable, intent(out) :: name
end subroutine get_string_char
module subroutine get_string_int (string, sep, name)
implicit none
character(len=*), intent(in) :: string, sep
integer(i4k), intent(out) :: name
end subroutine get_string_int
module subroutine get_string_real (string, sep, name)
implicit none
character(len=*), intent(in) :: string, sep
real(r8k), intent(out) :: name
end subroutine get_string_real
module function convert_real32_to_string (var) result (string)
implicit none
!! converts a real32 to a character string
real(r4k), intent(in) :: var !! real variable
character(len=:), allocatable :: string !! character string
end function convert_real32_to_string
module function convert_real64_to_string (var) result (string)
implicit none
!! converts a real64 to a character string
real(r8k), intent(in) :: var !! real variable
character(len=:), allocatable :: string !! character string
end function convert_real64_to_string
module function convert_real64_array_to_string (var) result (string)
implicit none
!! converts a real64 to a character string
real(r8k), dimension(:), intent(in) :: var !! real array
character(len=:), allocatable :: string !! character string
end function convert_real64_array_to_string
module function convert_int32_to_string (var) result (string)
implicit none
!! converts an int32 to a character string
integer(i4k), intent(in) :: var !! integer variable
character(len=:), allocatable :: string !! character string
end function convert_int32_to_string
module function convert_int64_to_string (var) result (string)
implicit none
!! converts an int64 to a character string
integer(i8k), intent(in) :: var !! integer variable
character(len=:), allocatable :: string !! character string
end function convert_int64_to_string
module function convert_logical_to_string (var) result (string)
implicit none
!! converts a logical to a character string
logical, intent(in) :: var !! logical variable
character(len=:), allocatable :: string !! character string
end function convert_logical_to_string
pure module function to_uppercase (string) result (new_string)
implicit none
!! author: Ian Porter
!! date: 01/23/2019
!!
!! this function changes lowercase text in a string to uppercase text
!!
character(len=*), intent(in) :: string
character(len=:), allocatable :: new_string
end function to_uppercase
pure module function to_lowercase (string) result (new_string)
implicit none
!! author: Ian Porter
!! date: 01/23/2019
!!
!! this function changes uppercase text in a string to lowercase text
!!
character(len=*), intent(in) :: string
character(len=:), allocatable :: new_string
end function to_lowercase
module subroutine sleep_for (msecs)
implicit none
!! author: zaak beekman, paratools
!! date: 8/8/2018
!!
!! this performs a 'sleep' for a specified amount of time
!!
integer(i4k), intent(in) :: msecs !! # of milliseconds to sleep for
end subroutine
recursive module function trim_from_string (string, item, case_sensitive) result (new_string)
implicit none
!! author: Ian Porter, gse
!! date: 11/06/2019
!!
!! this function trims <item> from a string
!!
character(len=*), intent(in) :: string !! string to be converted
character(len=*), intent(in) :: item !! item to be trimmed from string
logical, intent(in), optional :: case_sensitive
!! flag for whether or not to search using case sensitivity (false by default)
character(len=:), allocatable :: new_string !! new string
end function trim_from_string
end interface
end module misc
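!! usage sketch (hypothetical driver; assumes the submodule implementations
!! of these interfaces are compiled and linked):
!!
!!     program misc_demo
!!         use misc, only : to_uppercase, convert_to_string
!!         implicit none
!!         write(*,*) to_uppercase('vtkmofo')     ! prints "VTKMOFO"
!!         write(*,*) convert_to_string(.true.)   ! logical -> string
!!     end program misc_demo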
|
{"hexsha": "e68ef990c9247217ec1d166816b86a20c79ec661", "size": 6874, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/utilities/Misc.f90", "max_stars_repo_name": "porteri/vtkmofo", "max_stars_repo_head_hexsha": "f4c188ecbbc4620df0bf7962241cdc81c9dbe1b7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2017-12-20T18:01:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-11T08:05:11.000Z", "max_issues_repo_path": "src/utilities/Misc.f90", "max_issues_repo_name": "porteri/vtkmofo", "max_issues_repo_head_hexsha": "f4c188ecbbc4620df0bf7962241cdc81c9dbe1b7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2017-12-11T15:48:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-11T13:54:43.000Z", "max_forks_repo_path": "src/utilities/Misc.f90", "max_forks_repo_name": "nrc-fuels/vtkmofo", "max_forks_repo_head_hexsha": "d6c12c45ae9ab47628b1dcee1e05e7ac9ef89100", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2017-12-20T18:10:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T22:25:21.000Z", "avg_line_length": 39.9651162791, "max_line_length": 127, "alphanum_fraction": 0.6019784696, "num_tokens": 1461}
|
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,sys
import numpy as np
from typing import Any
root_path = os.getcwd()
sys.path.append(root_path)
sys.path.append(os.path.join(root_path,'demos/HFL'))
from abc import ABC, abstractmethod
from demos.HFL.common.hfl_message import HFL_MSG
class Raw_Msg_Observer(ABC):
@abstractmethod
def receive_message(self, msg_data:Any) -> Any:
pass
class Msg_Handler(ABC):
@abstractmethod
def handle_message(self, msg_type, msg_data:HFL_MSG) -> None:
pass
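# Minimal sketch (hypothetical subclass, not part of this module) of how a
# concrete handler could satisfy the abstract interface:
#
# class LoggingHandler(Msg_Handler):
#     def handle_message(self, msg_type, msg_data: HFL_MSG) -> None:
#         print('received {}: {}'.format(msg_type, msg_data))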
|
{"hexsha": "c362f685f7fa870576c4df27037f8c4f28d6b9ae", "size": 1067, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/HFL/common/msg_handler.py", "max_stars_repo_name": "monadyn/fedlearn-algo", "max_stars_repo_head_hexsha": "c4459d421139b0bb765527d636fff123bf17bda4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 86, "max_stars_repo_stars_event_min_datetime": "2021-07-20T01:54:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-06T04:02:40.000Z", "max_issues_repo_path": "demos/HFL/common/msg_handler.py", "max_issues_repo_name": "fedlearnAI/fedlearnalgo", "max_issues_repo_head_hexsha": "63d9ceb64d331ff2b5103ae49e54229cad7e2095", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-07-23T21:22:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-12T15:48:35.000Z", "max_forks_repo_path": "demos/HFL/common/msg_handler.py", "max_forks_repo_name": "fedlearnAI/fedlearnalgo", "max_forks_repo_head_hexsha": "63d9ceb64d331ff2b5103ae49e54229cad7e2095", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2021-07-20T07:15:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-22T20:04:57.000Z", "avg_line_length": 30.4857142857, "max_line_length": 74, "alphanum_fraction": 0.7525773196, "include": true, "reason": "import numpy", "num_tokens": 249}
|
import numpy as np
import tempfile
def default_params(model, time_scale, max_days, px_count, prng_seed):
"""The default particle filter parameters.
Memory usage can reach extreme levels with a large number of particles,
and so it may be necessary to keep only a sliding window of the entire
particle history matrix in memory.
:param model: The system model.
:param time_scale: The simulation time scale.
:param max_days: The number of contiguous days that must be kept in memory
(e.g., the largest observation period).
:param px_count: The number of particles.
:param prng_seed: The seed for the pseudo-random number generators.
"""
details = model.describe()
p_min = [vmin for (name, smooth, vmin, vmax) in details]
p_max = [vmax for (name, smooth, vmin, vmax) in details]
params = {
'resample': {
# Resample when the effective number of particles is 25%.
'threshold': 0.25,
# The deterministic method is the best resampling method, see the
# appendix of Kitagawa 1996 (DOI:10.2307/1390750).
'method': 'deterministic',
# Resample from the weighted discrete probability distribution,
# rather than using a continuous approximation (regularisation).
'regularisation': False,
# By default, continue without regularisation if the parameter
# covariance matrix is not positive definite.
'regularise_or_fail': False,
# The minimum range of values that a parameter must have in order
# to be subject to the post-regularised particle filter.
'reg_toln': 1e-8,
},
'hist': {
# The sliding window size, in days.
'wind_size': 2 * max_days,
# The amount to shift the sliding window, in days.
'wind_shift': max_days,
# The number of particles.
'px_count': px_count,
# The number of extra state columns, in addition to the model
# state vector. Note that this number must be at least 2, since
# the matrix must store the particle weight and parent index.
'extra_cols': 2,
# Functions that are responsible for initialising extra state
# columns (except for the particle weight and parent index).
# Mapping is name -> function.
'extra_col_fns': {},
},
# Use the provided PRNG seed, if any.
'prng_seed': prng_seed,
# Define the PRNGs that should be created.
'random': ['resample', 'model', 'hist_extra_cols'],
# Define the simulation time scale.
'component': {
'time': time_scale,
'model': model,
'random': {},
'lookup': {},
'obs': {},
'summary_monitor': {},
'summary_table': {},
},
# Simulate 5 time-steps per unit time.
# TODO: move into params['time']
'steps_per_unit': 5,
# Provide only the most recent observation period (for likelihoods).
# TODO: move into params['hist']
'last_n_periods': 1,
# Whether to reduce the estimation run so that it only extends to the
# latest forecasting date.
'minimal_estimation_run': True,
# An array that enumerates the particles.
'px_range': None,
'time': {
# The simulation period.
'start': None,
'until': None,
},
'model': {
# The lower bounds for each model parameter.
'param_min': np.array(p_min),
# The upper bounds for each model parameter.
'param_max': np.array(p_max),
# The model prior distributions.
'prior': {},
},
'data': {
# Observations data.
'obs': {},
# Lookup tables.
'lookup': {},
},
'summary': {
# If ``False`` (the default) statistics are calculated from the
# date of the first *observation*. If ``True``, statistics are
# calculated from the very beginning of the simulation period.
'from_first_day': False,
# If ``False`` (the default) statistics are calculated for the
# initial estimation simulation and for forecasting simulations.
# If ``True``, statistics are only calculated for forecasting
# simulations.
'only_forecasts': False,
'meta': {
# Additional packages whose versions should be recorded.
'packages': [],
},
},
# Observation model parameters.
'obs': {},
# Event hooks.
'hooks': {
'log_llhd': [],
},
# Directory for storing output files.
'out_dir': '.',
# Directory for storing temporary files.
'tmp_dir': tempfile.gettempdir(),
}
return params
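# Usage sketch (hypothetical model and time-scale objects; the keys shown are
# defined in the dictionary built above):
# params = default_params(model, time_scale, max_days=7, px_count=1000, prng_seed=42)
# params['time']['start'] = start_date
# params['time']['until'] = end_date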
|
{"hexsha": "6d8e3e769be2ea308a78870c1c1abd0c255ba51c", "size": 5045, "ext": "py", "lang": "Python", "max_stars_repo_path": "local_pypfilt/src/pypfilt/params.py", "max_stars_repo_name": "ruarai/epifx.covid", "max_stars_repo_head_hexsha": "be7aecbf9e86c3402f6851ea65f6705cdb59f3cf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "local_pypfilt/src/pypfilt/params.py", "max_issues_repo_name": "ruarai/epifx.covid", "max_issues_repo_head_hexsha": "be7aecbf9e86c3402f6851ea65f6705cdb59f3cf", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "local_pypfilt/src/pypfilt/params.py", "max_forks_repo_name": "ruarai/epifx.covid", "max_forks_repo_head_hexsha": "be7aecbf9e86c3402f6851ea65f6705cdb59f3cf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0396825397, "max_line_length": 78, "alphanum_fraction": 0.5690782953, "include": true, "reason": "import numpy", "num_tokens": 1069}
|
SUBROUTINE FN_MERGE( FileSpec, Path, Name, Extension, Version )
!***********************************************************************
!* Merges a File Specification from its Path, Name, Extension, and Version
!*
!* Language: Fortran
!*
!* Author: Stuart G. Mentzer
!*
!* Date: 1999/08/20
!***********************************************************************
! Headers
INCLUDE 'platform.fi'
INCLUDE 'uds_fxn.fi'
! Arguments ______________________________________________________
CHARACTER*(*) FileSpec ! File specification to create
CHARACTER*(*) Path ! File path
CHARACTER*(*) Name ! File name (without path or extension)
CHARACTER*(*) Extension ! File extension (without separator)
CHARACTER*(*) Version ! File version (without separator)
! Variables ______________________________________________________
INTEGER L, LFS
! Merge the file specification
LFS = LEN( FileSpec )
FileSpec = Path
L = LEN_TRIM( FileSpec )
IF ( ( L .GT. 0 ) .AND. ( L .LT. LFS ) ) THEN ! Has path
IF ( FileSpec(L:L) .NE. DIR_FILE_SEP ) THEN
FileSpec(L+1:) = DIR_FILE_SEP
L = LEN_TRIM( FileSpec )
END IF
END IF
IF ( L .LT. LFS ) FileSpec(L+1:) = Name
L = LEN_TRIM( FileSpec )
IF ( ( .NOT. BLANK( Extension ) ) .AND. ( L .LT. LFS ) ) THEN
! Has extension
IF ( Extension(1:1) .NE. '.' ) THEN
FileSpec(L+1:L+1) = '.'
L = L + 1
END IF
IF ( L .LT. LFS ) FileSpec(L+1:) = Extension
L = LEN_TRIM( FileSpec )
END IF
IF ( ( .NOT. BLANK( Version ) ) .AND. ( L .LT. LFS ) ) THEN
! Has version
IF ( Version(1:1) .NE. VERSION_SEP ) THEN
FileSpec(L+1:) = VERSION_SEP
L = LEN_TRIM( FileSpec )
END IF
IF ( L .LT. LFS ) FileSpec(L+1:) = Version
END IF
RETURN
END
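! Example (sketch; the separators come from platform.fi and are assumed here):
!   CALL FN_MERGE( FILESPEC, 'data', 'run01', 'uds', '3' )
!   With DIR_FILE_SEP = '/' and VERSION_SEP = ';' this yields
!   FILESPEC = 'data/run01.uds;3'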
|
{"hexsha": "7d4d70fa631ea60af60a1e5a0fd10ccd9a6de630", "size": 1946, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/fn_merge.for", "max_stars_repo_name": "DeadParrot/NHTSA-Tools", "max_stars_repo_head_hexsha": "e8de2d5aa3d6de96a858ae70ecc4e75fa3d80ac4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-03-14T03:50:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T21:45:12.000Z", "max_issues_repo_path": "src/lib/fn_merge.for", "max_issues_repo_name": "DeadParrot/NHTSA-Tools", "max_issues_repo_head_hexsha": "e8de2d5aa3d6de96a858ae70ecc4e75fa3d80ac4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/fn_merge.for", "max_forks_repo_name": "DeadParrot/NHTSA-Tools", "max_forks_repo_head_hexsha": "e8de2d5aa3d6de96a858ae70ecc4e75fa3d80ac4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-31T23:57:05.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-31T23:57:05.000Z", "avg_line_length": 28.6176470588, "max_line_length": 74, "alphanum_fraction": 0.5251798561, "num_tokens": 519}
|
module interfaceExtensionAndDelegation where
open import Data.Product
open import Data.Nat.Base
open import Data.Nat.Show
open import Data.String.Base using (String; _++_)
open import Size
open import NativeIO
open import interactiveProgramsAgda using (ConsoleInterface; _>>=_; do;
IO; return; putStrLn;
translateIOConsole )
open import objectsInAgda using (Interface; Method; Result; CellMethod;
get; put; CellResult; cellI; IOObject;
CellC; method; simpleCell )
data CounterMethod A : Set where
super : (m : CellMethod A) → CounterMethod A
stats : CounterMethod A
statsCellI : (A : Set) → Interface
Method (statsCellI A) = CounterMethod A
Result (statsCellI A) (super m) = Result (cellI A) m
Result (statsCellI A) stats = Unit
CounterC : (i : Size) → Set
CounterC = IOObject ConsoleInterface (statsCellI String)
pattern getᶜ = super get
pattern putᶜ x = super (put x)
{- Methods of CounterC are now
getᶜ (putᶜ x) stats
-}
counterCell : ∀{i} (c : CellC i) (ngets nputs : ℕ) → CounterC i
method (counterCell c ngets nputs) getᶜ =
method c get >>= λ { (s , c') →
return (s , counterCell c' (1 + ngets) nputs) }
method (counterCell c ngets nputs) (putᶜ x) =
method c (put x) >>= λ { (_ , c') →
return (_ , counterCell c' ngets (1 + nputs)) }
method (counterCell c ngets nputs) stats =
do (putStrLn ("Counted "
++ show ngets ++ " calls to get and "
++ show nputs ++ " calls to put.")) λ _ →
return (_ , counterCell c ngets nputs)
program : String → IO ConsoleInterface ∞ Unit
program arg =
let c₀ = counterCell (simpleCell "Start") 0 0 in
method c₀ getᶜ >>= λ{ (s , c₁) →
do (putStrLn s) λ _ →
method c₁ (putᶜ arg) >>= λ{ (_ , c₂) →
method c₂ getᶜ >>= λ{ (s' , c₃) →
do (putStrLn s') λ _ →
method c₃ (putᶜ "Over!") >>= λ{ (_ , c₄) →
method c₄ stats >>= λ{ (_ , c₅) →
return _ }}}}}
main : NativeIO Unit
main = translateIOConsole (program "Hello")
|
{"hexsha": "292887218748123ddfd1721e7be70d43a2d1bcc3", "size": 2269, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "presentationsAndExampleCode/agdaImplementorsMeetingGlasgow22April2016AntonSetzer/interfaceExtensionAndDelegation.agda", "max_stars_repo_name": "agda/ooAgda", "max_stars_repo_head_hexsha": "7cc45e0148a4a508d20ed67e791544c30fecd795", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2016-06-19T12:57:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-12T23:15:25.000Z", "max_issues_repo_path": "presentationsAndExampleCode/agdaImplementorsMeetingGlasgow22April2016AntonSetzer/interfaceExtensionAndDelegation.agda", "max_issues_repo_name": "agda/ooAgda", "max_issues_repo_head_hexsha": "7cc45e0148a4a508d20ed67e791544c30fecd795", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "presentationsAndExampleCode/agdaImplementorsMeetingGlasgow22April2016AntonSetzer/interfaceExtensionAndDelegation.agda", "max_forks_repo_name": "agda/ooAgda", "max_forks_repo_head_hexsha": "7cc45e0148a4a508d20ed67e791544c30fecd795", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-09-01T15:02:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T11:41:00.000Z", "avg_line_length": 33.3676470588, "max_line_length": 72, "alphanum_fraction": 0.560599383, "num_tokens": 675}
|
from __future__ import absolute_import
import os
import numpy as np
import pygame
import weakref
import carla
from carla import ColorConverter as cc
CARLA_OUT_PATH = os.environ.get("CARLA_OUT", os.path.expanduser("~/carla_out"))
if CARLA_OUT_PATH and not os.path.exists(CARLA_OUT_PATH):
os.makedirs(CARLA_OUT_PATH)
class CameraManager(object):
"""This class from carla, manual_control.py
"""
def __init__(self, parent_actor, hud):
self.image = None # need image to encode obs.
self.image_list = [] # for save images later.
self.sensor = None
self._surface = None
self._parent = parent_actor
self._hud = hud
self._recording = False
self._memory_record = False
# TODO: Make the camera positioning configurable. Toggling is already
# supported through toggle_camera
self._camera_transforms = [
carla.Transform(carla.Location(x=1.6, z=1.7)),
carla.Transform(
carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15))
]
# 0 is dashcam view; 1 is tethered view
self._transform_index = 0
self._sensors = [
['sensor.camera.rgb', cc.Raw, 'Camera RGB'],
['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)'],
['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)'],
[
'sensor.camera.depth', cc.LogarithmicDepth,
'Camera Depth (Logarithmic Gray Scale)'
],
[
'sensor.camera.semantic_segmentation', cc.Raw,
'Camera Semantic Segmentation (Raw)'
],
[
'sensor.camera.semantic_segmentation', cc.CityScapesPalette,
'Camera Semantic Segmentation (CityScapes Palette)'
], ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)']
]
world = self._parent.get_world()
bp_library = world.get_blueprint_library()
for item in self._sensors:
bp = bp_library.find(item[0])
if item[0].startswith('sensor.camera'):
bp.set_attribute('image_size_x', str(hud.dim[0]))
bp.set_attribute('image_size_y', str(hud.dim[1]))
item.append(bp)
self._index = None
self.callback_count = 0
def __del__(self):
if self.sensor is not None:
self.sensor.destroy()
def set_recording_option(self, option):
"""Set class vars to select recording method.
Option 1: save image to disk while the program runs.(Default)
Option 2: save to memory first. Save to disk when program finishes.
Args:
option (int): record method.
Returns:
N/A.
"""
# TODO: The options should be more verbose. Strings instead of ints
if option == 1:
self._recording = True
elif option == 2:
self._memory_record = True
def toggle_camera(self):
self._transform_index = (self._transform_index + 1) % len(
self._camera_transforms)
self.sensor.set_transform(
self._camera_transforms[self._transform_index])
# TODO: Remove the hardcoded int index and make it sensor_type
def set_sensor(self, index, notify=True):
index = index % len(self._sensors)
# TODO: Remove the hardcoded 0 ad use camera_type
# TODO: Use same keys as used in self._sensors
needs_respawn = True if self._index is None \
else self._sensors[index][0] != self._sensors[self._index][0]
if needs_respawn:
if self.sensor is not None:
self.sensor.destroy()
self._surface = None
self.sensor = self._parent.get_world().spawn_actor(
self._sensors[index][-1],
self._camera_transforms[self._transform_index],
attach_to=self._parent)
# We need to pass the lambda a weak reference to self to avoid
# circular reference.
weak_self = weakref.ref(self)
self.sensor.listen(
lambda image: CameraManager._parse_image(weak_self, image))
if notify:
self._hud.notification(self._sensors[index][2])
self._index = index
def next_sensor(self):
self.set_sensor(self._index + 1)
def toggle_recording(self):
self._recording = not self._recording
self._hud.notification(
'Recording %s' % ('On' if self._recording else 'Off'))
def render(self, display):
if self._surface is not None:
display.blit(self._surface, (0, 0))
@staticmethod
def _parse_image(weak_self, image):
self = weak_self()
# Bail out before touching attributes if the manager has been collected.
if not self:
return
self.image = image
self.callback_count += 1
if self._sensors[self._index][0].startswith('sensor.lidar'):
points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
points = np.reshape(points, (int(points.shape[0] / 3), 3))
lidar_data = np.array(points[:, :2])
lidar_data *= min(self._hud.dim) / 100.0
lidar_data += (0.5 * self._hud.dim[0], 0.5 * self._hud.dim[1])
lidar_data = np.fabs(lidar_data)
lidar_data = lidar_data.astype(np.int32)
lidar_data = np.reshape(lidar_data, (-1, 2))
lidar_img_size = (self._hud.dim[0], self._hud.dim[1], 3)
lidar_img = np.zeros(lidar_img_size)
lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
self._surface = pygame.surfarray.make_surface(lidar_img)
else:
image.convert(self._sensors[self._index][1])
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self._surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if self._recording:
image_dir = os.path.join(
CARLA_OUT_PATH, 'images/{}/%04d.png'.format(self._parent.id) %
image.frame_number)
image.save_to_disk(image_dir) # , env.cc
# image.save_to_disk('_out/%08d' % image.frame_number)
elif self._memory_record:
self.image_list.append(image)
else:
pass
|
{"hexsha": "047356f07dd1b1d588376e5a00a357b53f502b0d", "size": 6458, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/macad_gym/core/sensors/camera_manager.py", "max_stars_repo_name": "bbrito/macad-gym", "max_stars_repo_head_hexsha": "1a9e795e0f01e506faea9f3a04a7df9607fc0b1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/macad_gym/core/sensors/camera_manager.py", "max_issues_repo_name": "bbrito/macad-gym", "max_issues_repo_head_hexsha": "1a9e795e0f01e506faea9f3a04a7df9607fc0b1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/macad_gym/core/sensors/camera_manager.py", "max_forks_repo_name": "bbrito/macad-gym", "max_forks_repo_head_hexsha": "1a9e795e0f01e506faea9f3a04a7df9607fc0b1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9036144578, "max_line_length": 79, "alphanum_fraction": 0.5870238464, "include": true, "reason": "import numpy", "num_tokens": 1513}
|
import cPickle as pickle
import numpy as np
import argparse
from PIL import Image
import cv2
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../sunrgbd_data'))
from sunrgbd_data import sunrgbd_object
from utils import rotz, compute_box_3d, load_zipped_pickle
sys.path.append(os.path.join(BASE_DIR, '../../train'))
from box_util import box3d_iou
import roi_seg_box3d_dataset
from roi_seg_box3d_dataset import rotate_pc_along_y, NUM_HEADING_BIN
from eval_det import eval_det
from compare_matlab_and_python_eval import get_gt_cls
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default=None, help='data path for .pickle file, the one used for val in train.py [default: None]')
parser.add_argument('--result_path', default=None, help='result path for .pickle file from test.py [default: None]')
parser.add_argument('--from_rgb_detection', action='store_true', help='test from data file from rgb detection.')
FLAGS = parser.parse_args()
IMG_DIR = '/home/rqi/Data/mysunrgbd/training/image'
TEST_DATASET = roi_seg_box3d_dataset.ROISegBoxDataset(npoints=2048, split='val', rotate_to_center=True, overwritten_data_path=FLAGS.data_path, from_rgb_detection=FLAGS.from_rgb_detection)
dataset = sunrgbd_object('/home/rqi/Data/mysunrgbd', 'training')
ps_list, segp_list, center_list, heading_cls_list, heading_res_list, size_cls_list, size_res_list, rot_angle_list, score_list = load_zipped_pickle(FLAGS.result_path)
# For detection evaluation
pred_all = {}
gt_all = {}
ovthresh = 0.25
print len(segp_list), len(TEST_DATASET)
raw_input()
# Get GT boxes
print 'Construct GT boxes...'
classname_list = ['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']
"""
for i in range(len(TEST_DATASET)):
img_id = TEST_DATASET.id_list[i]
if img_id in gt_all: continue # Already counted..
gt_all[img_id] = []
objects = dataset.get_label_objects(img_id)
calib = dataset.get_calibration(img_id)
for obj in objects:
if obj.classname not in classname_list: continue
box3d_pts_2d, box3d_pts_3d = compute_box_3d(obj, calib)
box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
box3d_pts_3d_flipped = np.copy(box3d_pts_3d)
box3d_pts_3d_flipped[0:4,:] = box3d_pts_3d[4:,:]
box3d_pts_3d_flipped[4:,:] = box3d_pts_3d[0:4,:]
gt_all[img_id].append((obj.classname, box3d_pts_3d_flipped))
"""
#gt_all2 = {}
gt_cls = {}
for classname in classname_list:
gt_cls[classname] = get_gt_cls(classname)
for img_id in gt_cls[classname]:
if img_id not in gt_all:
gt_all[img_id] = []
for box in gt_cls[classname][img_id]:
gt_all[img_id].append((classname, box))
#print gt_all[1]
#print gt_all2[1]
raw_input()
# Get PRED boxes
print 'Construct PRED boxes...'
for i in range(len(TEST_DATASET)):
img_id = TEST_DATASET.id_list[i]
classname = TEST_DATASET.type_list[i]
center = center_list[i].squeeze()
ret = TEST_DATASET[i]
if FLAGS.from_rgb_detection:
rot_angle = ret[1]
else:
rot_angle = ret[7]
# Get heading angle and size
#print heading_cls_list[i], heading_res_list[i], size_cls_list[i], size_res_list[i]
heading_angle = roi_seg_box3d_dataset.class2angle(heading_cls_list[i], heading_res_list[i], NUM_HEADING_BIN)
box_size = roi_seg_box3d_dataset.class2size(size_cls_list[i], size_res_list[i])
corners_3d_pred = roi_seg_box3d_dataset.get_3d_box(box_size, heading_angle, center)
corners_3d_pred = rotate_pc_along_y(corners_3d_pred, -rot_angle)
if img_id not in pred_all:
pred_all[img_id] = []
pred_all[img_id].append((classname, corners_3d_pred, score_list[i]))
print pred_all[1]
raw_input()
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('axes', linewidth=2)
print 'Computing AP...'
rec, prec, ap = eval_det(pred_all, gt_all, ovthresh)
for classname in ap.keys():
print '%015s: %f' % (classname, ap[classname])
plt.plot(rec[classname], prec[classname], lw=3)
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall', fontsize=24)
plt.ylabel('Precision', fontsize=24)
plt.title(classname, fontsize=24)
plt.show()
raw_input()
print 'mean AP: ', np.mean([ap[classname] for classname in ap])
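# Example invocation (Python 2; pickle paths are placeholders):
# python evaluate.py --data_path val_roi.pickle --result_path test_results.pickle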
|
{"hexsha": "a891dacad8fb8f92fa089ec034e433696a6afa6f", "size": 4428, "ext": "py", "lang": "Python", "max_stars_repo_path": "sunrgbd/sunrgbd_detection/evaluate.py", "max_stars_repo_name": "dkoguciuk/frustum-pointnets", "max_stars_repo_head_hexsha": "2ffdd345e1fce4775ecb508d207e0ad465bcca80", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1426, "max_stars_repo_stars_event_min_datetime": "2018-04-13T20:38:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T06:34:54.000Z", "max_issues_repo_path": "sunrgbd/sunrgbd_detection/evaluate.py", "max_issues_repo_name": "dkoguciuk/frustum-pointnets", "max_issues_repo_head_hexsha": "2ffdd345e1fce4775ecb508d207e0ad465bcca80", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 126, "max_issues_repo_issues_event_min_datetime": "2018-04-17T07:39:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T05:33:03.000Z", "max_forks_repo_path": "sunrgbd/sunrgbd_detection/evaluate.py", "max_forks_repo_name": "dkoguciuk/frustum-pointnets", "max_forks_repo_head_hexsha": "2ffdd345e1fce4775ecb508d207e0ad465bcca80", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 568, "max_forks_repo_forks_event_min_datetime": "2018-04-13T20:41:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T11:20:56.000Z", "avg_line_length": 37.5254237288, "max_line_length": 187, "alphanum_fraction": 0.7332881662, "include": true, "reason": "import numpy", "num_tokens": 1231}
|
"""
get_line(table::Table)
Get the next line of the table by using `table.current_values`.
Call [`format_table_value`](@ref) to format each value and use the alignments to build the line so that it lines up with [`get_header`](@ref).
"""
function get_line(table::Table)
setup = table.setup
ln = ""
for c in 1:length(setup.ids)
width = setup.widths[c]
values = table.current_values
default_precision = setup.precisions[c]
if isassigned(values, c)
val = values[c]
s_val = format_table_value(width-2, get_value(val); default_precision)
else
s_val = "-"
end
padding = width - 2 - length(s_val)
alignment = setup.alignments[c]
if alignment == :center
left_padding = repeat(" ", fld(padding, 2) + 1)
right_padding = repeat(" ", cld(padding, 2) + 1)
ln = "$(ln)$(left_padding)$(s_val)$(right_padding)"
elseif alignment == :left
right_padding = repeat(" ", padding + 1)
ln = "$(ln) $(s_val)$(right_padding)"
elseif alignment == :right
left_padding = repeat(" ", padding + 1)
ln = "$(ln)$(left_padding)$(s_val) "
else
@error "Only the alignments :left, :right and :center are defined. $alignment isn't defined."
end
end
return ln
end
"""
fill_from_prev!(table::Table)
If a value hasn't been set by a new call to [`set_value!`](@ref) since the last call to [`print_line`](@ref), the previous value is used.
This function overwrites unassigned entries of `table.current_values` with the corresponding `table.prev_values`.
"""
function fill_from_prev!(table::Table)
for i in 1:length(table.current_values)
if !isassigned(table.current_values, i) || isnothing(table.current_values[i])
table.current_values[i] = table.prev_values[i]
end
end
end
"""
shall_print_line(table::Table; force=false)
Return whether the new line shall be printed. If `force = true` return true immediately.
Otherwise check if at least one value differs enough from the previous value by calling [`differs_enough`](@ref).
"""
function shall_print_line(table::Table; force=false)
force && return true
# check if a current value differs enough from the previous value
shall_update = false
for i in 1:length(table.current_values)
!isassigned(table.current_values, i) && continue
value = table.current_values[i]
if !isassigned(table.prev_values, i) || differs_enough(value, table.prev_values[i])
shall_update = true
break
end
end
return shall_update
end
"""
print_line(table::Table; force=false)
Print the new line of the table if it differs enough from the previous line or if `force = true`.
If the new line gets printed set the `prev_values` to `current_values` and the `current_values` to an `nothing`.
"""
function print_line(table::Table; force=false)
fill_from_prev!(table)
shall_print_line(table; force) || return
println(get_line(table))
update_for_new_row(table)
return
end
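# Usage sketch (assumes a `Table` constructed elsewhere in the package;
# `set_value!` is the setter referenced in the docstrings above):
#
#     set_value!(table, :iter, 1)       # stage a value for the :iter column
#     print_line(table)                 # printed only if it differs enough
#     print_line(table; force=true)     # always printed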
|
{"hexsha": "48def76d46cced77e86feafa206963945f4beb4a", "size": 3142, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/line.jl", "max_stars_repo_name": "Wikunia/TableLogger.jl", "max_stars_repo_head_hexsha": "b003e4d3731142e5cd7fe0a88b9f6d0409328017", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-31T21:55:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T06:27:58.000Z", "max_issues_repo_path": "src/line.jl", "max_issues_repo_name": "Wikunia/TableLogger.jl", "max_issues_repo_head_hexsha": "b003e4d3731142e5cd7fe0a88b9f6d0409328017", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-12-29T19:32:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-04T21:32:20.000Z", "max_forks_repo_path": "src/line.jl", "max_forks_repo_name": "Wikunia/TableLogger.jl", "max_forks_repo_head_hexsha": "b003e4d3731142e5cd7fe0a88b9f6d0409328017", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3033707865, "max_line_length": 139, "alphanum_fraction": 0.6476766391, "num_tokens": 745}
|
# -*- coding: utf-8 -*-
from data.corpus import Sentences
from stats.stat_functions import compute_ranks, compute_freqs, merge_to_joint
from stats.mle import Mandelbrot
from stats.entropy import mandelbrot_entropy, typicality
import numpy as np
import numpy.random as rand
def get_model(corpus, n):
big_ranks = compute_ranks(Sentences.subsample(corpus, n))
freqs = compute_freqs(Sentences.subsample(corpus, n))
joint = merge_to_joint(big_ranks, freqs)
xs, ys = list(zip(*sorted(joint.values())))
mandelbrot = Mandelbrot(ys, xs)
mandelbrot_fit = mandelbrot.fit(start_params=np.asarray([1.0, 1.0]),
method="powell", full_output=True)
mandelbrot.register_fit(mandelbrot_fit)
mandelbrot.print_result()
auto_typ = typicality(mandelbrot, joint)
return big_ranks, mandelbrot, auto_typ
def establish_typical_set(corpus, rank_dict, zipf_model, n, m):
typicalities = []
for i in range(m):
sub = Sentences.subsample(corpus, n)
sub_freqs = compute_freqs(sub)
sub_joints = merge_to_joint(rank_dict, sub_freqs)
sub_typicality = typicality(zipf_model, sub_joints)
typicalities.append(sub_typicality)
mean_typ, std_typ = np.mean(typicalities), np.var(typicalities)**.5
return mean_typ, std_typ
def setup_filtering(corpus, big_n, k, m):
rank_dict, zipf_model, auto_typ = get_model(corpus, big_n)
mean_typ, std_typ = establish_typical_set(corpus, rank_dict, zipf_model, k, m)
return zipf_model, rank_dict, mean_typ, std_typ, auto_typ
def sent_neg_log_prob(sent, zipf_model, rank_dict):
ranks = [rank_dict[w] if w in rank_dict else len(rank_dict)+1
for w in sent]
log_probs = zipf_model.prob(params=zipf_model.optim_params,
ranks=ranks, log=True)
return - np.sum(log_probs)
# add safety measure against non-halting
def filter_typicality_incremental(sents, zipf_model, rank_dict, auto_typ, n,
epsilon, direction):
if epsilon > 0 and direction(0, 1):
raise ValueError("use EITHER epsilon < 0 and direction == < "
"OR epsilon > 0 and direction == >")
sampled = 0
used = set()
theoretical_entropy = mandelbrot_entropy(*zipf_model.optim_params)
cur_nll = 0
num_not_found = 0
num_iter = 0
while sampled < n:
num_iter += 1
cur_sample = rand.randint(len(sents))
if cur_sample in used:
continue
cur_sent = sents[cur_sample]
if not cur_sent:
continue
coeff = 1/(sampled + len(cur_sent))
sent_nll = sent_neg_log_prob(cur_sent, zipf_model, rank_dict)
cur_typ = theoretical_entropy - coeff*(cur_nll + sent_nll)
if direction(cur_typ - auto_typ, epsilon):
used.add(cur_sample)
sampled += len(cur_sent)
cur_nll += sent_nll
yield cur_sent
else:
num_not_found += 1
# if num_not_found >= n:
# print("NUM ITER: ", num_iter)
# raise RuntimeError("number of samples has outgrown n! aborting")
print("NUM ITER: ", num_iter)
print("NUM NOT FOUND: ", num_not_found)
|
{"hexsha": "a29d02cbc8102be783e58eef902b38617a8742da", "size": 3465, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/filtering/typicality.py", "max_stars_repo_name": "valevo/thesis", "max_stars_repo_head_hexsha": "6671fa7ed8aefd3e89fd29ee97fa31a3c4315868", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-07T11:40:49.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-07T11:40:49.000Z", "max_issues_repo_path": "src/filtering/typicality.py", "max_issues_repo_name": "valevo/Thesis", "max_issues_repo_head_hexsha": "6671fa7ed8aefd3e89fd29ee97fa31a3c4315868", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/filtering/typicality.py", "max_forks_repo_name": "valevo/Thesis", "max_forks_repo_head_hexsha": "6671fa7ed8aefd3e89fd29ee97fa31a3c4315868", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2162162162, "max_line_length": 89, "alphanum_fraction": 0.6112554113, "include": true, "reason": "import numpy", "num_tokens": 863}
|
#!/usr/bin/env python
import numpy as np
import time
import roslib
import sys
import rospy
import cv2
from std_msgs.msg import String, Float64
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
#rosservice call /gazebo/set_model_state '{model_state: { model_name: turtlebot3_waffle, pose: { position: { x: -1.55, y: 1.915 ,z: 0 }, orientation: {x: -1.72, y: 0.0015, z: 4.225, w: 0.999 } }, twist: { linear: {x: 0.0 , y: 0 ,z: 0 } , angular: { x: 0.0 , y: 0 , z: 7.66 } } , reference_frame: world } }'
roslib.load_manifest('turtlebot3_gazebo')
kernel_dilation = cv2.getStructuringElement(cv2.MORPH_RECT,(50,50))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter("main.mp4", fourcc, 30, (960,540))
class Image_converter:
def __init__(self):
#self.image_pub = rospy.Publisher("/camera/rgb/image_raw",Image)
self.velocity_publisher = rospy.Publisher('cmd_vel', Twist, queue_size=10)
self.vel_msg = Twist()
self.bridge = CvBridge()
rospy.init_node('Image_converter', anonymous=True)
self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.callback)
self.vel_msg.linear.x = 2
self.vel_msg.linear.z = 0
def get_gray(self, img):
self.main_img = img
self.img_gray = cv2.cvtColor(self.main_img, cv2.COLOR_BGR2GRAY)
self.img_blur = cv2.GaussianBlur(self.img_gray, (5,5), 0)
_, self.binary = cv2.threshold(self.img_blur, 127, 255, cv2.THRESH_BINARY)
return self.binary
def get_roi(self, img):
self.mask = np.zeros_like(img)
roi_range=np.array([[(100, 100),(860, 100),(690, 540), (300, 540)]],dtype=np.int32)
cv2.fillPoly(self.mask,roi_range,255)
return cv2.bitwise_and(img, self.mask)
def get_contour_center(self, img):
self.contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
max_index = 0
if len(self.contours) == 0:
return False
if len(self.contours) > 1 :
area_list = []
for cnt in self.contours:
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
area_list.append(area)
max_index = np.argmax(area_list)
M = cv2.moments(self.contours[max_index])
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv2.line(self.main_img, (cx,cy), (cx,cy) ,(0,255,0),5)
cv2.drawContours(self.main_img, self.contours, -1, (0,255,0), 3)
return (cx, cy)
def callback(self, data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
(rows,cols,channels) = cv_image.shape
if cols > 60 and rows > 60 :
#print(cv_image.shape)
self.main_img = cv2.resize(cv_image,(0,0),fx = 0.5, fy = 0.5, interpolation = cv2.INTER_AREA)
self.gray_img = self.get_gray(self.main_img)
self.roi_img = self.get_roi(self.gray_img)
self.fgmask_dila = cv2.dilate(self.roi_img,kernel_dilation,iterations = 1)
#self.lines=cv2.HoughLinesP(self.img_canny,1,np.pi/180,25,minLineLength=1,maxLineGap=210)
self.center_point = self.get_contour_center(self.fgmask_dila)
if self.center_point:
self.error = (self.center_point[0]-480, self.center_point[1])
print(self.error)
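# Simple proportional steering: the turn rate scales with the pixel offset
# from the image center line (x = 480, drawn below).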
self.vel_msg.linear.x = 0.35
self.vel_msg.angular.z = -(self.error[0] / 60)
self.velocity_publisher.publish(self.vel_msg)
cv2.line(self.main_img, (480,0),(480, 640),(0,0,255),5)
#cv2.imshow("Image window", self.fgmask_dila)
cv2.imshow("main", self.main_img)
out.write(self.main_img)
key = cv2.waitKey(3)
if key == 27 :
self.vel_msg.linear.x = 0
self.vel_msg.angular.z = 0
self.velocity_publisher.publish(self.vel_msg)
cv2.destroyAllWindows()
return 0
#try:
# self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
#except CvBridgeError as e:
# print(e)
def main(args):
ic = Image_converter()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
{"hexsha": "30b7279ab4fe4cfd600c15527b30a62453c9ddac", "size": 4685, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/turtlebot3_gazebo/src/topic2python.py", "max_stars_repo_name": "diddytpq/Turtlebot-line-tracking-in-gazebo", "max_stars_repo_head_hexsha": "dd671546627bdd84db1591da3e71d967a4891c2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/turtlebot3_gazebo/src/topic2python.py", "max_issues_repo_name": "diddytpq/Turtlebot-line-tracking-in-gazebo", "max_issues_repo_head_hexsha": "dd671546627bdd84db1591da3e71d967a4891c2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/turtlebot3_gazebo/src/topic2python.py", "max_forks_repo_name": "diddytpq/Turtlebot-line-tracking-in-gazebo", "max_forks_repo_head_hexsha": "dd671546627bdd84db1591da3e71d967a4891c2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8869047619, "max_line_length": 306, "alphanum_fraction": 0.5846318036, "include": true, "reason": "import numpy", "num_tokens": 1255}
|
"""
The purpose of this code is to set the train, val, and test data sets
It can be run on sherlock using
ml load chemistry
ml load schrodinger
$ $SCHRODINGER/run python3 get_pocket_com.py
"""
from tqdm import tqdm
import pickle
import schrodinger.structutils.analyze as analyze
from schrodinger.structure import StructureReader
import os
import scipy.spatial
prot_file = '/oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random_with_unaligned.txt'
data_root = '/oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data'
DIST = 6.0
def get_volume(structs):
x_dim = max(structs[:, 0]) - min(structs[:, 0])
y_dim = max(structs[:, 1]) - min(structs[:, 1])
z_dim = max(structs[:, 2]) - min(structs[:, 2])
return x_dim * y_dim * z_dim
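# Note: get_volume computes the volume of the axis-aligned bounding box of an
# (N, 3) coordinate array; it is not called elsewhere in this script.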
def get_pocket_res(protein, ligand):
    """
    Given a co-crystallized protein and ligand, find the protein atoms within
    DIST angstroms of any ligand atom and return the center of mass of that
    binding pocket.
    Args:
        protein (schrodinger Structure): receptor protein
        ligand (schrodinger Structure): co-crystallized ligand
    Returns:
        center of mass of the pocket atoms, as computed by analyze.center_of_mass
    """
# get protein coordinates
prot_atoms = protein.getAtomIndices()
prot_coords = protein.getXYZ()
# get ligand coordinates
lig_coords = ligand.getXYZ()
kd_tree = scipy.spatial.KDTree(prot_coords)
key_pts = kd_tree.query_ball_point(lig_coords, r=DIST, p=2.0)
key_pts = set([k for l in key_pts for k in l])
return analyze.center_of_mass(protein, list(key_pts.intersection(prot_atoms)))
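# A minimal, self-contained sketch of the neighborhood query above, using plain
# NumPy arrays in place of Schrodinger structures (all names below are illustrative):
#
#     import numpy as np
#     import scipy.spatial
#     prot_coords = np.random.rand(100, 3) * 20.0   # fake receptor coordinates
#     lig_coords = np.random.rand(10, 3) * 20.0     # fake ligand coordinates
#     hits = scipy.spatial.KDTree(prot_coords).query_ball_point(lig_coords, r=DIST, p=2.0)
#     key_pts = {k for l in hits for k in l}        # flatten the per-atom hit lists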
def main():
coms = {}
with open(prot_file) as fp:
for line in tqdm(fp, desc='protein file'):
if line[0] == '#': continue
protein, target, start = line.strip().split()
if protein not in coms:
coms[protein] = {}
if start not in coms[protein]:
start_receptor_file = os.path.join(data_root, '{}/structures/aligned/{}_prot.mae'.format(protein, start))
start_ligand_file = os.path.join(data_root, '{}/structures/aligned/{}_lig.mae'.format(protein, start))
start_struct = list(StructureReader(start_receptor_file))[0]
start_lig = list(StructureReader(start_ligand_file))[0]
# print(protein, start)
coms[protein][start] = get_pocket_res(start_struct, start_lig)
if target not in coms[protein]:
target_receptor_file = os.path.join(data_root, '{}/structures/aligned/{}_prot.mae'.format(protein, target))
target_ligand_file = os.path.join(data_root, '{}/structures/aligned/{}_lig.mae'.format(protein, target))
target_struct = list(StructureReader(target_receptor_file))[0]
target_lig = list(StructureReader(target_ligand_file))[0]
# print(protein, target)
                coms[protein][target] = get_pocket_res(target_struct, target_lig)
with open('pocket_com.pkl', 'wb') as f:
pickle.dump(coms, f)
if __name__=="__main__":
main()
|
{"hexsha": "8ebf63a8cfb2756a8b2a59c1cec176f473879fb8", "size": 3149, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data_analysis/get_pocket_com.py", "max_stars_repo_name": "sidhikabalachandar/lig_clash_score", "max_stars_repo_head_hexsha": "449bac16a7c2b9779e7cd51ff17eb5e41be6ff99", "max_stars_repo_licenses": ["FTL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data_analysis/get_pocket_com.py", "max_issues_repo_name": "sidhikabalachandar/lig_clash_score", "max_issues_repo_head_hexsha": "449bac16a7c2b9779e7cd51ff17eb5e41be6ff99", "max_issues_repo_licenses": ["FTL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data_analysis/get_pocket_com.py", "max_forks_repo_name": "sidhikabalachandar/lig_clash_score", "max_forks_repo_head_hexsha": "449bac16a7c2b9779e7cd51ff17eb5e41be6ff99", "max_forks_repo_licenses": ["FTL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9397590361, "max_line_length": 123, "alphanum_fraction": 0.666560813, "include": true, "reason": "import scipy", "num_tokens": 795}
|
import numpy as np
import pandas as pd
from utils.constants import *
from utils.strings import *
class Processor:
'''Preprocessor for Bitcoin prices dataset as obtained by following the procedure
described in https://github.com/philipperemy/deep-learning-bitcoin'''
def __init__(self, config, logger):
self.dataset_path = config[DATASET_PATH]
self.logger = logger
self.history_length = config[HISTORY_LENGTH]
self.horizon = config[HORIZON]
self.preprocess()
self.generate_attributes()
@property
def diff_blocks(self):
return self._diff_blocks
@property
def price_blocks(self):
return self._price_blocks
@property
def timestamp_blocks(self):
return self._timestamp_blocks
def preprocess(self):
data = pd.read_csv(self.dataset_path)
message = 'Columns found in the dataset {}'.format(data.columns)
self.logger.info(message)
data = data.dropna()
        # Timestamps are in seconds; convert to minutes since the start, then subtract
        # the running row index so that rows from a contiguous run of minutes share a
        # single value, letting groupby recover the blocks of continuous prices.
        start_time_stamp = data['Timestamp'].iloc[0]
        timestamps = data['Timestamp'].apply(lambda x: (x - start_time_stamp) / 60)
        timestamps = timestamps - np.arange(timestamps.shape[0])
        data.insert(0, 'blocks', timestamps)
        blocks = data.groupby('blocks')
message = 'Number of blocks of continuous prices found are {}'.format(len(blocks))
self.logger.info(message)
self._data_blocks = []
distinct_episodes = 0
for name, indices in blocks.indices.items():
            '''
            The length of the block should exceed the history length plus horizon by 1.
            The extra 1 is required to normalize each price block by the previous time stamp.
            '''
            if len(indices) > (self.history_length + self.horizon + 1):
                self._data_blocks.append(blocks.get_group(name))
                # the extra normalization element is likewise accounted for when counting distinct episodes
                distinct_episodes = distinct_episodes + (len(indices) - (self.history_length + self.horizon) + 1 + 1)
data = None
message_list = ['Number of usable blocks obtained from the dataset are {}'.format(len(self._data_blocks))]
message_list.append('Number of distinct episodes for the current configuration are {}'.format(distinct_episodes))
        # a plain loop, since map() is lazy in Python 3 and would log nothing
        for message in message_list:
            self.logger.info(message)
def generate_attributes(self):
self._diff_blocks = []
self._price_blocks = []
self._timestamp_blocks = []
for data_block in self._data_blocks:
block = data_block[['price_close', 'price_low', 'price_high', 'volume']]
closing_prices = block['price_close']
diff_block = closing_prices.shift(-1)[:-1].subtract(closing_prices[:-1])
# currently normalizing the prices by previous prices of the same category
normalized_block = block.shift(-1)[:-1].truediv(block[:-1])
            self._diff_blocks.append(diff_block.to_numpy())
            self._price_blocks.append(normalized_block.to_numpy())
self._timestamp_blocks.append(data_block['DateTime_UTC'].values[1:])
self._data_blocks = None #free memory
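# A toy illustration of the shift/divide normalization in generate_attributes
# (numbers made up): for closing prices s = pd.Series([100, 110, 99]),
#     s.shift(-1)[:-1].truediv(s[:-1])    # -> [1.10, 0.90], next price / current price
#     s.shift(-1)[:-1].subtract(s[:-1])   # -> [10.0, -11.0], next price - current price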
|
{"hexsha": "70f629f4f9e84274f10a0c6755196c4872183f1b", "size": 3261, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/process/processor.py", "max_stars_repo_name": "wknight1/deep-trading-agent", "max_stars_repo_head_hexsha": "58e6617fa78b18c31460962511ab83af430cc326", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 671, "max_stars_repo_stars_event_min_datetime": "2017-10-01T06:43:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T09:51:32.000Z", "max_issues_repo_path": "code/process/processor.py", "max_issues_repo_name": "lightvault/deep-trading-agent", "max_issues_repo_head_hexsha": "58e6617fa78b18c31460962511ab83af430cc326", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2017-11-18T14:05:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-02T22:21:09.000Z", "max_forks_repo_path": "code/process/processor.py", "max_forks_repo_name": "lightvault/deep-trading-agent", "max_forks_repo_head_hexsha": "58e6617fa78b18c31460962511ab83af430cc326", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 240, "max_forks_repo_forks_event_min_datetime": "2017-09-21T18:33:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T17:45:45.000Z", "avg_line_length": 39.7682926829, "max_line_length": 121, "alphanum_fraction": 0.6375344986, "include": true, "reason": "import numpy", "num_tokens": 667}
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Use this file to generate train and val dataset
"""
import os
import os.path
import sys
from PIL import Image
import numpy as np
from numpy.random import randint
import mindspore.dataset as ds
from src.transforms import GroupNormalize, Stack, ToMindSporeFormatTensor, GroupScale, \
GroupCenterCrop, GroupMultiScaleCrop, GroupRandomHorizontalFlip
class VideoRecord:
    """
    A single record of the video list: path, frame count and label.
    """
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
class TSNDataSet():
    """
    Generates the data set: sparsely sampled frame segments per video, TSN-style.
    """
def __init__(self, root_path, list_file,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.jpg', transform=None,
force_grayscale=False, random_shift=True, test_mode=False):
self.root_path = root_path
dirname, _ = os.path.split(os.path.abspath(sys.argv[0]))
self.list_file = os.path.join(dirname, list_file)
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.random_shift = random_shift
self.test_mode = test_mode
self.transform = transform
if self.modality == 'RGBDiff':
self.new_length += 1 # Diff needs one more image to calculate diff
self._parse_list()
def __getitem__(self, index):
record = self.video_list[index]
if not self.test_mode:
segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
else:
segment_indices = self._get_test_indices(record)
pilImgs, label = self.get(record, segment_indices)
return pilImgs, label
def __len__(self):
return len(self.video_list)
    def _parse_list(self):
        with open(self.list_file) as list_file:
            self.video_list = [VideoRecord(x.strip().split(' ')) for x in list_file]
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration,
size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
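    # Worked example for _sample_indices (hypothetical numbers): with num_frames=30,
    # new_length=1 and num_segments=3, average_duration = (30 - 1 + 1) // 3 = 10,
    # the base offsets are [0, 10, 20], randint(10, size=3) jitters each offset
    # within its segment, and the final +1 converts to the 1-based frame numbering
    # used by image_tmpl.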
def _get_val_indices(self, record):
if record.num_frames > self.num_segments + self.new_length - 1:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_test_indices(self, record):
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
return offsets + 1
def get(self, record, indices):
"""
get record
"""
images = list()
for seg_ind in indices:
p = int(seg_ind)
            for _ in range(self.new_length):
seg_imgs = self._load_image(record.path, p)
images.extend(seg_imgs)
if p < record.num_frames:
p += 1
if self.transform:
for t in self.transform:
if isinstance(t, list):
for sub_t in t:
images = sub_t(images)
else:
images = t(images)
return images, record.label
def _load_image(self, directory, idx):
        if self.modality in ('RGB', 'RGBDiff'):
return [Image.open(os.path.join(directory, self.image_tmpl.format(idx))).convert('RGB')]
if self.modality == 'Flow':
x_img = Image.open(os.path.join(directory, self.image_tmpl.format('x', idx))).convert('L')
y_img = Image.open(os.path.join(directory, self.image_tmpl.format('y', idx))).convert('L')
return [x_img, y_img]
raise ValueError("Unknown {}".format(directory))
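# Note on image_tmpl: with the default 'img_{:05d}.jpg', frame 12 maps to
# 'img_00012.jpg'; for 'Flow' the template must accept two fields, e.g. a
# hypothetical 'flow_{}_{:05d}.jpg'.format('x', 12) -> 'flow_x_00012.jpg'.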
def create_dataset_train(args, rgb_read_format, input_size=224, data_length=1):
"""
create train dataloader
"""
train_augmentation = [GroupMultiScaleCrop(input_size, [1, .875, .75, .66]),
GroupRandomHorizontalFlip(is_flow=False)]
input_mean = [104, 117, 128]
input_std = [1]
normalize = GroupNormalize(input_mean, input_std)
train_transforms = [train_augmentation,
Stack(roll=True),
ToMindSporeFormatTensor(div=False),
normalize]
if args.modality in ["RGB", "RGBDiff"]:
image_tmpl = args.rgb_prefix + rgb_read_format
else:
image_tmpl = args.flow_prefix + rgb_read_format
train_dataset_generator = TSNDataSet("", args.train_list, num_segments=args.num_segments,
new_length=data_length,
modality=args.modality, transform=train_transforms,
image_tmpl=image_tmpl)
print("Train dataset generator length: ", len(train_dataset_generator))
return train_dataset_generator
def create_dataset_val(args, rgb_read_format, input_size=224, data_length=1):
"""
create val dataloader
"""
input_mean = [104, 117, 128]
input_std = [1]
normalize = GroupNormalize(input_mean, input_std)
crop_size = input_size
scale_size = input_size * 256 // 224
val_transforms = [GroupScale(int(scale_size)),
GroupCenterCrop(crop_size),
Stack(roll=True),
ToMindSporeFormatTensor(div=False),
normalize
]
if args.modality in ["RGB", "RGBDiff"]:
image_tmpl = args.rgb_prefix + rgb_read_format
else:
image_tmpl = args.flow_prefix + rgb_read_format
val_dataset_generator = TSNDataSet("", args.val_list, num_segments=args.num_segments,
new_length=data_length,
modality=args.modality,
image_tmpl=image_tmpl,
random_shift=False,
transform=val_transforms)
val_dataset = ds.GeneratorDataset(val_dataset_generator, ["image", "label"], shuffle=False)
val_dataset = val_dataset.batch(args.batch_size, drop_remainder=True)
return val_dataset
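# A minimal usage sketch (the `args` fields mirror those used above; everything
# else is assumed):
#
#     gen = create_dataset_train(args, rgb_read_format='{:05d}.jpg')
#     train_dataset = ds.GeneratorDataset(gen, ["image", "label"], shuffle=True)
#     train_dataset = train_dataset.batch(args.batch_size, drop_remainder=True)
#
# create_dataset_train returns the raw generator, leaving the GeneratorDataset
# wrapping (and any sharding options) to the caller, whereas create_dataset_val
# returns a ready-to-iterate batched dataset.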
|
{"hexsha": "8ac1653a0c671e618d4f6ff44dc5a0f141d79ead", "size": 7884, "ext": "py", "lang": "Python", "max_stars_repo_path": "research/cv/ecolite/src/dataset.py", "max_stars_repo_name": "mindspore-ai/models", "max_stars_repo_head_hexsha": "9127b128e2961fd698977e918861dadfad00a44c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 77, "max_stars_repo_stars_event_min_datetime": "2021-10-15T08:32:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T13:09:11.000Z", "max_issues_repo_path": "research/cv/ecolite/src/dataset.py", "max_issues_repo_name": "mindspore-ai/models", "max_issues_repo_head_hexsha": "9127b128e2961fd698977e918861dadfad00a44c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-10-30T14:44:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T06:57:57.000Z", "max_forks_repo_path": "research/cv/ecolite/src/dataset.py", "max_forks_repo_name": "mindspore-ai/models", "max_forks_repo_head_hexsha": "9127b128e2961fd698977e918861dadfad00a44c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2021-10-15T08:32:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T18:45:20.000Z", "avg_line_length": 36.8411214953, "max_line_length": 117, "alphanum_fraction": 0.598173516, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1677}
|
(** * ugregex_dec: simple decision procedure for untyped generalised regular expressions *)
(** We implement a rather basic algorithm consisting of trying to
    build a bisimulation on-the-fly, using partial derivatives.
    We prove the correctness of this algorithm, but not completeness
    ("it merely lets you sleep better" according to Krauss and Nipkow).
This very simple algorithm seems to be sufficient for reasonable
expressions; we plan to improve it to be able to handle larger
ones. *)
Require Import lset kat positives sums glang boolean comparisons powerfix.
Require Export ugregex.
Set Implicit Arguments.
Section l.
Variable Pred: nat.
Notation Sigma := positive.
Notation Atom := (ord (pow2 Pred)).
Notation tt := ugregex_tt.
Notation ugregex := (ugregex_monoid_ops Pred tt tt).
Notation uglang := (glang_kat_ops Pred Sigma traces_tt traces_tt).
Notation lang := (@lang Pred).
Ltac fold_ugregex_type := change (@ugregex.ugregex Pred) with (@car ugregex) in *.
Ltac fold_ugregex := ra_fold ugregex_monoid_ops tt; fold_ugregex_type.
(** * Partial derivatives *)
(** reversed product *)
Notation tod e := (fun f => u_dot f e) (only parsing).
(** [pderiv a i e] returns the set of partial derivatives of [e] along
transition [(a,i)] (since we work with KAT regular expressions,
labels are composed of an atom together with a letter) *)
Fixpoint pderiv a i (e: ugregex): list ugregex :=
match e with
| u_prd _ => []
| u_var _ j => if eqb_pos i j then [u_one _] else []
| u_pls e f => union (pderiv a i e) (pderiv a i f)
| u_dot e f =>
if epsilon a e then union (map (tod f) (pderiv a i e)) (pderiv a i f)
else map (tod f) (pderiv a i e)
| u_itr e => map (tod (u_str e)) (pderiv a i e)
end.
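(** For instance (informally): a Kleene variable never accepts the empty word,
    so [pderiv a i (u_var _ i ⋅ f)] evaluates to [[1 ⋅ f]], the unique partial
    derivative obtained by consuming the letter [i] with [f] left to read,
    while along any letter [j <> i] the result is the empty list. *)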
(** [epsilon] was defined in [ugregex];
    we now extend both notions to sets of expressions, homomorphically: *)
Definition epsilon' a (l: list ugregex): bool :=
fold_right (fun e b => b ||| epsilon a e) false l.
Definition pderiv' a i (l: list ugregex): list ugregex :=
fold_right (fun e => union (pderiv a i e)) [] l.
(** specification of [epsilon'] *)
Lemma epsilon'_eq a l: epsilon a (sup id l) ≡ epsilon' a l.
Proof.
induction l. reflexivity. simpl.
rewrite <- IHl. unfold id.
rewrite <-2Bool.orb_lazy_alt. apply Bool.orb_comm.
Qed.
(** correctness of partial derivatives *)
Lemma deriv_eq a i e: deriv a i e ≡ sup id (pderiv (set.mem a) i e).
Proof.
induction e; simpl; fold_ugregex.
case eqb_pos. 2: reflexivity. now rewrite sup_singleton.
reflexivity.
rewrite union_app, sup_app. now apply cup_weq.
assert (H: deriv a i e1 ⋅ e2 ≡ sup id (map (tod e2) (pderiv (set.mem a) i e1))).
rewrite sup_map. setoid_rewrite <-(dotsumx (X:=ugregex_monoid_ops _)).
now apply dot_weq.
case epsilon.
rewrite union_app, sup_app.
setoid_rewrite dot1x. now apply cup_weq.
setoid_rewrite dot0x. now rewrite cupxb.
rewrite sup_map. setoid_rewrite <-(dotsumx (X:=ugregex_monoid_ops _)).
now apply dot_weq.
Qed.
Lemma deriv'_eq a i l: deriv a i (sup id l) ≡ sup id (pderiv' (set.mem a) i l).
Proof.
induction l. reflexivity. simpl (sup _ _).
rewrite union_app, sup_app.
apply cup_weq. apply deriv_eq. assumption.
Qed.
(** Kleene variables of an expression *)
Fixpoint vars (e: ugregex): list Sigma :=
match e with
| u_prd _ => []
| u_var _ i => [i]
| u_pls e f | u_dot e f => union (vars e) (vars f)
| u_itr e => vars e
end.
(** partial derivatives do not increase the set of Kleene variables *)
Lemma deriv_vars a i (e: ugregex): \sup_(x\in pderiv a i e) vars x ≦ vars e.
Proof.
induction e; simpl pderiv; simpl vars.
case eqb_pos; apply leq_bx.
apply leq_bx.
rewrite 2union_app, sup_app. now apply cup_leq.
setoid_rewrite union_app at 2.
assert (H: \sup_(x\in map (tod e2) (pderiv a i e1)) vars x ≦ vars e1 ++ vars e2).
rewrite sup_map. simpl vars. setoid_rewrite union_app. rewrite supcup.
apply cup_leq. assumption. now apply leq_supx.
case epsilon. rewrite union_app, sup_app, H. hlattice. assumption.
rewrite sup_map. simpl vars. setoid_rewrite union_app. rewrite supcup.
apply leq_cupx. assumption. now apply leq_supx.
Qed.
Lemma deriv'_vars a i l: \sup_(x\in pderiv' a i l) vars x ≦ sup vars l.
Proof.
induction l. reflexivity. setoid_rewrite union_app. rewrite sup_app.
apply cup_leq. apply deriv_vars. assumption.
Qed.
(** deriving an expression w.r.t. a letter it does not contain necessarily gives [0] *)
Lemma deriv_out a i e I: vars e ≦ I -> ~In i I -> deriv a i e ≡ 0.
Proof.
intros He Hi. induction e; simpl deriv; simpl vars in He; fold_ugregex.
case eqb_spec. 2: reflexivity. intros <-. apply Hi in He as []. now left.
reflexivity.
rewrite union_app in He.
rewrite IHe1, IHe2 by (rewrite <-He; lattice). apply cupI.
rewrite union_app in He.
rewrite IHe1, IHe2 by (rewrite <-He; lattice). rewrite dot0x, dotx0. apply cupI.
rewrite IHe by assumption. apply dot0x.
Qed.
(** we need binary relations on sets of expressions, we represent them
as lists of pairs (this could easily be optimised) *)
Definition rel_mem (p: list ugregex * list ugregex) := existsb (eqb p).
Notation rel_insert p rel := (p::rel).
Notation rel_empty := [].
(* OPT *)
(* Definition rel_mem := trees.mem (pair_compare (list_compare compare)). *)
(* Definition rel_insert := trees.insert (pair_compare (list_compare compare)). *)
(* Notation rel_empty := (@trees.L _) *)
Lemma rel_mem_spec p rel: reflect (In p rel) (rel_mem p rel).
Proof.
induction rel. constructor. tauto.
simpl rel_mem. case eqb_spec.
intros <-. constructor. now left.
case IHrel; constructor. now right. intros [?|?]; congruence.
Qed.
(** * Main loop for the on-the-fly bisimulation algorithm *)
(** [epsilon'] and [deriv'] provide us with a (generalised) DFA whose
states are sets of generalised expressions ([list ugregex]). We
simply try compute bisimulations in this DFA. *)
Section a.
  (** we assume a set of Kleene variables and a set of atoms; the
following algorithm tries to compute bisimulations w.r.t. those
sets. *)
Variable I: list positive.
Variable A: list (ord Pred -> bool).
Definition obind X Y (f: X -> option Y) (x: option X): option Y :=
match x with Some x => f x | _ => None end.
Fixpoint ofold X Y (f: X -> Y -> option Y) (l: list X) (y: Y): option Y :=
match l with
| [] => Some y
| x::q => obind (f x) (ofold f q y)
end.
(** [loop_aux e f a todo] checks the accepting status of [e] and [f] along [a],
    - if a mismatch is found, we can stop (a counterexample has been found)
- otherwise, it inserts all derivatives of the pair [(e,f)] along [{a}⋅I] into [todo] *)
Definition loop_aux e f :=
fun a todo =>
if eqb_bool (epsilon' a e) (epsilon' a f)
then Some (fold_right (fun i => cons (pderiv' a i e, pderiv' a i f)) todo I)
else None.
(** [ofold (loop_aux e f) A todo] does the same, for all [a\in A] *)
(** [loop n rel todo] is the main loop of the algorithm:
it tries to prove that all pairs in [todo] are bisimilar, assuming
that those in [rel] are bisimilar.
- if a pair of [todo] was already in [rel], it can be skipped;
- otherwise, its accepting status is checked, all derivatives are
inserted in [todo], and the pair is added to [rel]
The number of iterations is bounded by [2^n], using the [powerfix] operator. *)
Definition loop n := powerfix n (fun loop rel todo =>
match todo with
| [] => Some true
| (e,f)::todo =>
if rel_mem (e,f) rel then loop rel todo else
match ofold (loop_aux e f) A todo with
| Some todo => loop (rel_insert (e,f) rel) todo
| None => Some false
end
end
) (fun _ _ => None).
(** * Correctness of the main loop *)
(** [prog] is a predicate on binary relations:
[prog rel (rel++todo)] is the invariant of the main loop *)
Definition prog R S :=
forall e f, In (e,f) R -> sup vars (e++f) ≦ I /\
forall a, In a A -> epsilon' a e = epsilon' a f /\
forall i, In i I -> In (pderiv' a i e, pderiv' a i f) S.
Lemma prog_cup_x R R' S: prog R S -> prog R' S -> prog (R++R') S.
Proof. intros H H' e f Hef. apply in_app_iff in Hef as [?|?]. now apply H. now apply H'. Qed.
Lemma prog_x_leq R S S': prog R S -> S ≦ S' -> prog R S'.
Proof.
intros H H' e f Hef. apply H in Hef as [? Hef].
split. assumption. split. now apply Hef. intros. now apply H', Hef.
Qed.
Definition below_I todo := forall e f, In (e,f) todo -> sup vars (e++f) ≦ I.
(** specification of the inner loop *)
Lemma loop_aux_spec e f a todo todo':
below_I ((e,f)::todo) ->
loop_aux e f a todo = Some todo' ->
epsilon' a e = epsilon' a f /\
todo ≦ todo' /\
below_I todo' /\
forall i, In i I -> In (pderiv' a i e, pderiv' a i f) todo'.
Proof.
unfold loop_aux. case eqb_bool_spec. 2: discriminate. intros Heps Hvars E.
split. assumption. injection E. clear E Heps. revert todo'.
induction I as [|i J IH]; simpl fold_right; intro todo'.
intros <-. split. reflexivity. split. intros ? ? ?. apply Hvars; now right. intros _ [].
intro E. destruct todo' as [|p todo']. discriminate.
injection E. intros H <-. clear E. apply IH in H as [H1 [H2 H3]]. clear IH.
split. fold_cons. rewrite <- H1. lattice.
split. intros ? ? [E|H].
injection E; intros <- <-. rewrite sup_app, 2deriv'_vars, <-sup_app. apply Hvars. now left.
now apply H2.
intros b [<-|Hb]. now left. right. now apply H3.
Qed.
Lemma fold_loop_aux_spec e f todo: forall todo',
below_I ((e,f)::todo) ->
ofold (loop_aux e f) A todo = Some todo' ->
todo ≦ todo' /\
below_I todo' /\
forall a, In a A -> epsilon' a e = epsilon' a f /\
forall i, In i I -> In (pderiv' a i e, pderiv' a i f) todo'.
Proof.
induction A as [|b B IH]; simpl ofold; intros todo'.
intros Hvars H. injection H. intros <-. split. reflexivity.
split. intros ? ? ?. apply Hvars. now right. intros _ [].
unfold obind. fold_ugregex_type. case_eq (ofold (X:=ord Pred -> bool) (loop_aux e f) B todo).
2: discriminate.
intros todo'' Htodo'' Hvars Htodo'.
apply IH in Htodo'' as [Htodo''_leq [Hvars' Htodo'']]. 2: assumption. clear IH.
apply loop_aux_spec in Htodo' as (Heps&Htodo'_leq&Hvars''&Htodo').
split. etransitivity; eassumption.
split. assumption.
intros a [<-|Ha]. now split.
apply Htodo'' in Ha as [Haeps Ha]. split. assumption.
intros. now apply Htodo'_leq, Ha.
intros ? ? [E|?]. injection E; intros <- <-. apply Hvars; now left. now apply Hvars'.
Qed.
Lemma In_cons X (a: X) l: In a l -> [a]++l ≦ l.
Proof. now intros ? ? [<-|?]. Qed.
(** specification of the outer loop *)
Lemma prog_loop n: forall rel todo,
loop n rel todo = Some true ->
prog rel (rel++todo) ->
below_I todo ->
exists rel', rel++todo ≦ rel' /\ prog rel' rel'.
Proof.
(* TODO: use powerfix_invariant *)
unfold loop. rewrite powerfix_linearfix. generalize (pow2 n). clear n. intro n.
induction n; intros rel todo Hloop Hrel Hvars. discriminate.
simpl in Hloop. destruct todo as [|[e f] todo].
exists rel. split. now rewrite <- app_nil_end. now rewrite <-app_nil_end in Hrel.
revert Hloop. case rel_mem_spec.
intros Hef Hloop. apply IHn in Hloop as (rel'&H1&H2).
eexists. split. 2: eassumption.
rewrite <- H1. rewrite <-(In_cons Hef) at 2. fold_cons. lattice.
eapply prog_x_leq. apply Hrel.
rewrite <-(In_cons Hef) at 2. fold_cons. lattice.
intros ? ? ?. apply Hvars. now right.
intros _. fold_ugregex_type. case_eq (ofold (X:=ord Pred -> bool) (loop_aux e f) A todo).
2: discriminate.
intros todo' Htodo' Hloop.
apply fold_loop_aux_spec in Htodo' as [Htodo' [Hvars' Hef]]. 2: assumption.
destruct (IHn _ _ Hloop) as (rel'&Hrel'&Hrel''). 2: assumption.
clear - Hef Hvars Hvars' Hrel Htodo'.
apply (@prog_cup_x [_]). eapply prog_x_leq.
intros ? ? [E|[]]. injection E; intros <- <-; clear E.
split. apply Hvars. now left. apply Hef. lattice.
eapply prog_x_leq. apply Hrel. rewrite <- Htodo'. fold_cons. lattice.
eexists. split. 2: eassumption. rewrite <-Hrel', <-Htodo'. fold_cons. lattice.
Qed.
End a.
Existing Instance lang'_weq.
(** correctness of the bisimulation proof method, at the abstract level *)
Lemma prog_correct I l rel:
(forall a, In (set.mem a) l) ->
prog I l rel rel -> below_I I rel ->
forall e f, In (e,f) rel -> sup lang e ≡ sup lang f.
Proof.
intros Hl Hrel Hvars e f Hef.
rewrite <-2lang_sup, 2lang_lang'.
intro w. revert e f Hef. induction w; simpl lang'; intros e f Hef.
- apply Hrel in Hef as [_ Hef].
rewrite 2epsilon'_eq. destruct (Hef _ (Hl a)) as [-> _]. reflexivity.
- destruct (fun H => In_dec H i I) as [Hi|Hi]. decide equality.
etransitivity. apply lang'_weq. apply deriv'_eq.
etransitivity. 2: apply lang'_weq; symmetry; apply deriv'_eq.
apply IHw. apply Hrel. assumption. apply Hl. assumption.
clear IHw. revert w. apply lang'_weq. rewrite 2deriv_sup.
rewrite 2sup_b. reflexivity.
intros f' Hf. eapply deriv_out. 2: eassumption.
etransitivity. 2: apply Hvars. 2: apply Hef. apply leq_xsup. apply in_app_iff. now right.
intros e' He. eapply deriv_out. 2: eassumption.
etransitivity. 2: apply Hvars. 2: apply Hef. apply leq_xsup. apply in_app_iff. now left.
Qed.
(** * Final algorithm, correctness *)
(** the final algorithm is obtained by calling the main loop with
appropriate arguments *)
Definition eqb_kat (e f: ugregex) :=
let atoms := map (@set.mem _) (seq _) in
let vars := vars (e+f) in
loop vars atoms 1000 rel_empty [([e],[f])%list].
(* as stated, the algorithm is not complete: we would need to
replace 1000 with the size of [e+f]... bzzz *)
(** correctness of the algorithm *)
Theorem eqb_kat_correct e f: eqb_kat e f = Some true -> e ≡ f.
Proof.
unfold eqb_kat. intro H. apply prog_loop in H as [rel [Hef Hrel]].
2: intros _ _ [].
2: simpl vars; intros ? ? [E|[]]; injection E; intros <- <-;
rewrite union_app, sup_app, 2sup_singleton; reflexivity.
eapply prog_correct in Hrel.
2: intro; apply in_map, in_seq.
3: apply Hef; now left.
rewrite 2sup_singleton in Hrel. assumption.
intros ? ? ?. now apply Hrel.
Qed.
End l.
|
{"author": "damien-pous", "repo": "relation-algebra", "sha": "13b99896782e449c7ca3910e48e18427517c8135", "save_path": "github-repos/coq/damien-pous-relation-algebra", "path": "github-repos/coq/damien-pous-relation-algebra/relation-algebra-13b99896782e449c7ca3910e48e18427517c8135/theories/ugregex_dec.v"}
|
{-# OPTIONS --without-K --safe #-}
module Cham.Label where
open import Cham.Name
data Label : Set where
_⁺ : Name → Label
_⁻ : Name → Label
|
{"hexsha": "3c70f751d13cab5d1ce3783d365a5b3fe3eb6b16", "size": 148, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "agda/Cham/Label.agda", "max_stars_repo_name": "riz0id/chemical-abstract-machine", "max_stars_repo_head_hexsha": "292023fc36fa67ca4a81cff9a875a325a79b9d6f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "agda/Cham/Label.agda", "max_issues_repo_name": "riz0id/chemical-abstract-machine", "max_issues_repo_head_hexsha": "292023fc36fa67ca4a81cff9a875a325a79b9d6f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agda/Cham/Label.agda", "max_forks_repo_name": "riz0id/chemical-abstract-machine", "max_forks_repo_head_hexsha": "292023fc36fa67ca4a81cff9a875a325a79b9d6f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.4545454545, "max_line_length": 34, "alphanum_fraction": 0.6351351351, "num_tokens": 42}
|
from glob import glob
import json
import torch
import numpy as np
def make_new_fileset():
in_path = "finished_files/train/"
out_path = "mono_abs_train_small2/"
flist = glob(in_path +"*")
new_flist = []
ext_snts = []
abs_snts = []
for fn in flist[:100]:
        with open(fn, "r") as f_in:
            jd = json.load(f_in)
art = jd['article']
ext = [art[ix].split() for ix in jd['extracted']]
abss = [s.split() for s in jd["abstract"]][0]
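        # Pick the extracted sentence sharing the most words with the abstract's
        # first sentence; the pair is kept below only if that sentence is noticeably
        # longer (>1.2x) and covers at least 60% of the abstract's unique words.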
i_match = sorted([(i,len(set(s) & set(abss))) for i,s in enumerate(ext)], key=lambda x:x[1], reverse=True)[0]
if len(ext[i_match[0]]) > 1.2*len(abss) and len(set(ext[i_match[0]]) & set(abss)) / len(set(abss)) >0.6:
new_flist.append(fn)
ext_snts.append(ext[i_match[0]])
abs_snts.append(abss)
jd["extracted"] = [jd["extracted"][i_match[0]]]
            with open(out_path + fn.split('/')[-1], "w") as f_out:
                json.dump(jd, f_out, ensure_ascii=False, indent=4)
ext_snt_len = [len(s) for s in ext_snts]
abs_snt_len = [len(s) for s in abs_snts]
print(f"extracted sent_len : mean = {np.mean(ext_snt_len)}, std = {np.std(ext_snt_len)}")
print(f"abstracted sent_len : mean = {np.mean(abs_snt_len)}, std = {np.std(abs_snt_len)}")
print(f"tot num of flist : {len(flist)}")
print(f"num of selected flist : {len(new_flist)}")
if __name__ == '__main__':
make_new_fileset()
|
{"hexsha": "8759d34bd07d5646e6232ca24ca8389a710749b2", "size": 1402, "ext": "py", "lang": "Python", "max_stars_repo_path": "for_AIHUB_summ/make_files_for_aihub.py", "max_stars_repo_name": "won2lee/summ_fast_abs_rl", "max_stars_repo_head_hexsha": "6aeb0fe760ed0bc693ba3194f2813ef9cccc07bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "for_AIHUB_summ/make_files_for_aihub.py", "max_issues_repo_name": "won2lee/summ_fast_abs_rl", "max_issues_repo_head_hexsha": "6aeb0fe760ed0bc693ba3194f2813ef9cccc07bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "for_AIHUB_summ/make_files_for_aihub.py", "max_forks_repo_name": "won2lee/summ_fast_abs_rl", "max_forks_repo_head_hexsha": "6aeb0fe760ed0bc693ba3194f2813ef9cccc07bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0571428571, "max_line_length": 117, "alphanum_fraction": 0.5941512126, "include": true, "reason": "import numpy", "num_tokens": 427}
|
# This file was generated, do not modify it. # hide
ẑ[:lambda] = 5.0;
|
{"hexsha": "30e99338ba06684038b4c277c3ab25348e1f8384", "size": 70, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "_assets/pages/getting-started/learning-networks/code/ex16.jl", "max_stars_repo_name": "giordano/DataScienceTutorials.jl", "max_stars_repo_head_hexsha": "8284298842e0d77061cf8ee767d0899fb7d051ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2021-08-09T11:35:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T06:20:43.000Z", "max_issues_repo_path": "_assets/pages/getting-started/learning-networks/code/ex16.jl", "max_issues_repo_name": "giordano/DataScienceTutorials.jl", "max_issues_repo_head_hexsha": "8284298842e0d77061cf8ee767d0899fb7d051ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 56, "max_issues_repo_issues_event_min_datetime": "2019-10-22T00:06:41.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-21T14:38:09.000Z", "max_forks_repo_path": "_assets/pages/getting-started/learning-networks/code/ex16.jl", "max_forks_repo_name": "giordano/DataScienceTutorials.jl", "max_forks_repo_head_hexsha": "8284298842e0d77061cf8ee767d0899fb7d051ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-11-20T16:25:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-05T11:55:15.000Z", "avg_line_length": 35.0, "max_line_length": 51, "alphanum_fraction": 0.6571428571, "num_tokens": 26}
|
\font\mainfont=cmr10
\font\mi=cmti10
\font\subsectionfont=cmbx10
\font\sectionfont=cmbx12
\font\headingfont=cmbx14
\font\titlefont=cmbx16
\def\RCS$#1: #2 ${\expandafter\def\csname RCS#1\endcsname{#2}}
\def\heading#1{\noindent {\headingfont #1} \hfill\break}
\newcount\footnotes \footnotes=0
\def\footnoter#1{\advance\footnotes by 1 \footnote{$^{\the\footnotes}$}{\rm #1}}
\newcount\sectionnum \sectionnum=0
\newcount\subsectionnum \subsectionnum=0
\def\section#1{\advance\sectionnum by 1 \subsectionnum=0 \noindent {\sectionfont \the\sectionnum. #1} \hfill\break}
\def\subsection#1{\advance\subsectionnum by 1 \noindent {\subsectionfont \the\sectionnum.\the\subsectionnum. #1} \hfill\break}
\def\title#1{\centerline{\titlefont #1} \centerline{\sevenrm \RCSId} \vskip 12 pt}
\newcount\itemnum
\def\items{\advance\itemnum by 1 \itemitem {\the\itemnum)}}
\def\iDesk{{\mi iDesk}}
\def\iDesks{{\mi iDesks}}
\def\UOW{{\mi University of Wollongong}}
\parskip 12 pt
\parindent 24 pt
\title{HCI Decisions Report.}
\mainfont
\heading{Introduction.}
This report outlines the Human-Computer Interface decisions that were made in the design of the various screens used for the Login, Save, Load and Print subsystems, as well as in the design of the \iDesks\ graphical layout.
\heading{iDesk interface.}
The screen is split in half; each side of the screen may load any of the input methods, e.g. lecture notes, live video feed, personal notes, and so on. Each side is configurable as to what is displayed.
Below the two main windows is the subtitle window.
The icon menu, which is always displayed, is located at the top of the \iDesks\ interface, centred in relation to the maximal width of the \iDesk.
\heading{Screens.}
All actions apart from Login, such as Save, Load and Print, change the modality of the \iDesk\ interface so that the user is locked into using only the current foreground window.
\section{Login.}
The login subsystem has common interface elements that are shared by all of its states:
\itemnum=0
\items A subtle grey background to ensure that there are no incidents of floating text, as can occur with a solid black background.
\parskip 0 pt
\items Dark grey (shaded transparent) foreground windows to delineate the active parts of the screen from the inactive.
\items White text that contrasts well with the foreground windows.
\items Black boxes where the user is to enter information.
\items Small \UOW\ logo in the top left hand corner to brand the interface.
\items Main window centred to draw the user's attention.
\items A welcome message to reassure the user, located at the top of the main window.
\parskip 12 pt
\subsection{Prompt for user biometric scan.}
No cognitive burden is placed on the user: they are not required to recall any information.
Simple instructions under the welcome message in common language.
A red progress bar on a white background to indicate the progress of the biometric scan.
A black box with the scanned image of the thumb to show the user that the scan is taking place and that it is scanning their thumb.
\subsection{Prompt for user password.}
Simple instructions under the welcome message requesting that the user enter their password in a large black box in the main window.
As the password is really a scanned signature of the user, the authentication system uses a heuristic analysis of the signature to determine whether it really belongs to the user. To confirm what has been entered, the user's signature is displayed on the screen. The security of this arrangement is acceptable, as the risk of having a user's signature displayed is deemed to be lower than the probability that a person will be able to imitate that person's writing style and also fabricate their fingerprint.
\subsection{Authentication failure.}
Error message placed in a large black box in the main window, clearly stating the error and what the user must do to recover from it.
Simple red circle with an X in it, drawing the user's attention to the error message.
\subsection{Authorisation failure.}
Similar to the Authentication failure error screen, except that the message content relates to this particular error.
\subsection{Welcome.}
Text stating what the login subsystem is now doing.
A progress bar, similar to the one used in the biometric scan, shows the user the progress of loading their preferences.
\section{Save.}
The save dialog has been designed with many features in common with existing industry de facto interfaces, to aid the user by giving them a sense of familiarity and decreasing the cognitive burden of having to learn yet another way of doing something.
\subsection{Save screen.}
The save screen has been designed as a pop-up that appears in the middle of the \iDesk\ to draw the user's attention to the action they have selected. In addition, the pop-up is modal in the sense that the user can only interact with the current dialog.
The save pop-up itself has four sections: a selectable media side bar; a box describing the current contents of the selected media; a list of check boxes detailing which sections of the current session will be saved; and a drop-down text box where the user can manually enter the file name or choose from a pre-determined list of file names. There are two action buttons: the ``save'' button, which commits the user's action, and a ``cancel'' button, which exits the entire save sub-system.
\subsection{Save error pop-up.}
Simple small pop-up using the same error icon as with the Login sub-system's error messages.
Displays a message indicating to the user what the error was; they are left to their own devices on how to solve the problem.
The user is locked into pressing the ``OK'' button to acknowledge the error and get on with their \iDesk\ session.
\subsection{Save success pop-up.}
An informative pop-up window indicating that the save operation was successful.
As with the error pop-up, the user must acknowledge this message by pressing the ``OK'' button.
\section{Load.}
\subsection{Load screen.}
Identical to the Save dialog, except that the list of check boxes will have grayed-out options for those sections which do not exist in the current save file. The user may then select which sections from the file they desire.
\subsection{Load error pop-up.}
Identical to the Save error pop-up, except that the error message relates to the attempted load operation.
\subsection{Load success pop-up.}
Likewise identical to the Save success pop-up.
\section{Print.}
Similar to Save and Load in its modality.
Each part of the dialog which is not an information pop-up will have ``cancel'', ``back'' and ``next'' buttons, except as indicated in the relevant sections.
\subsection{Printer selection dialog.}
The top section will be a white box with selectable printer icons; when a printer has been selected, the information section below the printers is updated to contain the particular information of the selected printer.
The ``back'' button is grayed out in this part of the dialog as there is no previous screen in the dialog.
\subsection{Select data to print.}
The dialog changes to display four check boxes detailing the lecture material that can be selected for printing: notes, scans, and so on.
\subsection{Printer options dialog.}
Printer options: Page range, layout, scaling, copies \& collation.
Page range: All, current page, listed range in text box.
Layout: Zoom, as in the number of pages per printed sheet.
Scaling: Expand or shrink the material to fit the printed medium of the selected printer.
Copies \& collation: Text box to enter the desired number of copies, when used shall allow the user to select the ``collate'' check box, which is otherwise grayed out when there is only one copy desired.
\subsection{Print error pop-up.}
Similar to the error pop-ups used in the Save and Load subsections.
This section will encompass the ``No printers available.'' error.
\subsection{Print success pop-up.}
Similar to the success pop-ups of Save and Load.
\heading{Conclusion.}
As we have detailed in the above documentation, the major factor in the design of our interface for the \iDesk\ is to maximise ease of use and familiarity with existing interfaces, helping the user by not introducing new interface paradigms that require unnecessary effort to learn.
\bye
|
{"hexsha": "b8bad44b5e7d360e63ad8e13bd9299972f2deb9f", "size": 8437, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/UOW/CSCI324/ass5/HCI-report.tex", "max_stars_repo_name": "felx/detritus", "max_stars_repo_head_hexsha": "b64d28b47381ea1e8c6b5282910365dc4292d57f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/UOW/CSCI324/ass5/HCI-report.tex", "max_issues_repo_name": "felx/detritus", "max_issues_repo_head_hexsha": "b64d28b47381ea1e8c6b5282910365dc4292d57f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/UOW/CSCI324/ass5/HCI-report.tex", "max_forks_repo_name": "felx/detritus", "max_forks_repo_head_hexsha": "b64d28b47381ea1e8c6b5282910365dc4292d57f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-07-03T09:22:08.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-03T09:22:08.000Z", "avg_line_length": 44.6402116402, "max_line_length": 516, "alphanum_fraction": 0.7820315278, "num_tokens": 1938}
|
#include <bitset> // std::bitset
#include <cassert> // assert
#include <iostream> // std::cout
#include <map> // std::map<T,U>
#include <string> // std::string
#include <vector> // std::vector<T>
#include <seqan/sequence.h> // seqan::Dna5String
#include <boost/log/trivial.hpp> // BOOST_LOG_TRIVIAL macro
#include <graphtyper/graph/absolute_position.hpp>
#include <graphtyper/graph/graph.hpp>
#include <graphtyper/typer/alignment.hpp>
#include <graphtyper/typer/genotype_paths.hpp>
#include <graphtyper/typer/segment_calling.hpp>
#include <graphtyper/typer/vcf_writer.hpp>
#include <graphtyper/typer/vcf.hpp>
#include <graphtyper/utilities/sam_reader.hpp>
#include <graphtyper/utilities/io.hpp>
#include <graphtyper/utilities/graph_help_functions.hpp>
/*
namespace
{
void
print_explain_map(std::vector<std::string> const & hap_ids,
std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > const & explain_map,
int32_t index = -1,
int32_t test_index = -1)
{
// Index == -1 means all indexes will be printed
if (index == -1)
{
for (auto it = explain_map.begin(); it != explain_map.end(); ++it)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
std::cout.width(5);
std::cout << it->first << ": ";
for (auto const & explain_bitset : it->second)
{
// for (unsigned e = 0; e < explain_bitset.size(); ++e)
// {
// if (explain_bitset.test(e))
// {
// std::cout << e;
// break;
// }
//
// }
std::cout << explain_bitset.any();
}
std::cout << "\n";
}
}
else if (test_index == -1)
{
std::cout << index << ": ";
for (auto it = explain_map.begin(); it != explain_map.end(); ++it)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
std::cout << it->second[index].any() << " ";
}
std::cout << std::endl;
}
else
{
// std::cout << index << ": ";
for (auto it = explain_map.begin(); it != explain_map.end(); ++it)
{
if (static_cast<int64_t>(it->first) == index)
{
std::cout << test_index << ": ";
for (unsigned i = 0; i < it->second.size(); ++i)
{
if (it->second[i].test(test_index))
{
std::cout << hap_ids[i] << " ";
}
}
std::cout << std::endl;
break;
}
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
// std::cout << it->second[index].any() << " ";
}
std::cout << "\n";
}
}
void
insert_into_explain_map(std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > & explain_map,
std::pair<uint32_t, std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > const & var_explanation,
unsigned i,
std::size_t var_num)
{
auto find_it = explain_map.find(var_explanation.first);
if (find_it == explain_map.end())
{
// Not found
// std::cout << "[caller] INFO: Inserting new variant " << var_explanation.first << std::endl;
std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > new_vec(var_num);
new_vec[i] = var_explanation.second;
explain_map[var_explanation.first] = std::move(new_vec);
}
else
{
// Was found
find_it->second[i] |= var_explanation.second;
}
}
void
add_start_on_explain_map(std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > & explain_map)
{
std::vector<uint8_t> has_started(explain_map.begin()->second.size(), 0);
for (auto it = explain_map.begin(); it != explain_map.end(); ++it)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
for (unsigned i = 0; i < it->second.size(); ++i)
{
assert(it->second.size() == explain_map.begin()->second.size());
if (has_started[i])
{
continue;
}
else if (it->second[i].any())
{
has_started[i] = 1;
}
else
{
// Set all as true
it->second[i].set();
}
}
}
}
void
remove_insignificant_variants(
std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > & explain_map)
{
for (auto it = explain_map.cbegin(); it != explain_map.cend();)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
unsigned coverage = 0;
for (auto explain_bitset : it->second)
{
if (explain_bitset.any())
{
++coverage;
}
}
double static const FILTER = 0.2;
if (static_cast<double>(coverage) / static_cast<double>(it->second.size()) < FILTER)
{
// Remove if fraction of coverage is lower than FILTER
explain_map.erase(it++);
}
else
{
++it;
}
}
}
void
remove_out_of_order_variants(
std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > & exon_explain_map,
std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > & intron_explain_map)
{
if (exon_explain_map.size() == 0)
return;
BOOST_LOG_TRIVIAL(debug) << __HERE__ << " Removing out of order variants.";
// Find all unique variants
std::vector<uint32_t> uniq_variants;
for (auto it = exon_explain_map.cbegin(); it != exon_explain_map.cend(); ++it)
{
auto find_it = std::find(uniq_variants.begin(), uniq_variants.end(), it->first);
if (find_it == uniq_variants.end())
{
uniq_variants.push_back(it->first);
}
}
for (auto it = intron_explain_map.cbegin(); it != intron_explain_map.cend(); ++it)
{
auto find_it = std::find(uniq_variants.begin(), uniq_variants.end(), it->first);
if (find_it == uniq_variants.end())
{
uniq_variants.push_back(it->first);
}
}
// Sort the unique variants
std::sort(uniq_variants.begin(), uniq_variants.end());
assert(uniq_variants.size() > 0);
// Find the longest sequence of consecutive variants
unsigned i = 0;
uint32_t max_start_i = 0;
uint32_t max_end_i = 0;
while (i < uniq_variants.size() - 1)
{
uint32_t start_i = i;
uint32_t end_i = i + 1;
while (uniq_variants[i] + 1 == uniq_variants[i + 1] or uniq_variants[i] + 2 == uniq_variants[i + 1])
{
if (uniq_variants[i] + 2 == uniq_variants[i + 1])
{
++end_i;
++i;
}
++end_i;
++i;
}
if (end_i - start_i > max_end_i - max_start_i)
{
max_end_i = end_i;
max_start_i = start_i;
}
++i;
}
std::vector<uint32_t> longest_uniq_variants(uniq_variants.begin() + max_start_i, uniq_variants.begin() + max_end_i);
for (auto it = exon_explain_map.cbegin(); it != exon_explain_map.cend();)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
if (std::find(longest_uniq_variants.begin(), longest_uniq_variants.end(), it->first) == longest_uniq_variants.end())
{
// Remove the variant if it is not found in the longest unique variants vector
BOOST_LOG_TRIVIAL(debug) << __HERE__ << " Removing from exon " << it->first;
exon_explain_map.erase(it++);
}
else
{
++it;
}
}
for (auto it = intron_explain_map.cbegin(); it != intron_explain_map.cend();)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
if (std::find(longest_uniq_variants.begin(), longest_uniq_variants.end(), it->first) == longest_uniq_variants.end())
{
// Remove the variant if it is not found in the longest unique variants vector
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Removing from intron " << it->first;
intron_explain_map.erase(it++);
}
else
{
++it;
}
}
}
void
add_end_on_explain_map(std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > & explain_map)
{
std::vector<uint8_t> has_ended(explain_map.begin()->second.size(), 0);
for (auto it = explain_map.rbegin(); it != explain_map.rend(); ++it)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
assert(it->second.size() == explain_map.begin()->second.size());
for (unsigned i = 0; i < it->second.size(); ++i)
{
if (has_ended[i])
{
continue;
}
else if (it->second[i].any())
{
has_ended[i] = 1;
}
else
{
// Sets all as true
it->second[i].set();
}
}
}
}
std::size_t
determine_reference_index(std::map<uint32_t,
std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > const & exon_explain_map,
std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > const & intron_explain_map
)
{
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Determining reference index from explain maps.";
std::vector<uint32_t> ref_counts(intron_explain_map.begin()->second.size(), 0u);
for (auto it = exon_explain_map.cbegin(); it != exon_explain_map.cend(); ++it)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
for (unsigned i = 0; i < it->second.size(); ++i)
{
if (it->second[i].test(0))
{
assert(i < ref_counts.size());
++ref_counts[i];
}
}
}
for (auto it = intron_explain_map.cbegin(); it != intron_explain_map.cend(); ++it)
{
// Type of it->second is std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> >
for (unsigned i = 0; i < it->second.size(); ++i)
{
if (it->second[i].test(0))
{
assert(i < ref_counts.size());
++ref_counts[i];
}
}
}
int64_t max_ref_counts = -1;
std::size_t max_ref_counts_index = 0;
for (std::size_t i = 0; i < ref_counts.size(); ++i)
{
if (ref_counts[i] > max_ref_counts)
{
max_ref_counts_index = i;
max_ref_counts = ref_counts[i];
}
}
if (max_ref_counts < static_cast<int64_t>(exon_explain_map.size()) + static_cast<int64_t>(intron_explain_map.size()))
{
BOOST_LOG_TRIVIAL(warning) << "[graphtyper::segment_calling] No path is purely reference. " << max_ref_counts <<
" out of "
<< exon_explain_map.size() + intron_explain_map.size();
}
return max_ref_counts_index;
}
void
put_reference_in_front(std::map<uint32_t, std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > > & explain_map,
std::vector<std::string> & hap_ids,
std::size_t const ref_index,
bool const change_hap_ids
)
{
assert(hap_ids.size() > ref_index);
if (change_hap_ids)
{
std::string ref_str(hap_ids[ref_index]);
hap_ids.erase(hap_ids.begin() + ref_index);
hap_ids.insert(hap_ids.begin(), ref_str);
}
if (ref_index == 0)
{
return;
}
for (auto map_it = explain_map.begin(); map_it != explain_map.end(); ++map_it)
{
std::vector<std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> > explain_vec(map_it->second);
std::bitset<gyper::MAX_NUMBER_OF_HAPLOTYPES> ref_bitset(explain_vec[ref_index]);
explain_vec.erase(explain_vec.begin() + ref_index);
explain_vec.insert(explain_vec.begin(), ref_bitset);
map_it->second = std::move(explain_vec);
}
}
} // anon namespace
namespace gyper
{
void
segment_calling(std::vector<std::string> const & segment_fasta_files,
VcfWriter & writer,
std::string const & segment_path,
std::vector<std::string> const & samples)
{
assert(samples.size() > 0);
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Segment VCF is at " << segment_path;
Vcf segment_vcf(WRITE_BGZF_MODE, segment_path);
for (auto const & sample : samples)
segment_vcf.sample_names.push_back(sample);
// Update all maximum log scores
for (auto & haplototype : writer.haplotypes)
haplototype.update_max_log_score();
BOOST_LOG_TRIVIAL(debug) << __HERE__ << " Gathering segments from "
<< segment_fasta_files.size() << " segments.";
std::vector<Segment> segments;
using THapPaths = std::vector<GenotypePaths>;
// haplotype ID to a all its genotype paths results
std::vector<std::map<std::string, THapPaths> > all_haplotype_paths;
std::vector<std::vector<uint8_t> > has_long_exon; // haplotype ID to a all its genotype paths results
{
for (auto seg_it = segment_fasta_files.cbegin(); seg_it != segment_fasta_files.cend(); ++seg_it)
{
// Type of *seg_it is std::string, it is the fasta filename of the current segment
std::map<std::string, std::vector<seqan::Dna5String> > mhc_hap = read_haplotypes_from_fasta(*seg_it);
std::map<std::string, THapPaths> haplotype_paths;
for (auto hap_it = mhc_hap.cbegin(); hap_it != mhc_hap.cend(); ++hap_it)
{
std::cout << "ID " << hap_it->first << std::endl;
haplotype_paths[hap_it->first] = find_haplotype_paths(hap_it->second);
}
std::vector<uint8_t> gene_has_long_exons;
for (unsigned i = 0; i < mhc_hap.begin()->second.size(); ++i)
{
// if (i % 2 == 1 && seqan::length(mhc_hap.begin()->second[i]) >= 2 * K)
if (i % 2 == 1 && i < 10) // Only check exons 1-4
gene_has_long_exons.push_back(1u);
else
gene_has_long_exons.push_back(0u);
}
//if (gene_has_long_exons.size() == 1)
// gene_has_long_exons[0] = 1u;
has_long_exon.push_back(std::move(gene_has_long_exons));
all_haplotype_paths.push_back(std::move(haplotype_paths));
}
}
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Iterating paths of segments. ";
for (auto haplotype_paths_it = all_haplotype_paths.cbegin();
haplotype_paths_it != all_haplotype_paths.cend();
++haplotype_paths_it)
{
// Type of haplotype_paths_it is std::map<std::string, THapPaths>::iterator
std::vector<std::string> hap_ids;
using TExplainMap = std::map<uint32_t, std::vector<std::bitset<MAX_NUMBER_OF_HAPLOTYPES> > >;
TExplainMap exon_explain_map;
TExplainMap intron_explain_map;
{
int i = 0;
for (auto it = haplotype_paths_it->begin(); it != haplotype_paths_it->end(); ++i, ++it)
{
// Type of it->first is std::string
// Type of it->second is std::vector<std::vector<GenotypePaths> >
if (it->second.size() > 0)
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Name = " << it->first;
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Number of genotype paths = "
<< it->second.size();
std::size_t const k = std::distance(all_haplotype_paths.cbegin(), haplotype_paths_it);
// Previous path explanation
std::vector<std::vector<std::pair<uint32_t, std::bitset<MAX_NUMBER_OF_HAPLOTYPES> > > > path_explanations;
for (unsigned j = 0; j < it->second.size(); ++j)
{
GenotypePaths const & path = it->second[j];
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] INFO: "
<< it->first
<< ", index "
<< j << ", long exon? "
<< static_cast<uint16_t>(has_long_exon[k][j]);
if (it->second[j].paths.size() == 0)
{
if (has_long_exon[k][j])
{
BOOST_LOG_TRIVIAL(warning) << __HERE__ << " No path found for a long exon sequence";
}
else
{
BOOST_LOG_TRIVIAL(debug) << __HERE__ << " No path found for a short sequence or an intron";
}
}
else if (it->second[j].paths.size() == 1)
{
BOOST_LOG_TRIVIAL(debug) << __HERE__ << " Unique path found: "
<< absolute_pos.get_contig_position(it->second[j].paths[0].start_ref_reach_pos()).
second << "-"
<< absolute_pos.get_contig_position(it->second[j].paths[0].end_ref_reach_pos()).
second << " "
<< static_cast<uint64_t>(it->second[j].paths[0].mismatches);
}
else if (it->second[j].paths.size() > 1)
{
for (auto const & dup_path : it->second[j].paths)
{
BOOST_LOG_TRIVIAL(debug)
<< "[graphtyper::segment_calling] INFO: Multiple paths found: "
<< absolute_pos.get_contig_position(dup_path.start_ref_reach_pos()).second << "-"
<< absolute_pos.get_contig_position(dup_path.end_ref_reach_pos()).second;
}
}
std::vector<std::pair<uint32_t, std::bitset<MAX_NUMBER_OF_HAPLOTYPES> > > path_explanation;
writer.find_path_explanation(path, path_explanation);
path_explanations.push_back(std::move(path_explanation));
}
// Add to explain maps
for (unsigned j = 0; j < path_explanations.size(); ++j)
{
assert(it->second.size() == path_explanations.size());
for (unsigned p = 0; p < path_explanations[j].size(); ++p)
{
auto & var_explanation = path_explanations[j][p];
// Type of var_explanation is std::pair<uint32_t, std::bitset<MAX_NUMBER_OF_HAPLOTYPES> >
assert(k < has_long_exon.size());
assert(j < has_long_exon[k].size());
// std::cout << "[graphtyper::segment_calling] Var explain " << var_explanation.first << " ";
// std::cout << var_explanation.second.count() << " ";
//
// for (unsigned i = 0; i < 100; ++i)
// std::cout << var_explanation.second.test(i);
//
// std::cout << std::endl;
if (has_long_exon[k][j])
{
insert_into_explain_map(exon_explain_map, var_explanation, i, haplotype_paths_it->size());
}
else
{
insert_into_explain_map(intron_explain_map, var_explanation, i, haplotype_paths_it->size());
}
}
}
hap_ids.push_back(it->first);
}
}
// Resize all exon explain maps
for (auto & e : exon_explain_map)
e.second.resize(hap_ids.size());
// Resize all intron explain maps
for (auto & i : intron_explain_map)
i.second.resize(hap_ids.size());
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Done creating explain maps.";
// DEBUG
// print_explain_map(exon_explain_map);
// std::cout << std::endl;
// print_explain_map(intron_explain_map);
// DEBUG ENDS HERE
// Remove variants which only a small portion overlaps
if (intron_explain_map.size() == 0)
{
BOOST_LOG_TRIVIAL(error) << "Could not align any introns to the graph. Did you align to the correct graph?";
std::exit(1);
}
remove_out_of_order_variants(exon_explain_map, intron_explain_map);
remove_insignificant_variants(exon_explain_map);
remove_insignificant_variants(intron_explain_map);
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Done removing out of order and insignificant variants";
// This condition is required to avoid segfault!
if (exon_explain_map.size() > 0)
{
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] exon_explain_map sequences.size() = "
<< exon_explain_map.begin()->second.size();
add_start_on_explain_map(intron_explain_map);
add_end_on_explain_map(intron_explain_map);
}
else
{
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] exon_explain_map sequences is empty";
}
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] intron_explain_map sequences.size() = " <<
intron_explain_map.begin()->second.size();
//// DEBUG
//if (exon_explain_map.size() > 0)
//{
// print_explain_map(hap_ids, exon_explain_map, 19, 405);
// std::cout << std::endl;
//}
// if (intron_explain_map.size() > 0)
// print_explain_map(intron_explain_map);
// DEBUG ENDS HERE
// std::cout << "[caller] Overall number of haplotypes before removing is " << hap_ids.size() << std::endl;
// remove_non_existing_alleles(hap_ids, exon_explain_map, intron_explain_map);
// std::cout << "[caller] Overall number of haplotypes after removing is " << hap_ids.size() << std::endl;
std::size_t ref_index = determine_reference_index(exon_explain_map, intron_explain_map);
put_reference_in_front(exon_explain_map, hap_ids, ref_index, false); // Last parameter is change hap_ids
put_reference_in_front(intron_explain_map, hap_ids, ref_index, true);
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Generating scores with reference " << hap_ids[0];
// Create segment for these results
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Creating a new segment.";
assert(haplotype_paths_it->size() > 0);
assert(haplotype_paths_it->begin()->second.size() > 0);
unsigned s = 0;
unsigned e = 0;
while ((haplotype_paths_it->begin()->second.begin() + s)->longest_paths().size() == 0)
{
++s;
if (s == haplotype_paths_it->begin()->second.size())
{
BOOST_LOG_TRIVIAL(warning) << "[graphtyper::segment_calling] Could not find a segment which matched";
--s;
break;
}
assert(s < haplotype_paths_it->begin()->second.size());
}
while ((haplotype_paths_it->begin()->second.rbegin() + e)->longest_paths().size() == 0 &&
e != haplotype_paths_it->begin()->second.size() - 1)
{
++e;
}
assert((haplotype_paths_it->begin()->second.begin() + s)->longest_paths().size() > 0);
assert((haplotype_paths_it->begin()->second.rbegin() + e)->longest_paths().size() > 0); // e counts from the back, matching the loop above
Path const longest_path_start = (haplotype_paths_it->begin()->second.begin() + s)->longest_paths().front();
Path const longest_path_end = (haplotype_paths_it->begin()->second.rbegin() + e)->longest_paths().front();
int64_t seq_size = static_cast<int64_t>(longest_path_end.end_correct_pos()) -
static_cast<int64_t>(longest_path_start.start_correct_pos()) + 1;
uint32_t segment_start = longest_path_start.start_correct_pos();
BOOST_LOG_TRIVIAL(debug) << __HERE__ << " Segment sequence size = " << seq_size;
if (seq_size < 0)
{
seq_size = static_cast<int64_t>(longest_path_start.end_correct_pos()) -
static_cast<int64_t>(longest_path_end.start_correct_pos()) + 1;
segment_start = longest_path_end.start_correct_pos();
}
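// Worked example (hypothetical coordinates): a start path beginning at 1000 and an
// end path reaching 1499 give seq_size = 1499 - 1000 + 1 = 500; the branch above
// recomputes the span from the swapped endpoints whenever the first attempt comes
// out negative, i.e. when the two paths lie in the opposite order.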
BOOST_LOG_TRIVIAL(debug) << __HERE__ << " Number of hap_ids is " << hap_ids.size();
Segment seg(segment_start, static_cast<uint32_t>(seq_size), hap_ids);
std::vector<std::vector<uint32_t> > hap_scores(samples.size());
// Segment created
// Check the score of all the exons, if there are any exons (if we only have full sequences without features, we assume all the sequences are introns)
if (exon_explain_map.size() > 0)
{
for (uint32_t s = 0; s < samples.size(); ++s)
{
BOOST_LOG_TRIVIAL(debug) << __HERE__ << " Sample name is " << samples[s];
// std::cout << "derp function starting" << std::endl;
std::vector<uint32_t> hap_score = writer.explain_map_to_haplotype_scores(s, exon_explain_map);
// std::cout << "derp function done" << std::endl;
assert(hap_score.size() > 0);
auto max_score_it = std::max_element(hap_score.begin(), hap_score.end());
uint32_t const max_score = *max_score_it;
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Highest exon score is " << max_score;
// std::cout << "Best index should be " << to_pair(std::distance(hap_score.begin(), max_score_it)).first
// << "/" << to_pair(std::distance(hap_score.begin(), max_score_it)).second << std::endl;
std::vector<std::pair<uint32_t, uint32_t> > best_indexes;
for (uint32_t i = 0; i < hap_score.size(); ++i)
{
if (hap_score[i] >= max_score)
best_indexes.push_back(to_pair(i));
}
assert(best_indexes.size() > 0);
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Number of best indexes is " << best_indexes.size();
if (best_indexes.size() <= 100)
{
for (auto const & best_index : best_indexes)
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Best alleles: " << hap_ids[best_index.first] <<
"/" << hap_ids[best_index.second];
if (best_indexes.size() > 1)
{
BOOST_LOG_TRIVIAL(debug) <<
"[graphtyper::segment_calling] The best exon score is not unique, but there are at most 100 best exon scores";
// Add intron scores
std::vector<uint32_t> intron_scores(best_indexes.size(), 0);
uint32_t max_intron_score = 0;
uint32_t second_max_intron_score = 0;
for (unsigned i = 0; i < best_indexes.size(); ++i)
{
intron_scores[i] = writer.explain_map_specific_indexes_to_haplotype_scores(s,
best_indexes[i],
intron_explain_map);
if (intron_scores[i] > max_intron_score)
{
second_max_intron_score = max_intron_score;
max_intron_score = intron_scores[i];
}
else if (intron_scores[i] != max_intron_score && intron_scores[i] > second_max_intron_score)
{
// Also check if we found a larger second largest score
second_max_intron_score = intron_scores[i];
}
}
assert(second_max_intron_score <= max_intron_score);
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Max and second max intron score is "
<< max_intron_score << ", " << second_max_intron_score;
// Increase scores of alleles with the most likely introns
if (max_intron_score > 0)
{
for (unsigned i = 0; i < intron_scores.size(); ++i)
{
assert(intron_scores[i] <= max_intron_score);
if (intron_scores[i] == max_intron_score)
{
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Increasing scores of "
<< hap_ids[best_indexes[i].first] << "/"
<< hap_ids[best_indexes[i].second];
hap_score[to_index(best_indexes[i].first, best_indexes[i].second)] +=
std::max(10u, (max_intron_score - second_max_intron_score) / 2);
}
}
}
}
else if (best_indexes.size() == 1)
{
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Unique best exon score";
}
else
{
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] No best exon score";
}
}
else
{
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] There are more than 100 best exon scores";
}
// std::cout << "Inserting scores..." << std::endl;
seg.insert_score(hap_score);
// std::cout << "Done inserting scores" << std::endl;
}
}
else
{
for (uint32_t s = 0; s < samples.size(); ++s)
{
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Sample name is " << samples[s];
assert(intron_explain_map.size() > 0);
std::vector<uint32_t> hap_score = writer.explain_map_to_haplotype_scores(s, intron_explain_map);
assert(hap_score.size() > 0);
auto max_it = std::max_element(hap_score.begin(), hap_score.end());
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Highest total score is " << *max_it;
for (unsigned k = 0; k < hap_score.size(); ++k)
{
if (hap_score[k] == *max_it)
{
std::pair<uint16_t, uint16_t> calls = gyper::to_pair(k);
BOOST_LOG_TRIVIAL(debug) << "[graphtyper::segment_calling] Call with highest total score is "
<< hap_ids[calls.first] << "/" << hap_ids[calls.second];
}
}
seg.insert_score(hap_score);
}
}
segment_vcf.add_segment(std::move(seg));
}
if (segment_vcf.segments.size() > 0)
{
segment_vcf.open_for_writing();
segment_vcf.write_header();
segment_vcf.write_segments();
segment_vcf.close_vcf_file();
}
}
} // namespace gyper
*/
|
{"hexsha": "a4148d6c24e987e6cbd0395faac86a92aad74db4", "size": 29281, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/typer/segment_calling.cpp", "max_stars_repo_name": "h-2/graphtyper", "max_stars_repo_head_hexsha": "692eac909f00a888dcc14487fe57907ff88f6d17", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/typer/segment_calling.cpp", "max_issues_repo_name": "h-2/graphtyper", "max_issues_repo_head_hexsha": "692eac909f00a888dcc14487fe57907ff88f6d17", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/typer/segment_calling.cpp", "max_forks_repo_name": "h-2/graphtyper", "max_forks_repo_head_hexsha": "692eac909f00a888dcc14487fe57907ff88f6d17", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7755344418, "max_line_length": 154, "alphanum_fraction": 0.5964960213, "num_tokens": 7757}
|
\newlist{coloritemize}{itemize}{1}
\setlist[coloritemize]{label=\textcolor{itemizecolor}{\textbullet}}
\colorlet{itemizecolor}{.}% Default colour for \item in itemizecolor
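% Note: in xcolor the colour expression "." denotes the current text colour, so bullets inherit the surrounding colour until \colorlet overrides itemizecolor below.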
\setlength{\parindent}{0pt}% Just for this example
This is a LaTeX document holding the answers/questions from the 2014 to 2019 exam papers for the module 45630 -- Software Engineering.
This .pdf acts as a study aid for any student preparing for the 2 hr written paper held in May or August (repeat). Students must answer 3 of the 4
questions on the paper.
\colorlet{itemizecolor}{black}
\begin{coloritemize}
\item Black is the examiner's question
\end{coloritemize}
\colorlet{itemizecolor}{blue}
\begin{coloritemize}
\item Blue is my sample answer
\end{coloritemize}
\subsection{Software Engineering - Exam paper 2018-19 Semester 8}
\begin{enumerate}
\item Question 1 (100 marks)
\begin{itemize}
\item In Agile development, requirements are captured in terms of User Stories. The
attributes of a well-written user story can be summarised by the acronym SMART.
Explain what a SMART user story is. (40 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\item In light of the above, consider each of the following user stories for a new music
streaming service you’re developing called Tuneify. Describe any flaws these stories
have and explain how they might be improved, giving an example. (30 marks)\\
(i) Tuneify should have a responsive user interface\\
(ii) As a user, I should be able to easily find new music on Tuneify\\
\begin{coloritemize}
\item my answer
\end{coloritemize}
\item Explain how the Cucumber tool is used in Behaviour-Driven Development (BDD). (30 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\end{itemize}
\item Question 2 (100 marks)
\begin{itemize}
\item Discuss some important qualities of good automated tests. (40 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\item When writing automated tests, developers frequently make use of test doubles. Explain what test doubles are and why they are useful. (30 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\item Describe the role that unit, functional and integration tests play in an automated
testing strategy. (30 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\end{itemize}
\item Question 3 (100 marks)
\begin{itemize}
\item In the context of web applications, briefly explain what is meant by the following: (50 marks)\\
– Client-Server architecture\\
– HTTP is a stateless protocol\\
– Scalability\\
– ReST API\\
– Service-Oriented Architecture\\
\begin{coloritemize}
\item my answer
\end{coloritemize}
\item Developers make extensive use of frameworks like Ruby on Rails in developing applications, particularly for the web. Explain what a framework is, and discuss some
of the pros and cons of using frameworks. (25 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\item Ruby on Rails uses the MVC architecture. Explain the role of the 3 main components
in this architecture. (25 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\end{itemize}
\item Question 4 (100 marks)
\begin{itemize}
\item A major customer of your organisation is using an old product which is built from legacy
code. They have requested that a new feature be added, and your manager has asked
you to make the required changes to the legacy code, which, unfortunately, has very few
tests.\\ Two possible approaches to this task can be called the Edit and Pray approach and
the Cover and Modify approach. Discuss these possible approaches. (30 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\item When exploring the legacy code it becomes clear that some of it needs to be refactored. Explain what is meant by refactoring and discuss some of the reasons why
code may need to be refactored. (40 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\item Briefly explain the four main types of software maintenance. Which type of maintenance did your manager ask you to carry out in the hypothetical task above? (30 marks)
\begin{coloritemize}
\item my answer
\end{coloritemize}
\end{itemize}
\end{enumerate}
|
{"hexsha": "bd2ab531d6412be6d9c18d6b19a7abb0b298c0cf", "size": 4405, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "examPrepSoftwareEngineering-master/LaTeX-Project-WriteUp/chapters/2018-19SE.tex", "max_stars_repo_name": "OmalleyTomas98/4thYearPapersSolution-Master", "max_stars_repo_head_hexsha": "67d02f691de2f880d9cb6e5ba84accfcc42c2aa9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examPrepSoftwareEngineering-master/LaTeX-Project-WriteUp/chapters/2018-19SE.tex", "max_issues_repo_name": "OmalleyTomas98/4thYearPapersSolution-Master", "max_issues_repo_head_hexsha": "67d02f691de2f880d9cb6e5ba84accfcc42c2aa9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examPrepSoftwareEngineering-master/LaTeX-Project-WriteUp/chapters/2018-19SE.tex", "max_forks_repo_name": "OmalleyTomas98/4thYearPapersSolution-Master", "max_forks_repo_head_hexsha": "67d02f691de2f880d9cb6e5ba84accfcc42c2aa9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6995073892, "max_line_length": 174, "alphanum_fraction": 0.7416572077, "num_tokens": 1113}
|
import use_cases.utils.textools as tt
from use_cases.utils.comunas import get_comunas_id
import pandas as pd
import numpy as np
import re, os
def change_valid_to_bool(x):
if x == '1':
x = True
else:
x = False
return x
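# For example: change_valid_to_bool('1') -> True, while '0', '', or any other
# value maps to False.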
def create_table_dialogues(frame, filter):
new_frame = frame.copy()
filter = filter.rename(columns={'ID_diag': 'ID'})
new_frame['Grupo'] = tt.check_nan(new_frame['Grupo'])
new_frame = pd.merge(new_frame, filter, how="inner", on=["ID"])
new_frame = new_frame[['ID Archivo', 'Fecha', 'Hora Inicio',
'Hora Termino', 'Lugar', 'Dirección',
'Comuna', 'Participantes',
'Grupo', 'Valido']]
new_frame = tt.to_unicode(new_frame)
new_frame = tt.eliminate_nrs(new_frame)
new_frame = new_frame.rename(columns={'file_id':'diag_id'})
new_frame.columns =['id', 'date', 'init_time', 'end_time',
'location', 'address', 'comuna_id', 'n_members',
'group_name', 'valid']
new_frame = new_frame.apply(lambda x: get_comunas_id(x, 'comuna_id'), 1)
new_frame['valid'] = new_frame['valid'].apply(lambda x: change_valid_to_bool(x), 1)
return new_frame
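# A hedged usage sketch (hypothetical frames): given the raw dialogues frame and a
# filter frame carrying an 'ID_diag' column, the inner merge above keeps only the
# filtered dialogues:
# table = create_table_dialogues(raw_dialogues, filter_frame)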
|
{"hexsha": "ee972c59cda728993f0589e785748cc7f2ac85bf", "size": 1239, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing/use_cases/dialogues.py", "max_stars_repo_name": "MinCiencia/ECQQ", "max_stars_repo_head_hexsha": "f93a01ce2dd140d073bd81afb9b4733c1d8a34c3", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-10-05T00:55:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T10:56:14.000Z", "max_issues_repo_path": "preprocessing/use_cases/dialogues.py", "max_issues_repo_name": "MinCiencia/ECQQ", "max_issues_repo_head_hexsha": "f93a01ce2dd140d073bd81afb9b4733c1d8a34c3", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocessing/use_cases/dialogues.py", "max_forks_repo_name": "MinCiencia/ECQQ", "max_forks_repo_head_hexsha": "f93a01ce2dd140d073bd81afb9b4733c1d8a34c3", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-14T17:31:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T17:31:16.000Z", "avg_line_length": 32.6052631579, "max_line_length": 87, "alphanum_fraction": 0.6150121065, "include": true, "reason": "import numpy", "num_tokens": 319}
|
# -*- coding: utf-8 -*-
"""
.. module: pyAPES
:synopsis: APES-model component
.. moduleauthor:: Kersti Haahti
Model framework for Atmosphere-Plant Exchange Simulations
Created on Tue Oct 02 09:04:05 2018
Note:
migrated to python3
- print on the same line (via print(..., end=' '))
- dict.keys() now returns a view, but the keys are only iterated in for-each loops
References:
Launiainen, S., Katul, G.G., Lauren, A. and Kolari, P., 2015. Coupling boreal
forest CO2, H2O and energy flows by a vertically structured forest canopy –
Soil model with separate bryophyte layer. Ecological modelling, 312, pp.385-405.
To call the model, run a single simulation and read the results (see also the example in sandbox.py):
from tools.iotools import read_results
from pyAPES import driver
# for NetCDF-outputs
outputfile, model = driver(parameters, create_ncf=True, result_file='test.nc')
results = read_results(outputfile) # opens NetCDF-file using xarray
# for returning results directly
results, model = driver(parameters, create_ncf=False) # dict keyed by simulation index
results = results[0] # first simulation
LAST EDIT: 15.1.2020 Samuli Launiainen
* new forestfloor and altered outputs
Todo:
- make minimal example of handling and plotting outputs using xarray -tools;
now see tools.iotools.read_forcing for documentation!
"""
import time
import logging
import numpy as np
from pandas import date_range
from tools.iotools import initialize_netcdf, write_ncf
from canopy.canopy import CanopyModel
from soil.soil import Soil
from canopy.constants import WATER_DENSITY
def driver(parameters,
create_ncf=False,
result_file=None):
"""
Reads parameters as argument, prepares output files, runs model.
Args:
parameters (dict/list): either single parameter dictionary or list of parameters
create_ncf (bool): results saved to netCDF4 file
result_file (str): name of result file
"""
# --- CONFIGURATION PARAMETERS of LOGGING and NetCDF -outputs read
from parameters.outputs import output_variables, logging_configuration
from logging.config import dictConfig
# --- LOGGING ---
dictConfig(logging_configuration)
logger = logging.getLogger(__name__)
# --- CHECK PARAMETERS ---
if isinstance(parameters, dict):
Nsim = 1
parameters = [parameters]
elif isinstance(parameters, list):
Nsim = len(parameters)
else:
raise TypeError('Parameters should be either dict or list.')
logger.info('Simulation started. Number of simulations: {}'.format(Nsim))
# --- SIMULATIONS AND OUTPUTS ---
tasks = []
for k in range(Nsim):
tasks.append(
Model(
parameters[k]['general']['dt'],
parameters[k]['canopy'],
parameters[k]['soil'],
parameters[k]['forcing'],
output_variables['variables'],
nsim=k
)
)
if create_ncf: # outputs to NetCDF-file, returns filename
gpara = parameters[0]['general'] # same for all tasks
timestr = time.strftime('%Y%m%d%H%M')
if result_file:
filename = result_file
else:
filename = timestr + '_pyAPES_results.nc'
#freq = '{}S'.format(gpara['dt'])
#time_index = date_range(gpara['start_time'], gpara['end_time'], freq=freq, closed='left')
time_index = parameters[0]['forcing'].index
ncf, _ = initialize_netcdf(
output_variables['variables'],
Nsim,
tasks[k].Nsoil_nodes,
tasks[k].Ncanopy_nodes,
tasks[k].Nplant_types,
tasks[k].Nground_types,
time_index=time_index,
filepath=gpara['results_directory'],
filename=filename)
for task in tasks:
logger.info('Running simulation number (start time %s): %s' % (
time.strftime('%Y-%m-%d %H:%M'), task.Nsim))
running_time = time.time()
results = task.run()
logger.info('Running time %.2f seconds' % (time.time() - running_time))
write_ncf(nsim=task.Nsim, results=results, ncf=ncf)
del results
output_file = gpara['results_directory'] + filename
logger.info('Ready! Results are in: ' + output_file)
ncf.close()
return output_file, tasks[0]
else: # returns dictionary of outputs
running_time = time.time()
results = {task.Nsim: task.run() for task in tasks}
logger.info('Running time %.2f seconds' % (time.time() - running_time))
return results, tasks[0] # this would return also 1st Model instance
class Model(object):
"""
pyAPES - main model class.
Combines submodels 'CanopyModel' and 'Soil' and handles data-transfer
between these model components and writing results.
Last edit: SL 13.01.2020
"""
def __init__(self,
dt,
canopy_para,
soil_para,
forcing,
outputs,
nsim=0):
logger = logging.getLogger(__name__)
self.dt = dt
self.Nsteps = len(forcing)
self.forcing = forcing
self.Nsim = nsim
self.Nsoil_nodes = len(soil_para['grid']['dz'])
self.Ncanopy_nodes = canopy_para['grid']['Nlayers']
# create soil model instance
self.soil = Soil(soil_para)
if 'Wa' in forcing and soil_para['water_model']['solve'] is False:
logger.info("Soil moisture from forcing file")
soil_para['water_model']['initial_condition']['volumetric_water_content'] = (
forcing['Wa'].iloc[0])
if 'Tsa' in forcing and soil_para['heat_model']['solve'] is False:
logger.info("Soil temperature from forcing file")
soil_para['heat_model']['initial_condition']['temperature'] = (
forcing['Tsa'].iloc[0])
# create canopy model instance
# initial delayed temperature and degreedaysum for pheno & LAI-models
if canopy_para['ctr']['pheno_cycle'] and 'X' in forcing:
for pt in list(canopy_para['planttypes'].keys()):
canopy_para['planttypes'][pt]['phenop'].update({'Xo': forcing['X'].iloc[0]})
if canopy_para['ctr']['seasonal_LAI'] and 'DDsum' in forcing:
for pt in list(canopy_para['planttypes'].keys()):
canopy_para['planttypes'][pt]['laip'].update({'DDsum0': forcing['DDsum'].iloc[0]})
self.canopy_model = CanopyModel(canopy_para, self.soil.grid['dz'])
self.Nplant_types = len(self.canopy_model.planttypes)
self.Nground_types = len(self.canopy_model.forestfloor.bottomlayer_types)
# initialize structure to save results
self.results = _initialize_results(outputs,
self.Nsteps,
self.Nsoil_nodes,
self.Ncanopy_nodes,
self.Nplant_types,
self.Nground_types)
def run(self):
"""
Loops through self.forcing and appends to self.results.
self.forcing variables and units; correspond to uppermost gridpoint:
precipitation [kg m-2 s-1]
air_pressure [Pa]
air_temperature [degC]
wind_speed [m/s]
friction_velocity [m/s]
h2o[mol/mol]
co2 [ppm]
zenith_angle [rad]
lw_in: Downwelling long wave radiation [W/m2]
diffPar: Diffuse PAR [W/m2]
dirPar: Direct PAR [W/m2]
diffNir: Diffuse NIR [W/m2]
dirNir: Direct NIR [W/m2]
"""
logger = logging.getLogger(__name__)
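# A sketch of how one timestep's forcing is read below (hypothetical values),
# assuming self.forcing is a pandas DataFrame indexed by time:
# self.forcing['U'].iloc[k] -> wind speed at step k [m s-1]
# self.forcing['Prec'].iloc[k] -> precipitation at step k [kg m-2 s-1]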
logger.info('Running simulation {}'.format(self.Nsim))
time0 = time.time()
#print('RUNNING')
k_steps = np.arange(0, self.Nsteps, max(1, self.Nsteps // 10)) # progress markers roughly every 10%; guard against a zero step for short runs
for k in range(0, self.Nsteps):
# --- print progress on screen
if k in k_steps[:-1]:
s = str(np.where(k_steps==k)[0][0]*10) + '%'
print('{0}..'.format(s), end=' ')
# --- CanopyModel ---
# run daily loop: updates LAI, phenology and moisture stress ---
if k == 0 or self.forcing['doy'].iloc[k] != self.forcing['doy'].iloc[k-1]: # short-circuit so k == 0 never compares against iloc[-1]
self.canopy_model.run_daily(
self.forcing['doy'].iloc[k],
self.forcing['Tdaily'].iloc[k])
# compile forcing dict for canopy model: soil_ refers to state of soil model
canopy_forcing = {
'wind_speed': self.forcing['U'].iloc[k], # [m s-1]
'friction_velocity': self.forcing['Ustar'].iloc[k], # [m s-1]
'air_temperature': self.forcing['Tair'].iloc[k], # [deg C]
'precipitation': self.forcing['Prec'].iloc[k], # [kg m-2 s-1]
'h2o': self.forcing['H2O'].iloc[k], # [mol mol-1]
'co2': self.forcing['CO2'].iloc[k], # [ppm]
'PAR': {'direct': self.forcing['dirPar'].iloc[k], # [W m-2]
'diffuse': self.forcing['diffPar'].iloc[k]},
'NIR': {'direct': self.forcing['dirNir'].iloc[k], # [W m-2]
'diffuse': self.forcing['diffNir'].iloc[k]},
'lw_in': self.forcing['LWin'].iloc[k], # [W m-2]
'air_pressure': self.forcing['P'].iloc[k], # [Pa]
'zenith_angle': self.forcing['Zen'].iloc[k], # [rad]
# from soil model
'soil_temperature': self.soil.heat.T[self.canopy_model.ix_roots], # [deg C]
'soil_water_potential': self.soil.water.h[self.canopy_model.ix_roots], # [m] ?
'soil_volumetric_water': self.soil.heat.Wliq[self.canopy_model.ix_roots], # [m3 m-3]
'soil_volumetric_air': self.soil.heat.Wair[self.canopy_model.ix_roots], # [m3 m-3]
'soil_pond_storage': self.soil.water.h_pond * WATER_DENSITY, # [kg m-2]
}
canopy_parameters = {
'soil_depth': self.soil.grid['z'][0], # [m]
'soil_hydraulic_conductivity': self.soil.water.Kv[self.canopy_model.ix_roots], # [m s-1]
'soil_thermal_conductivity': self.soil.heat.thermal_conductivity[0], # [W m-1 K-1]?
'date': self.forcing.index[k] # pd.datetime
}
# call self.canopy_model.run to solve above-ground part
out_canopy, out_planttype, out_ffloor, out_groundtype = self.canopy_model.run(
dt=self.dt,
forcing=canopy_forcing,
parameters=canopy_parameters
)
# --- Soil model ---
# compile forcing for Soil: potential infiltration and evaporation are taken at the ground surface
# water fluxes must be in [m s-1]
soil_forcing = {
'potential_infiltration': out_ffloor['throughfall'] / WATER_DENSITY,
'potential_evaporation': ((out_ffloor['soil_evaporation'] +
out_ffloor['capillary_rise']) / WATER_DENSITY),
'pond_recharge': out_ffloor['pond_recharge'] / WATER_DENSITY,
'atmospheric_pressure_head': -1.0E6, # set to a large negative value because potential_evaporation already accounts for h_soil
'ground_heat_flux': -out_ffloor['ground_heat'],
'date': self.forcing.index[k]}
if 'Ws' in self.forcing and self.soil.solve_water is False:
soil_forcing.update({
'state_water':{'volumetric_water_content': self.forcing['Ws'].iloc[k]}})
if 'Tsa' in self.forcing and self.soil.solve_heat is False:
soil_forcing.update({
'state_heat':{'temperature': self.forcing['Tsa'].iloc[k]}})
# call self.soil to solve below-ground water and heat flow
soil_flux, soil_state = self.soil.run(
dt=self.dt,
forcing=soil_forcing,
water_sink=out_canopy['root_sink'])
# --- append results and copy of forcing to self.results
forcing_output = {
'wind_speed': self.forcing['U'].iloc[k],
'friction_velocity': self.forcing['Ustar'].iloc[k],
'air_temperature': self.forcing['Tair'].iloc[k],
'precipitation': self.forcing['Prec'].iloc[k],
'h2o': self.forcing['H2O'].iloc[k],
'co2': self.forcing['CO2'].iloc[k],
'pressure': self.forcing['P'].iloc[k],
'par': self.forcing['dirPar'].iloc[k] + self.forcing['diffPar'].iloc[k],
'nir': self.forcing['dirNir'].iloc[k] + self.forcing['diffNir'].iloc[k],
'lw_in': self.forcing['LWin'].iloc[k]
}
soil_state.update(soil_flux)
self.results = _append_results('forcing', k, forcing_output, self.results)
self.results = _append_results('canopy', k, out_canopy, self.results)
self.results = _append_results('ffloor', k, out_ffloor, self.results)
self.results = _append_results('soil', k, soil_state, self.results)
self.results = _append_results('pt', k, out_planttype, self.results)
self.results = _append_results('gt', k, out_groundtype, self.results)
print('100%')
ptnames = [pt.name for pt in self.canopy_model.planttypes]
self.results = _append_results('canopy', None, {'z': self.canopy_model.z,
'planttypes': np.array(ptnames)}, self.results)
gtnames = [gt.name for gt in self.canopy_model.forestfloor.bottomlayer_types]
self.results = _append_results('ffloor', None, {'groundtypes': np.array(gtnames)}, self.results)
self.results = _append_results('soil', None, {'z': self.soil.grid['z']}, self.results)
logger.info('Finished simulation %.0f, running time %.2f seconds' % (self.Nsim, time.time() - time0))
return self.results
def _initialize_results(variables, Nstep, Nsoil_nodes, Ncanopy_nodes, Nplant_types, Nground_types):
"""
Creates temporary results dictionary to accumulate simulation results
SL 12.11.2019: removed if 'date' in dimensions and added option to save planttype profiles
"""
results = {}
for var in variables:
var_name = var[0]
dimensions = var[2]
if 'canopy' in dimensions:
if 'planttype' in dimensions:
var_shape = [Nstep, Nplant_types, Ncanopy_nodes]
else:
var_shape = [Nstep, Ncanopy_nodes]
elif 'soil' in dimensions:
var_shape = [Nstep, Nsoil_nodes]
elif 'planttype' in dimensions and 'canopy' not in dimensions:
var_shape = [Nstep, Nplant_types]
elif 'groundtype' in dimensions:
if 'date' not in dimensions:
var_shape = [Nground_types]
else:
var_shape = [Nstep, Nground_types]
else:
var_shape = [Nstep]
results[var_name] = np.full(var_shape, np.NAN)
# print(var_name, var_shape, dimensions)
return results
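# For example, a (hypothetical) output spec ('canopy_Tleaf', <unit>, ['date', 'canopy'])
# is allocated above as a [Nstep, Ncanopy_nodes] array of NaNs; the middle element of
# the spec is not used here.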
def _append_results(group, step, step_results, results):
"""
Adds results from each simulation steps to temporary results dictionary
"""
results_keys = results.keys()
step_results_keys = step_results.keys()
for key in step_results_keys:
variable = group + '_' + key
if variable in results_keys:
if key == 'z' or key == 'planttypes' or key == 'groundtypes':
results[variable] = step_results[key]
else:
#print(variable, key, np.shape(results[variable][step]), np.shape(step_results[key]))
results[variable][step] = step_results[key]
return results
#if __name__ == '__main__':
#
# from parameters.parametersets import lettosuo_parameters
# outputfile=driver(create_ncf=True, parametersets=lettosuo_parameters)
#
# print(outputfile)
|
{"hexsha": "2f85e2431e795b8f9dfbbc1af603da4f24935e87", "size": 16459, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyAPES.py", "max_stars_repo_name": "LukeEcomod/pyAPES_VESBO", "max_stars_repo_head_hexsha": "fdb4f44907e3055eb42db4a1260e0d7b9c55b415", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-21T16:33:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T05:13:14.000Z", "max_issues_repo_path": "pyAPES.py", "max_issues_repo_name": "LukeEcomod/pyAPES_VESBO", "max_issues_repo_head_hexsha": "fdb4f44907e3055eb42db4a1260e0d7b9c55b415", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyAPES.py", "max_forks_repo_name": "LukeEcomod/pyAPES_VESBO", "max_forks_repo_head_hexsha": "fdb4f44907e3055eb42db4a1260e0d7b9c55b415", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0950118765, "max_line_length": 132, "alphanum_fraction": 0.5780424084, "include": true, "reason": "import numpy", "num_tokens": 3873}
|
import argparse
import glob
import os
import numpy as np
import importlib
from ast import literal_eval
from xwavecal.utils.fits_utils import Translator
def parse_args(args=None):
parser = argparse.ArgumentParser(description='Reduce an xwavecal spectrograph frame.')
parser.add_argument("--output-dir", required=True,
help="Directory within which to save the processed data files.")
parser.add_argument("--input-dir", required=False, default=None,
help="Directory which contains the raw data files.")
parser.add_argument('--data-paths', nargs='+', required=False, default=None,
help="path(s) to data, usage: '--data-paths path/to/first.fits path/to/second.fits'")
parser.add_argument("--fpack", required=False, action='store_true',
help="fpack output files with the default quantization.")
parser.add_argument("--config-file", required=True,
help="Path to the instrument specific configuration file.")
parser.add_argument("--frame-type", required=False, default='any',
help="Frame type to either fit traces to or wavelength calibrate. "
"Make sure frame type settings are appropriately set in the config file. "
"lampflat files are used for tracing, wavecals are wavelength calibration "
"frames such as ThAr exposures. Must agree with the frame names in [stages], "
"e.g. lampflat, wavecal etc. Ignore to reduce all valid files.",
type=str.lower)
args = parser.parse_args(args)
if args.data_paths is None and args.input_dir is None:
raise ValueError('both input_dir and data_paths are None. Must specify raw data or a directory of raw data to process.')
if not os.path.exists(args.config_file):
raise FileNotFoundError('{0} not found.'.format(args.config_file))
return args
def get_data_paths(dir_path, files_contain=None):
all_files = glob.glob(os.path.join(dir_path, '*'))
return [file for file in all_files if all([item in file for item in files_contain])]
def order_data(data_paths, data_class, primary_ext, header_keys, type_keys):
translator = Translator(header_keys, type_keys)
is_not_lampflat = lambda path: 0 if data_class.load(path, primary_ext, translator).get_header_val('type') == 'lampflat' else 1
data_paths = list(data_paths)
if len(data_paths) > 0:
data_paths.sort(key=is_not_lampflat)
return data_paths
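# For example: ['x_wavecal.fits', 'y_lampflat.fits'] (hypothetical names) is
# reordered so the lampflat comes first, since lampflats sort with key 0 and
# every other frame type with key 1.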
def select_data_of_type(data_paths, data_class, primary_ext, header_keys, type_keys, frame_type='any'):
is_type = lambda x: x == frame_type
if frame_type == 'any':
is_type = lambda x: type(x) is str
translator = Translator(header_keys, type_keys)
correct = lambda path: 1 if is_type(data_class.load(path, primary_ext, translator).get_header_val('type')) else 0
return np.array(data_paths)[np.where([correct(path) for path in data_paths])]
def import_obj(full_class_string):
"""
dynamically import a class or function from a string
"""
class_data = full_class_string.split(".")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str)
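# For example: import_obj('os.path.join') imports the os.path module and returns
# its join function.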
def safe_eval(item):
"""
:param item: str, int or dict
:return: Any strings have erroneous leading " or ' removed.
"""
out = literal_eval(item)
if isinstance(out, str):
return out.replace("'", '').replace('"', '')
return out
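# Examples: safe_eval("3") -> 3 (an int) and safe_eval("'abc'") -> 'abc', while
# safe_eval("\"'abc'\"") -> 'abc' because the stray inner quotes are stripped.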
|
{"hexsha": "ea44e17be3e424eeb6b2430d99546bcebf484bbb", "size": 3656, "ext": "py", "lang": "Python", "max_stars_repo_path": "xwavecal/utils/runtime_utils.py", "max_stars_repo_name": "gmbrandt/echelle", "max_stars_repo_head_hexsha": "7e6678cd541ccf025fc187eca7f1344efe85f265", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xwavecal/utils/runtime_utils.py", "max_issues_repo_name": "gmbrandt/echelle", "max_issues_repo_head_hexsha": "7e6678cd541ccf025fc187eca7f1344efe85f265", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xwavecal/utils/runtime_utils.py", "max_forks_repo_name": "gmbrandt/echelle", "max_forks_repo_head_hexsha": "7e6678cd541ccf025fc187eca7f1344efe85f265", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.0481927711, "max_line_length": 130, "alphanum_fraction": 0.6695842451, "include": true, "reason": "import numpy", "num_tokens": 783}
|
import numpy as np
import logging
from scipy.ndimage import zoom
logging.basicConfig(level=logging.INFO)
from synbols.data_io import pack_dataset
from synbols.drawing import Camouflage, color_sampler, Gradient, ImagePattern, NoPattern, SolidColor
from synbols.generate import generate_char_grid, dataset_generator, basic_attribute_sampler, add_occlusion, \
flatten_mask_except_first
from synbols.fonts import LANGUAGE_MAP
from synbols.generate import rand_seed
from synbols.visualization import plot_dataset
import matplotlib.pyplot as plt
font_list = """\
jotione
lovedbytheking
flavors
mrbedfort
butterflykids
newrocker
smokum
jimnightshade
""".splitlines()
def make_image(attr_sampler, file_name):
x, _, y = pack_dataset(dataset_generator(attr_sampler, 1000))
plot_dataset(x, y, h_axis='font', v_axis='char')
plt.savefig(file_name)
def savefig(file_name):
plt.savefig(file_name, dpi=300, bbox_inches='tight', pad_inches=0)
def show_fonts(seed):
rng = np.random.RandomState(seed)
def attr_sampler():
for char in 'abCD':
for font in font_list:
yield basic_attribute_sampler(
alphabet=LANGUAGE_MAP['english'], char=char, font=font, is_bold=False, is_slant=False,
resolution=(128, 128), pixel_noise_scale=0)(seed=rand_seed(rng))
x, _, y = pack_dataset(dataset_generator(attr_sampler(), 1000))
plot_dataset(x, y, h_axis='font', v_axis='char', hide_axis=True)
# savefig('fonts.png')
def show_languages(seed):
language_list = ['korean',
'chinese',
'telugu',
'thai',
'gujarati',
'arabic',
'tamil',
'russian']
rng = np.random.RandomState(seed)
def attr_sampler():
for lang in language_list:
alphabet = LANGUAGE_MAP[lang].get_alphabet()
for i in range(4):
yield basic_attribute_sampler(
alphabet=alphabet, char=lambda rng: rng.choice(alphabet.symbols),
font=lambda rng: rng.choice(alphabet.fonts),
is_bold=False, is_slant=False, resolution=(128, 128), pixel_noise_scale=0)(seed=rand_seed(rng))
x, _, y = pack_dataset(dataset_generator(attr_sampler(), 1000))
h_values, v_values = plot_dataset(x, y, h_axis='alphabet', v_axis=None, n_col=len(language_list), n_row=4,
hide_axis=True)
# map = {'chinese-simplified': 'chinese'}
# h_values = [map.get(val, val) for val in h_values]
ax = plt.gca()
ax.set_xticks((np.arange(len(h_values)) + 0.5) * x.shape[1])
ax.set_xticklabels(h_values, rotation=0)
ax.get_xaxis().set_visible(True)
plt.xlabel('')
# savefig('language.png')
def show_background(seed):
rng = np.random.RandomState(seed)
kwargs = dict(resolution=(128, 128), alphabet=LANGUAGE_MAP['english'].get_alphabet(), char='a', inverse_color=False,
pixel_noise_scale=0)
attr_list = [
basic_attribute_sampler(background=SolidColor((0.2, 0.2, 0)), foreground=SolidColor((0.8, 0, 0.8)), **kwargs),
basic_attribute_sampler(background=lambda _rng: Gradient(types=('radial',), seed=rand_seed(_rng)),
foreground=lambda _rng: Gradient(types=('radial',), seed=rand_seed(_rng)),
**kwargs),
basic_attribute_sampler(background=lambda _rng: Camouflage(stroke_angle=np.pi / 4, seed=rand_seed(_rng)),
foreground=lambda _rng: Camouflage(stroke_angle=np.pi * 3 / 4, seed=rand_seed(_rng)),
**kwargs),
basic_attribute_sampler(background=lambda _rng: ImagePattern(seed=rand_seed(_rng)),
foreground=lambda _rng: ImagePattern(seed=rand_seed(_rng)),
**kwargs),
add_occlusion(basic_attribute_sampler(**kwargs), n_occlusion=3,
scale=lambda _rng: 0.3 * np.exp(_rng.randn() * 0.1),
translation=lambda _rng: tuple(_rng.rand(2) * 2 - 1))
]
def attr_sampler():
for attr in attr_list:
yield attr(seed=rand_seed(rng))
x, _, y = pack_dataset(dataset_generator(attr_sampler(), 1000, flatten_mask_except_first))
plot_dataset(x, y, h_axis='scale', v_axis=None, n_col=5, n_row=1, hide_axis=True)
ax = plt.gca()
ax.set_xticks((np.arange(5) + 0.5) * x.shape[1])
ax.set_xticklabels(['Solid', 'Gradient', 'Camouflage', 'Natural', 'Occlusions'], rotation=0)
ax.get_xaxis().set_visible(True)
plt.xlabel('')
# savefig('background.png')
def pack_dataset_resample(generator, resolution=128):
"""Turn the output of a generator of (x, mask, y) tuples into a numpy array containing the full dataset, resampled to a common resolution"""
x, mask, y = zip(*generator)
x = [zoom(img, (resolution / img.shape[0],) * 2 + (1,), order=0) for img in x]
return np.stack(x), y
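# For example (hypothetical shapes): a 64x64x3 image is zoomed by a factor of
# 128/64 = 2 on both spatial axes with order=0 (nearest neighbour), so every
# sample stacks into a single (N, 128, 128, 3) array.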
def show_resolution(seed):
rng = np.random.RandomState(seed)
kwargs = dict(alphabet=LANGUAGE_MAP['english'].get_alphabet(), is_bold=False, is_slant=False,
inverse_color=False, pixel_noise_scale=0)
attr_list = [
basic_attribute_sampler(resolution=(8, 8), char='b', font='arial', scale=0.9, rotation=0,
background=SolidColor((0, 0, 0)),
foreground=SolidColor((0.5, 0.5, 0)), **kwargs),
basic_attribute_sampler(resolution=(16, 16), char='x', font='time', scale=0.7, **kwargs),
basic_attribute_sampler(resolution=(32, 32), char='g', font='flavors', scale=0.6, rotation=1, **kwargs),
basic_attribute_sampler(resolution=(64, 64), scale=0.3, n_symbols=5, **kwargs),
basic_attribute_sampler(resolution=(128, 128), scale=0.1, n_symbols=30, **kwargs),
]
def attr_sampler():
for attr in attr_list:
yield attr(seed=rand_seed(rng))
x, y = pack_dataset_resample(dataset_generator(attr_sampler(), 1000))
plot_dataset(x, y, h_axis='rotation', v_axis=None, n_col=5, n_row=1, hide_axis=True)
ax = plt.gca()
ax.set_xticks((np.arange(len(attr_list)) + 0.5) * x.shape[1])
ax.set_xticklabels(['8 x 8', '16 x 16', '32 x 32', '64 x 64', '128 x 128'], rotation=0)
ax.get_xaxis().set_visible(True)
plt.xlabel('')
# savefig('resolution.png')
def alphabet_sizes():
for name, alphabet in LANGUAGE_MAP.items():
print(name, len(alphabet.symbols))
if __name__ == "__main__":
# plt.figure('languages', figsize=(5, 3))
# show_languages()
#
# plt.figure('fonts', figsize=(5, 3))
# show_fonts()
#
# plt.figure('resolution', figsize=(5, 3))
# show_resolution()
#
# plt.figure('background', figsize=(5, 3))
# show_background()
# alphabet_sizes()
for i in range(1):
plt.figure('group %d' % i, figsize=(10, 6))
plt.subplot(2, 2, 1)
show_fonts(6)
plt.title('a) fonts')
plt.subplot(2, 2, 2)
show_languages(3)
plt.title('b) languages')
plt.subplot(2, 2, 3)
show_resolution(1)
plt.title('c) resolution')
plt.subplot(2, 2, 4)
show_background(2)
plt.title('d) background and foreground')
savefig('group %d.png' % i)
# plt.show()
|
{"hexsha": "14dab4cb7f7757749b562d0aa7616694a5195ba5", "size": 7417, "ext": "py", "lang": "Python", "max_stars_repo_path": "results/paper_images.py", "max_stars_repo_name": "danwley/synbols-benchmarks", "max_stars_repo_head_hexsha": "799f85c4bf6a84e0f6b6ad05878bc21c2d40e4c9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-11-18T02:21:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-22T16:49:46.000Z", "max_issues_repo_path": "results/paper_images.py", "max_issues_repo_name": "danwley/synbols-benchmarks", "max_issues_repo_head_hexsha": "799f85c4bf6a84e0f6b6ad05878bc21c2d40e4c9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "results/paper_images.py", "max_forks_repo_name": "danwley/synbols-benchmarks", "max_forks_repo_head_hexsha": "799f85c4bf6a84e0f6b6ad05878bc21c2d40e4c9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-03-11T00:51:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-22T13:58:37.000Z", "avg_line_length": 35.8309178744, "max_line_length": 120, "alphanum_fraction": 0.6180396387, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1871}
|
SUBROUTINE ULAMSPIRAL(START,ORDER) !Idle scribbles can lead to new ideas.
Careful with phasing: each lunge's first number is the second placed along its direction.
INTEGER START !Usually 1.
INTEGER ORDER !MUST be an odd number, so there is a middle.
INTEGER L,M,N !Counters.
INTEGER STEP,LUNGE !In some direction.
COMPLEX WAY,PLACE !Just so.
CHARACTER*1 SPLOT(0:1) !Tricks for output.
PARAMETER (SPLOT = (/" ","*"/)) !Selected according to ISPRIME(n)
INTEGER TILE(ORDER,ORDER) !Work area.
WRITE (6,1) START,ORDER !Here we go.
1 FORMAT ("Ulam spiral starting with ",I0,", of order ",I0,/)
IF (MOD(ORDER,2) .NE. 1) STOP "The order must be odd!" !Otherwise, out of bounds.
M = ORDER/2 + 1 !Find the number of the middle.
PLACE = CMPLX(M,M) !Start there.
WAY = (1,0) !Thence in the +x direction.
N = START !Different start, different layout.
DO L = 1,ORDER !Advance one step, then two, then three, etc.
DO LUNGE = 1,2 !But two lunges for each length.
DO STEP = 1,L !Take the steps.
TILE(INT(REAL(PLACE)),INT(AIMAG(PLACE))) = N !This number for this square.
PLACE = PLACE + WAY !Make another step.
N = N + 1 !Count another step.
END DO !And consider making another.
IF (N .GE. ORDER**2) EXIT !Otherwise, one lunge too many!
WAY = WAY*(0,1) !Rotate a quarter-turn counter-clockwise.
END DO !And make another lunge.
END DO !Until finished.
Cast forth the numbers.
c DO L = ORDER,1,-1 !From the top of the grid to the bottom.
c WRITE (6,66) TILE(1:ORDER,L) !One row at a time.
c 66 FORMAT (666I6) !This will do for reassurance.
c END DO !Line by line.
Cast forth the splots.
DO L = ORDER,1,-1 !Just put out a marker.
WRITE (6,67) (SPLOT(ISPRIME(TILE(M,L))),M = 1,ORDER) !One line at a time.
67 FORMAT (666A1) !A single character at each position.
END DO !On to the next row.
END SUBROUTINE ULAMSPIRAL !So much for a boring lecture.
INTEGER FUNCTION ISPRIME(N) !Returns 0 or 1.
INTEGER N !The number.
INTEGER F,Q !Factor and quotient.
ISPRIME = 0 !The more likely outcome.
IF (N.LE.1) RETURN !Just in case the start is peculiar.
IF (N.LE.3) GO TO 2 !Oops! I forgot this!
IF (MOD(N,2).EQ.0) RETURN !Special case.
F = 1 !Now get stuck in to testing odd numbers.
1 F = F + 2 !A trial factor.
Q = N/F !The quotient.
IF (N .EQ. Q*F) RETURN !No remainder? Not a prime.
IF (Q.GT.F) GO TO 1 !Thus chug up to the square root.
2 ISPRIME = 1 !Well!
END FUNCTION ISPRIME !Simple enough.
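Consider N = 25 as a worked case: F = 3 leaves a remainder, then F = 5 gives Q = 5 and N = Q*F, so 0.
Chugging up through the odd F stops once Q <= F, which happens at the square root of N.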
PROGRAM TWIRL
CALL ULAMSPIRAL(1,49)
END
|
{"hexsha": "216e1bba029762c03c64f4d71fd626b0669e263f", "size": 2871, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Task/Ulam-spiral--for-primes-/Fortran/ulam-spiral--for-primes--2.f", "max_stars_repo_name": "LaudateCorpus1/RosettaCodeData", "max_stars_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_stars_repo_licenses": ["Info-ZIP"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-09T22:08:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-09T22:08:38.000Z", "max_issues_repo_path": "Task/Ulam-spiral--for-primes-/Fortran/ulam-spiral--for-primes--2.f", "max_issues_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_issues_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_issues_repo_licenses": ["Info-ZIP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Task/Ulam-spiral--for-primes-/Fortran/ulam-spiral--for-primes--2.f", "max_forks_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_forks_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_forks_repo_licenses": ["Info-ZIP"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-09T22:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T22:08:40.000Z", "avg_line_length": 48.6610169492, "max_line_length": 89, "alphanum_fraction": 0.5945663532, "num_tokens": 905}
|
[STATEMENT]
lemma neg_inter_pos_0:
assumes "hahn_space_decomp M1 M2"
and "hahn_space_decomp P N"
and "A \<in> sets M"
and "A \<subseteq> P"
shows "\<mu> (A \<inter> M2) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
have "\<mu> (A \<inter> M2) = \<mu> (A \<inter> ((M2 \<inter> N) \<union> (M2 \<inter> (sym_diff M2 N))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = \<mu> (A \<inter> (M2 \<inter> N \<union> M2 \<inter> sym_diff M2 N))
[PROOF STEP]
by (metis Diff_subset_conv Int_Un_distrib Un_upper1 inf.orderE)
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> M2) = \<mu> (A \<inter> (M2 \<inter> N \<union> M2 \<inter> sym_diff M2 N))
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> M2) = \<mu> (A \<inter> (M2 \<inter> N \<union> M2 \<inter> sym_diff M2 N))
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
have "... = \<mu> ((A \<inter> (M2 \<inter> N)) \<union> (A \<inter> (M2 \<inter> (sym_diff M2 N))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> (A \<inter> (M2 \<inter> N \<union> M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> N) \<union> A \<inter> (M2 \<inter> sym_diff M2 N))
[PROOF STEP]
by (simp add: Int_Un_distrib)
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> (M2 \<inter> N \<union> M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> N) \<union> A \<inter> (M2 \<inter> sym_diff M2 N))
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> (M2 \<inter> N \<union> M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> N) \<union> A \<inter> (M2 \<inter> sym_diff M2 N))
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
have "... = \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> (sym_diff M2 N)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> (A \<inter> (M2 \<inter> N) \<union> A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
[PROOF STEP]
proof (rule signed_measure_add)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. signed_measure ?M \<mu>
2. A \<inter> (M2 \<inter> N) \<in> sets ?M
3. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets ?M
4. A \<inter> (M2 \<inter> N) \<inter> (A \<inter> (M2 \<inter> sym_diff M2 N)) = {}
[PROOF STEP]
show "signed_measure M \<mu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. signed_measure M \<mu>
[PROOF STEP]
using sgn_meas
[PROOF STATE]
proof (prove)
using this:
signed_measure M \<mu>
goal (1 subgoal):
1. signed_measure M \<mu>
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
signed_measure M \<mu>
goal (3 subgoals):
1. A \<inter> (M2 \<inter> N) \<in> sets M
2. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
3. A \<inter> (M2 \<inter> N) \<inter> (A \<inter> (M2 \<inter> sym_diff M2 N)) = {}
[PROOF STEP]
show "A \<inter> (M2 \<inter> N) \<in> sets M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A \<inter> (M2 \<inter> N) \<in> sets M
[PROOF STEP]
by (meson assms(1) assms(2) assms(3) hahn_space_decomp_def sets.Int
signed_measure_space.neg_meas_setD1 signed_measure_space_axioms)
[PROOF STATE]
proof (state)
this:
A \<inter> (M2 \<inter> N) \<in> sets M
goal (2 subgoals):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
2. A \<inter> (M2 \<inter> N) \<inter> (A \<inter> (M2 \<inter> sym_diff M2 N)) = {}
[PROOF STEP]
show "A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
[PROOF STEP]
by (meson Diff_subset assms(1) assms(2) assms(3) hahn_space_decomp_def
neg_meas_setD1 neg_meas_set_union neg_meas_subset sets.Diff sets.Int)
[PROOF STATE]
proof (state)
this:
A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
goal (1 subgoal):
1. A \<inter> (M2 \<inter> N) \<inter> (A \<inter> (M2 \<inter> sym_diff M2 N)) = {}
[PROOF STEP]
show "A \<inter> (M2 \<inter> N) \<inter> (A \<inter> (M2 \<inter> sym_diff M2 N)) = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A \<inter> (M2 \<inter> N) \<inter> (A \<inter> (M2 \<inter> sym_diff M2 N)) = {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
A \<inter> (M2 \<inter> N) \<inter> (A \<inter> (M2 \<inter> sym_diff M2 N)) = {}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> (M2 \<inter> N) \<union> A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> (M2 \<inter> N) \<union> A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
have "... = \<mu> (A \<inter> (M2 \<inter> (sym_diff M2 N)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
[PROOF STEP]
have "A \<inter> (M2 \<inter> N) = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A \<inter> (M2 \<inter> N) = {}
[PROOF STEP]
using assms hahn_space_decomp_def
[PROOF STATE]
proof (prove)
using this:
hahn_space_decomp M1 M2
hahn_space_decomp P N
A \<in> sets M
A \<subseteq> P
hahn_space_decomp ?M1.0 ?M2.0 \<equiv> pos_meas_set ?M1.0 \<and> neg_meas_set ?M2.0 \<and> space M = ?M1.0 \<union> ?M2.0 \<and> ?M1.0 \<inter> ?M2.0 = {}
goal (1 subgoal):
1. A \<inter> (M2 \<inter> N) = {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
A \<inter> (M2 \<inter> N) = {}
goal (1 subgoal):
1. \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
A \<inter> (M2 \<inter> N) = {}
goal (1 subgoal):
1. \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
[PROOF STEP]
using signed_measure_empty[OF sgn_meas]
[PROOF STATE]
proof (prove)
using this:
A \<inter> (M2 \<inter> N) = {}
\<mu> {} = 0
goal (1 subgoal):
1. \<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> (M2 \<inter> N)) + \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N))
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
have "... = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = 0
[PROOF STEP]
proof (rule hahn_decomp_ess_unique[OF assms(1) assms(2)])
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<subseteq> sym_diff M1 P \<union> sym_diff M2 N
2. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
[PROOF STEP]
show "A \<inter> (M2 \<inter> sym_diff M2 N) \<subseteq> sym_diff M1 P \<union> sym_diff M2 N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<subseteq> sym_diff M1 P \<union> sym_diff M2 N
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
A \<inter> (M2 \<inter> sym_diff M2 N) \<subseteq> sym_diff M1 P \<union> sym_diff M2 N
goal (1 subgoal):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
[PROOF STEP]
show "A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
[PROOF STEP]
have "sym_diff M2 N \<in> sets M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sym_diff M2 N \<in> sets M
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
hahn_space_decomp M1 M2
hahn_space_decomp P N
A \<in> sets M
A \<subseteq> P
goal (1 subgoal):
1. sym_diff M2 N \<in> sets M
[PROOF STEP]
by (meson hahn_space_decomp_def sets.Diff sets.Un
signed_measure_space.neg_meas_setD1 signed_measure_space_axioms)
[PROOF STATE]
proof (state)
this:
sym_diff M2 N \<in> sets M
goal (1 subgoal):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
[PROOF STEP]
hence "M2 \<inter> sym_diff M2 N \<in> sets M"
[PROOF STATE]
proof (prove)
using this:
sym_diff M2 N \<in> sets M
goal (1 subgoal):
1. M2 \<inter> sym_diff M2 N \<in> sets M
[PROOF STEP]
by (meson assms(1) hahn_space_decomp_def neg_meas_setD1 sets.Int)
[PROOF STATE]
proof (state)
this:
M2 \<inter> sym_diff M2 N \<in> sets M
goal (1 subgoal):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
M2 \<inter> sym_diff M2 N \<in> sets M
goal (1 subgoal):
1. A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
[PROOF STEP]
by (simp add: assms sets.Int)
[PROOF STATE]
proof (state)
this:
A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
A \<inter> (M2 \<inter> sym_diff M2 N) \<in> sets M
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> (M2 \<inter> sym_diff M2 N)) = 0
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
\<mu> (A \<inter> M2) = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<mu> (A \<inter> M2) = 0
goal (1 subgoal):
1. \<mu> (A \<inter> M2) = 0
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
\<mu> (A \<inter> M2) = 0
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4909, "file": "Hahn_Jordan_Decomposition_Hahn_Jordan_Decomposition", "length": 49}
|
const A = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 9.0]
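# Two-factor overlap model: minimizing `cost` maximizes |u' * A * v| over the
# two unit-norm factors stored as slices p[M, 1] and p[M, 2] of a power-manifold point.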
cost(M::PowerManifold, p) = -0.5 * norm(transpose(p[M, 1]) * A * p[M, 2])^2
function egrad(M::PowerManifold, X::Array)
U = X[M, 1]
V = X[M, 2]
AV = A * V
AtU = transpose(A) * U
AR = similar(X)
AR[:, :, 1] .= -AV * (transpose(AV) * U)
AR[:, :, 2] .= -AtU * (transpose(AtU) * V)
return AR
end
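# Functor (mutating) variant of `egrad`: writes the gradient into a
# preallocated `Y` and carries its own copy of `A` instead of the global constant.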
struct EGrad{T,TM}
M::TM
A::Matrix{T}
end
function (e::EGrad)(Y::Array, X::Array)
U = X[e.M, 1]
V = X[e.M, 2]
    AV = e.A * V           # use the stored matrix, not the global A
    AtU = transpose(e.A) * U
view(Y, :, :, 1) .= -AV * (transpose(AV) * U)
view(Y, :, :, 2) .= -AtU * (transpose(AtU) * V)
return Y
end
rgrad(M::PowerManifold, p) = project(M, p, egrad(M, p))
struct RGrad{T,TM}
egrad::EGrad{T,TM}
end
function RGrad(M::PowerManifold, A::Matrix{T}) where {T}
return RGrad{T,typeof(M)}(EGrad{T,typeof(M)}(M, A))
end
function (r::RGrad)(M::PowerManifold, X, p)
return project!(M, X, p, r.egrad(X, p))
end
function e2rHess(M::Grassmann, p, X, e_grad, e_hess)
return project(M, p, project(M, p, e_hess) - X * (p' * e_grad))
end
function e2rhess!(M::Grassmann, Y, p, X, e_grad, e_Hess)
project!(M, Y, p, e_Hess)
Y .-= X * (p' * e_grad)
return project!(M, Y, p, Y)
end
function eHess(M::AbstractManifold, X::Array{Float64,3}, H::Array{Float64,3})
U = X[M, 1]
V = X[M, 2]
Udot = H[M, 1]
Vdot = H[M, 2]
AV = A * V
AtU = transpose(A) * U
AVdot = A * Vdot
AtUdot = transpose(A) * Udot
R = similar(X)
#! format: off
view(R, :, :, 1) .= -(
AVdot * transpose(AV) * U +
AV * transpose(AVdot) * U +
AV * transpose(AV) * Udot
)
view(R, :, :, 2) .= -(
AtUdot * transpose(AtU) * V +
AtU * transpose(AtUdot) * V +
AtU * transpose(AtU) * Vdot
)
#! format: on
return R
end
struct EHess{T,TM}
M::TM
A::Matrix{T}
end
function (e::EHess)(Y, X, H)
U = X[e.M, 1]
V = X[e.M, 2]
Udot = H[e.M, 1]
Vdot = H[e.M, 2]
AV = e.A * V
AtU = transpose(e.A) * U
AVdot = e.A * Vdot
AtUdot = transpose(e.A) * Udot
#! format: off
view(Y, :, :, 1) .= -AVdot * transpose(AV) * U - AV * transpose(AVdot) * U - AV * transpose(AV) * Udot
    view(Y, :, :, 2) .= -(AtUdot * transpose(AtU) * V + AtU * transpose(AtUdot) * V + AtU * transpose(AtU) * Vdot)  # negated to match eHess above
#! format: on
return Y
end
function rhess(M::PowerManifold, p, X)
eG = egrad(M, p)
eH = eHess(M, p, X)
Ha = similar(p)
for i in 1:2
e2rhess!(
M.manifold,
view(Ha, :, :, i),
view(p, :, :, i),
view(X, :, :, i),
view(eG, :, :, i),
view(eH, :, :, i),
)
end
return Ha
end
struct RHess{T,TM}
e_grad!::EGrad{T,TM}
e_hess!::EHess{T,TM}
G::Array{T,3}
H::Array{T,3}
end
function RHess(M::AbstractManifold, A::Matrix{T}, p) where {T}
return RHess{T,typeof(M)}(
EGrad(M, A), EHess(M, A), zeros(T, size(A, 1), p, 2), zeros(T, size(A, 1), p, 2)
)
end
function (r::RHess)(M::PowerManifold, Y, p, X)
r.e_grad!(r.G, p)
r.e_hess!(r.H, p, X)
for i in 1:2
e2rhess!(
M.manifold,
view(Y, :, :, i),
view(p, :, :, i),
view(X, :, :, i),
view(r.G, :, :, i),
view(r.H, :, :, i),
)
end
return Y
end
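# Hedged usage sketch (not part of the original test): wire the pieces above
# into a trust-region solve. Assumes Manifolds.jl's PowerManifold over
# Grassmann(3, 1) (points stored as 3x1x2 arrays, matching the [:, :, i] views
# above) and Manopt.jl's positional trust_regions(M, f, grad_f, Hess_f, p)
# signature; both may differ between package versions.
using Manifolds, Manopt
M = PowerManifold(Grassmann(3, 1), 2)
p0 = cat(reshape([1.0, 0.0, 0.0], 3, 1), reshape([0.0, 1.0, 0.0], 3, 1); dims=3)
p_opt = trust_regions(M, cost, rgrad, rhess, p0)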
|
{"hexsha": "0c197472e5a499829bbd2a00ab19538952b4f674", "size": 3416, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/solvers/trust_region_model.jl", "max_stars_repo_name": "const-ae/Manopt.jl", "max_stars_repo_head_hexsha": "cdaeee451d53d4813d37cf859f2ca6adcad82635", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 141, "max_stars_repo_stars_event_min_datetime": "2020-03-30T08:00:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T09:37:54.000Z", "max_issues_repo_path": "test/solvers/trust_region_model.jl", "max_issues_repo_name": "const-ae/Manopt.jl", "max_issues_repo_head_hexsha": "cdaeee451d53d4813d37cf859f2ca6adcad82635", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 90, "max_issues_repo_issues_event_min_datetime": "2020-03-30T08:00:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T08:55:37.000Z", "max_forks_repo_path": "test/solvers/trust_region_model.jl", "max_forks_repo_name": "const-ae/Manopt.jl", "max_forks_repo_head_hexsha": "cdaeee451d53d4813d37cf859f2ca6adcad82635", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2020-04-14T11:49:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T21:36:59.000Z", "avg_line_length": 25.3037037037, "max_line_length": 111, "alphanum_fraction": 0.4847775176, "num_tokens": 1344}
|
import pytest
from mini_lambda import FunctionDefinitionError, make_lambda_friendly_method
from mini_lambda.main import _LambdaExpression
def test_doc_index_1():
""" Tests that the first example in the documentation main page works """
# import magic variable 's'
from mini_lambda import s
# write an expression and wrap it with _() to make a function
from mini_lambda import _
say_hello_function = _('Hello, ' + s + ' !')
# use the function
print(say_hello_function('world')) # 'Hello, world !'
assert say_hello_function('world') == 'Hello, world !'
print(say_hello_function)
assert str(say_hello_function) == "'Hello, ' + s + ' !'"
def test_doc_index_2():
""" Tests that the second example in the documentation main page works """
from mini_lambda import s, x, _, Log # this is a dynamic creation hence pycharm does not see it
# various lambda functions
is_lowercase = _(s.islower())
get_prefix_upper_shebang = _(s[0:4].upper() + ' !')
numeric_test_1 = _(-x > x ** 2)
numeric_test_2 = _(((1 - 2 * x) <= -x) | (-x > x ** 2))
complex_identity = _(Log(10 ** x, 10))
# use the functions
assert is_lowercase('Hello') is False
assert get_prefix_upper_shebang('hello') == 'HELL !'
assert numeric_test_1(0.5) is False
assert numeric_test_2(1) is True
assert complex_identity(10) == 10
# string representation
print(is_lowercase) # s.islower()
print(get_prefix_upper_shebang) # s[0:4].upper() + ' !'
print(numeric_test_1) # -x > x ** 2
print(numeric_test_2) # (1 - 2 * x <= -x) | (-x > x ** 2)
print(complex_identity) # log(10 ** x, 10)
assert str(is_lowercase) == 's.islower()'
assert str(get_prefix_upper_shebang) == "s[0:4].upper() + ' !'"
assert str(numeric_test_1) == '-x > x ** 2'
assert str(numeric_test_2) == '(1 - 2 * x <= -x) | (-x > x ** 2)'
assert str(complex_identity) == 'log(10 ** x, 10)'
def test_doc_usage_input_variables():
""" Tests that the examples in doc/usage in the input variables section work """
from mini_lambda import InputVar
t = InputVar('t')
import pandas as pd
df = InputVar('df', pd.DataFrame)
def test_doc_usage_expressions_1():
""" Tests that the first example in doc/usage in the expressions section works """
from mini_lambda import x
# A variable is a lambda expression
print(type(x)) # <class 'mini_lambda.main._LambdaExpression'>
assert type(x) == _LambdaExpression
# Evaluating the lambda expression applies the identity function
print(x.evaluate(1234)) # 1234
assert x.evaluate(1234) == 1234
print(x.to_string()) # x
assert x.to_string() == 'x'
def test_doc_usage_expressions_2():
""" Tests that the second set of examples in doc/usage in the expressions section works """
from mini_lambda import x, _, L, F
# An expression is built using python syntax with a variable
my_first_expr = (1 + 1) * x + 1 > 0
assert my_first_expr.evaluate(-1 / 2) is False
assert my_first_expr.to_string() == "2 * x + 1 > 0"
assert my_first_expr(-1/2).to_string() == "(2 * x + 1 > 0)(-0.5)"
one = my_first_expr.as_function() # explicit conversion
two = _(my_first_expr) # _() does the same thing
three = L(my_first_expr) # L() is an alias for _()
    four = F(my_first_expr)  # F() too
five, six = _(my_first_expr, x) # both accept multiple arguments
# you can now use the functions directly
assert one(-1 / 2) is False
assert two(-1 / 2) is False
assert three(-1 / 2) is False
assert four(-1 / 2) is False
assert five(-1 / 2) is False
assert six(-1 / 2) == -0.5
# string representation
assert str(one) == "2 * x + 1 > 0"
assert str(six) == "x"
def test_doc_usage_expressions_3_all_at_once():
""" Tests that the last example in doc/usage in the expressions section works """
from mini_lambda import s, _, Print
say_hello = _(Print('Hello, ' + s + ' !'))
say_hello('world')
def test_doc_usage_syntax_1():
""" Tests that the first example in doc/usage in the syntax section works """
from mini_lambda import i, s, l, f, d, x
from math import trunc
expr = i < 5 # comparing (<, >, <=, >=, ==, !=)
expr = s.lower() # accessing fields and methods (recursive)
expr = f(10) # calling
expr = reversed(l) # reversing
expr = d['key'] # getting
expr = s[0:3] # slicing
expr = 2 * i ** 5 % 2 # calc-ing (+,-,/,//,%,divmod,**,@,<<,>>,abs,~)
expr = trunc(x) # calculating (round, math.trunc)
expr = s.format(1, 2) # formatting
expr = (x > 1) & (x < 5) # boolean logic: &,|,^
def test_doc_usage_syntax_2():
""" Tests that the second example in doc/usage in the syntax section works """
from mini_lambda import b, i, s, l, x
from mini_lambda import Slice, Get, Not, In
from mini_lambda import Iter, Repr, Format, Len, Int, Any, Log, DDecimal
from math import log
from decimal import Decimal
# boolean logic
with pytest.raises(FunctionDefinitionError):
expr = (x > 1) and (x < 5) # fails
expr = (x > 1) & (x < 5) # OK
# iterating
with pytest.raises(FunctionDefinitionError):
expr = next(iter(s)) # fails
expr = next(Iter(s)) # OK
# calling with the variable as arg
with pytest.raises(FunctionDefinitionError):
expr = log(x) # fails
expr = Log(x) # OK
# constructing with the variable as arg
with pytest.raises(TypeError):
expr = Decimal(x) # fails
expr = DDecimal(x) # OK
# getting with the variable as the key
with pytest.raises(FunctionDefinitionError):
expr = {'a': 1}[s] # fails
expr = Get({'a': 1}, s) # OK
# slicing with the variable as index
with pytest.raises(FunctionDefinitionError):
expr = 'hello'[0:i] # fails
expr = Get('hello', Slice(0, i)) # OK
# representing: Repr/Str/Bytes/Sizeof/Hash
with pytest.raises(FunctionDefinitionError):
expr = repr(l) # fails
expr = Repr(l) # OK
# formatting with the variable in the args
with pytest.raises(FunctionDefinitionError):
expr = '{} {}'.format(s, s) # fails
expr = Format('{} {}', s, s) # OK
# sizing
with pytest.raises(FunctionDefinitionError):
expr = len(l) # fails
expr = Len(l) # OK
# casting (Bool, Int, Float, Complex, Hex, Oct)
with pytest.raises(FunctionDefinitionError):
expr = int(s) # fails
expr = Int(s) # OK
# not
with pytest.raises(FunctionDefinitionError):
expr = not b # fails
expr = b.not_() # OK
expr = Not(b) # OK
# any/all
with pytest.raises(FunctionDefinitionError):
expr = any(l) # fails
expr = l.any_() # OK
expr = Any(l) # OK
# membership testing (variable as container)
with pytest.raises(FunctionDefinitionError):
expr = 'f' in l # fails
expr = l.contains('f') # OK
expr = In('f', l) # OK
# membership testing (variable as item)
with pytest.raises(FunctionDefinitionError):
expr = x in [1, 2] # fails
expr = x.is_in([1, 2]) # OK
expr = In(x, [1, 2]) # OK
with pytest.raises(FunctionDefinitionError):
expr = 0 < x < 1 # chained comparisons (use parenthesis and & instead)
with pytest.raises(FunctionDefinitionError):
expr = [i for i in l] # list/tuple/set/dict comprehensions (no workaround)
def test_doc_usage_other_constants():
""" Tests that the example in doc/usage in the others/constants section works """
from mini_lambda import x, _, E, C
from math import e
assert str(_(x + e)) == 'x + 2.718281828459045'
assert str(_(x + E)) == 'x + e'
assert str(_(E + E)) == 'e + e'
# define the constant
E = C(e, 'e')
# use it in expressions. The name appears when printed
assert str(_(x + E)) == 'x + e'
def test_doc_usage_other_functions_1 ():
""" Tests that the example in doc/usage in the others/functions section (1) works """
from mini_lambda import x, _
# ** standard class function
StartsWith = make_lambda_friendly_method(str.startswith)
# now you can use `StartsWith` in your lambda expressions
str_tester = _(StartsWith('hello', 'el', x))
# first check that with one argument it works
str_tester(0) # False
str_tester(1) # True
print(str_tester) # "startswith('hello', 'el', x)"
# ** static and class functions
class Foo:
@staticmethod
def bar1(times, num, den):
return times * num / den
@classmethod
def bar2(cls, times, num, den):
return times * num / den
FooBar1 = make_lambda_friendly_method(Foo.bar1)
fun1 = _(FooBar1(x, den=x, num=1))
assert fun1(5.5) == 1
    FooBar2a = make_lambda_friendly_method(Foo.bar2)  # the `cls` argument is `Foo` and can't be changed
fun2a = _(FooBar2a(x, den=x, num=1))
assert fun2a(5.5) == 1
FooBar2b = make_lambda_friendly_method(Foo.bar2.__func__) # the `cls` argument can be changed
fun2b = _(FooBar2b(Foo, x, den=x, num=1))
assert fun2b(5.5) == 1
def test_doc_usage_other_functions_2():
""" Tests that the example in doc/usage in the others/functions section (2) works """
from mini_lambda import x, _
class Foo:
@staticmethod
def bar1(times, num, den):
return times * num / den
@classmethod
def bar2(cls, times, num, den):
return times * num / den
FooBar1 = make_lambda_friendly_method(Foo.bar1)
fun1 = _(FooBar1(x, den=x, num=1))
    FooBar2a = make_lambda_friendly_method(Foo.bar2)  # the `cls` argument is `Foo` and can't be changed
fun2a = _(FooBar2a(x, den=x, num=1))
FooBar2b = make_lambda_friendly_method(Foo.bar2.__func__) # the `cls` argument can be changed
fun2b = _(FooBar2b(Foo, x, den=x, num=1))
assert fun1(5.5) == 1
# apparently the order may vary: in travis it is reversed
assert(str(fun1)) in {'bar1(x, den=x, num=1)', 'bar1(x, num=1, den=x)'}
assert fun2a(5.5) == 1
# apparently the order may vary: in travis it is reversed
assert (str(fun2a)) in {'bar2(x, den=x, num=1)', 'bar2(x, num=1, den=x)'}
assert fun2b(5.5) == 1
# apparently the order may vary: in travis it is reversed
assert (str(fun2b)) in {'bar2(Foo, x, den=x, num=1)', 'bar2(Foo, x, num=1, den=x)'}
def test_doc_usage_other_classes():
""" Tests that the example in doc/usage in the others/classes section works """
from mini_lambda import _, make_lambda_friendly_class
from mini_lambda.numpy import X
import numpy as np
import pandas as pd
DDataframe = make_lambda_friendly_class(pd.DataFrame)
    expr = _(DDataframe(X).max().values[0])
assert expr(np.array([1, 2])) == 2
assert str(expr) == "DataFrame(X).max().values[0]"
def test_doc_usage_all_at_once():
""" Tests that the example in doc/usage in the others/anything section works """
from mini_lambda import _, C
from mini_lambda.numpy import X
import numpy as np
import pandas as pd
all_at_once = _(C(print)(C(pd.DataFrame)(X).transpose()))
all_at_once(np.array([1, 2]))
assert str(all_at_once) == 'print(DataFrame(X).transpose())'
def test_doc_usage_already_imported():
""" Tests that the example in doc/usage in the others/preconverted section works """
from mini_lambda import DDecimal # Decimal class
from mini_lambda import Print # print() function
from mini_lambda import Pi # math.pi constant
|
{"hexsha": "d82d308b2cfcf1b431871c7e70ed75c540ebe647", "size": 12167, "ext": "py", "lang": "Python", "max_stars_repo_path": "mini_lambda/tests/test_readme.py", "max_stars_repo_name": "semiversus/python-mini-lambda", "max_stars_repo_head_hexsha": "35ec4b6304b08ffd28939ffef7ead6b150dc1525", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mini_lambda/tests/test_readme.py", "max_issues_repo_name": "semiversus/python-mini-lambda", "max_issues_repo_head_hexsha": "35ec4b6304b08ffd28939ffef7ead6b150dc1525", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mini_lambda/tests/test_readme.py", "max_forks_repo_name": "semiversus/python-mini-lambda", "max_forks_repo_head_hexsha": "35ec4b6304b08ffd28939ffef7ead6b150dc1525", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1038575668, "max_line_length": 103, "alphanum_fraction": 0.6027780061, "include": true, "reason": "import numpy", "num_tokens": 3366}
|
from __future__ import print_function
import argparse
import torch
import os
import numpy as np
import torch.utils.data
from torch import nn, optim, save
from PIL import Image
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.utils.data import Dataset, DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
imsize = 256 if torch.cuda.is_available() else 64
loader = transforms.Compose([
transforms.Resize(imsize),
transforms.ToTensor()])
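# Read an image as single-channel ('L'), apply the transforms above, and add a batch dimension.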
def image_loader(image_name):
image = Image.open(image_name).convert('L')
image = loader(image).unsqueeze(0)
return image.to(device, torch.float)
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                    help='input batch size for training (default: 50)')
parser.add_argument('--epochs', type=int, default=50, metavar='N', help='number of epochs to train (default: 50)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=20, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
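        # 6144 is the flattened input size; it matches e.g. a 96x64 grayscale
        # frame (an assumption: adjust if the loader produces another shape).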
self.fc1 = nn.Linear(6144, 400)
self.fc21 = nn.Linear(400, 100)
self.fc22 = nn.Linear(400, 100)
self.fc3 = nn.Linear(100, 400)
self.fc4 = nn.Linear(400, 6144)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 6144))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
model = VAE().to(device)
model.load_state_dict(torch.load('./models/last_model' ))
model.eval()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
BCE = F.binary_cross_entropy(recon_x, x.view(-1, 6144), reduction='sum')
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
if __name__ == "__main__":
PATH = 'sequences/1'
global_output_data = np.array([])
for j in range(500):
LPATH = PATH + '/seq_' + str(j)
with open(LPATH + '/actions.txt', 'r') as f:
actions = f.read()
local_data = []
output_data = np.array([])
for i in range(150):
local_data.append(image_loader(LPATH + '/' + str(i) + '.png'))
            encoded = model.encode(local_data[i].view(-1, 6144))[0][0]
            no_grad = encoded.detach()  # reuse the encoding instead of running the encoder twice
            output_data = np.append(output_data, no_grad)
seq = torch.from_numpy(output_data).view(150, 100)
global_output_data = np.append(global_output_data, seq)
torch.save(seq, LPATH + '/encoded.txt')
print("Sequence ", str(j), " finished")
torch.save(global_output_data, PATH + '/encoded.txt')
|
{"hexsha": "bbd37148fa467dae8ad5582a53997686ae15c793", "size": 4046, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Seq_Encoder.py", "max_stars_repo_name": "Krolion/SW1", "max_stars_repo_head_hexsha": "68a1051ac2665960a1338205c45ac32c549f52f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Seq_Encoder.py", "max_issues_repo_name": "Krolion/SW1", "max_issues_repo_head_hexsha": "68a1051ac2665960a1338205c45ac32c549f52f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Seq_Encoder.py", "max_forks_repo_name": "Krolion/SW1", "max_forks_repo_head_hexsha": "68a1051ac2665960a1338205c45ac32c549f52f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7166666667, "max_line_length": 114, "alphanum_fraction": 0.6475531389, "include": true, "reason": "import numpy", "num_tokens": 1046}
|
from __future__ import annotations
from typing import NoReturn
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import roc_auc_score
from IMLearn.base import BaseEstimator
class AgodaCancellationEstimator(BaseEstimator):
"""
An estimator for solving the Agoda Cancellation challenge
"""
NUM_ESTIMATORS = 500
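    # 60*60*24*3 s = 3 days: tolerance window used by _loss when scoring predicted cancellation times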
LOSS_THRESHOLD = 60 * 60 * 24 * 3
ORIGINAL_DATES_COLS = ['X_booking_datetime_original', 'X_checkin_date_original']
Y_COLUMNS = ['time_to_cancel', 'cancel_time_to_checkin', 'real_cancellation_datetime']
MIN_DATE_THRESHOLD = '2018-12-05'
MAX_DATE_THRESHOLD = '2018-12-15'
N_NEIGHBORS = 5
def __init__(self, final) -> AgodaCancellationEstimator:
"""
Instantiate an estimator for solving the Agoda Cancellation challenge
Parameters
----------
Attributes
----------
"""
super().__init__()
self.final = final
self._clf = RandomForestClassifier(n_estimators=self.NUM_ESTIMATORS)
self._reg_after_booking = KNeighborsRegressor(self.N_NEIGHBORS, weights='distance')
self._reg_before_checkin = KNeighborsRegressor(self.N_NEIGHBORS, weights='distance')
@classmethod
def _get_y(cls, y, col_index):
return y[cls.Y_COLUMNS[col_index]]
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit an estimator for given samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
Notes
-----
"""
raw_X = X[X.columns.difference(self.ORIGINAL_DATES_COLS)]
y_cancelled_classes = self._get_y(y, 2).notna()
self._clf.fit(raw_X, y_cancelled_classes)
reg_X = raw_X[y_cancelled_classes == 1]
reg_y_after_booking = self._get_y(y, 0)[y_cancelled_classes == 1]
reg_y_before_checkin = self._get_y(y, 1)[y_cancelled_classes == 1]
self._reg_after_booking.fit(reg_X, reg_y_after_booking)
self._reg_before_checkin.fit(reg_X, reg_y_before_checkin)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
raw_X = X[X.columns.difference(self.ORIGINAL_DATES_COLS)]
classification_prediction = self._clf.predict(raw_X)
reg_X = raw_X[classification_prediction == 1]
regression_prediction_after_booking = self._reg_after_booking.predict(reg_X)
regression_prediction_before_checkin = self._reg_before_checkin.predict(reg_X)
prediction_a = X.loc[classification_prediction == 1][self.ORIGINAL_DATES_COLS[0]] + pd.to_timedelta(regression_prediction_after_booking, unit='s')
prediction_b = X.loc[classification_prediction == 1][self.ORIGINAL_DATES_COLS[1]] - pd.to_timedelta(regression_prediction_before_checkin, unit='s')
cancellation_time = prediction_a + ((prediction_b - prediction_a) / 2)
results = X[[]]
if not self.final:
results.loc[cancellation_time.index, 'prediction'] = cancellation_time
return results.prediction
is_in_dates = np.logical_and(cancellation_time >= self.MIN_DATE_THRESHOLD,
cancellation_time <= self.MAX_DATE_THRESHOLD)
results.loc[is_in_dates.index, 'prediction'] = is_in_dates
return np.where(classification_prediction == 1, results.prediction, False).astype(int)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under loss function
"""
predictions = self.predict(X)
real_cancel_times = self._get_y(y, 2)
if self.final:
return roc_auc_score(real_cancel_times.notna(), predictions)
cancel_time_predictions = (real_cancel_times - predictions) / np.timedelta64(1, 's') < self.LOSS_THRESHOLD
correct_or_incorrect = np.where(np.isnat(predictions),
real_cancel_times.isna(),
cancel_time_predictions).astype(int)
print("Error rate / Misclassification Error:", np.mean(1 - correct_or_incorrect))
print("Accuracy:", np.mean(correct_or_incorrect))
return roc_auc_score(correct_or_incorrect, np.ones(len(correct_or_incorrect)))
|
{"hexsha": "147dd93dd1b8bf3d10e5263e0c76550f178e7397", "size": 5141, "ext": "py", "lang": "Python", "max_stars_repo_path": "challenge/agoda_cancellation_estimator.py", "max_stars_repo_name": "nirpet/IML.HUJI", "max_stars_repo_head_hexsha": "6f8c7719760df3e381115f01cd5c3cfc9951b59c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "challenge/agoda_cancellation_estimator.py", "max_issues_repo_name": "nirpet/IML.HUJI", "max_issues_repo_head_hexsha": "6f8c7719760df3e381115f01cd5c3cfc9951b59c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "challenge/agoda_cancellation_estimator.py", "max_forks_repo_name": "nirpet/IML.HUJI", "max_forks_repo_head_hexsha": "6f8c7719760df3e381115f01cd5c3cfc9951b59c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9727891156, "max_line_length": 155, "alphanum_fraction": 0.652013227, "include": true, "reason": "import numpy", "num_tokens": 1123}
|
using PyPlot
using DelimitedFiles
using PyCall
mpl = pyimport("tikzplotlib")
d = readdlm("timing.txt")
idx = sortperm(d[:,1])
d = d[idx,:]
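# rows sorted by processor count so the timing curves are monotone in x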
close("all")
plot(d[:,1], 3.693 * ones(length(d[:,1])), "--", label="Fortran")
loglog(d[:,1], d[:,2], "o-", label="ADSeismic MPI")
legend()
xlabel("Number of Processors")
ylabel("Time (sec)")
xticks(d[:,1], Int.(d[:,1]))
grid("on", which="both", linestyle=":")
savefig("acoustic_time_forward.png")
close("all")
loglog(d[:,1], d[:,3], "o-", color ="orange", label="ADSeismic MPI")
legend()
xlabel("Number of Processors")
ylabel("Time (sec)")
xticks(d[:,1], Int.(d[:,1]))
grid("on", which="both", linestyle=":")
savefig("acoustic_time_backward.png")
close("all")
loglog(d[:,1], d[:,2], "o-", label="Forward")
loglog(d[:,1], d[:,3], "o-", color ="orange", label="Backward")
legend()
xlabel("Number of Processors")
ylabel("Time (sec)")
xticks(d[:,1], Int.(d[:,1]))
grid("on", which="both", linestyle=":")
savefig("acoustic_time_forward_and_backward.png")
mpl.save("../figures/acoustic_time_forward_and_backward.tex")
close("all")
figure(figsize=(10,4))
subplot(121)
title("Forward")
loglog(d[:,1], d[1,2]./d[:,2], "o-", label="Speedup")
loglog(d[:,1], d[1,2]./(d[:,2].*d[:,1]), "o-", label="Efficiency")
legend()
xlabel("Number of Processors")
ylabel("Time (sec)")
xticks(d[:,1], Int.(d[:,1]))
grid("on", which="both", linestyle=":")
subplot(122)
title("Backward")
loglog(d[:,1], d[1,3]./d[:,3], "o-", label="Speedup")
loglog(d[:,1], d[1,3]./(d[:,3].*d[:,1]), "o-", label="Efficiency")
legend()
xlabel("Number of Processors")
ylabel("Time (sec)")
xticks(d[:,1], Int.(d[:,1]))
grid("on", which="both", linestyle=":")
savefig("acoustic_speedup_and_efficiency.png")
mpl.save("../figures/acoustic_speedup_and_efficiency.tex")
|
{"hexsha": "3995395e7349d4e1704086c96955d545a87784c9", "size": 1765, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/mpi_acoustic/strong_scaling/plot.jl", "max_stars_repo_name": "kailaix/ADSeismic.jl", "max_stars_repo_head_hexsha": "c4cd214a6de10cadc1a59b1c302ccfe42d4d25f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2020-03-21T09:56:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T14:05:14.000Z", "max_issues_repo_path": "examples/mpi_acoustic/strong_scaling/plot.jl", "max_issues_repo_name": "kailaix/ADSeismic.jl", "max_issues_repo_head_hexsha": "c4cd214a6de10cadc1a59b1c302ccfe42d4d25f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-08-12T15:40:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-15T14:09:40.000Z", "max_forks_repo_path": "examples/mpi_acoustic/strong_scaling/plot.jl", "max_forks_repo_name": "kailaix/ADSeismic.jl", "max_forks_repo_head_hexsha": "c4cd214a6de10cadc1a59b1c302ccfe42d4d25f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2020-04-18T09:25:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:06:13.000Z", "avg_line_length": 27.578125, "max_line_length": 68, "alphanum_fraction": 0.6294617564, "num_tokens": 560}
|
module Display
using UUIDs
import LibGit2
using ..Types
const colors = Dict(
' ' => :white,
'+' => :light_green,
'-' => :light_red,
'↑' => :light_yellow,
'~' => :light_yellow,
'↓' => :light_magenta,
'?' => :red,
)
const color_dark = :light_black
function git_file_stream(repo::LibGit2.GitRepo, spec::String; fakeit::Bool=false)::IO
blob = try LibGit2.GitBlob(repo, spec)
catch err
err isa LibGit2.GitError && err.code == LibGit2.Error.ENOTFOUND || rethrow(err)
fakeit && return devnull
end
iob = IOBuffer(LibGit2.content(blob))
close(blob)
return iob
end
function status(ctx::Context, mode::PackageMode, use_as_api=false)
env = ctx.env
project₀ = project₁ = env.project
manifest₀ = manifest₁ = env.manifest
diff = nothing
if !use_as_api
pkg = ctx.env.pkg
if pkg !== nothing
printstyled("Project "; color=Base.info_color(), bold=true)
println(pkg.name, " v", pkg.version)
end
end
if env.git != nothing
git_path = LibGit2.path(env.git)
project_path = relpath(env.project_file, git_path)
manifest_path = relpath(env.manifest_file, git_path)
project₀ = read_project(git_file_stream(env.git, "HEAD:$project_path", fakeit=true))
manifest₀ = read_manifest(git_file_stream(env.git, "HEAD:$manifest_path", fakeit=true))
end
if mode == PKGMODE_PROJECT || mode == PKGMODE_COMBINED
# TODO: handle project deps missing from manifest
m₀ = filter_manifest(in_project(project₀["deps"]), manifest₀)
m₁ = filter_manifest(in_project(project₁["deps"]), manifest₁)
diff = manifest_diff(ctx, m₀, m₁)
if !use_as_api
printpkgstyle(ctx, :Status, pathrepr(ctx, env.project_file); ignore_indent=true)
print_diff(ctx, diff)
end
end
if mode == PKGMODE_MANIFEST
diff = manifest_diff(ctx, manifest₀, manifest₁)
if !use_as_api
printpkgstyle(ctx, :Status, pathrepr(ctx, env.manifest_file); ignore_indent=true)
print_diff(ctx, diff)
end
elseif mode == PKGMODE_COMBINED
p = not_in_project(merge(project₀["deps"], project₁["deps"]))
m₀ = filter_manifest(p, manifest₀)
m₁ = filter_manifest(p, manifest₁)
c_diff = filter!(x->x.old != x.new, manifest_diff(ctx, m₀, m₁))
if !isempty(c_diff)
if !use_as_api
printpkgstyle(ctx, :Status, pathrepr(ctx, env.manifest_file); ignore_indent=true)
print_diff(ctx, c_diff)
end
diff = Base.vcat(c_diff, diff)
end
end
return diff
end
function print_project_diff(ctx::Context, env₀::EnvCache, env₁::EnvCache)
pm₀ = filter_manifest(in_project(env₀.project["deps"]), env₀.manifest)
pm₁ = filter_manifest(in_project(env₁.project["deps"]), env₁.manifest)
diff = filter!(x->x.old != x.new, manifest_diff(ctx, pm₀, pm₁))
if isempty(diff)
printstyled(color = color_dark, " [no changes]\n")
else
print_diff(ctx, diff)
end
end
function print_manifest_diff(ctx::Context, env₀::EnvCache, env₁::EnvCache)
diff = manifest_diff(ctx, env₀.manifest, env₁.manifest)
diff = filter!(x->x.old != x.new, diff)
if isempty(diff)
printstyled(color = color_dark, " [no changes]\n")
else
print_diff(ctx, diff)
end
end
struct VerInfo
hash::Union{SHA1,Nothing}
path::Union{String,Nothing}
ver::Union{VersionNumber,Nothing}
pinned::Bool
repo::Union{Types.GitRepo, Nothing}
end
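# Abbreviate full 40-character commit SHAs to 7 characters; other rev strings pass through.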
revstring(str::String) = occursin(r"\b([a-f0-9]{40})\b", str) ? str[1:7] : str
vstring(ctx::Context, a::VerInfo) =
string((a.ver == nothing && a.hash != nothing) ? "[$(string(a.hash)[1:16])]" : "",
a.ver != nothing ? "v$(a.ver)" : "",
a.path != nothing ? " [$(pathrepr(ctx, a.path))]" : "",
a.repo != nothing ? " #$(revstring(a.repo.rev))" : "",
a.pinned == true ? " ⚲" : "",
)
Base.:(==)(a::VerInfo, b::VerInfo) =
a.hash == b.hash && a.ver == b.ver && a.pinned == b.pinned
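# Loose match: hashes and pin status must agree; versions only when both are known.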
≈(a::VerInfo, b::VerInfo) = a.hash == b.hash &&
(a.ver == nothing || b.ver == nothing || a.ver == b.ver) &&
(a.pinned == b.pinned)
struct DiffEntry
uuid::UUID
name::String
old::Union{VerInfo,Nothing}
new::Union{VerInfo,Nothing}
end
function print_diff(io::IO, ctx::Context, diff::Vector{DiffEntry})
same = all(x.old == x.new for x in diff)
for x in diff
warnings = String[]
if x.old != nothing && x.new != nothing
if x.old ≈ x.new
verb = ' '
vstr = vstring(ctx, x.new)
else
if x.old.hash != x.new.hash && x.old.ver != x.new.ver
verb = x.old.ver == nothing || x.new.ver == nothing ||
x.old.ver == x.new.ver ? '~' :
x.old.ver < x.new.ver ? '↑' : '↓'
elseif x.old.ver == x.new.ver && x.old.pinned != x.new.pinned ||
x.old.repo != nothing || x.new.repo != nothing
verb = '~'
else
verb = '?'
msg = x.old.hash == x.new.hash ?
"hashes match but versions don't: $(x.old.ver) ≠ $(x.new.ver)" :
"versions match but hashes don't: $(x.old.hash) ≠ $(x.new.hash)"
push!(warnings, msg)
end
vstr = (x.old.ver == x.new.ver && x.old.pinned == x.new.pinned) ?
vstring(ctx, x.new) :
vstring(ctx, x.old) * " ⇒ " * vstring(ctx, x.new)
end
elseif x.new != nothing
verb = '+'
vstr = vstring(ctx, x.new)
elseif x.old != nothing
verb = '-'
vstr = vstring(ctx, x.old)
else
verb = '?'
vstr = "[unknown]"
end
v = same ? "" : " $verb"
printstyled(io, " [$(string(x.uuid)[1:8])]"; color = color_dark)
printstyled(io, "$v $(x.name) $vstr\n"; color = colors[verb])
end
end
# TODO: Use the Context stream
print_diff(ctx::Context, diff::Vector{DiffEntry}) = print_diff(stdout, ctx, diff)
function manifest_by_uuid(manifest::Dict)
entries = Dict{UUID,Dict}()
for (name, infos) in manifest, info in infos
uuid = UUID(info["uuid"])
haskey(entries, uuid) && @warn("Duplicate UUID in manifest: $uuid")
entries[uuid] = merge(info, Dict("name" => name))
end
return entries
end
function name_ver_info(info::Dict)
name = info["name"]
hash = haskey(info, "git-tree-sha1") ? SHA1(info["git-tree-sha1"]) : nothing
ver = haskey(info, "version") ? VersionNumber(info["version"]) : nothing
path = get(info, "path", nothing)
pin = get(info, "pinned", false)
if haskey(info, "repo-url")
repo = Types.GitRepo(info["repo-url"], info["repo-rev"])
else
repo = nothing
end
name, VerInfo(hash, path, ver, pin, repo)
end
function manifest_diff(ctx::Context, manifest₀::Dict, manifest₁::Dict)
diff = DiffEntry[]
entries₀ = manifest_by_uuid(manifest₀)
entries₁ = manifest_by_uuid(manifest₁)
for uuid in union(keys(entries₀), keys(entries₁))
name₀ = name₁ = v₀ = v₁ = nothing
haskey(entries₀, uuid) && ((name₀, v₀) = name_ver_info(entries₀[uuid]))
haskey(entries₁, uuid) && ((name₁, v₁) = name_ver_info(entries₁[uuid]))
name₀ == nothing && (name₀ = name₁)
name₁ == nothing && (name₁ = name₀)
if name₀ == name₁
push!(diff, DiffEntry(uuid, name₀, v₀, v₁))
else
push!(diff, DiffEntry(uuid, name₀, v₀, nothing))
push!(diff, DiffEntry(uuid, name₁, nothing, v₁))
end
end
sort!(diff, by=x->(x.uuid in keys(ctx.stdlibs), x.name, x.uuid))
end
function filter_manifest!(predicate, manifest::Dict)
empty = String[]
for (name, infos) in manifest
filter!(infos) do info
predicate(name, info)
end
isempty(infos) && push!(empty, name)
end
for name in empty
pop!(manifest, name)
end
return manifest
end
filter_manifest(predicate, manifest::Dict) =
filter_manifest!(predicate, deepcopy(manifest))
# This is precompilable, an anonymous function is not.
struct InProject{D <: Dict}
deps::D
neg::Bool
end
function (ip::InProject)(name::String, info::Dict)
v = haskey(ip.deps, name) && haskey(info, "uuid") && ip.deps[name] == info["uuid"]
return ip.neg ? !v : v
end
in_project(deps::Dict) = InProject(deps, false)
not_in_project(deps::Dict) = InProject(deps, true)
end # module
|
{"hexsha": "80e72fe1ad006bfbe8e0df037458d3e92e65c057", "size": 8756, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "stdlib/Pkg3/src/Display.jl", "max_stars_repo_name": "djsegal/julia-fork", "max_stars_repo_head_hexsha": "dd3d14e5e7d24985cba6185e2d07a62ee9943d4e", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-16T17:50:52.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-16T17:50:52.000Z", "max_issues_repo_path": "stdlib/Pkg3/src/Display.jl", "max_issues_repo_name": "djsegal/julia-fork", "max_issues_repo_head_hexsha": "dd3d14e5e7d24985cba6185e2d07a62ee9943d4e", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stdlib/Pkg3/src/Display.jl", "max_forks_repo_name": "djsegal/julia-fork", "max_forks_repo_head_hexsha": "dd3d14e5e7d24985cba6185e2d07a62ee9943d4e", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6086956522, "max_line_length": 97, "alphanum_fraction": 0.5754910918, "num_tokens": 2386}
|
program complex_06
implicit none
real, parameter :: a = 3.0, b = 4.0
complex, parameter :: i_ = (0, 1)
complex, parameter :: z = a + i_*b
real, parameter :: x = z
real, parameter :: y = real(z)
real, parameter :: w = aimag(z)
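! expected output: 3.0 3.0 4.0 (initializing a real with a complex keeps the real part)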
print *, x, y, w
end program
|
{"hexsha": "1184957571d40918d4b2bc22dbf454d8c04bc123", "size": 258, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "integration_tests/complex_06.f90", "max_stars_repo_name": "Thirumalai-Shaktivel/lfortran", "max_stars_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 316, "max_stars_repo_stars_event_min_datetime": "2019-03-24T16:23:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:28:33.000Z", "max_issues_repo_path": "integration_tests/complex_06.f90", "max_issues_repo_name": "Thirumalai-Shaktivel/lfortran", "max_issues_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-29T04:58:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-04T16:40:06.000Z", "max_forks_repo_path": "integration_tests/complex_06.f90", "max_forks_repo_name": "Thirumalai-Shaktivel/lfortran", "max_forks_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2019-03-28T19:40:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:28:55.000Z", "avg_line_length": 18.4285714286, "max_line_length": 35, "alphanum_fraction": 0.6240310078, "num_tokens": 89}
|
__author__ = 'francois'
from string import Template
import sqlite3
import numpy as np
import pandas as pd
import os
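# One advisory lock file per database name, created next to this module.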
def getLockFile(db):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), ".%s.db_lock"%db)
class Storage(object):
def get_data(self):
pass
class ProcessedStorage(Storage):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def prepare(self):
pass
def write_row(self, rowdict):
pass
def flush(self):
pass
class RawStorage(Storage):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def prepare(self):
pass
def write_row(self, rowdict):
pass
def flush(self):
pass
import zipfile
import tempfile
import shutil
import json
class Zip(RawStorage):
CONFIG = "conf.json"
def __init__(self, database, mode='w'):
self.database = database + ".zip"
self.zip = None
self.data = None
self.rowlist = []
self.lock = FileLock(getLockFile(self.database))
self.mode = mode
def __enter__(self):
if self.mode == 'w':
self.lock.lock()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.mode == 'w':
self.lock.unlock()
def prepare(self):
self.zip = zipfile.ZipFile(self.database, 'a', zipfile.ZIP_DEFLATED)
def store(self, output_dir, conf, root):
for key, value in conf.iteritems():
conf[key] = os.path.relpath(value, output_dir)
with open(os.path.join(output_dir, self.CONFIG), 'w') as f:
json.dump(conf, f)
for root_dir, dirs, files in os.walk(output_dir):
for fi in files:
path = os.path.join(root_dir, fi)
self.zip.write(path, os.path.relpath(path, root))
# self.zip.write(output_dir, os.path.relpath(output_dir, root))
def get_data(self):
if self.data is None:
self.data = pd.DataFrame(self.rowlist).convert_objects(convert_dates=True, convert_numeric=True, convert_timedeltas=True)
return self.data
def get_results(self):
if self.zip is None:
self.zip = zipfile.ZipFile(self.database, 'r', zipfile.ZIP_DEFLATED)
extract_dir = tempfile.mkdtemp()
print "Writing to %s" % extract_dir
self.zip.extractall(extract_dir)
# results = []
for output_dir, dirs, files in os.walk(extract_dir):
if output_dir == extract_dir:
continue
with open(os.path.join(output_dir, self.CONFIG), 'r') as f:
conf = json.load(f)
for key, value in conf.iteritems():
conf[key] = os.path.join(output_dir, value)
yield (output_dir, conf)
shutil.rmtree(extract_dir)
def write_row(self, rowdict):
self.rowlist.append(rowdict)
def flush(self):
self.zip.close()
class PandasHDF(ProcessedStorage):
def __init__(self, database):
self.data = pd.DataFrame()
self.database = database + ".hd5"
self.rowlist = []
self.convert = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def prepare(self):
pass
def write_row(self, rowdict):
self.rowlist.append(rowdict)
def get_data(self):
        if self.data is None or self.data.empty:  # initialised to an empty frame, so also reload when empty
if os.path.exists(self.database):
self.data = pd.read_hdf(self.database, "dt")
return self.data
def flush(self):
        with FileLock(getLockFile(self.database)) as lock:
newdata = pd.DataFrame(self.rowlist)
newdata.to_hdf(self.database, "dt", format = 't', append = True)
class PandasJson(ProcessedStorage):
def __init__(self, database):
self.data = pd.DataFrame()
self.database = database+".json"
self.rowlist = []
self.convert = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def prepare(self):
if os.path.exists(self.database):
self.data = pd.read_json(self.database, orient = 'split')
def write_row(self, rowdict):
self.rowlist.append(rowdict)
def get_data(self):
if self.convert is None:
self.convert = self.data.convert_objects(convert_dates=True, convert_numeric=True, convert_timedeltas=True)
return self.convert
def flush(self):
        with FileLock(getLockFile(self.database)) as lock:
newdata = pd.DataFrame(self.rowlist)
d = pd.concat([self.data, newdata])
d.to_json(self.database, orient = 'split')
class PandasPickle(ProcessedStorage):
def __init__(self, database):
self.data = pd.DataFrame()
self.database = database+".pickle"
self.rowlist = []
self.convert = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def prepare(self):
if os.path.exists(self.database):
self.data = pd.read_pickle(self.database)
def write_row(self, rowdict):
self.rowlist.append(rowdict)
def get_data(self):
if self.convert is None:
self.convert = self.data.convert_objects(convert_dates=True, convert_numeric=True, convert_timedeltas=True)
return self.convert
def flush(self):
        with FileLock(getLockFile(self.database)) as lock:
newdata = pd.DataFrame(self.rowlist)
d = pd.concat([self.data, newdata])
d.to_pickle(self.database)
from csv import QUOTE_ALL
from StringIO import StringIO
import cPickle
class PandasCsv(ProcessedStorage):
ARRAY_TYPE = "np2darray"
def __init__(self, database):
self.data = None
self.types = None
self.database = database+".csv"
self.rowlist = []
self.create = False
def __enter__(self):
if os.path.exists(self.database):
self.types = pd.read_csv(self.database, nrows = 1).iloc[0].to_dict()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def prepare(self):
self.create = not os.path.exists(self.database)
def flush(self):
        with FileLock(getLockFile(self.database)) as lock:
if not self.create:
newdata = pd.DataFrame(self.rowlist)
else:
newdata = pd.DataFrame([self.types]+self.rowlist)
newdata.to_csv(self.database, mode = 'a', index = False, header = self.create, quoting = QUOTE_ALL, line_terminator=";")
def write_row(self, rowdict):
if self.types is None:
self.types = pd.DataFrame([rowdict]).dtypes.to_dict()
for key, val in rowdict.iteritems():
if type(val) is np.ndarray:
self.types[key] = self.ARRAY_TYPE
for key, val in rowdict.iteritems():
if type(val) is np.ndarray:
rowdict[key] = self.dump_array(val)
self.rowlist.append(rowdict)
def get_data(self):
if self.data is None:
if os.path.exists(self.database):
self.data = pd.read_csv(self.database, skiprows=[1], lineterminator=";")
                self.data = self.data.apply(self.pump_arrays, axis = 1)  # apply returns a new frame; keep it
return self.data
def pump_arrays(self, row):
for key, val in self.types.iteritems():
if val == self.ARRAY_TYPE:
row[key] = self.load_array(row[key])
return row
def load_array(self, arr):
return cPickle.loads(arr)
# return np.loadtxt(StringIO(arr), delimiter="|")
def dump_array(self, arr):
return cPickle.dumps(arr)
# out = StringIO()
# np.savetxt(out, arr, delimiter="|")
# return out.getvalue()
### Sqlite implementation ###
class TypeHelper(object):
TXT = "TEXT"
INT = "INT"
FLOAT = "REAL"
BLOB = "BLOB"
ARRAY = "ARRAY"
@classmethod
def getType(cls, sample):
if type(sample) is np.ndarray:
return cls.ARRAY
        elif isinstance(sample, basestring):
return cls.getTypeFromString(sample)
elif type(sample) in (float, np.float_):
return cls.FLOAT
elif type(sample) is int:
return cls.INT
elif type(sample) is bool:
return cls.TXT
return cls.BLOB
@classmethod
def getTypeFromString(cls, s):
try:
float(s)
return cls.FLOAT
except ValueError:
pass
try:
import unicodedata
d = unicodedata.numeric(s)
if type(d) == float:
return cls.FLOAT
elif type(d) == int:
return cls.INT
except (TypeError, ValueError):
pass
return cls.TXT
@staticmethod
def is_number(s):
try:
return float(s)
# return True
except ValueError:
pass
try:
import unicodedata
return unicodedata.numeric(s)
# return True
except (TypeError, ValueError):
pass
return False
import io
def adapt_array(arr):
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
# http://stackoverflow.com/a/3425465/190597 (R. Hill)
return buffer(out.read())
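# Inverse of adapt_array: rebuild the numpy array from the stored BLOB bytes.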
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
class Sqlite3(ProcessedStorage):
table = 'experiments'
def get_data(self):
return self.connection
def __init__(self, database):
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter(TypeHelper.ARRAY, convert_array)
self.connection = sqlite3.connect(database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
def prepare(self):
self._prepare_base()
def write_row(self, rowdict):
self._add_cols(rowdict)
self._write_results(rowdict)
def __write(self, cmd, subst=None):
c = self.connection.cursor()
if subst is None:
c.execute(cmd)
else:
c.execute(cmd, subst)
self.connection.commit()
def read(self, cmd, subst=None):
c = self.connection.cursor()
if subst is None:
c.execute(cmd)
else:
c.execute(cmd, subst)
return c.fetchall()
def _add_column(self, table, colname, type="TEXT"):
try:
self.__write(Template('''ALTER TABLE "$table" ADD COLUMN "$colname" $type;''').substitute(table=table,
colname=colname,
type=type))
except sqlite3.OperationalError as e:
# print e
pass
def table_exists(self, table):
return self.read('''SELECT name FROM sqlite_master WHERE type="table" AND name='%s';''' % table)
def _prepare_base(self):
if not self.table_exists("experiments"):
self.__write('''CREATE TABLE %s (exp_id INTEGER, PRIMARY KEY(exp_id ASC));''' % self.table)
def _add_cols(self, rowdict):
for col, val in rowdict.iteritems():
self._add_column(self.table, col, TypeHelper.getType(val))
def _write_results(self, results):
cmd = '''INSERT INTO experiments('%s') VALUES (%s);'''
xs = ", ".join(["?"] * len(results))
# print cmd%("','".join(results.keys()), xs)
self.__write(cmd % ("','".join(results.keys()), xs), results.values())
def printTable(self):
tables = self.read('''SELECT name FROM sqlite_master WHERE type='table';''')
res = ""
        if not tables:
            return "No tables found (db is empty)..."
        for (table,) in tables:  # fetchall() yields one-element tuples
            res += "Table '%s' :[%s]\n" % (table, self.table_info(table))
return res
def table_info(self, table):
return ", ".join(zip(*self.read('''PRAGMA TABLE_INFO(%s);''' % table))[1])
def table_content(self, table):
return self.read('''SELECT * FROM %s''' % table)
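# Hedged usage sketch (not part of the original file):
#   with Sqlite3("results.db") as store:
#       store.prepare()
#       store.write_row({"loss": 0.12, "seed": 3, "weights": np.zeros(3)})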
import fcntl
class FileLock(object):
def __init__(self, file):
self.file = file
def lock(self):
self.fp = open(self.file, 'w')
# try:
fcntl.lockf(self.fp, fcntl.LOCK_EX)
def unlock(self):
fcntl.lockf(self.fp, fcntl.LOCK_UN)
self.fp.close()
def __enter__(self):
self.lock()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.unlock()
# import cmd
#
# class SqliteShell(cmd.Cmd):
# PROMPT = "Sqlite (%s) > "
# OUT = ">> %s"
#
# def __init__(self, database, db):
# cmd.Cmd.__init__(self)
# self.prompt = self.PROMPT % database
# self.db = db
#
# def precmd(self, line):
# """Hook method executed just before the command line is
# interpreted, but after the input prompt is generated and issued.
#
# """
# return line
#
# def postcmd(self, stop, line):
# """Hook method executed just after a command dispatch is finished."""
# return stop
#
# def preloop(self):
# """Hook method executed once when the cmdloop() method is called."""
# pass
#
# def postloop(self):
# """Hook method executed once when the cmdloop() method is about to
# return.
#
# """
# pass
#
# def sql(self, arg):
# if arg is not None:
# try:
# print self.OUT % repr(self.db.read(arg))
# except sqlite3.Error as e:
# print e.message
#
# def disp_data(self, arg):
# print self.db.table_info("experiments")
# print self.db.table_content("experiments")
|
{"hexsha": "a369f243e851be6a654a7da1f3f0e296fccee247", "size": 14352, "ext": "py", "lang": "Python", "max_stars_repo_path": "sims/storage.py", "max_stars_repo_name": "netixx/autotopo", "max_stars_repo_head_hexsha": "5cf5ba8f146fc26407fb842adee85f7be2880fe3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sims/storage.py", "max_issues_repo_name": "netixx/autotopo", "max_issues_repo_head_hexsha": "5cf5ba8f146fc26407fb842adee85f7be2880fe3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sims/storage.py", "max_forks_repo_name": "netixx/autotopo", "max_forks_repo_head_hexsha": "5cf5ba8f146fc26407fb842adee85f7be2880fe3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6531791908, "max_line_length": 133, "alphanum_fraction": 0.5783862876, "include": true, "reason": "import numpy", "num_tokens": 3277}
|
#!/usr/bin/env python
# license removed for brevity
import os
import sys
current_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_folder)
main_folder = os.path.join(current_folder, "..")
sys.path.append(main_folder)
import time
import numpy as np
full_path = os.path.dirname(__file__)
sys.path.append(full_path)
config_folder = os.path.join(current_folder, "..", "..", "matrix", "python")
sys.path.append(config_folder)
from console_formatter import Console_Formatter
#from dataset_packer import DATASET_PACKER
from dataset_retriever import DATASET_RETRIEVER
from dataset_label_encoder import DATASET_LABEL_ENCODER
if __name__ == "__main__":
from api_coco import API_COCO
coco = API_COCO()
data_retr = DATASET_RETRIEVER()
path = '/home/dataset/MS_COCO'
#data_retr.load_coco_mask(path, class_names=[])
data_retr.load_coco_mask(path, class_names=['person', 'dog'])
image_info = data_retr.get_dataset_data()
#print(image_info)
dl_encoder = DATASET_LABEL_ENCODER()
for i in image_info:
dl_encoder.add_class(image_info[i].keys())
labeled_data, convert_table, deconvert_table = dl_encoder.label_dataset_data(image_info)
print(convert_table)
print(deconvert_table)
#print(labeled_data)
|
{"hexsha": "d8a4ade4098a27d7ceb80f1f033d4e1d80a8100e", "size": 1341, "ext": "py", "lang": "Python", "max_stars_repo_path": "include/tf_nn_motor/models/dataset_test.py", "max_stars_repo_name": "lanfis/Raptor", "max_stars_repo_head_hexsha": "0db750de5acaeaca458acdad4cbf6383d5da3a20", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/tf_nn_motor/models/dataset_test.py", "max_issues_repo_name": "lanfis/Raptor", "max_issues_repo_head_hexsha": "0db750de5acaeaca458acdad4cbf6383d5da3a20", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/tf_nn_motor/models/dataset_test.py", "max_forks_repo_name": "lanfis/Raptor", "max_forks_repo_head_hexsha": "0db750de5acaeaca458acdad4cbf6383d5da3a20", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8, "max_line_length": 93, "alphanum_fraction": 0.7263236391, "include": true, "reason": "import numpy", "num_tokens": 319}
|
# --------------------------------------------------------
# Motion R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Simon Meister, based on code by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import losses
from tensorflow.contrib.slim import arg_scope
import numpy as np
from layers.proposal_layer import proposal_layer
from layers.anchor_target_layer import anchor_target_layer
from layers.proposal_target_layer import proposal_target_layer
from layers.generate_level_anchors import generate_level_anchors
from layers.assign_to_levels import assign_to_levels
from layers.test_layer import test_layer
from layers.roi_refine_layer import roi_refine_layer
from layers.mask_util import color_mask
from model.config import cfg
class Network(object):
def __init__(self, example, is_training):
self._pyramid_strides = [64, 32, 16, 8, 4]
self._pyramid_indices = [6, 5, 4, 3, 2]
self._batch_size = 1
self._predictions = {}
self._losses = {}
self._anchor_targets = {}
self._proposal_targets = {}
self._mask_targets = {}
self._layers = {}
self._act_summaries = []
self._score_summaries = {}
self._train_summaries = []
self._event_summaries = {}
self._input = example
self._num_classes = example['num_classes']
self._mode = 'TRAIN' if is_training else 'TEST'
        self._anchor_scales = cfg.ANCHOR_SCALES
self._num_scales = len(self._anchor_scales)
self._anchor_ratios = cfg.ANCHOR_RATIOS
self._num_ratios = len(self._anchor_ratios)
self._num_anchors = self._num_scales * self._num_ratios
weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
if cfg.TRAIN.BIAS_DECAY:
biases_regularizer = weights_regularizer
else:
biases_regularizer = tf.no_regularizer
with arg_scope([slim.conv2d, slim.conv2d_in_plane, \
slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
weights_regularizer=weights_regularizer,
biases_regularizer=biases_regularizer,
biases_initializer=tf.constant_initializer(0.0)):
self.build_network(is_training)
for var in tf.trainable_variables():
self._train_summaries.append(var)
if not is_training and cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
            stds = np.tile(np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (self._input['size']))
            means = np.tile(np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (self._input['size']))
self._predictions['bbox_pred'] *= stds
self._predictions['bbox_pred'] += means
if is_training:
self._add_losses()
with tf.device('/cpu:0'):
self.summary_image = self._get_summary_image(
self._input['image'],
self._predictions['rois'],
self._predictions['classes'],
self._predictions['mask_scores'])
for key, var in self._event_summaries.items():
tf.summary.scalar(key, var)
for key, var in self._score_summaries.items():
self._add_score_summary(key, var)
for var in self._act_summaries:
self._add_act_summary(var)
for var in self._train_summaries:
self._add_train_summary(var)
###########################################################################
# Mask R-CNN layers
###########################################################################
def _crop_rois(self, image, rois, name, resized_height, resized_width, batch_ids=None):
with tf.variable_scope(name) as scope:
if batch_ids is None:
batch_ids = rois[:, 0]
# Get the normalized coordinates of bboxes
height = tf.to_float(self._input['size'][0])
width = tf.to_float(self._input['size'][1])
x1 = rois[:, 1] / width
y1 = rois[:, 2] / height
x2 = rois[:, 3] / width
y2 = rois[:, 4] / height
boxes = tf.stack([y1, x1, y2, x2], axis=1)
crops = tf.image.crop_and_resize(image, boxes,
tf.to_int32(batch_ids),
[resized_height, resized_width],
name='crops')
return crops
def _assign_to_levels(self, boxes):
assignments, = tf.py_func(assign_to_levels,
[boxes, self._input['size'], len(self._pyramid_strides),
self._pyramid_strides[-1]],
[tf.int32], name='assign_to_levels')
assignments.set_shape([None])
return assignments
def _crop_rois_from_pyramid(self, rois, pyramid, name):
"""rois is (N, 5), where first entry is batch"""
with tf.variable_scope(name) as scope:
level_assignments = self._assign_to_levels(rois[:, 1:])
reordered_roi_crops = []
reordered_indices = []
for i, level in enumerate(pyramid):
indices = tf.where(tf.equal(level_assignments, i))[:, 0]
reordered_rois = tf.gather(rois, indices)
roi_crops = self._crop_rois(level, reordered_rois,
resized_height=14, resized_width=14,
name='roi_crops_{}'.format(i))
reordered_roi_crops.append(roi_crops)
reordered_indices.append(indices)
reordered_roi_crops = tf.concat(reordered_roi_crops, axis=0)
reordered_indices = tf.to_int32(tf.concat(reordered_indices, axis=0))
num_rois = tf.unstack(tf.shape(rois))[0]
roi_crops_shape = tf.stack([num_rois, 14, 14, 256], axis=0)
reordered_indices = tf.expand_dims(reordered_indices, axis=1)
roi_crops = tf.scatter_nd(reordered_indices, reordered_roi_crops, roi_crops_shape)
return roi_crops
def _build_anchors(self, pyramid):
anchors = []
for level, stride in zip(pyramid, self._pyramid_strides):
level_anchors = self._generate_level_anchors(level, stride)
anchors.append(level_anchors)
anchors = tf.concat(anchors, axis=0)
self._anchors = anchors
return anchors
def _generate_level_anchors(self, level, stride):
with tf.variable_scope('ANCHOR_' + str(stride)) as scope:
height, width = tf.unstack(tf.shape(level))[1:3]
anchors, = tf.py_func(generate_level_anchors,
[height, width, stride,
self._anchor_scales, self._anchor_ratios],
[tf.float32], name='generate_level_anchors')
anchors.set_shape([None, 4])
return anchors
def _roi_refine_layer(self, rois, cls_scores, bbox_pred, name):
with tf.variable_scope(name) as scope:
rois, = tf.py_func(
roi_refine_layer,
[rois, cls_scores, bbox_pred, self._input['size']],
[tf.float32])
rois.set_shape([None, 5])
return rois
def _test_layer(self, rois, roi_scores, cls_scores, name):
with tf.variable_scope(name) as scope:
rois, roi_scores, cls_scores = tf.py_func(
test_layer,
[rois, roi_scores, cls_scores, self._mode],
[tf.float32, tf.float32, tf.float32])
rois.set_shape([None, 5])
roi_scores.set_shape([None])
cls_scores.set_shape([None])
return rois, roi_scores, cls_scores
###########################################################################
# Faster R-CNN layers
###########################################################################
def _anchor_target_layer(self, name):
with tf.variable_scope(name) as scope:
rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(
anchor_target_layer,
[self._input['boxes'], self._input['size'],
self._anchors, self._num_anchors],
[tf.float32, tf.float32, tf.float32, tf.float32])
rpn_labels.set_shape([None])
rpn_bbox_targets.set_shape([None, 4])
rpn_bbox_inside_weights.set_shape([None, 4])
rpn_bbox_outside_weights.set_shape([None, 4])
rpn_labels = tf.to_int32(rpn_labels, name='to_int32')
self._anchor_targets['rpn_labels'] = rpn_labels
self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets
self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights
self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights
self._score_summaries.update(self._anchor_targets)
return rpn_labels
def _proposal_layer(self, rpn_scores, rpn_bbox_pred, name):
with tf.variable_scope(name) as scope:
rois, rpn_logits = tf.py_func(proposal_layer,
[rpn_scores, rpn_bbox_pred, self._input['size'],
self._mode, self._anchors, self._num_anchors],
[tf.float32, tf.float32])
rois.set_shape([None, 5])
rpn_logits.set_shape([None])
return rois, rpn_logits
def _proposal_target_layer(self, rois, roi_scores, name):
with tf.variable_scope(name) as scope:
rois, roi_scores, labels, bbox_targets, bbox_inside_weights, \
bbox_outside_weights, mask_targets = tf.py_func(
proposal_target_layer,
[rois, roi_scores,
self._input['boxes'], self._input['masks'], self._num_classes],
[tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,
tf.float32])
rois.set_shape([None, 5])
roi_scores.set_shape([None])
labels.set_shape([None])
bbox_targets.set_shape([None, self._num_classes * 4])
bbox_inside_weights.set_shape([None, self._num_classes * 4])
bbox_outside_weights.set_shape([None, self._num_classes * 4])
#gt_crops = self._crop_rois(self._gt_masks, rois,
# batch_ids=gt_assignments,
# resized_height=28, resized_width=28,
# name='gt_crops')
self._proposal_targets['rois'] = rois
self._proposal_targets['labels'] = tf.to_int32(labels, name='to_int32')
self._proposal_targets['bbox_targets'] = bbox_targets
self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights
self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights
self._proposal_targets['mask_targets'] = mask_targets
self._score_summaries.update(self._proposal_targets)
return rois, roi_scores
###########################################################################
# Utilities
###########################################################################
def _l1_loss(self, diff, valid):
# diff may also contain inf or nan, so we use tf.where instead of multiplying
# with a mask tensor
diff = tf.where(valid, diff, tf.zeros(tf.shape(diff)))
count = tf.reduce_sum(tf.to_float(valid))
loss = tf.reduce_sum(tf.abs(diff)) / count
return loss
def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights,
sigma=1.0, dim=[1]):
"""Computes smooth l1 loss between bbox_pred and bbox_targets.
There are two usages:
1. All examples are weighted the same, there are no ignored terms:
- bbox_inside_weights is 0 at negative examples, a positive constant otherwise
- bbox_outside_weights is 0 at negative examples, 1 otherwise
=> all diffs are averaged and thus negative examples contribute to the loss
via the normalization
- dim should be [1] so that we sum over 4 target numbers but average over examples
2. Manual weighting and support for ignored terms:
- bbox_inside_weights is zero at negative and ignored examples, a positive constant
otherwise
- bbox_outside_weights is zero at ignored examples and non-zero otherwise
=> used to scale losses before summing them up. E.g. for uniform weighting of
pos. and neg., set bbox_outside_weights to 1 / num_non_ignored at non-ignored
examples.
- dim should be [0, 1] to sum along all axes
"""
sigma_2 = sigma ** 2
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff
abs_in_box_diff = tf.abs(in_box_diff)
#smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
smoothL1_sign = tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2))
in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
out_loss_box = bbox_outside_weights * in_loss_box
loss_box = tf.reduce_mean(tf.reduce_sum(
out_loss_box,
axis=dim
))
return loss_box
def _add_losses(self, sigma_rpn=3.0):
with tf.variable_scope('loss') as scope:
# RPN, class loss
rpn_logits = self._predictions['rpn_logits']
rpn_labels = self._anchor_targets['rpn_labels']
rpn_select = tf.where(tf.not_equal(rpn_labels, -1))
rpn_logits = tf.gather(rpn_logits, rpn_select)
rpn_labels = tf.gather(rpn_labels, rpn_select)
rpn_cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=rpn_logits, labels=rpn_labels))
# RPN, bbox loss
rpn_bbox_pred = self._predictions['rpn_bbox_pred']
rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']
rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets,
rpn_bbox_inside_weights, rpn_bbox_outside_weights,
sigma=sigma_rpn, dim=[0, 1])
# RCNN, class loss
cls_logits = self._predictions['cls_logits']
label = self._proposal_targets['labels']
cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=cls_logits, labels=label))
# RCNN, bbox loss
bbox_pred = self._predictions['bbox_pred']
bbox_targets = self._proposal_targets['bbox_targets']
bbox_inside_weights = self._proposal_targets['bbox_inside_weights']
bbox_outside_weights = self._proposal_targets['bbox_outside_weights']
loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets,
bbox_inside_weights, bbox_outside_weights,
dim=[1])
# RCNN, mask loss
mask_targets = self._proposal_targets['mask_targets']
mask_logits = self._predictions['mask_logits']
loss_mask = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=mask_targets, logits=mask_logits))
# Depth prediction, supervised loss
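            # When ground-truth depth is available, an L1 loss is taken on either
            # inverse depth (cfg.TRAIN.INVERSE_DEPTH) or raw depth; pixels with
            # depth <= 0 (or infinite depth, in the direct case) are masked out
            # by _l1_loss.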
loss_depth = 0
if 'depth' in self._input:
gt_depth = self._input['depth']
depth_pred = self._predictions['depth_pred']
if cfg.TRAIN.INVERSE_DEPTH:
depth_target = 1. / gt_depth
valid = gt_depth > 0
else:
depth_target = gt_depth
valid = tf.logical_and(gt_depth > 0, gt_depth < np.inf)
diff = depth_pred - depth_target
loss_depth = self._l1_loss(diff, valid)
self._losses['cross_entropy'] = cross_entropy
self._losses['loss_box'] = loss_box
self._losses['rpn_cross_entropy'] = rpn_cross_entropy
self._losses['rpn_loss_box'] = rpn_loss_box
self._losses['mask_loss'] = loss_mask
self._losses['depth'] = loss_depth
loss = 0
if cfg.TRAIN.RPN:
loss += rpn_cross_entropy + rpn_loss_box
if cfg.TRAIN.RCNN:
loss += cross_entropy + loss_box + loss_mask
if cfg.TRAIN.MOTION:
if cfg.TRAIN.SUPERVISE_DEPTH:
loss += loss_depth
self._losses['total_loss'] = loss
self._event_summaries.update(self._losses)
return loss
def build_network(self, is_training=True):
raise NotImplementedError
###########################################################################
# Summaries
###########################################################################
def _color_mask(self, rois, classes, masks, height, width):
im, = tf.py_func(
color_mask,
[rois, classes, masks, height, width],
[tf.float32])
im.set_shape([None, None, 3])
return im
def _get_summary_image(self, image, rois, classes, masks):
# add back mean
image += cfg.PIXEL_MEANS / 255.0
# dims for normalization
width = tf.to_float(tf.shape(image)[2])
height = tf.to_float(tf.shape(image)[1])
        # from [batch, x1, y1, x2, y2] to normalized [y1, x1, y2, x2]
cols = tf.unstack(rois, axis=1)
boxes = tf.stack([cols[2] / height,
cols[1] / width,
cols[4] / height,
cols[3] / width], axis=1)
# add batch dimension (assume batch_size==1)
assert image.get_shape()[0] == 1
boxes = tf.expand_dims(boxes, dim=0)
image = tf.image.draw_bounding_boxes(image, boxes)
#color_mask = self._color_mask(rois, classes, masks, # TODO add again
# *tf.unstack(tf.shape(image))[1:3])
#image = image + 0.4 * color_mask
return image
def _add_act_summary(self, tensor):
tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
tf.nn.zero_fraction(tensor))
def _add_score_summary(self, key, tensor):
tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)
def _add_train_summary(self, var):
tf.summary.histogram('TRAIN/' + var.op.name, var)
|
{"hexsha": "8f2278b9e61c1783a9e3d68e03d6a70880ca1d9a", "size": 19677, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/nets/network.py", "max_stars_repo_name": "simonmeister/old-motion-rcnn", "max_stars_repo_head_hexsha": "1f62d5e0fa5111b8ad68cea90ad23c9e8e151bd1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-21T21:05:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-21T21:05:50.000Z", "max_issues_repo_path": "lib/nets/network.py", "max_issues_repo_name": "simonmeister/old-motion-rcnn", "max_issues_repo_head_hexsha": "1f62d5e0fa5111b8ad68cea90ad23c9e8e151bd1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/nets/network.py", "max_forks_repo_name": "simonmeister/old-motion-rcnn", "max_forks_repo_head_hexsha": "1f62d5e0fa5111b8ad68cea90ad23c9e8e151bd1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5180995475, "max_line_length": 105, "alphanum_fraction": 0.5719875997, "include": true, "reason": "import numpy", "num_tokens": 4280}
|
[STATEMENT]
lemma card_length_sum_list: "card {l::nat list. size l = m \<and> sum_list l = N} = (N + m - 1) choose N"
\<comment> \<open>by Holden Lee, tidied by Tobias Nipkow\<close>
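\<comment> \<open>This is the stars-and-bars count; for instance, with m = 2 and N = 2 the
    solutions are [0,2], [1,1], [2,0], and (2 + 2 - 1) choose 2 = 3.\<close>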
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
proof (cases m)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. m = 0 \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
2. \<And>nat. m = Suc nat \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
case 0
[PROOF STATE]
proof (state)
this:
m = 0
goal (2 subgoals):
1. m = 0 \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
2. \<And>nat. m = Suc nat \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
m = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
m = 0
goal (1 subgoal):
1. card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
by (cases N) (auto cong: conj_cong)
[PROOF STATE]
proof (state)
this:
card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
goal (1 subgoal):
1. \<And>nat. m = Suc nat \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>nat. m = Suc nat \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
case (Suc m')
[PROOF STATE]
proof (state)
this:
m = Suc m'
goal (1 subgoal):
1. \<And>nat. m = Suc nat \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
have m: "m \<ge> 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 \<le> m
[PROOF STEP]
by (simp add: Suc)
[PROOF STATE]
proof (state)
this:
1 \<le> m
goal (1 subgoal):
1. \<And>nat. m = Suc nat \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
1 \<le> m
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
1 \<le> m
goal (1 subgoal):
1. card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
proof (induct "N + m - 1" arbitrary: N m)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>N m. \<lbrakk>0 = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
2. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
case 0 \<comment> \<open>In the base case, the only solution is [0].\<close>
[PROOF STATE]
proof (state)
this:
0 = N + m - 1
1 \<le> m
goal (2 subgoals):
1. \<And>N m. \<lbrakk>0 = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
2. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
have [simp]: "{l::nat list. length l = Suc 0 \<and> (\<forall>n\<in>set l. n = 0)} = {[0]}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {l. length l = Suc 0 \<and> (\<forall>n\<in>set l. n = 0)} = {[0]}
[PROOF STEP]
by (auto simp: length_Suc_conv)
[PROOF STATE]
proof (state)
this:
{l. length l = Suc 0 \<and> (\<forall>n\<in>set l. n = 0)} = {[0]}
goal (2 subgoals):
1. \<And>N m. \<lbrakk>0 = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
2. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
have "m = 1 \<and> N = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. m = 1 \<and> N = 0
[PROOF STEP]
using 0
[PROOF STATE]
proof (prove)
using this:
0 = N + m - 1
1 \<le> m
goal (1 subgoal):
1. m = 1 \<and> N = 0
[PROOF STEP]
by linarith
[PROOF STATE]
proof (state)
this:
m = 1 \<and> N = 0
goal (2 subgoals):
1. \<And>N m. \<lbrakk>0 = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
2. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
m = 1 \<and> N = 0
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
m = 1 \<and> N = 0
goal (1 subgoal):
1. card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
goal (1 subgoal):
1. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
case (Suc k)
[PROOF STATE]
proof (state)
this:
\<lbrakk>k = ?N + ?m - 1; 1 \<le> ?m\<rbrakk> \<Longrightarrow> card {l. length l = ?m \<and> sum_list l = ?N} = ?N + ?m - 1 choose ?N
Suc k = N + m - 1
1 \<le> m
goal (1 subgoal):
1. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
have c1: "card {l::nat list. size l = (m - 1) \<and> sum_list l = N} = (N + (m - 1) - 1) choose N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
proof (cases "m = 1")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. m = 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
2. m \<noteq> 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
m = 1
goal (2 subgoals):
1. m = 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
2. m \<noteq> 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
with Suc.hyps
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>k = ?N + ?m - 1; 1 \<le> ?m\<rbrakk> \<Longrightarrow> card {l. length l = ?m \<and> sum_list l = ?N} = ?N + ?m - 1 choose ?N
Suc k = N + m - 1
m = 1
[PROOF STEP]
have "N \<ge> 1"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>k = ?N + ?m - 1; 1 \<le> ?m\<rbrakk> \<Longrightarrow> card {l. length l = ?m \<and> sum_list l = ?N} = ?N + ?m - 1 choose ?N
Suc k = N + m - 1
m = 1
goal (1 subgoal):
1. 1 \<le> N
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
1 \<le> N
goal (2 subgoals):
1. m = 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
2. m \<noteq> 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
with True
[PROOF STATE]
proof (chain)
picking this:
m = 1
1 \<le> N
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
m = 1
1 \<le> N
goal (1 subgoal):
1. card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
by (simp add: binomial_eq_0)
[PROOF STATE]
proof (state)
this:
card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
goal (1 subgoal):
1. m \<noteq> 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. m \<noteq> 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
m \<noteq> 1
goal (1 subgoal):
1. m \<noteq> 1 \<Longrightarrow> card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
m \<noteq> 1
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
m \<noteq> 1
goal (1 subgoal):
1. card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
using Suc
[PROOF STATE]
proof (prove)
using this:
m \<noteq> 1
\<lbrakk>k = ?N + ?m - 1; 1 \<le> ?m\<rbrakk> \<Longrightarrow> card {l. length l = ?m \<and> sum_list l = ?N} = ?N + ?m - 1 choose ?N
Suc k = N + m - 1
1 \<le> m
goal (1 subgoal):
1. card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
card {l. length l = m - 1 \<and> sum_list l = N} = N + (m - 1) - 1 choose N
goal (1 subgoal):
1. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
from Suc
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>k = ?N + ?m - 1; 1 \<le> ?m\<rbrakk> \<Longrightarrow> card {l. length l = ?m \<and> sum_list l = ?N} = ?N + ?m - 1 choose ?N
Suc k = N + m - 1
1 \<le> m
[PROOF STEP]
have c2: "card {l::nat list. size l = m \<and> sum_list l + 1 = N} =
(if N > 0 then ((N - 1) + m - 1) choose (N - 1) else 0)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>k = ?N + ?m - 1; 1 \<le> ?m\<rbrakk> \<Longrightarrow> card {l. length l = ?m \<and> sum_list l = ?N} = ?N + ?m - 1 choose ?N
Suc k = N + m - 1
1 \<le> m
goal (1 subgoal):
1. card {l. length l = m \<and> sum_list l + 1 = N} = (if 0 < N then N - 1 + m - 1 choose (N - 1) else 0)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>\<And>N m. \<lbrakk>k = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc k = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l + 1 = N} = (if 0 < N then N - 1 + m - 1 choose (N - 1) else 0)
[PROOF STEP]
have *: "n > 0 \<Longrightarrow> Suc m = n \<longleftrightarrow> m = n - 1" for m n
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < n \<Longrightarrow> (Suc m = n) = (m = n - 1)
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
0 < ?n \<Longrightarrow> (Suc ?m = ?n) = (?m = ?n - 1)
goal (1 subgoal):
1. \<lbrakk>\<And>N m. \<lbrakk>k = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc k = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l + 1 = N} = (if 0 < N then N - 1 + m - 1 choose (N - 1) else 0)
[PROOF STEP]
from Suc
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>k = ?N + ?m - 1; 1 \<le> ?m\<rbrakk> \<Longrightarrow> card {l. length l = ?m \<and> sum_list l = ?N} = ?N + ?m - 1 choose ?N
Suc k = N + m - 1
1 \<le> m
[PROOF STEP]
have "N > 0 \<Longrightarrow>
card {l::nat list. size l = m \<and> sum_list l + 1 = N} =
((N - 1) + m - 1) choose (N - 1)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>k = ?N + ?m - 1; 1 \<le> ?m\<rbrakk> \<Longrightarrow> card {l. length l = ?m \<and> sum_list l = ?N} = ?N + ?m - 1 choose ?N
Suc k = N + m - 1
1 \<le> m
goal (1 subgoal):
1. 0 < N \<Longrightarrow> card {l. length l = m \<and> sum_list l + 1 = N} = N - 1 + m - 1 choose (N - 1)
[PROOF STEP]
by (simp add: *)
[PROOF STATE]
proof (state)
this:
0 < N \<Longrightarrow> card {l. length l = m \<and> sum_list l + 1 = N} = N - 1 + m - 1 choose (N - 1)
goal (1 subgoal):
1. \<lbrakk>\<And>N m. \<lbrakk>k = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc k = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l + 1 = N} = (if 0 < N then N - 1 + m - 1 choose (N - 1) else 0)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < N \<Longrightarrow> card {l. length l = m \<and> sum_list l + 1 = N} = N - 1 + m - 1 choose (N - 1)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
0 < N \<Longrightarrow> card {l. length l = m \<and> sum_list l + 1 = N} = N - 1 + m - 1 choose (N - 1)
goal (1 subgoal):
1. card {l. length l = m \<and> sum_list l + 1 = N} = (if 0 < N then N - 1 + m - 1 choose (N - 1) else 0)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
card {l. length l = m \<and> sum_list l + 1 = N} = (if 0 < N then N - 1 + m - 1 choose (N - 1) else 0)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
card {l. length l = m \<and> sum_list l + 1 = N} = (if 0 < N then N - 1 + m - 1 choose (N - 1) else 0)
goal (1 subgoal):
1. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
from Suc.prems
[PROOF STATE]
proof (chain)
picking this:
1 \<le> m
[PROOF STEP]
have "(card {l::nat list. size l = (m - 1) \<and> sum_list l = N} +
card {l::nat list. size l = m \<and> sum_list l + 1 = N}) = (N + m - 1) choose N"
[PROOF STATE]
proof (prove)
using this:
1 \<le> m
goal (1 subgoal):
1. card {l. length l = m - 1 \<and> sum_list l = N} + card {l. length l = m \<and> sum_list l + 1 = N} = N + m - 1 choose N
[PROOF STEP]
by (auto simp: c1 c2 choose_reduce_nat[of "N + m - 1" N] simp del: One_nat_def)
[PROOF STATE]
proof (state)
this:
card {l. length l = m - 1 \<and> sum_list l = N} + card {l. length l = m \<and> sum_list l + 1 = N} = N + m - 1 choose N
goal (1 subgoal):
1. \<And>x N m. \<lbrakk>\<And>N m. \<lbrakk>x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N; Suc x = N + m - 1; 1 \<le> m\<rbrakk> \<Longrightarrow> card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
card {l. length l = m - 1 \<and> sum_list l = N} + card {l. length l = m \<and> sum_list l + 1 = N} = N + m - 1 choose N
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
card {l. length l = m - 1 \<and> sum_list l = N} + card {l. length l = m \<and> sum_list l + 1 = N} = N + m - 1 choose N
goal (1 subgoal):
1. card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
using card_length_sum_list_rec[OF Suc.prems]
[PROOF STATE]
proof (prove)
using this:
card {l. length l = m - 1 \<and> sum_list l = N} + card {l. length l = m \<and> sum_list l + 1 = N} = N + m - 1 choose N
card {l. length l = m \<and> sum_list l = ?N} = card {l. length l = m - 1 \<and> sum_list l = ?N} + card {l. length l = m \<and> sum_list l + 1 = ?N}
goal (1 subgoal):
1. card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
card {l. length l = m \<and> sum_list l = N} = N + m - 1 choose N
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 7573, "file": null, "length": 60}
|
(* Author: Amine Chaieb, University of Cambridge
*)
section \<open>Permutations, both general and specifically on finite sets.\<close>
theory Permutations
imports
"HOL-Library.Multiset"
"HOL-Library.Disjoint_Sets"
Transposition
begin
subsection \<open>Auxiliary\<close>
abbreviation (input) fixpoints :: \<open>('a \<Rightarrow> 'a) \<Rightarrow> 'a set\<close>
where \<open>fixpoints f \<equiv> {x. f x = x}\<close>
lemma inj_on_fixpoints:
\<open>inj_on f (fixpoints f)\<close>
by (rule inj_onI) simp
lemma bij_betw_fixpoints:
\<open>bij_betw f (fixpoints f) (fixpoints f)\<close>
using inj_on_fixpoints by (auto simp add: bij_betw_def)
subsection \<open>Basic definition and consequences\<close>
definition permutes :: \<open>('a \<Rightarrow> 'a) \<Rightarrow> 'a set \<Rightarrow> bool\<close> (infixr \<open>permutes\<close> 41)
where \<open>p permutes S \<longleftrightarrow> (\<forall>x. x \<notin> S \<longrightarrow> p x = x) \<and> (\<forall>y. \<exists>!x. p x = y)\<close>
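text \<open>Informally, \<open>p permutes S\<close> states that \<open>p\<close> is a bijection which fixes every
  point outside \<open>S\<close>; for instance, \<open>transpose a b permutes {a, b}\<close>, a special case
  of \<open>permutes_swap_id\<close> below.\<close>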
lemma bij_imp_permutes:
\<open>p permutes S\<close> if \<open>bij_betw p S S\<close> and stable: \<open>\<And>x. x \<notin> S \<Longrightarrow> p x = x\<close>
proof -
note \<open>bij_betw p S S\<close>
moreover have \<open>bij_betw p (- S) (- S)\<close>
by (auto simp add: stable intro!: bij_betw_imageI inj_onI)
ultimately have \<open>bij_betw p (S \<union> - S) (S \<union> - S)\<close>
by (rule bij_betw_combine) simp
then have \<open>\<exists>!x. p x = y\<close> for y
by (simp add: bij_iff)
with stable show ?thesis
by (simp add: permutes_def)
qed
context
fixes p :: \<open>'a \<Rightarrow> 'a\<close> and S :: \<open>'a set\<close>
assumes perm: \<open>p permutes S\<close>
begin
lemma permutes_inj:
\<open>inj p\<close>
using perm by (auto simp: permutes_def inj_on_def)
lemma permutes_image:
\<open>p ` S = S\<close>
proof (rule set_eqI)
fix x
show \<open>x \<in> p ` S \<longleftrightarrow> x \<in> S\<close>
proof
assume \<open>x \<in> p ` S\<close>
then obtain y where \<open>y \<in> S\<close> \<open>p y = x\<close>
by blast
with perm show \<open>x \<in> S\<close>
by (cases \<open>y = x\<close>) (auto simp add: permutes_def)
next
assume \<open>x \<in> S\<close>
with perm obtain y where \<open>y \<in> S\<close> \<open>p y = x\<close>
by (metis permutes_def)
then show \<open>x \<in> p ` S\<close>
by blast
qed
qed
lemma permutes_not_in:
\<open>x \<notin> S \<Longrightarrow> p x = x\<close>
using perm by (auto simp: permutes_def)
lemma permutes_image_complement:
\<open>p ` (- S) = - S\<close>
by (auto simp add: permutes_not_in)
lemma permutes_in_image:
\<open>p x \<in> S \<longleftrightarrow> x \<in> S\<close>
using permutes_image permutes_inj by (auto dest: inj_image_mem_iff)
lemma permutes_surj:
\<open>surj p\<close>
proof -
have \<open>p ` (S \<union> - S) = p ` S \<union> p ` (- S)\<close>
by (rule image_Un)
then show ?thesis
by (simp add: permutes_image permutes_image_complement)
qed
lemma permutes_inv_o:
shows "p \<circ> inv p = id"
and "inv p \<circ> p = id"
using permutes_inj permutes_surj
unfolding inj_iff [symmetric] surj_iff [symmetric] by auto
lemma permutes_inverses:
shows "p (inv p x) = x"
and "inv p (p x) = x"
using permutes_inv_o [unfolded fun_eq_iff o_def] by auto
lemma permutes_inv_eq:
\<open>inv p y = x \<longleftrightarrow> p x = y\<close>
by (auto simp add: permutes_inverses)
lemma permutes_inj_on:
\<open>inj_on p A\<close>
by (rule inj_on_subset [of _ UNIV]) (auto intro: permutes_inj)
lemma permutes_bij:
\<open>bij p\<close>
unfolding bij_def by (metis permutes_inj permutes_surj)
lemma permutes_imp_bij:
\<open>bij_betw p S S\<close>
by (simp add: bij_betw_def permutes_image permutes_inj_on)
lemma permutes_subset:
\<open>p permutes T\<close> if \<open>S \<subseteq> T\<close>
proof (rule bij_imp_permutes)
define R where \<open>R = T - S\<close>
with that have \<open>T = R \<union> S\<close> \<open>R \<inter> S = {}\<close>
by auto
then have \<open>p x = x\<close> if \<open>x \<in> R\<close> for x
using that by (auto intro: permutes_not_in)
then have \<open>p ` R = R\<close>
by simp
with \<open>T = R \<union> S\<close> show \<open>bij_betw p T T\<close>
by (simp add: bij_betw_def permutes_inj_on image_Un permutes_image)
fix x
assume \<open>x \<notin> T\<close>
with \<open>T = R \<union> S\<close> show \<open>p x = x\<close>
by (simp add: permutes_not_in)
qed
lemma permutes_imp_permutes_insert:
\<open>p permutes insert x S\<close>
by (rule permutes_subset) auto
end
lemma permutes_id [simp]:
\<open>id permutes S\<close>
by (auto intro: bij_imp_permutes)
lemma permutes_empty [simp]:
\<open>p permutes {} \<longleftrightarrow> p = id\<close>
proof
assume \<open>p permutes {}\<close>
then show \<open>p = id\<close>
by (auto simp add: fun_eq_iff permutes_not_in)
next
assume \<open>p = id\<close>
then show \<open>p permutes {}\<close>
by simp
qed
lemma permutes_sing [simp]:
\<open>p permutes {a} \<longleftrightarrow> p = id\<close>
proof
assume perm: \<open>p permutes {a}\<close>
show \<open>p = id\<close>
proof
fix x
from perm have \<open>p ` {a} = {a}\<close>
by (rule permutes_image)
with perm show \<open>p x = id x\<close>
by (cases \<open>x = a\<close>) (auto simp add: permutes_not_in)
qed
next
assume \<open>p = id\<close>
then show \<open>p permutes {a}\<close>
by simp
qed
lemma permutes_univ: "p permutes UNIV \<longleftrightarrow> (\<forall>y. \<exists>!x. p x = y)"
by (simp add: permutes_def)
lemma permutes_swap_id: "a \<in> S \<Longrightarrow> b \<in> S \<Longrightarrow> transpose a b permutes S"
by (rule bij_imp_permutes) (auto intro: transpose_apply_other)
lemma permutes_superset:
\<open>p permutes T\<close> if \<open>p permutes S\<close> \<open>\<And>x. x \<in> S - T \<Longrightarrow> p x = x\<close>
proof -
define R U where \<open>R = T \<inter> S\<close> and \<open>U = S - T\<close>
then have \<open>T = R \<union> (T - S)\<close> \<open>S = R \<union> U\<close> \<open>R \<inter> U = {}\<close>
by auto
from that \<open>U = S - T\<close> have \<open>p ` U = U\<close>
by simp
from \<open>p permutes S\<close> have \<open>bij_betw p (R \<union> U) (R \<union> U)\<close>
by (simp add: permutes_imp_bij \<open>S = R \<union> U\<close>)
moreover have \<open>bij_betw p U U\<close>
using that \<open>U = S - T\<close> by (simp add: bij_betw_def permutes_inj_on)
ultimately have \<open>bij_betw p R R\<close>
using \<open>R \<inter> U = {}\<close> \<open>R \<inter> U = {}\<close> by (rule bij_betw_partition)
then have \<open>p permutes R\<close>
proof (rule bij_imp_permutes)
fix x
assume \<open>x \<notin> R\<close>
with \<open>R = T \<inter> S\<close> \<open>p permutes S\<close> show \<open>p x = x\<close>
by (cases \<open>x \<in> S\<close>) (auto simp add: permutes_not_in that(2))
qed
then have \<open>p permutes R \<union> (T - S)\<close>
by (rule permutes_subset) simp
with \<open>T = R \<union> (T - S)\<close> show ?thesis
by simp
qed
lemma permutes_bij_inv_into: \<^marker>\<open>contributor \<open>Lukas Bulwahn\<close>\<close>
fixes A :: "'a set"
and B :: "'b set"
assumes "p permutes A"
and "bij_betw f A B"
shows "(\<lambda>x. if x \<in> B then f (p (inv_into A f x)) else x) permutes B"
proof (rule bij_imp_permutes)
from assms have "bij_betw p A A" "bij_betw f A B" "bij_betw (inv_into A f) B A"
by (auto simp add: permutes_imp_bij bij_betw_inv_into)
then have "bij_betw (f \<circ> p \<circ> inv_into A f) B B"
by (simp add: bij_betw_trans)
then show "bij_betw (\<lambda>x. if x \<in> B then f (p (inv_into A f x)) else x) B B"
by (subst bij_betw_cong[where g="f \<circ> p \<circ> inv_into A f"]) auto
next
fix x
assume "x \<notin> B"
then show "(if x \<in> B then f (p (inv_into A f x)) else x) = x" by auto
qed
lemma permutes_image_mset: \<^marker>\<open>contributor \<open>Lukas Bulwahn\<close>\<close>
assumes "p permutes A"
shows "image_mset p (mset_set A) = mset_set A"
using assms by (metis image_mset_mset_set bij_betw_imp_inj_on permutes_imp_bij permutes_image)
lemma permutes_implies_image_mset_eq: \<^marker>\<open>contributor \<open>Lukas Bulwahn\<close>\<close>
assumes "p permutes A" "\<And>x. x \<in> A \<Longrightarrow> f x = f' (p x)"
shows "image_mset f' (mset_set A) = image_mset f (mset_set A)"
proof -
have "f x = f' (p x)" if "x \<in># mset_set A" for x
using assms(2)[of x] that by (cases "finite A") auto
with assms have "image_mset f (mset_set A) = image_mset (f' \<circ> p) (mset_set A)"
by (auto intro!: image_mset_cong)
also have "\<dots> = image_mset f' (image_mset p (mset_set A))"
by (simp add: image_mset.compositionality)
also have "\<dots> = image_mset f' (mset_set A)"
proof -
from assms permutes_image_mset have "image_mset p (mset_set A) = mset_set A"
by blast
then show ?thesis by simp
qed
finally show ?thesis ..
qed
subsection \<open>Group properties\<close>
lemma permutes_compose: "p permutes S \<Longrightarrow> q permutes S \<Longrightarrow> q \<circ> p permutes S"
unfolding permutes_def o_def by metis
lemma permutes_inv:
assumes "p permutes S"
shows "inv p permutes S"
using assms unfolding permutes_def permutes_inv_eq[OF assms] by metis
lemma permutes_inv_inv:
assumes "p permutes S"
shows "inv (inv p) = p"
unfolding fun_eq_iff permutes_inv_eq[OF assms] permutes_inv_eq[OF permutes_inv[OF assms]]
by blast
lemma permutes_invI:
assumes perm: "p permutes S"
and inv: "\<And>x. x \<in> S \<Longrightarrow> p' (p x) = x"
and outside: "\<And>x. x \<notin> S \<Longrightarrow> p' x = x"
shows "inv p = p'"
proof
show "inv p x = p' x" for x
proof (cases "x \<in> S")
case True
from assms have "p' x = p' (p (inv p x))"
by (simp add: permutes_inverses)
also from permutes_inv[OF perm] True have "\<dots> = inv p x"
by (subst inv) (simp_all add: permutes_in_image)
finally show ?thesis ..
next
case False
with permutes_inv[OF perm] show ?thesis
by (simp_all add: outside permutes_not_in)
qed
qed
lemma permutes_vimage: "f permutes A \<Longrightarrow> f -` A = A"
by (simp add: bij_vimage_eq_inv_image permutes_bij permutes_image[OF permutes_inv])
subsection \<open>Mapping permutations with bijections\<close>
lemma bij_betw_permutations:
assumes "bij_betw f A B"
shows "bij_betw (\<lambda>\<pi> x. if x \<in> B then f (\<pi> (inv_into A f x)) else x)
{\<pi>. \<pi> permutes A} {\<pi>. \<pi> permutes B}" (is "bij_betw ?f _ _")
proof -
let ?g = "(\<lambda>\<pi> x. if x \<in> A then inv_into A f (\<pi> (f x)) else x)"
show ?thesis
proof (rule bij_betw_byWitness [of _ ?g], goal_cases)
case 3
show ?case using permutes_bij_inv_into[OF _ assms] by auto
next
case 4
have bij_inv: "bij_betw (inv_into A f) B A" by (intro bij_betw_inv_into assms)
{
fix \<pi> assume "\<pi> permutes B"
from permutes_bij_inv_into[OF this bij_inv] and assms
have "(\<lambda>x. if x \<in> A then inv_into A f (\<pi> (f x)) else x) permutes A"
by (simp add: inv_into_inv_into_eq cong: if_cong)
}
from this show ?case by (auto simp: permutes_inv)
next
case 1
thus ?case using assms
by (auto simp: fun_eq_iff permutes_not_in permutes_in_image bij_betw_inv_into_left
dest: bij_betwE)
next
case 2
moreover have "bij_betw (inv_into A f) B A"
by (intro bij_betw_inv_into assms)
ultimately show ?case using assms
by (auto simp: fun_eq_iff permutes_not_in permutes_in_image bij_betw_inv_into_right
dest: bij_betwE)
qed
qed
lemma bij_betw_derangements:
assumes "bij_betw f A B"
shows "bij_betw (\<lambda>\<pi> x. if x \<in> B then f (\<pi> (inv_into A f x)) else x)
{\<pi>. \<pi> permutes A \<and> (\<forall>x\<in>A. \<pi> x \<noteq> x)} {\<pi>. \<pi> permutes B \<and> (\<forall>x\<in>B. \<pi> x \<noteq> x)}"
(is "bij_betw ?f _ _")
proof -
let ?g = "(\<lambda>\<pi> x. if x \<in> A then inv_into A f (\<pi> (f x)) else x)"
show ?thesis
proof (rule bij_betw_byWitness [of _ ?g], goal_cases)
case 3
have "?f \<pi> x \<noteq> x" if "\<pi> permutes A" "\<And>x. x \<in> A \<Longrightarrow> \<pi> x \<noteq> x" "x \<in> B" for \<pi> x
using that and assms by (metis bij_betwE bij_betw_imp_inj_on bij_betw_imp_surj_on
inv_into_f_f inv_into_into permutes_imp_bij)
with permutes_bij_inv_into[OF _ assms] show ?case by auto
next
case 4
have bij_inv: "bij_betw (inv_into A f) B A" by (intro bij_betw_inv_into assms)
have "?g \<pi> permutes A" if "\<pi> permutes B" for \<pi>
using permutes_bij_inv_into[OF that bij_inv] and assms
by (simp add: inv_into_inv_into_eq cong: if_cong)
moreover have "?g \<pi> x \<noteq> x" if "\<pi> permutes B" "\<And>x. x \<in> B \<Longrightarrow> \<pi> x \<noteq> x" "x \<in> A" for \<pi> x
using that and assms by (metis bij_betwE bij_betw_imp_surj_on f_inv_into_f permutes_imp_bij)
ultimately show ?case by auto
next
case 1
thus ?case using assms
by (force simp: fun_eq_iff permutes_not_in permutes_in_image bij_betw_inv_into_left
dest: bij_betwE)
next
case 2
moreover have "bij_betw (inv_into A f) B A"
by (intro bij_betw_inv_into assms)
ultimately show ?case using assms
by (force simp: fun_eq_iff permutes_not_in permutes_in_image bij_betw_inv_into_right
dest: bij_betwE)
qed
qed
subsection \<open>The number of permutations on a finite set\<close>
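text \<open>A finite set with \<open>n\<close> elements has exactly \<open>fact n\<close> permutations; a
  three-element set, for example, has \<open>6\<close>. The proof peels off one element at a
  time via \<open>permutes_insert\<close>.\<close>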
lemma permutes_insert_lemma:
assumes "p permutes (insert a S)"
shows "transpose a (p a) \<circ> p permutes S"
apply (rule permutes_superset[where S = "insert a S"])
apply (rule permutes_compose[OF assms])
apply (rule permutes_swap_id, simp)
using permutes_in_image[OF assms, of a]
apply simp
apply (auto simp add: Ball_def)
done
lemma permutes_insert: "{p. p permutes (insert a S)} =
(\<lambda>(b, p). transpose a b \<circ> p) ` {(b, p). b \<in> insert a S \<and> p \<in> {p. p permutes S}}"
proof -
have "p permutes insert a S \<longleftrightarrow>
(\<exists>b q. p = transpose a b \<circ> q \<and> b \<in> insert a S \<and> q permutes S)" for p
proof -
have "\<exists>b q. p = transpose a b \<circ> q \<and> b \<in> insert a S \<and> q permutes S"
if p: "p permutes insert a S"
proof -
let ?b = "p a"
let ?q = "transpose a (p a) \<circ> p"
have *: "p = transpose a ?b \<circ> ?q"
by (simp add: fun_eq_iff o_assoc)
have **: "?b \<in> insert a S"
unfolding permutes_in_image[OF p] by simp
from permutes_insert_lemma[OF p] * ** show ?thesis
by blast
qed
moreover have "p permutes insert a S"
if bq: "p = transpose a b \<circ> q" "b \<in> insert a S" "q permutes S" for b q
proof -
from permutes_subset[OF bq(3), of "insert a S"] have q: "q permutes insert a S"
by auto
have a: "a \<in> insert a S"
by simp
from bq(1) permutes_compose[OF q permutes_swap_id[OF a bq(2)]] show ?thesis
by simp
qed
ultimately show ?thesis by blast
qed
then show ?thesis by auto
qed
lemma card_permutations:
assumes "card S = n"
and "finite S"
shows "card {p. p permutes S} = fact n"
using assms(2,1)
proof (induct arbitrary: n)
case empty
then show ?case by simp
next
case (insert x F)
{
fix n
assume card_insert: "card (insert x F) = n"
let ?xF = "{p. p permutes insert x F}"
let ?pF = "{p. p permutes F}"
let ?pF' = "{(b, p). b \<in> insert x F \<and> p \<in> ?pF}"
let ?g = "(\<lambda>(b, p). transpose x b \<circ> p)"
have xfgpF': "?xF = ?g ` ?pF'"
by (rule permutes_insert[of x F])
from \<open>x \<notin> F\<close> \<open>finite F\<close> card_insert have Fs: "card F = n - 1"
by auto
from \<open>finite F\<close> insert.hyps Fs have pFs: "card ?pF = fact (n - 1)"
by auto
then have "finite ?pF"
by (auto intro: card_ge_0_finite)
with \<open>finite F\<close> card.insert_remove have pF'f: "finite ?pF'"
apply (simp only: Collect_case_prod Collect_mem_eq)
apply (rule finite_cartesian_product)
apply simp_all
done
have ginj: "inj_on ?g ?pF'"
proof -
{
fix b p c q
assume bp: "(b, p) \<in> ?pF'"
assume cq: "(c, q) \<in> ?pF'"
assume eq: "?g (b, p) = ?g (c, q)"
from bp cq have pF: "p permutes F" and qF: "q permutes F"
by auto
from pF \<open>x \<notin> F\<close> eq have "b = ?g (b, p) x"
by (auto simp: permutes_def fun_upd_def fun_eq_iff)
also from qF \<open>x \<notin> F\<close> eq have "\<dots> = ?g (c, q) x"
by (auto simp: fun_upd_def fun_eq_iff)
also from qF \<open>x \<notin> F\<close> have "\<dots> = c"
by (auto simp: permutes_def fun_upd_def fun_eq_iff)
finally have "b = c" .
then have "transpose x b = transpose x c"
by simp
with eq have "transpose x b \<circ> p = transpose x b \<circ> q"
by simp
then have "transpose x b \<circ> (transpose x b \<circ> p) = transpose x b \<circ> (transpose x b \<circ> q)"
by simp
then have "p = q"
by (simp add: o_assoc)
with \<open>b = c\<close> have "(b, p) = (c, q)"
by simp
}
then show ?thesis
unfolding inj_on_def by blast
qed
from \<open>x \<notin> F\<close> \<open>finite F\<close> card_insert have "n \<noteq> 0"
by auto
then have "\<exists>m. n = Suc m"
by presburger
then obtain m where n: "n = Suc m"
by blast
from pFs card_insert have *: "card ?xF = fact n"
unfolding xfgpF' card_image[OF ginj]
using \<open>finite F\<close> \<open>finite ?pF\<close>
by (simp only: Collect_case_prod Collect_mem_eq card_cartesian_product) (simp add: n)
from finite_imageI[OF pF'f, of ?g] have xFf: "finite ?xF"
by (simp add: xfgpF' n)
from * have "card ?xF = fact n"
unfolding xFf by blast
}
with insert show ?case by simp
qed
lemma finite_permutations:
assumes "finite S"
shows "finite {p. p permutes S}"
using card_permutations[OF refl assms] by (auto intro: card_ge_0_finite)
subsection \<open>Hence a sort of induction principle composing by swaps\<close>
lemma permutes_induct [consumes 2, case_names id swap]:
\<open>P p\<close> if \<open>p permutes S\<close> \<open>finite S\<close>
and id: \<open>P id\<close>
and swap: \<open>\<And>a b p. a \<in> S \<Longrightarrow> b \<in> S \<Longrightarrow> p permutes S \<Longrightarrow> P p \<Longrightarrow> P (transpose a b \<circ> p)\<close>
using \<open>finite S\<close> \<open>p permutes S\<close> swap proof (induction S arbitrary: p)
case empty
with id show ?case
by (simp only: permutes_empty)
next
case (insert x S p)
define q where \<open>q = transpose x (p x) \<circ> p\<close>
then have swap_q: \<open>transpose x (p x) \<circ> q = p\<close>
by (simp add: o_assoc)
from \<open>p permutes insert x S\<close> have \<open>q permutes S\<close>
by (simp add: q_def permutes_insert_lemma)
then have \<open>q permutes insert x S\<close>
by (simp add: permutes_imp_permutes_insert)
from \<open>q permutes S\<close> have \<open>P q\<close>
by (auto intro: insert.IH insert.prems(2) permutes_imp_permutes_insert)
have \<open>x \<in> insert x S\<close>
by simp
moreover from \<open>p permutes insert x S\<close> have \<open>p x \<in> insert x S\<close>
using permutes_in_image [of p \<open>insert x S\<close> x] by simp
ultimately have \<open>P (transpose x (p x) \<circ> q)\<close>
using \<open>q permutes insert x S\<close> \<open>P q\<close>
by (rule insert.prems(2))
then show ?case
by (simp add: swap_q)
qed
lemma permutes_rev_induct [consumes 2, case_names id swap]:
\<open>P p\<close> if \<open>p permutes S\<close> \<open>finite S\<close>
and id': \<open>P id\<close>
and swap': \<open>\<And>a b p. a \<in> S \<Longrightarrow> b \<in> S \<Longrightarrow> p permutes S \<Longrightarrow> P p \<Longrightarrow> P (p \<circ> transpose a b)\<close>
using \<open>p permutes S\<close> \<open>finite S\<close> proof (induction rule: permutes_induct)
case id
from id' show ?case .
next
case (swap a b p)
then have \<open>bij p\<close>
using permutes_bij by blast
have \<open>P (p \<circ> transpose (inv p a) (inv p b))\<close>
by (rule swap') (auto simp add: swap permutes_in_image permutes_inv)
also have \<open>p \<circ> transpose (inv p a) (inv p b) = transpose a b \<circ> p\<close>
using \<open>bij p\<close> by (rule transpose_comp_eq [symmetric])
finally show ?case .
qed
subsection \<open>Permutations of index set for iterated operations\<close>
lemma (in comm_monoid_set) permute:
assumes "p permutes S"
shows "F g S = F (g \<circ> p) S"
proof -
from \<open>p permutes S\<close> have "inj p"
by (rule permutes_inj)
then have "inj_on p S"
by (auto intro: subset_inj_on)
then have "F g (p ` S) = F (g \<circ> p) S"
by (rule reindex)
moreover from \<open>p permutes S\<close> have "p ` S = S"
by (rule permutes_image)
ultimately show ?thesis
by simp
qed
subsection \<open>Permutations as transposition sequences\<close>
inductive swapidseq :: "nat \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> bool"
where
id[simp]: "swapidseq 0 id"
| comp_Suc: "swapidseq n p \<Longrightarrow> a \<noteq> b \<Longrightarrow> swapidseq (Suc n) (transpose a b \<circ> p)"
declare id[unfolded id_def, simp]
definition "permutation p \<longleftrightarrow> (\<exists>n. swapidseq n p)"
subsection \<open>Some closure properties of the set of permutations, with lengths\<close>
lemma permutation_id[simp]: "permutation id"
unfolding permutation_def by (rule exI[where x=0]) simp
declare permutation_id[unfolded id_def, simp]
lemma swapidseq_swap: "swapidseq (if a = b then 0 else 1) (transpose a b)"
apply clarsimp
using comp_Suc[of 0 id a b]
apply simp
done
lemma permutation_swap_id: "permutation (transpose a b)"
proof (cases "a = b")
case True
then show ?thesis by simp
next
case False
then show ?thesis
unfolding permutation_def
using swapidseq_swap[of a b] by blast
qed
lemma swapidseq_comp_add: "swapidseq n p \<Longrightarrow> swapidseq m q \<Longrightarrow> swapidseq (n + m) (p \<circ> q)"
proof (induct n p arbitrary: m q rule: swapidseq.induct)
case (id m q)
then show ?case by simp
next
case (comp_Suc n p a b m q)
have eq: "Suc n + m = Suc (n + m)"
by arith
show ?case
apply (simp only: eq comp_assoc)
apply (rule swapidseq.comp_Suc)
using comp_Suc.hyps(2)[OF comp_Suc.prems] comp_Suc.hyps(3)
apply blast+
done
qed
lemma permutation_compose: "permutation p \<Longrightarrow> permutation q \<Longrightarrow> permutation (p \<circ> q)"
unfolding permutation_def using swapidseq_comp_add[of _ p _ q] by metis
lemma swapidseq_endswap: "swapidseq n p \<Longrightarrow> a \<noteq> b \<Longrightarrow> swapidseq (Suc n) (p \<circ> transpose a b)"
by (induct n p rule: swapidseq.induct)
(use swapidseq_swap[of a b] in \<open>auto simp add: comp_assoc intro: swapidseq.comp_Suc\<close>)
lemma swapidseq_inverse_exists: "swapidseq n p \<Longrightarrow> \<exists>q. swapidseq n q \<and> p \<circ> q = id \<and> q \<circ> p = id"
proof (induct n p rule: swapidseq.induct)
case id
then show ?case
by (rule exI[where x=id]) simp
next
case (comp_Suc n p a b)
from comp_Suc.hyps obtain q where q: "swapidseq n q" "p \<circ> q = id" "q \<circ> p = id"
by blast
let ?q = "q \<circ> transpose a b"
note H = comp_Suc.hyps
from swapidseq_swap[of a b] H(3) have *: "swapidseq 1 (transpose a b)"
by simp
from swapidseq_comp_add[OF q(1) *] have **: "swapidseq (Suc n) ?q"
by simp
have "transpose a b \<circ> p \<circ> ?q = transpose a b \<circ> (p \<circ> q) \<circ> transpose a b"
by (simp add: o_assoc)
also have "\<dots> = id"
by (simp add: q(2))
finally have ***: "transpose a b \<circ> p \<circ> ?q = id" .
have "?q \<circ> (transpose a b \<circ> p) = q \<circ> (transpose a b \<circ> transpose a b) \<circ> p"
by (simp only: o_assoc)
then have "?q \<circ> (transpose a b \<circ> p) = id"
by (simp add: q(3))
with ** *** show ?case
by blast
qed
lemma swapidseq_inverse:
assumes "swapidseq n p"
shows "swapidseq n (inv p)"
using swapidseq_inverse_exists[OF assms] inv_unique_comp[of p] by auto
lemma permutation_inverse: "permutation p \<Longrightarrow> permutation (inv p)"
using permutation_def swapidseq_inverse by blast
subsection \<open>Various combinations of transpositions with 2, 1 and 0 common elements\<close>
lemma swap_id_common: "a \<noteq> c \<Longrightarrow> b \<noteq> c \<Longrightarrow>
transpose a b \<circ> transpose a c = transpose b c \<circ> transpose a b"
by (simp add: fun_eq_iff transpose_def)
lemma swap_id_common': "a \<noteq> b \<Longrightarrow> a \<noteq> c \<Longrightarrow>
transpose a c \<circ> transpose b c = transpose b c \<circ> transpose a b"
by (simp add: fun_eq_iff transpose_def)
lemma swap_id_independent: "a \<noteq> c \<Longrightarrow> a \<noteq> d \<Longrightarrow> b \<noteq> c \<Longrightarrow> b \<noteq> d \<Longrightarrow>
transpose a b \<circ> transpose c d = transpose c d \<circ> transpose a b"
by (simp add: fun_eq_iff transpose_def)
subsection \<open>The identity map only has even transposition sequences\<close>
lemma symmetry_lemma:
assumes "\<And>a b c d. P a b c d \<Longrightarrow> P a b d c"
and "\<And>a b c d. a \<noteq> b \<Longrightarrow> c \<noteq> d \<Longrightarrow>
a = c \<and> b = d \<or> a = c \<and> b \<noteq> d \<or> a \<noteq> c \<and> b = d \<or> a \<noteq> c \<and> a \<noteq> d \<and> b \<noteq> c \<and> b \<noteq> d \<Longrightarrow>
P a b c d"
shows "\<And>a b c d. a \<noteq> b \<longrightarrow> c \<noteq> d \<longrightarrow> P a b c d"
using assms by metis
lemma swap_general: "a \<noteq> b \<Longrightarrow> c \<noteq> d \<Longrightarrow>
transpose a b \<circ> transpose c d = id \<or>
(\<exists>x y z. x \<noteq> a \<and> y \<noteq> a \<and> z \<noteq> a \<and> x \<noteq> y \<and>
transpose a b \<circ> transpose c d = transpose x y \<circ> transpose a z)"
proof -
assume neq: "a \<noteq> b" "c \<noteq> d"
have "a \<noteq> b \<longrightarrow> c \<noteq> d \<longrightarrow>
(transpose a b \<circ> transpose c d = id \<or>
(\<exists>x y z. x \<noteq> a \<and> y \<noteq> a \<and> z \<noteq> a \<and> x \<noteq> y \<and>
transpose a b \<circ> transpose c d = transpose x y \<circ> transpose a z))"
apply (rule symmetry_lemma[where a=a and b=b and c=c and d=d])
apply (simp_all only: ac_simps)
apply (metis id_comp swap_id_common swap_id_common' swap_id_independent transpose_comp_involutory)
done
with neq show ?thesis by metis
qed
lemma swapidseq_id_iff[simp]: "swapidseq 0 p \<longleftrightarrow> p = id"
using swapidseq.cases[of 0 p "p = id"] by auto
lemma swapidseq_cases: "swapidseq n p \<longleftrightarrow>
n = 0 \<and> p = id \<or> (\<exists>a b q m. n = Suc m \<and> p = transpose a b \<circ> q \<and> swapidseq m q \<and> a \<noteq> b)"
apply (rule iffI)
apply (erule swapidseq.cases[of n p])
apply simp
apply (rule disjI2)
apply (rule_tac x= "a" in exI)
apply (rule_tac x= "b" in exI)
apply (rule_tac x= "pa" in exI)
apply (rule_tac x= "na" in exI)
apply simp
apply auto
apply (rule comp_Suc, simp_all)
done
lemma fixing_swapidseq_decrease:
assumes "swapidseq n p"
and "a \<noteq> b"
and "(transpose a b \<circ> p) a = a"
shows "n \<noteq> 0 \<and> swapidseq (n - 1) (transpose a b \<circ> p)"
using assms
proof (induct n arbitrary: p a b)
case 0
then show ?case
by (auto simp add: fun_upd_def)
next
case (Suc n p a b)
from Suc.prems(1) swapidseq_cases[of "Suc n" p]
obtain c d q m where
cdqm: "Suc n = Suc m" "p = transpose c d \<circ> q" "swapidseq m q" "c \<noteq> d" "n = m"
by auto
consider "transpose a b \<circ> transpose c d = id"
| x y z where "x \<noteq> a" "y \<noteq> a" "z \<noteq> a" "x \<noteq> y"
"transpose a b \<circ> transpose c d = transpose x y \<circ> transpose a z"
using swap_general[OF Suc.prems(2) cdqm(4)] by metis
then show ?case
proof cases
case 1
then show ?thesis
by (simp only: cdqm o_assoc) (simp add: cdqm)
next
case prems: 2
then have az: "a \<noteq> z"
by simp
from prems have *: "(transpose x y \<circ> h) a = a \<longleftrightarrow> h a = a" for h
by (simp add: transpose_def)
from cdqm(2) have "transpose a b \<circ> p = transpose a b \<circ> (transpose c d \<circ> q)"
by simp
then have "transpose a b \<circ> p = transpose x y \<circ> (transpose a z \<circ> q)"
by (simp add: o_assoc prems)
then have "(transpose a b \<circ> p) a = (transpose x y \<circ> (transpose a z \<circ> q)) a"
by simp
then have "(transpose x y \<circ> (transpose a z \<circ> q)) a = a"
unfolding Suc by metis
then have "(transpose a z \<circ> q) a = a"
by (simp only: *)
from Suc.hyps[OF cdqm(3)[ unfolded cdqm(5)[symmetric]] az this]
have **: "swapidseq (n - 1) (transpose a z \<circ> q)" "n \<noteq> 0"
by blast+
from \<open>n \<noteq> 0\<close> have ***: "Suc n - 1 = Suc (n - 1)"
by auto
show ?thesis
apply (simp only: cdqm(2) prems o_assoc ***)
apply (simp only: Suc_not_Zero simp_thms comp_assoc)
apply (rule comp_Suc)
using ** prems
apply blast+
done
qed
qed
lemma swapidseq_identity_even:
assumes "swapidseq n (id :: 'a \<Rightarrow> 'a)"
shows "even n"
using \<open>swapidseq n id\<close>
proof (induct n rule: nat_less_induct)
case H: (1 n)
consider "n = 0"
| a b :: 'a and q m where "n = Suc m" "id = transpose a b \<circ> q" "swapidseq m q" "a \<noteq> b"
using H(2)[unfolded swapidseq_cases[of n id]] by auto
then show ?case
proof cases
case 1
then show ?thesis by presburger
next
case h: 2
from fixing_swapidseq_decrease[OF h(3,4), unfolded h(2)[symmetric]]
have m: "m \<noteq> 0" "swapidseq (m - 1) (id :: 'a \<Rightarrow> 'a)"
by auto
from h m have mn: "m - 1 < n"
by arith
from H(1)[rule_format, OF mn m(2)] h(1) m(1) show ?thesis
by presburger
qed
qed
subsection \<open>Therefore we have a welldefined notion of parity\<close>
definition "evenperm p = even (SOME n. swapidseq n p)"
lemma swapidseq_even_even:
assumes m: "swapidseq m p"
and n: "swapidseq n p"
shows "even m \<longleftrightarrow> even n"
proof -
from swapidseq_inverse_exists[OF n] obtain q where q: "swapidseq n q" "p \<circ> q = id" "q \<circ> p = id"
by blast
from swapidseq_identity_even[OF swapidseq_comp_add[OF m q(1), unfolded q]] show ?thesis
by arith
qed
lemma evenperm_unique:
assumes p: "swapidseq n p"
    and n: "even n = b"
shows "evenperm p = b"
unfolding n[symmetric] evenperm_def
apply (rule swapidseq_even_even[where p = p])
apply (rule someI[where x = n])
using p
apply blast+
done
subsection \<open>And it has the expected composition properties\<close>
lemma evenperm_id[simp]: "evenperm id = True"
by (rule evenperm_unique[where n = 0]) simp_all
lemma evenperm_identity [simp]:
\<open>evenperm (\<lambda>x. x)\<close>
using evenperm_id by (simp add: id_def [abs_def])
lemma evenperm_swap: "evenperm (transpose a b) = (a = b)"
by (rule evenperm_unique[where n="if a = b then 0 else 1"]) (simp_all add: swapidseq_swap)
lemma evenperm_comp:
assumes "permutation p" "permutation q"
shows "evenperm (p \<circ> q) \<longleftrightarrow> evenperm p = evenperm q"
proof -
from assms obtain n m where n: "swapidseq n p" and m: "swapidseq m q"
unfolding permutation_def by blast
have "even (n + m) \<longleftrightarrow> (even n \<longleftrightarrow> even m)"
by arith
from evenperm_unique[OF n refl] evenperm_unique[OF m refl]
and evenperm_unique[OF swapidseq_comp_add[OF n m] this] show ?thesis
by blast
qed
lemma evenperm_inv:
assumes "permutation p"
shows "evenperm (inv p) = evenperm p"
proof -
from assms obtain n where n: "swapidseq n p"
unfolding permutation_def by blast
show ?thesis
by (rule evenperm_unique[OF swapidseq_inverse[OF n] evenperm_unique[OF n refl, symmetric]])
qed
subsection \<open>A more abstract characterization of permutations\<close>
lemma permutation_bijective:
assumes "permutation p"
shows "bij p"
proof -
from assms obtain n where n: "swapidseq n p"
unfolding permutation_def by blast
from swapidseq_inverse_exists[OF n] obtain q where q: "swapidseq n q" "p \<circ> q = id" "q \<circ> p = id"
by blast
then show ?thesis
unfolding bij_iff
apply (auto simp add: fun_eq_iff)
apply metis
done
qed
lemma permutation_finite_support:
assumes "permutation p"
shows "finite {x. p x \<noteq> x}"
proof -
from assms obtain n where "swapidseq n p"
unfolding permutation_def by blast
then show ?thesis
proof (induct n p rule: swapidseq.induct)
case id
then show ?case by simp
next
case (comp_Suc n p a b)
let ?S = "insert a (insert b {x. p x \<noteq> x})"
from comp_Suc.hyps(2) have *: "finite ?S"
by simp
from \<open>a \<noteq> b\<close> have **: "{x. (transpose a b \<circ> p) x \<noteq> x} \<subseteq> ?S"
by auto
show ?case
by (rule finite_subset[OF ** *])
qed
qed
lemma permutation_lemma:
assumes "finite S"
and "bij p"
and "\<forall>x. x \<notin> S \<longrightarrow> p x = x"
shows "permutation p"
using assms
proof (induct S arbitrary: p rule: finite_induct)
case empty
then show ?case
by simp
next
case (insert a F p)
let ?r = "transpose a (p a) \<circ> p"
let ?q = "transpose a (p a) \<circ> ?r"
have *: "?r a = a"
by simp
from insert * have **: "\<forall>x. x \<notin> F \<longrightarrow> ?r x = x"
by (metis bij_pointE comp_apply id_apply insert_iff swap_apply(3))
have "bij ?r"
using insert by (simp add: bij_comp)
have "permutation ?r"
by (rule insert(3)[OF \<open>bij ?r\<close> **])
then have "permutation ?q"
by (simp add: permutation_compose permutation_swap_id)
then show ?case
by (simp add: o_assoc)
qed
lemma permutation: "permutation p \<longleftrightarrow> bij p \<and> finite {x. p x \<noteq> x}"
(is "?lhs \<longleftrightarrow> ?b \<and> ?f")
proof
assume ?lhs
with permutation_bijective permutation_finite_support show "?b \<and> ?f"
by auto
next
assume "?b \<and> ?f"
then have "?f" "?b" by blast+
from permutation_lemma[OF this] show ?lhs
by blast
qed
lemma permutation_inverse_works:
assumes "permutation p"
shows "inv p \<circ> p = id"
and "p \<circ> inv p = id"
using permutation_bijective [OF assms] by (auto simp: bij_def inj_iff surj_iff)
lemma permutation_inverse_compose:
assumes p: "permutation p"
and q: "permutation q"
shows "inv (p \<circ> q) = inv q \<circ> inv p"
proof -
note ps = permutation_inverse_works[OF p]
note qs = permutation_inverse_works[OF q]
have "p \<circ> q \<circ> (inv q \<circ> inv p) = p \<circ> (q \<circ> inv q) \<circ> inv p"
by (simp add: o_assoc)
also have "\<dots> = id"
by (simp add: ps qs)
finally have *: "p \<circ> q \<circ> (inv q \<circ> inv p) = id" .
have "inv q \<circ> inv p \<circ> (p \<circ> q) = inv q \<circ> (inv p \<circ> p) \<circ> q"
by (simp add: o_assoc)
also have "\<dots> = id"
by (simp add: ps qs)
finally have **: "inv q \<circ> inv p \<circ> (p \<circ> q) = id" .
show ?thesis
by (rule inv_unique_comp[OF * **])
qed
subsection \<open>Relation to \<open>permutes\<close>\<close>
lemma permutes_imp_permutation:
\<open>permutation p\<close> if \<open>finite S\<close> \<open>p permutes S\<close>
proof -
from \<open>p permutes S\<close> have \<open>{x. p x \<noteq> x} \<subseteq> S\<close>
by (auto dest: permutes_not_in)
then have \<open>finite {x. p x \<noteq> x}\<close>
using \<open>finite S\<close> by (rule finite_subset)
moreover from \<open>p permutes S\<close> have \<open>bij p\<close>
by (auto dest: permutes_bij)
ultimately show ?thesis
by (simp add: permutation)
qed
lemma permutation_permutesE:
assumes \<open>permutation p\<close>
obtains S where \<open>finite S\<close> \<open>p permutes S\<close>
proof -
from assms have fin: \<open>finite {x. p x \<noteq> x}\<close>
by (simp add: permutation)
from assms have \<open>bij p\<close>
by (simp add: permutation)
also have \<open>UNIV = {x. p x \<noteq> x} \<union> {x. p x = x}\<close>
by auto
finally have \<open>bij_betw p {x. p x \<noteq> x} {x. p x \<noteq> x}\<close>
by (rule bij_betw_partition) (auto simp add: bij_betw_fixpoints)
then have \<open>p permutes {x. p x \<noteq> x}\<close>
by (auto intro: bij_imp_permutes)
with fin show thesis ..
qed
lemma permutation_permutes: "permutation p \<longleftrightarrow> (\<exists>S. finite S \<and> p permutes S)"
by (auto elim: permutation_permutesE intro: permutes_imp_permutation)
subsection \<open>Sign of a permutation as a real number\<close>
definition sign :: \<open>('a \<Rightarrow> 'a) \<Rightarrow> int\<close> \<comment> \<open>TODO: prefer less generic name\<close>
where \<open>sign p = (if evenperm p then 1 else - 1)\<close>
lemma sign_cases [case_names even odd]:
obtains \<open>sign p = 1\<close> | \<open>sign p = - 1\<close>
by (cases \<open>evenperm p\<close>) (simp_all add: sign_def)
lemma sign_nz [simp]: "sign p \<noteq> 0"
by (cases p rule: sign_cases) simp_all
lemma sign_id [simp]: "sign id = 1"
by (simp add: sign_def)
lemma sign_identity [simp]:
\<open>sign (\<lambda>x. x) = 1\<close>
by (simp add: sign_def)
lemma sign_inverse: "permutation p \<Longrightarrow> sign (inv p) = sign p"
by (simp add: sign_def evenperm_inv)
lemma sign_compose: "permutation p \<Longrightarrow> permutation q \<Longrightarrow> sign (p \<circ> q) = sign p * sign q"
by (simp add: sign_def evenperm_comp)
lemma sign_swap_id: "sign (transpose a b) = (if a = b then 1 else - 1)"
by (simp add: sign_def evenperm_swap)
lemma sign_idempotent [simp]: "sign p * sign p = 1"
by (simp add: sign_def)
lemma sign_left_idempotent [simp]:
\<open>sign p * (sign p * sign q) = sign q\<close>
by (simp add: sign_def)
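text \<open>As a concrete illustration (a sketch; the proof method is an assumption),
  a product of two transpositions of distinct points is even:\<close>

lemma \<open>sign (transpose (0 :: nat) 1 \<circ> transpose 1 2) = 1\<close>
  by (simp add: sign_compose permutation_swap_id sign_swap_id)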
term "(bij, bij_betw, permutation)"
subsection \<open>Permuting a list\<close>
text \<open>This function permutes a list by applying a permutation to the indices.\<close>
definition permute_list :: "(nat \<Rightarrow> nat) \<Rightarrow> 'a list \<Rightarrow> 'a list"
where "permute_list f xs = map (\<lambda>i. xs ! (f i)) [0..<length xs]"
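text \<open>A concrete sanity check (a sketch; the proof method is an assumption):
  permuting indices by the transposition of \<open>0\<close> and \<open>1\<close> swaps the first two
  elements.\<close>

lemma \<open>permute_list (transpose (0 :: nat) 1) [a, b, c] = [b, a, c]\<close>
  by (simp add: permute_list_def transpose_def upt_rec eval_nat_numeral)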
lemma permute_list_map:
assumes "f permutes {..<length xs}"
shows "permute_list f (map g xs) = map g (permute_list f xs)"
using permutes_in_image[OF assms] by (auto simp: permute_list_def)
lemma permute_list_nth:
assumes "f permutes {..<length xs}" "i < length xs"
shows "permute_list f xs ! i = xs ! f i"
using permutes_in_image[OF assms(1)] assms(2)
by (simp add: permute_list_def)
lemma permute_list_Nil [simp]: "permute_list f [] = []"
by (simp add: permute_list_def)
lemma length_permute_list [simp]: "length (permute_list f xs) = length xs"
by (simp add: permute_list_def)
lemma permute_list_compose:
assumes "g permutes {..<length xs}"
shows "permute_list (f \<circ> g) xs = permute_list g (permute_list f xs)"
using assms[THEN permutes_in_image] by (auto simp add: permute_list_def)
lemma permute_list_ident [simp]: "permute_list (\<lambda>x. x) xs = xs"
by (simp add: permute_list_def map_nth)
lemma permute_list_id [simp]: "permute_list id xs = xs"
by (simp add: id_def)
lemma mset_permute_list [simp]:
fixes xs :: "'a list"
assumes "f permutes {..<length xs}"
shows "mset (permute_list f xs) = mset xs"
proof (rule multiset_eqI)
fix y :: 'a
from assms have [simp]: "f x < length xs \<longleftrightarrow> x < length xs" for x
using permutes_in_image[OF assms] by auto
have "count (mset (permute_list f xs)) y = card ((\<lambda>i. xs ! f i) -` {y} \<inter> {..<length xs})"
by (simp add: permute_list_def count_image_mset atLeast0LessThan)
also have "(\<lambda>i. xs ! f i) -` {y} \<inter> {..<length xs} = f -` {i. i < length xs \<and> y = xs ! i}"
by auto
also from assms have "card \<dots> = card {i. i < length xs \<and> y = xs ! i}"
by (intro card_vimage_inj) (auto simp: permutes_inj permutes_surj)
also have "\<dots> = count (mset xs) y"
by (simp add: count_mset length_filter_conv_card)
finally show "count (mset (permute_list f xs)) y = count (mset xs) y"
by simp
qed
lemma set_permute_list [simp]:
assumes "f permutes {..<length xs}"
shows "set (permute_list f xs) = set xs"
by (rule mset_eq_setD[OF mset_permute_list]) fact
lemma distinct_permute_list [simp]:
assumes "f permutes {..<length xs}"
shows "distinct (permute_list f xs) = distinct xs"
by (simp add: distinct_count_atmost_1 assms)
lemma permute_list_zip:
assumes "f permutes A" "A = {..<length xs}"
assumes [simp]: "length xs = length ys"
shows "permute_list f (zip xs ys) = zip (permute_list f xs) (permute_list f ys)"
proof -
from permutes_in_image[OF assms(1)] assms(2) have *: "f i < length ys \<longleftrightarrow> i < length ys" for i
by simp
have "permute_list f (zip xs ys) = map (\<lambda>i. zip xs ys ! f i) [0..<length ys]"
by (simp_all add: permute_list_def zip_map_map)
also have "\<dots> = map (\<lambda>(x, y). (xs ! f x, ys ! f y)) (zip [0..<length ys] [0..<length ys])"
by (intro nth_equalityI) (simp_all add: *)
also have "\<dots> = zip (permute_list f xs) (permute_list f ys)"
by (simp_all add: permute_list_def zip_map_map)
finally show ?thesis .
qed
lemma map_of_permute:
assumes "\<sigma> permutes fst ` set xs"
shows "map_of xs \<circ> \<sigma> = map_of (map (\<lambda>(x,y). (inv \<sigma> x, y)) xs)"
(is "_ = map_of (map ?f _)")
proof
from assms have "inj \<sigma>" "surj \<sigma>"
by (simp_all add: permutes_inj permutes_surj)
then show "(map_of xs \<circ> \<sigma>) x = map_of (map ?f xs) x" for x
by (induct xs) (auto simp: inv_f_f surj_f_inv_f)
qed
lemma list_all2_permute_list_iff:
\<open>list_all2 P (permute_list p xs) (permute_list p ys) \<longleftrightarrow> list_all2 P xs ys\<close>
if \<open>p permutes {..<length xs}\<close>
using that by (auto simp add: list_all2_iff simp flip: permute_list_zip)
subsection \<open>More lemmas about permutations\<close>
lemma permutes_in_funpow_image: \<^marker>\<open>contributor \<open>Lars Noschinski\<close>\<close>
assumes "f permutes S" "x \<in> S"
shows "(f ^^ n) x \<in> S"
using assms by (induction n) (auto simp: permutes_in_image)
lemma permutation_self: \<^marker>\<open>contributor \<open>Lars Noschinski\<close>\<close>
assumes \<open>permutation p\<close>
obtains n where \<open>n > 0\<close> \<open>(p ^^ n) x = x\<close>
proof (cases \<open>p x = x\<close>)
case True
with that [of 1] show thesis by simp
next
case False
from \<open>permutation p\<close> have \<open>inj p\<close>
by (intro permutation_bijective bij_is_inj)
moreover from \<open>p x \<noteq> x\<close> have \<open>(p ^^ Suc n) x \<noteq> (p ^^ n) x\<close> for n
proof (induction n arbitrary: x)
case 0 then show ?case by simp
next
case (Suc n)
have "p (p x) \<noteq> p x"
proof (rule notI)
assume "p (p x) = p x"
then show False using \<open>p x \<noteq> x\<close> \<open>inj p\<close> by (simp add: inj_eq)
qed
have "(p ^^ Suc (Suc n)) x = (p ^^ Suc n) (p x)"
by (simp add: funpow_swap1)
also have "\<dots> \<noteq> (p ^^ n) (p x)"
by (rule Suc) fact
also have "(p ^^ n) (p x) = (p ^^ Suc n) x"
by (simp add: funpow_swap1)
finally show ?case by simp
qed
then have "{y. \<exists>n. y = (p ^^ n) x} \<subseteq> {x. p x \<noteq> x}"
by auto
then have "finite {y. \<exists>n. y = (p ^^ n) x}"
using permutation_finite_support[OF assms] by (rule finite_subset)
ultimately obtain n where \<open>n > 0\<close> \<open>(p ^^ n) x = x\<close>
by (rule funpow_inj_finite)
with that [of n] show thesis by blast
qed
text \<open>The following few lemmas were contributed by Lukas Bulwahn.\<close>
lemma count_image_mset_eq_card_vimage:
assumes "finite A"
shows "count (image_mset f (mset_set A)) b = card {a \<in> A. f a = b}"
using assms
proof (induct A)
case empty
show ?case by simp
next
case (insert x F)
show ?case
proof (cases "f x = b")
case True
with insert.hyps
have "count (image_mset f (mset_set (insert x F))) b = Suc (card {a \<in> F. f a = f x})"
by auto
also from insert.hyps(1,2) have "\<dots> = card (insert x {a \<in> F. f a = f x})"
by simp
also from \<open>f x = b\<close> have "card (insert x {a \<in> F. f a = f x}) = card {a \<in> insert x F. f a = b}"
by (auto intro: arg_cong[where f="card"])
finally show ?thesis
using insert by auto
next
case False
then have "{a \<in> F. f a = b} = {a \<in> insert x F. f a = b}"
by auto
with insert False show ?thesis
by simp
qed
qed
\<comment> \<open>Prove \<open>image_mset_eq_implies_permutes\<close> ...\<close>
lemma image_mset_eq_implies_permutes:
fixes f :: "'a \<Rightarrow> 'b"
assumes "finite A"
and mset_eq: "image_mset f (mset_set A) = image_mset f' (mset_set A)"
obtains p where "p permutes A" and "\<forall>x\<in>A. f x = f' (p x)"
proof -
from \<open>finite A\<close> have [simp]: "finite {a \<in> A. f a = (b::'b)}" for f b by auto
have "f ` A = f' ` A"
proof -
from \<open>finite A\<close> have "f ` A = f ` (set_mset (mset_set A))"
by simp
also have "\<dots> = f' ` set_mset (mset_set A)"
by (metis mset_eq multiset.set_map)
also from \<open>finite A\<close> have "\<dots> = f' ` A"
by simp
finally show ?thesis .
qed
have "\<forall>b\<in>(f ` A). \<exists>p. bij_betw p {a \<in> A. f a = b} {a \<in> A. f' a = b}"
proof
fix b
from mset_eq have "count (image_mset f (mset_set A)) b = count (image_mset f' (mset_set A)) b"
by simp
with \<open>finite A\<close> have "card {a \<in> A. f a = b} = card {a \<in> A. f' a = b}"
by (simp add: count_image_mset_eq_card_vimage)
then show "\<exists>p. bij_betw p {a\<in>A. f a = b} {a \<in> A. f' a = b}"
by (intro finite_same_card_bij) simp_all
qed
then have "\<exists>p. \<forall>b\<in>f ` A. bij_betw (p b) {a \<in> A. f a = b} {a \<in> A. f' a = b}"
by (rule bchoice)
then obtain p where p: "\<forall>b\<in>f ` A. bij_betw (p b) {a \<in> A. f a = b} {a \<in> A. f' a = b}" ..
define p' where "p' = (\<lambda>a. if a \<in> A then p (f a) a else a)"
have "p' permutes A"
proof (rule bij_imp_permutes)
have "disjoint_family_on (\<lambda>i. {a \<in> A. f' a = i}) (f ` A)"
by (auto simp: disjoint_family_on_def)
moreover
have "bij_betw (\<lambda>a. p (f a) a) {a \<in> A. f a = b} {a \<in> A. f' a = b}" if "b \<in> f ` A" for b
using p that by (subst bij_betw_cong[where g="p b"]) auto
ultimately
have "bij_betw (\<lambda>a. p (f a) a) (\<Union>b\<in>f ` A. {a \<in> A. f a = b}) (\<Union>b\<in>f ` A. {a \<in> A. f' a = b})"
by (rule bij_betw_UNION_disjoint)
moreover have "(\<Union>b\<in>f ` A. {a \<in> A. f a = b}) = A"
by auto
moreover from \<open>f ` A = f' ` A\<close> have "(\<Union>b\<in>f ` A. {a \<in> A. f' a = b}) = A"
by auto
ultimately show "bij_betw p' A A"
unfolding p'_def by (subst bij_betw_cong[where g="(\<lambda>a. p (f a) a)"]) auto
next
show "\<And>x. x \<notin> A \<Longrightarrow> p' x = x"
by (simp add: p'_def)
qed
moreover from p have "\<forall>x\<in>A. f x = f' (p' x)"
unfolding p'_def using bij_betwE by fastforce
ultimately show ?thesis ..
qed
\<comment> \<open>... and derive the existing property:\<close>
lemma mset_eq_permutation:
fixes xs ys :: "'a list"
assumes mset_eq: "mset xs = mset ys"
obtains p where "p permutes {..<length ys}" "permute_list p ys = xs"
proof -
from mset_eq have length_eq: "length xs = length ys"
by (rule mset_eq_length)
have "mset_set {..<length ys} = mset [0..<length ys]"
by (rule mset_set_upto_eq_mset_upto)
with mset_eq length_eq have "image_mset (\<lambda>i. xs ! i) (mset_set {..<length ys}) =
image_mset (\<lambda>i. ys ! i) (mset_set {..<length ys})"
by (metis map_nth mset_map)
from image_mset_eq_implies_permutes[OF _ this]
obtain p where p: "p permutes {..<length ys}" and "\<forall>i\<in>{..<length ys}. xs ! i = ys ! (p i)"
by auto
with length_eq have "permute_list p ys = xs"
by (auto intro!: nth_equalityI simp: permute_list_nth)
with p show thesis ..
qed
lemma permutes_natset_le:
fixes S :: "'a::wellorder set"
assumes "p permutes S"
and "\<forall>i \<in> S. p i \<le> i"
shows "p = id"
proof -
have "p n = n" for n
using assms
proof (induct n arbitrary: S rule: less_induct)
case (less n)
show ?case
proof (cases "n \<in> S")
case False
with less(2) show ?thesis
unfolding permutes_def by metis
next
case True
with less(3) have "p n < n \<or> p n = n"
by auto
then show ?thesis
proof
assume "p n < n"
with less have "p (p n) = p n"
by metis
with permutes_inj[OF less(2)] have "p n = n"
unfolding inj_def by blast
with \<open>p n < n\<close> have False
by simp
then show ?thesis ..
qed
qed
qed
then show ?thesis by (auto simp: fun_eq_iff)
qed
lemma permutes_natset_ge:
fixes S :: "'a::wellorder set"
assumes p: "p permutes S"
and le: "\<forall>i \<in> S. p i \<ge> i"
shows "p = id"
proof -
have "i \<ge> inv p i" if "i \<in> S" for i
proof -
from that permutes_in_image[OF permutes_inv[OF p]] have "inv p i \<in> S"
by simp
with le have "p (inv p i) \<ge> inv p i"
by blast
with permutes_inverses[OF p] show ?thesis
by simp
qed
then have "\<forall>i\<in>S. inv p i \<le> i"
by blast
from permutes_natset_le[OF permutes_inv[OF p] this] have "inv p = inv id"
by simp
then show ?thesis
apply (subst permutes_inv_inv[OF p, symmetric])
apply (rule inv_unique_comp)
apply simp_all
done
qed
lemma image_inverse_permutations: "{inv p |p. p permutes S} = {p. p permutes S}"
apply (rule set_eqI)
apply auto
using permutes_inv_inv permutes_inv
apply auto
apply (rule_tac x="inv x" in exI)
apply auto
done
lemma image_compose_permutations_left:
assumes "q permutes S"
shows "{q \<circ> p |p. p permutes S} = {p. p permutes S}"
apply (rule set_eqI)
apply auto
apply (rule permutes_compose)
using assms
apply auto
apply (rule_tac x = "inv q \<circ> x" in exI)
apply (simp add: o_assoc permutes_inv permutes_compose permutes_inv_o)
done
lemma image_compose_permutations_right:
assumes "q permutes S"
shows "{p \<circ> q | p. p permutes S} = {p . p permutes S}"
apply (rule set_eqI)
apply auto
apply (rule permutes_compose)
using assms
apply auto
apply (rule_tac x = "x \<circ> inv q" in exI)
apply (simp add: o_assoc permutes_inv permutes_compose permutes_inv_o comp_assoc)
done
lemma permutes_in_seg: "p permutes {1..n} \<Longrightarrow> i \<in> {1..n} \<Longrightarrow> 1 \<le> p i \<and> p i \<le> n"
  by (simp add: permutes_def) metis
lemma sum_permutations_inverse: "sum f {p. p permutes S} = sum (\<lambda>p. f(inv p)) {p. p permutes S}"
(is "?lhs = ?rhs")
proof -
let ?S = "{p . p permutes S}"
have *: "inj_on inv ?S"
proof (auto simp add: inj_on_def)
fix q r
assume q: "q permutes S"
and r: "r permutes S"
and qr: "inv q = inv r"
then have "inv (inv q) = inv (inv r)"
by simp
with permutes_inv_inv[OF q] permutes_inv_inv[OF r] show "q = r"
by metis
qed
have **: "inv ` ?S = ?S"
using image_inverse_permutations by blast
have ***: "?rhs = sum (f \<circ> inv) ?S"
by (simp add: o_def)
from sum.reindex[OF *, of f] show ?thesis
by (simp only: ** ***)
qed
lemma sum_permutations_compose_left:
assumes q: "q permutes S"
shows "sum f {p. p permutes S} = sum (\<lambda>p. f(q \<circ> p)) {p. p permutes S}"
(is "?lhs = ?rhs")
proof -
let ?S = "{p. p permutes S}"
have *: "?rhs = sum (f \<circ> ((\<circ>) q)) ?S"
by (simp add: o_def)
have **: "inj_on ((\<circ>) q) ?S"
proof (auto simp add: inj_on_def)
fix p r
assume "p permutes S"
and r: "r permutes S"
and rp: "q \<circ> p = q \<circ> r"
then have "inv q \<circ> q \<circ> p = inv q \<circ> q \<circ> r"
by (simp add: comp_assoc)
with permutes_inj[OF q, unfolded inj_iff] show "p = r"
by simp
qed
have "((\<circ>) q) ` ?S = ?S"
using image_compose_permutations_left[OF q] by auto
with * sum.reindex[OF **, of f] show ?thesis
by (simp only:)
qed
lemma sum_permutations_compose_right:
assumes q: "q permutes S"
shows "sum f {p. p permutes S} = sum (\<lambda>p. f(p \<circ> q)) {p. p permutes S}"
(is "?lhs = ?rhs")
proof -
let ?S = "{p. p permutes S}"
have *: "?rhs = sum (f \<circ> (\<lambda>p. p \<circ> q)) ?S"
by (simp add: o_def)
have **: "inj_on (\<lambda>p. p \<circ> q) ?S"
proof (auto simp add: inj_on_def)
fix p r
assume "p permutes S"
and r: "r permutes S"
and rp: "p \<circ> q = r \<circ> q"
then have "p \<circ> (q \<circ> inv q) = r \<circ> (q \<circ> inv q)"
by (simp add: o_assoc)
with permutes_surj[OF q, unfolded surj_iff] show "p = r"
by simp
qed
from image_compose_permutations_right[OF q] have "(\<lambda>p. p \<circ> q) ` ?S = ?S"
by auto
with * sum.reindex[OF **, of f] show ?thesis
by (simp only:)
qed
lemma inv_inj_on_permutes:
\<open>inj_on inv {p. p permutes S}\<close>
proof (intro inj_onI, unfold mem_Collect_eq)
fix p q
assume p: "p permutes S" and q: "q permutes S" and eq: "inv p = inv q"
have "inv (inv p) = inv (inv q)" using eq by simp
thus "p = q"
using inv_inv_eq[OF permutes_bij] p q by metis
qed
lemma permutes_pair_eq:
\<open>{(p s, s) |s. s \<in> S} = {(s, inv p s) |s. s \<in> S}\<close> (is \<open>?L = ?R\<close>) if \<open>p permutes S\<close>
proof
show "?L \<subseteq> ?R"
proof
fix x assume "x \<in> ?L"
then obtain s where x: "x = (p s, s)" and s: "s \<in> S" by auto
note x
also have "(p s, s) = (p s, Hilbert_Choice.inv p (p s))"
using permutes_inj [OF that] inv_f_f by auto
also have "... \<in> ?R" using s permutes_in_image[OF that] by auto
finally show "x \<in> ?R".
qed
show "?R \<subseteq> ?L"
proof
fix x assume "x \<in> ?R"
then obtain s
where x: "x = (s, Hilbert_Choice.inv p s)" (is "_ = (s, ?ips)")
and s: "s \<in> S" by auto
note x
also have "(s, ?ips) = (p ?ips, ?ips)"
using inv_f_f[OF permutes_inj[OF permutes_inv[OF that]]]
using inv_inv_eq[OF permutes_bij[OF that]] by auto
also have "... \<in> ?L"
using s permutes_in_image[OF permutes_inv[OF that]] by auto
finally show "x \<in> ?L".
qed
qed
context
fixes p and n i :: nat
assumes p: \<open>p permutes {0..<n}\<close> and i: \<open>i < n\<close>
begin
lemma permutes_nat_less:
\<open>p i < n\<close>
proof -
have \<open>?thesis \<longleftrightarrow> p i \<in> {0..<n}\<close>
by simp
also from p have \<open>p i \<in> {0..<n} \<longleftrightarrow> i \<in> {0..<n}\<close>
by (rule permutes_in_image)
finally show ?thesis
using i by simp
qed
lemma permutes_nat_inv_less:
\<open>inv p i < n\<close>
proof -
from p have \<open>inv p permutes {0..<n}\<close>
by (rule permutes_inv)
then show ?thesis
using i by (rule Permutations.permutes_nat_less)
qed
end
context comm_monoid_set
begin
lemma permutes_inv:
\<open>F (\<lambda>s. g (p s) s) S = F (\<lambda>s. g s (inv p s)) S\<close> (is \<open>?l = ?r\<close>)
if \<open>p permutes S\<close>
proof -
let ?g = "\<lambda>(x, y). g x y"
let ?ps = "\<lambda>s. (p s, s)"
let ?ips = "\<lambda>s. (s, inv p s)"
have inj1: "inj_on ?ps S" by (rule inj_onI) auto
have inj2: "inj_on ?ips S" by (rule inj_onI) auto
have "?l = F ?g (?ps ` S)"
using reindex [OF inj1, of ?g] by simp
also have "?ps ` S = {(p s, s) |s. s \<in> S}" by auto
also have "... = {(s, inv p s) |s. s \<in> S}"
unfolding permutes_pair_eq [OF that] by simp
also have "... = ?ips ` S" by auto
also have "F ?g ... = ?r"
using reindex [OF inj2, of ?g] by simp
finally show ?thesis.
qed
end
subsection \<open>Sum over a set of permutations (could generalize to iteration)\<close>
lemma sum_over_permutations_insert:
assumes fS: "finite S"
and aS: "a \<notin> S"
shows "sum f {p. p permutes (insert a S)} =
sum (\<lambda>b. sum (\<lambda>q. f (transpose a b \<circ> q)) {p. p permutes S}) (insert a S)"
proof -
have *: "\<And>f a b. (\<lambda>(b, p). f (transpose a b \<circ> p)) = f \<circ> (\<lambda>(b,p). transpose a b \<circ> p)"
by (simp add: fun_eq_iff)
have **: "\<And>P Q. {(a, b). a \<in> P \<and> b \<in> Q} = P \<times> Q"
by blast
show ?thesis
unfolding * ** sum.cartesian_product permutes_insert
proof (rule sum.reindex)
let ?f = "(\<lambda>(b, y). transpose a b \<circ> y)"
let ?P = "{p. p permutes S}"
{
fix b c p q
assume b: "b \<in> insert a S"
assume c: "c \<in> insert a S"
assume p: "p permutes S"
assume q: "q permutes S"
assume eq: "transpose a b \<circ> p = transpose a c \<circ> q"
from p q aS have pa: "p a = a" and qa: "q a = a"
unfolding permutes_def by metis+
from eq have "(transpose a b \<circ> p) a = (transpose a c \<circ> q) a"
by simp
then have bc: "b = c"
by (simp add: permutes_def pa qa o_def fun_upd_def id_def
cong del: if_weak_cong split: if_split_asm)
from eq[unfolded bc] have "(\<lambda>p. transpose a c \<circ> p) (transpose a c \<circ> p) =
(\<lambda>p. transpose a c \<circ> p) (transpose a c \<circ> q)" by simp
then have "p = q"
unfolding o_assoc swap_id_idempotent by simp
with bc have "b = c \<and> p = q"
by blast
}
then show "inj_on ?f (insert a S \<times> ?P)"
unfolding inj_on_def by clarify metis
qed
qed
subsection \<open>Constructing permutations from association lists\<close>
definition list_permutes :: "('a \<times> 'a) list \<Rightarrow> 'a set \<Rightarrow> bool"
where "list_permutes xs A \<longleftrightarrow>
set (map fst xs) \<subseteq> A \<and>
set (map snd xs) = set (map fst xs) \<and>
distinct (map fst xs) \<and>
distinct (map snd xs)"
lemma list_permutesI [simp]:
assumes "set (map fst xs) \<subseteq> A" "set (map snd xs) = set (map fst xs)" "distinct (map fst xs)"
shows "list_permutes xs A"
proof -
from assms(2,3) have "distinct (map snd xs)"
by (intro card_distinct) (simp_all add: distinct_card del: set_map)
with assms show ?thesis
by (simp add: list_permutes_def)
qed
definition permutation_of_list :: "('a \<times> 'a) list \<Rightarrow> 'a \<Rightarrow> 'a"
where "permutation_of_list xs x = (case map_of xs x of None \<Rightarrow> x | Some y \<Rightarrow> y)"
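text \<open>For example (a sketch; the proof method is an assumption), the association
  list \<open>[(1, 2), (2, 1)]\<close> encodes the transposition of \<open>1\<close> and \<open>2\<close>:\<close>

lemma \<open>permutation_of_list [(1 :: nat, 2), (2, 1)] 1 = 2\<close>
  by (simp add: permutation_of_list_def)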
lemma permutation_of_list_Cons:
"permutation_of_list ((x, y) # xs) x' = (if x = x' then y else permutation_of_list xs x')"
by (simp add: permutation_of_list_def)
fun inverse_permutation_of_list :: "('a \<times> 'a) list \<Rightarrow> 'a \<Rightarrow> 'a"
where
"inverse_permutation_of_list [] x = x"
| "inverse_permutation_of_list ((y, x') # xs) x =
(if x = x' then y else inverse_permutation_of_list xs x)"
declare inverse_permutation_of_list.simps [simp del]
lemma inj_on_map_of:
assumes "distinct (map snd xs)"
shows "inj_on (map_of xs) (set (map fst xs))"
proof (rule inj_onI)
fix x y
assume xy: "x \<in> set (map fst xs)" "y \<in> set (map fst xs)"
assume eq: "map_of xs x = map_of xs y"
from xy obtain x' y' where x'y': "map_of xs x = Some x'" "map_of xs y = Some y'"
by (cases "map_of xs x"; cases "map_of xs y") (simp_all add: map_of_eq_None_iff)
moreover from x'y' have *: "(x, x') \<in> set xs" "(y, y') \<in> set xs"
by (force dest: map_of_SomeD)+
moreover from * eq x'y' have "x' = y'"
by simp
ultimately show "x = y"
using assms by (force simp: distinct_map dest: inj_onD[of _ _ "(x,x')" "(y,y')"])
qed
lemma inj_on_the: "None \<notin> A \<Longrightarrow> inj_on the A"
by (auto simp: inj_on_def option.the_def split: option.splits)
lemma inj_on_map_of':
assumes "distinct (map snd xs)"
shows "inj_on (the \<circ> map_of xs) (set (map fst xs))"
by (intro comp_inj_on inj_on_map_of assms inj_on_the)
(force simp: eq_commute[of None] map_of_eq_None_iff)
lemma image_map_of:
assumes "distinct (map fst xs)"
shows "map_of xs ` set (map fst xs) = Some ` set (map snd xs)"
using assms by (auto simp: rev_image_eqI)
lemma the_Some_image [simp]: "the ` Some ` A = A"
by (subst image_image) simp
lemma image_map_of':
assumes "distinct (map fst xs)"
shows "(the \<circ> map_of xs) ` set (map fst xs) = set (map snd xs)"
by (simp only: image_comp [symmetric] image_map_of assms the_Some_image)
lemma permutation_of_list_permutes [simp]:
assumes "list_permutes xs A"
shows "permutation_of_list xs permutes A"
(is "?f permutes _")
proof (rule permutes_subset[OF bij_imp_permutes])
from assms show "set (map fst xs) \<subseteq> A"
by (simp add: list_permutes_def)
from assms have "inj_on (the \<circ> map_of xs) (set (map fst xs))" (is ?P)
by (intro inj_on_map_of') (simp_all add: list_permutes_def)
also have "?P \<longleftrightarrow> inj_on ?f (set (map fst xs))"
by (intro inj_on_cong)
(auto simp: permutation_of_list_def map_of_eq_None_iff split: option.splits)
finally have "bij_betw ?f (set (map fst xs)) (?f ` set (map fst xs))"
by (rule inj_on_imp_bij_betw)
also from assms have "?f ` set (map fst xs) = (the \<circ> map_of xs) ` set (map fst xs)"
by (intro image_cong refl)
(auto simp: permutation_of_list_def map_of_eq_None_iff split: option.splits)
also from assms have "\<dots> = set (map fst xs)"
by (subst image_map_of') (simp_all add: list_permutes_def)
finally show "bij_betw ?f (set (map fst xs)) (set (map fst xs))" .
qed (force simp: permutation_of_list_def dest!: map_of_SomeD split: option.splits)+
lemma eval_permutation_of_list [simp]:
"permutation_of_list [] x = x"
"x = x' \<Longrightarrow> permutation_of_list ((x',y)#xs) x = y"
"x \<noteq> x' \<Longrightarrow> permutation_of_list ((x',y')#xs) x = permutation_of_list xs x"
by (simp_all add: permutation_of_list_def)
lemma eval_inverse_permutation_of_list [simp]:
"inverse_permutation_of_list [] x = x"
"x = x' \<Longrightarrow> inverse_permutation_of_list ((y,x')#xs) x = y"
"x \<noteq> x' \<Longrightarrow> inverse_permutation_of_list ((y',x')#xs) x = inverse_permutation_of_list xs x"
by (simp_all add: inverse_permutation_of_list.simps)
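text \<open>Accordingly, on the same example as above (a sketch; the proof method is an
  assumption):\<close>

lemma \<open>inverse_permutation_of_list [(1 :: nat, 2), (2, 1)] 2 = 1\<close>
  by simp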
lemma permutation_of_list_id: "x \<notin> set (map fst xs) \<Longrightarrow> permutation_of_list xs x = x"
by (induct xs) (auto simp: permutation_of_list_Cons)
lemma permutation_of_list_unique':
"distinct (map fst xs) \<Longrightarrow> (x, y) \<in> set xs \<Longrightarrow> permutation_of_list xs x = y"
by (induct xs) (force simp: permutation_of_list_Cons)+
lemma permutation_of_list_unique:
"list_permutes xs A \<Longrightarrow> (x, y) \<in> set xs \<Longrightarrow> permutation_of_list xs x = y"
by (intro permutation_of_list_unique') (simp_all add: list_permutes_def)
lemma inverse_permutation_of_list_id:
"x \<notin> set (map snd xs) \<Longrightarrow> inverse_permutation_of_list xs x = x"
by (induct xs) auto
lemma inverse_permutation_of_list_unique':
"distinct (map snd xs) \<Longrightarrow> (x, y) \<in> set xs \<Longrightarrow> inverse_permutation_of_list xs y = x"
by (induct xs) (force simp: inverse_permutation_of_list.simps(2))+
lemma inverse_permutation_of_list_unique:
"list_permutes xs A \<Longrightarrow> (x,y) \<in> set xs \<Longrightarrow> inverse_permutation_of_list xs y = x"
by (intro inverse_permutation_of_list_unique') (simp_all add: list_permutes_def)
lemma inverse_permutation_of_list_correct:
fixes A :: "'a set"
assumes "list_permutes xs A"
shows "inverse_permutation_of_list xs = inv (permutation_of_list xs)"
proof (rule ext, rule sym, subst permutes_inv_eq)
from assms show "permutation_of_list xs permutes A"
by simp
show "permutation_of_list xs (inverse_permutation_of_list xs x) = x" for x
proof (cases "x \<in> set (map snd xs)")
case True
then obtain y where "(y, x) \<in> set xs" by auto
with assms show ?thesis
by (simp add: inverse_permutation_of_list_unique permutation_of_list_unique)
next
case False
with assms show ?thesis
by (auto simp: list_permutes_def inverse_permutation_of_list_id permutation_of_list_id)
qed
qed
end
from __future__ import absolute_import
import functools as ft
import warnings
from logging_helpers import _L
from lxml.etree import QName, Element
import lxml.etree
import networkx as nx
import numpy as np
import pandas as pd
from .core import ureg
from .load import draw, load
from six.moves import zip
__all__ = ['detect_neighbours', 'draw_with_segment_rays',
'write_connections_layer']
DEFAULT_DISTANCE_THRESHOLD = 0.175 * ureg.mm
def detect_neighbours(chip_info,
distance_threshold=DEFAULT_DISTANCE_THRESHOLD):
segments = get_segment_rays(chip_info, magnitude=distance_threshold)
return get_all_intersections(segments)
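# Example usage (a sketch; the chip file name is an assumption):
#
#     chip_info = load('chip.svg')
#     df_neighbours = detect_neighbours(chip_info)
#     # Rows are indexed by (id, vertex_i, id_neighbour, vertex_i_neighbour),
#     # i.e. pairs of electrode boundary segments within the distance threshold.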
def draw_with_segment_rays(chip_info,
distance_threshold=DEFAULT_DISTANCE_THRESHOLD,
axis=None):
import matplotlib.pyplot as plt
if axis is None:
fig, axis = plt.subplots(figsize=(50, 50))
result = draw(chip_info, ax=axis)
axis = result['axis']
for p in result['patches'].values():
p.set_alpha(.3)
light_green = '#90cd97'
dark_green = '#059748'
    df_intersections = detect_neighbours(chip_info,
                                         distance_threshold=distance_threshold)
for idx_i, segment_i in df_intersections.iterrows():
axis.arrow(segment_i['x_mid'], segment_i['y_mid'],
segment_i['x_normal'], segment_i['y_normal'],
width=.25,
edgecolor=dark_green, facecolor=light_green)
def get_all_intersections(df_rays):
    '''
    Parameters
    ----------
    df_rays : pandas.DataFrame
        See return type of :func:`get_segment_rays()`.

    Returns
    -------
    pandas.DataFrame
        One row per ray/segment intersection, indexed by
        ``(id, vertex_i, id_neighbour, vertex_i_neighbour)``.
    '''
intersections = []
for i, ((id_i, vertex_i), segment_i) in enumerate(df_rays.iterrows()):
p = segment_i[['x_mid', 'y_mid']].values
r = segment_i[['x_normal', 'y_normal']].values
df_intersections_i = get_intersections(df_rays, p, r)
# Do not include self electrode in consideration for neighbours.
self_mask = df_intersections_i.index.get_level_values('id') == id_i
df_intersections_i = df_intersections_i.loc[~self_mask]
if df_intersections_i.shape[0]:
intersections.append(((id_i, vertex_i), df_intersections_i))
if not intersections:
return pd.DataFrame()
index, values = list(zip(*intersections))
df_result = pd.concat(values, keys=index)
df_result.index.names = ['id', 'vertex_i',
'id_neighbour', 'vertex_i_neighbour']
return df_result
def get_intersections(df_rays, p, r):
# See: https://stackoverflow.com/a/565282/345236
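    # Each ray is written as ``p + t * r`` and each boundary segment as
    # ``q + u * s``.  Equating the two and taking 2-d cross products gives
    #     t = (q - p) x s / (r x s),    u = (q - p) x r / (r x s),
    # with a proper intersection iff 0 <= t <= 1 and 0 <= u <= 1 (and
    # r x s != 0, i.e. the ray and segment are not parallel).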
q = df_rays[['x1', 'y1']].values
s = df_rays[['x2', 'y2']].values - q
r_x_s = np.cross(r, s)
r_x_s[r_x_s == 0] = np.NaN
t = np.cross((q - p), s) / r_x_s
u = np.cross((q - p), r) / r_x_s
df_tu = pd.DataFrame(np.column_stack([t, u]), columns=list('tu'),
index=df_rays.index)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
df_i = df_rays.join(df_tu).loc[(r_x_s != 0)
& (t >= 0) & (t <= 1) & (u >= 0)
& (u <= 1)]
intersect_points = p + df_i.t.values[:, None] * r
return df_i.join(pd.DataFrame(intersect_points, columns=['x_intersect',
'y_intersect'],
index=df_i.index)).drop(['t', 'u'], axis=1)
def _electrode_segment_rays(electrode, magnitude):
'''Compute ray cast "outwards" for each line segment of electrode shape.
Parameters
----------
electrode : dict
See ``electrodes`` item in :func:`dmf_chip.load()`.
magnitude : float
Magnitude of ray vectors (in pixels).
Returns
-------
pandas.DataFrame
Each row corresponds to a ray vector cast from the respective line
segment in the electrode shape, with the following columns::
- ``x1``, ``y1``: start point of line segment
- ``x2``, ``y2``: end point of line segment
- ``x_mid``, ``y_mid``: mid point of line segment
- ``length``: Cartesian length of line segment
- ``x_normal``, ``y_normal``: end point of cast ray
'''
points = np.array(electrode['points'])
if electrode['direction'] == 'counter-clockwise':
points = points[::-1]
# Vector direction/magnitude for each segment (relative to origin).
v = .5 * (points[1:] - points[:-1])
# Mid-point of segment.
x_mid, y_mid = .5 * (points[1:] + points[:-1]).T
length = np.sqrt((v ** 2)).sum(axis=1)
v_scaled = magnitude * (v / length[:, None])
x_normal = -v_scaled[:, 1]
y_normal = v_scaled[:, 0]
x1, y1 = points[:-1].T
x2, y2 = points[1:].T
result = pd.DataFrame(np.column_stack((x1, y1, x2, y2, x_mid, y_mid, length,
x_normal, y_normal)),
columns=['x1', 'y1', 'x2', 'y2', 'x_mid', 'y_mid',
'length', 'x_normal', 'y_normal'])
return result
def get_segment_rays(chip_info, magnitude=DEFAULT_DISTANCE_THRESHOLD):
magnitude_px = (magnitude * chip_info['__metadata__']['ppi'] *
ureg.ppi).to('pixel').magnitude
df_rays = pd.concat([_electrode_segment_rays(e_i, magnitude_px)
for e_i in chip_info['electrodes']],
keys=[e['id'] for e in chip_info['electrodes']])
df_rays.index.names = 'id', 'vertex_i'
return df_rays
def write_connections_layer(chip_file,
distance_threshold=DEFAULT_DISTANCE_THRESHOLD):
chip_info = load(chip_file)
df_intersections = detect_neighbours(chip_info,
distance_threshold=distance_threshold)
doc = lxml.etree.parse(chip_file)
root = doc.getroot()
nsmap = {k: v for k, v in root.nsmap.items() if k}
_xpath = ft.partial(root.xpath, namespaces=nsmap)
device_layer = _xpath('//svg:g[@inkscape:label="Device"]')[0]
connections_layers = _xpath('//svg:g[@inkscape:label="Connections"]')
# Remove existing neighbouring electrode connections layer(s) (if any).
for layer in connections_layers:
root.remove(layer)
# Determine and use first unused layer label number.
layer_ids = set(_xpath('//svg:g[@inkscape:label and @inkscape:groupmode='
'"layer"]/@id'))
i = 1
while True:
layer_id = 'layer%d' % i
if layer_id not in layer_ids:
break
i += 1
connections_layer = Element(QName(nsmap['svg'], 'g'),
attrib={QName(nsmap['inkscape'], 'label'):
'Connections',
QName(nsmap['inkscape'], 'groupmode'):
'layer', 'id': layer_id})
# Construct undirected graph from detected intersections.
edges = df_intersections.reset_index()[['id',
'id_neighbour']].values.tolist()
graph = nx.Graph(edges)
# Create one `<svg:path>` per electrode.
path_elements = []
centers = pd.Series((e['pole_of_accessibility']
for e in chip_info['electrodes']),
index=[e['id'] for e in chip_info['electrodes']])
    for i, (a, b) in enumerate(graph.edges):
        a_point, b_point = centers[[a, b]]
        path_d = 'M %.2f,%.2f L %.2f,%.2f' % (a_point['x'], a_point['y'],
                                              b_point['x'], b_point['y'])
        # Give each connection path its own id; reusing the layer id for every
        # path would produce duplicate XML ids in the output document.
        path_elem = Element(QName(nsmap['svg'], 'path'),
                            attrib={'id': '%s-connection-%d' % (layer_id, i),
                                    'style': 'stroke:#000000;stroke-width:0.1',
                                    'd': path_d})
        path_elements.append(path_elem)
connections_layer.extend(path_elements)
device_layer.addnext(connections_layer)
return doc
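# Example usage (a sketch; file names are assumptions):
#
#     doc = write_connections_layer('chip.svg')
#     doc.write('chip-with-connections.svg')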
def _get_or_create(parent, name, attrib=None):
'''Get element specified by qualified tag name or create it.
Parameters
----------
parent : lxml.etree element
Parent element.
name : str
Name in form ``"<namespace alias>:<tagname>"``, e.g.,
``"dmf:ChipDesign"``. If :data:`parent` does not contain a child
matching the specified tag name and corresponding attributes, create a
new element.
attrib : dict, optional
Element attributes to match (or set, if creating new element).
Returns
-------
lxml.etree.Element
Matching child element (if available) or created element.
Examples
--------
Get ``<dmf:ChipDesign>`` element or create it if it does not exist:
    >>> from dmf_chip.edit import _get_or_create
    >>>
    >>> # Load the XML document and define the `_xpath` alias...
    >>>
    >>> metadata = _xpath('/svg:svg/svg:metadata')[0]
    >>> chip_design = _get_or_create(metadata, 'dmf:ChipDesign')
'''
docroot = parent.getroottree().getroot()
nsmap = {k: v for k, v in docroot.nsmap.items() if k}
ns, tagname = name.split(':')
qname = QName(nsmap[ns], tagname)
# Short-hand to xpath using namespaces referenced in file.
_xpath = ft.wraps(parent.xpath)(ft.partial(parent.xpath, namespaces=nsmap))
xquery = './%s:%s' % (ns, tagname)
if attrib is not None:
attrib_str = ''.join('[@%s="%s"]' % (k, v) for k, v in attrib.items())
else:
attrib_str = ''
xquery += attrib_str
if not _xpath(xquery):
element = Element(qname, attrib=attrib)
parent.append(element)
_L().info('Add new element: `%s:%s%s`', ns, tagname, attrib_str)
else:
element = _xpath(xquery)[0]
_L().info('found element: `%s:%s%s`', ns, tagname, attrib_str)
return element
def write_test_route(chip_file, tour_ids, id_):
'''Write test route to SVG metadata.
Parameters
----------
chip_file : str
Path to chip design file.
tour_ids : list[str]
Ordered list of electrode ids defining tour waypoints.
id_ : str
Test route id.
Returns
-------
lxml.etree document
In-memory document with test route element added.
'''
doc = lxml.etree.parse(chip_file)
root = doc.getroot()
    # `Element.nsmap` is read-only in lxml, so assigning into it has no effect;
    # instead, register the `dmf` namespace locally for the xpath/QName lookups
    # below.
    NSMAP = {k: v for k, v in root.nsmap.items() if k}
    NSMAP.setdefault('dmf',
                     "https://github.com/sci-bots/dmf-chip-spec/releases/tag/v0.1")
# Short-hand to xpath using namespaces referenced in file.
_xpath = ft.wraps(root.xpath)(ft.partial(root.xpath, namespaces=NSMAP))
metadata = _xpath('/svg:svg/svg:metadata')[0]
chip_design = _get_or_create(metadata, 'dmf:ChipDesign')
test_routes = _get_or_create(chip_design, 'dmf:TestRoutes')
if test_routes.xpath('./dmf:TestRoute[@id="%s"]' % id_, namespaces=NSMAP):
        raise NameError('Test route already exists with id: `%s`' % id_)
test_route = _get_or_create(test_routes, 'dmf:TestRoute',
attrib={'id': id_, 'version': '0.1.0'})
for id_i in tour_ids:
element_i = Element(QName(NSMAP['dmf'], 'Waypoint'))
element_i.text = str(id_i)
test_route.append(element_i)
_L().info('Added %d waypoints.', len(tour_ids))
return doc
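# Example usage (a sketch; electrode ids and file names are assumptions):
#
#     doc = write_test_route('chip.svg', ['electrode000', 'electrode001',
#                                         'electrode002'], 'route-0')
#     doc.write('chip-with-test-route.svg')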
import wntr
import collections
import numpy as np
from magnets.utils.call_on_functions import *
def parallel_pipes(relations, wn, new_link_list, junc_dict, pipe_dict, unremovable_nodes, special_nodes, special_links_nodes, special_links, alpha):
connected_nodes = []
num_connections = []
num_junc = wn.num_junctions
junc_names = wn.junction_name_list
link_names = wn.link_name_list
parallel_pipes_list = []
for i in range(num_junc):
connected_nodes.append([])
for a,b in new_link_list:
if (a == junc_names[i]):
connected_nodes[i].append(b)
if (b == junc_names[i]):
connected_nodes[i].append(a)
for i in range(len(connected_nodes)):
has_dup = ([item for item, count in collections.Counter(connected_nodes[i]).items() if count > 1])
        if len(has_dup) != 0:
# if junc_names[i] not in unremovable_nodes:
if junc_names[i] not in special_nodes:
for j in range(len(has_dup)):
# if has_dup[j] not in unremovable_nodes:
if has_dup[j] not in special_nodes:
if ((junc_names[i],has_dup[j]) not in parallel_pipes_list and (has_dup[j],junc_names[i]) not in parallel_pipes_list):
if ((junc_names[i],has_dup[j]) not in special_links_nodes and (has_dup[j],junc_names[i]) not in special_links_nodes):
parallel_pipes_list.append((junc_names[i],has_dup[j]))
parallel_links = []
for j in range(len(parallel_pipes_list)):
parallel_links.append([])
a = parallel_pipes_list[j][0]
b = parallel_pipes_list[j][1]
for i in range(len(new_link_list)):
if (new_link_list[i][0] == a and new_link_list[i][1] == b) or (new_link_list[i][1] == a and new_link_list[i][0] == b):
if link_names[i] not in parallel_links and link_names[i] not in special_links:
parallel_links[j].append(link_names[i])
# update junc_dict and relations to only reflect single pipes connecting two nodes
junc_dict[a]['Connected nodes'] = list(np.unique(np.array((junc_dict[a]['Connected nodes']))))
junc_dict[b]['Connected nodes'] = list(np.unique(np.array((junc_dict[b]['Connected nodes']))))
relations[a] = list(np.unique(np.array((relations[a]))))
relations[b] = list(np.unique(np.array((relations[b]))))
# remove pipes in parallel and replace with single pipe
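    # Assuming Hazen-Williams headloss h = K * Q**1.852, with K computed by
    # `calc_K` from length, diameter and roughness, pipes in parallel combine as
    #     (1 / K_eq)**(1 / 1.852) = sum_i (1 / K_i)**(1 / 1.852),
    # and the equivalent diameter is then recovered from K_eq at an assumed
    # roughness coefficient C = 100 using K = alpha * L / (C**1.852 * D**4.87).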
for k in range(len(parallel_links)):
leng = []
ks = []
for l in range(len(parallel_links[k])):
pipe = wn.get_link(parallel_links[k][l])
leng.append(pipe.length)
            ks.append(calc_K(pipe.length, pipe.diameter, pipe.roughness, alpha))
new_l = min(leng)
K_sum = 0
for m in range(len(ks)):
K_sum = K_sum + (1/ks[m])**(1/1.852)
new_K = 1/(K_sum**1.852)
new_d = (alpha*new_l/((100**1.852)*(new_K)))**(1/4.87)
for n in range(len(parallel_links[k])):
wn.remove_link(parallel_links[k][n], force=True)
del pipe_dict[parallel_links[k][n]]
wn.add_pipe('{}'.format(parallel_links[k][0]), start_node_name=parallel_pipes_list[k][0], end_node_name=parallel_pipes_list[k][1],length=new_l, diameter = new_d, roughness=100, minor_loss=0)
pipe_dict[parallel_links[k][0]] = {'Start node name': parallel_pipes_list[k][0], 'End node name':parallel_pipes_list[k][1], 'Length': new_l, 'Diameter':new_d, 'Roughness':100}
new_link_list.append((parallel_pipes_list[k][0],parallel_pipes_list[k][1]))
return wn, junc_dict, pipe_dict, relations, new_link_list
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestDataFrame(unittest.TestCase):
def setUp(self):
data_null = np.array([
["007", 1, 1, 2.0, True],
[None, 2, 2, None, True],
["12", None, 4, 2.0, False],
["1312", 0, None, 1.2, None],
])
self.df_null = pd.DataFrame({
"f_string": data_null[:, 0],
"f_long": data_null[:, 1],
"f_int": data_null[:, 2],
"f_double": data_null[:, 3],
"f_boolean": data_null[:, 4]
})
data = np.array([
["a", 1, 1, 2.0, True],
["abc", 2, 2, 2.4, True],
["c", 4, 4, 2.0, False],
["a", 0, 1, 1.2, False],
])
self.df = pd.DataFrame({
"f_string": data[:, 0],
"f_long": data[:, 1],
"f_int": data[:, 2],
"f_double": data[:, 3],
"f_boolean": data[:, 4]
})
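    # Note: the round trip through Alink is expected to map Flink SQL types back
    # to pandas *nullable* extension dtypes (StringDtype/Int64Dtype/Int32Dtype/
    # BooleanDtype) rather than plain numpy dtypes, so that None values survive;
    # the assertions in the tests below check exactly that mapping.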
def test_memory_null(self):
from pyalink.alink.config import g_config
g_config["collect_storage_type"] = "memory"
schema = "f_string string,f_long long,f_int int,f_double double,f_boolean boolean"
op = dataframeToOperator(self.df_null, schema, op_type="batch")
col_names = op.getColNames()
col_types = op.getColTypes()
self.assertEqual(col_names[0], "f_string")
self.assertEqual(col_names[1], "f_long")
self.assertEqual(col_names[2], "f_int")
self.assertEqual(col_names[3], "f_double")
self.assertEqual(col_names[4], "f_boolean")
self.assertEqual(col_types[0], "VARCHAR")
self.assertEqual(col_types[1], "BIGINT")
self.assertEqual(col_types[2], "INT")
self.assertEqual(col_types[3], "DOUBLE")
self.assertEqual(col_types[4], "BOOLEAN")
df2 = op.collectToDataframe()
print(df2)
print(df2.dtypes)
self.assertEqual(df2['f_string'].dtype, pd.StringDtype())
self.assertEqual(df2['f_long'].dtype, pd.Int64Dtype())
self.assertEqual(df2['f_int'].dtype, pd.Int32Dtype())
self.assertEqual(df2['f_double'].dtype, np.float64)
self.assertEqual(df2['f_boolean'].dtype, pd.BooleanDtype())
def test_memory(self):
from pyalink.alink.config import g_config
g_config["collect_storage_type"] = "memory"
schema = "f_string string,f_long long,f_int int,f_double double,f_boolean boolean"
op = dataframeToOperator(self.df, schemaStr=schema, op_type="batch")
col_names = op.getColNames()
col_types = op.getColTypes()
self.assertEqual(col_names[0], "f_string")
self.assertEqual(col_names[1], "f_long")
self.assertEqual(col_names[2], "f_int")
self.assertEqual(col_names[3], "f_double")
self.assertEqual(col_names[4], "f_boolean")
self.assertEqual(col_types[0], "VARCHAR")
self.assertEqual(col_types[1], "BIGINT")
self.assertEqual(col_types[2], "INT")
self.assertEqual(col_types[3], "DOUBLE")
self.assertEqual(col_types[4], "BOOLEAN")
df2 = op.collectToDataframe()
print(df2)
print(df2.dtypes)
self.assertEqual(df2['f_string'].dtype, pd.StringDtype())
self.assertEqual(df2['f_long'].dtype, pd.Int64Dtype())
self.assertEqual(df2['f_int'].dtype, pd.Int32Dtype())
self.assertEqual(df2['f_double'].dtype, np.float64)
self.assertEqual(df2['f_boolean'].dtype, pd.BooleanDtype())
def test_string_not_converted_to_double(self):
data = np.array([
["007"],
["012"],
])
source = dataframeToOperator(pd.DataFrame.from_records(data), schemaStr="str string", op_type="batch")
df = source.collectToDataframe()
print(df)
self.assertEqual(df['str'].iloc[0], "007")
self.assertEqual(df['str'].iloc[1], "012")
def test_df_to_op_speed(self):
import time
start_time = time.time()
m = {0: True, 1: False, 2: None}
users = []
for col in range(10000):
r = col % 3
users.append([col, "1\"" + str(col) + "\"1", m.get(r)])
df = pd.DataFrame(users)
source = BatchOperator.fromDataframe(df, schemaStr='id int, label string, b boolean')
source.firstN(10).print()
end_time = time.time()
elapsed_time = end_time - start_time
print(elapsed_time)
self.assertTrue(elapsed_time < 10)
def test_op_to_df_speed(self):
import time
start_time = time.time()
m = {0: True, 1: False, 2: None}
users = []
for col in range(50000):
r = col % 3
users.append([col, "1\"" + str(col) + "\"1", m.get(r)])
df = pd.DataFrame(users)
source = BatchOperator.fromDataframe(df, schemaStr='id int, label string, b boolean')
output = source.collectToDataframe()
print(output)
print(output.dtypes)
print(type(output["b"][1]))
end_time = time.time()
elapsed_time = end_time - start_time
self.assertTrue(elapsed_time < 10)
def test_date_format(self):
import datetime
data = pd.DataFrame([
[0, datetime.datetime.fromisoformat('2021-11-01 00:00:00'), 100.0],
[0, datetime.datetime.fromisoformat('2021-11-02 00:00:00'), 100.0],
[0, datetime.datetime.fromisoformat('2021-11-03 00:00:00'), 100.0],
[0, datetime.datetime.fromisoformat('2021-11-04 00:00:00'), 100.0],
[0, datetime.datetime.fromisoformat('2021-11-05 00:00:00'), 100.0]
])
source = dataframeToOperator(data, schemaStr='id int, ts timestamp, val double', op_type='batch')
df = source.collectToDataframe()
self.assertFalse(df.iloc[1]['ts'] is pd.NaT)
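# Hedged addition (not in the original file): standard unittest entry point so the
# test module can also be run directly as a script.
if __name__ == '__main__':
    unittest.main()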
|
{"hexsha": "59cebd7dc181e71913e5fc99b75628a2235befb6", "size": 5893, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/src/main/python/pyalink/alink/tests/common/types/conversion/test_dataframe_to_operator.py", "max_stars_repo_name": "wenwei8268/Alink", "max_stars_repo_head_hexsha": "c00702538c95a32403985ebd344eb6aeb81749a7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/src/main/python/pyalink/alink/tests/common/types/conversion/test_dataframe_to_operator.py", "max_issues_repo_name": "wenwei8268/Alink", "max_issues_repo_head_hexsha": "c00702538c95a32403985ebd344eb6aeb81749a7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/src/main/python/pyalink/alink/tests/common/types/conversion/test_dataframe_to_operator.py", "max_forks_repo_name": "wenwei8268/Alink", "max_forks_repo_head_hexsha": "c00702538c95a32403985ebd344eb6aeb81749a7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5, "max_line_length": 110, "alphanum_fraction": 0.5779738673, "include": true, "reason": "import numpy", "num_tokens": 1528}
|
## top-level script to manipulate and analyze empirical/simulated CMS output
## last updated 09.07.2017 vitti@broadinstitute.org #should handle basedir vs writedir
import matplotlib as mp
mp.use('agg')
import matplotlib.pyplot as plt
from power.power_func import merge_windows, get_window, check_outliers, check_rep_windows, calc_pr, get_pval, plotManhattan, \
plotManhattan_extended, quick_plot, get_causal_rank, get_cdf_from_causal_ranks, plot_dist
from power.parse_func import get_neut_repfile_name, get_sel_repfile_name, get_emp_cms_file, read_cms_repfile, \
read_pr, read_vals_lastcol, get_pr_filesnames, load_regions, load_power_dict
from tempfile import TemporaryFile
from xlwt import Workbook, easyxf #add to cms-venv (?)
from pybedtools import BedTool
import numpy as np
import argparse
import sys
import os
####################
## DEFINE PARSER ###
####################
def full_parser_power():
parser=argparse.ArgumentParser(description="This script contains command-line utilities for calculating CMS 2.0 power from simulated data and significance for CMS scores from empirical data.")
subparsers = parser.add_subparsers(help="sub-commands")
#######################
## VISUALIZE OUTPUT ###
#######################
regionviz_parser = subparsers.add_parser('regionviz', help="visualize component and combined scores across a region of simulated or empirical data")
regionviz_parser.add_argument('--cmsInfile', action='store', type=str, help="input .cms file to visualize")
regionviz_parser.add_argument('--hilitePos', action='store', type=int, help="hilite one SNP (e.g. if causal variant known from simulated data)")
distviz_parser = subparsers.add_parser('distviz', help="visualize distribution of CMS component or composite scores for simulated or empirical data")
distviz_parser.add_argument('--takeIndex', action='store', type=int, help="zero-based index of datacolumn to aggregate", default=-1)
distviz_parser.add_argument('--infile_singular', action='store', type=str, help="visualize distribution from this singular .cms file")
distviz_parser.add_argument('--infile_list', action='store', type=str, help="pass a file with a list of input files to view distributions pooled across multiples chromosomes, multiple replicates, etc.")
distviz_parser.add_argument('--takeLog', action='store_true',)
#####################
## QUANTIFY POWER ###
#####################
if True:
cdf_parser = subparsers.add_parser('cdf', help = 'plot cumulative density function of causal rank')
fpr_parser = subparsers.add_parser('fpr', help='calculate false positive rate for CMS_gw based on neutral simulations')
		tpr_parser = subparsers.add_parser('tpr', help='calculate true positive rate for CMS_gw based on simulations with selection')
		roc_parser = subparsers.add_parser('roc', help="calculate receiver operating characteristic curve given false and true positive rates")
#roc_parser.add_argument('--maxFPR', type=float, action="store", default=.001)
		cdf_parser.add_argument('--selPos', type=int, action='store', default=750000, help="position of the causal allele in simulated replicates")
		find_cutoff_parser = subparsers.add_parser('find_cutoff', help="get best TPR for a given FPR and return threshold cutoffs for region detection")
		find_cutoff_parser.add_argument('--maxFPR', type=float, action="store", default=0.05)
#find_cutoff_parser.add_argument('fprloc', type=str, action="store", help="specific to model/pop")
#find_cutoff_parser.add_argument('tprloc', type=str, action="store", help="specific to model/pop")
fpr_parser.add_argument('--score', type=str, default="cms_normed", action="store", help="use this score to call regions")
tpr_parser.add_argument('--score', type=str, default="cms_normed", action="store", help="use this score to call regions")
#############################
## EMPIRICAL SIGNIFICANCE ###
#############################
if True:
gw_regions_parser = subparsers.add_parser('gw_regions', help="pull designated significant regions from genome-wide normalized results")
gw_regions_parser.add_argument('--geneFile', help="input file containing bounds ")
regionlog_parser = subparsers.add_parser('regionlog', help='write regions to excel sheet with gene overlap')
regionlog_parser.add_argument('input_filelist', help="list with paths for all (per-pop) region files to be logged", type = str, action='store')
regionlog_parser.add_argument('gene_bedfile', help="name of file with information on boundaries of known genes", type = str, action='store')
regionlog_parser.add_argument('--save_filename', help="filename of region log to write (.xls or .txt)", type=str, action='store', default='test.xls')
extended_manhattan_parser = subparsers.add_parser('extended_manhattan', help = "generate per-chrom plots as one fig")
extended_manhattan_parser.add_argument('--plotscore', help="string label for score to plot: {seldaf, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed}", type=str, default="cms_normed")
		extended_manhattan_parser.add_argument('--regionsfile', help="optional; input file of regions designated as above the threshold")
extended_manhattan_parser.add_argument('--percentile', help="percentile to hilite")
extended_manhattan_parser.add_argument('--titlestring', help="title for plot")
extended_manhattan_parser.add_argument('--dpi', help="resolution for matplotlib", type=int, default=100)
##################
## SHARED ARGS ###
##################
for write_parser in [fpr_parser, tpr_parser, roc_parser, cdf_parser, gw_regions_parser, extended_manhattan_parser, find_cutoff_parser]:
write_parser.add_argument('--writedir', type =str, help='where to write output', default = "/idi/sabeti-scratch/jvitti/")
write_parser.add_argument('--checkOverwrite', action="store_true", default=False)
for model_parser in [fpr_parser, cdf_parser, tpr_parser, roc_parser, find_cutoff_parser]:
model_parser.add_argument('--model', type=str, default="nulldefault")
for sim_parser in [fpr_parser, tpr_parser, cdf_parser]:
		sim_parser.add_argument('--simpop', action='store', type=int, help='simulated population', default=1)
sim_parser.add_argument('--nrep', type=int, default=1000)
for emp_parser in [extended_manhattan_parser, gw_regions_parser]:
emp_parser.add_argument('--emppop', action='store', help='empirical population', default="YRI")
for regions_parser in [fpr_parser, gw_regions_parser, tpr_parser]:
		regions_parser.add_argument('regionlen', type = int, action='store', help='length of region to query', default=100000)
		regions_parser.add_argument('thresshold', type = float, action='store', help='percentage of the region that must exceed the cutoff', default=30.0)
		regions_parser.add_argument('cutoff', type = float, action='store', help='minimum significant value for region definition', default=3.0)
regions_parser.add_argument('--saveLog', type =str, help="save results as text file", )
for suffixed_parser in [fpr_parser, tpr_parser, roc_parser, cdf_parser, extended_manhattan_parser, gw_regions_parser, find_cutoff_parser]:
suffixed_parser.add_argument('--suffix', type= str, action='store', default='', help='point to files saved with suffix to index a particular run (if included)')
for plot_parser in [regionviz_parser, distviz_parser, extended_manhattan_parser, cdf_parser, roc_parser]:
plot_parser.add_argument('--savefilename', action='store', help='path of image file to save', default="test.png")
return parser
#################################
## DEFINE EXECUTIVE FUNCTIONS ###
#################################
######## Visualize composite score
######## output for a given CMS run.
def execute_regionviz(args):
''' visualize component and composite scores for a region '''
savefilename = args.savefilename
cmsfilename = args.cmsInfile
if os.path.isfile(cmsfilename):
print('loading from... ' + cmsfilename)
physpos, genpos, daf, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed = read_cms_repfile(cmsfilename) #need to make this flexible to regional input vs gw. (vs. likes)
causal_index = -1
if args.hilitePos is not None:
if args.hilitePos in physpos:
causal_index = physpos.index(args.hilitePos)
f, (ax1, ax2, ax3, ax4, ax5, ax6, ax7) = plt.subplots(7, sharex = True)
quick_plot(ax1, physpos, ihs_normed, "ihs_normed", causal_index)
quick_plot(ax2, physpos, delihh_normed, "delihh_normed", causal_index)
quick_plot(ax3, physpos, nsl_normed, "nsl_normed", causal_index)
quick_plot(ax4, physpos, xpehh_normed, "xpehh_normed", causal_index)
quick_plot(ax5, physpos, fst, "fst", causal_index)
quick_plot(ax6, physpos, deldaf, "deldaf", causal_index)
quick_plot(ax7, physpos, cms_unnormed, "cms", causal_index)
plt.savefig(savefilename)
print("plotted to " + savefilename)
plt.close()
return
def execute_distviz(args):
''' visualize the distribution of a component/composite statistic in empirical/simulated data '''
allfiles = []
if args.infile_list is not None:
infile = open(args.infile_list)
for line in infile:
filename = line.strip('\n')
assert(os.path.isfile(filename))
allfiles.append(filename)
infile.close()
if args.infile_singular is not None:
if args.infile_singular not in allfiles:
allfiles.append(args.infile_singular)
if len(allfiles) == 0:
print('must supply input .cms files')
sys.exit(0)
print('loading cms values from ' + str(len(allfiles)) + " files...")
#pass index, expectedlen?
savefilename = args.savefilename
takeIndex = args.takeIndex
allvals = []
for infilename in allfiles:
infile = open(infilename, 'r')
		infile.readline() #skip header line
for line in infile:
entries = line.split()
if len(entries) > takeIndex:
if not np.isnan(float(entries[takeIndex])):
allvals.append(float(entries[takeIndex])) #SOME EQUIVOCATION HERE #np.log(float(entries[takeIndex])))
else:
print('check input datafile and argument takeIndex')
infile.close()
if args.takeLog:
allvals = [np.log(item) for item in allvals]
plot_dist(allvals, savefilename)
return
def execute_extended_manhattan(args):
""" generate a genome-wide plot of CMS scores with option to hilight outlier regions """
plotscore = args.plotscore
selpop = args.emppop
basedir = args.writedir
suffix = args.suffix
savename = args.savefilename
dpi = args.dpi
numChr = 22
titlestring = args.titlestring
modelpops = {'YRI':1, 'GWD':1, 'LWK':1, 'MSL':1, 'ESN':1,
'CEU':2, 'FIN':2, 'IBS':2, 'TSI':2, 'GBR':2, 'IRN':2,
'CHB':3, 'JPT':3, 'KHV':3, 'CDX':3, 'CHS':3,
'BEB':4, 'STU':4, 'ITU':4, 'PJL':4, 'GIH':4}
pop = modelpops[selpop]
#colorDict = {1:'#FFB933', 2:'#0EBFF0', 3:'#ADCD00', 4:'#8B08B0'} #1000 Genomes group color scheme
colorDict = {1:'#cec627', 2:'#0EBFF0', 3:'#65ff00', 4:'#8B08B0'} #make it pop-!
f, axarr = plt.subplots(numChr, 1, sharex = True, sharey=True, dpi=dpi, figsize=(7, 10))
plt.suptitle(titlestring, fontsize=10)
plt.xlabel('position')
plt.ylabel('cms_gw normed score')
all_emp_pos, all_emp_scores = [], []
for chrom in range(1,numChr +1):
emp_cms_filename = get_emp_cms_file(selpop, chrom, normed=True, suffix=suffix, basedir=basedir)
print('loading chr ' + str(chrom) + ": " + emp_cms_filename)
if not os.path.isfile(emp_cms_filename):
print("missing: " + emp_cms_filename)
break
physpos, genpos, seldaf, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed = read_cms_repfile(emp_cms_filename)
iax = chrom-1
ax = axarr[iax]
#ax.grid()
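		# resolve the requested score column by name from the variables unpacked above (hence eval)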
plot_data = eval(plotscore)
plotManhattan_extended(ax, plot_data, physpos, chrom)
all_emp_pos.append(physpos)
all_emp_scores.append(plot_data)
################################
## HILITE SIGNIFICANT REGIONS ##
################################
if args.regionsfile is not None:
regionchrs, regionstarts, regionends = load_regions(args.regionsfile)
print('loaded ' + str(len(regionchrs)) + ' significant regions from ' + args.regionsfile)
for iregion in range(len(regionchrs)):
regionchr, regionstart, regionend = regionchrs[iregion], regionstarts[iregion], regionends[iregion]
this_chrom = int(regionchr.strip('chr'))
ichrom = this_chrom-1
chrompos, chromscores = all_emp_pos[ichrom], all_emp_scores[ichrom]
zipped = zip(chrompos, chromscores)
plotpos, plotvals = [], []
for locus in zipped:
if locus[0] >= regionstart:
plotpos.append(locus[0])
plotvals.append(locus[1])
if locus[0] > regionend:
break
axarr[ichrom].plot(plotpos, plotvals, color=colorDict[pop], markersize=1)
if args.percentile is not None:
percentile = float(args.percentile)
print('plotting data with heuristic cutoff for ' + str(percentile) + " percentile...")
flat_emp_scores = [item for sublist in all_emp_scores for item in sublist if not np.isnan(item)]
score_cutoff = float(np.percentile(flat_emp_scores, percentile))
print("score cutoff: " + str(score_cutoff))
for chrom in range(1,numChr +1):
iax = chrom-1
ax = axarr[iax]
maximumVal = ax.get_xlim()[1]
xpoints = np.array([0, maximumVal])
ypoints = np.array([score_cutoff, score_cutoff])
ax.plot(xpoints, ypoints ,linestyle = "dotted", color="red", markersize=.3)
			#get empirical scores and positions that pass the threshold and plot them, colored as above
these_scores, these_pos = all_emp_scores[iax], all_emp_pos[iax]
zipped = zip(these_scores, these_pos)
significant = [item for item in zipped if item[0] >= score_cutoff]
signif_vals = [item[0] for item in significant]
signif_pos = [item[1] for item in significant]
ax.plot(signif_pos, signif_vals, color=colorDict[pop], linestyle='None', marker=".", markersize=.3)#, markersize=1)
plt.savefig(savename)
print('saved to: ' + savename)
return
######## Quantify and visualize power
######## across significance cutoffs.
def execute_cdf(args):
""" visualize power to localize variants: estimate p(causal variant captured | signif thresshold includes x top SNPs) from simulates. plot as cumulative density function"""
reps = args.nrep
savefilename = args.savefilename
writedir = args.writedir
scenars = ['0.70', '0.80', '0.90']#'0.10', '0.20', '0.30', '0.40', '0.50', '0.60', '0.70', '0.80', '0.90']
model = args.model
causalPos = args.selPos
suffix = args.suffix
#causal_ranks_all = []
causal_ranks_1, causal_ranks_2, causal_ranks_3, causal_ranks_4 = [], [], [], []
for pop in [1, 2, 3, 4]:
for scenar in scenars:
for irep in range(1, reps+1):
cmsfilename = get_sel_repfile_name(model, irep, pop, scenar, normed = False, basedir=writedir, suffix=suffix)
if os.path.isfile(cmsfilename):
physpos, genpos, seldaf, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed = read_cms_repfile(cmsfilename)
if causalPos in physpos:
causal_index = physpos.index(causalPos)
causal_unnormed = cms_unnormed[causal_index]
causal_rank = get_causal_rank(cms_unnormed, causal_unnormed)
#print(cmsfilename)
#print('causal rank: ' + str(causal_rank))
#causal_ranks.append(causal_rank)
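						# select the per-population list (causal_ranks_1 ... causal_ranks_4) by name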
this_array = eval('causal_ranks_' + str(pop))
if not np.isnan(causal_rank):
this_array.append(causal_rank)
else:
print("missing; " + cmsfilename)
print("for pop 1, loaded " + str(len(causal_ranks_1)) + " replicates.")
print("for pop 2, loaded " + str(len(causal_ranks_2)) + " replicates.")
print("for pop 3, loaded " + str(len(causal_ranks_3)) + " replicates.")
print("for pop 4, loaded " + str(len(causal_ranks_4)) + " replicates.")
cdf_fig, cdf_ax = plt.subplots()
if len(causal_ranks_1) > 0:
cdf_bins1, cdf1 = get_cdf_from_causal_ranks(causal_ranks_1)
cdf_ax.plot(cdf_bins1[1:], cdf1, color="yellow")
if len(causal_ranks_2) > 0:
cdf_bins2, cdf2 = get_cdf_from_causal_ranks(causal_ranks_2)
cdf_ax.plot(cdf_bins2[1:], cdf2, color="blue")
if len(causal_ranks_3) > 0:
cdf_bins3, cdf3 = get_cdf_from_causal_ranks(causal_ranks_3)
cdf_ax.plot(cdf_bins3[1:], cdf3, color="green")
if len(causal_ranks_4) > 0:
cdf_bins4, cdf4 = get_cdf_from_causal_ranks(causal_ranks_4)
cdf_ax.plot(cdf_bins4[1:], cdf4, color="purple")
cdf_ax.set_xlim([0, 50])
plt.title(model) #+ ", " + str(len(causal_ranks)) + " selection replicates")
plt.ylabel('probability that the causal variant is captured')
	plt.xlabel('significance threshold (i.e., examining the top x variants)')
plt.savefig(savefilename)
plt.close()
print('plotted to ' + savefilename)
return
def execute_fpr(args):
''' estimate false positive rate for region identification '''
model = args.model
regionlen = args.regionlen
thresshold = args.thresshold
cutoff = args.cutoff
numReps = args.nrep
pop = args.simpop
suffix = args.suffix
writedir = args.writedir
takeScore = args.score
all_scores = []
all_percentages = []
if True:
for irep in range(1, numReps + 1):
repfilename = get_neut_repfile_name(model, irep, pop, normed=True, suffix=suffix, basedir=writedir)
if (irep==1):
print(repfilename)
physpos, genpos, seldaf, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed = read_cms_repfile(repfilename)
#physpos, genpos, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed = read_cms_repfile(repfilename)
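			# resolve the chosen score column (default cms_normed) by name from the unpacked variables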
these_scores = eval(takeScore)
if len(these_scores) > 0:
all_scores.append(these_scores)
rep_percentages = check_rep_windows(physpos, these_scores, regionlen, cutoff = cutoff)
all_percentages.append(rep_percentages)
#FOR DEBUG
#print(str(rep_percentages) + "\t" + repfilename)
if len(rep_percentages) > 0:
if max(rep_percentages) > thresshold:
print("false positive: " + repfilename)
	print('loaded ' + str(len(all_scores)) + " replicates for model " + model + "...")
fpr = calc_pr(all_percentages, thresshold)
print('false positive rate: ' + str(fpr) + "\n")
if args.saveLog is not None:
writefilename = args.saveLog
writefile = open(writefilename, 'w')
writefile.write(str(fpr)+'\n')
writefile.write(model + "\t" + str(regionlen) + "\t" + str(thresshold) + '\t' + str(cutoff) + '\n')
writefile.close()
print('wrote to : ' + str(writefilename))
return
def execute_tpr(args):
''' estimate true positive rate for region detection '''
model = args.model
regionlen = args.regionlen
thresshold = args.thresshold
cutoff = args.cutoff
numReps = args.nrep
pop = args.simpop
suffix = args.suffix
writedir = args.writedir
takeScore = args.score
all_scores = []
all_percentages = []
#if args.saveLog is not None:
# writefilename = args.saveLog
# if os.path.isfile(writefilename):
# print(writefilename + " already exists; aborting.")
# sys.exit(0)
#per seldaf
dafbins = [['0.10', '0.20', '0.30', '0.40', '0.50', '0.60', '0.70', '0.80', '0.90'], ['0.10', '0.20', '0.30'], ['0.40', '0.50', '0.60'], ['0.70', '0.80', '0.90'], ['0.90']]
daflabels = ['all', 'lo', 'mid', 'hi','highest']
for ibin in [3]:#[1, 2, 3, 4]:#range(1):
thesebins, thislabel = dafbins[ibin], daflabels[ibin]
allrepfilenames = []
for selbin in thesebins:
for irep in range(1, numReps + 1):
repfilename = get_sel_repfile_name(model, irep, pop, selbin, normed=True, suffix=suffix, basedir=writedir)
if (irep==1):
print(repfilename)
if os.path.isfile(repfilename):
allrepfilenames.append(repfilename)
print('loaded ' + str(len(allrepfilenames)) + " replicates...")
#numToTake = min(500, len(allrepfilenames))
#chosen = np.random.choice(allrepfilenames, numToTake, replace=False) #take random sample
		chosen = allrepfilenames #use all replicates (random subsampling above is commented out)
for repfilename in chosen:
physpos, genpos, seldaf, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed = read_cms_repfile(repfilename)
#physpos, genpos, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed = read_cms_repfile(repfilename)
these_scores = eval(takeScore)
if len(these_scores) > 0:
all_scores.append(these_scores)
rep_percentages = check_rep_windows(physpos, these_scores, regionlen, cutoff = cutoff)
all_percentages.append(rep_percentages)
		print('loaded ' + str(len(all_scores)) + " replicates for model " + model + "...")
tpr = calc_pr(all_percentages, thresshold)
print('true positive rate: ' + str(tpr) + "\n")
if args.saveLog is not None:
writefilename = args.saveLog +"_" + thislabel
writefile = open(writefilename, 'w')
writefile.write(str(tpr)+'\n')
writefile.write(model + "\t" + str(regionlen) + "\t" + str(thresshold) + '\t' + str(cutoff) + '\n')
writefile.close()
print('wrote to : ' + str(writefilename))
return
def execute_roc(args):
''' plot receiver operating characteristic curve -- false positive rate vs. true positive rate '''
writedir = args.writedir
likes_dir_suffix = args.suffix #e.g. _maf20
model = args.model
modeldir = writedir + model + "/"
#make selFreq toggleable? pass to get_pr_filenames
savefilename = args.savefilename
allfpr, alltpr = load_power_dict(modeldir, likes_dir_suffix)
fpr_keys = allfpr.keys()
tpr_keys = alltpr.keys()
regionlens = list(set([item[0] for item in fpr_keys]))
	thressholds = list(set([item[1] for item in fpr_keys]))
cutoffs = list(set([item[2] for item in fpr_keys]))
freq_class = "hi"
###############
## PLOT DATA ##
###############
fig, ax = plt.subplots(1)
colorDict = {'ave':'black', 1:'goldenrod', 2:'blue', 3:'green', 4:'purple'}
for plot_set in [1, 2, 3, 4, 'ave']:
plotfpr, plottpr = [], []
for regionlen in regionlens:
for percentage in thressholds:
for cutoff in cutoffs:
this_key = (regionlen, percentage, cutoff, plot_set, freq_class) #make this toggleable - might want to print per-pop #(regionlen, percentage, cutoff, pop, freq_class)
if this_key in fpr_keys and this_key in tpr_keys:
plotfpr.append(allfpr[this_key])
plottpr.append(alltpr[this_key])
else:
#print(this_key) #missing datapoint
pass
if (len(plotfpr)) > 0:
plotfpr, plottpr = zip(*sorted(zip(plotfpr, plottpr)))
ax.scatter(plotfpr, plottpr, label=str(plot_set), color=colorDict[plot_set], s=.5)
plt.suptitle('ROC for ' + model + " " + likes_dir_suffix)
ax.set_xlabel('FPR')
ax.set_ylabel('TPR')
ax.set_xlim([-.1,1])
ax.set_ylim([0,1])
plt.legend()
plt.savefig(savefilename)
plt.close()
print("plotted to " + savefilename)
return
def execute_find_cutoff(args): #MUST ADD TRACK OF SUFFIX
''' given FPR and TPR calculations, select an optimal significance cutoff subject to a specified criterion '''
writedir = args.writedir
likes_dir_suffix = args.suffix #e.g. _maf20
model = args.model
modeldir = writedir + model + "/"
maxFPR = args.maxFPR
#############################
## CHOOSE OPT MEETING CRIT ##
#############################
all_fpr, all_tpr = load_power_dict(modeldir,likes_dir_suffix)
for pop in [1, 2, 3, 4, "ave"]:
best_tpr, best_fpr = 0, 0
best_cutoff = 0
print("Now finding optimal with a maximum FPR of " + str(maxFPR) + " for pop " + str(pop) + " using demographic model: " + model)
fpr_keys = all_fpr.keys()
tpr_keys = all_tpr.keys()
thesekeys_fpr = [key for key in fpr_keys if pop in key]
thesekeys_tpr = [key for key in tpr_keys if pop in key]
for key in thesekeys_fpr:
if all_fpr[key] <= maxFPR:
#tprkey = (key[0], key[1], key[2], freq_class)
tprkey = key
if tprkey in tpr_keys:
tpr = all_tpr[tprkey]
if tpr > best_tpr:
best_tpr = tpr
best_cutoff = tprkey
best_fpr = all_fpr[key]
print(best_cutoff)
print("FPR: " + str(best_fpr))
print("TPR: " + str(best_tpr) + "\n")
return
######## Apply significance cutoffs
######## to empirical results.
def execute_gw_regions(args):
''' apply significance cutoff to genome-wide data to identify regions '''
basedir = args.writedir
pop = args.emppop
thresshold = args.thresshold
cutoff = args.cutoff
windowlen = args.regionlen
suffix = args.suffix
chroms = range(1,23)
signif_windows = []
####################
## LOOP OVER CHRS ##
####################
for chrom in chroms:
chrom_signif = []
normedempfilename = get_emp_cms_file(pop, chrom, normed=True, suffix=suffix, basedir=basedir)
if not os.path.isfile(normedempfilename):
print("missing: " + normedempfilename)
else:
physpos, genpos, seldaf, ihs_normed, delihh_normed, nsl_normed, xpehh_normed, fst, deldaf, cms_unnormed, cms_normed = read_cms_repfile(normedempfilename)
for iPos in range(len(physpos)):
##################
## CHECK REGION ##
##################
window_scores = get_window(iPos, physpos, cms_normed, windowlen)
percentage = check_outliers(window_scores, cutoff)
if percentage > thresshold:
chrom_signif.append(physpos[iPos])
signif_windows.append(chrom_signif)
##############################
## MERGE CONTIGUOUS WINDOWS ##
##############################
final_starts = []
final_ends = []
print('merging regions')
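	# merge_windows (from power.power_func) is expected to collapse overlapping significant windows into contiguous regions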
for chrom_signif in signif_windows:
starts, ends = merge_windows(chrom_signif, windowlen)
final_starts.append(starts)
final_ends.append(ends)
###################
## WRITE TO FILE ##
###################
if args.saveLog is not None:
writefilename = args.saveLog
writefile = open(writefilename, 'w')
for ichrom in range(len(final_starts)):
chromnum = ichrom + 1
starts = final_starts[ichrom]
ends = final_ends[ichrom]
for iregion in range(len(starts)-1):
writeline = "chr" + str(chromnum) + "\t" + str(starts[iregion]) + "\t" + str(ends[iregion]) + '\n'
writefile.write(writeline)
writefile.close()
print('wrote to ' + writefilename)
return
def execute_regionlog(args):
input_filelist = args.input_filelist
genefilename = args.gene_bedfile
savefilename = args.save_filename
if ".xls" in savefilename:
writeExcel = True
else:
writeExcel = False
##################
## LOAD REGIONS ##
##################
regionfiles = []
takepops = []
infile = open(input_filelist, 'r')
for line in infile:
regionfilename = line.strip('\n')
		filename = regionfilename.split('/')[-1]
this_pop = filename.split('_')[0]
if os.path.isfile(regionfilename):
regionfiles.append(regionfilename)
takepops.append(this_pop)
if len(regionfiles) == 0:
print("found no regions")
return
else:
totalselregions = 0
print('loaded regions from ' + str(len(regionfiles)) + " files...")
header = ['chrom', 'start', 'end', 'len (kb)', 'pop', 'genes',]
####################
## PREPARE OUTPUT ##
####################
if writeExcel:
boldstyle = easyxf('font: bold 1;')
wrapstyle = easyxf('alignment: wrap on, vert center, horiz center')
book = Workbook()
sheet1 = book.add_sheet('gw significant regions')
colWidths = [10, 15, 15, 10, 25, 10]
for icol in range(len(colWidths)):
sheet1.col(icol).width = colWidths[icol] * 256 #~x char wide
for icol in range(len(header)):
sheet1.write(0, icol, header[icol], boldstyle)
else:
writefile = open(savefilename, 'w')
writestring = ""
for icol in range(len(header)):
writestring += header[icol] + "\t"
writestring = writestring.strip('\t')
writefile.write(writestring + "\n")
####################################
## CHECK REGIONS FOR GENE OVERLAP ##
	####################################
irow = -1 #0
for iregionfilename in range(len(regionfiles)):
regionfilename = regionfiles[iregionfilename]
pop = takepops[iregionfilename]
genes = BedTool(genefilename)
regions = BedTool(regionfilename)
intersect = regions.intersect(genes, wa = True, wb = True)
#narrow down
geneDict = {}
for item in intersect:
selregion_chr = item[0]
selregion_start, selregion_end = item[1], item[2]
key = (selregion_chr, selregion_start, selregion_end)
generegion_id = item[6]
if key not in geneDict.keys():
geneDict[key] = [generegion_id]
else:
geneDict[key].append(generegion_id)
for region in regions:
totalselregions +=1
irow +=1
chrom, start, end = region[0], region[1], region[2]
key = (chrom, start, end)
regionlen = int(end) - int(start)
			kb_regionlen = round(regionlen / 1000)
if key in geneDict.keys():
genelist = geneDict[key]
genes = set(genelist)
genestring = ""
for gene in genes:
genestring += gene + ", "
genestring = genestring[:-2]
if writeExcel:
sheet1.write(irow+1, 0, chrom, wrapstyle)
sheet1.write(irow+1, 1, int(start), wrapstyle)
sheet1.write(irow+1, 2, int(end), wrapstyle)
sheet1.write(irow+1, 3, kb_regionlen, wrapstyle)
sheet1.write(irow+1, 4, pop, wrapstyle)
sheet1.write(irow+1, 5, genestring, wrapstyle)
else:
writestring = str(chrom) + "\t" + str(start) + "\t" + str(end) + "\t" + str(kb_regionlen) + "\t" + pop + "\t" + genestring + "\n"
writefile.write(writestring)
else:
if writeExcel:
sheet1.write(irow+1, 0, chrom, wrapstyle)
sheet1.write(irow+1, 1, int(start), wrapstyle)
sheet1.write(irow+1, 2, int(end), wrapstyle)
sheet1.write(irow+1, 3, kb_regionlen,wrapstyle)
sheet1.write(irow+1, 4, pop,wrapstyle)
else:
writestring = str(chrom) + "\t" + str(start) + "\t" + str(end) + "\t" + str(kb_regionlen) + "\t" + pop + "\t" + "-" + "\n"
writefile.write(writestring)
if writeExcel:
book.save(savefilename)
book.save(TemporaryFile())
else:
writefile.close()
print('wrote ' + str(totalselregions) + ' significant regions to: ' + savefilename)
return
##########
## MAIN ##
##########
if __name__ == '__main__':
runparser = full_parser_power()
args = runparser.parse_args()
# if called with no arguments, print help
if len(sys.argv)==1:
runparser.parse_args(['--help'])
	subcommand = sys.argv[1]
	function_name = 'execute_' + subcommand
	globals()[function_name](args) #dispatches to the matching function defined above, which wraps other programs in the pipeline
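# Hedged usage sketch (model name, population code, and file names below are
# illustrative placeholders, not values prescribed by this script):
#   python power.py fpr 100000 30 3.0 --model nulldefault --simpop 1 --nrep 1000 --saveLog fpr_log.txt
#   python power.py roc --model nulldefault --savefilename roc.png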
|
{"hexsha": "90875dae7c1a85a7a4fd1f43641b6e5348ae9489", "size": 30044, "ext": "py", "lang": "Python", "max_stars_repo_path": "cms/power.py", "max_stars_repo_name": "broadinstitute/cms", "max_stars_repo_head_hexsha": "4743ffd3feac08f02be7719c82b3371cb94a4d6b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2015-05-18T14:39:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-22T12:57:07.000Z", "max_issues_repo_path": "cms/power.py", "max_issues_repo_name": "broadinstitute/cms", "max_issues_repo_head_hexsha": "4743ffd3feac08f02be7719c82b3371cb94a4d6b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2015-04-13T20:48:02.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-19T07:27:30.000Z", "max_forks_repo_path": "cms/power.py", "max_forks_repo_name": "broadinstitute/cms", "max_forks_repo_head_hexsha": "4743ffd3feac08f02be7719c82b3371cb94a4d6b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2016-03-31T06:56:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-30T16:35:45.000Z", "avg_line_length": 43.2287769784, "max_line_length": 228, "alphanum_fraction": 0.6941485821, "include": true, "reason": "import numpy", "num_tokens": 8716}
|
import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
import src.protocol_ansatz as protocol_ansatz
model = 'lmg'
model_parameters = dict(num_spins=50)
optimization_method = 'Nelder-Mead'
protocol = protocol_ansatz.CRABVariableEndpointsProtocolAnsatz(num_frequencies=4)
initial_parameters = [[-0.1, 0.1]] * (2 * protocol.num_frequencies)
initial_parameters += [0, 1] # these are the initial values of y0 and y1
parameters_constraints = [-2, 2]
# ------ build and check name for output file
additional_file_name_qualifiers = None
output_file_name = model + '_' + str(protocol)
if str(protocol).startswith('crab'):
output_file_name += '{}freq'.format(protocol.num_frequencies)
output_file_name += '_' + optimization_method.replace('-', '').lower()
output_file_name += '_bound{:02}'.format(parameters_constraints[1])
if additional_file_name_qualifiers is not None:
output_file_name += '_' + additional_file_name_qualifiers
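# avoid clobbering output from an earlier run: append (01), (02), ... until the name is unused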
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
_output_file_name = output_file_name + '({:02})'.format(filenum)
filenum += 1
output_file_name = _output_file_name + '.csv'
# ------ set up logger
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
"[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(logFormatter)
# rootLogger.addHandler(consoleHandler)
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
results = optimization.find_best_protocol(
problem_specification=dict(
model=model,
model_parameters=model_parameters,
task='critical point'
),
optimization_specs=dict(
protocol=protocol,
optimization_method=optimization_method,
initial_parameters=initial_parameters,
parameters_constraints=parameters_constraints
),
other_options=dict(
scan_times=np.linspace(0.1, 2, 100)
)
)
# ------ save results to file
results.to_csv(output_file_name)
|
{"hexsha": "3f4c8909f0bb22174a08f10a2993e18774dc4ed4", "size": 2449, "ext": "py", "lang": "Python", "max_stars_repo_path": "results/lz_optimizations_crabVariableEndPoints_20200623/script_lmg_crab4freq_neldermead_bound02.py", "max_stars_repo_name": "lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381", "max_stars_repo_head_hexsha": "29f80dcf914096555cee9bc2e18249a2c95d6a50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-21T02:31:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-21T02:31:41.000Z", "max_issues_repo_path": "results/rabi_and_lmg_optimizations_crab_variableendpoints_20190919/script_lmg_crab4freq_neldermead_bound02.py", "max_issues_repo_name": "lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381", "max_issues_repo_head_hexsha": "29f80dcf914096555cee9bc2e18249a2c95d6a50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "results/rabi_and_lmg_optimizations_crab_variableendpoints_20190919/script_lmg_crab4freq_neldermead_bound02.py", "max_forks_repo_name": "lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381", "max_forks_repo_head_hexsha": "29f80dcf914096555cee9bc2e18249a2c95d6a50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3974358974, "max_line_length": 81, "alphanum_fraction": 0.7272356064, "include": true, "reason": "import numpy", "num_tokens": 553}
|
# +
import pandas as pd
import numpy as np
import numpy.random as npr
import matplotlib
matplotlib.rcParams.update({'font.sans-serif' : 'Helvetica',
                            'axes.labelsize': 10,
                            'xtick.labelsize' : 6,
                            'ytick.labelsize' : 6,
                            'axes.titlesize' : 10})
import matplotlib.pyplot as plt
from causalgraphicalmodels import CausalGraphicalModel, StructuralCausalModel
import pylogit
import pylogit as cm
from collections import OrderedDict
from functools import reduce
import statsmodels.api as sm
import statsmodels.formula.api as smf
from math import ceil
from IPython import display
import seaborn as sns
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
from pandas.plotting import scatter_matrix
from scipy import sparse, stats
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_auc_score, roc_curve
color_names = ["windows blue",
"amber",
"crimson",
"faded green",
"dusty purple",
"greyish"]
colors = sns.xkcd_palette(color_names)
sns.set(style="white", palette=sns.xkcd_palette(color_names), color_codes = False)
"""
References: Blei et al 2018 (https://github.com/blei-lab/deconfounder_tutorial)
"""
def confounder_ppca(X, latent_dim, holdout_portion):
"""
Function to estimate a substitute confounder using PPCA.
Adopted from the deconfounder_tutorial.ipynb
https://github.com/blei-lab/deconfounder_tutorial
Args:
X: A numpy array or pandas dataframe of the original covariates
dimension: (n x m)
latent_dim: The number of latend factors to be estimated
holdout_portion: Fraction of the data to be used as holdout
Returns:
w_mean_inferred: (latent_dim x n) matrix
w_std_inferred: (latent_dim x n) matrix
z_mean_inferred: mean of substitute confounder
dimension (n x latend_dim)
z_std_inferred: std of substitute confounder
dimension (n x latend_dim)
x_vad: (nxm) matrix with the heldout entries only and 0 elsewhere
holdout_mask: sparse (nxm) matrix with 1 on the heldout entries and
0 elsewhere, s.t x_vad = X*holdout_mask
holdout_rows: row indeces of the heldout entries
"""
num_datapoints, data_dim = X.shape
n_holdout = int(holdout_portion * num_datapoints * data_dim)
holdout_row = np.random.randint(num_datapoints, size=n_holdout)
holdout_col = np.random.randint(data_dim, size=n_holdout)
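    # densify a sparse COO indicator of the randomly drawn held-out (row, col) entries;
    # these entries are zeroed out of x_train below and kept only in x_vad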
holdout_mask = (sparse.coo_matrix((np.ones(n_holdout), \
(holdout_row, holdout_col)), \
shape = X.shape)).toarray()
holdout_subjects = np.unique(holdout_row)
x_train = np.multiply(1-holdout_mask, X)
x_vad = np.multiply(holdout_mask, X)
def ppca_model(data_dim, latent_dim, num_datapoints, stddv_datapoints):
w = ed.Normal(loc=tf.zeros([latent_dim, data_dim]),
scale=tf.ones([latent_dim, data_dim]),
name="w") # parameter
z = ed.Normal(loc=tf.zeros([num_datapoints, latent_dim]),
scale=tf.ones([num_datapoints, latent_dim]),
name="z") # local latent variable / substitute confounder
x = ed.Normal(loc=tf.multiply(tf.matmul(z, w), 1-holdout_mask),
scale=stddv_datapoints * tf.ones([num_datapoints, data_dim]),
name="x") # (modeled) data
return x, (w, z)
log_joint = ed.make_log_joint_fn(ppca_model)
stddv_datapoints = 0.1
model = ppca_model(data_dim=data_dim,
latent_dim=latent_dim,
num_datapoints=num_datapoints,
stddv_datapoints=stddv_datapoints)
def variational_model(qw_mean, qw_stddv, qz_mean, qz_stddv):
qw = ed.Normal(loc=qw_mean, scale=qw_stddv, name="qw")
qz = ed.Normal(loc=qz_mean, scale=qz_stddv, name="qz")
return qw, qz
log_q = ed.make_log_joint_fn(variational_model)
def target(w, z):
"""Unnormalized target density as a function of the parameters."""
return log_joint(data_dim=data_dim,
latent_dim=latent_dim,
num_datapoints=num_datapoints,
stddv_datapoints=stddv_datapoints,
w=w, z=z, x=x_train)
def target_q(qw, qz):
return log_q(qw_mean=qw_mean, qw_stddv=qw_stddv,
qz_mean=qz_mean, qz_stddv=qz_stddv,
qw=qw, qz=qz)
qw_mean = tf.Variable(np.ones([latent_dim, data_dim]), dtype=tf.float32)
qz_mean = tf.Variable(np.ones([num_datapoints, latent_dim]), dtype=tf.float32)
qw_stddv = tf.nn.softplus(tf.Variable(-4 * np.ones([latent_dim, data_dim]), dtype=tf.float32))
qz_stddv = tf.nn.softplus(tf.Variable(-4 * np.ones([num_datapoints, latent_dim]), dtype=tf.float32))
qw, qz = variational_model(qw_mean=qw_mean, qw_stddv=qw_stddv,
qz_mean=qz_mean, qz_stddv=qz_stddv)
energy = target(qw, qz)
entropy = -target_q(qw, qz)
elbo = energy + entropy
optimizer = tf.train.AdamOptimizer(learning_rate = 0.05)
train = optimizer.minimize(-elbo)
init = tf.global_variables_initializer()
t = []
num_epochs = 500
with tf.Session() as sess:
sess.run(init)
for i in range(num_epochs):
sess.run(train)
if i % 5 == 0:
t.append(sess.run([elbo]))
w_mean_inferred = sess.run(qw_mean)
w_stddv_inferred = sess.run(qw_stddv)
z_mean_inferred = sess.run(qz_mean)
z_stddv_inferred = sess.run(qz_stddv)
print("Inferred axes:")
print(w_mean_inferred)
print("Standard Deviation:")
print(w_stddv_inferred)
plt.plot(range(1, num_epochs, 5), t)
plt.show()
def replace_latents(w, z):
def interceptor(rv_constructor, *rv_args, **rv_kwargs):
"""Replaces the priors with actual values to generate samples from."""
name = rv_kwargs.pop("name")
if name == "w":
rv_kwargs["value"] = w
elif name == "z":
rv_kwargs["value"] = z
return rv_constructor(*rv_args, **rv_kwargs)
return interceptor
return [w_mean_inferred, w_stddv_inferred, z_mean_inferred, z_stddv_inferred], x_vad, holdout_mask, holdout_row
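# Hedged usage sketch (not in the original tutorial; `X` and the argument values
# below are illustrative placeholders):
#   factors, x_vad, holdout_mask, holdout_row = confounder_ppca(X, latent_dim=2, holdout_portion=0.2)
#   w_mean, w_stddv, z_mean, z_stddv = factors
#   # z_mean (n x latent_dim) is the substitute confounder to use downstream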
def ppca_model(data_dim, latent_dim, num_datapoints, stddv_datapoints, holdout_mask):
w = ed.Normal(loc=tf.zeros([latent_dim, data_dim]),
scale=tf.ones([latent_dim, data_dim]),
name="w") # parameter
z = ed.Normal(loc=tf.zeros([num_datapoints, latent_dim]),
scale=tf.ones([num_datapoints, latent_dim]),
name="z") # local latent variable / substitute confounder
x = ed.Normal(loc=tf.multiply(tf.matmul(z, w), 1-holdout_mask),
scale=stddv_datapoints * tf.ones([num_datapoints, data_dim]),
name="x") # (modeled) data
return x, (w, z)
def replace_latents(w, z):
def interceptor(rv_constructor, *rv_args, **rv_kwargs):
"""Replaces the priors with actual values to generate samples from."""
name = rv_kwargs.pop("name")
if name == "w":
rv_kwargs["value"] = w
elif name == "z":
rv_kwargs["value"] = z
return rv_constructor(*rv_args, **rv_kwargs)
return interceptor
|
{"hexsha": "cbf3155d77065d7df2d0cd497243478b01f92089", "size": 7976, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/working/factor_models.py", "max_stars_repo_name": "hassanobeid1994/tr_b_causal_2020", "max_stars_repo_head_hexsha": "1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/working/factor_models.py", "max_issues_repo_name": "hassanobeid1994/tr_b_causal_2020", "max_issues_repo_head_hexsha": "1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 89, "max_issues_repo_issues_event_min_datetime": "2020-02-10T02:52:11.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-23T03:50:27.000Z", "max_forks_repo_path": "notebooks/working/factor_models.py", "max_forks_repo_name": "hassan-obeid/tr_b_causal_2020", "max_forks_repo_head_hexsha": "1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3793103448, "max_line_length": 115, "alphanum_fraction": 0.6334002006, "include": true, "reason": "import numpy,from scipy,import statsmodels", "num_tokens": 2005}
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the regression Monte Carlo algorithm."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_quant_finance.experimental.lsm_algorithm import lsm
from tf_quant_finance.experimental.lsm_algorithm import payoff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class LsmTest(tf.test.TestCase):
def setUp(self):
"""Sets `samples` as in the Longstaff-Schwartz paper."""
super(LsmTest, self).setUp()
# See Longstaff, F.A. and Schwartz, E.S., 2001. Valuing American options by
# simulation: a simple least-squares approach.
samples = [[1.0, 1.09, 1.08, 1.34],
[1.0, 1.16, 1.26, 1.54],
[1.0, 1.22, 1.07, 1.03],
[1.0, 0.93, 0.97, 0.92],
[1.0, 1.11, 1.56, 1.52],
[1.0, 0.76, 0.77, 0.90],
[1.0, 0.92, 0.84, 1.01],
[1.0, 0.88, 1.22, 1.34]]
# Expand dims to reflect that `samples` represent sample paths of
# a 1-dimensional process
self.samples = np.expand_dims(samples, -1)
# Interest rates between exercise times
interest_rates = [0.06, 0.06, 0.06]
# Corresponding discount factors
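    # (continuous compounding: the factor to exercise time k is exp(-sum of the first k rates))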
self.discount_factors = np.exp(-np.cumsum(interest_rates))
def test_loop_condition(self):
"""Tests that the loop will stop countdown at zero and not before."""
self.assertTrue(lsm.lsm_loop_cond(1, None))
self.assertFalse(lsm.lsm_loop_cond(0, None))
def test_continuation_value(self):
"""Tests continuation value returns the discounted sum of later payoffs."""
exercise_index = 2
for dtype in (np.float32, np.float64):
discount_factors = tf.constant(
[[1.0, 0.9, 0.8, 0.7, 0.6]], dtype=dtype)
cashflow = tf.ones(shape=[10, 5, 4], dtype=dtype)
continuation_value = lsm.continuation_value_fn(cashflow,
discount_factors,
exercise_index)
expected_continuation = 1.625 * np.ones([10, 5])
self.assertAllClose(
continuation_value, expected_continuation, rtol=1e-8, atol=1e-8)
def test_expected_continuation(self):
"""Tests that expected continuation works in V=1 case.
In particular this verifies that the regression done to get the expected
continuation value is performed on those elements which have a positive
exercise value.
"""
for dtype in (np.float32, np.float64):
a = tf.range(start=-2, limit=3, delta=1, dtype=dtype)
design = tf.concat([a, a], axis=0)
design = tf.concat([[tf.ones_like(design), design]], axis=1)
# These values ensure that the expected continuation value is `(1,...,1).`
exercise_now = tf.expand_dims(
tf.concat([tf.ones_like(a), tf.zeros_like(a)], axis=0), -1)
cashflow = tf.expand_dims(
tf.concat([tf.ones_like(a), -tf.ones_like(a)], axis=0), -1)
expected_exercise = lsm.expected_exercise_fn(
design, cashflow, exercise_now)
self.assertAllClose(expected_exercise, tf.ones_like(cashflow))
def test_european_option_put(self):
"""Tests that LSM price of European put option is computed as expected."""
# This is the same example as in Section 1 of
# Longstaff, F.A. and Schwartz, E.S., 2001. Valuing American options by
# simulation: a simple least-squares approach. The review of financial
# studies, 14(1), pp.113-147.
basis_fn = lsm.make_polynomial_basis(2)
for dtype in (np.float32, np.float64):
payoff_fn = payoff.make_basket_put_payoff([1.1], dtype=dtype)
# Option price
european_put_price = lsm.least_square_mc(
self.samples, [3], payoff_fn, basis_fn,
discount_factors=[self.discount_factors[-1]], dtype=dtype)
self.assertAllClose(european_put_price, [0.0564],
rtol=1e-4, atol=1e-4)
def test_american_option_put(self):
"""Tests that LSM price of American put option is computed as expected."""
# This is the same example as in Section 1 of
# Longstaff, F.A. and Schwartz, E.S., 2001. Valuing American options by
# simulation: a simple least-squares approach. The review of financial
# studies, 14(1), pp.113-147.
basis_fn = lsm.make_polynomial_basis(2)
for dtype in (np.float32, np.float64):
payoff_fn = payoff.make_basket_put_payoff([1.1], dtype=dtype)
# Option price
american_put_price = lsm.least_square_mc(
self.samples, [1, 2, 3], payoff_fn, basis_fn,
discount_factors=self.discount_factors, dtype=dtype)
self.assertAllClose(american_put_price, [0.1144],
rtol=1e-4, atol=1e-4)
def test_american_basket_option_put(self):
"""Tests the LSM price of American Basket put option."""
# This is the same example as in Section 1 of
# Longstaff, F.A. and Schwartz, E.S., 2001. Valuing American options by
# simulation: a simple least-squares approach. The review of financial
# studies, 14(1), pp.113-147.
# This is the minimum number of basis functions for the tests to pass.
basis_fn = lsm.make_polynomial_basis(10)
exercise_times = [1, 2, 3]
dtype = np.float64
payoff_fn = payoff.make_basket_put_payoff([1.1, 1.2, 1.3], dtype=dtype)
# Create a 2-d process which is simply follows the `samples` paths:
samples = tf.convert_to_tensor(self.samples, dtype=dtype)
samples_2d = tf.concat([samples, samples], -1)
# Price American basket option
american_basket_put_price = lsm.least_square_mc(
samples_2d, exercise_times, payoff_fn, basis_fn,
discount_factors=self.discount_factors, dtype=dtype)
# Since the marginal processes of `samples_2d` are 100% correlated, the
# price should be the same as of the American option computed for
# `samples`
american_put_price = lsm.least_square_mc(
self.samples, exercise_times, payoff_fn, basis_fn,
discount_factors=self.discount_factors, dtype=dtype)
self.assertAllClose(american_basket_put_price, american_put_price,
rtol=1e-4, atol=1e-4)
self.assertAllEqual(american_basket_put_price.shape, [3])
if __name__ == '__main__':
tf.test.main()
|
{"hexsha": "a7d713c4ecd23cb48a3a61295630f53aa6ab74ce", "size": 6876, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf_quant_finance/experimental/lsm_algorithm/lsm_test.py", "max_stars_repo_name": "slowy07/tf-quant-finance", "max_stars_repo_head_hexsha": "0976f720fb58a2d7bfd863640c12a2425cd2f94f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3138, "max_stars_repo_stars_event_min_datetime": "2019-07-24T21:43:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:11:09.000Z", "max_issues_repo_path": "tf_quant_finance/experimental/lsm_algorithm/lsm_test.py", "max_issues_repo_name": "Aarif1430/tf-quant-finance", "max_issues_repo_head_hexsha": "9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 63, "max_issues_repo_issues_event_min_datetime": "2019-09-07T19:16:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T19:29:40.000Z", "max_forks_repo_path": "tf_quant_finance/experimental/lsm_algorithm/lsm_test.py", "max_forks_repo_name": "Aarif1430/tf-quant-finance", "max_forks_repo_head_hexsha": "9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 423, "max_forks_repo_forks_event_min_datetime": "2019-07-26T21:28:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T13:07:44.000Z", "avg_line_length": 45.2368421053, "max_line_length": 95, "alphanum_fraction": 0.6730657359, "include": true, "reason": "import numpy", "num_tokens": 1832}
|
[STATEMENT]
lemma cmp\<^sub>U\<^sub>P_ide_simps [simp]:
assumes "B.ide (fst fg)" and "B.ide (snd fg)" and "src\<^sub>B (fst fg) = trg\<^sub>B (snd fg)"
shows "Dom (cmp\<^sub>U\<^sub>P fg) = \<^bold>\<langle>fst fg\<^bold>\<rangle> \<^bold>\<star> \<^bold>\<langle>snd fg\<^bold>\<rangle>"
and "Cod (cmp\<^sub>U\<^sub>P fg) = \<^bold>\<langle>fst fg \<star>\<^sub>B snd fg\<^bold>\<rangle>"
and "Map (cmp\<^sub>U\<^sub>P fg) = fst fg \<star>\<^sub>B snd fg"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Dom (cmp\<^sub>U\<^sub>P fg) = \<^bold>\<langle>fst fg\<^bold>\<rangle> \<^bold>\<star> \<^bold>\<langle>snd fg\<^bold>\<rangle> &&& Cod (cmp\<^sub>U\<^sub>P fg) = \<^bold>\<langle>fst fg \<star>\<^sub>B snd fg\<^bold>\<rangle> &&& Map (cmp\<^sub>U\<^sub>P fg) = fst fg \<star>\<^sub>B snd fg
[PROOF STEP]
using assms B.VV.ide_char\<^sub>S\<^sub>b\<^sub>C B.VV.arr_char\<^sub>S\<^sub>b\<^sub>C
[PROOF STATE]
proof (prove)
using this:
B.ide (fst fg)
B.ide (snd fg)
src\<^sub>B (fst fg) = trg\<^sub>B (snd fg)
B.VV.ide ?a = (B.VV.arr ?a \<and> B.VxV.ide ?a)
B.VV.arr ?f = (B.arr (fst ?f) \<and> B.arr (snd ?f) \<and> src\<^sub>B (fst ?f) = trg\<^sub>B (snd ?f))
goal (1 subgoal):
1. Dom (cmp\<^sub>U\<^sub>P fg) = \<^bold>\<langle>fst fg\<^bold>\<rangle> \<^bold>\<star> \<^bold>\<langle>snd fg\<^bold>\<rangle> &&& Cod (cmp\<^sub>U\<^sub>P fg) = \<^bold>\<langle>fst fg \<star>\<^sub>B snd fg\<^bold>\<rangle> &&& Map (cmp\<^sub>U\<^sub>P fg) = fst fg \<star>\<^sub>B snd fg
[PROOF STEP]
by auto
|
{"llama_tokens": 710, "file": "Bicategory_Strictness", "length": 2}
|
# -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from lifelines.fitters import UnivariateFitter
from lifelines.utils import (
_preprocess_inputs,
_additive_estimate,
_to_array,
StatError,
inv_normal_cdf,
median_survival_times,
check_nans_or_infs,
StatisticalWarning,
coalesce,
CensoringType,
)
from lifelines.plotting import plot_loglogs, _plot_estimate
class KaplanMeierFitter(UnivariateFitter):
"""
Class for fitting the Kaplan-Meier estimate for the survival function.
Parameters
----------
alpha: float, option (default=0.05)
The alpha value associated with the confidence intervals.
Examples
--------
>>> from lifelines import KaplanMeierFitter
>>> from lifelines.datasets import load_waltons
>>> waltons = load_waltons()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(waltons['T'], waltons['E'])
>>> kmf.plot()
Attributes
----------
survival_function_ : DataFrame
The estimated survival function (with custom timeline if provided)
median_ : float
        The estimated median time to event, np.inf if it doesn't exist.
confidence_interval_ : DataFrame
The lower and upper confidence intervals for the survival function. An alias of
``confidence_interval_survival_function_``
confidence_interval_survival_function_ : DataFrame
The lower and upper confidence intervals for the survival function. An alias of
``confidence_interval_``
    cumulative_density_ : DataFrame
        The estimated cumulative density function (with custom timeline if provided)
    confidence_interval_cumulative_density_ : DataFrame
        The lower and upper confidence intervals for the cumulative density
durations: array
The durations provided
event_observed: array
The event_observed variable provided
timeline: array
The time line to use for plotting and indexing
entry: array or None
The entry array provided, or None
event_table: DataFrame
A summary of the life table
"""
def fit(
self,
durations,
event_observed=None,
timeline=None,
entry=None,
label="KM_estimate",
left_censorship=False,
alpha=None,
ci_labels=None,
weights=None,
): # pylint: disable=too-many-arguments,too-many-locals
"""
Fit the model to a right-censored dataset
Parameters
----------
durations: an array, list, pd.DataFrame or pd.Series
length n -- duration subject was observed for
event_observed: an array, list, pd.DataFrame, or pd.Series, optional
            True if the death was observed, False if the event was lost (right-censored). Defaults to all True if event_observed is None
timeline: an array, list, pd.DataFrame, or pd.Series, optional
            return the best estimate at the values in timelines (positively increasing)
entry: an array, list, pd.DataFrame, or pd.Series, optional
            relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
            entered the study when they were "born".
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.
left_censorship: bool, optional (default=False)
Deprecated, use ``fit_left_censoring``
ci_labels: tuple, optional
            add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha>
weights: an array, list, pd.DataFrame, or pd.Series, optional
if providing a weighted dataset. For example, instead
of providing every subject as a single element of `durations` and `event_observed`, one could
            weigh subjects differently.
Returns
-------
self: KaplanMeierFitter
            self with new properties like ``survival_function_``, ``plot()``, ``median_``
"""
if left_censorship:
warnings.warn(
"kwarg left_censorship is deprecated and will be removed in a future release. Please use ``.fit_left_censoring`` instead.",
DeprecationWarning,
)
self._censoring_type = CensoringType.RIGHT
return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)
def fit_left_censoring(
self,
durations,
event_observed=None,
timeline=None,
entry=None,
label="KM_estimate",
alpha=None,
ci_labels=None,
weights=None,
):
"""
Fit the model to a left-censored dataset
Parameters
----------
durations: an array, list, pd.DataFrame or pd.Series
length n -- duration subject was observed for
event_observed: an array, list, pd.DataFrame, or pd.Series, optional
            True if the death was observed, False if the event was lost (left-censored). Defaults to all True if event_observed is None
timeline: an array, list, pd.DataFrame, or pd.Series, optional
            return the best estimate at the values in timelines (positively increasing)
entry: an array, list, pd.DataFrame, or pd.Series, optional
            relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
            entered the study when they were "born".
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.
ci_labels: tuple, optional
            add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha>
weights: an array, list, pd.DataFrame, or pd.Series, optional
if providing a weighted dataset. For example, instead
of providing every subject as a single element of `durations` and `event_observed`, one could
            weigh subjects differently.
Returns
-------
self: KaplanMeierFitter
            self with new properties like ``survival_function_``, ``plot()``, ``median_``
"""
self._censoring_type = CensoringType.LEFT
return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)
def _fit(
self,
durations,
event_observed=None,
timeline=None,
entry=None,
label="KM_estimate",
alpha=None,
ci_labels=None,
weights=None,
): # pylint: disable=too-many-arguments,too-many-locals
"""
Parameters
----------
durations: an array, list, pd.DataFrame or pd.Series
length n -- duration subject was observed for
event_observed: an array, list, pd.DataFrame, or pd.Series, optional
            True if the death was observed, False if the event was lost (censored). Defaults to all True if event_observed is None
timeline: an array, list, pd.DataFrame, or pd.Series, optional
            return the best estimate at the values in timelines (positively increasing)
entry: an array, list, pd.DataFrame, or pd.Series, optional
            relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
            entered the study when they were "born".
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.
ci_labels: tuple, optional
            add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha>
weights: an array, list, pd.DataFrame, or pd.Series, optional
if providing a weighted dataset. For example, instead
of providing every subject as a single element of `durations` and `event_observed`, one could
            weigh subjects differently.
Returns
-------
self: KaplanMeierFitter
            self with new properties like ``survival_function_``, ``plot()``, ``median_``
"""
self._check_values(durations)
if event_observed is not None:
self._check_values(event_observed)
self._label = label
if weights is not None:
weights = np.asarray(weights)
if (weights.astype(int) != weights).any():
warnings.warn(
"""It looks like your weights are not integers, possibly propensity scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
or "Adjusted Kaplan-Meier estimator and log-rank test with inverse probability of treatment weighting for survival data."
""",
StatisticalWarning,
)
        # if the user is interested in left-censorship, we return the cumulative_density_, not the survival_function_
is_left_censoring = self._censoring_type == CensoringType.LEFT
primary_estimate_name = "survival_function_" if not is_left_censoring else "cumulative_density_"
secondary_estimate_name = "cumulative_density_" if not is_left_censoring else "survival_function_"
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = _preprocess_inputs(
durations, event_observed, timeline, entry, weights
)
alpha = alpha if alpha else self.alpha
log_estimate, cumulative_sq_ = _additive_estimate(
self.event_table, self.timeline, self._additive_f, self._additive_var, is_left_censoring
)
if entry is not None:
            # a serious problem with KM is that when the sample size is small and there are too few early
            # truncation times, it may happen that the number of patients at risk equals the number of deaths.
# we adjust for this using the Breslow-Fleming-Harrington estimator
n = self.event_table.shape[0]
net_population = (self.event_table["entrance"] - self.event_table["removed"]).cumsum()
if net_population.iloc[: int(n / 2)].min() == 0:
ix = net_population.iloc[: int(n / 2)].idxmin()
raise StatError(
"""There are too few early truncation times and too many events. S(t)==0 for all t>%g. Recommend BreslowFlemingHarringtonFitter."""
% ix
)
# estimation
setattr(self, primary_estimate_name, pd.DataFrame(np.exp(log_estimate), columns=[self._label]))
setattr(self, secondary_estimate_name, pd.DataFrame(1 - np.exp(log_estimate), columns=[self._label]))
self.__estimate = getattr(self, primary_estimate_name)
self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)
self.median_ = median_survival_times(self.__estimate, left_censorship=is_left_censoring)
self._cumulative_sq_ = cumulative_sq_
setattr(self, "confidence_interval_" + primary_estimate_name, self.confidence_interval_)
setattr(self, "confidence_interval_" + secondary_estimate_name, 1 - self.confidence_interval_)
# estimation methods
self._estimation_method = primary_estimate_name
self._estimate_name = primary_estimate_name
self._predict_label = label
self._update_docstrings()
return self
def _check_values(self, array):
check_nans_or_infs(array)
def plot_loglogs(self, *args, **kwargs):
r"""
Plot :math:`\log(S(t))` against :math:`\log(t)`
"""
return plot_loglogs(self, *args, **kwargs)
def survival_function_at_times(self, times, label=None):
"""
Return a Pandas series of the predicted survival value at specific times
Parameters
-----------
times: iterable or float
Returns
--------
pd.Series
"""
label = coalesce(label, self._label)
return pd.Series(self.predict(times), index=_to_array(times), name=label)
def cumulative_density_at_times(self, times, label=None):
"""
Return a Pandas series of the predicted cumulative density at specific times
Parameters
-----------
times: iterable or float
Returns
--------
pd.Series
"""
label = coalesce(label, self._label)
return pd.Series(1 - self.predict(times), index=_to_array(times), name=label)
def plot_survival_function(self, **kwargs):
"""Alias of ``plot``"""
return _plot_estimate(
self,
estimate=self.survival_function_,
confidence_intervals=self.confidence_interval_survival_function_,
**kwargs
)
def plot_cumulative_density(self, **kwargs):
"""
Plots a pretty figure of {0}.{1}
Matplotlib plot arguments can be passed in inside the kwargs, plus
Parameters
-----------
show_censors: bool
place markers at censorship events. Default: False
censor_styles: bool
If show_censors, this dictionary will be passed into the plot call.
ci_alpha: bool
the transparency level of the confidence interval. Default: 0.3
ci_force_lines: bool
force the confidence intervals to be line plots (versus default shaded areas). Default: False
ci_show: bool
show confidence intervals. Default: True
ci_legend: bool
if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False
at_risk_counts: bool
show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False
loc: slice
specify a time-based subsection of the curves to plot, ex:
>>> model.plot(loc=slice(0.,10.))
will plot the time values between t=0. and t=10.
iloc: slice
specify a location-based subsection of the curves to plot, ex:
>>> model.plot(iloc=slice(0,10))
will plot the first 10 time points.
invert_y_axis: bool
boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``)
Returns
-------
ax:
a pyplot axis object
"""
return _plot_estimate(
self,
estimate=self.cumulative_density_,
confidence_intervals=self.confidence_interval_cumulative_density_,
**kwargs
)
def _bounds(self, cumulative_sq_, alpha, ci_labels):
# This method calculates confidence intervals using the exponential Greenwood formula.
# See https://www.math.wustl.edu/%7Esawyer/handouts/greenwood.pdf
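        # Spelled out (explanatory note): with v = log(S(t)), the bounds are
        #   exp(-exp(log(-v) +/- z * sqrt(Var) / v)),
        # i.e. a symmetric interval on the log(-log S) scale, which keeps the
        # resulting bounds inside (0, 1), unlike the plain Greenwood interval.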
z = inv_normal_cdf(1 - alpha / 2)
df = pd.DataFrame(index=self.timeline)
v = np.log(self.__estimate.values)
if ci_labels is None:
ci_labels = ["%s_upper_%g" % (self._label, 1 - alpha), "%s_lower_%g" % (self._label, 1 - alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + z * np.sqrt(cumulative_sq_) / v))
df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - z * np.sqrt(cumulative_sq_) / v))
return df
def _additive_f(self, population, deaths):
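        # log of the Kaplan-Meier factor (n_i - d_i) / n_i at each event time;
        # _additive_estimate sums these increments to obtain log S(t)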
np.seterr(invalid="ignore", divide="ignore")
return np.log(population - deaths) - np.log(population)
def _additive_var(self, population, deaths):
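        # Greenwood's formula term d_i / (n_i * (n_i - d_i)); its cumulative
        # sum estimates the variance of log S(t)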
np.seterr(divide="ignore")
return (deaths / (population * (population - deaths))).replace([np.inf], 0)
def plot_cumulative_hazard(self, **kwargs):
raise NotImplementedError(
"The Kaplan-Meier estimator is not used to estimate the cumulative hazard. Try the NelsonAalenFitter or any other parametric model"
)
def plot_hazard(self, **kwargs):
raise NotImplementedError(
"The Kaplan-Meier estimator is not used to estimate the hazard. Try the NelsonAalenFitter or any other parametric model"
)
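# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, self-contained demonstration on synthetic right-censored data;
# the exponential distributions below are made up purely for illustration.
if __name__ == "__main__":
    np.random.seed(0)
    true_times = np.random.exponential(10, size=200)      # latent event times
    censor_times = np.random.exponential(15, size=200)    # latent censoring times
    durations = np.minimum(true_times, censor_times)      # what is actually observed
    observed = true_times <= censor_times                 # True where the event occurred
    kmf = KaplanMeierFitter().fit(durations, observed, label="demo")
    print(kmf.median_)                                    # estimated median survival time
    print(kmf.survival_function_at_times([1.0, 5.0, 10.0]))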
|
{"hexsha": "d48f563ef2b5f12301b00d2c0341d910fda1fd4d", "size": 17486, "ext": "py", "lang": "Python", "max_stars_repo_path": "lifelines/fitters/kaplan_meier_fitter.py", "max_stars_repo_name": "sachinruk/lifelines", "max_stars_repo_head_hexsha": "8de4afb21b69f96d51c3923cb66b9086e50d6944", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lifelines/fitters/kaplan_meier_fitter.py", "max_issues_repo_name": "sachinruk/lifelines", "max_issues_repo_head_hexsha": "8de4afb21b69f96d51c3923cb66b9086e50d6944", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lifelines/fitters/kaplan_meier_fitter.py", "max_forks_repo_name": "sachinruk/lifelines", "max_forks_repo_head_hexsha": "8de4afb21b69f96d51c3923cb66b9086e50d6944", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6487804878, "max_line_length": 174, "alphanum_fraction": 0.6445728011, "include": true, "reason": "import numpy", "num_tokens": 3770}
|
import numpy as np
import scipy as sp
# import cplex as cp
import matplotlib.pyplot as plt
from scipy.integrate import ode
import cobra as cb
# import json
import pandas as pd
import sys
import surfinFBA as surf
import time
start_time = time.time()
from cycler import cycler
from datetime import datetime
#### Microbe A (Brian):
#
# y1 -1-> A -2-> B -3-
# -3/6->C -7->growth
# y2 -4-> D -5-> E -6-
# 5->y3
#
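# Annotation (inferred from the array shapes below, not from surfinFBA docs):
# Gamma1A couples the 3 external metabolites y1..y3 to the 7 internal fluxes,
# Gamma2A is the internal stoichiometry (metabolites A..E x 7 fluxes), and
# lilgammaA selects flux 7 as the growth objective.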
Gamma1A = -np.array([[-1,0,0,0,0,0,0],[0,0,0,-1,0,0,0],[0,0,0,0,1,0,0]])
Gamma2A = np.array([[1,-1,0,0,0,0,0],[0,1,-1,0,0,0,0],[0,0,1,0,0,1,-1],[0,0,0,1,-1,0,0],[0,0,0,0,1,-1,0]])
alsA = np.array([0.5,0.6,0.5])
lbds_exA = np.array([0,0,-100])
upbds_intA = np.array([100,100,100,100,100,100,100])
lbds_intA = np.array([0,0,0,0,0,0,0])
lilgammaA = np.array([0,0,0,0,0,0,1.0])
modelA = surf.SurfMod(Gamma1A,Gamma2A,lilgammaA,lbds_intA,upbds_intA,alsA,lbds_exA,Name = "Brian")
#### Microbe B (Dennis):
#
# y1 -1-> A -2-> Growth
#
#
# y3 -3-> B -4-> DEATH
#
#
Gamma1B = -np.array([[-1,0,0,0],[0,0,0,0],[0,0,-1,0]])
Gamma2B = np.array([[1,-1,0,0],[0,0,1,-1]])
alsB = np.array([0.7,0.6,2])
lbds_exB = np.array([0,0,10])####Need to figure out how to poison! Need positive lower bound, which will have to move with upper bound.
upbds_intB = np.array([100,100,100,100])
lbds_intB = np.array([0,0,0,0])
lilgammaB = np.array([0,1,0,-1]).astype(float)
modelB = surf.SurfMod(Gamma1B,Gamma2B,lilgammaB,lbds_intB,upbds_intB,alsB,lbds_exB,Name = "Dennis")
#### Microbe C (Carl):
#
# y1 -1-> A -2-> B -3-> growth
# 2->y2
Gamma1C = -np.array([[-1,0,0],[0,1,0],[0,0,0]])
Gamma2C = np.array([[1,-1,0],[0,1,-1]])
alsC = np.array([0.5,0.6,0.5])
lbds_exC = np.array([0,-100,0])
upbds_intC = np.array([100,100,100])
lbds_intC = np.array([0,0,0])
lilgammaC = np.array([0,0,1.0])
modelC = surf.SurfMod(Gamma1C,Gamma2C,lilgammaC,lbds_intC,upbds_intC,alsC,lbds_exC,Name = "Carl")
xA_init = 2
xB_init = 2
xC_init = 2
# y0 = [10,0,0]
###USAGE: Surfin_FBA(model_list,x0,y0,dilution_rates,metabolite_inflow,metabolite_dilution,endtime)
x0 = {'Brian':xA_init,'Dennis':xB_init,'Carl':xC_init}
y0 = {'Surfin USA':2,'Pet Sounds':0,'Good Vibrations':0}
x,y,v,t,usage = surf.Surfin_FBA([modelA,modelB,modelC],x0,y0,[1,1,1],[1,0,0],15,metabolite_names = ['Surfin USA','Pet Sounds','Good Vibrations'], detail_activity = 1, report_activity = 1, initres = 0.01,concurrent = False,solver = 'gb')
fig,ax = plt.subplots(5,1,figsize = (10,10),tight_layout = True)
ax[0].set_prop_cycle(cycler(color = ['green', 'red','blue']))
labels1 = []
labels2 = []
for nm,tc in x.items():
ax[0].plot(t,tc)
labels1 +=[nm]
ax[0].legend(labels1,prop={'size': 20})
for nm,tc in y.items():
ax[1].plot(t,tc)
labels2 +=[nm]
ax[1].legend(labels2,prop={'size': 20})
#
for met in y0:
ax[2].plot(usage['Brian'][met],label =met)
ax[3].plot(usage['Dennis'][met],label =met)
ax[4].plot(usage['Carl'][met],label =met)
ax[2].legend()
ax[3].legend()
ax[4].legend()
svfgr = False
if svfgr:
fig.savefig('simulations/toy_community_' + datetime.now().strftime("%B%d%H%M"))
    plt.close(fig)
else:
plt.show()
print("--- %s seconds ---" % (time.time() - start_time))
|
{"hexsha": "c27374bc6894133c8385f5c304e0cb493845e523", "size": 3280, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/surfinFBA/examples/toy_model_examples.py", "max_stars_repo_name": "jdbrunner/surfin_fba", "max_stars_repo_head_hexsha": "1566282ddb628be3914e54b6ccd4468958338699", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-03-12T17:43:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T22:20:19.000Z", "max_issues_repo_path": "build/lib/surfinFBA/examples/toy_model_examples.py", "max_issues_repo_name": "jdbrunner/surfin_fba", "max_issues_repo_head_hexsha": "1566282ddb628be3914e54b6ccd4468958338699", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/lib/surfinFBA/examples/toy_model_examples.py", "max_forks_repo_name": "jdbrunner/surfin_fba", "max_forks_repo_head_hexsha": "1566282ddb628be3914e54b6ccd4468958338699", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7218543046, "max_line_length": 236, "alphanum_fraction": 0.6115853659, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1321}
|
module MOD_WRITMSC
contains
SUBROUTINE WRITMSC (ifl,string,n,char,iflag,idummy,ierr)
implicit real*8(a-h,o-z)
      character string*(n)
character*10 char
ierr=1
if (iflag.eq.-1) then
write (ifl,'(A10)') char
write (ifl,*) string
ierr=0
else
write(6,*) ' MODFILE write mode unknown'
pause
stop
endif
return
end subroutine
end module
|
{"hexsha": "1ccab2dbb73b0df8d81387fa3931ac71c4088271", "size": 427, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "src/writmsc.for", "max_stars_repo_name": "rtagirov/nessy", "max_stars_repo_head_hexsha": "aa6c26243e6231f267b42763e020866da962fdfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/writmsc.for", "max_issues_repo_name": "rtagirov/nessy", "max_issues_repo_head_hexsha": "aa6c26243e6231f267b42763e020866da962fdfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2016-11-21T10:53:14.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-17T18:31:42.000Z", "max_forks_repo_path": "src/writmsc.for", "max_forks_repo_name": "rtagirov/nessy-src", "max_forks_repo_head_hexsha": "aa6c26243e6231f267b42763e020866da962fdfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-15T03:13:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-15T03:13:57.000Z", "avg_line_length": 18.5652173913, "max_line_length": 63, "alphanum_fraction": 0.5784543326, "num_tokens": 126}
|
""" Define baxter environment class FurnitureBaxterEnv. """
from collections import OrderedDict
import numpy as np
from env.furniture import FurnitureEnv
import env.transform_utils as T
class FurnitureBaxterEnv(FurnitureEnv):
"""
Baxter robot environment.
"""
def __init__(self, config):
"""
Args:
config: configurations for the environment.
"""
config.agent_type = 'Baxter'
super().__init__(config)
self._env_config.update({
"success_reward": 100,
})
@property
def observation_space(self):
"""
Returns the observation space.
"""
ob_space = super().observation_space
if self._robot_ob:
if self._control_type == 'impedance':
ob_space['robot_ob'] = [64]
elif self._control_type == 'ik':
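                # per arm: eef position (3) + quaternion (4) + linear velocity (3)
                # + angular velocity (3) + gripper opening distance (1)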
ob_space['robot_ob'] = [(3 + 4 + 3 + 3 + 1) * 2]
return ob_space
@property
def dof(self):
"""
Returns the DoF of the robot.
"""
dof = 0 # 'No' Agent
if self._control_type == 'impedance':
dof = (7 + 2) * 2
elif self._control_type == 'ik':
dof = (3 + 3 + 1) * 2 + 1 # (move, rotate, select) * 2 + connect
return dof
def _step(self, a):
"""
Takes a simulation step with @a and computes reward.
"""
prev_reward, _, old_info = self._compute_reward()
ob, _, done, _ = super()._step(a)
reward, done, info = self._compute_reward()
ctrl_reward = self._ctrl_reward(a)
info['reward_ctrl'] = ctrl_reward
connect_reward = reward - prev_reward
info['reward_connect'] = connect_reward
if self._success:
print('Success!')
reward = ctrl_reward + connect_reward
return ob, reward, done, info
def _reset(self, furniture_id=None, background=None):
"""
Resets simulation.
Args:
furniture_id: ID of the furniture model to reset.
background: name of the background scene to reset.
"""
super()._reset(furniture_id, background)
        # set two bodies for picking or assembling
id1 = self.sim.model.eq_obj1id[0]
id2 = self.sim.model.eq_obj2id[0]
self._target_body1 = self.sim.model.body_id2name(id1)
self._target_body2 = self.sim.model.body_id2name(id2)
def _get_obs(self):
"""
Returns the current observation.
"""
state = super()._get_obs()
# proprioceptive features
if self._robot_ob:
robot_states = OrderedDict()
if self._control_type == 'impedance':
robot_states["joint_pos"] = np.array(
[self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes]
)
robot_states["joint_vel"] = np.array(
[self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes]
)
robot_states["right_gripper_qpos"] = np.array(
[self.sim.data.qpos[x] for x in self._ref_gripper_right_joint_pos_indexes]
)
robot_states["right_gripper_qvel"] = np.array(
[self.sim.data.qvel[x] for x in self._ref_gripper_right_joint_vel_indexes]
)
robot_states["left_gripper_qpos"] = np.array(
[self.sim.data.qpos[x] for x in self._ref_gripper_left_joint_pos_indexes]
)
robot_states["left_gripper_qvel"] = np.array(
[self.sim.data.qvel[x] for x in self._ref_gripper_left_joint_vel_indexes]
)
right_gripper_qpos = [self.sim.data.qpos[x] for x in self._ref_gripper_right_joint_pos_indexes]
left_gripper_qpos = [self.sim.data.qpos[x] for x in self._ref_gripper_left_joint_pos_indexes]
robot_states["right_gripper_dis"] = np.array(
[abs(right_gripper_qpos[0] - right_gripper_qpos[1])]
)
robot_states["left_gripper_dis"] = np.array(
[abs(left_gripper_qpos[0] - left_gripper_qpos[1])]
)
robot_states["right_eef_pos"] = np.array(self.sim.data.site_xpos[self.right_eef_site_id])
robot_states["right_eef_velp"] = np.array(self.sim.data.site_xvelp[self.right_eef_site_id]) # 3-dim
robot_states["right_eef_velr"] = self.sim.data.site_xvelr[self.right_eef_site_id] # 3-dim
robot_states["left_eef_pos"] = np.array(self.sim.data.site_xpos[self.left_eef_site_id])
robot_states["left_eef_velp"] = np.array(self.sim.data.site_xvelp[self.left_eef_site_id]) # 3-dim
robot_states["left_eef_velr"] = self.sim.data.site_xvelr[self.left_eef_site_id] # 3-dim
robot_states["right_eef_quat"] = T.convert_quat(
self.sim.data.get_body_xquat("right_hand"), to="xyzw"
)
robot_states["left_eef_quat"] = T.convert_quat(
self.sim.data.get_body_xquat("left_hand"), to="xyzw"
)
state['robot_ob'] = np.concatenate(
[x.ravel() for _, x in robot_states.items()]
)
return state
def _get_reference(self):
"""
Sets up references to robot joints and objects.
"""
super()._get_reference()
self.l_finger_geom_ids = [
[self.sim.model.geom_name2id(x) for x in self.gripper_left.left_finger_geoms],
[self.sim.model.geom_name2id(x) for x in self.gripper_right.left_finger_geoms]
]
self.r_finger_geom_ids = [
[self.sim.model.geom_name2id(x) for x in self.gripper_left.right_finger_geoms],
[self.sim.model.geom_name2id(x) for x in self.gripper_right.right_finger_geoms]
]
# indices for joints in qpos, qvel
self.robot_joints = list(self.mujoco_robot.joints)
self._ref_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints
]
self._ref_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints
]
# indices for grippers in qpos, qvel
self.gripper_left_joints = list(self.gripper_left.joints)
self._ref_gripper_left_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_left_joints
]
self._ref_gripper_left_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_left_joints
]
self.left_eef_site_id = self.sim.model.site_name2id("l_g_grip_site")
self.gripper_right_joints = list(self.gripper_right.joints)
self._ref_gripper_right_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_right_joints
]
self._ref_gripper_right_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_right_joints
]
self.right_eef_site_id = self.sim.model.site_name2id("grip_site")
# indices for joint pos actuation, joint vel actuation, gripper actuation
self._ref_joint_pos_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("pos")
]
self._ref_joint_vel_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("vel")
]
self._ref_joint_gripper_left_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("gripper_l")
]
self._ref_joint_gripper_right_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("gripper_r")
]
def _compute_reward(self):
"""
Computes reward of the current state.
"""
return super()._compute_reward()
def main():
import argparse
import config.furniture as furniture_config
from util import str2bool
parser = argparse.ArgumentParser()
furniture_config.add_argument(parser)
# change default config for Baxter
parser.add_argument('--seed', type=int, default=123)
parser.add_argument('--debug', type=str2bool, default=False)
parser.set_defaults(render=True)
config, unparsed = parser.parse_known_args()
# create an environment and run manual control of Baxter environment
env = FurnitureBaxterEnv(config)
env.run_manual(config)
if __name__ == "__main__":
main()
|
{"hexsha": "96bc8d52afab50b5358bec17887114d1c22d11d0", "size": 8911, "ext": "py", "lang": "Python", "max_stars_repo_path": "env/furniture_baxter.py", "max_stars_repo_name": "snasiriany/furniture", "max_stars_repo_head_hexsha": "918be936c0bbf954b751a5f7e4d5c14cf0df4442", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-13T21:58:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-13T21:58:28.000Z", "max_issues_repo_path": "env/furniture_baxter.py", "max_issues_repo_name": "snasiriany/furniture", "max_issues_repo_head_hexsha": "918be936c0bbf954b751a5f7e4d5c14cf0df4442", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "env/furniture_baxter.py", "max_forks_repo_name": "snasiriany/furniture", "max_forks_repo_head_hexsha": "918be936c0bbf954b751a5f7e4d5c14cf0df4442", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-03T00:01:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T00:01:10.000Z", "avg_line_length": 35.644, "max_line_length": 111, "alphanum_fraction": 0.6140724947, "include": true, "reason": "import numpy", "num_tokens": 2077}
|
from recourse import ActionSet
import numpy as np
# Test Strategy
# --------------------------------------------------------
# variable types: all, binary, mix
# action_set: all compatible, all conditionally compatible, all immutable, mix
def test_initialization(data):
a = ActionSet(X = data['X'])
b = ActionSet(X = data['X'].values, names = data['X'].columns.tolist())
assert a.name == b.name
def test_y_desired(data):
# initialization checks
a = ActionSet(data['X'], y_desired = 1)
assert a.y_desired == 1
a = ActionSet(data['X'], y_desired = -1)
assert a.y_desired == -1
a = ActionSet(data['X'], y_desired = 0)
assert a.y_desired == -1
# setter checks
a.y_desired = 1.0
assert a.y_desired == 1
a.y_desired = -1.0
assert a.y_desired == -1
a.y_desired = 0.0
assert a.y_desired == -1
def test_align(data, coefficients):
a = ActionSet(X = data['X'])
    # no alignment means flip direction and compatibility are empty
assert a.alignment_known == False
assert np.isnan(a.flip_direction).all()
assert np.isnan(a.compatible).all()
    # aligning sets compatibility and flip direction
a.set_alignment(coefficients)
assert a.alignment_known == True
assert not np.isnan(a.flip_direction).any()
assert not np.isnan(a.compatible).any()
# changing y_desired changes the flip direction
fd = np.array(a.flip_direction)
a.y_desired = -a.y_desired
assert np.all(fd == -np.array(a.flip_direction))
# flipping coefficients changes the flip direction
b = ActionSet(X = data['X'])
b.set_alignment(-coefficients)
assert np.all(fd == -np.array(b.flip_direction))
def test_subset_constraints(data):
if len(data['categorical_names']) == 1:
a = ActionSet(data['X'], y_desired = 1)
# add constraint
assert len(a.constraints) == 0
id = a.add_constraint(constraint_type = 'subset_limit', names = data['onehot_names'], lb = 1, ub = 1)
assert len(a.constraints) == 1
# remove constraint
a.remove_constraint(id)
assert len(a.constraints) == 0
        # add progressively larger constraints
k = len(data['onehot_names'])
for n in range(k):
a.add_constraint(constraint_type = 'subset_limit', names = data['onehot_names'], lb = 0, ub = n)
assert len(a.constraints) == k
return True
|
{"hexsha": "13d7533ba41258aa15eef70c13312fbae34aeb8a", "size": 2525, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_action_set.py", "max_stars_repo_name": "ustunb/actionable-recourse", "max_stars_repo_head_hexsha": "e851de05ad32c077daf037a231addd271fcb1aac", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2018-07-14T08:01:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T14:53:30.000Z", "max_issues_repo_path": "tests/test_action_set.py", "max_issues_repo_name": "chirag126/actionable-recourse", "max_issues_repo_head_hexsha": "e851de05ad32c077daf037a231addd271fcb1aac", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-07-22T20:42:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-19T17:09:48.000Z", "max_forks_repo_path": "tests/test_action_set.py", "max_forks_repo_name": "chirag126/actionable-recourse", "max_forks_repo_head_hexsha": "e851de05ad32c077daf037a231addd271fcb1aac", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2018-07-14T08:52:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T05:10:44.000Z", "avg_line_length": 28.0555555556, "max_line_length": 110, "alphanum_fraction": 0.603960396, "include": true, "reason": "import numpy", "num_tokens": 631}
|
import warnings
import numpy as np
from numba import jitclass, typeof, vectorize, prange, njit, jit  # import the decorator
from numba import int32, float64, void  # import the types
from collections import MutableMapping
def randKernel(spA, spB, seed=10):
    np.random.seed((spA + spB) * seed)
    return np.random.random()
def deltaKernel(spA, spB):
    if spA == spB:
        return 1.
    else:
        return 0.
def Atoms2ChemicalKernelmat(atoms,atoms2=None,chemicalKernel=deltaKernel):
# unique sp in frames 1 and 2
uk1 = []
for frame in atoms:
uk1.extend(frame.get_atomic_numbers())
if atoms2 is not None:
for frame in atoms2:
uk1.extend(frame.get_atomic_numbers())
uk1 = list(set(uk1))
Nsp1 = max(uk1)+1
    # 0 row and col are here but don't matter
    chemicalKernelmat = np.zeros((Nsp1, Nsp1))
for it in uk1:
for jt in uk1:
chemicalKernelmat[it,jt] = chemicalKernel(it,jt)
return chemicalKernelmat
def get_chemicalKernelmatFrames(frames1, frames2=None, chemicalKernel=deltaKernel):
# unique sp in frames 1 and 2
uk1 = []
for frame in frames1:
uk1.extend(frame.get_atomic_numbers())
uk1 = list(set(uk1))
if frames2 is None:
frames2 = frames1
uk2 = uk1
else:
uk2 = []
for frame in frames2:
uk2.extend(frame.get_atomic_numbers())
uk2 = list(set(uk2))
    Nsp1 = max(uk1) + 1
    Nsp2 = max(uk2) + 1
    # 0 row and col are here but don't matter
    chemicalKernelmat = np.zeros((Nsp1, Nsp2))
    for it in uk1:
        for jt in uk2:
            chemicalKernelmat[it, jt] = chemicalKernel(it, jt)
    return chemicalKernelmat
############## MEMORY LEAK WITH PARALLEL=TRUE
@jit(float64[:, :](int32[:, :], float64[:, :, :], float64[:, :]), parallel=False, nopython=True, nogil=True, cache=True)
def nb_partial_kernels2kernel(keys, partial_mats, chemicalKernelmat):
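    # keys:              (K, 4) int32 rows (spA1, spA2, spB1, spB2) labelling the
    #                    species pair of each partial kernel
    # partial_mats:      (K, N, M) float64 stack of per-pair kernel matrices
    # chemicalKernelmat: square float64 matrix of species-species couplings
    # Returns the (N, M) kernel obtained by contracting the stack with the
    # couplings while respecting the pair symmetries handled below.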
K, N, M = partial_mats.shape
kernel = np.zeros((N, M), dtype=np.float64)
for it in range(K):
spA, spB = (keys[it, 0], keys[it, 1]), (keys[it, 2], keys[it, 3])
theta1 = chemicalKernelmat[spA[0], spB[0]] * chemicalKernelmat[spA[1], spB[1]]
theta2 = chemicalKernelmat[spA[1], spB[0]] * chemicalKernelmat[spA[0], spB[1]]
if theta1 == 0. and theta2 == 0.:
continue
# the symmetry of the chemicalKernel and chemical soap vector is a bit messy
if spA[0] != spA[1] and spB[0] != spB[1]:
kernel += theta1 * partial_mats[K, :, :] * 2 + theta2 * partial_mats[K, :, :] * 2
elif (spA[0] == spA[1] and spB[0] != spB[1]) or (spA[0] != spA[1] and spB[0] == spB[1]):
kernel += theta1 * partial_mats[K, :, :] + theta2 * partial_mats[K, :, :]
elif spA[0] == spA[1] and spB[0] == spB[1]:
kernel += theta1 * partial_mats[K, :, :]
return kernel
class PartialKernels(MutableMapping):
def __init__(self, fingerprintsA, fingerprintsB=None, chemicalKernelmat=None, nthreads=4):
self.dtype = 'float64'
self.nthreads = nthreads
        try:
            import mkl
            mkl.set_num_threads(self.nthreads)
        except ImportError:
            warnings.warn('numpy does not seem to be linked to the MKL library, so nthreads is ignored')
self.fingerprintsA = fingerprintsA
self.fingerprints_infoA = self.get_info(fingerprintsA)
pairsA = self.fingerprints_infoA['pairs']
Nframe = len(fingerprintsA)
if fingerprintsB is not None:
self.fingerprintsB = fingerprintsB
self.fingerprints_infoB = self.get_info(fingerprintsB)
pairsB = self.fingerprints_infoB['pairs']
Mframe = len(fingerprintsB)
else:
self.fingerprintsB = None
pairsB = pairsA
Mframe = Nframe
# initialize data container
self._storage = {pA + pB: np.zeros((Nframe, Mframe), dtype=self.dtype)
for pA in pairsA for pB in pairsB}
self.set_partial_kernels()
self.chemicalKernelmat = chemicalKernelmat
self.set_kernel(chemicalKernelmat)
def get_dense_values(self):
values = np.asarray(self.values())
return values
def get_dense_keys(self):
keys = np.asarray(self.keys())
return keys
def get_dense_arrays(self):
return self.get_dense_keys(), self.get_dense_values()
def get_info(self, fingerprints):
ii = 0
ll = []
fings_info = {}
for it, fing1 in enumerate(fingerprints):
ll.extend(fing1['AVG'].keys())
for pA in fing1['AVG'].keys():
ii += 1
fings_info['types'] = np.unique(ll)
fings_info['lin_length'] = ii
fings_info['pairs'] = [(t1, t2) for t1 in fings_info['types']
for t2 in fings_info['types'] if t1 <= t2]
soapParams = fingerprints[0].get_soapParams()
nmax = soapParams['nmax']
lmax = soapParams['lmax']
fings_info['soapLen'] = nmax ** 2 * (lmax + 1)
fings_info['dtype'] = fingerprints[0]['AVG'].dtype
return fings_info
def set_kernel(self, chemicalKernelmat):
if chemicalKernelmat is None:
self.kernel = None
else:
_keys, _partial_mats = self.get_dense_arrays()
self.chemicalKernelmat = chemicalKernelmat
self.kernel = nb_partial_kernels2kernel(_keys, _partial_mats, chemicalKernelmat)
def get_kernel(self):
return self.kernel
def get_linear_array(self, fingerprints, fings_info):
dtype = fings_info['dtype']
lin_length = fings_info['lin_length']
soapLen = fings_info['soapLen']
pairs = fings_info['pairs']
lin_array = np.zeros((lin_length, soapLen), dtype=self.dtype)
pair2ids = {pA: {'frame_ids': [], 'linear_ids': []} for pA in pairs}
jj = 0
for it, fing1 in enumerate(fingerprints):
for pA, pp in fing1['AVG'].iteritems():
lin_array[jj] = np.asarray(pp, dtype=self.dtype)
pair2ids[pA]['frame_ids'].append(it)
pair2ids[pA]['linear_ids'].append(jj)
jj += 1
return lin_array, pair2ids
def set_partial_kernels_from_linear_prod(self, linear_prod, pair2idsA, pair2idsB):
for pA, itemA in pair2idsA.iteritems():
it_idsA, jj_idsA = itemA['frame_ids'], itemA['linear_ids']
for pB, itemB in pair2idsB.iteritems():
it_idsB, jj_idsB = itemB['frame_ids'], itemB['linear_ids']
self._storage[pA + pB][np.ix_(it_idsA, it_idsB)] = linear_prod[np.ix_(jj_idsA, jj_idsB)]
def set_partial_kernels(self):
lin_arrayA, pair2idsA = self.get_linear_array(self.fingerprintsA, self.fingerprints_infoA)
if self.fingerprintsB is None:
lin_arrayB, pair2idsB = lin_arrayA, pair2idsA
else:
lin_arrayB, pair2idsB = self.get_linear_array(self.fingerprintsB, self.fingerprints_infoB)
linear_prod = np.dot(lin_arrayA, lin_arrayB.T)
self.set_partial_kernels_from_linear_prod(linear_prod, pair2idsA, pair2idsB)
def __cmp__(self, dict):
return cmp(self._storage, dict)
def __contains__(self, item):
return item in self._storage
def __iter__(self):
for key in self.keys():
yield key
def __unicode__(self):
return unicode(repr(self._storage))
def __del__(self):
for values in self.__dict__.values():
del values
def __setitem__(self, key, item):
# asarray does not copy if the types are matching
self._storage[key] = np.asarray(item, dtype=self.dtype)
def __getitem__(self, key):
return self._storage[key]
def get(self, key):
return self._storage[key]
def __repr__(self):
return repr(self._storage)
def __len__(self):
return len(self.keys())
def __delitem__(self, key):
del self._storage[key]
def has_key(self, key):
return self._storage.has_key(key)
def pop(self, key, d=None):
return self._storage.pop(key, d)
def update(self, *args, **kwargs):
return self._storage.update(*args, **kwargs)
def keys(self):
return self._storage.keys()
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
class PartialKernels_slow(MutableMapping):
def __init__(self, fingerprintsA, fingerprintsB=None, chemicalKernelmat=None, nthreads=4):
self.dtype = 'float64'
self.nthreads = nthreads
        try:
            import mkl
            mkl.set_num_threads(self.nthreads)
        except ImportError:
            warnings.warn('numpy does not seem to be linked to the MKL library, so nthreads is ignored')
self.fingerprintsA = fingerprintsA
self.fingerprints_infoA = self.get_info(fingerprintsA)
pairsA = self.fingerprints_infoA['pairs']
Nframe = len(fingerprintsA)
if fingerprintsB is not None:
self.fingerprintsB = fingerprintsB
self.fingerprints_infoB = self.get_info(fingerprintsB)
pairsB = self.fingerprints_infoB['pairs']
Mframe = len(fingerprintsB)
else:
pairsB = pairsA
Mframe = Nframe
self._storage = {pA + pB: np.zeros((Nframe, Mframe), dtype=self.dtype)
for pA in pairsA for pB in pairsB}
self.set_partial_kernels(fingerprintsA, fingerprintsB)
self.set_kernel(chemicalKernelmat)
def __del__(self):
for values in self.__dict__.values():
del values
def __setitem__(self, key, item):
# asarray does not copy if the types are matching
self._storage[key] = np.asarray(item, dtype=self.dtype)
def __getitem__(self, key):
return self._storage[key]
def get(self, key):
return self._storage[key]
def __repr__(self):
return repr(self._storage)
def __len__(self):
return len(self.keys())
def __delitem__(self, key):
del self._storage[key]
def has_key(self, key):
return self._storage.has_key(key)
def pop(self, key, d=None):
return self._storage.pop(key, d)
def update(self, *args, **kwargs):
return self._storage.update(*args, **kwargs)
def keys(self):
return self._storage.keys()
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def get_dense_values(self):
values = np.asarray(self.values())
return values
def get_dense_keys(self):
keys = np.asarray(self.keys())
return keys
def get_dense_arrays(self):
return self.get_dense_keys(), self.get_dense_values()
def __cmp__(self, dict):
return cmp(self._storage, dict)
def __contains__(self, item):
return item in self._storage
def __iter__(self):
for key in self.keys():
yield key
def __unicode__(self):
return unicode(repr(self._storage))
def get_info(self, fingerprints):
ii = 0
ll = []
fings_info = {}
for it, fing1 in enumerate(fingerprints):
ll.extend(fing1['AVG'].keys())
for pA in fing1['AVG'].keys():
ii += 1
fings_info['types'] = np.unique(ll)
fings_info['lin_length'] = ii
fings_info['pairs'] = [(t1, t2) for t1 in fings_info['types']
for t2 in fings_info['types'] if t1 <= t2]
soapParams = fingerprints[0].get_soapParams()
nmax = soapParams['nmax']
lmax = soapParams['lmax']
fings_info['soapLen'] = nmax ** 2 * (lmax + 1)
fings_info['dtype'] = fingerprints[0]['AVG'].dtype
return fings_info
def get_kernel(self):
return self.kernel
def set_kernel(self, chemicalKernelmat):
if chemicalKernelmat is None:
self.kernel = None
else:
kk = self.keys()
N, M = self[kk[0]].shape
kernel = np.zeros((N, M), dtype=self.dtype)
for key, mat in self.iteritems():
spA, spB = (key[0], key[1]), (key[2], key[3])
theta1 = chemicalKernelmat[spA[0], spB[0]] * chemicalKernelmat[spA[1], spB[1]]
theta2 = chemicalKernelmat[spA[1], spB[0]] * chemicalKernelmat[spA[0], spB[1]]
if theta1 == 0. and theta2 == 0.:
continue
# the symmetry of the chemicalKernel and chemical soap vector is a bit messy
if spA[0] != spA[1] and spB[0] != spB[1]:
kernel += theta1 * mat * 2 + theta2 * mat * 2
elif (spA[0] == spA[1] and spB[0] != spB[1]) or (spA[0] != spA[1] and spB[0] == spB[1]):
kernel += theta1 * mat + theta2 * mat
elif spA[0] == spA[1] and spB[0] == spB[1]:
kernel += theta1 * mat
self.kernel = kernel
def set_partial_kernels(self, fingerprintsA, fingerprintsB=None):
fings_infoA = self.get_info(fingerprintsA)
if fingerprintsB is None:
fingerprintsB = fingerprintsA
fings_infoB = fings_infoA
else:
fings_infoB = self.get_info(fingerprintsB)
Nframe, Mframe = len(fingerprintsA), len(fingerprintsB)
pairsA = fings_infoA['pairs']
pairsB = fings_infoB['pairs']
partial_kernels = {pA + pB: np.zeros((Nframe, Mframe), dtype=np.float64) for pA in pairsA for pB in pairsB}
for it, fing1 in enumerate(fingerprintsA):
for jt, fing2 in enumerate(fingerprintsB):
for sk1, pp1 in fing1['AVG'].iteritems():
for sk2, pp2 in fing2['AVG'].iteritems():
partial_kernels[sk1 + sk2][it, jt] = np.dot(pp1, pp2)
        self._storage = partial_kernels
        return partial_kernels
def test_implementation(fingerprintsA, fingerprintsB=None):
partial_kernels = PartialKernels(fingerprintsA, fingerprintsB)
partial_kernels_ref = PartialKernels_slow(fingerprintsA, fingerprintsB)
is_equal = []
not_equal = []
for key in partial_kernels_ref:
eee = np.allclose(partial_kernels_ref[key], partial_kernels[key])
is_equal.append((key, eee))
if not eee:
not_equal.append((key, eee))
if len(not_equal) == 0:
print('partial matrices are identical')
else:
print('partial matrices are not identical in:')
print(not_equal)
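# --- Illustrative sketch (not part of the original module) ---
# A tiny synthetic call to the jit-compiled contraction: one partial kernel
# labelled by the species pair (1,1)x(1,1), so with an identity chemical
# kernel the result equals the partial matrix itself.
if __name__ == '__main__':
    demo_keys = np.array([[1, 1, 1, 1]], dtype=np.int32)
    demo_partials = np.ones((1, 2, 2), dtype=np.float64)
    demo_coupling = np.eye(2, dtype=np.float64)
    print(nb_partial_kernels2kernel(demo_keys, demo_partials, demo_coupling))
    # expected output: [[1. 1.] [1. 1.]]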
|
{"hexsha": "ce526387a645487568896171bfad93e040452e99", "size": 14714, "ext": "py", "lang": "Python", "max_stars_repo_path": "libmatch/chemical_kernel.py", "max_stars_repo_name": "cosmo-epfl/glosim2", "max_stars_repo_head_hexsha": "a1a919cdc6a618fea60bcc3ce43de47d69d5f5f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2018-03-16T19:57:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-12T23:26:59.000Z", "max_issues_repo_path": "libmatch/chemical_kernel.py", "max_issues_repo_name": "cosmo-epfl/glosim2", "max_issues_repo_head_hexsha": "a1a919cdc6a618fea60bcc3ce43de47d69d5f5f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-03-17T08:35:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-20T12:22:59.000Z", "max_forks_repo_path": "libmatch/chemical_kernel.py", "max_forks_repo_name": "cosmo-epfl/glosim2", "max_forks_repo_head_hexsha": "a1a919cdc6a618fea60bcc3ce43de47d69d5f5f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-03-11T22:12:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-18T03:26:23.000Z", "avg_line_length": 32.6977777778, "max_line_length": 120, "alphanum_fraction": 0.5986135653, "include": true, "reason": "import numpy,from numba", "num_tokens": 3909}
|
using FileIO, Compat
import Compat.String
import FileIO: LOAD, SAVE, OSX, OS
const fs = open(Pkg.dir("FileIO", "docs", "registry.md"), "w")
function pkg_url(pkgname)
result = readchomp(Pkg.dir("METADATA", string(pkgname), "url"))
g = "git://"
if startswith(result, g)
return string("http://", result[length(g):end])
end
result
end
library2string(x) = "[$(x)]($(pkg_url(x)))"
extension2string(x) = join(map(string, x), ", ")
extension2string(x::AbstractString) = x
os2string(x::Vector) = isempty(x) ? "**all** platforms " : join(map(os2string, x), ", ")
os2string{O <: OS}(os::Type{O}) = "**$(O.name.name)**"
magic2string(x::Function) = "has detection function"
magic2string(x::Tuple) = isempty(x) ? "only extension": string(x)
magic2string(x) = string(x)
function loadsave2string(load_save_libraries)
io = IOBuffer()
loader_str, saver_str = " ", " "
for predicates in load_save_libraries
library = shift!(predicates)
os, loadsave = FileIO.split_predicates(predicates)
if isempty(loadsave)
print(io, "loads and saves on **all** platforms with ", library2string(library), " ")
elseif (LOAD in loadsave)
print(io, "loads with ", library2string(library), " on: ", os2string(os), " ")
        elseif (SAVE in loadsave)
            print(io, "saves with ", library2string(library), " on: ", os2string(os), " ")
end
end
takebuf_string(io)
end
function add_format{Sym}(::Type{DataFormat{Sym}}, magic, extension, io_libs...)
println(fs, "| $(Sym) | $(extension2string(extension)) | $(loadsave2string(io_libs)) | $(magic2string(magic)) |")
end
function add_format{sym}(fmt::Type{DataFormat{sym}}, magic::@compat(Union{Tuple,AbstractVector,String}), extension)
println(sym)
end
# for multiple magic bytes
function add_format{sym, T <: Vector{UInt8}, N}(fmt::Type{DataFormat{sym}}, magics::NTuple{N, T}, extension)
println(sym)
end
# For when "magic" is supplied as a function (see the HDF5 example in
# registry.jl)
function add_format{sym}(fmt::Type{DataFormat{sym}}, magic, extension)
println(sym)
end
println(fs, """
| Format Name | extensions | IO library | detection or magic number |
| ----------- | ---------- | ---------- | ---------- |""")
include(Pkg.dir("FileIO", "src", "registry.jl"))
close(fs)
|
{"hexsha": "766a810837db23da4cf712ee26d54b84ba742275", "size": 2337, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make_docs.jl", "max_stars_repo_name": "JuliaPackageMirrors/FileIO.jl", "max_stars_repo_head_hexsha": "d4e34014e508da06e03ea838bc5440d6fc02732f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make_docs.jl", "max_issues_repo_name": "JuliaPackageMirrors/FileIO.jl", "max_issues_repo_head_hexsha": "d4e34014e508da06e03ea838bc5440d6fc02732f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make_docs.jl", "max_forks_repo_name": "JuliaPackageMirrors/FileIO.jl", "max_forks_repo_head_hexsha": "d4e34014e508da06e03ea838bc5440d6fc02732f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8695652174, "max_line_length": 117, "alphanum_fraction": 0.6414206247, "num_tokens": 643}
|
# -*- coding: utf-8 -*-
# ---------------------
from typing import *
import pandas as pd
import cv2
import numpy as np
class Joint(object):
"""
a Joint is a keypoint of the human body.
"""
# list of joint names
NAMES = [
'head_top',
'head_center',
'neck',
'right_clavicle',
'right_shoulder',
'right_elbow',
'right_wrist',
'left_clavicle',
'left_shoulder',
'left_elbow',
'left_wrist',
'spine0',
'spine1',
'spine2',
'spine3',
'spine4',
'right_hip',
'right_knee',
'right_ankle',
'left_hip',
'left_knee',
'left_ankle',
]
def __init__(self, joint_row):
"""
:param array: array version of the joint
"""
if isinstance(joint_row,np.ndarray):
array = joint_row
self.frame = int(array[0])
self.person_id = int(array[1])
self.type = int(array[2])
self.x2d = int(array[3])
self.y2d = int(array[4])
self.x3d = array[5]
self.y3d = array[6]
self.z3d = array[7]
self.occ = bool(array[8]) # is this joint occluded?
self.soc = bool(array[9]) # is this joint self-occluded?
self.x_top_left_BB = array[10]
self.y_top_left_BB = array[11]
self.x_bottom_right_BB = array[12]
self.y_bottom_right_BB = array[13]
self.x_2D_person = array[14]
self.y_2D_person = array[15]
self.wears_glasses = array[16]
self.ped_type = array[17]
if isinstance(joint_row, pd.Series):
joint_row = joint_row.astype(int)
self.frame = joint_row["frame_no_cam"]
self.person_id = joint_row["person_id"]
self.type = joint_row["joint_type"]
self.x2d = joint_row["x_2D_joint"]
self.y2d = joint_row["y_2D_joint"]
self.x3d = joint_row["x_3D_joint"]
self.y3d = joint_row["y_3D_joint"]
self.z3d = joint_row["z_3D_joint"]
self.occ = bool(joint_row["joint_occluded"]) # is this joint occluded?
self.soc = bool(joint_row["joint_self_occluded"]) # is this joint self-occluded?
self.x_top_left_BB = joint_row["x_top_left_BB"]
self.y_top_left_BB = joint_row["y_top_left_BB"]
self.x_bottom_right_BB = joint_row["x_bottom_right_BB"]
self.y_bottom_right_BB = joint_row["y_bottom_right_BB"]
self.x_2D_person = joint_row["x_2D_person"]
self.y_2D_person = joint_row["y_2D_person"]
self.wears_glasses = joint_row["wears_glasses"]
self.ped_type = joint_row["ped_type"]
def get_bounding_box_height(self):
return self.y_bottom_right_BB - self.y_top_left_BB
@property
def cam_distance(self):
# type: () -> float
"""
:return: distance of the joint from the camera
"""
# NOTE: camera coords = (0, 0, 0)
return np.sqrt(self.x3d ** 2 + self.y3d ** 2 + self.z3d ** 2)
@property
def is_on_screen(self):
# type: () -> bool
"""
:return: True if the joint is on screen, False otherwise
"""
return (0 <= self.x2d <= 1920) and (0 <= self.y2d <= 1080)
@property
def visible(self):
# type: () -> bool
"""
:return: True if the joint is visible, False otherwise
"""
return not (self.occ or self.soc)
@property
def personPosition(self):
return int(self.x_2D_person),int(self.y_2D_person)
@property
def pos2d(self):
# type: () -> Tuple[int, int]
"""
:return: 2D coordinates of the joints [px]
"""
return (self.x2d, self.y2d)
@property
def pos3d(self):
# type: () -> Tuple[float, float, float]
"""
:return: 3D coordinates of the joints [m]
"""
return (self.x3d, self.y3d, self.z3d)
@property
def color(self):
# type: () -> Tuple[int, int, int]
"""
:return: the color with which to draw the joint;
this color is chosen based on the visibility of the joint:
(1) occluded joint --> RED
(2) self-occluded joint --> ORANGE
(2) visible joint --> GREEN
"""
if self.occ:
return (255, 0, 42) # red
elif self.soc:
return (255, 128, 42) # orange
else:
return (0, 255, 42) # green
@property
def radius(self):
# type: () -> int
"""
:return: appropriate radius [px] for the circle that represents the joint;
        this radius is a function of the height of the person's bounding box
"""
#radius = int(round(100.0*np.power(10, 1 - (self.cam_distance / 20.0))))
bbox_height = self.get_bounding_box_height()
radius = int(round(bbox_height / 70))
return radius if (radius >= 1) else 1
@property
def name(self):
# type: () -> str
"""
:return: name of the joint (eg: 'neck', 'left_elbow', ...)
"""
return Joint.NAMES[self.type]
def draw(self, image):
# type: (np.ndarray) -> np.ndarray
"""
:param image: image on which to draw the joint
:return: image with the joint
"""
image = cv2.circle(
image, thickness=-1,
center=self.pos2d,
radius=self.radius,
color=self.color,
)
return image
def __str__(self):
visibility = 'visible' if self.visible else 'occluded'
return f'{self.name}|2D:({self.x2d},{self.y2d})|3D:({self.x3d},{self.y3d},{self.z3d})|{visibility}'
__repr__ = __str__
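# --- Illustrative usage sketch (not part of the original module) ---
# Building a Joint from a raw numpy row; every value below is made up.
if __name__ == '__main__':
    row = np.array([0, 7, 2, 960, 540, 0.1, 1.6, 3.2, 0, 0,
                    900, 400, 1020, 700, 960, 550, 0, 0], dtype=float)
    j = Joint(row)
    print(j)                               # neck|2D:(960,540)|3D:(0.1,1.6,3.2)|visible
    print(j.cam_distance, j.is_on_screen)  # ~3.58 True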
|
{"hexsha": "951fd57dc61abe15a62c3db43d53c1fbcda1f12e", "size": 5865, "ext": "py", "lang": "Python", "max_stars_repo_path": "utilities/joint.py", "max_stars_repo_name": "XiaoSanGit/wda_tracker", "max_stars_repo_head_hexsha": "b68ec0edb9daa6cc495815ba9ca549b36eec0369", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-06-23T11:17:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T00:38:09.000Z", "max_issues_repo_path": "utilities/joint.py", "max_issues_repo_name": "XiaoSanGit/wda_tracker", "max_issues_repo_head_hexsha": "b68ec0edb9daa6cc495815ba9ca549b36eec0369", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2020-07-07T03:59:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T04:28:06.000Z", "max_forks_repo_path": "utilities/joint.py", "max_forks_repo_name": "XiaoSanGit/wda_tracker", "max_forks_repo_head_hexsha": "b68ec0edb9daa6cc495815ba9ca549b36eec0369", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-02-14T07:11:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-23T12:47:08.000Z", "avg_line_length": 30.2319587629, "max_line_length": 107, "alphanum_fraction": 0.536743393, "include": true, "reason": "import numpy", "num_tokens": 1558}
|
\section{Menghavan 15: In Goghn\'{i}t hOl\'{e}dhach}
(\textit{Lesson 15: The Spatial System})\\
In the fifteenth lesson, you will learn how the spatial system works in Gal\'{a}thach.
\subsection{Gwepchoprith: Conversation}
\subsubsection{Conversation}
Below is a conversation between several people. One is a woman, Gw\'{e}rudhumna. The others are four men of the Gaulish Coast Guard (Derchun, Bledh\'{\i}nu, Compr\'{\i}nu and Duvnach), her boyfriend Tarthu, and the seadog Cunw\'{o}r. Gaulish people are recorded as having been fond of speaking in riddles.
% TODO: Add Fancy bubbly conversation.
\subsubsection{Colav\'{a}ru \textendash\ Tr\'{e}lav\'{a}ru}
(Conversation \textendash\ Translation)
\begin{itemize}
\item Gw\'{e}rudhumna = wide-dark < Uerodumna
\item Tarthu = the dry one < Tartos
\item Bledh\'{\i}nu = wolf-like person < Bledinos
\item Compr\'{\i}nu = with-wood/tree-person < Comprinnos
\item Derchun = watcher < Dercunos
\item Duvnach = deep-like person < Dubnacos
\item Cunw\'{o}r = seadog < Cunomori
\end{itemize}
Gw\'{e}rudhumna: D\'{\i} wath, map\'{a}th\'{e} ins\'{e} \'{\i}th anel! P\'{e} gaman a hesi s\'{u}?
(Gw\'{e}rudhumna: Good day, boys there low below! How are you [pl.]?)
Derchun: D\'{\i} wath, Gw\'{e}rudhumna ins\'{e} ardhu uchel! Esi ni in dh\'{a}i. Ach ti-s\'{u}\'{e}?
(Derchun: Good day, Gw\'{e}rudhumna there high above! We are well. And your-self?)
Gw: N\'{e} h\'{e}thu mi chwer dh\'{a}i diaman. A hesi s\'{u} gw\'{o} in halis-sin?
(Gw: I have never been better. Are you [pl.] under this cliff?)
Bledh\'{\i}nu: Esi ni. A hesi ti gwer in halis?
(Bledh\'{\i}nu: We are. Are you on the cliff?)
Gw: Esi mi.
(Gw: I am.)
Bl: Gwerthamich.
(Bl: Excellent.)
Gw: Gw\'{e}la mi d\'{\i}\'{a}i aner a h\'{a}pis s\'{u}.
(Gw: I want to come down to see you [pl.].)
Compr\'{\i}nu: N\'{e} dh\'{\i}\'{a}i insin aner! Esi in senthu r\'{e} dhruch. G\'{a}la ni gar uch adhith!
(Compr\'{\i}nu: Don't come down here! The track is very bad. We can shout up to you!)
Gw: Math, peth nep o gw\'{e}la s\'{u}.
(Gw: Fine, whatever you [pl.] want.)
Duvnach: Duch, p\'{e} a chw\'{e}la ti, Gw\'{e}rudhumna?
(Duvnach: So, what do you want, Gw\'{e}rudhumna?)
Gw: R\'{e} chwels\'{\i} mi p\'{e}tha adh\'{u} ma hapis\'{u} s\'{u} m\'{o} garan’wir uchedh Tarthu.
(Gw: I would want to ask you [pl.] if you [pl.] have seen my superior boyfriend Tarthu.)
De: \'{a} ... apis\'{u} ni ch\'{e}.
(De: Aah ... we have seen him.)
Gw: P\'{e} gaman a hesi \'{e}?
(Gw: How is he?)
De: Ne hesi \'{e} d\'{a}isam.
(De: He is not [the] best.)
Bl: Esi \'{e} m\'{e}thamich m\'{e}iu.
(Bl: He is a bit average.)
Co: Esi \'{e} m\'{e}s m\'{e}dhoch, in chw\'{\i}r.
(Co: He is quite bad, in truth.)
Du: R\'{e} ghals\'{\i} ni sp\'{a} och esi \'{e} gwer w\'{e}s co w\'{e}s, cotham.
(Du: We could say that he is worse than bad, even.)
De: M\'{e}na ni och esi \'{o} ch’iachas anedh m\'{e}iu, en vithw\'{\i}ras.
(De: We think that his health is a bit inferior, in reality.)
Bl: Bathw\'{\i}or och esi \'{e} co hanamich co rh\'{e} ghals\'{\i} \'{e} bis.
(Bl: It appears that he is as poor as he could be.)
Co: Galvis mesam \'{o} vith, a ghn\'{\i}a ti.
(Co: Maybe [the] worst of his life, you know.)
Gw: N\'{e} ghn\'{\i}a mi neveth! Esi s\'{u} en lhavar cachu adhim. P\'{e}m\'{a}i a hesi \'{e}? Gw\'{e}la mi \'{a}pis ich\'{e} n\'{u} in gov\'{\i}on!
(Gw: I don't know anything! You’re speaking shit to me. Where is he? I want to see him now, immediately!)
De: Esi \'{e} ins\'{e} pel.
(De: He’s over there.)
Cunw\'{o}r: \'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u}\'{u} ....
(Cunw\'{o}r: Oooooooooooooooo ...)
\subsection{In Goghn\'{\i}t hOl\'{e}dhach: The spatial system}
\subsubsection{Physical spatial aspect}
The physical spatial aspect in Gal\'{a}thach is expressed using a set of opposing values:
uch: up / aner: down
uchel: above, over / anel: below, underneath
ardhu: high / \'{\i}th: low
gwer: on / gw\'{o}: under
Examples from the conversation above:
Di wath, map\'{a}th\'{e} ins\'{e} \'{\i}th anel > Good day, boys there low below
D\'{\i} wath, Gw\'{e}rudhumna ins\'{e} ardhu uchel > Good day, Gw\'{e}rudhumna there high above
A hesi s\'{u} gw\'{o} in halis-sin > Are you [pl.] under this cliff
A hesi ti gwer in halis > Are you on the cliff
Gw\'{e}la mi d\'{\i}\'{a}i aner a h\'{a}pis s\'{u} > I want to come down to see you [pl.]
G\'{a}la ni gar uch adhith > We can shout up to you
\subsubsection{Metaphorical adaptation of spatial values}
a) The spatial values given above are adapted to carry a metaphorical meaning of quality:
uchedh: superior, better (< uch “up”)
anedh: inferior, worse (< ane- “down”)
Examples from the conversation above:
m\'{o} garan’wir uchedh > my superior boyfriend
esi \'{o} ch’iachas anedh m\'{e}iu > his health is a bit inferior
b) Spatial notions are also used to construct a value system based on the concept of “tam”, meaning “quality/class”. This is combined with an- (< ane-) for “low quality”; m\'{e}- (< m\'{e}dh-, “middle”) for “middle quality”; and gwer-, “on”, i.e. “on top”, for “top quality”.
anamich: worst, bad, poor (quality)
m\'{e}thamich: mediocre, ordinary, average (quality)
gwerthamich: best, good, excellent (quality)
Examples from the conversation above:
Gwerthamich > Excellent
Esi \'{e} m\'{e}thamich m\'{e}iu > He is a bit average
esi \'{e} co hanamich co rh\'{e} ghals\'{\i} \'{e} bis > he is as poor as he could be
\subsubsection{Comparative value systems}
The metaphorical use of the spatial values is combined with regular words for quality (good, bad etc.) to construct two parallel systems of value judgement.
a) One system is regular. It uses the words good/bad in conjunction with the spatial value “gwer”, “on”, to construct the comparative level, and the suffix –am to construct the superlative level.
d\'{a}i – gwer dh\'{a}i – d\'{a}isam > good - better - best
mes – gwer wes – mesam > bad - worse - worst
Examples from the conversation above:
Esi ni in dh\'{a}i > we are well (used with adverbial particle “in”)
N\'{e} h\'{e}thu mi chwer dh\'{a}i diaman > I have never been better (fem.)
Ne hesi \'{e} d\'{a}isam > he is not [the] best
esi \'{e} gwer w\'{e}s co w\'{e}s > he is worse than bad
Galvis mesam \'{o} vith > maybe [the] worst of his life
b) The other system is irregular. It uses alternative words for good/bad in conjunction with spatial value terms to formulate the comparative and superlative forms.
math – uchedh – gwerthamich > fine - superior - excellent
druch – anedh – anamich > bad - inferior - poor
Examples from the conversation above:
D\'{\i} wath > good day
m\'{o} garan’wir uchedh > my superior boyfriend
Gwerthamich > excellent
Esi in senthu r\'{e} dhruch > the track is very bad
esi \'{o} ch’iachas anedh m\'{e}iu > his health is a bit inferior
esi \'{e} co hanamich co rh\'{e} ghals\'{\i} \'{e} bis > he is as poor as he could be
\subsubsection{Conversational words}
The conversation above gives some words that can be used in a conversational way to modify or temper statements.
m\'{e}dhoch: quite
cotham: even
en vithw\'{\i}ras: in reality
in gov\'{\i}on: immediately
in chw\'{\i}r: in truth
Examples from the conversation above:
Esi \'{e} m\'{e}s m\'{e}dhoch, in chw\'{\i}r > he is quite bad, in truth
R\'{e} ghals\'{\i} ni sp\'{a} och esi \'{e} gwer w\'{e}s co w\'{e}s, cotham > We could say that he is worse than bad, even
esi \'{o} ch’iachas anedh m\'{e}iu, en vithw\'{\i}ras > his health is a bit inferior, in reality
Gw\'{e}la mi \'{a}pis ich\'{e} n\'{u} in gov\'{\i}on > I want to see him now immediately
\subsection{Exercises}
\subsubsection{Vocabulary}
to climb: dres
tree: pren
to go: \'{a}i
cave: balu
waterfall: uch\'{o}n
rocks: carch\'{e}
spring: an\'{o}n
cliff: alis
mountain: br\'{\i}
swamp: latha
crane: garan
bull: t\'{a}ru
river: \'{a}von
to run: r\'{\i}thi
ground (soil): ughr
beer: curu
wine: gw\'{\i}n
apple: aval
bread: barghu
meat: cich
axe: gwidhuv
sword: cl\'{a}dh
shovel: scoth\'{\i}r
story: sp\'{a}thl
dance: sulingen
music: canthl
lie: c\'{o}ias
performance: gwothan
health: iachas
horse: \'{e}p
\subsubsection{Translate}
Translate the following phrases using the vocabulary given.
I climb up in a tree:
You go down into a cave:
There is a waterfall above the rocks:
There is a spring below the cliff:
The mountain is high:
The swamp is low:
The crane sits on the bull:
The river runs under the ground:
This beer is superior:
This wine is inferior:
This apple is good:
This bread is better:
This meat is [the] best:
This axe is bad:
This sword is worse:
This shovel is [the] worst:
That story is fine:
That dance is superior:
That music is excellent:
That lie is bad:
That performance is inferior:
The health of that horse is poor:
\newpage
\subsubsection{Solution}
I climb up in a tree: dr\'{e}sa mi uch en bren
You go down into a cave: \'{a}ia ti aner en valu
There is a waterfall above the rocks: esi uch\'{o}n uchel in garch\'{e}
There is a spring below the cliff: esi an\'{o}n anel in halis
The mountain is high: esi in vr\'{\i} hardhu
The swamp is low: esi in lhatha h\'{\i}th
The crane sits on the bull: s\'{e}dha in garan gwer in t\'{a}ru
The river runs under the ground: r\'{\i}tha in \'{a}von gw\'{o} in ughr
This beer is superior: esi in curu-sin uchedh
This wine is inferior: esi in chw\'{\i}n-sin hanedh
This apple is good: esi in haval-sin dh\'{a}i
This bread is better: esi in barghu-sin gwer dh\'{a}i
This meat is [the] best: esi in gich-sin dh\'{a}isam
This axe is bad: esi in gwidhuv-sin m\'{e}s
This sword is worse: esi in gl\'{a}dh-sin gwer w\'{e}s
This shovel is [the] worst: esi in ‘coth\'{\i}r-sin wesam
That story is fine: esi in ‘p\'{a}thl-s\'{e} wath
That dance is superior: esi in sulingen-s\'{e} uchedh
That music is excellent: esi in ganthl-s\'{e} chwerthamich
That lie is bad: esi in g\'{o}ias-s\'{e} dhruch
That performance is inferior: esi in chwothan-s\'{e} hanedh
The health of that horse is poor: esi iachas in \'{e}p-s\'{e} hanamich
from featureExtract.feature import calFeature
from classifier.model import MusicClassifier
from audioIO import record, load
import wave
import numpy as np
import matplotlib.pyplot as plt
# record the music
# frames, ex_samWid = record("./data/demo_chunks/exp.wav", time = 10)
# wav, f = load("./data/demo_chunks/exp.wav", sr = 22050)
# calculate params
feats, names = calFeature('./data/demo_chunks/dubstep.wav')
# load model
model = MusicClassifier("./data/model/dnn_3.h5")
model.getDataInfo("./data/data_set/beatsdataset.csv")
# predict
output = model.predict(feats)
print(output)
import os
import numpy as np
import astropy.units as u
from astropy.io import fits
from astropy.convolution import convolve
import mskpy
class config:
filt = 'F430M'
subframe = 'FULL'
readpat = 'SHALLOW2'
exptime_request = 300 * u.s
mu = 5. * u.mas / u.s
pa = 10 * u.deg
impact = 0.1 * u.arcsec
# simulate JWST stellar appulse of an asteroid
nircam_readouts = {
# nsamples, nframes
'RAPID': (1, 1),
'BRIGHT1': (2, 1),
'BRIGHT2': (2, 2),
'SHALLOW2': (5, 2),
'SHALLOW4': (5, 4),
'MEDIUM2': (10, 2),
'MEDIUM8': (10, 8),
'DEEP2': (20, 2),
'DEEP8': (20, 8),
}
nircam_tframes = {
'FULL': 10.73676 * u.s,
}
def dist_to_line(yx1, yx0, pa):
QP = np.array(yx1) - np.array(yx0)
# parallel:
n = np.r_[np.cos(pa).value, -np.sin(pa).value]
para = np.dot(QP, n) # / np.sqrt(np.dot(n, n))
# perpendicular:
n = np.r_[np.cos(pa + 90 * u.deg).value, -np.sin(pa + 90 * u.deg).value]
perp = np.abs(np.dot(QP, n)) # / np.sqrt(np.dot(n, n))
return para, perp
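# Worked example for dist_to_line (a sketch of the geometry above): with
# pa = 0 deg the along-track unit vector n is (1, 0) in (y, x) order, so
# `para` is the along-track offset dy and `perp` the cross-track distance |dx|:
#   dist_to_line((7., 3.), (5., 2.), 0 * u.deg)  ->  (2.0, 1.0)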
def trail(t, exptime, shape, cyx, b, ps, mu, pa):
"""
t : time offset, the line intercepts cyx + (0, b) at t = 0
exptime : exposure time
shape : shape of image array
cyx : the trail intercept point
b : impact parameter
ps : pixel scale of image array
mu : proper motion magnitude of target
pa : proper motion position angle of target, E of N
"""
import scipy.ndimage as nd
K = np.zeros(shape)
r = (mu / ps).decompose().value
n = r * exptime.to('s').value # number of pixels to trail
offset = r * t.to('s').value
yx0 = (cyx[0], cyx[1] + (b / ps).decompose().value)
yx = np.rollaxis(np.rollaxis(np.indices(shape), 2), 2)
para, perp = dist_to_line(yx, yx0, pa)
i = perp <= 1
K[i] = 1 - perp[i]
i = (para < offset) + (para > offset + n)
K[i] = 0
    # force odd kernel dimensions for convolution: trim the last row/column if even
    K = K[:K.shape[0] - (K.shape[0] + 1) % 2, :K.shape[1] - (K.shape[1] + 1) % 2]
K = K / K.sum() * exptime.to('s').value
return K[::-1, ::-1] # flip for convolution
def group_read(ramp, readpat, noise=False):
t0 = np.arange(len(ramp)) + 1
t1 = []
groups = []
if noise:
noise = np.random.poisson(ramp, ramp.shape)
else:
noise = 0
readout = ramp + noise
# first frame is always saved
t1.append(t0[0])
groups.append(readout[0])
for i in range(0, len(ramp), readpat[0]):
t1.append(t0[i:i+readpat[1]].mean())
groups.append(readout[i:i+readpat[1]].mean(0))
return np.array(t1), np.array(groups)
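# Example for group_read (a sketch): with the SHALLOW2 pattern (5 samples,
# 2 averaged frames) and a 12-frame ramp, the saved groups are frame 1,
# mean(frames 1-2), mean(frames 6-7) and mean(frames 11-12) (1-based),
# with t1 holding the corresponding mean frame times.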
def calc_psf(filt, source=None):
# NIRCam default source is 5700 K star
import webbpsf
webbpsf.setup_logging()
nc = webbpsf.NIRCam()
nc.filter = filt
psf = nc.calc_psf(source=source, nlambda=5, fov_arcsec=2)
return psf
fn = 'star-{}.fits'.format(config.filt)
if not os.path.exists(fn):
star = calc_psf(config.filt)
star.writeto(fn, overwrite=True)
else:
star = fits.open(fn)
fn = 'ast-{}.fits'.format(config.filt)
if not os.path.exists(fn):
import pysynphot as S
sp = S.BlackBody(170)
ast = calc_psf(config.filt, source=sp)
ast.writeto(fn, overwrite=True)
else:
ast = fits.open(fn)
tframe = nircam_tframes[config.subframe]
readpat = nircam_readouts[config.readpat]
ngroups = int(np.floor(
(config.exptime_request / tframe + readpat[0] - readpat[1]) / readpat[0]))
nframes = ngroups * readpat[0] - (readpat[0] - readpat[1])
exptime = nframes * tframe
shape = ast[0].data.shape
cyx = mskpy.gcentroid(ast[0].data, np.array(shape) / 2, box=5)
ps = ast[0].header['PIXELSCL'] * u.arcsec / u.pix
ast[0].header['CY'] = cyx[0], 'centroid'
ast[0].header['CX'] = cyx[1], 'centroid'
stack = []
for i in range(nframes):
t = (i - nframes / 2) * tframe
K = trail(t, tframe, shape, cyx, config.impact, ps, config.mu, config.pa)
star_trail = convolve(star[0].data, K)
im = star_trail + ast[0].data * tframe.to('s').value
stack.append(im)
ramp = np.cumsum(stack, 0)
t0 = np.arange(len(ramp)) + 1  # per-frame times in frame units (group times come from group_read)
t1, groups = group_read(ramp, readpat)
ast[0].data = groups
ast[1].data = np.array([mskpy.rebin(g, -4) for g in groups])
ast.append(fits.ImageHDU(ramp, name='FRAMES'))
ast.append(fits.ImageHDU(t1, name='GRPTIME'))
for i in range(2):
ast[i].header.add_history('Asteroid with star trail')
ast[i].header['SUBFRAME'] = config.subframe
ast[i].header['READPAT'] = config.readpat
ast[i].header['NGROUPS'] = ngroups
ast[i].header['NFRAMES'] = nframes
    ast[i].header['EXPTIME'] = (tframe * nframes).value, str(tframe.unit)
    ast[i].header['MU'] = config.mu.value, str(config.mu.unit)
    ast[i].header['PA'] = config.pa.value, str(config.pa.unit)
    ast[i].header['IMPACT'] = config.impact.value, str(config.impact.unit)
ast.writeto('appulse-1.fits', overwrite=True)
import numpy as np
import pandas as pd
import skimage as ski
import os
from matplotlib import pyplot as plt
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from math import sqrt
log_defaults = {
'min_s': 1,
'max_s': 30,
'num_s': 10,
'thresh':0.1,
'overlap': 0.5,
'log_scale': False,
'exclude_border': False
}
def run_log(image, plot_im = False, verbose = False, log_params = log_defaults):
if verbose == True:
print (log_params)
# Find blobs with Laplacian of Gaussian
blobs_log = blob_log(
image,
min_sigma = log_params['min_s'],
max_sigma = log_params['max_s'],
num_sigma = log_params['num_s'],
threshold = log_params['thresh'],
overlap = log_params['overlap'],
log_scale = log_params['log_scale'],
exclude_border = log_params['exclude_border']
)
if len(blobs_log) == 0:
print('No Blobs')
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
if plot_im == True:
# Generate figure to check accuracy
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(20, 10), sharex=True, sharey=True)
ax0.imshow(image)
ax1.imshow(image)
for blob in blobs_log:
y, x, r = blob
c = plt.Circle((x, y), r, color='r', linewidth=2, fill=False)
ax1.add_patch(c)
plt.tight_layout()
plt.show()
return fig, blobs_log
    # Otherwise return only blobs_log, for counting
return blobs_log
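# Example usage of run_log (a sketch; `image` is assumed to be a 2-D greyscale array):
#   params = dict(log_defaults, thresh=0.2)      # e.g. raise the threshold for noisy images
#   blobs = run_log(image, log_params=params)    # rows are (y, x, r)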
class cell_counts:
def __init__(self, name, image, blobs, pixels_per_micron, log_params):
self.id = os.path.basename(name)[0:5]
self.name = name
self.image = image
self.blobs = blobs[blobs[:,2] > 2] # restriction on minimum blob size
self.pixels_per_micron = pixels_per_micron
self.log_params = log_params
    @property
    def num_cells(self):
        return len(self.blobs)
    @property
    def im_area(self):
        microns_per_pixel = 1/self.pixels_per_micron
        im_area = self.image.shape[0] * self.image.shape[1] * microns_per_pixel**2
        return im_area
    @property
    def slice_area(self):
        """
        Area of the actual tissue slice rather than of the whole image,
        which avoids having to crop images by hand (CMH 2019-12-17).
        Only pixels with value > 1 are counted; 1 is used as the cutoff
        because nominally black pixels occasionally appear as [0, 1, 0]
        as well as [0, 0, 0]. Since only the green channel is passed in,
        no summing across RGB channels is needed.
        Returns slice area = number of counted pixels * microns_per_pixel**2.
        """
        bim = self.image[self.image>1]
        microns_per_pixel = 1/self.pixels_per_micron
        slice_area = bim.size * microns_per_pixel**2
        return slice_area
    @property
    def cells_per_um2(self):
        um2 = self.slice_area
        cells_per_um2 = self.num_cells/um2
        return cells_per_um2
    @property
    def cells_per_mm2(self):
        return self.cells_per_um2 * 1e6
    @property
    def percent_slice(self):
        return 100 * self.slice_area/self.im_area
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'image': self.image,
'blobs': self.blobs,
'pixels_per_micron': self.pixels_per_micron,
'num_cells': self.num_cells,
'im_area': self.im_area,
'slice_area': self.slice_area,
'cells_per_um2': self.cells_per_um2,
'cells_per_mm2': self.cells_per_mm2,
'percent_slice': self.percent_slice,
'LOG_params': self.log_params
}
def overlay(self, return_fig = False):
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(20, 10), sharex=True, sharey=True)
ax0.imshow(self.image)
ax1.imshow(self.image)
for blob in self.blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color='r', linewidth=2, fill=False)
ax1.add_patch(c)
plt.tight_layout()
plt.show()
if return_fig == True:
return fig
else:
return
def collect_cell_counts(
image_directory,
log_params = log_defaults,
testi = 0,
verbose = False,
pixels_per_micron = 1.5
):
images = ski.io.ImageCollection(os.path.join(image_directory, '*.tif'))
# For testing, allow the check of first set of images up to i = testi
if testi > 0:
images = images[0:testi]
# Verbose
if verbose == True:
print ('LOG parameters are:')
print (log_params)
print()
print ('The first 5 files are:')
print (images.files[0:5])
print ('...')
print ('The last 5 files are:')
print (images.files[-5:])
print()
# Run
counted = []
for i, image in enumerate(images):
if verbose == True:
print('i is:', i)
print("Current file is:")
print(images.files[i])
print()
"""
Commenting out for training
if verbose == False:
if i%10 == 0:
print('Current index:', i)
"""
greyscale_im = rgb2gray(image)
image8 = ski.img_as_ubyte(greyscale_im)
blobs_log = run_log(image8, plot_im = False, log_params = log_params)
clob = cell_counts(
name = images.files[i],
image = image8,
blobs = blobs_log,
pixels_per_micron= pixels_per_micron,
log_params = log_params
)
counted.append(clob)
return counted
def clob_to_dict(clob):
return {
'id': clob.id,
'name': os.path.basename(clob.name)[:-4],
#'image': clob.image,
#'blobs': clob.blobs,
#'pixels_per_micron': clob.pixels_per_micron,
'num_cells': clob.num_cells,
#'im_area': clob.im_area,
'slice_area': clob.slice_area,
'cells_per_um2': clob.cells_per_um2,
'cells_per_mm2': clob.cells_per_mm2,
'percent_slice': clob.percent_slice
}
def extract_panda(clob_list):
dictlist = []
for i in range(len(clob_list)):
dictlist += [clob_to_dict(clob_list[i])]
DF = pd.DataFrame(dictlist)
return DF
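# Minimal end-to-end sketch (hypothetical paths; adjust to your data layout):
#   counted = collect_cell_counts('./data/images', verbose=True)
#   df = extract_panda(counted)
#   df.to_csv('./data/cell_counts.csv', index=False)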
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="WzGezE-jLT-Q"
# # Regression
# + colab={} colab_type="code" id="HEOGBYJ_LT-X"
from IPython.display import Image
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# + colab={} colab_type="code" id="OYZrd4k1LT-n"
import numpy as np
import pandas as pd
import scipy.stats as st
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_validate, cross_val_score, GridSearchCV, KFold, LeaveOneOut
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.feature_selection import mutual_info_regression
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC, RidgeCV
import seaborn as sns
import copy
# + colab={} colab_type="code" id="K_KPvIWPLgGH"
import urllib.request
filepath = "../dataset/"
url = "https://tvml.github.io/ml1920/dataset/"
def get_file(filename,local):
if local:
return filepath+filename
else:
urllib.request.urlretrieve (url+filename, filename)
return filename
# + colab={} colab_type="code" id="e81w6WCDLT-1"
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib import cm
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
plt.rcParams['image.cmap'] = 'jet'
plt.rcParams['image.interpolation'] = 'none'
plt.rcParams['figure.figsize'] = (16, 8)
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 8
colors = ['xkcd:pale orange', 'xkcd:sea blue', 'xkcd:pale red', 'xkcd:sage green', 'xkcd:terra cotta', 'xkcd:dull purple', 'xkcd:teal', 'xkcd:goldenrod', 'xkcd:cadet blue',
'xkcd:scarlet']
cmap_big = cm.get_cmap('Spectral', 512)
cmap = mcolors.ListedColormap(cmap_big(np.linspace(0.7, 0.95, 256)))
bbox_props = dict(boxstyle="round,pad=0.3", fc=colors[0], alpha=.5)
# + [markdown] colab_type="text" id="A1W_8vkLLT_A"
# # Examining the Housing dataset
# + [markdown] colab_type="text" id="Sx_cuu0CLT_D"
# Features:
#
# <pre>
# 1. CRIM per capita crime rate by town
# 2. ZN proportion of residential land zoned for lots over 25,000 sq.ft.
# 3. INDUS proportion of non-retail business acres per town
# 4. CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
# 5. NOX nitric oxides concentration (parts per 10 million)
# 6. RM average number of rooms per dwelling
# 7. AGE proportion of owner-occupied units built prior to 1940
# 8. DIS weighted distances to five Boston employment centres
# 9. RAD index of accessibility to radial highways
# 10. TAX full-value property-tax rate per $10,000
# 11. PTRATIO pupil-teacher ratio by town
# 12. B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
# 13. LSTAT % lower status of the population
# 14. MEDV Median value of owner-occupied homes in $1000s
# </pre>
# + [markdown] colab_type="text" id="yi836kY3LT_F"
# Reading the dataset into a pandas dataframe
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="zsM2ILnPLT_K" outputId="e862d1f4-3ec6-4ba1-cd6a-348300107059"
df = pd.read_csv(get_file('housing.data.txt',local=1), header=None, sep='\s+')
df.columns = ['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV']
df.shape
# + [markdown] colab_type="text" id="IHWGWS1WLT_S"
# ## Visualizing the characteristics of the dataset
# + [markdown] colab_type="text" id="S_hYwNBHLT_U"
# Matrix of the pairwise feature distributions; on the diagonal, the distribution of each individual feature
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="BXuF5r3aLT_X" outputId="13034d4b-2ed1-445b-bc9b-50dfc19c4fa7"
cols = ['LSTAT', 'RM', 'INDUS', 'AGE', 'MEDV']
fig = plt.figure(figsize=(16, 8))
sns.pairplot(df[cols], height=4, diag_kind='kde',
plot_kws=dict(color=colors[8]),
diag_kws=dict(shade=True, alpha=.7, color=colors[0]))
plt.show()
# + [markdown] colab_type="text" id="ubZe7F0GLT_e"
# Visualizing the correlation matrix. At position $(i,j)$, the (linear) correlation coefficient between features $i$ and $j$. Values lie in $[-1,1]$: $1$ is perfect correlation, $-1$ perfect inverse correlation, $0$ no correlation
# + colab={"base_uri": "https://localhost:8080/", "height": 513} colab_type="code" id="7oBXiAU_LT_g" outputId="f609eba8-8bd8-4716-c516-049e4bf8745c"
cm = np.corrcoef(df[cols].values.T)
plt.figure(figsize=(14,7))
hm = sns.heatmap(cm,
cbar=True,
annot=True,
square=True,
fmt='.2f',
annot_kws={'size': 10},
yticklabels=cols,
xticklabels=cols,
cmap = cmap)
plt.tight_layout()
plt.show()
# + [markdown] colab_type="text" id="XrFgg_2ALT_s"
# ### Regressing MEDV on a single feature
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="tqqNLcLeLT_t" outputId="a36f40c0-bed0-40e5-8151-0c3caf49e271"
print("Feature utilizzabili: {0}".format(', '.join(map(str, df.columns[:-1]))))
# + colab={"base_uri": "https://localhost:8080/", "height": 452} colab_type="code" id="FinaEtbQLT_y" outputId="71eba5c5-5d5d-45b5-fd41-7a869124c834"
mi = mutual_info_regression(df[df.columns[:-1]], df[df.columns[-1]])
dmi = pd.DataFrame(mi, index=df.columns[:-1], columns=['mi']).sort_values(by='mi', ascending=False)
dmi.head(20)
# + [markdown] colab_type="text" id="xH3Vn74sLT_3"
# Use the most significant feature (highest mutual information)
# + colab={} colab_type="code" id="Y5j9EGzjLT_7"
feat = dmi.index[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 419} colab_type="code" id="-YxDJENWMRZQ" outputId="6560daea-48f3-4938-a187-1bbca93ada5e"
df[[feat,'MEDV']]
# + colab={} colab_type="code" id="_T4GidP4LT_-"
X = df[[feat]].values
y = df['MEDV'].values
# + colab={"base_uri": "https://localhost:8080/", "height": 799} colab_type="code" id="F5a2R3YPLUAC" outputId="c5851d4b-e841-4283-884e-5ce1d49df6ad"
y
# + colab={} colab_type="code" id="en1TYfxoLUAI"
results = []
# + [markdown] colab_type="text" id="D1vfvAeELUAN"
# Standard linear regression: the cost function is $$C(\mathbf{w})=\frac{1}{2}\sum_i (y(\mathbf{w},\mathbf{x}_i) - t_i)^2$$
# + colab={} colab_type="code" id="tnEU119hLUAO"
# create a linear regression model
r = LinearRegression()
# learn its coefficients on the available data
r = r.fit(X, y)
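# + [markdown]
# The least-squares cost above is minimized in closed form by the normal equations $\mathbf{w}=(\Phi^T\Phi)^{-1}\Phi^T\mathbf{t}$, where $\Phi$ is the design matrix with a bias column. A minimal sketch, checking the result against the coefficients found by scikit-learn:
# +
Phi = np.hstack([np.ones((X.shape[0], 1)), X])   # design matrix [1, x]
w = np.linalg.solve(Phi.T @ Phi, Phi.T @ y)      # solve the normal equations
print('w0: {0:.3f}, w1: {1:.3f}'.format(w[0], w[1]))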
# + [markdown] colab_type="text" id="usLU4_fJLUAU"
# Quality measures used:
# - MSE (mean squared error), defined as $$\frac{1}{n}\sum_{i=1}^n (y(\mathbf{w},\mathbf{x}_i) - t_i)^2$$
#
# - $r^2$ (coefficient of determination), defined as the fraction of the variance of the target values explained by the regression $$\frac{\sum_{i=1}^n (y(\mathbf{w},\mathbf{x}_i) - \overline{t})^2}{\sum_{i=1}^n (t_i - \overline{t})^2}=1-\frac{\sum_{i=1}^n (y(\mathbf{w},\mathbf{x}_i) - t_i)^2}{\sum_{i=1}^n (t_i - \overline{t})^2}$$
#
# where $$\overline{t}=\frac{1}{n}\sum_{i=1}^nt_i$$ is the mean of the target
# + colab={} colab_type="code" id="WTlCxUcELUAV"
p = r.predict(X)
# evaluate MSE and r2 on the data and the predictions
mse = mean_squared_error(p,y)
r2 = r2_score(y,p)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="6sBzDNYyLUAa" outputId="7f0aeb77-32b3-41d4-96af-808448d65603"
print('w0: {0:.3f}, w1: {1:.3f}, MSE: {2:.3f}, r2={3:5.2f}'.format(r.intercept_, r.coef_[0],mse, r2))
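# + [markdown]
# As a sanity check, both measures can also be computed directly from their definitions (a minimal sketch using the arrays `p` and `y` from the cells above):
# +
mse_manual = np.mean((p - y)**2)                                # mean squared error
r2_manual = 1 - np.sum((y - p)**2) / np.sum((y - y.mean())**2)  # coefficient of determination
print('manual MSE: {0:.3f}, manual r2: {1:.3f}'.format(mse_manual, r2_manual))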
# + colab={"base_uri": "https://localhost:8080/", "height": 542} colab_type="code" id="EkPAp_wXLUAn" outputId="9acfd5f3-70d0-4658-a501-92174dea0890"
x = np.linspace(min(X),max(X),100).reshape(-1,1)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X, y, c=colors[8], edgecolor="xkcd:light grey")
plt.plot(x, r.predict(x), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title('Regression on a single feature', fontsize=16)
plt.text(0.85, 0.9, 'MSE: {0:.3f}'.format(mse), fontsize=12, transform=ax.transAxes, bbox=bbox_props)
plt.text(0.85, 0.85, 'r2: {0:.3f}'.format(r2), fontsize=12, transform=ax.transAxes, bbox=bbox_props)
plt.show()
# + [markdown] colab_type="text" id="RvAjhuG4LUBI"
# Evaluate the model on a test set in order to avoid overfitting
# + colab={} colab_type="code" id="FuBJSthcLUBJ"
# partition the dataset into training (80%) and test (20%) sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
# + [markdown] colab_type="text" id="KC4SwMQALUBM"
# Create a pipeline containing only the regression model
# + colab={} colab_type="code" id="yUKCri3vLUBN"
pipe = Pipeline([('regression', LinearRegression())])
pipe = pipe.fit(X_train, y_train)
p_train = pipe.predict(X_train)
p_test = pipe.predict(X_test)
mse_train = mean_squared_error(p_train,y_train)
mse_test = mean_squared_error(p_test,y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="rcuh3OXlLUBQ" outputId="ec499eb5-0de6-4fba-f60d-0d1395efc00d"
r = pipe.named_steps['regression']
print('w0: {0:.3f}, w1: {1:.3f}, MSE-train: {2:.3f}, MSE-test: {3:.3f}'.format(r.intercept_, r.coef_[0],mse_train, mse_test))
# + colab={} colab_type="code" id="KYnfa4nILUBY"
results.append(['Regression, 1 feature', mse_train, mse_test])
# + colab={"base_uri": "https://localhost:8080/", "height": 542} colab_type="code" id="8udR0PE0LUBb" outputId="e97141e9-bfff-4cbe-a5cc-796409c1d3da"
x = np.linspace(min(X),max(X),100).reshape(-1,1)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X_train, y_train, c=colors[8], edgecolor="xkcd:light grey", label='Train')
plt.scatter(X_test, y_test, c=colors[0], edgecolor='black', label='Test')
plt.plot(x, pipe.predict(x), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title('Regression on a single feature, with test set', fontsize=16)
plt.text(0.9, 0.9, 'MSE\ntrain {0:.3f}\ntest {1:.3f}'.format(mse_train, mse_test), fontsize=12, transform=ax.transAxes, bbox=bbox_props)
plt.show()
# + [markdown] colab_type="text" id="SWiPKre4LUBf"
# Add standardization of the feature, transforming its values so as to obtain mean $0$ and variance $1$. Use scikit-learn pipelines to define a sequence of tasks: in this case the data are normalized by a StandardScaler and the regression model is applied to the result.
# + colab={} colab_type="code" id="Gf_exd6iLUBg"
pipe = Pipeline([('scaler', StandardScaler()),('regression', LinearRegression())])
pipe = pipe.fit(X_train, y_train)
p_train = pipe.predict(X_train)
p_test = pipe.predict(X_test)
mse_train = mean_squared_error(p_train,y_train)
mse_test = mean_squared_error(p_test,y_test)
# + colab={} colab_type="code" id="I-k_EpmNLUBi" outputId="d7329b99-cde7-4eb0-c80e-2ddbdb99e746"
s = pipe.named_steps['scaler']
print('Scaling: mean: {0:.3f}, var: {1:.3f}, scale: {2:.3f}'.format(s.mean_[0], s.var_[0],s.scale_[0]))
# + colab={} colab_type="code" id="Bb2GUAiSLUBk" outputId="fa179b8d-d169-4425-af93-1e8d907e41b9"
r = pipe.named_steps['regression']
print('w0: {0:.3f}, w1: {1:.3f}, MSE-train: {2:.3f}, MSE-test: {3:.3f}'.format(r.intercept_, r.coef_[0],mse_train, mse_test))
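# + [markdown]
# The scaler applies $z=(x-\mu)/\sigma$ feature-wise; a quick check on the first training sample, using the fitted step `s` from above (a sketch):
# +
z_manual = (X_train[0] - s.mean_) / s.scale_
print('manual: {0}, scaler: {1}'.format(z_manual, s.transform(X_train[:1])[0]))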
# + colab={} colab_type="code" id="2UfPjFAhLUBo"
results.append(['Regression, 1 feature, scaled', mse_train, mse_test])
# + colab={} colab_type="code" id="GKrltf3iLUBq" outputId="0621d9de-b9c2-4c45-bb4d-cf133687eceb"
x = np.linspace(min(X),max(X),100).reshape(-1,1)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X_train, y_train, c=colors[8], edgecolor='xkcd:light grey', label='Train')
plt.scatter(X_test, y_test, c=colors[0], edgecolor='black', label='Test')
plt.plot(x, pipe.predict(x), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.text(0.9, 0.9, 'MSE\ntrain {0:.3f}\ntest {1:.3f}'.format(mse_train, mse_test), fontsize=12, transform=ax.transAxes, bbox=bbox_props)
plt.title('Regression on a single standardized feature, with test set', fontsize=16)
plt.show()
# + [markdown] colab_type="text" id="v4lO8GN0LUBs"
# The evaluation might depend too strongly on the particular training-test pair (variance).
# Use cross validation to evaluate the model. A KFold is applied to split the dataset $X$ into n_splits (training set, test set) pairs
# + colab={} colab_type="code" id="5AfkUitELUBt" outputId="94668fd6-bc73-469a-ddfa-ab3cbce544c7"
pipe = Pipeline([('scaler', StandardScaler()),('regression', LinearRegression())])
k_fold = KFold(n_splits=3)
mse = []
preds = []
# iterate over all (training set, test set) pairs
for train, test in k_fold.split(X):
    # learn the coefficients on the training set
    r = pipe.fit(X[train], y[train])
    # append a copy of the learned regression model to a list
    preds.append(copy.deepcopy(r))
    mse.append(mean_squared_error(r.predict(X[test]),y[test]))
for i,r in enumerate(preds):
    c = [r.named_steps['scaler'].mean_[0], r.named_steps['scaler'].scale_[0], r.named_steps['regression'].intercept_, r.named_steps['regression'].coef_[0]]
    print('Fold: {0:2d}, mean: {1:.3f}, scale: {2:.3f}, w0: {3:.3f}, w1: {4:.3f}, MSE test set: {5:.3f}'.format(i, c[0], c[1], c[2], c[3], mse[i]))
# report the mean and standard deviation of the MSE over all folds
print('\nMSE - mean: {0:.3f}, std.dev: {1:.3f}'.format(np.mean(mse), np.std(mse)))
# + colab={} colab_type="code" id="W1wE1Iu_LUBx" outputId="e3c04be7-194f-4684-945d-17f823631597"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X, y, c=colors[8], edgecolor='xkcd:light grey')
for i, r in enumerate(preds):
plt.plot(X, r.predict(X), color=colors[i%7], linewidth=1)
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title('Regression on a single standardized feature, with CV', fontsize=16)
plt.show()
# + [markdown] colab_type="text" id="IqSLytR5LUBz"
# Use scikit-learn's cross_val_score function to perform the cross validation
# + colab={} colab_type="code" id="iOROy93FLUB0"
p = Pipeline([('scaler', StandardScaler()),('regression', LinearRegression())])
# learn the model on the whole dataset
r = p.fit(X, y)
# compute the cost of applying the model to the whole dataset, hence with possible overfitting
mse = mean_squared_error(r.predict(X),y)
# perform the cross validation, obtaining the test-set cost for each fold
scores = cross_val_score(estimator=p, X=X, y=y, cv=5, scoring='neg_mean_squared_error')
# compute the mean cost over all folds
mse_cv = -scores.mean()
# + colab={} colab_type="code" id="JeOw11joLUB1"
results.append(['Regression, 1 feature, scaled, CV', mse, mse_cv])
# + colab={} colab_type="code" id="slet7vExLUB4" outputId="822559c9-7510-456b-f1a2-a1dd2f892d59"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X, y, c=colors[8], edgecolor='xkcd:light grey')
plt.plot(X, r.predict(X), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title('Regression on a single standardized feature, with CV', fontsize=16)
plt.text(0.88, 0.9, 'MSE\ntrain {0:.3f}\nCV mean {1:.3f}'.format(mse, mse_cv), fontsize=12, transform=ax.transAxes, bbox=bbox_props)
plt.show()
# + [markdown] colab_type="text" id="F-DAAHg2LUB6"
# ### Regression with regularization
# + [markdown] colab_type="text" id="bQ_q5KSlLUB7"
# Use a model with L1 regularization (Lasso): the cost function is $$C(\mathbf{w})=\frac{1}{2}\sum_i (y(\mathbf{w},\mathbf{x}_i) - t_i)^2+\frac{\alpha}{2}\sum_j |w_j|$$
# + colab={} colab_type="code" id="fRDmrJQxLUB7"
# fix a value for the hyperparameter
alpha = 0.5
p = Pipeline([('scaler', StandardScaler()),('regression', Lasso(alpha=alpha))])
r = p.fit(X, y)
mse = mean_squared_error(r.predict(X),y)
scores = cross_val_score(estimator=p, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
mse_cv = -scores.mean()
# + colab={} colab_type="code" id="mUA7XrpuLUB_"
results.append(['Regression L1, 1 feature, scaled, CV, alpha=0.5', mse, mse_cv])
# + colab={} colab_type="code" id="v6Oc9VaHLUCB" outputId="c10e5797-28b8-457e-995c-46b8cd249600"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X, y, c=colors[8], edgecolor='xkcd:light grey')
plt.plot(X, r.predict(X), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title(r'Linear regression with L1 regularization ($\alpha={0:.2f}$)'.format(alpha), fontsize=16)
plt.text(0.88, 0.9, 'MSE\ntrain {0:.3f}\nCV mean {1:.3f}'.format(mse, mse_cv), fontsize=12, transform=ax.transAxes, bbox=bbox_props)
plt.show()
# + [markdown] colab_type="text" id="shnWSlL9LUCE"
# Apply a model with L2 regularization (Ridge): the cost function is $$C(\mathbf{w})=\frac{1}{2}\sum_i (y(\mathbf{w},\mathbf{x}_i) - t_i)^2+\frac{\alpha}{2}\sum_j w_j^2$$
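# + [markdown]
# For ridge regression the minimizer is again available in closed form, $\mathbf{w}=(\Phi^T\Phi+\alpha I)^{-1}\Phi^T\mathbf{t}$. A minimal sketch on the standardized feature with $\alpha=0.5$ (note: unlike scikit-learn's Ridge, this naive version penalizes the bias term as well):
# +
Xs = StandardScaler().fit_transform(X)
Phi = np.hstack([np.ones((Xs.shape[0], 1)), Xs])
w_ridge = np.linalg.solve(Phi.T @ Phi + 0.5 * np.eye(Phi.shape[1]), Phi.T @ y)
print('w0: {0:.3f}, w1: {1:.3f}'.format(w_ridge[0], w_ridge[1]))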
# + colab={} colab_type="code" id="Ie5Kz85vLUCE"
# fix a value for the hyperparameter
alpha = 0.5
p = Pipeline([('scaler', StandardScaler()),('regression', Ridge(alpha=alpha))])
r = p.fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
mse = mean_squared_error(r.predict(X),y)
mse_cv = -scores.mean()
# + colab={} colab_type="code" id="Wesq6ZKwLUCG"
results.append(['Regression L2, 1 feature, scaled, CV, alpha=0.5', mse, mse_cv])
# + colab={} colab_type="code" id="uw9K3q-cLUCJ" outputId="a3b0e46f-e910-416c-db66-ad4974c356ac"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X, y, c=colors[8], edgecolor='xkcd:light grey')
plt.plot(X, r.predict(X), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title(r'Linear regression with L2 regularization ($\alpha={0:.2f}$)'.format(alpha), fontsize=16)
plt.text(0.88, 0.9, 'MSE\ntrain {0:.3f}\nCV mean {1:.3f}'.format(mse, mse_cv), fontsize=12, transform=ax.transAxes, bbox=bbox_props)
plt.show()
# + [markdown] colab_type="text" id="OqfmXidYLUCL"
# Apply a model with Elastic Net regularization: the cost function is $$C(\mathbf{w})=\frac{1}{2}\sum_i (y(\mathbf{w},\mathbf{x}_i) - t_i)^2+\frac{\alpha}{2}\left(\gamma\sum_j |w_j|+(1-\gamma)\sum_j w_j^2\right)$$
# + colab={} colab_type="code" id="GRV4hATlLUCL"
alpha = 0.5
gamma = 0.3
p = Pipeline([('scaler', StandardScaler()),('regression', ElasticNet(alpha=alpha, l1_ratio=gamma))])
r = p.fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
mse = mean_squared_error(r.predict(X),y)
mse_cv = -scores.mean()
# + colab={} colab_type="code" id="u3nqNp2rLUCR"
results.append(['Regression Elastic Net, 1 feature, scaled, CV, alpha=0.5, gamma=0.3', mse, mse_cv])
# + colab={} colab_type="code" id="nCil7cUBLUCT" outputId="d4078f6f-46c9-47a2-a3fc-6619ba83a3bf"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X, y, c=colors[8], edgecolor='xkcd:light grey')
plt.plot(X, r.predict(X), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title(r'Linear regression with Elastic Net regularization ($\alpha={0:.2f}, \gamma={1:.2f}$)'.format(alpha, gamma), fontsize=16)
plt.text(0.88, 0.9, 'MSE\ntrain {0:.3f}\nCV mean {1:.3f}'.format(mse, mse_cv), fontsize=12, transform=ax.transAxes,
bbox=bbox_props)
plt.show()
# + [markdown] colab_type="text" id="ENzFUFjtLUCX"
# ## Polynomial basis functions
# + [markdown] colab_type="text" id="ta6XLw-TLUCX"
# Standard linear regression with polynomial basis functions, using scikit-learn's PolynomialFeatures, which implements polynomial basis functions up to the given degree
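# + [markdown]
# For a single feature $x$, `PolynomialFeatures(degree=3)` expands each sample into the basis $[1, x, x^2, x^3]$; a quick illustration:
# +
print(PolynomialFeatures(degree=3).fit_transform(np.array([[2.0]])))   # -> [[1. 2. 4. 8.]]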
# + colab={} colab_type="code" id="NYZHfioDLUCY"
deg = 3
pipe_regr = Pipeline([('scaler', StandardScaler()),('bf', PolynomialFeatures(degree=deg)),('regression', LinearRegression())])
r = pipe_regr.fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
mse = mean_squared_error(r.predict(X),y)
mse_cv = -scores.mean()
# + colab={} colab_type="code" id="ugTU8bQzLUCZ"
results.append(['Regression, Polynomial, 1 feature, scaled, degree={0:d}, CV'.format(deg), mse, mse_cv])
# + colab={} colab_type="code" id="8F9ibDnALUCc" outputId="8cf7a996-4acb-4d7c-8052-2e0d3f4fff90"
xmin = np.floor(min(X)[0])
xmax = np.ceil(max(X)[0])
x = np.linspace(xmin,xmax,100).reshape(-1, 1)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X, y, c=colors[8], edgecolor='xkcd:light grey')
plt.plot(x, r.predict(x), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title(r'Linear regression with polynomial basis functions ($d={0:3d}$)'.format(deg), fontsize=16)
plt.text(0.88, 0.9, 'MSE\ntrain {0:.3f}\nCV mean {1:.3f}'.format(mse, mse_cv), fontsize=12, transform=ax.transAxes, bbox=bbox_props)
plt.show()
# + [markdown] colab_type="text" id="N79DgIiOLUCd"
# Visualizing the residuals: the differences $y_i-t_i$ as a function of $y_i$
# + colab={} colab_type="code" id="ZOINwoDRLUCe" outputId="f46b202c-efa0-4a7b-aa13-ac35485f91c9"
y_pred = r.predict(X)
mm = min(y_pred)
mx = max(y_pred)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(y_pred, (y_pred - y), c=colors[8], edgecolor='xkcd:light grey')
plt.xlabel(r'Predicted values ($y_i$)')
plt.ylabel(r'Residuals ($y_i-t_i$)')
plt.hlines(y=0, xmin=(int(mm)//10)*10, xmax=(int(mx)//10)*10+10, color=colors[2], lw=2)
plt.text(0.88, 0.9, 'MSE: d = {0:d}\ntrain {1:.3f}\nCV mean {2:.3f}'.format(deg, mse, mse_cv), fontsize=12, transform=ax.transAxes,
bbox=bbox_props)
plt.show()
# + colab={} colab_type="code" id="4wp9Da1fLUCj"
res = []
for deg in range(1,30):
r = Pipeline([('scaler', StandardScaler()),('bf', PolynomialFeatures(degree=deg)),('regression', LinearRegression())]).fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
mse = mean_squared_error(r.predict(X),y)
mse_cv = -scores.mean()
res.append([deg, mse, mse_cv])
# + colab={} colab_type="code" id="QCGdDVDMLUCl" outputId="d7935e4e-1f92-4932-c9fc-08e615bff938"
top = 15
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.plot([r[0] for r in res[:top]], [r[1] for r in res[:top]], color=colors[8],label=r'Train')
plt.plot([r[0] for r in res[:top]], [r[2] for r in res[:top]], color=colors[2],label=r'Test')
l=plt.legend()
# + colab={} colab_type="code" id="y783lyGCLUCn"
alpha = 1
deg = 3
pipe_regr = Pipeline([('scaler', StandardScaler()),('bf', PolynomialFeatures(degree=deg)),('regression', Lasso(alpha=alpha))])
r = pipe_regr.fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
# + colab={} colab_type="code" id="5CSq6xHBLUCp" outputId="a5e385da-8460-43f8-a564-907d5f821865"
mse = mean_squared_error(r.predict(X),y)
mse_cv = -scores.mean()
xmin = np.floor(min(X)[0])
xmax = np.ceil(max(X)[0])
x = np.linspace(xmin,xmax,100).reshape(-1, 1)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(X, y, c=colors[8], edgecolor='white')
plt.plot(x, r.predict(x), color=colors[2])
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.title(r'Linear regression with polynomial basis functions and L1 regularization ($d={0:3d}, \alpha={1:.3f}$)'.format(deg, alpha), fontsize=16)
plt.text(0.88, 0.9, 'MSE\ntrain {0:.3f}\nCV mean {1:.3f}'.format(mse, mse_cv), fontsize=12, transform=ax.transAxes,
bbox=bbox_props)
plt.show()
# + colab={} colab_type="code" id="_cVYm4ScLUCr"
res = []
for deg in range(1,20):
r = Pipeline([('scaler', StandardScaler()),('bf', PolynomialFeatures(degree=deg)),('regression', Lasso(alpha=alpha))]).fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
mse = mean_squared_error(r.predict(X),y)
mse_cv = -scores.mean()
res.append([deg, mse, mse_cv])
# + colab={} colab_type="code" id="nx-9VU6GLUCs" outputId="c56a17f4-7f3c-4fea-f509-abe2abd36518"
top = 15
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.plot([r[0] for r in res[:top]], [r[1] for r in res[:top]], color=colors[8],label=r'Train')
plt.plot([r[0] for r in res[:top]], [r[2] for r in res[:top]], color=colors[2],label=r'Test')
l=plt.legend()
# + colab={} colab_type="code" id="YUsFDVWnLUCu" outputId="45cb8555-3eca-4e6a-a3ca-332b8806dbca"
y_pred = r.predict(X)
mm = min(y_pred)
mx = max(y_pred)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(y_pred, (y_pred - y),c=colors[8], edgecolor='white',label='Train')
plt.xlabel(r'Predicted values ($y_i$)')
plt.ylabel(r'Residuals ($y_i-t_i$)')
plt.hlines(y=0, xmin=(int(mm)//10)*10, xmax=(int(mx)//10)*10+10, color=colors[2], lw=2)
plt.tight_layout()
plt.show()
# + [markdown] colab_type="text" id="1rQvtlkDLUCw"
# ## Regression on all the features
# + colab={} colab_type="code" id="wCa0n7SALUCw"
X = df[df.columns[:-1]]
y = df[df.columns[-1]]
# + colab={} colab_type="code" id="-ZpdPPIhLUC2" outputId="7355cac9-6187-4c66-d037-923e78d972dd"
r = LinearRegression()
r.fit(X, y)
print('MSE: {0:.3f}'.format(mean_squared_error(r.predict(X),y)))
# + colab={} colab_type="code" id="7NT6s1lFLUC3" outputId="68b8cd2f-ad26-4d5c-ddb4-5069d3e1bfdd"
y_pred = r.predict(X)
mm = min(y_pred)
mx = max(y_pred)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(y_pred, (y_pred - y), c=colors[8], edgecolor='xkcd:light grey')
plt.xlabel(r'Predicted values ($y_i$)')
plt.ylabel(r'Residuals ($y_i-t_i$)')
plt.hlines(y=0, xmin=(int(mm)//10)*10, xmax=(int(mx)//10)*10+10, color=colors[2], lw=2)
plt.show()
# + colab={} colab_type="code" id="_OmD8ysmLUC5" outputId="bbbdab3a-b8c8-4777-9879-5ec547e30e15"
r = Pipeline([('scaler', StandardScaler()),('regression', LinearRegression())])
r.fit(X, y)
print('MSE: {0:.3f}'.format(mean_squared_error(r.predict(X),y)))
# + colab={} colab_type="code" id="0H4WoOOaLUDE" outputId="00495aac-f972-4a2f-cdd0-fc41eb8d57fd"
y_pred = r.predict(X)
mm = min(y_pred)
mx = max(y_pred)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(y_pred, (y_pred - y), c=colors[8], edgecolor='xkcd:light grey')
plt.xlabel(r'Predicted values ($y_i$)')
plt.ylabel(r'Residuals ($y_i-t_i$)')
plt.hlines(y=0, xmin=(int(mm)//10)*10, xmax=(int(mx)//10)*10+10, color=colors[2], lw=2)
plt.show()
# + [markdown] colab_type="text" id="Hx9up98BLUDK"
# Apply cross-validation
# + colab={} colab_type="code" id="_-IUj46qLUDK" outputId="b8da2178-3cf8-4524-bf3e-d08f75b53a7e"
r = Pipeline([('scaler', StandardScaler()),('regression', LinearRegression())])
scores = cross_val_score(estimator=r, X=X, y=y, cv=5, scoring='neg_mean_squared_error')
print('MSE')
print(-scores)
print('mean {0:.3f}, std.dev {1:.3f}'.format(-scores.mean(), scores.std()))
# + colab={} colab_type="code" id="N_Qz2U0kLUDL" outputId="0664b93e-f1c5-40aa-db76-bcc3f8d72a17"
alpha = 0.5
r = Pipeline([('scaler', StandardScaler()),('regression', Lasso(alpha=alpha))])
scores = cross_val_score(estimator=r, X=X, y=y, cv=5, scoring='neg_mean_squared_error')
print('MSE')
print(-scores)
print('mean {0:.3f}, std.dev {1:.3f}'.format(-scores.mean(), scores.std()))
# + colab={} colab_type="code" id="kXD6FQ9WLUDO" outputId="10295334-286d-407c-efc8-03009a770891"
alpha = 10
r = Pipeline([('scaler', StandardScaler()),('regression', Ridge(alpha=alpha))])
scores = cross_val_score(estimator=r, X=X, y=y, cv=5, scoring='neg_mean_squared_error')
print('MSE')
print(-scores)
print('mean {0:.3f}, std.dev {1:.3f}'.format(-scores.mean(), scores.std()))
# + colab={} colab_type="code" id="6aEYp8AqLUDR" outputId="e8380447-b4de-4635-d998-8117de2951e5"
alpha = 0.5
gamma = 0.3
r = Pipeline([('scaler', StandardScaler()),('regression', ElasticNet(alpha=alpha, l1_ratio=gamma))])
scores = cross_val_score(estimator=r, X=X, y=y, cv=5, scoring='neg_mean_squared_error')
print('MSE')
print(-scores)
print('mean {0:.3f}, std.dev {1:.3f}'.format(-scores.mean(), scores.std()))
# + [markdown] colab_type="text" id="wWyNt6n7LUDV"
# LassoCV searches for the best value of $\alpha$
# + colab={} colab_type="code" id="lJFg0U9dLUDW" outputId="e71324af-d537-4b72-d981-d8a405b3b688"
pipe_regr = Pipeline([('scaler', StandardScaler()),('regression', LassoCV(cv=7))])
r = pipe_regr.fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=5, scoring='neg_mean_squared_error')
best_alpha = pipe_regr.named_steps['regression'].alpha_
print('Best value of alpha: {0:.3f}'.format(best_alpha))
print('MSE: {0:.3f}'.format(-scores.mean()))
# + colab={} colab_type="code" id="fiPDuI5jLUDY" outputId="f808b282-bc17-4b03-edf1-bf719d1d5a62"
pipe_regr = Pipeline([('scaler', StandardScaler()),('regression', Lasso(alpha = best_alpha))])
r = pipe_regr.fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
print('MSE: {0:.3f}'.format(-scores.mean()))
# + colab={} colab_type="code" id="GnHnxy_tLUDg" outputId="1ce740e8-957a-4f99-8cf4-190d0499d979"
y_pred = r.predict(X)
mm = min(y_pred)
mx = max(y_pred)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(y_pred, (y_pred - y), c=colors[8], edgecolor='xkcd:light grey')
plt.xlabel(r'Predicted values ($y_i$)')
plt.ylabel(r'Residuals ($y_i-t_i$)')
plt.hlines(y=0, xmin=(int(mm)//10)*10, xmax=(int(mx)//10)*10+10, color=colors[2], lw=2)
plt.tight_layout()
plt.show()
# + colab={} colab_type="code" id="lgR0sJKnLUDj" outputId="026486fc-82d0-4e83-af3b-cda09902d3b5"
pipe_regr = Pipeline([('scaler', StandardScaler()),('regression', RidgeCV(cv=20))])
r = pipe_regr.fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
best_alpha = pipe_regr.named_steps['regression'].alpha_
print('Best value of alpha: {0:.3f}'.format(best_alpha))
print('MSE: {0:.3f}'.format(-scores.mean()))
# + colab={} colab_type="code" id="S1NpK9TLLUDl" outputId="2a7cad10-0553-496a-fe62-149205e674d1"
pipe_regr = Pipeline([('scaler', StandardScaler()),('regression', Ridge(alpha = best_alpha))])
r = pipe_regr.fit(X, y)
scores = cross_val_score(estimator=r, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
print('MSE: {0:.3f}'.format(-scores.mean()))
# + colab={} colab_type="code" id="5NIn5i7FLUDo" outputId="c8ddd6fb-97aa-44fa-9a03-c5c4002c6938"
r = Pipeline([('scaler', StandardScaler()),('regression', Ridge(alpha=best_alpha))]).fit(X, y)
y_pred = r.predict(X)
mm = min(y_pred)
mx = max(y_pred)
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.scatter(y_pred, (y_pred - y), c=colors[8], edgecolor='xkcd:light grey')
plt.xlabel(r'Predicted values ($y_i$)')
plt.ylabel(r'Residuals ($y_i-t_i$)')
plt.hlines(y=0, xmin=(int(mm)//10)*10, xmax=(int(mx)//10)*10+10, color=colors[2], lw=2)
plt.tight_layout()
plt.show()
# + [markdown] colab_type="text" id="nj5oUjitLUDr"
# ## Model selection
# + colab={} colab_type="code" id="OJjzVL2qLUDr"
X = np.array(df[df.columns[:-1]])
y = np.array(df[df.columns[-1]])
# + [markdown] colab_type="text" id="9NYkpIB1LUDt"
# ### Lasso
# + [markdown] colab_type="text" id="qtl5jriNLUDt"
# Grid search over values of alpha for Lasso
# + colab={} colab_type="code" id="96YUVTefLUDu"
domain = np.linspace(0,10,100)
cv = 10
scores = []
kf = KFold(n_splits=cv)
# consider all the alpha values in domain
for a in domain:
    # define a model with Lasso
    p = Pipeline([('scaler', StandardScaler()),('regression', Lasso(alpha=a))])
    xval_err = 0
    # for every train-test pair, evaluate on the test set the error of the model fitted on the training set
    for k, (train_index, test_index) in enumerate(kf.split(X,y)):
        p.fit(X[train_index], y[train_index])
        y1 = p.predict(X[test_index])
        err = y1 - y[test_index]
        xval_err += np.dot(err,err)
    # compute the mean error
    score = xval_err/X.shape[0]
    scores.append([a,score])
scores = np.array(scores)
# + colab={} colab_type="code" id="kAlV30iWLUDv" outputId="41734d87-1e42-48f7-cfcd-35b778522c3b"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.plot(scores[:,0], scores[:,1])
plt.xlabel(r'$\alpha$')
plt.ylabel('MSE')
plt.title(r'MSE as $\alpha$ varies, for Lasso')
plt.show()
# + colab={} colab_type="code" id="2d8x05mbLUEA" outputId="0a41d0de-456d-4ea3-80ab-6e2e39860265"
min_index = np.argmin(scores[:,1])
print('Best value for alpha: {0:.5f}. MSE={1:.3f}'.format(scores[min_index,0], scores[min_index,1]))
# + [markdown] colab_type="text" id="XjkUwEG8LUEC"
# Using GridSearchCV
# + colab={} colab_type="code" id="xQ90VoqGLUED"
domain = np.linspace(0,10,100)
param_grid = [{'regression__alpha': domain}]
p = Pipeline([('scaler', StandardScaler()),('regression', Lasso())])
clf = GridSearchCV(p, param_grid, cv=5, scoring='neg_mean_squared_error')
clf = clf.fit(X,y)
sc = -clf.cv_results_['mean_test_score']
# + colab={} colab_type="code" id="7xWskxA7LUEG" outputId="f9dc6a3a-1e55-4d21-e898-9ed06ae769fb"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.plot(domain,sc)
plt.xlabel(r'$\alpha$')
plt.ylabel('MSE')
plt.title(r'MSE as $\alpha$ varies, for Lasso')
plt.show()
# + colab={} colab_type="code" id="AGvgy--KLUEI" outputId="008111cc-83f4-414c-d260-b547e56e7f72"
min_index = np.argmin(sc)
print('Best value for alpha: {0:.5f}. MSE={1:.3f}'.format(domain[min_index], sc[min_index]))
# + [markdown] colab_type="text" id="sRlwftSGLUEJ"
# Using LassoCV, which searches for the best value of $\alpha$ by evaluating the score over a set of candidate values via cross validation.
# + colab={} colab_type="code" id="-vPgG9HyLUEK"
domain=np.linspace(0,10,100)
p = Pipeline([('scaler', StandardScaler()),('regression', LassoCV(cv=10, alphas=domain))])
r = p.fit(X, y)
scores = np.mean(r.named_steps['regression'].mse_path_, axis=1)
# + colab={} colab_type="code" id="JTkdRiGhLUEL" outputId="6793e535-d5f3-4e6e-83f8-547d2148e0d3"
plt.figure(figsize=(16, 8))
plt.plot(r.named_steps['regression'].alphas_, scores)
plt.xlabel(r'$\alpha$')
plt.ylabel('cross validation score')
plt.tight_layout()
plt.show()
# + colab={} colab_type="code" id="yCbKWZ7ULUEN" outputId="00646dab-68a3-4baf-d278-0371a8a008da"
best_alpha = r.named_steps['regression'].alpha_
print('Best value of alpha: {0:.5f}'.format(best_alpha))
i, = np.where(r.named_steps['regression'].alphas_ == best_alpha)
print('MSE: {0:.5f}'.format(scores[i][0]))
# + colab={} colab_type="code" id="04lrHyLyLUEP" outputId="5b1748f3-1672-4754-b486-353da1be76a6"
r.named_steps['regression'].coef_
# + [markdown] colab_type="text" id="r9uv4O4ULUER"
# Evaluate Lasso with the value found for $\alpha$ over the whole dataset
# + colab={} colab_type="code" id="2wbu_tUbLUER" outputId="8cdf778c-f32c-4d97-b44e-8f5f34ddf1bf"
p = Pipeline([('scaler', StandardScaler()),('regression', Lasso(alpha = best_alpha))])
scores = cross_val_score(estimator=p, X=X, y=y, cv=20, scoring='neg_mean_squared_error')
print('MSE: {0:.3f}'.format(-scores.mean()))
# + [markdown] colab_type="text" id="8HIFkbFOLUET"
# ### Ridge
# + [markdown] colab_type="text" id="pCRqGwfnLUEU"
# Grid search over values of alpha for Ridge
# + colab={} colab_type="code" id="HN3E3VxZLUEV"
domain = np.linspace(80,120,100)
cv = 10
scores = []
kf = KFold(n_splits=cv)
for a in domain:
p = Pipeline([('scaler', StandardScaler()),('regression', Ridge(alpha=a))])
xval_err = 0
for k, (train_index, test_index) in enumerate(kf.split(X,y)):
p.fit(X[train_index], y[train_index])
y1 = p.predict(X[test_index])
err = y1 - y[test_index]
xval_err += np.dot(err,err)
score = xval_err/X.shape[0]
scores.append([a,score])
scores = np.array(scores)
# + colab={} colab_type="code" id="Kdi9vTo5LUEX" outputId="164f42a5-8c3e-47ea-8045-97049a923c07"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.plot(scores[:,0], scores[:,1])
plt.xlabel(r'$\alpha$')
plt.ylabel('MSE')
plt.title(r'MSE as a function of $\alpha$ (Ridge)')
plt.show()
# + colab={} colab_type="code" id="NQBRXCm_LUEb" outputId="5b805934-63fc-4781-b2f4-06259d5e0274"
min_index = np.argmin(scores[:,1])
best_alpha = scores[min_index,0]
print('Best value for alpha: {0:.5f}. MSE={1:.3f}'.format(scores[min_index,0], scores[min_index,1]))
# + [markdown] colab_type="text" id="19v7tiI2LUEe"
# Apply to the dataset with the selected $\alpha$
# + colab={} colab_type="code" id="uhptAsMXLUEe" outputId="913d1d67-e752-4072-c687-36104da8965a"
p = Pipeline([('scaler', StandardScaler()),('regression', Ridge(alpha = best_alpha))])
r = p.fit(X, y)
scores = cross_val_score(estimator=p, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
print('alpha: {0:.3f}, MSE: {1:.3f}'.format(best_alpha, -scores.mean()))
# + [markdown] colab_type="text" id="jHRPjs-QLUEg"
# Using GridSearchCV
# + colab={} colab_type="code" id="PW0fB4x_LUEg"
domain = np.linspace(80,120,100)
param_grid = [{'regression__alpha': domain}]
p = Pipeline([('scaler', StandardScaler()),('regression', Ridge())])
clf = GridSearchCV(p, param_grid, cv=10, scoring='neg_mean_squared_error')
clf = clf.fit(X,y)
scores = -clf.cv_results_['mean_test_score']
# + colab={} colab_type="code" id="bxbCYRTuLUEi" outputId="9f7b9229-514b-46f0-e8ae-65040b55fde2"
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
plt.plot(domain,scores)
plt.xlabel(r'$\alpha$')
plt.ylabel('MSE')
plt.title(r'MSE as a function of $\alpha$ (Ridge)')
plt.show()
# + colab={} colab_type="code" id="eIQj0gPlLUEj" outputId="9cbbc2ec-6e1a-4d90-efe1-64d425d0c689"
min_index = np.argmin(scores)
print('Best value for alpha: {0:.5f}. MSE={1:.3f}'.format(domain[min_index], scores[min_index]))
# + [markdown] colab_type="text" id="obkrxSSpLUEo"
# Apply to the dataset with the selected $\alpha$
# + colab={} colab_type="code" id="J2BspghOLUEo" outputId="5691e6b1-6853-4aa6-d842-3bede02a1d25"
p = Pipeline([('scaler', StandardScaler()),('regression', Ridge(alpha = best_alpha))])
r = p.fit(X, y)
scores = cross_val_score(estimator=p, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
print('alpha: {0:.3f}, MSE: {1:.3f}'.format(best_alpha, -scores.mean()))
# + [markdown] colab_type="text" id="4Tqs9sR2LUEq"
# Using RidgeCV, which searches for the best value of $\alpha$ by evaluating the cross-validation score over a set of candidate values
# + colab={} colab_type="code" id="iTVATZwZLUEq"
domain = np.linspace(0.1, 10, 100)
p = Pipeline([('scaler', StandardScaler()),('regression', RidgeCV(alphas=domain, store_cv_values = True))])
r = p.fit(X, y)
scores = np.mean(r.named_steps['regression'].cv_values_, axis=0)
# + colab={} colab_type="code" id="PfIO2uxuLUEr" outputId="82708b4c-0928-490d-d0b7-2f3db6d137ae"
plt.figure(figsize=(16, 8))
plt.plot(domain, scores)
plt.xlabel(r'$\alpha$')
plt.ylabel('cross-validation MSE')
plt.tight_layout()
plt.show()
# + colab={} colab_type="code" id="-sz6GwGfLUEt" outputId="2653a4ab-915b-486f-8af4-5482f9affeb5"
best_alpha = p.named_steps['regression'].alpha_
print('Best value for alpha: {0:.6f}'.format(best_alpha))
i, = np.where(domain == best_alpha)
print('score: {0:.3f}'.format(scores[i][0]))
# + [markdown] colab_type="text" id="UNy1fvb2LUEu"
# Evaluate Ridge with the selected $\alpha$ on the whole dataset
# + colab={} colab_type="code" id="LQuzv8xPLUEv" outputId="4d426f89-9cd9-449c-9e92-8f30222b3d44"
p = Pipeline([('scaler', StandardScaler()),('regression', Ridge(alpha = best_alpha))])
r = p.fit(X, y)
scores = cross_val_score(estimator=p, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
print('alpha: {0:.3f}, MSE: {1:.3f}'.format(best_alpha, -scores.mean()))
# + colab={} colab_type="code" id="3nyzmtwNLUEw" outputId="72761f36-8137-44f8-f39c-cbc1eadfe60c"
r.named_steps['regression'].coef_
# + [markdown] colab_type="text" id="UDx5mHDrLUEx"
# ### Elastic net
# + [markdown] colab_type="text" id="k5kVlYdoLUEx"
# 2-d grid search over values of $\alpha$ and $\gamma$ (ElasticNet's `l1_ratio`)
# + colab={} colab_type="code" id="nFDgUlMCLUEy"
scores = []
for a in np.linspace(0,1,10):
for l in np.linspace(0,1,10):
p = Pipeline([('scaler', StandardScaler()),('regression', ElasticNet(alpha=a, l1_ratio=l))])
score = cross_val_score(estimator=p, X=X, y=y, cv=5, scoring='neg_mean_squared_error')
scores.append([a,l,-score.mean()])
# + colab={} colab_type="code" id="0Y3gKi5VLUEz" outputId="fc164837-c661-4be8-d015-a2dc3964b0d7"
scores = np.array(scores)
min_index = np.argmin(scores[:,2])
best_alpha = scores[min_index, 0]
best_gamma = scores[min_index, 1]
print(r"Migliore coppia: alpha={0:.2f}, gamma={1:.2f}. MSE={2:.3f}".format(best_alpha,best_gamma, scores[min_index,2]))
# + colab={} colab_type="code" id="iMjKOfU-LUE0" outputId="5940ef2e-be53-4929-8005-a5ca828c126f"
p = Pipeline([('scaler', StandardScaler()),('regression', ElasticNet(alpha = best_alpha, l1_ratio=best_gamma))])
r = p.fit(X, y)
scores = cross_val_score(estimator=p, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
print('alpha: {0:.3f}, gamma: {1:.3f}; MSE: {2:.3f}'.format(best_alpha, best_gamma, -scores.mean()))
# + [markdown] colab_type="text" id="y4Pi0frgLUE1"
# Using GridSearchCV
# + colab={} colab_type="code" id="m46E7DZVLUE1"
param_grid = [{'regression__alpha': np.linspace(0,1,10), 'regression__l1_ratio': np.linspace(0,1,10)}]
p = Pipeline([('scaler', StandardScaler()),('regression', ElasticNet())])  # hyperparameters are set by GridSearchCV
clf = GridSearchCV(p, param_grid, cv=5, scoring='neg_mean_squared_error')
clf = clf.fit(X,y)
sc = -clf.cv_results_['mean_test_score']
# + colab={} colab_type="code" id="Z5TEh8u3LUE3" outputId="6afe7bca-88ee-47d8-c3af-69d67865e716"
best_alpha = clf.best_params_['regression__alpha']
best_gamma = clf.best_params_['regression__l1_ratio']
print(r"Migliore coppia: alpha={0:.2f}, gamma={1:.2f}. MSE={2:.3f}".format(best_alpha,
best_gamma, -clf.best_score_))
# + colab={} colab_type="code" id="cKvNBVOvLUE5" outputId="0688aab9-5bde-46bb-a40c-97c7bd142dfd"
p = Pipeline([('scaler', StandardScaler()),('regression', ElasticNet(alpha = best_alpha, l1_ratio=best_gamma))])
r = p.fit(X, y)
scores = cross_val_score(estimator=p, X=X, y=y, cv=10, scoring='neg_mean_squared_error')
print('alpha: {0:.3f}, gamma: {1:.3f}; MSE: {2:.3f}'.format(best_alpha, best_gamma, -scores.mean()))
# + colab={} colab_type="code" id="DznGMp-XLUE7"
|
{"hexsha": "1a7811597a201d623b3f650a02d6077f544c7ea5", "size": 42778, "ext": "py", "lang": "Python", "max_stars_repo_path": "codici/regression.py", "max_stars_repo_name": "tvml/ml2021", "max_stars_repo_head_hexsha": "d72a6762af9cd12019d87237d061bbb39f560da9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "codici/regression.py", "max_issues_repo_name": "tvml/ml2021", "max_issues_repo_head_hexsha": "d72a6762af9cd12019d87237d061bbb39f560da9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codici/regression.py", "max_forks_repo_name": "tvml/ml2021", "max_forks_repo_head_hexsha": "d72a6762af9cd12019d87237d061bbb39f560da9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.695607763, "max_line_length": 325, "alphanum_fraction": 0.6964561223, "include": true, "reason": "import numpy,import scipy", "num_tokens": 15029}
|
# Copyright 2019 Antonio Medrano
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Antonio Medrano
import sys  # needed for sys.argv in the __main__ block below
import time
import numpy as np
import readDataFiles
from scipy.spatial.distance import cdist
from gurobipy import *
setParam('OutputFlag', 0) # mute solver meta-info
def Run_pCenterLSCP():
"""Example of complete p-Center program with the Gurobi API"""
m = Model()
start_time = time.time()
distances, distMatrix = computeDistances()
# p = numSites, SD = 0 is a trivial solution
print(' p, SD')
p = numSites
SD = 0
displaySolution(p, SD)
solution = np.empty([numSites, 2])
# solution[:,0] = range(1, numSites+1)
# solution[p-1,1] = 0
currP = numSites
SD = distances[0]
C = computeCoverageMatrix(distMatrix, SD)
BuildModel(m)
SolveModel(m)
p = m.objVal
while (p < currP):
currP -= 1
solution[currP-1,1] = SD
displaySolution(currP, SD)
for k in range(1,len(distances)):
SD = distances[k]
        diff, C = updateCoverCoefficients(distMatrix, SD, C)
for i in range(numDemands):
for j in diff[i]:
m.chgCoeff(m.getConstrByName("c[%d]" % i), X[j], 1)
SolveModel(m)
# get the solution and clear the solver
p = m.objVal
# check the output
while (p < currP):
currP -= 1
solution[currP-1,1] = SD
displaySolution(currP, SD)
# terminate the search when p == 1
if (p == 2):
p = 1
SD = np.amin(np.amax(distMatrix,0))
solution[p-1,1] = SD
displaySolution(p, SD)
iters = k+1
break
if (p == 1):
iters = k
break
total_time = time.time()-start_time
#print solution
print()
print('%d LSCP distances evaluated' % (iters))
print('Total problem solved in %f seconds' % (total_time))
print()
# plot.plotTradeoff(file, solution)
def computeDistances():
# Pull out just the site/demand IDs from the data
siteIDs = sites[:,0]
# Pull out just the coordinates from the data
xyPointArray = sites[:,[1,2]]
A = xyPointArray
B = A
    # Compute the distance matrix: ceiling of the Euclidean distances (integers)
    distMatrix = np.ceil(cdist(A, B,'euclidean')).astype(int)
distances = np.unique(distMatrix)
return distances, distMatrix
def computeCoverageMatrix(distMatrix, SD):
#declare a couple variables
global cover_rows
# Determine neighborhood of demands within SD of sites
C = (distMatrix <= SD).astype(int)
# Convert coverage to array of nonzero elements in each row
cover_rows = [np.nonzero(t)[0] for t in C]
return C
def updateCoverCoefficients(distMatrix, SD, B):
    """Recompute coverage at the new SD and report which coefficients changed."""
    # Determine neighborhood of demands within SD of sites
    C = (distMatrix <= SD).astype(int)
    # each row of (C - B) holds the sites that newly cover that demand
    diff = [np.nonzero(t)[0] for t in (C-B)]
    return diff, C
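# Editor's illustrative sketch (numpy only, never called by the solver): as SD
# grows, (C_new - C_old) is a 0/1 matrix, and the nonzero columns per row are
# exactly the site indices whose coverage coefficients must be switched on
# via m.chgCoeff in the main loop.
def _coverage_diff_example():
    dist = np.array([[0, 3, 7],
                     [3, 0, 4]])
    C_old = (dist <= 3).astype(int)                # coverage at SD = 3
    diff, C_new = updateCoverCoefficients(dist, 4, C_old)
    assert [list(d) for d in diff] == [[], [2]]    # demand 1 newly covered by site 2
    return diff, C_new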
def BuildModel(m):
global X
# DECLARE VARIABLES:
# Facility Site binary decision variables X
# Each has a coefficient of 1 in the objective
X = m.addVars(numSites,
vtype=GRB.BINARY,
obj=1)
# Define Coverage Constraints:
for i in range(numDemands):
m.addConstr(quicksum(X[j] for j in cover_rows[i]) >= 1, "c[%d]" % i)
# The objective is to minimize the number of located facilities
m.modelSense = GRB.MINIMIZE
# m.update()
# print 'Number of variables = %d' % solver.NumVariables()
# print 'Number of constraints = %d' % solver.NumConstraints()
# print
return 0
def SolveModel(m):
"""Solve the problem and print the solution."""
# m.Params.ResultFile = "output.sol"
m.optimize()
def displaySolution(p, SD):
# The objective value and the minimum service distance
print('%3d, %d' % (p, SD))
def read_problem(file):
global numSites
global numDemands
global sites
try:
if (file[-3:].lower() == "dat"):
sites = readDataFiles.readDat(file)
elif (file[-3:].lower() == "tsp"):
sites = readDataFiles.readTSP(file)
except IOError:
print('Error reading file')
raise
numSites = sites.shape[0]
numDemands = numSites
# plot.plotData(sites)
print('%d locations' % (numSites))
def main(unused_argv):
print ('---- CPC-LSCP with Gurobi -----')
Run_pCenterLSCP()
""" Main will take in 1 argument: Data to Use """
if __name__ == '__main__':
if len(sys.argv) > 1 and len(sys.argv) <= 2:
file = '../data/' + sys.argv[1]
print()
print("Problem instance from: ", file)
read_problem(file)
main(sys.argv[1])
elif len(sys.argv) > 0 and len(sys.argv) <= 1:
file = '../data/swain.dat'
print()
print("Problem instance from: ", file)
read_problem(file)
main('swain.dat')
else:
print("Please Pass: Data to Use")
print("Problem not executed!")
|
{"hexsha": "07d333236f98abe08fdd0e9fa7b988a126b941e9", "size": 5656, "ext": "py", "lang": "Python", "max_stars_repo_path": "gurobi/CPCi-LSCP.py", "max_stars_repo_name": "antoniomedrano/p-center", "max_stars_repo_head_hexsha": "819013b0ab19114c6371a7b8eb81124fe91f5dad", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-02-18T15:14:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-08T13:58:54.000Z", "max_issues_repo_path": "gurobi/CPCi-LSCP.py", "max_issues_repo_name": "antoniomedrano/p-center", "max_issues_repo_head_hexsha": "819013b0ab19114c6371a7b8eb81124fe91f5dad", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-03-09T22:16:49.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-09T22:16:49.000Z", "max_forks_repo_path": "gurobi/CPCi-LSCP.py", "max_forks_repo_name": "antoniomedrano/p-center", "max_forks_repo_head_hexsha": "819013b0ab19114c6371a7b8eb81124fe91f5dad", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-20T21:39:47.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-20T21:39:47.000Z", "avg_line_length": 26.5539906103, "max_line_length": 76, "alphanum_fraction": 0.5968882603, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1473}
|
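# NOTE (editorial): excerpt from Metalhead.jl's src/densenet.jl; it assumes the
# enclosing module brings Flux into scope (Chain, Conv, BatchNorm, MeanPool,
# MaxPool, Dense, softmax) together with @functor and the Metalhead weight loader.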
struct Bottleneck
layer
end
@functor Bottleneck
Bottleneck(in_planes, growth_rate) = Bottleneck(Chain(BatchNorm(in_planes, relu),
Conv((1, 1), in_planes => 4growth_rate),
BatchNorm(4growth_rate, relu),
Conv((3, 3), 4growth_rate => growth_rate, pad = (1, 1))))
(b::Bottleneck)(x) = cat(b.layer(x), x, dims = 3)
Transition(chs::Pair{<:Int,<:Int}) = Chain(BatchNorm(chs[1], relu),
Conv((1, 1), chs),
MeanPool((2, 2)))
function _make_dense_layers(block, in_planes, growth_rate, nblock)
local layers = []
for i in 1:nblock
push!(layers, block(in_planes, growth_rate))
in_planes += growth_rate
end
Chain(layers...)
end
function _densenet(nblocks = [6, 12, 24, 16]; block = Bottleneck, growth_rate = 32, reduction = 0.5, num_classes = 1000)
num_planes = 2growth_rate
layers = []
push!(layers, Conv((7, 7), 3 => num_planes, stride = (2, 2), pad = (3, 3)))
push!(layers, BatchNorm(num_planes, relu))
push!(layers, MaxPool((3, 3), stride = (2, 2), pad = (1, 1)))
for i in 1:3
push!(layers, _make_dense_layers(block, num_planes, growth_rate, nblocks[i]))
num_planes += nblocks[i] * growth_rate
out_planes = Int(floor(num_planes * reduction))
push!(layers, Transition(num_planes => out_planes))
num_planes = out_planes
end
push!(layers, _make_dense_layers(block, num_planes, growth_rate, nblocks[4]))
num_planes += nblocks[4] * growth_rate
push!(layers, BatchNorm(num_planes, relu))
Chain(layers..., MeanPool((7, 7)),
x->reshape(x, :, size(x, 4)),
Dense(num_planes, num_classes), softmax)
end
function densenet_layers()
weight = Metalhead.weights("densenet.bson")
weights = Dict{Any,Any}()
for ele in keys(weight)
weights[string(ele)] = convert(Array{Float64,N} where N, weight[ele])
end
ls = _densenet()
ls[1].weight .= weights["conv1_w_0"][end:-1:1,:,:,:][:,end:-1:1,:,:]
ls[2].β .= weights["conv1/bn_b_0"]
ls[2].γ .= weights["conv1/bn_w_0"]
l = 4
for (c, n) in enumerate([6, 12, 24, 16])
for i in 1:n
for j in [2, 4]
ls[l][i].layer[j].weight .= weights["conv$(c + 1)_$i/x$(j ÷ 2)_w_0"][end:-1:1,:,:,:][:,end:-1:1,:,:]
ls[l][i].layer[j - 1].β .= weights["conv$(c + 1)_$i/x$(j ÷ 2)/bn_b_0"]
ls[l][i].layer[j - 1].γ .= weights["conv$(c + 1)_$i/x$(j ÷ 2)/bn_w_0"]
end
end
l += 2
end
for i in [5, 7, 9] # Transition Block Conv Layers
ls[i][2].weight .= weights["conv$(i ÷ 2)_blk_w_0"][end:-1:1,:,:,:][:,end:-1:1,:,:]
ls[i][1].β .= weights["conv$(i ÷ 2)_blk/bn_b_0"]
ls[i][1].γ .= weights["conv$(i ÷ 2)_blk/bn_w_0"]
end
ls[end - 1].W .= transpose(dropdims(weights["fc6_w_0"], dims = (1, 2))) # Dense Layers
ls[end - 1].b .= weights["fc6_b_0"]
return ls
end
struct DenseNet <: ClassificationModel{ImageNet.ImageNet1k}
layers::Chain
end
DenseNet() = DenseNet(densenet_layers())
Base.show(io::IO, ::DenseNet) = print(io, "DenseNet()")
@functor DenseNet
(m::DenseNet)(x) = m.layers(x)
|
{"hexsha": "45d8cc1d1b8f210e8c9d303438f0dd96adc0e7c1", "size": 3327, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/densenet.jl", "max_stars_repo_name": "jeremiedb/Metalhead.jl", "max_stars_repo_head_hexsha": "271927afc98ce9353867b75dbfd8b9911dfc2627", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/densenet.jl", "max_issues_repo_name": "jeremiedb/Metalhead.jl", "max_issues_repo_head_hexsha": "271927afc98ce9353867b75dbfd8b9911dfc2627", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/densenet.jl", "max_forks_repo_name": "jeremiedb/Metalhead.jl", "max_forks_repo_head_hexsha": "271927afc98ce9353867b75dbfd8b9911dfc2627", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7741935484, "max_line_length": 120, "alphanum_fraction": 0.5545536519, "num_tokens": 1059}
|
import numpy as np
import h5py
import os
from classification.classifier import Classifier
class LinearMachine(Classifier):
def __init__(self, N, M, name='linear machine'):
super().__init__(N, M, name, _type=5)
self.weights = np.zeros((M, N))
def _predict(self, x):
return np.argmax(np.dot(self.weights, x))
    def fit(self, X, Y, steps=1000, *args, **kwargs):
        pi = np.random.normal(scale=1, size=(self.M, self.N))
        streak_pi, streak_w = 0, 0  # current streak vs. best (pocketed) streak
        num_ok_w = 0                # correct predictions of the pocketed weights
        for step in range(1, steps+1):
            k = np.random.randint(low=0, high=len(Y))
            i = np.argmax(np.dot(pi, X[k]))  # index of the winning neuron
            if i == Y[k]:
                streak_pi += 1
                if streak_pi >= streak_w:
                    pi_predictions = np.array([np.argmax(np.dot(pi, x)) for x in X])
                    num_ok_pi = len(np.where(pi_predictions == Y)[0])
                    if num_ok_pi > num_ok_w:
                        # pocket step: keep the best weights found so far
                        streak_w = streak_pi
                        num_ok_w = num_ok_pi
                        self.weights = pi.copy()
                    if num_ok_pi == len(Y):
                        break
            else:
                # demote the wrong winner and restart the streak
                pi[i] = pi[i] - 2*X[k]
                pi = pi + X[k]
                streak_pi = 0
def _save(self, file):
file.create_dataset('weights', self.weights.shape, np.float32, self.weights, compression="gzip")
def _load(self, file):
self.weights = np.array(file['weights'])
def save(self, filename, absolute=False):
path = os.path.join(os.getcwd(), filename) if not absolute else filename
file = h5py.File(path+".h5", 'w')
name_ASCII = np.array([ord(x) for x in self.name], np.ubyte) # name of the model saved as array of ASCII values
file.create_dataset('name', name_ASCII.shape, np.ubyte, name_ASCII, compression="gzip")
file.create_dataset('weights', self.weights.shape, np.float32, self.weights, compression="gzip")
file.close()
def load(self, filename, absolute=False):
path = filename if absolute else os.path.join(os.getcwd(), filename)
file = h5py.File(path+'.h5', 'r')
self.weights = np.array(file['weights'])
self.name = ''.join([chr(x) for x in file['name']])
self.M, self.N = self.weights.shape
file.close()
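if __name__ == '__main__':
    # Editor's illustrative sketch, not part of the library: fit the
    # pocket-style linear machine on a toy, linearly separable problem.
    # Assumes the Classifier base class imports cleanly; the data is made up.
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(-2, 0.5, (20, 2)), rng.normal(2, 0.5, (20, 2))])
    Y = np.array([0] * 20 + [1] * 20)
    lm = LinearMachine(N=2, M=2)
    lm.fit(X, Y, steps=2000)
    preds = np.array([lm._predict(x) for x in X])
    print('training accuracy: {:.2f}'.format(np.mean(preds == Y)))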
|
{"hexsha": "e630a825b2eb4f44d744c1d24ddcda2647768421", "size": 2040, "ext": "py", "lang": "Python", "max_stars_repo_path": "classification/linear_machines.py", "max_stars_repo_name": "Chappie733/MLPack", "max_stars_repo_head_hexsha": "223b142ff22dc35b9122183435afdc473a2c0b47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classification/linear_machines.py", "max_issues_repo_name": "Chappie733/MLPack", "max_issues_repo_head_hexsha": "223b142ff22dc35b9122183435afdc473a2c0b47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classification/linear_machines.py", "max_forks_repo_name": "Chappie733/MLPack", "max_forks_repo_head_hexsha": "223b142ff22dc35b9122183435afdc473a2c0b47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1724137931, "max_line_length": 114, "alphanum_fraction": 0.643627451, "include": true, "reason": "import numpy", "num_tokens": 573}
|
""" Fit point charges to a HORTON costfunction under constraints.
Copyright 2019 Simulation Lab
University of Freiburg
Author: Lukas Elflein <elfleinl@cs.uni-freiburg.de>
Based on legacy code by Johannes Hormann
"""
import argparse
import h5py
import warnings
import ase.io
import sympy
import parmed as pmd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from smamp.insertHbyList import insertHbyList
from smamp.tools import read_atom_numbers
def create_structure(infile_pdb, infile_top, hydrogen_file, strip_string=':SOL,CL'):
"""Build ase-format atomic structure descriptions.
Especially useful is the dictionary listing the relationship between ase indices and atom names.
Args:
infile_pdb (str): path to the gromacs structure file
infile_top (str): path to the gromacs topology file
hydrogen_file (str): file with explicit hydrogen atom description
strip_string (str): atoms to be removed from .pdb file
Returns:
pmd_struct:
pmd_top:
ase2pmd (dict): A map of ase indices to atom names
"""
implicitHbondingPartners = read_atom_numbers(hydrogen_file)
ua_ase_struct = ase.io.read(infile_pdb)
ua_pmd_struct = pmd.load_file(infile_pdb)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ua_pmd_top = pmd.gromacs.GromacsTopologyFile(infile_top, parametrize=False)
# strip water and electrolyte from system (if not yet done in .top)
ua_pmd_top.strip(strip_string)
ua_pmd_top.box = ua_pmd_struct.box # Needed because .pdb contains box info
ua_pmd_top.positions = ua_pmd_struct.positions
ua_names = [ a.name for a in ua_pmd_top.atoms ]
ua_residues = [ a.residue.name for a in ua_pmd_top.atoms ]
ua_ase_index = np.arange(len(ua_ase_struct))
ua_atom_residue_list = list(zip(ua_names, ua_residues))
ua_ase2pmd = dict(zip(ua_ase_index, ua_atom_residue_list))
ua_pmd2ase = dict(zip(ua_atom_residue_list, ua_ase_index))
return ua_pmd_struct, ua_pmd_top, ua_ase2pmd
def constrained_minimize(A, B, D=None, Q=None):
"""Find the minimum of the HORTON cost function.
The cost function is parametrized with matrix A and vector B.
In the unconstrained case, the minimization is equivalent to solving
A x - B = 0
for the charges x.
In the case of constraints, we have to solve the problem
A D x = B
D 0 l Q
with D being the logical constraints, and Q the respective charge values.
The function first stacks (A, D) and (B, Q) to resemble
the unconstrained case formally, then solves the constrained equation.
Args:
A (np.array): Matrix with quadratic terms of cost fucnction
B (np.array): Vector with linear tearms of cost function
D (np.array): Matrix with constraint logic
Q (np.array): Vector with constraint charges
Returns:
charges (np.array): Vector of optimal charges
langrange_forces (np.array): Vector of forces neccesary constrain charges
"""
# Default to zero total charge constraint
if D is None and Q is None:
Q = np.array([0])
D = np.ones(B.shape[0])
# Cast everything to arrays
A = np.atleast_2d(A)
D = np.atleast_2d(D)
B = np.atleast_1d(B)
Q = np.atleast_1d(Q)
# For old versions of numpy, block is not available. Fallback to bmat:
if float(np.version.version[2:]) < 13:
stack = np.bmat
else:
stack = np.block
# Stack the HORTON matrices with the constraints
zeros = np.zeros((Q.shape[0], Q.shape[0]))
A_con = stack([[A, D.T],
[D, zeros]])
B_con = stack([B, Q]).T
x = np.linalg.solve(A_con, B_con)
charges = x[:len(B)]
lagrange_forces = x[len(B):]
return charges, lagrange_forces
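def _constrained_minimize_example():
    """Editor's illustrative doctest (not used by the fitting pipeline).
    For A = diag(2, 2) and B = (1, 1) the unconstrained optimum is (0.5, 0.5);
    the default zero-total-charge constraint forces (0, 0) instead, with a
    Lagrange force of 1 on the single constraint.
    >>> q, f = constrained_minimize(np.diag([2.0, 2.0]), np.array([1.0, 1.0]))
    >>> bool(np.allclose(q, [0.0, 0.0]) and np.allclose(f, [1.0]))
    True
    """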
def unconstrained_minimize(A, B):
"""Find the unconstrained minimum of the HORTON cost function A x - B = 0.
Args:
        A (np.array): Matrix with quadratic terms of the cost function
        B (np.array): Vector with linear terms of the cost function
Returns:
charges (np.array): Vector of optimal charges
"""
charges = np.linalg.solve(A, B)
return(charges)
def parse_charge_groups(file_name, ase2pmd):
"""Read the charge group definition file."""
# first we read in the textfile
df = pd.read_csv(file_name, sep=',', header=None,
comment='#', names=['atom','cg'])
    # Charge groups are defined independently of the residue.
# Find unique residue names first
residue = []
for ase_index, atom_residuum in ase2pmd.items():
residue += [atom_residuum[1]]
residue = list(set(residue))
# Atoms appear in multiple charge groups.
# In the end, we want something like
# {cg1: [1, 5, 8]}
charge_groups = {}
for res_index in range(len(residue)):
for atom in df.atom:
# cg is the charge group of the current atom
# cg = df.loc[df.atom == atom].cg.values[0] - 1 + res_index * df.cg.max()
cg = df.loc[df.atom == atom].cg.values[0] + res_index * 1000
# ase2pmd is formatted like
# 0: ('CE1', 'terB')
for ase_index, atom_residuum in ase2pmd.items():
# If the atom names match, pick the ase index
if atom in atom_residuum:
if residue[res_index] in atom_residuum:
if not cg in charge_groups.keys():
charge_groups[cg] = []
charge_groups[cg] += [ase_index]
# Sort everything
for ase_index in charge_groups.keys():
charge_groups[ase_index].sort()
return charge_groups
def parse_group_charges(file_name):
"""Read the file specifying total charges of each charge group."""
group_q = pd.read_csv(file_name, sep=',', header=None, comment='#',
names=['charge'], index_col=0)
group_q.charge = group_q.charge.astype(float)
return group_q
def parse_symmetry(file_name):
"""Read the file containing pair-symmetry constraints."""
df = pd.read_csv(file_name, sep=',', header=None, comment='#')
symm_names = df.values.tolist()
return symm_names
def symmetry_names_to_index_groups(symm_names, ase2pmd):
"""Transform atom-name based constraints to index-based constraints."""
symm_groups = []
for i in range(len(symm_names)):
names = symm_names[i]
symm_groups += [[]]
for ase_index, atom_residuum in ase2pmd.items():
# If the atom names match, pick the ase index
atom_name = atom_residuum[0]
if names[0] == atom_name:
# Every member of this group is supposed to have equal charge
symm_groups[i] += [ase_index]
if names[1] == atom_name:
symm_groups[i] += [ase_index]
return symm_groups
def symmetry_groups_to_matrix(symm_groups, n_atoms):
"""Generate matrix-constraints from groups of same-charge indices.
>>> groups = [[0, 2, 3]]
>>> symmetry_groups_to_matrix(groups, n_atoms=5)[0]
array([[ 1, 0, -1, 0, 0],
[ 1, 0, 0, -1, 0]])
"""
symm_list = []
for group in symm_groups:
for atom_index in group[1:]:
matrix_row = np.zeros(n_atoms, dtype=int)
matrix_row[group[0]] = 1
matrix_row[atom_index] = -1
symm_list += [matrix_row]
symmetry_matrix = np.array(symm_list)
symmetry_q = np.zeros(symmetry_matrix.shape[0], dtype=int)
return symmetry_matrix, symmetry_q
def make_symmetry_constraints(symm_names, ase2pmd):
"""Transform atom-name symmetry constraints to ase-index matrix format."""
symm_groups = symmetry_names_to_index_groups(symm_names, ase2pmd)
n_atoms = len(ase2pmd)
D_matrix, Q_vector = symmetry_groups_to_matrix(symm_groups, n_atoms)
return D_matrix, Q_vector
def make_group_constraints(charge_groups, group_q, n_atoms):
"""Transform atom-name group charge group constraints to ase-index matrix form."""
# Initialize empty arrays
D_matrix = None
Q_vector = None
# Fill in constraint values for every charge group
for group_index in charge_groups.keys():
cg = charge_groups[group_index]
# Note: Charge groups are [1, 2, ...], np indices are [0, 1, ..]
# 1 means that the sum of q_i in the charge group is unweighted
constraint = np.zeros((1, n_atoms))
constraint[0, cg] = 1
if D_matrix is None:
D_matrix = constraint.copy()
else:
D_matrix = np.concatenate((D_matrix, constraint), axis=0)
# Now we need to specify the total charge of the group in a vector
        # Charge groups defined in file are numbered 1..11, but exist on multiple residues.
# Thus, we map group indices back from 1001..1011 to 1..11:
q_index = group_index % 1000
total_group_charge = group_q.loc[q_index].values[0]
if Q_vector is None:
Q_vector = np.atleast_1d(total_group_charge).copy()
else:
Q_vector = np.concatenate((Q_vector, np.atleast_1d(total_group_charge)))
return D_matrix, Q_vector
def make_atom_name_constraints(ase2pmd):
"""Construct constraints for atoms of same name to have equal charge across residues."""
# Extract unique atom names
unique_names = []
for ase_index, atom_residuum in ase2pmd.items():
if atom_residuum[0] not in unique_names:
unique_names += [atom_residuum[0]]
name_groups = {}
for name in unique_names:
name_groups[name] = []
# At which indices do atom names occur?
for name in unique_names:
for ase_index, atom_residuum in ase2pmd.items():
if name in atom_residuum:
name_groups[name] += [ase_index]
# Keep name-groups with at least two members, don't need the rest
groups = []
for name, index_list in name_groups.items():
if len(index_list) > 1:
groups += [index_list]
# Transform the groups to matrix form
groups = np.array(groups)
D_matrix, Q_vector = symmetry_groups_to_matrix(groups, n_atoms=len(ase2pmd))
return D_matrix, Q_vector
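def _atom_name_constraints_example():
    """Editor's illustrative doctest; assumes Python 3.7+ dict ordering.
    Atoms sharing a name across two residues get tied to equal charge:
    >>> ase2pmd = {0: ('CA', 'res1'), 1: ('CB', 'res1'),
    ...            2: ('CA', 'res2'), 3: ('CB', 'res2')}
    >>> D, Q = make_atom_name_constraints(ase2pmd)
    >>> D
    array([[ 1,  0, -1,  0],
           [ 0,  1,  0, -1]])
    >>> Q
    array([0, 0])
    """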
def nonsingular_concat(X, vector):
"""Appends vector to matrix X iff the resulting matrix is nonsingular.
Args:
X (np.array): NxM Matrix to be appended to
        vector (np.array): length-N vector, appended to X as a new row
Returns:
new_X (np.array): Nx(M+1) Matrix or None
"""
# Cast vector to matrix
vector = np.atleast_2d(vector)
# Append vector as new row at bottom of matrix
new_X = np.concatenate((X, vector), axis=0)
# Check if matrix is still non-singular
if new_X.shape[0] == np.linalg.matrix_rank(new_X):
return new_X
else:
return None
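def _nonsingular_concat_example():
    """Editor's illustrative doctest: a linearly dependent row is rejected.
    >>> X = np.array([[1, 0]])
    >>> nonsingular_concat(X, np.array([2, 0])) is None
    True
    >>> nonsingular_concat(X, np.array([0, 1]))
    array([[1, 0],
           [0, 1]])
    """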
def stack_constraints(X, Q_x, Y, Q_y, logging=False):
"""Transform two constraint matrices/vector pairs into a single pair.
Args:
X (np.array): Constraint matrix to be appended to
Y (np.array): Constraint matrix to be conatenated
Q_x (np.array): The constraint charges corresponding to X
Q_y (np.array): Constraint charges corresponding to Y
"""
# All constraints are empty
if all([obj is None for obj in (X, Y, Q_x, Q_y)]):
return X, Q_x
# First constraint set is empty, second one full
if X is None and (Y is not None and Q_y is not None):
return Y, Q_y
# Exactly the first set is non-empty
if (X is not None and Q_x is not None) and Y is None:
return X, Q_x
# Both sets of constraints are non-empty
if all([obj is not None for obj in (X, Y, Q_x, Q_y)]):
con_matrix = X.copy()
con_q = Q_x.copy()
for row in range(Y.shape[0]):
new_matrix = nonsingular_concat(con_matrix, Y[row, :])
if new_matrix is not None:
con_matrix = new_matrix
con_q = np.concatenate((con_q, np.atleast_1d(Q_y[row])))
else:
if logging:
with open('dropped_constraints.log', 'ab') as outfile:
np.savetxt(outfile, Y[row, :], fmt='%d', newline=" ")
outfile.write(b'\n')
return con_matrix, con_q
raise ValueError('Invalid mixture of empty and non-empty constraints')
def get_constraints(args=None, ase2pmd=None, debug=True, **kwargs):
'''Read provided constraint files and convert them into matrix form.'''
if args is not None:
charge_group_file = args.charge_groups
charge_group_charges_file = args.charge_group_charges
symmetry_file = args.symmetry_file
else:
charge_group_file = kwargs['charge_group_file']
charge_group_charges_file = kwargs['charge_group_charges_file']
symmetry_file = kwargs['symmetry_file']
# Constraints for atoms of same name to have same charge
name_matrix, name_q = make_atom_name_constraints(ase2pmd)
# Constraints for atoms of one group to have specified sum of charges
if charge_group_file is not None:
if charge_group_charges_file is None:
err = 'Charge groups defined: {}'.format(charge_group_file)
err += '\n But no total charges were defined.'
raise ValueError(err)
charge_groups = parse_charge_groups(charge_group_file, ase2pmd)
group_q = parse_group_charges(charge_group_charges_file)
n_atoms = len(ase2pmd)
group_matrix, group_q = make_group_constraints(charge_groups, group_q, n_atoms)
else:
group_matrix, group_q = None, None
# Constraints for pair-wise symmetric atoms to have equal charge
if symmetry_file is not None:
symmetry = parse_symmetry(symmetry_file)
symmetry_matrix, symmetry_q = make_symmetry_constraints(symmetry, ase2pmd)
else:
symmetry_matrix, symmetry_q = None, None
# Combine individual matrices to one matrix (enforces non-singularity)
group_symm_matrix, group_symm_q = stack_constraints(group_matrix, group_q,
symmetry_matrix, symmetry_q)
constraint_matrix, constraint_q = stack_constraints(group_symm_matrix, group_symm_q,
name_matrix, name_q)
if debug:
if symmetry_matrix is not None:
np.savetxt('symm_matrix.log', symmetry_matrix, fmt='%d')
if name_matrix is not None:
np.savetxt('name_matrix.log', name_matrix, fmt='%d')
if group_matrix is not None:
np.savetxt('group_matrix.log', group_matrix, fmt='%d')
np.savetxt('group_charges.log', group_q, fmt='%f')
if constraint_matrix is not None:
np.savetxt('constraint_matrix.log', constraint_matrix, fmt='%d')
return constraint_matrix, constraint_q
def read_horton_cost_function(file_name):
"""Extract A and B HORTON cost function matrics from HDF5 binary."""
    cost_function = h5py.File(file_name, 'r')
A = cost_function['cost']['A'][()]
B = cost_function['cost']['B'][()]
return A, B
def parse_command_line():
"""Read file locations from command line interface."""
parser = argparse.ArgumentParser(prog='esp-fit-constrained.py',
                                     description='Estimate charges from a HORTON ESP '
                                                 'cost function under constraints.')
parser.add_argument('-hor', '--horton_cost_function',
help='The location of the HORTON cost function file.',
required=True, metavar='cost.h5')
parser.add_argument('-p', '--pdb_infile',
help='The location of the atomic structure file',
required=True, metavar='snapshot.pdb')
parser.add_argument('-t', '--top_infile',
                        help='The location of the topology file',
required=True, metavar='topol.top')
parser.add_argument('-g', '--charge_groups',
help='The location of the charge group constraints .csv file.',
metavar='atoms_in_charge_group.csv', default=None)
parser.add_argument('-c', '--charge_group_charges',
help='The location of the charge group total charges .csv file.',
metavar='charge_group_total_charge.csv', default=None)
parser.add_argument('-s', '--symmetry_file',
help='The location of the symmetry constraints file.',
metavar='atoms_of_same_charge.csv', default=None)
parser.add_argument('-o', '--output_file',
help='The file where the optimized charges should be written to.',
default='fitted_point_charges.csv', metavar='fitted_point_charges.csv')
parser.add_argument('-hyd', '--hydrogen_file',
help='The hydrogen insertion rules',
default='hydrogen_per_atom.csv', metavar='hydrogen_per_atom.csv')
return parser.parse_args()
def write_charges(q, q_unconstrained, ase2pmd, out_name='fitted_point_charges', plot=False):
"""Write array of charges into .csv output file."""
def number_to_atom_name(i):
return ase2pmd[i][0]
def number_to_residuum(i):
return ase2pmd[i][1]
df = pd.DataFrame(q, columns=['q'])
df['q_unconstrained'] = q_unconstrained
df['indices'] = df.index
df['atom'] = df.indices.apply(number_to_atom_name)
df['residue'] = df.indices.apply(number_to_residuum)
df = df.drop(['indices'], axis=1)
df = df[['atom', 'residue', 'q', 'q_unconstrained']]
df.to_csv(out_name)
if plot:
plt.plot(q, range(len(q)), lw=0, marker='o')
plt.plot(q_unconstrained, range(len(q_unconstrained)), lw=0, marker='o')
plt.show()
return df
def write_forces(forces, logic_constraints, ase2pmd):
"""Write lagrange forces to .csv output file."""
force_constraint = []
if logic_constraints is not None:
forces = np.atleast_2d(forces).T
c = np.concatenate((forces, logic_constraints), axis=1)
c = c[c[:,0].argsort()]
# Sorted forces and constraints
forces = c[:, 0]
l = c[:, 1:]
for i in range(len(l)):
line = l[i]
f = forces[i]
constraint = np.nonzero(line)[0]
readable_con = [f]
for number in constraint:
atom = ase2pmd[number][0] + '/' + ase2pmd[number][1]
readable_con += [atom]
force_constraint += [readable_con]
with open('lagrange_forces.csv', 'w') as outfile:
outfile.write('force, atom names\n')
for entry in force_constraint:
line = '{0:.3f}, '.format(entry[0])
line += ' '.join(entry[1:])
line += '\n'
outfile.write(line)
def main():
'''Read the constraints, transform them into matrix form,
and then use them to fit the point charges.'''
print('This is "{}".'.format(__file__))
# Read command line arguments
args = parse_command_line()
print('Extracting structure via ASE ...')
# Look up the relationship between ASE indices, atom names
pmd_struct, pmd_top, ase2pmd = create_structure(args.pdb_infile, args.top_infile,
args.hydrogen_file)
print('Atomic structure built.')
# Import A and B matrices from HORTON
A, B = read_horton_cost_function(args.horton_cost_function)
# Calculate constraints
logic_constraints, charge_constraints = get_constraints(args, ase2pmd=ase2pmd)
    print('Constraints calculated: {} non-redundant.'.format(logic_constraints.shape[0]))
# print(logic_constraints, '\n', charge_constraints)
# Run the constrained minimization
q, f = constrained_minimize(A, B, logic_constraints, charge_constraints)
print('Constrained minimization done.')
print('Extremal charges: {:1.5f}, {:1.5f}'.format(q.min(), q.max()))
print('Extremal Lagrange forces: {:1.5f}, {:1.5f}'.format(f.min(), f.max()))
q_unconstrained = unconstrained_minimize(A, B)
# Save charges
charge_df = write_charges(q, q_unconstrained, ase2pmd, out_name=args.output_file, plot=False)
# Save Lagrange forces
write_forces(f, logic_constraints, ase2pmd)
print('Charges and forces written.')
print('Done.')
if __name__ == '__main__':
main()
|
{"hexsha": "d788261be67a17ce0a37a138daea5bb88bfc7e3f", "size": 19732, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/fitESPconstrained.py", "max_stars_repo_name": "lukaselflein/sarah_folderstructure", "max_stars_repo_head_hexsha": "a725271db3d8b5b28b24918b3daf0942fa04dcd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/fitESPconstrained.py", "max_issues_repo_name": "lukaselflein/sarah_folderstructure", "max_issues_repo_head_hexsha": "a725271db3d8b5b28b24918b3daf0942fa04dcd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2019-03-29T13:34:57.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-04T09:27:07.000Z", "max_forks_repo_path": "bin/fitESPconstrained.py", "max_forks_repo_name": "lukaselflein/sarah_folderstructure", "max_forks_repo_head_hexsha": "a725271db3d8b5b28b24918b3daf0942fa04dcd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2720588235, "max_line_length": 100, "alphanum_fraction": 0.6638962092, "include": true, "reason": "import numpy,import sympy", "num_tokens": 5005}
|
#!/usr/bin/env python
import rospy
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from hanp_msgs.msg import TimeToGoal
from hanp_msgs.msg import HumanTimeToGoalArray
from hanp_msgs.msg import HumanPathArray
from hanp_msgs.msg import HumanTrajectoryArray
from hanp_msgs.msg import Trajectory
from nav_msgs.msg import Path
# module-level storage for the recorded messages; clear() resets it
data = {"ttg_traj": [], "ttg_path": [], "h_ttg_traj": [], "h_ttg_path": [],
        "g_plan": [], "l_plan": [], "l_traj": [], "h_g_plan": [],
        "h_l_plan": [], "h_l_traj": []}
def traj_ttg(msg):
data["ttg_traj"].append(msg.time_to_goal)
def plot_traj_ttg():
n = len(data["ttg_traj"])
x = np.arange(0,n,1)
y =[]
for i in range(0,n):
y.append(data["ttg_traj"][i].to_sec())
plt.figure()
plt.plot(x,y)
plt.ion()
plt.show()
plt.title('TTG Traj')
def path_ttg(msg):
data["ttg_path"].append(msg.time_to_goal)
def plot_path_ttg():
n = len(data["ttg_path"])
x = np.arange(0,n,1)
y =[]
for i in range(0,n):
y.append(data["ttg_path"][i].to_sec())
plt.figure()
plt.plot(x,y)
plt.ion()
plt.show()
plt.title('TTG Path')
def h_traj_ttg(msg):
data["h_ttg_traj"].append(msg.times_to_goal[0].time_to_goal)
def plot_h_traj_ttg():
n = len(data["h_ttg_traj"])
x = np.arange(0,n,1)
y =[]
for i in range(0,n):
y.append(data["h_ttg_traj"][i].to_sec())
plt.figure()
plt.plot(x,y)
plt.ion()
plt.show()
plt.title('TTG H_Traj')
def h_path_ttg(msg):
data["h_ttg_path"].append(msg.times_to_goal[0].time_to_goal)
def plot_h_path_ttg():
n = len(data["h_ttg_path"])
x = np.arange(0,n,1)
y =[]
for i in range(0,n):
y.append(data["h_ttg_path"][i].to_sec())
plt.figure()
plt.plot(x,y)
plt.ion()
plt.show()
plt.title('TTG H_path')
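def _plot_ttg_series(series, title):
    # Editor's refactor sketch (not wired in): the four plot_* helpers above
    # differ only in the recorded series and the figure title, so they could
    # all delegate to a single helper like this one.
    y = [t.to_sec() for t in series]
    plt.figure()
    plt.plot(np.arange(len(y)), y)
    plt.ion()
    plt.show()
    plt.title(title)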
def global_plan(msg):
global last_time
global one_save_gr
last_time = rospy.Time.now()
if one_save_gr:
data["g_plan"].append(msg)
one_save_gr = False
# def plot_global_plan():
# n = len(data["g_plan"])
# x = np.arange(0,n,1)
#
# y =[]
# for i in range(0,n):
# y.append(data["g_plan"][i].to_sec())
#
# plt.figure()
# plt.plot(x,y)
# plt.ion()
# plt.show()
# plt.title('Global plan')
def h_global_plan(msg):
    global last_time
    global one_save_gh
    last_time = rospy.Time.now()
    if one_save_gh:
        data["h_g_plan"].append(msg.paths[0])
        one_save_gh = False  # was `one_save_hr`, which left the flag always set
def local_plan(msg):
data["l_plan"].append(msg)
def h_local_plan(msg):
data["h_l_plan"].append(msg.paths[0])
def local_traj(msg):
data["l_traj"].append(msg)
def h_local_traj(msg):
data["h_l_traj"].append(msg.trajectories[0])
def timerCB(event):
    global last_time, one_save_gr, one_save_gh
    now = rospy.Time.now()
    # re-arm the one-shot flags once no global plan has arrived for 2 seconds
    if (now - last_time).secs > 2:
        one_save_gr = True
        one_save_gh = True
def clear():
    global data
    data = {"ttg_traj": [], "ttg_path": [], "h_ttg_traj": [], "h_ttg_path": [],
            "g_plan": [], "l_plan": [], "l_traj": [], "h_g_plan": [],
            "h_l_plan": [], "h_l_traj": []}
def listener():
global last_time
global one_save_gr
global one_save_gh
one_save_gr = True
one_save_gh = True
rospy.init_node('data_saving_teb')
last_time = rospy.Time.now()
# root = Tk()
# my_gui = GuessingGame(root)
# root.mainloop()
# Subscribe to all topics
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/global_plan",Path,global_plan)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/local_plan",Path,local_plan)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/local_traj",Trajectory,local_traj)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/plan_time",TimeToGoal,path_ttg)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/traj_time",TimeToGoal,traj_ttg)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/human_global_plans",HumanPathArray,h_global_plan)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/human_local_plans",HumanPathArray,h_local_plan)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/human_local_trajs",HumanTrajectoryArray,h_local_traj)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/human_plans_time",HumanTimeToGoalArray,h_path_ttg)
rospy.Subscriber("/move_base_node/TebLocalPlannerROS/human_trajs_time",HumanTimeToGoalArray,h_traj_ttg)
rospy.Timer(rospy.Duration(0.1), timerCB)
print("Started")
rospy.spin()
if __name__=='__main__':
try:
listener()
except rospy.ROSInterruptException:
pass
|
{"hexsha": "155efaef569741a0fb750822024b8ecebd725f92", "size": 4417, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/save_to_files.py", "max_stars_repo_name": "sphanit/hateb_local_planner", "max_stars_repo_head_hexsha": "a17fee83ab8bf626812cf9e31105ce6a01dff2bb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/save_to_files.py", "max_issues_repo_name": "sphanit/hateb_local_planner", "max_issues_repo_head_hexsha": "a17fee83ab8bf626812cf9e31105ce6a01dff2bb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/save_to_files.py", "max_forks_repo_name": "sphanit/hateb_local_planner", "max_forks_repo_head_hexsha": "a17fee83ab8bf626812cf9e31105ce6a01dff2bb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-05T02:47:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-05T02:47:12.000Z", "avg_line_length": 25.3850574713, "max_line_length": 159, "alphanum_fraction": 0.6683269187, "include": true, "reason": "import numpy", "num_tokens": 1280}
|
import numpy as np
# Agent that uses reinforcement learning with Monte Carlo policy evaluation to improve its play
class RL_Monte_Carlo_Agent():
#gamma: discount factor for future rewards
def __init__(self, gamma=0.9, verbose=False):
self.explore = True
self.n_states = 2*3**9
self.verbose = verbose
self.value = np.zeros(self.n_states)
self.state_visit_count = np.zeros(self.n_states,dtype=int)
self.gamma = gamma
self.games_played = 0
# converts a state id to an actual board configuration and player turn
def id_to_game_state(self,id):
turn = id // (self.n_states // 2)
i = id - turn * (self.n_states // 2)
v = np.zeros(9)
j = 8
while i > 0:
v[j] = i%3
i = i // 3
j = j - 1
return (np.array(v).reshape((3,3)) - 1, turn)
# converts a board configuration and player turn to state id
def id_from_game_state(self,game_state):
index = 0
(config, turn) = game_state
for cell in config.flatten():
index = index * 3 + cell + 1
index = index + turn * (self.n_states//2)
return int(index)
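    # Editor's sketch, not used during play: the base-3 board encoding plus the
    # turn bit is invertible, i.e. id_to_game_state undoes id_from_game_state.
    def _state_id_roundtrip_check(self):
        config = np.array([[-1, 0, 1], [1, -1, -1], [0, 1, -1]])
        state_id = self.id_from_game_state((config, 1))
        decoded_config, decoded_turn = self.id_to_game_state(state_id)
        return decoded_turn == 1 and np.array_equal(decoded_config, config)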
# callback when game is over
# score: +1 for won game, -1 for lost game, 0 for draw
# history: complete history of the game from player perspective
def game_finished(self,score,history):
# just for interest
self.games_played = self.games_played + 1
if not self.explore: return
reward = score
# t = steps into the past
for t,actual_state in enumerate(history):
for state in self.get_similar_states(actual_state):
i = self.id_from_game_state(state)
# compute 'future' value that we got from being in that state
game_reward = reward * self.gamma ** t
self.state_visit_count[i] = self.state_visit_count[i] + 1
self.alpha = 1.0/self.state_visit_count[i]
self.value[i] = self.value[i] + self.alpha * (game_reward - self.value[i])
#get the value that we get from performing action in config
def get_action_value(self,action,config):
possible_config = np.copy(config)
possible_config[action] = 0
return self.value[self.id_from_game_state((possible_config, 0))]
def get_action_exploration_status(self, action, config):
possible_config = np.copy(config)
possible_config[action] = 0
return self.state_visit_count[self.id_from_game_state((possible_config, 0))]
#given a config, find the possible action that yields the best value
def get_best_option(self,config):
available = np.nonzero(config.flatten() == -1)[0]
available = [(action//3,action%3) for action in available]
best_option = np.argmax([self.get_action_value(action, config) for action in available])
return available[best_option]
#given a config, find the possible action that leads to the least explored state
def get_least_explored_option(self,config):
available = np.nonzero(config.flatten() == -1)[0]
available = [(action//3,action%3) for action in available]
explore_status = [self.get_action_exploration_status(action, config) for action in available]
least_explored = np.argmin(explore_status)
return available[least_explored]
#For exploiting the symmetry and rotation of the game
def get_similar_states(self,state):
config, turn = state
config = np.copy(config)
similar = []
for i in range(3):
similar.append((config,turn))
similar.append((np.flip(config,axis=0),turn))
similar.append((np.flip(config,axis=1),turn))
config = np.rot90(config)
return similar
#find out the coordinates of the field that we want to occupy this turn
def act(self,game_state):
if self.explore:
(config, _) = game_state
return self.get_least_explored_option(config)
else:
(config, _) = game_state
best_option = self.get_best_option(config)
if (self.verbose):
print(game_state)
print(best_option)
return best_option
|
{"hexsha": "52e6209eb0c68938d0b3f894b8c663ff3b37b268", "size": 4290, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl_agent.py", "max_stars_repo_name": "marschi/tictactoe_ai", "max_stars_repo_head_hexsha": "bc92c0c49ad12d93dc8fd0fe532df8106ff734b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rl_agent.py", "max_issues_repo_name": "marschi/tictactoe_ai", "max_issues_repo_head_hexsha": "bc92c0c49ad12d93dc8fd0fe532df8106ff734b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rl_agent.py", "max_forks_repo_name": "marschi/tictactoe_ai", "max_forks_repo_head_hexsha": "bc92c0c49ad12d93dc8fd0fe532df8106ff734b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8571428571, "max_line_length": 101, "alphanum_fraction": 0.6272727273, "include": true, "reason": "import numpy", "num_tokens": 1015}
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory Sep_Provers
imports Sep_Rotate
begin
(* Constrained lens for sep_erule tactic *)
lemma sep_asm_eq_erule:
"(P \<and>* R) s \<Longrightarrow> (\<And>s. T s = (P \<and>* R) s) \<Longrightarrow> (T s \<Longrightarrow> (P' \<and>* R') s) \<Longrightarrow> (P' \<and>* R') s"
by (clarsimp)
lemma sep_rule:
"(\<And>s. T s \<Longrightarrow> P s) \<Longrightarrow> (T \<and>* R) s \<Longrightarrow> (P \<and>* R) s"
by (rule sep_conj_impl1)
lemma sep_erule:
"(T \<and>* R') s \<Longrightarrow> (\<And>s. T s \<Longrightarrow> P s) \<Longrightarrow> (\<And>s. R' s \<Longrightarrow> R s) \<Longrightarrow> (P \<and>* R) s"
by (rule sep_conj_impl)
(* Construct analogues to rule/drule etc, for separation logic *)
ML {*
fun sep_select ctxt = resolve_tac ctxt [@{thm sep_eq}]
fun sep_asm_select ctxt = dresolve_tac ctxt [@{thm sep_asm_eq}]
fun sep_asm_erule_select ctxt = eresolve_tac ctxt [@{thm sep_asm_eq_erule}]
fun sep_rule_tactic ctxt thms =
let val sep_rule = resolve_tac ctxt [@{thm sep_rule}]
in sep_apply_tactic ctxt sep_rule thms end
fun sep_drule_tactic ctxt thms =
let val sep_drule = dresolve_tac ctxt [rotate_prems ~1 @{thm sep_rule}]
in sep_apply_tactic ctxt sep_drule thms end
fun sep_frule_tactic ctxt thms =
let val sep_frule = forward_tac ctxt [rotate_prems ~1 @{thm sep_rule}]
in sep_apply_tactic ctxt sep_frule thms end
fun sep_erule_tactic ctxt thms =
let val sep_erule = (eresolve_tac ctxt [@{thm sep_erule}])
in sep_apply_tactic ctxt sep_erule thms end
fun sep_rule_tac tac ctxt = rotator (sep_select ctxt) tac ctxt
fun sep_drule_tac tac ctxt = rotator (sep_asm_select ctxt) tac ctxt
fun sep_erule_tac tac ctxt = rotator (sep_asm_select ctxt) tac ctxt
fun sep_erule_concl_tac tac ctxt = rotator (sep_select ctxt) tac ctxt
fun sep_erule_full_tac tac ctxt =
let val r = rotator' ctxt
in
tac |> r (sep_asm_erule_select ctxt) |> r (sep_select ctxt)
end
fun sep_erule_full_tac' tac ctxt =
let val r = rotator' ctxt
in
tac |> r (sep_select ctxt) |> r (sep_asm_erule_select ctxt)
end
fun sep_rule_comb_tac true thms ctxt = sep_rule_tac (resolve_tac ctxt thms) ctxt
| sep_rule_comb_tac false thms ctxt = sep_rule_tac (sep_rule_tactic ctxt thms) ctxt
fun sep_rule_method bool thms ctxt = SIMPLE_METHOD' (sep_rule_comb_tac bool thms ctxt)
fun sep_drule_comb_tac true thms ctxt = sep_drule_tac (dresolve_tac ctxt thms) ctxt
| sep_drule_comb_tac false thms ctxt = sep_drule_tac (sep_drule_tactic ctxt thms) ctxt
fun sep_drule_method bool thms ctxt = SIMPLE_METHOD' (sep_drule_comb_tac bool thms ctxt)
fun sep_frule_method true thms ctxt = SIMPLE_METHOD' (sep_drule_tac (forward_tac ctxt thms) ctxt)
| sep_frule_method false thms ctxt = SIMPLE_METHOD' (sep_drule_tac (sep_frule_tactic ctxt thms) ctxt)
fun sep_erule_method true thms ctxt = SIMPLE_METHOD' (sep_erule_tac (eresolve_tac ctxt thms) ctxt)
| sep_erule_method false thms ctxt = SIMPLE_METHOD' (sep_erule_tac (sep_erule_tactic ctxt thms) ctxt)
fun sep_erule_concl_method true thms ctxt =
SIMPLE_METHOD' (sep_erule_concl_tac (eresolve_tac ctxt thms) ctxt)
| sep_erule_concl_method false thms ctxt =
SIMPLE_METHOD' (sep_erule_concl_tac (sep_erule_tactic ctxt thms) ctxt)
fun sep_erule_full_method true thms ctxt =
SIMPLE_METHOD' (sep_erule_full_tac (eresolve_tac ctxt thms) ctxt)
| sep_erule_full_method false thms ctxt =
SIMPLE_METHOD' (sep_erule_full_tac (sep_erule_tactic ctxt thms) ctxt)
*}
method_setup sep_rule = {*
Scan.lift (Args.mode "direct") -- Attrib.thms >> uncurry sep_rule_method
*}
method_setup sep_drule = {*
Scan.lift (Args.mode "direct") -- Attrib.thms >> uncurry sep_drule_method
*}
method_setup sep_frule = {*
Scan.lift (Args.mode "direct") -- Attrib.thms >> uncurry sep_frule_method
*}
method_setup sep_erule = {*
Scan.lift (Args.mode "direct") -- Attrib.thms >> uncurry sep_erule_method
*}
method_setup sep_erule_concl = {*
Scan.lift (Args.mode "direct") -- Attrib.thms >> uncurry sep_erule_concl_method
*}
method_setup sep_erule_full = {*
Scan.lift (Args.mode "direct") -- Attrib.thms>> uncurry sep_erule_full_method
*}
end
|
{"author": "pirapira", "repo": "eth-isabelle", "sha": "d0bb02b3e64a2046a7c9670545d21f10bccd7b27", "save_path": "github-repos/isabelle/pirapira-eth-isabelle", "path": "github-repos/isabelle/pirapira-eth-isabelle/eth-isabelle-d0bb02b3e64a2046a7c9670545d21f10bccd7b27/sep_algebra/Sep_Provers.thy"}
|
"""
Train and/or evaluate a spatial relation model on one or multiple splits.
Author: Philipp Jund, 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import os
from SpatialRelationCNN.data_io.relation_dataset import RelationDataset
from SpatialRelationCNN.model.model import SpatialRelationModel
from SpatialRelationCNN.model.input_layer import InputLayer
from SpatialRelationCNN.model import evaluation_metrics as metrics
import SpatialRelationCNN.model.utility as util
import numpy as np
import tensorflow as tf
import tensorboard.plugins.projector as tensorboard_projector
# store the accuracies of all fifteen splits to compute mean + standard dev
accuracies = {p: {"3of5_accuracy": [],
"3of3_accuracy": [],
"5of5_accuracy": []}
for p in ["test", "validation"]}
# code to store embeddings in tensorboard
embedding_var = None # variable for tensorboard
assignment_op = None
config = tensorboard_projector.ProjectorConfig()
embedding_config = config.embeddings.add()
def export_embedding_to_tensorboard(embedding, model,
summary_writer, sess):
global embedding_var, assignment_op
if embedding_var is None:
embedding_var = tf.Variable(embedding, "tb_embeddings")
sess.run(embedding_var.initializer)
summary_writer.add_graph(sess.graph)
embedding_config.tensor_name = embedding_var.name
embedding_config.metadata_path = os.path.join(FLAGS.logdir,
"labels.tsv")
model.recreate_saver()
assignment_op = tf.assign(embedding_var, embedding)
sess.run(assignment_op)
tensorboard_projector.visualize_embeddings(summary_writer, config)
def train(model, sess, input_layer, labels, split, logdir,
validate=False):
"""Train `model` on `split` of `dataset`."""
global_step = tf.train.get_or_create_global_step()
loss, train_op = model.loss(), model.train_op
fd = {model.dropout_prob: 0.5}
sess.run(tf.global_variables_initializer())
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(logdir,
sess.graph)
input_layer.switch_input("train", sess)
run_summary = [train_op, loss, summary_op, global_step]
run_ops = [train_op, loss]
for i in range(FLAGS.num_iterations):
if i % 1000 == 0:
_, loss_value, summary, step = sess.run(run_summary, feed_dict=fd)
print("step: {}, loss: {}".format(step, loss_value))
summary_writer.add_summary(summary, i)
summary_writer.flush()
else:
_, loss_value = sess.run(run_ops, feed_dict=fd)
if validate and i % FLAGS.evaluate_every_n_steps == 0:
evaluate(model, sess, input_layer, labels, split, "validation",
i, logdir, summary_writer)
if i % FLAGS.snapshot_iterations == 0:
util.save(i, model, sess, logdir)
util.save(FLAGS.num_iterations, model, sess, logdir)
def evaluate(model, sess, input_layer, labels_gt, split, phase, step, logdir,
summary_writer):
"""Evaluate the model on the test_set."""
print("evaluating model")
input_layer.switch_input(phase, sess)
fd = {model.dropout_prob: 0.}
embeddings = []
labels = []
try:
run_ops = [model.embedding, labels_gt]
while True:
emb, y = sess.run(run_ops, feed_dict=fd)
embeddings += [emb[0]]
labels += [y]
except tf.errors.OutOfRangeError:
pass
embeddings, labels = np.array(embeddings), np.squeeze(np.array(labels))
with open(os.path.join(logdir, "labels.tsv"), 'w') as f:
f.writelines([str(i) + "\n" for i in labels])
export_embedding_to_tensorboard(embeddings, model, summary_writer, sess)
dist_mat = metrics.distance_matrix(embeddings)
similarity_mat = metrics.similarity_matrix(labels)
mean_sim, mean_dissim = metrics.mean_distances(dist_mat, similarity_mat)
args = {"step": step, "summary_writer": summary_writer}
# log distances...
util.log_np_summary(phase + "_mean_dist_similar", mean_sim, **args)
util.log_np_summary(phase + "_mean_dist_dissimilar",
mean_dissim, **args)
# ... and nearest neighbor performance
for x_of_k, k in ((3, 5), (3, 3), (5, 5)):
metric_name = "{}of{}_accuracy".format(x_of_k, k)
acc = metrics.knn_accuracy(dist_mat, similarity_mat, k, x_of_k)
accuracies[phase][metric_name].append(acc)
util.log_np_summary(phase + metric_name, acc, **args)
summary_writer.flush()
input_layer.switch_input("train", sess)
def main(_):
"""Train and/or evaluate the fifteen splits."""
global embedding_var
tf.logging.set_verbosity(tf.logging.INFO)
dataset = RelationDataset(FLAGS.data_dir,
validation_ratio=0.0)
if FLAGS.train_on_all_data:
dataset.splits[0]["train"] += dataset.splits[0]["test"]
for split_index in FLAGS.splits:
print("Training split {}.".format(split_index))
logdir = os.path.join(FLAGS.logdir, str(split_index))
with tf.Session() as sess:
input_layer = InputLayer(dataset, FLAGS.more_augmentation)
points, segment_ids, labels, is_clone_augmented = \
input_layer.dataset_input_fn(FLAGS.batch_size, split_index)
model = SpatialRelationModel(cloud_tensor=points,
id_tensor=segment_ids)
# preconstruct loss as we have augmentation information here
with tf.name_scope("Loss"):
ones = tf.ones_like(is_clone_augmented, dtype=tf.float32)
margin = tf.where(is_clone_augmented, ones * 0.2, ones * 1)
model.loss(margin=margin)
sess.run(tf.global_variables_initializer())
if os.path.exists(logdir):
util.load_variables(model, sess, logdir)
if not FLAGS.evaluate_only:
validate = bool(dataset.splits[split_index]["validation"])
train(model, sess, input_layer, labels, split_index, logdir,
validate=validate)
tf_summary_writer = tf.summary.FileWriter(logdir + "/test",
sess.graph)
evaluate(model, sess, input_layer, labels, split_index, "test",
0, logdir, tf_summary_writer)
embedding_var = None
tf.reset_default_graph()
if FLAGS.train_on_all_data:
break
summary_writer = tf.summary.FileWriter(FLAGS.logdir + "/mean_summary")
args = {"step": FLAGS.num_iterations, "summary_writer": summary_writer}
# generate final summary
for name, values in accuracies['test'].items():
util.log_np_summary("mean_" + name, np.mean(values), **args)
util.log_np_summary("stddev_" + name, np.std(values), **args)
if __name__ == "__main__":
# Using the Winograd non-fused algorithms provides a small performance
# boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
parser = argparse.ArgumentParser()
parser.add_argument("--splits", type=int, default=list(range(15)),
help="The splits to train.", nargs='+')
parser.add_argument("--data_dir", type=str, default=".",
help="The directory containing the training data.")
parser.add_argument("--logdir", type=str, default=".",
help="The directory where the weights are saved during"
" training and tensorboard files are stored. If"
" the directory contains a checkpoint, the model"
" is restored from the latest checkpoint.")
parser.add_argument("--evaluate_only", type=bool, default=False,
help="If true, the script only evaluates the given "
"test splits.")
parser.add_argument("--train_on_all_data", type=bool, default=False,
help="If true, all data is used for training.")
parser.add_argument("--evaluate_every_n_steps", type=int, default=1000,
help="The splits to train.")
parser.add_argument("--more_augmentation", type=bool, default=False,
help="If true, we do additional augmentation, i.e.,"
"cloning scenes and random transforms.")
FLAGS, unparsed = parser.parse_known_args()
FLAGS.batch_size = 100
# restarts: 1500 -> 4500 -> 10000 ->
# duration: 1500 -> 3000 -> 6000 -> 12000
FLAGS.num_iterations = 14000
FLAGS.snapshot_iterations = 1000
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
{"hexsha": "327f9cce2d31f7ff1985b18e9126f90ff142e00b", "size": 8963, "ext": "py", "lang": "Python", "max_stars_repo_path": "SpatialRelationCNN/train.py", "max_stars_repo_name": "ICRA-2018/generalize_spatial_relations", "max_stars_repo_head_hexsha": "6a87e987848426da757e0add595e3ec035956f01", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-04-19T14:38:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T10:33:20.000Z", "max_issues_repo_path": "SpatialRelationCNN/train.py", "max_issues_repo_name": "ICRA-2018/generalize_spatial_relations", "max_issues_repo_head_hexsha": "6a87e987848426da757e0add595e3ec035956f01", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SpatialRelationCNN/train.py", "max_forks_repo_name": "ICRA-2018/generalize_spatial_relations", "max_forks_repo_head_hexsha": "6a87e987848426da757e0add595e3ec035956f01", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-06-03T15:00:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-17T07:06:29.000Z", "avg_line_length": 44.815, "max_line_length": 79, "alphanum_fraction": 0.6370634832, "include": true, "reason": "import numpy", "num_tokens": 1926}
|
import numpy as np
import os
import pandas as pd
import torch
import yaml
import argparse
from utils import seed_everything
from dataset import classes
from predict_test import cfg_to_preds_path
import warnings
warnings.filterwarnings("ignore")
SEED = 123
seed_everything(SEED)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--cfgs", default=[
'configs/eb5_512_deeplabv3plus.yaml',
'configs/eb6_448_linknet.yaml',
'configs/eb7_512_unetplusplus.yaml',
'configs/seresnet152d_512_unet.yaml'], nargs="+", type=str)
parser.add_argument("--folds", default=[0, 1, 2, 3, 4], nargs="+", type=int)
args = parser.parse_args()
cfgs = []
for cfg in args.cfgs:
with open(cfg) as f:
cfgs.append(yaml.load(f, Loader=yaml.FullLoader))
os.makedirs('pseudo_csv', exist_ok=True)
for source in ['pneumothorax', 'vin']:
if source == 'pneumothorax':
test_df = pd.read_csv('../../dataset/external_dataset/ext_csv/pneumothorax.csv')
elif source == 'vin':
test_df = pd.read_csv('../../dataset/external_dataset/ext_csv/vin.csv')
study_pred_list = [torch.load(cfg_to_preds_path(cfg, args.folds, source))['pred_dict'] for cfg in cfgs]
weights = [0.3, 0.2, 0.2, 0.3]
weights = weights[:len(study_pred_list)]
weights = np.array(weights)
weights /= np.sum(weights)
image_paths = []
labels = []
for _, row in test_df.iterrows():
pred = 0
for p, w in zip(study_pred_list, weights):
pred += w * p[row['image_path']]
image_path = row['image_path']
        assert os.path.isfile(image_path)
image_paths.append(image_path)
labels.append(pred)
pseudo_test_df = pd.DataFrame()
pseudo_test_df['image_path'] = np.array(image_paths)
pseudo_test_df[classes] = np.array(labels, dtype=float)
pseudo_test_df['pseudo'] = np.array([True] * len(test_df), dtype=bool)
pseudo_test_df.to_csv('pseudo_csv/pseudo_{}.csv'.format(source), index=False)
|
{"hexsha": "8515b234d92df9b73dc8eea2e5cd85bf8066ea7b", "size": 2236, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/classification_aux/ensemble_pseudo_ext.py", "max_stars_repo_name": "sergeykochetkov/kaggle-covid19", "max_stars_repo_head_hexsha": "07717959e21dd3ce75a8c21b6025e681ada2b65d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/classification_aux/ensemble_pseudo_ext.py", "max_issues_repo_name": "sergeykochetkov/kaggle-covid19", "max_issues_repo_head_hexsha": "07717959e21dd3ce75a8c21b6025e681ada2b65d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/classification_aux/ensemble_pseudo_ext.py", "max_forks_repo_name": "sergeykochetkov/kaggle-covid19", "max_forks_repo_head_hexsha": "07717959e21dd3ce75a8c21b6025e681ada2b65d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3731343284, "max_line_length": 113, "alphanum_fraction": 0.6457960644, "include": true, "reason": "import numpy", "num_tokens": 563}
|
from random import randint
import numpy as np
from numba import njit
def generator(minV: int, maxV: int, amount: int) -> np.ndarray:
    output = np.zeros(shape=(amount,), dtype=int)
    for iterate in range(amount):
        output[iterate] = randint(minV, maxV)
    return output
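# A vectorized alternative sketch covering the same inclusive [minV, maxV]
# range (np.random.randint excludes its upper bound, hence the + 1):
def generator_vectorized(minV: int, maxV: int, amount: int) -> np.ndarray:
    return np.random.randint(minV, maxV + 1, size=amount)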
@njit
def bubble_Sort(input_data_list: np.ndarray) -> np.ndarray:
    data = input_data_list.copy()
    size = len(data)
for j in range(size):
swapped = False
for i in range(size - j - 1):
if data[i] > data[i + 1]:
data[i], data[i + 1] = data[i + 1], data[i]
swapped = True
if not swapped:
break
return data
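# Minimal usage sketch (sizes are illustrative). Note that the first
# bubble_Sort call includes numba's JIT compilation time, so time a second
# call when benchmarking.
if __name__ == "__main__":
    unsorted_data = generator(0, 1000, 1000)
    sorted_data = bubble_Sort(unsorted_data)
    assert np.all(sorted_data[:-1] <= sorted_data[1:])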
|
{"hexsha": "25aa111a27b080c4bf31198757f1d9b5848de982", "size": 723, "ext": "py", "lang": "Python", "max_stars_repo_path": "sortnumba.py", "max_stars_repo_name": "PolskiZajac/bubble-sort", "max_stars_repo_head_hexsha": "f31664f9f351d836e15620e3d630a017f04172df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sortnumba.py", "max_issues_repo_name": "PolskiZajac/bubble-sort", "max_issues_repo_head_hexsha": "f31664f9f351d836e15620e3d630a017f04172df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sortnumba.py", "max_forks_repo_name": "PolskiZajac/bubble-sort", "max_forks_repo_head_hexsha": "f31664f9f351d836e15620e3d630a017f04172df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8214285714, "max_line_length": 61, "alphanum_fraction": 0.571230982, "include": true, "reason": "import numpy,from numba", "num_tokens": 202}
|
# TODO: get all the parameters, including the image URL, from the command line
# import the necessary packages
import numpy as np
import argparse
import cv2
import urllib.request as urlreq
import requests
import json
url = 'http://192.168.1.100:8080/snapshot?topic=/camera/color/image_raw'
server_url = 'http://localhost:53983/api/DetectPeoples'
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required=True,
# help="path to input image")
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# load the input image and construct an input blob for the image
# by resizing to a fixed 300x300 pixels and then normalizing it
# (note: normalization is done via the authors of the MobileNet SSD
# implementation)
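# For reference, a sketch of the arithmetic blobFromImage performs below
# (scale factor 0.007843 ~= 1/127.5, mean 127.5), assuming a BGR uint8 frame.
# Illustrative only; it is not called anywhere in this script.
def _manual_blob(img):
    resized = cv2.resize(img, (300, 300)).astype(np.float32)
    normalized = (resized - 127.5) * 0.007843  # maps [0, 255] to roughly [-1, 1]
    return normalized.transpose(2, 0, 1)[np.newaxis, ...]  # HWC -> NCHW blob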
while True:
#save and count all detections
person_count = 0
boxes = ""
    # TODO: check for response success
#get a single image at a time
resp = urlreq.urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype='uint8')
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
original_shape = image.shape
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)
# pass the blob through the network and obtain the detections and
# predictions
net.setInput(blob)
detections = net.forward()
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > args["confidence"]:
# extract the index of the class label from the `detections`,
# then compute the (x, y)-coordinates of the bounding box for
# the object
idx = int(detections[0, 0, i, 1])
            if CLASSES[idx] != 'person':
continue
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
box_str = '{}:{}:{}:{}'.format(startX, startY, endX, endY)
# display the prediction
# label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
# print("[INFO] {}".format(label))
accuracy = '{:.2f}'.format(confidence * 100)
#draw rectangle for debugging
#cv2.rectangle(image, (startX, startY), (endX, endY),
# COLORS[idx], 2)
#y = startY - 15 if startY - 15 > 15 else startY + 15
#cv2.putText(image, label, (startX, y),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
person_count += 1
            boxes += accuracy + ':' + box_str + '%%%'
#send a post request of detections and empty list
r = requests.post(server_url, data = {"NumberOfPeople":person_count,"ValuesString":boxes})
#show the output image for debugging
#cv2.resize(image, original_shape[:2])
#cv2.imshow("Output", image)
#if cv2.waitKey(1) & 0xFF == ord('q'):
# break
cv2.destroyAllWindows()
|
{"hexsha": "e0922509c4a2518ef79ea6983a9e24ff97fd5e6c", "size": 4159, "ext": "py", "lang": "Python", "max_stars_repo_path": "detector/video_inference.py", "max_stars_repo_name": "Turgibot/FinalProject", "max_stars_repo_head_hexsha": "4d4a73829780ca936216add17bb93968d0861486", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "detector/video_inference.py", "max_issues_repo_name": "Turgibot/FinalProject", "max_issues_repo_head_hexsha": "4d4a73829780ca936216add17bb93968d0861486", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "detector/video_inference.py", "max_forks_repo_name": "Turgibot/FinalProject", "max_forks_repo_head_hexsha": "4d4a73829780ca936216add17bb93968d0861486", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8691588785, "max_line_length": 118, "alphanum_fraction": 0.62250541, "include": true, "reason": "import numpy", "num_tokens": 1071}
|
from qiskit import QuantumCircuit, QuantumRegister, execute, Aer
import numpy as np
import time, sys
ftime = time.time
def speed(nbqubits, nb_circuits, repeat=1, depth=2, gpu=False):
params = np.pi * np.random.rand(depth, nbqubits, nb_circuits)
start_time = ftime()
for _ in range(repeat):
qc_list = []
for n_c in range(nb_circuits):
qr = QuantumRegister(nbqubits, 'qr')
qc = QuantumCircuit(qr)
for l in range(depth):
qc.rx(np.pi/2, qr)
for i in range(nbqubits):
qc.rz(params[l, i, n_c], qr[i])
qc.rx(np.pi/2, qr)
for i in range(nbqubits - 1):
qc.cz(qr[i], qr[i+1])
qc.rx(np.pi/2, qr)
qc.measure_all()
qc_list.append(qc)
job = execute(qc_list, Aer.get_backend('qasm_simulator'))
job.result()
end_time = ftime()
return (end_time - start_time)/repeat
if __name__ == '__main__':
    try: nbqubits = int(sys.argv[1])
    except (IndexError, ValueError): nbqubits = 5
    try: nbcircuits = int(sys.argv[2])
    except (IndexError, ValueError): nbcircuits = 10
    try: depth = int(sys.argv[3])
    except (IndexError, ValueError): depth = 2
t = speed(nbqubits, nbcircuits, depth=depth)
print(f"nb qubits={nbqubits}, nb circuits={nbcircuits}, depth={depth}:")
print(f"milliseconds {t*1000}")
print(f" seconds {t}")
# print(f" minutes {t/60}")
|
{"hexsha": "fc12ab1b46f8e01620b11028917b3507c5eff64d", "size": 1467, "ext": "py", "lang": "Python", "max_stars_repo_path": "speedtests/qktest.py", "max_stars_repo_name": "exaQ-ai/manyQ", "max_stars_repo_head_hexsha": "d943fc76d8ba1f858193fd4a1cd338b090252b5d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "speedtests/qktest.py", "max_issues_repo_name": "exaQ-ai/manyQ", "max_issues_repo_head_hexsha": "d943fc76d8ba1f858193fd4a1cd338b090252b5d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "speedtests/qktest.py", "max_forks_repo_name": "exaQ-ai/manyQ", "max_forks_repo_head_hexsha": "d943fc76d8ba1f858193fd4a1cd338b090252b5d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-08-14T01:10:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-03T16:17:35.000Z", "avg_line_length": 27.1666666667, "max_line_length": 76, "alphanum_fraction": 0.5501022495, "include": true, "reason": "import numpy", "num_tokens": 417}
|
"""
===============================
NumPy memmap in joblib.Parallel
===============================
This example illustrates some features enabled by using a memory map
(:class:`numpy.memmap`) within :class:`joblib.Parallel`. First, we show that
dumping a huge data array ahead of passing it to :class:`joblib.Parallel`
speeds up computation. Then, we show the possibility to provide write access to
original data.
"""
##############################################################################
# Speed up processing of a large data array
##############################################################################
#
# We create a large data array for which the average is computed for several
# slices.
import numpy as np
data = np.random.random((int(1e7),))
window_size = int(5e5)
slices = [slice(start, start + window_size)
for start in range(0, data.size - window_size, int(1e5))]
###############################################################################
# The ``slow_mean`` function introduces a :func:`time.sleep` call to simulate a
# more expensive computation cost for which parallel computing is beneficial.
# Parallel computing may not be beneficial for very fast operations, due to the
# extra overhead (worker creation, communication, etc.).
import time
def slow_mean(data, sl):
"""Simulate a time consuming processing."""
time.sleep(0.01)
return data[sl].mean()
###############################################################################
# First, we will evaluate the sequential computing on our problem.
tic = time.time()
results = [slow_mean(data, sl) for sl in slices]
toc = time.time()
print('\nElapsed time computing the average of a couple of slices {:.2f} s'
.format(toc - tic))
###############################################################################
# :class:`joblib.Parallel` is used to compute in parallel the average of all
# slices using 2 workers.
from joblib import Parallel, delayed
tic = time.time()
results = Parallel(n_jobs=2)(delayed(slow_mean)(data, sl) for sl in slices)
toc = time.time()
print('\nElapsed time computing the average of a couple of slices {:.2f} s'
.format(toc - tic))
###############################################################################
# Parallel processing is already faster than the sequential processing. It is
# also possible to remove a bit of overhead by dumping the ``data`` array to a
# memmap and pass the memmap to :class:`joblib.Parallel`.
import os
from joblib import dump, load
folder = './joblib_memmap'
try:
os.mkdir(folder)
except FileExistsError:
pass
data_filename_memmap = os.path.join(folder, 'data_memmap')
dump(data, data_filename_memmap)
data = load(data_filename_memmap, mmap_mode='r')
tic = time.time()
results = Parallel(n_jobs=2)(delayed(slow_mean)(data, sl) for sl in slices)
toc = time.time()
print('\nElapsed time computing the average of a couple of slices {:.2f} s\n'
.format(toc - tic))
###############################################################################
# Therefore, dumping large ``data`` array ahead of calling
# :class:`joblib.Parallel` can speed up the processing by removing some
# overhead.
###############################################################################
# Writable memmap for shared memory :class:`joblib.Parallel`
###############################################################################
#
# ``slow_mean_write_output`` will compute the mean for some given slices as in
# the previous example. However, the resulting mean will be directly written on
# the output array.
def slow_mean_write_output(data, sl, output, idx):
"""Simulate a time consuming processing."""
time.sleep(0.005)
res_ = data[sl].mean()
print("[Worker %d] Mean for slice %d is %f" % (os.getpid(), idx, res_))
output[idx] = res_
###############################################################################
# Prepare the folder where the memmap will be dumped.
output_filename_memmap = os.path.join(folder, 'output_memmap')
###############################################################################
# Pre-allocate a writable shared memory map as a container for the results of
# the parallel computation.
output = np.memmap(output_filename_memmap, dtype=data.dtype,
shape=len(slices), mode='w+')
###############################################################################
# ``data`` is replaced by its memory mapped version. Note that the buffer has
# already been dumped in the previous section.
data = load(data_filename_memmap, mmap_mode='r')
###############################################################################
# Fork the worker processes to perform computation concurrently
Parallel(n_jobs=2)(delayed(slow_mean_write_output)(data, sl, output, idx)
for idx, sl in enumerate(slices))
###############################################################################
# Compare the results from the output buffer with the expected results
print("\nExpected means computed in the parent process:\n {}"
.format(np.array(results)))
print("\nActual means computed by the worker processes:\n {}"
.format(output))
###############################################################################
# Clean-up the memmap
###############################################################################
#
# Remove the memmaps that we created. This might fail on Windows due to file
# permissions.
import shutil
try:
shutil.rmtree(folder)
except: # noqa
print('Could not clean-up automatically.')
|
{"hexsha": "db956ca6c42532d9cb96d938e3b8b67a4714a013", "size": 5591, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/parallel_memmap.py", "max_stars_repo_name": "ctb/joblib", "max_stars_repo_head_hexsha": "023e7a9df56fb2feab9f6f459653338519472af8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2607, "max_stars_repo_stars_event_min_datetime": "2015-01-06T16:06:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:47:54.000Z", "max_issues_repo_path": "examples/parallel_memmap.py", "max_issues_repo_name": "ctb/joblib", "max_issues_repo_head_hexsha": "023e7a9df56fb2feab9f6f459653338519472af8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1091, "max_issues_repo_issues_event_min_datetime": "2015-01-20T18:01:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T14:08:24.000Z", "max_forks_repo_path": "examples/parallel_memmap.py", "max_forks_repo_name": "ctb/joblib", "max_forks_repo_head_hexsha": "023e7a9df56fb2feab9f6f459653338519472af8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 313, "max_forks_repo_forks_event_min_datetime": "2015-01-08T04:02:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T19:53:13.000Z", "avg_line_length": 35.8397435897, "max_line_length": 79, "alphanum_fraction": 0.5512430692, "include": true, "reason": "import numpy", "num_tokens": 1059}
|
[STATEMENT]
lemma one_inf_conv:
"1 \<sqinter> x = 1 \<sqinter> x\<^sup>T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1::'a) \<sqinter> x = (1::'a) \<sqinter> x\<^sup>T
[PROOF STEP]
by (metis conv_dist_inf coreflexive_symmetric inf.cobounded1 symmetric_one_closed)
|
{"llama_tokens": 121, "file": "Stone_Relation_Algebras_Relation_Algebras", "length": 1}
|
import numpy as np
def random_data(N=0, K=0, Y_cur=None, D_cur=None, X_cur=None):
if X_cur is not None:
N, K = X_cur.shape
elif D_cur is not None:
N = D_cur.shape[0]
elif Y_cur is not None:
N = Y_cur.shape[0]
    # np.random.random_integers is deprecated; randint's upper bound is
    # exclusive, hence the + 1 on the previously inclusive bounds.
    if N == 0 and K == 0:
        K = np.random.randint(1, 5 + 1)
        N = np.random.randint(4, 4*K + 1)
    elif N != 0 and K == 0:
        K = np.random.randint(1, N)
    elif N == 0 and K != 0:
        N = np.random.randint(4, 4*K + 1)
data = []
if Y_cur is None:
Y_data = np.random.rand(N)
data.append(Y_data)
    if D_cur is None:
        D_data = np.random.randint(0, 2, N)
        # loop to ensure at least two subjects in each group
        while D_data.sum() <= 1 or D_data.sum() >= N-1:
            D_data = np.random.randint(0, 2, N)
        data.append(D_data)
if X_cur is None:
X_data = np.random.rand(N, K)
data.append(X_data)
if len(data) == 1:
return data[0]
else:
return data
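# Usage sketch (shapes are illustrative): request a full (Y, D, X) triple, or
# pass existing arrays so that only the missing pieces are generated.
if __name__ == "__main__":
    Y, D, X = random_data(N=20, K=3)
    assert Y.shape == (20,) and D.shape == (20,) and X.shape == (20, 3)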
|
{"hexsha": "eb58fc3683807c74a5226c78f055bdb1d0b408a5", "size": 907, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/utils.py", "max_stars_repo_name": "youngminju-phd/Causalinference", "max_stars_repo_head_hexsha": "630e8fb195754a720da41791b725d3dadabfb257", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 392, "max_stars_repo_stars_event_min_datetime": "2016-06-08T19:43:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T14:18:07.000Z", "max_issues_repo_path": "tests/utils.py", "max_issues_repo_name": "youngminju-phd/Causalinference", "max_issues_repo_head_hexsha": "630e8fb195754a720da41791b725d3dadabfb257", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2017-04-28T20:25:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-14T10:25:40.000Z", "max_forks_repo_path": "tests/utils.py", "max_forks_repo_name": "youngminju-phd/Causalinference", "max_forks_repo_head_hexsha": "630e8fb195754a720da41791b725d3dadabfb257", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 82, "max_forks_repo_forks_event_min_datetime": "2016-06-08T19:43:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T13:36:28.000Z", "avg_line_length": 22.675, "max_line_length": 62, "alphanum_fraction": 0.6438809261, "include": true, "reason": "import numpy", "num_tokens": 314}
|
import time, torch, sys, os
import nibabel as nib
import pickle as pkl
import numpy as np
from datetime import datetime
from glob import glob
import cv2
import matplotlib.pyplot as plt
class BaseArch(object):
def __init__(self, config):
"""basic settings"""
self.config = config
self.log_dir = self.get_log_dir()
"""to be set in children obj"""
self.net = None
"""global variables"""
self.epoch, self.step = 0, 0
self.phase = 'train'
self.best_model = ''
self.global_step = 0
self.global_epoch = 0
self.epoch_loss = 0
# self.check_gpu_info()
"""define in children obj"""
def train(self):
pass
def validate(self):
pass
def inference(self):
pass
def loss(self):
pass
def set_dataloader(self):
pass
def train_mode(self):
self.phase = 'train'
self.net.train()
def val_mode(self):
self.phase = 'val'
self.net.eval()
def test_mode(self):
self.phase = 'test'
self.net.eval()
def check_gpu_info(self):
'''will be useful when computing on HPC :) '''
gpu_id = torch.cuda.current_device()
gpu_type = torch.cuda.get_device_name(gpu_id)
print(f'>>> Computing on GPU: {gpu_type} <<<')
def set_device(self):
if torch.cuda.is_available():
device = torch.device('cuda')
print('>>> Using GPU.')
else:
device = torch.device('cpu')
print('>>> Using CPU')
return device
def save(self, type=None):
ckpt_path = os.path.join(self.log_dir, 'checkpoints')
os.makedirs(ckpt_path, exist_ok=True)
if type is None:
torch.save(self.net, os.path.join(ckpt_path, f'epoch-{self.epoch}.pt'))
elif type == 'best':
exist_best_models = glob(os.path.join(ckpt_path, 'best*.pt'))
[os.remove(i) for i in exist_best_models]
torch.save(self.net, os.path.join(ckpt_path, f'best-epoch-{self.epoch}.pt'))
else:
pass
def load_epoch(self, num_epoch):
if num_epoch != 'best':
self.epoch = int(num_epoch)
self.net = torch.load(os.path.join(self.log_dir, 'checkpoints', f'epoch-{num_epoch}.pt'))
print(f'load from epoch {self.epoch}')
else:
best_ckpt = glob(os.path.join(self.log_dir, 'checkpoints', 'best*'))
assert(len(best_ckpt)) != 0, "no best ckpt found in this exp..."
self.net = torch.load(best_ckpt[0])
self.epoch = int(best_ckpt[0].replace('.pt', '').split('-')[-1])
print(f'load from best epoch {best_ckpt[0]}')
def save_configure(self):
os.makedirs(self.log_dir, exist_ok=True)
with open(os.path.join(self.log_dir, 'config.pkl'), 'wb') as f:
pkl.dump(self.config, f)
def get_log_dir(self):
assert self.config.exp_name is not None, "exp_name should not be None."
        log_dir = os.path.join('./logs', self.config.project, self.config.exp_name)
while os.path.exists(log_dir) and 'train.py' in sys.argv[0] and self.config.continue_epoch=='-1':
log_dir = os.path.join(
'./logs',
self.config.project,
self.config.exp_name + '-' + datetime.now().strftime("%Y%m%d-%H%M%S"))
return log_dir
@staticmethod
def save_img(tensor_arr, save_path, pixdim=[1.0, 1.0, 1.0]):
save_folder = os.path.dirname(save_path)
if not os.path.exists(save_folder):
os.makedirs(save_folder)
arr = torch.squeeze(tensor_arr)
        assert len(arr.shape) == 3, "not a 3-dimensional volume, need to check."
arr = arr.detach().cpu().numpy()
nib_img = nib.Nifti1Image(arr, affine=np.eye(4))
nib_img.header['pixdim'][1:4] = np.array(pixdim)
nib.save(img=nib_img, filename=save_path)
def get_patch_cords_from_ref_image(self, ref_img):
patch_size = self.config.patch_size
inf_patch_stride_factors = self.config.inf_patch_stride_factors
if len(ref_img.shape) > 3:
shape = ref_img.shape[-3:]
else: shape = np.array(ref_img.shape)
patch_size = np.array(patch_size)
stride = patch_size // np.array(inf_patch_stride_factors)
iters = (shape - patch_size) // stride + 1
coords = [np.array([x, y, z])*stride for x in range(iters[0]) for y in range(iters[1]) for z in range(iters[2])] # left top points
coords = [list(i) for i in coords]
z_slice = [np.array([x, y, shape[2]-patch_size[2]])*np.array([stride[0], stride[1], 1]) for x in range(iters[0]) for y in range(iters[1])]
z_slice = [list(i) for i in z_slice]
x_slice = [np.array([shape[0]-patch_size[0], y, z])*np.array([1, stride[1], stride[2]]) for y in range(iters[1]) for z in range(iters[2])]
x_slice = [list(i) for i in x_slice]
y_slice = [np.array([x, shape[1]-patch_size[1], z])*np.array([stride[0], 1, stride[2]]) for x in range(iters[0]) for z in range(iters[2])]
y_slice = [list(i) for i in y_slice]
zb = [np.array([shape[0]-patch_size[0], shape[1]-patch_size[1], z])*np.array([1, 1, stride[2]]) for z in range(iters[2])] # z bound
zb = [list(i) for i in zb]
xb = [np.array([x, shape[1]-patch_size[1], shape[2]-patch_size[2]])*np.array([stride[0], 1, 1]) for x in range(iters[0])] # x bound
xb = [list(i) for i in xb]
yb = [np.array([shape[0]-patch_size[0], y, shape[2]-patch_size[2]])*np.array([1, stride[1], 1]) for y in range(iters[1])] # y bound
yb = [list(i) for i in yb]
br = [[shape[0]-patch_size[0], shape[1]-patch_size[1], shape[2]-patch_size[2]]]
# print(len(coords), len(xb), len(yb), len(zb))
for ex in [zb, xb, yb, br, z_slice, x_slice, y_slice]:
for p in ex:
if p not in coords:
coords.append(p)
return [[x, x+patch_size[0], y, y+patch_size[1], z, z+patch_size[2]] for (x, y, z) in coords]
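    # Worked example (hypothetical numbers): shape=(64, 64, 64),
    # patch_size=(32, 32, 32) and inf_patch_stride_factors=(2, 2, 2) give
    # stride=(16, 16, 16) and iters=(3, 3, 3): 27 regular grid corners, plus
    # the boundary slices/corners appended so patches also cover the far faces.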
@staticmethod
def vis_with_contour(fx_img, fx_seg, mv_img, mv_seg, pred_seg, save_folder, sbj_name, color=(255, 255, 0), info=''):
"""fx/mv_img/seg -> 3d volume"""
def normalize0255(arr):
return (arr - arr.min())*255.0 / (arr.max() - arr.min())
def add_contours(t2, label, color):
if len(t2.shape) != 3:
_t2 = np.tile(t2, (3,1,1)).transpose(1, 2, 0)
else:
_t2 = t2
_t2 = normalize0255(_t2).astype('uint8')
_label = label.astype('uint8')
blank = np.zeros(_t2.shape)
contours, hierarchy = cv2.findContours(_label.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE, offset=(0, 0))
            tmp = _t2.copy()  # copy, since drawContours draws in place
cv2.drawContours(tmp, contours, -1, color, 1)
return tmp
img_set = np.concatenate([mv_img, fx_img, fx_img], axis=1)
img_set = normalize0255(img_set)
seg_set = np.concatenate([mv_seg, fx_seg, pred_seg], axis=1)
for z in range(fx_img.shape[-1]):
img_slice = img_set[..., z]
seg_slice = seg_set[..., z]
contoured_slice = add_contours(img_slice, seg_slice, color=color)
save_path = os.path.join(save_folder, sbj_name)
            os.makedirs(save_path, exist_ok=True)
plt.imsave(os.path.join(save_path, f"{sbj_name}_{z}_{info}.png"), contoured_slice)
|
{"hexsha": "02ca43aa068815f069cb26768fb39a3230706814", "size": 7701, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model/archs/baseArch.py", "max_stars_repo_name": "QianyeYang/mpmrireg", "max_stars_repo_head_hexsha": "619b8f0b1be5ae29e4ac20f4a030ab044fce69f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/model/archs/baseArch.py", "max_issues_repo_name": "QianyeYang/mpmrireg", "max_issues_repo_head_hexsha": "619b8f0b1be5ae29e4ac20f4a030ab044fce69f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model/archs/baseArch.py", "max_forks_repo_name": "QianyeYang/mpmrireg", "max_forks_repo_head_hexsha": "619b8f0b1be5ae29e4ac20f4a030ab044fce69f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3134328358, "max_line_length": 146, "alphanum_fraction": 0.5683677444, "include": true, "reason": "import numpy", "num_tokens": 2075}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 22 19:18:35 2018
@author: Siddharth
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib.cm as CM
# scatter plot function
def plotting(model,disease,text,xaxis,yaxis):
labels=list(set(disease))
# Color vector creation
cvec=CM.brg(np.linspace(0,1,num=len(labels)))
legend_list=[]
for i in range(len(labels)):
plot_data = model[np.where(disease==labels[i])]
x=plot_data[:,0]
y=plot_data[:,1]
legend_list.append(plt.scatter(x, y, c=cvec[i]))
plt.legend(legend_list,labels,loc="best")
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.title(text,fontweight="bold")
plt.show()
#TNSE
def tsne(attributes, disease, filename):
tsne = TSNE(n_components=2, init='pca', learning_rate=100)
final_tsne=tsne.fit_transform(attributes)
text="TSNE: "+filename
xaxis=""
yaxis=""
plotting(final_tsne,disease,text,xaxis,yaxis)
#SVD
def svd(attributes, disease, filename):
u, s, vh = np.linalg.svd(attributes, full_matrices=True)
new_svd = u[:,[0,1]]
text="SVD: "+filename
xaxis="Component 1"
yaxis="Component 2"
plotting(new_svd,disease,text,xaxis,yaxis)
#PCA
def pca(attributes, disease, filename):
mean = attributes.mean(axis=0)
adj_attributes = attributes - mean
    covariance_mat = np.cov(adj_attributes, rowvar=False)
    evals, evecs = np.linalg.eig(covariance_mat)
#sort eigen values in descending
idx = np.argsort(evals)[::-1]
#top eigen vectors
evecs = evecs[:,idx]
evals = evals[idx]
pca_alg = np.dot(adj_attributes, evecs)
text="PCA: "+filename
xaxis="PC 1"
yaxis="PC 2"
plotting(pca_alg,disease,text,xaxis,yaxis)
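# Optional sanity-check sketch (assumes scikit-learn is installed; it is not
# used above). The eigendecomposition-based projection may differ from
# sklearn's by a per-component sign flip, which is expected for eigenvectors.
def pca_sklearn_check(attributes):
    from sklearn.decomposition import PCA as SkPCA
    return SkPCA(n_components=2).fit_transform(attributes)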
#inputting the file
filename = input("Enter filename: ")
data = [line.strip().split('\t') for line in open(filename, 'r')]
data = np.asarray(data)
attribute = data[:,0:data.shape[1]-1] #all columns except the last is taken as attributes
final_attribute = np.array(attribute, dtype=float)
disease = data[:,data.shape[1]-1] #last column is taken as the disease
#calling the algorithms
pca(final_attribute, disease, filename)
tsne(final_attribute, disease, filename)
svd(final_attribute, disease, filename)
|
{"hexsha": "6fbce1c056957c7a6eeda97c09680dec9174f0e2", "size": 2319, "ext": "py", "lang": "Python", "max_stars_repo_path": "PCA/Code/pca_code.py", "max_stars_repo_name": "SiddharthSelvaraj/Dimensionality-Reduction-and-Association-Analysis", "max_stars_repo_head_hexsha": "03683447d9d1cc64d9e3d0bfa4519e730a4e7a03", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-01T15:58:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-01T15:58:20.000Z", "max_issues_repo_path": "PCA/Code/pca_code.py", "max_issues_repo_name": "SiddharthSelvaraj/Dimensionality-Reduction-and-Association-Analysis", "max_issues_repo_head_hexsha": "03683447d9d1cc64d9e3d0bfa4519e730a4e7a03", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PCA/Code/pca_code.py", "max_forks_repo_name": "SiddharthSelvaraj/Dimensionality-Reduction-and-Association-Analysis", "max_forks_repo_head_hexsha": "03683447d9d1cc64d9e3d0bfa4519e730a4e7a03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7307692308, "max_line_length": 89, "alphanum_fraction": 0.6752910737, "include": true, "reason": "import numpy", "num_tokens": 644}
|
{-# OPTIONS --without-K #-}
module WithoutK7 where
data I : Set where
i : I
data D (x : I) : Set where
d : D x
data P (x : I) : D x → Set where
Foo : ∀ x → P x (d {x = x}) → Set
Foo x ()
|
{"hexsha": "3a20e1cae8827f4cbbdc429d3a633a9ffaa9d708", "size": 196, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/fail/WithoutK7.agda", "max_stars_repo_name": "np/agda-git-experiment", "max_stars_repo_head_hexsha": "20596e9dd9867166a64470dd24ea68925ff380ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-27T04:41:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-27T04:41:05.000Z", "max_issues_repo_path": "test/fail/WithoutK7.agda", "max_issues_repo_name": "np/agda-git-experiment", "max_issues_repo_head_hexsha": "20596e9dd9867166a64470dd24ea68925ff380ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/fail/WithoutK7.agda", "max_forks_repo_name": "np/agda-git-experiment", "max_forks_repo_head_hexsha": "20596e9dd9867166a64470dd24ea68925ff380ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.0666666667, "max_line_length": 33, "alphanum_fraction": 0.5153061224, "num_tokens": 74}
|
# Standard libraries
import pathlib
import glob
import platform
import pickle
from datetime import datetime
from pprint import pprint
# Scientific stack
import numpy as np
import numpy.random as rnd
import pandas as pd
# Chunked data
import zarr
# Audio processing
import dcase_util as du
# Pretty progress bar
import tqdm
import preprocessing as prep
n_feats = 100
dataset_name = f'numfeats{n_feats}'
# db_path = '/media/zanco/DADOS/zanco/datasets/TUT-urban-acoustic-scenes-2018-development/'
db_path = '/media/zanco/DADOS/zanco/datasets/TAU-urban-acoustic-scenes-2019-development/'
# db_path = 'E:/datasets/TUT-urban-acoustic-scenes-2018-development/'
# db_path = 'E:/datasets/TAU-urban-acoustic-scenes-2019-development/'
# version = '2018'
version = '2019'
preprocessor = prep.DataPreprocessing(db_path=db_path,
version=version,
n_feats=n_feats,
dataset_name=dataset_name,
dataset_folder=f'../saved_features{version}',
audio_preprocess='mid',
feature_type='mel_spectrogram')
# preprocessor.process(overwrite=False)
fold_meta, fold_split = preprocessor.generate_fold_meta(overwrite=False)
train_ids = fold_meta['identifier'][fold_split[0][0]]
valid_ids = fold_meta['identifier'][fold_split[0][1]]
c = list(set(train_ids) & set(valid_ids))
print(len(c))
seed = 0
n_splits = 5
# Get consistent results (same folds every time)
rand_state = rnd.get_state() # get current PRNG state
rnd.seed(seed)
# Get training and evaluation example indexes
train_ind = np.where(preprocessor.db_meta['example_type'].values == 'train')[0]
eval_ind = np.where(preprocessor.db_meta['example_type'].values == 'test')[0]
# Split based on labels and identifiers
from sklearn.model_selection import GroupKFold
splitter = GroupKFold(n_splits=n_splits)
X = np.empty([train_ind.size,1])
y = preprocessor.db_meta['scene_label'][train_ind]
ids = preprocessor.db_meta['identifier'][train_ind]
temp_fold_split = list(splitter.split(X=X,y=y,groups=ids))
# Fix indexing
fold_split = [[train_ind[x[0]], train_ind[x[1]]] for x in temp_fold_split]
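# Leakage-check sketch, mirroring the train/valid overlap check above: with
# GroupKFold, no identifier should appear on both sides of any split.
for tr_idx, va_idx in fold_split:
    tr_ids = set(preprocessor.db_meta['identifier'][tr_idx])
    va_ids = set(preprocessor.db_meta['identifier'][va_idx])
    assert len(tr_ids & va_ids) == 0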
from sklearn.model_selection import (TimeSeriesSplit, KFold, ShuffleSplit,
StratifiedKFold, GroupShuffleSplit,
GroupKFold, StratifiedShuffleSplit)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
np.random.seed(1338)
cmap_data = plt.cm.Paired
cmap_group = plt.cm.prism
cmap_cv = plt.cm.coolwarm
n_splits = 5
# Generate the class/group data
_, label_index = np.unique(preprocessor.db_meta['scene_label'][train_ind].values, return_inverse=True)
y = label_index.astype('i1')
_, id_index = np.unique(preprocessor.db_meta['identifier'][train_ind].values, return_inverse=True)
groups = id_index.astype(int)
def visualize_groups(classes, groups):
# Visualize dataset groups
fig, ax = plt.subplots()
plot = ax.scatter(range(len(groups)), [.5] * len(groups), c=groups, marker='_',
lw=50, cmap=cmap_group)
ax.scatter(range(len(groups)), [3.5] * len(groups), c=classes, marker='_',
lw=50, cmap=cmap_data)
ax.set(ylim=[-1, 5], yticks=[.5, 3.5],
yticklabels=['Data\ngroup', 'Data\nclass'], xlabel="Sample index")
fig.colorbar(plot)
visualize_groups(y, groups)
def plot_cv_indices(cv, X, y, group, ax, n_splits, lw=10):
"""Create a sample plot for indices of a cross-validation object."""
# Generate the training/testing visualizations for each CV split
for ii, (tr, tt) in enumerate(cv.split(X=X, y=y, groups=group)):
# Fill in indices with the training/test groups
indices = np.array([np.nan] * len(X))
indices[tt] = 1
indices[tr] = 0
# Visualize the results
plot = ax.scatter(range(len(indices)), [ii + .5] * len(indices),
c=indices, marker='_', lw=lw, cmap=cmap_cv,
vmin=-.2, vmax=1.2)
fig.colorbar(plot)
# Plot the data classes and groups at the end
ax.scatter(range(len(X)), [ii + 1.5] * len(X),
c=y, marker='_', lw=lw, cmap=cmap_data)
ax.scatter(range(len(X)), [ii + 2.5] * len(X),
c=group, marker='_', lw=lw, cmap=cmap_group)
# Formatting
yticklabels = list(range(n_splits)) + ['class', 'group']
ax.set(yticks=np.arange(n_splits+2) + .5, yticklabels=yticklabels,
xlabel='Sample index', ylabel="CV iteration",
ylim=[n_splits+2.2, -.2])
ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
return ax
fig, ax = plt.subplots()
# cv = KFold(n_splits)
plot_cv_indices(splitter, X, y, groups, ax, n_splits)
plt.show()
exit(0)
|
{"hexsha": "f7f54a31739a081a42746b6d728dd60789687c76", "size": 4869, "ext": "py", "lang": "Python", "max_stars_repo_path": "debugging/debug_validation.py", "max_stars_repo_name": "dangpzanco/dcase-task1", "max_stars_repo_head_hexsha": "72867cc5b8969d7ec55c5acfd30ebbc3a7246666", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-23T08:10:59.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-23T08:10:59.000Z", "max_issues_repo_path": "debugging/debug_validation.py", "max_issues_repo_name": "dangpzanco/dcase-task1", "max_issues_repo_head_hexsha": "72867cc5b8969d7ec55c5acfd30ebbc3a7246666", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "debugging/debug_validation.py", "max_forks_repo_name": "dangpzanco/dcase-task1", "max_forks_repo_head_hexsha": "72867cc5b8969d7ec55c5acfd30ebbc3a7246666", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-07-12T05:26:15.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-22T09:15:00.000Z", "avg_line_length": 30.8164556962, "max_line_length": 102, "alphanum_fraction": 0.6594783323, "include": true, "reason": "import numpy", "num_tokens": 1222}
|
"""1D and 2D quadrotor environment using PyBullet physics.
Based on UTIAS Dynamic Systems Lab's gym-pybullet-drones:
* https://github.com/utiasDSL/gym-pybullet-drones
"""
import math
from copy import deepcopy
import casadi as cs
from gym import spaces
import numpy as np
import pybullet as p
from safe_control_gym.envs.benchmark_env import Cost, Task
from safe_control_gym.envs.gym_pybullet_drones.base_aviary import BaseAviary, Physics
from safe_control_gym.envs.gym_pybullet_drones.quadrotor_utils import QuadType, cmd2pwm, pwm2rpm
from safe_control_gym.envs.constraints import create_ConstraintList_from_list, GENERAL_CONSTRAINTS
from safe_control_gym.envs.disturbances import DISTURBANCE_TYPES, DisturbanceList
from safe_control_gym.math_and_models.symbolic_systems import SymbolicModel
class Quadrotor(BaseAviary):
"""1D and 2D quadrotor environment task.
Including symbolic model, constraints, randomization, adversarial disturbances,
multiple cost functions, stabilization and trajectory tracking references.
"""
AVAILABLE_CONSTRAINTS = deepcopy(GENERAL_CONSTRAINTS)
DISTURBANCE_MODES = {
"observation": {
"dim": 6
},
"action": {
"dim": 2
},
"dynamics": {
"dim": 2
}
}
INERTIAL_PROP_RAND_INFO = {
"M": { # Nominal: 0.027
'distrib': "uniform",
'low': 0.022,
'high': 0.032
},
"Iyy": { # Nominal: 1.4e-5
'distrib': "uniform",
'low': 1.3e-5,
'high': 1.5e-5
}
}
INIT_STATE_RAND_INFO = {
"init_x": {
'distrib': "uniform",
'low': -0.5,
'high': 0.5
},
"init_x_dot": {
'distrib': "uniform",
'low': -0.01,
'high': 0.01
},
"init_z": {
'distrib': "uniform",
'low': 0.1,
'high': 1.5
},
"init_z_dot": {
'distrib': "uniform",
'low': -0.01,
'high': 0.01
},
"init_theta": {
'distrib': "uniform",
'low': -0.3,
'high': 0.3
},
"init_theta_dot": {
'distrib': "uniform",
'low': -0.01,
'high': 0.01
}
}
TASK_INFO = {
"stabilization_goal": [0, 1],
"stabilization_goal_tolerance": 0.05,
"trajectory_type": "circle",
"num_cycles": 1,
"trajectory_plane": "zx",
"trajectory_position_offset": [0.5, 0],
"trajectory_scale": -0.5
}
def __init__(self,
seed: int = 1337,
output_dir=None,
info_in_reset: bool = False,
ctrl_freq: int = 60,
pyb_freq: int = 240,
gui: bool = False,
physics: Physics = Physics.PYB,
quad_type: QuadType = QuadType.TWO_D,
normalized_rl_action_space: bool = False,
init_state=None,
randomized_init: bool = True,
init_state_randomization_info=None,
inertial_prop=None,
randomized_inertial_prop: bool = False,
inertial_prop_randomization_info=None,
task: Task = Task.STABILIZATION,
task_info=None,
episode_len_sec: int = 5,
cost: Cost = Cost.RL_REWARD,
disturbances=None,
adversary_disturbance=None,
adversary_disturbance_scale=0.01,
constraints=None,
done_on_violation: bool = False,
verbose: bool = False):
"""Initialize a quadrotor environment.
Args:
seed (int, optional): Seed for the random number generator.
output_dir (str, optional): path to directory to save any env outputs.
info_in_reset (bool, optional): Whether .reset() returns a dictionary with the
environment's symbolic model.
ctrl_freq (int, optional): The frequency at which the environment steps.
pyb_freq (int, optional): The frequency at which PyBullet steps (a multiple of ctrl_freq).
            gui (bool, optional): Whether to show PyBullet's GUI.
            physics (Physics, optional): The choice of PyBullet update implementation
                (e.g. the one with ground effect).
quad_type (QuadType, optional): The choice of motion type (1D along z or 2D
in the x-z plane).
normalized_rl_action_space (bool, optional): Whether to normalize the action space around
the hover thrust.
init_state (ndarray, optional): The initial state of the environment, (z, z_dot) or
(x, x_dot, z, z_dot theta, theta_dot).
randomized_init (bool, optional): Whether to randomize the initial state.
init_state_randomization_info (dict, optional): A dictionary with information used to
randomize the initial state.
inertial_prop (ndarray, optional): The inertial properties of the environment
(mass, Iyy).
randomized_inertial_prop (bool, optional): Whether to randomize the inert. properties.
inertial_prop_randomization_info (dict, optional): A dictionary with information used
to randomize the inert. properties.
task: (Task, optional): The environment's task (stabilization or traj. tracking).
task_info (dict, optional): A dictionary with the information used to generate the
task X and U references.
episode_len_sec (int, optional): Maximum episode duration in seconds.
cost: (Cost, optional): Cost function choice used to compute the reward in .step().
            disturbances (dict, optional): Dictionary to specify disturbances being used.
            adversary_disturbance (str, optional): Whether to use an adversary/external disturbance.
            adversary_disturbance_scale (float, optional): Parameterizes the magnitude of the adversary disturbance.
            constraints (dict, optional): Dictionary to specify the constraints being used.
            done_on_violation (bool, optional): Whether to return done==True on a constraint
                violation.
            verbose (bool, optional): Whether to suppress environment print statements.
"""
self.NAME = 'quadrotor'
# Select the 1D (moving along z) or 2D (moving in the xz plane) quadrotor.
self.QUAD_TYPE = QuadType(quad_type)
self.NORMALIZED_RL_ACTION_SPACE = normalized_rl_action_space
# Set timing constants.
self.CTRL_FREQ = ctrl_freq
self.PYB_FREQ = pyb_freq
if self.PYB_FREQ % self.CTRL_FREQ != 0:
raise ValueError(
"[ERROR] in Quadrotor.__init__(), pyb_freq is not divisible by env_freq."
)
self.CTRL_TIMESTEP = 1. / self.CTRL_FREQ
self.PYB_TIMESTEP = 1. / self.PYB_FREQ
# Store initial state info.
if init_state is None:
self.INIT_X, self.INIT_X_DOT, self.INIT_Z, self.INIT_Z_DOT, self.INIT_THETA, self.INIT_THETA_DOT = np.zeros(6)
elif self.QUAD_TYPE == QuadType.ONE_D:
self.INIT_X, self.INIT_X_DOT, self.INIT_THETA, self.INIT_THETA_DOT = np.zeros(4)
if isinstance(init_state, np.ndarray):
self.INIT_Z, self.INIT_Z_DOT = init_state
elif isinstance(init_state, dict):
self.INIT_Z = init_state.get("init_z", 0)
self.INIT_Z_DOT = init_state.get("init_z_dot", 0)
else:
raise ValueError(
"[ERROR] in Quadrotor.__init__(), init_state incorrect format."
)
elif self.QUAD_TYPE == QuadType.TWO_D:
if isinstance(init_state, np.ndarray):
self.INIT_X, self.INIT_X_DOT, self.INIT_Z, self.INIT_Z_DOT, self.INIT_THETA, self.INIT_THETA_DOT = init_state
elif isinstance(init_state, dict):
self.INIT_X = init_state.get("init_x", 0)
self.INIT_X_DOT = init_state.get("init_x_dot", 0)
self.INIT_Z = init_state.get("init_z", 0)
self.INIT_Z_DOT = init_state.get("init_z_dot", 0)
self.INIT_THETA = init_state.get("init_theta", 0)
self.INIT_THETA_DOT = init_state.get("init_theta_dot", 0)
else:
raise ValueError(
"[ERROR] in Quadrotor.__init__(), init_state incorrect format."
)
# Decide whether to randomize the initial state and how (see info dictionary).
self.RANDOMIZED_INIT = randomized_init
if init_state_randomization_info is not None:
self.INIT_STATE_RAND_INFO = init_state_randomization_info
# Do NOT randomize x, x_dot, theta, theta_dot for the 1D quadrotor.
if self.QUAD_TYPE == QuadType.ONE_D:
for init_name in ["init_x", "init_x_dot", "init_theta", "init_theta_dot"]:
self.INIT_STATE_RAND_INFO.pop(init_name, None)
# Decide whether to randomize the inertial properties and how (see info dictionary).
self.RANDOMIZED_INERTIAL_PROP = randomized_inertial_prop
if inertial_prop_randomization_info is not None:
self.INERTIAL_PROP_RAND_INFO = inertial_prop_randomization_info
# Do NOT randomize J for the 1D quadrotor.
if self.QUAD_TYPE == QuadType.ONE_D:
self.INERTIAL_PROP_RAND_INFO.pop("Iyy", None)
# Store disturbance info.
self.DISTURBANCES = disturbances
self.adversary_disturbance = adversary_disturbance
self.adversary_disturbance_scale = adversary_disturbance_scale
# 1D quad disturbances have lower dimensions
if self.QUAD_TYPE == QuadType.ONE_D:
self.DISTURBANCE_MODES["observation"]["dim"] = 2
self.DISTURBANCE_MODES["action"]["dim"] = 1
self.DISTURBANCE_MODES["dynamics"]["dim"] = 1
# Store constraint info
self.CONSTRAINTS = constraints
self.DONE_ON_VIOLATION = done_on_violation
self.VERBOSE = verbose
# Call BaseAviary constructor.
super().__init__(seed=seed,
info_in_reset=info_in_reset,
episode_len_sec=episode_len_sec,
cost=Cost(cost),
gui=gui,
freq=self.PYB_FREQ,
aggregate_phy_steps=int(self.PYB_FREQ / self.CTRL_FREQ),
physics=Physics(physics))
# Store action (input) and observation (state) spaces dimensions.
self.INPUT_DIM = self.action_space.shape[0]
self.STATE_DIM = self.observation_space.shape[0]
        # Override inertial properties if passed as arguments.
if inertial_prop is None:
pass
elif np.array(inertial_prop).shape == (2,):
self.MASS, self.J[1, 1] = inertial_prop
elif isinstance(inertial_prop, dict):
self.MASS = inertial_prop.get("mass", 0)
self.J[1, 1] = inertial_prop.get("iyy", 0)
else:
raise ValueError(
"[ERROR] in Quadrotor.__init__(), inertial_prop is not of shape (2,)."
)
# Create X_GOAL and U_GOAL references for the assigned task.
self.TASK = Task(task)
if task_info is not None:
self.TASK_INFO = task_info
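        # Hover input: total thrust M*g divided evenly across the action dimensions.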
self.U_GOAL = np.ones(self.INPUT_DIM) * self.MASS * self.GRAVITY_ACC / self.INPUT_DIM
if self.TASK == Task.STABILIZATION:
if self.QUAD_TYPE == QuadType.ONE_D:
self.X_GOAL = np.hstack(
[self.TASK_INFO["stabilization_goal"][1],
0.0]) # x = {z, z_dot}.
elif self.QUAD_TYPE == QuadType.TWO_D:
self.X_GOAL = np.hstack([
self.TASK_INFO["stabilization_goal"][0], 0.0,
self.TASK_INFO["stabilization_goal"][1], 0.0, 0.0, 0.0
]) # x = {x, x_dot, z, z_dot, theta, theta_dot}.
elif self.TASK == Task.TRAJ_TRACKING:
POS_REF, \
VEL_REF, \
SPEED = self._generate_trajectory(traj_type=self.TASK_INFO["trajectory_type"],
traj_length=self.EPISODE_LEN_SEC,
num_cycles=self.TASK_INFO["num_cycles"],
traj_plane=self.TASK_INFO["trajectory_plane"],
position_offset=self.TASK_INFO["trajectory_position_offset"],
scaling=self.TASK_INFO["trajectory_scale"],
sample_time=self.CTRL_TIMESTEP
)
# print(POS_REF.shape)
# print(VEL_REF.shape)
# print(SPEED.shape)
# self._plot_trajectory(traj_type=self.TASK_INFO["trajectory_type"],
# traj_plane=self.TASK_INFO["trajectory_plane"],
# traj_length=self.EPISODE_LEN_SEC,
# num_cycles=self.TASK_INFO["num_cycles"],
# pos_ref_traj=POS_REF,
# vel_ref_traj=VEL_REF,
# speed_traj=SPEED
# )
if self.QUAD_TYPE == QuadType.ONE_D:
self.X_GOAL = np.vstack([
POS_REF[:, 2], # + self.INIT_Z, # Possible feature: add initial position.
VEL_REF[:, 2]
]).transpose()
elif self.QUAD_TYPE == QuadType.TWO_D:
self.X_GOAL = np.vstack([
POS_REF[:, 0], # + self.INIT_X, # Possible feature: add initial position.
VEL_REF[:, 0],
POS_REF[:, 2], # + self.INIT_Z, # Possible feature: add initial position.
VEL_REF[:, 2],
np.zeros(POS_REF.shape[0]),
np.zeros(VEL_REF.shape[0])
]).transpose()
def step(self, action):
"""Advances the environment by one control step.
Args:
action (ndarray): the action applied to the environment for the step.
Returns:
ndarray: The state of the environment after the step.
float: The scalar reward/cost of the step.
bool: Whether the conditions for the end of an episode are met in the step.
dict: A dictionary with information about the constraints evaluations and violations.
"""
# Sanity check (reset at least once).
self._check_initial_reset()
# Save the raw input action.
self.current_raw_input_action = action
# Advance the simulation.
obs, rew, done, info = self._advance_simulation(action)
# Standard Gym return.
return obs, rew, done, info
def reset(self):
"""(Re-)initializes the environment to start an episode.
Mandatory to call at least once after __init__().
Returns:
ndarray: The initial state of the environment.
dict: A dictionary with information about the dynamics and constraints symbolic models.
"""
# BaseAviary reset.
super().reset()
# Housekeeping variables.
self.initial_reset = True
self.state = None
self.current_raw_input_action = None
self.current_preprocessed_action = None
if self.adversary_disturbance is not None:
self.adv_action = None
# Reset the disturbances.
for mode in self.disturbances.keys():
self.disturbances[mode].reset(self)
# Choose randomized or deterministic inertial properties.
prop_values = {
"M": self.MASS,
"Iyy": self.J[1, 1],
}
if self.RANDOMIZED_INERTIAL_PROP:
prop_values = self._randomize_values_by_info(
prop_values, self.INERTIAL_PROP_RAND_INFO)
if any(phy_quantity < 0 for phy_quantity in prop_values.values()):
raise ValueError("[ERROR] in CartPole.reset(), negative randomized inertial properties.")
self.OVERRIDDEN_QUAD_MASS = prop_values["M"]
self.OVERRIDDEN_QUAD_INERTIA = [self.J[0, 0], prop_values["Iyy"], self.J[2, 2]]
# Override inertial properties.
p.changeDynamics(
self.DRONE_IDS[0],
linkIndex=-1, # Base link.
mass=self.OVERRIDDEN_QUAD_MASS,
localInertiaDiagonal=self.OVERRIDDEN_QUAD_INERTIA,
physicsClientId=self.PYB_CLIENT)
# Randomize initial state.
init_values = {
"init_x": self.INIT_X,
"init_x_dot": self.INIT_X_DOT,
"init_z": self.INIT_Z,
"init_z_dot": self.INIT_Z_DOT,
"init_theta": self.INIT_THETA,
"init_theta_dot": self.INIT_THETA_DOT,
}
if self.RANDOMIZED_INIT:
init_values = self._randomize_values_by_info(init_values, self.INIT_STATE_RAND_INFO)
OVERRIDDEN_INIT_X = init_values["init_x"]
OVERRIDDEN_INIT_X_DOT = init_values["init_x_dot"]
OVERRIDDEN_INIT_Z = init_values["init_z"]
OVERRIDDEN_INIT_Z_DOT = init_values["init_z_dot"]
OVERRIDDEN_INIT_THETA = init_values["init_theta"]
OVERRIDDEN_INIT_THETA_DOT = init_values["init_theta_dot"]
p.resetBasePositionAndOrientation(self.DRONE_IDS[0], [OVERRIDDEN_INIT_X, 0, OVERRIDDEN_INIT_Z],
p.getQuaternionFromEuler([0, OVERRIDDEN_INIT_THETA, 0]),
physicsClientId=self.PYB_CLIENT)
p.resetBaseVelocity(self.DRONE_IDS[0],
[OVERRIDDEN_INIT_X_DOT, 0, OVERRIDDEN_INIT_Z_DOT],
[0, OVERRIDDEN_INIT_THETA_DOT, 0],
physicsClientId=self.PYB_CLIENT)
# Update BaseAviary internal variables before calling self._get_observation().
self._update_and_store_kinematic_information()
# Return either an observation and dictionary or just the observation.
if self.INFO_IN_RESET:
return self._get_observation(), self._get_reset_info()
return self._get_observation()
def render(self, mode='human'):
"""Retrieves a frame from PyBullet rendering.
Args:
mode (str): Unused.
Returns:
ndarray: A multidimensional array with the RGB frame captured by PyBullet's camera.
"""
[w, h, rgb, dep, seg] = p.getCameraImage(width=self.RENDER_WIDTH,
height=self.RENDER_HEIGHT,
shadow=1,
viewMatrix=self.CAM_VIEW,
projectionMatrix=self.CAM_PRO,
renderer=p.ER_TINY_RENDERER,
flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,
physicsClientId=self.PYB_CLIENT)
# Image.fromarray(np.reshape(rgb, (h, w, 4)), 'RGBA').show()
return np.reshape(rgb, (h, w, 4))
def close(self):
"""Clean up the environment and PyBullet connection.
"""
super().close()
def set_adversary_control(self, action):
"""Sets disturbance by an adversary controller.
This method can/should be called before (each) .step().
Args:
action (ndarray): The adversarial disturbance to apply to the environment.
"""
if self.adversary_disturbance is not None:
clipped_adv_action = np.clip(action,
self.adversary_action_space.low,
self.adversary_action_space.high)
self.adv_action = clipped_adv_action * self.adversary_disturbance_scale
def _setup_constraints(self):
"""Sets up a list (ConstraintList) of constraints.
"""
self.constraints = None
self.num_constraints = 0
if self.CONSTRAINTS is not None:
self.constraints = create_ConstraintList_from_list(
self.CONSTRAINTS, self.AVAILABLE_CONSTRAINTS, self)
self.num_constraints = self.constraints.num_constraints
def _setup_symbolic(self):
"""Creates symbolic (CasADi) models for dynamics, observation, and cost.
Returns:
SymbolicModel: CasADi symbolic model of the environment.
"""
m, g, l = self.MASS, self.GRAVITY_ACC, self.L
Iyy = self.J[1, 1]
dt = self.CTRL_TIMESTEP
# Define states.
z = cs.MX.sym('z')
z_dot = cs.MX.sym('z_dot')
if self.QUAD_TYPE == QuadType.ONE_D:
nx, nu = 2, 1
# Define states.
X = cs.vertcat(z, z_dot)
# Define input thrust.
T = cs.MX.sym('T')
U = cs.vertcat(T)
# Define dynamics equations.
X_dot = cs.vertcat(z_dot, T / m - g)
# Define observation equation.
Y = cs.vertcat(z, z_dot)
elif self.QUAD_TYPE == QuadType.TWO_D:
nx, nu = 6, 2
# Define states.
x = cs.MX.sym('x')
x_dot = cs.MX.sym('x_dot')
theta = cs.MX.sym('theta')
theta_dot = cs.MX.sym('theta_dot')
X = cs.vertcat(x, x_dot, z, z_dot, theta, theta_dot)
# Define input thrusts.
T1 = cs.MX.sym('T1')
T2 = cs.MX.sym('T2')
U = cs.vertcat(T1, T2)
# Define dynamics equations.
X_dot = cs.vertcat(x_dot,
cs.sin(theta) * (T1 + T2) / m, z_dot,
cs.cos(theta) * (T1 + T2) / m - g, theta_dot,
l * (T2 - T1) / Iyy / np.sqrt(2))
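            # The planar dynamics above resolve the total thrust (T1 + T2) along
            # the pitch angle theta for the x/z accelerations and take moments
            # about the y-axis for theta; the sqrt(2) factor is the arm-length
            # projection assumed for the X-shaped motor configuration.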
# Define observation.
Y = cs.vertcat(x, x_dot, z, z_dot, theta, theta_dot)
# Define cost (quadratic form).
Q = cs.MX.sym('Q', nx, nx)
R = cs.MX.sym('R', nu, nu)
Xr = cs.MX.sym('Xr', nx, 1)
Ur = cs.MX.sym('Ur', nu, 1)
cost_func = 0.5 * (X - Xr).T @ Q @ (X - Xr) + 0.5 * (U - Ur).T @ R @ (U - Ur)
# Define dynamics and cost dictionaries.
dynamics = {"dyn_eqn": X_dot, "obs_eqn": Y, "vars": {"X": X, "U": U}}
cost = {
"cost_func": cost_func,
"vars": {
"X": X,
"U": U,
"Xr": Xr,
"Ur": Ur,
"Q": Q,
"R": R
}
}
# Setup symbolic model.
self.symbolic = SymbolicModel(dynamics=dynamics, cost=cost, dt=dt)
def _set_action_space(self):
"""Returns the action space of the environment.
Returns:
gym.spaces: The quadrotor environment's action space, of size 1 or 2 depending on QUAD_TYPE.
"""
# Define action/input dimension, labels, and units.
if self.QUAD_TYPE == QuadType.ONE_D:
action_dim = 1
self.ACTION_LABELS = ['T']
self.ACTION_UNITS = ['N'] if not self.NORMALIZED_RL_ACTION_SPACE else ['-']
elif self.QUAD_TYPE == QuadType.TWO_D:
action_dim = 2
self.ACTION_LABELS = ['T1', 'T2']
self.ACTION_UNITS = ['N', 'N'] if not self.NORMALIZED_RL_ACTION_SPACE else ['-', '-']
if self.NORMALIZED_RL_ACTION_SPACE:
return spaces.Box(low=-np.ones(action_dim),
high=np.ones(action_dim),
dtype=np.float32)
else:
return spaces.Box(low=np.zeros(action_dim),
high=self.MAX_THRUST * np.ones(action_dim),
dtype=np.float32)
def _set_observation_space(self):
"""Returns the observation space of the environment.
Returns:
gym.spaces: The bounded observation (state) space, of size 2 or 6 depending on QUAD_TYPE.
"""
self.x_threshold = 2
self.z_threshold = 2
self.theta_threshold_radians = 85 * (2 * math.pi / 360)
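        # i.e. 85 degrees in radians; the bounds below allow up to 2x each threshold.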
if self.QUAD_TYPE == QuadType.ONE_D:
# x = {z, z_dot}.
# Define obs/state bounds.
low = np.array([self.GROUND_PLANE_Z * 2, -np.finfo(np.float32).max])
high = np.array([self.z_threshold * 2, np.finfo(np.float32).max])
# Define obs/state labels and units.
self.STATE_LABELS = ['z', 'z_dot']
self.STATE_UNITS = ['m', 'm/s']
elif self.QUAD_TYPE == QuadType.TWO_D:
# x = {x, x_dot, z, z_dot, theta, theta_dot}.
# Define obs/state bounds.
low = np.array([
-self.x_threshold * 2, -np.finfo(np.float32).max,
self.GROUND_PLANE_Z * 2, -np.finfo(np.float32).max,
-self.theta_threshold_radians * 2, -np.finfo(np.float32).max
])
high = np.array([
self.x_threshold * 2,
np.finfo(np.float32).max, self.z_threshold * 2,
np.finfo(np.float32).max, self.theta_threshold_radians * 2,
np.finfo(np.float32).max
])
# Define obs/state labels and units.
self.STATE_LABELS = ['x', 'x_dot', 'z', 'z_dot', 'theta', 'theta_dot']
self.STATE_UNITS = ['m', 'm/s', 'm', 'm/s', 'rad', 'rad/s']
return spaces.Box(low=low, high=high, dtype=np.float32)
def _preprocess_control(self, action):
"""Converts the action passed to .step() into motors' RPMs (ndarray of shape (4,)).
Args:
action (ndarray): The raw action input, of size 1 or 2 depending on QUAD_TYPE.
Returns:
ndarray: The motors RPMs to apply to the quadrotor.
"""
if self.NORMALIZED_RL_ACTION_SPACE:
action = np.clip(action, self.action_space.low, self.action_space.high)
thrust = (1 + (0.1 * action)) * ((self.GRAVITY_ACC * self.MASS) / self.INPUT_DIM)
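            # With normalized actions in [-1, 1], this maps each input to a thrust
            # in [0.9, 1.1] * (m * g / INPUT_DIM), i.e. roughly +/-10% around the
            # per-input hover thrust.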
else:
thrust = np.clip(action, self.action_space.low, self.action_space.high)
if not np.array_equal(thrust, np.array(action)) and self.VERBOSE:
print("[WARNING]: action was clipped in Quadrotor._preprocess_control().")
self.current_preprocessed_action = thrust
# Apply disturbances.
if "action" in self.disturbances:
thrust = self.disturbances["action"].apply(thrust, self)
if self.adversary_disturbance == "action":
thrust = thrust + self.adv_action
pwm = cmd2pwm(thrust, self.PWM2RPM_SCALE, self.PWM2RPM_CONST, self.KF, self.MIN_PWM, self.MAX_PWM)
rpm = pwm2rpm(pwm, self.PWM2RPM_SCALE, self.PWM2RPM_CONST)
return rpm
def _advance_simulation(self, action):
"""Pass the commanded RPMs and the adversarial force to the superclass .step().
The PyBullet simulation is stepped PYB_FREQ/CTRL_FREQ times in BaseAviary.
        Args:
            action (ndarray): The motors' RPMs to apply to the quadrotor.
Returns:
ndarray: The state of the environment after the step.
float: The scalar reward/cost of the step.
bool: Whether the conditions for the end of an episode are met in the step.
dict: A dictionary with information about the constraints evaluations and violations.
"""
disturb_force = None
# Determine disturbance force.
passive_disturb = "dynamics" in self.disturbances
adv_disturb = self.adversary_disturbance == "dynamics"
if passive_disturb or adv_disturb:
            # Match the configured dynamics disturbance dimension (1 for 1D, 2 for 2D).
            disturb_force = np.zeros(self.DISTURBANCE_MODES["dynamics"]["dim"])
if passive_disturb:
disturb_force = self.disturbances["dynamics"].apply(
disturb_force, self)
if adv_disturb and self.adv_action is not None:
disturb_force = disturb_force + self.adv_action
# Clear the adversary action, wait for the next one.
self.adv_action = None
# Construct full (3D) disturbance force.
if disturb_force is not None:
if self.QUAD_TYPE == QuadType.ONE_D:
# Only disturb on z direction.
                disturb_force = [0, 0, float(disturb_force[0])]
elif self.QUAD_TYPE == QuadType.TWO_D:
# Only disturb on x-z plane.
disturb_force = [
float(disturb_force[0]), 0,
float(disturb_force[1])
]
else:
raise NotImplementedError(
"[ERROR] in Quadrotor._advance_simulation(), disturb force for quad 3D is not available."
)
return super().step(action, disturb_force)
def _get_observation(self):
"""Returns the current observation (state) of the environment.
Returns:
ndarray: The state of the quadrotor, of size 2 or 6 depending on QUAD_TYPE.
"""
full_state = self._get_drone_state_vector(0)
pos, _, rpy, vel, ang_v, _ = np.split(full_state, [3, 7, 10, 13, 16])
if self.QUAD_TYPE == QuadType.ONE_D:
# x = {z, z_dot}.
self.state = np.hstack([pos[2], vel[2]]).reshape((2,))
elif self.QUAD_TYPE == QuadType.TWO_D:
# x = {x, x_dot, z, z_dot, theta, theta_dot}.
self.state = np.hstack(
[pos[0], vel[0], pos[2], vel[2], rpy[1], ang_v[1]]).reshape(
(6,))
if not np.array_equal(self.state,
np.clip(self.state, self.observation_space.low, self.observation_space.high)):
if self.GUI and self.VERBOSE:
print(
"[WARNING]: observation was clipped in Quadrotor._get_observation()."
)
# Apply observation disturbance.
obs = deepcopy(self.state)
if "observation" in self.disturbances:
obs = self.disturbances["observation"].apply(obs, self)
return obs
def _get_reward(self):
"""Computes the current step's reward value.
Returns:
float: The evaluated reward/cost.
"""
if self.COST == Cost.RL_REWARD:
full_state = self._get_drone_state_vector(0)
pos, _, rpy, vel, ang_v, _ = np.split(full_state, [3, 7, 10, 13, 16])
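            # RL reward: negative squared Euclidean distance to the goal position.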
if self.QUAD_TYPE == QuadType.ONE_D:
dist = np.linalg.norm(np.array([0, 0, self.TASK_INFO["stabilization_goal"][1]]) - pos)**2
elif self.QUAD_TYPE == QuadType.TWO_D:
dist = np.linalg.norm(
np.array([self.TASK_INFO["stabilization_goal"][0], 0,
self.TASK_INFO["stabilization_goal"][1]]) - pos)**2
return -1 * dist
if self.COST == Cost.QUADRATIC:
state = self._get_observation()
if self.TASK == Task.STABILIZATION:
return float(-1 * self.symbolic.loss(x=state,
Xr=self.X_GOAL,
u=self.current_preprocessed_action,
Ur=self.U_GOAL,
Q=self.Q,
R=self.R)["l"])
if self.TASK == Task.TRAJ_TRACKING:
return float(-1 * self.symbolic.loss(x=state,
Xr=self.X_GOAL[self.ctrl_step_counter,:],
u=self.current_preprocessed_action,
Ur=self.U_GOAL,
Q=self.Q,
R=self.R)["l"])
def _get_done(self):
"""Computes the conditions for termination of an episode.
Returns:
bool: Whether an episode is over.
"""
# Done if goal reached for stabilization task with quadratic cost.
if self.TASK == Task.STABILIZATION and self.COST == Cost.QUADRATIC:
self.goal_reached = bool(np.linalg.norm(self.state - self.X_GOAL) < self.TASK_INFO["stabilization_goal_tolerance"])
if self.goal_reached:
return True
# Done if the episode length is exceeded.
if (self.ctrl_step_counter + 1) / self.CTRL_FREQ >= self.EPISODE_LEN_SEC:
return True
# Done if a constraint is violated.
if self.constraints is not None:
if self.DONE_ON_VIOLATION and self.constraints.is_violated(self):
return True
# Done if state is out-of-bounds.
# if self.QUAD_TYPE == QuadType.ONE_D:
# z, _ = self.state
# return bool(z < -self.z_threshold
# or z > self.z_threshold)
# if self.QUAD_TYPE == QuadType.TWO_D:
# x, _, z, _, theta, _ = self.state
# return bool(x < -self.x_threshold
# or x > self.x_threshold
# or z < -self.z_threshold
# or z > self.z_threshold
# or theta < -self.theta_threshold_radians
# or theta > self.theta_threshold_radians)
#
return False
def _get_info(self):
"""Generates the info dictionary returned by every call to .step().
Returns:
dict: A dictionary with information about the constraints evaluations and violations.
"""
info = {}
if self.TASK == Task.STABILIZATION and self.COST == Cost.QUADRATIC:
info["goal_reached"] = self.goal_reached # Add boolean flag for the goal being reached.
        self._get_observation()  # Refresh self.state before evaluating constraints.
        if self.constraints is not None:
            info["constraint_values"] = self.constraints.get_values(self)
            info["constraint_violations"] = self.constraints.get_violations(self)
return info
def _get_reset_info(self):
"""Generates the info dictionary returned by every call to .reset().
Returns:
dict: A dictionary with information about the dynamics and constraints symbolic models.
"""
info = {}
info["symbolic_model"] = self.symbolic
info["physical_parameters"] = {
"quadrotor_mass": self.MASS,
"quadrotor_iyy_inertia": self.J[1, 1]
}
info["x_reference"] = self.X_GOAL
info["u_reference"] = self.U_GOAL
if self.constraints is not None:
info["symbolic_constraints"] = self.constraints.get_all_symbolic_models()
# info["constraint_values"] = self.constraints.get_values(self)
# info["constraint_violations"] = self.constraints.get_violations(self)
return info
def _parse_urdf_parameters(self, file_name: str = "cf2x.urdf"):
"""Parses an URDF file for the robot's properties.
Args:
file_name (str, optional): The .urdf file from which the properties should be pased.
Returns:
The quadrotor roperties stored in BaseAviary, see BaseAviary.__init__().
"""
return super()._parse_urdf_parameters(file_name)
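# ----------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module): a
# minimal random-action rollout. `task="stabilization"` mirrors the Task
# handling in __init__ above; all other constructor defaults are assumptions,
# as is reset() returning only the observation (info_in_reset=False).
if __name__ == "__main__":
    env = Quadrotor(task="stabilization")
    obs = env.reset()
    done = False
    while not done:
        action = env.action_space.sample()  # Random thrusts within the action space.
        obs, reward, done, info = env.step(action)
    env.close()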
|
{"hexsha": "0f99ee6571940e17efab0725251333558efcfce6", "size": 36180, "ext": "py", "lang": "Python", "max_stars_repo_path": "safe_control_gym/envs/gym_pybullet_drones/quadrotor.py", "max_stars_repo_name": "gokhanalcan/safe-control-gym", "max_stars_repo_head_hexsha": "e9086e102663a60a66f2cc9c8cd7610888744056", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "safe_control_gym/envs/gym_pybullet_drones/quadrotor.py", "max_issues_repo_name": "gokhanalcan/safe-control-gym", "max_issues_repo_head_hexsha": "e9086e102663a60a66f2cc9c8cd7610888744056", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "safe_control_gym/envs/gym_pybullet_drones/quadrotor.py", "max_forks_repo_name": "gokhanalcan/safe-control-gym", "max_forks_repo_head_hexsha": "e9086e102663a60a66f2cc9c8cd7610888744056", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.888337469, "max_line_length": 127, "alphanum_fraction": 0.5550580431, "include": true, "reason": "import numpy", "num_tokens": 8208}
|
import os
import sys
from keras.models import Model
from keras.layers import concatenate
if os.path.realpath(os.getcwd()) != os.path.dirname(os.path.realpath(__file__)):
sys.path.append(os.getcwd())
from deephar.config import mpii_sp_dataconf
from deephar.data import MERLSinglePerson
from deephar.models import reception
from deephar.utils import *
sys.path.append(os.path.join(os.getcwd(), 'exp/common'))
from mpii_tools import eval_singleperson_pckh
sys.path.append(os.path.join(os.getcwd(), 'datasets'))
import annothelper
annothelper.check_mpii_dataset()
"""Architecture configuration."""
num_blocks = 8
batch_size = 24
input_shape = mpii_sp_dataconf.input_shape
num_joints = 16
# """
model = reception.build(input_shape, num_joints, dim=2,
num_blocks=num_blocks, num_context_per_joint=2, ksize=(5, 5),
concat_pose_confidence=False)
# """
"""Merge pose and visibility as a single output."""
# """
outputs = []
for b in range(int(len(model.outputs) / 2)):
outputs.append(concatenate([model.outputs[2*b], model.outputs[2*b + 1]],
name='blk%d' % (b + 1)))
model = Model(model.input, outputs, name=model.name)
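# Each block output is now a single (num_joints x 3) tensor: the (x, y) pose
# coordinates concatenated with the per-joint visibility/confidence score.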
weights_path = "weights_merl_061.h5"
model.load_weights(weights_path)
anno_path = "/home/pminhtamnb/proj4/7-kpts/merl4000_4300.pkl"
dataset_path = "/mnt/hdd10tb/Users/duong/MERL"
merl = MERLSinglePerson(dataset_path, anno_path, dataconf=mpii_sp_dataconf)
import matplotlib.pyplot as plt
import numpy as np
# input = np.array(Image.open("000001.jpg").resize((256,256)))/255.0
# input = np.array(Image.open("aa.jpg").resize((256,256)))/255.0
# input = np.array(Image.open("datasets/MPII/images/069937887.jpg").resize((256,256)))/255.0
# input = np.array(Image.open("datasets/MPII/images/099946068.jpg").resize((256,256)))/255.0
# input = np.array(Image.open("/mnt/hdd10tb/Datasets/MERL_Shopping/ReachToShelf/31_2_crop_1150_1171_ReachToShelf-0005.jpg").resize((256,256)))/255.0
# input = np.array(Image.open("frame0.png").resize((256,256)))/255.0
data = merl.get_data(5)
input = data['image']
label = data['pose']
# input = np.array([input])[:,:,:,:3]
print(label)
plt.imshow(input)
# plt.savefig("mpii_input")
pred = model.predict(np.array([input]))
# print(pred)
# for i in pred:
# print(" predict : ", i.shape)
# """
plt.imshow(input)
print(len(pred))
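# Plot joints from the final (8th) prediction block only, thresholding on confidence.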
for j in range(7,8):
for zz in pred[j][0]:
# for zz in pred:
print(zz)
if zz[2]>0.5:
plt.scatter(zz[0] * 256, zz[1] * 256)
plt.savefig("merl_1.jpg")
# """
|
{"hexsha": "783ed65308b41e8ccde9e503108fae09389f7b5b", "size": 2510, "ext": "py", "lang": "Python", "max_stars_repo_path": "exp/merl/predict_one_img.py", "max_stars_repo_name": "pminhtam/2D-3D_Multitask_Deep_Learning", "max_stars_repo_head_hexsha": "097a1035173ea5236db47a806ad275e7482bc6f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-01-01T13:01:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T08:05:54.000Z", "max_issues_repo_path": "exp/merl/predict_one_img.py", "max_issues_repo_name": "pminhtam/2D-3D_Multitask_Deep_Learning", "max_issues_repo_head_hexsha": "097a1035173ea5236db47a806ad275e7482bc6f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:14:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:30:54.000Z", "max_forks_repo_path": "exp/merl/predict_one_img.py", "max_forks_repo_name": "pminhtam/2D-3D_Multitask_Deep_Learning", "max_forks_repo_head_hexsha": "097a1035173ea5236db47a806ad275e7482bc6f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-08-15T10:09:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T01:57:30.000Z", "avg_line_length": 28.202247191, "max_line_length": 148, "alphanum_fraction": 0.7067729084, "include": true, "reason": "import numpy", "num_tokens": 722}
|