text stringlengths 957 885k |
|---|
import random as r
import math as m
import numpy as np
import matplotlib.pyplot as plt
import os
# --- User configuration -------------------------------------------------
# The input files are Mathematica-generated reference curves computed for
# ts = 5.1; if ts below is changed, matching files must be regenerated.
base_path = "/Users/gregoryionovichpage/Desktop/" #change to your own path
filenameJ = "ts5p1J.txt" # Mathematica output for the 'J' statistic at ts = 5.1
filenameH = "ts5p1H.txt" # Mathematica output for the 'H' statistic at ts = 5.1
filenameMsq = "Var5p1.txt" # Mathematica output for the variance curve at ts = 5.1
path_to_fileJ = os.path.join(base_path, filenameJ)
path_to_fileH = os.path.join(base_path, filenameH)
path_to_fileMsq = os.path.join(base_path, filenameMsq)
# Echo the resolved paths so a wrong base_path is easy to spot at startup.
print (path_to_fileJ)
print (path_to_fileH)
print (path_to_fileMsq)
Decide= 'J' # which conditional statistic to simulate: 'H', 'J' or 'G'
ts=5.1 #stopping time WARNING, if changing, you might also want new mathematica input files to read in.
tau=1 #transit time
S=10000 #number of samples per lambda value
Lmbd=10 #Poisson intensity upper limit (endpoint of the lambda sweep)
class ClassHJ(object):
    """
    Monte-Carlo model of particles entering a channel as a Poisson process
    of rate lmbd, each needing transit time tau, observed up to stopping
    time ts.  Two particles overlapping inside the channel cause a blockage.

    Decide selects the quantity sampled by Channel():
      'H' : particles exited, conditioned on a blockage occurring before ts
      'J' : particles exited by ts, blockage or not
      'G' : particles exited, conditioned on the channel staying open
    Channel() returns -1 to flag a rejected sample (conditioning not met).
    """

    def __init__(self, Lmbd, tau, ts, S, Decide):
        self.Lmbd = Lmbd    # upper end of the lambda sweep in Iteration()
        self.tau = tau      # transit time of a single particle
        self.ts = ts        # stopping (observation) time
        self.S = S          # number of accepted samples per lambda value
        # BUGFIX: the original defined Channel as a local function inside
        # __init__, which never attached it to the instance, so
        # self.Channel(...) could not resolve.  Bind the requested variant
        # explicitly instead.
        try:
            self.Channel = {'H': self._channel_h,
                            'J': self._channel_j,
                            'G': self._channel_g}[Decide]
        except KeyError:
            raise ValueError("Decide must be 'H', 'J' or 'G', got %r" % (Decide,))

    def _channel_h(self, lmbd):
        """Particles exited given a blockage before ts; -1 => no blockage (reject)."""
        i = 0          # number of particles that have entered so far
        Ttot = 0.0     # running arrival time
        while True:
            E = -(m.log(r.random())) / lmbd   # exponential inter-arrival time
            Ttot += E
            i += 1
            if Ttot >= self.ts:
                # A particle entering at or after ts means no blockage
                # occurred, so the h(0, ts) conditioning is not satisfied:
                # signal the caller to reject and redraw this sample.
                i = -1
                break
            if E <= self.tau and i != 1:
                # Two particles overlap inside the channel: blockage.
                i -= 2    # the blocking pair never exits
                break
        return i

    def _channel_j(self, lmbd):
        """Particles exited by ts, whether or not a blockage occurred."""
        i = 0
        Ttot = 0.0
        while True:
            E = -(m.log(r.random())) / lmbd
            Ttot += E
            i += 1
            if E <= self.tau and i != 1:
                i -= 2    # blockage: the overlapping pair stays inside
                break
            if Ttot >= self.ts - self.tau:
                i -= 1    # the last entrant cannot clear the channel by ts
                break
        return i

    def _channel_g(self, lmbd):
        """Particles exited given the channel stays open until ts; -1 => reject."""
        i = 0
        Ttot = 0.0
        while True:
            E = -(m.log(r.random())) / lmbd
            Ttot += E
            i += 1
            if E <= self.tau and i != 1:
                i = -1    # a blockage would occur: conditioning fails, reject
                break
            if Ttot >= self.ts - self.tau:
                # A blockage could still happen in the final interval: draw
                # one more arrival.  If it lands before ts the sample is
                # rejected; otherwise the last entrant simply fails to exit.
                Ttot += -(m.log(r.random())) / lmbd
                if Ttot >= self.ts:
                    i -= 1
                else:
                    i = -1
                break
        return i

    def Sample(self, lmbd, PSum=0, PSumSq=0):
        """
        Draw exactly S accepted samples of Channel(lmbd) and return
        (<P^2>, Var(P)).  PSum/PSumSq seed the accumulators (normally 0).

        BUGFIX: the original wrote `j -= 1` on a for-loop variable to retry
        rejected samples -- a no-op in Python -- so rejections were silently
        dropped while the averages still divided by S; it also iterated
        range(S-1), one sample short.  Rejected samples are now redrawn
        until S samples are accepted.
        """
        accepted = 0
        while accepted < self.S:
            P = self.Channel(lmbd)    # particle count, or -1 for a rejection
            if P == -1:
                continue              # conditioning failed: redraw
            PSum += P
            PSumSq += P * P
            accepted += 1
        PAv = PSum / self.S
        PAvSq = PSumSq / self.S
        Var = PAvSq - (PAv * PAv)     # variance of the particle count
        return PAvSq, Var

    def Iteration(self):
        """
        Sweep lambda over (0, Lmbd] in K steps, sample the variance at each
        value, and plot it against the Mathematica reference curve read from
        path_to_fileMsq.  Displays the plot; returns nothing.
        """
        lmbd = .0001    # lambda iterator (incremented before first use)
        K = 200         # number of lambda values in the sweep
        x, y, z, X = [], [], [], []
        dataMsq = np.genfromtxt(path_to_fileMsq)   # analytical reference curve
        xM = dataMsq[:, 0]
        yM = dataMsq[:, 1]
        for _ in range(K):
            lmbd += (self.Lmbd / K)
            # (A discarded warm-up call to self.Channel was removed here; it
            # only consumed random draws and had no statistical effect.)
            A, B = self.Sample(lmbd, 0, 0)
            x.append(lmbd)
            # Closed-form large-t limit, kept for reference.
            X.append((1 + np.exp(lmbd)) / ((1 - np.exp(lmbd)) * (1 - np.exp(lmbd))))
            y.append(A)
            z.append(B)
        plt.subplot(221)
        plt.grid(True)
        plt.plot(x, z)
        plt.plot(xM, yM, 'r:')   # analytical output for comparison
        plt.ylabel('<Var>')
        plt.xlabel('Lambda')
        plt.show()
# Entry point: run the sweep only when executed as a script, so the module
# can be imported (e.g. for testing) without triggering the simulation,
# file reads and plotting.
if __name__ == "__main__":
    ClassHJ(Lmbd, tau, ts, S, Decide).Iteration()
<filename>cupydo/manager.py
#!/usr/bin/env python
# -*- coding: latin-1; -*-
'''
Copyright 2018 University of Liège
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
manager.py
Interface general manager.
Authors : <NAME>, <NAME>, <NAME>
'''
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np
import ccupydo
from utilities import *
# Print full arrays with no truncation.  np.inf is the supported sentinel;
# the original threshold=np.nan raises ValueError on NumPy >= 1.14.
np.set_printoptions(threshold=np.inf)
# ----------------------------------------------------------------------
# Manager class
# ----------------------------------------------------------------------
class Manager(ccupydo.CManager):
    """
    Manager of CUPyDO.
    Handle MPI partitioning and gather fluid-structure interface information
    (node counts, halo-node lists and the global indexing of physical
    interface nodes).
    Inherited public members :
        -setGlobalIndexing()
        -getGlobalIndex()
    """

    def __init__(self, FluidSolver, SolidSolver, nDim, computationType='steady', mpiComm=None):
        """
        Count the interface nodes on each MPI partition, exchange the counts
        across ranks, and build the global indexing of the physical
        (non-halo) interface nodes for both fluid and solid domains.

        FluidSolver/SolidSolver may be None on ranks that do not host the
        corresponding solver.  mpiComm is an MPI communicator, or None for
        a serial run.
        """
        ccupydo.CManager.__init__(self)

        mpiPrint('\n***************************** Initializing FSI interface *****************************', mpiComm)

        if mpiComm != None:
            self.mpiComm = mpiComm
            myid = mpiComm.Get_rank()
            mpiSize = mpiComm.Get_size()
        else:
            # Serial fallback: behave like rank 0 of a one-process run.
            self.mpiComm = None
            myid = 0
            mpiSize = 1

        # --- Initialize all the parameters --- #
        self.nDim = nDim
        self.computationType = computationType
        self.mechanical = True
        self.thermal = False

        self.haveFluidSolver = False
        self.nLocalFluidInterfaceNodes = 0
        self.nLocalFluidInterfacePhysicalNodes = 0
        self.haveFluidInterface = False
        self.fluidHaloNodesList = {}
        self.fluidIndexing = {}

        self.haveSolidSolver = False
        self.nLocalSolidInterfaceNodes = 0
        self.nLocalSolidInterfacePhysicalNodes = 0
        self.haveSolidInterface = False
        self.solidHaloNodesList = {}
        self.solidIndexing = {}

        # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
        if FluidSolver != None:
            print('Fluid solver is initialized on process {}'.format(myid))
            self.haveFluidSolver = True
            self.nLocalFluidInterfaceNodes = FluidSolver.nNodes
            if self.nLocalFluidInterfaceNodes != 0:
                self.haveFluidInterface = True
                print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
        else:
            pass

        if SolidSolver != None:
            print('Solid solver is initialized on process {}'.format(myid))
            self.haveSolidSolver = True
            self.nLocalSolidInterfaceNodes = SolidSolver.nNodes
            if self.nLocalSolidInterfaceNodes != 0:
                self.haveSolidInterface = True
                print('Number of interface solid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalSolidInterfaceNodes))
        else:
            pass

        # --- Exchange information about processors on which the solvers are defined and where the interface nodes are lying --- #
        if self.mpiComm != None:
            # Each rank advertises its id (or -1) for each property; the -1
            # entries are then filtered out to form the processor lists.
            if self.haveFluidSolver == True:
                sendBufFluid = myid
            else:
                sendBufFluid = -1
            if self.haveSolidSolver == True:
                sendBufSolid = myid
            else:
                sendBufSolid = -1
            if self.haveFluidInterface == True:
                sendBufFluidInterface = myid
            else:
                sendBufFluidInterface = -1
            if self.haveSolidInterface == True:
                sendBufSolidInterface = myid
            else :
                sendBufSolidInterface = -1
            rcvBufFluid = mpiAllGather(mpiComm, sendBufFluid)
            rcvBufSolid = mpiAllGather(mpiComm, sendBufSolid)
            rcvBufFluidInterface = mpiAllGather(mpiComm, sendBufFluidInterface)
            rcvBufSolidInterface = mpiAllGather(mpiComm, sendBufSolidInterface)
            self.fluidSolverProcessors = rcvBufFluid[rcvBufFluid != -1]
            self.solidSolverProcessors = rcvBufSolid[rcvBufSolid != -1]
            self.fluidInterfaceProcessors = rcvBufFluidInterface[rcvBufFluidInterface != -1]
            self.solidInterfaceProcessors = rcvBufSolidInterface[rcvBufSolidInterface != -1]
        else:
            # Serial run: everything lives on "processor 0".
            self.fluidSolverProcessors = np.zeros(1, dtype=int)
            self.solidSolverProcessors = np.zeros(1, dtype=int)
            self.fluidInterfaceProcessors = np.zeros(1, dtype=int)
            self.solidInterfaceProcessors = np.zeros(1, dtype=int)
        mpiBarrier(mpiComm)

        # --- Get the list of the halo nodes on the f/s interface --- #
        # NOTE(review): the fluid list is read unconditionally while the solid
        # one is guarded by rank membership -- this assumes FluidSolver is not
        # None on every rank reaching this point; confirm against callers.
        self.fluidHaloNodesList = FluidSolver.haloNodeList
        if myid in self.solidSolverProcessors:
            self.solidHaloNodesList = SolidSolver.haloNodeList
        if self.mpiComm != None:
            self.fluidHaloNodesList = self.mpiComm.allgather(self.fluidHaloNodesList)
            self.solidHaloNodesList = self.mpiComm.allgather(self.solidHaloNodesList)
        else:
            self.fluidHaloNodesList = [{}]
            self.solidHaloNodesList = [{}]

        # --- Get the number of physical (= not halo) nodes on the f/s interface --- #
        self.nLocalFluidInterfacePhysicalNodes = FluidSolver.nPhysicalNodes
        if myid in self.solidSolverProcessors:
            self.nLocalSolidInterfacePhysicalNodes = SolidSolver.nPhysicalNodes

        # --- Calculate the total (sum over all partitions) number of nodes at the f/s interface --- #
        self.nFluidInterfaceNodes = mpiAllReduce(mpiComm, self.nLocalFluidInterfaceNodes)
        self.nFluidInterfacePhysicalNodes = mpiAllReduce(mpiComm, self.nLocalFluidInterfacePhysicalNodes)
        self.nSolidInterfaceNodes = mpiAllReduce(mpiComm, self.nLocalSolidInterfaceNodes)
        self.nSolidInterfacePhysicalNodes = mpiAllReduce(mpiComm, self.nLocalSolidInterfacePhysicalNodes)
        mpiPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes), mpiComm)
        mpiPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes), mpiComm)
        mpiPrint('Total number of fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes), mpiComm)
        mpiPrint('Total number of solid interface nodes : {}'.format(self.nSolidInterfacePhysicalNodes), mpiComm)

        # --- Store the number of physical interface nodes on each processor and allgather the information --- #
        self.fluidPhysicalInterfaceNodesDistribution = np.zeros(mpiSize, dtype=int)
        self.solidPhysicalInterfaceNodesDistribution = np.zeros(mpiSize, dtype=int)
        if self.mpiComm != None:
            self.fluidPhysicalInterfaceNodesDistribution = mpiAllGather(self.mpiComm, self.nLocalFluidInterfacePhysicalNodes)
            self.solidPhysicalInterfaceNodesDistribution = mpiAllGather(self.mpiComm, self.nLocalSolidInterfacePhysicalNodes)
        else:
            self.fluidPhysicalInterfaceNodesDistribution[0] = self.nFluidInterfacePhysicalNodes
            self.solidPhysicalInterfaceNodesDistribution[0] = self.nSolidInterfacePhysicalNodes

        # --- Calculate and store the global indexing of interface physical nodes
        if self.mpiComm != None:
            fluidGlobalIndexRange_temp = tuple()
            solidGlobalIndexRange_temp = tuple()
            if myid in self.fluidInterfaceProcessors:
                # This rank's global range starts after the cumulative count
                # of physical nodes on all lower-ranked partitions.
                globalIndexStart = 0
                for iProc in range(myid):
                    globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
                globalIndexStop = globalIndexStart + self.nLocalFluidInterfacePhysicalNodes-1
            else:
                globalIndexStart = 0
                globalIndexStop = 0
            fluidGlobalIndexRange_temp = (globalIndexStart,globalIndexStop)
            self.fluidGlobalIndexRange = self.mpiComm.allgather(fluidGlobalIndexRange_temp)
            self.setGlobalIndexing("fluid", self.fluidGlobalIndexRange)
            if myid in self.solidInterfaceProcessors:
                globalIndexStart = 0
                for jProc in range(myid):
                    globalIndexStart += self.solidPhysicalInterfaceNodesDistribution[jProc]
                # NOTE(review): the fluid branch uses the *physical* node
                # count here; using nLocalSolidInterfaceNodes (halo nodes
                # included) looks inconsistent -- confirm intended.
                globalIndexStop = globalIndexStart + self.nLocalSolidInterfaceNodes-1
            else:
                globalIndexStart = 0
                globalIndexStop = 0
            solidGlobalIndexRange_temp = (globalIndexStart,globalIndexStop)
            self.solidGlobalIndexRange = self.mpiComm.allgather(solidGlobalIndexRange_temp)
            self.setGlobalIndexing("solid", self.solidGlobalIndexRange)
        else:
            temp = (0,self.nLocalFluidInterfacePhysicalNodes-1)
            self.fluidGlobalIndexRange = list()
            self.fluidGlobalIndexRange.append(temp)
            temp = (0,self.nSolidInterfacePhysicalNodes-1)
            self.solidGlobalIndexRange = list()
            self.solidGlobalIndexRange.append(temp)

        # --- Map the FSI indexing with the solvers indexing --- #
        # Halo nodes are skipped so that only physical nodes receive a
        # global FSI index.
        fluidIndexing_temp = {}
        localIndex = 0
        for iVertex in range(self.nLocalFluidInterfaceNodes):
            nodeIndex = FluidSolver.getNodalIndex(iVertex)
            if nodeIndex in self.fluidHaloNodesList[myid].keys():
                pass
            else:
                fluidIndexing_temp[nodeIndex] = self.getGlobalIndex('fluid', myid, localIndex)
                localIndex += 1

        solidIndexing_temp = {}
        localIndex = 0
        for jVertex in range(self.nLocalSolidInterfaceNodes):
            nodeIndex = SolidSolver.getNodalIndex(jVertex)
            if nodeIndex in self.solidHaloNodesList[myid].keys():
                pass
            else:
                solidIndexing_temp[nodeIndex] = self.getGlobalIndex('solid', myid, localIndex)
                localIndex += 1

        if self.mpiComm != None:
            fluidIndexing_temp = self.mpiComm.allgather(fluidIndexing_temp)
            solidIndexing_temp = self.mpiComm.allgather(solidIndexing_temp)
            # Merge the per-rank dictionaries into the global maps.
            for ii in range(len(solidIndexing_temp)):
                for key, value in solidIndexing_temp[ii].items():
                    self.solidIndexing[key] = value
            for ii in range(len(fluidIndexing_temp)):
                for key, value in fluidIndexing_temp[ii].items():
                    self.fluidIndexing[key] = value
        else:
            self.fluidIndexing = fluidIndexing_temp.copy()
            self.solidIndexing = solidIndexing_temp.copy()
        del fluidIndexing_temp, solidIndexing_temp

    def getGlobalIndex(self, domain, iProc, iLocalVertex):
        """
        Return the global FSI index of local physical vertex iLocalVertex on
        partition iProc, for domain 'fluid' or 'solid'.
        """
        if domain == 'fluid':
            globalStartIndex = self.fluidGlobalIndexRange[iProc][0]
        elif domain == 'solid':
            globalStartIndex = self.solidGlobalIndexRange[iProc][0]
        # NOTE(review): an unknown domain falls through both branches and
        # raises NameError on the next line.
        globalIndex = globalStartIndex + iLocalVertex
        return globalIndex

    def getNumberOfFluidInterfaceNodes(self):
        """
        Return the total number of physical fluid interface nodes (all partitions).
        """
        return self.nFluidInterfacePhysicalNodes

    def getNumberOfLocalFluidInterfaceNodes(self):
        """
        Return the number of physical fluid interface nodes on this partition.
        """
        return self.nLocalFluidInterfacePhysicalNodes

    def getNumberOfSolidInterfaceNodes(self):
        """
        Return the total number of physical solid interface nodes (all partitions).
        """
        return self.nSolidInterfacePhysicalNodes

    def getNumberOfLocalSolidInterfaceNodes(self):
        """
        Return the number of physical solid interface nodes on this partition.
        """
        return self.nLocalSolidInterfacePhysicalNodes

    def getSolidSolverProcessors(self):
        """
        Return the ranks on which a solid solver is instantiated.
        """
        return self.solidSolverProcessors

    def getSolidInterfaceProcessors(self):
        """
        Return the ranks holding at least one solid interface node.
        """
        return self.solidInterfaceProcessors

    def getFluidInterfaceProcessors(self):
        """
        Return the ranks holding at least one fluid interface node.
        """
        return self.fluidInterfaceProcessors

    def getSolidPhysicalInterfaceNodesDistribution(self):
        """
        Return the per-rank counts of physical solid interface nodes.
        """
        return self.solidPhysicalInterfaceNodesDistribution

    def getFluidPhysicalInterfaceNodesDistribution(self):
        """
        Return the per-rank counts of physical fluid interface nodes.
        """
        return self.fluidPhysicalInterfaceNodesDistribution

    def getSolidGlobalIndexRange(self):
        """
        Return the per-rank (start, stop) global index ranges for the solid domain.
        """
        return self.solidGlobalIndexRange

    def getFluidGlobalIndexRange(self):
        """
        Return the per-rank (start, stop) global index ranges for the fluid domain.
        """
        return self.fluidGlobalIndexRange

    def getFluidHaloNodesList(self):
        """
        Return the per-rank fluid halo-node lists.
        """
        return self.fluidHaloNodesList

    def getSolidHaloNodesList(self):
        """
        Return the per-rank solid halo-node lists.
        """
        return self.solidHaloNodesList

    def getFluidIndexing(self):
        """
        Return the fluid solver-index -> global-FSI-index mapping.
        """
        return self.fluidIndexing

    def getSolidIndexing(self):
        """
        Return the solid solver-index -> global-FSI-index mapping.
        """
        return self.solidIndexing

    def getnDim(self):
        """
        Return the spatial dimension of the problem.
        """
        return self.nDim

    def getComputationType(self):
        """
        Return the computation type string (e.g. 'steady').
        """
        return self.computationType

    def getMPIComm(self):
        """
        Return the MPI communicator (None in a serial run).
        """
        return self.mpiComm
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import json
import shutil
from crds.bestrefs import BestrefsScript
from crds.tests import test_config
"""
Bestrefs has a number of command line parameters which make it operate in different modes.
-----------
NEW CONTEXT
-----------
crds.bestrefs always computes best references with respect to a context which can be explicitly specified with the
--new-context parameter. If no --new-context is specified, the default operational context is determined by
consulting the CRDS server or looking in the local cache as a fallback.
------------------------
LOOKUP PARAMETER SOURCES
------------------------
The two primary modes for bestrefs involve the source of reference file matching parameters. Conceptually
lookup parameters are always associated with particular datasets and used to identify the references
required to process those datasets.
The options --files, --datasets, --instruments, and --all determine the source of lookup parameters:
1. To find best references for a list of files do something like this:
% python -m crds.bestrefs --new-context hst.pmap --files j8bt05njq_raw.fits j8bt06o6q_raw.fits j8bt09jcq_raw.fits
the first parameter, hst.pmap, is the context with respect to which best references are determined.
2. To find best references for a list of catalog dataset ids do something like this:
% python -m crds.bestrefs --new-context hst.pmap --datasets j8bt05njq j8bt06o6q j8bt09jcq
3. To do mass scale testing for all cataloged datasets for a particular instrument(s) do:
% python -m crds.bestrefs --new-context hst.pmap --instruments acs
4. To do mass scale testing for all supported instruments for all cataloged datasets do:
% python -m crds.bestrefs --new-context hst.pmap --all
----------------
COMPARISON MODES
----------------
The --old-context and --compare-source-bestrefs parameters define the best references comparison mode. Each names
the origin of a set of prior recommendations and implicitly requests a comparison to the recommendations from
the newly computed bestrefs determined by --new-context.
CONTEXT-TO-CONTEXT
..................
--old-context can be used to specify a second context for which bestrefs are dynamically computed; --old-context implies
that a bestrefs comparison will be made with --new-context.
PRIOR SOURCE RECOMMENDATIONS
............................
--compare-source-bestrefs requests that the bestrefs from --new-context be compared to the bestrefs which are
recorded with the lookup parameter data, either in the file headers of data files, or in the catalog. In both
cases the prior best references are recorded static values, not dynamically computed bestrefs.
------------
UPDATE MODES
------------
Currently there is only one update mode. When --files are specified as the input source, --update-bestrefs can
also be specified to update the input data file headers with new bestrefs recommendations. In this case the data
files are used as both the source of matching parameters and as the destination for best reference recommendations.
------------
OUTPUT MODES
------------
crds.bestrefs supports several output modes for bestrefs and comparison results.
If --print-affected is specified, crds.bestrefs will print out the name of any file (or dataset id) for which at least one update for
one reference type was recommended. This is essentially a list of files to be reprocessed with new references.
% python -m crds.bestrefs --new-context hst.pmap --files j8bt05njq_raw.fits j8bt06o6q_raw.fits j8bt09jcq_raw.fits --compare-source-bestrefs --print-affected
j8bt05njq_raw.fits
j8bt06o6q_raw.fits
j8bt09jcq_raw.fits
"""
def dt_bestrefs_3_files():
    """
    Compute simple bestrefs for 3 files:
    >>> old_state = test_config.setup()
    >>> BestrefsScript(argv="bestrefs.py --new-context hst.pmap --files data/j8bt05njq_raw.fits data/j8bt06o6q_raw.fits data/j8bt09jcq_raw.fits")()
    CRDS - INFO - No comparison context or source comparison requested.
    CRDS - INFO - No file header updates requested; dry run.
    CRDS - INFO - ===> Processing data/j8bt05njq_raw.fits
    CRDS - INFO - ===> Processing data/j8bt06o6q_raw.fits
    CRDS - INFO - ===> Processing data/j8bt09jcq_raw.fits
    CRDS - INFO - 0 errors
    CRDS - INFO - 0 warnings
    CRDS - INFO - 5 infos
    0
    >>> test_config.cleanup(old_state)
    """
    # No body: the doctests above are the test (presumably collected via
    # tstmod() in main() -- the expected output must stay byte-identical).
def dt_bestrefs_compare_source_files():
    """
    Compute and print files with at least one reference change:
    >>> old_state = test_config.setup()
    >>> BestrefsScript(argv="bestrefs.py --new-context hst.pmap --files data/j8bt05njq_raw.fits data/j8bt06o6q_raw.fits data/j8bt09jcq_raw.fits --print-affected --compare-source-bestrefs")()
    CRDS - INFO - No file header updates requested; dry run.
    CRDS - INFO - ===> Processing data/j8bt05njq_raw.fits
    CRDS - INFO - instrument='ACS' type='ATODTAB' data='data/j8bt05njq_raw.fits' :: New best reference: 'kcb1734ij_a2d.fits' --> 'n/a' :: Would update.
    CRDS - INFO - instrument='ACS' type='CRREJTAB' data='data/j8bt05njq_raw.fits' :: New best reference: 'n4e12510j_crr.fits' --> 'n/a' :: Would update.
    CRDS - INFO - instrument='ACS' type='IMPHTTAB' data='data/j8bt05njq_raw.fits' :: New best reference: 'undefined' --> 'w3m1716tj_imp.fits' :: Would update.
    CRDS - INFO - instrument='ACS' type='NPOLFILE' data='data/j8bt05njq_raw.fits' :: New best reference: 'undefined' --> 'v9718263j_npl.fits' :: Would update.
    CRDS - INFO - instrument='ACS' type='SHADFILE' data='data/j8bt05njq_raw.fits' :: New best reference: 'kcb1734pj_shd.fits' --> 'n/a' :: Would update.
    CRDS - INFO - ===> Processing data/j8bt06o6q_raw.fits
    CRDS - INFO - instrument='ACS' type='ATODTAB' data='data/j8bt06o6q_raw.fits' :: New best reference: 'kcb1734ij_a2d.fits' --> 'n/a' :: Would update.
    CRDS - INFO - instrument='ACS' type='CRREJTAB' data='data/j8bt06o6q_raw.fits' :: New best reference: 'n4e12510j_crr.fits' --> 'n/a' :: Would update.
    CRDS - INFO - instrument='ACS' type='IMPHTTAB' data='data/j8bt06o6q_raw.fits' :: New best reference: 'undefined' --> 'w3m1716tj_imp.fits' :: Would update.
    CRDS - INFO - instrument='ACS' type='NPOLFILE' data='data/j8bt06o6q_raw.fits' :: New best reference: 'undefined' --> 'v9718264j_npl.fits' :: Would update.
    CRDS - INFO - instrument='ACS' type='SHADFILE' data='data/j8bt06o6q_raw.fits' :: New best reference: 'kcb1734pj_shd.fits' --> 'n/a' :: Would update.
    CRDS - INFO - ===> Processing data/j8bt09jcq_raw.fits
    CRDS - INFO - instrument='ACS' type='ATODTAB' data='data/j8bt09jcq_raw.fits' :: New best reference: 'kcb1734ij_a2d.fits' --> 'n/a' :: Would update.
    CRDS - INFO - instrument='ACS' type='IMPHTTAB' data='data/j8bt09jcq_raw.fits' :: New best reference: 'undefined' --> 'w3m1716tj_imp.fits' :: Would update.
    CRDS - INFO - instrument='ACS' type='NPOLFILE' data='data/j8bt09jcq_raw.fits' :: New best reference: 'undefined' --> 'v9718260j_npl.fits' :: Would update.
    CRDS - INFO - instrument='ACS' type='SHADFILE' data='data/j8bt09jcq_raw.fits' :: New best reference: 'kcb1734pj_shd.fits' --> 'n/a' :: Would update.
    CRDS - INFO - Affected products = 3
    data/j8bt05njq_raw.fits
    data/j8bt06o6q_raw.fits
    data/j8bt09jcq_raw.fits
    CRDS - INFO - 0 errors
    CRDS - INFO - 0 warnings
    CRDS - INFO - 19 infos
    0
    >>> test_config.cleanup(old_state)
    """
    # No body: the doctests above are the test; the bare filenames after
    # "Affected products" are the --print-affected output.
def dt_bestrefs_3_files_default_context_from_server():
    """
    Compute simple bestrefs for 3 files using the default context from the server:
    >>> old_state = test_config.setup()
    >>> BestrefsScript(argv="bestrefs.py --new-context=hst.pmap --files data/j8bt05njq_raw.fits data/j8bt06o6q_raw.fits data/j8bt09jcq_raw.fits")()
    CRDS - INFO - No comparison context or source comparison requested.
    CRDS - INFO - No file header updates requested; dry run.
    CRDS - INFO - ===> Processing data/j8bt05njq_raw.fits
    CRDS - INFO - ===> Processing data/j8bt06o6q_raw.fits
    CRDS - INFO - ===> Processing data/j8bt09jcq_raw.fits
    CRDS - INFO - 0 errors
    CRDS - INFO - 0 warnings
    CRDS - INFO - 5 infos
    0
    >>> test_config.cleanup(old_state)
    """
    # No body: doctest-only.  Same as dt_bestrefs_3_files but exercises the
    # "--new-context=..." (equals-sign) argument form.
def dt_bestrefs_broken_dataset_file():
    """
    Same + one broken file to test shell error status
    >>> old_state = test_config.setup()
    >>> BestrefsScript(argv="bestrefs.py --new-context hst.pmap --files data/j8bt05njq_raw.fits data/j8bt05njq_raw_broke.fits data/j8bt06o6q_raw.fits data/j8bt09jcq_raw.fits")()
    CRDS - INFO - No comparison context or source comparison requested.
    CRDS - INFO - No file header updates requested; dry run.
    CRDS - INFO - ===> Processing data/j8bt05njq_raw.fits
    CRDS - INFO - ===> Processing data/j8bt05njq_raw_broke.fits
    CRDS - ERROR - instrument='ACS' type='BIASFILE' data='data/j8bt05njq_raw_broke.fits' :: New: Bestref FAILED: parameter='CCDAMP' value='FOOBAR' is not in ['A', 'ABCD', 'AC', 'AD', 'B', 'BC', 'BD', 'C', 'D']
    CRDS - INFO - ===> Processing data/j8bt06o6q_raw.fits
    CRDS - INFO - ===> Processing data/j8bt09jcq_raw.fits
    CRDS - INFO - 1 errors
    CRDS - INFO - 0 warnings
    CRDS - INFO - 6 infos
    1
    >>> test_config.cleanup(old_state)
    """
    # No body: doctest-only.  The final "1" is the script's return value
    # (error count), exercising the non-zero shell exit status path.
def dt_bestrefs_broken_cache_and_server():
    """
    >>> old_state = test_config.setup(cache="/nowhere", url="https://server-is-out-of-town")
    >> BestrefsScript(argv="bestrefs.py --new-context hst.pmap --files data/j8bt05njq_raw.fits")()
    CRDS - ERROR - (FATAL) CRDS server connection and cache load FAILED.  Cannot continue.  See https://hst-crds.stsci.edu or https://jwst-crds.stsci.edu for more information on configuring CRDS.
    Traceback (most recent call last):
    ...
    SystemExit: 1
    >>> test_config.cleanup(old_state)
    """
    # No body: doctest-only.  NOTE(review): the middle example starts with
    # ">>" (not ">>>") so doctest skips it -- apparently disabled
    # deliberately; confirm before "fixing" the prompt.
def dt_bestrefs_catalog_dataset():
    """
    Compute simple bestrefs for 1 catalog datasets using hst.pmap:
    >>> old_state = test_config.setup()
    >>> BestrefsScript(argv="bestrefs.py --new-context hst.pmap --datasets LB6M01030")() # doctest: +ELLIPSIS
    CRDS - INFO - Dumping dataset parameters from CRDS server at '...' for ['LB6M01030']
    CRDS - INFO - Dumped 1 of 1 datasets from CRDS server at '...'
    CRDS - INFO - Computing bestrefs for datasets ['LB6M01030']
    CRDS - INFO - No comparison context or source comparison requested.
    CRDS - INFO - 0 errors
    CRDS - INFO - 0 warnings
    CRDS - INFO - 4 infos
    0
    >>> test_config.cleanup(old_state)

    MAINTENANCE NOTE: the preceding test is currently an expected error case pending the delivery of a modified
    WFC3 FLSHFILE rmap located at crds/hst/prototypes/wfc3/hst_wfc3_flshfile_0251.rmap.  Once the modified rmap
    is delivered to operations, the above new-context should be changed to the new OPS context.  After that point,
    all mirrors of OPS to DEV should work without the expected errors due to FLASHCUR=='UNDEFINED'.  The only changes
    in the modified rmap should be header changes, nominally the rmap_relevance expression; additional changes
    may reflect new flshfile submissions which happened after the prototype rmap was created.
    """
    # No body: doctest-only; uses +ELLIPSIS to tolerate varying server URLs.
def dt_bestrefs_context_to_context():
    """
    Compute comparison bestrefs between two contexts:
    >>> old_state = test_config.setup()
    >>> BestrefsScript(argv="bestrefs.py --new-context data/hst_0001.pmap --old-context hst.pmap --files data/j8bt05njq_raw.fits data/j8bt06o6q_raw.fits data/j8bt09jcq_raw.fits")()
    CRDS - INFO - No file header updates requested; dry run.
    CRDS - INFO - ===> Processing data/j8bt05njq_raw.fits
    CRDS - INFO - ===> Processing data/j8bt06o6q_raw.fits
    CRDS - INFO - ===> Processing data/j8bt09jcq_raw.fits
    CRDS - INFO - 0 errors
    CRDS - INFO - 0 warnings
    CRDS - INFO - 4 infos
    0
    >>> test_config.cleanup(old_state)
    """
    # No body: doctest-only.  Exercises the --old-context vs --new-context
    # comparison mode described in the module docstring.
class TestBestrefs(test_config.CRDSTestCase):
    # End-to-end tests driving the crds.bestrefs command line through
    # CRDSTestCase.run_script() against the testing cache.

    script_class = BestrefsScript
    # server_url = "https://hst-crds-dev.stsci.edu"
    cache = test_config.CRDS_TESTING_CACHE

    def test_bestrefs_affected_datasets(self):
        self.run_script("crds.bestrefs --affected-datasets --old-context hst_0314.pmap --new-context hst_0315.pmap --datasets-since 2015-01-01",
                        expected_errs=0)

    def test_bestrefs_from_pickle(self):
        self.run_script("crds.bestrefs --new-context hst_0315.pmap --load-pickle data/test_cos.pkl --stats --print-affected-details",
                        expected_errs=0)

    def test_bestrefs_to_pickle(self):
        self.run_script("crds.bestrefs --datasets LA9K03C3Q:LA9K03C3Q LA9K03C5Q:LA9K03C5Q LA9K03C7Q:LA9K03C7Q "
                        "--new-context hst_0315.pmap --save-pickle test_cos.pkl --stats",
                        expected_errs=0)
        os.remove("test_cos.pkl")  # clean up the artifact written to CWD

    def test_bestrefs_from_json(self):
        self.run_script("crds.bestrefs --new-context hst_0315.pmap --load-pickle data/test_cos.json --stats",
                        expected_errs=1)

    def test_bestrefs_to_json(self):
        # expected_errs=None: the error count is not asserted for this mode.
        self.run_script("crds.bestrefs --instrument cos --new-context hst_0315.pmap --save-pickle test_cos.json --datasets-since 2015-01-01 --stats",
                        expected_errs=None)
        os.remove("test_cos.json")

    def test_bestrefs_at_file(self):
        # "@file" syntax reads the --files list from a text file.
        self.run_script("crds.bestrefs --files @data/bestrefs_file_list --new-context hst_0315.pmap --stats",
                        expected_errs=0)

    def test_bestrefs_remote(self):
        self.run_script("crds.bestrefs --files @data/bestrefs_file_list --new-context hst_0315.pmap --remote --stats",
                        expected_errs=0)

    def test_bestrefs_new_references(self):
        self.run_script("crds.bestrefs --files @data/bestrefs_file_list --new-context hst_0315.pmap --print-new-references --stats",
                        expected_errs=0)

    def test_bestrefs_default_new_context(self):
        # No --new-context: the default operational context is used.
        self.run_script("crds.bestrefs --files @data/bestrefs_file_list --stats",
                        expected_errs=0)

    def test_bestrefs_update_file_headers(self):
        # Work on a scratch copy so the checked-in data file is not modified.
        shutil.copy("data/j8bt06o6q_raw.fits", "j8bt06o6q_raw.fits")
        self.run_script("crds.bestrefs --files ./j8bt06o6q_raw.fits --new-context hst_0315.pmap --update-bestrefs",
                        expected_errs=0)
        os.remove("j8bt06o6q_raw.fits")

    def test_bestrefs_update_bestrefs(self):
        # """update_bestrefs modifies dataset file headers"""
        # NOTE(review): identical to test_bestrefs_update_file_headers above;
        # one of the two is presumably redundant.
        shutil.copy("data/j8bt06o6q_raw.fits", "j8bt06o6q_raw.fits")
        self.run_script("crds.bestrefs --files ./j8bt06o6q_raw.fits --new-context hst_0315.pmap --update-bestrefs",
                        expected_errs=0)
        os.remove("j8bt06o6q_raw.fits")

    def test_bestrefs_bad_sources(self):
        # --all-instruments combined with --instrument is contradictory and
        # must be rejected.
        with self.assertRaises(AssertionError):
            self.run_script("crds.bestrefs --all-instruments --instrument cos --new-context hst_0315.pmap",
                            expected_errs=1)

    def test_bestrefs_update_headers(self):
        # update_headers updates original headers from a pickle, saving a new
        # pickle with original + overrides.
        self.run_script("crds.bestrefs --new-context hst_0315.pmap --datasets LCE31SW6Q:LCE31SW6Q --load-pickle data/test_cos_update.json "
                        " --save-pickle ./test_cos_combined.json --update-bestrefs --update-pickle", expected_errs=1)
        with open("./test_cos_combined.json") as pfile:
            header = json.load(pfile)
        header = header["LCE31SW6Q:LCE31SW6Q"]
        badttab = header["BADTTAB"]
        assert badttab == "N/A"
        gsagtab = header["GSAGTAB"]
        assert gsagtab == "X6L1439EL_GSAG.FITS"
        flatfile = header["FLATFILE"]
        assert flatfile == "N/A"
        os.remove("./test_cos_combined.json")
# ==================================================================================
def main():
    """Run module tests, for now just doctests only."""
    import unittest
    suite = unittest.TestLoader().loadTestsFromTestCase(TestBestrefs)
    # NOTE(review): the unittest result object is discarded -- the value
    # returned (and printed as the exit status below) reflects only the
    # doctest failure count from tstmod().
    unittest.TextTestRunner().run(suite)
    from crds.tests import test_bestrefs, tstmod
    return tstmod(test_bestrefs)

if __name__ == "__main__":
    print(main())
|
#!/usr/bin/env python3
import os
import sys
import argparse
import json
from lib.expression_tree import *
from patterns import *
from lib.util import *
class ExpressionManager(object):
    """
    Applies one level of expression nodes to input tuples, mapping a set of
    input columns to a set of output columns.

    NOTE-1: the same column can appear in multiple expression nodes; this is
            because it has multiple patterns; in this case, check each attr
            against all patterns the column appears in; if it matches:
            - exactly one pattern: apply that one
            - more than one pattern: choose one and apply it
            - no pattern: add the attr to the exception column
    NOTE-2: it is the operator's responsibility to handle null values and raise
            exception if not supported; for now, they will be added to the
            exceptions column; TODO: handle them better in the future
    """

    def __init__(self, in_columns, expr_nodes, null_value):
        """
        :param in_columns: list of Column objects describing the input tuple
        :param expr_nodes: expression nodes of one expression-tree level
        :param null_value: string representing NULL in the data
        """
        self.null_value = null_value
        # pair each expression node with the operator built by its pattern detector
        self.expr_nodes = []
        for expr_n in expr_nodes:
            pd = get_pattern_detector(expr_n.p_id)
            operator = pd.get_operator(expr_n.cols_in, expr_n.cols_out, expr_n.operator_info, self.null_value)
            self.expr_nodes.append({
                "expr_n": expr_n,
                "operator": operator
            })
        self.in_columns, self.out_columns, self.in_columns_map, self.out_columns_map = [], [], {}, {}
        # populate in_columns & save their indices
        for idx, in_col in enumerate(in_columns):
            self.in_columns.append(in_col)
            self.in_columns_map[in_col.col_id] = idx
        # populate out_columns with:
        # 1) unused columns (not an input of any expression node)
        # PERF: used_columns is loop-invariant; the original rebuilt this
        # list once per input column (accidental O(n*m))
        used_columns = {c.col_id for expr_n in expr_nodes for c in expr_n.cols_in}
        for in_col in in_columns:
            if in_col.col_id not in used_columns:
                self.out_columns.append(in_col)
        # [2) output, 3) exception, 4) unconsumed input] columns from expression nodes
        for expr_n in expr_nodes:
            # output columns
            self.out_columns.extend(expr_n.cols_out)
            # exception columns
            for ex_col in expr_n.cols_ex:
                # NOTE: multiple expr_n can have the same ex_col; add it only once
                if ex_col.col_id not in [c.col_id for c in self.out_columns]:
                    self.out_columns.append(ex_col)
            # unconsumed input columns
            for in_col in expr_n.cols_in:
                if in_col.col_id not in {c.col_id for c in expr_n.cols_in_consumed}:
                    # NOTE: in_col may have been added already by other expr_n; add it only once
                    if in_col.col_id not in [c.col_id for c in self.out_columns]:
                        self.out_columns.append(in_col)
        # save output & exception column indices
        for idx, out_col in enumerate(self.out_columns):
            self.out_columns_map[out_col.col_id] = idx
        # per-output-column null counters, accumulated by apply_expressions()
        # TODO: per-input-column exception counters could be tracked here too
        self.out_columns_stats = []
        for out_col in self.out_columns:
            self.out_columns_stats.append({
                "col_id": out_col.col_id,
                "null_count": 0
            })

    def get_out_columns(self):
        """Return the list of output Column objects, in output order."""
        return self.out_columns

    def dump_out_header(self, fd, fdelim):
        """Write the output header row (column names joined by fdelim) to fd."""
        line = fdelim.join([col.name for col in self.get_out_columns()])
        fd.write(line + "\n")

    def dump_out_schema(self, fd, out_table_name):
        """Write a CREATE TABLE statement for the output columns to fd."""
        line = "CREATE TABLE \"{}\"(".format(out_table_name)
        fd.write(line + "\n")
        for idx, out_col in enumerate(self.out_columns):
            line = " \"{}\" {}".format(out_col.name, out_col.datatype.to_sql_str())
            if idx < len(self.out_columns)-1:
                line += ","
            fd.write(line + "\n")
        line = ");"
        fd.write(line + "\n")

    def get_stats(self, valid_tuple_count, total_tuple_count):
        """Return per-output-column null stats.

        total_tuple_count is currently unused but kept for interface
        stability (planned exception stats would need it).
        """
        # null stats; deepcopy so callers cannot mutate the live counters
        out_columns_stats = deepcopy(self.out_columns_stats)
        for out_col_s in out_columns_stats:
            out_col_s["null_ratio"] = float(out_col_s["null_count"]) / valid_tuple_count if valid_tuple_count > 0 else float("inf")
        stats = {
            "out_columns": out_columns_stats
        }
        return stats

    def is_valid_tuple(self, tpl):
        """A tuple is valid iff it has exactly one attr per input column."""
        return len(tpl) == len(self.in_columns)

    def apply_expressions(self, in_tpl):
        """Transform in_tpl into an output tuple.

        Returns the output tuple (list of strings), or None if in_tpl is
        invalid. Unhandled input attrs are passed through to their output or
        exception column; per-column null counts are accumulated for stats.
        """
        out_tpl = [self.null_value] * len(self.out_columns)
        if not self.is_valid_tuple(in_tpl):
            return None
        # fill out_tpl in for each expression node
        in_columns_consumed = set()
        for entry in self.expr_nodes:
            expr_n, operator = entry["expr_n"], entry["operator"]
            in_attrs = []
            # gather in_attrs; skip this node if any of its inputs was
            # already consumed by another expression node
            used = False
            for in_col in expr_n.cols_in:
                if in_col.col_id in in_columns_consumed:
                    used = True
                    break
                in_attrs.append(in_tpl[self.in_columns_map[in_col.col_id]])
            if used:
                continue
            # apply operator
            try:
                out_attrs = operator(in_attrs)
            except OperatorException:
                # this operator cannot be applied, but others may be; in the
                # worst case, attr is added to the exception column at the end
                continue
            # mark this node's inputs as consumed
            for in_col in expr_n.cols_in_consumed:
                in_columns_consumed.add(in_col.col_id)
            # fill in out_tpl
            # (the original also looped over cols_in computing an unused
            # index here; that dead code was removed)
            for out_attr_idx, out_attr in enumerate(out_attrs):
                out_col_idx = self.out_columns_map[expr_n.cols_out[out_attr_idx].col_id]
                out_tpl[out_col_idx] = str(out_attr)
        # handle unused attrs
        for in_col_idx, in_col in enumerate(self.in_columns):
            # if column not consumed by any expression node
            if in_col.col_id not in in_columns_consumed:
                # attr is null and no expression node handled it
                if in_tpl[in_col_idx] == self.null_value:
                    # nothing to be done; out_tpl[out_col_idx] is already null
                    continue
                # in_col is an output column
                # NOTE: this also catches unconsumed input columns
                if in_col.col_id in self.out_columns_map:
                    out_col_id = in_col.col_id
                else:  # exception
                    out_col_id = OutputColumnManager.get_exception_col_id(in_col.col_id)
                out_col_idx = self.out_columns_map[out_col_id]
                # add attr to out_tpl
                out_tpl[out_col_idx] = str(in_tpl[in_col_idx])
        # count nulls for stats
        for idx, attr in enumerate(out_tpl):
            if attr == self.null_value:
                self.out_columns_stats[idx]["null_count"] += 1
        return out_tpl
def apply_expression_manager_list(tpl, expr_manager_list):
    """Run tpl through every expression manager, in order.

    Each level's output tuple becomes the next level's input. Returns the
    final tuple, or None as soon as any manager rejects the tuple.
    """
    for manager in expr_manager_list:
        tpl = manager.apply_expressions(tpl)
        if tpl is None:
            return None
    return tpl
def driver_loop(driver, expr_manager_list, fdelim, null_value, fd_out, fd_null_mask):
    """Consume tuples from driver, transform them, and write the results.

    Transformed tuples go to fd_out; a 0/1 null mask computed on the *input*
    tuple goes to fd_null_mask. Invalid tuples are counted but not written.
    Returns (total_tuple_count, valid_tuple_count).
    NOTE: both counters are also published as module globals.
    """
    global total_tuple_count
    global valid_tuple_count
    total_tuple_count, valid_tuple_count = 0, 0
    while True:
        line = driver.nextTuple()
        if line is None:
            break
        total_tuple_count += 1
        in_tpl = line.split(fdelim)
        out_tpl = apply_expression_manager_list(in_tpl, expr_manager_list)
        if out_tpl is None:
            continue
        valid_tuple_count += 1
        null_mask = ["1" if attr == null_value else "0" for attr in in_tpl]
        fd_out.write(fdelim.join(out_tpl) + "\n")
        fd_null_mask.write(fdelim.join(null_mask) + "\n")
        # progress report (only reached for valid tuples)
        if total_tuple_count % 100000 == 0:
            print("[progress] total_tuple_count={}M, valid_tuple_count={}M".format(
                float(total_tuple_count) / 1000000,
                float(valid_tuple_count) / 1000000))
    return (total_tuple_count, valid_tuple_count)
def parse_args():
    """Parse command-line arguments for the expression-application script.

    Returns the argparse.Namespace with: file (optional positional input
    CSV; stdin when absent), header_file, datatypes_file, expr_tree_file,
    output_dir, out_table_name, fdelim (default "|"), null (default "null").
    """
    parser = argparse.ArgumentParser(
        # BUGFIX: description said "Detect column patterns in CSV file",
        # copy-pasted from the detection script; this one applies the tree.
        description="""Apply expression tree to CSV file."""
    )
    parser.add_argument('file', metavar='FILE', nargs='?',
                        help='CSV file to process. Stdin if none given')
    parser.add_argument('--header-file', dest='header_file', type=str,
                        help="CSV file containing the header row (<workbook>/samples/<table>.header-renamed.csv)",
                        required=True)
    parser.add_argument('--datatypes-file', dest='datatypes_file', type=str,
                        help="CSV file containing the datatypes row (<workbook>/samples/<table>.datatypes.csv)",
                        required=True)
    parser.add_argument('--expr-tree-file', dest='expr_tree_file', type=str,
                        help="Input file containing expression nodes",
                        required=True)
    parser.add_argument('--output-dir', dest='output_dir', type=str,
                        help="Output dir to put output files in",
                        required=True)
    parser.add_argument('--out-table-name', dest='out_table_name', type=str,
                        help="Name of the table",
                        required=True)
    parser.add_argument("-F", "--fdelim", dest="fdelim",
                        help="Use <fdelim> as delimiter between fields", default="|")
    parser.add_argument("--null", dest="null", type=str,
                        help="Interprets <NULL> as NULLs", default="null")
    return parser.parse_args()
def main():
    """Apply an expression tree to a CSV file.

    Reads header and datatype rows, builds Column objects, instantiates one
    ExpressionManager per expression-tree level, then streams the input CSV
    through the managers, writing the transformed CSV, a null-mask CSV, a
    header file, a SQL schema file, and a JSON stats file to the output dir.
    """
    args = parse_args()
    print(args)
    # header/datatypes files each hold a single fdelim-separated row
    with open(args.header_file, 'r') as fd:
        header = list(map(lambda x: x.strip(), fd.readline().split(args.fdelim)))
    with open(args.datatypes_file, 'r') as fd:
        datatypes = list(map(lambda x: DataType.from_sql_str(x.strip()), fd.readline().split(args.fdelim)))
    # one datatype per header column is required
    if len(header) != len(datatypes):
        return RET_ERR
    # build columns
    columns = []
    for idx, col_name in enumerate(header):
        col_id = str(idx)
        columns.append(Column(col_id, col_name, datatypes[idx]))
    # load expression tree
    expression_tree = read_expr_tree(args.expr_tree_file)
    if len(expression_tree.levels) == 0:
        raise Exception("Empty expression tree")
    # debug
    # connected_components = expression_tree.get_connected_components()
    # print("[connected_components] len={}".format(len(connected_components)))
    # for cc_expr_tree in connected_components:
    # print(cc_expr_tree.levels)
    # end-debug
    # init expression managers
    expr_manager_list = []
    in_columns = columns
    for idx, level in enumerate(expression_tree.levels):
        expr_nodes = [expression_tree.get_node(node_id) for node_id in level]
        expr_manager = ExpressionManager(in_columns, expr_nodes, args.null)
        expr_manager_list.append(expr_manager)
        # out_columns becomes in_columns for the next level
        in_columns = expr_manager.get_out_columns()
    # generate header and schema files with output columns
    out_header_file = os.path.join(args.output_dir, "{}.header.csv".format(args.out_table_name))
    with open(out_header_file, 'w') as fd_h:
        expr_manager_list[-1].dump_out_header(fd_h, args.fdelim)
    out_schema_file = os.path.join(args.output_dir, "{}.table.sql".format(args.out_table_name))
    with open(out_schema_file, 'w') as fd_s:
        expr_manager_list[-1].dump_out_schema(fd_s, args.out_table_name)
    # apply expression tree and generate the new csv file
    output_file = os.path.join(args.output_dir, "{}.csv".format(args.out_table_name))
    null_mask_file = os.path.join(args.output_dir, "{}.nulls.csv".format(args.out_table_name))
    try:
        # read from stdin (dup'd so closing fd_in doesn't close sys.stdin)
        # when no input file is given
        if args.file is None:
            fd_in = os.fdopen(os.dup(sys.stdin.fileno()))
        else:
            fd_in = open(args.file, 'r')
        driver = FileDriver(fd_in)
        with open(output_file, 'w') as fd_out, open(null_mask_file, 'w') as fd_null_mask:
            (total_tuple_count, valid_tuple_count) = driver_loop(driver, expr_manager_list, args.fdelim, args.null, fd_out, fd_null_mask)
    finally:
        # best-effort close; fd_in may be unbound if open() failed
        try:
            fd_in.close()
        except:
            pass
    # output stats
    valid_tuple_ratio = float(valid_tuple_count) / total_tuple_count if total_tuple_count > 0 else float("inf")
    out_columns_stats = expr_manager_list[-1].get_stats(valid_tuple_count, total_tuple_count)["out_columns"]
    stats = {
        "total_tuple_count": total_tuple_count,
        "valid_tuple_count": valid_tuple_count,
        "valid_tuple_ratio": valid_tuple_ratio,
        "out_columns": out_columns_stats,
        "level_stats": {}
    }
    # per-level stats, keyed by level index
    for level, expr_mgr in enumerate(expr_manager_list):
        stats["level_stats"][level] = expr_mgr.get_stats(valid_tuple_count, total_tuple_count)
    stats_file = os.path.join(args.output_dir, "{}.stats.json".format(args.out_table_name))
    with open(stats_file, 'w') as fd_s:
        json.dump(stats, fd_s, indent=2)
    print("total_tuple_count={}, valid_tuple_count={}".format(total_tuple_count, valid_tuple_count))
if __name__ == "__main__":
    main()
"""
#[remote]
wbs_dir=/scratch/bogdan/tableau-public-bench/data/PublicBIbenchmark-test
repo_wbs_dir=/scratch/bogdan/master-project/public_bi_benchmark-master_project/benchmark
#[local-personal]
wbs_dir=/media/bogdan/Data/Bogdan/Work/cwi-data/tableau-public-bench/data/PublicBIbenchmark-poc_1
repo_wbs_dir=/media/bogdan/Data/Bogdan/Work/cwi/master-project/public_bi_benchmark-master_project/benchmark
================================================================================
wb=CommonGovernment
table=CommonGovernment_1
================================================================================
wb=Eixo
table=Eixo_1
================================================================================
wb=Arade
table=Arade_1
================================================================================
wb=CMSprovider
table=CMSprovider_1
================================================================================
wb=Generico
table=Generico_2
================================================================================
expr_tree_file=$wbs_dir/$wb/$table.expr_tree/c_tree.json
out_table="${table}_out"
# [apply-expression]
input_file=$wbs_dir/$wb/$table.csv
output_dir=$wbs_dir/$wb/$table.poc_1_out
mkdir -p $output_dir && \
time ./pattern_detection/apply_expression.py --expr-tree-file $expr_tree_file --header-file $repo_wbs_dir/$wb/samples/$table.header-renamed.csv --datatypes-file $repo_wbs_dir/$wb/samples/$table.datatypes.csv --output-dir $output_dir --out-table-name $out_table $input_file
# [apply-expression-theoretical]
input_file=$wbs_dir/$wb/$table.sample-theoretical-test.csv
output_dir=$wbs_dir/$wb/$table.poc_1_out-theoretical
mkdir -p $output_dir && \
time ./pattern_detection/apply_expression.py --expr-tree-file $expr_tree_file --header-file $repo_wbs_dir/$wb/samples/$table.header-renamed.csv --datatypes-file $repo_wbs_dir/$wb/samples/$table.datatypes.csv --output-dir $output_dir --out-table-name $out_table $input_file
cat $output_dir/$out_table.stats.json | less
# [load & evaluation]
n_input_file=$output_dir/$out_table.csv
n_schema_file=$output_dir/$out_table.table.sql
wv_n_schema_file=$output_dir/$out_table.table-vectorwise.sql
db_name=pbib
source ~/.ingVWsh
./util/VectorWiseify-schema.sh $n_schema_file $wv_n_schema_file > /dev/null
time ./evaluation/main-vectorwise.sh $db_name $n_input_file $wv_n_schema_file $out_table $output_dir
cat $output_dir/stats-vectorwise/$out_table.statdump.out | less
cat $output_dir/stats-vectorwise/$out_table.compression-log.out | less
cat $output_dir/load-vectorwise/$out_table.data-files.out | less
cat $output_dir/$out_table.eval-vectorwise.json | less
# [compare]
stats_file_nocompression=$wbs_dir/$wb/$table.evaluation-nocompression/$table.eval-vectorwise.json
stats_file_default=$wbs_dir/$wb/$table.evaluation/$table.eval-vectorwise.json
stats_file_wc=$wbs_dir/$wb/$table.poc_1_out/$out_table.eval-vectorwise.json
apply_expr_stats_file=$wbs_dir/$wb/$table.poc_1_out/$out_table.stats.json
summary_out_file=$output_dir/$table.summary.json
# ./evaluation/compare_stats.py $stats_file_nocompression $stats_file_default
./evaluation/compare_stats.py $stats_file_default $stats_file_wc --expr-tree-file $expr_tree_file --apply-expr-stats-file $apply_expr_stats_file --summary-out-file $summary_out_file
================================================================================
less $output_dir/$out_table.table.sql
cat $output_dir/$out_table.csv | less -S
awk -F "|" '{ print $2, " ", $57 }' $output_dir/$out_table.csv | less -S
awk -F "|" '{ print $20, " ", $82 }' $output_dir/$out_table.csv | less -S
awk -F "|" '{ print $48, " ", $90 }' $output_dir/$out_table.csv | less -S
awk -F "|" '{ print $3, " ", $58, $59, $60, $61 }' $output_dir/$out_table.csv | less -S
"""
|
# ===============================================================================
# NAME: InstanceTopologyHVisitor.py
#
# DESCRIPTION: A visitor responsible for the generation of component
# base class source code file.
#
# AUTHOR: reder
# EMAIL: <EMAIL>
# DATE CREATED : Feb 5, 2007
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import logging
import sys
from fprime_ac.generators import formatters
# from fprime_ac.utils import DiffAndRename
from fprime_ac.generators.visitors import AbstractVisitor
from fprime_ac.models import ModelParser
#
# Python extention modules and custom interfaces
#
# from Cheetah import Template
# from fprime_ac.utils import version
from fprime_ac.utils import ConfigManager
#
# Import precompiled templates here
#
try:
from fprime_ac.generators.templates.topology import includes1TopologyH
from fprime_ac.generators.templates.topology import publicInstanceTopologyH
except ImportError:
print("ERROR: must generate python templates first.")
sys.exit(-1)
# from fprime_ac.generators.templates import finishTopologyCpp
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
#
# Module class or classes go here.
class InstanceTopologyHVisitor(AbstractVisitor.AbstractVisitor):
    """
    A visitor class responsible for generation of component header
    classes in C++.
    """

    __instance = None
    __config = None
    __fp = None
    __form = None
    __form_comment = None
    __model_parser = None

    def __init__(self):
        """
        Constructor.
        """
        super().__init__()
        self.__config = ConfigManager.ConfigManager.getInstance()
        self.__form = formatters.Formatters()
        self.__form_comment = formatters.CommentFormatters()
        self.__model_parser = ModelParser.ModelParser.getInstance()
        DEBUG.info("InstanceTopologyHVisitor: Instanced.")
        self.bodytext = ""
        self.prototypetext = ""

    def _writeTmpl(self, c, visit_str):
        """
        Wrapper to write tmpl to files desc.
        """
        DEBUG.debug("InstanceTopologyHVisitor:%s" % visit_str)
        DEBUG.debug("===================================")
        DEBUG.debug(c)
        self.__fp.writelines(c.__str__())
        DEBUG.debug("===================================")

    def initFilesVisit(self, obj):
        """
        Defined to generate files for generated code products.
        @parms obj: the instance of the component model to visit.
        """
        # Build filename here...
        if len(obj.get_comp_list()) > 0:
            xml_file = obj.get_comp_list()[0].get_xml_filename()
            x = xml_file.split(".")
            s = self.__config.get("assembly", "TopologyXML").split(".")
            l = len(s[0])
            #
            # NOTE: operands are booleans, so `and` is the right operator here
            # (original used bitwise `&`, which only happens to work on bools).
            if (x[0][-l:] == s[0]) and (x[1] == s[1]):
                filename = x[0].split(s[0])[0] + self.__config.get(
                    "assembly", "TopologyH"
                )
                PRINT.info(
                    "Generating code filename: %s topology, using default XML filename prefix..."
                    % filename
                )
            else:
                msg = (
                    "XML file naming format not allowed (must be XXXAppAi.xml), Filename: %s"
                    % xml_file
                )
                PRINT.info(msg)
                raise ValueError(msg)
            #
            # Get the partN, partition label from XML file name if there is one.
            # For no partition it is simply None. This is used only for ARINC653
            # demo to prepend partition prefix to instance names of components.
            #
            if xml_file.find("part") > 0:
                self.partition = "part" + xml_file.split("part")[1].split("AppAi")[0]
            else:
                self.partition = None
            #
            # Open file for writing here...
            DEBUG.info("Open file: %s" % filename)
            self.__fp = open(filename, "w")
            if self.__fp is None:
                # BUGFIX: the original applied `%` to the raise expression
                # (`raise Exception("...") % filename`), which would itself
                # raise a TypeError instead of the intended message.
                raise Exception("Could not open %s file." % filename)
            DEBUG.info("Completed")
        else:
            PRINT.info("ERROR: NO COMPONENTS FOUND IN TOPOLOGY XML FILE...")
            sys.exit(-1)

    def startSourceFilesVisit(self, obj):
        """
        Defined to generate starting static code within files.
        """

    def includes1Visit(self, obj):
        """
        Defined to generate includes within a file.
        Usually used for the base classes but also for Port types
        @parms args: the instance of the concrete element to operation on.
        """
        relative_path = self.relativePath()
        #
        DEBUG.debug("Relative path: %s", relative_path)
        #
        c = includes1TopologyH.includes1TopologyH()
        temp = obj.get_comp_list()
        # Only generate port connections
        c.connect_only = False
        if obj.connect_only:
            c.connect_only = True
        # Generate Components as pointers
        c.is_ptr = False
        if obj.is_ptr:
            c.is_ptr = True
        else:
            if not obj.connect_only:
                c.is_ptr = True
                obj.is_ptr = True
        c.component_header_list = []
        for component in temp:
            #
            # Hack to fix the include file so it is consistent...
            if self.__config.get("component", "XMLDefaultFileName") == "False":
                namespace = ""
            else:
                namespace = component.get_namespace()
            #
            # Added configurable override for includes for testing
            if self.__config.get("includes", "comp_include_path") == "None":
                if relative_path is not None:
                    path = relative_path
                else:
                    path = component.get_namespace()
            else:
                path = self.__config.get("includes", "comp_include_path")
            c.path = path
            c.component_header_list.append((path, namespace, component.get_kind()))
        #
        # Build list of unique component types here...
        comp_types = [k[2] for k in c.component_header_list]
        comp_types = self.__model_parser.uniqueList(comp_types)
        comp_headers = c.component_header_list
        #
        # Recreate component_header_list here with only unique types...
        c.component_header_list = []
        for k in comp_types:
            for comp in comp_headers:
                if k == comp[2]:
                    c.component_header_list.append(comp)
                    break
        #
        ## Create Component Declarations
        component_list = []
        for component in temp:
            d = {
                "ns": component.get_namespace(),
                "name": component.get_name(),
                "kind": component.get_kind(),
            }
            component_list.append(dict(d))
        c.component_import_list = []
        # derive the implementation header path from each instance's XML path
        for xml_name in obj.get_instance_header_dict():
            if obj.get_instance_header_dict()[xml_name] is not None:
                xml_path = obj.get_instance_header_dict()[xml_name]
            else:
                xml_path = xml_name
            xml_path = xml_path.strip()
            xml_path = xml_path.replace("i.xml", "")
            xml_path += "c.hpp"
            c.component_import_list.append(xml_path)
        c.component_declarations = []
        for component in component_list:
            if obj.is_ptr:
                declaration_template = """{ns}::{kind}Impl* {name}_ptr = 0;""".format(
                    **component
                )
            else:
                declaration_template = """{ns}::{kind}Impl {name}("{name}");""".format(
                    **component
                )
            c.component_declarations.append(declaration_template)
        self._writeTmpl(c, "includes1Visit")

    def includes2Visit(self, obj):
        """
        Defined to generate internal includes within a file.
        Usually used for data type includes and system includes.
        @parms args: the instance of the concrete element to operation on.
        """

    def namespaceVisit(self, obj):
        """
        Defined to generate namespace code within a file.
        Also any pre-condition code is generated.
        @parms args: the instance of the concrete element to operation on.
        """

    def publicVisit(self, obj):
        """
        Defined to generate public stuff within a class.
        @parms args: the instance of the concrete element to operation on.
        """
        c = publicInstanceTopologyH.publicInstanceTopologyH()
        # Added hack for ARINC demo...
        part = self.partition
        #
        component_list = []
        connection_list = []
        c.name = obj.get_name()
        c.kind = obj
        c.component_declarations = []
        c.component_inits = []
        c.port_connections = []
        c.component_startups = []
        c.component_teardowns = []
        c.command_registrations = []
        c.component_reference_ids = []
        # Only generate port connections
        c.connect_only = False
        if obj.connect_only:
            c.connect_only = True
        # Generate Components as pointers
        c.is_ptr = False
        if obj.is_ptr:
            c.is_ptr = True
        else:
            if not obj.connect_only:
                c.is_ptr = True
                obj.is_ptr = True
        for component in obj.get_comp_list():
            d = {
                "ns": component.get_namespace(),
                "name": component.get_name(),
                "kind": component.get_kind2(),
            }
            component_list.append(dict(d))
            for port in component.get_ports():
                d = {
                    "comment": port.get_comment(),
                    "comp": component.get_name(),
                    "name": port.get_name(),
                    "type": port.get_type(),
                    "direction": port.get_direction(),
                    "num": port.get_source_num(),
                    "tcomp": port.get_target_comp(),
                    "tname": port.get_target_port(),
                    "ttype": port.get_target_type(),
                    "tdirection": port.get_target_direction(),
                    "tnum": port.get_target_num(),
                    "modeler": component.get_modeler(),
                }
                connection_list.append(dict(d))
        #
        # Generate Component Declarations
        for component in component_list:
            # Partition instance names
            n = component["name"]  # Save name
            if part is None:
                pass
            else:
                component["name"] = part + "_" + component["name"]
            #
            if obj.is_ptr:
                declaration_template = """{name}_ptr = new {ns}::{ns}Impl("{name}");""".format(
                    **component
                )
                c.component_declarations.append(declaration_template)
            else:
                pass  ## If objects are generated as instances the object was instansiated in includes
        #
        #
        # Generate Set Window/Base ID Method
        for id_tuple in obj.get_base_id_list():
            n = id_tuple[0]
            base_id = id_tuple[1]
            if obj.is_ptr:
                declaration_template = """{}_ptr->setIdBase({});""".format(n, base_id)
            else:
                declaration_template = """{}.setIdBase({});""".format(n, base_id)
            c.component_reference_ids.append(declaration_template)
        #
        #
        # Generate Component Initalizations
        for component in component_list:
            if obj.is_ptr:
                init_template = """{name}_ptr->init(10);""".format(**component)
            else:
                init_template = """{name}.init(10);""".format(**component)
            c.component_inits.append(init_template)
        #
        # Generate Port Connections
        for connection in connection_list:
            if connection["type"] == "Serial":
                connection["type"] = "Serialize"
            # BUGFIX: original tested ttype == "Serialize" and re-assigned
            # "Serialize" (a no-op); mirror the source-type mapping above
            # and convert "Serial" -> "Serialize".
            if connection["ttype"] == "Serial":
                connection["ttype"] = "Serialize"
            comment = "//{comment}".format(**connection)
            if obj.is_ptr:
                connection = """{comp}_ptr->set_{name}_OutputPort({num}, {tcomp}_ptr->get_{tname}_InputPort({tnum}));""".format(
                    **connection
                )
                connection_template = (comment, connection)
            else:
                connection = """{comp}.set_{name}_OutputPort({num}, {tcomp}.get_{tname}_InputPort({tnum}));""".format(
                    **connection
                )
                connection_template = (comment, connection)
            c.port_connections.append(connection_template)
        #
        # Generate Component Command Registration
        for connection in connection_list:
            if connection["type"] == "CmdReg" and connection["direction"] == "output":
                if obj.is_ptr:
                    registration = """{comp}_ptr->regCommands();""".format(**connection)
                else:
                    registration = """{comp}.regCommands();""".format(**connection)
                c.command_registrations.append(registration)
        #
        # Generate Component Startup
        for component in component_list:
            startup_template = ""
            if component["kind"] == "active":
                if obj.is_ptr:
                    startup_template = """{name}_ptr->start(0, 100, 10 * 1024);""".format(
                        **component
                    )
                else:
                    startup_template = """{name}.start(0, 100, 10 * 1024);""".format(
                        **component
                    )
                c.component_startups.append(startup_template)
        #
        #
        # Generate Component Teardown
        for component in component_list:
            teardown_template = ""
            if component["kind"] == "active":
                if obj.is_ptr:
                    teardown_template = """{name}_ptr->exit();""".format(**component)
                else:
                    teardown_template = """{name}.exit();""".format(**component)
                c.component_teardowns.append(teardown_template)
        #
        self._writeTmpl(c, "publicVisit")

    def protectedVisit(self, obj):
        """
        Defined to generate protected stuff within a class.
        @parms args: the instance of the concrete element to operation on.
        """

    def privateVisit(self, obj):
        """
        Defined to generate private stuff within a class.
        @parms args: the instance of the concrete element to operation on.
        """

    def finishSourceFilesVisit(self, obj):
        """
        Defined to generate ending static code within files.
        """
        # c = finishComponentCpp.finishComponentCpp()
        # self._writeTmpl(c, "finishSourceFilesVisit")
        self.__fp.close()
|
# pseudoc/parser.py
# PseudoC-IR - Simple Program Analysis/Compiler Intermediate Representation
#
# Copyright (c) 2020-2021 <NAME>
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from lexer import Lexer
from . import config
from .ir import InlineStr, SpecFunc, Arg, Insn, BBlock, Func, Data, Module, PrimType, PtrType, ArrType, StructType
# Token patterns for the PseudoC lexer.
# Identifiers: virtual registers ("$name") or plain/global ("@"-prefixed) names.
LEX_IDENT = re.compile(r"[$][A-Za-z_0-9]+|[@]?[A-Za-z_][A-Za-z_0-9]*")
# Bare identifier without "$"/"@" prefixes (e.g. struct names).
LEX_SIMPLE_IDENT = re.compile(r"[A-Za-z_][A-Za-z_0-9]*")
# Backtick-quoted identifier, quotes included in the match.
LEX_QUOTED_IDENT = re.compile(r"`.+?`")
# Optionally-negated decimal integer literal.
LEX_NUM = re.compile(r"-?\d+")
# Primitive type keywords (must stay in sync with TYPE_NAMES below).
LEX_TYPE = re.compile(r"void|i1|i8|u8|i16|u16|i32|u32|i64|u64")
# Simplified. To avoid enumerating specific operators supported, just say
# "anything non-space, except handle opening parens specially (for calls
# w/o args).
LEX_OP = re.compile(r"\(|[^ ]+")
LEX_UNARY_OP = re.compile(r"[-~!*(]")
# Double-quoted string literal with backslash escapes.
LEX_STR = re.compile(r'"([^\\]|\\.)*"')
# Set form of LEX_TYPE, for fast membership tests.
TYPE_NAMES = {"void", "i1", "i8", "u8", "i16", "u16", "i32", "u32", "i64", "u64"}
# Counter for generated labels.
LABEL_CNT = 0
# Interning table: struct name -> StructType (fields may be filled in later).
STRUCT_TYPE_MAP = {}
def parse_reg(lex, name):
    """Parse an optional physical-register annotation ("{reg}") following a
    virtual-register name (one starting with "$").

    Returns the register identifier, or None when name is not a virtual
    register or no "{...}" annotation follows.
    """
    if not name.startswith("$"):
        return None
    if not lex.match("{"):
        return None
    reg = lex.expect_re(LEX_IDENT, err="expected identifier")
    lex.expect("}")
    return reg
def parse_var(lex):
    """Parse a variable declaration: "<type> <name>[{reg}]".

    Returns a (type, name, reg) triple; reg is None when there is no
    physical-register annotation.
    """
    var_type = parse_type(lex)
    var_name = lex.expect_re(LEX_IDENT, err="expected identifier")
    var_reg = parse_reg(lex, var_name)
    return var_type, var_name, var_reg
def parse_type_name(lex):
    # Match a primitive type keyword (e.g. "i32", "void"); returns the
    # matched string, or None when the next token is not a type name.
    return lex.match_re(LEX_TYPE)
def parse_type_mod(lex, typ):
    """Apply trailing type modifiers to typ: each "*" wraps it in PtrType,
    and "[N][M]..." wraps it in ArrType (outermost dimension first)."""
    while True:
        if lex.match("*"):
            typ = PtrType(typ)
            continue
        if not lex.check("["):
            return typ
        dims = []
        while lex.match("["):
            dim = parse_val(lex).val
            assert isinstance(dim, int)
            dims.append(dim)
            lex.expect("]")
        # wrap innermost-last so the first written dimension is outermost
        for dim in reversed(dims):
            typ = ArrType(typ, dim)
def parse_type(lex):
    """Parse a full type: "struct Name" (interned in STRUCT_TYPE_MAP, with a
    forward declaration created on first sight) or a primitive type, plus
    any "*"/"[N]" modifiers. Returns None when no type is present."""
    if lex.match("struct"):
        name = lex.expect_re(LEX_SIMPLE_IDENT, "expected structure identifier")
        if name not in STRUCT_TYPE_MAP:
            STRUCT_TYPE_MAP[name] = StructType(name, None)
        typ = STRUCT_TYPE_MAP[name]
    else:
        prim = parse_type_name(lex)
        if prim is None:
            return None
        typ = PrimType(prim)
    return parse_type_mod(lex, typ)
def parse_simple_type(lex):
    """Parse a primitive type name, or the special "void*" pointer type.
    Returns the type name string, or None if no type is present."""
    if not lex.match("void"):
        return parse_type_name(lex)
    lex.expect("*")
    return "void*"
def parse_global_type_and_name(lex):
    """Parse an optional type followed by a name at module level.

    Accepted shapes: `quoted name`, "struct Name [mods] [name]",
    "<prim-type> [mods] name", or a bare name.  Returns (typ, name), where
    typ is None for untyped definitions and name is None only for a struct
    type definition immediately followed by "{".
    """
    typ = None
    name = None
    parsed = lex.match_re(LEX_QUOTED_IDENT)
    if parsed:
        # If we matched a quoted id at the beginning, we know there's no type.
        name = parsed[1:-1]
    else:
        parsed = lex.expect_re(LEX_SIMPLE_IDENT, "expected a simple identifier")
        if parsed == "struct":
            parsed = lex.expect_re(LEX_SIMPLE_IDENT, "expected structure identifier")
            typ = STRUCT_TYPE_MAP.get(parsed)
            if typ is None:
                # Intern a forward-declared struct with unknown fields.
                typ = STRUCT_TYPE_MAP[parsed] = StructType(parsed, None)
        elif parsed in TYPE_NAMES:
            typ = PrimType(parsed)
        else:
            # Otherwise what we parsed is a name.
            name = parsed
    if typ:
        # A type was parsed: apply "*"/"[N]" modifiers, then read the name.
        typ = parse_type_mod(lex, typ)
        name = lex.match_re(LEX_SIMPLE_IDENT)
        if not name:
            # NOTE(review): backticks are NOT stripped here, unlike the
            # quoted-name branch above — confirm whether that is intended.
            name = lex.match_re(LEX_QUOTED_IDENT)
        if not name:
            # If that was a bare structure name followed by {, i.e. a
            # structure type definition, it's ok to not have a name,
            # otherwise report error.
            if isinstance(typ, StructType) and lex.check("{"):
                pass
            else:
                lex.error("identifer expected after type")
    return (typ, name)
# Returns Arg object with .val and possibly .reg initialized.
def parse_val(lex, spec_funcs=False):
    """Parse a value: identifier, integer literal, or string literal.

    When spec_funcs is True, "@"-prefixed const-valued special functions
    (currently only @sizeof) are also accepted and returned as SpecFunc args.
    """
    reg = None
    v = lex.match_re(LEX_IDENT)
    if v:
        if spec_funcs and v.startswith("@"):
            # BUGFIX: the original test was `not v in ("@sizeof")` — that is
            # substring membership in the *string* "@sizeof", so e.g. "@size"
            # was wrongly accepted. Use a tuple for exact-name membership.
            if v not in ("@sizeof",):
                lex.error("Only const-valued special functions may be used where value is expected")
            lex.expect("(")
            if v == "@sizeof":
                # @sizeof takes a single type argument.
                args = [parse_type(lex)]
                lex.expect(")")
            else:
                args = parse_args(lex)
            return Arg(SpecFunc(v, *args))
        else:
            reg = parse_reg(lex, v)
    else:
        v = lex.match_re(LEX_NUM)
        if v:
            v = int(v, 0)
        else:
            v = lex.match_re(LEX_STR)
            if v:
                # Strip the surrounding double quotes.
                v = InlineStr(v[1:-1])
            else:
                lex.error("expected value (var or const)")
    a = Arg(v)
    if reg is not None:
        a.reg = reg
    return a
def parse_args(lex):
    """Parse a comma-separated call argument list; "(" was already consumed.

    Special const-valued functions are allowed as arguments. A trailing
    comma before ")" is tolerated.
    """
    args = []
    while not lex.check(")"):
        args.append(parse_val(lex, spec_funcs=True))
        if not lex.match(","):
            break
    lex.expect(")")
    return args
def parse_params(lex):
    """Parse a function parameter list; "(" was already consumed.

    Returns parallel lists (names, types). Any per-parameter register
    annotation is parsed but discarded.
    """
    names, types = [], []
    while not lex.check(")"):
        param_type, param_name, _reg = parse_var(lex)
        names.append(param_name)
        types.append(param_type)
        if not lex.match(","):
            break
    lex.expect(")")
    return names, types
def parse_if_expr(lex):
    """Parse "(val)" or "(val <op> val)"; return [arg] or [arg, op, arg]."""
    lex.expect("(")
    expr = [parse_val(lex)]
    if not lex.check(")"):
        expr.append(lex.expect_re(LEX_OP))
        expr.append(parse_val(lex))
    lex.expect(")")
    return expr
def get_label():
    """Mint a fresh synthetic basic-block label ("_l0", "_l1", ...)."""
    global LABEL_CNT
    label = "_l%d" % LABEL_CNT
    LABEL_CNT += 1
    return label
def make_call(dest, name, *args):
    """Build a call instruction; return (insn, should_start_new_bb)."""
    if not name.startswith("@"):
        # Regular function call; config decides if the basic block is split.
        return Insn(dest, "call", name, *args), config.SPLIT_BB_AFTER_CALL
    # "@name" marks a special/intrinsic operation, encoded directly as its op.
    return Insn(dest, name, *args), False
# Byte sizes of the simple data-element types accepted in data definitions
# (pointers are assumed 32-bit — TODO confirm target word size).
TYPE_SIZES = {
    "i8": 1,
    "u8": 1,
    "i16": 2,
    "u16": 2,
    "i32": 4,
    "u32": 4,
    "i64": 8,
    "u64": 8,
    "void*": 4,
}
def parse_data(lex, name):
    """Parse a "{...}" data definition body into a Data object.

    Elements are either string literals or "(type) value" pairs, separated
    by commas.  The total byte size is accumulated into Data.size.
    """
    desc = []
    size = 0
    lex.expect("{")
    while not lex.match("}"):
        if lex.check('"'):
            s = lex.match_re(LEX_STR)
            b = s[1:-1].encode()
            def unesc(m):
                # Decode one escape sequence: \xNN hex byte, or \0, \", \n.
                v = m.group(0)[1:]
                if v.startswith(b"x"):
                    v = bytes([int(v[1:], 16)])
                else:
                    v = {b"0": b"\0", b'"': b'"', b"n": b"\n"}[v]
                return v
            b = re.sub(rb"(\\x..|\\.)", unesc, b)
            # Keep both the source text and the decoded bytes for dumping.
            desc.append(("str", s, b))
            size += len(b)
        elif lex.match('('):
            typ = parse_simple_type(lex)
            lex.expect(")")
            desc.append((typ, parse_val(lex)))
            size += TYPE_SIZES[typ]
        else:
            lex.error("Unexpected syntax in data element")
        lex.match(",")
    data = Data(name, desc)
    data.size = size
    return data
def parse(f):
    """Parse a textual IR module from the iterable of lines *f*.

    Top-level constructs are struct definitions, data definitions and
    functions.  Function bodies are partitioned into basic blocks (BBlock)
    at labels and after control-flow instructions.  Returns a Module.
    """
    STRUCT_TYPE_MAP.clear()
    mod = Module()
    bb = None
    prev_bb = None
    label2bb = {}
    lex = Lexer()
    start_new_bb = True
    # cfg is the Func currently being parsed; None while at top level.
    cfg = None
    def get_bb(label):
        # Get-or-create a basic block so forward goto references resolve.
        bb = label2bb.get(label)
        if bb is None:
            bb = label2bb[label] = BBlock(label, [])
        return bb
    for l in f:
        l = l.strip()
        # Skip blank lines and full-line comments.
        if not l or l.startswith("#"):
            continue
        lex.init(l)
        if cfg is None:
            # Top level: struct, function, or data definition.
            typ, name = parse_global_type_and_name(lex)
            if isinstance(typ, StructType) and lex.match("{"):
                # Structure declaration
                if typ.fields is not None:
                    lex.error("duplicate struct definition: %s" % typ.name)
                fields = []
                while not lex.match("}"):
                    typ_fld = parse_type(lex)
                    fldname = lex.match_re(LEX_SIMPLE_IDENT)
                    fields.append((fldname, typ_fld))
                    lex.match(",")
                typ.fields = fields
                mod.add(typ)
                continue
            elif lex.match("("):
                # Function definition header; body follows on later lines.
                cfg = Func(name)
                cfg.res_type = typ
                cfg.params, cfg.param_types = parse_params(lex)
                lex.expect("{")
                label2bb = {}
                start_new_bb = True
                bb = None
                prev_bb = None
            elif lex.match("="):
                data = parse_data(lex, name)
                data.type = typ
                mod.contents.append(data)
            else:
                lex.error("expected function, data, or structure definition")
            continue
        if lex.match("}"):
            # End of the current function body.
            cfg.calc_preds()
            mod.contents.append(cfg)
            cfg = None
            continue
        is_label = l.endswith(":")
        if is_label or start_new_bb:
            if is_label:
                label = l[:-1]
            else:
                # Synthesize a label at an implicit block boundary.
                label = get_label()
            if True:
                bb = get_bb(label)
                cfg.bblocks.append(bb)
                if prev_bb:
                    # Fallthru edge
                    prev_bb.succs.append(bb)
            prev_bb = bb
            start_new_bb = False
            if is_label:
                continue
        insn = None
        # Context before having parsed anything, for error messages.
        lex_ctx = lex.l
        if lex.match("goto"):
            label = lex.expect_re(LEX_IDENT)
            bb.succs.append(get_bb(label))
            # Unconditional jump: no fall-through edge to the next block.
            prev_bb = None
            start_new_bb = True
        elif lex.match("if"):
            expr = parse_if_expr(lex)
            lex.expect("goto")
            label = lex.expect_re(LEX_IDENT)
            bb.succs.append(get_bb(label))
            if not lex.eol():
                lex.expect("else")
                # Currently "goto" after "else" is optional.
                lex.match("goto")
                label = lex.expect_re(LEX_IDENT)
                bb.succs.append(get_bb(label))
                # Both branches explicit: no fall-through edge.
                prev_bb = None
            insn = Insn("", "if", *expr)
            start_new_bb = True
        elif lex.match("return"):
            if not lex.eol():
                arg = parse_val(lex)
                insn = Insn("", "return", arg)
            else:
                insn = Insn("", "return")
            prev_bb = None
        elif lex.match("@nop"):
            insn = Insn("", "@nop")
        else:
            # Assignment, store, or call statement.
            ptr_typ = None
            if lex.match("*"):
                # "*(T*) dest = val" store syntax; remember the element type.
                lex.expect("(")
                ptr_typ = parse_type(lex)
                assert isinstance(ptr_typ, PtrType)
                ptr_typ = ptr_typ.el_type
                lex.expect(")")
            dest_typ, dest, dest_reg = parse_var(lex)
            if lex.match("="):
                if ptr_typ is None and not dest.startswith("$"):
                    lex.error("Can assign only to local variables (must start with '$')", ctx=lex_ctx)
                unary_op = lex.match_re(LEX_UNARY_OP)
                if unary_op:
                    # Unary op
                    # NOTE(review): only "*" (load) and "(" (cast) assign
                    # `op`; a bare -/~/! here would hit NameError below —
                    # confirm these are intentionally unsupported.
                    typ = None
                    args = []
                    if unary_op == "*":
                        op = "@load"
                        if lex.match("("):
                            typ = parse_type(lex)
                            assert isinstance(typ, PtrType)
                            typ = typ.el_type
                            lex.expect(")")
                    elif unary_op == "(":
                        op = "@cast"
                        typ = parse_type(lex)
                        lex.expect(")")
                        args.append(typ)
                    args.append(parse_val(lex))
                    if unary_op == "*":
                        args.append(typ)
                    insn = Insn(dest, op, *args)
                else:
                    arg1 = parse_val(lex)
                    if lex.eol():
                        if ptr_typ is None:
                            # Move
                            insn = Insn(dest, "=", arg1)
                        else:
                            # Store
                            insn = Insn("", "@store", dest, ptr_typ, arg1)
                    else:
                        # Binary op
                        op = lex.expect_re(LEX_OP)
                        if op == "(":
                            # Function call
                            args = parse_args(lex)
                            insn, start_new_bb = make_call(dest, arg1.val, *args)
                        else:
                            arg2 = parse_val(lex)
                            insn = Insn(dest, op, arg1, arg2)
                insn.reg = dest_reg
                insn.typ = dest_typ
            elif lex.match("("):
                # Function call
                args = parse_args(lex)
                insn, start_new_bb = make_call("", dest, *args)
            else:
                lex.error("Unexpected syntax")
        assert lex.eol(), "Unexpected content at end of line: %r" % lex.l
        if insn:
            bb.insns.append(insn)
    return mod
def __main__():
    """CLI entry point: parse the IR file named in argv[1] and dump it.

    Imports sys locally so the function does not depend on the module-bottom
    conditional `import sys` (which only runs when executed as a script).
    """
    import sys
    with open(sys.argv[1]) as f:
        mod = parse(f)
    mod.dump(bb_ann=False, expl_goto=True)
# Script-mode setup. The startswith() form also matches variant
# "__main__"-like module names — presumably intentional; TODO confirm.
if __name__.startswith("__main__"):
    import sys
if __name__ == "__main__":
    __main__()
|
import copy
import logging
from itertools import product
from functools import wraps
import numpy as np
import matplotlib.pyplot as plt
# Own imports
from spike_swarm_sim.register import neuron_models, synapse_models, learning_rules
from spike_swarm_sim.utils import increase_time, merge_dicts, remove_duplicates
from .neuron_models import NonSpikingNeuronModel, SpikingNeuronModel
from .decoding import DecodingWrapper
from .encoding import EncodingWrapper
from .utils.monitor import NeuralNetMonitor
# Visualization helpers are optional (e.g. headless environments).
# BUGFIX: narrowed the bare `except:` — it also swallowed SystemExit and
# KeyboardInterrupt raised during import.
try:
    from .utils.visualization import *
except Exception:
    pass
def monitor(func):
    """ Decorator for recording and monitoring the relevant neuronal variables.
    The records are stored in the monitor attribute of the NeuralNetwork class.
    The wrapped function must return (spikes, Isynapses, voltages) and is
    expected to be a NeuralNetwork method (reads self.monitor, self.stimuli,
    self.neurons, self.decoders).
    """
    @wraps(func)
    def wrapper(self, encoded_stimuli, **kwargs):
        spikes, Isynapses, voltages = func(self, encoded_stimuli, **kwargs)
        #* Debugging and Monitoring (debug option must be enabled)
        # self.monitor is only non-None when logging level is DEBUG.
        if self.monitor is not None:
            monitor_vars = {
                # 'encoded_inputs' : encoded_stimuli.copy(),
                'stimuli' : np.hstack(tuple(self.stimuli.values())).copy(),
                'voltages' : voltages.copy(),
                'currents' : Isynapses.copy(),
                'outputs' : spikes.copy()
            }
            # Spiking models expose extra state (recovery/theta — assumed
            # present on all SpikingNeuronModel subclasses; TODO confirm).
            if issubclass(type(self.neurons), SpikingNeuronModel):
                monitor_vars.update({
                    'encoded_inputs' : encoded_stimuli.copy(),
                    'spikes' : spikes.copy(),
                    'recovery' : self.neurons.recovery.copy(),
                    'neuron_theta' : self.neurons.theta.copy(),
                    'activities' : np.hstack([v.activities.copy() for v in self.decoders.all.values()])#!
                })
            self.monitor.update(**monitor_vars)
        return spikes, Isynapses, voltages
    return wrapper
class NeuralNetwork:
    """ Class for the artificial neural networks. This class is mainly a wrapper that creates and executes
    the main building blocks of ANNs. These blocks are encoding, synapses, neurons and decoding, albeit there
    are other functionalities such as learning rules, monitors, and so on. This class encompasses any kind
    of neural network, the precise architecture and dynamics will be fixed by the neuron and synapses models
    throughout the topology dictionary.
    ==========================================================================================================
    - Params:
        topology [dict] : dictionary specifying the ANN architecture (see configuration files for more details).
    - Attributes:
        dt [float] : Euler step of the ANN.
        t [int] : time counter.
        time_scale [int] : ratio between neuronal and environment dynamics. This means that every time step of
                the env., the ANN performs time_scale updates.
        synapses [Synapses] : object storing the synapse models.
        stim_encoding [dict] : dict of sensor_name : Encoding object storing all the neural encoders.
        pointers [dict] : dict mapping ensembles to the index in the ANN adjacency matrix. The index is only
                the index of the last neuron of the ensemble.
        subpop_neurons [dict] : dict mapping ensembles to number of neurons per ensemble.
        n_inputs [int] : number of ANN inputs (after decoding).
        stimuli_order [list of str]: ordered list with the sensor order as specified in the ANN config.
        neurons [SpikingNeuronModel or NonSpikingNeuronModel] : object storing the neurons of the ANN.
        update_rule : #TODO
        output_neurons [list] : list with the name of the motor/output ensembles.
        monitor [NeuralNetMonitor or None]: Monitor to record neuronal variables if mode is DEBUG.
        spikes [np.ndarray of shape=num_neurons]: current generated spikes.
        stimuli [dict] : current supplied stimuli. Dict mapping sensor name to stimuli values.
        action_decoding [dict] : dict of action_name : Decoding object storing all the neural decoders.
    ==========================================================================================================
    """
    def __init__(self, dt, neuron_model='rate_model', synapse_model='static_synapse', time_scale=1):
        self.t = 0
        self.dt = dt #* Euler Step
        self.time_scale = time_scale #* ANN steps per world step.
        self.neuron_model = neuron_model
        self.synapse_model = synapse_model
        #* Flag indicating if the ANN is built and functional.
        #* The ANN cannot be used if this flag is False.
        self.is_built = False #! currently unused
        #* Submodules of the neural network distributing its functioning
        #* and computations.
        if synapse_model == 'dynamic_synapse' and issubclass(neuron_models[neuron_model], NonSpikingNeuronModel):
            raise Exception(logging.error('The combination of dynamic synapses and '\
                    'non-spiking neuron models is not currently implemented.'))
        self.synapses = synapse_models[self.synapse_model](self.dt)
        self.neurons = neuron_models[self.neuron_model](self.dt)
        self.encoders = EncodingWrapper(self.time_scale)
        self.decoders = None
        self.learning_rule = None
        #* Monitor that, if in DEBUG mode, will store all the relevant neural
        #* variables.
        self.monitor = None
        #* Overall ANN directed graph description.
        self.graph = {'inputs' : {}, 'neurons' : {}, 'synapses' : {}}
        self.ensemble_names = []
        self.input_ensemble_names = []
        self.motor_ensemble_names = []
        #* Ordered list of stimuli names (not input nodes)
        self.stimuli_names = []
        #* Variables storing the previous stim and spikes.
        self.stimuli, self.spikes, self.prev_input = None, None, None
        self.weight_registry = None

    def build(self):
        """ Build synapses, learning rule and (in DEBUG mode) the monitor
        from the current self.graph description. """
        #! BUILD NEURONS
        self.synapses.build(self.graph)
        if self.learning_rule is not None:
            self.learning_rule.build(self.graph)
        #TODO --- Create Monitor (DEBUG MODE) ---
        # self.output_neurons = remove_duplicates([out['ensemble'] for out in topology['outputs'].values()])
        if logging.root.level == logging.DEBUG:
            self.monitor = NeuralNetMonitor({ens : self.num_ensemble_neurons(ens)\
                    for ens in self.ensemble_names},\
                    {name : self.encoders.get(name).n_stimuli for name in self.stimuli_names},\
                    {name : self.num_input_nodes(name) for name in self.input_ensemble_names},\
                    self.motor_ensemble_names)
        else:
            self.monitor = None
        # #* --- Reset dynamics ---
        # self.reset()

    def build_from_dict(self, topology):
        """ Populate the ANN (ensembles, stimuli, motors, encoders, learning
        rule, synapses, decoders) from a topology config dict, then build. """
        #* Add neurons
        for name, ensemble in topology['ensembles'].items():
            self.add_ensemble(name, ensemble['n'], **ensemble['params'])
        #* Add stimuli
        for name, stim in topology['stimuli'].items():
            self.add_stimuli(name, stim['n'], stim['sensor'])
        #* Add motor ensembles
        for out in topology['outputs'].values():
            self.set_motor(out['ensemble'])
        #* Add encoders
        for input_name, encoder in topology['encoding'].items():
            self.add_encoder(encoder['scheme'], topology['stimuli'][input_name]['sensor'],\
                    receptive_field=encoder['receptive_field']['name'], receptive_field_params=encoder['receptive_field']['params'])
        #* Add Learning Rule
        if topology.get('learning_rule', {}).get('rule') is not None:
            self.learning_rule = learning_rules.get(topology.get('learning_rule', {}).get('rule'))() #TODO decouple, improve.
        #* Add Synapses
        for name, syn in topology['synapses'].items():
            syn_params = {key : val for key, val in syn.items()\
                    if key not in ['pre', 'post', 'p', 'trainable']}
            self.add_synapse(name, syn['pre'], syn['post'], conn_prob=syn['p'], **syn_params)
        #* Add Decoders
        self.decoders = DecodingWrapper(topology)
        #* Build ANN
        self.build()

    def set_motor(self, ensemble_name):
        """ Mark an existing ensemble as a motor (output) ensemble. """
        if ensemble_name not in self.ensemble_names:
            # BUGFIX: .format was applied to logging.error's return value
            # (None), raising AttributeError instead of the intended error.
            raise Exception(logging.error('Ensemble "{}" does not exist'.format(ensemble_name)))
        if ensemble_name in self.motor_ensemble_names:
            return
        self.motor_ensemble_names.append(ensemble_name)
        for neuron in self.graph['neurons'].values():
            if neuron['ensemble'] == ensemble_name:
                neuron['is_motor'] = True

    def add_stimuli(self, name, num_nodes, sensor):
        """ Register an input ensemble of num_nodes nodes fed by sensor. """
        for n in range(num_nodes):
            self.graph['inputs'].update({
                '{}_{}'.format(name, n) : {'ensemble' : name, 'sensor' : sensor, 'idx': len(self.graph['inputs'])}
            })
        self.input_ensemble_names.append(name)
        self.stimuli_names.append(sensor)

    def add_ensemble(self, name, num_neurons, **kwargs):
        """ Create an ensemble of num_neurons neurons named "<name>_<i>". """
        self.ensemble_names.append(name)
        for n in range(num_neurons):
            self.add_neuron('{}_{}'.format(name, n), ensemble=name, **kwargs)

    def add_neuron(self, name, ensemble=None, **kwargs):
        """ Add a single neuron; a standalone neuron forms its own ensemble. """
        self.neurons.add(**kwargs)#!
        ensemble = ensemble if ensemble is not None else name
        if ensemble not in self.ensemble_names:
            self.ensemble_names.append(ensemble)
        self.graph['neurons'].update({name : merge_dicts([{'ensemble' : ensemble,
                'idx' : len(self.neurons)-1, 'is_motor' : False}, kwargs])})

    def delete_neuron(self, name):
        """ Remove a neuron, reindex remaining neurons and drop its synapses
        and (if emptied) its ensemble. """
        neuron_index = self.graph['neurons'][name]['idx']
        ensemble = self.graph['neurons'][name]['ensemble']
        self.neurons.delete(neuron_index)
        self.graph['neurons'].pop(name, None)
        # Shift down indices of neurons that followed the removed one.
        for neuron in self.graph['neurons'].values():
            if neuron['idx'] >= neuron_index:
                neuron['idx'] -= 1
        #* Remove ensemble if neuron was the only unit.
        if not any([neuron['ensemble'] == ensemble for neuron in self.graph['neurons'].values()]):
            self.ensemble_names.remove(ensemble)
        #* Remove any synapse with the neuron as pre or post
        for syn_name, syn in [*self.graph['synapses'].items()]:
            if syn['pre'] == name or syn['post'] == name:
                self.delete_synapse(syn_name)

    def add_synapse(self, name, pre, post, weight=1., conn_prob=1., trainable=True, **kwargs):
        """ Adds synapses between pre and post ensembles. """
        if post in self.graph['inputs'] or post in self.input_ensemble_names:
            raise Exception(logging.error('An input node or ensemble cannot be '\
                    'a postsynaptic neuron or ensemble.'))
        #* Check if pre is neuron or ensemble.
        if pre not in merge_dicts([self.graph['inputs'], self.graph['neurons']]):
            if pre not in self.input_ensemble_names + self.ensemble_names:
                # BUGFIX: .format moved inside logging.error (was called on None).
                raise Exception(logging.error('Connection presynaptic neuron or ensemble '\
                        '"{}" does not exist'.format(pre)))
            pre = [name for name, node in merge_dicts([self.graph['inputs'], self.graph['neurons']]).items() if node['ensemble'] == pre]
        else:
            pre = [pre]
        #* Check if post is neuron or ensemble.
        if post not in merge_dicts([self.graph['inputs'], self.graph['neurons']]):
            if post not in self.input_ensemble_names + self.ensemble_names:
                # BUGFIX: .format moved inside logging.error (was called on None).
                raise Exception(logging.error('Connection postsynaptic neuron or ensemble '\
                        '"{}" does not exist'.format(post)))
            post = [name for name, node in merge_dicts([self.graph['inputs'], self.graph['neurons']]).items() if node['ensemble'] == post]
        else:
            post = [post]
        #* Add connections (note: not compatible with previous implementation checkpoints).
        #! TODO: review seeding the *global* NumPy RNG here.
        np.random.seed(44 + len(self.graph['synapses']))
        for i, (pre_node, post_node) in enumerate(product(pre, post)):
            if np.random.random() < conn_prob:
                synapse_config = merge_dicts([{
                    'pre' : pre_node, 'post' : post_node,
                    'weight': weight, 'trainable' : trainable,
                    'group' : name, 'idx' : len(self.graph['synapses']), 'enabled' : True}, kwargs])
                if self.learning_rule is not None:
                    synapse_config.update({'learning_rule' : {p : 0. for p in ['A', 'B', 'C', 'D']}})
                if self.synapse_model == 'dynamic_synapse':
                    #! Add min and max possible delays?
                    synapse_config.update({'delay' : np.random.randint(1, 10)})
                syn_name = "{}_{}".format(name, i) if len(pre + post) > 2 else name
                self.graph['synapses'].update({syn_name : synapse_config})
        np.random.seed()

    def delete_synapse(self, name):
        """ Remove a synapse from the graph (no-op if absent). """
        self.graph['synapses'].pop(name, None)

    def add_encoder(self, scheme, sensor, receptive_field=None, receptive_field_params=None):
        """ Attach an encoder to a sensor; expand input nodes when the
        receptive field augments their dimension.

        BUGFIX: receptive_field_params previously defaulted to a shared
        mutable dict ({}); None with a local fallback is equivalent and safe.
        """
        receptive_field_params = {} if receptive_field_params is None else receptive_field_params
        raw_inputs = [inp for inp in self.graph['inputs'].values() if inp['sensor'] == sensor]
        self.encoders.add(scheme, sensor, len(raw_inputs), receptive_field=receptive_field,\
                receptive_field_params=receptive_field_params)
        if receptive_field is not None:
            if 'n_neurons' in receptive_field_params and receptive_field_params['n_neurons'] > 1:
                #* Correct the input nodes if the encoding augments their dimension.
                ensemble_name = tuple(raw_inputs)[0]['ensemble']
                for n in range(len(raw_inputs), receptive_field_params['n_neurons'] * len(raw_inputs)):
                    prev_idx = self.graph['inputs'][ensemble_name+'_'+str(n-1)]['idx']
                    for inp_node in filter(lambda x: x['idx'] >= prev_idx + 1, self.graph['inputs'].values()):
                        inp_node['idx'] += 1
                    self.graph['inputs'].update({'{}_{}'.format(ensemble_name, n) :\
                        {'ensemble' : ensemble_name, 'sensor' : sensor, 'idx': prev_idx + 1}})

    @increase_time
    @monitor
    def _step(self, stimuli):
        """ Private method devoted to step the synapses and neurons sequentially.
        ====================================================================================
        - Args:
            stimuli [dict]: dict mapping stimuli name and numpy array containing its values.
        - Returns:
            spikes [np.ndarray]: boolean vector with the generated spikes.
            soma_currents [np.ndarray]: vector of currents injected to the neurons.
            voltages [np.ndarray]: vector of membrane voltages after neurons step.
        ====================================================================================
        """
        soma_currents = self.synapses.step(np.r_[stimuli, self.spikes], self.voltages)
        spikes, voltages = self.neurons.step(soma_currents)
        return spikes, soma_currents, voltages

    def step(self, stimuli, reward=None):
        """ Simulation step of the neural network.
        It is composed by four main steps:
            1) Encoding of stimuli to spikes (if SNN used).
            2) Synapses step.
            3) Neurons step.
            4) Decoding of spikes or activities into actions.
        ===============================================================
        - Args:
            stimuli [dict]: dict mapping stimuli name and numpy array
                containing its values.
        - Returns:
            actions [dict]: dict mapping output names and actions.
        ===============================================================
        """
        #* --- Convert stimuli into spikes (Encoders Step) ---
        if len(stimuli) == 0:
            raise Exception(logging.error('The ANN received empty stimuli.'))
        stimuli = {s : stimuli[s].copy() for s in self.stimuli_names}
        inputs = self.encoders.step(stimuli)
        self.stimuli = stimuli.copy()
        if self.time_scale == 1:
            inputs = inputs[np.newaxis]
        #* --- Apply update rules to synapses ---
        if self.t > 1 and self.learning_rule is not None:
            # If reward is None while learning rule is not, then
            # assume that it is a non modulated learning rule.
            if reward is None:
                reward = 1.
            # Use inputs and neuron outputs of previous time step.
            self.synapses.weights += self.learning_rule.step(self.prev_input, self.spikes, reward=reward)
            self.synapses.weights = np.clip(self.synapses.weights, a_min=-6, a_max=6)
        #* --- Step synapses and neurons ---
        spikes_window = []
        for stim in inputs:
            spikes, _, _ = self._step(stim)
            self.spikes = spikes.copy()
            spikes_window.append(spikes.copy())
        spikes_window = np.stack(spikes_window)
        #* --- Convert spikes into actions (Decoding Step) ---
        actions = self.decoders.step(spikes_window[:, self.motor_neurons])
        self.prev_input = inputs[-1].copy()
        return actions

    @property
    def num_neurons(self):
        """ Number of neurons in the ANN (non-input). """
        return len(self.neurons)

    @property
    def num_inputs(self):
        """ Number of input nodes of the ANN. """
        return len(self.graph['inputs'])

    @property
    def num_motor(self):
        """ Number of motor (output) neurons. """
        return len(self.motor_neurons)

    @property
    def num_hidden(self):
        """ Number of non-motor neurons. """
        return self.num_neurons - self.num_motor

    @property
    def motor_neurons(self):
        """ Indices of motor neurons without counting input nodes. When addressing the
        weight matrix or any kind of ANN adj. mat., the number of inputs MUST be added.
        """
        return np.hstack([self.ensemble_indices(motor) for motor in self.motor_ensemble_names])

    def num_ensemble_neurons(self, ensemble):
        """ Number of neurons belonging to an ensemble. """
        return len(self.ensemble_indices(ensemble))

    def num_input_nodes(self, ensemble):
        """ Number of input nodes belonging to an input ensemble. """
        return len(self.input_ensemble_indices(ensemble))

    def ensemble_indices(self, ens_name, consider_inputs=False):
        """ Indices of the neurons of the requested ensemble. """
        if ens_name not in self.ensemble_names:
            raise Exception(logging.error('Requested ensemble "{}" does not exist.'.format(ens_name)))
        indices = np.array([neuron['idx'] for neuron in self.graph['neurons'].values() if neuron['ensemble'] == ens_name])
        if consider_inputs:
            indices += self.num_inputs #!
        return indices

    def input_ensemble_indices(self, input_name):
        """ Indices of the input nodes of the requested input ensemble. """
        if input_name not in self.input_ensemble_names:
            raise Exception(logging.error('Requested input ensemble "{}" does not exist.'.format(input_name)))
        indices = np.array([node['idx'] for node in self.graph['inputs'].values() if node['ensemble'] == input_name])
        return indices

    @property
    def is_spiking(self):
        """ Whether the neural network is a spiking neural network or not. """
        return issubclass(type(self.neurons), SpikingNeuronModel)

    @property
    def voltages(self):
        """Getter instantaneous voltage vector (membrane voltage of each neuron membrane)
        at current simulation timestep."""
        return self.neurons.voltages

    @property
    def weights(self):
        "Getter of the numpy weight matrix."
        return self.synapses.weights

    def reset(self):
        """ Reset process of all the neural network dynamics. """
        self.t = 0
        self.build()
        self.neurons.reset()
        self.synapses.reset()
        self.encoders.reset()
        self.decoders.reset()
        if self.learning_rule is not None:
            self.learning_rule.reset()
        if self.monitor is not None:
            self.monitor.reset()
        self.spikes = np.zeros(self.weights.shape[0])
        self.stimuli = None
        self.prev_input = None
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from graph.dim import Dim
import logging
from copy import deepcopy
from functools import partial
from itertools import groupby
import numpy as np
from graph.types import SplitParameters
from graph.types.base import ComparableParameters, NNEdge
from utils.graph import GraphView
from ..matcher import Matcher, description, groups, match_name, run_before
LOG = logging.getLogger("nntool." + __name__)
@match_name("match_duplicate_operations")
@description("""Removes operations that are duplicates on the same edge""")
@run_before("*")
@groups('symmetric', 'scaled')
class MatchDuplicateOperations(Matcher):
    def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
        """Deduplicate nodes fed by the same (node, output-index) edge.

        Two transformations are applied repeatedly until a fixed point:
        1. Nodes that are the same operation on the same input are merged,
           rewiring the duplicates' outputs onto the surviving node.
        2. Convolutions on the same input that can be grouped are merged
           into one wider convolution followed by a channel-axis split.

        Returns True if the graph was modified.
        (Cleanup vs original: removed the dead `same_dest_edge` helper,
        which was defined but never used.)
        """
        if G.quantization:
            LOG.warning(
                'match_duplicate_operations does not handle quantized graphs')
            return False

        def same_source_edge_fn(x):
            # Key identifying the (origin node, output index) of an edge.
            return f"{x.from_node.__hash__()}##{x.from_idx}"

        modified_graph = False
        while True:
            found_more = False
            # Group all edges by their origin; keep groups with >1 edge.
            same_source_edges = [list(edge_list) for _, edge_list in
                                 groupby(sorted(G.edges(), key=same_source_edge_fn), same_source_edge_fn)]
            # all have the same origin
            same_source_edges = [elem for elem in same_source_edges
                                 if len(elem) > 1]
            same_dest_edges = []
            same_dest_group_edges = []
            for same_source_edge in same_source_edges:
                # Only comparable nodes can be deduplicated or grouped.
                same_source_edge = [edge for edge in same_source_edge if isinstance(
                    edge.to_node, ComparableParameters)]
                while same_source_edge:
                    first = same_source_edge.pop(0)
                    # Partition the remainder into exact duplicates of `first`...
                    others = list(filter(partial(lambda x, y: y.to_node.is_same_operation_as(
                        x.to_node), first), same_source_edge))
                    if others:
                        same_dest_edges.append(tuple([first] + others))
                        for other in others:
                            same_source_edge.remove(other)
                        continue
                    # ...or nodes that can be grouped (merged) with it.
                    other_groups = list(filter(partial(lambda x, y: y.to_node.can_be_grouped_with(
                        x.to_node), first), same_source_edge))
                    if other_groups:
                        same_dest_group_edges.append(
                            tuple([first] + other_groups))
                        for other in other_groups:
                            same_source_edge.remove(other)
            # all are multiple edges that go to something comparable
            for edge_set in same_dest_edges:
                modified_graph = True
                found_more = True
                first = edge_set[0]
                first_node = first.to_node
                dup_nodes = []
                for other in edge_set[1:]:
                    dest_node = other.to_node
                    dup_nodes.append(dest_node.name)
                    out_edges = G.out_edges(dest_node.name)
                    G.remove(dest_node)
                    # Rewire the duplicate's consumers onto the kept node.
                    for out_edge in out_edges:
                        G.add_edge(NNEdge(from_node=first_node, to_node=out_edge.to_node,
                                          from_idx=out_edge.from_idx, to_idx=out_edge.to_idx))
                LOG.info(
                    f'removed duplicates {",".join(dup_nodes)} to {first_node.name}')
            for edge_set in same_dest_group_edges:
                modified_graph = True
                found_more = True
                # we will merge all the convolutions into one
                first = edge_set[0]
                first_node = first.to_node
                in_edges = G.indexed_in_edges(first_node.name)
                first_filter = first_node.filter
                weights_node = in_edges[1].from_node
                biases_node = in_edges[2].from_node
                dup_nodes = []
                num_convs = len(edge_set)
                out_shape = deepcopy(first_node.out_dims[0])
                out_shape.c *= num_convs
                # create a split after the first node splitting on channel axis
                act_slices, out_shapes, axis = SplitParameters.get_splits(
                    out_shape, out_shape.get_order_idx('c'), num_splits=num_convs)
                split1 = SplitParameters(G.unique_name(
                    f'{first_node.name}_split'), act_slices=act_slices, out_shapes=out_shapes, axis=axis)
                out_num = 0
                # first node out edge goes to split
                out_edges = G.out_edges(first_node.name)
                for edge in out_edges:
                    G.remove_edge(edge)
                    G.add_edge(NNEdge(from_node=split1, from_idx=out_num, to_node=edge.to_node, to_idx=edge.to_idx))
                G.add_edge(NNEdge(from_node=first_node, to_node=split1))
                # first split output goes to original output
                for other in edge_set[1:]:
                    out_num += 1
                    node_other = other.to_node
                    dup_nodes.append(node_other.name)
                    in_edges = G.indexed_in_edges(node_other.name)
                    weights_other = in_edges[1].from_node
                    biases_other = in_edges[2].from_node
                    # merge the weights and biases down the output channel axis
                    weights_node.value = np.concatenate(
                        (weights_node.value, weights_other.value), axis=first_filter.get_order_idx('out_c'))
                    weights_node.dims = Dim.unnamed(weights_node.value.shape)
                    biases_node.value = np.concatenate(
                        (biases_node.value, biases_other.value))
                    biases_node.dims = Dim.unnamed(biases_node.value.shape)
                    first_filter.out_c += node_other.filter.out_c
                    # wire edge from split
                    out_edges = G.out_edges(node_other.name)
                    G.remove(node_other)
                    G.remove(weights_other)
                    G.remove(biases_other)
                    for edge in out_edges:
                        G.add_edge(NNEdge(from_node=split1, from_idx=out_num, to_node=edge.to_node, to_idx=edge.to_idx))
                # TODO - handle quantization
                LOG.info(
                    f'merged convolutions {",".join(dup_nodes)} into {first_node.name}')
            if not found_more:
                break
        if set_identity:
            self.set_identity(G)
        return modified_graph
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""An example Keras trainer for the Cora data set using graph regularization.
USAGE:
python graph_keras_mlp_cora.py [flags] train.tfr test.tfr
See https://linqs.soe.ucsc.edu/data for a description of the Cora data set, and
the corresponding graph and training data set.
This example demonstrates the use of sequential, functional, and subclass models
in Keras for graph regularization. Users may change 'base_models' defined in
main() as necessary, to select a subset of the supported Keras base model types.
In all cases, the base model used is a multi-layer perceptron containing two
hidden layers with drop out.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import attr
import neural_structured_learning as nsl
import tensorflow as tf
# Global absl flag registry; drop the log-line prefix for cleaner output.
FLAGS = flags.FLAGS
FLAGS.showprefixforinfo = False
# None means "use the HParams default" (see get_hyper_parameters()).
flags.DEFINE_integer('train_epochs', None, 'Number of epochs to train.')
flags.DEFINE_integer('eval_steps', None, 'Number of steps to evaluate.')
# Feature-name affixes for graph-neighbor features in the input examples.
NBR_FEATURE_PREFIX = 'NL_nbr_'
NBR_WEIGHT_SUFFIX = '_weight'
@attr.s
class HParams(object):
  """Hyper-parameters used for training."""
  ### dataset parameters
  # Number of target classes in the Cora data set.
  num_classes = attr.ib(default=7)
  # Length of the multi-hot 'words' feature vector.
  max_seq_length = attr.ib(default=1433)
  ### NGM parameters
  # Distance metric for the graph regularization term.
  distance_type = attr.ib(default=nsl.configs.DistanceType.L2)
  # Weight of the graph regularization loss relative to the supervised loss.
  graph_regularization_multiplier = attr.ib(default=0.1)
  # Neighbors per example used for regularization.
  num_neighbors = attr.ib(default=1)
  ### model architecture
  # Units of the two hidden fully-connected layers.
  num_fc_units = attr.ib(default=[50, 50])
  ### training parameters
  train_epochs = attr.ib(default=10)
  batch_size = attr.ib(default=128)
  dropout_rate = attr.ib(default=0.5)
  ### eval parameters
  eval_steps = attr.ib(default=None)  # Every test instance is evaluated.
def get_hyper_parameters():
"""Returns the hyper-parameters used for training."""
hparams = HParams()
if FLAGS.train_epochs:
hparams.train_epochs = FLAGS.train_epochs
if FLAGS.eval_steps:
hparams.eval_steps = FLAGS.eval_steps
return hparams
def load_dataset(filename):
"""Reads a file in the `.tfrecord` format.
Args:
filename: Name of the file containing `tf.train.Example` objects.
Returns:
An instance of `tf.data.TFRecordDataset` containing the `tf.train.Example`
objects.
"""
return tf.data.TFRecordDataset([filename])
def make_dataset(file_path, training, include_nbr_features, hparams):
"""Returns a `tf.data.Dataset` instance based on data in `file_path`."""
def parse_example(example_proto):
"""Extracts relevant fields from the `example_proto`.
Args:
example_proto: An instance of `tf.train.Example`.
Returns:
A pair whose first value is a dictionary containing relevant features
and whose second value contains the ground truth labels.
"""
# The 'words' feature is a multi-hot, bag-of-words representation of the
# original raw text. A default value is required for examples that don't
# have the feature.
feature_spec = {
'words':
tf.io.FixedLenFeature([hparams.max_seq_length],
tf.int64,
default_value=tf.constant(
0,
dtype=tf.int64,
shape=[hparams.max_seq_length])),
'label':
tf.io.FixedLenFeature((), tf.int64, default_value=-1),
}
if include_nbr_features:
for i in range(hparams.num_neighbors):
nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'words')
nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, i,
NBR_WEIGHT_SUFFIX)
nbr_id_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'id')
feature_spec[nbr_feature_key] = tf.io.FixedLenFeature(
[hparams.max_seq_length],
tf.int64,
default_value=tf.constant(
0, dtype=tf.int64, shape=[hparams.max_seq_length]))
feature_spec[nbr_weight_key] = tf.io.FixedLenFeature(
[1], tf.float32, default_value=tf.constant([0.0]))
feature_spec[nbr_id_key] = tf.io.FixedLenFeature(
(), tf.string, default_value='')
features = tf.io.parse_single_example(example_proto, feature_spec)
labels = features.pop('label')
return features, labels
# If the dataset is sharded, the following code may be required:
# filenames = tf.data.Dataset.list_files(file_path, shuffle=True)
# dataset = filenames.interleave(load_dataset, cycle_length=1)
dataset = load_dataset(file_path)
if training:
dataset = dataset.shuffle(10000)
dataset = dataset.map(parse_example)
dataset = dataset.batch(hparams.batch_size)
return dataset
def make_mlp_sequential_model(hparams):
"""Creates a sequential multi-layer perceptron model."""
model = tf.keras.Sequential()
model.add(
tf.keras.layers.InputLayer(
input_shape=(hparams.max_seq_length,), name='words'))
# Input is already one-hot encoded in the integer format. We cast it to
# floating point format here.
model.add(
tf.keras.layers.Lambda(lambda x: tf.keras.backend.cast(x, tf.float32)))
for num_units in hparams.num_fc_units:
model.add(tf.keras.layers.Dense(num_units, activation='relu'))
model.add(tf.keras.layers.Dropout(hparams.dropout_rate))
model.add(tf.keras.layers.Dense(hparams.num_classes, activation='softmax'))
return model
def make_mlp_functional_model(hparams):
"""Creates a functional API-based multi-layer perceptron model."""
inputs = tf.keras.Input(
shape=(hparams.max_seq_length,), dtype='int64', name='words')
# Input is already one-hot encoded in the integer format. We cast it to
# floating point format here.
cur_layer = tf.keras.layers.Lambda(
lambda x: tf.keras.backend.cast(x, tf.float32))(
inputs)
for num_units in hparams.num_fc_units:
cur_layer = tf.keras.layers.Dense(num_units, activation='relu')(cur_layer)
# For functional models, by default, Keras ensures that the 'dropout' layer
# is invoked only during training.
cur_layer = tf.keras.layers.Dropout(hparams.dropout_rate)(cur_layer)
outputs = tf.keras.layers.Dense(
hparams.num_classes, activation='softmax')(
cur_layer)
model = tf.keras.Model(inputs, outputs=outputs)
return model
def make_mlp_subclass_model(hparams):
"""Creates a multi-layer perceptron subclass model in Keras."""
class MLP(tf.keras.Model):
"""Subclass model defining a multi-layer perceptron."""
def __init__(self):
super(MLP, self).__init__()
self.cast_to_float_layer = tf.keras.layers.Lambda(
lambda x: tf.keras.backend.cast(x, tf.float32))
self.dense_layers = [
tf.keras.layers.Dense(num_units, activation='relu')
for num_units in hparams.num_fc_units
]
self.dropout_layer = tf.keras.layers.Dropout(hparams.dropout_rate)
self.output_layer = tf.keras.layers.Dense(
hparams.num_classes, activation='softmax')
def call(self, inputs, training=False):
cur_layer = self.cast_to_float_layer(inputs['words'])
for dense_layer in self.dense_layers:
cur_layer = dense_layer(cur_layer)
cur_layer = self.dropout_layer(cur_layer, training=training)
outputs = self.output_layer(cur_layer)
return outputs
return MLP()
def log_metrics(model_desc, eval_metrics):
"""Logs evaluation metrics at `logging.INFO` level.
Args:
model_desc: A description of the model.
eval_metrics: A dictionary mapping metric names to corresponding values. It
must contain the loss and accuracy metrics.
"""
logging.info('\n')
logging.info('Eval accuracy for %s: %s', model_desc, eval_metrics['accuracy'])
logging.info('Eval loss for %s: %s', model_desc, eval_metrics['loss'])
if 'graph_loss' in eval_metrics:
logging.info('Eval graph loss for %s: %s', model_desc,
eval_metrics['graph_loss'])
def train_and_evaluate(model, model_desc, train_dataset, test_dataset, hparams):
"""Compiles, trains, and evaluates a `Keras` model.
Args:
model: An instance of `tf.Keras.Model`.
model_desc: A description of the model.
train_dataset: An instance of `tf.data.Dataset` representing training data.
test_dataset: An instance of `tf.data.Dataset` representing test data.
hparams: An instance of `Hparams`.
"""
model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
model.fit(train_dataset, epochs=hparams.train_epochs, verbose=1)
eval_results = dict(
zip(model.metrics_names,
model.evaluate(test_dataset, steps=hparams.eval_steps)))
log_metrics(model_desc, eval_results)
def main(argv):
# Check that the correct number of arguments have been provided. The
# training and test data should contain 'tf.train.Example' objects in the
# TFRecord format.
if len(argv) != 3:
raise app.UsageError('Invalid number of arguments; expected 2, got %d' %
(len(argv) - 1))
hparams = get_hyper_parameters()
train_data_path = argv[1]
test_data_path = argv[2]
# Graph regularization configuration.
graph_reg_config = nsl.configs.make_graph_reg_config(
max_neighbors=hparams.num_neighbors,
multiplier=hparams.graph_regularization_multiplier,
distance_type=hparams.distance_type,
sum_over_axis=-1)
# Create the base MLP models.
base_models = {
'FUNCTIONAL': make_mlp_functional_model(hparams),
'SEQUENTIAL': make_mlp_sequential_model(hparams),
'SUBCLASS': make_mlp_subclass_model(hparams)
}
for base_model_tag, base_model in base_models.items():
logging.info('\n====== %s BASE MODEL TEST BEGIN ======', base_model_tag)
train_dataset = make_dataset(train_data_path, True, False, hparams)
test_dataset = make_dataset(test_data_path, False, False, hparams)
train_and_evaluate(base_model, 'Base MLP model', train_dataset,
test_dataset, hparams)
logging.info('\n====== TRAINING WITH GRAPH REGULARIZATION ======\n')
# Wrap the base MLP model with graph regularization.
graph_reg_model = nsl.keras.GraphRegularization(base_model,
graph_reg_config)
train_dataset = make_dataset(train_data_path, True, True, hparams)
test_dataset = make_dataset(test_data_path, False, False, hparams)
train_and_evaluate(graph_reg_model, 'MLP + graph regularization',
train_dataset, test_dataset, hparams)
logging.info('\n====== %s BASE MODEL TEST END ======', base_model_tag)
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
app.run(main)
|
# -*- coding: utf-8 -*-
# AUTHOR: RaNaN
import os
import pprint
import time
import traceback
from sys import exc_info
from threading import Thread
from types import MethodType
class PluginThread(Thread):
"""
abstract base class for thread types.
"""
# ----------------------------------------------------------------------
def __init__(self, manager):
"""
Constructor.
"""
super().__init__()
self.daemon = True
self.pyload = manager.pyload
self._ = manager._
self.m = self.manager = manager #: thread manager
def write_debug_report(self, pyfile):
"""
writes a.
:return:
"""
date = time.strftime("%Y-%m-%d_%H-%M-%S")
dump_name = f"debug_{pyfile.pluginname}_{date}.zip"
dump_filename = os.path.join(self.pyload.cachedir, dump_name)
dump = self.get_debug_dump(pyfile)
try:
import zipfile
with zipfile.ZipFile(dump_filename, "w") as zip:
for entry in os.listdir(
os.path.join(self.pyload.cachedir, pyfile.pluginname)
):
try:
# avoid encoding errors
zip.write(
os.path.join(
self.pyload.cachedir, pyfile.pluginname, entry
),
os.path.join(pyfile.pluginname, entry),
)
except Exception:
pass
info = zipfile.ZipInfo(
os.path.join(pyfile.pluginname, "debug_Report.txt"), time.gmtime()
)
info.external_attr = 0o644 << 16 #: change permissions
zip.writestr(info, dump)
if not os.stat(dump_filename).st_size:
raise Exception("Empty Zipfile")
except Exception as exc:
self.pyload.log.debug(f"Error creating zip file: {exc}")
dump_filename = dump_filename.replace(".zip", ".txt")
with open(dump_filename, mode="w") as fp:
fp.write(dump)
self.pyload.log.info(self._("Debug Report written to {}").format(dump_filename))
def get_debug_dump(self, pyfile):
version = self.pyload.api.get_server_version()
dump = f"pyLoad {version} Debug Report of {pyfile.pluginname} {pyfile.plugin.__version__} \n\nTRACEBACK:\n {traceback.format_exc()} \n\nFRAMESTACK:\n"
tb = exc_info()[2]
stack = []
while tb:
stack.append(tb.tb_frame)
tb = tb.tb_next
for frame in stack[1:]:
dump += f"\n_frame {frame.f_code.co_name} in {frame.f_code.co_filename} at line {frame.f_lineno}\n"
for key, value in frame.f_locals.items():
dump += f"\t{key:20} = "
try:
dump += pprint.pformat(value) + "\n"
except Exception as exc:
dump += f"<ERROR WHILE PRINTING VALUE> {exc}\n"
del frame
del stack #: delete it just to be sure...
dump += "\n\n_PLUGIN OBJECT DUMP: \n\n"
for name in dir(pyfile.plugin):
attr = getattr(pyfile.plugin, name)
if not name.endswith("__") and not isinstance(attr, MethodType):
dump += f"\t{name:20} = "
try:
dump += pprint.pformat(attr) + "\n"
except Exception as exc:
dump += f"<ERROR WHILE PRINTING VALUE> {exc}\n"
dump += "\n_PYFILE OBJECT DUMP: \n\n"
for name in dir(pyfile):
attr = getattr(pyfile, name)
if not name.endswith("__") and not isinstance(attr, MethodType):
dump += f"\t{name:20} = "
try:
dump += pprint.pformat(attr) + "\n"
except Exception as exc:
dump += f"<ERROR WHILE PRINTING VALUE> {exc}\n"
if pyfile.pluginname in self.pyload.config.plugin:
dump += "\n\nCONFIG: \n\n"
dump += pprint.pformat(self.pyload.config.plugin[pyfile.pluginname]) + "\n"
return dump
def clean(self, pyfile):
"""
set thread unactive and release pyfile.
"""
self.active = False
pyfile.release()
|
<reponame>dchabot/ophyd_hkl<filename>tests/test_device.py
import time
import logging
import unittest
from ophyd import (Device, Component, FormattedComponent)
from ophyd.signal import Signal
from ophyd.utils import ExceptionBundle
logger = logging.getLogger(__name__)
class FakeSignal(Signal):
def __init__(self, read_pv, *, name=None, parent=None):
self.read_pv = read_pv
super().__init__(name=name, parent=parent)
def get(self):
return self.name
def describe_configuration(self):
return {self.name + '_conf': {'source': 'SIM:test'}}
def read_configuration(self):
return {self.name + '_conf': {'value': 0}}
def setUpModule():
pass
def tearDownModule():
logger.debug('Cleaning up')
def test_device_state():
d = Device('test')
d.stage()
old, new = d.configure({})
assert old == new
d.unstage()
class DeviceTests(unittest.TestCase):
def test_attrs(self):
class MyDevice(Device):
cpt1 = Component(FakeSignal, '1')
cpt2 = Component(FakeSignal, '2')
cpt3 = Component(FakeSignal, '3')
d = MyDevice('prefix', read_attrs=['cpt1'],
configuration_attrs=['cpt2'],
monitor_attrs=['cpt3']
)
d.read()
self.assertEqual(d.read_attrs, ['cpt1'])
self.assertEqual(d.configuration_attrs, ['cpt2'])
self.assertEqual(d.monitor_attrs, ['cpt3'])
self.assertEqual(list(d.read().keys()), [d.cpt1.name])
self.assertEqual(set(d.read_configuration().keys()),
{d.cpt2.name, d.cpt2.name + '_conf'})
self.assertEqual(list(d.describe().keys()), [d.cpt1.name])
self.assertEqual(set(d.describe_configuration().keys()),
{d.cpt2.name, d.cpt2.name + '_conf'})
def test_complexdevice(self):
class SubDevice(Device):
cpt1 = Component(FakeSignal, '1')
cpt2 = Component(FakeSignal, '2')
cpt3 = Component(FakeSignal, '3')
class SubSubDevice(SubDevice):
cpt4 = Component(FakeSignal, '4')
class MyDevice(Device):
sub1 = Component(SubDevice, '1')
subsub2 = Component(SubSubDevice, '2')
cpt3 = Component(FakeSignal, '3')
device = MyDevice('prefix', name='dev')
device.configuration_attrs = ['sub1',
'subsub2.cpt2',
'subsub2.cpt4',
'cpt3']
device.sub1.read_attrs = ['cpt2']
device.sub1.configuration_attrs = ['cpt1']
self.assertIs(device.sub1.parent, device)
self.assertIs(device.subsub2.parent, device)
self.assertIs(device.cpt3.parent, device)
self.assertEquals(device.sub1.signal_names,
['cpt1', 'cpt2', 'cpt3'])
self.assertEquals(device.subsub2.signal_names,
['cpt1', 'cpt2', 'cpt3', 'cpt4'])
conf_keys = {'dev_sub1_cpt1_conf', # from sub1.*
# 'dev_sub1_cpt2_conf', # not in sub1.config_attrs
'dev_subsub2_cpt2_conf', # from subsub2.cpt2
'dev_subsub2_cpt4_conf', # from subsub2.cpt4
'dev_cpt3_conf', # from cpt3
'dev_sub1_cpt1', # from sub1.*
'dev_sub1_cpt2', # from sub1.*
# (and sub1.read_attrs)
'dev_subsub2_cpt2', # from subsub2.cpt2
'dev_subsub2_cpt4', # from subsub2.cpt4
'dev_cpt3' # from cpt3
}
self.assertEquals(set(device.describe_configuration().keys()),
conf_keys)
self.assertEquals(set(device.read_configuration().keys()),
conf_keys)
def test_complexdevice_stop(self):
class SubSubDevice(Device):
cpt4 = Component(FakeSignal, '4')
def stop(self):
self.stop_called = True
if self.prefix.endswith('_raises_'):
raise Exception('stop failed for some reason')
class SubDevice(Device):
cpt1 = Component(FakeSignal, '1')
cpt2 = Component(FakeSignal, '2')
cpt3 = Component(FakeSignal, '3')
subsub = Component(SubSubDevice, '')
def stop(self):
self.stop_called = True
super().stop()
class MyDevice(Device):
sub1 = Component(SubDevice, '1')
sub2 = Component(SubDevice, '_raises_')
sub3 = Component(SubDevice, '_raises_')
cpt3 = Component(FakeSignal, '3')
dev = MyDevice('', name='mydev')
with self.assertRaises(ExceptionBundle) as cm:
dev.stop()
ex = cm.exception
self.assertEquals(len(ex.exceptions), 2)
self.assertTrue(dev.sub1.stop_called)
self.assertTrue(dev.sub2.stop_called)
self.assertTrue(dev.sub3.stop_called)
self.assertTrue(dev.sub1.subsub.stop_called)
self.assertTrue(dev.sub2.subsub.stop_called)
self.assertTrue(dev.sub3.subsub.stop_called)
def test_name_shadowing(self):
RESERVED_ATTRS = ['name', 'parent', 'signal_names', '_signals',
'read_attrs', 'configuration_attrs', 'monitor_attrs',
'_sig_attrs', '_sub_devices']
type('a', (Device,), {'a': None}) # legal class definition
# Illegal class definitions:
for attr in RESERVED_ATTRS:
self.assertRaises(TypeError, type, 'a', (Device,), {attr: None})
def test_formatted_component(self):
FC = FormattedComponent
class MyDevice(Device):
cpt = Component(FakeSignal, 'suffix')
ch = FC(FakeSignal, '{self.prefix}{self._ch}')
def __init__(self, prefix, ch='a', **kwargs):
self._ch = ch
super().__init__(prefix, **kwargs)
ch_value = '_test_'
device = MyDevice('prefix:', ch=ch_value)
self.assertIs(device.cpt.parent, device)
self.assertIs(device.ch.parent, device)
self.assertIs(device._ch, ch_value)
self.assertEquals(device.ch.read_pv, device.prefix + ch_value)
self.assertEquals(device.cpt.read_pv,
device.prefix + MyDevice.cpt.suffix)
|
# -*- coding: utf-8 -*-
import numpy as np
import os
import argparse
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.datasets as dset
import torch.nn.functional as F
from tqdm import tqdm
from models.allconv import AllConvNet
from models.wrn_virtual import WideResNet
from models.densenet import DenseNet3
# go through rigamaroo to do ...utils.display_results import show_performance
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils.validation_dataset import validation_split
parser = argparse.ArgumentParser(description='Trains a CIFAR Classifier',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100'],
default='cifar10',
help='Choose between CIFAR-10, CIFAR-100.')
parser.add_argument('--model', '-m', type=str, default='dense',
choices=['allconv', 'wrn', 'dense'], help='Choose architecture.')
parser.add_argument('--calibration', '-c', action='store_true',
help='Train a model to be used for calibration. This holds out some data for validation.')
# Optimization options
parser.add_argument('--epochs', '-e', type=int, default=100, help='Number of epochs to train.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--test_bs', type=int, default=200)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0001, help='Weight decay (L2 penalty).')
# WRN Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./snapshots/baseline', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=4, help='Pre-fetching threads.')
# energy reg
parser.add_argument('--start_epoch', type=int, default=40)
parser.add_argument('--sample_number', type=int, default=1000)
parser.add_argument('--select', type=int, default=1)
parser.add_argument('--sample_from', type=int, default=10000)
parser.add_argument('--loss_weight', type=float, default=0.1)
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
print(state)
torch.manual_seed(1)
np.random.seed(1)
# mean and standard deviation of channels of CIFAR-10 images
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = trn.Compose([trn.RandomHorizontalFlip(), trn.RandomCrop(32, padding=4),
trn.ToTensor(), trn.Normalize(mean, std)])
test_transform = trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data = dset.CIFAR10('/nobackup-slow/dataset/my_xfdu/cifarpy', train=True, transform=train_transform, download=True)
test_data = dset.CIFAR10('/nobackup-slow/dataset/my_xfdu/cifarpy', train=False, transform=test_transform, download=True)
num_classes = 10
else:
train_data = dset.CIFAR100('/nobackup-slow/dataset/my_xfdu/cifarpy', train=True, transform=train_transform, download=True)
test_data = dset.CIFAR100('/nobackup-slow/dataset/my_xfdu/cifarpy', train=False, transform=test_transform, download=True)
num_classes = 100
calib_indicator = ''
if args.calibration:
train_data, val_data = validation_split(train_data, val_share=0.1)
calib_indicator = '_calib'
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Create model
if args.model == 'allconv':
net = AllConvNet(num_classes)
elif args.model == 'dense':
net = DenseNet3(100, num_classes, 12, reduction=0.5, bottleneck=True, dropRate=0.0, normalizer=None,
k=None, info=None)
else:
net = WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
start_epoch = 0
# Restore model if desired
if args.load != '':
for i in range(1000 - 1, -1, -1):
model_name = os.path.join(args.load, args.dataset + calib_indicator + '_' + args.model +
'_baseline_epoch_' + str(i) + '.pt')
if os.path.isfile(model_name):
net.load_state_dict(torch.load(model_name))
print('Model restored! Epoch:', i)
start_epoch = i + 1
break
if start_epoch == 0:
assert False, "could not resume"
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.cuda.manual_seed(1)
cudnn.benchmark = True # fire on all cylinders
if args.dataset == 'cifar10':
num_classes = 10
else:
num_classes = 100
weight_energy = torch.nn.Linear(num_classes, 1).cuda()
torch.nn.init.uniform_(weight_energy.weight)
data_dict = torch.zeros(num_classes, args.sample_number, 342).cuda()
number_dict = {}
for i in range(num_classes):
number_dict[i] = 0
eye_matrix = torch.eye(342, device='cuda')
logistic_regression = torch.nn.Linear(1, 2)
logistic_regression = logistic_regression.cuda()
optimizer = torch.optim.SGD(
list(net.parameters()) + list(weight_energy.parameters()) + \
list(logistic_regression.parameters()), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
def cosine_annealing(step, total_steps, lr_max, lr_min):
return lr_min + (lr_max - lr_min) * 0.5 * (
1 + np.cos(step / total_steps * np.pi))
def log_sum_exp(value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
import math
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(
F.relu(weight_energy.weight) * torch.exp(value0), dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp(value - m))
# if isinstance(sum_exp, Number):
# return m + math.log(sum_exp)
# else:
return m + torch.log(sum_exp)
# /////////////// Training ///////////////
def train(epoch):
net.train() # enter train mode
loss_avg, lr_loss_avg = 0.0, 0.0
for data, target in train_loader:
data, target = data.cuda(), target.cuda()
# forward
x, output = net.forward_virtual(data)
# energy regularization.
sum_temp = 0
for index in range(num_classes):
sum_temp += number_dict[index]
lr_reg_loss = torch.zeros(1).cuda()[0]
if sum_temp == num_classes * args.sample_number and epoch < args.start_epoch:
# maintaining an ID data queue for each class.
target_numpy = target.cpu().data.numpy()
for index in range(len(target)):
dict_key = target_numpy[index]
data_dict[dict_key] = torch.cat((data_dict[dict_key][1:],
output[index].detach().view(1, -1)), 0)
elif sum_temp == num_classes * args.sample_number and epoch >= args.start_epoch:
target_numpy = target.cpu().data.numpy()
for index in range(len(target)):
dict_key = target_numpy[index]
data_dict[dict_key] = torch.cat((data_dict[dict_key][1:],
output[index].detach().view(1, -1)), 0)
# the covariance finder needs the data to be centered.
for index in range(num_classes):
if index == 0:
X = data_dict[index] - data_dict[index].mean(0)
mean_embed_id = data_dict[index].mean(0).view(1, -1)
else:
X = torch.cat((X, data_dict[index] - data_dict[index].mean(0)), 0)
mean_embed_id = torch.cat((mean_embed_id,
data_dict[index].mean(0).view(1, -1)), 0)
## add the variance.
temp_precision = torch.mm(X.t(), X) / len(X)
temp_precision += 0.0001 * eye_matrix
for index in range(num_classes):
new_dis = torch.distributions.multivariate_normal.MultivariateNormal(
mean_embed_id[index], covariance_matrix=temp_precision)
negative_samples = new_dis.rsample((args.sample_from,))
prob_density = new_dis.log_prob(negative_samples)
# breakpoint()
# index_prob = (prob_density < - self.threshold).nonzero().view(-1)
# keep the data in the low density area.
cur_samples, index_prob = torch.topk(- prob_density, args.select)
if index == 0:
ood_samples = negative_samples[index_prob]
else:
ood_samples = torch.cat((ood_samples, negative_samples[index_prob]), 0)
if len(ood_samples) != 0:
# add some gaussian noise
# ood_samples = self.noise(ood_samples)
# energy_score_for_fg = 1 * torch.logsumexp(predictions[0][selected_fg_samples][:, :-1] / 1, 1)
energy_score_for_fg = log_sum_exp(x, 1)
predictions_ood = net.fc(ood_samples)
# energy_score_for_bg = 1 * torch.logsumexp(predictions_ood[0][:, :-1] / 1, 1)
energy_score_for_bg = log_sum_exp(predictions_ood, 1)
input_for_lr = torch.cat((energy_score_for_fg, energy_score_for_bg), -1)
labels_for_lr = torch.cat((torch.ones(len(output)).cuda(),
torch.zeros(len(ood_samples)).cuda()), -1)
criterion = torch.nn.CrossEntropyLoss()
output1 = logistic_regression(input_for_lr.view(-1, 1))
lr_reg_loss = criterion(output1, labels_for_lr.long())
# if epoch % 5 == 0:
# print(lr_reg_loss)
else:
target_numpy = target.cpu().data.numpy()
for index in range(len(target)):
dict_key = target_numpy[index]
if number_dict[dict_key] < args.sample_number:
data_dict[dict_key][number_dict[dict_key]] = output[index].detach()
number_dict[dict_key] += 1
# backward
optimizer.zero_grad()
loss = F.cross_entropy(x, target)
# breakpoint()
loss += args.loss_weight * lr_reg_loss
loss.backward()
optimizer.step()
# scheduler.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + float(loss) * 0.2
lr_loss_avg = lr_loss_avg * 0.8 + float(lr_reg_loss) * 0.2
state['train_loss'] = loss_avg
state['train_vos_loss'] = lr_loss_avg
# test function
def test():
net.eval()
loss_avg = 0.0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum().item()
# test loss average
loss_avg += float(loss.data)
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
if args.test:
test()
print(state)
exit()
# Make save directory
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception('%s is not a dir' % args.save)
with open(os.path.join(args.save, args.dataset + calib_indicator + '_' + args.model +
'_' + str(args.loss_weight) + \
'_' + str(args.sample_number)+ '_' + str(args.start_epoch) + '_' +\
str(args.select) + '_' + str(args.sample_from) +
'_dense_baseline_training_results.csv'), 'w') as f:
f.write('epoch,time(s),train_loss,test_loss,test_error(%)\n')
print('Beginning Training\n')
# Main loop
for epoch in range(start_epoch, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train(epoch)
test()
if epoch == 49:
optimizer.param_groups[0]['lr'] *= args.learning_rate * 0.1
elif epoch == 74:
optimizer.param_groups[0]['lr'] *= args.learning_rate * 0.01
elif epoch == 89:
optimizer.param_groups[0]['lr'] *= args.learning_rate * 0.001
# Save model
torch.save(net.state_dict(),
os.path.join(args.save, args.dataset + calib_indicator + '_' + args.model +
'_baseline_dense' + '_' + str(args.loss_weight) + \
'_' + str(args.sample_number)+ '_' + str(args.start_epoch) + '_' +\
str(args.select) + '_' + str(args.sample_from) + '_' + 'epoch_' + str(epoch) + '.pt'))
# Let us not waste space and delete the previous model
prev_path = os.path.join(args.save, args.dataset + calib_indicator + '_' + args.model +
'_baseline_dense' + '_' + str(args.loss_weight) + \
'_' + str(args.sample_number)+ '_' + str(args.start_epoch) + '_' +\
str(args.select) + '_' + str(args.sample_from) + '_' + 'epoch_' + str(epoch - 1) + '.pt')
if os.path.exists(prev_path): os.remove(prev_path)
# Show results
with open(os.path.join(args.save, args.dataset + calib_indicator + '_' + args.model +
'_' + str(args.loss_weight) + \
'_' + str(args.sample_number) + '_' + str(args.start_epoch) + '_' + \
str(args.select) + '_' + str(args.sample_from) +
'_baseline_training_results.csv'), 'a') as f:
f.write('%03d,%05d,%0.6f,%0.6f,%0.5f,%0.2f\n' % (
(epoch + 1),
time.time() - begin_epoch,
state['train_loss'],
state['train_vos_loss'],
state['test_loss'],
100 - 100. * state['test_accuracy'],
))
# # print state with rounded decimals
# print({k: round(v, 4) if isinstance(v, float) else v for k, v in state.items()})
print('Epoch {0:3d} | Time {1:5d} | Train Loss {2:.4f} | VOS Loss {3:.4f} | Test Loss {4:.3f} | Test Error {5:.2f}'.format(
(epoch + 1),
int(time.time() - begin_epoch),
state['train_loss'],
state['train_vos_loss'],
state['test_loss'],
100 - 100. * state['test_accuracy'])
) |
import pandas as pd
import requests
import json
import elasticsearch.helpers
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
class UNSDapi():
# https://unstats.un.org/sdgapi/swagger/#!/GeoArea/V1SdgGeoAreaListGet. Consider this link for Indicators code and areacodes .
def __init__(self, field=None):
# Choose your field of data [Indicator, Series, Target], only Indicators available for now.
self.field=field
def indicator_link(self, indicatorcode=None,areacode=None, timeperiod=None, page=None, pagesize=None):
'''
Generates SDG query link
'''
#indicatorcode- The code constitutes series inforamtion and Goal - Target hierarchy, if None- all indicators will be included ,dtype= list
#areacode- Unique codes associated with each countries, if None- all countries will be included, dtype = list
#timeperiod Years from which we want data eg: ["2000","2010"], if None- all years will be included, dtype = list
#page Page number to be displayed, if None- shows page 1 (only 1 page is shown at a time), dtype = integer
#pagesize Number of records requireds per page, (Number of pages in the call will differ when this field is altered), if None- default size is 25, dtype= integer
'''
When pulling large datasets, try to give a higher value for page size to avoid high number of total pages.
'''
apilink=''
linkprefix='https://unstats.un.org/SDGAPI/v1/sdg/'+self.field+'/Data?'
self.indicatorcode=indicatorcode
self.areacode=areacode
self.timeperiod=timeperiod
self.page=page
self.pagesize=pagesize
if self.indicatorcode==None:
pass
else:
for i in self.indicatorcode:
apilink=apilink+'indicator='+i+'&'
if self.areacode==None:
pass
else:
for i in self.areacode:
apilink=apilink+'areacode='+i+'&'
if self.timeperiod==None:
pass
else:
for i in self.timeperiod:
apilink=apilink+'timeperiod='+i+'&'
if self.page==None:
pass
else:
for i in self.page:
apilink=apilink+'page='+i+'&'
if self.pagesize==None:
pass
else:
apilink=apilink+'pageSize='+str(self.pagesize)+'&'
self.link=linkprefix+apilink[:len(apilink)-1]
def Extract_json(self):
# Extracts the Json file through SDG query
if self.link==None:
print("Pass a valid api URL")
pass
else:
try:
req=requests.get(self.link)
self.json_dataset=json.loads(req.text)['data']
except Exception as f:
print("{} following error occured. Provide valid queries only".format(f))
def sendtorepo(self, ip, port):
for i in self.json_dataset:
i['@timestamp']=str(int(i['timePeriodStart']))+'-12-30'+'T'+'00:00:00'+'Z'
es = Elasticsearch([{'host': ip, 'port': port}], request_timeout=3000)
doc_type = 'ticker_type'
self.index_name= 'unsd_check'
es.indices.create(index= self.index_name, ignore=400)
docs=self.json_dataset
out = bulk(es, docs, index=(self.index_name), doc_type=doc_type, raise_on_error=True, request_timeout=3000)
def return_json_dataset(self):
#returs the json dataset(1 page only)
return self.json_dataset
def return_json_size(self):
self.json_dataset_size=self.json_dataset['totalElements']
return self.json_dataset_size
def return_excel(self):
pd.DataFrame(self.json_dataset).to_excel("South_africa.xlsx")
'''
Following query will pull in all indicators of Senegal and Cote d'Ivoire for the years 2000 and 2001
curl -X DELETE "localhost:9200/unsd_check"
if __name__=='__main__':
obj=UNSDapi('Indicator')
obj.indicator_link(areacode=['384','686']) # areacode is 686 and 384 for Côte d'Ivoire and Senegal respectively.
obj.Extract_json()
print(obj.return_json_size())
obj.return_excel()
'''
# Pull all indicators for area code '170' in one large page (pagesize=9230)
# and write them to an Excel sheet via return_excel.
# NOTE(review): the original inline comment referenced Senegal/Côte d'Ivoire
# (686/384) and years 2000-2003, which does not match the code below —
# confirm the intended area code and time period.
if __name__=='__main__':
    obj=UNSDapi('Indicator')
    obj.indicator_link(areacode=['170'], pagesize=9230)  # one large page avoids paging through many requests
    print(obj.link)
    obj.Extract_json()
    obj.return_excel()
    #obj.sendtorepo('elasticsearch.taiyo.io',9200)
    #print(obj.return_json_size())
    #obj.return_excel()
|
<filename>examples/simon_speck/speck.py
from __future__ import print_function
class SpeckCipher:
    """Pure-Python implementation of the Speck lightweight block cipher.

    Supports every standard Speck block-size/key-size combination (see
    ``__valid_setups``) and several block-cipher modes of operation
    (``__valid_modes``).  Keys, IVs, counters, plaintexts and ciphertexts
    are all exchanged as Python ints.
    """
    # valid cipher configurations stored:
    # block_size:{key_size:number_rounds}
    __valid_setups = {32: {64: 22},
                      48: {72: 22, 96: 23},
                      64: {96: 26, 128: 27},
                      96: {96: 28, 144: 29},
                      128: {128: 32, 192: 33, 256: 34}}
    __valid_modes = ['ECB', 'CTR', 'CBC', 'PCBC', 'CFB', 'OFB']

    def encrypt_round(self, x, y, k):
        """Complete one round of the Feistel operation (forward direction).

        x, y are the two word-sized halves of the state; k is the round key.
        """
        # Rotate x right by alpha bits (shift pair + mask emulates the rotate).
        rs_x = ((x << (self.word_size - self.alpha_shift)) + (x >> self.alpha_shift)) & self.mod_mask
        # Modular addition with y, then XOR in the round key.
        add_sxy = (rs_x + y) & self.mod_mask
        new_x = k ^ add_sxy
        # Rotate y left by beta bits and XOR with the new x half.
        ls_y = ((y >> (self.word_size - self.beta_shift)) + (y << self.beta_shift)) & self.mod_mask
        new_y = new_x ^ ls_y
        return new_x, new_y

    def decrypt_round(self, x, y, k):
        """Complete one round of the inverse Feistel operation."""
        # Undo the final XOR and the beta rotation of y.
        xor_xy = x ^ y
        new_y = ((xor_xy << (self.word_size - self.beta_shift)) + (xor_xy >> self.beta_shift)) & self.mod_mask
        # Undo the round-key XOR, then the modular addition (mod 2**word_size).
        xor_xk = x ^ k
        if xor_xk >= new_y:
            msub = xor_xk - new_y
        else:
            # Wrap-around subtraction expressed via mod_mask (= 2**word_size - 1).
            msub = ((xor_xk - new_y) % self.mod_mask) + 1
        # Undo the alpha rotation of x.
        new_x = ((msub >> (self.word_size - self.alpha_shift)) + (msub << self.alpha_shift)) & self.mod_mask
        return new_x, new_y

    def __init__(self, key, key_size=128, block_size=128, mode='ECB', init=0, counter=0):
        """Initialize the cipher.

        key        : int key material (truncated to key_size bits)
        key_size   : key length in bits; must be valid for the block size
        block_size : block length in bits (32/48/64/96/128)
        mode       : one of 'ECB', 'CTR', 'CBC', 'PCBC', 'CFB', 'OFB'
        init       : int initialization vector (truncated to block_size bits)
        counter    : int starting counter value for CTR mode

        Raises KeyError / ValueError / TypeError (after printing a hint)
        on invalid sizes, mode, IV, counter or key.
        """
        # Setup block/word size
        try:
            self.possible_setups = self.__valid_setups[block_size]
            self.block_size = block_size
            self.word_size = self.block_size >> 1
        except KeyError:
            print('Invalid block size!')
            print('Please use one of the following block sizes:', [x for x in self.__valid_setups.keys()])
            raise
        # Setup Number of Rounds and Key Size
        try:
            self.rounds = self.possible_setups[key_size]
            self.key_size = key_size
        except KeyError:
            print('Invalid key size for selected block size!!')
            print('Please use one of the following key sizes:', [x for x in self.possible_setups.keys()])
            raise
        # Create Properly Sized bit mask for truncating addition and left shift outputs
        self.mod_mask = (2 ** self.word_size) - 1
        # Setup Circular Shift Parameters (Speck32/64 uses 7/2, all others 8/3)
        if self.block_size == 32:
            self.beta_shift = 2
            self.alpha_shift = 7
        else:
            self.beta_shift = 3
            self.alpha_shift = 8
        # Parse the given iv and truncate it to the block length
        try:
            self.iv = init & ((2 ** self.block_size) - 1)
            self.iv_upper = self.iv >> self.word_size
            self.iv_lower = self.iv & self.mod_mask
        except (ValueError, TypeError):
            print('Invalid IV Value!')
            print('Please Provide IV as int')
            raise
        # Parse the given Counter and truncate it to the block length
        try:
            self.counter = counter & ((2 ** self.block_size) - 1)
        except (ValueError, TypeError):
            print('Invalid Counter Value!')
            print('Please Provide Counter as int')
            raise
        # Check Cipher Mode
        try:
            position = self.__valid_modes.index(mode)
            self.mode = self.__valid_modes[position]
        except ValueError:
            print('Invalid cipher mode!')
            print('Please use one of the following block cipher modes:', self.__valid_modes)
            raise
        # Parse the given key and truncate it to the key length
        try:
            self.key = key & ((2 ** self.key_size) - 1)
        except (ValueError, TypeError):
            print('Invalid Key Value!')
            print('Please Provide Key as int')
            raise
        # Pre-compile key schedule: split the key into words, then expand the
        # round keys by running the round function with the round index as key.
        self.key_schedule = [self.key & self.mod_mask]
        l_schedule = [(self.key >> (x * self.word_size)) & self.mod_mask for x in
                      range(1, self.key_size // self.word_size)]
        for x in range(self.rounds - 1):
            new_l_k = self.encrypt_round(l_schedule[x], self.key_schedule[x], x)
            l_schedule.append(new_l_k[0])
            self.key_schedule.append(new_l_k[1])

    def encrypt(self, plaintext):
        """Encrypt one block (an int) under the configured mode and return an int."""
        try:
            # Split the block into upper (b) and lower (a) word halves.
            b = (plaintext >> self.word_size) & self.mod_mask
            a = plaintext & self.mod_mask
        except TypeError:
            print('Invalid plaintext!')
            print('Please provide plaintext as int')
            raise
        if self.mode == 'ECB':
            for x in self.key_schedule:
                b, a = self.encrypt_round(b, a, x)
        elif self.mode == 'CTR':
            # Encrypt (iv + counter) and XOR the keystream into the plaintext.
            true_counter = self.iv + self.counter
            d = (true_counter >> self.word_size) & self.mod_mask
            c = true_counter & self.mod_mask
            for x in self.key_schedule:
                d, c = self.encrypt_round(d, c, x)
            b ^= d
            a ^= c
            self.counter += 1
        elif self.mode == 'CBC':
            # XOR with the previous ciphertext (IV), then carry this ciphertext
            # forward as the next IV.
            b ^= self.iv_upper
            a ^= self.iv_lower
            for x in self.key_schedule:
                b, a = self.encrypt_round(b, a, x)
            self.iv_upper = b
            self.iv_lower = a
            self.iv = (b << self.word_size) + a
        elif self.mode == 'PCBC':
            # Next IV is plaintext XOR ciphertext.
            f, e = b, a
            b ^= self.iv_upper
            a ^= self.iv_lower
            for x in self.key_schedule:
                b, a = self.encrypt_round(b, a, x)
            self.iv_upper = (b ^ f)
            self.iv_lower = (a ^ e)
            self.iv = (self.iv_upper << self.word_size) + self.iv_lower
        elif self.mode == 'CFB':
            # Encrypt the IV, XOR into the plaintext; ciphertext becomes the next IV.
            d = self.iv_upper
            c = self.iv_lower
            for x in self.key_schedule:
                d, c = self.encrypt_round(d, c, x)
            b ^= d
            a ^= c
            self.iv_upper = b
            self.iv_lower = a
            self.iv = (b << self.word_size) + a
        elif self.mode == 'OFB':
            # Encrypt the IV to get the keystream; the keystream is the next IV.
            d = self.iv_upper
            c = self.iv_lower
            for x in self.key_schedule:
                d, c = self.encrypt_round(d, c, x)
            self.iv_upper = d
            self.iv_lower = c
            self.iv = (d << self.word_size) + c
            b ^= d
            a ^= c
        ciphertext = (b << self.word_size) + a
        return ciphertext

    def decrypt(self, ciphertext):
        """Decrypt one block (an int) under the configured mode and return an int.

        Stream-style modes (CTR/CFB/OFB) run the forward round function,
        since decryption there is XOR with the same keystream.
        """
        try:
            b = (ciphertext >> self.word_size) & self.mod_mask
            a = ciphertext & self.mod_mask
        except TypeError:
            print('Invalid ciphertext!')
            print('Please provide plaintext as int')
            raise
        if self.mode == 'ECB':
            for x in reversed(self.key_schedule):
                b, a = self.decrypt_round(b, a, x)
        elif self.mode == 'CTR':
            # Same keystream as encryption; XOR removes it.
            true_counter = self.iv + self.counter
            d = (true_counter >> self.word_size) & self.mod_mask
            c = true_counter & self.mod_mask
            for x in self.key_schedule:
                d, c = self.encrypt_round(d, c, x)
            b ^= d
            a ^= c
            self.counter += 1
        elif self.mode == 'CBC':
            # Keep the incoming ciphertext; it becomes the next IV.
            f, e = b, a
            for x in reversed(self.key_schedule):
                b, a = self.decrypt_round(b, a, x)
            b ^= self.iv_upper
            a ^= self.iv_lower
            self.iv_upper = f
            self.iv_lower = e
            self.iv = (f << self.word_size) + e
        elif self.mode == 'PCBC':
            f, e = b, a
            for x in reversed(self.key_schedule):
                b, a = self.decrypt_round(b, a, x)
            b ^= self.iv_upper
            a ^= self.iv_lower
            # Next IV is recovered plaintext XOR ciphertext.
            self.iv_upper = (b ^ f)
            self.iv_lower = (a ^ e)
            self.iv = (self.iv_upper << self.word_size) + self.iv_lower
        elif self.mode == 'CFB':
            # Encrypt the old IV for the keystream; the ciphertext becomes
            # the next IV (stored before the XOR below overwrites b, a).
            d = self.iv_upper
            c = self.iv_lower
            self.iv_upper = b
            self.iv_lower = a
            self.iv = (b << self.word_size) + a
            for x in self.key_schedule:
                d, c = self.encrypt_round(d, c, x)
            b ^= d
            a ^= c
        elif self.mode == 'OFB':
            d = self.iv_upper
            c = self.iv_lower
            for x in self.key_schedule:
                d, c = self.encrypt_round(d, c, x)
            self.iv_upper = d
            self.iv_lower = c
            self.iv = (d << self.word_size) + c
            b ^= d
            a ^= c
        plaintext = (b << self.word_size) + a
        return plaintext

    def update_iv(self, new_iv=None):
        """Replace the IV (truncated to block size) and return the current IV.

        NOTE: because of the truthiness test, passing 0 (or None) leaves the
        IV unchanged and simply returns the current value.
        """
        if new_iv:
            try:
                self.iv = new_iv & ((2 ** self.block_size) - 1)
                self.iv_upper = self.iv >> self.word_size
                self.iv_lower = self.iv & self.mod_mask
            except TypeError:
                print('Invalid Initialization Vector!')
                print('Please provide IV as int')
                raise
        return self.iv
if __name__ == "__main__":
    # Smoke test: encrypt the Speck128/256 reference plaintext under the
    # reference key (ECB) and print the resulting ciphertext in hex.
    demo_key = 0x1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100
    demo_cipher = SpeckCipher(demo_key, 256, 128, 'ECB')
    result = demo_cipher.encrypt(0x65736f6874206e49202e72656e6f6f70)
    print(hex(result))
|
import numpy as np
def get_vectors(all_sentences, word_2_vec):
    """Convert tokenized sentences into a stacked array of word vectors.

    Parameters
    ----------
    all_sentences : list of list of str
        Tokenized sentences.  All sentences must have the same length
        (required by the final ``np.concatenate`` over axis 0).
    word_2_vec : dict
        Maps word -> vector (any array-like of numbers or numeric strings).

    Returns
    -------
    all_sentences_vecs : numpy.ndarray of shape (n_sentences, sent_len, dim)
    all_OOV_words : list of str
        Words that were neither in the vocabulary nor in the small
        stop-word list and therefore received the mean vector.
    """
    # Mean of all known vectors, used as the embedding for OOV words.
    # Wrap .values() in list(): np.vstack expects a sequence, not a dict view.
    random_vec = np.mean(np.vstack(list(word_2_vec.values())).astype('float'), axis=0)
    all_OOV_words = []
    all_sentences_vecs = []
    for sentence in all_sentences:
        sentence_vecs = []
        for word in sentence:
            if word not in word_2_vec:
                if word in ('a', 'an', 'to', 'of', 'and'):
                    # Map common function words onto the vector for 'from'.
                    vec = word_2_vec['from']
                else:
                    vec = random_vec
                    all_OOV_words.append(word)
            else:
                vec = word_2_vec[word]
            sentence_vecs.append(vec)
        sentence_vecs = np.vstack(sentence_vecs).astype('float')
        # Add a leading axis so sentences stack into one 3-D array.
        all_sentences_vecs.append(sentence_vecs[np.newaxis])
    all_sentences_vecs = np.concatenate(all_sentences_vecs, axis=0)
    return all_sentences_vecs, all_OOV_words
def load_data():
    """Load word vectors and a tagged news corpus, returning padded arrays.

    Reads two hard-coded files from the current directory:
    'wordvecs.txt' (word followed by its vector components per line) and
    'news_tagged_data.txt' (one "word label" pair per line, sentences
    separated by blank lines).

    Returns
    -------
    all_sentences_vecs : array (n_sentences, longest_len, dim) of word vectors
    all_masks : array (n_sentences, longest_len); 1 for real tokens, 0 for padding
    all_padded_targets : array (n_sentences, longest_len, 9) one-hot labels
    word_2_vec : dict word -> vector (includes the 'reyhan' padding token)
    index_to_target : dict class index -> label string
    """
    # word -> vector (kept as strings here; converted to float in get_vectors).
    word_2_vec = {}
    with open('wordvecs.txt') as f:
        for line in f:
            line = line.split()
            word = line[0]
            vec = line[1:]
            word_2_vec[word] = vec
    all_sentences = []
    all_targets = []
    is_end_of_a_sentence = False
    sentence = []
    sentence_labels = []
    with open('news_tagged_data.txt') as f:
        for line in f:
            if line == '\n':
                is_end_of_a_sentence = True
            if is_end_of_a_sentence:
                # Blank line closes the current sentence.
                # NOTE(review): a final sentence without a trailing blank line
                # would be silently dropped — confirm the data file ends blank.
                all_sentences.append(sentence)
                all_targets.append(sentence_labels)
                sentence = []
                sentence_labels = []
                is_end_of_a_sentence = False
            else:
                line = line.strip()
                line = line.lower()
                line = line.split()
                word = line[0]
                word_label = line[1]
                sentence.append(word)
                sentence_labels.append(word_label)
    # Pad every sentence to the length of the longest one.
    longest_len = max([len(s) for s in all_sentences])
    list_of_all_targets = []
    for targets in all_targets:
        list_of_all_targets.extend(targets)
    total_number_of_classes = len(set(list_of_all_targets))
    # Both mappings enumerate the same set object, so the orderings agree
    # within this process.
    target_to_index = {}
    set_of_all_targets = set(list_of_all_targets)
    for index,target in enumerate(set_of_all_targets):
        target_to_index[target] = index
    index_to_target = {}
    for index, target in enumerate(set_of_all_targets):
        index_to_target[index] = target
    all_padded_sentences = []
    all_padded_targets = []
    all_masks = []
    for sentence, target in zip(all_sentences, all_targets):
        length = len(sentence)
        to_pad = longest_len - length
        # 'reyhan' is the padding token; it is given a zero vector below.
        padding = ['reyhan'] * to_pad
        sentence = sentence + padding
        mask = np.ones(longest_len)
        if to_pad != 0:
            mask[-to_pad:] = np.zeros(to_pad)
        target = [target_to_index[t] for t in target]
        # Padding positions get class index 0 — presumably ignored downstream
        # via all_masks; verify against the training code.
        target = target + list(np.zeros(to_pad))
        all_padded_sentences.append(sentence)
        all_masks.append(mask)
        # NOTE(review): hard-codes 9 classes and a padded length of 29
        # (range(29) must equal longest_len) — confirm these match the corpus.
        onehot = np.zeros((len(target), 9))
        onehot[range(29), target] = 1
        all_padded_targets.append(onehot[np.newaxis])
    # assumes the word vectors are 300-dimensional — TODO confirm against
    # wordvecs.txt.
    word_2_vec['reyhan'] = np.zeros(300)
    all_sentences_vecs, all_OOV_words = get_vectors(all_padded_sentences, word_2_vec)
    all_masks = np.vstack(all_masks)
    all_padded_targets = np.concatenate(all_padded_targets, axis=0)
    return all_sentences_vecs, all_masks, all_padded_targets, word_2_vec, index_to_target
import numpy as np
from collections import Counter
from railrl.exploration_strategies.count_based.compute_obs_mean_std import compute_obs_mean_std
from railrl.misc.np_util import bin2int, softmax
class CountExploration:
    """Count-based exploration via random-projection hashing of observations.

    Each observation is (optionally) standardized, projected through a fixed
    random Gaussian matrix into ``hash_dim`` dimensions, binarized by sign,
    and the resulting bit pattern becomes a discrete bin.  Per-bin visit
    counts drive the exploration bonus ``(count + eps) ** -0.5``.
    """

    def __init__(self,
                 env,
                 hash_dim=16,
                 observation_key='observation',
                 num_samples=5000,
                 normalize_obs=False,
                 obs_mean=None,
                 obs_std=None,
                 hashing_matrix=None,
                 ):
        """
        env              : environment with a dict observation space
        hash_dim         : number of hash bits (projection output size)
        observation_key  : key of the observation sub-space to hash
        num_samples      : samples used to estimate mean/std when normalizing
        normalize_obs    : whether to standardize observations before hashing
        obs_mean/obs_std : optional precomputed normalization statistics
        hashing_matrix   : optional fixed (obs_dim x hash_dim) projection
        """
        self.obs_dim = env.observation_space.spaces[observation_key].low.size
        self.hash_dim = hash_dim
        if hashing_matrix is not None:
            self.hashing_matrix = hashing_matrix
        else:
            # Fixed random Gaussian projection used for all future hashing.
            self.hashing_matrix = np.reshape(
                np.random.normal(0, 1, self.obs_dim * self.hash_dim),
                (self.obs_dim, self.hash_dim))
        self.counts = Counter()
        if normalize_obs and (obs_mean is None or obs_std is None):
            # Estimate normalization statistics from environment rollouts.
            obs_mean, obs_std = compute_obs_mean_std(
                env, N=num_samples, observation_key=observation_key)
        elif not normalize_obs:
            # No normalization: identity transform.
            obs_mean, obs_std = np.zeros(self.obs_dim), np.ones(self.obs_dim)
        # else: normalize_obs is True and BOTH statistics were supplied by the
        # caller, so use them as-is.  (This case previously raised
        # NotImplementedError, which defeated the purpose of the
        # obs_mean/obs_std parameters.)
        self.obs_mean = obs_mean
        self.obs_std = obs_std + .00001  # guard against division by zero
        self.env = env
        self.observation_key = observation_key

    def increment_counts(self, observations):
        """Record one visit to the bin of every observation in the batch."""
        for code in self._observations_to_bins(observations):
            self.counts[code] += 1

    def get_counts(self, observations):
        """Return the current visit count for each observation's bin."""
        bins = self._observations_to_bins(observations)
        # 'b' instead of the builtin-shadowing name 'bin'.
        return np.array([self.counts[b] for b in bins], dtype=np.float32)

    def _observations_to_bins(self, observations):
        """Hash observations to integer bin codes (sign of random projection)."""
        observations = np.divide(observations - self.obs_mean, self.obs_std)
        projected = np.dot(observations, self.hashing_matrix)
        sign_bits = np.where(projected > 0, 1, 0)
        codes = bin2int(sign_bits.T)
        if codes.shape == ():
            # Single observation: promote the scalar code to a 1-element array.
            codes = np.array([codes])
        return codes

    def compute_count_based_reward(self, observations):
        """Exploration bonus ~ 1/sqrt(visit count); returned with shape (N, 1)."""
        new_obs_counts = self.get_counts(observations)
        return ((new_obs_counts + .0001) ** (-1 / 2)).reshape(-1, 1)

    def clear_counter(self):
        """Reset all visit counts."""
        self.counts = Counter()
class CountExplorationCountGoalSampler(CountExploration):
    '''
    Steps:
    1. take in a bunch of randomly sampled goals
    2. compute the count_based reward for those goals
    3. compute softmax prob dist from those rewards
    4. use the softmax dist to pick one of the goals you had sampled originally
    goal space has to be equal to obs space ie use achieved goals to hash
    '''
    def __init__(self,
                 theta=1.0,
                 replay_buffer=None,
                 goal_key='desired_goal',
                 num_count_based_goals=100,
                 use_softmax=True,
                 **kwargs
                 ):
        # theta: softmax temperature for turning count-based rewards into
        #   sampling probabilities.
        # replay_buffer: optional; when given, candidate goals are drawn from
        #   stored next-observations instead of env.sample_goals.
        # goal_key: key used to extract goals when sampling from the env.
        # num_count_based_goals: number of candidate goals scored per draw.
        # use_softmax: if False, candidates are sampled uniformly.
        # Remaining kwargs configure the CountExploration base class.
        self.theta = theta
        self.goal_key = goal_key
        self.num_count_based_goals = num_count_based_goals
        self.use_softmax = use_softmax
        self.replay_buffer = replay_buffer
        super().__init__(**kwargs)

    def get_count_based_goal(self):
        """Sample one goal, biased toward rarely-visited (low-count) bins."""
        if len(self.counts.keys()) == 0:
            # initially sample a random goal
            return self.env.sample_goal()
        if self.replay_buffer is not None:
            # NOTE(review): indexes the buffer with observation_key, not
            # goal_key — consistent with the class docstring ("use achieved
            # goals to hash"), but verify this matches the buffer layout.
            # Also relies on the buffer's private _sample_indices/_next_obs.
            indices = self.replay_buffer._sample_indices(self.num_count_based_goals)
            goals = self.replay_buffer._next_obs[self.observation_key][indices]
        else:
            goals = self.env.sample_goals(self.num_count_based_goals)[self.goal_key]
        count_based_rewards = self.compute_count_based_reward(goals)
        if self.use_softmax:
            probabilities = softmax(count_based_rewards, self.theta).reshape(-1)
        else:
            # Uniform fallback: every candidate equally likely.
            probabilities = np.ones(self.num_count_based_goals) * 1 / self.num_count_based_goals
        idxs = np.array(list(range(self.num_count_based_goals)))
        idx = np.random.choice(idxs, p=probabilities)
        return goals[idx]
|
<reponame>jpfxgood/dashboard
# Copyright 2017 <NAME> unicode curses based graphics package
""" module that implements a graphics package using block graphics on curses window """
import locale
locale.setlocale(locale.LC_ALL,"")
import curses
import curses.ascii
import sys
import os
import math
def angle_point(x0, y0, a, radius):
    """Return the integer (x, y) point at angle *a* (degrees) and distance
    *radius* from (x0, y0); x is stretched by 1.5 to compensate for the
    tall aspect ratio of terminal character cells."""
    rad = math.radians(a)
    px = int(x0 + 1.5 * math.cos(rad) * radius)
    py = int(y0 + math.sin(rad) * radius)
    return (px, py)
class Canvas:
    """ primitive drawing surface attached to a curses window

    Each character cell is treated as a 2x2 matrix of "pixels" rendered with
    Unicode quadrant block characters, so a win of R rows x C cols gives a
    2R x 2C pixel canvas. """
    # pixel index within a cell (x%2 + 2*(y%2)) -> its bit in the quadrant mask
    to_mask = { 0:1, 1:2, 2:4, 3:8 }
    # quadrant bitmask -> Unicode block element covering those quadrants
    mask_to_char = {
        0 :'\u2008',
        1 :'\u2598',
        2 :'\u259d',
        3 :'\u2580',
        4 :'\u2596',
        5 :'\u258c',
        6 :'\u259e',
        7 :'\u259b',
        8 :'\u2597',
        9 :'\u259a',
        10:'\u2590',
        11:'\u259c',
        12:'\u2584',
        13:'\u2599',
        14:'\u259f',
        15:'\u2588'
    }
    # inverse of mask_to_char, used to read back the current cell state
    char_to_mask = {
        '\u2008':0 ,
        '\u2598':1 ,
        '\u259d':2 ,
        '\u2580':3 ,
        '\u2596':4 ,
        '\u258c':5 ,
        '\u259e':6 ,
        '\u259b':7 ,
        '\u2597':8 ,
        '\u259a':9 ,
        '\u2590':10,
        '\u259c':11,
        '\u2584':12,
        '\u2599':13,
        '\u259f':14,
        '\u2588':15
    }
    def __init__(self, win = None ):
        """ constructor, can be initialized with a window to draw on, otherwise window must be set later by set_window """
        self.set_win(win)
    def to_rowcol(self, x, y ):
        """ return character row col for input x,y coordinates """
        return (int(y/2),int(x/2))
    def from_rowcol(self, row, col ):
        """ return the pixel location of a character position, returns upper left pixel in matrix"""
        return (int(row)*2,int(col)*2)
    def round_text_position(self, x, y):
        """ adjust a text position so that it always ends up down and to the right if it is at a half pixel offset """
        r,c = self.to_rowcol(x,y)
        y1,x1 = self.from_rowcol(r,c)
        h,w = self.from_rowcol(1,1)
        if y1 < y:
            y = y + h/2
        if x1 < x:
            x = x + w/2
        return x, y
    def round_text_x_position(self, x):
        """ adjust a text x position so that it always ends up to the right if it is at a half pixel offset """
        r,c = self.to_rowcol(x,0)
        y1,x1 = self.from_rowcol(r,c)
        h,w = self.from_rowcol(1,1)
        if x1 < x:
            x = x + w/2
        return x
    def round_text_y_position(self, y):
        """ adjust a text y position so that it always ends up down if it is at a half pixel offset """
        r,c = self.to_rowcol(0,y)
        y1,x1 = self.from_rowcol(r,c)
        h,w = self.from_rowcol(1,1)
        if y1 < y:
            y = y + h/2
        return y
    def set_win(self, win ):
        """ point this canvas at a window and initialize things, will blank out the window """
        self.win = win
        self.init_win()
    def init_win(self):
        """ initializes the window and sets up all of the defaults """
        curses.init_pair(5,curses.COLOR_BLACK,curses.COLOR_BLACK)
        curses.init_pair(1,curses.COLOR_GREEN,curses.COLOR_BLACK)
        curses.init_pair(2,curses.COLOR_RED,curses.COLOR_BLACK)
        curses.init_pair(3,curses.COLOR_CYAN,curses.COLOR_BLACK)
        curses.init_pair(4,curses.COLOR_WHITE,curses.COLOR_BLACK)
        self.green = curses.color_pair(1)
        self.red = curses.color_pair(2)
        self.cyan = curses.color_pair(3)
        self.white = curses.color_pair(4)
        self.black = curses.color_pair(5)
        if curses.can_change_color():
            # build an extended palette of generated colors when supported
            self.color_min = 8
            self.color_max = 256
            red = 0
            green = 100
            blue = 20
            for c in range(self.color_min,self.color_max):
                curses.init_color(c,red,green,blue)
                red += 23
                green += 33
                blue += 53
                red = red % 1000
                green = green % 1000
                blue = blue % 1000
            for cidx in range(self.color_min,self.color_max):
                curses.init_pair(cidx,cidx,curses.COLOR_BLACK)
        else:
            self.color_min = 0
            self.color_max = 8
        if self.win:
            # char_map mirrors what is on screen, one entry per character cell
            self.max_y,self.max_x = self.win.getmaxyx()
            self.char_map = [[None] * self.max_y for i in range(self.max_x)]
            # pixel dimensions are 2x the character dimensions
            self.max_y = self.max_y * 2
            self.max_x = self.max_x * 2
        else:
            self.max_y,self.max_x = (0,0)
            self.char_map = None
    def clear( self ):
        """ clear the entire canvas """
        self.char_map = [[None] * self.max_y for i in range(self.max_x)]
    def refresh( self ):
        """ refresh the display after drawing """
        self.win.refresh()
    def get_maxxy( self ):
        """ return the maximum number of x and y pixels that are available in this canvas """
        return (self.max_x,self.max_y)
    def put_pixel( self, x,y, color, set = True ):
        """ turn on a pixel with the color indicated; set=False XORs the pixel's quadrant bit instead """
        if x < 0 or x >= self.max_x or y < 0 or y >= self.max_y:
            return
        row,col = self.to_rowcol(x,y)
        if row >= self.max_y//2 or col >= self.max_x//2:
            return
        mask = self.to_mask[(int(x)%2)+((int(y)%2)*2)]
        if not self.char_map[col][row]:
            current_mask = 0
        else:
            current_mask = self.char_to_mask[self.char_map[col][row]]
        if set:
            self.char_map[col][row] = self.mask_to_char[ mask | current_mask ]
        else:
            self.char_map[col][row] = self.mask_to_char[ mask ^ current_mask ]
        try:
            self.win.addstr(row,col,self.char_map[col][row].encode('utf_8'),color)
        except curses.error:
            # addstr raises curses.error when writing the bottom-right cell
            # (cursor cannot advance); the character is still drawn, so this
            # is safe to ignore.  (Was a bare except; narrowed so real bugs
            # are no longer swallowed.)
            pass
    def line(self, x0, y0, x1, y1, color, put_pixel=None ):
        """ draw a line between x0,y0 and x1,y1 in color (Bresenham-style error accumulation) """
        def set_pixel(x,y,color):
            # route output through the caller's put_pixel override when given
            if put_pixel:
                put_pixel(x,y,color)
            else:
                self.put_pixel(x,y,color)
        dx = abs(x1 - x0)
        dy = abs(y1 - y0)
        x, y = int(x0), int(y0)
        sx = -1 if x0 > x1 else 1
        sy = -1 if y0 > y1 else 1
        if dx > dy:
            err = dx / 2.0
            while x != int(x1):
                set_pixel(x, y,color)
                err -= dy
                if err < 0:
                    y += sy
                    err += dx
                x += sx
        else:
            err = dy / 2.0
            while y != int(y1):
                set_pixel(x, y,color)
                err -= dx
                if err < 0:
                    x += sx
                    err += dy
                y += sy
        set_pixel(x, y, color)
    def intersect( self, seg1, seg2, clip_to_seg = False ):
        """ find the intersection of two segments as tuples (x0,y0,x1,y1) returns tuple (x,y) if no intersection returns None """
        def lineform( seg ):
            """ return A, B, C for the standard line formula Ax + By = C """
            A = float(seg[1]-seg[3])
            B = float(seg[2]-seg[0])
            C = A*seg[0]+B*seg[1]
            return (A,B,C)
        l1 = lineform(seg1)
        l2 = lineform(seg2)
        det = l1[0]*l2[1] - l2[0]*l1[1]
        if det != 0:
            # Cramer's rule for the 2x2 system
            x = (l2[1]*l1[2] - l1[1]*l2[2])/det
            y = (l1[0]*l2[2] - l2[0]*l1[2])/det
            if clip_to_seg:
                if x >= min(seg1[0],seg1[2]) and x <= max(seg1[0],seg1[2]) and y >= min(seg1[1],seg1[3]) and y <= max(seg1[1],seg1[3]):
                    return (int(x),int(y))
            else:
                return (int(x),int(y))
        return None
    def cross_product_length( self, pA,pB,pC ):
        """ compute the cross product of AB x BC """
        BAx = float(pA[0] - pB[0])
        BAy = float(pA[1] - pB[1])
        BCx = float(pC[0] - pB[0])
        BCy = float(pC[1] - pB[1])
        return (BAx * BCy - BAy * BCx)
    def is_convex( self, points ):
        """ take a list of (x,y) tuples representing the vertecies of a polygon in order and return True if it represents a convex polygon, False otherwise """
        got_negative = False
        got_positive = False
        num_points = len(points)
        if num_points <= 3:
            return True
        min_x,min_y,max_x,max_y = self.get_bounds(points)
        # degenerate (pixel-thin) polygons are treated as convex
        if max_x-min_x <= 1.0 or max_y-min_y <= 1.0:
            return True
        for A in range(num_points):
            B = (A+1)%num_points
            C = (B+1)%num_points
            cross_product = self.cross_product_length(points[A],points[B],points[C])
            if cross_product < 0:
                got_negative = True
            elif cross_product > 0:
                got_positive = True
        # convex iff every turn has the same sign
        return not (got_negative and got_positive)
    def get_bounds(self,points):
        """ return tuple (min_x,min_y,max_x,max_y) for list of points """
        min_x = -1
        min_y = -1
        max_x = -1
        max_y = -1
        for x,y in points:
            if min_x < 0 or x < min_x:
                min_x = x
            if min_y < 0 or y < min_y:
                min_y = y
            if max_x < 0 or x > max_x:
                max_x = x
            if max_y < 0 or y > max_y:
                max_y = y
        return (min_x,min_y,max_x,max_y)
    def clip_polygon(self, points, minX, minY, maxX, maxY, dir=-1 ):
        """ clip a polygon against the bounds exressed by minX,minY to maxX,maxY and return either None for nothing inside or the points for the polygon dir is -1 all,0=top,1=right,2=bottom,3=left
        (Sutherland-Hodgman-style: clip against one edge at a time) """
        def inside( p, minX, minY, maxX, maxY, dir ):
            x,y = p
            if dir == 0:
                return(y >= minY)
            elif dir == 1:
                return(x < maxX)
            elif dir == 2:
                return(y < maxY)
            elif dir == 3:
                return(x >= minX)
        def intersect(sp, ep, minX, minY, maxX, maxY, dir ):
            x0,y0 = sp
            x1,y1 = ep
            s1 = (x0,y0,x1,y1)
            if dir == 0:
                s2 = (minX,minY,maxX,minY)
            elif dir == 1:
                s2 = (maxX,minY,maxX,maxY)
            elif dir == 2:
                s2 = (minX,maxY,maxX,maxY)
            elif dir == 3:
                s2 = (minX,minY,minX,maxY)
            return self.intersect(s1,s2,False)
        if dir == -1:
            # clip against all four edges in turn
            for d in [0,1,2,3]:
                points = self.clip_polygon(points,minX,minY,maxX,maxY,d)
                if not points:
                    return None
            return points
        else:
            sp = points[-1]
            out_points = []
            for ep in points:
                if inside(ep,minX,minY,maxX,maxY,dir):
                    if inside(sp,minX,minY,maxX,maxY,dir):
                        out_points.append(ep)
                    else:
                        ip = intersect(sp,ep,minX,minY,maxX,maxY,dir)
                        out_points.append(ip)
                        out_points.append(ep)
                else:
                    if inside(sp,minX,minY,maxX,maxY,dir):
                        ip = intersect(sp,ep,minX,minY,maxX,maxY,dir)
                        out_points.append(ip)
                sp = ep
            return out_points if out_points else None
    def rasterize( self, points, color, put_pixel=None):
        """ sort points representing the boundary of a filled shape and rasterize by filling lines with color """
        # sort by y then x so pixels on the same scanline are adjacent
        ps = sorted(points,key=lambda x: (x[1],x[0]))
        n_points = len(ps)
        if n_points == 0:
            return
        elif n_points == 1:
            x,y = ps[0]
            if put_pixel:
                put_pixel(x,y,color)
            else:
                self.put_pixel(x,y,color)
        else:
            # walk the sorted pixels; for each scanline draw one span from the
            # leftmost to the rightmost boundary pixel
            idx = 1
            x0,y0 = ps[0]
            x1,y1 = x0,y0
            while idx < len(ps):
                xn,yn = ps[idx]
                if yn == y0:
                    x1,y1 = xn,yn
                else:
                    if x0 == x1:
                        if put_pixel:
                            put_pixel(x0,y0,color)
                        else:
                            self.put_pixel(x0,y0,color)
                    else:
                        self.line(x0,y0,x1,y1,color,put_pixel)
                    x0,y0 = xn,yn
                    x1,y1 = x0,y0
                idx += 1
            # emit the final scanline span
            if x0 == x1:
                if put_pixel:
                    put_pixel(x0,y0,color)
                else:
                    self.put_pixel(x0,y0,color)
            else:
                self.line(x0,y0,x1,y1,color,put_pixel)
    def circle(self, x0, y0, radius, color, fill = False, put_pixel=None ):
        """ draw a circle centered at x0,y0 of radius radius in color (midpoint circle algorithm) """
        points = []
        def circle_point( points, xc,yc,x,y ):
            # each computed octant point is mirrored into all eight octants
            points.extend([(xc+x, yc+y),(xc-x, yc+y),(xc+x, yc-y),(xc-x, yc-y),(xc+y, yc+x),(xc-y, yc+x),(xc+y, yc-x),(xc-y, yc-x)])
        x0 = int(x0)
        y0 = int(y0)
        radius = int(radius)
        x = 0
        y = radius
        d = 3 - 2 * radius
        circle_point(points,x0,y0,x,y)
        while y >= x:
            x += 1
            if d > 0:
                y -= 1
                d = d + 4 * (x - y) + 10
            else:
                d = d + 4 * x + 6
            circle_point(points,x0,y0,x,y)
        # stretch x by 1.5 about the center to correct the cell aspect ratio
        for idx in range(len(points)):
            x,y = points[idx]
            x = int(((x-x0)*1.5)+x0)
            points[idx] = (x,y)
        if not fill:
            for x,y in points:
                if put_pixel:
                    put_pixel(x,y,color)
                else:
                    self.put_pixel(x,y,color)
        else:
            self.rasterize(points,color,put_pixel)
    def arc(self,x0,y0,radius,a0,a1,color,fill=False,put_pixel=None,just_points=False):
        """ draw an arc between a0 degrees to a1 degrees centered at x0,y0 with radius and color """
        points = []
        def circle_point( points, xc,yc,x,y ):
            points.extend([(xc+x, yc+y),(xc-x, yc+y),(xc+x, yc-y),(xc-x, yc-y),(xc+y, yc+x),(xc-y, yc+x),(xc+y, yc-x),(xc-y, yc-x)])
        x0 = int(x0)
        y0 = int(y0)
        radius = int(radius)
        # generate the full circle, then keep only the requested angular range
        x = 0
        y = radius
        d = 3 - 2 * radius
        circle_point(points,x0,y0,x,y)
        while y >= x:
            x += 1
            if d > 0:
                y -= 1
                d = d + 4 * (x - y) + 10
            else:
                d = d + 4 * x + 6
            circle_point(points,x0,y0,x,y)
        # slightly enlarged radius for a conservative bounding box
        xs,ys = angle_point(x0,y0,a0,radius+0.5)
        xe,ye = angle_point(x0,y0,a1,radius+0.5)
        xm,ym = angle_point(x0,y0,(a0+a1)/2,radius+0.5)
        x_min = min(x0,xs,xe,xm)
        y_min = min(y0,ys,ye,ym)
        x_max = max(x0,xs,xe,xm)
        y_max = max(y0,ys,ye,ym)
        # for drawing the previous one was for bounding
        xs,ys = angle_point(x0,y0,a0,radius)
        xe,ye = angle_point(x0,y0,a1,radius)
        filtered_points = []
        for x,y in points:
            px = int(((x-x0)*1.5)+x0)
            if px >= x_min and px <= x_max and y >= y_min and y <= y_max:
                angle = math.degrees(math.atan2(y-y0,x-x0))
                if angle < 0:
                    angle += 360
                if angle >= a0 and angle <= a1:
                    filtered_points.append((px,y))
        points = filtered_points
        if just_points:
            # caller only wants the raw points delivered via put_pixel
            for x,y in points:
                put_pixel(x,y,color)
        else:
            if not fill:
                # draw the two radial edges plus the arc itself
                self.line(x0,y0,xs,ys,color,put_pixel)
                self.line(x0,y0,xe,ye,color,put_pixel)
                for x,y in points:
                    if put_pixel:
                        put_pixel(x,y,color)
                    else:
                        self.put_pixel(x,y,color)
            else:
                # collect the radial edges into the boundary, then fill
                def add_pixel( x,y,color ):
                    points.append((x,y))
                self.line(x0,y0,xs,ys,color,add_pixel)
                self.line(x0,y0,xe,ye,color,add_pixel)
                self.rasterize(points,color,put_pixel)
    def rect(self,x0,y0,x1,y1,color,fill=False,put_pixel=None):
        """ draw a rectangle bounding x0,y0, x1,y1, in color == color optionally filling """
        x0 = int(x0)
        x1 = int(x1)
        y0 = int(y0)
        y1 = int(y1)
        if not fill:
            # pass put_pixel through so the override is honored for outlines
            # too, matching the filled branch and the other primitives
            # (previously the outline ignored it)
            self.line(x0,y0,x0,y1,color,put_pixel)
            self.line(x0,y1,x1,y1,color,put_pixel)
            self.line(x1,y1,x1,y0,color,put_pixel)
            self.line(x1,y0,x0,y0,color,put_pixel)
        else:
            if y1 < y0:
                y=y0
                y0=y1
                y1 = y
            for y in range(y0,y1):
                self.line(x0,y,x1,y,color,put_pixel)
    def textat(self,x,y,color,message):
        """ draw a text message at a coordinate in the color specified, clipping it to the right edge """
        x,y = self.round_text_position(x,y)
        # pixel height of one text row and pixel width of the whole message
        height, width = self.from_rowcol(1,len(message))
        if x < 0 or x > self.max_x or y < 0 or y > self.max_y:
            return
        if y + height > self.max_y:
            return
        # horizontal overflow must be tested against the message WIDTH
        # (was erroneously compared with height, so long strings were never
        # clipped and short near-edge strings were dropped)
        if x + width > self.max_x:
            # to_rowcol takes (x, y): the x distance to the edge yields the
            # number of characters that still fit (was called with swapped
            # arguments, which always produced clip_width == 0)
            clip_height, clip_width = self.to_rowcol((self.max_x - x), 1)
            if clip_width > 0:
                message = message[:clip_width]
            else:
                return
        row,col = self.to_rowcol(x,y)
        self.win.addstr(row,col,message.encode('utf_8'),color)
    def polyline(self,points,color,put_pixel=None):
        """ draw a polyline defined by the sequence points which represent a list of (x,y) tuples in the order they should be connected in color """
        n_points = len(points)
        if n_points == 0:
            return
        elif n_points == 1:
            x,y = points[0]
            if put_pixel:
                put_pixel(x,y,color)
            else:
                self.put_pixel(x,y,color)
        else:
            for idx in range(n_points-1):
                x0,y0 = points[idx]
                x1,y1 = points[idx+1]
                self.line(x0,y0,x1,y1,color,put_pixel)
    def poly_fill(self,points,color,put_pixel = None):
        """ fill a concave polygon by recursively subdividing until we get a convex polygon """
        clips = []
        minX,minY,maxX,maxY = self.get_bounds(points)
        minX = float(minX)
        minY = float(minY)
        maxX = float(maxX)
        maxY = float(maxY)
        midX = (minX+maxX)/2.0
        midY = (minY+maxY)/2.0
        # start with the four quadrants of the bounding box
        clips.append((minX,minY,midX,midY))
        clips.append((midX,minY,maxX,midY))
        clips.append((midX,midY,maxX,maxY))
        clips.append((minX,midY,midX,maxY))
        while clips:
            minX,minY,maxX,maxY = clips.pop(0)
            if int(minX)==int(maxX) or int(minY)==int(maxY):
                continue
            p = self.clip_polygon(points,minX,minY,maxX,maxY)
            if p:
                if self.is_convex(p):
                    self.polygon(p,color,True,put_pixel)
                else:
                    # still concave: subdivide this region further
                    midX = (minX+maxX)/2.0
                    midY = (minY+maxY)/2.0
                    if midX - minX < 1.0 or midY - minY < 1.0 or maxX - midX < 1.0 or maxY - midY < 1.0:
                        continue
                    clips.append((minX,minY,midX,midY))
                    clips.append((midX,minY,maxX,midY))
                    clips.append((midX,midY,maxX,maxY))
                    clips.append((minX,midY,midX,maxY))
    def polygon(self,points,color,fill=False,put_pixel=None):
        """ draw a polygon defined by the sequence points which represent a list of (x,y) tuples in the order they should be connected in color
        the last point will be connected to the first point. polygons can be filled. """
        if not points:
            return
        convex = True
        if fill:
            convex = self.is_convex(points)
        # collect the boundary pixels first; filling decides what to do with them
        poly_pixels = []
        def put_poly_pixel(x,y,color):
            poly_pixels.append((x,y))
        i = iter(points)
        first = p1 = next(i,None)
        while p1:
            p2 = next(i,None)
            if p2:
                last = p2
                self.line(p1[0],p1[1],p2[0],p2[1],color,put_poly_pixel)
            else:
                last = p1
                put_poly_pixel(p1[0],p1[1],color)
            p1 = p2
        # close the polygon back to the first point
        self.line(first[0],first[1],last[0],last[1],color,put_poly_pixel)
        if not fill:
            for x,y in poly_pixels:
                if put_pixel:
                    put_pixel(x,y,color)
                else:
                    self.put_pixel(x,y,color)
        else:
            if convex:
                self.rasterize( poly_pixels, color, put_pixel)
            else:
                # concave: draw the outline, then fill by subdivision
                for x,y in poly_pixels:
                    if put_pixel:
                        put_pixel(x,y,color)
                    else:
                        self.put_pixel(x,y,color)
                self.poly_fill(points,color,put_pixel)
|
import os
import numpy as np
import matplotlib.pyplot as plt
from . import helper_generic as hlp
from . import helper_site_response as sr
from . import helper_signal_processing as sig
from PySeismoSoil.class_frequency_spectrum import Frequency_Spectrum as FS
from PySeismoSoil.class_Vs_profile import Vs_Profile
class Ground_Motion:
"""
Class implementation of an earthquake ground motion.
Parameters
----------
data : str or numpy.ndarray
If str: the full file name on the hard drive containing the data.
If np.ndarray: the numpy array containing the motion data.
The motion data can be acceleration, velocity, or displacement.
The data can have one column (which contains the motion) or two
columns (1st column: time; 2nd column: motion). If only one column
is supplied, another input parameter ``dt`` must also be supplied.
unit : str
Valid values include:
['m', 'cm',
'm/s', 'cm/s',
'm/s/s', 'cm/s/s', 'gal', 'g']
motion_type : {'accel', 'veloc', 'displ'}
Specifying what type of motion "data" contains. It needs to be
consistent with "unit". For example, if motion_type is "accel" and
unit is "m/s", an exception will be raised.
dt : float
Recording time interval of the ground motion. If ``data`` has only one
column, this parameter must be supplied. If ``data`` has two columns,
this parameter is ignored.
sep : str
Delimiter character for reading the text file. If ``data`` is supplied as
a numpy array, this parameter is ignored.
**kwargs_to_genfromtxt :
Any extra keyword arguments will be passed to ``numpy.genfromtxt()``
function for loading the data from the hard drive (if applicable).
Attributes
----------
dt : float
Recording time interval of the motion.
time : numpy.ndarray
1D numpy array: the time points in seconds.
accel : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the acceleration in SI unit.
veloc : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the velocity in SI unit.
displ : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the displacement in SI unit.
pga, pgv, pgd : float
Peak ground acceleration, velocity, and displacement in SI unit.
pga_in_gal, pga_in_g, pgv_in_cm_s, pgd_in_cm : <float>
PGA, PGV, and PGD in other common units.
Arias_Intensity : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the Arias intensity.
Arias_Intensity_normalized : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the normalized Arias intensity.
peak_Arias_Intensity : float
The last element of the second column of Arias_Intensity.
T5_95 : float
The time interval (in seconds) between 5% of peak Arias intensity
to 95% of peak Arias intensity.
rms_accel, rms_veloc, rms_displ : float
Root-mean-square acceleration, velocity, and displacement of the motion.
_path_name, _file_name : str
Names of the directory and file of the input data, if a file name.
"""
    def __init__(
            self, data, *, unit, motion_type='accel', dt=None, sep='\t',
            **kwargs_to_genfromtxt,
    ):
        # Remember where the data came from (used later, e.g., as plot titles)
        if isinstance(data, str):  # a file name
            self._path_name, self._file_name = os.path.split(data)
        else:
            self._path_name, self._file_name = None, None
        # `data_` is a two-column array (time, motion); `dt` is either the
        # supplied value or inferred from the time column of `data`
        data_, dt = hlp.read_two_column_stuff(data, delta=dt, sep=sep)
        # Validate `unit` and `motion_type` before converting anything
        valid_unit_name = ['m', 'cm', 'm/s', 'cm/s', 'm/s/s', 'cm/s/s', 'gal', 'g']
        if unit not in valid_unit_name:
            if 's^2' in unit:
                raise ValueError("Please use '/s/s' instead of 's^2' in `unit`.")
            else:
                raise ValueError(
                    "Invalid `unit` name. Valid names are: %s" % valid_unit_name
                )
        if motion_type not in ['accel', 'veloc', 'displ']:
            raise ValueError("`motion_type` must be in {'accel', 'veloc', 'displ'}")
        if (unit == 'g' or unit == 'gal') and motion_type != 'accel':
            raise ValueError(
                "If unit is 'g' or 'gal', then `motion_type` must be 'accel'."
            )
        # Convert the data column to SI units (motions are stored in SI only)
        if unit in ['cm', 'cm/s', 'cm/s/s', 'gal']:
            data_[:, 1] = data_[:, 1] / 100.0  # cm --> m
        elif unit == 'g':
            data_[:, 1] = data_[:, 1] * 9.81  # g --> m/s/s
        self.dt = float(dt)  # float; unit: sec
        self.npts = len(data_[:, 0])  # int; how many time points
        self.time = np.linspace(0, self.dt*(self.npts-1), num=self.npts)
        # Derive accel/veloc/displ from whichever motion type was supplied,
        # via numerical integration/differentiation
        if motion_type == 'accel':
            self.accel = data_  # numpy array, with length unit 'm'
            self.veloc, self.displ = sr.num_int(self.accel)
        elif motion_type == 'veloc':
            self.accel = sr.num_diff(data_)
            self.veloc = data_
            self.displ = sr.num_int(data_)[0]
        else:  # displ
            self.veloc = sr.num_diff(data_)
            self.accel = sr.num_diff(self.veloc)
            self.displ = data_
        # Peak values (SI units) and their common-unit conversions
        self.pga = float(np.max(np.abs(self.accel[:, 1])))
        self.pgv = float(np.max(np.abs(self.veloc[:, 1])))
        self.pgd = float(np.max(np.abs(self.displ[:, 1])))
        self.pga_in_gal = self.pga * 100.0
        self.pga_in_g = self.pga / 9.81
        self.pgv_in_cm_s = self.pgv * 100.0
        self.pgd_in_cm = self.pgd * 100.0
        # Arias intensity, significant duration (T5_95), and RMS measures
        arias_result = self.__calc_Arias()
        self.Arias_Intensity = arias_result[0]
        self.Arias_Intensity_normalized = arias_result[1]
        self.peak_Arias_Intensity = arias_result[2]
        self.T5_95 = arias_result[3]
        self.rms_accel, self.rms_veloc, self.rms_displ = self.__calc_RMS()
def __repr__(self):
"""
Basic information of a ground motion.
"""
text = 'n_pts=%d, dt=%.4gs, PGA=%.3gg=%.3ggal, PGV=%.3gcm/s, PGD=%.3gcm, T5_95=%.3gs'\
% (self.npts, self.dt, self.pga_in_g, self.pga_in_gal,
self.pgv_in_cm_s, self.pgd_in_cm, self.T5_95)
return text
    def summary(self):
        """
        Show a brief summary of the ground motion.
        """
        print(self)  # one-line textual summary via __repr__()
        self.plot()  # acceleration/velocity/displacement waveform plots
def get_Fourier_spectrum(
self, real_val=True, double_sided=False, show_fig=False,
):
"""
Get Fourier spectrum of the ground motion.
Parameters
----------
real_val : bool
Whether to return the amplitude (or "magnitude") of the complex
numbers.
double_sided : bool
Whether to return the second half of the spectrum (i.e. beyond the
Nyquist frequency).
show_fig : bool
Whether to show figures of the spectrum.
Return
------
fs : PySeismoSoil.class_frequency_spectrym.Frequency_Spectrum
A frequency spectrum object.
"""
x = sig.fourier_transform(
self.accel, real_val=real_val, double_sided=double_sided, show_fig=show_fig,
)
fs = FS(x)
return fs
def get_response_spectra(
self, T_min=0.01, T_max=10, n_pts=60, damping=0.05, show_fig=True,
parallel=False, n_cores=None, subsample_interval=1,
):
"""
Get elastic response spectra of the ground motion, using the "exact"
solution to the equation of motion (Section 5.2, Dynamics of Structures,
Second Edition, by <NAME>).
Parameters
----------
T_min : float
Minimum period value to calculate the response spectra. Unit: sec.
T_max : float
Maximum period value to calculate the response spectra. Unit: sec.
n_pts : int
Number of points you want for the response spectra. A high number
increases computation time.
damping : float
Damping of the dash pots. Do not use "percent" as unit. Unit: 1
(i.e., not percent).
show_fig : bool
Whether to show a figure of the response spectra.
parallel : bool
Whether to perform the calculation in parallel.
n_cores : int or ``None``
Number of cores to use in parallel. Not necessary if not ``parallel``.
subsample_interval : int
The interval at which to subsample the input acceleration in the
time domain. A higher number reduces computation time, but could
lead to less accurate results.
Returns
-------
(Tn, SA, PSA, SV, PSV, SD, fn) : tuple of 1D numpy.ndarray
Periods, spectral acceleration, pseudo spectral acceleration,
spectral velocity, pseudo spectral velocity, spectral displacement,
and frequencies, respectively. Units: SI.
"""
rs = sr.response_spectra(
self.accel, damping=damping, T_min=T_min,
T_max=T_max, n_pts=n_pts, show_fig=show_fig,
parallel=parallel, n_cores=n_cores,
subsample_interval=subsample_interval,
)
return rs
def plot(self, show_as_unit='m', fig=None, ax=None, figsize=(5,6), dpi=100):
"""
Plots acceleration, velocity, and displacement waveforms together.
Parameters
----------
show_as_unit : str
What unit to convert the ground motion into, when plotting.
fig : matplotlib.figure.Figure or ``None``
Figure object. If None, a new figure will be created.
ax : matplotlib.axes._subplots.AxesSubplot or ``None``
Axes object. If None, a new axes will be created.
figsize: (float, float)
Figure size in inches, as a tuple of two numbers. The figure
size of ``fig`` (if not ``None``) will override this parameter.
dpi : float
Figure resolution. The dpi of ``fig`` (if not ``None``) will override
this parameter.
Returns
-------
fig : matplotlib.figure.Figure
The figure object being created or being passed into this function.
ax : matplotlib.axes._subplots.AxesSubplot
The axes object being created or being passed into this function.
"""
if self._file_name:
title = self._file_name
else:
title = ''
if show_as_unit == 'm':
accel_ = self.accel
elif show_as_unit == 'cm':
accel_ = self._unit_convert(unit='cm/s/s')
elif show_as_unit == 'g':
accel_ = self._unit_convert(unit='g')
else:
raise ValueError("`show_as_unit` can only be 'm', 'cm', or 'g'.")
fig, ax = sr.plot_motion(
accel_, unit=show_as_unit, title=title,
fig=fig, ax=ax, figsize=figsize, dpi=dpi,
)
return fig, ax
def _unit_convert(self, unit='m/s/s'):
"""
Convert the unit of acceleration. "In-place" conversion is not allowed,
because ground motions are always stored in SI units internally.
Parameters
----------
unit : {'m/s/s', 'cm/s/s', 'gal', 'g'}
What unit to convert the acceleration into.
Returns
-------
accel : numpy.ndarray
Acceleration time history with the desired unit. It is a 2D numpy
array wity two columns (time and acceleration).
"""
accel = self.accel.copy()
if unit == 'm/s/s':
pass
elif unit in ['cm/s/s', 'gal']:
accel[:, 1] *= 100 # m/s/s --> cm/s/s
elif unit == 'g':
accel[:, 1] /= 9.81 # m/s/s --> g
else:
raise ValueError('Unrecognized `unit`. Must be an acceleration unit.')
return accel
def __calc_RMS(self):
"""
Private method.
Returns RMS acceleration, velocity, and displacement. Unit: SI.
"""
acc = self.accel
vel, dis = sr.num_int(acc)
rms_accel = np.sqrt(np.mean(acc[:, 1] ** 2.0))
rms_veloc = np.sqrt(np.mean(vel[:, 1] ** 2.0))
rms_displ = np.sqrt(np.mean(dis[:, 1] ** 2.0))
return rms_accel, rms_veloc, rms_displ
def __arias_time_bounds(self, t, Ia_normalized, low_lim, high_lim):
"""
Private method.
Calculate lower and upper time bounds corresponding to two given
normalized Arias intensity percentages (e.g., [0.05, 0.95])
"""
if low_lim >= high_lim:
raise ValueError('low_lim must be smaller than high_lim.')
if t is None:
t = self.accel[:, 0]
if Ia_normalized is None:
Ia_normalized = self.Arias_Intensity_normalized[:, 1]
if len(t) != len(Ia_normalized):
raise ValueError('Ia_normalized and t must have the same length.')
n = len(t)
t_low = 0.0 # initialize this variable, in case low_lim <= 0 seconds
t_high = t[-1] # initialize t_high, in case high_lim >= max(time)
prev = Ia_normalized[0]
for i in range(n):
if Ia_normalized[i] >= low_lim and prev < low_lim:
t_low = t[i]
if Ia_normalized[i] >= high_lim and prev < high_lim:
t_high = t[i]
prev = Ia_normalized[i]
return t_low, t_high
def __calc_Arias(self, motion='accel', show_fig=False):
"""
Private method.
Calculate Arias intensity. Returns the intensity time series, peak
intensity, and T5_95 (time interval from 5% Arias intensity to 95%
Arias intensity).
"""
g = 9.81
if motion == 'accel':
t = self.accel[:, 0]
a = self.accel[:, 1]
elif motion == 'veloc':
t = self.veloc[:, 0]
a = self.veloc[:, 1]
elif motion == 'displ':
t = self.displ[:, 0]
a = self.displ[:, 1]
n = len(a)
dt = t[1] - t[0]
Ia_1col = np.zeros(n)
a_sq = a ** 2.0
for i in range(1,n):
Ia_1col[i] = Ia_1col[i - 1] + np.pi / (2 * g) * a_sq[i - 1] * dt
Ia_peak = float(Ia_1col[-1])
Ia = np.column_stack((t,Ia_1col))
Ia_norm_1col = Ia_1col / Ia_peak # normalized
Ia_norm = np.column_stack((t,Ia_norm_1col))
t_low, t_high = self.__arias_time_bounds(t, Ia_norm_1col, 0.05, 0.95)
T5_95 = t_high - t_low
if show_fig:
plt.figure()
ax = plt.axes()
ax.plot(t, Ia)
ax.grid(ls=':')
ax.set_xlabel('Time [sec]')
ax.set_ylabel('Arias intensity')
y_low, y_high = ax.get_ylim()
plt.plot([t_low, t_low], [y_low, y_high], lw=0.75, ls='--', c='r')
plt.plot([t_high, t_high], [y_low, y_high], lw=0.75, ls='--', c='r')
return Ia, Ia_norm, Ia_peak, T5_95
def scale_motion(self, factor=1.0, target_PGA_in_g=None):
"""
Scale ground motion, either by specifying a factor, or specifying a
target PGA level.
Parameters
----------
factor : float
The factor to multiply to the original acceleration (with the
unit of m/s/s)
target_PGA_in_g : float
The target PGA (in g). If ``target_PGA_in_g`` is not None, it
overrides ``factor``.
Returns
-------
scaled_motion : Ground_Motion
The scaled motion
"""
if target_PGA_in_g != None:
factor = target_PGA_in_g / self.pga_in_g
else: # factor != None, and target_PGA_in_g is None
pass
time = self.accel[:, 0]
acc = self.accel[:, 1]
acc_scaled = acc * factor
return Ground_Motion(np.column_stack((time, acc_scaled)), unit='m')
def truncate(self, limit, arias=True, extend=[0, 0], show_fig=False):
"""
Truncate ground motion, removing data points in the head and/or tail.
Parameters
----------
limit : (float, float) or [float, float]
The lower/upper bounds of time (e.g., [2, 95]) or normalized Arias
intensity (e.g., [0.05, 0.95]).
arias : bool
If ``True``, ``limit`` means the normalized Arias intensity.
Otherwise, ``limit`` means the actual time.
extend : tuple or list of two floats
How many seconds to extend before and after the original truncated
time limits. For example, if extend is [5, 5] sec, and the original
time limits are [3, 50] sec, then the actual time limits are
[0, 55] sec. (3 - 5 = -2 smaller than 0, so truncated at 0.)
show_fig : bool
Whether or not to show the waveforms before and after truncation.
Returns
-------
truncated_accel : Ground_Motion
Truncated ground motion.
fig : matplotlib.figure.Figure
The figure object being created or being passed into this function.
ax : matplotlib.axes._subplots.AxesSubplot
The axes object being created or being passed into this function.
(n1, n2) : tuple<int>
The indices at which signal is truncated. In other words,
truncated_accel = original_accel[n1 : n2].
"""
if not isinstance(limit, (tuple, list)):
raise TypeError('"limit" must be a list/tuple of two elements.')
if len(limit) != 2:
raise ValueError('Length of "limit" must be 2.')
if not isinstance(extend, (tuple, list)):
raise TypeError('"extend" must be a list/tuple of two elements.')
if len(extend) != 2:
raise ValueError('Length of "extend" must be 2.')
if extend[0] < 0 or extend[1] < 0:
raise ValueError('extend should be non negative.')
lim1, lim2 = limit
if lim1 >= lim2:
raise ValueError('"limit" must be in ascending order.')
if not arias: # "limit" represents actual time limits
t1, t2 = lim1, lim2
else: # "limit" represents bounds of normalized Arias instensity
t1, t2 = self.__arias_time_bounds(None, None, lim1, lim2)
t1 -= extend[0]
t2 += extend[1]
n1 = int(t1 / self.dt)
n2 = int(t2 / self.dt)
if n1 < 0: n1 = 0
if n2 > self.npts: n2 = self.npts
time_trunc = self.accel[:n2-n1, 0]
accel_trunc = self.accel[n1:n2, 1]
truncated = np.column_stack((time_trunc, accel_trunc))
if show_fig:
ax = [None] * 3
fig = plt.figure(figsize=(5,6))
fig.subplots_adjust(left=0.2)
ax[0] = fig.add_subplot(3,1,1)
ax[0].plot(self.time, self.accel[:,1], 'gray', lw=1.75, label='original')
ax[0].plot(self.time[n1:n2], truncated[:,1], 'm', lw=1., label='truncated')
ax[0].grid(ls=':')
ax[0].set_ylabel('Accel. [m/s/s]')
ax[0].legend(loc='best')
ax[1] = fig.add_subplot(3,1,2)
ax[1].plot(self.time, self.veloc[:,1], 'gray', lw=1.75)
ax[1].plot(self.time[n1:n2], sr.num_int(truncated)[0][:,1], 'm', lw=1.)
ax[1].grid(ls=':')
ax[1].set_ylabel('Veloc. [m/s]')
ax[2] = fig.add_subplot(3,1,3)
ax[2].plot(self.time, self.displ[:,1], 'gray', lw=1.75)
ax[2].plot(self.time[n1:n2], sr.num_int(truncated)[1][:,1], 'm', lw=1.)
ax[2].grid(ls=':')
ax[2].set_ylabel('Displ. [m]')
ax[2].set_xlabel('Time [sec]')
fig.tight_layout(pad=0.3)
else:
fig, ax = None, None
return Ground_Motion(truncated, unit='m'), fig, ax, (n1, n2)
def amplify_by_tf(
self, transfer_function, taper=False, extrap_tf=True,
deconv=False, show_fig=False, dpi=100, return_fig_obj=False,
):
"""
Amplify (or de-amplify) ground motions in the frequency domain. The
mathematical process behind this function is as follows:
(1) INPUT = fft(input)
(2) OUTPUT = INPUT * TRANS_FUNC
(3) output = ifft(OUTPUT)
Parameters
----------
transfer_function : PySeismoSoil.class_frequency_spectrum.Frequency_Spectrum
The transfer function to apply to the ground motion. It only needs
to be "single-sided" (see notes below).
taper : bool
Whether to taper the input acceleration (using Tukey taper)
extrap_tf : bool
Whether to extrapolate the transfer function if its frequency range
does not reach the frequency range implied by the input motion
deconv : bool
If ``False``, a regular amplification is performed; otherwise, the
transfer function is "deducted" from the input motion ("deconvolution").
show_fig : bool
Whether or not to show an illustration of how the calculation is
carried out.
dpi : int
Desired DPI for the figures; only effective when ``show_fig`` is
``True``.
return_fig_obj : bool
Whether or not to return figure and axis objects to the caller.
Returns
-------
output_motion : Ground_Motion
The resultant ground motion in time domain
fig : matplotlib.figure.Figure, *optional*
The figure object being created or being passed into this function.
ax : matplotlib.axes._subplots.AxesSubplot, *optional*
The axes object being created or being passed into this function.
Notes
-----
"Single sided":
For example, the sampling time interval of ``input_motion`` is 0.01
sec, then the Nyquist frequency is 50 Hz. Therefore, the transfer
function needs to contain information at least up to the Nyquist
frequency, i.e., at least 0-50 Hz, and anything above 50 Hz will
not affect the input motion at all.
"""
if not isinstance(transfer_function, FS):
raise TypeError(
'`transfer_function` needs to be of type '
'`Frequency_Spectrum` (or its subclass).'
)
freq = transfer_function.freq
tf_1col = transfer_function.spectrum
transfer_function_single_sided = (freq, tf_1col)
result = sr.amplify_motion(
self.accel,
transfer_function_single_sided,
taper=taper,
extrap_tf=extrap_tf,
deconv=deconv,
show_fig=show_fig,
dpi=dpi,
return_fig_obj=return_fig_obj,
)
if return_fig_obj:
output_accel, fig, ax = result
return Ground_Motion(output_accel, unit='m'), fig, ax
else:
output_accel = result
return Ground_Motion(output_accel, unit='m')
def amplify(self, soil_profile, boundary='elastic', show_fig=False):
"""
Amplify the ground motion via a 1D soil profile, using linear site
amplification method.
Parameters
----------
soil_profile : PySeismoSoil.class_Vs_profile.Vs_Profile
The soil profile through which to deconvolve the gound motion.
boundary : {'elastic', 'rigid'}
The type of boundary of the bottom of the soil profile.
show_fig : bool
Whether or not to show a figure that illustrates the deconvolution
process.
Returns
-------
output_motion : Ground_Motion
The amplified ground motion.
"""
if not isinstance(soil_profile, Vs_Profile):
raise TypeError('`soil_profile` must be of type `Vs_Profile`.')
vs_profile = soil_profile.vs_profile
surface_motion = self.accel # note: unit is SI
response = sr.linear_site_resp(
vs_profile, surface_motion, deconv=False,
boundary=boundary, show_fig=show_fig,
)[0]
output_motion = Ground_Motion(response, unit='m')
return output_motion
def compare(
self, another_ground_motion, this_ground_motion_as_input=True,
smooth=True, input_accel_label='Input', output_accel_label='Output',
):
"""
Compare with another ground motion: plot comparison figures showing
two time histories and the transfer function between them.
Parameters
----------
another_ground_motion : Ground_Motion
Another ground motion object.
this_ground_motion_as_input : bool
If ``True``, this ground motion is treated as the input ground
motion. Otherwise, the other ground motion is treated as the input.
smooth : bool
In the comparison plot, whether or not to also show the smoothed
amplification factor.
input_accel_label : str
The text label for the input acceleration in the figure legend.
output_accel_label : str
The text label for the output acceleration in the figure legend.
Returns
-------
fig : matplotlib.figure.Figure
The figure object created in this function.
ax : matplotlib.axes._subplots.AxesSubplot
The axes object created in this function.
"""
if not isinstance(another_ground_motion, Ground_Motion):
raise TypeError('`another_ground_motion` must be a `Ground_Motion`.')
# END IF
if this_ground_motion_as_input:
accel_in = self.accel
accel_out = another_ground_motion.accel
else:
accel_in = another_ground_motion.accel
accel_out = self.accel
# END IF-ELSE
amp_ylabel = f'Amplification\n({input_accel_label} ➡ {output_accel_label})'
phs_ylabel = f'Phase shift [rad]\n({input_accel_label} ➡ {output_accel_label})'
fig, ax = sr.compare_two_accel(
accel_in,
accel_out,
smooth=smooth,
input_accel_label=input_accel_label,
output_accel_label=output_accel_label,
amplification_ylabel=amp_ylabel,
phase_shift_ylabel=phs_ylabel,
)
return fig, ax
def deconvolve(self, soil_profile, boundary='elastic', show_fig=False):
"""
Deconvolve the ground motion, i.e., propagate the motion downwards to
get the borehole motion (rigid boundary) or the "rock outcrop" motion
(elastic boundary).
Parameters
----------
soil_profile : PySeismoSoil.class_Vs_profile.Vs_Profile
The soil profile through which to deconvolve the gound motion.
boundary : {'elastic', 'rigid'}
The type of boundary of the bottom of the soil profile.
show_fig : bool
Whether or not to show a figure that illustrates the deconvolution
process.
Returns
-------
deconv_motion : Ground_Motion
The deconvolved motion on the rock outcrop or in a borehole.
"""
if not isinstance(soil_profile, Vs_Profile):
raise TypeError('`soil_profile` must be of type `Vs_Profile`.')
vs_profile = soil_profile.vs_profile
surface_motion = self.accel # note: unit is SI
response = sr.linear_site_resp(
vs_profile, surface_motion, deconv=True,
boundary=boundary, show_fig=show_fig,
)[0]
deconv_motion = Ground_Motion(response, unit='m')
return deconv_motion
def baseline_correct(self, cutoff_freq=0.20, show_fig=False):
"""
Baseline-correct the acceleration (via zero-phase-shift high-pass
method).
Parameters
----------
cutoff_freq : float
The frequency (unit: Hz) for high passing. Energies below this
frequency are filtered out.
show_fig : bool
Whether or not to show figures comparing before and after.
Returns
-------
corrected : Ground_Motion
The baseline-corrected ground motion, with SI units.
"""
accel_ = sig.baseline(self.accel, show_fig=show_fig, cutoff_freq=cutoff_freq)
return Ground_Motion(accel_, unit='m')
def lowpass(self, cutoff_freq, show_fig=False, filter_order=4, padlen=150):
"""
Zero-phase-shift low-pass filtering.
Parameters
----------
cutoff_freq : float
Cut-off frequency (unit: Hz).
filter_order : int
Filter order.
padlen : int
Pad length (the number of elements by which to extend x at both ends
of axis before applying the filter). If None, use the default value
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html).
Returns
-------
filtered : Ground_Motion
Filtered signal.
"""
accel_ = sig.lowpass(
self.accel, cutoff_freq, show_fig=show_fig,
filter_order=filter_order, padlen=padlen,
)
return Ground_Motion(accel_, unit='m')
def highpass(self, cutoff_freq, show_fig=False, filter_order=4, padlen=150):
"""
Zero-phase-shift high-pass filtering.
Pameters
--------
cutoff_freq : float
Cut-off frequency (unit: Hz).
filter_order : int
Filter order.
padlen : int
Pad length (the number of elements by which to extend x at both ends
of axis before applying the filter). If None, use the default value
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html).
Returns
-------
filtered : Ground_Motion
Filtered signal.
"""
accel_ = sig.highpass(
self.accel, cutoff_freq, show_fig=show_fig,
filter_order=filter_order, padlen=padlen,
)
return Ground_Motion(accel_, unit='m')
def bandpass(self, cutoff_freq, show_fig=False, filter_order=4, padlen=150):
"""
Zero-phase-shift band-pass filtering.
Pameters
--------
cutoff_freq : [float, float]
Cut-off frequencies (in Hz), from low to high.
filter_order : int
Filter order.
padlen : int
Pad length (the number of elements by which to extend x at both ends
of axis before applying the filter). If None, use the default value
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html).
Returns
-------
filtered : Ground_Motion
Filtered signal
"""
accel_ = sig.bandpass(
self.accel, cutoff_freq, show_fig=show_fig,
filter_order=filter_order, padlen=padlen,
)
return Ground_Motion(accel_, unit='m')
def bandstop(self, cutoff_freq, show_fig=False, filter_order=4, padlen=150):
"""
Zero-phase-shift band-stop filtering.
Pameters
--------
cutoff_freq : [float, float]
Cut-off frequencies (in Hz), from low to high.
filter_order : int
Filter order.
padlen : int
padlen : int
Pad length (the number of elements by which to extend x at both ends
of axis before applying the filter). If None, use the default value
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html).
Returns
-------
filtered : Ground_Motion
Filtered signal
"""
accel_ = sig.bandstop(
self.accel, cutoff_freq, show_fig=show_fig,
filter_order=filter_order, padlen=padlen,
)
return Ground_Motion(accel_, unit='m')
def save_accel(
self, fname, sep='\t', t_prec='%.5g', motion_prec='%.5g', unit='m/s/s',
):
"""
Saves the acceleration as a text file.
Parameters
----------
fname : str
File name (including path).
sep : str
Delimiter.
t_prec : str
The precision specifier for the "time" column.
motion_prec : str
The precision specifier for the "motion" column.
unit : str
What unit shall the exported acceleration be in.
"""
fmt = [t_prec, motion_prec]
data = self.accel
if unit == 'm/s/s':
pass
elif unit == 'g':
data[:,1] = data[:,1] / 9.81
elif unit in ['gal', 'cm/s/s']:
data[:,1] = data[:,1] * 100.0
np.savetxt(fname, data, fmt=fmt, delimiter=sep)
|
'''
compare MC and TD using the blackjack example
state space: Tuple(Discrete(32), Discrete(11), Discrete(2))
player's current sum {0, ... , 31}
dealer's face up {1, ... , 10}, aces can either count as 11 or 1
whether the player has a usable ace {0, 1}
action space: Discrete(2)
stick = 0, hit = 1
Detail of this environment: https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py
'''
import sys
import gym
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from src.algorithm.mc import mc_prediction_v, mc_prediction_q, mc_control
from src.algorithm.td0 import td0_prediction_v, td0_prediction_q
from src.algorithm.td_lambda import forward_td_prediction_v, backward_td_prediction_v
from src.utils import plot_policy
def demo_with_random_policy(env):
    """Play five Blackjack games with uniformly random actions, printing each step."""
    for game_index in range(5):
        print('Game ', game_index)
        state = env.reset()
        while True:
            print("state: ", state)
            action = env.action_space.sample()
            action_name = "stick" if action == 0 else "hit"
            print("action: ", action_name)
            state, reward, done, _ = env.step(action)
            if not done:
                continue
            print("last state: ", state)
            print('End game! Reward: ', reward)
            if reward > 0:
                print('You won :)\n')
            else:
                print('You lost :(\n')
            break
# generate episode using naive fixed policy
# default: if sum exceeds 18, "stick" 80% "hit" 20%, vice versa
def generate_episode_naive_policy(env):
    """
    Generate one Blackjack episode under a naive fixed policy: when the
    player's sum exceeds 18, stick with probability 0.8 (hit 0.2);
    otherwise hit with probability 0.8 (stick 0.2).

    Returns a list of (state, action, reward) tuples.
    """
    episode = []
    state = env.reset()
    done = False
    while not done:
        action_probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]
        action = np.random.choice(np.arange(2), p=action_probs)
        next_state, reward, done, _ = env.step(action)
        episode.append((state, action, reward))
        state = next_state
    return episode
def plot_blackjack_values(V):
    """
    Plot a Blackjack state-value function as two 3D surfaces (with and
    without a usable ace) over player sum (11-21) and dealer card (1-10).

    Parameters
    ----------
    V : dict
        Maps (player_sum, dealer_card, usable_ace) tuples to state values.
        States missing from V are plotted as 0.
    """
    def state_value(player_sum, dealer_card, usable_ace):
        # Membership test (not V.get on a possibly-default dict) mirrors the
        # lookup without inserting new keys.
        key = (player_sum, dealer_card, usable_ace)
        return V[key] if key in V else 0

    def draw_surface(usable_ace, ax):
        player_sums = np.arange(11, 22)
        dealer_cards = np.arange(1, 11)
        X, Y = np.meshgrid(player_sums, dealer_cards)
        Z = np.array([
            state_value(x, y, usable_ace)
            for x, y in zip(np.ravel(X), np.ravel(Y))
        ]).reshape(X.shape)
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                        cmap=plt.cm.coolwarm, vmin=-1.0, vmax=1.0)
        ax.set_xlabel('Player\'s Current Sum')
        ax.set_ylabel('Dealer\'s Showing Card')
        ax.set_zlabel('State Value')
        ax.view_init(ax.elev, -120)

    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(211, projection='3d')
    ax.set_title('Usable Ace')
    draw_surface(True, ax)
    ax = fig.add_subplot(212, projection='3d')
    ax.set_title('No Usable Ace')
    draw_surface(False, ax)
    plt.show()
if __name__ == "__main__":
    env = gym.make('Blackjack-v0')
    # demo how the interface works using a random policy
    demo_with_random_policy(env)
    # MC prediction given a fixed policy
    V = mc_prediction_v(env, 500000, generate_episode_naive_policy)
    plot_blackjack_values(V)
    # TD(0) prediction given the same fixed policy
    V = td0_prediction_v(env, 500000, generate_episode_naive_policy, alpha=0.1)
    plot_blackjack_values(V)
    # Forward View TD(lambda) prediction given the same fixed policy
    V = forward_td_prediction_v(env, 500000, generate_episode_naive_policy, alpha=0.1, lambd=0.9)
    plot_blackjack_values(V)
    # Backward View TD(lambda) prediction given the same fixed policy
    V = backward_td_prediction_v(env, 500000, generate_episode_naive_policy, alpha=0.1, lambd=0.9)
    plot_blackjack_values(V)
    # GLIE MC control
    policy, Q = mc_control(env, 600000, alpha=0.01)
    # Derive the state values V(s) = max_a Q(s, a) from the learned Q table
    V = dict((k,np.max(v)) for k, v in Q.items())
    plot_blackjack_values(V)
    plot_policy(policy)
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from restclients_core import models
class Event(models.Model):
    """
    An R25 event record, including its workflow state and optional links to
    a parent event and a containing cabinet.
    """
    # R25 encodes event states as string codes:
    DRAFT_STATE = "0"
    TENTATIVE_STATE = "1"
    CONFIRMED_STATE = "2"
    SEALED_STATE = "3"
    DENIED_STATE = "4"
    CANCELLED_STATE = "99"
    # (code, human-readable name) pairs for the `state` field
    STATE_CHOICES = (
        (DRAFT_STATE, "Draft"),
        (TENTATIVE_STATE, "Tentative"),
        (CONFIRMED_STATE, "Confirmed"),
        (SEALED_STATE, "Sealed"),
        (DENIED_STATE, "Denied"),
        (CANCELLED_STATE, "Cancelled"),
    )
    event_id = models.IntegerField()
    alien_uid = models.CharField(max_length=100, null=True)
    name = models.CharField(max_length=100)
    title = models.CharField(max_length=100)
    start_date = models.DateField()
    end_date = models.DateField()
    state = models.CharField(max_length=2, choices=STATE_CHOICES)
    parent_id = models.IntegerField(null=True)
    cabinet_id = models.IntegerField(null=True)
    cabinet_name = models.CharField(max_length=100, null=True)

    def state_name(self):
        """Return the human-readable name of this event's state.

        Raises KeyError if `state` is not one of the known codes.
        """
        return dict(self.STATE_CHOICES)[self.state]

    def parent(self):
        """Fetch (and cache) the parent event; None if there is no parent.

        The import is deferred to avoid a circular import with uw_r25.events.
        """
        if not hasattr(self, "_parent"):
            self._parent = None
            if self.parent_id is not None:
                from uw_r25.events import get_event_by_id
                self._parent = get_event_by_id(self.parent_id)
        return self._parent

    def children(self):
        """Fetch (and cache) the events whose parent is this event."""
        if not hasattr(self, "_children"):
            from uw_r25.events import get_events
            self._children = get_events(parent_id=self.event_id)
        return self._children

    def cabinet(self):
        """Return the cabinet event containing this event.

        Returns self when this event is its own cabinet, and (implicitly)
        None when it has no cabinet. Note: unlike parent()/children(), the
        result is not cached.
        """
        if self.cabinet_id is not None:
            if self.cabinet_id == self.event_id:
                return self
            else:
                from uw_r25.events import get_event_by_id
                return get_event_by_id(self.cabinet_id)

    class Meta:
        db_table = "restclients_r25_event"
class Space(models.Model):
    """An R25 space (location) record: an id plus short and formal names."""
    space_id = models.IntegerField()
    name = models.CharField(max_length=100)
    formal_name = models.CharField(max_length=200)

    class Meta:
        db_table = "restclients_r25_space"
class Reservation(models.Model):
    """
    A single reservation occurrence belonging to an R25 event, with its
    state, time span, and contact information.
    """
    # R25 encodes reservation states as string codes:
    STANDARD_STATE = "1"
    EXCEPTION_STATE = "2"
    WARNING_STATE = "3"
    OVERRIDE_STATE = "4"
    CANCELLED_STATE = "99"
    # (code, human-readable name) pairs for the `state` field
    STATE_CHOICES = (
        (STANDARD_STATE, "Standard"),
        (EXCEPTION_STATE, "Exception"),
        (WARNING_STATE, "Warning"),
        (OVERRIDE_STATE, "Override"),
        (CANCELLED_STATE, "Cancelled"),
    )
    reservation_id = models.IntegerField()
    state = models.CharField(max_length=2, choices=STATE_CHOICES)
    start_datetime = models.DateTimeField()
    end_datetime = models.DateTimeField()
    event_id = models.IntegerField()
    event_name = models.CharField(max_length=64)
    profile_name = models.CharField(max_length=32)
    contact_name = models.CharField(max_length=64)
    contact_email = models.CharField(max_length=64)

    def state_name(self):
        """Return the human-readable name of this reservation's state.

        Raises KeyError if `state` is not one of the known codes.
        """
        return dict(self.STATE_CHOICES)[self.state]

    class Meta:
        db_table = "restclients_r25_reservation"
class BindingReservation(models.Model):
    """Links a bound reservation to its primary reservation and bound event."""
    bound_reservation_id = models.IntegerField()
    primary_reservation = models.IntegerField()
    name = models.CharField(max_length=200)
    bound_event_id = models.IntegerField()

    class Meta:
        db_table = "restclients_r25_binding_reservation"
|
<reponame>piotrwinkler/breast_density_classifier<gh_stars>0
import argparse
import glob
import os
import numpy as np
import torch
from sklearn.metrics import accuracy_score
import models_torch as models
import utils
# Root of the experiment data: each subdirectory holds one exam's images
# plus a ground-truth "density.txt" file (see the __main__ loop below).
EXPERIMENT_DATA_DIR = "/tmp/mgr"
def inference(parameters, verbose=True) -> int:
    """
    Run breast-density inference on one exam and return the predicted
    density class.

    Parameters
    ----------
    parameters : dict
        Must contain "device_type" ('gpu' or 'cpu'), "gpu_number" (when
        device_type is 'gpu'), "image_path", "model_type" ('cnn' or
        'histogram'), "model_path", and "bins_histogram" (histogram model).
    verbose : bool
        Whether to print the per-class prediction scores.

    Returns
    -------
    int
        Predicted density class, reported in the range 1 to 4.
    """
    # Resolve the compute device
    if parameters["device_type"] == "gpu":
        device = torch.device("cuda:{}".format(parameters["gpu_number"]))
    else:
        device = torch.device("cpu")

    # Load the four standard mammography views
    views = {
        view: utils.load_images(parameters['image_path'], view)
        for view in ('L-CC', 'R-CC', 'L-MLO', 'R-MLO')
    }

    # Construct the model and prepare its input
    if parameters["model_type"] == 'cnn':
        model = models.BaselineBreastModel(
            device, nodropout_probability=1.0, gaussian_noise_std=0.0,
        ).to(device)
        model.load_state_dict(torch.load(parameters["model_path"]))
        # Images are converted from NHWC to NCHW for the CNN
        x = {
            view: torch.Tensor(views[view]).permute(0, 3, 1, 2).to(device)
            for view in ('L-CC', 'L-MLO', 'R-CC', 'R-MLO')
        }
    elif parameters["model_type"] == 'histogram':
        model = models.BaselineHistogramModel(
            num_bins=parameters["bins_histogram"],
        ).to(device)
        model.load_state_dict(torch.load(parameters["model_path"]))
        histogram_features = utils.histogram_features_generator(
            [views['L-CC'], views['R-CC'], views['L-MLO'], views['R-MLO']],
            parameters,
        )
        x = torch.Tensor(histogram_features).to(device)
    else:
        raise RuntimeError(parameters["model_type"])

    # Run the prediction
    with torch.no_grad():
        prediction_density = model(x).cpu().numpy()

    if verbose:
        # Nicely print out the per-class predictions
        print('Density prediction:\n'
              '\tAlmost entirely fatty (0):\t\t\t' + str(prediction_density[0, 0]) + '\n'
              '\tScattered areas of fibroglandular density (1):\t' + str(prediction_density[0, 1]) + '\n'
              '\tHeterogeneously dense (2):\t\t\t' + str(prediction_density[0, 2]) + '\n'
              '\tExtremely dense (3):\t\t\t\t' + str(prediction_density[0, 3]) + '\n')

    # The network indexes classes 0-3; report them as 1-4
    return np.argmax(prediction_density[0]) + 1
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run Inference')
    parser.add_argument('model_type')
    # Bug fix: `type=int` so the CLI value reaches the histogram model as an
    # integer (argparse would otherwise pass the raw string through, while
    # the default stayed an int).
    parser.add_argument('--bins-histogram', type=int, default=50)
    parser.add_argument('--model-path', default=None)
    parser.add_argument('--device-type', default="cpu")
    # parser.add_argument('--image-path', default="images/")
    args = parser.parse_args()

    parameters_ = {
        "model_type": args.model_type,
        "bins_histogram": args.bins_histogram,
        "model_path": args.model_path,
        "device_type": args.device_type,
        # "image_path": args.image_path,
    }

    # Fall back to the default saved model for the chosen model type
    if parameters_["model_path"] is None:
        if args.model_type == "histogram":
            parameters_["model_path"] = "saved_models/BreastDensity_BaselineHistogramModel/model.p"
        if args.model_type == "cnn":
            parameters_["model_path"] = "saved_models/BreastDensity_BaselineBreastModel/model.p"

    predicted_values = []
    real_values = []
    predicted_values_two_classes = []
    real_values_two_classes = []
    # Collapse the 4 BI-RADS-style classes into a binary dense/non-dense label
    two_classes_mapping = {1: 0, 2: 0, 3: 1, 4: 1}

    # Each subdirectory of EXPERIMENT_DATA_DIR holds one exam plus its
    # ground-truth "density.txt". (Loop variable renamed from `dir`, which
    # shadowed the builtin.)
    for exam_dir in glob.glob(f"{EXPERIMENT_DATA_DIR}/*/"):
        parameters_["image_path"] = exam_dir
        predicted_density = inference(parameters_)
        with open(os.path.join(exam_dir, "density.txt")) as file:
            real_density = int(file.read())
        print(f"Predicted density: {predicted_density}")
        print(f"Real density: {real_density}\n")
        print(f"Predicted density (2 cls): {two_classes_mapping[predicted_density]}")
        print(f"Real density (2 cls): {two_classes_mapping[real_density]}\n")
        predicted_values.append(predicted_density)
        real_values.append(real_density)
        predicted_values_two_classes.append(two_classes_mapping[predicted_density])
        real_values_two_classes.append(two_classes_mapping[real_density])

    print(f"Total accuracy: {accuracy_score(real_values, predicted_values)}")
    print(f"Total accuracy two classes: {accuracy_score(real_values_two_classes, predicted_values_two_classes)}")
"""
python density_model_torch_custom.py histogram
python density_model_torch_custom.py cnn
"""
|
import click
import configparser
import glob
import logging
import os
import sched
import MySQLdb
from jog import JogFormatter
from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY
from .metrics import gauge_generator
from .parser import parse_response
from .scheduler import schedule_job
from .utils import log_exceptions, nice_shutdown
log = logging.getLogger(__name__)
CONTEXT_SETTINGS = {
'help_option_names': ['-h', '--help']
}
METRICS_BY_QUERY = {}
class QueryMetricCollector(object):
    """Prometheus collector that re-exports the most recent result of
    every configured query."""

    def collect(self):
        # Snapshot the top level of METRICS_BY_QUERY before iterating:
        # writer threads replace entries wholesale (never mutate them in
        # place), so a shallow copy is enough to iterate safely.
        snapshot = dict(METRICS_BY_QUERY)
        for query_metrics in snapshot.values():
            for gauge in gauge_generator(query_metrics):
                yield gauge
def run_query(mysql_client, dbs, name, timezone, query, value_columns):
    """Run `query` against each database in `dbs` and publish the parsed
    metrics under `name` in METRICS_BY_QUERY.

    A database that fails is logged and skipped, so one bad database does
    not block metrics from the others.
    """
    metrics = []
    for db in dbs:
        try:
            mysql_client.select_db(db)
            with mysql_client.cursor() as cursor:
                if timezone:
                    # Parameterized instead of str.format so the
                    # (CLI-supplied) timezone string is escaped by the
                    # driver rather than spliced into the statement.
                    cursor.execute('set time_zone = %s', (timezone,))
                cursor.execute(query)
                raw_response = cursor.fetchall()
                columns = [column[0] for column in cursor.description]
                # Re-shape each row tuple into a {column: value} dict.
                response = [{column: row[i] for i, column in enumerate(columns)}
                            for row in raw_response]
                metrics += parse_response(name, db, value_columns, response)
        except Exception:
            log.exception('Error while querying db [%s], query [%s].', db, query)
    # Replace the entry wholesale so the collector thread never observes a
    # half-built list.
    METRICS_BY_QUERY[name] = metrics
def validate_server_address(ctx, param, address_string):
    """Click callback: parse "host[:port]" into a (host, port) tuple.

    The port defaults to 3306 when absent; a non-integer port is rejected
    with a BadParameter error.
    """
    if ':' not in address_string:
        return (address_string, 3306)

    host, _, port_string = address_string.partition(':')
    try:
        return (host, int(port_string))
    except ValueError:
        msg = "port '{}' in address '{}' is not an integer".format(port_string, address_string)
        raise click.BadParameter(msg)
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--port', '-p', default=9207,
              help='Port to serve the metrics endpoint on. (default: 9207)')
@click.option('--config-file', '-c', default='exporter.cfg', type=click.File(),
              help='Path to query config file. '
                   'Can be absolute, or relative to the current working directory. '
                   '(default: exporter.cfg)')
@click.option('--config-dir', default='./config', type=click.Path(file_okay=False),
              help='Path to query config directory. '
                   'If present, any files ending in ".cfg" in the directory '
                   'will be parsed as additional query config files. '
                   'Merge order is main config file, then config directory files '
                   'in filename order. '
                   'Can be absolute, or relative to the current working directory. '
                   '(default: ./config)')
@click.option('--mysql-server', '-s', callback=validate_server_address, default='localhost',
              help='Address of a MySQL server to run queries on. '
                   'A port can be provided if non-standard (3306) e.g. mysql:3333. '
                   '(default: localhost)')
@click.option('--mysql-databases', '-d', required=True,
              help='Databases to run queries on. '
                   'Database names should be separated by commas e.g. db1,db2.')
@click.option('--mysql-user', '-u', default='root',
              help='MySQL user to run queries as. (default: root)')
@click.option('--mysql-password', '-P', default='',
              help='Password for the MySQL user, if required. (default: no password)')
@click.option('--mysql-local-timezone', '-Z', default='',
              help='Local timezone for sql commands like NOW(). (default: use server timezone)')
@click.option('--json-logging', '-j', default=False, is_flag=True,
              help='Turn on json logging.')
@click.option('--log-level', default='INFO',
              type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']),
              help='Detail level to log. (default: INFO)')
@click.option('--verbose', '-v', default=False, is_flag=True,
              help='Turn on verbose (DEBUG) logging. Overrides --log-level.')
def cli(**options):
    """Export MySQL query results to Prometheus."""
    # --- logging setup ---
    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(log_format) if options['json_logging'] else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)
    log_level = getattr(logging, options['log_level'])
    logging.basicConfig(
        handlers=[log_handler],
        level=logging.DEBUG if options['verbose'] else log_level
    )
    logging.captureWarnings(True)
    port = options['port']
    mysql_host, mysql_port = options['mysql_server']
    dbs = options['mysql_databases'].split(',')
    username = options['mysql_user']
    # BUG FIX: this previously read options['<PASSWORD>'] (a redaction
    # artifact), which always raised KeyError; the click option above
    # stores the value under 'mysql_password'.
    password = options['mysql_password']
    timezone = options['mysql_local_timezone']
    # --- query config: main file first, then config-dir files in name order ---
    config = configparser.ConfigParser()
    config.read_file(options['config_file'])
    config_dir_file_pattern = os.path.join(options['config_dir'], '*.cfg')
    config_dir_sorted_files = sorted(glob.glob(config_dir_file_pattern))
    config.read(config_dir_sorted_files)
    query_prefix = 'query_'
    queries = {}
    for section in config.sections():
        if section.startswith(query_prefix):
            query_name = section[len(query_prefix):]
            query_interval = config.getfloat(section, 'QueryIntervalSecs')
            query = config.get(section, 'QueryStatement')
            value_columns = config.get(section, 'QueryValueColumns').split(',')
            queries[query_name] = (query_interval, query, value_columns)
    # --- schedule the queries and serve metrics ---
    scheduler = sched.scheduler()
    mysql_client = MySQLdb.connect(host=mysql_host,
                                   port=mysql_port,
                                   user=username,
                                   passwd=password,
                                   autocommit=True)
    for name, (interval, query, value_columns) in queries.items():
        schedule_job(scheduler, interval,
                     run_query, mysql_client, dbs, name, timezone, query, value_columns)
    REGISTRY.register(QueryMetricCollector())
    log.info('Starting server...')
    start_http_server(port)
    log.info('Server started on port %s', port)
    scheduler.run()  # blocks, running queries on their intervals
@log_exceptions(exit_on_exception=True)
@nice_shutdown()
def main():
    """Console entry point: run the CLI; MYSQL_EXPORTER_* environment
    variables may supply any of its options."""
    cli(auto_envvar_prefix='MYSQL_EXPORTER')
|
<reponame>posm/posm-opendronemap-api
# coding=utf-8
from datetime import datetime
import json
import shutil
from StringIO import StringIO
import subprocess32 as subprocess
import os
import uuid
from cachetools.func import lru_cache
from celery import Celery
from flask import Flask, redirect, request, send_from_directory, jsonify, url_for
from flask_cors import CORS
from flask_uploads import UploadSet, configure_uploads
from flask_tus import tus_manager
import rasterio
from rasterio.warp import transform_bounds
from PIL import Image
from werkzeug.wsgi import DispatcherMiddleware
# Runtime configuration, overridable via environment variables.
APPLICATION_ROOT = os.environ.get('APPLICATION_ROOT', '')
REDIS_URL = os.environ.get('REDIS_URL', 'redis://')
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', REDIS_URL)
CELERY_DEFAULT_QUEUE = os.environ.get('CELERY_DEFAULT_QUEUE', 'posm-opendronemap-api')
CELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND', REDIS_URL)
PROJECTS_PATH = os.environ.get('PROJECTS_PATH', 'projects')
# NOTE(review): any non-empty env string (even "0" or "False") makes this
# truthy — confirm whether the env var should be parsed as a boolean.
USE_X_SENDFILE = os.environ.get('USE_X_SENDFILE', False)
UPLOADED_IMAGERY_DEST = os.environ.get('UPLOADED_IMAGERY_DEST', 'uploads/')
# strip trailing slash if necessary
if PROJECTS_PATH[-1] == '/':
    PROJECTS_PATH = PROJECTS_PATH[:-1]
# add trailing slash if necessary
# BUG FIX: this previously did UPLOADED_IMAGERY_DEST[:-1], which *removed*
# the last character instead of appending the missing '/'.
if UPLOADED_IMAGERY_DEST[-1] != '/':
    UPLOADED_IMAGERY_DEST = UPLOADED_IMAGERY_DEST + '/'
# Flask application, CORS-enabled so browser clients on other origins work.
app = Flask('posm-opendronemap-api')
CORS(app)
app.config['APPLICATION_ROOT'] = APPLICATION_ROOT
app.config['USE_X_SENDFILE'] = USE_X_SENDFILE
app.config['UPLOADED_IMAGERY_DEST'] = UPLOADED_IMAGERY_DEST
# Initialize Celery (broker/backend/queue come from the env config above)
celery = Celery(app.name, broker=CELERY_BROKER_URL)
celery.conf.update({
    'broker_url': CELERY_BROKER_URL,
    'result_backend': CELERY_RESULT_BACKEND,
    'task_default_queue': CELERY_DEFAULT_QUEUE,
    'task_track_started': True,
})
# Initialize Tus resumable uploads; completed uploads land in the imagery
# upload folder and are then filed by upload_file_handler below.
# TODO upload to a specific project id
tm = tus_manager(app, upload_url='/projects/upload',
                 upload_folder=app.config['UPLOADED_IMAGERY_DEST'])
# Initialize Flask-Uploads (accepts jpg/png only)
imagery = UploadSet('imagery', ('jpg', 'png'))
configure_uploads(app, (imagery,))
@tm.upload_file_handler
def upload_file_handler(upload_file_path, id=None, filename=None):
    """File an uploaded image under <PROJECTS_PATH>/<id>/images/.

    When `id` is omitted a fresh project id is generated; when `filename`
    is omitted it is derived from the upload path. Returns the stored
    path relative to PROJECTS_PATH.
    """
    if filename is None:
        filename = os.path.basename(upload_file_path)
    if id is None:
        id = str(uuid.uuid4())
    destination_dir = os.path.join(PROJECTS_PATH, id, 'images')
    if not os.path.exists(destination_dir):
        os.makedirs(destination_dir)
    shutil.move(upload_file_path, os.path.join(destination_dir, filename))
    return os.path.join(id, 'images', filename)
@celery.task(bind=True)
def process_project(self, id):
    """Celery task: run OpenDroneMap over a project's images.

    Executes /code/run.py inside the project directory, captures stdout and
    stderr into logs/, moves the useful outputs into artifacts/, generates a
    thumbnail, and records orthophoto metadata in the project's index.json.
    Raises with a JSON-encoded payload on timeout or process failure.
    """
    started_at = datetime.utcnow()
    project_path = os.path.join(PROJECTS_PATH, id)
    command = [
        'python',
        '/code/run.py',
        '--project-path',
        '.',  # this will be executed from the project directory
    ]
    def cleanup():
        # Remove ODM's intermediate outputs, whether dirs or stray files
        # (short-circuit `and` acts as "remove only if present").
        for dir in ('images_resize', 'odm_georeferencing', 'odm_meshing', 'odm_orthophoto', 'odm_texturing', 'opensfm', 'pmvs'):
            target_path = os.path.join(project_path, dir)
            os.path.isdir(target_path) and shutil.rmtree(target_path)
            os.path.isfile(target_path) and os.unlink(target_path)
    self.update_state(state='RUNNING',
                      meta={
                          'name': 'opendronemap',
                          'started_at': started_at.isoformat(),
                          'status': 'Processing imagery',
                          'task_id': self.request.id,
                      })
    child = None
    try:
        # start by cleaning up in case the previous run was cancelled
        cleanup()
        log_path = os.path.join(project_path, 'logs')
        os.path.exists(log_path) or os.mkdir(log_path)
        with open(os.path.join(log_path, 'stdout.log'), 'w+') as stdout:
            with open(os.path.join(log_path, 'stderr.log'), 'w+') as stderr:
                # NOTE: this is used instead of check_call so that we can call terminate() on the
                # child rather than assuming that signals will be passed through and be handled
                # correctly
                child = subprocess.Popen(command, cwd=project_path, stdout=stdout, stderr=stderr)
                child.wait(timeout=60*60*6)  # 6 hour ceiling on a run
    except subprocess.TimeoutExpired as e:
        child.kill()
        child.wait()
        cleanup()
        raise Exception(json.dumps({
            'name': 'opendronemap',
            'started_at': started_at.isoformat(),
            'command': ' '.join(command),
            'status': 'Timed out'
        }))
    except subprocess.CalledProcessError as e:
        cleanup()
        raise Exception(json.dumps({
            'name': 'opendronemap',
            'started_at': started_at.isoformat(),
            'command': e.cmd,
            'return_code': e.returncode,
            'status': 'Failed'
        }))
    except:
        # bare except on purpose: revocation/interrupt must still terminate
        # the child process before re-raising
        if child:
            child.terminate()
        raise
    # clean up and move artifacts
    artifacts_path = os.path.join(project_path, 'artifacts')
    if os.path.exists(artifacts_path):
        shutil.rmtree(artifacts_path)
    # BUG FIX: the directory was only created when it did not already exist;
    # after rmtree on a re-run it was never recreated, so the first move
    # below renamed the artifact *to* the artifacts path instead of into it.
    os.mkdir(artifacts_path)
    for artifact in ('odm_texturing', 'odm_orthophoto/odm_orthophoto.tif', 'odm_orthophoto/odm_orthophoto.png'):
        src_path = os.path.join(project_path, artifact)
        if os.path.isdir(src_path):
            # flatten directory artifacts into artifacts/
            for item in os.listdir(src_path):
                shutil.move(os.path.join(src_path, item), artifacts_path)
        else:
            os.path.exists(src_path) and shutil.move(src_path, artifacts_path)
    # create a thumbnail
    im = Image.open(os.path.join(artifacts_path, 'odm_orthophoto.png'))
    im.thumbnail((128, 128))
    im.save(os.path.join(artifacts_path, 'ortho_thumb.png'))
    # record orthophoto georeferencing info in the project metadata
    with rasterio.drivers():
        with rasterio.open(os.path.join(artifacts_path, 'odm_orthophoto.tif')) as src:
            metadata = get_metadata(id)
            metadata.update({
                'status': {
                    'state': 'SUCCESS',
                },
                'meta': {
                    'width': src.width,
                    'height': src.height,
                    'resolution': src.res,
                    'crs': str(src.crs),
                    'crs_wkt': src.crs.wkt,
                    'bounds': transform_bounds(src.crs, {'init': 'epsg:4326'}, *src.bounds),
                    'size': os.stat(src.name).st_size,
                }
            })
            save_metadata(id, metadata)
    cleanup()
    # drop the task marker so status lookups fall back to saved metadata
    os.unlink(os.path.join(project_path, "process.task"))
    return {
        'name': 'preprocess',
        'completed_at': datetime.utcnow().isoformat(),
        # isoformat for consistency with completed_at and the other
        # payloads above (previously a raw datetime object)
        'started_at': started_at.isoformat(),
        'status': 'Image processing completed'
    }
def get_task_status(id):
    """Return the Celery status dict for a project's recorded task, or an
    empty dict when no task marker exists."""
    marker_path = os.path.join(PROJECTS_PATH, id, 'process.task')
    if not os.path.exists(marker_path):
        return {}
    with open(marker_path) as marker:
        return fetch_status(marker.read())
def get_metadata(id):
    """Assemble a project's metadata document.

    Starts from the persisted index.json (or an empty skeleton), then
    overlays live image/artifact directory listings and the current task
    status.
    """
    project_dir = os.path.join(PROJECTS_PATH, id)
    index_path = os.path.join(project_dir, 'index.json')
    metadata = {
        'images': [],
        'artifacts': [],
        'status': {},
        'user': {},
    }
    if os.path.exists(index_path):
        with open(index_path) as index_file:
            metadata = json.load(index_file)
    # Directory contents are authoritative over whatever was persisted.
    for key in ('images', 'artifacts'):
        listing_path = os.path.join(project_dir, key)
        if os.path.exists(listing_path):
            metadata[key] = os.listdir(listing_path)
    status = get_task_status(id)
    if status:
        metadata['status'] = status
    return metadata
def save_metadata(id, metadata):
    """Persist `metadata` as the project's index.json, creating the project
    directory on first write."""
    index_path = os.path.join(PROJECTS_PATH, id, 'index.json')
    project_dir = os.path.dirname(index_path)
    if not os.path.exists(project_dir):
        os.makedirs(project_dir)
    with open(index_path, 'w') as index_file:
        index_file.write(json.dumps(metadata))
@app.errorhandler(IOError)
def handle_ioerror(error):
    # Missing files surface as IOError (e.g. open() on an absent
    # process.task marker); map them all to an empty 404 response.
    return '', 404
@app.route('/tasks')
def list_tasks():
    """Report scheduled/active/reserved Celery tasks across all workers."""
    inspector = celery.control.inspect()
    return jsonify({
        'scheduled': inspector.scheduled(),
        'active': inspector.active(),
        'reserved': inspector.reserved(),
    }), 200
@app.route('/projects')
def list_projects():
    """List available projects"""
    # Every directory under PROJECTS_PATH is a project; map each id to its
    # metadata document.
    projects = {
        entry: get_metadata(entry)
        for entry in os.listdir(PROJECTS_PATH)
        if os.path.isdir(os.path.join(PROJECTS_PATH, entry))
    }
    return jsonify(projects), 200
@app.route('/projects', methods=['PUT'])
def create_project():
    """Create a project with a fresh UUID; the JSON body becomes its
    user-supplied metadata."""
    user_fields = request.get_json(force=True)
    project_id = str(uuid.uuid4())
    metadata = get_metadata(project_id)
    metadata['user'] = user_fields
    save_metadata(project_id, metadata)
    return jsonify(metadata), 201
@app.route('/projects/<id>', methods=['PATCH', 'POST'])
def update_project(id):
    """Update a project's user metadata: PATCH merges the JSON body into the
    existing fields, POST replaces them wholesale."""
    user_fields = request.get_json(force=True)
    metadata = get_metadata(id)
    if request.method == 'PATCH':
        metadata['user'].update(user_fields)
    else:
        metadata['user'] = user_fields
    save_metadata(id, metadata)
    return jsonify(metadata), 200
@app.route('/projects/<id>/upload', methods=['PUT'])
def upload_imagery(id):
    """Accept a single multipart image upload and file it under the project.

    Flask-Uploads stores the file first; upload_file_handler then moves it
    into the project's images directory.
    """
    path = app.config['UPLOADED_IMAGERY_DEST'] + imagery.save(request.files['file'])
    # Called for its side effect (moving the file into the project); the
    # returned relative path was previously bound to an unused local.
    upload_file_handler(path, id=id)
    with app.app_context():
        return jsonify({
            'project': url_for('get_project', id=id, _external=True),
        }), 200
@app.route('/projects/<id>')
def get_project(id):
    """Return the full metadata document for one project."""
    return jsonify(get_metadata(id)), 200
@app.route('/projects/<id>/images')
def list_project_images(id):
    """Return the list of image filenames uploaded to a project."""
    return jsonify(get_metadata(id)['images']), 200
@app.route('/projects/<id>/images/<image_id>')
def download_project_image(id, image_id):
    """Serve one uploaded source image, honoring conditional requests."""
    return send_from_directory(
        os.path.join(PROJECTS_PATH, id, 'images'),
        image_id,
        conditional=True
    )
@app.route('/projects/<id>/images/<image_id>/thumb')
@lru_cache()
def get_project_image_thumbnail(id, image_id):
    """Return a JPEG thumbnail (max 128x128) of one project image.

    NOTE(review): lru_cache keys on (id, image_id) and never invalidates,
    so a replaced image keeps serving the old thumbnail until the process
    restarts — confirm this is acceptable.
    """
    im = Image.open(os.path.join(PROJECTS_PATH, id, 'images', image_id))
    out = StringIO()
    im.thumbnail((128, 128))  # resizes in place, preserving aspect ratio
    im.save(out, "jpeg")
    return out.getvalue(), 200, {
        'Content-Type': 'image/jpeg'
    }
@app.route('/projects/<id>/logs/stderr')
def get_project_stderr(id):
    """Serve the OpenDroneMap run's stderr log as plain text."""
    log_dir = os.path.join(PROJECTS_PATH, id, 'logs')
    return send_from_directory(log_dir, 'stderr.log',
                               conditional=True, mimetype='text/plain')
@app.route('/projects/<id>/logs/stdout')
def get_project_stdout(id):
    """Serve the OpenDroneMap run's stdout log as plain text."""
    log_dir = os.path.join(PROJECTS_PATH, id, 'logs')
    return send_from_directory(log_dir, 'stdout.log',
                               conditional=True, mimetype='text/plain')
@app.route('/projects/<id>/artifacts')
def list_project_artifacts(id):
    """Return the list of artifact filenames produced for a project."""
    return jsonify(get_metadata(id)['artifacts']), 200
@app.route('/projects/<id>/artifacts/<artifact_id>')
def download_project_artifact(id, artifact_id):
    """Serve one processing artifact, honoring conditional requests."""
    artifacts_dir = os.path.join(PROJECTS_PATH, id, 'artifacts')
    return send_from_directory(artifacts_dir, artifact_id, conditional=True)
@app.route('/projects/<id>/process', methods=['POST'])
def start_processing_project(id):
    """Kick off asynchronous OpenDroneMap processing for a project.

    Refuses with 400 while a process.task marker exists unless ?force=true;
    otherwise responds 202 with a Location header for the status endpoint.
    """
    marker_path = os.path.join(PROJECTS_PATH, id, 'process.task')
    if os.path.exists(marker_path) and not request.args.get('force'):
        return jsonify({
            'message': 'Processing already in progress, ?force=true to force'
        }), 400
    task = process_project.s(id=id).apply_async()
    # stash task.id so we know which task to look up
    with open(marker_path, 'w') as marker:
        marker.write(task.id)
    return '', 202, {
        'Location': url_for('get_project_status', id=id)
    }
@app.route('/projects/<id>/process', methods=['DELETE'])
def cancel_processing_project(id):
    """Revoke (terminate) the project's running Celery task.

    A missing marker file raises IOError, which the app's error handler
    maps to a 404.
    """
    marker_path = os.path.join(PROJECTS_PATH, id, 'process.task')
    with open(marker_path) as marker:
        celery.control.revoke(marker.read(), terminate=True)
    return '', 201
def fetch_status(task_id):
    """Summarize a Celery task (and its dependencies) as
    {'state': ..., 'steps': [...]}.

    Walks the result's dependency graph and collects each node's info
    payload; failed nodes carry the JSON strings raised by
    process_project's error paths.
    """
    result = celery.AsyncResult(task_id)
    status = {
        # TODO result.state doesn't account for the states of all children
        'state': result.state,
        'steps': []
    }
    for _, node in result.iterdeps(intermediate=True):
        if hasattr(node, 'info'):
            if isinstance(node.info, Exception):
                try:
                    # Failure payloads are JSON strings (see process_project).
                    # NOTE(review): .message is the Python 2 exception
                    # attribute — this file is py2 (StringIO import above).
                    status['steps'].append(json.loads(node.info.message))
                except:
                    # Non-JSON exception text is reported verbatim.
                    status['steps'].append(node.info.message)
            else:
                status['steps'].append(node.info)
    return status
@app.route('/projects/<id>/status')
def get_project_status(id):
    """Current processing status for a project.

    Prefers the live Celery status when a task marker exists, falls back to
    the persisted status for known projects, and 404s otherwise.
    """
    marker_path = os.path.join(PROJECTS_PATH, id, 'process.task')
    if os.path.exists(marker_path):
        with open(marker_path) as marker:
            return jsonify(fetch_status(marker.read())), 200
    if os.path.exists(os.path.dirname(marker_path)):
        return jsonify(get_metadata(id)['status']), 200
    return '', 404
# Mount the app under APPLICATION_ROOT ('' mounts at the root).
# NOTE(review): the DispatcherMiddleware fallback app is None, so requests
# outside APPLICATION_ROOT will fail — confirm that is intended.
app.wsgi_app = DispatcherMiddleware(None, {
    app.config['APPLICATION_ROOT']: app.wsgi_app
})
if __name__ == '__main__':
    # Development server only; run behind a real WSGI server in production.
    app.run(host='0.0.0.0', port=8000, debug=True)
|
<filename>Twitter_Crawler/developer_keys_tokens.py
"""
@author: Team18(member details are as follows)
Name(Firstname Surname) | Username | StudentID | City
---------------------------------------------------------------------
<NAME> | chuangw | 791793 | Melbourne
<NAME> | honglongz | 985262 | Melbourne
<NAME> | jili | 961543 | Melbourne
<NAME> | wlin8 | 885536 | Melbourne
<NAME> | Yangyangh1 | 978954 | Melbourne
"""
# Twitter API credentials for each crawler account, plus the region each
# account searches as [(state name, "lat,long,radius" geocode)].
developer_1 = {
    'developer_id': 1,
    'consumer_key': "MI0LP9Q5hVjsNw9vipfqHYLoG",
    'consumer_secret': "<KEY>",
    'access_token': "<KEY>",
    'access_token_secret': "<KEY>",
    'search_by_location': [('Australian Capital Territory', '-35.2809,149.1300,67.9115km')],
}
developer_2 = {
    'developer_id': 2,
    'consumer_key': "sEgcQbfRuJueIKPgcOSNsBfVZ",
    'consumer_secret': "<KEY>",
    'access_token': "<KEY>",
    'access_token_secret': "<KEY>",
    'search_by_location': [('Western Australia', '-32.8019,115.4495,1137.4491km')],
}
developer_3 = {
    'developer_id': 3,
    'consumer_key': "hXM6f3Ik6yWZbwL1Upb6WThYs",
    'consumer_secret': "<KEY>",
    'access_token': "<KEY>",
    'access_token_secret': "<KEY>",
    'search_by_location': [('Northern Territory', '-21.7979,133.5341,1163.5738km')],
}
developer_4 = {
    'developer_id': 4,
    'consumer_key': "PhsvvKJdgTDCWaY1yIjTpNMQu",
    'consumer_secret': "<KEY>",
    'access_token': "1213766792352395266-xCldWvcu46Lo8VyBAFNhtMlybt7VeB",
    'access_token_secret': "<KEY>",
    'search_by_location': [('Queensland', '-28.3640,152.0734,1942.3921km')],
}
developer_5 = {
    'developer_id': 5,
    'consumer_key': "aJtd8o0rnMaGcMpbKRINRevRV",
    'consumer_secret': "<KEY>",
    'access_token': "<KEY>",
    'access_token_secret': "<KEY>",
    'search_by_location': [('Tasmania', '-43.1213,147.0267,245.9167km')],
}
developer_6 = {
    'developer_id': 6,
    'consumer_key': "DwIYk9GSzF0E75ET3fKseSq6P",
    'consumer_secret': "<KEY>",
    'access_token': "<KEY>",
    'access_token_secret': "<KEY>9ZiDy",
    'search_by_location': [('South Australia', '-34.9285,138.6007,776.17km')],
}
developer_7 = {
    'developer_id': 7,
    'consumer_key': "RDkYXYW4G9RCC9qlKUlpvTz5F",
    'consumer_secret': "<KEY>",
    'access_token': "<KEY>",
    'access_token_secret': "<KEY>",
    'search_by_location': [('New South Wales', '-33.8688,151.2093,1147.3195km')],
}
developer_8 = {
    'developer_id': 8,
    'consumer_key': "kU5CURnt2Ha2Daez16dcImIlV",
    'consumer_secret': "<KEY>",
    'access_token': "<KEY>",
    'access_token_secret': "<KEY>",
    'search_by_location': [('Victoria', '-37.4713,144.7852,848.7016km')],
}
# Expose the accounts as a 1-indexed mapping: config[1] -> developer_1, etc.
# Reference the dicts directly rather than poking through locals(), which
# is fragile, and drop the unused `temp` binding.
config = {
    index: developer
    for index, developer in enumerate(
        (developer_1, developer_2, developer_3, developer_4,
         developer_5, developer_6, developer_7, developer_8),
        1,
    )
}
|
from convex_adversarial.dual_network import robust_loss, RobustBounds
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import time
import copy
import os
DEBUG = False
## standard training
def train_baseline(loader, model, opt, epoch, log1, log2, verbose):
    """One epoch of standard (non-robust) training on `loader`.

    Logs per-batch error/loss to `log1`, epoch averages to `log2`, and
    prints progress every `verbose` batches. Assumes CUDA is available.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    model.train()
    print('==================== training ====================')
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda()
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        # fraction of misclassified examples in this batch
        err = (out.max(1)[1] != y).float().sum() / X.size(0)
        opt.zero_grad()
        ce.backward()
        opt.step()
        batch_time.update(time.time()-end)
        end = time.time()
        losses.update(ce.item(), X.size(0))
        errors.update(err.item(), X.size(0))
        print(epoch, i, '{0:.4f}'.format(err.item()), '{0:.4f}'.format(ce.item()), file=log1)
        if verbose and i % verbose == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.4f} ({errors.avg:.4f})'.format(
                      epoch, i, len(loader), batch_time=batch_time,
                      loss=losses, errors=errors))
        log1.flush()
    # epoch-level averages
    print(epoch, '{:.4f}'.format(errors.avg), '{:.4f}'.format(losses.avg), file=log2)
    log2.flush()
def evaluate_baseline(loader, model, epoch, log, verbose):
    """Evaluate standard (clean) error/loss on `loader`; returns avg error.

    NOTE(review): gradients are not disabled here (no torch.no_grad()),
    so evaluation builds graphs it never uses — confirm if intentional.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    model.eval()
    print('==================== validating ====================')
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda()
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.max(1)[1] != y).float().sum() / X.size(0)
        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err.item(), X.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if verbose and i % verbose == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {error.val:.4f} ({error.avg:.4f})'.format(
                      i, len(loader), batch_time=batch_time, loss=losses,
                      error=errors))
        log.flush()
    print(epoch, '{:.4f}'.format(errors.avg), '{:.4f}'.format(losses.avg), file=log)
    log.flush()
    print(' * Error: {error.avg:.2%}'.format(error=errors))
    return errors.avg
## robust training for overall robustness
def train_robust(loader, model, opt, epsilon, epoch, log1, log2, verbose, clip_grad=None, **kwargs):
    """One epoch of certified-robust training using convex_adversarial's
    robust_loss with perturbation radius `epsilon`.

    Clean loss/error are tracked for logging only (under no_grad); the
    optimizer steps on the robust loss. Extra kwargs go to robust_loss.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()
    model.train()
    print('==================== training ====================')
    print('epsilon:', '{:.4f}'.format(epsilon))
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        if y.dim() == 2:
            y = y.squeeze(1)
        # data_time.update(time.time() - end)
        # clean metrics are for reporting only, so skip graph construction
        with torch.no_grad():
            ce = nn.CrossEntropyLoss()(model(X), y).item()
            err = (model(X).max(1)[1] != y).float().sum().item() / X.size(0)
        robust_ce, robust_err = robust_loss(model, epsilon, X, y, **kwargs)
        opt.zero_grad()
        robust_ce.backward()
        if clip_grad:
            nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
        opt.step()
        # measure accuracy and record loss
        losses.update(ce, X.size(0))
        errors.update(err, X.size(0))
        robust_losses.update(robust_ce.detach().item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))
        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()
        if verbose and i % verbose == 0:
            # NOTE(review): inside this branch i % verbose == 0 is always
            # true, so endline is always '\n'; the '\r' case is dead here.
            endline = '\n' if i % verbose == 0 else '\r'
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.4f} ({rloss.avg:.4f})\t'
                  'Robust error {rerrors.val:.4f} ({rerrors.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.4f} ({errors.avg:.4f})'.format(
                      epoch, i, len(loader), batch_time=batch_time,
                      loss=losses, errors=errors, rloss = robust_losses,
                      rerrors = robust_errors), end=endline)
        print(epoch, i, '{0:.4f}'.format(err), '{0:.4f}'.format(robust_err),
              '{0:.4f}'.format(ce), '{0:.4f}'.format(robust_ce.detach().item()), file=log1)
        log1.flush()
        # free per-batch tensors promptly to reduce GPU memory pressure
        del X, y, robust_ce, ce, err, robust_err
        if DEBUG and i == 10:
            break
    print(epoch, '{:.4f}'.format(errors.avg), '{:.4f}'.format(robust_errors.avg),
          '{:.4f}'.format(robust_losses.avg), file=log2)
    log2.flush()
    torch.cuda.empty_cache()
def evaluate_robust(loader, model, epsilon, epoch, log, verbose, **kwargs):
    """Evaluate clean and certified-robust error on `loader`.

    Returns (clean_error_avg, robust_error_avg).
    NOTE(review): grad mode is toggled with set_grad_enabled(False)/(True)
    without try/finally, so an exception mid-loop leaves grads disabled.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()
    model.eval()
    print('==================== validating ====================')
    print('epsilon:', '{:.4f}'.format(epsilon))
    end = time.time()
    torch.set_grad_enabled(False)
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        if y.dim() == 2:
            y = y.squeeze(1)
        robust_ce, robust_err = robust_loss(model, epsilon, X, y, **kwargs)
        ce = nn.CrossEntropyLoss()(model(X), y).item()
        err = (model(X).max(1)[1] != y).float().sum().item() / X.size(0)
        # _,pgd_err = _pgd(model, Variable(X), Variable(y), epsilon)
        # measure accuracy and record loss
        losses.update(ce, X.size(0))
        errors.update(err, X.size(0))
        robust_losses.update(robust_ce.item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))
        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()
        if verbose:
            # print(epoch, i, robust_ce.data[0], robust_err, ce.data[0], err)
            # '\r' overwrites the line between full reports every `verbose` batches
            endline = '\n' if i % verbose == 0 else '\r'
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.4f} ({rloss.avg:.4f})\t'
                  'Robust error {rerrors.val:.4f} ({rerrors.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {error.val:.4f} ({error.avg:.4f})'.format(
                      i, len(loader), batch_time=batch_time,
                      loss=losses, error=errors, rloss = robust_losses,
                      rerrors = robust_errors), end=endline)
        del X, y, robust_ce, ce, err, robust_err
        if DEBUG and i == 10:
            break
    print(epoch, '{:.4f}'.format(errors.avg), '{:.4f}'.format(robust_errors.avg),
          '{0:.4f}'.format(robust_losses.avg), file=log)
    log.flush()
    print('')
    print(' * Error: {error.avg:.2%}\n'
          ' * Robust error: {rerror.avg:.2%}'.format(
              error=errors, rerror=robust_errors))
    torch.set_grad_enabled(True)
    torch.cuda.empty_cache()
    return errors.avg, robust_errors.avg
## joint robust training for overall robustness
def train_joint_robust(loader, model1, model2, opt1, opt2, epsilon, epoch, log1, log2, verbose, clip_grad=None, **kwargs):
    """Robustly train two models jointly: one backward pass on the sum of
    their robust losses, then a step of each optimizer.

    NOTE(review): progress printing and both logs report only model1's
    statistics; model2 is tracked but never logged — confirm intended.
    """
    batch_time = AverageMeter()
    losses1 = AverageMeter()
    losses2 = AverageMeter()
    errors1 = AverageMeter()
    errors2 = AverageMeter()
    robust_losses1 = AverageMeter()
    robust_losses2 = AverageMeter()
    robust_errors1 = AverageMeter()
    robust_errors2 = AverageMeter()
    model1.train()
    model2.train()
    print('==================== training ====================')
    print('epsilon:', '{:.4f}'.format(epsilon))
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        if y.dim() == 2:
            y = y.squeeze(1)
        # data_time.update(time.time() - end)
        # NOTE(review): unlike train_robust, these clean losses are computed
        # outside no_grad (only the error computations below are inside).
        ce1 = nn.CrossEntropyLoss()(model1(X), y).item()
        ce2 = nn.CrossEntropyLoss()(model2(X), y).item()
        with torch.no_grad():
            err1 = (model1(X).max(1)[1] != y).float().sum().item() / X.size(0)
            err2 = (model2(X).max(1)[1] != y).float().sum().item() / X.size(0)
        robust_ce1, robust_err1 = robust_loss(model1, epsilon, X, y, **kwargs)
        robust_ce2, robust_err2 = robust_loss(model2, epsilon, X, y, **kwargs)
        # single joint objective so one backward covers both models
        robust_ce = robust_ce1 + robust_ce2
        opt1.zero_grad()
        opt2.zero_grad()
        robust_ce.backward()
        if clip_grad:
            nn.utils.clip_grad_norm_(model1.parameters(), clip_grad)
            nn.utils.clip_grad_norm_(model2.parameters(), clip_grad)
        opt1.step()
        opt2.step()
        # measure accuracy and record loss
        losses1.update(ce1, X.size(0))
        losses2.update(ce2, X.size(0))
        errors1.update(err1, X.size(0))
        errors2.update(err2, X.size(0))
        robust_losses1.update(robust_ce1.detach().item(), X.size(0))
        robust_losses2.update(robust_ce2.detach().item(), X.size(0))
        robust_errors1.update(robust_err1, X.size(0))
        robust_errors2.update(robust_err2, X.size(0))
        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()
        if verbose and i % verbose == 0:
            endline = '\n' if i % verbose == 0 else '\r'
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.4f} ({rloss.avg:.4f})\t'
                  'Robust error {rerrors.val:.4f} ({rerrors.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.4f} ({errors.avg:.4f})'.format(
                      epoch, i, len(loader), batch_time=batch_time,
                      loss=losses1, errors=errors1, rloss = robust_losses1,
                      rerrors = robust_errors1), end=endline)
        print(epoch, i, '{0:.4f}'.format(err1), '{0:.4f}'.format(robust_err1),
              '{0:.4f}'.format(ce1), '{0:.4f}'.format(robust_ce1.detach().item()), file=log1)
        log1.flush()
        del X, y, robust_ce1, robust_ce2, ce1, ce2, err1, err2, robust_err1, robust_err2
        if DEBUG and i == 10:
            break
    print(epoch, '{:.4f}'.format(errors1.avg), '{:.4f}'.format(robust_errors1.avg),
          '{:.4f}'.format(robust_losses1.avg), file=log2)
    log2.flush()
    torch.cuda.empty_cache()
## robust training for cost-sensitive robustness
def train_robust_task_spec(loader, model, opt, epsilon, epoch, log1, log2, verbose,
                           input_mat, mat_type, alpha, clip_grad=None, **kwargs):
    """One epoch of cost-sensitive robust training.

    Uses a cost matrix `input_mat` (binary or real-valued per `mat_type`)
    and weighting `alpha`; tracks the ratio of cost-weighted adversarial
    examples to examples counted (robust cost).
    NOTE(review): robust_loss_task_spec is not among this module's visible
    imports — confirm where it is defined/imported.
    """
    model.train()
    print('==================== training ====================')
    print('epsilon:', '{:.4f}'.format(epsilon))
    batch_time = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    # running totals for the epoch-level robust cost
    cost_adv_exps_total = 0
    num_exps_total = 0
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        if y.dim() == 2:
            y = y.squeeze(1)
        clas_err, robust_ce, cost_adv_exps, num_exps = robust_loss_task_spec(model, epsilon,
                                                                             Variable(X), Variable(y),
                                                                             input_mat, mat_type,
                                                                             alpha, **kwargs)
        opt.zero_grad()
        robust_ce.backward()
        if clip_grad:
            nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
        opt.step()
        # guard against division by zero when no examples are counted
        if num_exps != 0:
            robust_cost = cost_adv_exps/num_exps
        else:
            robust_cost = 0.0
        # measure accuracy and record loss
        errors.update(clas_err, X.size(0))
        robust_losses.update(robust_ce.item(), X.size(0))
        cost_adv_exps_total += cost_adv_exps
        num_exps_total += num_exps
        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()
        print(epoch, i, '{:.4f}'.format(clas_err), '{:.4f}'.format(robust_cost),
              '{:.4f}'.format(robust_ce.item()), file=log1)
        if verbose and i % verbose == 0:
            endline = '\n' if i % verbose == 0 else '\r'
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Error {error.val:.4f} ({error.avg:.4f})\t'
                  'Robust cost {rcost:.4f}\t'
                  'Robust loss {rloss.val:.4f} ({rloss.avg:.4f})'.format(
                      epoch, i, len(loader), batch_time=batch_time, error=errors,
                      rcost = robust_cost, rloss = robust_losses), end=endline)
        log1.flush()
        del X, y, robust_ce, clas_err, cost_adv_exps, num_exps, robust_cost
        if DEBUG and i ==10:
            break
    if num_exps_total != 0:
        robust_cost_avg = cost_adv_exps_total/num_exps_total
    else:
        robust_cost_avg = 0.0
    print(epoch, '{:.4f}'.format(errors.avg), '{:.4f}'.format(robust_cost_avg),
          '{:.4f}'.format(robust_losses.avg), file=log2)
    log2.flush()
    torch.cuda.empty_cache()
def evaluate_robust_task_spec(loader, model, epsilon, epoch, log, verbose,
                              input_mat, mat_type, alpha, **kwargs):
    """Evaluate the cost-sensitive robust error/loss on a validation loader.

    Runs with gradients globally disabled, aggregates classification error
    and cost-weighted robust error over the loader, prints a summary, and
    appends one line per epoch to `log`.

    Args:
        loader: validation DataLoader yielding (X, y) batches.
        model: network to evaluate (expected on CUDA).
        epsilon: perturbation radius for the robust loss.
        epoch: epoch index, used only for logging.
        log: open file handle for the per-epoch log.
        verbose: if non-zero, print progress (newline every `verbose` batches).
        input_mat, mat_type, alpha: forwarded to `robust_loss_task_spec`.
        **kwargs: forwarded to `robust_loss_task_spec` / RobustBounds.

    Returns:
        (errors.avg, robust_cost_avg): average classification error and
        average cost-weighted robust error.
    """
    model.eval()
    print('==================== validating ====================')
    print('epsilon:', '{:.4f}'.format(epsilon))
    batch_time = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    cost_adv_exps_total = 0
    num_exps_total = 0
    end = time.time()
    torch.set_grad_enabled(False)
    for i, (X, y) in enumerate(loader):
        X, y = X.cuda(), y.cuda().long()
        if y.dim() == 2:
            y = y.squeeze(1)
        clas_err, robust_ce, cost_adv_exps, num_exps = robust_loss_task_spec(
            model, epsilon, X, y, input_mat, mat_type, alpha, **kwargs)
        robust_cost = cost_adv_exps / num_exps if num_exps != 0 else 0.0
        # measure accuracy and record loss
        errors.update(clas_err, X.size(0))
        robust_losses.update(robust_ce.item(), X.size(0))
        cost_adv_exps_total += cost_adv_exps
        num_exps_total += num_exps
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if verbose:
            endline = '\n' if i % verbose == 0 else '\r'
            print('Validate: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Error {error.val:.4f} ({error.avg:.4f})\t'
                  'Robust cost {rcost:.4f}\t'
                  'Robust loss {rloss.val:.4f} ({rloss.avg:.4f})'.format(
                      i, len(loader), batch_time=batch_time, error=errors,
                      rcost=robust_cost, rloss=robust_losses), end=endline)
        del X, y, robust_ce, clas_err, cost_adv_exps, num_exps, robust_cost
        if DEBUG and i == 10:
            break
    if num_exps_total != 0:  # for binary case, same as the portion of cost-sensitive adv exps
        robust_cost_avg = cost_adv_exps_total / num_exps_total
    else:
        robust_cost_avg = 0.0
    print('')
    if mat_type == 'binary':
        print(' * Classification error: {error.avg:.2%}\n'
              ' * Cost-sensitive robust error: {rerror:.2%}'.format(
                  error=errors, rerror=robust_cost_avg))
    else:
        # BUG FIX: corrected the typo 'Classication Error' in the summary.
        print(' * Classification error: {error.avg:.2%}\n'
              ' * Average cost: {rcost:.3f}'.format(
                  error=errors, rcost=robust_cost_avg))
    # The log line was duplicated verbatim in both branches; hoisted here.
    print(epoch, '{:.4f}'.format(errors.avg), '{:.4f}'.format(robust_cost_avg),
          '{0:.4f}'.format(robust_losses.avg), file=log)
    log.flush()
    torch.set_grad_enabled(True)
    torch.cuda.empty_cache()
    return errors.avg, robust_cost_avg
## compute the pairwise test robust error w.r.t. a given classifier
def evaluate_test_clas_spec(loader, model, epsilon, path, verbose, **kwargs):
    """Compute and report the pairwise (class-specific) test robust error.

    Aggregates per-class classification-error and robust-error count
    matrices over the whole loader, prints the overall and pairwise
    results, and writes the raw matrices as tab-separated files under
    `path`.

    Args:
        loader: test DataLoader yielding (X, y) batches.
        model: classifier whose last layer exposes `out_features`.
        epsilon: perturbation radius forwarded to `calc_err_clas_spec`.
        path: filename prefix for the exported '.csv' matrices.
        verbose: if non-zero, print progress (newline every `verbose` batches).
        **kwargs: forwarded to `calc_err_clas_spec` / RobustBounds.

    Returns:
        None.
    """
    print('==================== testing ====================')
    model.eval()
    num_classes = model[-1].out_features
    # define the class-specific error matrices for aggregation
    clas_err_mats = torch.FloatTensor(num_classes+1, num_classes+1).zero_().cuda()
    robust_err_mats = torch.FloatTensor(num_classes+1, num_classes+1).zero_().cuda()
    num_exps_vecs = torch.FloatTensor(num_classes+1).zero_().cuda()
    torch.set_grad_enabled(False)
    # BUG FIX: the original returned before re-enabling gradients and before
    # its (dead, and broken -- numpy arrays have no `.to_csv`) export code,
    # leaving autograd globally disabled for any subsequent training. The
    # try/finally guarantees the global grad state is always restored.
    try:
        # aggregate the error matrices for the whole dataloader
        for i, (X, y) in enumerate(loader):
            X, y = X.cuda(), y.cuda().long()
            if y.dim() == 2:
                y = y.squeeze(1)
            clas_err_mat, robust_err_mat, \
                num_exps_vec = calc_err_clas_spec(model, epsilon, X, y, **kwargs)
            clas_err_mats += clas_err_mat
            robust_err_mats += robust_err_mat
            num_exps_vecs += num_exps_vec
            if verbose:
                endline = '\n' if i % verbose == 0 else '\r'
                print('Test: [{0}/{1}]'.format(i, len(loader)), end=endline)
            del X, y, clas_err_mat, robust_err_mat, num_exps_vec
        clas_err_mats = clas_err_mats.cpu().numpy().astype(int)
        robust_err_mats = robust_err_mats.cpu().numpy().astype(int)
        num_exps_vecs = num_exps_vecs.cpu().numpy()
        # last row/column hold the overall totals
        clas_err_overall = clas_err_mats[-1, -1] / num_exps_vecs[-1]
        robust_err_overall = robust_err_mats[-1, -1] / num_exps_vecs[-1]
        print('overall classification error: ', '{:.2%}'.format(clas_err_overall))
        print('overall robust error: ', '{:.2%}'.format(robust_err_overall))
        # compute the robust error probabilities for each pair of classes;
        # column i is normalized by the number of examples NOT in class i
        robust_prob_mats = np.zeros((num_classes+1, num_classes+1))
        for i in range(num_classes):
            class_size_col = num_exps_vecs.copy()
            class_size_col[num_classes] -= num_exps_vecs[i]
            robust_prob_mats[:, i] = robust_err_mats[:, i] / class_size_col
        robust_prob_mats[:, num_classes] = robust_err_mats[:, num_classes] / num_exps_vecs
        print('')
        print('pairwise robust test error:')
        print(robust_prob_mats)
        print(robust_err_mats)
        print('')
        print('overall classification error: ', '{:.2%}'.format(clas_err_overall))
        print('overall robust error: ', '{:.2%}'.format(robust_err_overall))
        # export the raw matrices (tab-separated, matching the original intent)
        np.savetxt(path + '_clasErrs.csv', clas_err_mats, delimiter='\t', fmt='%d')
        np.savetxt(path + '_robustErrs.csv', robust_err_mats, delimiter='\t', fmt='%d')
        np.savetxt(path + '_robustProbs.csv', robust_prob_mats, delimiter='\t')
    finally:
        torch.set_grad_enabled(True)
        torch.cuda.empty_cache()
## compute the overall cost-weighted error on test dataset
def evaluate_test(loader, model, epsilon, input_mat, mat_type, verbose, **kwargs):
    """Evaluate the overall cost-weighted robust error on a test loader.

    Like the validation routine, but without file logging or timing; prints
    a final summary and returns the aggregated statistics.

    Args:
        loader: test DataLoader yielding (X, y) batches.
        model: network to evaluate (expected on CUDA).
        epsilon: perturbation radius for the robust loss.
        input_mat, mat_type: cost-matrix arguments for `robust_loss_task_spec`.
        verbose: progress-print frequency; if equal to `len(loader)` the
            progress line is overwritten in place every batch.
        **kwargs: forwarded to `robust_loss_task_spec` / RobustBounds.

    Returns:
        (errors.avg, robust_cost_avg): average classification error and
        average cost-weighted robust error.
    """
    model.eval()
    # print('==================== testing ====================')
    errors = AverageMeter()
    cost_adv_exps_total = 0
    num_exps_total = 0
    n_batches = len(loader)
    torch.set_grad_enabled(False)
    for batch_idx, (inputs, targets) in enumerate(loader):
        inputs = inputs.cuda()
        targets = targets.cuda().long()
        if targets.dim() == 2:
            targets = targets.squeeze(1)
        clas_err, robust_ce, cost_adv_exps, num_exps = robust_loss_task_spec(
            model, epsilon, inputs, targets, input_mat, mat_type, **kwargs)
        robust_cost = cost_adv_exps / num_exps if num_exps != 0 else 0.0
        errors.update(clas_err, inputs.size(0))
        cost_adv_exps_total += cost_adv_exps
        num_exps_total += num_exps
        if verbose == n_batches:
            print('Test: [{0}/{1}]\t\t'
                  'Robust cost {rcost:.4f}\t\t'
                  'Error {error.val:.4f} ({error.avg:.4f})'.format(
                      batch_idx, n_batches, error=errors, rcost=robust_cost), end='\r')
        elif verbose:
            endline = '\n' if batch_idx % verbose == 0 else '\r'
            print('Test: [{0}/{1}]\t\t'
                  'Robust error {rcost:.4f}\t\t'
                  'Error {error.val:.4f} ({error.avg:.4f})'.format(
                      batch_idx, n_batches, error=errors,
                      rcost=robust_cost), end=endline)
        del inputs, targets, robust_ce, clas_err, cost_adv_exps, num_exps, robust_cost
    robust_cost_avg = (cost_adv_exps_total / num_exps_total
                       if num_exps_total != 0 else 0.0)
    print('')
    if mat_type == 'binary':
        print(' * Classification error: {error.avg:.2%}\n'
              ' * Cost-sensitive robust error: {rerror:.2%}'.format(
                  error=errors, rerror=robust_cost_avg))
    else:  # real-valued
        print(' * Classification error: {error.avg:.2%}\n'
              ' * Average cost: {rcost:.3f}'.format(
                  error=errors, rcost=robust_cost_avg))
    torch.set_grad_enabled(True)
    torch.cuda.empty_cache()
    return errors.avg, robust_cost_avg
## define the metric and training loss function for cost-sensitive robustness
def robust_loss_task_spec(net, epsilon, X, y, input_mat, mat_type, alpha=1.0, **kwargs):
    """Cost-sensitive robust training loss and error statistics.

    Combines the standard cross-entropy loss on clean predictions with a
    robust regularization term weighted by the cost matrix.

    Args:
        net: classifier whose last layer exposes `out_features`
            (e.g. an nn.Sequential).
        epsilon: perturbation radius for RobustBounds.
        X, y: input batch and integer labels (on CUDA).
        input_mat: (num_classes x num_classes) cost matrix; row k holds the
            costs of adversarial transformations from class k.
        mat_type: 'binary' counts cost-sensitive adversarial examples;
            any other value accumulates the total (real-valued) cost.
        alpha: weight of the robust regularization term.
        **kwargs: forwarded to RobustBounds.

    Returns:
        (clas_err, ce_loss, cost_adv_exps, num_exps): clean classification
        error rate, the differentiable total loss, the accumulated robust
        cost, and the number of examples considered.
    """
    num_classes = net[-1].out_features
    # loss function for standard classification
    out = net(X)
    clas_err = (out.max(1)[1] != y).float().sum().item() / X.size(0)
    # BUG FIX: reduction='elementwise_mean' was deprecated in PyTorch 1.0
    # and later removed; 'mean' is the equivalent spelling.
    ce_loss = nn.CrossEntropyLoss(reduction='mean')(out, y)
    # regularization term for cost-sensitive robustness
    cost_adv_exps = 0.0
    num_exps = 0
    for k in range(num_classes):
        if np.all(input_mat[k, :] == 0):
            continue  # class k has no costed target classes
        targ_clas = np.nonzero(input_mat[k, :])[0]  # output classes with non-zero cost
        ind = (y == k).nonzero()  # indices of examples with true label k
        if len(ind) == 0:
            continue
        ind = ind.squeeze(1)
        X_sub = X[ind, ...]
        y_sub = y[ind, ...]
        # robust score matrix restricted to the costed target classes
        f = RobustBounds(net, epsilon, **kwargs)(X_sub, y_sub)[:, targ_clas]
        zero_col = torch.zeros(len(ind)).cuda()
        weight_vec = torch.FloatTensor(input_mat[k, targ_clas]).repeat(len(ind), 1).cuda()
        # cost-weighted robust score matrix with an appended zero column
        f_weighted = torch.cat((f + torch.log(weight_vec), zero_col.unsqueeze(1)), dim=1)
        # target points at the appended zero column. BUG FIX: the original
        # built a LongTensor from a float64 numpy array (a dtype mismatch);
        # torch.full with dtype=torch.long is exact.
        target = torch.full((len(ind),), len(targ_clas), dtype=torch.long).cuda()
        # aggregate the training loss function (including the robust regularizer)
        ce_loss = ce_loss + alpha * nn.CrossEntropyLoss(reduction='mean')(f_weighted, target)
        # a positive robust score means the target class is not certified safe
        err_mat = (f > 0).cpu().numpy()
        if mat_type == 'binary':  # same as the number of cost-sensitive adversarial exps
            # an example counts once if ANY costed target class errs
            cost_adv_exps += err_mat.any(axis=1).sum().item()
        else:  # real-valued case: use the total costs as the measure
            cost_adv_exps += np.dot(np.sum(err_mat, axis=0), input_mat[k, targ_clas])
        num_exps += len(ind)
    return clas_err, ce_loss, cost_adv_exps, num_exps
## compute the pairwise classification and robust error
def calc_err_clas_spec(net, epsilon, X, y, **kwargs):
    """Compute pairwise classification and robust error count matrices for one batch.

    Entry [i, j] counts examples of true class i (mis)classified as (or
    robustly confusable with) class j; the extra last row and column hold
    per-class and overall totals.

    Args:
        net: classifier whose last layer exposes `out_features`.
        epsilon: perturbation radius handed to RobustBounds.
        X, y: input batch and integer labels.
        **kwargs: forwarded to RobustBounds.

    Returns:
        (clas_err_mat, robust_err_mat, num_exps_vec): count matrices of
        shape (num_classes+1, num_classes+1) and the per-class example
        counts of length num_classes+1, on the same device as X.
    """
    num_classes = net[-1].out_features
    targ_clas = range(num_classes)
    zero_mat = torch.FloatTensor(X.size(0), num_classes).zero_()
    # aggregate the class-specific classification and robust error counts
    clas_err_mat = torch.FloatTensor(num_classes+1, num_classes+1).zero_()
    robust_err_mat = torch.FloatTensor(num_classes+1, num_classes+1).zero_()
    # aggregate the number of examples for each class
    num_exps_vec = torch.FloatTensor(num_classes+1).zero_()
    if X.is_cuda:
        # keep all accumulators on the same device as the inputs
        zero_mat = zero_mat.cuda()
        clas_err_mat = clas_err_mat.cuda()
        robust_err_mat = robust_err_mat.cuda()
        num_exps_vec = num_exps_vec.cuda()
    # compute the class-specific classification error matrix
    val, idx = torch.max(net(X), dim=1)
    for j in range(len(y)):
        row_ind = y[j]
        col_ind = idx[j].item()
        if row_ind != col_ind:
            # off-diagonal count, plus the per-class total in the last column
            clas_err_mat[row_ind, col_ind] += 1
            clas_err_mat[row_ind, num_classes] += 1
    # last row: column-wise totals over all true classes
    clas_err_mat[num_classes, ] = torch.sum(clas_err_mat[:num_classes, ], dim=0)
    # robust score matrix from the certified bound; a positive entry appears
    # to indicate a possible adversarial flip into that target class
    # (RobustBounds is defined elsewhere -- semantics assumed; TODO confirm)
    f = RobustBounds(net, epsilon, **kwargs)(X,y)[:,targ_clas]
    # robust error counts for each example
    err_mat = (f > zero_mat).float() # class-specific robust error counts
    err = (f.max(1)[1] != y).float() # overall robust error counts
    # compute pairwise robust error matrix
    for i in range(num_classes):
        ind = (y == i).nonzero() # indices of examples in class i
        if len(ind) != 0:
            ind = ind.squeeze(1)
            robust_err_mat[i, :num_classes] += torch.sum(err_mat[ind, ].squeeze(1), dim=0)
            robust_err_mat[i, num_classes] += torch.sum(err[ind])
            num_exps_vec[i] += len(ind)
    # compute the weighted average for each target class
    robust_err_mat[num_classes, ] = torch.sum(robust_err_mat[:num_classes, ], dim=0)
    num_exps_vec[num_classes] = torch.sum(num_exps_vec[:num_classes])
    return clas_err_mat, robust_err_mat, num_exps_vec
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and mean of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def merge_models2(nn1, nn2, nn3, n_layers):
    """Merge two equally-shaped networks into one double-width network.

    The first `n_layers` Linear layers of `nn3` (each twice as wide as the
    corresponding layers of `nn1`/`nn2`) receive the source weights in
    block-diagonal form, so the two subnetworks run side by side without
    interacting. The Linear layer at index `n_layers` is set to the fixed
    averaging map 0.5*[I | I] with zero bias, making the merged network
    output the mean of the two subnetworks. All copied tensors are moved
    to CUDA. Layers beyond index `n_layers` are left untouched.

    Args:
        nn1, nn2: source networks with identical Linear-layer shapes.
        nn3: target network to be filled in place.
        n_layers: number of Linear layers to merge before the averaging layer.

    Returns:
        nn3, modified in place.
    """
    layers1 = list(nn1.modules())  # hoisted out of the loop (was rebuilt per layer)
    layers2 = list(nn2.modules())
    c = 0
    for i, layer3 in enumerate(nn3.modules()):
        if not isinstance(layer3, nn.Linear):
            continue
        # BUG FIX: the original guarded this body with `c < n_layers`, which
        # made its own `elif c == n_layers` averaging branch unreachable dead
        # code; `c <= n_layers` lets the averaging layer actually be written.
        if c <= n_layers:
            if c == 0:
                # First layer: same input width, stack the two output halves.
                layer1, layer2 = layers1[i], layers2[i]
                half_out = layer3.weight.data.size(0) // 2
                half_b = layer3.bias.data.size(0) // 2
                layer3.weight.data[:half_out, :] = layer1.weight.data.cuda()
                layer3.weight.data[half_out:, :] = layer2.weight.data.cuda()
                layer3.bias.data[:half_b] = layer1.bias.data.cuda()
                layer3.bias.data[half_b:] = layer2.bias.data.cuda()
            elif c == n_layers:
                # Averaging layer: 0.5 * [I | I] with zero bias.
                out_size = layer3.weight.data.size(1)
                diag = torch.diag(0.5 * torch.ones(out_size // 2).cuda())
                layer3.weight.data = torch.cat((diag, diag), 1).cuda()
                layer3.bias.data = torch.zeros(out_size // 2).cuda()
            else:
                # Hidden layers: block-diagonal copy, cross blocks zeroed.
                layer1, layer2 = layers1[i], layers2[i]
                half_out = layer3.weight.data.size(0) // 2
                half_in = layer3.weight.data.size(1) // 2
                half_b = layer3.bias.data.size(0) // 2
                layer3.weight.data[:half_out, :half_in] = layer1.weight.data.cuda()
                layer3.weight.data[half_out:, half_in:] = layer2.weight.data.cuda()
                layer3.weight.data[:half_out, half_in:] = torch.zeros(half_out, half_in).cuda()
                layer3.weight.data[half_out:, :half_in] = torch.zeros(half_out, half_in).cuda()
                layer3.bias.data[:half_b] = layer1.bias.data.cuda()
                layer3.bias.data[half_b:] = layer2.bias.data.cuda()
        c += 1
    return nn3
|
<filename>models/net_wrapper.py<gh_stars>1-10
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from helpers.utils import warpgrid, get_ctx
from .synthesizer_net import InnerProd, Bias, Transformer
from .audio_net import Unet
from .vision_net import ResnetFC, ResnetDilated
from .criterion import BCELoss, L1Loss, L2Loss
def activate(x, activation):
    """Apply the activation named by `activation` to tensor `x`.

    Args:
        x: input tensor.
        activation: one of 'sigmoid', 'softmax' (over dim 1), 'relu',
            'tanh', 'abstanh' (|tanh|), or 'no' (identity).

    Returns:
        The activated tensor (or `x` unchanged for 'no').

    Raises:
        Exception: if `activation` is not one of the supported names.
    """
    if activation == 'sigmoid':
        return torch.sigmoid(x)
    elif activation == 'softmax':
        return F.softmax(x, dim=1)
    elif activation == 'relu':
        return F.relu(x)
    elif activation == 'tanh':
        return torch.tanh(x)
    elif activation == 'abstanh':
        return torch.abs(torch.tanh(x))
    elif activation == 'no':
        return x
    else:
        # BUG FIX: corrected the typo 'Unkown' in the error message.
        raise Exception('Unknown activation!')
class NetWrapper(nn.Module):
    """Bundles the sound, frame, and synthesizer networks with a criterion.

    Runs the audio-visual separation pipeline: preprocess the mixture
    spectrogram, extract sound and frame features, synthesize per-source
    masks, and (in the standard path) compute the mask-prediction loss.
    """
    def __init__(self, nets, crit):
        super(NetWrapper, self).__init__()
        # Sub-networks: audio net, visual feature net, mask synthesizer.
        self.net_sound, self.net_frame, self.net_synthesizer = nets
        # Training criterion over predicted vs. ground-truth masks.
        self.crit = crit
    def forward(self, batch_data, ctx, pixelwise=False):
        """Dispatch to the pixelwise inference path or the standard path."""
        if pixelwise:
            return self._forward_pixelwise(batch_data, ctx)
        return self._forward(batch_data, ctx)
    def _forward(self, batch_data, ctx):
        """Standard pass over `num_mix` sources; returns (loss, outputs dict)."""
        mag_mix = batch_data['mag_mix']
        mags = batch_data['mags']
        frames = batch_data['frames']
        mag_mix = mag_mix + 1e-10  # avoid log(0) / division by zero below
        N = get_ctx(ctx, 'num_mix')
        B = mag_mix.size(0)
        T = mag_mix.size(3)
        # 0.0 warp the spectrogram to a (log-)frequency grid of height 256
        if get_ctx(ctx, 'log_freq'):
            grid_warp = torch.from_numpy(
                warpgrid(B, 256, T, warp=True)).to(get_ctx(ctx, 'device'))
            mag_mix = F.grid_sample(mag_mix, grid_warp, align_corners=True)
            for n in range(N):
                mags[n] = F.grid_sample(mags[n], grid_warp, align_corners=True)
        # 0.1 calculate loss weighting coefficient: magnitude of input mixture
        if get_ctx(ctx, 'weighted_loss'):
            weight = torch.log1p(mag_mix)
            weight = torch.clamp(weight, 1e-3, 10)
        else:
            weight = torch.ones_like(mag_mix)
        # 0.2 ground truth masks are computed after warping!
        gt_masks = [None for n in range(N)]
        for n in range(N):
            if get_ctx(ctx, 'binary_mask'):
                # for simplicity, mag_N > 0.5 * mag_mix
                gt_masks[n] = (mags[n] > 0.5 * mag_mix).float()
            else:
                gt_masks[n] = mags[n] / mag_mix
                # clamp to avoid large numbers in ratio masks
                gt_masks[n].clamp_(0., 5.)
        # LOG magnitude (detached: the input itself is not optimized)
        log_mag_mix = torch.log(mag_mix).detach()
        # 1. forward net_sound -> BxCxHxW
        feat_sound = self.net_sound(log_mag_mix)
        feat_sound = activate(feat_sound, get_ctx(ctx, 'sound_activation'))
        # 2. forward net_frame -> Bx1xC
        feat_frames = [None for n in range(N)]
        for n in range(N):
            feat_frames[n] = self.net_frame.forward_multiframe(frames[n])
            feat_frames[n] = activate(feat_frames[n], get_ctx(ctx, 'img_activation'))
        # 3. sound synthesizer: one predicted mask per source
        pred_masks = [None for n in range(N)]
        for n in range(N):
            pred_masks[n] = self.net_synthesizer(feat_frames[n], feat_sound)
            pred_masks[n] = activate(pred_masks[n], get_ctx(ctx, 'output_activation'))
        # 4. loss (reshaped to a 1-element tensor for downstream aggregation)
        err = self.crit(pred_masks, gt_masks, weight).reshape(1)
        return err, \
            {'pred_masks': pred_masks, 'gt_masks': gt_masks,
             'mag_mix': mag_mix, 'mags': mags, 'weight': weight}
    def _forward_pixelwise(self, batch_data, ctx):
        """Inference path producing a per-pixel mask map for a single source.

        Assumes num_mix == 1; returns predicted masks, the processed mixture
        spectrogram, and the (detached) frame feature channels.
        """
        mag_mix = batch_data['mag_mix']
        frames = batch_data['frames']
        mag_mix = mag_mix + 1e-10  # avoid log(0) below
        bs = mag_mix.size(0)
        T = mag_mix.size(3)
        # 0.0 warp the spectrogram
        if get_ctx(ctx, 'log_freq'):
            grid_warp = torch.from_numpy(warpgrid(bs, 256, T, warp=True)).to(get_ctx(ctx, 'device'))
            mag_mix = F.grid_sample(mag_mix, grid_warp, align_corners=True)
        # LOG magnitude
        log_mag_mix = torch.log(mag_mix).detach()
        # 1. forward net_sound -> BxCxHxW
        feat_sound = self.net_sound(log_mag_mix)
        feat_sound = activate(feat_sound, get_ctx(ctx, 'sound_activation'))
        # 2. forward net_frame -> Bx1xC
        frames = frames[0] # num_mix == 1
        feat_frames = self.net_frame.forward_multiframe(frames, pool=False)
        (B, C, T, H, W) = feat_frames.size()
        # pool the temporal dimension only, keeping the spatial layout
        feat_frames = feat_frames.permute(0, 1, 3, 4, 2)
        feat_frames = feat_frames.reshape(B * C, H * W, T)
        feat_frames = F.adaptive_avg_pool1d(feat_frames, 1)
        feat_frames = feat_frames.view(B, C, H, W)
        feat_frames = activate(feat_frames, get_ctx(ctx, 'img_activation'))
        # keep a numpy copy of the channels for visualization/analysis
        channels = feat_frames.detach().cpu().numpy()
        # 3. sound synthesizer, evaluated at every spatial location
        pred_masks = self.net_synthesizer.forward_pixelwise(feat_frames, feat_sound)
        pred_masks = activate(pred_masks, get_ctx(ctx, 'output_activation'))
        return {'pred_masks': pred_masks, 'processed_mag_mix': mag_mix, 'feat_frames_channels': channels}
class ModelBuilder:
    """Factory for the sound, frame, synthesizer, and criterion networks."""

    def weights_init(self, m):
        """Custom weight initialization, applied via `net.apply(...)`."""
        classname = m.__class__.__name__
        if 'Conv' in classname:
            m.weight.data.normal_(0.0, 0.001)
        elif 'BatchNorm' in classname:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
        elif 'Linear' in classname:
            m.weight.data.normal_(0.0, 0.0001)

    def build_sound(self, arch='unet5', fc_dim=64, weights='', device='cpu'):
        """Build the 2D audio U-Net, optionally loading pretrained weights."""
        depths = {'unet5': 5, 'unet6': 6, 'unet7': 7}
        if arch not in depths:
            raise Exception('Architecture undefined!')
        net_sound = Unet(fc_dim=fc_dim, num_downs=depths[arch])
        net_sound.apply(self.weights_init)
        if weights:
            print('Loading weights for net_sound')
            net_sound.load_state_dict(torch.load(weights, map_location=device))
        return net_sound

    def build_frame(self, arch='resnet18', fc_dim=64, pool_type='avgpool', weights='', device='cpu'):
        """Build the visual feature extractor on top of a pretrained ResNet-18."""
        wrappers = {'resnet18fc': ResnetFC, 'resnet18dilated': ResnetDilated}
        if arch not in wrappers:
            raise Exception('Architecture undefined!')
        original_resnet = torchvision.models.resnet18(True)
        net = wrappers[arch](original_resnet, fc_dim=fc_dim, pool_type=pool_type)
        if weights:
            print('Loading weights for net_frame')
            net.load_state_dict(torch.load(weights, map_location=device))
        return net

    def build_synthesizer(self, arch, fc_dim=64, weights='', device='cpu'):
        """Build the mask synthesizer network."""
        factories = {
            'linear': lambda: InnerProd(fc_dim=fc_dim),
            'bias': lambda: Bias(),
            'transformer': lambda: Transformer(fc_dim),
        }
        if arch not in factories:
            raise Exception('Architecture undefined!')
        net = factories[arch]()
        net.apply(self.weights_init)
        if weights:
            print('Loading weights for net_synthesizer')
            net.load_state_dict(torch.load(weights, map_location=device))
        return net

    def build_criterion(self, arch):
        """Build the mask-prediction training criterion."""
        criteria = {'bce': BCELoss, 'l1': L1Loss, 'l2': L2Loss}
        if arch not in criteria:
            raise Exception('Architecture undefined!')
        return criteria[arch]()
|
# coding: utf-8
"""
Cloudsmith API
The API to the Cloudsmith Service
OpenAPI spec version: v1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class WebhooksApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def webhooks_create(self, owner, repo, **kwargs):
"""
Create a specific webhook in a repository.
Create a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_create(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param WebhooksCreate data:
:return: RepositoryWebhook
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_create_with_http_info(owner, repo, **kwargs)
else:
(data) = self.webhooks_create_with_http_info(owner, repo, **kwargs)
return data
    def webhooks_create_with_http_info(self, owner, repo, **kwargs):
        """
        Create a specific webhook in a repository.
        Create a specific webhook in a repository.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.webhooks_create_with_http_info(owner, repo, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str owner: (required)
        :param str repo: (required)
        :param WebhooksCreate data:
        :return: RepositoryWebhook
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['owner', 'repo', 'data']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() must be captured at exactly this point -- it becomes
        # the merged parameter dict (owner, repo, kwargs) used below.
        params = locals()
        # reject unknown keyword arguments, fold known ones into `params`
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method webhooks_create" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'owner' is set
        if ('owner' not in params) or (params['owner'] is None):
            raise ValueError("Missing the required parameter `owner` when calling `webhooks_create`")
        # verify the required parameter 'repo' is set
        if ('repo' not in params) or (params['repo'] is None):
            raise ValueError("Missing the required parameter `repo` when calling `webhooks_create`")
        collection_formats = {}
        # URL path substitutions for '/webhooks/{owner}/{repo}/'
        path_params = {}
        if 'owner' in params:
            path_params['owner'] = params['owner']
        if 'repo' in params:
            path_params['repo'] = params['repo']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # request body: the WebhooksCreate payload, if provided
        body_params = None
        if 'data' in params:
            body_params = params['data']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['apikey']
        return self.api_client.call_api('/webhooks/{owner}/{repo}/', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='RepositoryWebhook',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def webhooks_delete(self, owner, repo, identifier, **kwargs):
"""
Delete a specific webhook in a repository.
Delete a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_delete(owner, repo, identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param str identifier: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_delete_with_http_info(owner, repo, identifier, **kwargs)
else:
(data) = self.webhooks_delete_with_http_info(owner, repo, identifier, **kwargs)
return data
    def webhooks_delete_with_http_info(self, owner, repo, identifier, **kwargs):
        """
        Delete a specific webhook in a repository.
        Delete a specific webhook in a repository.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.webhooks_delete_with_http_info(owner, repo, identifier, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str owner: (required)
        :param str repo: (required)
        :param str identifier: (required)
        :return: None
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['owner', 'repo', 'identifier']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() must be captured at exactly this point -- it becomes
        # the merged parameter dict (owner, repo, identifier, kwargs) below.
        params = locals()
        # reject unknown keyword arguments, fold known ones into `params`
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method webhooks_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'owner' is set
        if ('owner' not in params) or (params['owner'] is None):
            raise ValueError("Missing the required parameter `owner` when calling `webhooks_delete`")
        # verify the required parameter 'repo' is set
        if ('repo' not in params) or (params['repo'] is None):
            raise ValueError("Missing the required parameter `repo` when calling `webhooks_delete`")
        # verify the required parameter 'identifier' is set
        if ('identifier' not in params) or (params['identifier'] is None):
            raise ValueError("Missing the required parameter `identifier` when calling `webhooks_delete`")
        collection_formats = {}
        # URL path substitutions for '/webhooks/{owner}/{repo}/{identifier}/'
        path_params = {}
        if 'owner' in params:
            path_params['owner'] = params['owner']
        if 'repo' in params:
            path_params['repo'] = params['repo']
        if 'identifier' in params:
            path_params['identifier'] = params['identifier']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = ['apikey']
        return self.api_client.call_api('/webhooks/{owner}/{repo}/{identifier}/', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def webhooks_list(self, owner, repo, **kwargs):
"""
Get a list of all webhooks in a repository.
Get a list of all webhooks in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_list(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param int page: A page number within the paginated result set.
:param int page_size: Number of results to return per page.
:return: list[RepositoryWebhook]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_list_with_http_info(owner, repo, **kwargs)
else:
(data) = self.webhooks_list_with_http_info(owner, repo, **kwargs)
return data
    def webhooks_list_with_http_info(self, owner, repo, **kwargs):
        """
        Get a list of all webhooks in a repository.
        Get a list of all webhooks in a repository.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.webhooks_list_with_http_info(owner, repo, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str owner: (required)
        :param str repo: (required)
        :param int page: A page number within the paginated result set.
        :param int page_size: Number of results to return per page.
        :return: list[RepositoryWebhook]
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['owner', 'repo', 'page', 'page_size']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() must be captured at exactly this point -- it becomes
        # the merged parameter dict (owner, repo, kwargs) used below.
        params = locals()
        # reject unknown keyword arguments, fold known ones into `params`
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method webhooks_list" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'owner' is set
        if ('owner' not in params) or (params['owner'] is None):
            raise ValueError("Missing the required parameter `owner` when calling `webhooks_list`")
        # verify the required parameter 'repo' is set
        if ('repo' not in params) or (params['repo'] is None):
            raise ValueError("Missing the required parameter `repo` when calling `webhooks_list`")
        collection_formats = {}
        # URL path substitutions for '/webhooks/{owner}/{repo}/'
        path_params = {}
        if 'owner' in params:
            path_params['owner'] = params['owner']
        if 'repo' in params:
            path_params['repo'] = params['repo']
        # optional pagination arguments become query-string parameters
        query_params = []
        if 'page' in params:
            query_params.append(('page', params['page']))
        if 'page_size' in params:
            query_params.append(('page_size', params['page_size']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = ['apikey']
        return self.api_client.call_api('/webhooks/{owner}/{repo}/', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[RepositoryWebhook]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def webhooks_partial_update(self, owner, repo, identifier, **kwargs):
"""
Update a specific webhook in a repository.
Update a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_partial_update(owner, repo, identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param str identifier: (required)
:param WebhooksPartialUpdate data:
:return: RepositoryWebhook
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_partial_update_with_http_info(owner, repo, identifier, **kwargs)
else:
(data) = self.webhooks_partial_update_with_http_info(owner, repo, identifier, **kwargs)
return data
def webhooks_partial_update_with_http_info(self, owner, repo, identifier, **kwargs):
    """
    Update a specific webhook in a repository.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.webhooks_partial_update_with_http_info(owner, repo, identifier, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str owner: (required)
    :param str repo: (required)
    :param str identifier: (required)
    :param WebhooksPartialUpdate data:
    :return: RepositoryWebhook
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted argument names: endpoint parameters plus the transport-level
    # options understood by api_client.call_api.
    all_params = ['owner', 'repo', 'identifier', 'data']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the named arguments (owner/repo/identifier) plus
    # `kwargs`; unknown keywords are rejected, known ones are merged in.
    # NOTE: this idiom depends on the exact local variable names above.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method webhooks_partial_update" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'owner' is set
    if ('owner' not in params) or (params['owner'] is None):
        raise ValueError("Missing the required parameter `owner` when calling `webhooks_partial_update`")
    # verify the required parameter 'repo' is set
    if ('repo' not in params) or (params['repo'] is None):
        raise ValueError("Missing the required parameter `repo` when calling `webhooks_partial_update`")
    # verify the required parameter 'identifier' is set
    if ('identifier' not in params) or (params['identifier'] is None):
        raise ValueError("Missing the required parameter `identifier` when calling `webhooks_partial_update`")
    collection_formats = {}
    # Path template substitutions for /webhooks/{owner}/{repo}/{identifier}/.
    path_params = {}
    if 'owner' in params:
        path_params['owner'] = params['owner']
    if 'repo' in params:
        path_params['repo'] = params['repo']
    if 'identifier' in params:
        path_params['identifier'] = params['identifier']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The PATCH payload, when provided, is sent as the JSON request body.
    body_params = None
    if 'data' in params:
        body_params = params['data']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['apikey']
    return self.api_client.call_api('/webhooks/{owner}/{repo}/{identifier}/', 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='RepositoryWebhook',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def webhooks_read(self, owner, repo, identifier, **kwargs):
    """
    Retrieve a specific webhook in a repository.

    Synchronous by default; supply a `callback` callable to run the request
    asynchronously and receive the response through that callback.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.webhooks_read(owner, repo, identifier, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str owner: (required)
    :param str repo: (required)
    :param str identifier: (required)
    :return: RepositoryWebhook, or the request thread when called asynchronously.
    """
    # This convenience wrapper always returns the payload only, not the
    # full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Both the synchronous payload and the asynchronous request thread come
    # straight from the *_with_http_info call.
    result = self.webhooks_read_with_http_info(owner, repo, identifier, **kwargs)
    return result
def webhooks_read_with_http_info(self, owner, repo, identifier, **kwargs):
    """
    Views for working with repository webhooks.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.webhooks_read_with_http_info(owner, repo, identifier, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str owner: (required)
    :param str repo: (required)
    :param str identifier: (required)
    :return: RepositoryWebhook
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted argument names: endpoint parameters plus the transport-level
    # options understood by api_client.call_api.
    all_params = ['owner', 'repo', 'identifier']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the named arguments; kwargs are validated against
    # all_params and merged in, so unknown keywords fail fast.
    # NOTE: this idiom depends on the exact local variable names above.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method webhooks_read" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'owner' is set
    if ('owner' not in params) or (params['owner'] is None):
        raise ValueError("Missing the required parameter `owner` when calling `webhooks_read`")
    # verify the required parameter 'repo' is set
    if ('repo' not in params) or (params['repo'] is None):
        raise ValueError("Missing the required parameter `repo` when calling `webhooks_read`")
    # verify the required parameter 'identifier' is set
    if ('identifier' not in params) or (params['identifier'] is None):
        raise ValueError("Missing the required parameter `identifier` when calling `webhooks_read`")
    collection_formats = {}
    # Path template substitutions for /webhooks/{owner}/{repo}/{identifier}/.
    path_params = {}
    if 'owner' in params:
        path_params['owner'] = params['owner']
    if 'repo' in params:
        path_params['repo'] = params['repo']
    if 'identifier' in params:
        path_params['identifier'] = params['identifier']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # GET request: no body is sent.
    body_params = None
    # Authentication setting
    auth_settings = ['apikey']
    return self.api_client.call_api('/webhooks/{owner}/{repo}/{identifier}/', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='RepositoryWebhook',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
|
<filename>9_mh_server/__init__.py
""" MH Server plugin entry-point. See the load() and unload() functions.
"""
import gui3d, gui
import mh
from core import G
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from server import ServerThread
################################################################################
class MHServerTaskView(gui3d.TaskView):
    """MakeHuman TaskView that runs a ServerThread and registers text
    commands (via `factory.register`) that let a remote client drive the
    modeling, geometry and pose libraries.

    NOTE(review): `factory` is not imported in this module's visible import
    block — presumably injected by the plugin framework; confirm where it
    comes from before refactoring.
    """

    human = None    # Reference to the in-scene human
    toggle = None   # Toggle button for server enable / disable
    logbox = None   # Log view of any server spew
    server = None   # ServerThread instance

    def __init__(self, category):
        """ Constructor for the TaskView. This accepts the category under which
        this feature is enabled.

        The category is registered with the app and passed into this
        constructor on the `load()` API below.
        """
        self.human = gui3d.app.selectedHuman
        gui3d.TaskView.__init__(self, category, "MHServer")
        # Left panel: port entry and a restart button.
        fr_left = self.addLeftWidget(gui.GroupBox("Settings:"))
        self.txt_port = fr_left.addWidget(gui.TextEdit(text="18830"))
        self.btn_restart = fr_left.addWidget(gui.Button("Restart"))

        @self.btn_restart.mhEvent
        def onClicked(e):
            # Restart picks up the (possibly edited) port value.
            self.restart_server()

        # Top panel: log output for server messages.
        self.logbox = self.addTopWidget(gui.DocumentEdit())
        self.logbox.setText("")
        self.logbox.setLineWrapMode(gui.DocumentEdit.NoWrap)

    def bootstrap(self, app):
        """ `bootstrap` allows this TaskView to figure out dependent task views
        to trigger downstream functions.
        """
        # Cache the library task views used by the registered commands below.
        self.pose_lib = app.getTask("Pose/Animate", "Pose")
        self.skel_lib = app.getTask("Pose/Animate", "Skeleton")
        self.clothes_lib = app.getTask("Geometries", "Clothes")
        self.eyebrows_lib = app.getTask("Geometries", "Eyebrows")
        self.eyes_lib = app.getTask("Geometries", "Eyes")
        self.topologies_lib = app.getTask("Geometries", "Topologies")
        self.eyelashes_lib = app.getTask("Geometries", "Eyelashes")
        self.hair_lib = app.getTask("Geometries", "Hair")
        self.teeth_lib = app.getTask("Geometries", "Teeth")
        self.tongue_lib = app.getTask("Geometries", "Tongue")

    def log(self, msg):
        """ Logs a message to the text box `log` and, when a server is
        running, broadcasts it to connected clients as well.
        """
        self.logbox.addText(msg + "\n")
        if self.server:
            self.server.broadcast(str(msg))

    def command(self, msg, conn=None):
        """Split one incoming text line into command + args and dispatch it
        through the `factory` registry (see class-level NOTE about `factory`).
        """
        words = str(msg).rstrip().split(" ")
        cmd, args = words[0], words[1:]
        factory.run(self, cmd, args)

    def start_server(self):
        """Start a ServerThread on the port entered in the settings box."""
        self.log("Trying to start server thread ...")
        # NOTE(review): assumes `txt_port.text` yields the current string
        # value — confirm against the gui.TextEdit API.
        self.server = ServerThread(port=int(self.txt_port.text, 10))
        # Route the thread's Qt signals back into this task view.
        self.logbox.connect(self.server, SIGNAL("log(QString)"), self.log)
        self.logbox.connect(self.server, SIGNAL("command(QString)"), self.command)
        self.server.set_taskview(self)
        self.server.start()

    def stop_server(self):
        """Stop and forget the server thread; no-op when none is running."""
        self.log("Trying to close server thread ...")
        if self.server is None:
            return
        self.server.stop()
        self.server = None

    def restart_server(self):
        """Stop any running server, then start a fresh one."""
        self.stop_server()
        self.start_server()

    """ -----------------------------------------------------------------------
        Registered makehuman commands.
    """

    """ Modeling :: Main
    """
    @factory.register(
        "set_age",
        "Set the human's age",
        ["value", float, 0, "parameter value (between 1.0 and 90.0"])
    def set_age(self, age):
        # Clamp to the supported range before applying.
        age = min(max(age, 1.0), 90.0)
        G.mhapi.modifiers.setAge(age)

    @factory.register(
        "set_weight",
        "Set the human's weight",
        ["value", float, 0, "parameter value (between 50%% and 150%%"])
    def set_weight(self, weight):
        weight = min(max(weight, 50.0), 150.0)
        G.mhapi.modifiers.setWeight(weight)

    @factory.register(
        "set_muscle",
        "Set the human's muscle",
        ["value", float, 0, "parameter value (between 0%% and 100%%"])
    def set_muscle(self, muscle):
        muscle = min(max(muscle, 0.0), 100.0)
        G.mhapi.modifiers.setMuscle(muscle)

    @factory.register(
        "set_height",
        "Set the human's height",
        ["value", float, 0, "parameter value (in cm)"])
    def set_height(self, height):
        # Height is passed through unclamped; units are centimeters.
        G.mhapi.modifiers.setHeight(height)

    @factory.register(
        "set_gender",
        "Set the human's gender",
        ["value", float, 0, "parameter value (100%% is female and 0%% is male"])
    def set_gender(self, gender):
        gender = min(max(gender, 0.0), 100.0)
        G.mhapi.modifiers.setGender(gender)

    """ ------------------------------------------------------------------- """

    """ Geometries :: Clothes
    """
    @factory.register(
        "add_clothes",
        "Set the human's clothes -- these are addititve (see remove_clothes)",
        ["clothes_path", str, "data/clothes/male_casualsuit02/male_casualsuit02.mhclo", "path to clothes file"])
    def add_clothes(self, clothes_path):
        self.clothes_lib.selectProxy(clothes_path)

    @factory.register(
        "remove_clothes",
        "Remove the human's clothes -- these are addititve (see add_clothes)",
        ["clothes_path", str, "data/clothes/male_casualsuit02/male_casualsuit02.mhclo", "path to clothes file"])
    def remove_clothes(self, clothes_path):
        self.clothes_lib.deselectProxy(clothes_path)

    """ Geometries :: Eyes
    """
    @factory.register(
        "set_eyes",
        "Set the human's eyes -- should always set low-poly",
        ["eyes_path", str, "data/eyes/low-poly/low-poly.mhclo", "path to eyes file"])
    def set_eyes(self, eyes_path):
        self.eyes_lib.selectProxy(eyes_path)

    """ Geometries :: Hair
    """
    @factory.register(
        "set_hair",
        "Set the human's hair",
        ["hair_path", str, "data/hair/afro01/afro01.mhclo", "path to hair file"])
    def set_hair(self, hair_path):
        self.hair_lib.selectProxy(hair_path)

    """ Geometries :: Teeth
    """
    @factory.register(
        "set_teeth",
        "Set the human's teeth",
        ["teeth_path", str, "data/teeth/teeth_shape01/teeth_shape01.mhclo", "path to teeth file"])
    def set_teeth(self, teeth_path):
        self.teeth_lib.selectProxy(teeth_path)

    """ Geometries :: Topologies
    """
    @factory.register(
        "set_topologies",
        "Set the human's topologies",
        ["topologies_path", str, "", "path to topologies file"])
    def set_topologies(self, topologies_path):
        self.topologies_lib.selectProxy(topologies_path)

    """ Geometries :: Eyebrows
    """
    @factory.register(
        "set_eyebrows",
        "Set the human's eyebrows",
        ["eyebrows_path", str, "data/eyebrows/eyebrow001/eyebrow001.mhclo", "path to eyebrows file"])
    def set_eyebrows(self, eyebrows_path):
        self.eyebrows_lib.selectProxy(eyebrows_path)

    """ Geometries :: Eyelashes
    """
    @factory.register(
        "set_eyelashes",
        "Set the human's eyelashes",
        ["eyelashes_path", str, "data/eyelashes/eyelashes02/eyelashes02.mhclo", "path to eyelashes file"])
    def set_eyelashes(self, eyelashes_path):
        self.eyelashes_lib.selectProxy(eyelashes_path)

    """ Geometries :: Tongue
    """
    @factory.register(
        "set_tongue",
        "Set the human's tongue",
        ["tongue_path", str, None, "path to tongue file"])
    def set_tongue(self, tongue_path):
        self.tongue_lib.selectProxy(tongue_path)

    """ ------------------------------------------------------------------- """

    """ Pose/Animate :: Skeleton
    """
    @factory.register(
        "set_skeleton",
        "Set the human's skeleton from the specified .mhskel file",
        ["skel_path", str, "data/rigs/game_engine.mhskel", "path to .mhskel file"])
    def set_skeleton(self, skel_path):
        self.skel_lib.filechooser.onFileSelected(skel_path)

    """ Pose/Animate :: Pose
    """
    @factory.register(
        "set_pose",
        "Set the human's pose to the specified bvh file",
        ["pose_path", str, "data/poses/tpose.bvh", "path to pose file"])
    def set_pose(self, pose_path):
        self.pose_lib.filechooser.onFileSelected(pose_path)
################################################################################
class Loader():
    """Owns the lifecycle of the MHServer task view across plugin
    load/unload calls.
    """

    task = None  # The MHServerTaskView once load() has run

    def load(self, app):
        """Create the task view, register it under the MHServer category,
        bootstrap its library references and start the server thread.
        """
        category = app.getCategory("MHServer")
        view = MHServerTaskView(category)
        self.task = category.addTask(view)
        self.task.bootstrap(app)
        self.task.start_server()

    def unload(self, app):
        """Stop the server thread, if the task view was ever created."""
        if self.task:
            self.task.stop_server()
# Module-level singleton: keeps the loaded task view alive between the
# load() and unload() plugin hooks below.
loader = Loader()

################################################################################
""" Interface functions required to register with makehuman's plugin system.
"""

def load(app):
    """Makehuman plugin hook: delegate setup to the module-level Loader."""
    return loader.load(app)

def unload(app):
    """Makehuman plugin hook: delegate teardown to the module-level Loader."""
    return loader.unload(app)
|
<gh_stars>10-100
#!/usr/bin/env python3
# Handbrake processing of dvd/bluray
import sys
import os
import logging
import subprocess
import re
import shlex
# Added for sleep check/ transcode limits
import time # noqa: F401
import datetime # noqa: F401
import psutil # noqa: F401
from arm.ripper import utils
from arm.ui import app, db # noqa E402
from arm.config.config import cfg
PROCESS_COMPLETE = "Handbrake processing complete"
def handbrake_mainfeature(srcpath, basepath, logfile, job):
    """Process dvd with mainfeature enabled.

    srcpath = Path to source for HB (dvd or files)
    basepath = Path where HB will save trancoded files
    logfile = Logfile for HB to redirect output to
    job = Job object

    Raises RuntimeError when no main feature is found and ValueError for an
    unknown disc type; exits the process when the HandBrake call fails.
    Returns nothing.
    """
    logging.info("Starting DVD Movie Mainfeature processing")
    logging.debug("Handbrake starting: ")
    logging.debug("\n\r" + job.pretty_table())
    utils.database_updater({'status': "waiting_transcode"}, job)
    # TODO: send a notification that jobs are waiting ?
    # Block until a transcode slot is free.
    utils.sleep_check_process("HandBrakeCLI", int(cfg["MAX_CONCURRENT_TRANSCODES"]))
    logging.debug("Setting job status to 'transcoding'")
    utils.database_updater({'status': "transcoding"}, job)
    # Store only the bare filename on the track (consistent with
    # handbrake_all) and build the full output path separately.  The old
    # code joined basepath into `filename` and then joined basepath again,
    # which duplicated the path and stored an absolute path in the database.
    filename = job.title + "." + cfg["DEST_EXT"]
    filepathname = os.path.join(basepath, filename)
    logging.info(f"Ripping title Mainfeature to {shlex.quote(filepathname)}")
    get_track_info(srcpath, job)
    track = job.tracks.filter_by(main_feature=True).first()
    if track is None:
        msg = "No main feature found by Handbrake. Turn MAINFEATURE to false in arm.yml and try again."
        logging.error(msg)
        raise RuntimeError(msg)
    track.filename = track.orig_filename = filename
    db.session.commit()
    if job.disctype == "dvd":
        hb_args = cfg["HB_ARGS_DVD"]
        hb_preset = cfg["HB_PRESET_DVD"]
    elif job.disctype == "bluray":
        hb_args = cfg["HB_ARGS_BD"]
        hb_preset = cfg["HB_PRESET_BD"]
    else:
        # Previously an unknown disc type surfaced later as a NameError on
        # hb_args; fail fast with a clear message instead.
        raise ValueError(f"Unknown disc type for HandBrake: {job.disctype}")
    cmd = 'nice {0} -i {1} -o {2} --main-feature --preset "{3}" {4} >> {5} 2>&1'.format(
        cfg["HANDBRAKE_CLI"],
        shlex.quote(srcpath),
        shlex.quote(filepathname),
        hb_preset,
        hb_args,
        logfile
    )
    logging.debug(f"Sending command: {cmd}")
    try:
        subprocess.check_output(cmd, shell=True).decode("utf-8")
        logging.info("Handbrake call successful")
        track.status = "success"
    except subprocess.CalledProcessError as hb_error:
        err = f"Call to handbrake failed with code: {hb_error.returncode}({hb_error.output})"
        logging.error(err)
        # Record the failure on both the track and the job before bailing out.
        track.status = "fail"
        track.error = err
        job.status = "fail"
        db.session.commit()
        sys.exit(err)
    logging.info(PROCESS_COMPLETE)
    logging.debug("\n\r" + job.pretty_table())
    track.ripped = True
    db.session.commit()
def handbrake_all(srcpath, basepath, logfile, job):
    """Process all titles on the dvd

    srcpath = Path to source for HB (dvd or files)
    basepath = Path where HB will save trancoded files
    logfile = Logfile for HB to redirect output to
    job = Disc object

    Raises ValueError for an unknown disc type. Returns nothing.
    """
    # Wait until there is a spot to transcode
    job.status = "waiting_transcode"
    db.session.commit()
    utils.sleep_check_process("HandBrakeCLI", int(cfg["MAX_CONCURRENT_TRANSCODES"]))
    job.status = "transcoding"
    db.session.commit()
    logging.info("Starting BluRay/DVD transcoding - All titles")
    if job.disctype == "dvd":
        hb_args = cfg["HB_ARGS_DVD"]
        hb_preset = cfg["HB_PRESET_DVD"]
    elif job.disctype == "bluray":
        hb_args = cfg["HB_ARGS_BD"]
        hb_preset = cfg["HB_PRESET_BD"]
    else:
        # Previously an unknown disc type surfaced later as a NameError on
        # hb_args; fail fast with a clear message instead.
        raise ValueError(f"Unknown disc type for HandBrake: {job.disctype}")
    get_track_info(srcpath, job)
    logging.debug(f"Total number of tracks is {job.no_of_titles}")
    for track in job.tracks:
        if track.length < int(cfg["MINLENGTH"]):
            # too short
            logging.info(f"Track #{track.track_number} of {job.no_of_titles}. "
                         f"Length ({track.length}) is less than minimum length ({cfg['MINLENGTH']}). Skipping")
        elif track.length > int(cfg["MAXLENGTH"]):
            # too long
            logging.info(f"Track #{track.track_number} of {job.no_of_titles}. "
                         f"Length ({track.length}) is greater than maximum length ({cfg['MAXLENGTH']}). Skipping")
        else:
            # just right
            logging.info(f"Processing track #{track.track_number} of {job.no_of_titles}. "
                         f"Length is {track.length} seconds.")
            # Bare filename is stored on the track; the full path is only
            # used for the HandBrake invocation.
            filename = "title_" + str.zfill(str(track.track_number), 2) + "." + cfg["DEST_EXT"]
            filepathname = os.path.join(basepath, filename)
            logging.info(f"Transcoding title {track.track_number} to {shlex.quote(filepathname)}")
            track.filename = track.orig_filename = filename
            db.session.commit()
            cmd = 'nice {0} -i {1} -o {2} --preset "{3}" -t {4} {5}>> {6} 2>&1'.format(
                cfg["HANDBRAKE_CLI"],
                shlex.quote(srcpath),
                shlex.quote(filepathname),
                hb_preset,
                str(track.track_number),
                hb_args,
                logfile
            )
            logging.debug(f"Sending command: {cmd}")
            try:
                hb = subprocess.check_output(
                    cmd,
                    shell=True
                ).decode("utf-8")
                logging.debug(f"Handbrake exit code: {hb}")
                track.status = "success"
            except subprocess.CalledProcessError as hb_error:
                # A failed title does not abort the batch; record and move on.
                err = f"Handbrake encoding of title {track.track_number} failed with code: {hb_error.returncode}" \
                      f"({hb_error.output})"
                logging.error(err)
                track.status = "fail"
                track.error = err
            track.ripped = True
            db.session.commit()
    logging.info(PROCESS_COMPLETE)
    logging.debug("\n\r" + job.pretty_table())
def handbrake_mkv(srcpath, basepath, logfile, job):
    """Process all mkv files in a directory.

    srcpath = Path to source for HB (dvd or files)
    basepath = Path where HB will save trancoded files
    logfile = Logfile for HB to redirect output to
    job = Disc object

    Raises ValueError for an unknown disc type. Returns nothing.
    """
    # Added to limit number of transcodes
    job.status = "waiting_transcode"
    db.session.commit()
    utils.sleep_check_process("HandBrakeCLI", int(cfg["MAX_CONCURRENT_TRANSCODES"]))
    job.status = "transcoding"
    db.session.commit()
    if job.disctype == "dvd":
        hb_args = cfg["HB_ARGS_DVD"]
        hb_preset = cfg["HB_PRESET_DVD"]
    elif job.disctype == "bluray":
        hb_args = cfg["HB_ARGS_BD"]
        hb_preset = cfg["HB_PRESET_BD"]
    else:
        # Previously an unknown disc type surfaced later as a NameError on
        # hb_args; fail fast with a clear message instead.
        raise ValueError(f"Unknown disc type for HandBrake: {job.disctype}")
    # This will fail if the directory raw gets deleted
    for f in os.listdir(srcpath):
        srcpathname = os.path.join(srcpath, f)
        destfile = os.path.splitext(f)[0]
        # Build the bare output filename once, then join basepath exactly
        # once.  The old code joined basepath into `filename` and then joined
        # basepath again for `filepathname`, duplicating the path.
        filename = destfile + "." + cfg["DEST_EXT"]
        filepathname = os.path.join(basepath, filename)
        logging.info(f"Transcoding file {shlex.quote(f)} to {shlex.quote(filepathname)}")
        cmd = 'nice {0} -i {1} -o {2} --preset "{3}" {4}>> {5} 2>&1'.format(
            cfg["HANDBRAKE_CLI"],
            shlex.quote(srcpathname),
            shlex.quote(filepathname),
            hb_preset,
            hb_args,
            logfile
        )
        logging.debug(f"Sending command: {cmd}")
        try:
            hb = subprocess.check_output(
                cmd,
                shell=True
            ).decode("utf-8")
            logging.debug(f"Handbrake exit code: {hb}")
        except subprocess.CalledProcessError as hb_error:
            # A failed file does not abort the batch; log and continue.
            err = f"Handbrake encoding of file {shlex.quote(f)} failed with code: {hb_error.returncode}" \
                  f"({hb_error.output})"
            logging.error(err)
            # job.errors.append(f)
    logging.info(PROCESS_COMPLETE)
    logging.debug("\n\r" + job.pretty_table())
def get_track_info(srcpath, job):
    """Use HandBrake to get track info and updatte Track class

    srcpath = Path to disc
    job = Job instance

    Returns -1 only when HandBrake's scan output could not be obtained with
    either decoding attempt; otherwise returns None after storing tracks.
    """
    charset_found = False
    logging.info("Using HandBrake to get information on all the tracks on the disc. This will take a few minutes...")
    cmd = '{0} -i {1} -t 0 --scan'.format(
        cfg["HANDBRAKE_CLI"],
        shlex.quote(srcpath)
    )
    logging.debug(f"Sending command: {cmd}")
    try:
        # First attempt: decode the scan log as UTF-8, ignoring bad bytes.
        hb = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            shell=True
        ).decode('utf-8', 'ignore').splitlines()
    except subprocess.CalledProcessError as hb_error:
        logging.error("Couldn't find a valid track. Try running the command manually to see more specific errors.")
        logging.error(f"Specific error is: {hb_error}")
    else:
        charset_found = True
    if not charset_found:
        # Retry with the cp437 (DOS) code page before giving up entirely.
        try:
            hb = subprocess.check_output(
                cmd,
                stderr=subprocess.STDOUT,
                shell=True
            ).decode('cp437').splitlines()
        except subprocess.CalledProcessError as hb_error:
            logging.error("Couldn't find a valid track. Try running the command manually to see more specific errors.")
            logging.error(f"Specific error is: {hb_error}")
            # If it doesnt work now we either have bad encoding or HB has ran into issues
            return -1
    # Patterns anchoring the two scan-log line types parsed below:
    # "+ title N:" starts a new title; "duration:" carries its length.
    t_pattern = re.compile(r'.*\+ title *')
    pattern = re.compile(r'.*duration\:.*')
    seconds = 0
    t_no = 0
    fps = float(0)
    aspect = 0
    result = None
    mainfeature = False
    for line in hb:
        # get number of titles (only parsed until the first match is found)
        if result is None:
            if job.disctype == "bluray":
                result = re.search('scan: BD has (.*) title\(s\)', line)  # noqa: W605
            else:
                result = re.search('scan: DVD has (.*) title\(s\)', line)  # noqa: W605
            if result:
                titles = result.group(1)
                titles = titles.strip()
                logging.debug(f"Line found is: {line}")
                logging.info(f"Found {titles} titles")
                job.no_of_titles = titles
                db.session.commit()
        if (re.search(t_pattern, line)) is not None:
            # A new "+ title N:" line: flush the previously accumulated
            # title (if any) before starting to collect the next one.
            if t_no == 0:
                pass
            else:
                utils.put_track(job, t_no, seconds, aspect, fps, mainfeature, "handbrake")
            mainfeature = False
            t_no = line.rsplit(' ', 1)[-1]
            t_no = t_no.replace(":", "")
        if (re.search(pattern, line)) is not None:
            # Duration line "  + duration: HH:MM:SS" -> total seconds.
            t = line.split()
            h, m, s = t[2].split(':')
            seconds = int(h) * 3600 + int(m) * 60 + int(s)
        if (re.search("Main Feature", line)) is not None:
            mainfeature = True
        if (re.search(" fps", line)) is not None:
            # The video line carries both the frame rate and, three tokens
            # back, the aspect ratio (trailing comma stripped).
            fps = line.rsplit(' ', 2)[-2]
            aspect = line.rsplit(' ', 3)[-3]
            aspect = str(aspect).replace(",", "")
    # Flush the final title accumulated by the loop.
    utils.put_track(job, t_no, seconds, aspect, fps, mainfeature, "handbrake")
|
#!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Labs.
# authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Module to manipulate the routes, by making then more or less dense (Up to a certain parameter).
It also contains functions to convert the CARLA world location do GPS coordinates.
"""
import math
import xml.etree.ElementTree as ET
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.local_planner import RoadOption
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
def _location_to_gps(lat_ref, lon_ref, location):
"""
Convert from world coordinates to GPS coordinates
:param lat_ref: latitude reference for the current map
:param lon_ref: longitude reference for the current map
:param location: location to translate
:return: dictionary with lat, lon and height
"""
EARTH_RADIUS_EQUA = 6378137.0 # pylint: disable=invalid-name
scale = math.cos(lat_ref * math.pi / 180.0)
mx = scale * lon_ref * math.pi * EARTH_RADIUS_EQUA / 180.0
my = scale * EARTH_RADIUS_EQUA * math.log(math.tan((90.0 + lat_ref) * math.pi / 360.0))
mx += location.x
my -= location.y
lon = mx * 180.0 / (math.pi * EARTH_RADIUS_EQUA * scale)
lat = 360.0 * math.atan(math.exp(my / (EARTH_RADIUS_EQUA * scale))) / math.pi - 90.0
z = location.z
return {'lat': lat, 'lon': lon, 'z': z}
def location_route_to_gps(route, lat_ref, lon_ref):
    """
    Translate every waypoint of the route into a (gps_point, connection) pair.

    :param route: iterable of (transform, connection) pairs
    :param lat_ref: latitude reference for the current map
    :param lon_ref: longitude reference for the current map
    :return: list of (gps dict, connection) pairs
    """
    return [
        (_location_to_gps(lat_ref, lon_ref, transform.location), connection)
        for transform, connection in route
    ]
def _get_latlon_ref(world):
"""
Convert from waypoints world coordinates to CARLA GPS coordinates
:return: tuple with lat and lon coordinates
"""
xodr = world.get_map().to_opendrive()
tree = ET.ElementTree(ET.fromstring(xodr))
# default reference
lat_ref = 42.0
lon_ref = 2.0
for opendrive in tree.iter("OpenDRIVE"):
for header in opendrive.iter("header"):
for georef in header.iter("geoReference"):
if georef.text:
str_list = georef.text.split(' ')
for item in str_list:
if '+lat_0' in item:
lat_ref = float(item.split('=')[1])
if '+lon_0' in item:
lon_ref = float(item.split('=')[1])
return lat_ref, lon_ref
def downsample_route(route, sample_factor):
    """
    Downsample the route by some factor.

    :param route: the trajectory; has to contain the waypoints and the road options
    :param sample_factor: maximum distance between kept samples
    :return: the indices of the waypoints kept for the final route
    """
    kept_ids = []
    prev_option = None
    dist = 0
    last_index = len(route) - 1
    lane_changes = (RoadOption.CHANGELANELEFT, RoadOption.CHANGELANERIGHT)
    for i, point in enumerate(route):
        curr_option = point[1]
        if curr_option in lane_changes:
            # Always keep lane-change waypoints.
            kept_ids.append(i)
            dist = 0
        elif prev_option != curr_option and prev_option not in lane_changes:
            # Keep the first waypoint after the road option changes.
            kept_ids.append(i)
            dist = 0
        elif dist > sample_factor:
            # Keep a waypoint once the max sampling distance is exceeded.
            kept_ids.append(i)
            dist = 0
        elif i == last_index:
            # Always keep the final waypoint.
            kept_ids.append(i)
            dist = 0
        else:
            # Otherwise accumulate distance traveled since the last sample.
            kept_ids  # no-op branch marker removed; just update the distance
            dist += point[0].location.distance(route[i - 1][0].location)
        prev_option = curr_option
    return kept_ids
def interpolate_trajectory(waypoints_trajectory, hop_resolution=1.0):
    """
    Interpolate a coarse list of keypoints into a dense trajectory.

    Returns the interpolated route both in GPS coordinates and in its
    original (transform, road option) form.

    Args:
        - waypoints_trajectory: the current coarse trajectory
        - hop_resolution: distance between the trajectory's waypoints
    """
    grp = GlobalRoutePlanner(CarlaDataProvider.get_map(), hop_resolution)
    route = []
    # Trace every consecutive pair of keypoints and flatten the traces.
    for start, end in zip(waypoints_trajectory[:-1], waypoints_trajectory[1:]):
        for waypoint, road_option in grp.trace_route(start, end):
            route.append((waypoint.transform, road_option))
    lat_ref, lon_ref = _get_latlon_ref(CarlaDataProvider.get_world())
    return location_route_to_gps(route, lat_ref, lon_ref), route
def interpolate_trajectory_modified(world, waypoints_trajectory, hop_resolution=1.0):
    """
    Given some raw keypoints interpolate a full dense trajectory to be used by the user.

    :param world: a reference to the CARLA world so we can use the planner
    :param waypoints_trajectory: the current coarse trajectory
    :param hop_resolution: is the resolution, how dense is the provided trajectory going to be made
    :return: the full interpolated route both in GPS coordinates and also in its original form.

    NOTE(review): despite the description above, this function currently
    returns only the trace of the LAST keypoint pair — the aggregation into
    `route` is commented out below — and it raises NameError when fewer than
    two waypoints are given (interpolated_trace is never bound). Confirm
    whether this truncated behavior is intended before relying on it.
    """
    grp = GlobalRoutePlanner(world.get_map(), hop_resolution)
    # Obtain route plan
    route = []
    for i in range(len(waypoints_trajectory) - 1):  # Goes until the one before the last.
        waypoint = waypoints_trajectory[i]
        waypoint_next = waypoints_trajectory[i + 1]
        # Only the last pair's trace survives the loop (see NOTE above).
        interpolated_trace = grp.trace_route(waypoint, waypoint_next)
        # for wp_tuple in interpolated_trace:
        #     route.append((wp_tuple[0], wp_tuple[1]))
    # Increase the route position to avoid fails
    # lat_ref, lon_ref = _get_latlon_ref(world)
    return interpolated_trace
<reponame>LiyuanLucasLiu/LD-Net
from __future__ import print_function
import datetime
import time
import torch
import torch.nn as nn
import torch.optim as optim
import codecs
import pickle
import math
from model_word_ada.LM import LM
from model_word_ada.basic import BasicRNN
from model_word_ada.ldnet import LDRNN
from model_word_ada.densenet import DenseRNN
from model_word_ada.dataset import LargeDataset, EvalDataset
from model_word_ada.adaptive import AdaptiveSoftmax
import model_word_ada.utils as utils
from torch_scope import wrapper
import argparse
import logging
import json
import os
import sys
import itertools
import functools
logger = logging.getLogger(__name__)
def evaluate(data_loader, lm_model, limited = 76800):
    """Compute the perplexity of ``lm_model`` over ``data_loader``.

    :param data_loader: iterable of ``(word_t, label_t)`` tensor batches
    :param lm_model: language model exposing ``eval()``, ``init_hidden()`` and
        ``__call__(word_t, label_t)`` returning the mean loss for the batch
    :param limited: stop once this many labels have been scored; a negative
        value evaluates the whole loader
    :return: ``exp(total_loss / total_len)`` -- corpus perplexity
    """
    lm_model.eval()
    lm_model.init_hidden()
    total_loss = 0
    total_len = 0
    # BUGFIX/perf: evaluation does not need gradients; without no_grad() the
    # autograd graph for every batch is kept alive, wasting memory and time.
    with torch.no_grad():
        for word_t, label_t in data_loader:
            label_t = label_t.view(-1)
            tmp_len = label_t.size(0)
            # Weight each batch's mean loss by its label count.
            total_loss += tmp_len * lm_model(word_t, label_t).item()
            total_len += tmp_len
            if limited >= 0 and total_len > limited:
                break
    ppl = math.exp(total_loss / total_len)
    return ppl
if __name__ == "__main__":
    # Train an LD-Net / DenseNet / basic RNN language model with an adaptive
    # softmax on the One Billion Word dataset; metrics and checkpoints are
    # managed through the torch_scope ``wrapper`` (``pw``).
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=str, default="auto")
    parser.add_argument('--cp_root', default='./checkpoint')
    parser.add_argument('--checkpoint_name', default='ld0')
    parser.add_argument('--git_tracking', action='store_true')
    parser.add_argument('--dataset_folder', default='./data/one_billion/')
    parser.add_argument('--restore_checkpoint', default='')
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--sequence_length', type=int, default=20)
    parser.add_argument('--hid_dim', type=int, default=300)
    parser.add_argument('--word_dim', type=int, default=300)
    parser.add_argument('--label_dim', type=int, default=1600)
    parser.add_argument('--layer_num', type=int, default=10)
    parser.add_argument('--droprate', type=float, default=0.01)
    parser.add_argument('--add_relu', action='store_true')
    parser.add_argument('--layer_drop', type=float, default=0.5)
    parser.add_argument('--epoch', type=int, default=400)
    parser.add_argument('--clip', type=float, default=5)
    parser.add_argument('--update', choices=['Adam', 'Adagrad', 'Adadelta'], default='Adam', help='adam is the best')
    parser.add_argument('--rnn_layer', choices=['Basic', 'DenseNet', 'LDNet'], default='LDNet')
    parser.add_argument('--rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--lr_decay', type=float, default=0.1)
    parser.add_argument('--cut_off', nargs='+', default=[4000,40000,200000])
    parser.add_argument('--interval', type=int, default=100)
    parser.add_argument('--epoch_size', type=int, default=4000)
    parser.add_argument('--patience', type=float, default=10)
    args = parser.parse_args()

    # Experiment tracker: handles checkpoints, metric logging and git tracking.
    pw = wrapper(os.path.join(args.cp_root, args.checkpoint_name), args.checkpoint_name, enable_git_track=args.git_tracking)

    gpu_index = pw.auto_device() if 'auto' == args.gpu else int(args.gpu)
    device = torch.device("cuda:" + str(gpu_index) if gpu_index >= 0 else "cpu")
    if gpu_index >= 0:
        torch.cuda.set_device(gpu_index)

    logger.info('Loading dataset.')
    dataset = pickle.load(open(args.dataset_folder + 'test.pk', 'rb'))
    w_map, test_data, range_idx = dataset['w_map'], dataset['test_data'], dataset['range']
    train_loader = LargeDataset(args.dataset_folder, range_idx, args.batch_size, args.sequence_length)
    test_loader = EvalDataset(test_data, args.batch_size)

    logger.info('Building models.')
    rnn_map = {'Basic': BasicRNN, 'DenseNet': DenseRNN, 'LDNet': functools.partial(LDRNN, layer_drop = args.layer_drop)}
    rnn_layer = rnn_map[args.rnn_layer](args.layer_num, args.rnn_unit, args.word_dim, args.hid_dim, args.droprate)
    # Adaptive-softmax frequency cut-offs; the final bucket covers the rest of the vocab.
    cut_off = args.cut_off + [len(w_map) + 1]
    if args.label_dim > 0:
        soft_max = AdaptiveSoftmax(args.label_dim, cut_off)
    else:
        soft_max = AdaptiveSoftmax(rnn_layer.output_dim, cut_off)
    lm_model = LM(rnn_layer, soft_max, len(w_map), args.word_dim, args.droprate, label_dim = args.label_dim, add_relu=args.add_relu)
    lm_model.rand_ini()

    logger.info('Building optimizer.')
    optim_map = {'Adam' : optim.Adam, 'Adagrad': optim.Adagrad, 'Adadelta': optim.Adadelta}
    if args.lr > 0:
        optimizer = optim_map[args.update](lm_model.parameters(), lr=args.lr)
    else:
        optimizer = optim_map[args.update](lm_model.parameters())

    if args.restore_checkpoint:
        if os.path.isfile(args.restore_checkpoint):
            logger.info("loading checkpoint: '{}'".format(args.restore_checkpoint))
            model_file = wrapper.restore_checkpoint(args.restore_checkpoint)['model']
            lm_model.load_state_dict(model_file, False)
        else:
            logger.info("no checkpoint found at: '{}'".format(args.restore_checkpoint))

    lm_model.to(device)

    logger.info('Saving configues.')
    pw.save_configue(args)

    logger.info('Setting up training environ.')
    # NOTE: despite the name, best_train_ppl stores the best (lowest) epoch LOSS
    # sum; epoch_size is constant, so comparing sums is equivalent to comparing ppl.
    best_train_ppl = float('inf')
    cur_lr = args.lr
    batch_index = 0
    epoch_loss = 0
    patience = 0
    # BUGFIX: removed a dead ``SummaryWriter(...)/args.log_dir`` block that stood
    # here -- neither name exists in this script (no import, no such argument),
    # so it crashed at startup; all metric logging goes through ``pw`` instead.

    try:
        for indexs in range(args.epoch):
            logger.info('############')
            logger.info('Epoch: {}'.format(indexs))
            pw.nvidia_memory_map()
            lm_model.train()

            for word_t, label_t in train_loader.get_tqdm(device):
                # Reset the recurrent state at the start of each data pass.
                if 1 == train_loader.cur_idx:
                    lm_model.init_hidden()
                label_t = label_t.view(-1)
                lm_model.zero_grad()
                loss = lm_model(word_t, label_t)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(lm_model.parameters(), args.clip)
                optimizer.step()

                batch_index += 1
                if 0 == batch_index % args.interval:
                    s_loss = utils.to_scalar(loss)
                    pw.add_loss_vs_batch({'batch_loss': s_loss}, batch_index, use_logger = False)
                epoch_loss += utils.to_scalar(loss)

                if 0 == batch_index % args.epoch_size:
                    epoch_ppl = math.exp(epoch_loss / args.epoch_size)
                    pw.add_loss_vs_batch({'train_ppl': epoch_ppl}, batch_index, use_logger = True)
                    # Patience / LR-decay schedule keyed on the epoch loss sum.
                    if epoch_loss < best_train_ppl:
                        best_train_ppl = epoch_loss
                        patience = 0
                    else:
                        patience += 1
                    epoch_loss = 0
                    if patience > args.patience and cur_lr > 0:
                        patience = 0
                        cur_lr *= args.lr_decay
                        best_train_ppl = float('inf')
                        logger.info('adjust_learning_rate...')
                        utils.adjust_learning_rate(optimizer, cur_lr)

            test_ppl = evaluate(test_loader.get_tqdm(device), lm_model)
            pw.add_loss_vs_batch({'test_ppl': test_ppl}, indexs, use_logger = True)
            pw.save_checkpoint(model = lm_model, optimizer = optimizer, is_best = True)

    except KeyboardInterrupt:
        # Best-effort final evaluation and checkpoint on Ctrl-C.
        logger.info('Exiting from training early')
        test_ppl = evaluate(test_loader.get_tqdm(device), lm_model)
        pw.add_loss_vs_batch({'test_ppl': test_ppl}, indexs, use_logger = True)
        pw.save_checkpoint(model = lm_model, optimizer = optimizer, is_best = True)

    pw.close()
# <gh_stars>1-10
# coding=utf-8
# METEOR 算法 + 转述不变词 + 字符重叠
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import os
from collections import namedtuple
import argparse
import numpy as np
import codecs
import re
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer
# stemmer = PorterStemmer()
# stemmer = WordNetLemmatizer()
from itertools import tee, zip_longest
from nltk.corpus import stopwords
import nltk
import spacy
import math
from sklearn.feature_extraction import stop_words
# Command-line configuration for the METEOR-style paraphrase evaluation metric.
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="data/", help="Data folder.")
parser.add_argument("--lang", type=str, default="CN",
                    help="Target language. Chinese by default.")
parser.add_argument("--cand", type=str, default="candidate.txt",
                    help="Candidate translation/paraphrase file.")
parser.add_argument("--ref", type=str, default="reference.txt",
                    help="Reference translation/paraphrase file.")
# Default settings for a universal language. Say, Chinese.
parser.add_argument("--weights", type=str, default="1.0,0,0,0.6,0.9",
                    help="Weights for exact/stem/synset/paraphrase/overlap matching, float numbers separated by comma. "
                         "0 indicates not applicable.")
parser.add_argument("--hyper", type=str, default="0.85,0.2,0.6,0.75,0.35",
                    help="Hyper parameters alpha, beta, gamma, and delta.")
parser.add_argument("--paraphrase_invariant", type=str, default="paraphrase_invariant.txt",
                    help="Invariant words during paraphrasing, i.e.: copy words.")
parser.add_argument("--paraphrase_file", type=str, default="checked.copypp.clean",
                    help="Paraphrase file.")
parser.add_argument("--paraphrase_file_format", type=str, default="pair",
                    help="Format of paraphrase file. Could be either 'pair' or 'clique'.")
parser.add_argument("--function_words", type=str, default="english.function.words",
                    help="Function words list")
args = parser.parse_args()
# "pair": each line contains a pair of paraphrases. It's okay to also include other information,
# e.g.: weight/probability. These additional information will be omitted.
# "clique": each line contains a group of paraphrases.
# The five matching stages (modules) used by the aligner.
NUM_MODULES = 5
EXACT, STEM, SYNSET, PARA, SYNTAX = 0, 1, 2, 3, 4
FAKE_COUNT = 8
# FAKE_COUNT_ = 4
AVERAGE_MATCHED = 0
THRESHOLD = 1
# Matcher: one local match between the two sentences.
# module: matching mode, one of EXACT/STEM/SYNSET/PARA/SYNTAX
# prob: nominally the probability of the match; METEOR's Java code appears to always use 1
# start & matchStart: if word i of sentence 1 matches word j of sentence 2, then start = j, matchStart = i
# length & matchLength: for phrase matches where m words of sentence 1 match n words of sentence 2, length = n, matchLength = m
Matcher = namedtuple("Matcher", ["module", "prob", "start", "length", "matchStart", "matchLength"])
nlp = spacy.load("en_core_web_md")
class Stage(object):
    """Scratch area holding every candidate match produced by the matchers.

    ``matches[j]`` is the list of Matcher objects whose reference-side span
    starts at word ``j``; the two coverage arrays count how many candidate
    matches touch each word of the candidate / reference sentence.
    """

    def __init__(self, length1, length2):
        # length1 / length2: token counts of the candidate / reference sentence.
        self.matches = [[] for _ in range(length2)]
        self.line1Coverage = np.zeros(length1)
        self.line2Coverage = np.zeros(length2)

    def show(self):
        """Dump every stored match and both coverage vectors (debugging aid)."""
        print("===== Show Stage: =====")
        total = sum(len(candidates) for candidates in self.matches)
        print("# of possible matches:", total)
        for candidates in self.matches:
            for candidate in candidates:
                print(candidate)
        print(self.line1Coverage)
        print(self.line2Coverage)
        print("-" * 30 + "\n")
class PartialAlignment(object):
    """Intermediate state while beam-searching for the best alignment."""

    def __init__(self, length1=-1, length2=-1, pa_obj=None):
        # length1 and length2 are the candidate / reference sentence lengths.
        # pa_obj is another PartialAlignment; when given, this acts as a copy
        # constructor, otherwise an empty partial alignment is initialised.
        if pa_obj is None:
            # Match chosen for each position of sentence 2 (None = none yet).
            self.matches = [None for _ in range(length2)]
            # Number of matches committed so far.
            self.matchCount = 0
            # Module-weight-adjusted match totals over sentence 1 / sentence 2.
            self.matches1, self.matches2 = 0.0, 0.0
            # Raw word counts covered in sentence 1 / sentence 2.
            self.allMatches1, self.allMatches2 = 0, 0
            self.chunks = 0
            # Next index of sentence 2 to consider for matching.
            self.idx = 0
            # Sum of absolute positional differences of all matches.
            self.distance = 0
            # End position (in sentence 1) of the last committed match.
            self.lastMatchEnd = -1
            # BUGFIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in
            # 1.24 (AttributeError); the builtin ``bool`` is the documented
            # replacement and is behaviorally identical here.
            self.line1UsedWords = np.zeros(length1, dtype=bool)
            self.line2UsedWords = np.zeros(length2, dtype=bool)
        else:
            # Shallow-copy the match list, deep-copy the used-word masks.
            self.matches = copy.copy(pa_obj.matches)
            self.matchCount = pa_obj.matchCount
            self.matches1, self.matches2 = pa_obj.matches1, pa_obj.matches2
            self.allMatches1, self.allMatches2 = pa_obj.allMatches1, pa_obj.allMatches2
            self.chunks = pa_obj.chunks
            self.idx = pa_obj.idx
            self.distance = pa_obj.distance
            self.lastMatchEnd = pa_obj.lastMatchEnd
            self.line1UsedWords = pa_obj.line1UsedWords.copy()
            self.line2UsedWords = pa_obj.line2UsedWords.copy()

    def isUsed(self, matcher):
        """Return True when any word of *matcher*'s span is already taken."""
        if np.sum(self.line2UsedWords[matcher.start:matcher.start+matcher.length]) > 0:
            # line2 used
            return True
        if np.sum(self.line1UsedWords[matcher.matchStart:matcher.matchStart + matcher.matchLength]) > 0:
            return True
        return False

    def setUsed(self, matcher, bool_flag):
        """Mark (or unmark) every word covered by *matcher* as used."""
        self.line2UsedWords[matcher.start:matcher.start + matcher.length] = bool_flag
        self.line1UsedWords[matcher.matchStart:matcher.matchStart + matcher.matchLength] = bool_flag

    def show(self):
        """Print the partial alignment's matches and statistics (debugging aid)."""
        print("===== Show PartialAlignment: =====")
        print("# of matches:", len([matcher for matcher in self.matches if matcher is not None]))
        print("-")
        for i, matcher in enumerate(self.matches):
            print(matcher)
        print("-")
        print("Match weights:")
        print(self.matches1, self.matches2)
        print("# of covered words:")
        print(self.allMatches1, self.allMatches2)
        print("Used words:")
        print(self.line1UsedWords)
        print(self.line2UsedWords)
        print("-" * 30 + "\n")
class Alignment(object):
    """Final alignment result: the chosen matches plus word-category bookkeeping."""

    def __init__(self, token_list1, token_list2):
        # token_list1 / token_list2 are the candidate and reference sentences.
        self.words1 = token_list1
        self.words2 = token_list2
        # Indices of content / function / NER words in each sentence.
        self.line1ContentWords = []
        self.line2ContentWords = []
        self.line1FunctionWords = []
        self.line2FunctionWords = []
        self.line1NERWords = []
        self.line2NERWords = []
        # matches[i] contains a match starting at index i in line2.
        self.matches = []
        self.line1Matches, self.line2Matches = 0, 0
        # Per-module match totals (Content)
        self.moduleContentMatches1, self.moduleContentMatches2 = None, None
        # Per-module match totals (Function)
        self.moduleFunctionMatches1, self.moduleFunctionMatches2 = None, None
        # Per-module match totals (NER)
        self.moduleNERMatches1, self.moduleNERMatches2 = None, None
        self.numChunks = 0
        self.avgChunkLength = 0

    def set_count_and_chunks(self):
        """Recompute matched-word counts and chunk count from ``self.matches``."""
        self.line1Matches, self.line2Matches = 0, 0
        self.numChunks = 0
        pos, last_end = 0, -1
        while pos < len(self.matches):
            current = self.matches[pos]
            if current is None:
                # A gap in line 2 closes the running chunk, if any.
                if last_end != -1:
                    self.numChunks += 1
                    last_end = -1
                pos += 1
                continue
            self.line1Matches += current.matchLength
            self.line2Matches += current.length
            # A jump in line-1 position also ends the running chunk.
            if last_end != -1 and current.matchStart != last_end:
                self.numChunks += 1
            pos = current.start + current.length
            last_end = current.matchStart + current.matchLength
        if last_end != -1:
            # Close the trailing chunk.
            self.numChunks += 1

    def show(self):
        """Print both sentences with indices and every chosen match (debugging aid)."""
        print("===== Alignment: =====")
        print("Sentence 1: " + " ".join([token + "(" + str(i) + ")" for i, token in enumerate(self.words1)]))
        print("Sentence 2: " + " ".join([token + "(" + str(i) + ")" for i, token in enumerate(self.words2)]))
        print("# of matches:", len([matcher for matcher in self.matches if matcher is not None]))
        print("-")
        for matcher in self.matches:
            print(matcher)
        print("-")
        print("-" * 30 + "\n")
class Meteor(object):
    def __init__(self, weights, hyper, lang, synset_file="", function_words="", ner_copy="",
                 paraphrase_invariant="", paraphrase_file="", paraphrase_file_format="pair"):
        """Configure the metric: module weights, hyper-parameters, knowledge files.

        :param weights: comma-separated module weights (exact/stem/synset/para/syntax)
        :param hyper: comma-separated hyper-parameters alpha, beta, gamma, delta
        :param lang: target language code
        :param synset_file: synonym-set file (currently unused -- see commented code)
        :param function_words: file listing extra function words, one per line
        :param ner_copy: file of NER copy words (first token per line, lowercased)
        :param paraphrase_invariant: file of words invariant under paraphrasing
        :param paraphrase_file: paraphrase knowledge file
        :param paraphrase_file_format: "pair" (two phrases per line, '||||'-separated)
            or "clique" (one whitespace-separated group per line)
        """
        self.lang = lang
        self.eps = 1e-6
        self.weights = [float(p) for p in weights.split(",")]
        self.hyper = [float(p) for p in hyper.split(",")]
        self.alpha, self.beta, self.gamma, self.delta = self.hyper
        self.beamSize = 40
        self.er, self.ec = 0, 0
        self.ner = set()
        self.stored_align = None
        # Punctuation (ASCII and CJK) treated as function words by default.
        self.function_words = {"(", "。", ";", ":", ",", ")", "、", "‘", "’", "“",
                               "”", "?", "!", "—", "《", "》", "…", ".", "•"}
        print(self.weights)
        print(self.hyper)
        # print("FAKE_COUNT:",FAKE_COUNT)
        # print("THRESHOLD:", THRESHOLD)
        # print("AVERAGE_MATCHED:",AVERAGE_MATCHED)
        self.total_matched = 0
        self.sentence_cnt = 0
        self.stemmer = PorterStemmer()
        # Load the synonym table: maps a word to the set of IDs of all synonym
        # sets containing it.  (Disabled; possible_synsets() uses WordNet instead.)
        # self.possible_synsets = dict()
        # if os.path.isfile(synset_file):
        #     print("Loading synset from file " + synset_file)
        #     with codecs.open(synset_file, "r", encoding="utf-8") as f:
        #         # synset_file format: one synonym set per line
        #         for synset_clique_id, line in enumerate(f):
        #             tokens = line.split()
        #             for token in tokens:
        #                 if token in self.ossible_synsets:
        #                     self.possible_synsets[token].add(synset_clique_id)
        #                 else:
        #                     self.possible_synsets[token] = {synset_clique_id}
        # else:
        #     print("No synset knowledge.")
        self.paraphrase_invariant_words = set()
        # Load extra function words from file (one word per line).
        if os.path.isfile(function_words):
            print("Loading function_words from file " + function_words)
            with open(function_words, "r") as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    self.function_words.add(line)
            # print(self.function_words)
        # Load paraphrase-invariant words (first token of each line).
        if os.path.isfile(paraphrase_invariant):
            print("Loading paraphrase invariants from file " + paraphrase_invariant)
            with codecs.open(paraphrase_invariant, "r", encoding="utf-8") as f:
                for line in f:
                    word = line.split()[0]
                    self.paraphrase_invariant_words.add(word)
        else:
            print("", paraphrase_invariant)
        # Named-entity knowledge (earlier variant, disabled).
        # if os.path.isfile(ner_copy):
        #     print("Loading NER COPY from file " + ner_copy)
        #     with codecs.open(ner_copy, "r", encoding="utf-8") as f:
        #         for line in f:
        #             word = line.split()[0]
        #             self.paraphrase_invariant_words.add(word)
        # else:
        #     print("nonono:",ner_copy)
        # NER copy words are folded into the paraphrase-invariant set, lowercased.
        if os.path.isfile(ner_copy):
            print("Loading NER COPY from file " + ner_copy)
            with codecs.open(ner_copy, "r", encoding="utf-8") as f:
                for line in f:
                    word = line.split()[0]
                    self.paraphrase_invariant_words.add(word.lower())
        else:
            print("Not find ", ner_copy)
        # NOTE(review): sklearn.feature_extraction.stop_words was removed in
        # scikit-learn 0.24 (use sklearn.feature_extraction.text instead) --
        # confirm the installed version supports the import at the top of this file.
        self.stop_words = stop_words.ENGLISH_STOP_WORDS
        # Stop words, function words and NER words never count as invariants.
        self.paraphrase_invariant_words -= self.stop_words
        self.paraphrase_invariant_words -= self.function_words
        self.paraphrase_invariant_words -= self.ner
        print(len(self.paraphrase_invariant_words))
        # Load the paraphrase table (METEOR's paraphrase knowledge):
        # maps a phrase to the set of its paraphrases, symmetrically.
        self.possible_paraphrases = dict()
        if os.path.isfile(paraphrase_file):
            print("Loading paraphrases from file " + paraphrase_file)
            with open(paraphrase_file, "r", encoding='utf8') as f:
                if paraphrase_file_format == "pair":
                    # "pair" format: two phrases per line separated by '||||',
                    # optionally followed by weight info (ignored).
                    for i, line in enumerate(f):
                        tokens = line.split("||||")
                        x, y = tokens[0].strip(), tokens[1].strip()
                        # print(x)
                        # print(y)
                        # print("+"*60)
                        if x in self.possible_paraphrases:
                            self.possible_paraphrases[x].add(y)
                        else:
                            self.possible_paraphrases[x] = {y}
                        if y in self.possible_paraphrases:
                            self.possible_paraphrases[y].add(x)
                        else:
                            self.possible_paraphrases[y] = {x}
                else:
                    # "clique" format: one synonym set per line.
                    for i, line in enumerate(f):
                        # possible_paraphrases[token] also contains the word
                        # itself; harmless for correctness.
                        tokens = line.split()
                        for token in tokens:
                            if token in self.possible_paraphrases:
                                self.possible_paraphrases[token] = self.possible_paraphrases[token] | set(tokens)
                            else:
                                self.possible_paraphrases[token] = set(tokens)
        else:
            print("No paraphrase knowledge.")
        # Syntax-related knowledge (not populated yet; see _syntax_matching).
        self.possible_syntax_possibles = dict()
    # Synonym-set lookup (via WordNet).
def possible_synsets(self, word):
possible_sets = []
for synset in wn.synsets(word):
possible_sets += synset.lemma_names()
return set(possible_sets)
def NER_parsing(self, text):
ner_sets = set()
words = nltk.word_tokenize(text)
tags = nltk.pos_tag(words)
ners = nltk.ne_chunk(tags, binary=True)
for t in ners.subtrees(lambda t: t.label() == 'NE'):
ner_sets |= set(leaf[0].lower() for leaf in t.leaves())
return ner_sets
@staticmethod
def _normalize(sentence):
# print(sentence)
# for i in ["[",".","!",":"."/","_",",".";","$","%","^",'"'*(+\")]+|[+——()?【】“”!,。?;、~@#¥%……&*()]+"]
sentence = re.sub(r"[+\!:\/_,;$%^*(+\")-]+|[+——()?【】“”!,。?;、~@#¥%……&*()]+", "", sentence)
# token_list = [i.lower()for i in nltk.word_tokenize(sentence)]
token_list = [i.text.lower() for i in nlp(sentence)]
if '.' in token_list:
token_list.remove('.')
# sentence = sentence.replace("s'", "s")
# sentence = re.sub("\'", " \'", sentence).lower().split()
# candidate = re.sub("[+\.\!:\/_,;$%^*(+\")]+|[+——()?【】“”!,。?;、~@#¥%……&*()]+","",candidate).lower().split()
return token_list
def align(self, candidate_token_list, reference_token_list, mode="RUN"):
# def align(self, candidate_token_list, reference_token_list, reference_NER_set, candidate_NER_set, mode="RUN"):
# mode: "RUN" 中间不输出调试信息,"DEBUG" 输出中间结果以供调试
# 两个列表相等时进行特判,只做 EXACT MATCH,防止 beam search 出问题
is_identity = " ".join(candidate_token_list) == " ".join(reference_token_list)
# 依次考虑 exact/stem/synset/paraphrase 四个阶段
stage = Stage(length1=len(candidate_token_list), length2=len(reference_token_list))
if self.weights[0] > self.eps:
self._exact_matching(stage, candidate_token_list, reference_token_list)
if not is_identity and self.weights[1] > self.eps and self.stemmer is not None:
# TODO: 其实如果权重大于 eps 但是没有 stemmer 应该报个错,以下三个分支同
self._stem_matching(stage, candidate_token_list, reference_token_list)
if not is_identity and self.weights[2] > self.eps > 0:
self._synset_matching(stage, candidate_token_list, reference_token_list)
if not is_identity and self.weights[3] > self.eps and len(self.possible_paraphrases) > 0:
self._paraphrase_matching(stage, candidate_token_list, reference_token_list)
if not is_identity and self.weights[4] > self.eps:
self._syntax_matching(stage, candidate_token_list, reference_token_list)
if mode == "DEBUG":
stage.show()
# 通过启发式搜索找到最佳匹配方案
# 先预处理一些能够确认的 matcher (one-to-one, non-overlapping matches),缩小搜索范围
initialPath = PartialAlignment(length1=len(candidate_token_list), length2=len(reference_token_list))
for matcher_list in stage.matches:
if len(matcher_list) == 1:
matcher = matcher_list[0]
# 注意:i, j 分别是句子 2 和句子 1 的起始位置(而非反过来!)
i, j = matcher.start, matcher.matchStart
if np.sum(stage.line2Coverage[i:i+matcher.length]) > 1:
continue
if np.sum(stage.line1Coverage[j:j+matcher.matchLength]) > 1:
continue
# 此处未更新 initialPath 的匹配计数和权重等信息,在 resolve() 函数里会统计
# 这样设计的主要考虑是,后续搜索的时候才能知道这些匹配的 chunk 信息(它们跟别的匹配有没有连起来)
initialPath.matches[i] = matcher
initialPath.line2UsedWords[i:i+matcher.length] = True
initialPath.line1UsedWords[j:j+matcher.matchLength] = True
# 然后进行 beam search
best = self.resolve(stage, initialPath, mode)
if mode == "DEBUG":
print("Best path:")
best.show()
# 最后做一些统计工作,并返回最终的对齐结果
a = Alignment(candidate_token_list, reference_token_list)
a.moduleContentMatches1 = np.zeros(NUM_MODULES, dtype=np.int)
a.moduleContentMatches2 = np.zeros(NUM_MODULES, dtype=np.int)
a.moduleFunctionMatches1 = np.zeros(NUM_MODULES, dtype=np.int)
a.moduleFunctionMatches2 = np.zeros(NUM_MODULES, dtype=np.int)
for i, token in enumerate(candidate_token_list):
if token in self.function_words:
a.line1FunctionWords.append(i)
else:
a.line1ContentWords.append(i)
for j, token in enumerate(reference_token_list):
# print(token)
if token in self.function_words:
a.line2FunctionWords.append(j)
else:
a.line2ContentWords.append(j)
for matcher in best.matches:
if matcher is not None:
# 如果第二个句子的某个词 j 的匹配不为空,再更新整个句子的匹配信息
for k in range(matcher.matchLength):
if candidate_token_list[matcher.matchStart + k] in self.function_words:
a.moduleFunctionMatches1[matcher.module] += 1
# elif candidate_token_list[matcher.matchStart + k] in self.paraphrase_invariant_words or testNERMatch(matcher, "c"):
# a.moduleNERMatches1[matcher.module] += 1
# print("candidate matched:",candidate_token_list[matcher.matchStart + k])
# #
else:
a.moduleContentMatches1[matcher.module] += 1
for k in range(matcher.length):
if reference_token_list[matcher.start + k] in self.function_words:
a.moduleFunctionMatches2[matcher.module] += 1
# elif reference_token_list[matcher.start + k] in total_NER or testNERMatch(matcher, "r"):
# print("reference matched:",reference_token_list[matcher.start + k])
# a.moduleNERMatches2[matcher.module] += 1
# elif reference_token_list[matcher.start + k] in self.paraphrase_invariant_words or testNERMatch(matcher, "r"):
# a.moduleNERMatches2[matcher.module] += 1
# print("reference matched:",reference_token_list[matcher.start + k])
else:
a.moduleContentMatches2[matcher.module] += 1
a.matches = best.matches
a.set_count_and_chunks()
return a
def _exact_matching(self, stage, token_list1, token_list2):
for j in range(len(token_list2)):
for i in range(len(token_list1)):
if token_list1[i] == token_list2[j]:
stage.matches[j].append(Matcher(module=EXACT, prob=1.0, start=j, length=1,
matchStart=i, matchLength=1))
stage.line1Coverage[i] += 1
stage.line2Coverage[j] += 1
def _stem_matching(self, stage, token_list1, token_list2, verbose=False):
# TODO: currently stemmer is None!
stem_list1 = [self.stemmer.stem(token) for token in token_list1]
stem_list2 = [self.stemmer.stem(token) for token in token_list2]
for j in range(len(token_list2)):
for i in range(len(token_list1)):
if stem_list1[i] == stem_list2[j] and token_list1[i] != token_list2[j]:
stage.matches[j].append(Matcher(module=STEM, prob=1.0, start=j, length=1,
matchStart=i, matchLength=1))
stage.line1Coverage[i] += 1
stage.line2Coverage[j] += 1
def _synset_matching(self, stage, token_list1, token_list2, verbose=False):
for j in range(len(token_list2)):
for i in range(len(token_list1)):
t1, t2 = token_list1[i], token_list2[j]
t1_synsets = self.possible_synsets(t1)
t2_synsets = self.possible_synsets(t2)
if verbose:
print("\nsynset module result\n")
print("reference:", t1, t1_synsets)
print("candidate:", t2, t2_synsets)
print("intersection:", t1_synsets & t2_synsets)
if t1 != t2 and len(t1_synsets & t2_synsets) > 0:
stage.matches[j].append(Matcher(module=SYNSET, prob=1.0, start=j, length=1,
matchStart=i, matchLength=1))
stage.line1Coverage[i] += 1
stage.line2Coverage[j] += 1
    def _syntax_matching(self, stage, token_list1, token_list2, verbose=False):
        # Placeholder: the SYNTAX matching module is not implemented yet;
        # align() still calls it when weights[4] > eps, so it must exist.
        pass
    # Select the n-grams used by the paraphrase matching stage.
def uniwise(self,s):
# 列表元素是元组合,分别代表的内容是(index,length,(n-gram))
pair = []
for i in range(len(s)):
pair.append([i, 1, (s[i],)])
return pair
def pairwise(self, s):
# 列表元素是元组合,分别代表的内容是(index,length,(n-gram))
pair = []
for i in range(len(s)-1):
pair.append([i, 2, (s[i], s[i+1])])
return pair
def triwise(self, s):
# 列表元素是元组合,分别代表的内容是(index,length,(n-gram))
pair = []
for i in range(len(s)-2):
pair.append([i, 3, (s[i], s[i+1], s[i+2])])
return pair
def fourwise(self, s):
# 列表元素是元组合,分别代表的内容是(index,length,(n-gram))
pair = []
for i in range(len(s)-3):
pair.append([i, 4, (s[i], s[i+1], s[i+2], s[i+3])])
return pair
def n_gram(self, s):
# 获得句子的所有n-gram组合(n = 1,2,3,4)
return self.uniwise(s) + self.pairwise(s) + self.triwise(s) + self.fourwise(s)
def _paraphrase_matching(self, stage, token_list1, token_list2):
# 我挖掘的是词汇转述网络,因此我的转述极大团实际上只相当于 METEOR 里的 synset
# METEOR 里的 paraphrase 是可以多对多的(当然,一对一也包含了进来)
# TODO: 以后要改成支持多对多的话,可以参考如下代码:
# https://github.com/cmu-mtlab/meteor/blob/master/src/edu/cmu/meteor/aligner/ParaphraseMatcher.java
# 暂时先写成和 SYNSET 匹配几乎一样的好了
# 先找出所有的n-gram集合
t1 = self.n_gram(token_list1)
t2 = self.n_gram(token_list2)
for j in range(len(t2)):
for i in range(len(t1)):
t1_, t2_ = ' '.join(t1[i][2]), ' '.join(t2[j][2])
if (t1_ != t2_) and (t1_ in self.possible_paraphrases and t2_ in self.possible_paraphrases[t1_]):
try:
stage.matches[t2[j][0]].append(Matcher(module=PARA, prob=1.0, start=t2[j][0], length=t2[j][1],
matchStart=t1[i][0], matchLength=t1[i][1]))
# print("debug Matcher!:",Matcher(module=PARA, prob=1.0, start=t2[j][0], length=t2[j][1],
# matchStart=t1[i][0], matchLength=t1[i][1]))
except Exception as e:
exit()
# print(t1,t2)
# exit()
# 更新覆盖范围
for index, word in enumerate(t1[i][2]):
stage.line1Coverage[t1[i][0] + index ] += 1
for index, word in enumerate(t2[j][2]):
stage.line2Coverage[t2[j][0] + index ] += 1
# def _overlap_matching(self, stage, token_list1, token_list2):
# for j in range(len(token_list2)):
# for i in range(len(token_list1)):
# t1, t2 = token_list1[i], token_list2[j]
# if (t1 != t2) and len(set(t1) & set(t2)) > 0:
# stage.matches[j].append(Matcher(module=OVERLAP, prob=1.0, start=j, length=1,
# matchStart=i, matchLength=1))
# stage.line1Coverage[i] += 1
# stage.line2Coverage[j] += 1
def resolve(self, stage, start, mode):
# mode: "RUN" 中间不输出调试信息,"DEBUG" 输出中间结果以供调试
# 使用 beam search 从所有可能的匹配里搜索一个最好的
def pa_to_key(pa):
# 把 PartialAlignment 对象转换成 key
# 相当于 Meteor 里的 PARTIAL_COMPARE_TOTAL
# pa1 < pa2 <==> pa_to_key(pa1) < pa_to_key(pa2)
return pa.matches1 + pa.matches2, -pa.chunks, -pa.distance
# 当前搜索队列和下一步待搜索队列
paths, nextPaths = [], []
nextPaths.append(start)
if mode == "DEBUG":
print("Begining search: ", len(nextPaths))
# 注意 stage.matches 是一个长为 length2 的列表,其中每个元素是对应的句子 2 中的词的所有可能匹配的列表
length2 = len(stage.matches)
# Proceed from left to right
for current in range(length2 + 1):
# Advance
paths = nextPaths
nextPaths = []
paths.sort(key=lambda pa: pa_to_key(pa), reverse=True)
if mode == "DEBUG":
print("In beam search step " + str(current) + ", PartialAlignment list is:")
for path in paths:
path.show()
# Try as many paths as beam allows
num_trials = min(self.beamSize, len(paths))
for rank in range(num_trials):
path = paths[rank]
if mode == "DEBUG":
print("Beam search base on following path:")
path.show()
# Case 1: Path is complete
if current == length2:
# Close last chunk
if path.lastMatchEnd != -1:
path.chunks += 1
nextPaths.append(path)
if mode == "DEBUG":
print("Append Case 1!")
continue
# Case 2: Current index word is in use
if path.line2UsedWords[current]:
# If this is still part of a match
# 如果之前一个匹配覆盖了多个词,就会出现这种情况
if current < path.idx:
nextPaths.append(path)
if mode == "DEBUG":
print("Append Case 2.1!")
# If fixed match
# 如果在预处理阶段把这个 matcher 包含了进来,就会出现这种情况
elif path.matches[path.idx] is not None:
matcher = path.matches[path.idx]
path.matchCount += 1
path.matches1 += matcher.matchLength * self.weights[matcher.module]
path.matches2 += matcher.length * self.weights[matcher.module]
path.allMatches1 += matcher.matchLength
path.allMatches2 += matcher.length
# Not conitnuous in line1
if path.lastMatchEnd != -1 and matcher.matchStart != path.lastMatchEnd:
path.chunks += 1
# Advance to end of match + 1
path.idx = matcher.start + matcher.length
path.lastMatchEnd = matcher.matchStart + matcher.matchLength
path.distance += abs(matcher.start - matcher.matchStart)
nextPaths.append(path)
if mode == "DEBUG":
print("Append Case 2.2!")
continue
# Case 3: Multiple possible matches
# 前两种情况直接修改 path 然后 continue 即可;这种情况需要将 path 复制多份
matches = stage.matches[current]
for matcher in matches:
if not path.isUsed(matcher):
newPath = PartialAlignment(pa_obj=path)
newPath.setUsed(matcher, True)
newPath.matches[current] = matcher
newPath.matchCount += 1
newPath.matches1 += matcher.matchLength * self.weights[matcher.module]
newPath.matches2 += matcher.length * self.weights[matcher.module]
if newPath.lastMatchEnd != -1 and matcher.matchStart != newPath.lastMatchEnd:
newPath.chunks += 1
newPath.idx = matcher.start + matcher.length
newPath.lastMatchEnd = matcher.matchStart + matcher.matchLength
path.distance += abs(matcher.start - matcher.matchStart)
nextPaths.append(newPath)
if mode == "DEBUG":
print("Append Case 3!")
# Case 4: skip this index
if path.lastMatchEnd != -1:
path.chunks += 1
path.lastMatchEnd = -1
path.idx += 1
nextPaths.append(path)
if mode == "DEBUG":
print("Append Case 4!")
if len(nextPaths) == 0:
print("Warning: unexpected conditions - skipping matches until possible to continue")
nextPaths.sort(key=lambda pa: pa_to_key(pa), reverse=True)
return nextPaths[0]
def testNERMatch(self, candidate_token_list, reference_token_list, matcher, flag):
if flag == "r":
for k in range(matcher.matchLength):
if candidate_token_list[matcher.matchStart + k] in self.paraphrase_invariant_words:
self.ec += 1
return True
return False
else:
for k in range(matcher.length):
if reference_token_list[matcher.start + k] in self.paraphrase_invariant_words:
self.er += 1
return True
return False
def sentence_meteor_ner(self, candidate, reference, norm, verbose=False):
self.er = 0
self.ec = 0
# reference 和 candidate 可以是 Unicode string,也可以是 Unicode string 的列表,
# 例如 "我爱你。" 或 ["我", "爱", "你", "。"]
# 只支持一个 reference 的情形
# norm 表示是否对 Unicode string 进行需要 tokenize
# print("="*60)
assert type(reference) == type(candidate)
# print("reference:",reference)
# print("candidate:",candidate)
# reference_NER_set = set()
# candidate_NER_set = set()
reference_NER_set = self.NER_parsing(reference)-self.function_words
candidate_NER_set = self.NER_parsing(candidate)-self.function_words
# print("before norm:",candidate)
if candidate and norm:
candidate = self._normalize(candidate)
reference = self._normalize(reference)
# print("candidate after norm:",candidate)
# if verbose:
# print("Candidate: ", candidate)
# print("Reference: ", reference)
# 此时 reference 和 candidate 都应该是 Unicode string 的列表,即形如 ["我", "爱", "你", "。"]
# a = self.align(candidate_token_list=candidate, reference_token_list=reference,\
# reference_NER_set = reference_NER_set, candidate_NER_set = candidate_NER_set)
a = self.align(candidate_token_list=candidate, reference_token_list=reference)
self.stored_align = a
if verbose:
a.show()
# P, R, ch, m = 1.0, 1.0, 6, 6
# P, R, ch, m = 1.0, 1.0, 1, 6
# P, R, ch, m = 0.8571, 1.0, 2, 6
P, R = 0, 0
for i in range(NUM_MODULES):
# P += self.weights[i] * (self.delta1 * a.moduleNERMatches1[i]
# + self.delta2 * a.moduleContentMatches1[i]
# + (1-self.delta1 - self.delta2)*a.moduleFunctionMatches1[i])
# R += self.weights[i] * (self.delta1 * a.moduleNERMatches2[i]
# + self.delta2 * a.moduleContentMatches2[i]
# + (1 - self.delta1-self.delta2) * a.moduleFunctionMatches2[i])
P += self.weights[i] * (self.delta * a.moduleContentMatches1[i]
+ (1 - self.delta) * a.moduleFunctionMatches1[i])
R += self.weights[i] * (self.delta * a.moduleContentMatches2[i]
+ (1 - self.delta) * a.moduleFunctionMatches2[i])
P /= (self.delta * len(a.line1ContentWords) + (1 - self.delta) * len(a.line1FunctionWords))
R /= (self.delta * len(a.line2ContentWords) + (1 - self.delta) * len(a.line2FunctionWords))
line1_matched_stable_words, line2_matched_stable_words = 0, 0
# can_ner = set()
# ref_ner = set()
# can_ner = self.NER_parsing(ori_can) #- self.stop_words
# ref_ner = self.NER_parsing(ori_ref) #- self.stop_words
for matcher in a.matches:
if matcher is not None:
# 如果第二个句子的某个词 j 的匹配不为空,再更新整个句子的匹配信息
# print("+"*60)
for k in range(matcher.matchLength):
if candidate[
matcher.matchStart + k] in self.paraphrase_invariant_words: # or self.testNERMatch(candidate_token_list = candidate, reference_token_list = reference ,matcher = matcher, flag = "c"):
line1_matched_stable_words += 1
# print("matched_candidate:",candidate[matcher.matchStart + k])
# print("candidate stable:",candidate[matcher.matchStart + k])
# print("*"*60)
for k in range(matcher.length):
if reference[
matcher.start + k] in self.paraphrase_invariant_words: # or self.testNERMatch(candidate_token_list = candidate, reference_token_list = reference ,matcher = matcher, flag = "r"):
line2_matched_stable_words += 1
# print("matchedreference:",reference[matcher.start + k])
# print("reference stable:",reference[matcher.start + k])
# # 如果转述不变词在匹配中被漏掉了,加以相应的惩罚
# print("total_reference:",[word for word in reference if word in self.paraphrase_invariant_words])
# print("total_candidate:",[word for word in candidate if word in self.paraphrase_invariant_words])
line1_total_stable_words = len(
[word for word in candidate if word in self.paraphrase_invariant_words]) # + self.er
line2_total_stable_words = len(
[word for word in reference if word in self.paraphrase_invariant_words]) # + self.ec
# print(FAKE_COUNT)
# print(AVERAGE_MATCHED)
self.total_matched += line1_total_stable_words
self.total_matched += line2_total_stable_words
self.sentence_cnt += 2
# # if len(ref_ner) > 0:
# # print(ref_ner)
# # print(ori_ref)
# # print("reference:",[word for word in reference if word in ref_ner])
# # print("reference_matched_words:",line2_matched_stable_words)
# # if len(can_ner) > 0:
# # print(can_ner)
# # print(ori_can)
# # print("candidate:",[word for word in candidate if word in can_ner])
# # print("candidate_matched_words:",line1_matched_stable_words)
# print("ref_total_stable:", [word for word in reference if word in self.paraphrase_invariant_words])
# print("can_total_stable:", [word for word in candidate if word in self.paraphrase_invariant_words])
# Pen_P = (line1_matched_stable_words + FAKE_COUNT) / (line1_total_stable_words + FAKE_COUNT)
# Pen_R = (line2_matched_stable_words + FAKE_COUNT) / (line2_total_stable_words + FAKE_COUNT)
Pen_P = (line1_total_stable_words - line1_matched_stable_words) / (line1_total_stable_words + FAKE_COUNT)
Pen_R = (line2_total_stable_words - line2_matched_stable_words) / (line2_total_stable_words + FAKE_COUNT)
# if line1_total_stable_words == 0:
# Pen_P = 0
# else:
# Pen_P = (line1_total_stable_words - line1_matched_stable_words ) / (line1_total_stable_words )
# if line2_total_stable_words == 0:
# Pen_R = 0
# else:
# Pen_R = (line2_total_stable_words - line2_matched_stable_words ) / (line2_total_stable_words )
# Pen_P = math.exp((line1_matched_stable_words + FAKE_COUNT) / (line1_total_stable_words + FAKE_COUNT))
# Pen_R = math.exp((line2_matched_stable_words + FAKE_COUNT) / (line2_total_stable_words + FAKE_COUNT) - 1)
# Pen_P = A * Pen_P * THRESHOLD + THRESHOLD
# Pen_R = Pen_R * THRESHOLD + THRESHOLD
# print("orignal_P:", P, " orignal_R:", R)
# print("before:", " Pen_P:",Pen_P," Pen_R:",Pen_R)
# Pen_P = w * (Pen_P ** THRESHOLD)
# Pen_R = w * (Pen_R ** THRESHOLD)
Pen_P = (1 - Pen_P)
Pen_R = (1 - Pen_R)
# #if line1_total_stable_words == 0:
# Pen_P = 1
# Pen_P_ = 1
# else:
# Pen_P = (line1_matched_stable_words + FAKE_COUNT) / (line1_total_stable_words + FAKE_COUNT)
# Pen_P_ = (line1_matched_stable_words + FAKE_COUNT_) / (line1_total_stable_words + FAKE_COUNT_)
# if line2_total_stable_words == 0:
# Pen_R = 1
# Pen_R_ = 1
# else:
# Pen_R = (line2_matched_stable_words + FAKE_COUNT) / (line2_total_stable_words + FAKE_COUNT)
# Pen_R_ = Pen_R_ = (line2_matched_stable_words + FAKE_COUNT_) / (line2_total_stable_words + FAKE_COUNT_)
# # if Pen_P != 1 or Pen_R != 1:
# print("PENALTY:",Pen_P, Pen_R,P,R)
# print("orignal_P:",P)
# print("orignal_R:",R)
# print("after:", " Pen_P:",Pen_P," Pen_R:",Pen_R)
# print(FAKE_COUNT_," Pen_P_:",Pen_P_," Pen_R_:",Pen_R_)
P, R = P * (Pen_P), R * (Pen_R)
num_chunks = a.numChunks # `ch` in Meteor formula
# print("num_chunks:", num_chunks)
num_matched_words = (a.line1Matches + a.line2Matches) / 2.0 # `m` in Meteor formula
try:
F_mean = (P * R) / (self.alpha * P + (1 - self.alpha) * R)
# print("changdu candidate:", len(candidate))
# Pen = self.gamma * ((num_chunks / num_matched_words) * (1/len(candidate))) ** self.beta
Pen = self.gamma * (num_chunks / num_matched_words) ** self.beta
# print("Pen:",Pen)
except Exception as e:
# print("X"*60)
# print(reference)
# print(candidate)
# print(P)
# print(R)
return 0
# print(F_mean)
score = (1 - Pen) * F_mean
# score = F_mean
# print("final_score:", score)
if verbose:
print("Statistics:")
print("P = ", P, ", R = ", R, ", ch = ", num_chunks, ", m = ", num_matched_words,
", Pen = ", Pen, " , F_mean = ", F_mean)
return score
def sentence_meteor(self, candidate, reference, norm, verbose=False):
self.er = 0
self.ec = 0
# reference 和 candidate 可以是 Unicode string,也可以是 Unicode string 的列表,
# 例如 "我爱你。" 或 ["我", "爱", "你", "。"]
# 只支持一个 reference 的情形
# norm 表示是否对 Unicode string 进行需要 tokenize
# print("="*60)
assert type(reference) == type(candidate)
# print("reference:",reference)
# print("candidate:",candidate)
# reference_NER_set = set()
# candidate_NER_set = set()
# reference_NER_set = self.NER_parsing(reference)-self.function_words
# candidate_NER_set = self.NER_parsing(candidate)-self.function_words
# print("before norm:",candidate)
if candidate and norm:
candidate = self._normalize(candidate)
reference = self._normalize(reference)
# print("candidate after norm:",candidate)
# if verbose:
# print("Candidate: ", candidate)
# print("Reference: ", reference)
# 此时 reference 和 candidate 都应该是 Unicode string 的列表,即形如 ["我", "爱", "你", "。"]
# a = self.align(candidate_token_list=candidate, reference_token_list=reference,\
# reference_NER_set = reference_NER_set, candidate_NER_set = candidate_NER_set)
a = self.align(candidate_token_list=candidate, reference_token_list=reference)
self.stored_align = a
if verbose:
a.show()
# P, R, ch, m = 1.0, 1.0, 6, 6
# P, R, ch, m = 1.0, 1.0, 1, 6
# P, R, ch, m = 0.8571, 1.0, 2, 6
P, R = 0, 0
for i in range(NUM_MODULES):
# P += self.weights[i] * (self.delta1 * a.moduleNERMatches1[i]
# + self.delta2 * a.moduleContentMatches1[i]
# + (1-self.delta1 - self.delta2)*a.moduleFunctionMatches1[i])
# R += self.weights[i] * (self.delta1 * a.moduleNERMatches2[i]
# + self.delta2 * a.moduleContentMatches2[i]
# + (1 - self.delta1-self.delta2) * a.moduleFunctionMatches2[i])
P += self.weights[i] * (self.delta * a.moduleContentMatches1[i]
+ (1-self.delta)*a.moduleFunctionMatches1[i])
R += self.weights[i] * (self.delta * a.moduleContentMatches2[i]
+ (1 - self.delta) * a.moduleFunctionMatches2[i])
P /= (self.delta * len(a.line1ContentWords) + (1 - self.delta) * len(a.line1FunctionWords))
R /= (self.delta * len(a.line2ContentWords) + (1 - self.delta) * len(a.line2FunctionWords))
line1_matched_stable_words, line2_matched_stable_words = 0, 0
# can_ner = set()
# ref_ner = set()
# can_ner = self.NER_parsing(ori_can) #- self.stop_words
# ref_ner = self.NER_parsing(ori_ref) #- self.stop_words
for matcher in a.matches:
if matcher is not None:
# 如果第二个句子的某个词 j 的匹配不为空,再更新整个句子的匹配信息
# print("+"*60)
for k in range(matcher.matchLength):
if candidate[matcher.matchStart + k] in self.paraphrase_invariant_words:# or self.testNERMatch(candidate_token_list = candidate, reference_token_list = reference ,matcher = matcher, flag = "c"):
line1_matched_stable_words += 1
# print("matched_candidate:",candidate[matcher.matchStart + k])
# print("candidate stable:",candidate[matcher.matchStart + k])
# print("*"*60)
for k in range(matcher.length):
if reference[matcher.start + k] in self.paraphrase_invariant_words :#or self.testNERMatch(candidate_token_list = candidate, reference_token_list = reference ,matcher = matcher, flag = "r"):
line2_matched_stable_words += 1
# print("matchedreference:",reference[matcher.start + k])
# print("reference stable:",reference[matcher.start + k])
# # 如果转述不变词在匹配中被漏掉了,加以相应的惩罚
# print("total_reference:",[word for word in reference if word in self.paraphrase_invariant_words])
# print("total_candidate:",[word for word in candidate if word in self.paraphrase_invariant_words])
line1_total_stable_words = len([word for word in candidate if word in self.paraphrase_invariant_words]) #+ self.er
line2_total_stable_words = len([word for word in reference if word in self.paraphrase_invariant_words]) #+ self.ec
# print(FAKE_COUNT)
# print(AVERAGE_MATCHED)
self.total_matched += line1_total_stable_words
self.total_matched += line2_total_stable_words
self.sentence_cnt += 2
# # if len(ref_ner) > 0:
# # print(ref_ner)
# # print(ori_ref)
# # print("reference:",[word for word in reference if word in ref_ner])
# # print("reference_matched_words:",line2_matched_stable_words)
# # if len(can_ner) > 0:
# # print(can_ner)
# # print(ori_can)
# # print("candidate:",[word for word in candidate if word in can_ner])
# # print("candidate_matched_words:",line1_matched_stable_words)
# print("ref_total_stable:", [word for word in reference if word in self.paraphrase_invariant_words])
# print("can_total_stable:", [word for word in candidate if word in self.paraphrase_invariant_words])
# Pen_P = (line1_matched_stable_words + FAKE_COUNT) / (line1_total_stable_words + FAKE_COUNT)
# Pen_R = (line2_matched_stable_words + FAKE_COUNT) / (line2_total_stable_words + FAKE_COUNT)
Pen_P = (line1_total_stable_words - line1_matched_stable_words) / (line1_total_stable_words + FAKE_COUNT)
Pen_R = (line2_total_stable_words - line2_matched_stable_words) / (line2_total_stable_words + FAKE_COUNT)
# if line1_total_stable_words == 0:
# Pen_P = 0
# else:
# Pen_P = (line1_total_stable_words - line1_matched_stable_words ) / (line1_total_stable_words )
# if line2_total_stable_words == 0:
# Pen_R = 0
# else:
# Pen_R = (line2_total_stable_words - line2_matched_stable_words ) / (line2_total_stable_words )
# Pen_P = math.exp((line1_matched_stable_words + FAKE_COUNT) / (line1_total_stable_words + FAKE_COUNT))
# Pen_R = math.exp((line2_matched_stable_words + FAKE_COUNT) / (line2_total_stable_words + FAKE_COUNT) - 1)
# Pen_P = A * Pen_P * THRESHOLD + THRESHOLD
# Pen_R = Pen_R * THRESHOLD + THRESHOLD
# print("orignal_P:", P, " orignal_R:", R)
# print("before:", " Pen_P:",Pen_P," Pen_R:",Pen_R)
# Pen_P = w * (Pen_P ** THRESHOLD)
# Pen_R = w * (Pen_R ** THRESHOLD)
Pen_P = (1 - Pen_P)
Pen_R = (1 - Pen_R)
# #if line1_total_stable_words == 0:
# Pen_P = 1
# Pen_P_ = 1
# else:
# Pen_P = (line1_matched_stable_words + FAKE_COUNT) / (line1_total_stable_words + FAKE_COUNT)
# Pen_P_ = (line1_matched_stable_words + FAKE_COUNT_) / (line1_total_stable_words + FAKE_COUNT_)
# if line2_total_stable_words == 0:
# Pen_R = 1
# Pen_R_ = 1
# else:
# Pen_R = (line2_matched_stable_words + FAKE_COUNT) / (line2_total_stable_words + FAKE_COUNT)
# Pen_R_ = Pen_R_ = (line2_matched_stable_words + FAKE_COUNT_) / (line2_total_stable_words + FAKE_COUNT_)
# # if Pen_P != 1 or Pen_R != 1:
# print("PENALTY:",Pen_P, Pen_R,P,R)
# print("orignal_P:",P)
# print("orignal_R:",R)
# print("after:", " Pen_P:",Pen_P," Pen_R:",Pen_R)
# print(FAKE_COUNT_," Pen_P_:",Pen_P_," Pen_R_:",Pen_R_)
P, R = P * (Pen_P), R * (Pen_R)
num_chunks = a.numChunks # `ch` in Meteor formula
# print("num_chunks:", num_chunks)
num_matched_words = (a.line1Matches + a.line2Matches) / 2.0 # `m` in Meteor formula
try:
F_mean = (P * R) / (self.alpha * P + (1 - self.alpha) * R)
# print("changdu candidate:", len(candidate))
# Pen = self.gamma * ((num_chunks / num_matched_words) * (1/len(candidate))) ** self.beta
Pen = self.gamma * (num_chunks / num_matched_words) ** self.beta
# print("Pen:",Pen)
except Exception as e:
# print("X"*60)
# print(reference)
# print(candidate)
# print(P)
# print(R)
return 0
# print(F_mean)
score = (1 - Pen) * F_mean
# score = F_mean
# print("final_score:", score)
if verbose:
print("Statistics:")
print("P = ", P, ", R = ", R, ", ch = ", num_chunks, ", m = ", num_matched_words,
", Pen = ", Pen, " , F_mean = ", F_mean)
return score
def file_meteor(self, candidate_file, reference_file, output_file, norm=True):
with codecs.open(candidate_file, "r", encoding="utf-8") as f1:
with codecs.open(reference_file, "r", encoding="utf-8") as f2:
with codecs.open(output_file, "w", encoding="utf-8") as f3:
for line1, line2 in zip(f1, f2):
meteor = self.sentence_meteor(candidate=line1.strip(), reference=line2.strip(), norm=norm)
f3.write(str(meteor) + "\n")
def unit_test():
    """Run the three wiki examples through the metric and print the scores.

    Examples come from https://en.wikipedia.org/wiki/METEOR. For the first
    pair the wiki prefers alignment (a) and reports:
        Score: 0.5000 = Fmean: 1.0000 * (1 - Penalty: 0.5000)
        Penalty: 0.5000 = 0.5 * (Fragmentation: 1.0000 ^ 3)
    whereas this implementation finds alignment (b) (which is in fact the
    better one) and reports:
        P = 1.0, R = 1.0, ch = 3, m = 6.0, Pen = 0.0625 -> score 0.9375
    Expected outputs for the other pairs:
        identical sentences      -> Pen = 0.0023, score ~0.9977
        "was" inserted           -> P = 0.8571,  score ~0.9654
    """
    metric = Meteor(weights="1,1,1,1", hyper="0.9,3.0,0.5,1.0", lang="EN")
    # A little hack: treat a few extra tokens as function words.
    metric.function_words = metric.function_words | {"I", "you", "."}
    test_pairs = [
        ("on the mat sat the cat", "the cat sat on the mat"),
        ("the cat sat on the mat", "the cat sat on the mat"),
        ("the cat was sat on the mat", "the cat sat on the mat"),
    ]
    for cand, ref in test_pairs:
        res = metric.sentence_meteor(candidate=cand, reference=ref, norm=True)
        print("Meteor score: ", res)
if __name__ == "__main__":
    # CLI entry point: build a Meteor metric from data files and score one
    # hard-coded example pair verbosely (smoke test).
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="data/", help="Data folder.")
    # NOTE(review): --test_set is parsed but not used below.
    parser.add_argument("--test_set", type=str, default="pos_neg", help="Test set file name. (Input)")
    parser.add_argument("--paraphrase_file", type=str, default="filtered_paraphrase_table",
                        help="Paraphrase file.")
    parser.add_argument("--paraphrase_file_format", type=str, default="pair",
                        help="Format of paraphrase file. Could be either 'pair' or 'clique'.")
    parser.add_argument("--paraphrase_invariant", type=str, default="sorted_copy_words_freq0.6_20000000",
                        help="Invariant words during paraphrasing, i.e.: copy words_wmt15111")
    parser.add_argument("--function_words", type=str, default="english.function.words",
                        help="Function words list")
    # parser.add_argument("--ner_copy", type = str, default = "ner_copy_words_quora")
    parser.add_argument("--ner_copy", type=str, default="ner_copy_words_wmt16")
    args = parser.parse_args()
    # All resource files are resolved relative to --data_dir.
    metric = Meteor(weights="1.0,0.6,0.8,0.6,0", hyper="0.85,0.2,0.6,0.75", lang="EN",
                    paraphrase_invariant=os.path.join(args.data_dir, args.paraphrase_invariant),
                    paraphrase_file=os.path.join(args.data_dir, args.paraphrase_file),
                    paraphrase_file_format=args.paraphrase_file_format,
                    function_words=os.path.join(args.data_dir, args.function_words),
                    ner_copy=os.path.join(args.data_dir, args.ner_copy))
    metric.sentence_meteor(reference="I saw Monika in Evita three times and she was definitely an inspiration to me.",
                           candidate="I saw Monica in Evita three times and it was definitely inspirational to me.",
                           norm=True, verbose=True)
|
<reponame>CharLee674/rvisa_lightlab
from . import VISAInstrumentDriver
from lightlab.equipment.abstract_drivers import Configurable
from lightlab.laboratory.instruments import Keithley
import numpy as np
import time
from lightlab import logger
class Keithley_2400_SM(VISAInstrumentDriver, Configurable):
    ''' A Keithley 2400 driver.

        `Manual: <http://research.physics.illinois.edu/bezryadin/labprotocol/Keithley2400Manual.pdf>`__

        Usage: :any:`/ipynbs/Hardware/Keithley.ipynb`

        Capable of sourcing current and measuring voltage, such as a Keithley

        Also provides interface methods for measuring resistance and measuring power
    '''
    instrument_category = Keithley
    autoDisable = None  # in seconds. NOT IMPLEMENTED
    _latestCurrentVal = 0  # last current setpoint sent to the instrument (A)
    _latestVoltageVal = 0  # last voltage setpoint sent to the instrument (V)
    currStep = None  # ramp step in current mode; None disables ramping
    voltStep = None  # ramp step in voltage mode; None disables ramping
    rampStepTime = 0.01  # in seconds, dwell per ramp step

    def __init__(self, name=None, address=None, **kwargs):
        '''
            Args:
                currStep (float): amount to step if ramping in current mode. Default (None) is no ramp
                voltStep (float): amount to step if ramping in voltage mode. Default (None) is no ramp
                rampStepTime (float): time to wait on each ramp step point
        '''
        self.currStep = kwargs.pop("currStep", None)
        self.voltStep = kwargs.pop("voltStep", None)
        self.rampStepTime = kwargs.pop("rampStepTime", 0.01)
        VISAInstrumentDriver.__init__(self, name=name, address=address, **kwargs)
        Configurable.__init__(self, headerIsOptional=False, verboseIsOptional=False)

    def startup(self):
        ''' Reset the instrument to its power-on defaults. '''
        self.write('*RST')

    def setPort(self, port):
        ''' Route to the 'Front' or 'Rear' terminals.

            Any other value is silently ignored.
        '''
        if port == 'Front':
            self.setConfigParam('ROUT:TERM', 'FRON')
        elif port == 'Rear':
            self.setConfigParam('ROUT:TERM', 'REAR')

    def __setSourceMode(self, isCurrentSource):
        ''' Configure source/sense functions for current- or voltage-source
            operation; the complementary quantity is sensed with auto-range.
        '''
        if isCurrentSource:
            sourceStr, meterStr = ('CURR', 'VOLT')
        else:
            sourceStr, meterStr = ('VOLT', 'CURR')
        self.setConfigParam('SOURCE:FUNC', sourceStr)
        self.setConfigParam('SOURCE:{}:MODE'.format(sourceStr), 'FIXED')
        # NOTE(review): called with a single argument unlike the other
        # setConfigParam calls -- confirm Configurable accepts this form.
        self.setConfigParam('SENSE:FUNCTION:OFF:ALL')
        self.setConfigParam('SENSE:FUNCTION:ON', '"{}"'.format(meterStr))
        self.setConfigParam('SENSE:{}:RANGE:AUTO'.format(meterStr), 'ON')
        self.setConfigParam('RES:MODE', 'MAN')  # Manual resistance ranging

    def setVoltageMode(self, protectionCurrent=0.05):
        ''' Switch to voltage-source mode (output disabled first).

            Args:
                protectionCurrent (float): compliance current limit in amps
        '''
        self.enable(False)
        self.__setSourceMode(isCurrentSource=False)
        self.setProtectionCurrent(protectionCurrent)
        self._configVoltage(0)

    def setCurrentMode(self, protectionVoltage=1):
        ''' Switch to current-source mode (output disabled first).

            Args:
                protectionVoltage (float): compliance voltage limit in volts
        '''
        self.enable(False)
        self.__setSourceMode(isCurrentSource=True)
        self.setProtectionVoltage(protectionVoltage)
        self._configCurrent(0)

    def _configCurrent(self, currAmps):
        ''' Program the current setpoint, picking a decade range that fits.

            Magnitude is clipped to [1e-9, 1] A; note that a request of
            exactly 0 therefore programs 1e-9 A.
        '''
        currAmps = float(currAmps)
        if currAmps >= 0:
            currAmps = np.clip(currAmps, a_min=1e-9, a_max=1.)
        else:
            currAmps = np.clip(currAmps, a_min=-1, a_max=-1e-9)
        if currAmps != 0:
            # Smallest decade range that contains the setpoint.
            needRange = 10 ** np.ceil(np.log10(abs(currAmps)))
            self.setConfigParam('SOURCE:CURR:RANGE', needRange)
        self.setConfigParam('SOURCE:CURR', currAmps)
        self._latestCurrentVal = currAmps

    def _configVoltage(self, voltVolts):
        ''' Program the voltage setpoint, picking a decade range that fits. '''
        voltVolts = float(voltVolts)
        if voltVolts != 0:
            needRange = 10 ** np.ceil(np.log10(np.abs(voltVolts)))
            self.setConfigParam('SOURCE:VOLT:RANGE', needRange)
        self.setConfigParam('SOURCE:VOLT', voltVolts)
        self._latestVoltageVal = voltVolts

    def setCurrent(self, currAmps):
        ''' This leaves the output on indefinitely '''
        currTemp = self._latestCurrentVal
        if not self.enable() or self.currStep is None:
            # Output is off or ramping disabled: jump straight to the target.
            self._configCurrent(currAmps)
        else:
            # Ramp from the previous setpoint in steps of at most currStep.
            nSteps = int(np.floor(abs(currTemp - currAmps) / self.currStep))
            for curr in np.linspace(currTemp, currAmps, 1 + nSteps)[1:]:
                self._configCurrent(curr)
                time.sleep(self.rampStepTime)

    def setVoltage(self, voltVolts):
        ''' Set the voltage setpoint, ramping if voltStep is configured. '''
        voltTemp = self._latestVoltageVal
        if not self.enable() or self.voltStep is None:
            self._configVoltage(voltVolts)
        else:
            nSteps = int(np.floor(abs(voltTemp - voltVolts) / self.voltStep))
            for volt in np.linspace(voltTemp, voltVolts, 1 + nSteps)[1:]:
                self._configVoltage(volt)
                time.sleep(self.rampStepTime)

    def getCurrent(self):
        ''' Return the programmed current setpoint (not a measurement). '''
        currGlob = self.getConfigParam('SOURCE:CURR')
        if isinstance(currGlob, dict):
            # Configurable may return a tree; '&' holds the node's own value.
            currGlob = currGlob['&']
        return currGlob

    def getVoltage(self):
        ''' Return the programmed voltage setpoint (not a measurement). '''
        voltGlob = self.getConfigParam('SOURCE:VOLT')
        if isinstance(voltGlob, dict):
            voltGlob = voltGlob['&']
        return voltGlob

    def setProtectionVoltage(self, protectionVoltage):
        ''' Set the compliance (protection) voltage in volts. '''
        self.setConfigParam('VOLT:PROT', protectionVoltage)

    def setProtectionCurrent(self, protectionCurrent):
        ''' Set the compliance (protection) current in amps. '''
        self.setConfigParam('CURR:PROT', protectionCurrent)

    @property
    def protectionVoltage(self):
        ''' Compliance (protection) voltage in volts. '''
        return self.getConfigParam('VOLT:PROT')

    @property
    def protectionCurrent(self):
        ''' Compliance (protection) current in amps. '''
        return self.getConfigParam('CURR:PROT')

    def measVoltage(self):
        ''' Measure and return the voltage in volts, warning on compliance. '''
        retStr = self.query('MEASURE:VOLT?')
        v = float(retStr.split(',')[0])  # first number is voltage always
        if v >= self.protectionVoltage:
            logger.warning('Keithley compliance voltage of %s reached', self.protectionVoltage)
            # V * A = W; multiply by 1e3 for milliwatts (was 1e-3: off by 1e6).
            logger.warning('You are sourcing %smW into the load.', v * self._latestCurrentVal * 1e3)
        return v

    def measCurrent(self):
        ''' Measure and return the current in amps, warning on compliance. '''
        retStr = self.query('MEASURE:CURR?')
        i = float(retStr.split(',')[1])  # second number is current always
        if i >= self.protectionCurrent:
            logger.warning('Keithley compliance current of %s reached', self.protectionCurrent)
            # V * A = W; multiply by 1e3 for milliwatts (was 1e-3: off by 1e6).
            logger.warning('You are sourcing %smW into the load.', i * self._latestVoltageVal * 1e3)
        return i

    def enable(self, newState=None):
        ''' get/set enable state

            When disabling, the active source is first ramped/set to zero so
            the output does not snap off at a nonzero level.
        '''
        if newState is False:
            if self.getConfigParam('SOURCE:FUNC') == 'CURR':
                self.setCurrent(0)
            else:
                self.setVoltage(0)
        if newState is not None:
            self.setConfigParam('OUTP:STATE', 1 if newState else 0, forceHardware=True)
        retVal = self.getConfigParam('OUTP:STATE', forceHardware=True)
        return retVal in ['ON', 1, '1']
|
from __future__ import unicode_literals
import copy
from dateutil.relativedelta import relativedelta
import six
from dash.utils import get_month_range
from django import forms
from django.forms.forms import DeclarativeFieldsMetaclass
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from . import fields as filter_fields
from . import utils
class FilterForm(forms.Form):
    """Base form for report filters.

    Requires an ``org`` keyword argument identifying the organization whose
    data is being filtered; it is stored on the instance for filter mixins.
    """

    def __init__(self, *args, **kwargs):
        self.org = kwargs.pop('org')
        super(FilterForm, self).__init__(*args, **kwargs)
        # Create a shallow copy of the data to ensure that it is
        # mutable. Some filters need the ability to overwrite the
        # data that was passed in.
        if self.data is not None:
            self.data = copy.copy(self.data)
class Filter(six.with_metaclass(DeclarativeFieldsMetaclass, object)):
    # The metaclass is what does the work to set up fields
    # that are declared as attributes of the class.
    # This makes Filter subclasses usable as mixins alongside FilterForm:
    # their declared fields are gathered the same way Django forms gather
    # theirs.
    pass
class DateRangeFilter(Filter):
    """Filter mixin providing a date-range selector.

    The user either picks a predefined window (current month, last N
    days/months) or 'custom' with explicit start/end dates. ``clean()``
    resolves the selection into concrete ``start_date``/``end_date`` values
    in both ``cleaned_data`` and ``data``.
    """

    DATE_WINDOW_CHOICES = (
        ('', ''),
        ('month', _("Current month")),
        ('30-days', _("Last 30 days")),
        ('60-days', _("Last 60 days")),
        ('90-days', _("Last 90 days")),
        ('6-months', _("Last 6 months")),
        ('12-months', _("Last 12 months")),
        ('custom', _("Custom range...")),
    )

    date_range = forms.ChoiceField(
        label=_("Date range"),
        choices=DATE_WINDOW_CHOICES)
    start_date = filter_fields.FilterDateField(
        label=_("Start date"),
        required=False)
    end_date = filter_fields.FilterDateField(
        label=_("End date"),
        required=False)

    def clean(self):
        # Resolve the chosen window into start_date/end_date. Note that this
        # also writes back into self.data (made mutable by FilterForm) so the
        # rendered form reflects the computed dates.
        self.cleaned_data = super(DateRangeFilter, self).clean()
        window = self.cleaned_data.get('date_range')
        if window == 'custom':
            # Only apply additional checks if data did not have errors.
            if 'start_date' not in self.errors and 'end_date' not in self.errors:
                start_date = self.cleaned_data.get('start_date')
                end_date = self.cleaned_data.get('end_date')
                # Require at least one date filter.
                if not start_date and not end_date:
                    self.add_error(
                        forms.ALL_FIELDS,
                        _("Please choose a start date or an end date."))
                # Ensure date filter order makes sense.
                elif (start_date and end_date) and start_date > end_date:
                    self.add_error(
                        'end_date',
                        _("End date must be after start date."))
                # Set default values for start date and end date.
                else:
                    self.cleaned_data.setdefault('start_date', None)
                    self.cleaned_data.setdefault('end_date', None)
                    self.data.setdefault('start_date', None)
                    self.data.setdefault('end_date', None)
        else:
            # Throw out user-submitted dates.
            self.cleaned_data.pop('start_date', None)
            self.cleaned_data.pop('end_date', None)
            self.data.pop('start_date', None)
            self.data.pop('end_date', None)
            self._errors.pop('start_date', None)
            self._errors.pop('end_date', None)
            # Calculate the correct date window.
            if window:
                if window == 'month':
                    # get_month_range() a tuple with datetimes representing
                    # midnight of the first day of the current month, and
                    # midnight of the first day of the following month.
                    start_date, end_date = get_month_range()
                    # Show the user the last day of the month,
                    # e.g., show June 1 to June 30 rather than June 1 to July 1.
                    end_date = end_date - relativedelta(days=1)
                else:
                    number, unit = window.split('-')  # e.g., 6-months
                    end_date = utils.midnight(timezone.now())
                    start_date = end_date - relativedelta(**{unit: int(number)})
                self.cleaned_data['start_date'] = start_date
                self.cleaned_data['end_date'] = end_date
                self.data['start_date'] = start_date
                self.data['end_date'] = end_date
        # Pad the end_date by one day so that results for all times during
        # the end_date are accounted for in the query.
        end_date = self.cleaned_data.get('end_date')
        if end_date is not None:
            self.cleaned_data['end_date'] = end_date + relativedelta(days=1)
        return self.cleaned_data
class DataFieldFilter(Filter):
    """Filter mixin that adds one free-text search field per visible
    contact data field on the organization."""

    def __init__(self, *args, **kwargs):
        super(DataFieldFilter, self).__init__(*args, **kwargs)
        # Pair each visible data field with its generated form-field name.
        self.contact_fields = [
            ('contact_{}'.format(data_field.key), data_field)
            for data_field in self.org.datafield_set.visible()
        ]
        for field_name, data_field in self.contact_fields:
            self.fields[field_name] = forms.CharField(
                label='Contact: {}'.format(data_field.display_name),
                required=False)

    def filter_contacts(self, queryset=None):
        """Filter queryset to match all contact field search input."""
        if queryset is None:
            contacts = self.org.contacts.all()
        else:
            contacts = queryset
        for name, data_field in self.contact_fields:
            term = self.cleaned_data.get(name)
            if not term:
                continue
            contacts = contacts.filter(
                contactfield__field=data_field,
                contactfield__value__icontains=term)
        return contacts
|
"""Sun2 Binary Sensor."""
from datetime import timedelta
import logging
import voluptuous as vol
try:
from homeassistant.components.binary_sensor import BinarySensorEntity
except ImportError:
from homeassistant.components.binary_sensor import BinarySensorDevice
BinarySensorEntity = BinarySensorDevice
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_ABOVE, CONF_ELEVATION, CONF_MONITORED_CONDITIONS, CONF_NAME)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_point_in_time
from homeassistant.util import dt as dt_util
from .helpers import (
async_init_astral_loc, astral_loc, nearest_second, SIG_LOC_UPDATED)
_LOGGER = logging.getLogger(__name__)
DEFAULT_ELEVATION_ABOVE = -0.833  # default threshold (deg); standard sunrise/sunset elevation
DEFAULT_ELEVATION_NAME = 'Above Horizon'
ABOVE_ICON = 'mdi:white-balance-sunny'  # icon shown while sensor is on
BELOW_ICON = 'mdi:moon-waxing-crescent'  # icon shown while sensor is off
_ONE_DAY = timedelta(days=1)
_ONE_SEC = timedelta(seconds=1)
_SENSOR_TYPES = [CONF_ELEVATION]  # supported monitored_conditions keys
ATTR_NEXT_CHANGE = 'next_change'  # extra state attribute key
# Accepted configuration shapes for a monitored condition:
# elevation
# elevation: <threshold>
# elevation:
#   above: <threshold>
#   name: <friendly_name>
def _val_cfg(config):
    """Normalize a monitored-condition entry into canonical dict form.

    Accepts a bare sensor-type string, a mapping with a numeric threshold,
    or a mapping with an options dict. Returns a dict of the form
    ``{'elevation': {'above': <float>, 'name': <str>}}`` with defaults
    filled in. Raises ``vol.Invalid`` for unknown option keys.
    """
    if isinstance(config, str):
        # Bare 'elevation' -> empty options; defaults filled below.
        config = {config: {}}
    else:
        if CONF_ELEVATION in config:
            value = config[CONF_ELEVATION]
            # Accept ints as well as floats. The voluptuous schema coerces
            # numbers to float before calling us, but a plain int threshold
            # (e.g. ``elevation: 10``) would otherwise fall through and
            # crash in the option loop below when called directly.
            if isinstance(value, (int, float)):
                config[CONF_ELEVATION] = {CONF_ABOVE: value}
    if CONF_ELEVATION in config:
        options = config[CONF_ELEVATION]
        # Reject unknown option keys.
        for key in options:
            if key not in [CONF_ELEVATION, CONF_ABOVE, CONF_NAME]:
                raise vol.Invalid(
                    f'{key} not allowed for {CONF_ELEVATION}')
        if CONF_ABOVE not in options:
            options[CONF_ABOVE] = DEFAULT_ELEVATION_ABOVE
        if CONF_NAME not in options:
            # Derive a friendly name from the threshold.
            above = options[CONF_ABOVE]
            if above == DEFAULT_ELEVATION_ABOVE:
                name = DEFAULT_ELEVATION_NAME
            else:
                name = 'Above '
                if above < 0:
                    name += f'minus {-above}'
                else:
                    name += f'{above}'
            options[CONF_NAME] = name
    return config
# Schema for one monitored condition: either the bare type string, or a
# mapping from type to a numeric threshold (coerced to float) or an options
# dict. _val_cfg then normalizes the result into canonical dict form.
_BINARY_SENSOR_SCHEMA = vol.All(
    vol.Any(
        vol.In(_SENSOR_TYPES),
        vol.Schema({
            vol.Required(vol.In(_SENSOR_TYPES)): vol.Any(
                vol.Coerce(float),
                vol.Schema({
                    vol.Optional(CONF_ABOVE): vol.Coerce(float),
                    vol.Optional(CONF_NAME): cv.string,
                }),
            ),
        }),
    ),
    _val_cfg,
)
# Platform config: a non-empty list of the per-condition schemas above.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
        cv.ensure_list, [_BINARY_SENSOR_SCHEMA]),
})
class Sun2ElevationSensor(BinarySensorEntity):
"""Sun2 Elevation Sensor."""
    def __init__(self, hass, name, above):
        """Initialize sensor."""
        self._name = name
        # Elevation threshold (degrees) the on/off state is compared against.
        self._threshold = above
        # Binary state; None until the first update.
        self._state = None
        # Next predicted state-change time, exposed as a state attribute.
        self._next_change = None
        # Ensure the shared astral location helper is set up for this hass.
        async_init_astral_loc(hass)
        # Dispatcher unsubscribe callback; set in async_added_to_hass.
        self._unsub_dispatcher = None
@property
def should_poll(self):
"""Do not poll."""
return False
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_NEXT_CHANGE: self._next_change}
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ABOVE_ICON if self.is_on else BELOW_ICON
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
    async def async_loc_updated(self):
        """Location updated."""
        # Force a state refresh now that the astral location has changed.
        self.async_schedule_update_ha_state(True)
    async def async_added_to_hass(self):
        """Subscribe to update signal."""
        # Keep the unsubscribe callback so we can disconnect on removal.
        self._unsub_dispatcher = async_dispatcher_connect(
            self.hass, SIG_LOC_UPDATED, self.async_loc_updated)
    async def async_will_remove_from_hass(self):
        """Disconnect from update signal."""
        # Calling the stored callback unsubscribes from SIG_LOC_UPDATED.
        self._unsub_dispatcher()
def _find_nxt_dttm(self, t0_dttm, t0_elev, t1_dttm, t1_elev):
# Do a binary search for time between t0 & t1 where elevation is
# nearest threshold, but also above (or equal to) it if current
# elevation is below it (i.e., current state is False), or below it if
# current elevation is above (or equal to) it (i.e., current state is
# True.)
slope = 1 if t1_elev > t0_elev else -1
# Find mid point and throw away fractional seconds since astral package
# ignores microseconds.
tn_dttm = nearest_second(t0_dttm + (t1_dttm - t0_dttm) / 2)
tn_elev = astral_loc().solar_elevation(tn_dttm)
while not (
(self._state and tn_elev <= self._threshold
or not self._state and tn_elev > self._threshold)
and abs(tn_elev - self._threshold) <= 0.01):
if (tn_elev - self._threshold) * slope > 0:
if t1_dttm == tn_dttm:
break
t1_dttm = tn_dttm
else:
if t0_dttm == tn_dttm:
break
t0_dttm = tn_dttm
tn_dttm = nearest_second(t0_dttm + (t1_dttm - t0_dttm) / 2)
tn_elev = astral_loc().solar_elevation(tn_dttm)
# Did we go too far?
if self._state and tn_elev > self._threshold:
tn_dttm -= slope * _ONE_SEC
if astral_loc().solar_elevation(tn_dttm) > self._threshold:
raise RuntimeError("Couldn't find next update time")
elif not self._state and tn_elev <= self._threshold:
tn_dttm += slope * _ONE_SEC
if astral_loc().solar_elevation(tn_dttm) <= self._threshold:
raise RuntimeError("Couldn't find next update time")
return tn_dttm
def _get_nxt_dttm(self, cur_dttm):
# Find next segment of elevation curve, between a pair of solar noon &
# solar midnight, where it crosses the threshold, but in the opposite
# direction (i.e., where output should change state.) Note that this
# might be today, tomorrow, days away, or never, depending on location,
# time of year and specified threshold.
# Start by finding the next five solar midnight & solar noon "events"
# since current time might be anywhere from before today's solar
# midnight (if it is this morning) to after tomorrow's solar midnight
# (if it is this evening.)
date = cur_dttm.date()
evt_dttm1 = astral_loc().solar_midnight(date)
evt_dttm2 = astral_loc().solar_noon(date)
evt_dttm3 = astral_loc().solar_midnight(date + _ONE_DAY)
evt_dttm4 = astral_loc().solar_noon(date + _ONE_DAY)
evt_dttm5 = astral_loc().solar_midnight(date + 2 * _ONE_DAY)
# See if segment we're looking for falls between any of these events.
# If not move ahead a day and try again, but don't look more than a
# a year ahead.
end_date = date + 366 * _ONE_DAY
while date < end_date:
if cur_dttm < evt_dttm1:
if self._state:
t0_dttm = cur_dttm
t1_dttm = evt_dttm1
else:
t0_dttm = evt_dttm1
t1_dttm = evt_dttm2
elif cur_dttm < evt_dttm2:
if not self._state:
t0_dttm = cur_dttm
t1_dttm = evt_dttm2
else:
t0_dttm = evt_dttm2
t1_dttm = evt_dttm3
elif cur_dttm < evt_dttm3:
if self._state:
t0_dttm = cur_dttm
t1_dttm = evt_dttm3
else:
t0_dttm = evt_dttm3
t1_dttm = evt_dttm4
else:
if not self._state:
t0_dttm = cur_dttm
t1_dttm = evt_dttm4
else:
t0_dttm = evt_dttm4
t1_dttm = evt_dttm5
t0_elev = astral_loc().solar_elevation(t0_dttm)
t1_elev = astral_loc().solar_elevation(t1_dttm)
# Did we find it?
# Note, if t1_elev > t0_elev, then we're looking for an elevation
# ABOVE threshold. In this case we can't use this range if the
# threshold is EQUAL to the elevation at t1, because this range
# does NOT include any points with a higher elevation value. For
# all other cases it's ok for the threshold to equal the elevation
# at t0 or t1.
if (t0_elev <= self._threshold < t1_elev
or t1_elev <= self._threshold <= t0_elev):
nxt_dttm = self._find_nxt_dttm(
t0_dttm, t0_elev, t1_dttm, t1_elev)
if nxt_dttm - cur_dttm > _ONE_DAY:
_LOGGER.warning(
'Sun elevation will not reach %f again until %s',
self._threshold, nxt_dttm.date())
return nxt_dttm
# Shift one day ahead.
date += _ONE_DAY
evt_dttm1 = evt_dttm3
evt_dttm2 = evt_dttm4
evt_dttm3 = evt_dttm5
evt_dttm4 = astral_loc().solar_noon(date + _ONE_DAY)
evt_dttm5 = astral_loc().solar_midnight(date + 2 * _ONE_DAY)
# Didn't find one.
return None
async def async_update(self):
"""Update state."""
cur_dttm = dt_util.now()
cur_elev = astral_loc().solar_elevation(cur_dttm)
self._state = cur_elev > self._threshold
_LOGGER.debug(
'name=%s, above=%f, elevation=%f',
self._name, self._threshold, cur_elev)
nxt_dttm = self._get_nxt_dttm(cur_dttm)
self._next_change = nxt_dttm
@callback
def async_update(now):
self.async_schedule_update_ha_state(True)
if nxt_dttm:
async_track_point_in_time(self.hass, async_update, nxt_dttm)
else:
_LOGGER.error(
'Sun elevation never reaches %f at this location',
self._threshold)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up sensors."""
    # Build one elevation sensor per configured elevation condition.
    entities = [
        Sun2ElevationSensor(
            hass, cfg[CONF_ELEVATION][CONF_NAME], cfg[CONF_ELEVATION][CONF_ABOVE])
        for cfg in config[CONF_MONITORED_CONDITIONS]
        if CONF_ELEVATION in cfg
    ]
    async_add_entities(entities, True)
|
# Super7SegStockTicker.py
#
# Demo of Maniacal Labs Super 7 Seg Display
#
# Uses Alpha Vantage API to obtain current prices for
# stock and cryptocurrencies, then displays the prices
# to the Super7Seg display connected to the specified COM port
#
# Alpha Vantage API documentation: https://www.alphavantage.co/documentation/
import json
import requests
import time
from super7 import Super7, BaudRates
# Alpha Vantage API Key.
# https://www.alphavantage.co/support/#api-key
API_KEY = "---YOUR API KEY HERE---"
# Total display width. Equal to number of S7 displays x 12
TOTAL_CHARS = 12
# Stock and Cryptocurrency symbols
STOCKS = ['RHT', 'AMD', 'NVDA', 'ML']
CRYPTOS = ['BTC', 'ETH']
# Delay between each quote displayed
QUOTE_DISPLAY_DELAY = 5
# List of quotes will display this many times before prices are updated
GET_QUOTES_LOOP_COUNT = 5
# Initialize S7 display
# --Specify your COM port here--
# NOTE: this opens the serial port at import time; importing this module on
# a machine without the display connected will fail here.
s7 = Super7('COM4', baudrate=BaudRates.BAUD_38400)
s7.clear()
def scroll_in_msg(msg, delay=0.1):
    """Scroll text from R to L across available S7 displays.

    The display will pause when the quote reaches the left side
    of the display. This delay specified by QUOTE_DISPLAY_DELAY
    """
    # Prefix a full display-width of blanks so the text enters from the right.
    padded = ' ' * TOTAL_CHARS + msg
    for offset in range(len(padded) + 1):
        s7.write(padded[offset:offset + TOTAL_CHARS])
        if offset == TOTAL_CHARS:
            # The message has fully scrolled in; hold it on screen.
            time.sleep(QUOTE_DISPLAY_DELAY)
        time.sleep(delay)
def get_current_stock_price(symbol):
    """Request intra-day prices for *symbol*, return the most recent price.

    Uses the Alpha Vantage TIME_SERIES_INTRADAY endpoint at 1-minute
    resolution and returns the "open" value of the last refreshed bar as a
    float.  Raises KeyError if Alpha Vantage replies with an error or
    rate-limit payload instead of a quote.
    """
    resp = requests.get(
        "https://www.alphavantage.co/query",
        # Let requests build and URL-encode the query string.
        params={
            "function": "TIME_SERIES_INTRADAY",
            "symbol": symbol,
            "interval": "1min",
            "apikey": API_KEY,
        },
        # Without a timeout, requests waits forever on a stalled connection,
        # freezing the whole ticker loop.
        timeout=30,
    )
    data = resp.json()
    current = data["Meta Data"]["3. Last Refreshed"]
    return float(data["Time Series (1min)"][current]["1. open"])
def get_current_crypto_price(symbol):
    """Request intra-day prices for crypto *symbol*, return the latest USD price.

    Uses the Alpha Vantage DIGITAL_CURRENCY_INTRADAY endpoint and returns
    the "1b. price (USD)" value of the last refreshed bar as a float.
    Raises KeyError if Alpha Vantage replies with an error or rate-limit
    payload instead of a quote.
    """
    resp = requests.get(
        "https://www.alphavantage.co/query",
        # Let requests build and URL-encode the query string.
        params={
            "function": "DIGITAL_CURRENCY_INTRADAY",
            "symbol": symbol,
            "market": "CNY",
            "apikey": API_KEY,
        },
        # Without a timeout, requests waits forever on a stalled connection,
        # freezing the whole ticker loop.
        timeout=30,
    )
    data = resp.json()
    current = data["Meta Data"]["7. Last Refreshed"]
    return float(data["Time Series (Digital Currency Intraday)"][current]["1b. price (USD)"])
def update_quotes(stocks, cryptos):
    """Get current quotes, return dict with {symbol : price}."""
    quotes = {}
    for sym in stocks:
        quotes[sym] = get_current_stock_price(sym)
    for sym in cryptos:
        quotes[sym] = get_current_crypto_price(sym)
    return quotes
# Main loop: fetch fresh quotes, then cycle through the rotation
# GET_QUOTES_LOOP_COUNT times before hitting the API again.
while True:
    s7.clear()
    s7.write("-GET QUOTES-")
    quotesToPrint = update_quotes(STOCKS, CRYPTOS)
    for i in range(GET_QUOTES_LOOP_COUNT):
        for k, v in quotesToPrint.items():
            # --Uncomment the next two lines for static display (no scroll)
            # s7.write("" + k + " {0:.2f}".format(v))
            # time.sleep(QUOTE_DISPLAY_DELAY)
            # --OR--
            # --Uncomment the next two lines for scrolling display
            msg = "" + k + " {0:.2f}".format(v)
            scroll_in_msg(msg, 0.1)
|
<reponame>revnav/sandbox
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TransferApplianceSummary(object):
    """
    TransferApplianceSummary model.

    OCI data-transfer summary model holding a transfer appliance's label,
    lifecycle state, serial number and creation time.  Attribute/JSON-key
    mapping is declared in ``swagger_types`` / ``attribute_map``.
    """
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "REQUESTED"
    LIFECYCLE_STATE_REQUESTED = "REQUESTED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "ORACLE_PREPARING"
    LIFECYCLE_STATE_ORACLE_PREPARING = "ORACLE_PREPARING"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "SHIPPING"
    LIFECYCLE_STATE_SHIPPING = "SHIPPING"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "DELIVERED"
    LIFECYCLE_STATE_DELIVERED = "DELIVERED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "PREPARING"
    LIFECYCLE_STATE_PREPARING = "PREPARING"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "FINALIZED"
    LIFECYCLE_STATE_FINALIZED = "FINALIZED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "RETURN_DELAYED"
    LIFECYCLE_STATE_RETURN_DELAYED = "RETURN_DELAYED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "RETURN_SHIPPED"
    LIFECYCLE_STATE_RETURN_SHIPPED = "RETURN_SHIPPED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "RETURN_SHIPPED_CANCELLED"
    LIFECYCLE_STATE_RETURN_SHIPPED_CANCELLED = "RETURN_SHIPPED_CANCELLED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "ORACLE_RECEIVED"
    LIFECYCLE_STATE_ORACLE_RECEIVED = "ORACLE_RECEIVED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "ORACLE_RECEIVED_CANCELLED"
    LIFECYCLE_STATE_ORACLE_RECEIVED_CANCELLED = "ORACLE_RECEIVED_CANCELLED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "PROCESSING"
    LIFECYCLE_STATE_PROCESSING = "PROCESSING"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "COMPLETE"
    LIFECYCLE_STATE_COMPLETE = "COMPLETE"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "CUSTOMER_NEVER_RECEIVED"
    LIFECYCLE_STATE_CUSTOMER_NEVER_RECEIVED = "CUSTOMER_NEVER_RECEIVED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "ORACLE_NEVER_RECEIVED"
    LIFECYCLE_STATE_ORACLE_NEVER_RECEIVED = "ORACLE_NEVER_RECEIVED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "CUSTOMER_LOST"
    LIFECYCLE_STATE_CUSTOMER_LOST = "CUSTOMER_LOST"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "CANCELLED"
    LIFECYCLE_STATE_CANCELLED = "CANCELLED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "DELETED"
    LIFECYCLE_STATE_DELETED = "DELETED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "REJECTED"
    LIFECYCLE_STATE_REJECTED = "REJECTED"
    #: A constant which can be used with the lifecycle_state property of a TransferApplianceSummary.
    #: This constant has a value of "ERROR"
    LIFECYCLE_STATE_ERROR = "ERROR"
    def __init__(self, **kwargs):
        """
        Initializes a new TransferApplianceSummary object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param label:
            The value to assign to the label property of this TransferApplianceSummary.
        :type label: str
        :param lifecycle_state:
            The value to assign to the lifecycle_state property of this TransferApplianceSummary.
            Allowed values for this property are: "REQUESTED", "ORACLE_PREPARING", "SHIPPING", "DELIVERED", "PREPARING", "FINALIZED", "RETURN_DELAYED", "RETURN_SHIPPED", "RETURN_SHIPPED_CANCELLED", "ORACLE_RECEIVED", "ORACLE_RECEIVED_CANCELLED", "PROCESSING", "COMPLETE", "CUSTOMER_NEVER_RECEIVED", "ORACLE_NEVER_RECEIVED", "CUSTOMER_LOST", "CANCELLED", "DELETED", "REJECTED", "ERROR", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type lifecycle_state: str
        :param serial_number:
            The value to assign to the serial_number property of this TransferApplianceSummary.
        :type serial_number: str
        :param creation_time:
            The value to assign to the creation_time property of this TransferApplianceSummary.
        :type creation_time: datetime
        """
        # Python attribute name -> declared swagger type (used by the SDK's
        # generic (de)serialization machinery).
        self.swagger_types = {
            'label': 'str',
            'lifecycle_state': 'str',
            'serial_number': 'str',
            'creation_time': 'datetime'
        }
        # Python attribute name -> JSON field name on the wire.
        self.attribute_map = {
            'label': 'label',
            'lifecycle_state': 'lifecycleState',
            'serial_number': 'serialNumber',
            'creation_time': 'creationTime'
        }
        self._label = None
        self._lifecycle_state = None
        self._serial_number = None
        self._creation_time = None
    @property
    def label(self):
        """
        Gets the label of this TransferApplianceSummary.
        :return: The label of this TransferApplianceSummary.
        :rtype: str
        """
        return self._label
    @label.setter
    def label(self, label):
        """
        Sets the label of this TransferApplianceSummary.
        :param label: The label of this TransferApplianceSummary.
        :type: str
        """
        self._label = label
    @property
    def lifecycle_state(self):
        """
        Gets the lifecycle_state of this TransferApplianceSummary.
        Allowed values for this property are: "REQUESTED", "ORACLE_PREPARING", "SHIPPING", "DELIVERED", "PREPARING", "FINALIZED", "RETURN_DELAYED", "RETURN_SHIPPED", "RETURN_SHIPPED_CANCELLED", "ORACLE_RECEIVED", "ORACLE_RECEIVED_CANCELLED", "PROCESSING", "COMPLETE", "CUSTOMER_NEVER_RECEIVED", "ORACLE_NEVER_RECEIVED", "CUSTOMER_LOST", "CANCELLED", "DELETED", "REJECTED", "ERROR", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :return: The lifecycle_state of this TransferApplianceSummary.
        :rtype: str
        """
        return self._lifecycle_state
    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """
        Sets the lifecycle_state of this TransferApplianceSummary.
        :param lifecycle_state: The lifecycle_state of this TransferApplianceSummary.
        :type: str
        """
        allowed_values = ["REQUESTED", "ORACLE_PREPARING", "SHIPPING", "DELIVERED", "PREPARING", "FINALIZED", "RETURN_DELAYED", "RETURN_SHIPPED", "RETURN_SHIPPED_CANCELLED", "ORACLE_RECEIVED", "ORACLE_RECEIVED_CANCELLED", "PROCESSING", "COMPLETE", "CUSTOMER_NEVER_RECEIVED", "ORACLE_NEVER_RECEIVED", "CUSTOMER_LOST", "CANCELLED", "DELETED", "REJECTED", "ERROR"]
        # Forward compatibility: coerce unknown service values rather than raise.
        if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
            lifecycle_state = 'UNKNOWN_ENUM_VALUE'
        self._lifecycle_state = lifecycle_state
    @property
    def serial_number(self):
        """
        Gets the serial_number of this TransferApplianceSummary.
        :return: The serial_number of this TransferApplianceSummary.
        :rtype: str
        """
        return self._serial_number
    @serial_number.setter
    def serial_number(self, serial_number):
        """
        Sets the serial_number of this TransferApplianceSummary.
        :param serial_number: The serial_number of this TransferApplianceSummary.
        :type: str
        """
        self._serial_number = serial_number
    @property
    def creation_time(self):
        """
        Gets the creation_time of this TransferApplianceSummary.
        :return: The creation_time of this TransferApplianceSummary.
        :rtype: datetime
        """
        return self._creation_time
    @creation_time.setter
    def creation_time(self, creation_time):
        """
        Sets the creation_time of this TransferApplianceSummary.
        :param creation_time: The creation_time of this TransferApplianceSummary.
        :type: datetime
        """
        self._creation_time = creation_time
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Value equality: all attributes (including the maps above) must match.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
|
import datetime
import logging
import pytz
import os
import random
import re
import time
from pathlib import Path
import logzero
import pandas as pd
import torch
import torch.nn as nn
import numpy as np
from sklearn.metrics import roc_auc_score
from torch.utils.data import DataLoader
from logzero import logger
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau
from model import get_model
from dataset import SetiSimpleDataset, get_transforms
from config import load_config
from util import parse_args, get_folds
from optimizer import get_optimizer
from scheduler import get_scheduler
from module import train_fn, valid_fn, mixup_train_fn
from loss import FocalLoss
import torch.backends.cudnn as cudnn
import horovod.torch as hvd
import torch.utils.data.distributed
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
from util import bn_to_syncbn, DummySummaryWriter
# Some of the code was adapted from the following URL
# https://www.kaggle.com/yasufuminakama/cassava-resnext50-32x4d-starter-training
# Competition data layout: <repo root>/input/sbl/{train,test}.
# Assumes the script is launched from a direct subdirectory of the repo root.
ROOT = Path.cwd().parent
INPUT = ROOT / "input"
DATA = INPUT / "sbl"
TRAIN = DATA / "train"
TEST = DATA / "test"
def get_path_label(df: pd.DataFrame, img_dir: str):
    """Get file path and target info.

    Returns a dict with the .npy file path, float32 label matrix and id
    for every row of *df*; files are sharded by the first character of
    the id (img_dir/<id[0]>/<id>.npy).
    """
    ids = df["id"].values
    return {
        "paths": [img_dir / f"{sample_id[0]}/{sample_id}.npy" for sample_id in ids],
        "labels": df[CLASSES].values.astype("f"),
        "id": ids,
    }
def seed_everything(seed):
    """Seed every RNG (python, hash, numpy, torch CPU/CUDA) for reproducibility.

    :param seed: integer seed applied to all random number generators.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    # benchmark=True lets cuDNN auto-select (potentially nondeterministic)
    # algorithms, defeating the deterministic flag above — it must be False
    # for reproducible runs.  Callers that prefer speed over determinism can
    # still opt back in afterwards (the __main__ block does exactly that).
    torch.backends.cudnn.benchmark = False
class ReduceLROnPlateauPatch(ReduceLROnPlateau):
    """ReduceLROnPlateau with a get_lr() accessor like other schedulers."""
    def get_lr(self):
        # Report the current learning rate of every param group.
        lrs = []
        for group in self.optimizer.param_groups:
            lrs.append(group['lr'])
        return lrs
def train_loop(conf, hvd, folds, fold, logger, log_basename, total_epochs,
               new_train, new_test):
    """Train and/or validate one CV fold under Horovod.

    Returns (best_score, best_y_true, best_y_preds, valid_folds-with-preds).
    When new_train/new_test is set, training data splits are kept but
    validation runs over the full train/test set instead (inference mode).
    """
    logger.info(f"=============== fold: {fold} training ===============")
    if conf.ckpt_path:
        # Point the checkpoint path at this fold's best weights.
        conf.ckpt_path = re.sub('fold._best', f"fold{fold}_best", conf.ckpt_path)
        logger.info(f"replace ckpt_path: {conf.ckpt_path}")
    # Sort folds by id beforehand (required for DDP validation).
    folds = folds.sort_values(by=['id']).reset_index(drop=True)
    # loader
    trn_idx = folds[folds['fold'] != fold].index
    val_idx = folds[folds['fold'] == fold].index
    train_folds = folds.loc[trn_idx].reset_index(drop=True)
    valid_folds = folds.loc[val_idx].reset_index(drop=True)
    # Inference modes: validate over the whole train set / test set.
    if new_train:
        valid_folds = pd.read_csv(DATA / 'train_labels.csv')
    if new_test:
        valid_folds = pd.read_csv(DATA / 'sample_submission.csv')
    tb_logname = os.path.join(conf.log_dir, f"{log_basename}_fold{fold}")
    # Only rank 0 writes TensorBoard logs; other ranks get a no-op writer.
    if hvd.rank() == 0:
        tb_writer = SummaryWriter(log_dir=tb_logname)
    else:
        tb_writer = DummySummaryWriter()
    train_path_label = get_path_label(train_folds, TRAIN)
    valid_path_label = get_path_label(valid_folds, TRAIN)
    if new_train:
        valid_path_label = get_path_label(valid_folds, TRAIN)
    if new_test:
        valid_path_label = get_path_label(valid_folds, TEST)
    # pseudo label
    if conf.pseudo_label:
        # Append pseudo-labeled test samples to the training set.
        pseudo_folds = pd.read_csv('pseudo_test_labels.csv')
        pseudo_path_label = get_path_label(pseudo_folds, TEST)
        train_path_label['paths'] = np.concatenate([train_path_label['paths'], pseudo_path_label['paths']])
        train_path_label['labels'] = np.concatenate([train_path_label['labels'], pseudo_path_label['labels']])
        train_path_label['id'] = np.concatenate([train_path_label['id'], pseudo_path_label['id']])
        logger.info("use pseudo labeled data")
    train_dataset = SetiSimpleDataset(paths=train_path_label['paths'],
                                      labels=train_path_label['labels'],
                                      ids=train_path_label['id'],
                                      transform=get_transforms(conf, conf.train_trans_mode),
                                      target_only=conf.target_only,
                                      seed=conf.seed)
    valid_dataset = SetiSimpleDataset(paths=valid_path_label['paths'],
                                      labels=valid_path_label['labels'],
                                      ids=valid_path_label['id'],
                                      transform=get_transforms(conf, conf.valid_trans_mode),
                                      target_only=conf.target_only,
                                      with_id=True)
    # Shard both datasets across Horovod workers.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    valid_sampler = torch.utils.data.distributed.DistributedSampler(
        valid_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    train_loader = DataLoader(train_dataset,
                              batch_size=conf.train_bs,
                              sampler=train_sampler,
                              num_workers=conf.num_workers,
                              pin_memory=conf.pin_memory,
                              drop_last=True)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=conf.valid_bs,
                              sampler=valid_sampler,
                              num_workers=conf.num_workers,
                              pin_memory=conf.pin_memory,
                              drop_last=False)
    if conf.mixup:
        # gen second train_dataset
        # Second, differently-seeded stream of the same data to mix against.
        train_dataset2 = SetiSimpleDataset(paths=train_path_label['paths'],
                                           labels=train_path_label['labels'],
                                           ids=train_path_label['id'],
                                           transform=get_transforms(conf, conf.train_trans_mode),
                                           target_only=conf.target_only,
                                           seed=conf.seed+1000)
        train_sampler2 = torch.utils.data.distributed.DistributedSampler(
            train_dataset2, num_replicas=hvd.size(), rank=hvd.rank())
        train_loader2 = DataLoader(train_dataset2,
                                   batch_size=conf.train_bs,
                                   sampler=train_sampler2,
                                   num_workers=conf.num_workers,
                                   pin_memory=conf.pin_memory,
                                   drop_last=True)
    # update print_freq
    if conf.print_freq == 0:
        conf.print_freq = max(2, len(train_loader) // 10)
    # model
    device = torch.device(conf.device)
    if conf.loss_type == 'bce':
        criterion = nn.BCEWithLogitsLoss()
    elif conf.loss_type == 'focal':
        criterion = FocalLoss(gamma=conf.focal_loss_gamma)
    else:
        raise NotImplementedError(conf.loss_type)
    model = get_model(conf, conf.backbone_model_name, logger)
    if conf.sync_bn:
        model.apply(bn_to_syncbn)
        logger.info('convert bn to sync_bn')
    if conf.overwrite_gem_p:
        # Force the GeM pooling exponent instead of the checkpoint value.
        model.global_pool.p.data.fill_(conf.overwrite_gem_p)
        logger.info(f"overwrite_gem_p: {conf.overwrite_gem_p}")
    model = model.to(device)
    # Sync initial weights/optimizer state from rank 0 to all workers.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    parameters = [
        {'params': model.parameters()}
    ]
    named_parameters = list(model.named_parameters())
    optimizer = get_optimizer(conf, parameters)
    optimizer = hvd.DistributedOptimizer(
        optimizer, named_parameters=named_parameters,
        compression=hvd.Compression.none)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)
    scheduler = get_scheduler(conf, optimizer, train_loader, logger)
    global_step = 0
    best_score = 0
    best_loss = np.inf
    current_lr = scheduler.get_last_lr()
    logger.info(f"lr: {current_lr}")
    tb_writer.add_scalar('Other/LearningRate', current_lr[0], global_step)
    best_y_true = None
    best_y_preds = None
    for epoch in range(conf.epochs):
        start_time = time.time()
        if conf.train:
            if conf.mixup:
                avg_loss, global_step = mixup_train_fn(conf, global_step,
                                                       train_loader, train_loader2,
                                                       model, criterion,
                                                       optimizer, epoch, scheduler, device, train_sampler, train_sampler2, logger, tb_writer)
            else:
                avg_loss, global_step = train_fn(conf, global_step, train_loader, model, criterion,
                                                 optimizer, epoch, scheduler, device, train_sampler, logger, tb_writer)
        # val
        avg_loss, score, y_true, y_preds = valid_fn(conf, global_step, valid_loader, model, criterion,
                                                    device, True, hvd, logger, tb_writer, new_test)
        if isinstance(scheduler, ReduceLROnPlateau):
            if conf.plateau_mode == 'min':
                scheduler.step(avg_loss)
            elif conf.plateau_mode == 'max':
                scheduler.step(score)
        current_lr = scheduler.get_last_lr()
        logger.info(f"lr: {current_lr}")
        tb_writer.add_scalar('Other/LearningRate', current_lr[0], global_step)
        if conf.train:
            # Checkpoint best-score / best-loss / per-epoch weights (rank 0 only).
            if score > best_score:
                best_score = score
                logger.info(f'Fold {fold} Epoch {epoch+1} - Save Best Score: {best_score:.4f} Model')
                if hvd.rank() == 0:
                    torch.save({'model': model.state_dict()},
                               f'{OUTPUT_DIR}/fold{fold}_best_score.pth')
                best_y_true = y_true
                best_y_preds = y_preds
            if avg_loss < best_loss:
                best_loss = avg_loss
                logger.info(f'Fold {fold} Epoch {epoch+1} - Save Best Loss: {best_loss:.4f} Model')
                if hvd.rank() == 0:
                    torch.save({'model': model.state_dict()},
                               f'{OUTPUT_DIR}/fold{fold}_best_loss.pth')
            if hvd.rank() == 0:
                torch.save({'model': model.state_dict()},
                           f'{OUTPUT_DIR}/fold{fold}_epoch{epoch}.pth')
        else:
            if score > best_score:
                best_score = score
                best_y_true = y_true
                best_y_preds = y_preds
        elapsed = time.time() - start_time
        if conf.train:
            logger.info(f'Fold {fold} Epoch {epoch+1} - AUROC score: {score:.4f} Best: {best_score:.4f} time: {elapsed:.0f}s')
            logger.info(f'output_dir: {OUTPUT_DIR}')
        else:
            logger.info(f'AUROC score: {score:.4f}')
        # Estimate remaining wall time across all remaining epochs/folds.
        total_epochs -= 1
        full_elapsed = total_epochs * int(elapsed)
        time_delta = datetime.timedelta(seconds=full_elapsed)
        now = datetime.datetime.now(pytz.timezone('Asia/Tokyo'))
        end_time = now + time_delta
        logger.info(f"Expected remaining seconds: {full_elapsed} sec")
        logger.info(f"Expected end time: {end_time}")
    # NOTE(review): if no epoch improved the score, best_y_preds stays None
    # and the preds column below becomes all-None — confirm callers tolerate it.
    valid_folds['preds'] = best_y_preds
    return best_score, best_y_true, best_y_preds, valid_folds
if __name__ == '__main__':
    args = parse_args()
    conf = load_config(args.conf)
    # Selected CLI flags override the config file (only when truthy).
    for (key, value) in args._get_kwargs():
        if key in ['input_width', 'input_height',
                   'scale_width', 'scale_height',
                   'valid_bs', 'valid_trans_mode',
                   'ckpt_path', 'train_fold',
                   'overwrite_gem_p',
                   'tta_hflip', 'tta_vflip', 'tta_sigmoid',
                   'seed']:
            if value:
                setattr(conf, key, value)
    # Inference-only modes: disable training and run a single "epoch".
    if args.test or args.new_train or args.new_test:
        conf.train = False
        conf.epochs = 1
    seed_everything(conf.seed)
    hvd.init()
    torch.manual_seed(conf.seed)
    # Pin each Horovod worker to its local GPU.
    torch.cuda.set_device(hvd.local_rank())
    torch.cuda.manual_seed(conf.seed)
    cudnn.benchmark = True
    FOLD_SEED = conf.fold_seed
    CLASSES = ["target",]
    N_FOLDS = conf.n_fold
    formatter = logging.Formatter('%(message)s')
    logzero.formatter(formatter)
    if not os.path.exists(conf.log_dir):
        os.makedirs(conf.log_dir, exist_ok=True)
    if args.new_train:
        log_basename = f"new_train_{conf.prefix}-{conf.backbone_model_name}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}"
    elif args.new_test:
        log_basename = f"new_test_{conf.prefix}-{conf.backbone_model_name}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}"
    else:
        log_basename = f"{conf.prefix}-{conf.backbone_model_name}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}"
    log_filename = f"{log_basename}.log"
    # NOTE(review): this unconditional logfile call looks redundant — the
    # rank check right below configures the logfile again; confirm the first
    # call can be dropped.
    logzero.logfile(os.path.join(conf.log_dir, log_filename))
    # Disable the logger on every node except the main (rank 0) node.
    if hvd.rank() == 0:
        logzero.logfile(os.path.join(conf.log_dir, log_filename))
    else:
        logzero.logfile('', disableStderrLogger=True)
    logger.info(conf)
    OUTPUT_DIR = f"{conf.model_dir}/{conf.prefix}_{conf.backbone_model_name}"
    if hvd.rank() == 0:
        os.makedirs(f"{OUTPUT_DIR}", exist_ok=True)
    logger.info(f"output_dir: {OUTPUT_DIR}")
    train = get_folds(conf, N_FOLDS, FOLD_SEED, DATA, logger)
    trn_fold = [int(elem) for elem in conf.train_fold.split(',')]
    total_epochs = conf.epochs * len(trn_fold)
    oof_y_true = []
    oof_y_preds = []
    oof_df = pd.DataFrame()
    submit_df_list = []
    for fold in range(conf.n_fold):
        if fold in trn_fold:
            best_score, best_y_true, best_y_preds, _oof_df =\
                train_loop(conf, hvd, train, fold, logger, log_basename, total_epochs,
                           args.new_train, args.new_test)
            if args.new_train or args.new_test:
                if args.ensemble_sigmoid:
                    # Apply sigmoid before ensembling raw logits across folds.
                    _oof_df['preds'] = torch.tensor(_oof_df['preds'].values).sigmoid().numpy()
                submit_df_list.append(_oof_df)
                # NOTE(review): the args.test branch is redundant — both it
                # and the final else produce 'oof'; confirm and simplify.
                if args.new_test:
                    prefix = 'new_test'
                elif args.new_train:
                    prefix = 'new_train'
                elif args.test:
                    prefix = 'oof'
                else:
                    prefix = 'oof'
                _oof_df.to_csv(f"{OUTPUT_DIR}/{prefix}_fold{fold}.csv", index=False)
            else:
                oof_df = pd.concat([oof_df, _oof_df])
                oof_y_true.append(best_y_true)
                oof_y_preds.append(best_y_preds)
            logger.info(f"fold{fold} Best Score: {best_score:.4f}")
        # Shrink the remaining-time estimate even for skipped folds.
        total_epochs -= conf.epochs
    if args.new_train or args.new_test:
        # Sum per-fold predictions into a single submission / new-label frame.
        sub_df = None
        if not args.new_test:
            for oof_df in submit_df_list:
                if sub_df is not None:
                    sub_df['preds'] = sub_df['preds'] + oof_df['preds']
                else:
                    sub_df = oof_df
            score = roc_auc_score(sub_df.target.values, sub_df.preds.values)
            logger.info(f"oof test score: {score}")
        else:
            for oof_df in submit_df_list:
                if sub_df is not None:
                    sub_df['target'] = sub_df['target'] + oof_df['preds']
                else:
                    # First fold: rename preds -> target to match submission format.
                    oof_df = oof_df.drop('target', axis=1)
                    oof_df.columns = ['id', 'target']
                    sub_df = oof_df
        if hvd.rank() == 0:
            sub_df = sub_df.sort_values(by=['id']).reset_index(drop=True)
            if args.new_train:
                sub_df.to_csv(f"{OUTPUT_DIR}/new_train.csv", index=False)
            if args.new_test:
                sub_df.to_csv(f"{OUTPUT_DIR}/new_test.csv", index=False)
    else:
        # Full out-of-fold score is only meaningful when every fold was run.
        if len(trn_fold) == N_FOLDS:
            oof_y_true = np.concatenate(oof_y_true)
            oof_y_preds = np.concatenate(oof_y_preds)
            score = roc_auc_score(oof_y_true, oof_y_preds)
            logger.info(f"oof score: {score}")
        if hvd.rank() == 0:
            oof_df = oof_df.sort_values(by=['id']).reset_index(drop=True)
            oof_df.to_csv(f"{OUTPUT_DIR}/oof_df.csv", index=False)
    logger.info(f"log saved: {os.path.join(conf.log_dir, log_filename)}")
|
<gh_stars>100-1000
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.http import Http404, HttpResponseForbidden
from django.urls import include, path
from django.urls.converters import register_converter
from django.views.defaults import page_not_found, permission_denied, server_error
from django.views.generic import RedirectView
from exporter import views as exporter_views
from . import converters, views
# Register custom URL path converters so the routes below can use
# "<uslug:...>" (Unicode slug) and "<item_id:...>" path segments.
register_converter(converters.UnicodeSlugConverter, "uslug")
register_converter(converters.ItemIdConverter, "item_id")
# Campaign/transcription routes. This tuple is included under "campaigns/"
# with the "transcriptions" namespace (see urlpatterns below), so names here
# are reversed as "transcriptions:<name>".
tx_urlpatterns = (
    [
        path("", views.CampaignListView.as_view(), name="campaign-list"),
        path(
            "<uslug:slug>/", views.CampaignDetailView.as_view(), name="campaign-detail"
        ),
        # Export endpoints (CSV and BagIt) at campaign, project and item scope.
        path(
            "<uslug:campaign_slug>/export/csv/",
            exporter_views.ExportCampaignToCSV.as_view(),
            name="campaign-export-csv",
        ),
        path(
            "<uslug:campaign_slug>/export/bagit/",
            exporter_views.ExportCampaignToBagIt.as_view(),
            name="campaign-export-bagit",
        ),
        path(
            "<uslug:campaign_slug>/<uslug:project_slug>/export/bagit/",
            exporter_views.ExportProjectToBagIt.as_view(),
            name="project-export-bagit",
        ),
        path(
            (
                "<uslug:campaign_slug>/<uslug:project_slug>/"
                "<item_id:item_id>/export/bagit/"
            ),
            exporter_views.ExportItemToBagIt.as_view(),
            name="item-export-bagit",
        ),
        path(
            "<uslug:campaign_slug>/report/",
            views.ReportCampaignView.as_view(),
            name="campaign-report",
        ),
        path(
            (
                "<uslug:campaign_slug>/<uslug:project_slug>/"
                "<item_id:item_id>/<uslug:slug>/"
            ),
            views.AssetDetailView.as_view(),
            name="asset-detail",
        ),
        # n.b. this must be above project-detail to avoid being seen as a project slug:
        path(
            "<uslug:campaign_slug>/next-transcribable-asset/",
            views.redirect_to_next_transcribable_campaign_asset,
            name="redirect-to-next-transcribable-campaign-asset",
        ),
        path(
            "<uslug:campaign_slug>/next-reviewable-asset/",
            views.redirect_to_next_reviewable_campaign_asset,
            name="redirect-to-next-reviewable-campaign-asset",
        ),
        path(
            "<uslug:campaign_slug>/<uslug:slug>/",
            views.ProjectDetailView.as_view(),
            name="project-detail",
        ),
        path(
            "<uslug:campaign_slug>/<uslug:project_slug>/<item_id:item_id>/",
            views.ItemDetailView.as_view(),
            name="item-detail",
        ),
    ],
    "transcriptions",
)
# Site-wide URL configuration: static/simple pages, transcription workflow
# endpoints, account management, captcha, admin, and internal error pages.
urlpatterns = [
    path("", views.HomeView.as_view(), name="homepage"),
    path("healthz", views.healthz, name="health-check"),
    # NOTE(review): AccountLetterView is passed without .as_view(), unlike the
    # other class-based views in this file — confirm it is function-based.
    path("letter", views.AccountLetterView, name="user-letter"),
    # Flat content pages all share the simple_page view; the URL name selects
    # the page content.
    path("about/", views.simple_page, name="about"),
    path("help-center/", views.simple_page, name="help-center"),
    path("help-center/welcome-guide/", views.simple_page, name="welcome-guide"),
    path("help-center/how-to-transcribe/", views.simple_page, name="how-to-transcribe"),
    path("help-center/how-to-review/", views.simple_page, name="how-to-review"),
    path("help-center/how-to-tag/", views.simple_page, name="how-to-tag"),
    path(
        "help-center/welcome-guide-esp/",
        views.simple_page,
        name="welcome-guide-spanish",
    ),
    path(
        "help-center/how-to-transcribe-esp/",
        views.simple_page,
        name="how-to-transcribe-spanish",
    ),
    path(
        "help-center/how-to-review-esp/",
        views.simple_page,
        name="how-to-review-spanish",
    ),
    path("help-center/how-to-tag-esp/", views.simple_page, name="how-to-tag-spanish"),
    path("for-educators/", views.simple_page, name="for-educators"),
    path("for-staff/", views.simple_page, name="for-staff"),
    path("resources/", views.simple_page, name="resources"),
    path(
        "latest/",
        RedirectView.as_view(pattern_name="about", permanent=True, query_string=True),
    ),
    path("questions/", views.simple_page, name="questions"),
    path("contact/", views.ContactUsView.as_view(), name="contact"),
    path("act/", views.action_app, name="action-app"),
    path(
        "campaigns-topics/",
        views.CampaignTopicListView.as_view(),
        name="campaign-topic-list",
    ),
    path("topics/", views.TopicListView.as_view(), name="topic-list"),
    path("topics/<uslug:slug>/", views.TopicDetailView.as_view(), name="topic-detail"),
    path(
        "topics/<uslug:topic_slug>/next-transcribable-asset/",
        views.redirect_to_next_transcribable_topic_asset,
        name="redirect-to-next-transcribable-topic-asset",
    ),
    path(
        "topics/<uslug:topic_slug>/next-reviewable-asset/",
        views.redirect_to_next_reviewable_topic_asset,
        name="redirect-to-next-reviewable-topic-asset",
    ),
    path(
        "next-transcribable-asset/",
        views.redirect_to_next_transcribable_asset,
        name="redirect-to-next-transcribable-asset",
    ),
    path(
        "next-reviewable-asset/",
        views.redirect_to_next_reviewable_asset,
        name="redirect-to-next-reviewable-asset",
    ),
    # All campaign routes live under the "transcriptions" namespace.
    path("campaigns/", include(tx_urlpatterns, namespace="transcriptions")),
    path("reserve-asset/<int:asset_pk>/", views.reserve_asset, name="reserve-asset"),
    path(
        "assets/<int:asset_pk>/transcriptions/save/",
        views.save_transcription,
        name="save-transcription",
    ),
    path(
        "transcriptions/<int:pk>/submit/",
        views.submit_transcription,
        name="submit-transcription",
    ),
    path(
        "transcriptions/<int:pk>/review/",
        views.review_transcription,
        name="review-transcription",
    ),
    path("assets/<int:asset_pk>/tags/submit/", views.submit_tags, name="submit-tags"),
    path("assets/", views.AssetListView.as_view(), name="asset-list"),
    path(
        "transcribe/", views.TranscribeListView.as_view(), name="transcribe-asset-list"
    ),
    path("review/", views.ReviewListView.as_view(), name="review-asset-list"),
    path("account/ajax-status/", views.ajax_session_status, name="ajax-session-status"),
    path("account/ajax-messages/", views.ajax_messages, name="ajax-messages"),
    path(
        "account/register/",
        views.ConcordiaRegistrationView.as_view(),
        name="registration_register",
    ),
    path(
        "account/login/", views.ConcordiaLoginView.as_view(), name="registration_login"
    ),
    path("account/profile/", views.AccountProfileView.as_view(), name="user-profile"),
    path(
        "account/password_reset/",
        views.ConcordiaPasswordResetRequestView.as_view(),
        name="password_reset",
    ),
    path(
        "account/reset/<uidb64>/<token>/",
        views.ConcordiaPasswordResetConfirmView.as_view(),
        name="password_reset_confirm",
    ),
    path("account/", include("django_registration.backends.activation.urls")),
    path("account/", include("django.contrib.auth.urls")),
    path(
        ".well-known/change-password",  # https://wicg.github.io/change-password-url/
        RedirectView.as_view(pattern_name="password_change"),
    ),
    path("captcha/ajax/", views.ajax_captcha, name="ajax-captcha"),
    path("captcha/", include("captcha.urls")),
    path("admin/", admin.site.urls),
    # Internal support assists:
    path("error/500/", server_error),
    path("error/404/", page_not_found, {"exception": Http404()}),
    path("error/429/", views.ratelimit_view),
    path("error/403/", permission_denied, {"exception": HttpResponseForbidden()}),
    # NOTE(review): django.conf.urls.url is deprecated in favor of
    # path()/re_path() — confirm the Django version before migrating.
    url("", include("django_prometheus_metrics.urls")),
    path("robots.txt", include("robots.urls")),
]
# Development-only additions: Debug Toolbar routes (prepended so they are
# matched first) and local serving of uploaded media files.
if settings.DEBUG:
    import debug_toolbar
    from django.conf.urls.static import static
    urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
"""Tests for working with the rules database."""
import base64
import sqlite3
import tempfile
from unittest.mock import Mock, create_autospec, patch
import pytest
import urllib3
from pywemo.exceptions import HTTPException, RulesDbQueryError
from pywemo.ouimeaux_device.api import rules_db
from pywemo.ouimeaux_device.api.service import REQUESTS_TIMEOUT, Session
MOCK_NAME = "WemoDeviceName"
MOCK_UDN = "WemoDeviceUDN"
MOCK_TARGET_UDN = "WemoTargetUDN"
MOCK_RULE_TYPE = "RuleType"
@pytest.fixture()
def temp_file():
    """Yield a named temporary file to host a scratch Wemo rules database."""
    with tempfile.NamedTemporaryFile(prefix="wemorules", suffix=".db") as db_file:
        yield db_file
@pytest.fixture()
def sqldb(temp_file):
    """Yield a sqlite3 connection to a freshly created, empty rules db."""
    rules_db._create_empty_db(temp_file.name)
    try:
        conn = sqlite3.connect(temp_file.name)
        # Row factory gives dict-like access to columns in the tests.
        conn.row_factory = sqlite3.Row
        yield conn
    finally:
        conn.close()
def test_create_empty_db(sqldb):
    """The freshly created db must contain exactly the expected table schema."""
    statements = set(
        line for line in sqldb.iterdump() if line.startswith('CREATE TABLE')
    )
    # flake8: noqa: E501 (long lines)
    assert statements == set(
        [
            # https://github.com/pywemo/pywemo/issues/61#issuecomment-748693894
            "CREATE TABLE RULES(RuleID PRIMARY KEY, Name TEXT NOT NULL, Type TEXT NOT NULL, RuleOrder INTEGER, StartDate TEXT, EndDate TEXT, State TEXT, Sync INTEGER);",
            "CREATE TABLE RULEDEVICES(RuleDevicePK INTEGER PRIMARY KEY AUTOINCREMENT, RuleID INTEGER, DeviceID TEXT, GroupID INTEGER, DayID INTEGER, StartTime INTEGER, RuleDuration INTEGER, StartAction REAL, EndAction REAL, SensorDuration INTEGER, Type INTEGER, Value INTEGER, Level INTEGER, ZBCapabilityStart TEXT, ZBCapabilityEnd TEXT, OnModeOffset INTEGER, OffModeOffset INTEGER, CountdownTime INTEGER, EndTime INTEGER);",
            "CREATE TABLE DEVICECOMBINATION(DeviceCombinationPK INTEGER PRIMARY KEY AUTOINCREMENT, RuleID INTEGER, SensorID TEXT, SensorGroupID INTEGER, DeviceID TEXT, DeviceGroupID INTEGER);",
            "CREATE TABLE GROUPDEVICES(GroupDevicePK INTEGER PRIMARY KEY AUTOINCREMENT, GroupID INTEGER, DeviceID TEXT);",
            "CREATE TABLE LOCATIONINFO(LocationPk INTEGER PRIMARY KEY AUTOINCREMENT, cityName TEXT, countryName TEXT, latitude TEXT, longitude TEXT, countryCode TEXT, region TEXT);",
            "CREATE TABLE BLOCKEDRULES(Primarykey INTEGER PRIMARY KEY AUTOINCREMENT, ruleId TEXT);",
            "CREATE TABLE RULESNOTIFYMESSAGE(RuleID INTEGER PRIMARY KEY AUTOINCREMENT, NotifyRuleID INTEGER, Message TEXT, Frequency INTEGER);",
            "CREATE TABLE SENSORNOTIFICATION(SensorNotificationPK INTEGER PRIMARY KEY AUTOINCREMENT, RuleID INTEGER, NotifyRuleID INTEGER, NotificationMessage TEXT, NotificationDuration INTEGER);",
            "CREATE TABLE TARGETDEVICES(TargetDevicesPK INTEGER PRIMARY KEY AUTOINCREMENT, RuleID INTEGER, DeviceID TEXT, DeviceIndex INTEGER);",
        ]
    )
def test_pack_unpack_db(temp_file, sqldb):
    """Packing and then unpacking the db must round-trip the schema."""
    before = set(
        stmt for stmt in sqldb.iterdump() if stmt.startswith('CREATE TABLE')
    )
    packed = rules_db._pack_db(temp_file, "inner.db")
    inner_name = rules_db._unpack_db(base64.b64decode(packed), temp_file)
    assert inner_name == "inner.db"
    conn = sqlite3.connect(temp_file.name)
    try:
        after = set(
            stmt for stmt in conn.iterdump() if stmt.startswith('CREATE TABLE')
        )
    finally:
        conn.close()
    assert before == after
def test_auto_primary_key(sqldb):
    """Ensure the primary key for a row is updated when it is added to the db."""
    cursor = sqldb.cursor()
    row1 = rules_db.TargetDevicesRow(RuleID=12)
    row2 = rules_db.TargetDevicesRow(RuleID=34)
    row1.update_db(cursor)
    row2.update_db(cursor)
    # AUTOINCREMENT keys are sequential, so the second insert gets PK + 1.
    assert row1.TargetDevicesPK + 1 == row2.TargetDevicesPK
def test_add_remove(sqldb):
    """Rows of each table can be added to and removed from the RulesDb."""
    db = rules_db.RulesDb(sqldb, MOCK_UDN, MOCK_NAME)
    # Rules
    assert len(db._rules) == 0
    rule = db.add_rule(
        rules_db.RulesRow(
            RuleID=501,
            Name="Long Press Rule",
            Type=MOCK_RULE_TYPE,
            State=1,
        )
    )
    assert len(db._rules) == 1
    db.remove_rule(rule)
    assert len(db._rules) == 0
    # RuleDevices
    assert len(db._rule_devices) == 0
    device = db.add_rule_devices(
        rules_db.RuleDevicesRow(RuleDevicePK=1, RuleID=501, DeviceID=MOCK_UDN)
    )
    assert len(db._rule_devices) == 1
    db.remove_rule_devices(device)
    assert len(db._rule_devices) == 0
    # TargetDevices
    assert len(db._target_devices) == 0
    target = db.add_target_devices(
        rules_db.TargetDevicesRow(RuleID=501, DeviceID=MOCK_TARGET_UDN)
    )
    assert len(db._target_devices) == 1
    db.remove_target_devices(target)
    assert len(db._target_devices) == 0
def test_clear_all(sqldb):
    """clear_all() must empty every table tracked by the RulesDb."""
    db = rules_db.RulesDb(sqldb, MOCK_UDN, MOCK_NAME)
    rule = db.add_rule(
        rules_db.RulesRow(
            RuleID=501,
            Name="Long Press Rule",
            Type=MOCK_RULE_TYPE,
            State=1,
        )
    )
    assert len(db._rules) == 1
    # RuleDevices
    assert len(db._rule_devices) == 0
    device = db.add_rule_devices(
        rules_db.RuleDevicesRow(RuleDevicePK=1, RuleID=501, DeviceID=MOCK_UDN)
    )
    assert len(db._rule_devices) == 1
    # TargetDevices
    assert len(db._target_devices) == 0
    target = db.add_target_devices(
        rules_db.TargetDevicesRow(RuleID=501, DeviceID=MOCK_TARGET_UDN)
    )
    assert len(db._target_devices) == 1
    db.clear_all()
    assert len(db._rules) == 0
    assert len(db._rule_devices) == 0
    assert len(db._target_devices) == 0
def test_update_if_modified_field_changed(sqldb):
    """Mutating a row attribute must mark the database as modified."""
    cursor = sqldb.cursor()
    rules_db.RulesRow(
        RuleID=501,
        Name="Long Press Rule",
        Type=MOCK_RULE_TYPE,
        State=1,
    ).update_db(cursor)
    rules_db.RuleDevicesRow(
        RuleDevicePK=1, RuleID=501, DeviceID=MOCK_UDN
    ).update_db(cursor)
    db = rules_db.RulesDb(sqldb, MOCK_UDN, MOCK_NAME)
    rule, device = db.rules_for_device()[0]
    assert db.update_if_modified() is False
    # Modifying an entry in the db should cause update_if_modified() to be True
    rule.State = 0
    assert db.update_if_modified() is True
def test_update_if_modified_new_entry(sqldb):
    """Adding a brand-new row must mark the database as modified."""
    db = rules_db.RulesDb(sqldb, MOCK_UDN, MOCK_NAME)
    rule = rules_db.RulesRow(RuleID=501)
    assert db.update_if_modified() is False
    # A newly-added entry should flip update_if_modified() to True.
    db.add_target_device_to_rule(rule, MOCK_TARGET_UDN)
    assert db.update_if_modified() is True
def test_add_remove_target_device_to_rule(sqldb):
    """Target devices can be attached to and detached from a rule."""
    db = rules_db.RulesDb(sqldb, MOCK_UDN, MOCK_NAME)
    rule = rules_db.RulesRow(RuleID=501)

    def targets():
        # Helper: the current set of target devices for the rule.
        return db.get_target_devices_for_rule(rule)

    assert MOCK_TARGET_UDN not in targets()
    db.add_target_device_to_rule(rule, MOCK_TARGET_UDN)
    assert MOCK_TARGET_UDN in targets()
    db.remove_target_device_from_rule(rule, MOCK_TARGET_UDN)
    assert MOCK_TARGET_UDN not in targets()
def test_get_target_devices_for_rule(sqldb):
    """Target devices stored for a rule are returned as a frozenset."""
    cursor = sqldb.cursor()
    rule = rules_db.RulesRow(RuleID=501)
    rules_db.TargetDevicesRow(
        RuleID=rule.RuleID,
        DeviceID=MOCK_TARGET_UDN,
    ).update_db(cursor)
    db = rules_db.RulesDb(sqldb, MOCK_UDN, MOCK_NAME)
    assert db.get_target_devices_for_rule(rule) == frozenset([MOCK_TARGET_UDN])
def test_entry_with_no_primary_key(sqldb):
    """A row whose primary key is NULL must be tolerated and left unindexed."""
    # Create a RULEDEVICES table that allows NULLS for RuleDevicePK
    # From https://github.com/pywemo/pywemo/issues/276
    sqldb.cursor().execute("DROP TABLE RULEDEVICES")
    sqldb.cursor().execute(
        """CREATE TABLE RULEDEVICES (RuleDevicePK UNIQUE, RuleID INTEGER, DeviceID, GroupID, DayID INTEGER, StartTime,RuleDuration, StartAction INTEGER, EndAction INTEGER, SensorDuration,Type,Value,Level,ZBCapabilityStart TEXT DEFAULT "", ZBCapabilityEnd TEXT DEFAULT "", OnModeOffset INTEGER DEFAULT 0,OffModeOffset INTEGER DEFAULT 0,CountdownTime INTEGER DEFAULT 0,EndTime INTEGER DEFAULT 0, ProductName TEXT DEFAULT "")"""
    )
    sqldb.cursor().execute(
        "INSERT INTO RULEDEVICES VALUES(NULL,22,'uuid:Lightswitch-1_0','0',1,'60','86280',0,0,'0','-1','-1','-1','-1','-1',0,0,1800,86340,'')"
    )
    # Should not cause an exception.
    db = rules_db.RulesDb(sqldb, MOCK_UDN, MOCK_NAME)
    # Should not be indexed either.
    assert len(db.rule_devices) == 0
def test_rules_db_from_device(temp_file, sqldb):
    """End-to-end: fetch a packed db from the device, modify it, store it back."""
    rules_db.RulesRow(RuleID=501, Name="", Type="").update_db(sqldb.cursor())
    sqldb.commit()
    sqldb.close()
    zip_content = base64.b64decode(rules_db._pack_db(temp_file, "inner.db"))
    # Simulate the device serving the packed db over HTTP.
    mock_response = create_autospec(urllib3.HTTPResponse, instance=True)
    mock_response.status = 200
    mock_response.data = zip_content
    store_rules = []
    class Device:
        name = MOCK_NAME
        udn = MOCK_UDN
        session = Session("http://localhost/")
        class rules:
            @staticmethod
            def FetchRules():
                return {
                    "ruleDbVersion": "1",
                    "ruleDbPath": "http://localhost/rules.db",
                }
            @staticmethod
            def StoreRules(**kwargs):
                store_rules.append(kwargs)
    with patch(
        "urllib3.PoolManager.request", return_value=mock_response
    ) as mock_request:
        with rules_db.rules_db_from_device(Device) as db:
            mock_request.assert_called_once_with(
                method="GET", url="http://localhost/rules.db"
            )
            # Make a modification to trigger StoreRules.
            assert len(db._rules) == 1
            db._rules[501].State = 1
    # The modified db must be stored back with a bumped version number.
    assert len(store_rules) == 1
    assert store_rules[0]["ruleDbVersion"] == 2
    assert len(store_rules[0]["ruleDbBody"]) > 1000
def test_rules_db_from_device_404():
    """A 404 while fetching rules yields an empty db instead of an error."""
    mock_response = create_autospec(urllib3.HTTPResponse, instance=True)
    mock_response.status = 404
    class Device:
        name = MOCK_NAME
        udn = MOCK_UDN
        session = Session("http://localhost/")
        class rules:
            @staticmethod
            def FetchRules():
                return {
                    "ruleDbVersion": "1",
                    "ruleDbPath": "http://localhost/rules.db",
                }
    # Flag proves the context manager body ran and exited cleanly.
    completed_with_no_exceptions = False
    with patch(
        "urllib3.PoolManager.request", return_value=mock_response
    ) as mock_request:
        with rules_db.rules_db_from_device(Device) as db:
            mock_request.assert_called_once_with(
                method="GET", url="http://localhost/rules.db"
            )
            assert len(db.rules) == 0
            completed_with_no_exceptions = True
    assert completed_with_no_exceptions
def test_rules_db_from_device_raises_http_exception():
    """A urllib3 HTTPError while fetching the db surfaces as HTTPException."""
    device = Mock()
    device.session = Session("http://localhost/")
    device.rules = Mock()
    device.rules.FetchRules.return_value = {
        'ruleDbVersion': 1,
        'ruleDbPath': 'http://localhost/',
    }
    request_fails = patch(
        'urllib3.PoolManager.request', side_effect=urllib3.exceptions.HTTPError
    )
    with request_fails, pytest.raises(HTTPException):
        with rules_db.rules_db_from_device(device):
            pass
def test_sqlite_errors_raised():
    """sqlite3 errors inside the context manager surface as RulesDbQueryError."""
    mock_response = create_autospec(urllib3.HTTPResponse, instance=True)
    mock_response.status = 404
    class Device:
        name = MOCK_NAME
        udn = MOCK_UDN
        session = Session("http://localhost/")
        class rules:
            @staticmethod
            def FetchRules():
                return {
                    "ruleDbVersion": "1",
                    "ruleDbPath": "http://localhost/rules.db",
                }
    with patch(
        "urllib3.PoolManager.request", return_value=mock_response
    ) as mock_request:
        with pytest.raises(RulesDbQueryError):
            with rules_db.rules_db_from_device(Device) as db:
                raise sqlite3.OperationalError("test")
|
<gh_stars>10-100
# # Copyright (c) ShanghaiTech PLUS Lab. All Rights Reserved.
# import bisect
# import copy
import logging
# import torch.utils.data
from plusseg.utils.comm import get_world_size
from plusseg.utils.imports import import_file
# from . import datasets as D
from . import samplers
# from .transforms import build_transforms
from .base_dataset import BaseDataset
from .datasets import PascalContextSegDataset
# Registry mapping a lower-case dataset name to its dataset class.
# NOTE(review): "coco" maps to None, so selecting it raises TypeError in
# build_dataset — presumably unimplemented; confirm before enabling.
datasets = {
    "coco": None,
    "pcontext": PascalContextSegDataset
}
def build_dataset(cfg):
    """Instantiate the dataset selected by ``cfg.DATASET.NAME``.

    Arguments:
        cfg: (dict) configuration parameters

    Returns:
        The constructed dataset object.

    Raises:
        ValueError: if the configured name is unknown or not yet implemented
            (previously this surfaced as a cryptic KeyError / TypeError).
    """
    dataset_name = cfg.DATASET.NAME.lower()
    dataset_cls = datasets.get(dataset_name)
    if dataset_cls is None:
        available = sorted(name for name, cls in datasets.items() if cls is not None)
        raise ValueError(
            "Unsupported dataset name: {!r} (available: {})".format(
                dataset_name, available
            )
        )
    return dataset_cls(cfg)
def make_data_loader(cfg, is_train=True, is_distributed=False):
    """Build the train or test DataLoader(s) described by ``cfg``.

    Arguments:
        cfg: configuration object (SOLVER/TEST/DATASETS/DATALOADER nodes).
        is_train: when True build the training loader (exactly one loader is
            returned); otherwise return one loader per test dataset.
        is_distributed: use a distributed sampler across workers.
    """
    # BUG FIX: `import torch.utils.data` is commented out at module level,
    # so the DataLoader construction below raised NameError; import locally.
    import torch.utils.data
    num_gpus = get_world_size()
    if is_train:
        # The global batch size must split evenly across GPUs.
        imgs_per_batch = cfg.SOLVER.IMS_PER_BATCH
        assert (
            imgs_per_batch % num_gpus == 0
        ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format(
            imgs_per_batch, num_gpus)
        imgs_per_gpu = imgs_per_batch // num_gpus
        shuffle = True
        # num_iters = cfg.SOLVER.MAX_ITER
    else:
        imgs_per_batch = cfg.TEST.IMS_PER_BATCH
        assert (
            imgs_per_batch % num_gpus == 0
        ), "TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format(
            imgs_per_batch, num_gpus)
        imgs_per_gpu = imgs_per_batch // num_gpus
        # Shuffle under distributed evaluation so every worker sees a
        # different ordering; keep deterministic order otherwise.
        shuffle = False if not is_distributed else True
    if imgs_per_gpu > 1:
        logger = logging.getLogger(__name__)
        logger.warning(
            "When using more than one image per GPU you may encounter "
            "an out-of-memory (OOM) error if your GPU does not have "
            "sufficient memory. If this happens, you can reduce "
            "SOLVER.IMS_PER_BATCH (for training) or "
            "TEST.IMS_PER_BATCH (for inference). For training, you must "
            "also adjust the learning rate and schedule length according "
            "to the linear scaling rule. See for example: "
            "https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14"
        )
    paths_catalog = import_file(
        "plusseg.config.paths_catalog",
        cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST
    # NOTE(review): build_dataset() is defined above as build_dataset(cfg)
    # but is called here with three arguments, which raises TypeError.
    # Left unchanged pending clarification of the intended signature.
    datasets = build_dataset(dataset_list, DatasetCatalog, is_train)
    data_loaders = []
    for dataset in datasets:
        sampler = make_data_sampler(dataset, shuffle, is_distributed)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            sampler=sampler
        )
        data_loaders.append(data_loader)
    if is_train:
        # Training expects a single concatenated dataset/loader.
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
def make_data_sampler(dataset, shuffle, distributed):
    """Pick the sampler matching the (shuffle, distributed) combination.

    Returns a DistributedSampler when ``distributed`` is set, otherwise a
    RandomSampler (shuffle) or SequentialSampler (no shuffle).
    """
    # BUG FIX: the module-level `import torch.utils.data` is commented out,
    # so the sampler lookups below raised NameError; import locally.
    import torch.utils.data
    if distributed:
        return samplers.DistributedSampler(dataset, shuffle=shuffle)
    if shuffle:
        return torch.utils.data.sampler.RandomSampler(dataset)
    return torch.utils.data.sampler.SequentialSampler(dataset)
# def make_batch_data_sampler(dataset, sampler, images_per_batch):
# batch_sampler = torch.utils.data.sampler.BatchSampler(
# sampler, images_per_batch, drop_last=False
# ) |
#PYTHON MODULE: ISSUE
import mysql.connector
from mysql.connector import errorcode
from datetime import date
from mysql.connector import (connection)
import os
def clrscreen():
    """Visually separate console output by printing five blank lines."""
    print("\n" * 5)
def SearchIssuedBooks():
    """Prompt for a member number and print that member's issued books.

    Connects to the local `Library` MySQL database, queries the `issue`
    table for the entered member number and prints each matching record,
    pausing after every second record.
    """
    try:
        os.system('cls')
        cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='Library')
        Cursor = cnx.cursor()
        mno = input("Enter Member No to search issued book : ")
        # Parameterized query: the member number is bound, not interpolated.
        query = ("SELECT * FROM issue where mno = %s")
        rec_srch = (mno,)
        Cursor.execute(query, rec_srch)
        Rec_count = 0
        for (Bno,Mno,d_o_issue,d_o_ret) in Cursor:
            Rec_count += 1
            print("=============================================================")
            print("1.Book Code : ", Bno)
            print("2.Member Code : ", Mno)
            print("3.Date of Issue : ", d_o_issue)
            print("4.Date of Return : ", d_o_ret)
            print("=============================================================")
            if Rec_count%2 == 0:
                input("Press any key continue")
                clrscreen()
        print(Rec_count, "Record(s) found")
        Cursor.close()
        cnx.close()
        print("You have done it!")
    # BUG FIX: was `mysql.connector.ERROR`, an attribute that does not exist;
    # any database error would therefore raise AttributeError instead of
    # being handled. The correct exception base class is Error.
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    # BUG FIX: the original `else: cnx.close()` re-closed a connection that
    # the success path had already closed; the redundant close was removed.
def issueBook():
    """Interactively record the issue of a book to a member.

    Reads a book code, member code and issue date from stdin and inserts a
    row into the `issue` table of the local `Library` MySQL database.
    """
    cnx = None
    try:
        cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='Library')
        Cursor = cnx.cursor()
        bno = input("Enter Book Code to issue : ")
        mno = input("Enter Member Code : ")
        print("Enter Date Issue (Date/Month and Year separately) : ")
        DD = int(input("Enter Date : "))
        MM = int(input("Enter Month : "))
        YY = int(input("Enter Year : "))
        # Parameterized insert; the date is assembled from the three inputs.
        Qry = ("INSERT INTO issue (bno,mno,d_o_issue) VALUES(%s, %s, %s) ")
        data = (bno,mno,date(YY,MM,DD))
        Cursor.execute(Qry,data)
        cnx.commit()
        Cursor.close()
        cnx.close()
        print("Recorded Inserted.")
    # BUG FIX: was `mysql.connector.ERROR` (nonexistent attribute); the
    # connector's exception base class is Error.
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # BUG FIX: guard the cleanup — `cnx` is unbound when connect() fails.
        if cnx is not None:
            cnx.close()
def returnBook():
    """Record the return of an issued book by stamping today's date.

    Updates the matching row of the `Issue` table with the current date as
    the date of return.
    """
    cnx = None
    try:
        cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='Library')
        Cursor = cnx.cursor()
        bno = input("Enter Book Code of the Book to be returned to the Library : ")
        Mno = input("Enter Member Code of Member who is returning Book : ")
        retDate = date.today()
        Qry = ("""Update Issue set d_o_ret = %s WHERE BNO = %s and Mno = %s""")
        rec = (retDate, bno, Mno)
        Cursor.execute(Qry, rec)
        cnx.commit()
        # BUG FIX: rowcount was printed after Cursor.close(); capture it
        # first. The message also said "Deleted" although this is an UPDATE.
        updated = Cursor.rowcount
        Cursor.close()
        cnx.close()
        print(updated, "Record(s) Updated Successfully.")
    # BUG FIX: was `mysql.connector.ERROR` (nonexistent attribute); the
    # connector's exception base class is Error.
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # BUG FIX: guard the cleanup — `cnx` is unbound when connect() fails.
        if cnx is not None:
            cnx.close()
|
# Author: <NAME>, <NAME>, <NAME>
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from collections.abc import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from .validation import check_array, _assert_all_finite
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
    """Label set for an indicator matrix: one integer per column of *y*."""
    n_labels = check_array(y, accept_sparse=['csr', 'csc', 'coo']).shape[1]
    return np.arange(n_labels)
# Dispatch table mapping a target type (as returned by type_of_target) to
# the helper that extracts its unique labels.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    The following mixes of targets are rejected:
    - multilabel together with multiclass (single label) targets
    - a label indicator matrix together with anything else,
      because indicator input carries no explicit labels
    - label indicator matrices of different widths
    - string labels together with integer labels
    At the moment, "multiclass-multioutput" input is also not supported.

    Parameters
    ----------
    *ys : array-likes

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # All inputs must agree on one label format; "binary" upgrades to
    # "multiclass" so the two may be mixed.
    ys_types = set(type_of_target(y) for y in ys)
    if ys_types == {"binary", "multiclass"}:
        ys_types = {"multiclass"}
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Indicator matrices must all have the same number of columns.
    if label_type == "multilabel-indicator":
        widths = set(
            check_array(y, accept_sparse=['csr', 'csc', 'coo']).shape[1]
            for y in ys
        )
        if len(widths) > 1:
            raise ValueError("Multi-label binary indicator input with "
                             "different numbers of labels")
    extract_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not extract_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(extract_labels(y) for y in ys))
    # Reject a mix of string labels with numeric labels.
    if len(set(isinstance(label, str) for label in ys_labels)) > 1:
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """Check whether ``y`` is formatted as a multilabel indicator matrix.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    out : bool,
        ``True`` when ``y`` is a 2-d indicator matrix, else ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__') or isinstance(y, Sequence):
        y = np.asarray(y)
    # An indicator matrix must be 2-d with more than one column.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # An all-zero sparse matrix qualifies; otherwise the stored values
        # must be a single boolean/integer (or integral-float) value.
        if len(y.data) == 0:
            return True
        stored = np.unique(y.data)
        return (stored.size == 1 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(stored)))
    labels = np.unique(y)
    return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                _is_integral_float(labels))
def check_classification_targets(y):
    """Ensure that target y is of a non-regression type.

    Only the following target types (as defined in type_of_target) are
    allowed: 'binary', 'multiclass', 'multiclass-multioutput',
    'multilabel-indicator', 'multilabel-sequences'.

    Parameters
    ----------
    y : array-like
    """
    allowed = ('binary', 'multiclass', 'multiclass-multioutput',
               'multilabel-indicator', 'multilabel-sequences')
    y_type = type_of_target(y)
    if y_type not in allowed:
        raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
    """Determine the type of data indicated by the target.

    Note that this type is the most specific type that can be inferred.
    For example:

        * ``binary`` is more specific but compatible with ``multiclass``.
        * ``multiclass`` of integers is more specific but compatible with
          ``continuous``.
        * ``multilabel-indicator`` is more specific but compatible with
          ``multiclass-multioutput``.

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:

        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multilabel-indicator'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and anything array-like, but not str.
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, str))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
    if sparse_pandas:
        raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], str)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead - the MultiLabelBinarizer'
                             ' transformer can convert to this format.')
    except IndexError:
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], str)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    # From here on y is 1-d or a valid 2-d array; the suffix records whether
    # it is multioutput (more than one column).
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        _assert_all_finite(y)
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
    """Compute per-output class priors from multioutput-multiclass targets.

    Parameters
    ----------
    y : array-like or sparse matrix of shape (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column.
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []

    n_samples, n_outputs = y.shape
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)

    if issparse(y):
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)

        for k in range(n_outputs):
            col_start, col_end = y.indptr[k], y.indptr[k + 1]
            nonzero_rows = y.indices[col_start:col_end]
            # Split the total weight between explicitly stored entries and
            # the implicit zeros that the CSC format does not store.
            if sample_weight is not None:
                nz_weight = sample_weight[nonzero_rows]
                zero_weight_sum = (np.sum(sample_weight) -
                                   np.sum(nz_weight))
            else:
                nz_weight = None
                zero_weight_sum = y.shape[0] - y_nnz[k]
            col_classes, encoded = np.unique(y.data[col_start:col_end],
                                             return_inverse=True)
            col_prior = np.bincount(encoded, weights=nz_weight)
            # An explicit zero was stored: fold in the implicit zeros.
            if 0 in col_classes:
                col_prior[col_classes == 0] += zero_weight_sum
            # Implicit zeros exist but 0 is not yet a class: add an entry.
            if 0 not in col_classes and y_nnz[k] < y.shape[0]:
                col_classes = np.insert(col_classes, 0, 0)
                col_prior = np.insert(col_prior, 0, zero_weight_sum)
            classes.append(col_classes)
            n_classes.append(col_classes.shape[0])
            class_prior.append(col_prior / col_prior.sum())
    else:
        for k in range(n_outputs):
            col_classes, encoded = np.unique(y[:, k], return_inverse=True)
            col_prior = np.bincount(encoded, weights=sample_weight)
            classes.append(col_classes)
            n_classes.append(col_classes.shape[0])
            class_prior.append(col_prior / col_prior.sum())

    return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
# Monotonically transform the sum_of_confidences to (-1/3, 1/3)
# and add it with votes. The monotonic transformation is
# f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2
# to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = (sum_of_confidences /
(3 * (np.abs(sum_of_confidences) + 1)))
return votes + transformed_confidences
|
#!/usr/bin/env python3
import signal
import gpiozero
from time import sleep
from subprocess import run, PIPE
from urllib.request import urlopen
from urllib.error import URLError
from json import load
from json.decoder import JSONDecodeError
from display import Display
# Service state machine values returned by get_jmri_state().
STOPPED = 0
STARTING = 1
INITIALISING = 2
RUNNING = 3
ERROR = 4
# RGB colour tuples for the status LED; each channel is an intensity 0..1.
RED = (1, 0, 0)
AMBER = (1, 0.1, 0)
YELLOW = (1, 0.24, 0)
GREEN = (0, 1, 0)
BLUE = (0, 0, 1)
PURPLE = (1, 0, 1)
BLACK = (0, 0, 0)
# Status LED wired to GPIO pins 22/27/17 (red, green, blue).
led = gpiozero.RGBLED(22, 27, 17)
state = STOPPED
delay = 1 / 5  # seconds between display refreshes (5 Hz)
display = Display()
def get_throttle_count():
    """Count established TCP connections on port 12090 (throttle clients).

    Parses `netstat -t --numeric-ports` output; returns 0 when netstat
    produces no output.
    """
    netstat = run(['/bin/netstat', '-t', '--numeric-ports'], stdout=PIPE)
    if not netstat.stdout:
        return 0
    return sum(
        1
        for line in netstat.stdout.splitlines()
        if b':12090' in line and line.endswith(b'ESTABLISHED')
    )
def get_jmri_state():
    """Poll systemd for the state of jmri.service.

    Returns one of STOPPED / STARTING / INITIALISING / RUNNING / ERROR.
    A unit systemd reports as running is only considered RUNNING once the
    JMRI JSON web server answers with valid JSON.
    """
    # Default to ERROR; the original code returned an unassigned local
    # (UnboundLocalError) when systemctl produced no output.
    state = ERROR
    r = run(['systemctl', 'show', 'jmri.service'], stdout=PIPE)
    if r.stdout:
        # 'systemctl show' emits KEY=VALUE lines.  Split on the first '='
        # only: values (e.g. ExecStart) may themselves contain '=' and the
        # original split(b'=') truncated them / raised IndexError on lines
        # without '='.
        status = {}
        for line in r.stdout.splitlines():
            key, sep, value = line.partition(b'=')
            if sep:
                status[key] = value
        if status.get(b'ActiveState') == b'active':
            state = STARTING
            if status.get(b'SubState') == b'running':
                state = INITIALISING
                try:
                    # JMRI is fully up once its JSON servlet responds.
                    with urlopen('http://localhost:12080/json/metadata') as u:
                        load(u)
                    state = RUNNING
                except JSONDecodeError:
                    state = ERROR
                except (ConnectionRefusedError, URLError):
                    # Web server not accepting connections yet: remain
                    # INITIALISING.
                    pass
        elif status.get(b'ActiveState') == b'activating':
            state = STARTING
        elif status.get(b'ActiveState') == b'inactive':
            state = STOPPED
            if status.get(b'SubState') == b'failed':
                state = ERROR
        else:
            state = ERROR
    return state
def get_log_line(first_error=False):
    """Return a line from the JMRI session log for the display.

    With first_error=True, return the first ERROR entry (or a message
    saying there is none); otherwise return the most recent entry.  The
    first 62 characters (timestamp/level prefix) are stripped.
    """
    try:
        with open('/home/pi/.jmri/log/session.log') as log:
            raw_lines = log.readlines()
    except FileNotFoundError:
        return 'Session log not found.'
    # Keep only timestamped lines ("20.." prefix); this drops stack-trace
    # continuation lines.
    entries = [line.strip() for line in raw_lines if line.startswith('20')]
    if not entries:
        return 'Session log empty.'
    if first_error:
        errors = [line for line in entries if 'ERROR' in line]
        if errors:
            return errors[0][62:]
        return 'No errors in session log'
    return entries[-1][62:]
# Maps a POSIX signal number to the farewell message shown on the display.
# Index i initially holds the placeholder value i + 1; only SIGINT (2) and
# SIGTERM (15) get real messages below.
SIG_LUT = list(range(1, 64))
SIG_LUT[2] = 'Monitor interrupted by Keyboard.'
SIG_LUT[15] = 'The system is shutting down.'
# Main-loop flag; cleared by signal_handler() to request a clean exit.
RUN = True
def signal_handler(i, _):
    """Show a goodbye message for signal number ``i`` and stop the loop.

    The second (frame) argument is unused.  The display is dimmed after
    writing so the farewell text stays visible but unobtrusive.
    """
    global RUN
    display.write_lines(['Please Wait', SIG_LUT[i]])
    display.dim()
    RUN = False
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Main poll loop: mirror the JMRI service state on the RGB LED and the
# character display until a signal clears RUN.
while RUN:
    state = get_jmri_state()
    throttles = ''
    if state == STARTING:
        led.color = AMBER
        delay = 1 / 4  # refresh at 4 Hz while the service starts
        state_text = 'JMRI Starting'
    elif state == INITIALISING:
        led.color = YELLOW
        delay = 1 / 6  # refresh at 6 Hz while JMRI initialises
        state_text = 'Initialising'
    elif state == RUNNING:
        led.color = GREEN
        delay = 5
        state_text = 'JMRI Running'
        tc = get_throttle_count()
        # Pluralise the throttle count; leave blank when none connected.
        if tc > 1:
            throttles = '%d active throttles' % tc
        elif tc == 1:
            throttles = '%d active throttle' % tc
    elif state == STOPPED:
        led.color = RED
        delay = 1 / 2  # refresh at 2 Hz when stopped
        state_text = 'JMRI Stopped'
    elif state == ERROR:
        led.color = PURPLE
        delay = 1 / 8  # refresh at 8 Hz on error
        state_text = 'Error'
    else:
        led.color = PURPLE
        delay = 1 / 16  # refresh at 16 Hz on unknown state
        state_text = 'Unknown Error'
    log_line = '…'
    # While starting, the log is still being written; skip reading it.
    if state != STARTING:
        log_line = get_log_line(state == ERROR)
    display.write_lines([state_text, throttles, log_line])
    sleep(delay)
    # When running, give a short "heartbeat" blink by blanking the LED
    # briefly after each long green period.
    if state == RUNNING:
        delay = 0.02
        led.color = BLACK
        sleep(delay)
|
<filename>TweetParser.py<gh_stars>0
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multiclass import OneVsRestClassifier
import sys
import json
import os.path
from pprint import pprint
class TweetParser:
    """Reads tweets for a hashtag from disk and trains a multi-label
    sentiment classifier over them.

    Tweets are read one line at a time from ``new_tweets/<hashtag>``; the
    byte offset of the reader is persisted to ``.linenum`` so a later run
    resumes where the previous one stopped.  Tagged tweets are appended to
    ``tags/<hashtag>.json`` as one JSON object per line.
    """

    def __init__(self, hashtag):
        self.hashtag = hashtag
        # Resume from the byte offset recorded in '.linenum', defaulting
        # to the start of the file.  Narrowed from a bare `except:` so
        # that only a missing/unreadable file or a malformed integer is
        # swallowed.
        try:
            with open('.linenum', 'r') as f:
                lines = int(f.readline())
        except (OSError, ValueError):
            lines = 0
        filepath = 'new_tweets/' + hashtag
        self.f = open(filepath, 'r')
        self.f.seek(lines)
        self.target_names = ["Hostile", "Nice", "Happy", "Sad", "Angry"]
        self.tweets = {}
        self.classifier = None

    def close(self):
        """Release the underlying tweet-file handle."""
        self.f.close()

    def getNextTweet(self):
        """Return the next unread tweet as ``{'text', 'id'}`` or None at EOF.

        The reader offset after the line is used as the tweet id and is
        persisted to '.linenum'.
        """
        t = self.f.readline()
        if t == '':
            return None
        tweet = {'text': t, 'id': self.f.tell()}
        self.tweets[tweet['id']] = tweet
        with open('.linenum', 'w') as f:
            f.write(str(self.f.tell()))
        return tweet

    def add_tags(self, id, tags):
        """Attach a list of category indices to a tweet and persist it.

        Returns True on success, False when ``tags`` is not a list.
        """
        if not isinstance(tags, list):
            return False
        self.tweets[id]['tags'] = tags
        with open('tags/' + self.hashtag + '.json', 'a') as f:
            f.write(json.dumps(self.tweets[id]) + '\n')
        return True

    def get_tagged_tweets(self):
        """Load previously tagged tweets; returns a list, or None if empty."""
        tweets = []
        with open('tags/' + self.hashtag + '.json', 'r') as f:
            for line in f:
                # Skip blank separator lines.
                if line == "\n":
                    continue
                tweets.append(json.loads(line))
        return tweets if tweets else None

    def predict(self, tweets):
        """Predict tag sets for ``tweets``; None when not yet trained."""
        if self.classifier is None:
            return None
        return self.classifier.predict(tweets)

    def __get_categories(self):
        """Prompt the user for category indices and return the valid ones."""
        categories = []
        for i, cat in enumerate(self.target_names):
            print(str(i) + ': ' + cat)
        user_input = input()
        for tok in user_input.split():
            # Ignore non-numeric or out-of-range tokens instead of crashing.
            if tok.isdigit() and 0 <= int(tok) < len(self.target_names):
                categories.append(int(tok))
        return categories

    def train(self, tag_tweets=False):
        """Fit the classifier.

        With tag_tweets=True, interactively tag up to 10 fresh tweets;
        otherwise train on the first 300 previously tagged tweets.
        Returns False when no usable training data exists, True otherwise.
        """
        trainArr = []
        y_train = []
        if tag_tweets:
            tweet = self.getNextTweet()
            # Fixed missing space between the two concatenated halves.
            print('Enter the numbers, separated by space, corresponding to the ' +
                  'categories you think that the tweet belongs to.\n')
            # User feedback loop: tag up to 10 tweets.
            i = 0
            while tweet and i < 10:
                print(tweet['text'] + '\n')
                y_train.append(self.__get_categories())
                trainArr.append(tweet['text'])
                i += 1
                tweet = self.getNextTweet()
        else:
            tweets = self.get_tagged_tweets()
            tweets = tweets[0:300]
            for tweet in tweets:
                if 'tags' in tweet.keys():
                    trainArr.append(tweet['text'])
                    y_train.append(tweet['tags'])
        # Bail out when there is no text or every tag list is empty.
        if len(y_train) == 0 or len(trainArr) == 0 or \
                len([item for sublist in y_train for item in sublist]) == 0:
            return False
        x_train = np.array(trainArr)
        classifier = Pipeline([
            ('vectorizer', CountVectorizer(ngram_range=(1, 2))),
            ('tfidf', TfidfTransformer()),
            ('clf', OneVsRestClassifier(LinearSVC()))
        ])
        classifier.fit(x_train, y_train)
        self.classifier = classifier
        return True

    def test(self):
        """Predict on tagged tweets beyond the first 300.

        Returns (predicted, tweets), or False when no classifier is fit.
        """
        if self.classifier is None:
            return False
        tweets = self.get_tagged_tweets()
        tweets = tweets[300:]
        x_test = [tweet['text'] for tweet in tweets]
        predicted = self.predict(x_test)
        return predicted, tweets
if __name__ == '__main__':
    # Default hashtag; a single optional CLI argument overrides it.
    hashtag = 'NetNeutrality'
    if(len(sys.argv) == 2):
        hashtag = sys.argv[1]
    # Train on previously tagged tweets, then run predictions on the rest.
    t = TweetParser(hashtag)
    t.train()
    t.test()
|
<reponame>fno2010/rucio
# -*- coding: utf-8 -*-
# Copyright CERN since 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' replicas table PK definition is in wrong order '''
from alembic import context
from alembic.op import create_primary_key, drop_constraint, create_foreign_key, drop_index
# revision identifiers used by alembic
revision = '3345511706b8'
down_revision = '01eaf73ab656'
def upgrade():
    '''
    Upgrade the database to this revision
    '''
    # Rebuild REPLICAS_PK with columns in (scope, name, rse_id) order.
    # The foreign key from `sources` references the PK, so it must be
    # dropped first and recreated afterwards.  Statement order matters.
    if context.get_context().dialect.name in ['oracle', 'postgresql']:
        drop_constraint('SOURCES_REPLICA_FK', 'sources', type_='foreignkey')
        drop_constraint('REPLICAS_PK', 'replicas', type_='primary')
        create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id'])
        create_foreign_key('SOURCES_REPLICA_FK', 'sources', 'replicas', ['scope', 'name', 'rse_id'], ['scope', 'name', 'rse_id'])
    elif context.get_context().dialect.name == 'mysql':
        drop_constraint('SOURCES_REPLICA_FK', 'sources', type_='foreignkey')
        # The constraint has an internal index which is not automatically dropped,
        # we have to do that manually
        drop_index('SOURCES_REPLICA_FK', 'sources')
        # MySQL also requires the other FKs on `replicas` to be dropped
        # before the primary key can be rebuilt; they are restored below.
        drop_constraint(constraint_name='REPLICAS_LFN_FK', table_name='replicas', type_='foreignkey')
        drop_constraint(constraint_name='REPLICAS_RSE_ID_FK', table_name='replicas', type_='foreignkey')
        drop_constraint('REPLICAS_PK', 'replicas', type_='primary')
        create_foreign_key('REPLICAS_LFN_FK', 'replicas', 'dids', ['scope', 'name'], ['scope', 'name'])
        create_foreign_key('REPLICAS_RSE_ID_FK', 'replicas', 'rses', ['rse_id'], ['id'])
        create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id'])
        create_foreign_key('SOURCES_REPLICA_FK', 'sources', 'replicas', ['scope', 'name', 'rse_id'], ['scope', 'name', 'rse_id'])
def downgrade():
    '''
    Downgrade the database to the previous revision
    '''
    # Mirror of upgrade(): restore the old (rse_id, scope, name) PK column
    # order.  Statement order matters for the same FK reasons.
    if context.get_context().dialect.name in ['oracle', 'postgresql']:
        drop_constraint(constraint_name='SOURCES_REPLICA_FK', table_name='sources', type_='foreignkey')
        drop_constraint(constraint_name='REPLICAS_PK', table_name='replicas', type_='primary')
        create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name'])
        create_foreign_key('SOURCES_REPLICA_FK', 'sources', 'replicas', ['rse_id', 'scope', 'name'], ['rse_id', 'scope', 'name'])
    elif context.get_context().dialect.name == 'mysql':
        drop_constraint(constraint_name='SOURCES_REPLICA_FK', table_name='sources', type_='foreignkey')
        # The constraint has an internal index which is not automatically dropped,
        # we have to do that manually
        drop_index('SOURCES_REPLICA_FK', 'sources')
        drop_constraint(constraint_name='REPLICAS_LFN_FK', table_name='replicas', type_='foreignkey')
        drop_constraint(constraint_name='REPLICAS_RSE_ID_FK', table_name='replicas', type_='foreignkey')
        drop_constraint(constraint_name='REPLICAS_PK', table_name='replicas', type_='primary')
        create_foreign_key('REPLICAS_LFN_FK', 'replicas', 'dids', ['scope', 'name'], ['scope', 'name'])
        create_foreign_key('REPLICAS_RSE_ID_FK', 'replicas', 'rses', ['rse_id'], ['id'])
        create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name'])
        create_foreign_key('SOURCES_REPLICA_FK', 'sources', 'replicas', ['rse_id', 'scope', 'name'], ['rse_id', 'scope', 'name'])
|
import datetime
import random
from flask import request, render_template, current_app, url_for
from flask_login import login_user, current_user, logout_user, login_required
from sqlalchemy import text
from werkzeug.security import check_password_hash, generate_password_hash
from werkzeug.utils import redirect
from modules import login_manager, db
from modules.ctrla import Database
from modules.models import Folder, Task, User
database = Database()
@login_manager.user_loader
def load_user(id_) -> User:
    """Flask-Login hook: resolve the stored session id to a User row."""
    return database.get(User, id_)
@current_app.route("/")
def index():
order_by = request.args.get("order_by", default="date_created desc")
_ = current_user.folders.order_by(text(order_by))
return render_template("index.html", folders_=_, order_by=order_by)
@current_app.route("/login", methods=["POST"])
def login():
email = request.form["email"]
password = request.form["password"]
user = db.session.query(User).filter(User.email == email).first()
if user and check_password_hash(user.password, password):
login_user(user)
return redirect(url_for("index"))
else:
return "Login failed."
@current_app.route("/logout")
def logout():
logout_user()
return redirect(url_for("index"))
@current_app.route("/signup", methods=["POST"])
def signup():
database.create(User(first_name=request.form["first_name"],
last_name=request.form["last_name"],
email=request.form["email"],
password=generate_password_hash(request.form["password"]),
date_joined=datetime.datetime.now()))
return redirect(url_for("index"))
@current_app.route("/folder")
@login_required
def folder():
_: Folder = database.get(Folder, request.args.get("id_"))
return render_template("folder.html", folder=_)
@current_app.route("/folder_create", methods=["POST"])
@login_required
def folder_create():
database.create(Folder(name=request.form["name"],
color="#{:06x}".format(random.randint(0, 0xFFFFFF)),
date_created=datetime.datetime.now(),
user=current_user.id))
return redirect(request.referrer)
@current_app.route("/folder_edit", methods=["POST"])
@login_required
def folder_edit():
_: Folder = database.get(Folder, int(request.form["id_"]))
_.name = request.form["name"]
_.color = request.form["color"]
database.update()
return redirect(request.referrer)
@current_app.route("/folder_delete")
@login_required
def folder_delete():
_: Folder = database.get(Folder, request.args.get("id_"))
for i in _.tasks:
database.delete(i)
database.delete(_)
return redirect(url_for("index"))
@current_app.route("/tasks")
@login_required
def tasks_():
order_by = request.args.get("order_by", default="tasks.date_created desc")
_ = current_user.tasks.join(Folder).order_by(text(order_by))
return render_template("tasks.html", tasks_=_, order_by=order_by)
@current_app.route("/task")
@login_required
def task():
_: Task = database.get(Task, request.args.get("id_"))
return render_template("task.html", task=_)
@current_app.route("/task_create", methods=["POST"])
@login_required
def task_create():
database.create(Task(name=request.form["name"],
folder=int(request.form["id_"]),
date_created=datetime.datetime.now(),
user=current_user.id))
return redirect(request.referrer)
@current_app.route("/subtask_create", methods=["POST"])
@login_required
def subtask_create():
_: Task = database.get(Task, int(request.form["id_"]))
database.create(Task(name=request.form["name"],
folder=_.folder,
date_created=datetime.datetime.now(),
parent_task=_.id,
user=current_user.id))
return redirect(request.referrer)
@current_app.route("/task_edit", methods=["POST"])
@login_required
def task_edit():
_: Task = database.get(Task, int(request.form["id_"]))
_.name = request.form["name"]
_.note = request.form["note"]
_.folder = int(request.form["folder"])
_.reminder = request.form.get("reminder") is not None
_.date_due = request.form["date_due"] if _.reminder else None
database.update()
return redirect(request.referrer)
@current_app.route("/task_delete")
@login_required
def task_delete():
_: Task = database.get(Task, request.args.get("id_"))
database.delete(_)
return redirect(request.referrer)
@current_app.route("/task_toggle")
@login_required
def task_toggle():
_: Task = database.get(Task, request.args.get("id_"))
_.done = not _.done
database.update()
return redirect(request.referrer)
|
"""root-editorconfig-required hook tests."""
import contextlib
import io
import os
import tempfile
import pytest
from hooks.root_editorconfig_required import check_root_editorconfig
@pytest.mark.parametrize(
"quiet",
(True, False),
ids=("quiet=True", "quiet=False"),
)
@pytest.mark.parametrize(
"create_file",
(True, False),
ids=("create_file=True", "create_file=False"),
)
@pytest.mark.parametrize(
("input_content", "expected_stderr", "expected_exitcode"),
(
pytest.param(
"""# EditorConfig is awesome: https://EditorConfig.org
root = true
[*]
end_of_line = lf
insert_final_newline = true
[*.{js,py}]
charset = utf-8
""",
None,
0,
id="root=true",
),
pytest.param(
"""root = false
[*]
end_of_line = lf
insert_final_newline = true
[*.{js,py}]
charset = utf-8
""",
(
"Found 'root = false' in .editorconfig when expected to"
" find 'root = true'.\n"
),
1,
id="root=false",
),
pytest.param(
"""root = invalid
[*]
end_of_line = lf
insert_final_newline = true
[*.{js,py}]
charset = utf-8
""",
(
"Invalid 'root' directive value 'invalid' at"
" '.editorconfig:1'. Possible values are 'true' and 'false'.\n"
),
1,
id="root=invalid",
),
pytest.param(
"""root = true
root = false
[*]
end_of_line = lf
insert_final_newline = true
[*.{js,py}]
charset = utf-8
""",
(
"Found 'root = false' in .editorconfig when expected to"
" find 'root = true'.\n"
),
1,
id="root=true-root=false",
),
pytest.param(
"""root = true
root = true
[*]
end_of_line = lf
insert_final_newline = true
[*.{js,py}]
charset = utf-8
""",
"Found multiple definitions of 'root = true' in .editorconfig\n",
1,
id="root=true-root=true",
),
pytest.param(
"""[*]
root = true
end_of_line = lf
insert_final_newline = true
[*.{js,py}]
charset = utf-8
""",
"Directive 'root = true' not found before section headers.\n",
1,
id="[*]root=true",
),
),
)
def test_root_editorconfig_required(
    input_content,
    expected_stderr,
    expected_exitcode,
    create_file,
    quiet,
):
    """Run the hook against a temp .editorconfig; check exit code/stderr."""
    # None in the parametrization means "expect no stderr output".
    if expected_stderr is None:
        expected_stderr = ""
    previous_cwd = os.getcwd()
    with tempfile.TemporaryDirectory() as dirpath:
        try:
            os.chdir(dirpath)
            if create_file:
                with open(os.path.join(dirpath, ".editorconfig"), "w") as f:
                    f.write(input_content)
            else:
                # Without a file the hook must fail with a fixed message,
                # which is suppressed when quiet=True.
                expected_exitcode = 1
                expected_stderr = "Missing '.editorconfig' file\n"
                if quiet:
                    expected_stderr = ""
            stderr = io.StringIO()
            with contextlib.redirect_stderr(stderr):
                exitcode = check_root_editorconfig(quiet=quiet)
            assert exitcode == expected_exitcode
            assert stderr.getvalue() == expected_stderr
        finally:
            # Always restore the working directory so later tests are not
            # affected by the chdir above.
            os.chdir(previous_cwd)
|
"""
New schemas for behavioral training criteria
<NAME>, CSHL, 2019
"""
import pandas as pd
import numpy as np
import sys, os, time
import matplotlib.pyplot as plt
import datajoint as dj
from IPython import embed as shell # for debugging
import datetime
# import wrappers etc
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from ibl_pipeline.analyses import behavior as behavioral_analyses
from dj_tools import *
from ibl_pipeline.analyses import analysis_utils as utils
# https://int-brain-lab.slack.com/archives/CB13FQFK4/p1561595587061400
behavior_bpod = dj.create_virtual_module('behavior', 'ibl_behavior')
# =========================================================
# https://github.com/anne-urai/IBL-pipeline/blob/master/ibl_pipeline/analyses/behavior.py#L195
# =========================================================
def compute_reaction_time(trials):
    """Median reaction time per signed contrast for a DataJoint trial set.

    Returns a DataFrame with columns 'signed_contrast' and 'rt'.  When the
    median cannot be computed, a count-shaped frame with NaN reaction
    times is returned instead so downstream code still gets one row per
    contrast.
    """
    trials_rt = trials.proj(
        signed_contrast='trial_stim_contrast_left- \
        trial_stim_contrast_right',
        rt='trial_response_time-trial_stim_on_time')
    rt = trials_rt.fetch(as_dict=True)
    rt = pd.DataFrame(rt)
    rt = rt[['signed_contrast', 'rt']]
    try:
        median_rt = rt.groupby('signed_contrast').median().reset_index()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.  Fallback keeps one row per contrast with NaN.
        median_rt = rt.groupby('signed_contrast').count().reset_index()
        median_rt['rt'] = np.nan
    return median_rt
# Personal schema for these analyses; the shared group schema is kept
# here for reference in case the tables should move there later.
# schema = dj.schema('group_shared_anneurai_analyses')
schema = dj.schema('user_anneurai_analyses')
print('defining table')
# =========================================================
# DEFINE THE SCHEMA
# =========================================================
@schema
class TrainingStatus(dj.Lookup):
    # Lookup table enumerating every training-status label that
    # SessionTrainingStatus may assign to a session.
    definition = """
    training_status: varchar(32)
    """
    contents = zip(['untrainable',
                    'unbiasable',
                    'in_training',
                    'trained_1a',
                    'trained_1b',
                    'ready4ephysrig',
                    'ready4recording'])
@schema
class SessionTrainingStatus(dj.Computed):
    # Computed table: one training-status label per PsychResults session.
    definition = """
    -> behavioral_analyses.PsychResults
    ---
    -> TrainingStatus
    """

    def make(self, key):
        """Assign a training status to the session identified by `key`.

        Criteria are evaluated from the most advanced status downwards
        (ready4recording -> ready4ephysrig -> trained_1b -> trained_1a ->
        untrainable -> in_training); each branch inserts and returns as
        soon as it matches, so statement order is significant.
        """
        subject_key = key.copy()
        subject_key.pop('session_start_time')
        # All statuses previously assigned to this subject.
        previous_sessions = SessionTrainingStatus & subject_key & \
            'session_start_time < "{}"'.format(
                key['session_start_time'].strftime('%Y-%m-%d %H:%M:%S')
            )
        status = previous_sessions.fetch('training_status')
        # ========================================================= #
        # is the animal ready to be recorded?
        # ========================================================= #
        # if the previous status was 'ready4recording', keep
        if len(status) and np.any(status == 'ready4recording'):
            key['training_status'] = 'ready4recording'
            self.insert1(key)
            return
        # if the protocol for the current session is a biased session,
        # set the status to be "trained" and check up the criteria for
        # "read for ephys"
        task_protocol = (acquisition.Session & key).fetch1('task_protocol')
        if task_protocol and 'biased' in task_protocol:
            # Criteria for "ready4recording"
            sessions = (behavior.TrialSet & subject_key &
                        (acquisition.Session & 'task_protocol LIKE "%biased%"') &
                        'session_start_time <= "{}"'.format(
                            key['session_start_time'].strftime(
                                '%Y-%m-%d %H:%M:%S')
                            )).fetch('KEY')
            # if more than 3 biased sessions, see what's up
            if len(sessions) >= 3:
                sessions_rel = sessions[-3:]
                # were these last 3 sessions done on an ephys rig?
                bpod_board = (behavior_bpod.Settings & sessions_rel).fetch('pybpod_board')
                ephys_board = [True for i in list(bpod_board) if 'ephys' in i]
                if len(ephys_board) == 3:
                    n_trials = (behavior.TrialSet & sessions_rel).fetch('n_trials')
                    performance_easy = (behavioral_analyses.PsychResults & sessions_rel).fetch(
                        'performance_easy')
                    # criterion: 3 sessions with >400 trials, and >90% correct on high contrasts
                    if np.all(n_trials > 400) and np.all(performance_easy > 0.9):
                        trials = behavior.TrialSet.Trial & sessions_rel
                        prob_lefts = (dj.U('trial_stim_prob_left') & trials).fetch(
                            'trial_stim_prob_left')
                        # if no 0.5 of prob_left, keep trained
                        if not np.all(abs(prob_lefts - 0.5) > 0.001):
                            # compute psychometric functions for each of 3 conditions
                            # trials_50 = trials & \
                            #     'ABS(trial_stim_prob_left - 0.5) < 0.001'
                            trials_80 = trials & \
                                'ABS(trial_stim_prob_left - 0.2) < 0.001'
                            trials_20 = trials & \
                                'ABS(trial_stim_prob_left - 0.8) < 0.001'
                            # also compute the median reaction time
                            medRT = compute_reaction_time(trials)
                            # psych_unbiased = utils.compute_psych_pars(trials_unbiased)
                            psych_80 = utils.compute_psych_pars(trials_80)
                            psych_20 = utils.compute_psych_pars(trials_20)
                            # psych_50 = utils.compute_psych_pars(trials_50)
                            # repeat the criteria for training_1b
                            # add on criteria for lapses and bias shift in the biased blocks
                            criterion = psych_80['lapse_low'] < 0.1 and \
                                psych_80['lapse_high'] < 0.1 and \
                                psych_20['lapse_low'] < 0.1 and \
                                psych_20['lapse_high'] < 0.1 and \
                                psych_20['bias'] - psych_80['bias'] > 5 and \
                                medRT.loc[medRT['signed_contrast'] == 0, 'rt'].item() < 2
                            if criterion:
                                # were all 3 sessions done on an ephys rig already?
                                key['training_status'] = 'ready4recording'
                                self.insert1(key)
                                return
        # ========================================================= #
        # is the animal doing biasedChoiceWorld
        # ========================================================= #
        # if the previous status was 'ready4ephysrig', keep
        if len(status) and np.any(status == 'ready4ephysrig'):
            key['training_status'] = 'ready4ephysrig'
            self.insert1(key)
            return
        # if the protocol for the current session is a biased session,
        # set the status to be "trained" and check up the criteria for
        # "read for ephys"
        # NOTE(review): this block nearly duplicates the ready4recording
        # check above, minus the ephys-board requirement — confirm the
        # duplication is intentional before refactoring.
        task_protocol = (acquisition.Session & key).fetch1('task_protocol')
        if task_protocol and 'biased' in task_protocol:
            # Criteria for "ready4recording" or "ready4ephysrig" status
            sessions = (behavior.TrialSet & subject_key &
                        (acquisition.Session & 'task_protocol LIKE "%biased%"') &
                        'session_start_time <= "{}"'.format(
                            key['session_start_time'].strftime(
                                '%Y-%m-%d %H:%M:%S')
                            )).fetch('KEY')
            # if there are more than 40 sessions of biasedChoiceWorld, give up on this mouse
            # NOTE(review): 'unbiasable' is assigned but not inserted or
            # returned here, so a later branch may overwrite it — confirm
            # this is the intended behaviour.
            if len(sessions) >= 40:
                key['training_status'] = 'unbiasable'
            # if not more than 3 biased sessions, see what's up
            if len(sessions) >= 3:
                sessions_rel = sessions[-3:]
                n_trials = (behavior.TrialSet & sessions_rel).fetch('n_trials')
                performance_easy = (behavioral_analyses.PsychResults & sessions_rel).fetch(
                    'performance_easy')
                # criterion: 3 sessions with >400 trials, and >90% correct on high contrasts
                if np.all(n_trials > 400) and np.all(performance_easy > 0.9):
                    trials = behavior.TrialSet.Trial & sessions_rel
                    prob_lefts = (dj.U('trial_stim_prob_left') & trials).fetch(
                        'trial_stim_prob_left')
                    # if no 0.5 of prob_left, keep trained
                    if not np.all(abs(prob_lefts - 0.5) > 0.001):
                        # # compute psychometric functions for each of 3 conditions
                        # trials_50 = trials & \
                        #     'ABS(trial_stim_prob_left - 0.5) < 0.001'
                        trials_80 = trials & \
                            'ABS(trial_stim_prob_left - 0.2) < 0.001'
                        trials_20 = trials & \
                            'ABS(trial_stim_prob_left - 0.8) < 0.001'
                        # also compute the median reaction time
                        medRT = compute_reaction_time(trials)
                        # psych_unbiased = utils.compute_psych_pars(trials_unbiased)
                        psych_80 = utils.compute_psych_pars(trials_80)
                        psych_20 = utils.compute_psych_pars(trials_20)
                        # psych_50 = utils.compute_psych_pars(trials_50)
                        # repeat the criteria for training_1b
                        # add on criteria for lapses and bias shift in the biased blocks
                        criterion = psych_80['lapse_low'] < 0.1 and \
                            psych_80['lapse_high'] < 0.1 and \
                            psych_20['lapse_low'] < 0.1 and \
                            psych_20['lapse_high'] < 0.1 and \
                            psych_20['bias'] - psych_80['bias'] > 5 and \
                            medRT.loc[medRT['signed_contrast'] == 0, 'rt'].item() < 2
                        if criterion:
                            key['training_status'] = 'ready4ephysrig'
                            self.insert1(key)
                            return
        # ========================================================= #
        # is the animal doing trainingChoiceWorld?
        # 1B training
        # ========================================================= #
        # if has reached 'trained_1b' before, mark the current session 'trained_1b' as well
        if len(status) and np.any(status == 'trained_1b'):
            key['training_status'] = 'trained_1b'
            self.insert1(key)
            return
        # training in progress if the animals was trained in < 3 sessions
        sessions = (behavior.TrialSet & subject_key &
                    'session_start_time <= "{}"'.format(
                        key['session_start_time'].strftime('%Y-%m-%d %H:%M:%S')
                    )).fetch('KEY')
        if len(sessions) >= 3:
            # training in progress if any of the last three sessions have
            # < 400 trials or performance of easy trials < 0.8
            sessions_rel = sessions[-3:]
            n_trials = (behavior.TrialSet & sessions_rel).fetch('n_trials')
            performance_easy = (behavioral_analyses.PsychResults & sessions_rel).fetch(
                'performance_easy')
            if np.all(n_trials > 400) and np.all(performance_easy > 0.9):
                # training in progress if the current session does not
                # have low contrasts
                contrasts = abs(
                    (behavioral_analyses.PsychResults & key).fetch1('signed_contrasts'))
                if 0 in contrasts and \
                        np.sum((contrasts < 0.065) & (contrasts > 0.001)):
                    # compute psych results of last three sessions
                    trials = behavior.TrialSet.Trial & sessions_rel
                    psych = utils.compute_psych_pars(trials)
                    # also compute the median reaction time
                    medRT = compute_reaction_time(trials)
                    # cum_perform_easy = utils.compute_performance_easy(trials)
                    criterion = abs(psych['bias']) < 10 and \
                        psych['threshold'] < 20 and \
                        psych['lapse_low'] < 0.1 and \
                        psych['lapse_high'] < 0.1 and \
                        medRT.loc[medRT['signed_contrast'] == 0, 'rt'].item() < 2
                    if criterion:
                        key['training_status'] = 'trained_1b'
                        self.insert1(key)
                        return
        # ========================================================= #
        # is the animal still doing trainingChoiceWorld?
        # 1A training
        # ========================================================= #
        # if has reached 'trained_1a' before, mark the current session 'trained_1a' as well
        if len(status) and np.any(status == 'trained_1a'):
            key['training_status'] = 'trained_1a'
            self.insert1(key)
            return
        # training in progress if the animals was trained in < 3 sessions
        sessions = (behavior.TrialSet & subject_key &
                    'session_start_time <= "{}"'.format(
                        key['session_start_time'].strftime('%Y-%m-%d %H:%M:%S')
                    )).fetch('KEY')
        if len(sessions) >= 3:
            # training in progress if any of the last three sessions have
            # < 200 trials or performance of easy trials < 0.8
            sessions_rel = sessions[-3:]
            n_trials = (behavior.TrialSet & sessions_rel).fetch('n_trials')
            performance_easy = (behavioral_analyses.PsychResults & sessions_rel).fetch(
                'performance_easy')
            if np.all(n_trials > 200) and np.all(performance_easy > 0.8):
                # training in progress if the current session does not
                # have low contrasts
                contrasts = abs(
                    (behavioral_analyses.PsychResults & key).fetch1('signed_contrasts'))
                if 0 in contrasts and \
                        np.sum((contrasts < 0.065) & (contrasts > 0.001)):
                    # compute psych results of last three sessions
                    trials = behavior.TrialSet.Trial & sessions_rel
                    psych = utils.compute_psych_pars(trials)
                    # cum_perform_easy = utils.compute_performance_easy(trials)
                    criterion = abs(psych['bias']) < 16 and \
                        psych['threshold'] < 19 and \
                        psych['lapse_low'] < 0.2 and \
                        psych['lapse_high'] < 0.2
                    if criterion:
                        key['training_status'] = 'trained_1a'
                        self.insert1(key)
                        return
        # ========================================================= #
        # did the animal not get any criterion assigned?
        # ========================================================= #
        # check whether the subject has been trained over 40 days
        if len(sessions) >= 40:
            key['training_status'] = 'untrainable'
            self.insert1(key)
            return
        # ========================================================= #
        # assume a base key of 'in_training' for all mice
        # ========================================================= #
        key['training_status'] = 'in_training'
        self.insert1(key)
# =================
# populate this
# =================
# Uncommenting drop() destroys the table and all assigned statuses.
# SessionTrainingStatus.drop()
SessionTrainingStatus.populate(display_progress=True)
|
# encoding: utf-8
"""
File: conftest.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/mdrohmann
Description: pytest configuration file with standard tests for server modules.
"""
import re
import copy
import pytest
import random
from twisted.application import service
from twisted.python.reflect import namedAny
from twisted.spread import pb
from twisted.internet import reactor, defer
from twisted.web import resource, server, client
from zope.interface.verify import verifyObject
from txtemplates.common import backend
from txtemplates.utils.zope_ext import (
create_dummy_class_for, get_methods_for_interface)
from txtemplates.utils.html import SingleWebRequest
# introduce the @incremental marker
def pytest_runtest_makereport(item, call):
    """Record a failing @incremental test on its parent container."""
    if "incremental" not in item.keywords:
        return
    if call.excinfo is not None:
        item.parent._previousfailed = item
def pytest_runtest_setup(item):
    """Xfail an @incremental test when an earlier one in its class failed."""
    if "incremental" not in item.keywords:
        return
    failed = getattr(item.parent, "_previousfailed", None)
    if failed is None:
        return
    pytest.xfail("previous test failed ({})".format(failed.name))
def default_option_handler(request):
    """Hand back a shallow copy of the fixture's indirect parameter."""
    param = request.param
    return copy.copy(param)
@pytest.fixture(scope="module")
def html_request(request):
    # Indirect fixture: yields a copy of the parametrized web request.
    return default_option_handler(request)
@pytest.fixture(scope="module")
def backend_options(request):
    # Indirect fixture: yields a copy of the (options, module) parameter.
    return default_option_handler(request)
@pytest.fixture(scope="module")
def full_server_options(request):
    # Indirect fixture: yields a copy of ([backend, service] opts, module).
    return default_option_handler(request)
@pytest.fixture(scope="class")
def backend_methods_fixture(request):
    # Indirect fixture: yields a copy of one backend interface method name.
    return default_option_handler(request)
def _make_backend(boptions, module):
    """Build a backend for *module*: a generated dummy for 'dummy', else the
    module's real make_backend(boptions)."""
    random.seed(323)  # fixed seed, presumably for deterministic dummy data -- TODO confirm
    if boptions == 'dummy':
        DummyBackend = create_dummy_class_for(module.backend.IBackend)
        return DummyBackend()
    else:
        return module.backend.make_backend(boptions)
@pytest.fixture()
def backend_fixture(backend_options):
    """Build a backend from (options, module).

    Returns a ``(backend_or_exception, module)`` tuple: construction errors
    are returned, not raised, so error-path tests can inspect them.
    """
    options, module = backend_options
    try:
        return _make_backend(options, module), module
    except Exception as e:  # 'as e' works on Py2.6+/Py3; old ', e' form did not
        return e, module
#@pytest.fixture()
#def dummy_backend_fixture(request):
# server_module = request.param
# DummyBackend = create_dummy_class_for(server_module.backend.IBackend)
# d = DummyBackend()
# verifyObject(server_module.backend.IBackend, d)
# return d
#
#
@pytest.fixture(scope="class")
def service_fixture(full_server_options):
    """Build a backend and decorate it into a service.

    Returns ``(service_or_exception, module)``; both backend construction
    errors and service decoration errors are returned for inspection.
    """
    [boptions, soptions], module = full_server_options
    try:
        db = _make_backend(boptions, module)
    except Exception as e:  # 'as e' works on Py2.6+/Py3; old ', e' form did not
        return e, module
    try:
        ret = module.service.decorate_backend(db, soptions)
    except Exception as e:
        ret = e
    return ret, module
def _guard_test(module):
try:
module.registerAdapters()
except ValueError:
pytest.fail(
"Please guard the registerAdapters function in {}"
.format(str(module)))
@pytest.fixture(scope="class")
def pb_adapter_fixture(service_fixture):
    # Adapt the service to a Perspective Broker perspective, after checking
    # that the pb module's registerAdapters is guarded (idempotent).
    ds = service_fixture[0]
    module = service_fixture[1]
    module.protocol.pb.registerAdapters()
    _guard_test(module.protocol.pb)
    pfs = module.protocol.pb.IPerspective(ds)
    return pfs, module
@pytest.fixture(scope="class")
def web_adapter_fixture(service_fixture):
    # Adapt the service to a twisted.web IResource, after checking that the
    # html module's registerAdapters is guarded (idempotent).
    ds = service_fixture[0]
    module = service_fixture[1]
    module.protocol.html.registerAdapters()
    _guard_test(module.protocol.html)
    res = resource.IResource(ds)
    return res, module
@pytest.fixture(scope="class")
def full_pb_server(request, pb_adapter_fixture):
    # Start a PB server on an ephemeral port (0). Returns the listening
    # port object (or the startup exception) together with the module;
    # the finalizer stops listening after the test class.
    pfs, module = pb_adapter_fixture
    try:
        factory = pb.PBServerFactory(pfs)
        ret = reactor.listenTCP(0, factory)
        def fin():
            ret.stopListening()
        request.addfinalizer(fin)
    except Exception as e:
        return e, module
    return ret, module
@pytest.fixture(scope="class")
def full_web_server(request, web_adapter_fixture):
    # Start a twisted.web site on an ephemeral port (0). Returns the
    # listening port object (or the startup exception) and the module;
    # the finalizer stops listening after the test class.
    res, module = web_adapter_fixture
    try:
        site = server.Site(res)
        ret = reactor.listenTCP(0, site)
        def fin():
            ret.stopListening()
        request.addfinalizer(fin)
    except Exception as e:
        return e, module
    return ret, module
@pytest.fixture(scope="class")
def pb_proxy(request, full_pb_server):
    # Connect a PbProxy client to the live PB server and block until the
    # connection and root object deferreds fire.
    # NOTE(review): the local name 'server' shadows the twisted.web.server
    # module imported at file scope.
    server, module = full_pb_server
    port = server.getHost().port
    interface = module.backend.IBackend
    pbproxy = backend.PbProxy(
        interface, 'tcp:localhost:port={}:timeout=3'.format(port), reactor)
    def fin():
        pbproxy.root_object.broker.transport.loseConnection()
    request.addfinalizer(fin)
    pytest.blockon(pbproxy.connection)
    pytest.blockon(pbproxy.d)
    return pbproxy, module
def _get_options_ids_and_module(
scope, config, servers_name, var_lists, server_name):
opname = var_lists[0]
idname = var_lists[1]
if hasattr(scope, servers_name):
config.update(getattr(scope, servers_name))
else:
if hasattr(scope, server_name):
server_name = getattr(scope, server_name)
else:
server_name = config.keys()[0]
tmp = {}
if hasattr(scope, opname):
tmp[opname] = getattr(scope, opname)
if hasattr(scope, idname):
tmp[idname] = getattr(scope, idname)
elif idname not in config.get(server_name, {}):
tmp[idname] = None
if not server_name in config:
config[server_name] = {}
config[server_name].update(tmp)
return config
def _check_options_ids_and_module(config, names):
if len(config) == 0:
raise ValueError("Failed to configure options, ids and modules.")
for c in config.itervalues():
for name in names:
if name not in c:
raise ValueError(
"Missing configuration value {}!".format(name))
def get_options_ids_and_module(metafunc, name):
    """
    retrieves two variables from the nearest available scope around the
    function called.
    The variables, we are searching for, have the names
    - `{name}_options` and
    - `{name}_ids`
    - `server_module`
    The 'class' and 'module' scopes are searched for these variable names.
    Alternatively, `server_modules` variable. This variable must be a
    dictionary, where the keys are module names, and the values should be
    dictionaries with keys `{name}_options` and `{name}_ids`.
    An example for this configuration is:
    .. code:: python
        server_modules = {
            'voteapp.echo': {
                'backend_options': [{}], 'backend_ids': ['default'] },
            'voteapp.tallygist': {
                'backend_options': [{}], 'backend_ids': ['default'] },
        }
    The return value has the above form.
    """
    option_name = '{}_options'.format(name)
    id_name = '{}_ids'.format(name)
    names = [option_name, id_name]
    # These are constants: the previous ``'server_module'.format(name)``
    # calls were no-ops (the strings contain no placeholders).
    server_module_name = 'server_module'
    server_modules_name = 'server_modules'
    config = {}
    try:
        package = metafunc.module.__package__
        confmodule = namedAny(package).conftest
        config = _get_options_ids_and_module(
            confmodule, config, server_modules_name, names, server_module_name)
    except Exception:  # narrowed from bare except: keep SystemExit/KeyboardInterrupt
        pass
    try:
        config = _get_options_ids_and_module(
            metafunc.cls, config, server_modules_name, names,
            server_module_name)
    except Exception:  # class-level config is optional too
        pass
    _check_options_ids_and_module(config, names)
    return _get_options(config, names)
def _get_options(option_config, names):
    """Flatten per-module configuration into parametrize arguments.

    Returns ``(options, ids)`` where *options* is a list of
    ``(option, module)`` tuples and *ids* is a matching list of test ids,
    or ``None`` when no module defines ids.

    Raises ValueError if some modules define ids and others do not.
    """
    opret = []
    idsret = []
    for (k, v) in option_config.items():  # items() works on Py2 and Py3
        module = namedAny(k)
        opts = v[names[0]]
        ids = v[names[1]]
        opret += zip(opts, [module] * len(opts))
        if ids is None:
            if idsret:
                # bug fix: ids collected from an earlier module used to be
                # silently discarded in this case instead of raising
                raise ValueError(
                    "Ids need to be None for all or none of the test modules.")
            idsret = None
        else:
            if idsret is None:
                raise ValueError(
                    "Ids need to be None for all or none of the test modules.")
            else:
                idsret += ids
    if idsret is not None:  # bug fix: len(None) raised TypeError before
        assert len(idsret) == len(opret)
    return opret, idsret
def _get_backend_options(metafunc):
    """Backend (options, ids) parametrization for *metafunc*."""
    return get_options_ids_and_module(metafunc, 'backend')
def _get_full_server_options(metafunc):
    """Full-server (options, ids) parametrization for *metafunc*."""
    return get_options_ids_and_module(metafunc, 'full_server')
def get_server_module(metafunc):
    """Resolve the server module for a test; a class attribute overrides
    the package conftest's ``server_module``."""
    try:
        server_module_name = namedAny(
            metafunc.module.__package__).conftest.server_module
    except Exception:  # narrowed from bare except: package conftest optional
        pass
    if metafunc.cls is not None and hasattr(metafunc.cls, 'server_module'):
        server_module_name = metafunc.cls.server_module
    # NOTE(review): if neither source defines the name, this raises
    # UnboundLocalError -- presumably a configuration error; confirm.
    return namedAny(server_module_name)
def pytest_generate_tests(metafunc):
    # Central parametrization hook: wires the indirect fixtures declared
    # above to the options discovered on the test class / package conftest.
    if 'backend_fixture' in metafunc.fixturenames:
        params, ids = _get_backend_options(metafunc)
        metafunc.parametrize('backend_options', params, ids=ids, indirect=True)
    if 'service_fixture' in metafunc.fixturenames:
        params, ids = _get_full_server_options(metafunc)
        metafunc.parametrize(
            'full_server_options', params, ids=ids, indirect=True,
            scope="class")
    if 'backend_methods_fixture' in metafunc.fixturenames:
        # Parametrize over every method of the backend interface, marking
        # names listed in cls.xfail_methods as expected failures.
        module = get_server_module(metafunc)
        methods = get_methods_for_interface(module.backend.IBackend)
        if hasattr(metafunc.cls, 'xfail_methods'):
            xfail_methods = metafunc.cls.xfail_methods
            # NOTE(review): pytest.mark.xfail(param, ...) is the legacy
            # parameter-marking API; newer pytest wants pytest.param(...,
            # marks=...) -- confirm against the pinned pytest version.
            methods = [
                pytest.mark.xfail(m, run=False)
                if m in xfail_methods else m
                for m in methods]
        metafunc.parametrize('backend_methods_fixture', methods, indirect=True)
    if 'html_request' in metafunc.fixturenames:
        if hasattr(metafunc.cls, 'webrequests'):
            ids = []
            opts = metafunc.cls.webrequests
            for i, wr in enumerate(opts):
                if not isinstance(wr, SingleWebRequest):
                    pytest.fail(
                        "webrequests attribute needs to be a list of "
                        "'SingleWebRequest' objects")
                # Use the request's own id when given, else 'wr-<n>'.
                if wr.id:
                    ids.append(wr.id)
                else:
                    ids.append('wr-{}'.format(i+1))
            metafunc.parametrize('html_request', opts, indirect=True, ids=ids)
def cb_error(failure):
    """Twisted errback: dump the failure and fail the test."""
    print(failure)  # parenthesized form is valid on both Py2 and Py3
    assert 0
class WorkingServiceBase(object):
    """Mixin: assert the decorated service implements IService."""
    def test_service(self, service_fixture):
        ds, module = service_fixture
        verifyObject(module.service.IService, ds)
        assert isinstance(ds, service.Service)
class DummyServiceBase(object):
    """Mixin: every IService method of the dummy service returns a Deferred."""
    full_server_options = [('dummy', {})]
    full_server_ids = ['dummy_service']
    def test_methods_return_deferreds(self, service_fixture):
        ds, module = service_fixture
        for method in get_methods_for_interface(module.service.IService):
            op = getattr(ds, method)
            ret = op()
            assert isinstance(ret, defer.Deferred)
class FailingServiceBase(object):
    """Mixin: service construction fails on unknown options.

    Subclasses ``object`` explicitly: ``class FailingServiceBase():`` was an
    old-style class on Python 2, inconsistent with the other base classes
    in this file.
    """
    full_server_options = [('dummy', {'unknown': 'parameter'})]
    full_server_ids = ['too_many_parameters']
    def test_service_error(self, service_fixture):
        # renamed local (was 'service') to stop shadowing the
        # twisted.application.service module imported at file scope
        svc, _ = service_fixture
        assert isinstance(svc, ValueError)
        assert 'Unknown keys' in svc.message  # NOTE: .message is Py2-only
class WorkingBackendsBase(object):
    """Mixin: the constructed backend provides IBackend."""
    def test_backend(self, backend_fixture):
        b, module = backend_fixture
        assert verifyObject(module.backend.IBackend, b)
class FailingBackendsBase(object):
    """Mixin: backend construction fails on unknown options."""
    backend_options = [{'unknown': 'parameter'}]
    backend_ids = ['too_many_parameters']

    def test_backend_error(self, backend_fixture):
        # local renamed from 'backend' to avoid shadowing the imported module
        err, _module = backend_fixture
        assert isinstance(err, ValueError)
        assert 'Unknown keys' in err.message
class PBAdapterBase(object):
    """Mixin: the service adapts to a PB perspective of the expected class."""
    def test_pb_adaptation(self, pb_adapter_fixture):
        pfs, module = pb_adapter_fixture
        assert isinstance(pfs, module.protocol.pb.PerspectiveFromService)
        verifyObject(module.protocol.pb.IPerspective, pfs)
class PBDummyServerBase(object):
    """Mixin: round-trip every backend method through a live PB connection."""
    full_server_options = [('dummy', {})]
    full_server_ids = ['dummy']
    def test_pb_echo(self, pb_proxy, backend_methods_fixture):
        proxy, module = pb_proxy
        verifyObject(module.backend.IBackend, proxy)
        o = getattr(proxy, backend_methods_fixture)
        d = o()
        assert isinstance(d, defer.Deferred)
        def cb_check_return(ret):
            # Dummy methods presumably echo 'Called <name> successfully...'
            # (generated by create_dummy_class_for) -- confirm.
            assert ret.startswith(
                'Called {} successfully'.format(backend_methods_fixture))
        d.addCallbacks(cb_check_return, cb_error)
        return d
class WebResourcesBase(object):
    """Mixin: run the class's `webrequests` against a live twisted.web site
    and check the expected status code and body."""
    def test_real_webrequests(self, html_request, service_fixture,
                              full_web_server, monkeypatch):
        service, _ = service_fixture
        # Let the request description patch the service (e.g. canned data).
        html_request.mock(service, monkeypatch)
        # NOTE(review): 'server' here shadows the twisted.web.server import.
        server, module = full_web_server
        port = server.getHost().port
        d = html_request.realRender(port)
        def cb_body_received(body, wr):
            if 'body' in wr.expect:
                assert re.search(wr.expect['body'], body)
        def cb_rendered(response, wr):
            if 'code' in wr.expect:
                assert response.code == wr.expect['code']
            # The body arrives asynchronously; chain a second deferred.
            d = client.readBody(response)
            d.addCallback(cb_body_received, wr)
            return d
        d.addCallback(cb_rendered, html_request)
        return d
# vim: set ft=python sw=4 et spell spelllang=en:
|
from math import ceil
import pandas as pd
import numpy as np
import torch
import torch.utils.data as tud
import os
import sys
sys.path.append('..')
from utils import config as cfg
class HarmonixDataset(tud.Dataset):
    '''Harmonix dataset object.

    To follow the idea of the original paper, each chunk is aligned with beat or downbeat. However,
    in our online model we may not get the beat information. Thus, we use a simple overlapped
    sample method. The length of the chunk is defined in config.py. It also contains a hop size
    in case we need.
    '''
    def __init__(self, data='melspecs',
                 data_format='npy',
                 transform=None):
        """Index feature files and their matching segment-label files.

        data: subdirectory of cfg.HARMONIX_DIR holding the feature files.
        data_format: file extension of the feature files.
        transform: optional callable applied to each loaded feature array.
        """
        self._data_paths = []
        self._label_paths = []
        # load the data paths and label paths to memory
        data_dir = os.path.join(cfg.HARMONIX_DIR, data)
        label_dir = os.path.join(cfg.HARMONIX_DIR, 'segments')
        for f in os.listdir(data_dir):
            if not f.endswith(data_format) or f.startswith('.'):
                continue
            data_path = os.path.join(data_dir, f)
            # Strip the trailing 8 characters ('-mel.npy') to get the song
            # name. NOTE(review): assumes every file ends in '-mel.npy';
            # other data/data_format combinations would break this -- confirm.
            song_name = f[:-8]
            label_path = os.path.join(label_dir, song_name + '.txt')
            self._data_paths.append(data_path)
            self._label_paths.append(label_path)
        self._transform = transform

    def __len__(self):
        """Number of songs found."""
        return len(self._data_paths)

    def __getitem__(self, index):
        """Load one song: features plus reference segment times and labels."""
        data = np.load(self._data_paths[index])  # (mels, time)
        if self._transform is not None:
            data = self._transform(data)
        label_df = pd.read_csv(self._label_paths[index], sep=' ', header=None, names=['time', 'label'])
        times, labels = np.array(label_df['time'], dtype='float32'), label_df['label']
        # Map labels to numbers: strip digits ('verse1' -> 'verse'),
        # normalize case/whitespace, then factorize into integer codes.
        # Raw string r'\d+' fixes the invalid-escape-sequence warning the
        # plain '\d+' literal produced.
        labels = labels.str.replace(r'\d+', '', regex=True)
        labels = labels.str.lower()
        labels = labels.str.strip()
        labels = pd.factorize(labels)[0]
        # return ref_times and ref_labels for msaf algorithms
        return {'data': torch.tensor(data),
                'ref_times': times,
                'ref_labels': np.array(labels)}
class SongDataset(tud.Dataset):
    '''Batch sampling within a song
    __getitem__(self, index) will return:
    1. the ith chunk of a song
    2. the segment label (an integer) for the chunk.
    `alignment`: if align the chunks with beat
    '''
    def __init__(self, data, times, labels, batch_size, mode,
                 transform=None, alignment=None, label_strategy='last'):
        # data: feature matrix (features, time) for a single song
        # times/labels: segment boundary times and their integer labels
        # mode: 'train' or 'val' -- selects the hop size from cfg
        # label_strategy: 'center' or 'last' -- which time point within a
        #     chunk is used to look up the chunk's segment label
        self._data = data
        self._times = times
        self._labels = labels
        self._mode = mode
        self._alignment = alignment
        self._transform = transform  # TODO: window function
        self._batch_size = batch_size
        self._label_strategy = label_strategy
        self._chunks, self._chunk_labels = self._process_data()
    def _process_data(self):
        '''
        1. split the data to chunks with hop size for train and validation dataset respectively
        2. calculate the label of each chunk
        3. shuffle the samples
        4. complement the last batch
        '''
        # different hop size for train and validation set
        # NOTE(review): any other mode leaves hop_size unbound -> NameError.
        if self._mode == 'train':
            hop_size = cfg.train_hop_size
        elif self._mode == 'val':
            hop_size = cfg.eval_hop_size
        chunk_len = cfg.CHUNK_LEN
        bin_time_len = cfg.BIN_TIME_LEN
        # split chunks and map them to labels
        if not self._alignment:
            # TODO: double check this number of chunks and data length for a song
            n_chunks = ((self._data.shape[1] - (chunk_len - 1) - 1) // hop_size) + 1
            # Truncate so the data length is an exact multiple of the hop.
            data_len = chunk_len + (n_chunks - 1) * hop_size
            self._data = self._data[:, :data_len]
            # map each chunk to a segment label
            chunks = []
            chunk_labels = []
            for i in range(n_chunks):
                start = i * hop_size
                end = start + chunk_len
                chunks.append(self._data[:, start:end])
                # decide which time the label is represented by
                if self._label_strategy == 'center':
                    label_time = (start + end) * bin_time_len / 2
                elif self._label_strategy == 'last':  # TODO: some window function should be applied to the chunk to emphasize current frame
                    label_time = (end - cfg.time_lag_len) * bin_time_len  # about 1s time lag
                # Index of the last segment boundary at or before label_time
                # (argmax of the first True, minus one).
                label_idx = torch.argmax((self._times > label_time).type(torch.uint8)) - 1
                if label_idx < 0:  # some silence may not be labeled, like before starting or after ends
                    chunk_labels.append(-1)
                else:
                    chunk_labels.append(self._labels[label_idx])
        else:
            raise NotImplementedError
        # we don't have to shuffle and complement for the validation set
        chunks = torch.stack(chunks)
        chunk_labels = torch.tensor(chunk_labels)
        if self._mode == 'val':
            return chunks, chunk_labels
        # shuffle
        rands = torch.randperm(chunk_labels.size(0))
        chunks = chunks[rands, :, :]
        chunk_labels = chunk_labels[rands]
        # complement the last batch (pad by repeating the first chunks so
        # the count is a whole number of batches)
        # NOTE(review): the `> 1` test skips padding when there are fewer
        # chunks than one batch -- confirm that case cannot occur.
        num_batch = chunk_labels.size(0) / self._batch_size
        if num_batch > 1:
            r = ceil(num_batch)*self._batch_size - len(chunk_labels)
            chunks = torch.concat([chunks, chunks[:r, :, :]])
            chunk_labels = torch.concat([chunk_labels, chunk_labels[:r]])
        return chunks, chunk_labels
    def __len__(self):
        # Number of chunks (not songs).
        return len(self._chunk_labels)
    def __getitem__(self, index):
        return self._chunks[index, :, :], self._chunk_labels[index]
if __name__ == '__main__':
    # Smoke test: load one song and build a chunk-level dataset from it.
    dataset = HarmonixDataset()
    dataloader = tud.DataLoader(dataset, 1, shuffle=True, num_workers=0)
    batch = next(iter(dataloader))
    print(batch)
    # squeeze(0) drops the DataLoader's batch dimension of 1.
    song_dataset = SongDataset(batch['data'].squeeze(0),
                               batch['ref_times'].squeeze(0),
                               batch['ref_labels'].squeeze(0), 128, 'train')
    song_dataloader = tud.DataLoader(song_dataset, 1)
    song_batch = next(iter(song_dataloader))
    print(song_batch)
|
import copy
import distributions
import kmeans
import numpy as np
import matplotlib.pyplot as plt
import sys
from matplotlib.mlab import bivariate_normal
from numpy import newaxis as nax
from numpy.linalg import det, inv
from IPython.core.debugger import Tracer
def log_likelihood(X, obs_distr, pi):
    """Log-likelihood of X under the mixture (pi, obs_distr).

    ll = sum_i log(sum_j pi_j p(x_i | theta_j))
    """
    N = X.shape[0]
    K = len(obs_distr)
    pdfs = np.zeros((N, K))
    for j in range(K):
        pdfs[:, j] = obs_distr[j].pdf(X)
    # Vectorized over samples: pdfs.dot(pi)[i] == sum_j pi_j * pdfs[i, j].
    # Replaces the former O(N) Python-level generator loop.
    return np.sum(np.log(pdfs.dot(pi)))
def em(X, init_obs_distr, assignments=None, n_iter=10, Xtest=None):
    """Fit a mixture model with EM.

    Returns (tau, obs_distr, pi, ll_train, ll_test) where tau are the final
    responsibilities and the ll_* lists track log-likelihood per iteration.
    """
    N = X.shape[0]
    K = len(init_obs_distr)
    if assignments is not None:
        # np.float was removed in NumPy 1.24; the builtin float dtype is
        # equivalent (both mean float64 here).
        pi = np.array([np.sum(assignments == j) for j in range(K)], dtype=float)
        pi = pi / np.sum(pi)
    else:
        pi = np.ones(K) / K
    obs_distr = copy.deepcopy(init_obs_distr)
    tau = np.zeros((N, K))  # tau[i,j] = p(z_i = j | x_i)
    ll_train = []
    ll_test = []
    for i in range(n_iter):
        # E-step: unnormalized responsibilities
        for j in range(K):
            tau[:,j] = pi[j] * obs_distr[j].pdf(X)
        # normalize each line so responsibilities sum to one per sample
        tau = tau / np.sum(tau, axis=1)[:,nax]
        print(np.bincount(np.argmax(tau,1)))
        # M-step: update mixture weights and component parameters
        pi = np.sum(tau, axis=0) / N
        for j in range(K):
            obs_distr[j].max_likelihood(X, tau[:,j])
        ll_train.append(log_likelihood(X, obs_distr, pi))
        if Xtest is not None:
            ll_test.append(log_likelihood(Xtest, obs_distr, pi))
    return tau, obs_distr, pi, ll_train, ll_test
def plot_em(X, tau, obs_distr, contours=False):
    """Scatter-plot X colored by most likely component; means in green.

    With contours=True, draw one density contour per component.
    """
    # bug fix: np.vstack requires a sequence; passing a generator raises
    # on modern NumPy, so build a list first
    means = np.vstack([d.mean for d in obs_distr])
    K = means.shape[0]
    plt.figure()
    plt.scatter(X[:,0], X[:,1], c=np.argmax(tau, axis=1))
    plt.scatter(means[:,0], means[:,1], color='green', s=100)
    if contours:
        sigmas = [d.cov for d in obs_distr]
        for j in range(K):
            x, y = np.arange(-10., 10., 0.04), np.arange(-15., 15., 0.04)
            xx, yy = np.meshgrid(x, y)
            sx = np.sqrt(sigmas[j][0,0])
            sy = np.sqrt(sigmas[j][1,1])
            sxy = sigmas[j][1,0]
            # NOTE(review): matplotlib.mlab.bivariate_normal was removed in
            # matplotlib >= 3.1; replace with an explicit Gaussian pdf when
            # upgrading matplotlib.
            z = bivariate_normal(xx, yy, sx, sy, means[j,0], means[j,1], sxy)
            cs = plt.contour(xx, yy, z, [0.01])
if __name__ == '__main__':
    X = np.loadtxt('EMGaussian.data')
    Xtest = np.loadtxt('EMGaussian.test')
    K = 4
    iterations = 40
    # Initialize EM from the best of several k-means runs.
    assignments, centers, _ = kmeans.kmeans_best_of_n(X, K, n_trials=5)
    for k in range(K):
        centers[k].sigma2 = 1.
    # Isotropic
    tau, obs_distr, pi, ll_train_iso, ll_test_iso = \
        em(X, centers, assignments, n_iter=iterations, Xtest=Xtest)
    plot_em(X, tau, obs_distr, contours=True)
    plt.title('EM with covariance matrices proportional to identity')
    # General: promote the isotropic centers to full-covariance Gaussians.
    new_centers = [distributions.Gaussian(c.mean, c.sigma2*np.eye(2)) \
            for c in centers]
    tau, obs_distr, pi, ll_train_gen, ll_test_gen = \
        em(X, new_centers, assignments, n_iter=iterations, Xtest=Xtest)
    plot_em(X, tau, obs_distr, contours=True)
    plt.title('EM with general covariance matrices')
    # log-likelihood plot comparing both covariance models
    plt.figure()
    plt.plot(ll_train_iso, label='isotropic, training')
    plt.plot(ll_test_iso, label='isotropic, test')
    plt.plot(ll_train_gen, label='general, training')
    plt.plot(ll_test_gen, label='general, test')
    plt.xlabel('iterations')
    plt.ylabel('log-likelihood')
    plt.title('Comparison of learning curves')
    plt.legend()
|
<reponame>ZeitOnline/zeit.content.modules
from zeit.cms.browser.widget import RestructuredTextDisplayWidget
from zope.cachedescriptors.property import Lazy as cachedproperty
import UserDict
import grokcore.component as grok
import lxml.objectify
import zeit.cms.content.property
import zeit.cms.content.reference
import zeit.content.modules.interfaces
import zeit.edit.block
import zope.formlib.form
import zope.interface
import zope.security
class RawText(zeit.edit.block.Element):
    """Module holding raw user-supplied code, either inline or referenced."""
    zope.interface.implements(zeit.content.modules.interfaces.IRawText)
    text_reference = zeit.cms.content.reference.SingleResource(
        '.text_reference', 'related')
    text = zeit.cms.content.property.ObjectPathProperty(
        '.text', zeit.content.modules.interfaces.IRawText['text'])
    @property
    def raw_code(self):
        # A referenced text object wins over the inline text; fall back to ''.
        if self.text_reference:
            return self.text_reference.text
        if self.text:
            return self.text
        return ''
    @cachedproperty
    def params(self):
        # Cached adapter providing dict-like access to embed parameters.
        return zeit.content.modules.interfaces.IEmbedParameters(self)
class EmbedParameters(
        grok.Adapter,
        UserDict.DictMixin,
        zeit.cms.content.xmlsupport.Persistent):
    """Dict-like access to the <param> nodes stored in the module's XML.

    99% copy&paste from z.c.author.author.BiographyQuestions, changed the tag
    name to `param` from `question` and added type conversion.
    """
    grok.context(zeit.content.modules.interfaces.IRawText)
    grok.implements(zeit.content.modules.interfaces.IEmbedParameters)
    def __init__(self, context):
        # The really correct way to do this would be the "trusted adapter"
        # pattern, i.e. unwrap context but then wrap ourselves. But then we
        # would need a security declaration that covers arbitrary attributes
        # (since the parameters are user-defined), which is not feasible.
        context = zope.security.proxy.getObject(context)
        # object.__setattr__ bypasses our own __setattr__ below, which
        # would otherwise write these names into the XML.
        object.__setattr__(self, 'context', context)
        object.__setattr__(self, 'xml', context.xml)
        embed = self.context.text_reference
        fields = {}
        if (zeit.content.text.interfaces.IEmbed.providedBy(embed) and
                embed.parameter_definition):
            for name, field in embed.parameter_fields.items():
                fields[name] = field.bind(embed)
        object.__setattr__(self, 'fields', fields)
        # Set parent last so we don't accidentally trigger _p_changed.
        object.__setattr__(self, '__parent__', context)
    def __getitem__(self, key):
        # Fall back to the field's default when no <param> node exists.
        node = self.xml.xpath('param[@id="%s"]' % key)
        if not node:
            field = self.fields.get(key, zope.schema.TextLine())
            return field.default
        return self._converter(key).fromProperty(node[0].text)
    def __setitem__(self, key, value):
        # Replace any existing node; a falsy value just removes it.
        node = self.xml.xpath('param[@id="%s"]' % key)
        if node:
            self.xml.remove(node[0])
        if value:  # XXX Use field.missing_value?
            value = self._converter(key).toProperty(value)
            node = lxml.objectify.E.param(value, id=key)
            lxml.objectify.deannotate(node[0], cleanup_namespaces=True)
            self.xml.append(node)
        super(EmbedParameters, self).__setattr__('_p_changed', True)
    def _converter(self, name):
        # Serialize values through the DAV property converter for the
        # field's type (TextLine by default).
        props = zeit.cms.content.property.DAVConverterWrapper.DUMMY_PROPERTIES
        field = self.fields.get(name, zope.schema.TextLine())
        return zope.component.queryMultiAdapter(
            (field, props),
            zeit.cms.content.interfaces.IDAVPropertyConverter)
    def keys(self):
        return [x.get('id') for x in self.xml.xpath('param')]
    # Attribute-style access is meant only for zope.formlib.
    def __getattr__(self, key):
        return self.get(key)
    def __setattr__(self, key, value):
        self[key] = value
class ICSS(zope.interface.Interface):
    """Provides the embed's CSS wrapped for display (see CSSInjector)."""
    vivi_css = zope.schema.Text(readonly=True)
class CSSInjector(grok.Adapter):
    """Wrap the embed's vivi CSS in a <style> tag, prefixing every selector
    with this module's id/class so the rules cannot leak out."""
    grok.context(zeit.content.modules.interfaces.IRawText)
    grok.implements(ICSS)
    @cachedproperty
    def vivi_css(self):
        import cssutils
        embed = self.context.text_reference
        if not zeit.content.text.interfaces.IEmbed.providedBy(embed):
            return None
        if not embed.vivi_css:
            return None
        module = self.context.__name__
        css = cssutils.parseString(embed.vivi_css)
        for rule in css:
            if not isinstance(rule, cssutils.css.CSSStyleRule):
                continue
            # Rebuild the selector list with scoped variants of each one.
            selectors = [x.selectorText for x in rule.selectorList]
            while rule.selectorList:
                del rule.selectorList[0]
            for selector in selectors:
                # zeit.content.article (id selector)
                rule.selectorList.append(u'#%s %s' % (module, selector))
                # zeit.content.cp (class selector)
                rule.selectorList.append(u'.%s %s' % (module, selector))
        return u'<style>\n%s\n</style>' % css.cssText
class EmbedParameterForm(object):
    """Form mixin: prepends CSS/memo display fields to the subclass's
    `_form_fields` and appends the embed's user-defined parameter fields."""
    _form_fields = NotImplemented
    _omit_fields = ()
    def __init__(self, context, request):
        super(EmbedParameterForm, self).__init__(context, request)
        self.form_fields = zope.formlib.form.FormFields(
            ICSS, zeit.cms.content.interfaces.IMemo) + self._form_fields.omit(
            *self._omit_fields)
        memo = self.form_fields['memo']
        memo.custom_widget = RestructuredTextDisplayWidget
        memo.for_display = True
        self.form_fields['vivi_css'].custom_widget = RawDisplayWidget
        embed = self.context.text_reference
        if (zeit.content.text.interfaces.IEmbed.providedBy(embed) and
                embed.parameter_definition):
            # A parameter definition supersedes the free-form text field.
            self.form_fields = self.form_fields.omit('text')
            # There really is no point in security declarations for fields.
            parameters = zope.security.proxy.getObject(embed.parameter_fields)
            for field in parameters.values():
                self.form_fields += zope.formlib.form.FormFields(field)
@grok.adapter(zeit.content.modules.interfaces.IRawText)
@grok.implementer(zeit.cms.content.interfaces.IMemo)
def embed_memo(context):
    """Expose the referenced embed's memo on the module; empty without one."""
    embed = context.text_reference
    if not zeit.content.text.interfaces.IEmbed.providedBy(embed):
        return EMPTY_MEMO
    return zeit.cms.content.interfaces.IMemo(embed)
class EmptyMemo(object):
    """Null object with an empty memo, used when no embed is referenced."""
    memo = u''
# Shared singleton returned by embed_memo for modules without an embed.
EMPTY_MEMO = EmptyMemo()
class RawDisplayWidget(zope.formlib.widget.DisplayWidget):
    """Display widget that renders its value as-is (no HTML escaping)."""
    def __call__(self):
        # NOTE(review): no escaping is applied -- presumably safe because
        # vivi_css is generated by CSSInjector, not user-entered; confirm.
        return self._data
|
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from taggit.managers import TaggableManager
from extras.models import CustomFieldModel
from utilities.models import ChangeLoggedModel
from .constants import *
@python_2_unicode_compatible
class TenantGroup(ChangeLoggedModel):
    """
    An arbitrary collection of Tenants.
    """
    name = models.CharField(
        max_length=50,
        unique=True
    )
    slug = models.SlugField(
        unique=True
    )
    # Column order used by to_csv() for CSV export.
    csv_headers = ['name', 'slug']
    class Meta:
        ordering = ['name']
        # NOTE(review): displayed as 'Service Provider' although the model
        # is TenantGroup -- apparently a deliberate rename in this fork.
        verbose_name = 'Service Provider'
        verbose_name_plural = 'Service Providers'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # No group detail page; link to the tenant list filtered by group.
        return "{}?group={}".format(reverse('tenancy:tenant_list'), self.slug)
    def to_csv(self):
        return (
            self.name,
            self.slug,
        )
@python_2_unicode_compatible
class Tenant(ChangeLoggedModel, CustomFieldModel):
    """
    A Tenant represents an organization served by the NetBox owner. This is typically a customer or an internal
    department.
    """
    name = models.CharField(
        max_length=30,
        unique=True
    )
    slug = models.SlugField(
        unique=True
    )
    # Optional grouping; tenants survive group deletion (SET_NULL).
    group = models.ForeignKey(
        to='tenancy.TenantGroup',
        on_delete=models.SET_NULL,
        related_name='tenants',
        blank=True,
        null=True
    )
    description = models.CharField(
        max_length=100,
        blank=True,
        help_text='Long-form name (optional)'
    )
    comments = models.TextField(
        blank=True
    )
    custom_field_values = GenericRelation(
        to='extras.CustomFieldValue',
        content_type_field='obj_type',
        object_id_field='obj_id'
    )
    tags = TaggableManager()
    # Column order used by to_csv() for CSV export.
    csv_headers = ['name', 'slug', 'group', 'description', 'comments']
    class Meta:
        ordering = ['group', 'name']
        # NOTE(review): displayed as 'Customer' although the model is
        # Tenant -- apparently a deliberate rename in this fork.
        verbose_name = 'Customer'
        verbose_name_plural = 'Customers'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('tenancy:tenant', args=[self.slug])
    def to_csv(self):
        return (
            self.name,
            self.slug,
            self.group.name if self.group else None,
            self.description,
            self.comments,
        )
@python_2_unicode_compatible
class Package(ChangeLoggedModel, CustomFieldModel):
    """
    A Package represents a service delivered to our customers.
    """
    name = models.CharField(max_length=30, unique=True)
    slug = models.SlugField(unique=True)
    # help_text typo fixed: 'recieve' -> 'receive' (user-visible string)
    ipv4_enabled = models.BooleanField(blank=False, default=True, verbose_name='IPv4 is enabled', help_text='Customers receive an IPv4 address')
    ipv6_enabled = models.BooleanField(blank=False, default=True, verbose_name='IPv6 is enabled', help_text='Customers receive an IPv6 address')
    multicast_enabled = models.BooleanField(blank=False, default=True, verbose_name='Multicast is enabled', help_text='Customers can use multicast')
    speed_upload = models.PositiveIntegerField(blank=False, null=False, verbose_name='Upload speed rate (Kbps)')
    speed_download = models.PositiveIntegerField(blank=False, null=False, verbose_name='Download speed rate (Kbps)')
    qos_profile = models.CharField(max_length=30, unique=False)
    comments = models.TextField(
        blank=True
    )
    custom_field_values = GenericRelation(
        to='extras.CustomFieldValue',
        content_type_field='obj_type',
        object_id_field='obj_id'
    )
    tags = TaggableManager()
    # Column order used by to_csv() for CSV export.
    csv_headers = ['name', 'slug', 'ipv4_enabled', 'ipv6_enabled', 'multicast_enabled', 'speed_upload', 'speed_download', 'qos_profile']
    class Meta:
        ordering = ['name']
        verbose_name = 'Package'
        verbose_name_plural = 'Packages'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('tenancy:package', args=[self.slug])
    def to_csv(self):
        # Values aligned with csv_headers.
        return (
            self.name,
            self.slug,
            self.ipv4_enabled,
            self.ipv6_enabled,
            self.multicast_enabled,
            self.speed_upload,
            self.speed_download,
            self.qos_profile,
        )
|
<reponame>mchestr/pycbc<gh_stars>0
import json
import pytest
import requests
from pycbc.client import WebBookingClient
@pytest.fixture
def client(mock_responses):
    # mock_responses (from conftest) must be active so the client's HTTP
    # calls hit stubs instead of the real booking service.
    return WebBookingClient()
def test_service_search(client, service_search_stub, service_search_json):
    # services_search() returns the stub's JSON payload unchanged.
    data = client.services_search()
    assert data == service_search_json
def test_branches_service_search(client, service_id, branches_service_search_stub, branches_service_search_json):
    # Branch lookup for a service returns the stub's JSON unchanged.
    data = client.branches_service_search(service_id)
    assert data == branches_service_search_json
def test_branches_dates_search(client, service_id, branch_id, branches_dates_search_stub, branches_dates_search_json):
    # Available-date lookup returns the stub's JSON unchanged.
    data = client.branches_dates_search(service_id, branch_id)
    assert data == branches_dates_search_json
def test_branches_times_search(client, service_id, branch_id, date, branches_times_search_stub,
                               branches_times_search_json):
    # Available-time lookup for a date returns the stub's JSON unchanged.
    data = client.branches_times_search(service_id, branch_id, date)
    assert data == branches_times_search_json
def test_appointments_search(client, user_data, appointments_search_stub, appointments_search_json):
    data = client.appointments_search(user_data.first_name, user_data.last_name, user_data.email, user_data.phone)
    assert data == appointments_search_json
    # Verify the exact JSON body the client posted to the stub.
    assert json.loads(appointments_search_stub.calls[0].request.body) == {
        'captcha': False,
        'dob': '',
        'email': user_data.email,
        'externalId': '',
        'firstName': user_data.first_name,
        'lastName': user_data.last_name,
        'phone': user_data.phone
    }
def test_appointment_cancel(client, appointment_id, appointment_cancel_stub):
    # Cancellation has no response payload; the client returns None.
    data = client.appointment_cancel(appointment_id)
    assert data is None
def test_appointment_reserve(client, appointment_reserve_stub, date, time, appointment_reserve_json, service_id,
                             service_qp_id, service_name, branch_id):
    data = client.appointment_reserve(service_id, service_name, service_qp_id, branch_id, date, time)
    assert data == appointment_reserve_json
    # The 'custom' field must be compact JSON (no spaces after separators),
    # matching what the client serializes.
    assert json.loads(appointment_reserve_stub.calls[0].request.body) == {
        'custom': json.dumps({
            'peopleServices': [{
                'publicId': service_id,
                'qpId': service_qp_id,
                'adult': 1,
                'name': service_name,
                'child': 0,
            }]
        }, separators=(',', ':')),
        'services': [{
            'publicId': service_id,
        }],
    }
def test_appointment_check_multiple(client, appointment_check_multiple_stub, appointment_check_multiple_ok_json,
                                    service_id, phone, email, date, time):
    # Duplicate-booking check returns the stub's 'ok' payload unchanged.
    data = client.appointment_check_multiple(service_id, date, time, phone, email)
    assert data == appointment_check_multiple_ok_json
def test_appointment_confirm(client, appointment_confirm_stub, appointment_id, service_id, service_qp_id, service_name,
                             date_of_birth, email, first_name, last_name, phone, appointment_confirm_json):
    data = client.appointment_confirm(service_id, service_name, service_qp_id, appointment_id, first_name, last_name,
                                      email, phone, date_of_birth)
    assert data == appointment_confirm_json
    # Verify the full confirmation body, including the compact-JSON
    # 'custom' field and the customer details block.
    assert json.loads(appointment_confirm_stub.calls[0].request.body) == {
        'captcha': '',
        'custom': json.dumps({
            'peopleServices': [{
                'publicId': service_id,
                'qpId': service_qp_id,
                'adult': 1,
                'name': service_name,
                'child': 0,
            }],
            'totalCost': 0,
            'createdByUser': 'Qmatic Web Booking',
            'customSlotLength': 15,
        }, separators=(',', ':')),
        'customer': {
            'dateOfBirth': date_of_birth,
            'dob': '',
            'email': email,
            'externalId': "",
            'firstName': first_name,
            'lastName': last_name,
            'phone': phone,
        },
        'languageCode': 'en',
        'notes': '',
        'notificationType': '',
        'title': 'Qmatic Web Booking',
    }
def test_configuration(client, configuration_stub, configuration_json):
    """The configuration endpoint returns the stubbed JSON unchanged."""
    assert client.configuration() == configuration_json
def test_configuration_bad_response(client, configuration_error_stub):
    """A non-2xx configuration response surfaces as requests.HTTPError."""
    with pytest.raises(requests.HTTPError):
        client.configuration()
|
import sys
import os
# Make the package root (one level up) importable so `lib` resolves
# when the tests are run from this directory.
current_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(current_dir, ".."))
import numpy as np
from lib import generate
def test__crossover():
    """Verify single-point crossover offspring for several cut points.

    The original spelled out the same five-line slice/concatenate
    recipe five times; it is factored into a local helper and a table
    of (parent_a, parent_b, cut_point, expected_a, expected_b) cases.
    """
    def _crossover(parent_a, parent_b, point):
        # One-point crossover: children swap tails after `point`.
        return (parent_a[:point] + parent_b[point:],
                parent_b[:point] + parent_a[point:])

    cases = [
        ([1, 2, 3], [3, 2, 1], 1, [1, 2, 1], [3, 2, 3]),
        ([1, 2, 3], [3, 2, 1], 2, [1, 2, 1], [3, 2, 3]),
        ([1, 2, 3, 4], [4, 3, 2, 1], 1, [1, 3, 2, 1], [4, 2, 3, 4]),
        ([1, 2, 3, 4], [4, 3, 2, 1], 2, [1, 2, 2, 1], [4, 3, 3, 4]),
        ([1, 2, 3, 4], [4, 3, 2, 1], 3, [1, 2, 3, 1], [4, 3, 2, 4]),
    ]
    for parent_a, parent_b, point, want_a, want_b in cases:
        child_a, child_b = _crossover(parent_a, parent_b, point)
        assert child_a == want_a
        assert child_b == want_b
def test__generate():
    """A randomly generated answer vector has the requested length."""
    n_questions = 5
    answers = np.random.randint(1, 4, n_questions)
    assert len(answers) == n_questions
def test__gen_population():
    """Population holds the requested number of equally sized chromosomes."""
    n_chromosomes = 10
    n_genes = 5
    answer_range = (0, 1)
    population = [generate(n_genes, answer_range) for _ in range(n_chromosomes)]
    assert len(population) == n_chromosomes
    assert len(population[0]) == n_genes
def test__evaluate():
    """Fitness is the number of positions matching the answer key.

    The original repeated the same zip-and-count loop three times; it
    is factored into a local helper exercised on three scenarios.
    """
    def _count_matches(key, answers):
        # Answers must line up one-to-one with the key.
        assert len(key) == len(answers)
        return sum(1 if a == b else 0 for a, b in zip(key, answers))

    # all matched
    assert _count_matches([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]) == 5
    # partly matched (only the middle position agrees)
    assert _count_matches([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]) == 1
    # nothing matched
    assert _count_matches([1, 2, 3, 4, 5], [5, 4, 5, 2, 1]) == 0
def test__mutation():
    """With degenerate ranges, mutation deterministically writes 2 at index 0.

    np.random.randint has an exclusive upper bound, so randint(0, 1) is
    always 0 and randint(2, 3) is always 2 — no seeding is required.
    """
    value_low, value_high = 2, 3
    max_index = 1
    mutate_at = np.random.randint(0, max_index)
    genome = [1, 2, 3]
    genome[mutate_at] = np.random.randint(value_low, value_high)
    assert genome == [2, 2, 3]
def test__selection():
    """Truncation selection keeps exactly the top-ranked individuals."""
    ranked = [(0, 2), (1, 1), (2, 1), (3, 0), (4, 0)]
    keep = 3
    survivors = ranked[:keep]
    assert len(survivors) == keep
|
#!/usr/bin/env python
from __future__ import with_statement
import numpy
import HTSeq
import re
import itertools
try:
from six.moves import xrange
except ImportError:
pass
def add_dict(error, dic):
    """Record one occurrence of run length `error` in histogram `dic`.

    `dic` maps run length -> count and is kept dense: keys are always
    the contiguous range 0..len(dic)-1. Any missing keys up to `error`
    are created with count 0 before `error` itself is incremented.

    Fix: uses the builtin `range` instead of `xrange` — the module's
    `from six.moves import xrange` is wrapped in `except ImportError:
    pass`, so on Python 3 without `six` the name was undefined. When
    `error` is already a key, range(len(dic), error + 1) is empty, so
    the old `if error not in dic` guard is unnecessary.
    """
    for i in range(len(dic), error + 1):
        dic[i] = 0
    dic[error] += 1
def add_match(prev, succ, match_list):
    """Count a (prev, succ) match-length transition in a 2-D histogram.

    `match_list` is a dict-of-dicts square matrix indexed by match run
    lengths. It is grown to at least max(prev, succ) + 1 cells on each
    axis (new cells zeroed) before incrementing the (prev, succ) cell.

    Fix: uses builtin `range` instead of `xrange`, which is undefined on
    Python 3 when the optional `six` import at module top fails.
    """
    # Expand the matrix to the biggest size the new pair requires.
    expand = max(prev, succ) + 1
    if expand > len(match_list):
        old_size = len(match_list)
        # Widen every existing row with zeroed new columns...
        for i in range(old_size):
            for j in range(old_size, expand):
                match_list[i][j] = 0
        # ...then append fully zeroed new rows.
        for i in range(old_size, expand):
            match_list[i] = {}
            for j in range(0, expand):
                match_list[i][j] = 0
    match_list[prev][succ] += 1
def parse_cs(cs_string):
    """Split a minimap2-style cs tag into event lengths and operator order.

    Returns:
        list_hist: per-event lengths in string order — a match run
                   length, an indel length, or the size of a run of
                   consecutive single-base mismatches.
        list_op:   operator characters in order; consecutive mismatch
                   operators are collapsed to a single entry so the two
                   lists stay aligned.
    """
    mis = 0            # length of the current run of single-base mismatches
    list_op = []
    list_hist = []
    prev_op = "start"
    # Tokens: ':<n>' match run, '*xy' one mismatch, '+SEQ'/'-SEQ' indels.
    # '=SEQ' is also matched but conv_op_to_word maps '=' to "skip".
    for item in re.findall('(:[0-9]+|\*[a-z][a-z]|[=\+\-][A-Za-z]+)', cs_string):
        op = item[0]
        op_name = conv_op_to_word(op)
        if op_name != "mis":
            list_op.append(op)
        elif prev_op != "mis":
            # Only the first mismatch of a run is recorded in list_op.
            list_op.append(op)
        prev_op = op_name
        if op_name == "ins" or op_name == "del":
            if mis != 0:
                # Flush the pending mismatch run before the indel length.
                list_hist.append(mis)
                mis = 0
            list_hist.append(len(item) - 1)
        elif op_name == "match":
            if mis != 0:
                list_hist.append(mis)
                mis = 0
            list_hist.append(int(item[1:]))
        elif op_name == "mis":
            mis += 1
    if mis != 0:  # Deals with the case where mis is the last error in cs string
        list_hist.append(mis)
    return list_hist, list_op
def get_cs(cigar_str, md_str):
    """Reconstruct a cs-like string from a CIGAR string plus MD tag.

    Walks the MD tokens (match-run counts, '^'-prefixed deletions,
    substituted reference bases) in lockstep with CIGAR operations.
    NOTE(review): mismatches are emitted as the literal placeholder
    '*ab' rather than the real bases; parse_cs only counts mismatch
    events, so this appears sufficient downstream — confirm if the cs
    output is ever used for anything but histogramming.
    """
    cs = []
    k = 0    # index into the cigar op list
    cx = 0   # reference-consumed length per cigar (bookkeeping)
    cy = 0   # query-consumed length per cigar
    mx = 0   # reference-consumed length per MD
    my = 0   # query-consumed length per MD
    md = re.findall('(\\d+)|(\\^[A-Za-z]+)|([A-Za-z])', md_str)
    cigar = re.findall('(\d+)([MIDSHX=])', cigar_str)  # Don't find (\d+)N since those are introns
    for m in md:
        if m[1] != "":
            # '^BASES' token: a deletion from the reference.
            l = len(m[1]) - 1
            cs.extend(["-", m[1][1:]])
            mx += l
            cx += l
            k += 1
        else:
            if m[0] != "":
                ml = int(m[0])   # length of a match run
            else:
                ml = 1           # a single substituted base
            # Consume cigar ops until the MD token is used up or a
            # deletion op is reached (handled by the '^' branch above).
            while k < len(cigar) and cigar[k][1] != 'D':
                cl = int(cigar[k][0])
                op = cigar[k][1]
                if op == "M":
                    if my + ml < cy + cl:
                        # Token ends inside this M op: emit and stop.
                        if ml > 0:
                            if m[2] != "":
                                cs.extend(['*', 'a', 'b'])  # mismatch placeholder bases
                            else:
                                cs.extend([':', ml])
                            mx += ml
                            my += ml
                        ml = 0
                        break
                    else:
                        # Token spans past this M op: emit the remainder
                        # of the op and advance to the next cigar entry.
                        dl = cy + cl - my
                        cs.extend([':', dl])
                        cx += cl
                        cy += cl
                        k += 1
                        mx += dl
                        my += dl
                        ml -= dl
                elif op == 'I':
                    # Insertion: query-only bases, emitted as '+III...'.
                    cs.extend(['+', 'I' * cl])
                    cy += cl
                    my += cl
                    k += 1
                elif op == 'S':
                    # Soft clip: consumes query only, emits nothing.
                    cy += cl
                    my += cl
                    k += 1
    cs_str = [str(x) for x in cs]
    return "".join(cs_str)
def conv_op_to_word(op):
    """Translate a cs-tag operator character to its operation name.

    ':' -> "match", '+' -> "ins", '-' -> "del", '*' -> "mis";
    any other character (e.g. '=') -> "skip". The if/elif ladder is
    replaced by a single dict lookup with a default.
    """
    return {":": "match", "+": "ins", "-": "del", "*": "mis"}.get(op, "skip")
def hist(outfile, alnm_ftype):
    """Build error/match histograms and Markov models from alignments.

    Reads "<outfile>_besthit.maf" (pairwise alignments) when
    alnm_ftype == "maf", otherwise "<outfile>_primary.sam", and writes:
    *_match.hist, *_mis.hist, *_ins.hist, *_del.hist, *_first_match.hist,
    *_error_markov_model, *_match_markov_model, *_error_rate.tsv.

    NOTE(review): error rates are divided by (mis + match + del), i.e.
    the reference-aligned length; insertions are excluded from the
    denominator — presumably intentional, confirm against the model.
    """
    infile = outfile
    # Output basename drops the trailing "_genome"; input keeps it.
    if "_genome" in outfile:
        outfile = outfile[:-7]
    out_match = open(outfile + "_match.hist", 'w')
    out_mis = open(outfile + "_mis.hist", 'w')
    out_ins = open(outfile + "_ins.hist", 'w')
    out_del = open(outfile + "_del.hist", 'w')
    out1 = open(outfile + "_error_markov_model", 'w')
    out2 = open(outfile + "_match_markov_model", 'w')
    out3 = open(outfile + "_first_match.hist", 'w')
    # Histograms: run length -> count.
    dic_match = {}
    dic_first_match = {}
    dic_mis = {}
    dic_ins = {}
    dic_del = {}
    match_list = {}   # (prev match len -> next match len) transition counts
    match_bin = {}    # binned copy of match_list, built near the end
    # Error-type transition counts; the "x0" variants mean the previous
    # error was immediately adjacent (zero-length match in between).
    error_list = {"mis/mis": 0, "mis/ins": 0, "mis/del": 0, "ins/mis": 0, "ins/ins": 0, "ins/del": 0,
                  "del/mis": 0, "del/ins": 0, "del/del": 0, "mis0/mis": 0, "mis0/ins": 0, "mis0/del": 0,
                  "del0/mis": 0, "del0/ins": 0, "del0/del": 0, "ins0/mis": 0, "ins0/ins": 0, "ins0/del": 0}
    first_error = {"mis": 0, "ins": 0, "del": 0}  # type of each read's first error
    # Pre-fill histograms so short runs always have contiguous keys.
    for x in xrange(0, 150):
        dic_match[x] = 0
        match_list[x] = {}
        for y in xrange(0, 150):
            match_list[x][y] = 0
        for key in match_bin.keys():
            # NOTE(review): match_bin is empty at this point, so this
            # inner loop never executes — dead code retained as-is.
            match_bin[key][x] = 0
    for x in xrange(0, 150):
        dic_first_match[x] = 0
    for x in xrange(0, 30):
        dic_mis[x] = 0
        dic_ins[x] = 0
        dic_del[x] = 0
    if alnm_ftype == "maf":
        # MAF path: alignments come as (ref line, query line) pairs;
        # the gapped sequences are compared column by column.
        with open(infile + "_besthit.maf", 'r') as f:
            for line in f:
                prev_match = 0
                prev_error = ""
                flag = True          # True until the read's first error is seen
                new = line.strip().split()
                ref = new[6].upper()
                new_line = next(f)   # consume the paired query line
                new = new_line.strip().split()
                query = new[6].upper()
                match = 0
                mismatch = 0
                ins = 0
                dele = 0
                for i in xrange(0, len(ref)):
                    if ref[i] == query[i]:
                        # A matching column closes any pending error run.
                        if mismatch != 0:
                            add_dict(mismatch, dic_mis)
                            mismatch = 0
                            if flag:
                                flag = False
                                first_error["mis"] += 1
                            else:
                                error_list[prev_error + "/" + "mis"] += 1
                            prev_error = "mis"
                        elif ins != 0:
                            add_dict(ins, dic_ins)
                            ins = 0
                            if flag:
                                flag = False
                                first_error["ins"] += 1
                            else:
                                error_list[prev_error + "/" + "ins"] += 1
                            prev_error = "ins"
                        elif dele != 0:
                            add_dict(dele, dic_del)
                            dele = 0
                            if flag:
                                flag = False
                                first_error["del"] += 1
                            else:
                                error_list[prev_error + "/" + "del"] += 1
                            prev_error = "del"
                        match += 1
                        if i == len(ref) - 1 and match != 0:
                            # Trailing match run at end of alignment.
                            add_match(prev_match, match, match_list)
                    elif ref[i] == '-':
                        # Gap in reference column = insertion in the query.
                        if match != 0:
                            if flag:
                                add_dict(match, dic_first_match)
                                prev_match = match
                            else:
                                add_dict(match, dic_match)
                                add_match(prev_match, match, match_list)
                                prev_match = match
                            match = 0
                        elif mismatch != 0:
                            # Error directly follows an error: record a
                            # zero-length match between them.
                            add_dict(mismatch, dic_mis)
                            dic_match[0] += 1
                            add_match(prev_match, 0, match_list)
                            prev_match = 0
                            mismatch = 0
                            if flag:
                                flag = False
                                first_error["mis"] += 1
                            else:
                                error_list[prev_error + "/" + "mis"] += 1
                            prev_error = "mis0"
                        ins += 1
                    elif query[i] == '-':
                        # Gap in query column = deletion from the reference.
                        if match != 0:
                            if flag:
                                add_dict(match, dic_first_match)
                                prev_match = match
                            else:
                                add_dict(match, dic_match)
                                add_match(prev_match, match, match_list)
                                prev_match = match
                            match = 0
                        elif mismatch != 0:
                            add_dict(mismatch, dic_mis)
                            dic_match[0] += 1
                            add_match(prev_match, 0, match_list)
                            prev_match = 0
                            mismatch = 0
                            if flag:
                                flag = False
                                first_error["mis"] += 1
                            else:
                                error_list[prev_error + "/" + "mis"] += 1
                            prev_error = "mis0"
                        dele += 1
                    else:
                        # Differing bases in both sequences = mismatch.
                        if match != 0:
                            if flag:
                                add_dict(match, dic_first_match)
                                prev_match = match
                            else:
                                add_dict(match, dic_match)
                                add_match(prev_match, match, match_list)
                                prev_match = match
                            match = 0
                        elif ins != 0:
                            add_dict(ins, dic_ins)
                            add_dict(match, dic_match)
                            add_match(prev_match, 0, match_list)
                            prev_match = 0
                            ins = 0
                            if flag:
                                flag = False
                                first_error["ins"] += 1
                            else:
                                error_list[prev_error + "/" + "ins"] += 1
                            prev_error = "ins0"
                        elif dele != 0:
                            add_dict(dele, dic_del)
                            add_dict(match, dic_match)
                            add_match(prev_match, 0, match_list)
                            prev_match = 0
                            dele = 0
                            if flag:
                                flag = False
                                first_error["del"] += 1
                            else:
                                error_list[prev_error + "/" + "del"] += 1
                            prev_error = "del0"
                        mismatch += 1
    else:
        # SAM path: derive events from the cs tag (or from CIGAR + MD
        # when no cs tag is present).
        sam_reader = HTSeq.SAM_Reader
        alnm_file_sam = infile + "_primary.sam"
        alignments = sam_reader(alnm_file_sam)
        for alnm in alignments:
            # if cs tag is provided, continue, else calculate it from MD and cigar first.
            try:
                list_hist, list_op_unique = parse_cs(alnm.optional_field('cs'))
            except:
                cs_string = get_cs(alnm.original_sam_line.split()[5], alnm.optional_field('MD'))
                list_hist, list_op_unique = parse_cs(cs_string)
            flag = True
            # NOTE(review): prev_error / prev_match are not reset per
            # alignment here, and at i == 0 list_op_unique[i - 1] wraps
            # to the LAST operator — verify this is intended.
            for i in range(0, len(list_op_unique)):
                curr_op = conv_op_to_word(list_op_unique[i])
                if curr_op != "skip":
                    if curr_op != "match":
                        exact_prev_op = conv_op_to_word(list_op_unique[i - 1])
                        if exact_prev_op != "match":
                            # Adjacent errors: mark predecessor with "0".
                            prev_error += "0"
                        if flag:
                            flag = False
                            first_error[curr_op] += 1
                        else:
                            error_list[prev_error + "/" + curr_op] += 1
                        prev_error = curr_op
                        if curr_op == "mis":
                            add_dict(list_hist[i], dic_mis)
                            if exact_prev_op != "match":
                                # Record the zero-length match between
                                # two adjacent errors.
                                add_dict(0, dic_match)
                                add_match(prev_match, 0, match_list)
                                prev_match = 0
                        elif curr_op == "del":
                            add_dict(list_hist[i], dic_del)
                        elif curr_op == "ins":
                            add_dict(list_hist[i], dic_ins)
                    else:
                        match = list_hist[i]
                        if flag:
                            # Match run before the first error.
                            add_dict(match, dic_first_match)
                            prev_match = match
                        else:
                            if i == len(list_op_unique) - 1:
                                # Trailing run: transition only, not histogrammed.
                                add_match(prev_match, match, match_list)
                            else:
                                add_dict(match, dic_match)
                                add_match(prev_match, match, match_list)
                                prev_match = match
    # write the histogram for other matches and errors:
    total_match = 0
    total_mis = 0
    total_ins = 0
    total_del = 0
    out_match.write("number of bases\tMatches:\n")
    for key in dic_match:
        out_match.write(str(key) + "\t" + str(dic_match[key]) + "\n")
        total_match += key * dic_match[key]
    out_match.close()
    out_mis.write("number of bases\tMismatches:\n")
    for key in dic_mis:
        out_mis.write(str(key) + "\t" + str(dic_mis[key]) + "\n")
        total_mis += key * dic_mis[key]
    out_mis.close()
    out_ins.write("number of bases\tInsertions:\n")
    for key in dic_ins:
        out_ins.write(str(key) + "\t" + str(dic_ins[key]) + "\n")
        total_ins += key * dic_ins[key]
    out_ins.close()
    out_del.write("number of bases\tDeletions:\n")
    for key in dic_del:
        out_del.write(str(key) + "\t" + str(dic_del[key]) + "\n")
        total_del += key * dic_del[key]
    out_del.close()
    out_error_rate = open(outfile + "_error_rate.tsv", 'w')
    out_error_rate.write("Mismatch rate:\t" + str(total_mis * 1.0 / (total_mis + total_match + total_del)) + '\n')
    out_error_rate.write("Insertion rate:\t" + str(total_ins * 1.0 / (total_mis + total_match + total_del)) + '\n')
    out_error_rate.write("Deletion rate:\t" + str(total_del * 1.0 / (total_mis + total_match + total_del)) + '\n')
    out_error_rate.write("Total error rate:\t" + str(
        (total_mis + total_ins + total_del) * 1.0 / (total_mis + total_match + total_del)) + '\n')
    out_error_rate.close()
    # Row sums of the error transition matrix, used to normalize rows.
    predecessor = {"mis": error_list["mis/mis"] + error_list["mis/ins"] + error_list["mis/del"],
                   "ins": error_list["ins/mis"] + error_list["ins/ins"] + error_list["ins/del"],
                   "del": error_list["del/mis"] + error_list["del/ins"] + error_list["del/del"],
                   "mis0": error_list["mis0/mis"] + error_list["mis0/ins"] + error_list["mis0/del"],
                   "ins0": error_list["ins0/mis"] + error_list["ins0/ins"] + error_list["ins0/del"],
                   "del0": error_list["del0/mis"] + error_list["del0/ins"] + error_list["del0/del"]}
    out1.write("succedent \tmis\tins\tdel\n")
    num_of_first = sum(first_error.values())
    out1.write(
        "start\t" + str(first_error["mis"] * 1.0 / num_of_first) + "\t" + str(first_error["ins"] * 1.0 / num_of_first) +
        "\t" + str(first_error["del"] * 1.0 / num_of_first))
    for x in ["mis", "ins", "del", "mis0", "ins0", "del0"]:
        out1.write("\n" + x)
        for y in ["mis", "ins", "del"]:
            if predecessor[x] == 0:
                out1.write("\t" + "0")
            else:
                out1.write("\t" + str(error_list[x + "/" + y] * 1.0 / predecessor[x]))
    # Match markov model
    count = 0
    for k1 in sorted(match_list.keys()):
        count += sum(match_list[k1].values())
    # 15 bins for the precedent in the match pair, each bin has roughly count/15 match events
    # NOTE(review): '/' is float division on Python 3 but integer
    # division on Python 2 — the binning differs slightly between them.
    bin_size = count / 15
    k_of_bin = 0
    k_of_match_list = 0
    last_k = 0
    count_each_bin = {}
    while k_of_bin < 15:
        if k_of_match_list >= len(match_list):
            break
        match_bin[k_of_bin] = {}
        for i in xrange(0, len(match_list)):
            match_bin[k_of_bin][i] = 0
        tmp_count = 0
        # Greedily grow the bin, stopping when adding the next row would
        # overshoot bin_size by more than it undershoots.
        while tmp_count < bin_size and k_of_match_list < len(match_list):
            new_added = sum(match_list[k_of_match_list].values())
            if abs(tmp_count + new_added - bin_size) > abs(tmp_count - bin_size) and tmp_count != 0:
                break
            else:
                tmp_count += new_added
                k_of_match_list += 1
        for k1 in xrange(last_k, k_of_match_list):
            for k2 in xrange(0, len(match_list[k1])):
                match_bin[k_of_bin][k2] += match_list[k1][k2]
        count_each_bin[k_of_bin] = [(last_k, k_of_match_list), tmp_count]
        last_k = k_of_match_list
        k_of_bin += 1
    if k_of_match_list < len(match_list):
        # Fold any leftover rows into the last bin.
        tmp_count = 0
        for k1 in xrange(last_k, len(match_list)):
            tmp_count += sum(match_list[k1].values())
            for k2 in xrange(0, len(match_list)):
                match_bin[k_of_bin - 1][k2] += match_list[k1][k2]
        count_each_bin[k_of_bin - 1][1] += tmp_count
    # Write the cumulative distribution of each bin, column per bin.
    count_prob = [0] * len(match_bin)
    out2.write("bins\t" + "\t".join("%s-%s" % tup[0] for tup in count_each_bin.values()) + '\n')
    for i in xrange(0, len(match_list)):
        out2.write(str(i) + "-" + str(i + 1))
        for k_of_bin in match_bin:
            if count_each_bin[k_of_bin][1] == 0:
                out2.write("\t" + "0")
            else:
                count_prob[k_of_bin] += match_bin[k_of_bin][i] * 1.0 / count_each_bin[k_of_bin][1]
                out2.write("\t" + str(count_prob[k_of_bin]))
        out2.write('\n')
    out2.close()
    # First match profile:
    out3.write("bin\t0-50000\n")
    count_prob = 0
    total_first_match = sum(dic_first_match.values())
    for i in xrange(0, len(dic_first_match)):
        count_prob += dic_first_match[i] * 1.0 / total_first_match
        out3.write(str(i) + "-" + str(i + 1) + "\t" + str(count_prob) + '\n')
    out3.close()
"""ping.py
NXOS parsers for the following show commands:
* ping {addr}
* ping {addr} source {source} count {count}
* ping {addr} vrf {vrf} count {count} size {size}
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import (Any,
Optional, Use, SchemaTypeError, Schema)
class PingSchema(MetaParser):
    """ Schema for
        * ping {addr}
        * ping {addr} source {source} count {count}
        * ping {addr} vrf {vrf} count {count} size {size}
    """

    # Parsed structure: a single 'ping' dict with target information,
    # optional per-probe result lines, and aggregate statistics.
    schema = {
        'ping': {
            'address': str,                 # target as echoed by the device
            'ip': str,                      # resolved IP address
            'data_bytes': int,
            Optional('repeat'): int,        # requested probe count
            Optional('timeout_secs'): int,
            Optional('source'): str,
            Optional('result_per_line'): list,  # raw per-probe output lines
            'statistics': {
                'send': int,
                'received': int,
                'success_rate_percent': float,
                Optional('round_trip'): {
                    'min_ms': float,
                    'avg_ms': float,
                    'max_ms': float,
                }
            }
        }
    }
class Ping(PingSchema):
    """ parser for
        * ping {addr} source {source} count {count}
        * ping {addr} vrf {vrf} count {count} size {size}
    """

    cli_command = [
        'ping {addr}',
        'ping {addr} source {source} count {count}',
        'ping {addr} vrf {vrf} count {count} size {size}',
    ]

    def cli(self,
            addr=None,
            vrf=None,
            count=None,
            source=None,
            size=None,
            ttl=None,
            timeout=None,
            tos=None,
            dscp=None,
            command=None,
            rapid=None,
            do_not_fragment=None,
            validate=None,
            output=None):
        """Build and execute the ping command, then parse its output.

        Args mirror ping CLI options; ttl/tos/dscp/rapid/validate are
        accepted for signature compatibility but not used when building
        the command here. `command` overrides the built command string;
        `output` bypasses device execution entirely.

        Returns:
            dict matching PingSchema (empty if nothing parsed).
        """
        if not output:
            # Assemble the command from the provided options.
            cmd = []
            if addr and vrf:
                cmd.append('ping {addr} vrf {vrf}'.format(vrf=vrf, addr=addr))
            elif addr:
                cmd.append('ping {addr}'.format(addr=addr))
            if source:
                # IPv4 dotted-quad or anything containing ':' (IPv6) is
                # treated as an address; otherwise as an interface name.
                if re.match(r'\d+\.\d+\.\d+\.\d+', source) or ':' in source:
                    cmd.append('source {source}'.format(source=source))
                else:
                    cmd.append('source-interface {source}'.format(source=source))
            if count:
                cmd.append('count {count}'.format(count=count))
            if size:
                cmd.append('packet-size {size}'.format(size=size))
            if timeout:
                cmd.append('timeout {timeout}'.format(timeout=timeout))
            if do_not_fragment:
                cmd.append('df-bit')
            cmd = ' '.join(cmd)
            if command:
                cmd = command
            out = self.device.execute(cmd)
        else:
            out = output

        ret_dict = {}
        result_per_line = []

        # PING 10.2.2.2 (10.2.2.2): 56 data bytes
        # PING R2_xr (10.2.2.2) from 10.3.3.3: 56 data bytes
        p1 = re.compile(
            r'^PING\s+(?P<address>[\S]+)\s+\((?P<ip>\S+)\)(\s+from\s+(?P<source>\S+))?:\s+(?P<data_bytes>\d+)\s+data\s+bytes'
        )

        # 64 bytes from 10.2.2.2: icmp_seq=0 ttl=254 time=4.669 ms
        p2 = re.compile(r'^\d+\s+bytes\s+from')

        # Request 0 timed out
        p2_1 = re.compile(r'^Request\s+\d+\s+timed\s+out')

        # ping: sendto 10.1.1.5 64 chars, No route to host
        p2_2 = re.compile(r'^ping:\ssendto\s')

        # 10 packets transmitted, 0 packets received, 100.00% packet loss
        p3 = re.compile(
            r'^(?P<send>\d+)\s+packets\s+transmitted,\s+(?P<received>\d+)\s+packets\s+received,\s+(?P<loss_percent>\S+)%\s+packet\s+loss'
        )

        # round-trip min/avg/max = 2.334/3.74/5.13 ms
        p4 = re.compile(
            r'^round-trip\smin/avg/max\s=\s+(?P<min_ms>\d+\.\d+)/(?P<avg_ms>\d+\.\d+)/(?P<max_ms>\d+\.\d+)\s+ms'
        )

        ping_dict = {}
        for line in out.splitlines():
            line = line.strip()

            # PING 10.2.2.2 (10.2.2.2): 56 data bytes
            # PING R2_xr (10.2.2.2) from 10.3.3.3: 56 data bytes
            m = p1.match(line)
            if m:
                group = m.groupdict()
                ping_dict = ret_dict.setdefault('ping', {})
                ping_dict.update({
                    'data_bytes': int(group['data_bytes']),
                    'address': group['address'],
                    'ip': group['ip'],
                })
                # 'repeat' falls back to the NX-OS default of 5 probes
                # when no explicit count was requested.
                if count:
                    ping_dict.update({'repeat': int(count)})
                else:
                    ping_dict.update({'repeat': 5})
                if group['source']:
                    ping_dict.update({'source': group['source']})
                continue

            # 64 bytes from 10.2.2.2: icmp_seq=0 ttl=254 time=4.669 ms
            m = p2.match(line)
            if m:
                group = m.groupdict()  # no named groups; kept for symmetry
                result_per_line.append(line)
                ping_dict.update({'result_per_line': result_per_line})
                continue

            # Request 0 timed out
            m = p2_1.match(line)
            if m:
                group = m.groupdict()
                result_per_line.append(line)
                ping_dict.update({'result_per_line': result_per_line})
                continue

            # ping: sendto 10.1.1.5 64 chars, No route to host
            m = p2_2.match(line)
            if m:
                group = m.groupdict()
                result_per_line.append(line)
                ping_dict.update({'result_per_line': result_per_line})
                continue

            # 10 packets transmitted, 0 packets received, 100.00% packet loss
            m = p3.match(line)
            if m:
                group = m.groupdict()
                stat_dict = ping_dict.setdefault('statistics', {})
                stat_dict.update({
                    'success_rate_percent':
                    float(100 - float(group['loss_percent'])),
                    'received':
                    int(group['received']),
                    'send':
                    int(group['send']),
                })
                continue

            # round-trip min/avg/max = 2.334/3.74/5.13 ms
            m = p4.match(line)
            if m:
                group = m.groupdict()
                # Only attach round-trip data under an existing
                # statistics block (it always follows the summary line).
                if 'statistics' in ping_dict:
                    ping_dict['statistics'].setdefault(
                        'round_trip', {}).update({
                            'min_ms':
                            float(group['min_ms']),
                            'avg_ms':
                            float(group['avg_ms']),
                            'max_ms':
                            float(group['max_ms']),
                        })
        return ret_dict
<gh_stars>0
# coding: utf-8 # noqa: E902
#!/usr/bin/python3 # noqa: E265
"""
Manage efriends front end.
Module: views.py
Class: Views/0 inherits object
Author: PQ <pq_rfw @ pm.me>
"""
import tkinter as tk
from dataclasses import dataclass
from os import path
from pprint import pprint as pp # noqa: F401
from tkinter import filedialog, messagebox, ttk
import requests
import webview
from PIL import Image, ImageTk
from controls import Controls
from reports import Reports
from structs import Structs
from texts import Texts
from utils import Utils
# Module-level singletons shared by all Views methods.
CN = Controls()
RP = Reports()
ST = Structs()
TX = Texts()
UT = Utils()
class Views(object):
"""Manage Tkinter GUI widgets for efriends app."""
    def __init__(self):
        """Initialize the Views object.

        Configure and start up the app.
        """
        # Export formats offered for report/visualization output.
        self.foutypes = ["json", "csv", "html", "pdf", "df"]
        # Environment and backing-store setup precede widget creation.
        CN.check_python_version()
        CN.set_erep_headers()
        CN.configure_database()
        CN.create_log('INFO')
        CN.create_bkupdb()
        self.set_basic_interface()
        if not self.check_user():
            # No stored user yet: collection and visualization windows
            # are unusable until credentials are configured.
            self.set_menu_item(TX.menu.m_win, TX.menu.i_coll, "disable")
            self.set_menu_item(TX.menu.m_win, TX.menu.i_viz, "disable")
        self.make_config_frame()
    @dataclass
    class buffer:
        """Data buffer for Views object."""
        # Name of the frame currently shown ("config", "collect",
        # "viz") or None when no secondary frame is open.
        current_frame: str = None
# Helpers
def set_menu_item(self,
p_menu: str, p_item: str,
p_action: str):
"""Enable or disable a menu item.
Args:
p_menu (str): Name of the menu
p_item (str): Name of the menu item
p_action (str): "enable" or "disable"
"""
w_state = "normal" if p_action == "enable"\
else "disabled"
if p_menu == TX.menu.m_file:
if p_item == TX.menu.i_close:
# First integer refers to item's order in menu
self.file_menu.entryconfig(0, state=w_state)
elif p_menu == TX.menu.m_win:
if p_item == TX.menu.i_cfg:
self.win_menu.entryconfig(0, state=w_state)
if p_item == TX.menu.i_coll:
self.win_menu.entryconfig(1, state=w_state)
if p_item == TX.menu.i_viz:
self.win_menu.entryconfig(2, state=w_state)
# Event handlers
def exit_appl(self):
"""Quit the app."""
CN.close_controls()
self.win_root.quit()
def close_frame(self):
"""Remove and destroy the currently-opened frame."""
if self.buffer.current_frame == "config":
self.cfg_frame.grid_forget()
self.cfg_frame.destroy()
elif self.buffer.current_frame == "collect":
self.collect_frame.grid_forget()
self.collect_frame.destroy()
elif self.buffer.current_frame == "viz":
self.viz_frame.grid_forget()
self.viz_frame.destroy()
self.set_menu_item(TX.menu.m_win, TX.menu.i_cfg, "enable")
self.set_menu_item(TX.menu.m_win, TX.menu.i_coll, "enable")
self.set_menu_item(TX.menu.m_win, TX.menu.i_viz, "enable")
self.set_menu_item(TX.menu.m_file, TX.menu.i_close, "disable")
setattr(self.buffer, 'current_frame', None)
self.win_root.title(TX.title.t_app)
def show_user_guide(self):
"""Display User Guide wiki page in browser window."""
url = TX.urls.h_user_guide
webview.create_window(TX.title.t_guide, url)
webview.start()
def show_about(self):
"""Display About wiki page in browser window."""
url = TX.urls.h_about
webview.create_window(TX.title.t_about, url)
webview.start()
def save_log_level(self):
"""Handle updates to log level."""
log_level = str(self.log_lvl_val.get()).strip()
ok = CN.create_log(log_level)
if ok:
self.show_message(ST.MsgLevel.INFO,
TX.msg.n_log_cfg, TX.msg.n_logging_on)
def check_user(self) -> bool:
"""See if user record already exists.
Returns:
bool: True if user record exists.
"""
usrd, _ = CN.get_user_db_record()
if usrd is None:
return False
CN.enable_logging() # Default to INFO
self.set_menu_item(TX.menu.m_win, TX.menu.i_coll, "enable")
self.make_user_image()
return True
def save_user_config(self) -> tuple:
"""Handle updates to user credentials."""
erep_email = str(self.email.get()).strip()
erep_passw = str(self.passw.get()).strip()
if erep_email and erep_passw:
id_info = CN.verify_citizen_credentials(erep_email,
erep_passw, True)
CN.write_user_rec(id_info.profile_id, erep_email, erep_passw)
citzn_rec = CN.get_ctzn_profile_from_erep(id_info.profile_id,
True)
CN.write_ctzn_rec(citzn_rec)
detail = TX.msg.n_connected + "\n" + TX.msg.n_user_cfg + "\n"
detail += TX.msg.n_greet.replace("[user]", id_info.user_name)
self.show_message(ST.MsgLevel.INFO,
TX.msg.n_user_on, detail)
self.check_user()
def save_apikey_config(self):
"""Handle updates to user API Key."""
erep_apikey = str(self.apikey.get()).strip()
if erep_apikey:
usrd, _ = CN.get_user_db_record()
msglvl = ST.MsgLevel.INFO
if usrd is not None:
if CN.verify_api_key(usrd.user_erep_profile_id, erep_apikey):
detail = TX.msg.n_user_key_on
CN.write_user_rec(usrd.user_erep_profile_id,
usrd.user_erep_email,
usrd.user_erep_password,
erep_apikey)
else:
detail = TX.shit.f_user_key_fail
msglvl = ST.MsgLevel.ERROR
else:
detail = TX.shit.f_user_key_fail
msglvl = ST.MsgLevel.ERROR
self.show_message(msglvl, TX.msg.n_user_key_test, detail)
def collect_friends(self):
"""Login to and logout of erep using user credentials."""
usrd, _ = CN.get_user_db_record()
detail = CN.get_erep_friends_data(usrd.user_erep_profile_id)
self.show_message(ST.MsgLevel.INFO, TX.msg.n_got_friends, detail)
def get_citizen_by_id(self):
"""Get user profile data from eRepublik."""
msg = None
citizen_id = str(self.citz_byid.get()).strip()
if citizen_id:
ctzn_d, _ = CN.get_ctzn_db_rec_by_id(citizen_id)
if ctzn_d is not None:
msg = TX.msg.n_citzn_on_db
detail = TX.msg.n_updating_citzn
is_friend = True if ctzn_d.is_user_friend == "True"\
else False
else:
msg = TX.msg.n_new_citzn
detail = TX.msg.n_adding_citzn
is_friend_val = self.isfriend_id_chk.get()
is_friend = True if is_friend_val == 1 else False
call_ok = CN.get_erep_citizen_by_id(citizen_id,
False, is_friend)
if call_ok:
self.show_message(ST.MsgLevel.INFO, msg, detail)
def get_citizen_by_name(self):
"""Look up Citizen profile by Name."""
msg = None
usrd, _ = CN.get_user_db_record()
apikey = usrd.user_tools_api_key
if apikey in (None, "None", ""):
msg, detail = self.update_msg("", "",
TX.msg.n_no_user_key,
TX.msg.n_key_required)
else:
citizen_nm = str(self.citz_bynm.get()).strip()
if citizen_nm:
msglvl = ST.MsgLevel.INFO
ctzn_d, _ = CN.get_citizen_db_rec_by_nm(citizen_nm)
if ctzn_d is not None:
msg = TX.msg.n_citzn_on_db
detail = TX.msg.n_updating_citzn
is_friend = True if ctzn_d.is_user_friend == "True"\
else False
else:
is_friend_val = self.isfriend_nm_chk.get()
is_friend = True if is_friend_val == 1 else False
ok, detail =\
CN.get_erep_citizen_by_nm(apikey, citizen_nm,
False, is_friend)
if ok:
msg = TX.msg.n_new_citzn
else:
msg = TX.msg.n_problem
msglvl = ST.MsgLevel.WARN
self.show_message(msglvl, msg, detail)
def select_profile_ids_file(self):
"""Select a file containing list of profile IDs."""
ftypes = (("All", "*.*"), ("CSV", "*.csv"), ("Text", "*.txt"))
idfile = filedialog.askopenfilename(initialdir=UT.get_home(),
title=TX.button.b_pick_file,
filetypes=ftypes)
self.idf_loc.insert(0, idfile)
def refresh_citzns_from_file(self):
"""Collect/refresh citizen data based on a list of profile IDs.
This will add any new IDs not yet on DB and refresh data for
those already on the data base.
"""
msg = None
id_file_path = str(self.idf_loc.get()).strip()
if id_file_path not in (None, "None", ""):
call_ok, detail =\
CN.refresh_citizen_data_from_file(id_file_path)
self.show_message(ST.MsgLevel.INFO,
TX.msg.n_id_file_on, detail)
def refresh_ctizns_from_db(self):
"""Refresh citizen data based on active profile IDs on DB."""
msg = None
msglvl = ST.MsgLevel.INFO
ok, detail = CN.refresh_ctzn_data_from_db()
if ok:
msg = TX.msg.n_id_data_on
else:
msg = TX.msg.n_problem
msglvl = ST.MsgLevel.WARN
self.show_message(msglvl, msg, detail)
    def run_visualization(self, p_sql_nm: str):
        """Execute processes to run, display results for selected query.

        Args:
            p_sql_nm (str): Legit SQL id
        """
        file_types = list()
        # Debug output of the selection state (left in place on purpose).
        pp(("p_sql_nm", p_sql_nm))
        pp(("self.chx", self.chx))
        # Gather every export format whose checkbox is ticked for this query.
        for fty in self.foutypes:
            on_off = self.chx[p_sql_nm][fty].get()
            if on_off == 1:
                file_types.append(fty)
        msglvl = ST.MsgLevel.INFO
        if file_types:
            results = RP.run_citizen_viz(p_sql_nm, file_types)
            file_path = path.join(UT.get_home(), TX.dbs.cache_path) + "/"
            msg = TX.msg.n_files_exported
            msg = msg.replace("[cache]", file_path)
            detail = ""
            for ftyp, val in results.items():
                # Report each result path relative to the cache directory.
                detail += "{}\n".format(val.replace(file_path, ""))
                if ftyp == "html":
                    # HTML exports open immediately in a browser window.
                    webview.create_window(ftyp, val)
                    webview.start()
        else:
            # No format selected: nothing to run, report an error.
            msg = TX.shit.f_no_go
            detail = TX.shit.f_no_format
            msglvl = ST.MsgLevel.ERROR
        self.show_message(msglvl, msg, detail)
# Constructors
    def make_viz_frame(self):
        """Construct frame for reporting, visualizing citizen data."""

        def prep_viz_options():
            """Pull in SQL and Plot files."""
            self.qry = dict()   # sql file id -> human-readable description
            self.chx = dict()   # sql file id -> {format: IntVar checkbox}
            sql_files = RP.get_sql_files()
            for fid, fnm in sql_files.items():
                self.qry[fid] = RP.get_query_desc(fid)
            # self.plot = dict()

        def set_context():
            """Adjust menus, set frame."""
            setattr(self.buffer, 'current_frame', 'viz')
            self.win_root.title(TX.title.t_viz)
            self.set_menu_item(TX.menu.m_file, TX.menu.i_close, "enable")
            self.set_menu_item(TX.menu.m_win, TX.menu.i_cfg, "enable")
            self.set_menu_item(TX.menu.m_win, TX.menu.i_coll, "enable")
            self.set_menu_item(TX.menu.m_win, TX.menu.i_viz, "disable")
            self.viz_frame = tk.Frame(self.win_root,
                                      width=400, padx=5, pady=5)
            self.viz_frame.grid(sticky=tk.N)
            self.win_root.grid_rowconfigure(1, weight=1)
            self.win_root.grid_columnconfigure(1, weight=1)

        def set_labels():
            """Define widget labels for visualization frame."""
            # One label row per available query.
            row_cnt = 0
            for fid, desc in self.qry.items():
                ttk.Label(self.viz_frame, text=desc).grid(
                    row=row_cnt, column=0, sticky=tk.E, padx=5)
                row_cnt += 1

        def set_inputs():
            """Define input widgets for visualization frame."""

            def set_export_options(p_sql_id: str, p_chx_ty: str,
                                   p_row: int, p_col: int):
                """Show checkboxes: JSON, CSV, HTML, PDF, DataFrame.

                Args:
                    p_sql_id (str): Legit ID of a SQL file
                    p_chx_ty (str): json, csv, etc..
                    p_row (int): row of frame to display checkbox
                    p_col (int): col of frame to display checkbox
                """
                chx_txt = {"json": TX.button.c_json,
                           "csv": TX.button.c_csv,
                           "html": TX.button.c_html,
                           "pdf": TX.button.c_pdf,
                           "df": TX.button.c_dataframe}
                if p_chx_ty in self.foutypes:
                    chx_nm = chx_txt[p_chx_ty]
                    self.chx[p_sql_id][p_chx_ty] = tk.IntVar(value=0)
                    ttk.Checkbutton(self.viz_frame, text=chx_nm,
                                    variable=self.chx[p_sql_id][p_chx_ty],
                                    onvalue=1, offvalue=0).grid(
                                        row=p_row, column=p_col,
                                        sticky=tk.E, padx=5)

            row_cnt = 0
            # This is problematic. Only gets sql ID (fid) from last item built.
            for fid, desc in self.qry.items():
                self.chx[fid] = dict()
                for fky, fty in enumerate(self.foutypes):
                    set_export_options(fid, fty, row_cnt, fky + 2)
                row_cnt += 1

        # make_viz_frame() MAIN:
        self.close_frame()
        prep_viz_options()
        set_context()
        set_labels()
        set_inputs()
def make_collect_frame(self):
    """Construct frame for collecting profile IDs, citizen data, etc.

    Bug fix: the Entry widgets were previously assigned the result of
    .grid(), which returns None, leaving self.citz_byid, self.citz_bynm
    and self.idf_loc unusable by their button handlers. Widgets are now
    created first and gridded afterwards.
    """
    def set_context():
        """Adjust menus, set frame."""
        setattr(self.buffer, 'current_frame', 'collect')
        self.win_root.title(TX.title.t_coll)
        self.set_menu_item(TX.menu.m_file, TX.menu.i_close, "enable")
        self.set_menu_item(TX.menu.m_win, TX.menu.i_coll, "disable")
        self.set_menu_item(TX.menu.m_win, TX.menu.i_cfg, "enable")
        self.collect_frame = tk.Frame(self.win_root,
                                      width=400, padx=5, pady=5)
        self.collect_frame.grid(sticky=tk.N)
        self.win_root.grid_rowconfigure(1, weight=1)
        self.win_root.grid_columnconfigure(1, weight=1)

    def set_labels():
        """Define widget labels for collect frame."""
        lbl_txt = [TX.label.l_getfriends,
                   TX.label.l_getcit_byid,
                   TX.label.l_getcit_bynm,
                   TX.label.l_idf_loc,
                   TX.label.l_db_refresh]
        # One label per row, right-aligned in column 0.
        for row_num, label_text in enumerate(lbl_txt):
            ttk.Label(self.collect_frame, text=label_text).grid(
                row=row_num, column=0, sticky=tk.E, padx=5)

    def set_inputs():
        """Define input widgets for collect frame."""
        def set_friends_list_input():
            """Collect / refresh friends data."""
            ttk.Button(self.collect_frame,
                       text=TX.button.b_getfriends,
                       command=self.collect_friends,
                       state=tk.NORMAL).grid(
                row=0, column=1, sticky=tk.W, padx=5)

        def set_ctzn_by_id_input():
            """Refresh one citizen by ID."""
            # Create the Entry first; .grid() returns None, so the old
            # `self.citz_byid = ttk.Entry(...).grid(...)` stored None.
            self.citz_byid = ttk.Entry(self.collect_frame, width=25)
            self.citz_byid.grid(row=1, column=1, sticky=tk.W, padx=5)
            self.isfriend_id_chk = tk.IntVar()
            ttk.Checkbutton(self.collect_frame,
                            text=TX.button.c_is_friend,
                            variable=self.isfriend_id_chk,
                            onvalue=1, offvalue=0).grid(
                row=1, column=2, sticky=tk.E, padx=5)
            ttk.Button(self.collect_frame,
                       text=TX.button.b_get_ctzn_data,
                       command=self.get_citizen_by_id,
                       state=tk.NORMAL).grid(
                row=1, column=3, sticky=tk.W, padx=5)

        def set_ctzn_by_nm_input():
            """Refresh one citizen by Name."""
            # Name lookups need an eRepublik Tools API key; disable the
            # widgets when none is configured.
            widget_state = tk.NORMAL\
                if usrd.user_tools_api_key not in (None, "None", "")\
                else tk.DISABLED
            # Same .grid()-returns-None fix as set_ctzn_by_id_input().
            self.citz_bynm = ttk.Entry(self.collect_frame, width=25,
                                       state=widget_state)
            self.citz_bynm.grid(row=2, column=1, sticky=tk.W, padx=5)
            self.isfriend_nm_chk = tk.IntVar()
            ttk.Checkbutton(self.collect_frame,
                            text=TX.button.c_is_friend,
                            variable=self.isfriend_nm_chk,
                            onvalue=1, offvalue=0).grid(
                row=2, column=2, sticky=tk.E, padx=5)
            ttk.Button(self.collect_frame,
                       text=TX.button.b_get_ctzn_data,
                       command=self.get_citizen_by_name,
                       state=widget_state).grid(
                row=2, column=3, sticky=tk.W, padx=5)

        def set_id_by_list_input():
            """Read in a list of Profile IDs from a file."""
            # Same .grid()-returns-None fix.
            self.idf_loc = ttk.Entry(self.collect_frame, width=50)
            self.idf_loc.grid(row=3, column=1, sticky=tk.W, padx=5)
            ttk.Button(self.collect_frame,
                       text=TX.button.b_get_file,
                       command=self.select_profile_ids_file).grid(
                row=3, column=2, sticky=tk.W, padx=5)
            ttk.Button(self.collect_frame,
                       text=TX.button.b_get_ctzn_data,
                       command=self.refresh_citzns_from_file).grid(
                row=3, column=3, sticky=tk.W, padx=5)

        def set_db_refresh_input():
            """Refresh all active citizen profile IDs on the database."""
            ttk.Button(self.collect_frame,
                       text=TX.button.b_get_ctzn_data,
                       command=self.refresh_ctizns_from_db).grid(
                row=4, column=1, sticky=tk.W, padx=5)

        # set_inputs() main:
        set_friends_list_input()
        set_ctzn_by_id_input()
        if usrd is not None:
            set_ctzn_by_nm_input()
        set_id_by_list_input()
        set_db_refresh_input()

    # make_collect_frame() MAIN:
    self.close_frame()
    usrd, _ = CN.get_user_db_record()
    set_context()
    set_labels()
    set_inputs()
def make_config_frame(self):
    """Construct frame for entering configuration info.

    Builds the config window: log level selector, eRepublik email and
    password, and the eRepublik Tools API key, each with its own Save
    button. Defaults are taken from the stored user DB record when set.
    """
    def prep_cfg_data():
        """Handle empty and None values.

        Overrides the blank defaults only with stored values that are
        real (neither None nor the literal string "None").
        """
        if usrd is not None:
            if usrd.user_erep_email not in (None, "None"):
                usrd_dflt["email"] = usrd.user_erep_email
            if usrd.user_erep_password not in (None, "None"):
                usrd_dflt["passw"] = usrd.user_erep_password
            if usrd.user_tools_api_key not in (None, "None"):
                usrd_dflt["apikey"] = usrd.user_tools_api_key

    def set_context():
        """Set root and frame. Enable/disable menu items."""
        setattr(self.buffer, 'current_frame', 'config')
        self.win_root.title(TX.title.t_cfg)
        # Close is now meaningful; disable the menu entry for the frame
        # we are already showing.
        self.set_menu_item(TX.menu.m_file, TX.menu.i_close, "enable")
        self.set_menu_item(TX.menu.m_win, TX.menu.i_cfg, "disable")
        self.set_menu_item(TX.menu.m_win, TX.menu.i_coll, "enable")
        self.cfg_frame = tk.Frame(self.win_root,
                                  width=400, padx=5, pady=5)
        self.cfg_frame.grid(sticky=tk.N)
        self.win_root.grid_rowconfigure(1, weight=1)
        self.win_root.grid_columnconfigure(1, weight=1)

    def set_labels():
        """Define and assign text to data entry labels."""
        lbl_txt = [TX.label.l_log_lvl,
                   TX.label.l_email,
                   TX.label.l_passw,
                   TX.label.l_apikey]
        # One label per row, right-aligned in column 0.
        for row_num, label_text in enumerate(lbl_txt):
            ttk.Label(self.cfg_frame, text=label_text).grid(
                row=row_num, column=0, sticky=tk.E, padx=5)

    def set_inputs():
        """Define and assign defaults to data input widgets."""
        def set_log_level_input():
            """Set logging level."""
            self.log_lvl_val = tk.StringVar(self.cfg_frame)
            # TODO: Store current log level in a config file
            # if cf_dflt["log_lvl"]:
            #     self.log_lvl_val.set(cf_dflt["log_lvl"])
            # else
            self.log_lvl_val.set('INFO')
            self.log_level = tk.OptionMenu(self.cfg_frame,
                                           self.log_lvl_val,
                                           *ST.LogLevel.keys())
            self.log_level.grid(row=0, column=1, sticky=tk.W, padx=5)
            self.log_save_btn =\
                ttk.Button(self.cfg_frame,
                           text=TX.button.b_save_log_cfg,
                           command=self.save_log_level)
            self.log_save_btn.grid(row=0, column=3, sticky=tk.W, padx=5)

        def set_erep_email_input():
            """User's eRepublik login email credential."""
            self.email = ttk.Entry(self.cfg_frame, width=25)
            email_val = tk.StringVar(self.cfg_frame)
            email_val.set(usrd_dflt["email"])
            # Pre-fill with the stored default, if any.
            self.email.insert(0, email_val.get())
            self.email.grid(row=1, column=1, sticky=tk.W, padx=5)

        def set_erep_password_input():
            """User's eRepublik login password credential. Hidden input."""
            self.passw = ttk.Entry(self.cfg_frame, width=25, show="*")
            passw_val = tk.StringVar(self.cfg_frame)
            passw_val.set(usrd_dflt["passw"])
            self.passw.insert(0, passw_val.get())
            self.passw.grid(row=2, column=1, sticky=tk.W, padx=5)
            # One Save button persists both email and password.
            self.creds_save_btn =\
                ttk.Button(self.cfg_frame,
                           text=TX.button.b_save_creds,
                           command=self.save_user_config)
            self.creds_save_btn.grid(row=2, column=3, sticky=tk.W, padx=5)

        def set_apikey_input():
            """User's eRepublik Tools API key. Hidden input."""
            self.apikey = ttk.Entry(self.cfg_frame, width=30, show="*")
            apikey_val = tk.StringVar(self.cfg_frame)
            apikey_val.set(usrd_dflt["apikey"])
            self.apikey.insert(0, apikey_val.get())
            self.apikey.grid(row=3, column=1, sticky=tk.W, padx=5)
            self.apikey_save_btn =\
                ttk.Button(self.cfg_frame,
                           text=TX.button.b_save_apikey,
                           command=self.save_apikey_config)
            self.apikey_save_btn.grid(row=3, column=3, sticky=tk.W, padx=5)

        set_log_level_input()
        set_erep_email_input()
        set_erep_password_input()
        set_apikey_input()

    # make_config_frame() MAIN:
    self.close_frame()
    usrd_dflt = {"email": "", "passw": "", "apikey": ""}
    usrd, _ = CN.get_user_db_record()
    prep_cfg_data()
    set_context()
    set_labels()
    set_inputs()
def show_message(self,
                 p_msg_level: str,
                 p_msg: str,
                 p_detail: str):
    """Construct and display feedback message.

    Args:
        p_msg_level (str in ST.MessageLevel.keys())
        p_msg (str) short message text
        p_detail (str) lengthier message text
    """
    detail = "\n{}".format(p_detail)
    # Use the messagebox flavor that matches the severity; previously
    # every level was shown with showwarning().
    if p_msg_level == "ERROR":
        messagebox.showerror(title=TX.title.t_error,
                             message=p_msg, detail=detail)
    elif p_msg_level == "WARN":
        messagebox.showwarning(title=TX.title.t_warn,
                               message=p_msg, detail=detail)
    else:
        messagebox.showinfo(title=TX.title.t_info,
                            message=p_msg, detail=detail)
def make_user_image(self):
    """Construct the avatar-display."""
    user_rec, _ = CN.get_user_db_record()
    citizen_rec, _ = CN.get_ctzn_db_rec_by_id(user_rec.user_erep_profile_id)
    # Stream the avatar straight from its URL into PIL.
    response = requests.get(citizen_rec.avatar_link, stream=True)
    avatar = Image.open(response.raw)
    photo = ImageTk.PhotoImage(avatar)
    avatar_label = ttk.Label(self.win_root, image=photo)
    # Keep a reference on the widget so the image is not garbage-collected.
    avatar_label.image = photo
    avatar_label.place(x=750, y=450)
def make_menus(self):
    """Construct app menus on the root window.

    Builds three cascades: File (Close/Quit), Windows (switch between the
    config, collect and viz frames) and Help (user guide, about box).
    """
    self.menu_bar = tk.Menu(self.win_root)
    # File menu: Close starts disabled until a frame is opened.
    self.file_menu = tk.Menu(self.menu_bar, tearoff=0)
    self.menu_bar.add_cascade(label=TX.menu.m_file,
                              menu=self.file_menu)
    self.file_menu.add_command(label=TX.menu.i_close,
                               command=self.close_frame, state="disabled")
    self.file_menu.add_command(label=TX.menu.i_quit,
                               command=self.exit_appl)
    # Windows menu: one entry per application frame.
    self.win_menu = tk.Menu(self.menu_bar, tearoff=0)
    self.menu_bar.add_cascade(label=TX.menu.m_win, menu=self.win_menu)
    self.win_menu.add_command(label=TX.menu.i_cfg,
                              command=self.make_config_frame)
    self.win_menu.add_command(label=TX.menu.i_coll,
                              command=self.make_collect_frame)
    self.win_menu.add_command(label=TX.menu.i_viz,
                              command=self.make_viz_frame)
    # Help menu.
    self.help_menu = tk.Menu(self.menu_bar, tearoff=0)
    self.menu_bar.add_cascade(label=TX.menu.m_help,
                              menu=self.help_menu)
    self.help_menu.add_command(label=TX.menu.i_docs,
                               command=self.show_user_guide)
    self.help_menu.add_command(label=TX.menu.i_about,
                               command=self.show_about)
    # Attach the finished menu bar to the root window.
    self.win_root.config(menu=self.menu_bar)
def set_basic_interface(self):
    """Construct GUI widgets for efriends app.

    Creates the root Tk window with a fixed minimum size, centers it,
    and installs the application menus.
    """
    # Initial set of Window widgets
    self.win_root = tk.Tk()  # root app window
    self.win_root.title(TX.title.t_app)
    self.win_root.geometry('900x600+100+100')
    self.win_root.minsize(900, 600)
    # Tk helper that centers the window on the screen.
    self.win_root.eval('tk::PlaceWindow . center')
    # Initial set of menus
    self.make_menus()
|
<reponame>bastianwegge/pants<filename>src/python/pants/backend/go/util_rules/embedcfg.py
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from dataclasses import dataclass
from typing import Any, Iterable, Mapping
from pants.util.frozendict import FrozenDict
from pants.util.meta import frozen_after_init
@dataclass(unsafe_hash=True)
@frozen_after_init
class EmbedConfig:
    patterns: FrozenDict[str, tuple[str, ...]]
    files: FrozenDict[str, str]

    def __init__(self, patterns: Mapping[str, Iterable[str]], files: Mapping[str, str]) -> None:
        """Configuration passed to the Go compiler to configure file embedding.

        The compiler relies entirely on the caller to map embed patterns to actual filesystem
        paths. All embed patterns contained in the package must be mapped. Consult
        `FirstPartyPkgAnalysis.embed_patterns` for the embed patterns obtained from analysis.

        :param patterns: Maps each pattern provided via a //go:embed directive to a list of file
          paths relative to the package directory for files to embed for that pattern. When the
          embedded variable is an `embed.FS`, those relative file paths define the virtual
          directory hierarchy exposed by the embed.FS filesystem abstraction. The relative file
          paths are resolved to actual filesystem paths for their content by consulting the
          `files` dictionary.
        :param files: Maps each virtual, relative file path used as a value in the `patterns`
          dictionary to the actual filesystem path with that file's content.
        """
        self.patterns = FrozenDict({k: tuple(v) for k, v in patterns.items()})
        self.files = FrozenDict(files)

    @classmethod
    def from_json_dict(cls, d: dict[str, Any]) -> EmbedConfig | None:
        """Build an EmbedConfig from the JSON shape emitted by the Go analyzer.

        Returns None when both Patterns and Files are empty (see __bool__).
        """
        result = cls(
            patterns=FrozenDict(
                {key: tuple(value) for key, value in d.get("Patterns", {}).items()}
            ),
            files=FrozenDict(d.get("Files", {})),
        )
        return result if result else None

    def to_embedcfg(self) -> bytes:
        """Serialize to the JSON `embedcfg` format consumed by `go tool compile`."""
        data = {
            "Patterns": dict(self.patterns),
            "Files": dict(self.files),
        }
        return json.dumps(data).encode("utf-8")

    def __bool__(self) -> bool:
        return bool(self.patterns) or bool(self.files)

    def merge(self, other: EmbedConfig) -> EmbedConfig:
        """Merge two EmbedConfig's into one.

        Overlapping keys must have the same values.
        """
        overlapping_patterns_keys = set(self.patterns.keys()) & set(other.patterns.keys())
        for key in overlapping_patterns_keys:
            if self.patterns[key] != other.patterns[key]:
                raise AssertionError(
                    "Unable to merge conflicting golang file embed configurations. This should not have occurred. "
                    "Please open an issue at https://github.com/pantsbuild/pants/issues/new/choose "
                    "with the following information: "
                    f"Patterns Key: {key}; Left: {self.patterns[key]}; Right: {other.patterns[key]} "
                )
        overlapping_files_keys = set(self.files.keys()) & set(other.files.keys())
        for key in overlapping_files_keys:
            if self.files[key] != other.files[key]:
                # Bug fix: this message previously reported the *patterns*
                # values for the conflicting *files* key.
                raise AssertionError(
                    "Unable to merge conflicting golang file embed configurations. This should not have occurred. "
                    "Please open an issue at https://github.com/pantsbuild/pants/issues/new/choose "
                    "with the following information: "
                    f"Files Key: {key}; Left: {self.files[key]}; Right: {other.files[key]} "
                )
        return EmbedConfig(
            patterns={**self.patterns, **other.patterns},
            files={**self.files, **other.files},
        )
|
<reponame>atssada/deeplearning4j
import math
from matplotlib.pyplot import hist, title, subplot, scatter, plot
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import seaborn # improves matplotlib look and feel
import sys
import time
'''
Optimization Methods Visualalization
Graph tools to help visualize how optimization is performing
'''
GLOBAL_TIME = 1.5
def load_file(path):
    """Read a comma-delimited numeric text file into a NumPy array."""
    data = np.loadtxt(path, delimiter=',')
    return data
def sigmoid(hidden_mean):
    """Logistic activation: squash values into the open interval (0, 1)."""
    denominator = 1 + np.exp(-hidden_mean)
    return 1 / denominator
def render_plot(values, plot_type='histogram', chart_title=''):
    """Render a histogram or scatter plot of `values` on the current axes.

    Matrices with fewer than 2 cells are replaced by a 3x3 zero matrix
    and the title is tagged '-fake'. The mean absolute magnitude of the
    data is appended to the title.
    """
    # np.prod replaces np.product, which was removed in NumPy 2.0.
    if np.prod(values.shape) < 2:
        values = np.zeros((3, 3))
        chart_title += '-fake'
    if plot_type == 'histogram':
        hist(values)
    elif plot_type == "scatter":
        # NOTE(review): pyplot.scatter normally requires both x and y;
        # this single-argument call looks broken -- confirm intent.
        scatter(values)
    else:
        # print() form is valid in both Python 2 and Python 3
        # (the original used a Python-2-only print statement).
        print("The " + plot_type + " format is not supported. Please choose histogram or scatter.")
    # Mean magnitude gives a quick sense of the data's scale.
    magnitude = ' mm %g ' % np.mean(np.fabs(values))
    chart_title += ' ' + magnitude
    title(chart_title)
def render_activation_probability(dataPath, filename):
    """Save sigmoid activation probabilities as a PNG image."""
    activations = sigmoid(load_file(dataPath)) * 256
    image = Image.fromarray(activations)
    # PNG export below expects an RGB image.
    if image.mode != 'RGB':
        image = image.convert('RGB')
    image.save(filename, 'PNG')
def plot_single_graph(path, chart_title, filename):
    """Plot a 1-D series loaded from `path`, save it as PNG, show briefly.

    Fix: converted the Python-2-only `print` statement to the print()
    form, which works on both Python 2 and 3.
    """
    print('Graphing ' + chart_title + '\n')
    values = load_file(path)
    plt.plot(values, 'b')
    plt.title(chart_title)
    plt.savefig(filename, format='png')
    plt.show(block=False)
    # Keep the window up long enough to be seen, then close it.
    time.sleep(GLOBAL_TIME)
    plt.close()
def plot_matrices(orig_path, plot_type, filename):
    """Plot several matrices side by side and save the figure.

    `orig_path` is a comma-separated list alternating file path and chart
    title: "path1,title1,path2,title2,...".

    Fixes: Python-2 print statements -> print(); `/` -> `//` so the
    subplot column count stays an int on Python 3; the local variable
    `title` no longer shadows pyplot's title() function.
    """
    paths = orig_path.split(',')
    for idx, path in enumerate(paths):
        if idx % 2 == 0:
            # Odd positions hold the title for the preceding path.
            chart_title = paths[idx + 1]
            print('Loading matrix ' + chart_title + '\n')
            matrix = load_file(path)
            # Integer division: subplot() requires integer grid sizes.
            subplot(2, len(paths) // 4, idx // 2 + 1)
            render_plot(matrix, plot_type, chart_title=chart_title)
    plt.tight_layout()
    plt.savefig(filename, format='png')
    plt.show(block=False)
    time.sleep(GLOBAL_TIME)
    plt.close()
# TODO Finish adapting. Code still does not fully run through.
# def render_filter(data_path, n_rows, n_cols, filename):
# weight_data = load_file(data_path).reshape((n_rows, n_cols))
# patch_width = weight_data.shape[1]
# patch_height = 1
#
# # Initialize background to dark gray
# filter_frame = np.ones((n_rows*patch_width, n_cols * patch_height), dtype='uint8')
#
# for row in xrange(int(n_rows/n_cols)):
# for col in xrange(n_cols):
# patch = weight_data[row * n_cols + col].reshape((patch_width, patch_height))
# norm_patch = ((patch - patch.min()) / (patch.max() - patch.min() + 1e-6))
# filter_frame[row * patch_width: row * patch_width + patch_width,
# col * patch_height:col * patch_height + patch_height] = norm_patch * 255
# img = Image.fromarray(filter_frame)
# img.savefig(filename)
# img.show()
#
# def render_filter(data_path, filename, filter_width=10, filter_height=10):
# print 'Rendering filter image...'
# weight_data = load_file(data_path)
# n_rows = weight_data.shape[0]
# n_cols = weight_data.shape[1]
# padding = 1
#
# # Initialize background to dark gray
# filter_frame = np.ones(((filter_width+padding) * filter_width, (filter_height+padding) * filter_height), dtype='uint8') * 51
#
# for row in xrange(n_rows):
# for col in xrange(n_cols):
# patch = weight_data[row * n_cols + col].reshape((filter_width, filter_height))
# norm_patch = ((patch - patch.min()) / (patch.max() - patch.min() + 1e-6))
# filter_frame[row * (filter_height+padding): row * (filter_height+padding)+filter_height, col * (filter_width+padding): col * (filter_width+padding)+filter_width] = norm_patch * 255
# filter_frame[row * (filter_height+padding): row * (filter_height+padding) + filter_height, col * (filter_width+padding): col *(filter_width+padding) + filter_width]
# img = Image.fromarray(filter_frame)
# if img.mode != 'RGB':
# img = img.convert('RGB')
# img.save(filename)
# def vis_square(data_path, filename, n_rows=28, n_cols=28, padsize=1, padval=0):
# data = load_file(data_path)
# data = data.reshape(n_rows, n_cols)
#
# data -= data.min()
# data /= data.max()
#
# # force the number of filters to be square
# n = int(np.ceil(np.sqrt(data.shape[0])))
# padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
# data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
#
# # tile the filters into an image
# data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
# data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
#
# plt.imshow(data)
# time.sleep(GLOBAL_TIME)
# plt.savefig(data, filename)
if __name__ == '__main__':
    # CLI: <plot_type> <input path> <output filename>
    if len(sys.argv) < 4:
        # print() works on both Python 2 and 3 (was a Py2 print statement).
        print('Please specify a command: One of hbias,weights,plot and a file path')
        sys.exit(1)
    plot_type = sys.argv[1]
    path = sys.argv[2]
    filename = sys.argv[3]
    if plot_type == 'activations':
        render_activation_probability(path, filename)
    elif plot_type == 'single_matrix':
        # Fix: render_plot() expects a matrix, not a file path string
        # (the old call crashed on `values.shape`).
        render_plot(load_file(path))
    elif plot_type in ('histogram', 'scatter'):
        plot_matrices(path, plot_type, filename)
    elif plot_type in ('loss', 'accuracy'):
        plot_single_graph(path, plot_type, filename)
    # elif sys.argv[1] == 'filter':
    #     if sys.argv[7]:
    #         n_rows = int(sys.argv[4])
    #         n_cols = int(sys.argv[5])
    #         filter_width = int(sys.argv[6])
    #         filter_height = int(sys.argv[7])
    #         render_filter(path, filename, n_rows, n_cols, filter_height, filter_width)
    #     elif sys.argv[5]:
    #         n_rows = int(sys.argv[4])
    #         n_cols = int(sys.argv[5])
    #         render_filter(path, filename, n_rows, n_cols)
    #     else:
    #         render_filter(path, filename)
|
<gh_stars>0
from .widgets.form import *
from .psql.Database_builder import *
import re
def check_invalid_letters(text, mode):
    """Validate that `text` consists only of [_a-z0-9] characters.

    Shows a warning dialog naming `mode` (e.g. "database", "username")
    and returns False when an invalid character is present (or the text
    is empty); returns True otherwise.
    """
    # fullmatch() replaces the match()-then-compare-lengths idiom; it is
    # None exactly when the whole string is not [_a-z0-9]+.
    if re.fullmatch(r"[_a-z0-9]+", text) is None:
        messagebox.showinfo('Wrong letters', 'Your ' + mode + ''' name contains invalid characters.
            Only valid characters are _, a-z and 0-9''')
        return False
    return True
def too_low_role(ex):
    """Show an explanatory dialog when `ex` reports a missing-ownership error."""
    ownership_error = re.search("must be owner (.+)", ex)
    if ownership_error is None:
        return
    messagebox.showinfo('Too low role', """
            Application user does not have rights
            on this operation. You can change application user
            on Application user which you can enter from the link at the bottom at the
            database list.
            """)
    return
def handle_database_connect_error(ex, values):
    """Map a psql connection exception to a user-facing dialog.

    Args:
        ex: the caught exception (stringified here).
        values: form values; values[0] is the database name,
            values[1] the role/user name.

    Fix: the dialog titles for the "does not exist" branches previously
    said 'Role already exist'.
    """
    ex = str(ex)
    too_low_role(ex)
    role_not_exist = re.search("role \"(.+)\" does not exist", ex)
    if role_not_exist:
        messagebox.showinfo('Role does not exist', 'Role ' + values[1] + ' does not exist')
        return
    database_not_exist = re.search("database \"(.+)\" does not exist", ex)
    if database_not_exist:
        messagebox.showinfo('Database does not exist', 'Database ' + values[0] + ' does not exist')
        return
    if 'password authentication' in ex:
        messagebox.showinfo('Wrong password', 'Wrong password for role ' + values[0])
        return
    # Fallback for anything we do not recognize.
    messagebox.showinfo('Error', 'We are sorry, but something went wrong, Error: ' + ex)
class CreateDatabaseForm:
    """Form that creates a new database and registers a connection for it."""

    def __init__(self, root, data, cb):
        # cb is invoked with the new database name after a successful create.
        self.cb = cb
        self.form = Form(
            root,
            "Create database",
            [
                Field(
                    "Database name",
                    warning="Database name can only contain lower case letters, numbers and underscores",
                    required=True
                ),
                Field("Owner name (optional)")
            ],
            [
                FormButton(
                    "Create",
                    self.create_database
                )
            ]
        )
        # Container the caller mounts into its window.
        self.screen = self.form.main_frame

    def create_database(self):
        """Validate input, create the DB, store its connection, notify caller."""
        values = self.form.get_values()
        if values is None:
            # Form-level validation (e.g. required field) failed.
            return
        if check_invalid_letters(values[0], "database") is False:
            return
        try:
            Database_builder().create_db(values[0], values[1])
            database_connection = DatabaseConnection(database=values[0], user=values[1], password="")
            Database_builder().save_database_connection(database_connection)
            # Fix: clear() was previously called twice; once after the
            # whole flow succeeds is enough.
            self.form.clear()
            self.cb(values[0])
            messagebox.showinfo('Database creation', 'Database was successfully created')
        except BaseException as ex:
            ex = str(ex)
            too_low_role(ex)
            role_not_exist = re.search("role \"(.+)\" does not exist", ex)
            if role_not_exist:
                messagebox.showinfo('Role doesn\'t exist', 'Role ' + values[1] + ' does not exist')
                return
            database_already_exist = re.search("database \"(.+)\" already exist", ex)
            if database_already_exist:
                messagebox.showinfo('Database already exist', 'Database ' + values[0] + ' already exist')
                return
            messagebox.showinfo('Error', 'We are sorry, but something went wrong, Error: ' + ex)
class AddUserToDatabase:
    """Form that grants an existing role access to a given database."""

    def __init__(self, root, data):
        # data[0] is the target database name.
        self.database = data[0]
        self.form = Form(
            root,
            "Add user to database",
            [
                Field(
                    "Username",
                    required=True
                )
            ],
            [
                FormButton(
                    "Add",
                    self.handle_create
                )
            ]
        )
        # Container the caller mounts into its window.
        self.screen = self.form.main_frame

    def handle_create(self):
        """Validate the username and add it to the database; report errors."""
        values = self.form.get_values()
        if values is None:
            # Form-level validation (e.g. required field) failed.
            return
        if check_invalid_letters(values[0], "username") is False:
            return
        try:
            Database_builder().add_user(self.database, values[0])
            self.form.clear()
            messagebox.showinfo('information', 'User was successfully created')
        except BaseException as ex:
            ex = str(ex)
            # Shows its own dialog when the error is a missing-ownership one.
            too_low_role(ex)
            role_not_exist = re.search("role \"(.+)\" does not exist", ex)
            if role_not_exist:
                messagebox.showinfo('Role doesn\'t exist', 'Role ' + values[0] + ' does not exist')
                return
            messagebox.showinfo('Error', 'We are sorry, but something went wrong, Error: ' + ex)
class CreateUserForm:
    """Form that creates a new role, optionally adding it to a database."""

    def __init__(self, root, data):
        # Optional: when data carries a database name, the new user is
        # also added to that database.
        self.database = data[0] if len(data) > 0 else None
        self.form = Form(
            root,
            "Create user",
            [
                Field(
                    "Username",
                    warning="Username can contain only lower case letters, _ and 0 - 9",
                    required=True
                ),
                Field("Password"),
                # Role attribute checkboxes (CREATEDB / CREATEROLE / SUPERUSER).
                Field("Create db", type="check"),
                Field("Create role", type="check"),
                Field("Superuser", type="check")
            ],
            [
                FormButton(
                    "Create",
                    self.create_user
                )
            ]
        )
        # Container the caller mounts into its window.
        self.screen = self.form.main_frame

    def create_user(self):
        """Validate input and create the role; report errors via dialogs."""
        values = self.form.get_values()
        if values is None:
            # Form-level validation (e.g. required field) failed.
            return
        if check_invalid_letters(values[0], "username") is False:
            return
        try:
            # values: [username, password, create_db, create_role, superuser]
            user_builder = UserBuilder(values[0], values[1], values[2], values[3], values[4])
            if self.database is not None:
                Database_builder().create_and_user_database(self.database, user_builder)
                messagebox.showinfo('information', 'User was successfully created and added')
            else:
                Database_builder().create_user(user_builder)
                messagebox.showinfo('information', 'User was successfully created')
            self.form.clear()
        except BaseException as ex:
            ex = str(ex)
            # Shows its own dialog when the error is a missing-ownership one.
            too_low_role(ex)
            database_already_exist = re.search("role \"(.+)\" already exist", ex)
            if database_already_exist:
                messagebox.showinfo('Role already exist', 'Role ' + values[0] + ' already exist')
                return
            messagebox.showinfo('Error', 'We are sorry, but something went wrong, Error: ' + ex)
class ConnectDatabaseForm:
    """Form that tests and saves a connection to an existing database."""

    def __init__(self, root, data, cb):
        # cb is invoked with the database name after a connection is saved.
        self.cb = cb
        self.form = Form(
            root,
            "Connect to database",
            [
                Field(
                    "Database name",
                    warning="Database can contain only lower case letters, _ and 0 - 9",
                    required=True
                ),
                Field(
                    "Username",
                    warning="Username can contain only lower case letters, _ and 0 - 9",
                    required=True
                ),
                Field("Password")
            ],
            [
                FormButton(
                    "Test connection",
                    lambda: self.handle_connection("test")
                ),
                FormButton(
                    "Connect",
                    lambda: self.handle_connection("apply")
                )
            ]
        )
        # Container the caller mounts into its window.
        self.screen = self.form.main_frame

    def handle_connection(self, mode):
        """Validate inputs and test the connection; save it unless mode == "test"."""
        values = self.form.get_values()
        if values is None:
            return
        if check_invalid_letters(values[0], "database") is False:
            return
        # Fix: values[1] is the username, so the validation dialog should
        # say "username" (it previously said "database").
        if check_invalid_letters(values[1], "username") is False:
            return
        database_connection_exist = Database_builder().get_database_connection(values[0])
        if database_connection_exist is not None:
            messagebox.showinfo('Database connection', 'You are already connected to database ' + values[0])
            return
        database_connection = DatabaseConnection(database=values[0], user=values[1], password=values[2])
        try:
            Database_builder().test_database_connection(database_connection)
            if mode != "test":
                Database_builder().save_database_connection(database_connection)
                self.cb(values[0])
                self.form.clear()
            messagebox.showinfo('Correct credentials', 'Credentials for connection are correct')
        except BaseException as ex:
            handle_database_connect_error(ex, values)
class RenameDatabaseForm:
    """Form that renames a database and migrates its stored connection."""

    def __init__(self, root, data, cb):
        # cb is invoked with (old_name, new_name) after a successful rename.
        self.cb = cb
        # data[0] is the database's current name; used as the field default.
        self.database = data[0]
        self.form = Form(
            root,
            "Rename database",
            [
                Field(
                    "Database name",
                    value=self.database,
                    warning="Database name can only contain lower case letters, numbers and underscores",
                    required=True
                )
            ],
            [
                FormButton(
                    "Rename",
                    self.rename
                )
            ]
        )
        # Container the caller mounts into its window.
        self.screen = self.form.main_frame

    def rename(self):
        """Rename the database, re-key its saved connection, notify caller."""
        values = self.form.get_values()
        if values is None:
            # Form-level validation (e.g. required field) failed.
            return
        if check_invalid_letters(values[0], "database") is False:
            return
        try:
            database_builder = Database_builder()
            # Preserve the saved credentials under the new database name.
            connection = database_builder.get_database_connection(self.database)
            database_builder.rename_database(self.database, values[0])
            database_builder.remove_database_connection(self.database)
            database_connection = DatabaseConnection(database=values[0], user=connection['user'], password=connection['password'])
            database_builder.save_database_connection(database_connection)
            messagebox.showinfo("Database rename", "Your database was successfully renamed")
            self.cb(self.database, values[0])
            # Track the new name so a second rename starts from it.
            self.database = values[0]
            self.form.clear()
        except BaseException as ex:
            ex = str(ex)
            # Shows its own dialog when the error is a missing-ownership one.
            too_low_role(ex)
            database_already_exist = re.search("database \"(.+)\" already exist", ex)
            if database_already_exist:
                messagebox.showinfo('Database already exist', 'Database ' + values[0] + ' already exist')
                return
            messagebox.showinfo('Error', 'We are sorry, but something went wrong, Error: ' + ex)
class RenameTable:
    """Form that renames an existing table within a database."""

    def __init__(self, root, data, cb):
        # cb is invoked with (database, old_table, new_table) on success.
        self.cb = cb
        self.database = data[0]
        self.table = data[1]
        self.form = Form(
            root,
            "Rename table",
            [
                Field(
                    "Table name",
                    value=self.table,
                    required=True
                )
            ],
            [
                FormButton(
                    "Rename",
                    self.rename
                )
            ]
        )
        # Container the caller mounts into its window.
        self.screen = self.form.main_frame

    def rename(self):
        """Validate the new name and rename the table; report errors."""
        values = self.form.get_values()
        if values is None:
            return
        if values[0] == self.table:
            messagebox.showinfo('Wrong rename', 'Table cannot be renamed to same name')
            return
        # Table names additionally allow '#' and '@' beyond [_a-z0-9].
        name_checked = re.match("([_#@a-z0-9])+", values[0])
        if not name_checked or len(name_checked.group()) < len(values[0]):
            messagebox.showinfo('Wrong letters', '''Your table name contains invalid characters.
                Only valid characters are #, @, _, a-z and 0-9''')
            return
        try:
            Database_builder().rename_table(self.database, self.table, values[0])
            # Fix: missing space after "Table" in the success message.
            messagebox.showinfo("Table rename", "Table " + self.table + " was successfully renamed")
            self.cb(self.database, self.table, values[0])
            self.table = values[0]
            self.form.clear()
        except BaseException as ex:
            # Fix: too_low_role() does a regex search and needs a string;
            # passing the exception object raised a TypeError.
            too_low_role(str(ex))
            messagebox.showinfo('Error', 'We are sorry, but something went wrong, Error: ' + str(ex))
class ApplicationMainUserChange:
    """Form to change the application's own database client credentials."""

    def __init__(self, root, data):
        # Pre-fill every field from the currently stored application user.
        connection = Database_builder().get_application_user()
        self.form = Form(
            root,
            "Configure database client user",
            [
                Field(
                    "user",
                    # Fix: the "user" and "Database" warnings were swapped.
                    warning="Username can contain only lower case letters, _ and 0 - 9",
                    required=True,
                    value=connection['user']
                ),
                Field(
                    "Database",
                    warning="Database can contain only lower case letters, _ and 0 - 9",
                    required=True,
                    value=connection['database']
                ),
                Field(
                    "Password",
                    required=True,
                    value=connection['password']
                ),
                Field(
                    "Host",
                    required=True,
                    value=connection['host']
                ),
                Field(
                    "Port",
                    required=True,
                    value=connection['port']
                )
            ],
            [
                FormButton(
                    "Connect",
                    lambda: self.handle_connection()
                )
            ]
        )
        # Container the caller mounts into its window.
        self.screen = self.form.main_frame

    def handle_connection(self):
        """Validate the credentials, test them, and persist on success."""
        values = self.form.get_values()
        if values is None:
            return
        if check_invalid_letters(values[0], "user") is False:
            return
        if check_invalid_letters(values[1], "database") is False:
            return
        database_connection = DatabaseConnection(
            user=values[0],
            database=values[1],
            password=values[2],
            host=values[3],
            port=values[4]
        )
        try:
            Database_builder().test_database_connection(database_connection)
            Database_builder().save_application_user(database_connection)
            messagebox.showinfo('Correct credentials', 'Your application user was saved')
        except BaseException as ex:
            handle_database_connect_error(ex, values)
|
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x
import tensorflow as tf
print("Tensorflow version " + tf.__version__)

# Detect the Colab TPU runtime and initialize a TPU distribution strategy.
# Fails loudly when no TPU is attached.
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()  # TPU detection
    print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])
except ValueError:
    raise BaseException('ERROR: Not connected to a TPU runtime; please see the previous cell in this notebook for instructions!')

tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.TPUStrategy(tpu)

"""# Stage 1: Importing dependencies"""

# NOTE(review): the lines below use IPython/Colab shell magics
# ("!command", "var = !command"); this file runs only inside a
# Jupyter/Colab session, not as plain Python.
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
    print('Not connected to a GPU')
else:
    print(gpu_info)

!pip install sentencepiece
!pip install tf-models-official
#!pip install tf-models-nightly # better to install the version in development
!pip install tf-nightly
import tensorflow as tf
tf.__version__
import tensorflow_hub as hub
from official.nlp.bert.tokenization import FullTokenizer
from official.nlp.bert.input_pipeline import create_squad_dataset
from official.nlp.data.squad_lib import generate_tf_record_from_json_file
from official.nlp import optimization
from official.nlp.data.squad_lib import read_squad_examples
from official.nlp.data.squad_lib import FeatureWriter
from official.nlp.data.squad_lib import convert_examples_to_features
from official.nlp.data.squad_lib import write_predictions
import numpy as np
import math
import random
import time
import json
import collections
import os
from google.colab import drive
"""# Stage 2: Data preprocessing"""
drive.mount("/content/drive")
input_meta_data = generate_tf_record_from_json_file(
"/content/drive/MyDrive/Data/train-v1.1.json",
"/content/drive/MyDrive/Data/vocab.txt",
"/content/drive/MyDrive/Data/train-v1.1.tf_record")
with tf.io.gfile.GFile("/content/drive/MyDrive/Data/train_meta_data", "w") as writer:
writer.write(json.dumps(input_meta_data, indent=4) + "\n")
BATCH_SIZE = 4
train_dataset = create_squad_dataset(
"/content/drive/MyDrive/Data/train-v1.1.tf_record",
input_meta_data['max_seq_length'], # 384
BATCH_SIZE,
is_training=True)
"""# Stage 3: Model building
## Squad layer
"""
class BertSquadLayer(tf.keras.layers.Layer):
    """SQuAD output head: projects BERT sequence output to per-token
    start/end logits."""

    def __init__(self):
        super(BertSquadLayer, self).__init__()
        # Two output units per token: one start logit, one end logit.
        self.final_dense = tf.keras.layers.Dense(
            units=2,
            kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))

    def call(self, inputs):
        """Return (start_logits, end_logits), each (batch_size, seq_len)."""
        projected = self.final_dense(inputs)           # (batch_size, seq_len, 2)
        per_head = tf.transpose(projected, [2, 0, 1])  # (2, batch_size, seq_len)
        start_logits, end_logits = tf.unstack(per_head, axis=0)
        return start_logits, end_logits
"""## Whole model"""
class BERTSquad(tf.keras.Model):
    """BERT encoder from TF-Hub with a SQuAD start/end prediction head."""

    def __init__(self, name="bert_squad"):
        super(BERTSquad, self).__init__(name=name)
        # Pre-trained uncased BERT-Base, fine-tuned end to end.
        self.bert_layer = hub.KerasLayer(
            "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1",
            trainable=True)
        self.squad_layer = BertSquadLayer()

    def apply_bert(self, inputs):
        """Run the encoder and return only its per-token sequence output.

        Input keys follow the newer tf-models-official naming
        (input_word_ids / input_mask / input_type_ids); older code used
        input_ids / input_mask / segment_ids.
        """
        _, sequence_output = self.bert_layer([inputs["input_word_ids"],
                                              inputs["input_mask"],
                                              inputs["input_type_ids"]])
        return sequence_output

    def call(self, inputs):
        """Return (start_logits, end_logits) for a batch of SQuAD features."""
        encoded = self.apply_bert(inputs)
        return self.squad_layer(encoded)
"""# Stage 4: Training
## Creating the AI
"""
TRAIN_DATA_SIZE = 88641
NB_BATCHES_TRAIN = 2000
BATCH_SIZE = 4
NB_EPOCHS = 3
INIT_LR = 5e-5
WARMUP_STEPS = int(NB_BATCHES_TRAIN * 0.1)
train_dataset_light = train_dataset.take(NB_BATCHES_TRAIN)
bert_squad = BERTSquad()
optimizer = optimization.create_optimizer(
init_lr=INIT_LR,
num_train_steps=NB_BATCHES_TRAIN,
num_warmup_steps=WARMUP_STEPS)
def squad_loss_fn(labels, model_outputs):
    """Mean of the start-position and end-position cross-entropy losses."""
    start_logits, end_logits = model_outputs

    def _position_loss(positions, logits):
        # Sparse cross-entropy on raw logits, averaged over the batch.
        return tf.reduce_mean(
            tf.keras.backend.sparse_categorical_crossentropy(
                positions, logits, from_logits=True))

    return (_position_loss(labels['start_positions'], start_logits) +
            _position_loss(labels['end_positions'], end_logits)) / 2
# Running average of the training loss, reset at the start of each epoch.
train_loss = tf.keras.metrics.Mean(name="train_loss")
# NOTE(review): pulls one batch, presumably to warm up / sanity-check the
# input pipeline before training — confirm.
next(iter(train_dataset_light))
bert_squad.compile(optimizer,
                   squad_loss_fn)
# Keep only the most recent checkpoint; restore it if one exists.
checkpoint_path = "./drive/MyDrive/projects/BERT/ckpt_bert_squad/"
ckpt = tf.train.Checkpoint(bert_squad=bert_squad)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=1)
if ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)
    print("Latest checkpoint restored!!")
"""## Custom training"""
for epoch in range(NB_EPOCHS):
print("Start of epoch {}".format(epoch+1))
start = time.time()
train_loss.reset_states()
for (batch, (inputs, targets)) in enumerate(train_dataset_light):
with tf.GradientTape() as tape:
model_outputs = bert_squad(inputs)
loss = squad_loss_fn(targets, model_outputs)
gradients = tape.gradient(loss, bert_squad.trainable_variables)
optimizer.apply_gradients(zip(gradients, bert_squad.trainable_variables))
train_loss(loss)
if batch % 50 == 0:
print("Epoch {} Batch {} Loss {:.4f}".format(
epoch+1, batch, train_loss.result()))
if batch % 500 == 0:
ckpt_save_path = ckpt_manager.save()
print("Saving checkpoint for epoch {} at {}".format(epoch+1,
ckpt_save_path))
print("Time taken for 1 epoch: {} secs\n".format(time.time() - start))
"""# Stage 5: Evaluation
## Prepare evaluation
Get the dev set in the session
"""
eval_examples = read_squad_examples(
"/content/drive/MyDrive/Data/dev-v1.1.json",
is_training=False,
version_2_with_negative=False)
"""Define the function that will write the tf_record file for the dev set"""
eval_writer = FeatureWriter(
filename=os.path.join("/content/drive/MyDrive/Data/",
"eval.tf_record"),
is_training=False)
"""Create a tokenizer for future information needs"""
my_bert_layer = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1",
trainable=False)
vocab_file = my_bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = my_bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = FullTokenizer(vocab_file, do_lower_case)
"""Define the function that add the features (feature is a protocol in tensorflow) to our eval_features list"""
def _append_feature(feature, is_padding):
if not is_padding:
eval_features.append(feature)
eval_writer.process_feature(feature)
"""Create the eval features and the writes the tf.record file"""
eval_features = []
dataset_size = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=384,
doc_stride=128,
max_query_length=64,
is_training=False,
output_fn=_append_feature,
batch_size=4)
eval_writer.close()
"""Load the ready-to-be-used dataset to our session"""
BATCH_SIZE = 4
eval_dataset = create_squad_dataset(
"/content/drive/MyDrive/Data/eval.tf_record",
384,#input_meta_data['max_seq_length'],
BATCH_SIZE,
is_training=False)
"""## Making the predictions
Defines a certain type of collection (like a dictionary)
"""
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
"""Returns each element of batched output at a time"""
def get_raw_results(predictions):
for unique_ids, start_logits, end_logits in zip(predictions['unique_ids'],
predictions['start_logits'],
predictions['end_logits']):
yield RawResult(
unique_id=unique_ids.numpy(),
start_logits=start_logits.numpy().tolist(),
end_logits=end_logits.numpy().tolist())
"""Let's make our predictions!"""
all_results = []
for count, inputs in enumerate(eval_dataset):
x, _ = inputs
unique_ids = x.pop("unique_ids")
start_logits, end_logits = bert_squad(x, training=False)
output_dict = dict(
unique_ids=unique_ids,
start_logits=start_logits,
end_logits=end_logits)
for result in get_raw_results(output_dict):
all_results.append(result)
if count % 100 == 0:
print("{}/{}".format(count, 2709))
"""Write the predictions in a json file that will work with the evaluation script"""
output_prediction_file = "/content/drive/MyDrive/Data/predictions.json"
output_nbest_file = "/content/drive/MyDrive/Data/nbest_predictions.json"
output_null_log_odds_file = "/content/drive/MyDrive/Data/null_odds.json"
write_predictions(
eval_examples,
eval_features,
all_results,
20,
30,
True,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
verbose=False)
"""### Input dict creation
#### Utils
"""
my_bert_layer = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1",
trainable=False)
vocab_file = my_bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = my_bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = FullTokenizer(vocab_file, do_lower_case)
def is_whitespace(c):
    """Return True if character *c* is a whitespace separator.

    Recognizes space, tab, CR, LF and U+202F (narrow no-break space).
    """
    return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F
def whitespace_split(text):
    """Split *text* into words on whitespace (as defined by is_whitespace).

    Unlike str.split(), this also treats U+202F as a separator.
    """
    words = []
    in_word = False
    for ch in text:
        if is_whitespace(ch):
            in_word = False
        elif in_word:
            words[-1] += ch        # continue the current word
        else:
            words.append(ch)       # start a new word
            in_word = True
    return words
def tokenize_context(text_words):
    """Tokenize each word and map every sub-token back to its word index.

    Returns (tokens, tok_to_word_id) where tok_to_word_id[i] is the index
    in text_words of the word that produced tokens[i].
    """
    all_tokens = []
    token_word_ids = []
    for idx, word in enumerate(text_words):
        pieces = tokenizer.tokenize(word)
        all_tokens.extend(pieces)
        token_word_ids.extend([idx] * len(pieces))
    return all_tokens, token_word_ids
def get_ids(tokens):
    """Convert a list of tokens to vocabulary ids via the global tokenizer."""
    return tokenizer.convert_tokens_to_ids(tokens)
def get_mask(tokens):
    """Attention mask: 1 for real tokens, 0 for "[PAD]" tokens."""
    return (np.asarray(tokens) != "[PAD]").astype(int)
def get_segments(tokens):
    """Segment ids for a packed sequence.

    Tokens up to and including the first "[SEP]" get 0; the id flips after
    every "[SEP]", so padding following the final "[SEP]" is back to 0.
    """
    ids = []
    segment = 0
    for token in tokens:
        ids.append(segment)
        if token == "[SEP]":
            segment = 1 - segment  # flip 0 <-> 1
    return ids
def create_input_dict(question, context):
    """Build the model input dict for a (question, context) pair.

    Returns a 4-tuple:
      input_dict             -- input_word_ids / input_mask / input_type_ids,
                                each a (1, padded_len) int32 tensor
      context_words          -- whitespace_split(context)
      context_tok_to_word_id -- word index for each context sub-token
      len(question_tok)      -- number of question sub-tokens (used later to
                                strip the question part from the logits)
    """
    # BUG FIX: the original tokenized the global `my_question` instead of the
    # `question` parameter, so the argument was silently ignored.
    question_tok = tokenizer.tokenize(question)
    context_words = whitespace_split(context)
    context_tok, context_tok_to_word_id = tokenize_context(context_words)

    # NOTE(review): no leading "[CLS]" token is added here, unlike the usual
    # BERT input layout — confirm this matches how the model was trained.
    input_tok = question_tok + ["[SEP]"] + context_tok + ["[SEP]"]
    # Pad up to 384 tokens (model's training max length); no-op if longer.
    input_tok += ["[PAD]"]*(384-len(input_tok))

    input_dict = {}
    input_dict["input_word_ids"] = tf.expand_dims(tf.cast(get_ids(input_tok), tf.int32), 0)
    input_dict["input_mask"] = tf.expand_dims(tf.cast(get_mask(input_tok), tf.int32), 0)
    input_dict["input_type_ids"] = tf.expand_dims(tf.cast(get_segments(input_tok), tf.int32), 0)
    return input_dict, context_words, context_tok_to_word_id, len(question_tok)
"""#### Creation"""
with open('/content/drive/MyDrive/Data/Text Documents/file_final.txt') as final_txt:
contents = final_txt.read()
my_context = contents
my_context = contents[:496875]
my_context
my_question = '''How have restrictive practices been used on people with disability and older people experienced during COVID-19?'''
my_input_dict, my_context_words, context_tok_to_word_id, question_tok_len = create_input_dict(my_question, my_context)
my_input_dict
my_input_dict['input_word_ids'][0]
input_id_chunks = my_input_dict['input_word_ids'][0]
mask_chunks = my_input_dict['input_mask'][0]
type_chunks = my_input_dict['input_type_ids'][0]
input_id_chunks = tf.split(input_id_chunks, num_or_size_splits=195)
mask_chunks = tf.split(mask_chunks, num_or_size_splits=195)
type_chunks = tf.split(type_chunks, num_or_size_splits=195)
input_id_chunks = tf.stack(input_id_chunks)
mask_chunks = tf.stack(mask_chunks)
type_chunks = tf.stack(type_chunks)
my_input_dict = {'input_mask':mask_chunks,'input_type_ids':type_chunks,'input_word_ids':input_id_chunks}
for tensor in input_id_chunks:
print(len(tensor))
"""### Prediction"""
start_logits, end_logits = bert_squad(my_input_dict, training=False)
"""### Interpretation
We remove the ids corresponding to the question and the `["SEP"]` token:
"""
start_logits_context = start_logits.numpy()[0, question_tok_len+1:]
end_logits_context = end_logits.numpy()[0, question_tok_len+1:]
"""First easy interpretation:"""
start_word_id = context_tok_to_word_id[np.argmax(start_logits_context)]
end_word_id = context_tok_to_word_id[np.argmax(end_logits_context)]
""""Advanced" - making sure that the start of the answer is before the end:"""
pair_scores = np.ones((len(start_logits_context), len(end_logits_context)))*(-1E10)
for i in range(len(start_logits_context-1)):
for j in range(i, len(end_logits_context)):
pair_scores[i, j] = start_logits_context[i] + end_logits_context[j]
pair_scores_argmax = np.argmax(pair_scores)
start_word_id = context_tok_to_word_id[pair_scores_argmax // len(start_logits_context)]
end_word_id = context_tok_to_word_id[pair_scores_argmax % len(end_logits_context)]
"""Final answer:"""
predicted_answer = ' '.join(my_context_words[start_word_id:end_word_id+1])
print("The answer to:\n" + my_question + "\nis:\n" + predicted_answer)
|
<gh_stars>0
import time
import boto3
import requests
import gym
import numpy as np
from time import gmtime,strftime
from gym.spaces import Discrete, Box
cloudwatch_cli = boto3.client('cloudwatch',region_name='us-west-2')
class GameServerEnv(gym.Env):
    """Gym environment for learning a game-server auto-scaling policy.

    The observation is the concatenation of two length-`history_len`
    histories: recent demand and recent allocation. The continuous action
    in [0, 1] is scaled by `action_factor` into a server count. Demand comes
    either from an HTTP matchmaking service or, when gs_inventory_url is
    'local', from a synthetic sine wave. Every step publishes metrics to
    CloudWatch and sleeps `learning_freq` seconds to track real time.
    """

    def __init__(self, env_config={}):
        # NOTE(review): mutable default argument; harmless here only because
        # env_config is never mutated.
        print ("in __init__")
        print ("env_config {}".format(env_config))
        self.namespace = env_config['cloudwatch_namespace']      # CloudWatch namespace
        self.gs_inventory_url = env_config['gs_inventory_url']   # matchmaking URL or 'local'
        self.learning_freq = env_config['learning_freq']         # seconds between steps
        self.min_servers = int(env_config['min_servers'])
        self.max_servers = int(env_config['max_servers'])
        self.action_factor = int(env_config['action_factor'])
        self.over_prov_factor = int(env_config['over_prov_factor'])
        self.num_steps = 0
        self.max_num_steps = 301    # steps per episode
        self.history_len = 5        # length of each history window
        self.total_num_of_obs = 1
        # we have two observation array, allocation and demand. allocation is alloc_observation, demand is observation hence *2
        self.observation_space = Box(low=np.array([self.min_servers]*self.history_len*2),
                                     high=np.array([self.max_servers]*self.history_len*2),
                                     dtype=np.uint32)
        # How many servers should the agent spin up at each time step
        self.action_space = Box(low=np.array([0]),
                                high=np.array([1]),
                                dtype=np.float32)

    def reset(self):
        """Start a new episode with both histories filled with min_servers."""
        print ("in reset")
        #self.populate_cloudwatch_metric(self.namespace,1,'reset')
        self.num_steps = 0
        self.current_min = 0   # phase of the local sine-wave demand generator
        self.demand_observation = np.array([self.min_servers]*self.history_len)
        self.alloc_observation = np.array([self.min_servers]*self.history_len)
        print ('self.demand_observation '+str(self.demand_observation))
        return np.concatenate((self.demand_observation, self.alloc_observation)).tolist()

    def step(self, action):
        """Apply one scaling action, fetch demand, compute the reward.

        Returns (next_state, reward, done, info) per the classic gym API.
        """
        print ('in step - action recieved from model'+str(action))
        self.num_steps+=1
        self.total_num_of_obs+=1
        print('total_num_of_obs={}'.format(self.total_num_of_obs))
        # Scale the [0, 1] action into a server count, clipped to the limits.
        raw_action=float(action)
        self.curr_action = raw_action*self.action_factor
        self.curr_action = np.clip(self.curr_action, self.min_servers, self.max_servers)
        print('self.curr_action={}'.format(self.curr_action))
        if (self.gs_inventory_url!='local'):
            #get the demand from the matchmaking service
            print('quering matchmaking service for current demand, curr_demand')
            try:
                gs_url=self.gs_inventory_url
                req=requests.get(url=gs_url)
                data=req.json()
                self.curr_demand = float(data['Prediction']['num_of_gameservers'])
            except requests.exceptions.RequestException as e:
                print(e)
                print('if matchmaking did not respond just randomized curr_demand between limit, reward will correct')
                self.curr_demand = float(np.random.randint(self.min_servers,self.max_servers))
        if (self.gs_inventory_url=='local'):
            print('local matchmaking service for current demand, curr_demand')
            data=self.get_curr_sine1h()
            self.curr_demand = float(data['Prediction']['num_of_gameservers'])
        # clip the demand to the allowed range
        self.curr_demand = np.clip(self.curr_demand, self.min_servers, self.max_servers)
        print('self.curr_demand={}'.format(self.curr_demand))
        # NOTE(review): "current allocation" is read from the *demand* history,
        # not the allocation history — looks like the oldest demand value is
        # used as a proxy; confirm this is intentional.
        self.curr_alloc = self.demand_observation[0]
        print('self.curr_alloc={}'.format(self.curr_alloc))
        # store the current demand in the history array demand_observation
        self.demand_observation = self.demand_observation[1:] # shift the observation by one to remove one history point
        self.demand_observation=np.append(self.demand_observation,self.curr_demand)
        print('self.demand_observation={}'.format(self.demand_observation))
        # store the current allocation in the history array alloc_observation
        self.alloc_observation = self.alloc_observation[1:]
        self.alloc_observation=np.append(self.alloc_observation,self.curr_action)
        print('self.alloc_observation={}'.format(self.alloc_observation))
        #reward calculation - in case of over provision just 1-ratio. under provision is more severe so 500% more for negative reward
        print('calculate the reward, calculate the ratio between allocation and demand, we use the first allocation in the series of history of five, first_alloc/curr_demand')
        ratio=self.curr_action/self.curr_demand
        print('ratio={}'.format(ratio))
        if (ratio>1):
            #reward=1-ratio
            reward = -1 * (self.curr_alloc - self.curr_demand)
            print('reward over provision - ratio>1 - {}'.format(reward))
        if (ratio<1):
            #reward=-50*ratio
            reward = -5 * (self.curr_demand - self.curr_alloc)
            print('reward under provision - ratio<1 - {}'.format(reward))
        if (ratio==1):
            reward=1
            print('ratio=1')
        # NOTE(review): this adjustment is applied unconditionally after the
        # three cases above; when alloc > demand it *increases* the reward,
        # which looks inverted for an over-provisioning penalty — confirm.
        reward -= (self.curr_demand - self.curr_alloc)*self.over_prov_factor
        print('ratio={}'.format(ratio))
        print('reward={}'.format(reward))
        #Instrumnet the allocation and demand in cloudwatch
        print('populating cloudwatch - self.curr_demand={}'.format(self.curr_demand))
        self.populate_cloudwatch_metric(self.namespace,self.curr_demand,'curr_demand')
        print('populating cloudwatch - self.curr_alloc={}'.format(self.curr_action))
        self.populate_cloudwatch_metric(self.namespace,self.curr_action,'curr_alloc')
        print('populating cloudwatch - reward={}'.format(reward))
        self.populate_cloudwatch_metric(self.namespace,reward,'reward')
        print('populating cloudwatch - num_steps={}'.format(self.num_steps))
        self.populate_cloudwatch_metric(self.namespace,self.num_steps,'num_steps')
        print('populating cloudwatch - total_num_of_obs={}'.format(self.total_num_of_obs))
        self.populate_cloudwatch_metric(self.namespace,self.total_num_of_obs,'total_num_of_obs')
        # Episode ends after max_num_steps steps.
        if (self.num_steps >= self.max_num_steps):
            done = True
            print ("self.num_steps "+str(self.num_steps))
            print ("self.max_num_steps "+str(self.max_num_steps))
        else:
            done = False
        # Throttle the loop so the environment tracks real time.
        print ('time.sleep() for {} before next iteration'.format(self.learning_freq))
        time.sleep(int(self.learning_freq))
        extra_info = {}
        #the next state includes the demand and allocation history.
        next_state=np.concatenate((self.demand_observation,self.alloc_observation)).tolist()
        print ('next_state={}'.format(next_state))
        return next_state, reward, done, extra_info

    def render(self, mode):
        """No visualization for this environment."""
        print("in render")
        pass

    def populate_cloudwatch_metric(self,namespace,metric_value,metric_name):
        """Publish a single metric data point to CloudWatch under `namespace`."""
        print("in populate_cloudwatch_metric metric_value="+str(metric_value)+" metric_name="+metric_name)
        response = cloudwatch_cli.put_metric_data(
            Namespace=namespace,
            MetricData=[
                {
                    'MetricName': metric_name,
                    'Unit': 'None',
                    'Value': metric_value,
                },
            ]
        )
        print('response from cloud watch'+str(response))

    def get_curr_sine1h(self):
        """Synthetic local demand following a sine shape over a 60-step cycle.

        Mimics the matchmaking service's response format.
        """
        max_servers=self.max_servers*0.9   # keep the peak below the hard limit
        print ('in get_curr_sine1h')
        # 61 sample points spanning angles 0.2..3.1 (roughly half a sine period).
        cycle_arr=np.linspace(0.2,3.1,61)
        self.current_min = (self.current_min + 1) % 60
        current_min = self.current_min
        print('current_min={}'.format(current_min))
        current_point=cycle_arr[int(current_min)]
        sine=max_servers*np.sin(current_point)
        print('sine({})={}'.format(current_point,sine))
        return {"Prediction":{"num_of_gameservers": sine}}
|
"""
Algorithm:
Also called as Partition Exchange sort.
Developed by <NAME> in 1959 and published in 1961
When implemented well, it can be about two or three times faster than its main competitors, merge sort and heap sort.
Quick Sort:
1. Comparison Sort
2. In-place sort (it may require more memory)
3. Unstable sort
4. Recursive Algorithm
Divide and Conquer Algorithm:
1. Divide
2. Conquer
3. Combine
For ascending order:
<Smaller Value> <Pivot> <Bigger value>
For descending order:
<Bigger Value> <Pivot> <Smaller Value>
Pivot Element:
1. First element (avoid this for an already-sorted list, since the partition would leave no values on one side.)
2. Last element
3. Random element
4. Median of three values (first, middle, last)
1. left <= right
2. a[left] <= pivot
3. a[right] >= pivot
Algorithm to solve :
1. Select the pivot element
2. Find out the correct position of the pivot element in the list by rearranging it.
3. Divide the list based on pivot element.
4. Sort the sublist recursively.
Solution flow:
We will use 3 functions
1. To find the pivot
2. Divide the list to sublist and here we'll use recursive method
3. Main method to take i/p
index 0 1 2 3 4 5
list 56 26 93 17 31 44
(pivot) (left) (right)
"""
# taking first element as pivot
def pivot_pos_first(list1, first, last):
    """Partition list1[first:last+1] around list1[first] (ascending).

    Returns the pivot's final index; smaller-or-equal values end up to its
    left, greater-or-equal values to its right.
    """
    pivot = list1[first]
    lo, hi = first + 1, last
    while True:
        # Advance lo past values that belong on the left of the pivot.
        while lo <= hi and list1[lo] <= pivot:
            lo += 1
        # Retreat hi past values that belong on the right of the pivot.
        while lo <= hi and list1[hi] >= pivot:
            hi -= 1
        if hi < lo:
            break
        # Both pointers found out-of-place values: exchange them.
        list1[lo], list1[hi] = list1[hi], list1[lo]
    # Drop the pivot into its final slot.
    list1[first], list1[hi] = list1[hi], list1[first]
    return hi

def quick_sort_first(list1, first, last):
    """Quick sort (ascending, in place) using the first element as pivot."""
    if first >= last:
        return
    split = pivot_pos_first(list1, first, last)
    quick_sort_first(list1, first, split - 1)   # left sublist
    quick_sort_first(list1, split + 1, last)    # right sublist
if __name__ == "__main__":
list1 = [56, 26, 93, 17, 31, 44]
quick_sort_first(list1, 0, len(list1) - 1)
print(list1)
# taking last element as pivot
def pivot_pos_last(list1, first, last):
    """Partition list1[first:last+1] around list1[last] (ascending).

    Returns the pivot's final index.
    """
    pivot = list1[last]
    lo, hi = first, last - 1
    while True:
        while lo <= hi and list1[lo] <= pivot:
            lo += 1
        while lo <= hi and list1[hi] >= pivot:
            hi -= 1
        if hi < lo:
            break
        # Exchange the pair of out-of-place values.
        list1[lo], list1[hi] = list1[hi], list1[lo]
    # Drop the pivot into its final slot.
    list1[last], list1[lo] = list1[lo], list1[last]
    return lo

def quick_sort_last(list1, first, last):
    """Quick sort (ascending, in place) using the last element as pivot."""
    if first >= last:
        return
    split = pivot_pos_last(list1, first, last)
    quick_sort_last(list1, first, split - 1)   # left sublist
    quick_sort_last(list1, split + 1, last)    # right sublist
if __name__ == "__main__":
list1 = [56, 26, 93, 17, 31, 44]
quick_sort_last(list1, 0, len(list1) - 1)
print(list1)
# taking random element as pivot
import random

def pivot_pos_random(list1, first, last):
    """Swap a uniformly random element into position `last`, then partition
    around it exactly like the last-element-pivot scheme."""
    swap_idx = random.randint(first, last)
    list1[swap_idx], list1[last] = list1[last], list1[swap_idx]
    pivot = list1[last]
    lo, hi = first, last - 1
    while True:
        while lo <= hi and list1[lo] <= pivot:
            lo += 1
        while lo <= hi and list1[hi] >= pivot:
            hi -= 1
        if hi < lo:
            break
        list1[lo], list1[hi] = list1[hi], list1[lo]
    # Drop the pivot into its final slot.
    list1[last], list1[lo] = list1[lo], list1[last]
    return lo

def quick_sort_random(list1, first, last):
    """Quick sort (ascending, in place) with a random pivot."""
    if first >= last:
        return
    split = pivot_pos_random(list1, first, last)
    quick_sort_random(list1, first, split - 1)   # left sublist
    quick_sort_random(list1, split + 1, last)    # right sublist
if __name__ == "__main__":
list1 = [0, 56, 26, 93, 17, 31, 31, 44, 0]
quick_sort_random(list1, 0, len(list1) - 1)
print(list1)
# taking median element as pivot
import statistics

def pivot_pos_median(list1, first, last):
    """Median-of-three pivot: swap the median of the first, middle and last
    values into position `last`, then partition around it."""
    middle = (first + last) // 2
    pivot_value = statistics.median([list1[first], list1[middle], list1[last]])
    # Locate which of the three sampled positions holds the median.
    if pivot_value == list1[first]:
        pindex = first
    elif pivot_value == list1[last]:
        pindex = last
    else:
        pindex = middle
    list1[pindex], list1[last] = list1[last], list1[pindex]
    pivot = list1[last]
    lo, hi = first, last - 1
    while True:
        while lo <= hi and list1[lo] <= pivot:
            lo += 1
        while lo <= hi and list1[hi] >= pivot:
            hi -= 1
        if hi < lo:
            break
        list1[lo], list1[hi] = list1[hi], list1[lo]
    # Drop the pivot into its final slot.
    list1[last], list1[lo] = list1[lo], list1[last]
    return lo

def quick_sort_median(list1, first, last):
    """Quick sort (ascending, in place) with a median-of-three pivot."""
    if first >= last:
        return
    split = pivot_pos_median(list1, first, last)
    quick_sort_median(list1, first, split - 1)   # left sublist
    quick_sort_median(list1, split + 1, last)    # right sublist
if __name__ == "__main__":
list1 = [0, 56, 26, 93, 17, 31, 31, 44, 0]
quick_sort_median(list1, 0, len(list1) - 1)
print(list1) |
<filename>massl_pretrain.py
# 2022 Tran_Nhiem, <NAME> (SSL Team)
# Pre-training Partially Inherence from 2021 solo-learn team development.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
import os
from pprint import pprint
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from MA_SSRL.args.setup import parse_args_pretrain
from MA_SSRL.methods import METHODS
from MA_SSRL.utils.auto_resumer import AutoResumer
try:
from MA_SSRL.methods.dali import PretrainABC
except ImportError as e:
print(e)
_dali_avaliable = False
else:
_dali_avaliable = True
try:
from MA_SSRL.utils.auto_umap import AutoUMAP
except ImportError:
_umap_available = False
else:
_umap_available = True
import types
from MA_SSRL.utils.checkpointer import Checkpointer
from MA_SSRL.utils.classification_dataloader import prepare_data as prepare_data_classification
from MA_SSRL.utils.pretrain_dataloader import (
prepare_dataloader,
prepare_datasets,
prepare_n_crop_transform_v1,
prepare_transform,
)
def main():
    """Parse args, build the SSL method, dataloaders and callbacks, then fit.

    Mirrors the solo-learn pre-training entry point, extended with the
    multi-augmentation ("mulda") dataset plumbing.
    """
    seed_everything(5)

    args = parse_args_pretrain()
    assert args.method in METHODS, f"Choose from {METHODS.keys()}"
    print(args.num_large_crops)

    MethodClass = METHODS[args.method]
    if args.dali:
        # Wrap the method class with the DALI pre-training mixin.
        assert (
            _dali_avaliable
        ), "Dali is not currently avaiable, please install it first with [dali]."
        MethodClass = types.new_class(f"Dali{MethodClass.__name__}", (PretrainABC, MethodClass))

    model = MethodClass(**args.__dict__)

    # Pretrain dataloader (skipped when DALI builds its own pipeline).
    if not args.dali:
        if args.unique_augs > 1:
            # BUG FIX: the original test `args.dataset == "mulda" or "mulda_v1"`
            # was always truthy ("mulda_v1" is a non-empty string), so the
            # asymmetric-augmentation branch below was unreachable. Use a
            # proper membership test.
            if args.dataset in ("mulda", "mulda_v1"):
                # Plugin: proposed multiple data augmentation.
                transform = prepare_transform(args.dataset, args.transform_kwargs, args.mulda_kwargs)
            else:
                # Normal case: one transform per set of kwargs (asymmetric augs).
                transform = [
                    prepare_transform(args.dataset, kwargs) for kwargs in args.transform_kwargs
                ]
        else:
            transform = [prepare_transform(args.dataset, args.transform_kwargs)]

        # Crop first, then apply the per-crop transforms.
        transform = prepare_n_crop_transform_v1(transform, num_crops_per_aug=args.num_crops_per_aug)

        if args.debug_augmentations:
            print("Transforms:")
            pprint(transform)

        train_dataset = prepare_datasets(
            args.dataset,
            transform,
            data_dir=args.data_dir,
            train_dir=args.train_dir,
            no_labels=args.no_labels,
        )
        train_loader = prepare_dataloader(
            train_dataset, batch_size=args.batch_size, num_workers=args.num_workers
        )

    # Validation dataloader, only when labels/val data are available.
    if args.dataset == "custom" and (args.no_labels or args.val_dir is None):
        val_loader = None
    elif args.dataset in ["imagenet100", "imagenet", "mulda", "mulda_v1"] and args.val_dir is None:
        val_loader = None
    else:
        _, val_loader = prepare_data_classification(
            args.dataset,
            data_dir=args.data_dir,
            train_dir=args.train_dir,
            val_dir=args.val_dir,
            batch_size=args.batch_size,
            num_workers=args.num_workers,
        )

    callbacks = []

    # Weights & Biases logging (plus per-epoch learning-rate logging).
    if args.wandb:
        wandb_logger = WandbLogger(
            name=args.name,
            project=args.project,
            entity=args.entity,
            offline=args.offline,
        )
        wandb_logger.watch(model, log="gradients", log_freq=100)
        wandb_logger.log_hyperparams(args)

        lr_monitor = LearningRateMonitor(logging_interval="epoch")
        callbacks.append(lr_monitor)

    if args.save_checkpoint:
        # Save checkpoints every `checkpoint_frequency` epochs.
        ckpt = Checkpointer(
            args,
            logdir=os.path.join(args.checkpoint_dir, args.method),
            frequency=args.checkpoint_frequency,
        )
        callbacks.append(ckpt)

    if args.auto_umap:
        assert (
            _umap_available
        ), "UMAP is not currently avaiable, please install it first with [umap]."
        auto_umap = AutoUMAP(
            args,
            logdir=os.path.join(args.auto_umap_dir, args.method),
            frequency=args.auto_umap_frequency,
        )
        callbacks.append(auto_umap)

    # Lightning 1.7 deprecates resume_from_checkpoint; pass ckpt_path to fit().
    ckpt_path = None
    if args.auto_resume and args.resume_from_checkpoint is None:
        auto_resumer = AutoResumer(
            checkpoint_dir=os.path.join(args.checkpoint_dir, args.method),
            max_hours=args.auto_resumer_max_hours,
        )
        resume_from_checkpoint = auto_resumer.find_checkpoint(args)
        if resume_from_checkpoint is not None:
            print(
                "Resuming from previous checkpoint that matches specifications:",
                f"'{resume_from_checkpoint}'",
            )
            ckpt_path = resume_from_checkpoint
    elif args.resume_from_checkpoint is not None:
        ckpt_path = args.resume_from_checkpoint
        del args.resume_from_checkpoint

    trainer = Trainer.from_argparse_args(
        args,
        logger=wandb_logger if args.wandb else None,
        callbacks=callbacks,
        enable_checkpointing=False,
    )

    if args.dali:
        # DALI supplies the training data internally.
        trainer.fit(model, val_dataloaders=val_loader, ckpt_path=ckpt_path)
    else:
        trainer.fit(model, train_loader, val_loader, ckpt_path=ckpt_path)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# --------------------------------------------------------
# 3D object detection train file
#
# -------------------------------------------------------
import pdb, traceback
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import time
import os
import sys
import datetime
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR,'utils'))
sys.path.append(os.path.join(ROOT_DIR,'utils_xyz'))
sys.path.append(os.path.join(ROOT_DIR,'models'))
sys.path.append(os.path.join(ROOT_DIR,'config'))
from pointnet2_obj_detection_tf4 import placeholder_inputs,get_model,get_loss
#import provider
import get_dataset
from evaluation import EvaluationMetrics
from kitti_data_net_provider import kitti_data_net_provider #Normed_H5f,Net_Provider
from config import cfg
import multiprocessing as mp
from bbox_transform import bbox_transform_inv
from nms_3d import nms_3d
from evaluation_3d import evaluation_3d
ISDEBUG = False
ISSUMMARY = True

# Command-line configuration for training/evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', default='rawh5_kitti', help='rawh5_kitti')
parser.add_argument('--feed_elements', default='xyz_raw', help='xyz_1norm,xyz_midnorm,color_1norm')
parser.add_argument('--batch_size', type=int, default= 32, help='Batch Size during training [default: 24]')
parser.add_argument('--eval_fnglob_or_rate', default='train', help='file name str glob or file number rate: scan1*.nh5 0.2')
parser.add_argument('--num_point', type=int, default=2**15, help='Point number [default: 2**15]')
parser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Initial learning rate [default: 0.01]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--max_test_file_num', type=int, default=None, help='Which area to use for test, option: 1-6 [default: 6]')
parser.add_argument('--only_evaluate',action='store_true',help='do not train')
parser.add_argument('--finetune',action='store_true',help='do not train')
parser.add_argument('--model_epoch', type=int, default=10, help='the epoch of model to be restored')
parser.add_argument('--auto_break',action='store_true',help='If true, auto break when error occurs')
FLAGS = parser.parse_args()
# Shorthand aliases for the parsed flags.
DATASET_NAME = FLAGS.dataset_name
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate

# eval_fnglob_or_rate is either a float (fraction of files used for eval)
# or a filename glob; distinguish the two by attempting the float conversion.
try:
    FLAGS.eval_fnglob_or_rate = float(FLAGS.eval_fnglob_or_rate)
    log_eval_fn_glob = ''
    print('FLAGS.eval_fnglob_or_rate is eval file number rate')
except:
    log_eval_fn_glob = FLAGS.eval_fnglob_or_rate.split('*')[0]
    print('FLAGS.eval_fnglob_or_rate is eval name glob. log_eval_fn_glob:%s'%(log_eval_fn_glob))

date = datetime.datetime.now().date()
if FLAGS.only_evaluate:
    MAX_EPOCH = 1
    log_name = 'log_Test.txt'
else:
    MAX_EPOCH = FLAGS.max_epoch
    log_name = 'log_Train.txt'
# Encode the main hyperparameters into the log directory name.
# NOTE(review): original indentation was lost; placed at top level so that
# only_evaluate runs build the same LOG_DIR/MODEL_PATH — confirm.
FLAGS.log_dir = FLAGS.log_dir+'-B'+str(BATCH_SIZE)+'-'+\
                FLAGS.feed_elements+'-'+str(NUM_POINT)+'-'+FLAGS.dataset_name+'-eval_'+log_eval_fn_glob+str(date)
FLAGS.feed_elements = FLAGS.feed_elements.split(',')
LOG_DIR = os.path.join(ROOT_DIR,'train_res/object_detection_result/'+FLAGS.log_dir)
MODEL_PATH = os.path.join(LOG_DIR,'model.ckpt-'+str(FLAGS.model_epoch))
LOG_DIR_FUSION = os.path.join(ROOT_DIR,'train_res/object_detection_result/accuracy_log.txt')
if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR)
# Back up the exact code used for this run next to its logs.
os.system('cp %s/models/pointnet2_obj_detection_tf4.py %s' % (ROOT_DIR,LOG_DIR)) # bkp of model def
os.system('cp %s/config/config.py %s' % (ROOT_DIR,LOG_DIR))
os.system('cp %s/train_obj_detection.py %s' % (BASE_DIR,LOG_DIR)) # bkp of train procedure
acc_name = 'accuracy.txt'
# Append to the logs when fine-tuning, otherwise start fresh.
if FLAGS.finetune:
    LOG_FOUT = open(os.path.join(LOG_DIR, log_name), 'a')
    LOG_FOUT_FUSION = open(os.path.join(LOG_DIR, acc_name), 'a')
else:
    LOG_FOUT = open(os.path.join(LOG_DIR, log_name), 'w')
    LOG_FOUT_FUSION = open(os.path.join(LOG_DIR, acc_name), 'w')
LOG_FOUT_FUSION.write(str(FLAGS)+'\n\n')
LOG_FOUT.write(str(FLAGS)+'\n\n')

# Batch-norm decay schedule constants.
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
#BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()

# Load Data
data_provider = kitti_data_net_provider(DATASET_NAME,BATCH_SIZE)
NUM_CHANNELS = cfg.TRAIN.NUM_CHANNELS # x, y, z
NUM_CLASSES = cfg.TRAIN.NUM_CLASSES # bg, fg
NUM_REGRESSION = cfg.TRAIN.NUM_REGRESSION
START_TIME = time.time()
def log_string(out_str):
    """Append one line to the run log file (flushing immediately) and mirror
    it on stdout."""
    LOG_FOUT.write('%s\n' % out_str)
    LOG_FOUT.flush()
    print(out_str)
def get_learning_rate(batch):
    """Exponentially decayed learning rate for the given global step.

    :param batch: scalar tf.Variable holding the global step (incremented by
        the optimizer via global_step=batch).
    :returns: scalar learning-rate tensor, clipped below at 1e-5.
    """
    learning_rate = tf.train.exponential_decay(
                        BASE_LEARNING_RATE,  # Base learning rate.
                        batch * BATCH_SIZE,  # Current index into the dataset.
                        DECAY_STEP,          # Decay step.
                        DECAY_RATE,          # Decay rate.
                        staircase=True)
    # Bug fix: the clipped value was previously assigned to a misspelled name
    # ('learing_rate') and discarded, so the floor was never applied.
    learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!!
    return learning_rate
def get_bn_decay(batch):
    """Batch-norm decay schedule: the BN momentum decays exponentially with
    the global step, and the resulting decay is capped at BN_DECAY_CLIP."""
    momentum = tf.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
def train_eval(train_feed_buf_q,eval_feed_buf_q):
    """Build the detection graph, then alternate training and evaluation for
    MAX_EPOCH epochs, saving a checkpoint after each training epoch.

    :param train_feed_buf_q: optional multiprocessing queue feeding training
        minibatches; None pulls data directly from data_provider.
    :param eval_feed_buf_q: same for evaluation minibatches.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:'+str(GPU_INDEX)):
            pointclouds_pl, labels_pl, smpws_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT,NUM_CHANNELS, NUM_REGRESSION)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model and loss
            end_points, pred_class, pred_box, xyz_pl = get_model(pointclouds_pl, is_training_pl, NUM_CLASSES, bn_decay=bn_decay)
            loss, classification_loss, regression_loss, loss_details, pred_prob = get_loss(BATCH_SIZE,pred_class, pred_box, labels_pl,smpws_pl, xyz_pl)
            tf.summary.scalar('loss', loss)
            #tf.summary.scalar('loss_details',loss_details)

            #correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
            #accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
            #tf.summary.scalar('accuracy', accuracy)

            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver(max_to_keep=50)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        if not FLAGS.only_evaluate:
            train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                                 sess.graph)
            test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
        else:
            test_writer = None

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init, {is_training_pl:True})

        # define operations
        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred_class': pred_class,
               'pred_box': pred_box,
               'xyz_pl': xyz_pl,
               'loss': loss,
               'classification_loss':classification_loss,
               'regression_loss':regression_loss,
               'loss_details':loss_details,
               'pred_prob':pred_prob,
               'train_op': train_op,
               'merged': merged,
               'step': batch,
               'smpws_pl': smpws_pl}
        if FLAGS.finetune:
            saver.restore(sess,MODEL_PATH)
            log_string('finetune, restored model from: \n\t%s'%MODEL_PATH)

        log_string(data_provider.data_summary_str)

        # Optional tfprof profiling context (used by train_one_epoch).
        if ISDEBUG:
            builder = tf.profiler.ProfileOptionBuilder
            opts = builder(builder.time_and_memory()).order_by('micros').build()
            pctx = tf.contrib.tfprof.ProfileContext('/tmp/train_dir',
                                                    trace_steps=[],
                                                    dump_steps=[])
        else:
            opts = None
            pctx = None

        # When fine-tuning, resume epoch numbering after the restored epoch.
        epoch_start = 0
        if FLAGS.finetune:
            epoch_start+=(FLAGS.model_epoch+1)
        for epoch in range(epoch_start,epoch_start+MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()
            #if train_feed_buf_q == None:
            #    net_provider.update_train_eval_shuffled_idx()
            if not FLAGS.only_evaluate:
                train_log_str = train_one_epoch(sess, ops, train_writer,epoch,train_feed_buf_q,pctx,opts)
            else:
                train_log_str = ''
                saver.restore(sess,MODEL_PATH)
                log_string('only evaluate, restored model from: \n\t%s'%MODEL_PATH)
            log_string('training is finished \n')
            eval_log_str = eval_one_epoch(sess, ops, test_writer,epoch,eval_feed_buf_q)
            # Save the variables to disk.
            if not FLAGS.only_evaluate:
                if (epoch > 0 and epoch % 1 == 0) or epoch == MAX_EPOCH-1:
                    save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"),global_step=epoch)
                    log_string("Model saved in file: %s" % os.path.basename(save_path))
            # if epoch == MAX_EPOCH -1:
            # eval_log_str is the average precision returned by eval_one_epoch.
            LOG_FOUT_FUSION.write('batch_id:'+str(epoch)+', accuracy:'+str(eval_log_str)+'\n'+'\n\n' )
            LOG_FOUT_FUSION.flush()
            log_string('Accuracy is : %0.3f' % (eval_log_str))
def add_log(tot,epoch,batch_idx,loss_batch,c_TP_FN_FP,total_seen,t_batch_ls,SimpleFlag = 0):
    """Format one train/eval progress line (timings, loss, accuracy) and emit
    it through log_string; returns the formatted string.

    SimpleFlag > 0 appends the per-class average accuracy string;
    SimpleFlag > 1 additionally appends the full per-class breakdown.
    """
    ave_whole_acc, class_acc_str, ave_acc_str = EvaluationMetrics.get_class_accuracy(
        c_TP_FN_FP, total_seen)
    if t_batch_ls:
        # Mean (data, compute) time per batch, converted to per-block ms.
        per_batch = np.mean(np.concatenate(t_batch_ls, axis=1), axis=1)
        per_block = per_batch / BATCH_SIZE
        t_per_block_str = np.array2string(
            per_block * 1000, formatter={'float_kind': lambda x: "%0.1f" % x})
    else:
        t_per_block_str = "no-t"
    log_str = '%s [%d - %d] \t t_block(d,c):%s\tloss: %0.3f \tacc: %0.3f' % \
        (tot, epoch, batch_idx, t_per_block_str, loss_batch, ave_whole_acc)
    if SimpleFlag > 0:
        log_str += ave_acc_str
    if SimpleFlag > 1:
        log_str += class_acc_str
    log_string(log_str)
    return log_str
def train_one_epoch(sess, ops, train_writer,epoch,train_feed_buf_q,pctx,opts):
    """ ops: dict mapping from string to tf ops

    Run one full training epoch, pulling minibatches either directly from
    data_provider (train_feed_buf_q == None) or from the multiprocessing
    feed queue. Returns the last log string produced by add_log (currently
    always '' because the in-epoch accuracy block is disabled).
    """
    is_training = True
    #log_string('----')
    num_blocks = data_provider.num_train_data
    if num_blocks!=None:
        num_batches = num_blocks // BATCH_SIZE
        if num_batches ==0: return ''
    else:
        num_batches = None

    # Accumulators for the (currently disabled) in-epoch accuracy logging.
    total_seen = 0.0001
    loss_sum = 0.0
    c_TP_FN_FP = np.zeros(shape=(3,NUM_CLASSES))
    print('total batch num = ',num_batches)
    batch_idx = -1
    t_batch_ls=[]
    train_logstr = ''
    while (batch_idx < num_batches-1) or (num_batches==None):
        t0 = time.time()
        batch_idx += 1
        #start_idx = batch_idx * BATCH_SIZE
        #end_idx = (batch_idx+1) * BATCH_SIZE
        # NOTE(review): 'poinr_cloud_data' is a misspelling of
        # 'point_cloud_data'; both assignments below are dead since the real
        # variables are (re)assigned on every live path.
        poinr_cloud_data = []
        label_data = []
        if train_feed_buf_q == None:
            point_cloud_data, label_data = data_provider._get_next_minibatch() #cur_data,cur_label,cur_smp_weights = net_provider.get_train_batch(start_idx,end_idx)
        else:
            if train_feed_buf_q.qsize() == 0:
                print('train_feed_buf_q.qsize == 0')
                break
            #cur_data,cur_label,cur_smp_weights, batch_idx_buf,epoch_buf = train_feed_buf_q.get()
            point_cloud_data, label_data = train_feed_buf_q.get()
        # Uniform per-point sample weights (one per point of each sample).
        cur_smp_weights = np.ones((point_cloud_data.shape[0], point_cloud_data.shape[1]))
        t1 = time.time()
        if type(point_cloud_data) == type(None):
            break # all data reading finished
        feed_dict = {ops['pointclouds_pl']: point_cloud_data,
                     ops['labels_pl']: label_data,
                     ops['is_training_pl']: is_training,
                     ops['smpws_pl']: cur_smp_weights}
        if ISDEBUG and epoch == 0 and batch_idx ==5:
            # Profile exactly one step with tfprof when debugging.
            pctx.trace_next_step()
            pctx.dump_next_step()
            summary, step, _, loss_val, pred_class_val, classification_loss_val, regression_loss_val, loss_details_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred_class'], ops['classification_loss'], ops['regression_loss'], ops['loss_details']],
                feed_dict=feed_dict)
            pctx.profiler.profile_operations(options=opts)
        else:
            summary, step, _, loss_val, pred_class_val, classification_loss_val, regression_loss_val, loss_details_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred_class'], ops['classification_loss'], ops['regression_loss'], ops['loss_details']],
                feed_dict=feed_dict)
        # Record (data-load time, compute time) for this batch.
        t_batch_ls.append( np.reshape(np.array([t1-t0,time.time() - t1]),(2,1)) )
        if ISSUMMARY: train_writer.add_summary(summary, step)
        if batch_idx%100 == 0:
            print('the training batch is {}, the loss value is {}'.format(batch_idx, loss_val))
            print('the classificaiton loss is {}, the regression loss is {}'.format(classification_loss_val, regression_loss_val))
            print('the details of loss value, dx:{},dy:{},dz:{},dl:{},dw:{},dh:{},dtheta:{}'.format(\
                loss_details_val[0], loss_details_val[1], loss_details_val[2], loss_details_val[3], loss_details_val[4], loss_details_val[5], loss_details_val[6]))
            #print('the all merged is {}'.format(summary))
            #log_string('Accuracy is : %0.3f' % (eval_log_str))
            log_string('the training batch is {}, the loss value is {}'.format(batch_idx, loss_val))
            log_string('the classificaiton loss is {}, the regression loss is {}'.format(classification_loss_val, regression_loss_val))
            log_string('the details of loss value, dx:{},dy:{},dz:{},dl:{},dw:{},dh:{},dtheta:{}'.format(\
                loss_details_val[0], loss_details_val[1], loss_details_val[2], loss_details_val[3], loss_details_val[4], loss_details_val[5], loss_details_val[6]))
        # Disabled ('if False'): per-batch pointwise accuracy bookkeeping.
        # NOTE(review): it references 'cur_label', which no longer exists in
        # this scope -- would raise NameError if re-enabled as-is.
        if False and ( batch_idx == num_batches-1 or (epoch == 0 and batch_idx % 20 ==0) or batch_idx%200==0) : ## not evaluation in one epoch
            pred_class_val = np.argmax(pred_class_val, 2)
            loss_sum += loss_val
            total_seen += (BATCH_SIZE*NUM_POINT)
            c_TP_FN_FP += EvaluationMetrics.get_TP_FN_FP(NUM_CLASSES,pred_class_val,cur_label)
            train_logstr = add_log('train',epoch,batch_idx,loss_sum/(batch_idx+1),c_TP_FN_FP,total_seen,t_batch_ls)
        if batch_idx == 200:
            os.system('nvidia-smi')
    return train_logstr
def limit_eval_num_batches(epoch, num_batches):
    """Optionally cap the number of evaluation batches on epochs that are not
    multiples of 5. The cap is currently disabled (min of a value with
    itself is a no-op; the original limit of 31 is kept commented out)."""
    if epoch % 5 == 0:
        return num_batches
    num_batches = min(num_batches, num_batches)
    #num_batches = min(num_batches,31)
    return num_batches
def eval_one_epoch(sess, ops, test_writer, epoch,eval_feed_buf_q):
    """ ops: dict mapping from string to tf ops

    Run one evaluation pass: collect per-batch class probabilities, box
    regressions, point coordinates and ground-truth boxes, then assemble them
    into detections (boxes_assemble_filter) and score them (evaluation_3d).
    Returns the average precision (or '' if there is nothing to evaluate).
    """
    is_training = False
    total_seen = 0.00001
    loss_sum = 0.0
    c_TP_FN_FP = np.zeros(shape=(3,NUM_CLASSES))
    log_string('----')
    num_blocks = data_provider.evaluation_num # evaluation some of data
    if num_blocks != None:
        num_batches = num_blocks // BATCH_SIZE
        num_batches = limit_eval_num_batches(epoch,num_batches)
        if num_batches == 0:
            print('\ntest num_blocks=%d BATCH_SIZE=%d num_batches=%d'%(num_blocks,BATCH_SIZE,num_batches))
            return ''
    else:
        num_batches = None
    eval_logstr = ''
    t_batch_ls = []
    # Per-batch collections consumed by the final detection evaluation.
    all_gt_box = []
    all_pred_class_val = []
    all_pred_box_val = []
    all_xyz = []
    batch_idx = -1
    # label
    while (batch_idx < num_batches-1) or (num_batches==None):
        t0 = time.time()
        batch_idx += 1
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        if eval_feed_buf_q == None:
            point_cloud_data, label_data, gt_box = data_provider._get_evaluation_minibatch(start_idx, end_idx) #cur_data,cur_label,cur_smp_weights = net_provider.get_eval_batch(start_idx,end_idx)
        else:
            if eval_feed_buf_q.qsize() == 0:
                print('eval_feed_buf_q.qsize == 0')
                break
            point_cloud_data, label_data, epoch_buf = eval_feed_buf_q.get()
            #assert batch_idx == batch_idx_buf and epoch== epoch_buf
        # Uniform per-point sample weights.
        cur_smp_weights = np.ones((point_cloud_data.shape[0], point_cloud_data.shape[1]))
        t1 = time.time()
        if type(point_cloud_data) == type(None):
            print('batch_idx:%d, get None, reading finished'%(batch_idx))
            break # all data reading finished
        feed_dict = {ops['pointclouds_pl']: point_cloud_data,
                     ops['labels_pl']: label_data,
                     ops['is_training_pl']: is_training,
                     ops['smpws_pl']: cur_smp_weights }
        summary, step, loss_val, pred_class_val, pred_prob_val, pred_box_val, xyz_pl, classification_loss_val, regression_loss_val, loss_details_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred_class'], ops['pred_prob'], ops['pred_box'], ops['xyz_pl'], ops['classification_loss'], ops['regression_loss'], ops['loss_details']],
            feed_dict=feed_dict)
        if ISSUMMARY and test_writer != None:
            test_writer.add_summary(summary, step)
        t_batch_ls.append( np.reshape(np.array([t1-t0,time.time() - t1]),(2,1)) )
        all_gt_box.append(gt_box) # all_gt_box is a list, num_batches x BATCH_SIZE x ( k*8 ), all_gt_box[n][m] is the ground truth box of one label image.
        all_pred_class_val.append(pred_prob_val) # the all_pred_class_val is the list, num_batches x BATCH_SIZE x point_num x 4, all_pred_val[n] is the narray of BATCH_SIZE x point_num
        all_pred_box_val.append(pred_box_val) # the all_pred_box_val is also list, num_batches x BATCH_SIZE x point_num x 14, all_pred_box_val[n] is the narray of BATCH_SIZE x point_num
        all_xyz.append(xyz_pl) # the all_xyz shape: num_batches x (BATCHSIZE X point_num x3)
        # Disabled ('if False'): pointwise accuracy logging.
        # NOTE(review): references 'cur_label', which is not defined in this
        # scope -- would raise NameError if re-enabled as-is.
        if False and (batch_idx == num_batches-1 or (FLAGS.only_evaluate and batch_idx%30==0)):
            pred_logits = np.argmax(pred_prob_val, 2)
            total_seen += (BATCH_SIZE*NUM_POINT)
            loss_sum += loss_val
            c_TP_FN_FP += EvaluationMetrics.get_TP_FN_FP(NUM_CLASSES,pred_logits,cur_label)
            #net_provider.set_pred_label_batch(pred_prob_val,start_idx,end_idx)
            eval_logstr = add_log('eval',epoch,batch_idx,loss_sum/(batch_idx+1),c_TP_FN_FP,total_seen,t_batch_ls)
        if batch_idx%40 == 0:
            print('the test batch is {}, the loss value is {}'.format(batch_idx, loss_val))
            print('the classificaiton loss is {}, the regression loss is {}'.format(classification_loss_val, regression_loss_val))
            print('the details of loss value, dx:{},dy:{},dz:{},dl:{},dw:{},dh:{},dtheta:{}'.format(\
                loss_details_val[0], loss_details_val[1], loss_details_val[2], loss_details_val[3], loss_details_val[4], loss_details_val[5], loss_details_val[6]))
    ## estimate the all detection results
    # format of all_pred_boxes: l, w, h, theta, x, y, z, score
    # format of gt_boxes: type, l, w, h, theta, x, y, z
    # put assemble all_pred_class_val and all_pred_box_val together accroding to
    # the format of all_pred_boxes
    # using 0.05 to select the all prediction, getting all_3D_box
    # using nms_3d to filter out the all bounding box
    print('---------------------------')
    print('Start evaluation!!')
    all_pred_boxes, all_gt_boxes = boxes_assemble_filter(all_pred_class_val, all_pred_box_val, all_xyz, all_gt_box, 0.05)
    # caculate the average precision with the detection results
    aveg_precision = evaluation_3d(all_pred_boxes, all_gt_boxes, cfg.TEST.RPN_NMS_THRESH)
    # delete the all_gt_box, all_pred_class_val and all_pred_box_val to save
    # memory
    print('The average precision is {}'.format(aveg_precision))
    #if FLAGS.only_evaluate:
    #    obj_dump_dir = os.path.join(FLAGS.log_dir,'obj_dump')
    #    net_provider.gen_gt_pred_objs(FLAGS.visu,obj_dump_dir)
    #    net_provider.write_file_accuracies(FLAGS.log_dir)
    #    print('\nobj out path:'+obj_dump_dir)
    return aveg_precision
def boxes_assemble_filter(all_pred_class_val, all_pred_box_val, all_xyz, all_gt_box , thresh = 0.05):
    """Assemble per-point anchor scores and box regressions into detection
    boxes, keep those with score >= thresh, and apply 3D NMS per sample.

    :param all_pred_class_val: list of per-batch class-probability arrays.
    :param all_pred_box_val: list of per-batch box-regression arrays.
    :param all_xyz: list of per-batch point-coordinate arrays.
    :param all_gt_box: list (per batch) of per-sample ground-truth boxes.
    :param thresh: minimum foreground score for a box to be kept.
    :returns: (all_pred_boxes, gt_box_) -- parallel lists with one entry per
        sample that produced at least one detection; each prediction entry is
        an array of [l, w, h, theta, x, y, z, score] rows.
    """
    #all_pred_boxes = np.zeros([1,8]) #l, w, h, theta, x, y, z, score
    all_pred_boxes = [] # saved in list
    num_batch = len(all_pred_class_val)
    batch_size = all_pred_class_val[0].shape[0]
    gt_box_ = []
    num_anchors = cfg.TRAIN.NUM_ANCHORS
    num_class = cfg.TRAIN.NUM_CLASSES
    num_regression = cfg.TRAIN.NUM_REGRESSION
    # generate, (num_samples x num_point) x 8
    for i in range(num_batch):
        for j in range(batch_size):
            index = i*batch_size + j
            # Foreground score per point per anchor: take the second class
            # column of each anchor's score block.
            temp_pred_class = np.array([all_pred_class_val[i][j,:,(x*num_class+1):((x+1)*num_class)] for x in range(num_anchors)]).transpose(1, 0, 2) ##shape: 512 x num_anchors x 1
            temp_pred_class = temp_pred_class.reshape(-1, 1) # shape: n x 1
            '''
            # l, w, h, alpha, x, y ,z
            temp_pred_box_l = np.array([ np.exp(all_pred_box_val[i][j,:,(x*num_regression)])*anchor_length for x in range(num_anchors)])
            temp_pred_box_l = temp_pred_box_l.reshape(-1,1)
            temp_pred_box_w = np.array([ np. exp(all_pred_box_val[i][j,:,(x*num_regression+1)])*anchor_width for x in range(num_anchors)])
            temp_pred_box_w = temp_pred_box_w.reshape(-1,1)
            temp_pred_box_h = np.array([ np.exp(all_pred_box_val[i][j,:,(x*num_regression+2)])*anchor_height for x in range(num_anchors)])
            temp_pred_box_h = temp_pred_box_h.reshape(-1,1)
            temp_pred_box_alpha = np.array([ all_pred_box_val[i][j,:,(x*num_regression+3)]*np.pi/4+anchor_alpha[x,0] for x in range(num_anchors)])
            temp_pred_box_alpha = temp_pred_box_alpha.reshape(-1,1)
            temp_pred_box_x = np.array([ all_pred_box_val[i][j,:,(x*num_regression+4)]*anchor_length + all_xyz[i][j,:,0] for x in range(num_anchors) ])
            temp_pred_box_x = temp_pred_box_x.reshape(-1,1)
            temp_pred_box_y = np.array([ all_pred_box_val[i][j,:,(x*num_regression+5)]*anchor_width + all_xyz[i][j,:,1] for x in range(num_anchors) ])
            temp_pred_box_y = temp_pred_box_y.reshape(-1,1)
            temp_pred_box_z = np.array([ all_pred_box_val[i][j,:,(x*num_regression+6)]*anchor_height + all_xyz[i][j,:,3] for x in range(num_anchors) ])
            temp_pred_box_z = temp_pred_box_z.reshape(-1,1)
            '''
            # temp_pred_box = np.array([all_pred_box_val[i][j,:,(x*num_regression):((x+1)*num_regression)] for x in range(num_anchors)]).transpose(1,0,2) ## shape: 512 x num_anchors x 7
            # temp_pred_box = temp_pred_box.reshape(-1, num_regression) # shape: n x 7
            ## transform the prediction into real num
            temp_all_box = bbox_transform_inv(all_pred_box_val[i][j,:,:], all_xyz[i][j,:,:])
            #temp_index = np.full((temp_pred_class.shape[0],1), index) # shape: n x 1
            # temp_all_ = np.concatenate((temp_index, temp_pred_box_l, temp_pred_box_w, temp_pred_box_h, temp_pred_box_alpha, temp_pred_box_x, temp_pred_box_y, temp_pred_box_z, temp_pred_class),axis=1) # shape: n x 9
            temp_all_ = np.concatenate(( temp_all_box,temp_pred_class), axis=1)
            ## getting box whose confidence is over thresh
            temp_all_ = temp_all_[ np.where( temp_all_[:,7] >= thresh)[0], :] ## temp_all_ shape: n x 8
            ## useing nms
            if temp_all_.shape[0] > 0: ## there is no prediction box whose prediction is over thresh
                temp_all_ = nms_3d(temp_all_, cfg.TEST.NMS)
                all_pred_boxes.append(temp_all_)
                gt_box_.append(all_gt_box[i][j])
    # all_pred_boxes = np.delete(all_pred_boxes, 0, 0)
    # all_pred_boxes = all_pred_boxes[ np.where( all_pred_boxes[:,8] >= thresh)[0], :]
    return all_pred_boxes, gt_box_
def add_train_feed_buf(train_feed_buf_q):
    """Producer process: fill the training feed queue with minibatches.

    Runs on the CPU. Each queue item is [point_cloud_data, label_data],
    matching the two values train_one_epoch() unpacks. A None
    point_cloud_data marks the end of the data for an epoch.
    """
    with tf.device('/cpu:0'):
        max_buf_size = 20
        num_blocks = data_provider.num_train_data #num_blocks = net_provider.train_num_blocks
        if num_blocks != None:
            num_batches = num_blocks // BATCH_SIZE
        else:
            num_batches = None

        # When fine-tuning, resume epoch numbering after the restored epoch.
        epoch_start = 0
        if FLAGS.finetune:
            epoch_start += (FLAGS.model_epoch+1)
        for epoch in range(epoch_start, epoch_start+MAX_EPOCH):
            # net_provider.update_train_eval_shuffled_idx()
            batch_idx = -1
            while (batch_idx < num_batches-1) or (num_batches == None):
                if train_feed_buf_q.qsize() < max_buf_size:
                    batch_idx += 1
                    start_idx = batch_idx * BATCH_SIZE
                    end_idx = (batch_idx+1) * BATCH_SIZE
                    point_cloud_data, label_data = data_provider._get_next_minibatch() #cur_data,cur_label,cur_smp_weights = net_provider.get_train_batch(start_idx,end_idx)
                    # Bug fix: previously enqueued undefined names (cur_data,
                    # cur_label, cur_smp_weights) -> NameError at runtime. The
                    # consumer unpacks exactly two items.
                    train_feed_buf_q.put([point_cloud_data, label_data])
                    if type(point_cloud_data) == type(None):
                        print('add_train_feed_buf: get None data from net_provider, all data put finished. epoch= %d, batch_idx= %d'%(epoch,batch_idx))
                        break # all data reading finished
                else:
                    # Queue full: back off to let the consumer drain it.
                    time.sleep(0.1*BATCH_SIZE*max_buf_size/3)
    print('add_train_feed_buf: data reading finished. epoch= %d, batch_idx= %d'%(epoch,batch_idx))
def add_eval_feed_buf(eval_feed_buf_q):
    """Producer process: fill the evaluation feed queue with minibatches.

    Runs on the CPU. Each queue item is [point_cloud_data, label_data, epoch],
    matching the three values eval_one_epoch() unpacks
    (point_cloud_data, label_data, epoch_buf). A None point_cloud_data marks
    the end of the data for an epoch.
    """
    with tf.device('/cpu:1'):
        max_buf_size = 20
        num_blocks = data_provider.evaluation_num
        if num_blocks != None:
            raw_num_batches = num_blocks // BATCH_SIZE
        else:
            raw_num_batches = None

        # When fine-tuning, resume epoch numbering after the restored epoch.
        epoch_start = 0
        if FLAGS.finetune:
            epoch_start += (FLAGS.model_epoch+1)
        for epoch in range(epoch_start, epoch_start+MAX_EPOCH):
            batch_idx = -1
            num_batches = limit_eval_num_batches(epoch, raw_num_batches)
            while (batch_idx < num_batches-1) or (num_batches == None):
                if eval_feed_buf_q.qsize() < max_buf_size:
                    batch_idx += 1
                    start_idx = batch_idx * BATCH_SIZE
                    end_idx = (batch_idx+1) * BATCH_SIZE
                    point_cloud_data, label_data = data_provider._get_evaluation_minibatch(start_idx, end_idx) #cur_data,cur_label,cur_smp_weights = net_provider.get_eval_batch(start_idx,end_idx)
                    # Bug fix: previously enqueued undefined names (cur_data,
                    # cur_label, cur_smp_weights) -> NameError at runtime. The
                    # consumer unpacks [point_cloud_data, label_data, epoch_buf].
                    eval_feed_buf_q.put([point_cloud_data, label_data, epoch])
                    if type(point_cloud_data) == type(None):
                        print('add_eval_feed_buf: get None data from net_provider, all data put finished. epoch= %d, batch_idx= %d'%(epoch,batch_idx))
                        break # all data reading finished
                else:
                    # Queue full: back off to let the consumer drain it.
                    time.sleep(0.1*BATCH_SIZE*max_buf_size/3)
    print('add_eval_feed_buf: data reading finished. epoch= %d, batch_idx= %d'%(epoch,batch_idx))
def main():
    """Entry point: either run feeder producers and the trainer in separate
    processes, or run train_eval() directly in this process.

    The multiprocessing path is currently hard-disabled by the leading False.
    """
    IsFeedData_MultiProcessing = False and (not FLAGS.auto_break)
    if not IsFeedData_MultiProcessing:
        train_eval(None, None)
        return
    train_feed_buf_q = mp.Queue()
    eval_feed_buf_q = mp.Queue()
    processes = {
        'add_train_buf': mp.Process(target=add_train_feed_buf, args=(train_feed_buf_q,)),
        'add_eval_buf': mp.Process(target=add_eval_feed_buf, args=(eval_feed_buf_q,)),
        'train_eval': mp.Process(target=train_eval, args=(train_feed_buf_q, eval_feed_buf_q,)),
    }
    for proc in processes.values():
        proc.start()
    for proc in processes.values():
        proc.join()
if __name__ == "__main__":
    if FLAGS.auto_break:
        # Debug mode: drop into pdb post-mortem on any uncaught exception.
        try:
            main()
            LOG_FOUT.close()
        except:  # intentionally bare: catch everything for the debugger
            # Bug fix: renamed 'type' -> 'exc_type' (shadowed the builtin).
            exc_type, exc_value, tb = sys.exc_info()
            traceback.print_exc()
            pdb.post_mortem(tb)
    else:
        main()
        #train_eval(None,None)
        LOG_FOUT.close()
    LOG_FOUT_FUSION.close()
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.ops import array_ops
from artifice.log import logger # noqa
from artifice import utils
from artifice import sparse
from artifice import conv_utils
from artifice import img, vis
NEG_INF = np.finfo(np.float32).min
class PeakDetection(keras.layers.Layer):
    """Finds local maxima in each channel of the image.

    Does this pretty crudely by comparing each pixel with all of those <= 2 units
    away. That is, (i,j) is a local max if inputs[i,j] is greater than the pixels
    marked x shown below:

    |---|---|---|---|---|---|---|
    |   |   |   |   |   |   |   |
    |---|---|---|---|---|---|---|
    |   |   |   | x |   |   |   | 0
    |---|---|---|---|---|---|---|
    |   |   | x | x | x |   |   | 1
    |---|---|---|---|---|---|---|
    |   | x | x |i,j| x | x |   | 2
    |---|---|---|---|---|---|---|
    |   |   | x | x | x |   |   | 3
    |---|---|---|---|---|---|---|
    |   |   |   | x |   |   |   | 4
    |---|---|---|---|---|---|---|
    |   |   |   |   |   |   |   | 5
    |---|---|---|---|---|---|---|
          0   1   2   3   4   5

    We consider pixels near the edge to be local maxima if they satisfy the
    above, assuming marked positions outside the image domain are at -inf.
    In cases of ties, both points are returned.

    Notes:
    * Loop indices work out like so:
      0 => 2, 2+1
      1 => 1, 3+1
      2 => 0, 4+1
      3 => 1, 3+1
      4 => 2, 2+1
    """

    def __init__(self, threshold_abs=None, **kwargs):
        # Optional absolute floor: peaks must also exceed this value.
        self.threshold_abs = threshold_abs
        super().__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        """(batch_size, num_channels, num_peaks, 2)"""
        # NOTE(review): call() returns tf.where(mask), i.e. a rank-2 tensor of
        # (num_hits, rank) indices; the docstring above may be stale -- confirm.
        return (None, len(input_shape))

    def call(self, inputs):
        """Return the indices of local maxima in `inputs`.

        Pads the image with NEG_INF so edge pixels can qualify, then ANDs
        together `inputs >= shifted` comparisons over the diamond-shaped
        neighborhood shown in the class docstring.

        :param inputs: [batch, height, width, channels] float tensor.
        :returns: indices (from tf.where) of pixels that are local maxima.
        """
        padded = tf.pad(inputs, [[0, 0], [2, 2], [2, 2], [0, 0]],
                        constant_values=NEG_INF)
        # Bug fix: np.ones_like(inputs, dtype=tf.bool) is invalid -- tf.bool is
        # not a NumPy dtype and np cannot build an array from a symbolic
        # tensor. Use tf.ones_like to start from an all-True mask.
        mask = tf.ones_like(inputs, dtype=tf.bool)
        for di in range(5):
            start = abs(di - 2)
            stop = -abs(di - 2) + 4
            for dj in range(start, stop + 1):
                # (di, dj) == (2, 2) compares the pixel with itself (always
                # True), which is harmless.
                mask = tf.logical_and(
                    mask,
                    inputs >= padded[:, di: di + inputs.shape[1],
                                     dj: dj + inputs.shape[2], :])
        if self.threshold_abs is not None:
            mask = tf.logical_and(mask, inputs > tf.constant(
                self.threshold_abs, tf.float32))
        return tf.where(mask)
class SparseConv2D(keras.layers.Layer):
    """2D convolution using the sbnet library.

    The input to this layer should therefore be a list of tensors `[inputs,
    mask]` where `mask` has shape `[N, W, H, 1]`. Only blocks of the input
    where the mask is active (per `tol`) are gathered, convolved, and
    scattered back into a zero-initialized output.

    In theory, additional performance gain can be achieved by making inputs a
    tf.Variable. We have not tested this.

    :param filters:
    :param kernel_size:
    :param batch_size: if provided, allows SparseConv2D to use sparse_scatter_var
    (assuming eager execution is not enabled)
    :param strides:
    :param padding:
    :param activation:
    :param use_bias:
    :param kernel_initializer:
    :param bias_initializer:
    :param kernel_regularizer:
    :param bias_regularizer:
    :param activity_regularizer:
    :param kernel_constraint:
    :param bias_constraint:
    :param block_size:
    :param tol:
    :param avgpool:
    :returns:
    :rtype:
    """

    def __init__(self,
                 filters,
                 kernel_size,
                 batch_size=None,  # todo: replace with a use_var option
                 strides=[1, 1],
                 padding='valid',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 block_size=[16, 16],
                 tol=0.5,
                 avgpool=False,
                 **kwargs):
        super().__init__(**kwargs)
        self.filters = filters
        self.kernel_size = utils.listify(kernel_size, 2)
        self.batch_size = batch_size
        # Use a persistent output variable only in graph mode with a known
        # batch size (needed for sbnet's sparse_scatter_var path).
        self.use_var = batch_size is not None and not tf.executing_eagerly()
        self.strides = utils.listify(strides, 2)
        self.padding = padding
        self.activation = keras.layers.Activation(activation)
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint
        self.block_size = utils.listify(block_size, 2)
        # Size of one output block after a 'valid' convolution of one input
        # block; also used as the block stride in both domains.
        self.output_block_size = [conv_utils.conv_output_length(
            self.block_size[i],
            self.kernel_size[i],
            'valid',
            self.strides[i]) for i in [0, 1]]
        self.block_offset = [0, 0]
        self.output_block_offset = self.block_offset
        self.block_stride = self.output_block_size
        self.output_block_stride = self.output_block_size
        self.tol = tol
        self.avgpool = avgpool
        # For 'same' padding, the whole input is padded once up front in
        # call() and the per-block convolutions run as 'VALID'.
        if self.padding == 'valid':
            pad_size = [0, 0]
        else:
            pad_h = self.kernel_size[0] // 2
            pad_w = (self.kernel_size[1] - 1) // 2
            pad_size = [pad_h, pad_w]
        self.pad = keras.layers.ZeroPadding2D(pad_size)

    def build(self, input_shape):
        # input_shape is a pair: [inputs shape, mask shape].
        input_shape, mask_shape = input_shape
        # Number of blocks covering the input in each spatial dimension.
        self.block_count = [utils.divup(input_shape[1], self.block_stride[0]),
                            utils.divup(input_shape[2], self.block_stride[1])]
        if len(input_shape) != 4:
            raise ValueError(f'Inputs should have rank 4. Received input shape: '
                             f'{input_shape}')
        if input_shape[3] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[3])
        kernel_shape = self.kernel_size + [input_dim, self.filters]
        self.kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            dtype=tf.float32,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True)
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters,),
                dtype=tf.float32,
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True)
        else:
            self.bias = None
        if self.use_var:
            # Pre-allocate a non-trainable output buffer for scatter-into-var.
            output_shape = list(self.compute_output_shape([input_shape, mask_shape]))
            self.outputs = self.add_variable(
                name='outputs',
                shape=[self.batch_size] + output_shape[1:],
                dtype=tf.float32,
                initializer='zeros',
                trainable=False,
                use_resource=False)

    def compute_output_shape(self, input_shape):
        # input_shape is a pair: [inputs shape, mask shape]; the mask does not
        # affect the output shape.
        input_shape, mask_shape = input_shape
        shape = conv_utils.conv_output_shape(
            input_shape,
            self.filters,
            self.kernel_size,
            self.padding,
            self.strides)
        return tf.TensorShape(shape)

    def call(self, inputs):
        # inputs is a pair: [image tensor, activity mask].
        inputs, mask = inputs
        if self.use_var:
            # Reuse the persistent buffer, zeroing it between calls.
            self.outputs.assign(tf.zeros_like(self.outputs))
            outputs = self.outputs
        else:
            output_shape = list(
                self.compute_output_shape([inputs.shape, mask.shape]))
            batch_size = array_ops.shape(inputs)[0]
            outputs = tf.zeros([batch_size] + output_shape[1:], tf.float32)
        if self.padding == 'same':
            # Pad once globally so block convolutions can run as 'VALID'.
            inputs = self.pad(inputs)
            mask = self.pad(mask)
        # Find which blocks are active according to the mask.
        indices = sparse.reduce_mask(
            mask,
            block_count=self.block_count,
            bsize=self.block_size,
            boffset=self.block_offset,
            bstride=self.block_stride,
            tol=self.tol,
            avgpool=self.avgpool)
        # Gather active blocks, convolve them densely, then scatter results
        # back into the zeroed output.
        blocks = sparse.gather(
            inputs,
            indices.bin_counts,
            indices.active_block_indices,
            bsize=self.block_size,
            boffset=self.block_offset,
            bstride=self.block_stride)
        strides = [1, self.strides[0], self.strides[1], 1]
        blocks = tf.nn.conv2d(
            blocks,
            self.kernel,
            strides=strides,
            padding='VALID')
        if self.use_bias:
            blocks = tf.nn.bias_add(blocks, self.bias, data_format='NHWC')
        # NOTE(review): self.activation is a keras Activation layer, so this
        # check is always True; Activation(None) is the identity anyway.
        if self.activation is not None:
            blocks = self.activation(blocks)
        outputs = sparse.scatter(
            blocks,
            indices.bin_counts,
            indices.active_block_indices,
            outputs,
            bsize=self.output_block_size,
            boffset=self.output_block_offset,
            bstride=self.output_block_stride,
            use_var=self.use_var)
        if self.use_var:
            # Restore an unknown batch dimension for downstream layers.
            outputs.set_shape([None] + outputs.shape.as_list()[1:])
        return outputs
class SparseConv2DTranspose(keras.layers.Layer):
"""2D transpose convolution using the sbnet library.
:param filters:
:param kernel_size:
:param batch_size: needed to allocate space for outputs, if using a variable
:param strides:
:param padding:
:param data_format:
:param dilation_rate:
:param activation:
:param use_bias:
:param kernel_initializer:
:param bias_initializer:
:param kernel_regularizer:
:param bias_regularizer:
:param activity_regularizer:
:param kernel_constraint:
:param bias_constraint:
:param block_size:
:param tol:
:param avgpool:
:returns:
:rtype:
"""
def __init__(self,
filters,
kernel_size,
batch_size,
strides=[1, 1],
padding='valid',
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
block_size=[16, 16],
tol=0.5,
avgpool=False,
**kwargs):
super().__init__(**kwargs)
self.filters = filters
self.kernel_size = utils.listify(kernel_size, 2)
self.batch_size = batch_size
self.use_var = batch_size is not None and not tf.executing_eagerly()
self.strides = utils.listify(strides, 2)
self.padding = padding
self.activation = keras.layers.Activation(activation)
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.kernel_constraint = kernel_constraint
self.bias_constraint = bias_constraint
self.block_size = utils.listify(block_size, 2)
self.output_block_size = [conv_utils.deconv_output_length(
self.block_size[i],
self.kernel_size[i],
'valid',
stride=self.strides[i]) for i in [0, 1]]
self.block_offset = [0, 0]
self.output_block_offset = self.block_offset
self.block_stride = self.block_size
self.output_block_stride = self.output_block_size # might not be correct
self.tol = tol
self.avgpool = avgpool
def build(self, input_shape):
input_shape, mask_shape = input_shape
self.block_count = [utils.divup(input_shape[1], self.block_stride[0]),
utils.divup(input_shape[2], self.block_stride[1])]
if len(input_shape) != 4:
raise ValueError(f'Inputs should have rank 4. Received input shape: '
f'{input_shape}')
if input_shape[3] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[3])
kernel_shape = self.kernel_size + [self.filters, input_dim]
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=tf.float32)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=tf.float32)
else:
self.bias = None
if self.use_var:
output_shape = self.compute_output_shape([input_shape, mask_shape])
self.outputs = self.add_variable(
name='outputs',
shape=[self.batch_size] + list(output_shape)[1:],
dtype=tf.float32,
initializer='zeros',
trainable=False,
use_resource=False)
def compute_output_shape(self, input_shape):
input_shape, mask_shape = input_shape
shape = conv_utils.deconv_output_shape(
input_shape,
self.filters,
self.kernel_size,
self.padding,
self.strides)
return tf.TensorShape(shape)
def call(self, inputs):
    """Apply a sparse transposed convolution over the active mask blocks.

    `inputs` is a pair (inputs, mask).  Active blocks of `mask` are
    gathered from `inputs`, convolved densely, and scattered back into a
    zero (or persistent-variable) output tensor.
    """
    inputs, mask = inputs
    if self.padding == 'valid':
        raise NotImplementedError('valid padding for transpose convolution')
    # Locate mask blocks whose activation exceeds the tolerance.
    indices = sparse.reduce_mask(
        mask,
        block_count=self.block_count,
        bsize=self.block_size,
        boffset=self.block_offset,
        bstride=self.block_stride,
        tol=self.tol,
        avgpool=self.avgpool)
    # Gather only the active input blocks into a dense stack.
    blocks = sparse.gather(
        inputs,
        indices.bin_counts,
        indices.active_block_indices,
        bsize=self.block_size,
        boffset=self.block_offset,
        bstride=self.block_stride)
    blocks_shape = array_ops.shape(blocks)
    num_blocks = blocks_shape[0]
    height, width = blocks_shape[1], blocks_shape[2]
    kernel_h, kernel_w = self.kernel_size
    stride_h, stride_w = self.strides
    out_pad_h = out_pad_w = None  # output padding not implemented
    # Infer the dynamic output shape:
    out_height = conv_utils.deconv_output_length(
        height,
        kernel_h,
        padding='valid',
        output_padding=out_pad_h,
        stride=stride_h)
    out_width = conv_utils.deconv_output_length(
        width,
        kernel_w,
        padding='valid',
        output_padding=out_pad_w,
        stride=stride_w)
    blocks_output_shape = (num_blocks, out_height, out_width, self.filters)
    strides = [1, self.strides[0], self.strides[1], 1]
    # Dense transposed convolution applied to the gathered blocks.
    blocks = tf.nn.conv2d_transpose(
        blocks,
        self.kernel,
        blocks_output_shape,
        strides=strides,
        padding='VALID',
        data_format='NHWC')
    if not tf.executing_eagerly():
        # Infer the static output shape:
        out_shape = self.compute_output_shape([blocks.shape, mask.shape])
        blocks.set_shape(out_shape)
    if self.use_bias:
        blocks = tf.nn.bias_add(blocks, self.bias, data_format='NHWC')
    if self.activation is not None:
        blocks = self.activation(blocks)
    if self.use_var:
        # Reuse the persistent output variable; zero it before scattering.
        self.outputs.assign(tf.zeros_like(self.outputs))
        outputs = self.outputs
    else:
        output_shape = list(
            self.compute_output_shape([inputs.shape, mask.shape]))
        batch_size = array_ops.shape(inputs)[0]
        outputs = tf.zeros([batch_size] + output_shape[1:],
                           tf.float32)  # todo: might not work
    # Scatter the convolved blocks back into the dense output tensor.
    outputs = sparse.scatter(
        blocks,
        indices.bin_counts,
        indices.active_block_indices,
        outputs,
        bsize=self.output_block_size,
        boffset=self.output_block_offset,
        bstride=self.output_block_stride,
        use_var=self.use_var)
    if self.use_var:
        outputs.set_shape([None] + outputs.shape.as_list()[1:])
    return outputs
class ReduceMask(keras.layers.Layer):
    """Keras layer wrapping the sparse mask-reduction operation.

    Given a mask tensor, finds the active blocks (those whose pooled mask
    value exceeds `tol`).  Unlike the raw op, the output is a plain list
    `[bin_counts, active_block_indices]` rather than a namedtuple.

    :param block_size: spatial size of each block.
    :param block_offset: offset of the block grid.
    :param block_stride: stride between consecutive blocks.
    :param tol: activation threshold for a block to count as active.
    :param avgpool: pool blocks with average- instead of max-pooling.
    """

    def __init__(self,
                 block_size=[16, 16],
                 block_offset=[0, 0],
                 block_stride=[16, 16],
                 tol=0.5,
                 avgpool=False,
                 **kwargs):
        super().__init__(**kwargs)
        self.tol = tol
        self.avgpool = avgpool
        self.block_size = utils.listify(block_size, 2)
        self.block_offset = utils.listify(block_offset, 2)
        self.block_stride = utils.listify(block_stride, 2)

    def build(self, mask_shape):
        # One block per stride step in each spatial dimension (ceil div).
        counts = [utils.divup(mask_shape[axis + 1], self.block_stride[axis])
                  for axis in (0, 1)]
        self.block_count = counts

    def compute_output_shape(self, _):
        return [tf.TensorShape([]), tf.TensorShape([None, 3])]

    def call(self, mask_):
        reduced = sparse.reduce_mask(
            mask_,
            block_count=self.block_count,
            bsize=self.block_size,
            boffset=self.block_offset,
            bstride=self.block_stride,
            tol=self.tol,
            avgpool=self.avgpool)
        return [reduced.bin_counts, reduced.active_block_indices]
class SparseGather(keras.layers.Layer):
    """Keras layer wrapping the sparse gather operation.

    Gathers the active blocks (as produced by :class:`ReduceMask`) out of
    a dense input tensor into a stacked block tensor.

    :param block_size: spatial size of each block.
    :param block_offset: offset of the block grid.
    :param block_stride: stride between consecutive blocks.
    """

    def __init__(self,
                 block_size=[16, 16],
                 block_offset=[0, 0],
                 block_stride=[16, 16],
                 **kwargs):
        super().__init__(**kwargs)
        self.block_size = utils.listify(block_size, 2)
        self.block_offset = utils.listify(block_offset, 2)
        self.block_stride = utils.listify(block_stride, 2)

    def compute_output_shape(self, input_shape):
        data_shape = input_shape[0]
        # [num_active_blocks, block_h, block_w, channels]
        return tf.TensorShape(
            [None, self.block_size[0], self.block_size[1], data_shape[3]])

    def call(self, inputs):
        inputs, bin_counts, active_block_indices = inputs
        return sparse.gather(
            inputs,
            bin_counts,
            active_block_indices,
            bsize=self.block_size,
            boffset=self.block_offset,
            bstride=self.block_stride)
class SparseScatter(keras.layers.Layer):
    """Keras layer wrapping the sparse scatter operation.

    Writes a stack of blocks back into a dense zero tensor of shape
    `output_shape` at the positions given by the active block indices.

    :param output_shape: full static shape of the scattered output.
    :param block_size: spatial size of each block.
    :param block_offset: offset of the block grid.
    :param block_stride: stride between consecutive blocks.
    :param use_var: scatter into a variable instead of a fresh tensor.
    """

    def __init__(self,
                 output_shape,
                 block_size=[16, 16],
                 block_offset=[0, 0],
                 block_stride=[16, 16],
                 use_var=False,
                 **kwargs):
        super().__init__(**kwargs)
        # Trailing underscore avoids clashing with Layer.output_shape.
        self.output_shape_ = list(output_shape)
        self.use_var = use_var
        self.block_size = utils.listify(block_size, 2)
        self.block_offset = utils.listify(block_offset, 2)
        self.block_stride = utils.listify(block_stride, 2)

    def compute_output_shape(self, _):
        return tf.TensorShape(self.output_shape_)

    def call(self, inputs):
        inputs, bin_counts, active_block_indices = inputs
        dense = tf.zeros(self.output_shape_, tf.float32)
        return sparse.scatter(
            inputs,
            bin_counts,
            active_block_indices,
            dense,
            bsize=self.block_size,
            boffset=self.block_offset,
            bstride=self.block_stride,
            use_var=self.use_var)
def main():
    """Train a tiny sparse-conv model on disk images and plot the results."""
    # tf.enable_eager_execution()
    image_input = keras.layers.Input(shape=(100, 100, 1))
    features = SparseConv2D(1, [3, 3], 4, padding='same')(
        [image_input, image_input])
    features = SparseConv2D(1, [1, 1], 4, padding='same')(
        [features, features])
    # x = SparseConv2DTranspose(1, [2, 2], strides=[2, 2], padding='same')([x, x]) # noqa
    # x = keras.layers.MaxPool2D()(x)
    model = keras.Model(image_input, features)
    model.compile(optimizer=tf.train.AdadeltaOptimizer(0.1), loss='mse',
                  metrics=['mae'])
    # Autoencoder-style training: inputs double as targets.
    frames = np.array([
        img.open_as_float('../data/disks_100x100/images/1001.png'),
        img.open_as_float('../data/disks_100x100/images/1002.png'),
        img.open_as_float('../data/disks_100x100/images/1003.png'),
        img.open_as_float('../data/disks_100x100/images/1004.png')])
    frames = frames[:, :, :, np.newaxis]  # add the channel dimension
    dataset = tf.data.Dataset.from_tensor_slices((frames, frames))
    dataset = dataset.batch(4).repeat(-1)
    model.fit(dataset, epochs=5, steps_per_epoch=1000)
    originals = frames
    reconstructions = model.predict(frames)
    vis.plot_image(*originals, *reconstructions, columns=4, vmin=0., vmax=1.)
    vis.show('../figs/sparse_conv2d_example.pdf')
# Script entry point: run the example when executed directly.
if __name__ == '__main__':
    main()
|
"""
REST filters for Rate APIs
"""
from django.db import models
from django.db.models import OuterRef, Subquery, QuerySet
from django_filters import rest_framework as filters
from .models import Rate
class RateFilter(filters.FilterSet):
    """
    Rate object filter.

    Supports filtering by user/key ownership, value-date ranges, value
    bounds, currencies, and "latest value per currency" subqueries.
    """
    user = filters.BooleanFilter(
        label="filter rate associated to connected user",
        method='user_filter')
    key = filters.CharFilter(
        label="filter rates with key",
        method='key_filter')
    key_or_null = filters.CharFilter(
        label="filter rates with key or without key",
        method='key_or_null_filter')
    key_isnull = filters.CharFilter(
        label="filter rates without key", method='key_isnull_filter')
    value_date = filters.DateFilter(
        label="filter rates at a specific date",
        field_name='value_date',
        lookup_expr='exact')
    from_obj = filters.DateFilter(
        label="filter rates after a specific date (included)",
        field_name='value_date',
        lookup_expr='gte')
    to_obj = filters.DateFilter(
        label="filter rates before a specific date (included)",
        field_name='value_date',
        lookup_expr='lte')
    value = filters.NumberFilter(
        label="filter rates with a specific value",
        field_name='value',
        lookup_expr='exact')
    lower_bound = filters.NumberFilter(
        label="filter rates with a value higher than the given value",
        field_name='value',
        lookup_expr='gte')
    higher_bound = filters.NumberFilter(
        label="filter rates with a value lower than the given value",
        field_name='value',
        lookup_expr='lte')
    currency = filters.CharFilter(
        label="filter by target currency",
        field_name='currency',
        lookup_expr='iexact')
    base_currency = filters.CharFilter(
        label="filter by base currency",
        field_name='base_currency',
        lookup_expr='iexact')
    currency_latest_values = filters.CharFilter(
        label="Only output latest rates for currency",
        method='currency_latest_values_filter')
    # Fixed copy-paste bug: label previously said "for currency".
    base_currency_latest_values = filters.CharFilter(
        label="Only output latest rates for base currency",
        method='base_currency_latest_values_filter')
    ordering = filters.OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('key', 'key'),
            ('value', 'value'),
            ('value_date', 'value_date'),
            ('base_currency', 'base_currency'),
            ('currency', 'currency'),
        ),
    )

    class Meta:
        """
        Meta
        """
        model = Rate
        exclude = ['pk', ]

    def user_filter(self, queryset: QuerySet,
                    name: str, value: str) -> QuerySet:
        """
        Filter on user; anonymous requests only see user-less rates
        """
        if self.request and self.request.user and \
                self.request.user.is_authenticated:
            return queryset.filter(**{
                'user': self.request.user,
            })
        return queryset.filter(user__isnull=True)

    def key_filter(self, queryset: QuerySet,
                   name: str, value: str) -> QuerySet:
        """
        Filter on key, only filters if request.user is set and authenticated
        """
        if self.request and self.request.user and \
                self.request.user.is_authenticated:
            return queryset.filter(**{
                'user': self.request.user,
                'key': value
            })
        return queryset.filter(user__isnull=True)

    def key_or_null_filter(self, queryset: QuerySet,
                           name: str, value: str) -> QuerySet:
        """
        Filter on key if user is authenticated or on records without user
        """
        if self.request and self.request.user and \
                self.request.user.is_authenticated:
            return queryset.filter(
                (models.Q(user=self.request.user) & models.Q(key=value)) |
                models.Q(key__isnull=True)
            )
        return queryset.filter(user__isnull=True)

    @staticmethod
    def key_isnull_filter(queryset: QuerySet,
                          name: str, value: str) -> QuerySet:
        """
        Filter on records without key (the given value is ignored)
        """
        return queryset.filter(key__isnull=True)

    @staticmethod
    def currency_latest_values_filter(queryset: QuerySet,
                                      name: str, value: str) -> QuerySet:
        """
        Returns a queryset of latest values for a currency
        """
        queryset = queryset.filter(currency=value)
        # Subquery picks the most recent value_date per currency row.
        latest = queryset.filter(
            currency=OuterRef('currency')
        ).order_by('-value_date')
        return queryset.annotate(
            currency_latest=Subquery(latest.values('value_date')[:1])
        ).filter(value_date=models.F('currency_latest'))

    @staticmethod
    def base_currency_latest_values_filter(queryset: QuerySet,
                                           name: str, value: str) -> QuerySet:
        """
        Returns a queryset of latest values for a base currency
        """
        queryset = queryset.filter(base_currency=value)
        # Subquery picks the most recent value_date per base-currency row.
        latest = queryset.filter(
            base_currency=OuterRef('base_currency')
        ).order_by('-value_date')
        return queryset.annotate(
            base_currency_latest=Subquery(latest.values('value_date')[:1])
        ).filter(value_date=models.F('base_currency_latest'))
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements command to list guest policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.compute.instances.ops_agents import ops_agents_policy as agent_policy
from googlecloudsdk.api_lib.compute.instances.ops_agents.converters import guest_policy_to_ops_agents_policy_converter as converter
from googlecloudsdk.api_lib.compute.instances.ops_agents.validators import guest_policy_validator
from googlecloudsdk.api_lib.compute.os_config import utils as osconfig_api_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute.os_config import utils as osconfig_command_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
def _TransformGuestPolicyDescription(resource):
"""Returns a length-limited guest policy description."""
max_len = 30 # Show only the first 30 characters if description is long.
description = resource.get('description', '')
return (description[:max_len] +
'...') if len(description) > max_len else description
def _Args(parser):
  """Parses input flags and sets up output formats."""
  # Table columns shown by `... list`; `description()` is the custom
  # transform registered below (truncates long descriptions).
  parser.display_info.AddFormat("""
        table(
          id.basename(),
          description(),
          create_time,
          update_time
        )
      """)
  parser.display_info.AddTransforms(
      {'description': _TransformGuestPolicyDescription})
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class List(base.ListCommand):
  """List Google Cloud's operations suite agents (Ops Agents) policies.

  {command} lists policies that facilitate agent management across Compute
  Engine instances based on user specified instance filters. These policies
  install, specify versioning, enable autoupgrade, and remove Ops Agents.

  The command returns a list of policies, including the ``ID'', ``DESCRIPTION'',
  ``CREATE_TIME'', and ``UPDATE_TIME'' for each policy. If no policies are
  found, it returns an empty list. If malformed policies are found, they are
  included in the result list with the descriptions replaced by ``<MALFORMED>'',
  and a warning is shown.
  """

  detailed_help = {
      'DESCRIPTION':
          '{description}',
      'EXAMPLES':
          """\
          To list guest policies in the current project, run:

          $ {command}
          """,
  }

  @staticmethod
  def Args(parser):
    """See base class."""
    _Args(parser)

  def Run(self, args):
    """See base class."""
    release_track = self.ReleaseTrack()
    # Ops Agents policies live in the v1beta OS Config guest-policy API.
    client = osconfig_api_utils.GetClientInstance(
        release_track, api_version_override='v1beta')
    messages = osconfig_api_utils.GetClientMessages(
        release_track, api_version_override='v1beta')
    project = properties.VALUES.core.project.GetOrFail()
    request = messages.OsconfigProjectsGuestPoliciesListRequest(
        pageSize=args.page_size,
        parent=osconfig_command_utils.GetProjectUriPath(project),
    )
    service = client.projects_guestPolicies
    # Page through all guest policies, keeping only those that look like
    # Ops Agents policies (the predicate filters client-side).
    for guest_policy in list_pager.YieldFromList(
        service,
        request,
        limit=args.limit,
        predicate=guest_policy_validator.IsOpsAgentPolicy,
        batch_size=osconfig_command_utils.GetListBatchSize(args),
        field='guestPolicies',
        batch_size_attribute='pageSize',
    ):
      try:
        yield converter.ConvertGuestPolicyToOpsAgentPolicy(guest_policy)
      except exceptions.BadArgumentException:
        # A policy that cannot be converted is surfaced as <MALFORMED>
        # rather than aborting the whole listing.
        log.warning(
            'Encountered a malformed policy. The Ops Agents policy [%s] may '
            'have been modified directly by the OS Config guest policy API / '
            'gcloud commands. If so, please delete and re-create with the Ops '
            'Agents policy gcloud commands. If not, this may be an internal '
            'error.',
            guest_policy.name,
        )
        yield agent_policy.OpsAgentPolicy(
            assignment=None,
            agent_rules=None,
            description='<MALFORMED>',
            etag=None,
            name=guest_policy.name,
            update_time=guest_policy.updateTime,
            create_time=guest_policy.createTime
        )
|
import pyarrow
import rpy2.rinterface as rinterface
import rpy2.robjects as robjects
import rpy2.robjects.conversion as conversion
import rpy2.robjects.packages as packages
import typing
import warnings
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import rpy2.robjects
# Import the R 'arrow' package; the rest of the module shares C/C++
# Arrow objects between R and Python through its C-interface helpers.
rarrow = packages.importr('arrow')

# R arrow version this module was written against; warn (don't fail)
# on a mismatch.  Fixed typo in the warning text ("againt").
TARGET_VERSION = '5.0.'
if not rarrow.__version__.startswith(TARGET_VERSION):
    warnings.warn(
        'This was designed against arrow versions starting with %s'
        ' but you have %s' %
        (TARGET_VERSION, rarrow.__version__))
def pyarrow_to_r_array(
        obj: 'pyarrow.lib.Array'  # fixed annotation typo: was 'pyarrow.lilb.Array'
):
    """Create an R `arrow::Array` object from a pyarrow Array.

    This is sharing the C/C++ object between the two languages.

    The returned object depends on the active conversion rule in
    rpy2. By default it will be an `rpy2.robjects.Environment`.
    """
    schema_ptr = rarrow.allocate_arrow_schema()[0]
    array_ptr = rarrow.allocate_arrow_array()[0]
    try:
        # Export through the Arrow C Data Interface, then import on the R side.
        obj._export_to_c(int(array_ptr), int(schema_ptr))
        r_array = rarrow.ImportArray(array_ptr, schema_ptr)
    finally:
        rarrow.delete_arrow_schema(schema_ptr)
        rarrow.delete_arrow_array(array_ptr)
    return r_array
def rarrow_to_py_array(
        obj: 'rpy2.robjects.Environment'
):
    """Create a pyarrow array from an R `arrow::Array` object.

    The underlying C/C++ array is shared between the two languages
    rather than copied.
    """
    schema_holder = rarrow.allocate_arrow_schema()[0]
    array_holder = rarrow.allocate_arrow_array()[0]
    try:
        # R exports through the Arrow C Data Interface; Python imports it.
        rarrow.ExportArray(obj, array_holder, schema_holder)
        result = pyarrow.Array._import_from_c(int(array_holder),
                                              int(schema_holder))
    finally:
        rarrow.delete_arrow_schema(schema_holder)
        rarrow.delete_arrow_array(array_holder)
    return result
def pyarrow_to_r_recordbatch(
        obj: 'pyarrow.lib.RecordBatch'
):
    """Create an R `arrow::RecordBatch` object from a pyarrow RecordBatch.

    The underlying C/C++ record batch is shared between the two
    languages.  The returned object depends on the active conversion
    rule in rpy2; by default it is an `rpy2.robjects.Environment`.
    """
    schema_holder = rarrow.allocate_arrow_schema()[0]
    array_holder = rarrow.allocate_arrow_array()[0]
    try:
        obj._export_to_c(int(array_holder), int(schema_holder))
        result = rarrow.ImportRecordBatch(array_holder, schema_holder)
    finally:
        rarrow.delete_arrow_schema(schema_holder)
        rarrow.delete_arrow_array(array_holder)
    return result
def pyarrow_to_r_recordbatchreader(
        obj: 'pyarrow.lib.RecordBatchReader'
):
    """Create an R `arrow::RecordBatchReader` from a pyarrow RecordBatchReader.

    The underlying C/C++ stream is shared between the two languages.
    The returned object depends on the active conversion rule in rpy2;
    by default it is an `rpy2.robjects.Environment`.
    """
    stream_holder = rarrow.allocate_arrow_array_stream()[0]
    try:
        obj._export_to_c(int(stream_holder))
        return rarrow.ImportRecordBatchReader(stream_holder)
    finally:
        rarrow.delete_arrow_array_stream(stream_holder)
def pyarrow_to_r_chunkedarray(
        obj: 'pyarrow.lib.ChunkedArray'
):
    """Create an R `arrow::ChunkedArray` object from a pyarrow ChunkedArray.

    Each chunk's C/C++ array is shared between the two languages.  The
    returned object depends on the active conversion rule in rpy2; by
    default it is an `rpy2.robjects.Environment`.
    """
    r_chunks = [pyarrow_to_r_array(chunk) for chunk in obj.chunks]
    return rarrow.ChunkedArray['create'](*r_chunks)
def rarrow_to_py_chunkedarray(
        obj: 'rpy2.robjects.Environment'
) -> pyarrow.lib.ChunkedArray:
    """Create a pyarrow chunked array from an R `arrow::ChunkedArray` object.

    Each chunk's C/C++ array is shared between the two languages.
    """
    py_chunks = [rarrow_to_py_array(chunk) for chunk in obj['chunks']]
    return pyarrow.chunked_array(py_chunks)
def pyarrow_to_r_datatype(
        obj: 'pyarrow.lib.DataType'
):
    """Create an R `arrow::DataType` object from a pyarrow DataType.

    This is sharing the C/C++ object between the two languages.
    """
    schema_ptr = rarrow.allocate_arrow_schema()[0]
    try:
        obj._export_to_c(int(schema_ptr))
        return rarrow.ImportType(schema_ptr)
    finally:
        rarrow.delete_arrow_schema(schema_ptr)
def rarrow_to_py_datatype(
        obj: 'rpy2.robjects.Environment'
) -> pyarrow.lib.DataType:
    """Create a pyarrow.lib.DataType from an R `arrow::DataType` object.

    This is sharing the C/C++ object between the two languages.
    """
    schema_ptr = rarrow.allocate_arrow_schema()[0]
    try:
        rarrow.ExportType(obj, schema_ptr)
        # _import_from_c expects an integer address; wrap the R pointer
        # in int() as the other *_to_py_* converters in this module do.
        py_datatype = pyarrow.lib.DataType._import_from_c(int(schema_ptr))
    finally:
        rarrow.delete_arrow_schema(schema_ptr)
    return py_datatype
def pyarrow_to_r_field(
        obj: 'pyarrow.lib.Field'
):
    """Create an R `arrow::Field` object from a pyarrow Field.

    This is sharing the C/C++ object between the two languages.
    """
    schema_ptr = rarrow.allocate_arrow_schema()[0]
    try:
        obj._export_to_c(int(schema_ptr))
        return rarrow.ImportField(schema_ptr)
    finally:
        rarrow.delete_arrow_schema(schema_ptr)
def rarrow_to_py_field(
        obj: 'rpy2.robjects.Environment'
) -> pyarrow.lib.Field:
    """Create a pyarrow.lib.Field from an R `arrow::Field` object.

    This is sharing the C/C++ object between the two languages.
    (Docstring previously said `arrow::DataType` by mistake.)
    """
    schema_ptr = rarrow.allocate_arrow_schema()[0]
    try:
        rarrow.ExportField(obj, schema_ptr)
        # _import_from_c expects an integer address; wrap the R pointer
        # in int() as the other *_to_py_* converters in this module do.
        py_field = pyarrow.Field._import_from_c(int(schema_ptr))
    finally:
        rarrow.delete_arrow_schema(schema_ptr)
    return py_field
def pyarrow_to_r_table(
        obj: 'pyarrow.lib.Table',
        py2rpy: typing.Optional[
            conversion.Converter] = None
):
    """Create an R `arrow::Table` object from a pyarrow Table.

    This is sharing the C/C++ object between the two languages.

    The returned object depends on the active conversion rule in
    rpy2. By default it will be an `rpy2.robjects.Environment`.

    :param py2rpy: converter used for each column; defaults to this
        module's converter.
    """
    if py2rpy is None:
        py2rpy = converter
    # Fixed: the py2rpy parameter was previously ignored (the module-level
    # converter was always used, as an old TODO noted).
    kwargs = dict(
        (k, py2rpy.py2rpy(v))
        for k, v in zip(obj.schema.names, obj.columns)
    )
    kwargs['schema'] = pyarrow_to_r_schema(obj.schema)
    return rarrow.Table['create'](**kwargs)
def rarrow_to_py_table(
        obj: 'rpy2.robjects.Environment',
        rpy2py: typing.Optional[
            conversion.Converter] = None
):
    """Create a pyarrow Table from an R `arrow::Table` object.

    This is sharing the C/C++ object between the two languages.

    The returned object depends on the active conversion rule in
    rpy2.

    :param rpy2py: converter used for each column; defaults to this
        module's converter.
    """
    if rpy2py is None:
        rpy2py = converter
    # TODO: rpy2 conversion forces something a little kludgy here.
    # Look up each column's R class in the converter's name-class map to
    # pick the matching rarrow_to_py_* function.
    columns = [
        (rpy2py._rpy2py_nc_map[rinterface.SexpEnvironment][x.rclass[0]](x))
        for x in obj['columns']
    ]
    schema = rarrow_to_py_schema(obj['schema'])
    py_table = pyarrow.Table.from_arrays(columns,
                                         schema=schema)
    return py_table
def pyarrow_to_r_schema(
        obj: 'pyarrow.lib.Schema'
):
    """Create an R `arrow::Schema` object from a pyarrow Schema.

    The underlying C/C++ schema is shared between the two languages.
    The returned object depends on the active conversion rule in rpy2;
    by default it is an `rpy2.robjects.Environment`.
    """
    schema_holder = rarrow.allocate_arrow_schema()[0]
    try:
        obj._export_to_c(int(schema_holder))
        result = rarrow.ImportSchema(schema_holder)
    finally:
        rarrow.delete_arrow_schema(schema_holder)
    return result
def rarrow_to_py_schema(
        obj: 'rpy2.robjects.Environment'
):
    """Create a pyarrow Schema from an R `arrow::Schema` object.

    The underlying C/C++ schema is shared between the two languages.
    """
    schema_holder = rarrow.allocate_arrow_schema()[0]
    try:
        rarrow.ExportSchema(obj, schema_holder)
        result = pyarrow.Schema._import_from_c(int(schema_holder))
    finally:
        rarrow.delete_arrow_schema(schema_holder)
    return result
# Module-level converter wiring the functions above into rpy2's
# conversion machinery.
converter = conversion.Converter('default arrow conversion',
                                 template=robjects.default_converter)
# Pyarrow to R arrow: dispatch on the concrete pyarrow type.
converter.py2rpy.register(pyarrow.lib.Array, pyarrow_to_r_array)
converter.py2rpy.register(pyarrow.lib.Field, pyarrow_to_r_field)
converter.py2rpy.register(pyarrow.lib.ChunkedArray,
                          pyarrow_to_r_chunkedarray)
converter.py2rpy.register(pyarrow.lib.RecordBatch,
                          pyarrow_to_r_recordbatch)
converter.py2rpy.register(pyarrow.lib.RecordBatchReader,
                          pyarrow_to_r_recordbatchreader)
converter.py2rpy.register(pyarrow.lib.Schema, pyarrow_to_r_schema)
converter.py2rpy.register(pyarrow.lib.Table, pyarrow_to_r_table)
converter.py2rpy.register(pyarrow.lib.DataType, pyarrow_to_r_datatype)
# R arrow to pyarrow: R-side arrow objects are environments, so dispatch
# on the R class name stored in the environment.
converter._rpy2py_nc_map.update(
    {
        rinterface.SexpEnvironment:
        conversion.NameClassMap(robjects.Environment)
    }
)
# TODO: use complete class name hierarchy to be safer?
converter._rpy2py_nc_map[rinterface.SexpEnvironment].update(
    {
        'Array': rarrow_to_py_array,
        'ChunkedArray': rarrow_to_py_chunkedarray,
        'Field': rarrow_to_py_field,
        'Schema': rarrow_to_py_schema,
        'Table': rarrow_to_py_table,
        'Type': rarrow_to_py_datatype
    }
)
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.ads.google_ads.v1.proto.services import google_ads_service_pb2_grpc
class GoogleAdsServiceGrpcTransport(object):
    """gRPC transport class providing stubs for
    google.ads.googleads.v1.services GoogleAdsService API.

    The transport exposes the raw gRPC stubs so callers can use advanced
    gRPC features directly.
    """

    # The scopes needed to make gRPC calls to all of the methods defined
    # in this service.
    _OAUTH_SCOPES = ()

    def __init__(self,
                 channel=None,
                 credentials=None,
                 address='googleads.googleapis.com:443'):
        """Instantiate the transport class.

        Args:
            channel (grpc.Channel): An existing ``Channel`` to use for
                calls. Mutually exclusive with ``credentials``; passing
                both raises an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests,
                identifying this application to the service. When
                omitted, credentials are ascertained from the
                environment.
            address (str): The address where the service is hosted.
        """
        # Channels come with credentials baked in, so supplying both is
        # a contradiction; refuse early.
        if channel is not None and credentials is not None:
            raise ValueError(
                'The `channel` and `credentials` arguments are mutually '
                'exclusive.', )

        if channel is None:
            channel = self.create_channel(
                address=address,
                credentials=credentials,
            )
        self._channel = channel

        # gRPC uses "stub" objects bound to the channel that expose one
        # basic method per RPC.
        self._stubs = {
            'google_ads_service_stub':
                google_ads_service_pb2_grpc.GoogleAdsServiceStub(channel),
        }

    @classmethod
    def create_channel(
            cls,
            address='googleads.googleapis.com:443',
            credentials=None,
            **kwargs):
        """Create and return a gRPC channel object.

        Args:
            address (str): The host for the channel to use.
            credentials (~.Credentials): The authorization credentials to
                attach to requests; ascertained from the environment when
                omitted.
            kwargs (dict): Keyword arguments forwarded to channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return google.api_core.grpc_helpers.create_channel(
            address,
            credentials=credentials,
            scopes=cls._OAUTH_SCOPES,
            **kwargs
        )

    @property
    def channel(self):
        """The gRPC channel used by the transport.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return self._channel

    @property
    def search(self):
        """Return the gRPC stub for :meth:`GoogleAdsServiceClient.search`.

        Returns all rows that match the search query.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['google_ads_service_stub'].Search

    @property
    def mutate(self):
        """Return the gRPC stub for :meth:`GoogleAdsServiceClient.mutate`.

        Creates, updates, or removes resources, with support for atomic
        transactions spanning multiple resource types (e.g. atomically
        creating a campaign and its budget), for up to thousands of
        mutates at once. It is essentially a wrapper around the
        individual mutate methods, adding: atomic transactions, temp
        resource names, and somewhat reduced latency versus a series of
        separate mutate calls. Only resources that support atomic
        transactions are included, so it cannot replace every individual
        service call.

        Atomicity simplifies error handling: a failed request leaves the
        account unchanged instead of in an inconsistent intermediate
        state, so you either reach the desired state or can safely retry.

        Temp resource names let one request create a resource and
        reference it later in the same request: put a negative number in
        the server-allocated part of the name (e.g. a campaign budget
        created as 'customers/123/campaignBudgets/-1' can be referenced
        from 'Campaign.budget' in the same request). A resource must be
        created with its temp name before that name is used; temp names
        are not remembered across requests; each temp name must use a
        unique negative number even across resource types; there is no
        limit on how many appear in one request.

        For latency, group mutates by resource type or the request may
        time out: cost is roughly one individual mutate call per change
        of resource type within the request.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['google_ads_service_stub'].Mutate
|
# -*- coding: utf-8 -*-
"""
<NAME>, <NAME>, <NAME>
Sandia National Laboratories
February 26, 2020
Sudoku Board solvers.
- logical puzzle solvers, which simply apply logical operators
- heuristic cell selectors
"""
import random
import operators
import config_data
import board_update_descriptions
import board
import translate
import logging
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# LOGICAL PUZZLE SOLVER SUPPORT METHODS
# -----------------------------------------------------------------------------
def calculate_status(sboard, msg):
    """ Centralized diagnostics for after applying logical operators.

    Args:
        sboard : the board to evaluate
        msg : the message describing the operator just applied
    Returns:
        int : the number of uncertain values in sboard (0 on contradiction)
    """
    # A contradiction means an earlier guess was bad; report 0 since no
    # more cells can be assigned on this board.
    if sboard.invalidCells():
        logger.debug("Found logical contradiction: invalid cells on board %s",
                     str(sboard.getStateStr(True, False)))
        return 0
    uncertain_count = sboard.countUncertainValues()
    logger.debug("Uncertainty state after %s\n%s\n%s uncertain values remaining",
                 str(msg), str(sboard.getStateStr(True)), str(uncertain_count))
    return uncertain_count
def get_operator(op_name):
    """ Look up the function implementing the given operator.

    Args:
        op_name : the operator name to look up
    Returns:
        callable: the operators-module function for op_name
    Raises:
        Exception: if the operator name or its function is unknown
    """
    # Split the try blocks so each except only guards the lookup that can
    # raise it, and chain the cause for easier debugging.
    try:
        op_func = board_update_descriptions.operators_description[op_name]['function']
    except KeyError as err:
        raise Exception(f'Can\'t find operator {op_name}') from err
    try:
        function = getattr(operators, op_func)
    except AttributeError as err:
        raise Exception(f'Can\'t find function {op_func}') from err
    return function
def get_action(action_name):
    """ Look up the function implementing the given action.

    Args:
        action_name : the action name to look up
    Returns:
        callable: the function for action_name
    Raises:
        Exception: if the action name or its function is unknown
    """
    # Bug fix: the original did `raise(f'...')`, which raises a *string*
    # and therefore fails with "exceptions must derive from BaseException"
    # instead of delivering the intended message.
    try:
        # Yes, it's insecure, but at least we're getting the thing we eval from ourselves
        action_func = board_update_descriptions.actions_description[action_name]['function']
        function = eval(action_func)
    except KeyError as err:
        raise Exception(f'Can\'t find action {action_name}') from err
    except AttributeError as err:
        raise Exception(f'Can\'t find function {action_func}') from err
    return function
# Control-flow sentinels returned (alongside the board) by the apply_*
# helpers below; the hex values are arbitrary magic numbers.
BREAK = int(0xdead)   # caller should break/restart its operator loop
NORMAL = int(0xcafe)  # caller should continue with the next operator
def apply_one_operator(op, sboard):
    """ Apply one operator.

    Args:
        op : name of the operator to apply
        sboard : the board to apply it to
    Returns:
        (board, int): the updated board and a control code —
        BREAK if the operator loop should break/restart, NORMAL otherwise.
    """
    prevValues = sboard.countUncertainValues()
    # Removed a dead `board.Board(sboard)` copy that was never used.
    sboard = get_operator(op)(sboard)
    newValues = calculate_status(sboard, op)
    changed = newValues < prevValues
    # Some configurations restart the operator search whenever an
    # operator makes progress.
    if changed and sboard.config.restart_op_search_on_match:
        return (sboard, BREAK)
    return (sboard, NORMAL)
def apply_logical_operator(op, sboard):
    """ Apply a single logical operator followed by the free operators,
    optionally repeating while it keeps making progress.

    Returns:
        (Board, int): the updated board and the control code from the
        last application of the operator.
    """
    # Iterative form of the original tail recursion.
    while True:
        before = sboard.countUncertainValues()
        sboard, control = apply_one_operator(op, sboard)
        sboard = apply_free_operators(sboard)
        made_progress = sboard.countUncertainValues() < before
        retry = (sboard.config.retry_logical_op_after_free_ops
                 and control != BREAK
                 and made_progress)
        if not retry:
            return (sboard, control)
def loop_operators(sboard, operations_list, function_to_apply):
    """ Repeatedly apply function_to_apply over operations_list, starting
    from sboard and following the configured control flow, until an entire
    pass changes nothing or no uncertain values remain.

    Returns:
        Board: the (possibly updated) board.
    """
    remaining = sboard.countUncertainValues()
    while remaining > 0:
        for operation in operations_list:
            sboard, control = function_to_apply(operation, sboard)
            if control == BREAK:
                break
        now_remaining = sboard.countUncertainValues()
        # A full pass with no change means a fixed point was reached.
        if now_remaining == remaining:
            break
        remaining = now_remaining
    return sboard
def apply_free_operators(sboard, force=False):
    """ Iterate over the free operators until no values change.

    Args:
        sboard (Board) : the board to simplify
        force (bool) : apply the free operators even when the board's
            config disables simplification
    Returns:
        Board: the (possibly simplified) board.
    """
    # Simplify only if we're being forced or our config allows it.
    # (Idiom fix: avoid explicit `== False` comparisons.)
    if not force and not sboard.config.simplify:
        return sboard
    # Apply the free operators to a fixed point.
    return loop_operators(sboard,
                          sboard.config.free_operations,
                          apply_one_operator)
# -----------------------------------------------------------------------------
# LOGICAL PUZZLE OPERATOR SELECTORS
# -----------------------------------------------------------------------------
def select_all_logical_operators_ordered(ordering=None):
    """ Return all parameterized logical operators, sorted by the given
    key function (defaults to each operator's configured cost). """
    if not ordering:
        # Default ordering: ascending operator cost from the descriptions.
        def ordering(name):
            logger.debug("Asking to order operator %s", str(name))
            return board_update_descriptions.operators_description[name]['cost']
    op_names = [op['internal_name'] for op in translate.get_possible_operators()]
    costly_ops = sorted(op_names, key=ordering)
    logger.debug("Allowing for costly operations %s", str(costly_ops))
    return costly_ops
# -----------------------------------------------------------------------------
# LOGICAL PUZZLE SOLVERS
# -----------------------------------------------------------------------------
def logical_solve(sboard, logical_ops):
    """ Solves sboard using only logical operators.
    Args:
        sboard (Board) : the board to apply operators to
        logical_ops ([operator, ...]) :
            a list of logical operator function names describing the set of
            logical operators to apply and the order in which
            to apply them (applying the free operators to a fixed
            point in between each specified logical operator)
    Returns:
        sboard (Board) modified by the logical operators until no
        operator in logical_ops can make any more changes.
    NOTE: whether application restarts at the first operator after a
    match is controlled by sboard.config, not by an argument here.
    Currently iterates between propagating exclusions and assigning
    inclusions until no new constraints are identified.
    """
    # Iterate until we don't change the board or no uncertain values remain
    sboard = loop_operators(sboard,
                            logical_ops,
                            apply_logical_operator)
    # Log the full set of requested operations (logical + free).
    req_ops = list(logical_ops)
    req_ops.extend(sboard.config.free_operations)
    sboard.config.log_operations_request(
        req_ops,
        f'Requested application of {len(logical_ops)} operators, {len(sboard.config.free_operations)} free operators',
        sboard)
    return sboard
def logical_solve_action(sboard, logical_ops):
    """ Run logical_solve, then record the applied operators as an
    'applyops' action on the resulting board.

    Returns:
        [Board]: a single-element list holding the solved board.
    """
    solved = logical_solve(sboard, logical_ops)
    solved.addAction({'action': 'applyops', 'operators': list(logical_ops)})
    return [solved]
# -----------------------------------------------------------------------------
# SEARCH METHODS
# -----------------------------------------------------------------------------
def expand_cell(sboard, cell_id):
    """
    Expand the board cell identified by cell_id into one child board
    per candidate value.

    Args:
        sboard : the starting board to "expand"
        cell_id : the identifier of the cell to expand
    Returns:
        [Board, ...]: one copy of sboard per candidate value of cell_id,
        each with cell_id assigned to a distinct value; together they
        cover all of cell_id's possibilities.  If cell_id is already
        certain, simply returns [sboard].
    NOTE: propagation of the assigned value is not performed automatically.
    """
    sboard.computeAccessibleCells()
    if sboard.accessible_cells:
        assert cell_id in sboard.accessible_cells, \
            f'Cannot pivot on cell {cell_id} that is not accessible in {sboard.accessible_cells}'
    pivot_cell = sboard.getCell(cell_id)
    # A certain cell needs no expansion.
    if pivot_cell.isCertain():
        return [sboard]
    children = []
    for candidate in pivot_cell.getValueSet():
        child = board.Board(sboard)
        child_cell = child.getCell(cell_id)
        child_cell.assign(candidate)
        children.append(child)
        note = f'Assigning {str(child_cell.getIdentifier())} = {board.Cell.displayValue(child_cell.getCertainValue())}'
        child.config.complete_operation('pivot', note, child, True)
        child.addAction({'action': 'pivot', 'cell': list(type(child).getLocations(cell_id, child.getDegree())), 'value': candidate})
    # NOTE: like the original, this reads the loop variable after the loop.
    note = f'Pivoted on {str(child_cell.getIdentifier())} for {len(children)} new (unvalidated) boards'
    sboard.config.complete_operation('pivot', note, sboard, True)
    return children
def expand_cell_with_assignment(sboard, cell_and_val):
    """
    Expand the board cell identified by cell_and_val into two boards:
    a primary board with the value assigned to the cell, and a backup
    board with the value excluded from it.

    Args:
        sboard : the starting board to "expand"
        cell_and_val : (cell_id, value) tuple naming the cell and the
            value to assign to it
    Returns:
        [Board, Board]: the assigned board first, then the backup board
        containing the remaining values.
    NOTE: propagation of the assigned value is not performed automatically.
    """
    cell_id, value = cell_and_val
    return __expand_cell_with_assignment(sboard, cell_id, value, False)
def expand_cell_with_exclusion(sboard, cell_and_val):
    """
    Expands the board cell identified by cell_id into a board with that
    value excluded, and a board with that value assigned (marked as backup).
    Args:
        sboard : the starting board to "expand"
        cell_and_val : a tuple:
            cell_id : the identifier of the cell to expand
            value : a value to exclude from cell_id
    Returns:
        collection of board : new boards. Each board is a copy of sboard
            except that in the first board cell_id contains the remaining values
            and in the second (backup) board cell_id is set to value.
    NOTE: propagation of the assigned value is not performed automatically.
    """
    (cell_id, value) = cell_and_val
    return __expand_cell_with_assignment(sboard, cell_id, value, True)
def __expand_cell_with_assignment(sboard, cell_id, value, make_exclusion_primary=False):
    """
    Expands the board cell identified by cell_id into the two partitions
    specified: one board with value assigned, one with value excluded.
    Args:
        sboard : the starting board to "expand"
        cell_id : the identifier of the cell to expand
        value : a value to assign to cell_id
        make_exclusion_primary : if True, the exclusion board is listed
            first and the assignment board is marked as background/backup;
            otherwise the reverse.
    Returns:
        collection of board : two new copies of sboard; together they
            cover the possible values of cell_id in sboard.
    NOTE: propagation of the assigned value is not performed automatically.
    """
    sboard.computeAccessibleCells()
    if sboard.accessible_cells:
        assert cell_id in sboard.accessible_cells, \
            f'Cannot pivot on cell {cell_id} that is not accessible in {sboard.accessible_cells}'
    cell = sboard.getCell(cell_id)
    # A certain cell needs no expansion.
    if(cell.isCertain()):
        return [sboard]
    expansion = []
    assigned = board.Board(sboard)
    removed = board.Board(sboard)
    # `action` labels the primary operation; the secondary board is
    # demoted to background.
    action = None
    if make_exclusion_primary:
        assigned.setToBackground()
        action = 'exclude'
        expansion.append(removed)
        expansion.append(assigned)
    else:
        removed.setToBackground()
        action = 'assign'
        expansion.append(assigned)
        expansion.append(removed)
    cell_loc = list(type(assigned).getLocations(cell_id, assigned.getDegree()))
    # Build the assignment child.
    bcell = assigned.getCell(cell_id)
    bcell.assign(value)
    progress = f'Assigning {str(bcell.getIdentifier())} = {board.Cell.displayValue(bcell.getCertainValue())}'
    # NOTE(review): both children log under the primary `action` label,
    # even though one performs the opposite operation — confirm intended.
    assigned.config.complete_operation(action, progress, assigned, True)
    assigned.addAction({'action': 'assign', 'cell': cell_loc, 'value': value})
    # Build the exclusion child.
    bcell = removed.getCell(cell_id)
    bcell.exclude(value)
    progress = f'Removing {board.Cell.displayValue(value)} from {str(bcell.getIdentifier())}, resulting in {board.Cell.displayValues(bcell.getValueSet())}'
    removed.config.complete_operation(action, progress, removed, True)
    removed.addAction({'action': 'exclude', 'cell': cell_loc, 'value': value})
    progress = f'Performed {action} on {str(bcell.getIdentifier())} with {board.Cell.displayValue(value)} for {len(expansion)} new (unvalidated) boards'
    sboard.config.complete_operation(action, progress, sboard, True)
    return expansion
# -----------------------------------------------------------------------------
# BEYOND-LOGICAL PUZZLE SOLVER SUPPORT METHODS
# -----------------------------------------------------------------------------
def take_action(sboard, expansion_op, args):
    """ Expand sboard via the named action, then post-process every child
    board with the free operators, optionally pruning boards that already
    contain a contradiction.

    Returns:
        [Board, ...]: the surviving child boards.
    """
    children = get_action(expansion_op)(sboard, args)
    kept = []
    for child in children:
        child = apply_free_operators(child)
        # Skip proven-contradictory boards when pruning is enabled.
        if sboard.config.prune_invalid_boards and child.invalidCells():
            continue
        kept.append(child)
    return kept
def collect_cells(sboard):
    """
    Given an sboard, collect the list of Cell objects to select from.

    Returns:
        [Cell, ...]: the accessible cells of sboard (the actual Cell
        objects, not their identifiers).
    """
    # Get the list of board cell identifiers that are accessible.
    access_cells = sboard.computeAccessibleCells()
    logger.debug("Accessible cells are %s", str(access_cells))
    # Map identifiers to the actual Cell objects (comprehension replaces
    # the original append loop).
    return [sboard.getCell(ide) for ide in access_cells]
def select(item_list, heuristic, criterion, selector):
    """
    Selects and returns a single item from item_list by:
      1. Applying heuristic to each item,
      2. Filtering the items according to criterion,
      3. If multiple items satisfy criterion, choosing among them with selector.

    Args:
        item_list : items to choose from
        heuristic : item -> number (e.g., number of candidate values)
        criterion : [score, ...] -> the score(s) that satisfy the
            criterion (e.g., max or min)
        selector : [item, ...] -> one item; if None, a uniformly random
            choice is used
    Returns:
        One item from item_list.
    """
    # Default to a random choice up front (hoisted from after scoring).
    if selector is None:
        selector = random.choice
    # Score every item with the heuristic (comprehension replaces map+lambda).
    scored = [(item, heuristic(item)) for item in item_list]
    # Determine the best score per the criterion.
    best_score = criterion([score for _, score in scored])
    # Keep only the items achieving the best score.
    best_items = [item for item, score in scored if score == best_score]
    return selector(best_items)
# -----------------------------------------------------------------------------
# SEARCH PUZZLE CELL / BOARD / ACTION SELECTORS
# -----------------------------------------------------------------------------
def select_random_cell_with_fewest_uncertain_values(sboard):
    """ Pick, uniformly at random, one accessible Cell among those with
    the fewest candidate values. """
    candidates = collect_cells(sboard)
    return select(candidates, candidate_cell_values_heuristic, min, random.choice)
def select_random_cell_with_most_uncertain_values(sboard):
    """ Pick, uniformly at random, one accessible Cell among those with
    the most candidate values. """
    candidates = collect_cells(sboard)
    return select(candidates, candidate_cell_values_heuristic, max, random.choice)
def select_cell_by_user(sboard):
    """ Let the user pick the Cell; the uniform heuristic keeps every
    accessible cell in contention. """
    candidates = collect_cells(sboard)
    return select(candidates, uniform_heuristic, min, users_choice_cell)
def select_random_board_with_fewest_uncertain_values(node_list):
    """ Return the Board that has the fewest uncertain values, removing it from node_list. """
    # Local renamed from `board` to avoid shadowing the board module
    # used elsewhere in this file.
    chosen = select(node_list, candidate_board_uncertain_values_heuristic, min, random.choice)
    node_list.remove(chosen)
    return chosen
def select_random_board_with_most_uncertain_values(node_list):
    """ Return the Board that has the most uncertain values, removing it from node_list. """
    # Local renamed from `board` to avoid shadowing the board module.
    chosen = select(node_list, candidate_board_uncertain_values_heuristic, max, random.choice)
    node_list.remove(chosen)
    return chosen
def select_random_board_with_fewest_uncertain_cells(node_list):
    """ Return the Board that has the fewest uncertain cells, removing it from node_list. """
    # Local renamed from `board` to avoid shadowing the board module.
    chosen = select(node_list, candidate_board_uncertain_cells_heuristic, min, random.choice)
    node_list.remove(chosen)
    return chosen
def select_random_board_with_most_uncertain_cells(node_list):
    """ Return the Board that has the most uncertain cells, removing it from node_list. """
    # Local renamed from `board` to avoid shadowing the board module.
    chosen = select(node_list, candidate_board_uncertain_cells_heuristic, max, random.choice)
    node_list.remove(chosen)
    return chosen
def select_board_by_user(node_list):
    """ Return the Board selected by the user, removing it from node_list. """
    # Local renamed from `board` to avoid shadowing the board module.
    chosen = select(node_list, uniform_heuristic, min, users_choice_board)
    node_list.remove(chosen)
    return chosen
# -----------------------------------------------------------------------------
# SEARCH PUZZLE HEURISTICS
# Cell -> comparable value representing "goodness" of cell
# Board -> comparable value representing "goodness" of board
# -----------------------------------------------------------------------------
def candidate_cell_values_heuristic(cell):
    """ Score a Cell by how many candidate values it still has. """
    values = cell.getValues()
    return len(values)
def uniform_heuristic(item):
    """ Score every item identically (1), making downstream selection uniform. """
    return 1
def candidate_board_uncertain_cells_heuristic(node):
    """ Score a GameTreeNode by its board's number of uncertain cells. """
    uncertain = node.board.getUncertainCells()
    return len(uncertain)
def candidate_board_uncertain_values_heuristic(node):
    """ Score a GameTreeNode by its board's total uncertain values across cells. """
    return node.board.countUncertainValues()
# -----------------------------------------------------------------------------
# SEARCH PUZZLE SELECTORS
# [list of possible Cells] -> a single selected pivot Cell
# -----------------------------------------------------------------------------
def users_choice_cell(cell_list):
    """ Taking in a list of Cells, ask the user to select one and return it.

    Loops until the user types an identifier that matches a cell in the
    list.  A single-cell list is returned immediately without prompting.
    """
    assert len(cell_list) > 0, "Can't select cell from empty list."
    if len(cell_list) == 1:
        logger.info("Selecting single cell %s", str(cell_list[0]))
        return cell_list[0]
    while True:
        names = sorted([cell.getIdentifier() for cell in cell_list])
        print("Which cell do you want to expand? {}".format(names))
        selected = input()
        if selected in names:
            logger.debug("User selected cell ID %s.", str(selected))
            match_cell = next(filter(lambda x: x.getIdentifier() == selected, cell_list))
            if match_cell:
                break
        # FIX: Logger.warn is deprecated in favor of Logger.warning.
        logger.warning("Unable to find cell matching selected identifier %s.", str(selected))
    return match_cell
def users_choice_board(node_list):
    """
    Allow the user to select a board to continue exploring.

    Loops until the user types a valid board index.  A single-board list
    is returned immediately without prompting.  (Removal from node_list
    is handled by the caller, select_board_by_user.)
    """
    assert len(node_list) > 0, "Can't select board from empty list."
    if len(node_list) == 1:
        logger.info("Selecting single board %s", str(node_list[0]))
        return node_list[0]
    while True:
        print("Choose a board to explore.")
        # Idiom fix: enumerate instead of range(len(...)).
        for i, node in enumerate(node_list):
            print("Board Number {}:\n{}\n".format(i, node.board.getStateStr(True)))
        print("Please input the desired board number:")
        idx = int(input())
        if 0 <= idx < len(node_list):
            logger.debug("User selected board index %s.", str(idx))
            active = node_list[idx]
            break
        # FIX: Logger.warn is deprecated in favor of Logger.warning.
        logger.warning("Selected index not valid (%s).", str(idx))
    return active
# --- file boundary (extraction artifact): deprecated/TNSAgent/tns/testneighbormodel.py ---
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from datetime import datetime, timedelta, date, time
from dateutil import relativedelta
from .model import Model
from .vertex import Vertex
from .helpers import *
from .measurement_type import MeasurementType
from .interval_value import IntervalValue
from .transactive_record import TransactiveRecord
from .meter_point import MeterPoint
from .market import Market
from .time_interval import TimeInterval
from .neighbor import Neighbor
from .neighbor_model import NeighborModel
from .local_asset import LocalAsset
from .local_asset_model import LocalAssetModel
from .myTransactiveNode import myTransactiveNode
from .bulk_supplier_dc import BulkSupplier_dc
def test_all():
    # TEST_ALL - run every NeighborModel test function, in order.
    print('Running NeighborModel.test_all()')
    tests = (
        test_calculate_reserve_margin,
        test_check_for_convergence,
        test_marginal_price_from_vertices,
        test_prep_transactive_signal,
        test_receive_transactive_signal,
        test_schedule_engagement,
        test_schedule_power,
        test_send_transactive_signal,
        test_update_dc_threshold,
        test_update_dual_costs,
        test_update_production_costs,
        test_update_vertices,
    )
    for test in tests:
        test()
def test_calculate_reserve_margin():
    # TEST_LAM_CALCULATE_RESERVE_MARGIN() - test calculate_reserve_margin().
    # NOTE: Reserve margins are introduced but not fully integrated into
    # code in early template versions.
    # CASES:
    # 1. uses hard maximum if no active vertices exist
    # 2. vertices exist
    #   2.1 uses maximum vertex power if it is less than hard power constraint
    #   2.2 uses hard constraint if it is less than maximum vertex power
    #   2.3 upper flex power is greater than scheduled power assigns correct
    #       positive reserve margin
    #   2.4 upperflex power less than scheduled power assigns zero value to
    #       reserve margin.
    print('Running NeighborModel.test_calculate_reserve_margin()')
    pf = 'pass'
    # Establish a test market with one active time interval.
    test_mkt = Market()
    # Modified 1/29/18 due to new TimeInterval constructor
    dt = datetime.now()
    at = dt
    dur = timedelta(hours=1)
    mkt = test_mkt
    mct = dt
    # st = datetime(date) + Hours(12)  # today at noon
    st = datetime.combine(date.today(), time()) + timedelta(hours=12)
    ti = TimeInterval(at, dur, mkt, mct, st)
    test_mkt.timeIntervals = [ti]
    # Establish a test object that is a Neighbor with assigned maximum power.
    test_object = Neighbor()
    test_object.maximumPower = 100
    # Establish test object that is a NeighborModel with a zero scheduled power.
    test_model = NeighborModel()
    test_model.scheduledPowers = [
        IntervalValue(test_model, ti, test_mkt, MeasurementType.ScheduledPower, 0.0)]
    # Allow object and model to cross-reference one another.
    test_object.model = test_model
    test_model.object = test_object
    # Case 1: no active vertices exist, so the hard maximum must be used.
    test_model.calculate_reserve_margin(test_mkt)
    print('- method ran without errors')
    if len(test_model.reserveMargins) != 1:
        raise Exception('- an unexpected number of results were stored')
    else:
        print('- one reserve margin was stored, as expected')
    if test_model.reserveMargins[0].value != 100:
        pf = 'fail'
        print('- the method did not use the available maximum power')
    else:
        print('- the method used maximum power value, as expected')
    # Create some vertices and store them.
    interval_value1 = IntervalValue(test_model, ti, test_mkt, MeasurementType.Vertex, Vertex(0, 0, -10))
    interval_value2 = IntervalValue(test_model, ti, test_mkt, MeasurementType.Vertex, Vertex(0, 0, 10))
    test_model.activeVertices = [interval_value1, interval_value2]
    # Case 2.1: maximum power greater than the maximum vertex power.
    test_object.maximumPower = 100
    test_model.calculate_reserve_margin(test_mkt)
    if test_model.reserveMargins[0].value != 10:
        pf = 'fail'
        print('- the method should have used vertex for comparison')
    else:
        print('- the method correctly chose to use the vertex power')
    # Case 2.2: maximum power less than the maximum vertex power.
    test_object.maximumPower = 5
    test_model.calculate_reserve_margin(test_mkt)
    if test_model.reserveMargins[0].value != 5:
        pf = 'fail'
        print('- method should have used maximum power for comparison')
    else:
        print('- the method properly chose to use the maximum power')
    # Case 2.4: scheduled power greater than the maximum vertex power;
    # the reserve margin must clamp at zero.
    test_model.scheduledPowers[0].value = 20
    test_object.maximumPower = 500
    test_model.calculate_reserve_margin(test_mkt)
    if test_model.reserveMargins[0].value != 0:
        pf = 'fail'
        print('- method should have assigned zero for a neg. result')
    else:
        print('- the method properly assigned 0 for a negative result')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: originally printed the Matlab-style placeholder '#s'
    # instead of formatting pf into the message.
    print('\nResult: {}\n\n'.format(pf))
def test_check_for_convergence():
    # TEST_CHECK_FOR_CONVERGENCE() - test check_for_convergence().
    print('Running NeighborModel.test_check_for_convergence()')
    pf = 'pass'
    one_hour = timedelta(hours=1)
    # Create a test NeighborModel object.
    test_model = NeighborModel()
    test_model.convergenceThreshold = 0.01
    test_model.converged = True
    # Create a test Market object.
    test_market = Market()
    # Create and store an active TimeInterval object.
    dt = datetime.now()
    time_intervals = TimeInterval(dt, one_hour, test_market, dt, dt)
    # NOTE(review): other tests store a list here; presumably this should
    # be [time_intervals] — left as-is to preserve original behavior.
    test_market.timeIntervals = time_intervals

    def _verify(expected, flags_name='interval convergence flags'):
        # Helper: check flag count, interval flag value, and overall
        # convergence against `expected`, updating pf on any mismatch.
        # BUG FIX: the original assigned failures to a typo variable `tf`,
        # so flag/convergence failures never flipped pf to 'fail'.
        nonlocal pf
        word = 'true' if expected else 'false'
        if len(test_model.convergenceFlags) != 1:
            pf = 'fail'
            print(' - an unexpected number of {} occurred'.format(flags_name))
        else:
            print(' - the expected number of {} occurred'.format(flags_name))
        if test_model.convergenceFlags[0].value != expected:
            pf = 'fail'
            print(' - the interval convergence flag should have been {}'.format(word))
        else:
            print(' - the interval convergence flag was {}, as expected'.format(word))
        if test_model.converged != expected:
            pf = 'fail'
            print(' - the overall convergence should have been {}'.format(word))
        else:
            print(' - the overall convergence was {}, as expected'.format(word))

    ## TEST 1: No TransactiveRecord messages have been sent.
    print('- Test 1: Property sentSignal is empty')
    test_model.check_for_convergence(test_market)
    print(' - the method ran to completion')
    _verify(False, 'convergence flags')
    ## TEST 2: Compare sent and received signals with identical records
    print('- Test 2: Comparing identical sent and received transactive records')
    test_model.converged = False  # Preset to ensure test changes status.
    # Create a couple TransactiveRecord objects. NOTE: sent and received
    # records have opposite signs for their powers. These should therefore
    # match and show convergence. The timestamp of the record for
    # receivedSignal should be made LATER than that for the sent as this is
    # a precondition that must be met.
    tr = [
        TransactiveRecord(time_intervals, 0, 0.05, 100),
        TransactiveRecord(time_intervals, 0, 0.05, -100)]
    tr[0].timeStamp = datetime.now() + one_hour
    # NOTE: The latter-defined record must be placed in receivedSignal to
    # satisfy a precondition.
    test_model.sentSignal = [tr[0]]
    test_model.receivedSignal = [tr[1]]
    test_model.check_for_convergence(test_market)
    print(' - the method ran to completion')
    _verify(True)
    ## TEST 3: Revise records' scheduled powers to show lack of convergence
    print('- Test 3: Revise powers to destroy convergence between sent and received messages')
    test_model.receivedSignal[0].power = 1.02 * test_model.receivedSignal[0].power
    test_model.check_for_convergence(test_market)
    print(' - the method ran to completion')
    _verify(False)
    ## TEST 4: Sent and received signals differ, no signal received since last send
    print('- Test 4: No received signal since last send')
    dt = format_ts(datetime.now())
    test_model.sentSignal[0].timeStamp = dt
    test_model.receivedSignal[0].timeStamp = dt
    try:
        test_model.check_for_convergence(test_market)
        print(' - the method ran to completion')
    except Exception:
        print(' - the method encountered errors and stopped')
    _verify(True)
    ## TEST 5: Compare identical mySignal and sentSignal records
    print('- Test 5: Identical mySignal and sentSignal contents')
    # Prepared mySignal message that is exactly the same as the sent one.
    test_model.mySignal = [tr[0]]
    test_model.sentSignal = [tr[0]]
    # Ensure that the sent signal was sent much more than 5 minutes ago.
    # NOTE(review): dt holds the result of format_ts(); if that returns a
    # string this subtraction raises TypeError BEFORE the try below —
    # confirm format_ts's return type.
    test_model.sentSignal[0].timeStamp = dt - one_hour
    # Ensure that a signal has NOT been received since the last one was
    # sent. This intentionally violates a precondition so the method under
    # test will not compare the sent and received messages.
    test_model.receivedSignal[0].timeStamp = test_model.sentSignal[0].timeStamp - one_hour
    try:
        test_model.check_for_convergence(test_market)
        print(' - the method ran to completion')
    except Exception:
        print(' - the method encountered errors and stopped')
    _verify(True)
    ## TEST 6: Compare multiple matched mySignal and testSignal records
    print('- Test 6: Compare multiple matched mySignal and testSignal records')
    # Create a couple new TransactiveRecord objects.
    tr.append(TransactiveRecord(time_intervals, 1, 0.049, 90))
    tr[2].timeStamp = test_model.sentSignal[0].timeStamp
    tr.append(TransactiveRecord(time_intervals, 2, 0.051, 110))
    tr[3].timeStamp = test_model.sentSignal[0].timeStamp
    # Append the mySignal and sentSignal records. The sets should still
    # remain identical, meaning the system has not changed and remains
    # converged.
    test_model.mySignal = [tr[0], tr[2], tr[3]]
    test_model.sentSignal = [tr[0], tr[2], tr[3]]
    try:
        test_model.check_for_convergence(test_market)
        print(' - the method ran to completion')
    except Exception:
        print(' - the method encountered errors and stopped')
    _verify(True)
    ## TEST 7: A Vertex differs significantly between mySignal and sentSignal
    print('- Test 7: mySignal and sentSignal differ significantly, multiple points.')
    # Change mySignal to be significantly different from sentSignal.
    tr.append(TransactiveRecord(time_intervals, 1, 0.049, 85))
    test_model.mySignal = [tr[0], tr[4], tr[3]]
    test_model.check_for_convergence(test_market)
    print(' - the method ran to completion')
    _verify(False)
    # Success.
    print('- the test ran to completion')
    # BUG FIX: originally printed the Matlab-style placeholder '#s'
    # instead of formatting pf into the message.
    print('\nResult: {}\n\n'.format(pf))
def test_marginal_price_from_vertices():
    """Test NeighborModel.marginal_price_from_vertices().

    Cases covered:
    - power less than the leftmost vertex
    - power greater than the rightmost vertex
    - power between two vertices (interpolation)
    """
    print('Running NeighborModel.test_marginal_price_from_vertices()')
    pf = 'pass'
    # CASES:
    # - power less than leftmost vertex
    # - power greater than rightmost vertex
    # - power between two vertices
    # Create a test NeighborModel object.
    test_obj = NeighborModel()
    # Create and store two test Vertex objects. Misorder to test ordering.
    test_vertice1 = Vertex(0.2, 0, 100)
    test_vertice2 = Vertex(0.1, 0, -100)
    test_vertices = [test_vertice1, test_vertice2]
    # Test 1: Power less than leftmost vertex.
    print('- Test 1: power less than leftmost Vertex')
    power = -150
    marginal_price = test_obj.marginal_price_from_vertices(power, test_vertices)
    print(' - the method ran without errors')
    # Below the flexibility range the price should clip to the low vertex.
    if marginal_price != test_vertices[1].marginalPrice:
        pf = 'fail'
        print(' - the method returned an unexpected marginal price')
    else:
        print(' - the method returned the expected marginal price')
    # Test 2: Power greater than the rightmost Vertex.
    print('- Test 2: power greater than the rightmost Vertex')
    power = 150
    marginal_price = test_obj.marginal_price_from_vertices(power, test_vertices)
    print(' - the method ran without errors')
    # Above the flexibility range the price should clip to the high vertex.
    if marginal_price != test_vertices[0].marginalPrice:
        pf = 'fail'
        print(' - the method returned an unexpected marginal price')
    else:
        print(' - the method returned the expected marginal price')
    # Test 3: Power between vertices.
    print('- Test 3: power is between vertices')
    power = 0
    marginal_price = test_obj.marginal_price_from_vertices(power, test_vertices)
    print(' - the method ran without errors')
    # Midpoint of [-100, 100] should interpolate to the midpoint price 0.15.
    if abs(marginal_price - 0.15) > 0.0001:
        pf = 'fail'
        print(' - the method returned an unexpected marginal price')
    else:
        print(' - the method returned the expected marginal price')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
def test_prep_transactive_signal():
    """Test NeighborModel.prep_transactive_signal().

    Exercises the non-transactive guard, the no-flexibility case, import
    and export flexibility ranges, and an extra vertex inside the range.
    """
    print('Running NeighborModel.test_prep_transactive_signal()')
    pf = 'pass'
    # Create a test model.
    test_model = NeighborModel()
    # Create a test object.
    test_object = Neighbor()
    # Let the test object and model cross reference one another.
    test_object.model = test_model
    test_model.object = test_object
    # Create a test market object.
    test_market = Market()
    # Create a test LocalAssetModel object.
    test_asset_model = LocalAssetModel()
    # Create a test LocalAsset object.
    test_local_asset = LocalAsset()
    # Let the asset and its model cross-reference one another.
    test_local_asset.model = test_asset_model
    test_asset_model.object = test_local_asset
    # Create a test myTransactiveNode object and its references to its
    # objects and models.
    test_myTransactiveNode = myTransactiveNode()
    test_myTransactiveNode.neighbors = [test_object]
    test_myTransactiveNode.localAssets = [test_local_asset]
    test_myTransactiveNode.markets = test_market
    # Create and store a TimeInterval object
    dt = datetime.now()
    at = dt
    dur = timedelta(hours=1)  # Hours(1)
    mkt = test_market
    mct = dt
    st = dt
    time_interval = TimeInterval(at, dur, mkt, mct, st)
    test_market.timeIntervals = [time_interval]
    # Create some active vertices and their IntervalValue objects ready to
    # choose from for the various tests.
    vertice1 = Vertex(0.1, 0, -100)
    vertice2 = Vertex(0.2, 0, -37.5)
    vertice3 = Vertex(0.3, 0, 0)
    vertice4 = Vertex(0.4, 0, 25)
    vertice5 = Vertex(0.5, 0, 100)
    interval_values = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.TestVertex, vertice1),
        IntervalValue(test_model, time_interval, test_market, MeasurementType.TestVertex, vertice2),
        IntervalValue(test_model, time_interval, test_market, MeasurementType.TestVertex, vertice3),
        IntervalValue(test_model, time_interval, test_market, MeasurementType.TestVertex, vertice4),
        IntervalValue(test_model, time_interval, test_market, MeasurementType.TestVertex, vertice5)
    ]
    ## TEST 1
    print('- Test 1: Neighbor is NOT transactive')
    test_model.transactive = False
    test_model.prep_transactive_signal(test_market, test_myTransactiveNode)
    print(' - The method warned and returned, as expected')
    ## TEST 2
    print('- Test 2: The trans. Neighbor is offered no flexibility')
    # Configure the test.
    test_model.transactive = True
    test_model.scheduledPowers = [IntervalValue(test_model, time_interval, test_market, MeasurementType.ScheduledPower, 200)]
    test_asset_model.activeVertices = [interval_values[2]]
    test_model.prep_transactive_signal(test_market, test_myTransactiveNode)
    print(' - the method ran to completion without errors')
    if len(test_model.mySignal) != 1:
        pf = 'fail'
        raise Exception(' - the wrong number of transactive records were stored')
    else:
        print(' - a transactive record was stored as expected')
    # BUG FIX: original joined these conditions with 'and', so the check
    # could only fire when BOTH values were wrong; either being wrong is a
    # failure, so 'or' is the intended logic.
    if test_model.mySignal[0].power != -200 or test_model.mySignal[0].marginalPrice != float("inf"):
        pf = 'fail'
        raise Exception(' - the transactive record values were not as expected')
    else:
        print(' - the values in the transactive record were as expected')
    ## TEST 3
    print('- Test 3: The trans. Neigbor imports from myTransactiveNode')
    # Configure the test.
    test_model.transactive = True
    test_model.scheduledPowers = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ScheduledPower, -50)]
    test_object.maximumPower = -10
    test_object.minimumPower = -75
    test_asset_model.activeVertices = [interval_values[2], interval_values[4]]
    test_model.prep_transactive_signal(test_market, test_myTransactiveNode)
    print(' - the method ran to completion without errors')
    if len(test_model.mySignal) != 3:
        pf = 'fail'
        raise Exception(' - the wrong number of transactive records were stored')
    else:
        print(' - three transactive records ware stored as expected')
    # if any(~ismember([test_model.mySignal(:).power], [25, 50, 75])):
    non_members = [x for x in test_model.mySignal if x.power not in [10, 50, 75]]
    if len(non_members) > 0:
        pf = 'fail'
        print(' - the record power values were not as expected')
    else:
        print(' - the power values in the records were as expected')
    # if any(abs([test_model.mySignal(:).marginalPrice]-0.3500) < 0.0001)
    # and any(abs([test_model.mySignal(:).marginalPrice]-0.4000) < 0.0001)
    # and any(abs([test_model.mySignal(:).marginalPrice]-0.4500) < 0.0001):
    cond1 = [abs(x.marginalPrice - 0.3200) < 0.0001 for x in test_model.mySignal]
    cond2 = [abs(x.marginalPrice - 0.4000) < 0.0001 for x in test_model.mySignal]
    cond3 = [abs(x.marginalPrice - 0.4500) < 0.0001 for x in test_model.mySignal]
    if any(cond1) and any(cond2) and any(cond3):
        print(' - the marginal price values were as expected')
    else:
        pf = 'fail'
        print(' - the marginal price values were not as expected')
    ## TEST 4
    print('- Test 4: The trans. Neighbor exports to myTransactiveNode')
    # Configure the test.
    test_model.transactive = True
    test_model.scheduledPowers = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ScheduledPower, 50)]
    test_object.maximumPower = 75
    test_object.minimumPower = 10
    test_asset_model.activeVertices = [interval_values[0], interval_values[2]]
    test_model.prep_transactive_signal(test_market, test_myTransactiveNode)
    print(' - the method ran to completion without errors')
    if len(test_model.mySignal) != 3:
        pf = 'fail'
        print(' - the wrong number of transactive records were stored')
    else:
        print(' - three transactive records ware stored as expected')
    # if any(~ismember([test_model.mySignal(:).power], [-25, -50, -75]))
    non_members = [x for x in test_model.mySignal if x.power not in [-10, -50, -75]]
    if len(non_members) > 0:
        pf = 'fail'
        print(' - the record power values were not as expected')
    else:
        print(' - the power values in the records were as expected')
    # if any(abs([test_model.mySignal(: ).marginalPrice]-0.1500) < 0.0001)
    # and any(abs([test_model.mySignal(:).marginalPrice]-0.2000) < 0.0001)
    # and any(abs([test_model.mySignal(:).marginalPrice]-0.2500) < 0.0001)
    cond1 = [abs(x.marginalPrice - 0.1500) < 0.0001 for x in test_model.mySignal]
    cond2 = [abs(x.marginalPrice - 0.2000) < 0.0001 for x in test_model.mySignal]
    cond3 = [abs(x.marginalPrice - 0.2800) < 0.0001 for x in test_model.mySignal]
    if any(cond1) and any(cond2) and any(cond3):
        pass  # print(' - the marginal price values were as expected')
    else:
        pf = 'fail'
        print(' - the marginal price values were not as expected')
    ## TEST 5
    print('- Test 5: There is an extra Vertex in the range')
    # Configure the test.
    test_model.transactive = True
    test_model.scheduledPowers = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ScheduledPower, 50)]
    test_object.maximumPower = 75
    test_object.minimumPower = 25
    test_asset_model.activeVertices = [interval_values[0],
                                       interval_values[1],  # an extra vertex in active flex range
                                       interval_values[2]]
    test_model.prep_transactive_signal(test_market, test_myTransactiveNode)
    print(' - the method ran to completion without errors')
    if len(test_model.mySignal) != 4:
        pf = 'fail'
        print(' - the wrong number of transactive records were stored')
    else:
        print(' - four transactive records ware stored as expected')
    # if any(~ismember([test_model.mySignal(: ).power], [-25, -50, -75, -37.5]))
    non_members = [x for x in test_model.mySignal if x.power not in [-25, -50, -75, -37.5]]
    if len(non_members) > 0:
        pf = 'fail'
        print(' - the record power values were not as expected')
    else:
        print(' - the power values in the records were as expected')
    # if any(abs([test_model.mySignal(: ).marginalPrice]-0.1800)< 0.0001)
    # and any(abs([test_model.mySignal(:).marginalPrice]-0.1400)< 0.0001)
    # and any(abs([test_model.mySignal(:).marginalPrice]-0.2333)< 0.0001)
    # and any(abs([test_model.mySignal(:).marginalPrice]-0.2000)< 0.0001)
    cond1 = [abs(x.marginalPrice - 0.1800) < 0.0001 for x in test_model.mySignal]
    cond2 = [abs(x.marginalPrice - 0.1400) < 0.0001 for x in test_model.mySignal]
    cond3 = [abs(x.marginalPrice - 0.2333) < 0.0001 for x in test_model.mySignal]
    cond4 = [abs(x.marginalPrice - 0.2000) < 0.0001 for x in test_model.mySignal]
    if any(cond1) and any(cond2) and any(cond3) and any(cond4):
        pass  # print(' - the marginal price values were as expected')
    else:
        pf = 'fail'
        print(' - the marginal price values were not as expected')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
def test_receive_transactive_signal():
    """Test NeighborModel.receive_transactive_signal().

    Relies on send_transactive_signal() to first create the csv file that
    is then read back; node/object names are swapped because the file name
    encodes source and target, which are reversed between send and receive.
    """
    print('Running NeighborModel.test_receive_transactive_signal()')
    pf = 'pass'
    # Create a test NeighborModel object.
    test_model = NeighborModel()
    # Create a test Neighbor object.
    test_object = Neighbor()
    test_object.name = 'TN_abcdefghijklmn'
    # Get the test object and model to cross-reference one another.
    test_object.model = test_model
    test_model.object = test_object
    # Create a test market object.
    test_market = Market()
    # Create a test myTransactiveNode object.
    # BUG FIX: the original assigned the class itself (missing '()'), which
    # set 'name' as a class attribute; instantiate as the sibling tests do.
    test_myTransactiveNode = myTransactiveNode()
    test_myTransactiveNode.name = 'mTN_abcd'
    ## TEST 1
    print('- Test 1: Neighbor is NOT transactive')
    test_model.transactive = False
    test_model.receive_transactive_signal(test_myTransactiveNode)
    print(' - The method warned and returned, as expected')
    # Test 2
    print('- Test 2: Read a csv file into received transactive records')
    # Configure for the test.
    test_model.transactive = True
    # Create a test time interval
    dt = datetime.now()
    at = dt
    dur = timedelta(hours=1)  # Hours(1)
    mkt = test_market
    mct = dt
    st = dt
    time_interval = TimeInterval(at, dur, mkt, mct, st)
    # Create a couple test transactive records.
    test_record1 = TransactiveRecord(time_interval, 0, 0.1, 0)
    test_record2 = TransactiveRecord(time_interval, 1, 0.2, 100)
    test_model.mySignal = [test_record1, test_record2]
    test_model.send_transactive_signal(test_myTransactiveNode)
    print(' - this test depends on method send_transactive_signal() to create a file')
    # Clear the receivedSignal property that will be used to receive the records.
    test_model.receivedSignal = []
    # A trick is needed because the filenames rely on source and target node
    # names, which are swapped in the reading and sending methods. Exchange
    # the names of the test object and test myTransactiveNode.
    name_holder = test_myTransactiveNode.name
    test_myTransactiveNode.name = test_object.name
    test_object.name = name_holder
    test_model.receive_transactive_signal(test_myTransactiveNode)
    print(' - the receive method ran without errors')
    if len(test_model.receivedSignal) != 2:
        pf = 'fail'
        print(' - an unexpected, or no, record count was stored')
    else:
        print(' - the expected number of records was stored')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
def test_schedule_engagement():
    """Test NeighborModel.schedule_engagement().

    Engagement scheduling is meaningful for assets, not neighbors, so the
    method is expected to be a no-op that leaves the model unchanged.
    """
    print('Running NeighborModel.test_schedule_engagement()')
    pf = 'pass'
    test_obj = NeighborModel()
    test_mkt = Market()
    test_obj.schedule_engagement()
    print('- method ran to completion')
    # NOTE(review): comparing an object with itself is always True, so the
    # failure branch is unreachable; detecting mutation would require
    # comparing against a pre-call snapshot/copy — confirm intent.
    if test_obj == test_obj:
        print('- the NeighborModel was unchanged, which is correct')
    else:
        # BUG FIX: the original raised a bare string ("raise ('- ...')"),
        # which is a TypeError in Python 3; raise a proper Exception.
        raise Exception('- the NeighborModel was unexpected altered')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
def test_schedule_power():
    """Test NeighborModel.schedule_power().

    Verifies first-time creation of a scheduled power from an active
    vertex, then reassignment when the vertex power changes.
    """
    print('Running NeighborModel.test_schedule_power()')
    pf = 'pass'
    # Create a test NeighborModel object.
    test_model = NeighborModel()
    # test_model.defaultPower = 99
    # Create a test Market object.
    test_market = Market()
    # Create and store an active TimeInterval object.
    dt = datetime.now()  # datetime that may be used for all datetime arguments
    time_interval = TimeInterval(dt, timedelta(hours=1), test_market, dt, dt)
    test_market.timeIntervals = [time_interval]
    # Create and store a marginal price IntervalValue object.
    test_market.marginalPrices = [
        IntervalValue(test_market, time_interval, test_market, MeasurementType.MarginalPrice, 0.1)]
    # Create a store a simple active Vertex for the test model.
    test_vertex = Vertex(0.1, 0, 100)
    test_interval_value = IntervalValue(test_model, time_interval, test_market,
                                        MeasurementType.ActiveVertex, test_vertex)
    test_model.activeVertices = [test_interval_value]
    ## TEST 1
    print('- Test 1: scheduled power does not exist yet')
    test_model.schedule_power(test_market)
    print(' - the method ran without errors')
    if len(test_model.scheduledPowers) != 1:
        pf = 'fail'
        print(' - an unexpected number of scheduled powers is created')
    else:
        print(' - the expected number of scheduled powers is created')
    scheduled_power = test_model.scheduledPowers[0].value
    if scheduled_power != 100:
        pf = 'fail'
        print(' - the scheduled power value was not that expected')
    else:
        print(' - the scheduled power value was as expected')
    ## TEST 2
    print('- Test 2: scheduled power value exists to be reassigned')
    # Configure for test by using a different active vertex.
    test_vertex.power = 50
    test_model.activeVertices[0].value = test_vertex
    test_model.schedule_power(test_market)
    print(' - the method ran without errors')
    if len(test_model.scheduledPowers) != 1:
        pf = 'fail'
        print(' - an unexpected number of scheduled powers is found')
    else:
        print(' - the expected number of scheduled powers is found')
    scheduled_power = test_model.scheduledPowers[0].value
    if scheduled_power != 50:
        pf = 'fail'
        print(' - the scheduled power value was not that expected')
    else:
        print(' - the scheduled power value was as expected')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
def test_send_transactive_signal():
    """Test NeighborModel.send_transactive_signal().

    Checks the non-transactive guard, csv-file creation from transactive
    records, and that the sent signal is retained on the model.
    """
    import os
    print('Running NeighborModel.test_send_transactive_signal()')
    pf = 'pass'
    # Create a test NeighborModel object.
    test_model = NeighborModel()
    # test_model.name = 'NM_abcdefghijkl'
    # Create a test Neighbor object.
    test_object = Neighbor()
    test_object.name = 'TN_abcdefghijklmn'
    # Get the test object and model to cross-reference one another.
    test_object.model = test_model
    test_model.object = test_object
    # Create a test market object.
    test_market = Market()
    # Create a test myTransactiveNode object.
    # BUG FIX: the original assigned the class itself (missing '()'), which
    # set 'name' as a class attribute; instantiate as the sibling tests do.
    test_myTransactiveNode = myTransactiveNode()
    test_myTransactiveNode.name = 'mTN_abcd'
    ## TEST 1
    print('- Test 1: Neighbor is NOT transactive')
    test_model.transactive = False
    test_model.send_transactive_signal(test_myTransactiveNode)
    print(' - The method warned and returned, as expected')
    # Test 2
    print('- Test 2: Write transactive records into a csv file')
    # Configure for the test.
    test_model.transactive = True
    # Create a test time interval
    dt = datetime.now()
    at = dt
    dur = timedelta(hours=1)
    mkt = test_market
    mct = dt
    st = dt
    time_interval = TimeInterval(at, dur, mkt, mct, st)
    # Create a couple test transactive records.
    test_record1 = TransactiveRecord(time_interval, 0, 0.1, 0)
    test_record2 = TransactiveRecord(time_interval, 1, 0.2, 100)
    test_model.mySignal = [test_record1, test_record2]
    test_model.send_transactive_signal(test_myTransactiveNode)
    print(' - the method ran to completion without errors')
    expected_filename = 'mTN_a-TN_ab.txt'
    # if exist(expected_filename, 'file') != 2:
    if not os.path.isfile(expected_filename):
        pf = 'fail'
        print(' - the expected output file does not exist')
    else:
        print(' - the expected output file exists')
    # expected_data = csvread(expected_filename, 1, 3, [1, 3, 2, 4])
    # if expected_data !=[0.1000, 0; 0.2000, 100]:
    #     pf = 'fail'
    #     print(' - the csv file contents were not as expected')
    # else:
    #     print(' - the csv file contents were as expected')
    ## TEST 3: Check that the saved sent signal is the same as that calculated.
    print('- Test 3: Was the sent signal saved properly?')
    if test_model.mySignal != test_model.sentSignal:
        pf = 'fail'
        print(' - the sent signal does not match the calculated one')
    else:
        print(' - the sent signal matches the calculated one')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
    # Close and delete the file.
    # fclose('all')
    # delete(expected_filename)
def test_update_dc_threshold():
    """Test BulkSupplier_dc.update_dc_threshold().

    Covers threshold inference without a meter, with a meter, and the
    month-rollover reset of the demand threshold.
    """
    print('Running NeighborModel.test_update_dc_threshold()')
    pf = 'pass'
    dt = datetime.now()
    ## Basic configuration for tests:
    # Create a test object and initialize demand-realted properties
    test_obj = BulkSupplier_dc()
    test_obj.demandMonth = dt.month
    test_obj.demandThreshold = 1000
    # Create a test market
    test_mkt = Market()
    # Create and store two time intervals
    at = dt
    dur = timedelta(hours=1)  # Hours(1)
    mkt = test_mkt
    mct = dt
    st = dt
    ti1 = TimeInterval(at, dur, mkt, mct, st)
    st = st + dur
    ti2 = TimeInterval(at, dur, mkt, mct, st)
    ti = [ti1, ti2]
    test_mkt.timeIntervals = ti
    # Test case when there is no MeterPoint object
    test_obj.demandThreshold = 1000
    test_obj.demandMonth = dt.month
    test_obj.meterPoints = []  # MeterPoint.empty
    # Create and store a couple scheduled powers
    iv1 = IntervalValue(test_obj, ti[0], test_mkt, MeasurementType.ScheduledPower, 900)
    iv2 = IntervalValue(test_obj, ti[1], test_mkt, MeasurementType.ScheduledPower, 900)
    test_obj.scheduledPowers = [iv1, iv2]
    test_obj.update_dc_threshold(test_mkt)
    print('- the method ran without errors')
    if test_obj.demandThreshold != 1000:
        pf = 'fail'
        print('- the method inferred the wrong demand threshold value')
    else:
        print('- the method properly kept the old demand threshold value with no meter')
    iv1 = IntervalValue(test_obj, ti[0], test_mkt, MeasurementType.ScheduledPower, 1100)
    iv2 = IntervalValue(test_obj, ti[1], test_mkt, MeasurementType.ScheduledPower, 900)
    test_obj.scheduledPowers = [iv1, iv2]
    test_obj.update_dc_threshold(test_mkt)
    print('- the method ran without errors when there is no meter')
    if test_obj.demandThreshold != 1100:
        pf = 'fail'
        print('- the method did not update the inferred demand threshold value')
    else:
        print('- the method properly updated the demand threshold value with no meter')
    ## Test with an appropriate MeterPoint meter
    # Create and store a MeterPoint test object
    test_mtr = MeterPoint()
    test_mtr.measurementType = MeasurementType.AverageDemandkW  # 'average_demand_kW'
    test_mtr.currentMeasurement = 900
    test_obj.meterPoints = [test_mtr]
    # Reconfigure the test object for this test:
    iv1 = IntervalValue(test_obj, ti[0], test_mkt, MeasurementType.ScheduledPower, 900)
    iv2 = IntervalValue(test_obj, ti[1], test_mkt, MeasurementType.ScheduledPower, 900)
    test_obj.scheduledPowers = [iv1, iv2]
    test_obj.demandThreshold = 1000
    test_obj.demandMonth = dt.month
    # Run the test. Confirm it runs.
    test_obj.update_dc_threshold(test_mkt)
    print('- the method ran without errors when there is a meter')
    # Check that the old threshold is correctly retained.
    if test_obj.demandThreshold != 1000:
        pf = 'fail'
        print('- the method failed to keep the correct demand threshold value when there is a meter')
    else:
        print('- the method properly kept the old demand threshold value when there is a meter')
    # Reconfigure the test object with a lower current threshold
    iv1 = IntervalValue(test_obj, ti[0], test_mkt, MeasurementType.ScheduledPower, 900)
    iv2 = IntervalValue(test_obj, ti[1], test_mkt, MeasurementType.ScheduledPower, 900)
    test_obj.scheduledPowers = [iv1, iv2]
    test_obj.demandThreshold = 800
    # Run the test.
    test_obj.update_dc_threshold(test_mkt)
    # Check that a new, higher demand threshold was set.
    if test_obj.demandThreshold != 900:
        pf = 'fail'
        print('- the method failed to update the demand threshold value when there is a meter')
    else:
        print('- the method properly updated the demand threshold value when there is a meter')
    ## Test rollover to new month
    # Configure the test object
    # test_obj.demandMonth = month(datetime - days(31))  # prior month
    # BUG FIX: demandMonth is a month number everywhere else in this test
    # (dt.month), but the original stored a full datetime here; extract the
    # prior month's number for a consistent comparison.
    test_obj.demandMonth = (dt + relativedelta.relativedelta(months=-1)).month  # prior month
    test_obj.demandThreshold = 1000
    test_obj.scheduledPowers[0].value = 900
    test_obj.scheduledPowers[1].value = 900
    # test_obj.meterPoints = MeterPoint.empty
    test_obj.meterPoints = []  # MeterPoint.empty
    # Run the test
    test_obj.update_dc_threshold(test_mkt)
    # See if the demand threshold was reset at the new month.
    if test_obj.demandThreshold != 0.8 * 1000:
        pf = 'fail'
        print('- the method did not reduce the threshold properly in a new month')
    else:
        print('- the method reduced the threshold properly in a new month')
    # Success
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('Result: %s\n\n' % pf)
def test_update_dual_costs():
    """Test NeighborModel.update_dual_costs().

    Verifies first-time calculation of a dual cost (production cost minus
    scheduled power times marginal price) and its reassignment after the
    marginal price changes.
    """
    print('Running NeighborModel.test_update_dual_costs()')
    pf = 'pass'
    # Create a test Market object.
    test_market = Market()
    # Create and store a TimeInterval object.
    dt = datetime.now()  # datetime that may be used for most datetime arguments
    time_interval = TimeInterval(dt, timedelta(hours=1), test_market, dt, dt)
    test_market.timeIntervals = [time_interval]
    # Create and store a marginal price IntervalValue object.
    test_market.marginalPrices = [
        IntervalValue(test_market, time_interval, test_market, MeasurementType.MarginalPrice, 0.1)]
    # Create a test NeighborModel object.
    test_model = NeighborModel()
    # Create and store a scheduled power IntervalValue in the active time
    # interval.
    test_model.scheduledPowers = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ScheduledPower, 100)]
    # Create and store a production cost IntervalValue object in the active
    # time interval.
    test_model.productionCosts = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ProductionCost, 1000)]
    # TEST 1
    print('- Test 1: First calculation of a dual cost')
    test_model.update_dual_costs(test_market)
    print(' - the method ran without errors')
    if len(test_model.dualCosts) != 1:
        pf = 'fail'
        print(' - the wrong number of dual cost values was created')
    else:
        print(' - the right number of dual cost values was created')
    dual_cost = test_model.dualCosts[0].value
    if dual_cost != (1000 - 100 * 0.1):
        pf = 'fail'
        print(' - an unexpected dual cost value was found')
    else:
        print(' - the expected dual cost value was found')
    # TEST 2
    print('- Test 2: Reassignment of an existing dual cost')
    # Configure the test by modifying the marginal price value.
    test_market.marginalPrices[0].value = 0.2
    test_model.update_dual_costs(test_market)
    print(' - the method ran without errors')
    if len(test_model.dualCosts) != 1:
        pf = 'fail'
        print(' - the wrong number of dual cost values was created')
    else:
        print(' - the right number of dual cost values was created')
    dual_cost = test_model.dualCosts[0].value
    if dual_cost != (1000 - 100 * 0.2):
        pf = 'fail'
        print(' - an unexpected dual cost value was found')
    else:
        print(' - the expected dual cost value was found')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
def test_update_production_costs():
    """Test NeighborModel.update_production_costs().

    Verifies first-time calculation of a production cost from active
    vertices and its reassignment after the scheduled power changes.
    """
    print('Running NeighborModel.test_update_production_costs()')
    pf = 'pass'
    # Create a test Market object.
    test_market = Market()
    # Create and store a TimeInterval object.
    dt = datetime.now()  # datetime that may be used for most datetime arguments
    time_interval = TimeInterval(dt, timedelta(hours=1), test_market, dt, dt)
    test_market.timeIntervals = [time_interval]
    # Create a test NeighborModel object.
    test_model = NeighborModel()
    # Create and store a scheduled power IntervalValue in the active time
    # interval.
    test_model.scheduledPowers = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ScheduledPower, 50)]
    # Create and store some active vertices IntervalValue objects in the
    # active time interval.
    vertex1 = Vertex(0.1, 1000, 0)
    vertex2 = Vertex(0.2, 1015, 100)
    test_model.activeVertices = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ActiveVertex, vertex1),
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ActiveVertex, vertex2)
    ]
    # TEST 1
    print('- Test 1: First calculation of a production cost')
    test_model.update_production_costs(test_market)
    print(' - the method ran without errors')
    if len(test_model.productionCosts) != 1:
        pf = 'fail'
        print(' - the wrong number of production costs was created')
    else:
        print(' - the right number of production cost values was created')
    production_cost = test_model.productionCosts[0].value
    # 50 kW is halfway between the vertices, so cost interpolates to 1007.5.
    if production_cost != 1007.5:
        pf = 'fail'
        print(' - an unexpected production cost value was found')
    else:
        print(' - the expected production cost value was found')
    # TEST 2
    print('- Test 2: Reassignment of an existing production cost')
    # Configure the test by modifying the scheduled power value.
    test_model.scheduledPowers[0].value = 150
    test_model.update_production_costs(test_market)
    print(' - the method ran without errors')
    if len(test_model.productionCosts) != 1:
        pf = 'fail'
        print(' - the wrong number of productions was created')
    else:
        print(' - the right number of production cost values was created')
    production_cost = test_model.productionCosts[0].value
    if production_cost != 1015:
        pf = 'fail'
        # MESSAGE FIX: original said 'dual cost' in this production-cost test.
        print(' - an unexpected production cost value was found')
    else:
        print(' - the expected production cost value was found')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
def test_update_vertices():
    """Test NeighborModel.update_vertices().

    Covers: missing default vertex, a non-transactive neighbor, a
    transactive neighbor with and without received transactive records,
    and the case where a demand threshold introduces extra vertices.
    """
    print('Running NeighborModel.test_update_vertices()')
    pf = 'pass'
    # Create a test Market object.
    test_market = Market()
    # Create and store a TimeInterval object.
    dt = datetime.now()  # datetime that may be used for most datetime arguments
    time_interval = TimeInterval(dt, timedelta(hours=1), test_market, dt, dt)
    test_market.timeIntervals = [time_interval]
    # Create a test NeighborModel object.
    test_model = NeighborModel()
    # Create and store a scheduled power IntervalValue in the active time interval.
    test_model.scheduledPowers = [
        IntervalValue(test_model, time_interval, test_market, MeasurementType.ScheduledPower, 50)]
    # Create a Neighbor object and its maximum and minimum powers.
    test_object = Neighbor()
    test_object.maximumPower = 200
    test_object.minimumPower = 0
    test_object.lossFactor = 0  # eliminate losses from the calcs for now.
    # Have the Neighbor model and object cross reference one another.
    test_object.model = test_model
    test_model.object = test_object
    ## TEST 1
    print('- Test 1: No default vertex has been defined for the Neighbor')
    test_model.defaultVertices = []
    test_model.update_vertices(test_market)
    print(' - the method warned and returned, as designed.')
    ## TEST 2
    print('- Test 2: The Neighbor is not transactive')
    # Create the default Vertex object.
    test_model.defaultVertices = [Vertex(.1, 0, 100)]
    test_model.transactive = False
    test_model.update_vertices(test_market)
    print(' - the method ran without errors')
    if len(test_model.activeVertices) != 1:
        pf = 'fail'
        print(' - there is an unexpected number of active vertices')
    else:
        print(' - the expected number of active vertices was found')
    vertex = test_model.activeVertices[0].value
    if vertex.power != 100 or vertex.marginalPrice != 0.1:
        pf = 'fail'
        print(' - the vertex values are not as expected')
    else:
        print(' - the vertex values were derived from the default vertex as expected')
    ## TEST 3
    print('- Test 3: The Neighbor is transactive, but transactive records are not available')
    test_model.transactive = True
    test_model.defaultVertices = [Vertex(.2, 0, 200)]  # Changed
    test_model.update_vertices(test_market)
    print(' - the method ran without errors')
    if len(test_model.activeVertices) != 1:
        pf = 'fail'
        print(' - there is an unexpected number of active vertices')
    else:
        print(' - the expected number of active vertices was found')
    vertex = test_model.activeVertices[0].value
    if vertex.power != 200 or vertex.marginalPrice != 0.2:
        pf = 'fail'
        print(' - the vertex values are not as expected')
    else:
        print(' - the vertex values were derived from the default vertex as expected')
    ## TEST 4
    # BUG FIX: original wrapped the message in a list literal, printing its
    # repr (brackets and quotes); print the plain string.
    print('- Test 4: The Neighbor is transactive, and a transactive records are available to use')
    test_model.transactive = True
    # Create and store some received transactive records
    transactive_record1 = TransactiveRecord(time_interval, 1, 0.15, 0)
    transactive_record2 = TransactiveRecord(time_interval, 2, 0.25, 100)
    transactive_record3 = TransactiveRecord(time_interval, 0, 0.2, 50)
    test_model.receivedSignal = [transactive_record1, transactive_record2, transactive_record3]
    test_model.demandThreshold = 500
    test_model.update_vertices(test_market)
    print(' - the method ran without errors')
    if len(test_model.activeVertices) != 2:
        pf = 'fail'
        print(' - there is an unexpected number of active vertices')
    else:
        print(' - the expected number of active vertices was found')
    # vertex = [test_model.activeVertices(:).value]
    # vertex_power = [vertex.power]
    # vertex_marginal_price = [vertex.marginalPrice]
    vertex_power = [x.value.power for x in test_model.activeVertices]
    vertex_marginal_price = [x.value.marginalPrice for x in test_model.activeVertices]
    # if any(~ismember([vertex_power], [0, 100]))
    # or any(~ismember([vertex_marginal_price], [0.1500, 0.2500]))
    non_members1 = [x for x in vertex_power if x not in [0, 100]]
    non_members2 = [x for x in vertex_marginal_price if x not in [0.1500, 0.2500]]
    if len(non_members1) > 0 or len(non_members2) > 0:
        pf = 'fail'
        print(' - the vertex values are not as expected')
    else:
        print(' - the vertex values were derived from the received transactive records as expected')
    ## TEST 5
    print('- Test 5: The Neighbor is transactive with transactive records, and demand charges are in play')
    test_model.transactive = True
    # Create and store some received transactive records
    transactive_record1 = TransactiveRecord(time_interval, 1, 0.15, 0)
    transactive_record2 = TransactiveRecord(time_interval, 2, 0.25, 100)
    transactive_record3 = TransactiveRecord(time_interval, 0, 0.2, 50)
    test_model.receivedSignal = [transactive_record1, transactive_record2, transactive_record3]
    # The demand threshold is being moved into active vertex range.
    test_model.demandThreshold = 80
    test_model.update_vertices(test_market)
    print(' - the method ran without errors')
    if len(test_model.activeVertices) != 4:
        pf = 'fail'
        print(' - there is an unexpected number of active vertices')
    else:
        print(' - the expected number of active vertices was found')
    # vertex = [test_model.activeVertices(:).value]
    # vertex_power = [vertex.power]
    # vertex_marginal_price = [vertex.marginalPrice]
    vertex_power = [x.value.power for x in test_model.activeVertices]
    vertex_marginal_price = [round(x.value.marginalPrice, 4) for x in test_model.activeVertices]
    # if any(~ismember([vertex_power], [0, 80, 100]))
    # or any(~ismember(single(vertex_marginal_price), single([0.1500, 0.2300, 10.2500, 10.2300])))
    non_members1 = [x for x in vertex_power if x not in [0, 80, 100]]
    non_members2 = [x for x in vertex_marginal_price if x not in [0.1500, 0.2300, 10.2500, 10.2300]]
    if len(non_members1) > 0 or len(non_members2) > 0:
        pf = 'fail'
        print(' - the vertex values are not as expected')
    else:
        print(' - the vertex values were derived from the received transactive records and demand threshold as expected')
    # Success.
    print('- the test ran to completion')
    # BUG FIX: original used MATLAB sprintf token '#s'; use %-formatting.
    print('\nResult: %s\n\n' % pf)
# Script entry point: run the complete NeighborModel test suite
# (test_all is defined earlier in this file).
if __name__ == '__main__':
    test_all()
|
import argparse
import json
from torch.utils.data import DataLoader
from models.doepd_net import *
from utils.datasets import *
from utils.utils import *
from utils.parse_config import *
def test(
        data,
        weights=None,
        batch_size=16,
        img_size=416,
        conf_thres=0.001,
        iou_thres=0.6,  # for nms
        save_json=False,
        single_cls=False,
        augment=False,
        model=None,
        dataloader=None):
    """Evaluate a detection model on the validation set described by *data*.

    Prints per-class precision/recall/mAP@0.5/F1 and returns
    ((mp, mr, map, mf1, giou_loss, obj_loss, cls_loss), maps) where *maps*
    is the per-class mAP array.

    When *model* is None the function builds and loads a DoepdNet itself
    (standalone use); otherwise it reuses the caller's model and device
    (called from train.py during training).
    """
    # Initialize/load model and set device
    if model is None:
        device = torch_utils.select_device(opt.device, batch_size=batch_size)
        verbose = opt.task == 'test'
        # Remove previous
        for f in glob.glob('test_batch*.png'):
            os.remove(f)
        # Initialize model
        model = DoepdNet(run_mode='yolo', image_size = img_size)
        load_doepd_weights(model, device=device)
        # Fuse
        # model.fuse()
        model.to(device)
        if device.type != 'cpu' and torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    else:  # called by train.py
        device = next(model.parameters()).device  # get model device
        verbose = False
    # Configure run
    data = parse_data_cfg(data)
    nc = 1 if single_cls else int(data['classes'])  # number of classes
    path = data['valid']  # path to test images
    names = load_classes(data['names'])  # class names
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    iouv = iouv[0].view(1)  # comment for mAP@0.5:0.95
    niou = iouv.numel()
    # Dataloader
    if dataloader is None:
        dataset = LoadImagesAndLabels(path, img_size, batch_size, rect=True, single_cls=opt.single_cls)
        batch_size = min(batch_size, len(dataset))
        dataloader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]),
                                pin_memory=True,
                                collate_fn=dataset.collate_fn)
    seen = 0
    model.eval()
    _ = model(torch.zeros((1, 3, img_size, img_size), device=device)) if device.type != 'cpu' else None  # run once
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@0.5', 'F1')
    p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = imgs.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)
        # Plot images with bounding boxes
        f = 'test_batch%g.png' % batch_i  # filename
        if batch_i < 1 and not os.path.exists(f):
            plot_images(imgs=imgs, targets=targets, paths=paths, fname=f)
        # Disable gradients
        with torch.no_grad():
            # Run model
            t = torch_utils.time_synchronized()
            # NOTE(review): the trailing [0] assumes the model call returns a tuple
            # whose FIRST element is the (inference, training) pair — confirm against
            # DoepdNet.forward; upstream YOLOv3 unpacks the call result directly.
            inf_out, train_out = model(imgs, augment=augment)[0]  # inference and training outputs
            t0 += torch_utils.time_synchronized() - t
            # Compute loss
            if hasattr(model, 'hyp'):  # if model has loss hyperparameters
                loss += compute_loss(train_out, targets, model)[1][:3]  # GIoU, obj, cls
            # Run NMS
            t = torch_utils.time_synchronized()
            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)  # nms
            t1 += torch_utils.time_synchronized() - t
        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1
            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue
            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]
            # Clip boxes to image bounds
            clip_coords(pred, (height, width))
            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split('_')[-1])
                box = pred[:, :4].clone()  # xyxy
                scale_coords(imgs[si].shape[1:], box, shapes[si][0], shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])],
                                  'bbox': [round(x, 3) for x in b],
                                  'score': round(p[4], 5)})
            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]
                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh
                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero().view(-1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero().view(-1)  # prediction indices
                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices
                        # Append detections
                        for j in (ious > iouv[0]).nonzero():
                            d = ti[i[j]]  # detected target
                            if d not in detected:
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(detected) == nl:  # all targets already located in image
                                    break
            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        if niou > 1:
            p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0]  # [P, R, AP@0.5:0.95, AP@0.5]
        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)
    # Print results
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))
    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))
    # Print speeds
    if verbose or save_json:
        t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (img_size, img_size, batch_size)  # tuple
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
    # Per-class mAP array (classes without detections fall back to the mean mAP).
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map, mf1, *(loss.cpu() / len(dataloader)).tolist()), maps
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
    parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data path')
    parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
    parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
    parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
    parser.add_argument('--task', default='test', help="'test', 'study', 'benchmark'")
    parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    opt = parser.parse_args()
    # Force COCO-style JSON output when evaluating on a COCO data file.
    opt.save_json = opt.save_json or any([x in opt.data for x in ['coco.data', 'coco2014.data', 'coco2017.data']])
    print(opt)

    # task = 'test', 'study', 'benchmark'
    if opt.task == 'test':  # (default) test normally
        # BUG FIX: test() takes no cfg argument (the architecture is fixed by
        # DoepdNet), but opt.cfg was being passed first, shifting every later
        # positional argument by one (data received the cfg path, batch_size
        # the weights string, model the augment flag, ...). Pass only the
        # arguments test() actually declares.
        test(opt.data,
             opt.weights,
             opt.batch_size,
             opt.img_size,
             opt.conf_thres,
             opt.iou_thres,
             opt.save_json,
             opt.single_cls,
             opt.augment)

    elif opt.task == 'benchmark':  # mAPs at 320-608 at conf 0.5 and 0.7
        y = []
        for i in [320, 416, 512, 608]:  # img-size
            for j in [0.5, 0.7]:  # iou-thres
                t = time.time()
                # BUG FIX: opt.cfg removed here as well (see above).
                r = test(opt.data, opt.weights, opt.batch_size, i, opt.conf_thres, j, opt.save_json)[0]
                y.append(r + (time.time() - t,))
        np.savetxt('benchmark.txt', y, fmt='%10.4g')  # y = np.loadtxt('benchmark.txt')

    elif opt.task == 'study':  # Parameter study
        y = []
        x = np.arange(0.4, 0.9, 0.05)  # iou-thres
        for i in x:
            t = time.time()
            # BUG FIX: opt.cfg removed here as well (see above).
            r = test(opt.data, opt.weights, opt.batch_size, opt.img_size, opt.conf_thres, i, opt.save_json)[0]
            y.append(r + (time.time() - t,))
        np.savetxt('study.txt', y, fmt='%10.4g')  # y = np.loadtxt('study.txt')

        # Plot mAP@0.5, mAP@0.5:0.95 and wall time against the IoU threshold.
        fig, ax = plt.subplots(3, 1, figsize=(6, 6))
        y = np.stack(y, 0)
        ax[0].plot(x, y[:, 2], marker='.', label='mAP@0.5')
        ax[0].set_ylabel('mAP')
        ax[1].plot(x, y[:, 3], marker='.', label='mAP@0.5:0.95')
        ax[1].set_ylabel('mAP')
        ax[2].plot(x, y[:, -1], marker='.', label='time')
        ax[2].set_ylabel('time (s)')
        for i in range(3):
            ax[i].legend()
            ax[i].set_xlabel('iou_thr')
        fig.tight_layout()
        plt.savefig('study.jpg', dpi=200)
|
# coding: utf-8
# Copyright 2009-2014 <NAME>
# License: BSD (see file COPYING for details)
"""This fixture adds all known countries of the world to your
database.
Unlike the official `ISO 3166 <http://www.iso.org/iso/country_codes>`_
it features more languages, and it creates also codes for countries
that no longer exist. It is not official at all. See also
:doc:`/topics/gpdn`.
The `countries.xml` is an unmodified copy of
http://users.pandora.be/bosteels/countries.xml
TODO: Estonian names. Maybe from
https://et.wikipedia.org/wiki/ISO_maakoodide_loend
`TABLE2` contains 4-letter codes for countries that no longer exist.
This is mostly based on <http://www.davros.org/misc/iso3166.html>,
but one country (DEDE) was added.
The :mod:`lino_xl.lib.statbel.countries.fixtures.inscodes` fixture,
extends this data by attaching Belgian INS codes to these countries.
"""
from __future__ import print_function
import os
from xml.dom import minidom
import logging
logger = logging.getLogger('lino')
from django.conf import settings
from lino.api import dd
TABLE2 = """
BQAQ ATB 000 British Antarctic Territory
BUMM BUR 104 Burma, Socialist Republic of the Union of
BYAA BYS 112 Byelorussian SSR Soviet Socialist Republic
CTKI CTE 128 Canton & Enderbury Islands
CSHH CSK 200 Czechoslovakia, Czechoslovak Socialist Republic
DYBJ DHY 204 Dahomey
NQAQ ATN 216 Dronning Maud Land
TPTL TMP 626 East Timor (was Portuguese Timor)
AIDJ AFI 262 French Afars and Issas
FQHH ATF 000 French Southern and Antarctic Territories \
(now split between AQ and TF)
DEDE ??? ??? German Federal Republic
DDDE DDR 278 German Democratic Republic
GEHH GEL 296 Gilbert & Ellice Islands (now split into Kiribati and Tuvalu)
JTUM JTN 396 Johnston Island
MIUM MID 488 Midway Islands
NTHH NTZ 536 Neutral Zone (formerly between Saudi Arabia & Iraq)
NHVU NHB 548 New Hebrides
PCHH PCI 582 Pacific Islands (trust territory) \
(divided into FM, MH, MP, and PW)
PZPA PCZ 000 Panama Canal Zone
SKIN SKM 000 Sikkim
RHZW RHO 716 Southern Rhodesia
PUUM PUS 849 US Miscellaneous Pacific Islands
SUHH SUN 810 USSR, Union of Soviet Socialist Republics
HVBF HVO 854 Upper Volta, Republic of
VDVN VDR 000 Viet-Nam, Democratic Republic of
WKUM WAK 872 Wake Island
YDYE YMD 720 Yemen, Democratic, People's Democratic Republic of
YUCS YUG 891 Yugoslavia, Federal Republic of
ZRCD ZAR 180 Zaire, Republic of
"""
#~ unused = """
#~ FX FXX 249 France, Metropolitan
#~ EH ESH 732 Spanish Sahara (now Western Sahara)
#~ YU YUG 890 Yugoslavia, Socialist Federal Republic of
#~ """
COUNTRIES = {}
def objects():
    """Yield Country instances for the countries fixture.

    Reads ``countries.xml`` (translated names plus ISO alpha-2/alpha-3
    codes), skipping codes already present in the database so the fixture
    is re-runnable, then adds the 4-letter codes of former countries from
    TABLE2.
    """
    created = 0
    Country = settings.SITE.models.countries.Country
    xml_path = os.path.join(os.path.dirname(__file__), 'countries.xml')
    logger.debug("Reading %s", xml_path)
    doc = minidom.parse(xml_path)
    for node in doc.documentElement.getElementsByTagName('coun:country'):
        # Collect the translated names, keyed by language code.
        names = {}
        for name_node in node.getElementsByTagName('coun:name'):
            assert len(name_node.childNodes) == 1
            lang = str(name_node.attributes['lang'].value)
            names[lang] = name_node.firstChild.data
        kw = dd.babel_values('name', **names)
        iso2 = node.getElementsByTagName('coun:alpha2')[0].childNodes[0].data
        if Country.objects.filter(pk=iso2).count() > 0:
            logger.debug("ISO code %r already exists %s", iso2, node)
            continue
        iso3 = node.getElementsByTagName('coun:alpha3')[0].childNodes[0].data
        kw.update(isocode=iso2, iso3=iso3)
        if 'name' not in kw:
            # Fall back to the English name when the site's default
            # language has no entry in the XML.
            kw['name'] = names['en']
        if not kw['name']:
            logger.warning(
                "%r : no name for default site language %s",
                node, settings.SITE.DEFAULT_LANGUAGE.django_code)
            continue
        created += 1
        yield Country(**kw)
    # Former countries: "CODE4 CODE3 NUM Name with spaces" per line.
    for line in TABLE2.splitlines():
        line = line.strip()
        if not line:
            continue
        isocode, _iso3, _num, name = line.split(None, 3)
        created += 1
        yield Country(isocode=isocode, name=name)
    logger.info("Installed %d countries", created)
|
<filename>scrapy/downloadermiddlewares/robotstxt.py
"""
This is a middleware to respect robots.txt policies. To activate it you must
enable this middleware and enable the ROBOTSTXT_OBEY setting.
"""
import logging
import sys
import re
from twisted.internet.defer import Deferred, maybeDeferred
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.log import failure_to_exc_info
from scrapy.utils.python import to_native_str
from scrapy.utils.misc import load_object
logger = logging.getLogger(__name__)
class RobotsTxtMiddleware(object):
    """Downloader middleware that drops requests disallowed by robots.txt.

    robots.txt is fetched at most once per netloc and cached as a parser
    object; requests arriving while the fetch is still in flight wait on a
    Deferred instead of triggering a second download.
    """
    # High priority so the robots.txt fetch jumps ahead of ordinary requests.
    DOWNLOAD_PRIORITY = 1000

    def __init__(self, crawler):
        if not crawler.settings.getbool('ROBOTSTXT_OBEY'):
            raise NotConfigured
        self._default_useragent = crawler.settings.get('USER_AGENT', 'Scrapy')
        self._robotstxt_useragent = crawler.settings.get('ROBOTSTXT_USER_AGENT', None)
        self.crawler = crawler
        # netloc -> parser object, or a Deferred while the fetch is pending,
        # or None after a download error (meaning: allow everything).
        self._parsers = {}
        self._parserimpl = load_object(crawler.settings.get('ROBOTSTXT_PARSER'))

        # check if parser dependencies are met, this should throw an error otherwise.
        self._parserimpl.from_crawler(self.crawler, b'')

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    def process_request(self, request, spider):
        # Per-request opt-out; also set on the robots.txt request itself to
        # avoid recursing into this middleware.
        if request.meta.get('dont_obey_robotstxt'):
            return
        d = maybeDeferred(self.robot_parser, request, spider)
        d.addCallback(self.process_request_2, request, spider)
        return d

    def process_request_2(self, rp, request, spider):
        # rp is None when robots.txt could not be fetched: allow the request.
        if rp is None:
            return
        useragent = self._robotstxt_useragent
        if not useragent:
            useragent = request.headers.get(b'User-Agent', self._default_useragent)
        if not rp.allowed(request.url, useragent):
            logger.debug("Forbidden by robots.txt: %(request)s",
                         {'request': request}, extra={'spider': spider})
            self.crawler.stats.inc_value('robotstxt/forbidden')
            raise IgnoreRequest("Forbidden by robots.txt")

    def robot_parser(self, request, spider):
        """Return the cached parser for the request's netloc, or a Deferred
        that fires with it once the robots.txt download completes."""
        url = urlparse_cached(request)
        netloc = url.netloc

        if netloc not in self._parsers:
            # First request for this host: start the robots.txt download.
            self._parsers[netloc] = Deferred()
            robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc)
            robotsreq = Request(
                robotsurl,
                priority=self.DOWNLOAD_PRIORITY,
                meta={'dont_obey_robotstxt': True}
            )
            dfd = self.crawler.engine.download(robotsreq, spider)
            dfd.addCallback(self._parse_robots, netloc, spider)
            dfd.addErrback(self._logerror, robotsreq, spider)
            dfd.addErrback(self._robots_error, netloc)
            self.crawler.stats.inc_value('robotstxt/request_count')

        if isinstance(self._parsers[netloc], Deferred):
            # Fetch still in flight: chain a fresh Deferred so every waiting
            # request is fired independently when the parser arrives.
            d = Deferred()

            def cb(result):
                d.callback(result)
                return result
            self._parsers[netloc].addCallback(cb)
            return d
        else:
            return self._parsers[netloc]

    def _logerror(self, failure, request, spider):
        # Log download failures, but pass the failure along the errback chain.
        if failure.type is not IgnoreRequest:
            logger.error("Error downloading %(request)s: %(f_exception)s",
                         {'request': request, 'f_exception': failure.value},
                         exc_info=failure_to_exc_info(failure),
                         extra={'spider': spider})
        return failure

    def _parse_robots(self, response, netloc, spider):
        self.crawler.stats.inc_value('robotstxt/response_count')
        self.crawler.stats.inc_value('robotstxt/response_status_count/{}'.format(response.status))
        rp = self._parserimpl.from_crawler(self.crawler, response.body)
        # Swap the pending Deferred for the parser, then wake the waiters.
        rp_dfd = self._parsers[netloc]
        self._parsers[netloc] = rp
        rp_dfd.callback(rp)

    def _robots_error(self, failure, netloc):
        if failure.type is not IgnoreRequest:
            key = 'robotstxt/exception_count/{}'.format(failure.type)
            self.crawler.stats.inc_value(key)
        # None marks "no robots.txt available": future requests pass through.
        rp_dfd = self._parsers[netloc]
        self._parsers[netloc] = None
        rp_dfd.callback(None)
|
<reponame>jakekrafczyk/restauraunt_ranker<filename>scrape_restauraunts.py
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
import random
import time
# optional first step, define the driver, ie chrome or Brave
driver = webdriver.Chrome('./chromedriver') # <- chromedriver needs to be the same version as chrome
# Starting point: page 1 of Austin, TX restaurant listings.
url = "https://www.tripadvisor.com/Restaurants-g30196-Austin_Texas.html#EATERY_LIST_CONTENTS"
# url = "<a data-page-number="1" data-offset="0" href="/Restaurants-g30196-Austin_Texas.html#EATERY_LIST_CONTENTS" class="nav previous rndBtn ui_button primary taLnk" onclick=" require('common/Radio')('restaurant-filters').emit('paginate', this.getAttribute('data-offset'));; ta.trackEventOnPage('STANDARD_PAGINATION', 'previous', '1', 0); return false;
# ">
# Previous
# </a>" //*[@id="EATERY_LIST_CONTENTS"]/div[2]/div/a
#//*[@id="EATERY_LIST_CONTENTS"]/div[2]/div/a
# #EATERY_LIST_CONTENTS > div.deckTools.btm > div > a
driver.get(url)
# NEED TO LOOK AT WEBDRIVER DOCUMENTATION, THIS PROCESS IS NOT WORKING
# now lets narrow in on the data we want to collect
# rest_dict accumulates results across pages:
#   'review_pages' -> review-page hrefs, 'names' -> restaurant names.
rest_dict = {'review_pages':[],'names':[]}
#driver.get(url)
#/html/body/div[4]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[6]/div[3]/div[5]/div[2]/div/a
def names_and_links(some_url,data_parameters):
    """Scrape restaurant names and review-page links from a TripAdvisor
    listing page into the module-level ``rest_dict``, then recurse while a
    "next" page exists.

    NOTE(review): ``data_parameters`` is effectively unused — the body reads
    and mutates the module-level ``data_page`` dict instead (they happen to
    be the same object at every current call site).
    """
    #print(data_parameters)
    # request the data from the desired page
    #page = driver.get(some_url)#,data=data_parameters)
    page = requests.get(some_url)
    # set encoding- this is the default so technically not necessary
    #page.encoding = 'ISO-885901'
    # collect your soup!
    soup = BeautifulSoup(page.text, 'html.parser')
    # can print the page output with this -> print(soup.prettify())
    # "wQjYiB7z" is TripAdvisor's (obfuscated, fragile) listing-row class.
    boost_list = soup.find_all(class_ = "wQjYiB7z")
    count = 0
    for listing in boost_list:
        # identify and append the link to the reviews page
        if listing.text[0].isdigit():
            reviews_link = listing.find('a')['href']
            rest_dict['review_pages'].append(reviews_link)
        # clean and append the name of the restaraunt(checking for a digit at
        # the beginning excludes sponsored listings)
        if listing.text[0].isdigit():
            name = re.sub(r'\d+', '',listing.text)[2:].strip()
            rest_dict['names'].append(name)
            print(name)
            count += 1
    print(count)
    # identify the "next" button
    next_ = soup.find(class_ = 'unified pagination js_pageLinks')
    # if theres a next page available identify it
    button_identifier = " ".join(next_.find('span')['class'])
    if button_identifier != "nav next disabled":
        #next_page_link = next_.find('a')['href']
        #print(next_page_link)
        # wait 4 - 12 secs before next scrape
        print("\nWaiting...\n")
        time.sleep(random.randint(4,12))
        # # move to the next page of results
        # data_page = {}
        # next_page = requests.get('https://www.tripadvisor.com' + next_page_link,params=)
        # next_soup = BeautifulSoup(next_page.text, 'html.parser')
        #update data page parameters
        # count +=1
        # offset += 30
        # data_page["data-page-number"] += 1
        # data_page["data-offset"] += 30
        # data_page["data-page-number"] = str(count)
        # data_page["data-offset"] = str(offset)
        # NOTE(review): 'data_page' below is the module-level dict, not the
        # 'data_parameters' argument.
        count = int(data_page["data-page-number"]) + 1
        offset = int(data_page["data-offset"]) + 30
        data_page["data-page-number"] = str(count)
        data_page["data-offset"] = str(offset)
        driver.find_element_by_xpath('/html/body/div[4]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[6]/div[3]/div[5]/div[2]/div').click()
        #now run the above search again
        # NOTE(review): only the Selenium browser advanced; this recursion
        # re-fetches the SAME some_url via requests.get, so page 1 is scraped
        # repeatedly — confirm the intended next-page URL should be used here.
        names_and_links(some_url,data_page)
# Initial pagination state mirroring the attributes of TripAdvisor's "next"
# button; names_and_links() mutates this module-level dict while paging.
count = 1
offset = 0
data_page = {"data-page-number":str(count),"data-offset":str(offset),"href":"/Restaurants-g30196-oa60-Austin_Texas.html#EATERY_LIST_CONTENTS", "class":"nav next rndBtn ui_button primary taLnk", "onclick":f" require('common/Radio')('restaurant-filters').emit('paginate', this.getAttribute('data-offset'));; ta.trackEventOnPage('STANDARD_PAGINATION', 'next', '{count}', 0); return false;"}
names_and_links(url,data_page)
# NEED TO IDENTIFY BY DATA-PAGE-NUMBER AND/OR DATAOFFSET
# NOTE(review): data_page is passed as the DataFrame *index* argument here,
# which looks unintended — pd.DataFrame(rest_dict) alone would give a plain
# 0..n-1 index; confirm.
df = pd.DataFrame(rest_dict,data_page)
# df['Name'] = names
# df['Review_Page'] = review_pages
print(len(df))
df.to_csv('Austin_restaurants.csv')
<reponame>siscia/Facepager
import json
import os,sys,platform
import lxml
import lxml.html
import StringIO
def getResourceFolder():
    """Return the directory that holds the application's resource files.

    Frozen (PyInstaller-style) builds use the executable's directory, except
    on macOS where the bundle's unpack directory (sys._MEIPASS) is used.
    Plain interpreter runs fall back to the current working directory.
    """
    frozen = getattr(sys, 'frozen', False)
    if not frozen:
        return os.getcwd()
    if platform.system() == 'Darwin':
        return sys._MEIPASS
    return os.path.dirname(sys.executable)
def hasDictValue(data, multikey):
    """Return True when the dot-separated path *multikey* resolves in *data*.

    Dicts are traversed by key; lists accept '*' (any element exists /
    descend into the first element) or a numeric index. Any error, or a
    leaf that is neither dict nor list, yields False.
    """
    try:
        first, sep, rest = multikey.partition('.')
        if type(data) is dict and first != '':
            if sep:
                return hasDictValue(data.get(first, ""), rest)
            return first in data
        if type(data) is list and first == '*':
            if sep and data:
                # Descend into the first element only.
                return hasDictValue(data[0], rest)
            return len(data) > 0
        if type(data) is list and first.isnumeric():
            index = int(first)
            if sep and len(data) > index:
                return hasDictValue(data[index], rest)
            return len(data) > index
        return False
    except Exception:
        return False
def getDictValue(data, multikey, dump=True):
    """Resolve the dot-separated path *multikey* inside nested dicts/lists.

    '*' fans out over every element and joins the results with ';'. When
    *dump* is true, dict/list results are JSON-encoded and integers are
    returned as strings. Any lookup error yields "" (best-effort helper).
    """
    import numbers  # local import: keeps the module's import block untouched

    try:
        keys = multikey.split('.', 1)
        if type(data) is dict and keys[0] != '':
            try:
                value = data[keys[0]]
                if len(keys) > 1:
                    value = getDictValue(value, keys[1], dump)
            except Exception:
                # Key lookup failed: '*' fans out over all values; any other
                # missing key yields ''.
                if keys[0] == '*' and len(keys) > 1:
                    listkey = keys[1]
                elif keys[0] == '*':
                    listkey = ''
                else:
                    listkey = None

                if listkey is not None:
                    valuelist = []
                    for elem in data:
                        valuelist.append(getDictValue(data[elem], listkey, dump))
                    value = ";".join(valuelist)
                else:
                    value = ''
        elif type(data) is list and keys[0] != '':
            try:
                value = data[int(keys[0])]
                if len(keys) > 1:
                    value = getDictValue(value, keys[1], dump)
            except Exception:
                # Non-numeric or out-of-range index: fan out over the list,
                # applying '*' (or the plain key) to every element.
                if keys[0] == '*' and len(keys) > 1:
                    listkey = keys[1]
                elif keys[0] == '*':
                    listkey = ''
                else:
                    listkey = keys[0]

                valuelist = []
                for elem in data:
                    valuelist.append(getDictValue(elem, listkey, dump))
                value = ";".join(valuelist)
        else:
            # Empty key or scalar data: the value is the data itself.
            value = data

        if dump and (type(value) is dict or type(value) is list):
            return json.dumps(value)
        elif dump and isinstance(value, numbers.Integral):
            # BUG FIX: the original tested isinstance(value, (int, long)).
            # The Python-2-only name `long` raised NameError on Python 3,
            # which the outer handler swallowed — so every integer lookup
            # returned "". numbers.Integral covers int on Python 3 and both
            # int and long on Python 2.
            return str(value)
        else:
            return value
    except Exception:
        return ""
def filterDictValue(data,multikey,dump=True):
    """Return *data* with the value at dot-separated path *multikey* removed.

    Counterpart to getDictValue(). When *dump* is true, dict/list results
    are JSON-encoded. Any error yields "".

    NOTE(review): the list branch binds ``value=data`` (an alias, not a
    copy), so the input list is modified in place — confirm callers expect
    that.
    """
    try:
        keys=multikey.split('.',1)
        if type(data) is dict and keys[0] != '':
            # Shallow copy of the dict without the filtered key.
            value = { key: data[key] for key in data.keys() if key != keys[0]}
            if len(keys) > 1:
                # Keep the key itself but filter deeper inside its value.
                value[keys[0]] = filterDictValue(data[keys[0]],keys[1],False)
            if not len(value):
                value = None
        elif type(data) is list and keys[0] != '':
            try:
                value=data
                if len(keys) > 1:
                    # NOTE(review): this calls getDictValue (which *extracts*
                    # the addressed value) rather than filterDictValue —
                    # confirm this asymmetry with the dict branch is intended.
                    value[int(keys[0])] = getDictValue(value[int(keys[0])],keys[1],False)
                else:
                    value[int(keys[0])] = ''
            except:
                # Non-numeric index: apply the filter to every element.
                if keys[0] == '*' and len(keys) > 1:
                    listkey = keys[1]
                elif keys[0] == '*':
                    listkey = ''
                else:
                    listkey = keys[0]
                valuelist=[]
                for elem in data:
                    valuelist.append(filterDictValue(elem,listkey,False))
                value = valuelist
        else:
            value = ''
        if dump and (type(value) is dict or type(value) is list):
            return json.dumps(value)
        else:
            return value
    except Exception as e:
        return ""
def recursiveIterKeys(value, prefix=None):
    """Recursively yield the dotted key path of every leaf in dict *value*.

    For ``{'a': 1, 'b': {'c': 2}}`` this yields ``'a'`` and ``'b.c'``
    (order follows dict iteration order). *prefix* is the path accumulated
    by the recursion.
    """
    # PORTABILITY FIX: .iterkeys() exists only on Python 2 dicts; plain
    # iteration over the dict behaves identically on Python 2 and 3.
    for key in value:
        if type(value[key]) is dict:
            for subkey in recursiveIterKeys(value[key], key):
                fullkey = subkey if prefix is None else ".".join([prefix, subkey])
                yield fullkey
        else:
            fullkey = key if prefix is None else ".".join([prefix, key])
            yield fullkey
def htmlToJson(data,csskey=None,type='lxml'):
    """Convert an HTML string into a nested dict/list structure.

    Each element becomes a dict with optional 'text', '@attr' entries and an
    'items' list of children; elements with neither attributes nor children
    collapse to their text. With *csskey*, only matching elements are
    converted.

    NOTE(review): Python-2-only code — uses ``unicode`` and ``basestring``,
    which raise NameError on Python 3. The ``type`` parameter shadows the
    builtin and is unused.
    """
    #type='html5'
    soup = lxml.html.fromstring(data)
    def parseSoup(element,context = True):
        # Convert one lxml element to text / @attributes / child items.
        out = {}
        if context:
            #out['name'] = element.tag
            if element.text is not None:
                out['text'] = unicode(element.text).strip("\n\t ")
        attributes= {}
        if context:
            for name, value in sorted(element.items()):
                attributes['@'+name] = value
            out.update(attributes)
        children = []
        for child in element:
            if isinstance(child.tag, basestring):
                # Real element node: key by tag (plus '#id' when present).
                id = str(child.get('id',''))
                key = child.tag+'#'+id if id != '' else child.tag
                children.append({key:parseSoup(child)})
            else:
                # Non-element node (e.g. comment): keep its text only.
                value = unicode(child.text).strip("\n\t ")
                if value != '':
                    children.append({'text':value})
        if len(children) > 0:
            out['items'] = children
        #simplify:
        if len(children) == 0 and len(attributes) ==0:
            out = out.get('text',None)
        elif len(children) > 0 and len(attributes) ==0 and out.get('text',None) is None:
            del out['items']
            out = children
        return out
    output = []
    if csskey is not None:
        for part in soup.cssselect(csskey):
            # NOTE(review): parseSoup usually returns a dict here, and
            # list.extend(dict) appends only the dict's *keys* — confirm
            # whether output.append(...) was intended.
            output.extend(parseSoup(part,True))
    else:
        output = {soup.tag : parseSoup(soup,True)}
    return output
# See http://foobarnbaz.com/2012/12/31/file-upload-progressbar-in-pyqt/
# See http://code.activestate.com/recipes/578669-wrap-a-string-in-a-file-like-object-that-calls-a-u/
class CancelledError(Exception):
    """Raised when the user aborts an in-progress upload."""

    def __init__(self, msg):
        Exception.__init__(self, msg)
        # Keep the message available as an attribute for callers.
        self.msg = msg

    def __str__(self):
        return self.msg

    # repr() mirrors str() so log output shows the plain message.
    __repr__ = __str__
class BufferReader():
    """File-like wrapper around a string that reports read progress.

    Every read() invokes ``callback(bytes_read_so_far, total_length)``;
    if the callback raises, the read aborts with CancelledError. Used to
    drive upload progress bars.
    """

    def __init__(self, data='', callback=None):
        self._callback = callback
        self._progress = 0
        self._len = int(len(data))
        self._io = StringIO.StringIO(data)

    def __len__(self):
        return self._len

    def rewind(self):
        # Seek back to the start so the buffer can be re-read/re-uploaded.
        self._io.seek(0)

    def read(self, *args):
        chunk = self._io.read(*args)
        self._progress += int(len(chunk))
        self._notify()
        return chunk

    def _notify(self):
        # Report progress; translate any callback failure into cancellation.
        if not self._callback:
            return
        try:
            self._callback(self._progress, self._len)
        except:
            raise CancelledError('The upload was cancelled.')
<reponame>zhaoshiyu/DNLP
# -*- coding: utf-8 -*-
import os
import collections
import numpy as np
import pickle
class Transfer(object):
    """Character-level text/label vectorizer.

    Builds (or loads from ``<data_dir>/vocab_labels.pkl``) a character
    vocabulary and a label index, then converts text and labels into
    fixed-length id sequences of ``seq_length``.
    """
    def __init__(self, data_dir, seq_length, label_data=None, vocab_corpus_file=None):
        # Fixed output length for text tensors (truncate / zero-pad).
        self.seq_length = seq_length
        self.vocab_labels_file = os.path.join(data_dir, 'vocab_labels.pkl')
        if os.path.exists(self.vocab_labels_file):
            # Reuse the cached vocabulary and label index from a prior run.
            with open(self.vocab_labels_file, 'rb') as f:
                self.vocab, self.labels = pickle.load(f)
        elif label_data and vocab_corpus_file:
            self.labels = self.preprocess_labels(label_data)
            assert os.path.isfile(vocab_corpus_file), '%s file does not exist .' % vocab_corpus_file
            self.vocab = self.preprocess_vocab_file(vocab_corpus_file)
            # Cache both structures so later runs get identical id mappings.
            with open(self.vocab_labels_file, 'wb') as f:
                pickle.dump([self.vocab, self.labels], f)
        else:
            print('label data is null or not vecab corpus file, please check and try again')
            exit(1)
        assert self.vocab, 'vocab is null'
        assert self.labels, 'labels is null'
        self.label_size = len(self.labels)
        # +1 because character ids start at 1; 0 is reserved for unknown/padding.
        self.vocab_size = len(self.vocab) + 1
        # Reverse mapping id -> label for decoding predictions.
        self.id2labels = dict(list(zip(list(self.labels.values()), list(self.labels.keys()))))
    def preprocess_labels(self, labels_data):
        """Assign an integer id to every distinct label.

        NOTE(review): iterating a set makes the label->id assignment
        nondeterministic across interpreter runs; stability currently
        relies on the pickle cache written in __init__.
        """
        count = 0
        labels = {}
        for label in set(labels_data):
            labels[label] = count
            count += 1
        return labels
    def preprocess_vocab(self, data):
        """Build a char -> id mapping, most frequent first, ids from 1."""
        counter = collections.Counter(data)
        count_pairs = sorted(list(counter.items()), key=lambda i: -i[1])
        chars, _ = list(zip(*count_pairs))
        return dict(list(zip(chars, list(range(1, len(chars)+1)))))
    def preprocess_vocab_file(self, vocab_corpus_file):
        """Read the corpus file and build the character vocabulary from it."""
        if not os.path.exists(vocab_corpus_file):
            print('not vocab corpus file .')
            exit(1)
        with open(vocab_corpus_file, 'r') as f:
            corpus = f.readlines()
            corpus = ' '.join([i.strip() for i in corpus])
            # corpus = corpus.decode('utf8')
        return self.preprocess_vocab(corpus)
    def text_to_tensor(self, text):
        """Map *text* to char ids padded/truncated to self.seq_length.

        NOTE(review): shadowed by the second ``text_to_tensor`` definition
        below — this one-argument form is unreachable on instances.
        """
        vector_ids = list(map(self.vocab.get, text[:self.seq_length]))
        vector_ids = [i if i else 0 for i in vector_ids]  # unknown chars -> 0
        if len(vector_ids) >= self.seq_length:
            vector_ids = vector_ids[:self.seq_length]
        else:
            vector_ids = vector_ids + [0] * (self.seq_length - len(vector_ids))
        return vector_ids
    def label_to_tensor(self, text_labels):
        """Map labels to their integer ids as a numpy array."""
        return np.array(list(map(self.labels.get, text_labels)))
    def tensor_to_label(self, label_tensor):
        """Map integer ids back to their label strings."""
        return list(map(self.id2labels.get, label_tensor))
    def text_to_tensor(self, vocab_dict, text, seq_length):
        """Map *text* to char ids using an explicit vocabulary and length.

        NOTE(review): this redefinition silently *replaces* the earlier
        one-argument ``text_to_tensor`` above (a class body keeps only the
        last binding for a name), so callers using the one-argument form
        get a TypeError. Confirm which signature is intended and rename
        one of the two.
        """
        vector_ids = list(map(vocab_dict.get, text[:seq_length]))
        vector_ids = [i if i else 0 for i in vector_ids]  # unknown chars -> 0
        if len(vector_ids) >= seq_length:
            vector_ids = vector_ids[:seq_length]
        else:
            vector_ids = vector_ids + [0] * (seq_length - len(vector_ids))
        return vector_ids
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A program for analyzing the Shaka compiled sources to find areas that can
be removed if not needed. This uses the source map
(i.e. shaka-player.compiled.debug.map) to find the compiled code
size, see:
https://github.com/mattrobenolt/python-sourcemap
http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/
This script can output four different stats in two different formats:
- The size of functions and namespaces.
- The dependencies between types (in plain or DOT format).
- The dependencies between functions (in plain or DOT format).
- All tokens in the source map.
The dependencies can be outputted in DOT format which can be used with graph
programs to display a visual layout of the dependencies.
"""
import json
import math
import shakaBuildHelpers
import string
import sys
import os
def fromVlqSigned(value):
    """Convert a decoded VLQ value to a plain signed integer.

    In the source-map VLQ encoding the lowest bit is the sign flag and the
    remaining bits hold the magnitude.

    Arguments:
      value - A number decoded from a VLQ string.
    Returns:
      an integer.
    """
    magnitude = value >> 1
    return -magnitude if value & 1 else magnitude
class Segment:
    """One entry of a source-map 'mappings' line.

    Members:
      dstColOffset - destination-column delta from the previous segment.
      nameOffset - name-index delta from the previous segment, or None when
        the segment carries no name field.
    """

    def __init__(self, data):
        self.dstColOffset = data[0]
        # Only 5-field segments carry a name index (field 5).
        self.nameOffset = None if len(data) <= 4 else data[4]
def decodeSegment(segment):
    """Decode the Base64-VLQ numbers packed into *segment*.

    Arguments:
      segment - A string containing the encoded segment text.
    Returns:
      the parsed Segment.
    """
    # Each Base64 digit carries 5 payload bits; the 6th bit says whether
    # another digit follows for the same value.
    SHIFT = 5
    BASE = 1 << SHIFT
    MASK = BASE - 1          # low five bits (11111) of a digit
    CONTINUATION = BASE      # the 6th, "more digits follow" bit
    # Not a real Base64 string — each character is just one Base64 digit,
    # so decode with a plain lookup table instead of the base64 module.
    alphabet = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                'abcdefghijklmnopqrstuvwxyz'
                '0123456789+/')
    b64 = {ch: idx for idx, ch in enumerate(alphabet)}
    values = []
    cur = shift = 0
    for ch in segment:
        digit = b64[ch]
        cur += (digit & MASK) << shift
        shift += SHIFT
        if not digit & CONTINUATION:
            values.append(fromVlqSigned(cur))
            cur = shift = 0
    # A well-formed VLQ string leaves no partial value behind.
    assert cur == 0
    assert shift == 0
    return Segment(values)
class Token:
    """A Token represents one JavaScript symbol in the compiled output.

    This may be a named symbol (a variable, or the keyword 'function' with
    the name it was defined under) or anonymous punctuation such as ';' or
    '(' that appears in the map without a name (see isFunction).

    Members:
      dstLine - Line index in compiled code
      dstCol - Column index in compiled code
      name - Name of the token; or None
    """

    def __init__(self, dstLine, dstCol, name=None):
        self.dstLine = dstLine
        self.dstCol = dstCol
        self.name = name

    def __str__(self):
        # Anonymous tokens stringify as 'None'.
        return str(self.name)
def decodeMappings(lineData, names):
    """Decodes a source-map 'mappings' string into Tokens.

    Arguments:
      lineData - The raw mappings text: lines separated by ';', segments
                 within a line separated by ','.
      names - An array of strings containing the names of the objects.
    Returns:
      a list of Tokens.
    """
    tokens = []
    lines = lineData.split(';')
    # The name index accumulates across the entire mappings string, not per
    # line, so it is tracked outside the per-line loop.
    nameId = 0
    for dstLine, line in enumerate(lines):
        # Column offsets, by contrast, restart at zero on every line.
        dstCol = 0
        segments = line.split(',')
        for segment in segments:
            if not segment:
                continue
            segment = decodeSegment(segment)
            dstCol += segment.dstColOffset
            # The per-segment offset can be negative; after applying it the
            # running column must still be non-negative.
            assert dstCol >= 0
            name = None
            # Fix: identity comparison with None (was '!= None').
            if segment.nameOffset is not None:
                nameId += segment.nameOffset
                assert nameId >= 0
                name = names[nameId]
            tokens.append(Token(dstLine, dstCol, name))
    return tokens
def isFunction(token, lines):
    """Determines if the given token is the start of a function.

    All function definitions are assumed to have a name field and the token in
    the compiled source is the keyword 'function'. Sometimes the function is
    defined on the previous semicolon and sometimes that semicolon appears on
    the previous line.

    Arguments:
      token - The Token to check.
      lines - An array of compiled code lines.
    Returns:
      whether the token is a function.
    """
    # Every function token carries a name; unnamed tokens are punctuation.
    if not token.name:
        return False
    remainder = lines[token.dstLine][token.dstCol:]
    # A lone ';' ending the line: the 'function' keyword may start the
    # following line instead.
    if remainder == ';\n':
        nextLine = token.dstLine + 1
        if nextLine == len(lines):
            return False
        return lines[nextLine].startswith('function')
    # Otherwise the keyword follows directly (optionally behind a ';').
    return remainder.startswith(('function', ';function'))
def readFunction(tokenIter, prev, prevIndex, lines, callback):
    """Reads a function from the token stream. The function token should
    already be consumed.

    Arguments:
      tokenIter - An iterator of the tokens (shared with the caller).
      prev - The token containing the function definition.
      prevIndex - The index of the previous token.
      lines - An array of compiled code lines.
      callback - A callback type used to create the data. See traverseTokens.
    Returns:
      an array of State objects in a format controlled by the callback (see
      traverseTokens).
    """
    # Track curly-brace nesting; the function ends once a brace has been
    # read and the nesting count returns to zero.
    brackets = 0
    read = False
    ret = []
    partialLine = lines[prev.dstLine][prev.dstCol:]
    state = callback(prev, prevIndex)
    try:
        while not read or brackets > 0:
            index, token = next(tokenIter)
            partialLine = lines[token.dstLine][token.dstCol:]
            # Recursively read functions. Sometimes functions are defined
            # nested. This doesn't happen that often, and never for Shaka
            # methods, so it does not count it twice since the size of this
            # method includes the nested function.
            if isFunction(token, lines):
                ret += readFunction(tokenIter, token, index, lines, callback)
            else:
                state.add(token, index)
            if partialLine.startswith('{}'):
                # Empty body: the function opens and closes at this token.
                read = True
            elif partialLine[0] == '{':
                brackets += 1
                read = True
            elif partialLine[0] == '}':
                brackets -= 1
    # When we run out of tokens, simply ignore it. A parent call will not see
    # this error; but it will continue and the next call to 'next' will fail
    # with another StopIteration. This ensures that the last State object
    # is included for invalid content.
    except StopIteration:
        pass
    temp = state.build()
    if temp:
        ret.append(temp)
    return ret
def traverseTokens(tokens, lines, callback):
    """Traverses a list of tokens to identify functions. Then uses a callback
    to perform some work on the functions. Each function seen gets a new State
    object created from the given callback method; there is a single State for
    global code which is given None in the constructor. Then, each token seen
    is passed to the 'add' method of the State. This is used by the State to
    either calculate sizes, print tokens, or detect dependencies. The 'build'
    method is called at the end of the function to create a result object that
    is returned as an array at the end.

    Arguments:
      tokens - An array of Tokens.
      lines - An array of compiled code lines.
      callback - A constructor that returns a state object. It takes a start
                 token or None if outside a function. It has two member
                 functions:
                   add - accepts the current token and the token's index.
                   build - returns an object to be added to the results.
    Returns:
      an array of State objects in a format controlled by the callback.
    """
    ret = []
    # The single State covering code outside any function.
    state = callback(None, None)
    # Create a token iterator. This is used to read tokens from the array. We
    # cannot use a for loop because the iterator is passed to readFunction,
    # which consumes tokens from the same stream.
    tokenIter = enumerate(tokens)
    try:
        while True:
            index, token = next(tokenIter)
            if isFunction(token, lines):
                ret += readFunction(tokenIter, token, index, lines, callback)
            else:
                state.add(token, index)
    except StopIteration:
        pass
    # Flush the global state last.
    temp = state.build()
    if temp:
        ret.append(temp)
    return ret
class FunctionSize:
    """Record pairing a function name with its compiled size."""
    def __init__(self, name, size):
        self.name = name  # fully-qualified function name
        self.size = size  # size in characters of the compiled function
def printTokens(tokens, lines, funcs):
    """Prints the given tokens.

    Arguments:
      tokens - An array of Tokens.
      lines - An array of compiled code lines.
      funcs - An array of FunctionSize.
    """
    class State:
        def __init__(self, token, index):
            # The start of a function, or the global start.
            self.name = token.name if token else None
            if token:
                self._printToken('>', token, index)
        def _printToken(self, prefix, token, index):
            # Take the text from this token up to the next token on the same
            # line (or the rest of the line for the last token on a line).
            partialLine = lines[token.dstLine][token.dstCol:]
            if len(tokens) > index + 1:
                next_ = tokens[index + 1]
                if next_.dstLine == token.dstLine:
                    partialLine = lines[token.dstLine][token.dstCol:next_.dstCol]
            tokenText = partialLine[:10].replace('\n', '').rjust(12)
            print '%s %4d %4d %12s %s' % (prefix, token.dstLine, token.dstCol,
                                          tokenText, token.name)
        def add(self, token, index):
            # Choose a one-character prefix (see printHelp for the legend).
            prefix = None
            if not self.name:
                prefix = '!'
            elif lines[token.dstLine][token.dstCol:token.dstCol+2] == '{}':
                prefix = ' '
            elif lines[token.dstLine][token.dstCol] == '{':
                prefix = '+'
            elif lines[token.dstLine][token.dstCol] == '}':
                prefix = '-'
            else:
                prefix = ' '
            self._printToken(prefix, token, index)
        def build(self):
            if not self.name:
                return
            # The end of a function. Print the size of this function.
            # NOTE: Python 2 semantics -- filter() returns a list here
            # (len() and indexing are used on the result below).
            size = 0
            thisFunc = filter(lambda key:key.name == self.name, funcs)
            if len(thisFunc) > 0:
                size = thisFunc[0].size
            print 'X', self.name, size
    traverseTokens(tokens, lines, State)
class FunctionDependencies:
    """Record pairing a function/class name with the names it references."""
    def __init__(self, name, deps):
        self.name = name  # fully-qualified function or class name
        self.deps = deps  # list of names this entry depends on
def processDeps(tokens, lines, isClass):
    """Processes the tokens into function or class dependencies.

    Arguments:
      tokens - An array of Tokens.
      lines - An array of compiled code lines.
      isClass - Whether to create a class graph instead of a function graph.
    Returns:
      an array of FunctionDependencies.
    """
    class State:
        def __init__(self, token, _):
            self.deps = []
            self.name, self.parts = self._createParts(token)
        def _createParts(self, token):
            # Split a qualified name into namespace parts, normalizing
            # prototype members and (for class graphs) stripping method names.
            if not token or not token.name:
                return (None, None)
            parts = token.name.split('.')
            name = token.name
            # Instance methods are the same as static methods.
            if len(parts) > 2 and parts[-2] == 'prototype':
                del parts[-2]
            # Strip function names if class graph; also remove it from the name.
            if isClass:
                # NOTE: string.lowercase is Python 2 only (ascii_lowercase in
                # Python 3); this file targets Python 2.
                if parts[-1][0] in string.lowercase:
                    del parts[-1]
                name = '.'.join(parts)
            return (name, parts)
        def add(self, token, _):
            # Ignore symbols outside a function. Only care about function
            # references and only those that reference our code.
            if not self.name or not token.name or not token.name.startswith('shaka.'):
                return
            name, otherParts = self._createParts(token)
            # Get the index of the first different namespace.
            count = min(len(self.parts), len(otherParts))
            i = 0
            while i < count and self.parts[i] == otherParts[i]:
                i += 1
            # Ignore use of members of the same object:
            #   OfflineVideoSource.configure and OfflineVideoSource.store
            if (i == count - 1 or i == count) and len(self.parts) == len(otherParts):
                return
            # Ignore use of the constructor of the same type:
            #   OfflineVideoSource and OfflineVideoSource.store
            if i == count and abs(len(self.parts) - len(otherParts)) == 1:
                return
            # Add the dependency.
            if not (name in self.deps):
                self.deps.append(name)
        def build(self):
            return FunctionDependencies(self.name, self.deps) if self.name else None
    ret = traverseTokens(tokens, lines, State)
    assert len(ret) > 0
    ret = sorted(ret, key=lambda key:key.name)
    # We need to collapse duplicates (the same name can appear in several
    # disjoint token runs); adjacent duplicates merge their dependency lists.
    i = 0
    while i + 1 < len(ret):
        if ret[i].name == ret[i + 1].name:
            for dep in ret[i + 1].deps:
                if not dep in ret[i].deps:
                    ret[i].deps.append(dep)
            del ret[i + 1]
        else:
            i += 1
    return ret
def processSizes(tokens, lines):
    """Processes an array of tokens into function lengths.

    Arguments:
      tokens - An array of Tokens.
      lines - An array of compiled code lines.
    Returns:
      an array of FunctionSizes sorted on name.
    """
    class State:
        def __init__(self, token, _):
            self.name = token.name if token else None
            self.size = 0
            # Current measuring position: column and line of the last token.
            self.start = token.dstCol if token else None
            self.line = token.dstLine if token else None
        def add(self, token, _):
            # Ignore outside a function
            if not self.name:
                return
            # If we skipped to the next line, include the code to the end of the line.
            # If we skipped multiple lines, include the whole line. This will most
            # likely never happen since the compiled code usually has new lines on
            # function boundaries.
            assert token.dstLine >= self.line
            while token.dstLine != self.line:
                self.size += len(lines[self.line]) - self.start
                self.line += 1
                self.start = 0
            # Keep increasing the size. We can't simply keep the start and measure
            # at the end since we are not given the end token in build().
            self.size += token.dstCol - self.start
            self.start = token.dstCol
        def build(self):
            return FunctionSize(self.name, self.size) if self.name else None
    ret = traverseTokens(tokens, lines, State)
    assert len(ret) > 0
    # Keep only entries from our own namespaces.
    ret = filter(lambda key:key.name and
        (key.name.startswith('shaka.') or key.name.startswith('goog.')), ret)
    ret = sorted(ret, key=lambda key:key.name)
    # We need to collapse duplicates, summing the sizes of same-named entries.
    i = 0
    while i + 1 < len(ret):
        if ret[i].name == ret[i + 1].name:
            ret[i].size += ret[i + 1].size
            del ret[i + 1]
        else:
            i += 1
    return ret
def printTree(results, indent, callback, endCallback):
    """Prints the results in an indented format.

    Arguments:
      results - An array of FunctionSizes sorted on name.
      indent - A number to indent.
      callback - A callback function to print the data. Accepts a title, an
                 indentation, and a sublist of the items in that group.
      endCallback - A callback function called after a group; can be None.
    """
    # This is used both when printing sizes and when printing dependencies in
    # DOT format. This recursively creates groups of items with the same prefix.
    # e.g.
    # shaka
    #   shaka.util
    #     shaka.util.FailoverUri
    #     shaka.util.TypedBind
    #   shaka.player
    #   ...
    if len(results) <= 1:
        callback(None, indent, results)
        return
    # We want to group-by prefixes for the elements. Since it is sorted, we
    # can find the overall prefix length by comparing the first and last names.
    first = results[0].name.split('.')
    last = results[-1].name.split('.')
    prefix = 0
    while (prefix < len(first) and prefix < len(last)
           and first[prefix] == last[prefix]):
        prefix += 1
    group = 0
    groupItems = first
    if prefix == len(first):
        # This happens when the group has a first element of a class name and the
        # remaining are member functions. Remove the first element from this
        # group.
        groupItems = results[1].name.split('.')
        group = 1
    # Start with second element, and go one more so we make sure to process the
    # last group.
    for i in range(1, len(results) + 1):
        # Past-the-end sentinel guarantees the final group is flushed.
        items = (results[i].name.split('.') if i != len(results) else
                 [''] * (prefix + 1))
        if items[prefix] != groupItems[prefix]:
            title = '.'.join(groupItems[:(prefix + 1)])
            callback(title, indent, results[group:i])
            printTree(results[group:i], indent + 1, callback, endCallback)
            # Set the start of the next group to the current element.
            group = i
            groupItems = items
    if endCallback:
        endCallback(indent)
def printSizes(sizes):
    """Prints the sizes in an indented format.

    Arguments:
      sizes - An array of FunctionSizes sorted on name.
    """
    # This callback is used to print the total sizes of each of the sub-groups.
    # Using the indent as padding allows to print a tree-like structure to
    # show how big each section is.
    def callbackFactory(padding):
        # Use a factory so we capture the padding.
        def callback(title, indent, results):
            if title:
                size = sum(map(lambda key:key.size, results))
                print '%s %*d %s' % (indent * ' ', padding, size, title)
        return callback
    total = sum(map(lambda key:key.size, sizes))
    # Pad every size column to the width of the grand total.
    padding = int(math.ceil(math.log10(total)))
    print '%*d %s' % (padding, total, 'TOTAL')
    printTree(sizes, 0, callbackFactory(padding), None)
def printDeps(results, inDot):
    """Prints the dependencies.

    Arguments:
      results - A sorted array of FunctionDependencies.
      inDot - Whether to print in DOT format.
    """
    if not inDot:
        # Plain-text mode: each name followed by its indented dependencies.
        for func in results:
            name, deps = func.name, func.deps
            # Ignore items with no dependencies.
            if len(deps) > 0:
                print name
                for dep in deps:
                    print '  ', dep
        return
    depMap = dict()
    # Use the printTree to produce clusters for each namespace and type. This
    # will print boxes around each class and show dependencies between types.
    print 'digraph {'
    def callbackFactory(depMap, temp):
        # temp accumulates one entry per emitted cluster to generate unique ids.
        def callback(title, indent, results):
            if title:
                if len(results) > 1:
                    print '\t' * indent, 'subgraph', 'cluster' + str(len(temp)), '{'
                    temp.append(1)
                else:
                    print '\t' * indent, len(depMap), '[', \
                        'label="' + results[0].name + '"', ']', ';'
                    depMap[results[0].name] = len(depMap)
        return callback
    def endCallback(indent):
        if indent > 1:
            print '\t' * (indent - 1), '}'
    printTree(results, 1, callbackFactory(depMap, []), endCallback)
    # Emit the edges; declare any node the tree pass did not already declare.
    for func in results:
        name, deps = func.name, func.deps
        # Ignore items with no dependencies.
        if len(deps) > 0:
            if not name in depMap:
                depMap[name] = len(depMap)
                print '\t', depMap[name], '[', 'label="' + name + '"', ']', ';'
            for dep in deps:
                if not dep in depMap:
                    depMap[dep] = len(depMap)
                    print '\t', depMap[dep], '[', 'label="' + dep + '"', ']', ';'
                print '\t', depMap[name], '->', depMap[dep], ';'
    print '}'
class Options:
    """Parsed command-line options for the source-map analyzer."""
    def __init__(self):
        self.printDeps = False    # -f: print function dependencies
        self.printSizes = False   # -s: print function sizes
        self.printTokens = False  # -t: print all tokens
        self.inDot = False        # -d: emit DOT graph format
        self.isClass = False      # -c: class-level dependency graph
def process(text, options):
    """Decodes a JSON string containing source map data and prints a report.

    Arguments:
      text - A JSON string containing source map data.
      options - An object containing the command-line options.
    """
    # The spec allows a map file to start with )]} to prevent javascript from
    # including it.
    if text.startswith(')]}\'\n') or text.startswith(')]}\n'):
        _, text = text.split('\n', 1)
    # Decode the JSON data and get the parts we need.
    data = json.loads(text)
    # Paths are relative to the source code root.
    base = shakaBuildHelpers.getSourceBase()
    fileLines = open(os.path.join(base, data['file'])).readlines()
    names = data['names']
    mappings = data['mappings']
    tokens = decodeMappings(mappings, names)
    sizes = processSizes(tokens, fileLines)
    # Print out one of the results.
    if options.printTokens:
        printTokens(tokens, fileLines, sizes)
    elif options.printSizes:
        printSizes(sizes)
    elif options.printDeps or options.isClass:
        temp = processDeps(tokens, fileLines, options.isClass)
        printDeps(temp, options.inDot)
def printHelp():
    """Prints the command-line usage and help text to stdout."""
    print 'Usage:', sys.argv[0], """[options] [--] [source_map]
source_map must be either the path to the source map, or the name of the build
type. You must build Shaka first.
Types(must include exactly one):
 -c --class-deps : Prints the class dependencies
 -f --function-deps : Prints the function dependencies
 -s --function-sizes : Prints the function sizes (in number of characters)
 -t --all-tokens : Prints all tokens in the source map
Options:
 -d --dot-format : Prints in DOT format; only valid with \
--function-deps or --class-dep
 -h --help : Prints this help page
Token Format:
 prefix line col token name => Token
 X functionName size => end function
Prefixes:
 > - start a function
 ! - not in a function
 - - end curly brace
 + - start curly brace
 - other token
DOT Format:
 This can print the dependency graph in DOT format. This can be used with
 graph programs to display a visual graph of dependencies. For example
 using graphviz:
""", sys.argv[0], """-c -d | fdp -Goverlap=prism | neato -n2 -Tsvg > out.svg"""
def main(args):
    """Command-line entry point.

    Arguments:
      args - The command-line arguments (excluding the program name).
    Returns:
      0 on success, 1 on error (bad arguments or missing map file).
    """
    options = Options()
    doneArgs = False
    name = 'shaka-player.compiled.debug.map'
    # Process the command-line arguments.
    for arg in args:
        if doneArgs or arg[0] != '-':
            # Positional argument: the source-map path or a build type name.
            name = arg
        elif arg == '-f' or arg == '--function-deps':
            options.printDeps = True
        elif arg == '-t' or arg == '--all-tokens':
            options.printTokens = True
        elif arg == '-s' or arg == '--function-sizes':
            options.printSizes = True
        elif arg == '-c' or arg == '--class-deps':
            options.isClass = True
        elif arg == '-d' or arg == '--dot-format':
            options.inDot = True
        elif arg == '--':
            # Everything after '--' is treated as positional.
            doneArgs = True
        elif arg == '-h' or arg == '--help':
            printHelp()
            return 0
        else:
            print >> sys.stderr, 'Unrecognized argument:', arg
            printHelp()
            return 1
    # Try to find the file
    if not os.path.isfile(name):
        # Get the source code base directory
        base = shakaBuildHelpers.getSourceBase()
        # Supports the following searches:
        # * File name given, map in dist/
        # * Type given, map in working directory
        # * Type given, map in dist/
        if os.path.isfile(os.path.join(base, 'dist' , name)):
            name = os.path.join(base, 'dist', name)
        elif os.path.isfile(
                os.path.join('shaka-player.' + name + '.debug.map')):
            name = os.path.join('shaka-player.' + name + '.debug.map')
        elif os.path.isfile(
                os.path.join(base, 'dist', 'shaka-player.' + name + '.debug.map')):
            name = os.path.join(base, 'dist', 'shaka-player.' + name + '.debug.map')
        else:
            print >> sys.stderr, name, 'not found; build Shaka first.'
            return 1
    # Verify arguments are correct.
    # (Booleans sum as integers, so exactly one output type must be set.)
    if (options.printSizes + options.printDeps + options.printTokens +
            options.isClass) != 1:
        print >> sys.stderr, 'Must include exactly one output type.'
        printHelp()
        return 1
    elif options.inDot and not options.printDeps and not options.isClass:
        print >> sys.stderr, '--dot-format only valid with --function-deps or \
--class-deps.'
        return 1
    else:
        process(open(name).read(), options)
        return 0
if __name__ == '__main__':
    # Delegate to the shared runner so errors and exit codes are handled
    # uniformly across the build scripts.
    shakaBuildHelpers.runMain(main)
|
<reponame>rcsb/py-rcsb_utils_io
##
# File: IoUtil.py
#
# Updates:
# 2-Feb-2018 jdw add default return values for deserialize ops
# 3-Feb-2018 jdw pickle -> pickle - make default return {} if not specified
# 14-Feb-2018 jdw add fix for the new location of XmlToObj module
# 20-May-2018 jdw move to this module
# 3-Jun-2018 jdw add serializeMmCif/deserializeMmCif
# 4-Jun-2018 jdw overhaul api - provide two public methods.
# 4-Jun-2018 jdw add format for dictionaries which require special parsing
# 15-Jun-2018 jdw add textDump (pretty printer) serialization method -
# 28-Sep-2018 jdw add helper class for serializing python date/datetime types
# 8-Oct-2018 jdw add convenience function to test for file existence
# 11-Oct-2018 jdw make encoding utf-8 for lists
# 13-Oct-2018 jdw add Py27 support for explicit encoding using io.open.
# 26-Oct-2018 jdw add additional JSON encodings for yaml data types
# 25-Nov-2018 jdw add support for FASTA format sequence files
# 30-Nov-2018 jdw add support CSV file formats
# 11-Dec-2018 jdw add comment filtering on input for CSV files
# 5-Feb-2019 jdw add support for gzip compression as part of serializing mmCIF files.
# 6-Feb-2019 jdw add vrpt-xml-to-cif option and supporting method __deserializeVrptToCif()
# 24-Mar-2019 jdw suppress error message on missing validation report file.
# 25-Mar-2019 jdw expose comment processing for csv/tdd files as keyword argument
# 3-Apr-2019 jdw add comment option and compression handling to __deserializeList()
# 11-Jul-2019 jdw add explicit py2 safe file decompression to avoid encoding problems.
# 10-Aug-2019 jdw add XML/ElementTree reader
# 13-Aug-2019 jdw add serialization/deserialization in parts
# 18-Sep-2019 jdw add method deserializeCsvIter()
# 17-Sep-2021 jdw add an explicit file test for gzip compression to avoid problems with uncompressed files.
##
__docformat__ = "google en"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import csv
import datetime
import glob
import gzip
import io
import itertools
import json
import logging
import os
import pickle
import pprint
import random
import string
import sys
import time
from collections import OrderedDict
import numpy
import requests
import ruamel.yaml
from mmcif.io.IoAdapterPy import IoAdapterPy
from rcsb.utils.io.decorators import retry
from rcsb.utils.io.FastaUtil import FastaUtil
from rcsb.utils.io.FileUtil import FileUtil
try:
from mmcif.io.IoAdapterCore import IoAdapterCore as IoAdapter # pylint: disable=ungrouped-imports
except Exception:
from mmcif.io.IoAdapterPy import IoAdapterPy as IoAdapter # pylint: disable=reimported,ungrouped-imports
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
logger = logging.getLogger(__name__)
def uncommentFilter(csvfile):
    """Generator stripping '#' comments from an iterable of text rows.

    Everything from the first '#' onward is discarded; rows that are empty
    after stripping are skipped entirely.
    """
    for line in csvfile:
        content = line.partition("#")[0].strip()
        if content:
            yield content
def getObjSize(obj, seen=None):
    """Recursively report the in-memory size of the input object in bytes.

    Follows dict keys/values, instance ``__dict__`` attributes and iterable
    members, counting each distinct object at most once.

    Args:
        obj: the object to measure
        seen (set, optional): ids of objects already counted; used internally
            during recursion, callers normally omit it.

    Returns:
        int: aggregate size in bytes as reported by sys.getsizeof
    """
    size = sys.getsizeof(obj)
    if seen is None:
        seen = set()
    objId = id(obj)
    if objId in seen:
        return 0
    # Important mark as seen *before* entering recursion to gracefully handle
    # self-referential objects
    seen.add(objId)
    if isinstance(obj, dict):
        # Generator expressions avoid materializing intermediate lists.
        size += sum(getObjSize(v, seen) for v in obj.values())
        size += sum(getObjSize(k, seen) for k in obj.keys())
    elif hasattr(obj, "__dict__"):
        size += getObjSize(obj.__dict__, seen)
    elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
        size += sum(getObjSize(i, seen) for i in obj)
    return size
class JsonTypeEncoder(json.JSONEncoder):
    """JSON encoder handling date/datetime values, numpy scalars/arrays and
    ruamel.yaml round-trip containers."""

    # pylint: disable=method-hidden,protected-access
    def default(self, o):
        # Dates and datetimes both serialize to their ISO-8601 text form.
        if isinstance(o, (datetime.datetime, datetime.date)):
            return o.isoformat()
        # Numpy scalar and array types map onto native Python equivalents.
        if isinstance(o, numpy.integer):
            return int(o)
        if isinstance(o, numpy.floating):
            return float(o)
        if isinstance(o, numpy.ndarray):
            return o.tolist()
        # ruamel.yaml round-trip containers expose their underlying data on
        # private attributes.
        if isinstance(o, ruamel.yaml.comments.CommentedMap):
            return o._od
        if isinstance(o, ruamel.yaml.comments.CommentedSeq):
            return o._lst
        return json.JSONEncoder.default(self, o)
class IoUtil(object):
    def __init__(self, **kwargs):
        """Create an IoUtil; all keyword arguments are forwarded to FileUtil."""
        self.__fileU = FileUtil(**kwargs)
def serialize(self, filePath, myObj, fmt="pickle", **kwargs):
"""Public method to serialize format appropriate objects
Args:
filePath (str): local file path'
myObj (object): format appropriate object to be serialized
format (str, optional): one of ['mmcif', mmcif-dict', json', 'list', 'text-dump', pickle' (default)]
**kwargs: additional keyword arguments passed to worker methods -
Returns:
bool: status of serialization operation; true for success or false otherwise
"""
ret = False
fmt = str(fmt).lower()
ret = self.__fileU.mkdirForFile(filePath)
if not ret:
return ret
if fmt in ["mmcif"]:
ret = self.__serializeMmCif(filePath, myObj, **kwargs)
elif fmt in ["json"]:
ret = self.__serializeJson(filePath, myObj, **kwargs)
elif fmt in ["pickle"]:
ret = self.__serializePickle(filePath, myObj, **kwargs)
elif fmt in ["list"]:
ret = self.__serializeList(filePath, myObj, enforceAscii=True, **kwargs)
elif fmt in ["mmcif-dict"]:
ret = self.__serializeMmCifDict(filePath, myObj, **kwargs)
elif fmt in ["text-dump"]:
ret = self.__textDump(filePath, myObj, **kwargs)
elif fmt in ["fasta"]:
ret = self.__serializeFasta(filePath, myObj, **kwargs)
elif fmt in ["csv"]:
ret = self.__serializeCsv(filePath, myObj, **kwargs)
else:
pass
return ret
    def deserialize(self, filePath, fmt="pickle", **kwargs):
        """Public method to deserialize objects in supported formats.

        Args:
            filePath (str): local file path
            fmt (str, optional): one of ['mmcif', 'json', 'list', 'mmcif-dict',
                'fasta', 'csv', 'tdd', 'xml', 'pickle' (default)]
            **kwargs: additional keyword arguments passed to worker methods -

        Returns:
            object: deserialized object data, or None for an unknown format
        """
        fmt = str(fmt).lower()
        if fmt in ["mmcif"]:
            ret = self.__deserializeMmCif(filePath, **kwargs)  # type: ignore
        elif fmt in ["json"]:
            ret = self.__deserializeJson(filePath, **kwargs)  # type: ignore
        elif fmt in ["pickle"]:
            ret = self.__deserializePickle(filePath, **kwargs)  # type: ignore
        elif fmt in ["list"]:
            ret = self.__deserializeList(filePath, enforceAscii=True, **kwargs)  # type: ignore
        elif fmt in ["mmcif-dict"]:
            ret = self.__deserializeMmCifDict(filePath, **kwargs)  # type: ignore
        elif fmt in ["fasta"]:
            ret = self.__deserializeFasta(filePath, **kwargs)  # type: ignore
        # elif fmt in ["vrpt-xml-to-cif"]:
        #     ret = self.__deserializeVrptToCif(filePath, **kwargs)  # type: ignore
        elif fmt in ["csv", "tdd"]:
            # Comma-delimited for csv, tab-delimited for tdd unless overridden.
            delimiter = kwargs.get("csvDelimiter", "," if fmt == "csv" else "\t")
            ret = self.__deserializeCsv(filePath, delimiter=delimiter, **kwargs)  # type: ignore
        elif fmt in ["xml"]:
            ret = self.__deserializeXml(filePath, **kwargs)  # type: ignore
        else:
            ret = None  # type: ignore
        return ret
def __sliceInChunks(self, myList, numChunks):
mc = min(len(myList), numChunks)
chunkSize = int(len(myList) / mc)
if len(myList) % mc:
chunkSize += 1
for i in range(0, len(myList), chunkSize):
yield myList[i : i + chunkSize]
    def serializeInParts(self, filePath, myObj, numParts, fmt="json", **kwargs):
        """Public method to serialize format appropriate (json, pickle) objects in multiple parts

        Args:
            filePath (str): local file path
            myObj (object): format appropriate object to be serialized
            numParts (int): divide the data into numParts segments
            fmt (str, optional): one of ['json' or 'pickle']. Defaults to json
            **kwargs: additional keyword arguments passed to worker methods -

        Returns:
            bool: True for success or False otherwise
        """
        if fmt not in ["json", "pickle"]:
            logger.error("Unsupported format for %s", fmt)
            return False
        pth, fn = os.path.split(filePath)
        self.__fileU.mkdirForFile(pth)
        # Part files are named <basename>_part_<n><ext>.
        bn, ext = os.path.splitext(fn)
        ret = True
        if isinstance(myObj, list):
            # Lists split into contiguous chunks, one output file per chunk.
            for ii, subList in enumerate(self.__sliceInChunks(myObj, numParts)):
                fp = os.path.join(pth, bn + "_part_%d" % (ii + 1) + ext)
                ok = self.serialize(fp, subList, fmt=fmt, **kwargs)
                ret = ret and ok
        elif isinstance(myObj, dict):
            # Dicts split by key, preserving key order within each part.
            for ii, keyList in enumerate(self.__sliceInChunks(list(myObj.keys()), numParts)):
                fp = os.path.join(pth, bn + "_part_%d" % (ii + 1) + ext)
                ok = self.serialize(fp, OrderedDict([(k, myObj[k]) for k in keyList]), fmt=fmt, **kwargs)
                ret = ret and ok
        else:
            logger.error("Unsupported data type for serialization in parts")
            ret = False
        #
        return ret
    def deserializeInParts(self, filePath, numParts, fmt="json", **kwargs):
        """Public method to deserialize objects in supported formats from multiple parts

        Args:
            filePath (str): local file path
            numParts (int): reconstruct the data object from numParts segments;
                if falsy, the part count is discovered by globbing
            fmt (str, optional): one of ['json' or 'pickle']. Defaults to json
            **kwargs: additional keyword arguments passed to worker methods -

        Returns:
            object: deserialized object data (list or OrderedDict), or None
        """
        rObj = None
        if fmt not in ["json", "pickle"]:
            logger.error("Unsupported format for %s", fmt)
            return rObj
        #
        pth, fn = os.path.split(filePath)
        bn, ext = os.path.splitext(fn)
        if not numParts:
            # Discover how many part files exist on disk.
            fp = os.path.join(pth, bn + "_part_*" + ext)
            numParts = len(glob.glob(fp))
        #
        for ii in range(numParts):
            fp = os.path.join(pth, bn + "_part_%d" % (ii + 1) + ext)
            tObj = self.deserialize(fp, fmt=fmt, **kwargs)
            # Accumulate parts into a single list or OrderedDict.
            if isinstance(tObj, list):
                if not rObj:
                    rObj = []
                rObj.extend(tObj)
            elif isinstance(tObj, dict):
                if not rObj:
                    rObj = OrderedDict()
                rObj.update(tObj)
            else:
                logger.error("Unsupported data type for deserialization in parts")
        return rObj
    def exists(self, filePath, mode=os.R_OK):
        """Return True if filePath exists with the given access mode (delegates to FileUtil)."""
        return self.__fileU.exists(filePath, mode=mode)
    def mkdir(self, dirPath, mode=0o755):
        """Create directory dirPath with the given permissions (delegates to FileUtil)."""
        return self.__fileU.mkdir(dirPath, mode=mode)
    def remove(self, pth):
        """Remove the filesystem object at pth (delegates to FileUtil)."""
        return self.__fileU.remove(pth)
    def __deserializeFasta(self, filePath, **kwargs):
        """Read a FASTA sequence file via FastaUtil.

        Keyword Args:
            commentStyle (str): definition-line comment style (default "uniprot")

        Returns:
            dict: parsed sequence data, or {} on failure
        """
        try:
            commentStyle = kwargs.get("commentStyle", "uniprot")
            fau = FastaUtil()
            return fau.readFasta(filePath, commentStyle=commentStyle)
        except Exception as e:
            logger.error("Unable to deserialize %r %r ", filePath, str(e))
        return {}
def __serializeFasta(self, filePath, myObj, **kwargs):
try:
maxLineLength = int(kwargs.get("maxLineLength", 70))
makeComment = kwargs.get("makeComment", False)
fau = FastaUtil()
ok = fau.writeFasta(filePath, myObj, maxLineLength=maxLineLength, makeComment=makeComment)
return ok
except Exception as e:
logger.error("Unable to serialize FASTA file %r %r", filePath, str(e))
return False
def __textDump(self, filePath, myObj, **kwargs):
try:
indent = kwargs.get("indent", 1)
width = kwargs.get("width", 120)
sOut = pprint.pformat(myObj, indent=indent, width=width)
with open(filePath, "w") as ofh:
ofh.write("\n%s\n" % sOut)
return True
except Exception as e:
logger.error("Unable to dump to %r %r", filePath, str(e))
return False
def __serializePickle(self, filePath, myObj, **kwargs):
try:
pickleProtocol = kwargs.get("pickleProtocol", pickle.DEFAULT_PROTOCOL)
with open(filePath, "wb") as outfile:
pickle.dump(myObj, outfile, pickleProtocol)
return True
except Exception as e:
logger.error("Unable to serialize %r %r", filePath, str(e))
return False
    def __deserializePickle(self, filePath, **kwargs):
        """Unpickle the object stored at filePath.

        Keyword Args:
            default (object): value returned on failure (default {})
            encoding (str): Python 3 pickle text encoding (default "ASCII")
            errors (str): Python 3 pickle decode error policy (default "strict")

        Returns:
            object: the unpickled object, or the default on failure
        """
        myDefault = kwargs.get("default", {})
        try:
            if sys.version_info[0] > 2:
                # Python 3 exposes encoding controls for pickles written by Py2.
                encoding = kwargs.get("encoding", "ASCII")
                errors = kwargs.get("errors", "strict")
                with open(filePath, "rb") as outfile:
                    return pickle.load(outfile, encoding=encoding, errors=errors)
            else:
                with open(filePath, "rb") as outfile:
                    return pickle.load(outfile)
        except Exception as e:
            logger.warning("Unable to deserialize %r %r", filePath, str(e))
        return myDefault
    def __serializeJson(self, filePath, myObj, **kwargs):
        """Internal method to serialize the input object as JSON. An encoding
        helper class (JsonTypeEncoder) is included to handle selected python
        data types (e.g., datetime).

        Keyword Args:
            indent (int): JSON indentation (default 0)
            enforceAscii (bool): escape non-ASCII characters (default True)

        Returns:
            bool: True on success, False otherwise
        """
        indent = kwargs.get("indent", 0)
        enforceAscii = kwargs.get("enforceAscii", True)
        try:
            if enforceAscii:
                with open(filePath, "w") as outfile:
                    json.dump(myObj, outfile, indent=indent, cls=JsonTypeEncoder, ensure_ascii=enforceAscii)
            else:
                # Write UTF-8 directly when ASCII escaping is not required.
                with io.open(filePath, "w", encoding="utf-8") as outfile:
                    json.dump(myObj, outfile, indent=indent, cls=JsonTypeEncoder, ensure_ascii=enforceAscii)
            return True
        except Exception as e:
            logger.error("Unable to serialize %r %r", filePath, str(e))
        return False
    def __deserializeJson(self, filePath, **kwargs):
        """Read a JSON file (optionally gzip-compressed), preserving key order.

        Keyword Args:
            default (object): value returned on failure (default {})
            encoding (str): text encoding for compressed input (default "utf-8-sig")
            encodingErrors (str): decode error policy (default "ignore")

        Returns:
            object: parsed data (OrderedDict-based), or the default on failure
        """
        myDefault = kwargs.get("default", {})
        encoding = kwargs.get("encoding", "utf-8-sig")
        encodingErrors = kwargs.get("encodingErrors", "ignore")
        try:
            if filePath[-3:] == ".gz":
                if sys.version_info[0] > 2:
                    with gzip.open(filePath, "rt", encoding=encoding, errors=encodingErrors) as inpFile:
                        return json.load(inpFile, object_pairs_hook=OrderedDict)
                else:
                    # Py2 situation non-ascii encodings is problematic
                    # with gzip.open(filePath, "rb") as csvFile:
                    #     oL = self.__csvReader(csvFile, rowFormat, delimiter)
                    tPath = self.__fileU.uncompress(filePath, outputDir=None)
                    with io.open(tPath, newline="", encoding=encoding, errors="ignore") as inpFile:
                        return json.load(inpFile, object_pairs_hook=OrderedDict)
            else:
                with open(filePath, "r") as inpFile:
                    return json.load(inpFile, object_pairs_hook=OrderedDict)
        except Exception as e:
            logger.warning("Unable to deserialize %r %r", filePath, str(e))
        return myDefault
def __hasMinSize(self, pth, minSize):
try:
return os.path.getsize(pth) >= minSize
except Exception:
return False
    def __deserializeMmCif(self, locator, **kwargs):
        """Read an mmCIF file (local path or remote locator) into a container list.

        Keyword Args:
            workPath (str): working directory for the adapter (default None)
            enforceAscii (bool): enforce ASCII on read (default True)
            raiseExceptions (bool): adapter raises on parse errors (default True)
            useCharRefs (bool): adapter character-reference handling (default True)
            minSize (int): minimum expected local file size in bytes; a warning
                is logged for smaller files (default 5)

        Returns:
            list: data container list (empty on failure)
        """
        try:
            containerList = []
            workPath = kwargs.get("workPath", None)
            enforceAscii = kwargs.get("enforceAscii", True)
            raiseExceptions = kwargs.get("raiseExceptions", True)
            useCharRefs = kwargs.get("useCharRefs", True)
            minSize = kwargs.get("minSize", 5)
            #
            if self.__fileU.isLocal(locator):
                # Guard against truncated or empty local files.
                if minSize >= 0 and not self.__hasMinSize(locator, minSize):
                    logger.warning("Minimum file size not satisfied for: %r", locator)
                myIo = IoAdapter(raiseExceptions=raiseExceptions, useCharRefs=useCharRefs)
                containerList = myIo.readFile(locator, enforceAscii=enforceAscii, outDirPath=workPath)  # type: ignore
            else:
                # Remote reads go through the retry-wrapped python adapter.
                # myIo = IoAdapterPy(raiseExceptions=raiseExceptions, useCharRefs=useCharRefs)
                # containerList = myIo.readFile(locator, enforceAscii=enforceAscii, outDirPath=workPath)
                containerList = self.__deserializeMmCifRemote(locator, useCharRefs, enforceAscii, workPath)
        except Exception as e:
            logger.error("Failing for %s with %s", locator, str(e))
        return containerList
@retry((requests.exceptions.RequestException), maxAttempts=3, delaySeconds=1, multiplier=2, defaultValue=[], logger=logger)
def __deserializeMmCifRemote(self, locator, useCharRefs, enforceAscii, workPath):
containerList = []
try:
myIo = IoAdapterPy(raiseExceptions=True, useCharRefs=useCharRefs)
containerList = myIo.readFile(locator, enforceAscii=enforceAscii, outDirPath=workPath)
except Exception as e:
raise e
return containerList
    def __serializeMmCif(self, filePath, containerList, **kwargs):
        """Write *containerList* to an mmCIF file, gzip-compressing when *filePath*
        ends in ".gz" and a workPath is supplied.

        Keyword Args:
            workPath (str): scratch directory for the uncompressed intermediate (default None)
            enforceAscii (bool): ascii-encode output (default True)
            raiseExceptions (bool): propagate writer exceptions (default True)
            useCharRefs (bool): translate character references (default True)

        Returns:
            bool: True on success, False otherwise
        """
        try:
            ret = False
            workPath = kwargs.get("workPath", None)
            enforceAscii = kwargs.get("enforceAscii", True)
            raiseExceptions = kwargs.get("raiseExceptions", True)
            useCharRefs = kwargs.get("useCharRefs", True)
            #
            myIo = IoAdapter(raiseExceptions=raiseExceptions, useCharRefs=useCharRefs)
            if filePath.endswith(".gz") and workPath:
                # Write to a randomly named scratch file, then gzip it into place.
                # NOTE(review): with a ".gz" target but no workPath the else-branch
                # writes uncompressed data under the ".gz" name -- confirm intended.
                rfn = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
                tPath = os.path.join(workPath, rfn)
                ret = myIo.writeFile(tPath, containerList=containerList, enforceAscii=enforceAscii)
                ret = self.__fileU.compress(tPath, filePath, compressType="gzip")
            else:
                ret = myIo.writeFile(filePath, containerList=containerList, enforceAscii=enforceAscii)
        except Exception as e:
            logger.error("Failing for %s with %s", filePath, str(e))
        return ret
def __deserializeMmCifDict(self, filePath, **kwargs):
""" """
try:
containerList = []
workPath = kwargs.get("workPath", None)
enforceAscii = kwargs.get("enforceAscii", True)
raiseExceptions = kwargs.get("raiseExceptions", True)
useCharRefs = kwargs.get("useCharRefs", True)
#
myIo = IoAdapterPy(raiseExceptions=raiseExceptions, useCharRefs=useCharRefs)
containerList = myIo.readFile(filePath, enforceAscii=enforceAscii, outDirPath=workPath)
except Exception as e:
logger.error("Failing for %s with %s", filePath, str(e))
return containerList
def __serializeMmCifDict(self, filePath, containerList, **kwargs):
""" """
try:
ret = False
# workPath = kwargs.get('workPath', None)
enforceAscii = kwargs.get("enforceAscii", True)
raiseExceptions = kwargs.get("raiseExceptions", True)
useCharRefs = kwargs.get("useCharRefs", True)
#
myIo = IoAdapterPy(raiseExceptions=raiseExceptions, useCharRefs=useCharRefs)
ret = myIo.writeFile(filePath, containerList=containerList, enforceAscii=enforceAscii)
except Exception as e:
logger.error("Failing for %s with %s", filePath, str(e))
return ret
def __serializeList(self, filePath, aList, enforceAscii=True, **kwargs):
""" """
try:
_ = kwargs
if enforceAscii:
encoding = "ascii"
else:
encoding = "utf-8"
#
if sys.version_info[0] > 2:
with open(filePath, "w") as ofh:
if enforceAscii:
for st in aList:
ofh.write("%s\n" % st.encode("ascii", "xmlcharrefreplace").decode("ascii"))
else:
for st in aList:
ofh.write("%s\n" % st)
else:
if enforceAscii:
with io.open(filePath, "w", encoding=encoding) as ofh:
for st in aList:
ofh.write("%s\n" % st.encode("ascii", "xmlcharrefreplace").decode("ascii"))
else:
with open(filePath, "wb") as ofh:
for st in aList:
ofh.write("%s\n" % st)
return True
except Exception as e:
logger.error("Unable to serialize %r %r", filePath, str(e))
return False
def __processList(self, ifh, enforceAscii=True, **kwargs):
uncomment = kwargs.get("uncomment", True)
aList = []
for line in ifh:
if enforceAscii:
pth = line[:-1].encode("ascii", "xmlcharrefreplace").decode("ascii")
else:
pth = line[:-1]
if not pth or (uncomment and pth.startswith("#")):
continue
aList.append(pth)
return aList
def __deserializeList(self, filePath, enforceAscii=True, encodingErrors="ignore", **kwargs):
aList = []
_ = kwargs
try:
if filePath[-3:] == ".gz":
if sys.version_info[0] > 2:
with gzip.open(filePath, "rt", encoding="utf-8-sig", errors=encodingErrors) as ifh:
aList = self.__processList(ifh, enforceAscii=enforceAscii, **kwargs)
else:
tPath = self.__fileU.uncompress(filePath, outputDir=None)
# for py2 this commented code is problematic for non-ascii data
# with gzip.open(filePath, "rb") as ifh:
# aList = self.__processList(ifh, enforceAscii=enforceAscii)
with io.open(tPath, encoding="utf-8-sig", errors="ignore") as ifh:
aList = self.__processList(ifh, enforceAscii=enforceAscii)
else:
with io.open(filePath, encoding="utf-8-sig", errors="ignore") as ifh:
aList = self.__processList(ifh, enforceAscii=enforceAscii, **kwargs)
except Exception as e:
logger.error("Unable to deserialize %r %s", filePath, str(e))
#
logger.debug("Reading list length %d", len(aList))
return aList
def __csvReader(self, csvFile, rowFormat, delimiter, uncomment=True):
oL = []
maxInt = sys.maxsize
csv.field_size_limit(maxInt)
if rowFormat == "dict":
if uncomment:
reader = csv.DictReader(uncommentFilter(csvFile), delimiter=delimiter)
else:
reader = csv.DictReader(csvFile, delimiter=delimiter)
for rowD in reader:
oL.append(rowD)
elif rowFormat == "list":
if uncomment:
reader = csv.reader(uncommentFilter(csvFile), delimiter=delimiter)
else:
reader = csv.reader(csvFile, delimiter=delimiter)
for rowL in reader:
oL.append(rowL)
return oL
def deserializeCsvIter(self, filePath, delimiter=",", rowFormat="dict", encodingErrors="ignore", uncomment=True, **kwargs):
"""Return an iterator to input CSV format file.
Args:
filePath (str): input file path
delimiter (str, optional): CSV delimiter. Defaults to ",".
rowFormat (str, optional): format for each process row (list or dict). Defaults to "dict".
encodingErrors (str, optional): treatment of encoding errors. Defaults to "ignore".
uncomment (bool, optional): flag to ignore leading comments. Defaults to True.
Returns:
(iterator): iterator for rowwise access to processed CSV data
"""
encoding = kwargs.get("encoding", "utf-8-sig")
maxInt = sys.maxsize
csv.field_size_limit(maxInt)
try:
if filePath[-3:] == ".gz":
with gzip.open(filePath, "rt", encoding=encoding, errors=encodingErrors) as csvFile:
startIt = itertools.dropwhile(lambda x: x.startswith("#"), csvFile) if uncomment else csvFile
if rowFormat == "dict":
reader = csv.DictReader(startIt, delimiter=delimiter)
elif rowFormat == "list":
reader = csv.reader(startIt, delimiter=delimiter)
for row in reader:
yield row
else:
with io.open(filePath, newline="", encoding=encoding, errors="ignore") as csvFile:
startIt = itertools.dropwhile(lambda x: x.startswith("#"), csvFile) if uncomment else csvFile
if rowFormat == "dict":
reader = csv.DictReader(startIt, delimiter=delimiter)
elif rowFormat == "list":
reader = csv.reader(startIt, delimiter=delimiter)
for row in reader:
# if uncomment and row.startswith("#"):
# continue
yield row
except Exception as e:
logger.error("Unable to deserialize %r %s", filePath, str(e))
def __deserializeCsv(self, filePath, delimiter=",", rowFormat="dict", encodingErrors="ignore", uncomment=True, **kwargs):
oL = []
encoding = kwargs.get("encoding", "utf-8-sig")
try:
if filePath[-3:] == ".gz":
if sys.version_info[0] > 2:
with gzip.open(filePath, "rt", encoding=encoding, errors=encodingErrors) as csvFile:
oL = self.__csvReader(csvFile, rowFormat, delimiter, uncomment=uncomment)
else:
# Py2 situation non-ascii encodings is problematic
# with gzip.open(filePath, "rb") as csvFile:
# oL = self.__csvReader(csvFile, rowFormat, delimiter)
tPath = self.__fileU.uncompress(filePath, outputDir=None)
with io.open(tPath, newline="", encoding=encoding, errors="ignore") as csvFile:
oL = self.__csvReader(csvFile, rowFormat, delimiter, uncomment=uncomment)
else:
with io.open(filePath, newline="", encoding=encoding, errors="ignore") as csvFile:
oL = self.__csvReader(csvFile, rowFormat, delimiter, uncomment=uncomment)
return oL
except Exception as e:
logger.error("Unable to deserialize %r %s", filePath, str(e))
#
logger.debug("Reading list length %d", len(oL))
return oL
def __serializeCsv(self, filePath, rowDictList, fieldNames=None, **kwargs):
""" """
_ = kwargs
try:
wD = {}
ret = False
fNames = fieldNames if fieldNames else list(rowDictList[0].keys())
# with io.open(filePath, 'w', newline='') as csvFile:
with open(filePath, "w") as csvFile:
writer = csv.DictWriter(csvFile, fieldnames=fNames)
writer.writeheader()
for ii, rowDict in enumerate(rowDictList):
try:
wD = {k: v for k, v in rowDict.items() if k in fNames}
writer.writerow(wD)
except Exception as e:
logger.error("Skipping bad CSV record %d wD %r rowDict %r with %s", ii + 1, wD, rowDict, str(e))
continue
ret = True
except Exception as e:
logger.error("Failing for %s : %r with %s", filePath, wD, str(e))
return ret
def __csvEncoder(self, csvData, encoding="utf-8-sig", encodingErrors="ignore"):
"""Handle encoding issues for gzipped data in Py2. (beware of the BOM chars)
Args:
csvData (text lines): uncompressed data from gzip open
encoding (str, optional): character encoding. Defaults to "utf-8-sig".
encodingErrors (str, optional): error treatment. Defaults to "ignore".
"""
for line in csvData:
yield line.decode("utf-8-sig", errors=encodingErrors).encode(encoding, errors=encodingErrors)
def __deserializeXmlPrev(self, filePath, **kwargs):
"""Read the input XML file path and return an ElementTree data object instance.
Args:
filePath (sting): input XML file path
Returns:
object: instance of an ElementTree tree object
"""
_ = kwargs
tree = None
try:
logger.debug("Parsing XML path %s", filePath)
if filePath[-3:] == ".gz":
with gzip.open(filePath, mode="rb") as ifh:
tV = time.time()
tree = ET.parse(ifh)
else:
with open(filePath, mode="rb") as ifh:
tV = time.time()
tree = ET.parse(ifh)
logger.debug("Parsed %s in %.2f seconds", filePath, time.time() - tV)
except Exception as e:
logger.error("Unable to deserialize %r %s", filePath, str(e))
#
return tree
def __testGzip(self, filePath):
ok = True
with gzip.open(filePath, "r") as fh:
try:
fh.read(1)
except gzip.BadGzipFile:
ok = False
except Exception:
ok = False
logger.debug("Gzip file check %r", ok)
return ok
    def __deserializeXml(self, filePath, **kwargs):
        """Read the input XML file path and return an ElementTree data object instance.

        Falls back to plain-text parsing when the ".gz" suffix is present but the
        content is not actually gzip data (see __testGzip).

        Args:
            filePath (str): input XML file path

        Keyword Args:
            encoding (str): text encoding (default "utf-8-sig")
            encodingErrors (str): decode error treatment (default "ignore")

        Returns:
            object: instance of an ElementTree tree object, or None on failure
        """
        _ = kwargs
        tree = None
        encoding = kwargs.get("encoding", "utf-8-sig")
        encodingErrors = kwargs.get("encodingErrors", "ignore")
        #
        try:
            logger.debug("Parsing XML path %s", filePath)
            if filePath[-3:] == ".gz" and self.__testGzip(filePath):
                if sys.version_info[0] > 2:
                    with gzip.open(filePath, "rt", encoding=encoding, errors=encodingErrors) as ifh:
                        tV = time.time()
                        tree = ET.parse(ifh)
                else:
                    # Py2: decompress to a temporary path first.
                    tPath = self.__fileU.uncompress(filePath, outputDir=None)
                    with io.open(tPath, encoding=encoding, errors=encodingErrors) as ifh:
                        tV = time.time()
                        tree = ET.parse(ifh)
            else:
                with io.open(filePath, encoding=encoding, errors=encodingErrors) as ifh:
                    tV = time.time()
                    tree = ET.parse(ifh)
            logger.debug("Parsed %s in %.2f seconds", filePath, time.time() - tV)
        except Exception as e:
            logger.error("Unable to deserialize %r %s", filePath, str(e))
        #
        return tree
# ---- end of file ----
# file: 2_term/made_2021_computer_vision/homeworks/02/inference_utils.py (gh_stars 1-10)
import logging
import cv2
import numpy as np
import torch
def get_logger(filename: str) -> logging.Logger:
    """Configure and return the root logger, emitting INFO+ records both to
    *filename* and to the console.

    Fix: repeated calls no longer stack duplicate handlers -- previously every
    call added a fresh FileHandler and StreamHandler to the root logger, so each
    message was emitted multiple times.
    """
    import os  # local import to avoid touching module-level imports

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(message)s', '%Y-%m-%d %H:%M:%S')
    target = os.path.abspath(filename)
    # Only add a file handler for this target if one is not already attached.
    if not any(isinstance(h, logging.FileHandler) and getattr(h, "baseFilename", None) == target
               for h in logger.handlers):
        fh = logging.FileHandler(filename)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    # Exact type check: FileHandler subclasses StreamHandler and must not match.
    if not any(type(h) is logging.StreamHandler for h in logger.handlers):
        sh = logging.StreamHandler()
        sh.setLevel(logging.INFO)
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    return logger
def get_boxes_from_mask(mask, margin, clip=False):
    """
    Detect connected components on mask, calculate their bounding boxes (with margin) and return them (normalized).
    If clip is True, cutoff the values to (0.0, 1.0).
    :return np.ndarray boxes shaped (N, 4), or [] when no components are found
    """
    # NOTE(review): an identical definition later in this module shadows this one.
    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask)
    boxes = []
    for j in range(1, num_labels):  # j = 0 == background component
        x, y, w, h = stats[j][:4]
        x1 = int(x - margin * w)
        y1 = int(y - margin * h)
        x2 = int(x + w + margin * w)
        y2 = int(y + h + margin * h)
        box = np.asarray([x1, y1, x2, y2])
        boxes.append(box)
    if len(boxes) == 0:
        return []
    # Fix: np.float was removed in NumPy 1.24 -- use builtin float (same dtype, float64).
    boxes = np.asarray(boxes).astype(float)
    # Normalize x coordinates by width, y coordinates by height.
    boxes[:, [0, 2]] /= mask.shape[1]
    boxes[:, [1, 3]] /= mask.shape[0]
    if clip:
        boxes = boxes.clip(0.0, 1.0)
    return boxes
def prepare_for_segmentation(image, fit_size):
    """
    Scale proportionally image into fit_size and pad with zeroes to fit_size
    :return: np.ndarray image_padded shaped (*fit_size, 3), float k (scaling coef), float dw (x pad), dh (y pad)
    """
    # pretty much the same code as segmentation.transforms.Resize
    src_h, src_w = image.shape[:2]
    k = fit_size[0] / max(src_w, src_h)
    resized = cv2.resize(image, dsize=None, fx=k, fy=k)
    new_h, new_w = resized.shape[:2]
    dw = (fit_size[0] - new_w) // 2
    dh = (fit_size[1] - new_h) // 2
    padded = cv2.copyMakeBorder(resized, top=dh, bottom=dh, left=dw, right=dw,
                                borderType=cv2.BORDER_CONSTANT, value=0.0)
    # Integer division of the margins can leave the result one pixel short of
    # the target -- force the exact output size in that case.
    if padded.shape[0] != fit_size[1] or padded.shape[1] != fit_size[0]:
        padded = cv2.resize(padded, dsize=fit_size)
    return padded, k, dw, dh
def get_boxes_from_mask(mask, margin, clip=False):
    """
    Detect connected components on mask, calculate their bounding boxes (with margin) and return them (normalized).
    If clip is True, cutoff the values to (0.0, 1.0).
    :return np.ndarray boxes shaped (N, 4), or [] when no components are found
    """
    # NOTE(review): duplicate of the identical definition earlier in this module.
    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask)
    boxes = []
    for j in range(1, num_labels):  # j = 0 == background component
        x, y, w, h = stats[j][:4]
        x1 = int(x - margin * w)
        y1 = int(y - margin * h)
        x2 = int(x + w + margin * w)
        y2 = int(y + h + margin * h)
        box = np.asarray([x1, y1, x2, y2])
        boxes.append(box)
    if len(boxes) == 0:
        return []
    # Fix: np.float was removed in NumPy 1.24 -- use builtin float (same dtype, float64).
    boxes = np.asarray(boxes).astype(float)
    # Normalize x coordinates by width, y coordinates by height.
    boxes[:, [0, 2]] /= mask.shape[1]
    boxes[:, [1, 3]] /= mask.shape[0]
    if clip:
        boxes = boxes.clip(0.0, 1.0)
    return boxes
def prepare_for_recognition(image, output_size):
    """Resize *image* to *output_size*, scale values to [0, 1], and return a
    (1, C, H, W) float32 torch tensor."""
    image = cv2.resize(image, output_size, interpolation=cv2.INTER_AREA)
    # Fix: np.float was removed in NumPy 1.24 -- use builtin float (float64).
    image = image.astype(float) / 255.
    return torch.from_numpy(image.transpose(2, 0, 1)).float().unsqueeze(0)
import unittest
from pathlib import Path
from unittest.mock import patch
from typer.testing import CliRunner
from adk.exceptions import ApplicationNotFound, ExperimentDirectoryNotValid
from adk.command_list import app, applications_app, experiments_app, retrieve_application_name_and_path, \
retrieve_experiment_name_and_path
from adk.command_processor import CommandProcessor
from adk.managers.config_manager import ConfigManager
class TestCommandList(unittest.TestCase):
    def setUp(self):
        """Create a CLI runner plus shared fixture names, paths, and application listings."""
        self.application = 'test_application'
        self.experiment_name = 'test_experiment'
        # NOTE(review): a single string containing a comma -- possibly intended as
        # two separate roles; confirm against the tests that consume it.
        self.roles = ["role1, role2"]
        self.path = Path("dummy")
        self.runner = CliRunner()
        # Application listings covering the remote/local key combinations used below.
        self.app_dict_1 = {'remote': [], 'local': []}
        self.app_dict_2 = {'remote': [{'name': 'foo'}, {'name': 'bar'}], 'local': []}
        self.app_dict_3 = {'remote': [], 'local': [{'name': 'foo'}]}
        self.app_dict_4 = {'remote': [{'name': 'bar'}], 'local': [{'name': 'foo'}]}
        self.app_dict_5 = {'local': []}
        self.app_dict_6 = {'local': [{'name': 'foo'}, {'name': 'bar'}]}
        self.app_dict_7 = {'remote': []}
        self.app_dict_8 = {'remote': [{'name': 'foo'}, {'name': 'bar'}]}
    def test_login(self):
        """The login command reports 'not implemented' with a host, without one, and for blank input."""
        with patch.object(CommandProcessor, "login") as login_mock:
            login_output = self.runner.invoke(app, ['login', 'test_host'], input="test_username\ntest_password")
            self.assertIn('Command is not yet implemented', login_output.stdout)
            login_mock.reset_mock()
            login_output = self.runner.invoke(app, ['login'], input="test_username\ntest_password")
            self.assertIn('Command is not yet implemented', login_output.stdout)
            login_mock.reset_mock()
            login_output = self.runner.invoke(app, ['login'], input=" \n ")
            self.assertIn('Command is not yet implemented', login_output.stdout)
    def test_logout(self):
        """The logout command reports 'not implemented' with and without a host argument."""
        with patch.object(CommandProcessor, "logout") as logout_mock:
            logout_output = self.runner.invoke(app, ['logout'])
            self.assertIn('Command is not yet implemented', logout_output.stdout)
            logout_mock.reset_mock()
            logout_output = self.runner.invoke(app, ['logout', 'qutech.com'])
            self.assertIn('Command is not yet implemented', logout_output.stdout)
    def test_applications_create_succes(self):
        """'applications create' validates all names and delegates to applications_create."""
        with patch("adk.command_list.Path.cwd", return_value=self.path) as mock_cwd, \
                patch("adk.command_list.validate_path_name") as mock_validate_path_name, \
                patch.object(CommandProcessor, 'applications_create') as application_create_mock:
            application_create_output = self.runner.invoke(applications_app,
                                                           ['create', 'test_application', 'role1', 'role2'])
            mock_cwd.assert_called_once()
            # One validation for the application name plus one per role.
            self.assertEqual(mock_validate_path_name.call_count, 3)
            application_create_mock.assert_called_once_with(application_name=self.application, roles=('role1', 'role2'),
                                                            application_path=self.path / self.application)
            self.assertEqual(application_create_output.exit_code, 0)
            self.assertIn(f"Application 'test_application' created successfully in directory '{self.path}'",
                          application_create_output.stdout)
    def test_applications_create_exceptions(self):
        """'applications create' rejects bad role counts, duplicate roles, and
        invalid characters in application/role names, and surfaces unexpected errors."""
        with patch("adk.command_list.Path.cwd", return_value='test') as mock_cwd:
            # Raise NotEnoughRoles when only one or less roles are given
            application_create_output = self.runner.invoke(applications_app, ['create', 'test_application', 'role1'])
            self.assertIn('The number of roles must be higher than one', application_create_output.stdout)
            # Raise RolesNotUnique when roles are duplicated
            application_create_output = self.runner.invoke(applications_app, ['create', 'test_application', 'role1',
                                                                              'role2', 'role3', 'role2'])
            self.assertIn('The role names must be unique', application_create_output.stdout)
            # Raise InvalidApplicationName when the application name is invalid contains ['/', '\\', '*', ':', '?',
            # '"', '<', '>', '|']
            application_create_output = self.runner.invoke(applications_app, ['create', 'test_application/2',
                                                                              'role1', 'role2'])
            self.assertIn('Error: Application name can\'t contain any of the following characters: [\'/\', \'\\\', '
                          '\'*\', \':\', \'?\', \'"\', \'<\', \'>\', \'|\']', application_create_output.stdout)
            application_create_output = self.runner.invoke(applications_app, ['create', 'test*application',
                                                                              'role1', 'role2'])
            self.assertIn('Error: Application name can\'t contain any of the following characters: [\'/\', \'\\\', '
                          '\'*\', \':\', \'?\', \'"\', \'<\', \'>\', \'|\']', application_create_output.stdout)
            application_create_output = self.runner.invoke(applications_app, ['create', 'test\\application',
                                                                              'role1', 'role2'])
            self.assertIn('Error: Application name can\'t contain any of the following characters: [\'/\', \'\\\', '
                          '\'*\', \':\', \'?\', \'"\', \'<\', \'>\', \'|\']', application_create_output.stdout)
            # Raise InvalidRoleName when one of the roles contains ['/', '\\', '*', ':', '?', '"', '<', '>', '|']
            application_create_output = self.runner.invoke(applications_app, ['create', 'test_application',
                                                                              'role/1', 'role2'])
            self.assertIn('Error: Role name can\'t contain any of the following characters: [\'/\', \'\\\', '
                          '\'*\', \':\', \'?\', \'"\', \'<\', \'>\', \'|\']', application_create_output.stdout)
            application_create_output = self.runner.invoke(applications_app, ['create', 'test_application',
                                                                              'role1', 'role/2'])
            self.assertIn('Error: Role name can\'t contain any of the following characters: [\'/\', \'\\\', '
                          '\'*\', \':\', \'?\', \'"\', \'<\', \'>\', \'|\']', application_create_output.stdout)
            application_create_output = self.runner.invoke(applications_app, ['create', 'test_application',
                                                                              'rol/e1', 'role2'])
            self.assertIn('Error: Role name can\'t contain any of the following characters: [\'/\', \'\\\', '
                          '\'*\', \':\', \'?\', \'"\', \'<\', \'>\', \'|\']', application_create_output.stdout)
            # Raise Other Exception
            mock_cwd.side_effect = Exception("Test")
            application_create_output = self.runner.invoke(applications_app,
                                                           ['create', 'test_application', 'role1', 'role2'])
            self.assertIn("Unhandled exception: Exception('Test')", application_create_output.stdout)
    def test_application_delete_no_experiment_dir(self):
        """'applications delete' reports full deletion when the processor returns
        True and partial deletion when it returns False."""
        with patch.object(CommandProcessor, 'applications_delete', return_value=True) as applications_delete_mock, \
                patch("adk.command_list.retrieve_application_name_and_path") as retrieve_appname_and_path_mock:
            retrieve_appname_and_path_mock.return_value = self.path, self.application
            application_delete_output = self.runner.invoke(applications_app, ['delete'])
            self.assertEqual(application_delete_output.exit_code, 0)
            retrieve_appname_and_path_mock.assert_called_once()
            self.assertIn("Application deleted successfully",
                          application_delete_output.stdout)
            retrieve_appname_and_path_mock.reset_mock()
            # Processor returning False -> only the files were removed.
            applications_delete_mock.return_value = False
            application_delete_output = self.runner.invoke(applications_app, ['delete'])
            self.assertEqual(application_delete_output.exit_code, 0)
            retrieve_appname_and_path_mock.assert_called_once()
            self.assertIn("Application files deleted",
                          application_delete_output.stdout)
    def test_application_delete_with_application_dir(self):
        """'applications delete <dir>' reports a non-empty directory when the
        processor returns False."""
        with patch.object(CommandProcessor, 'applications_delete', return_value=False) as applications_delete_mock, \
                patch("adk.command_list.retrieve_application_name_and_path") as retrieve_appname_and_path_mock:
            retrieve_appname_and_path_mock.return_value = self.path, self.application
            application_delete_output = self.runner.invoke(applications_app, ['delete', 'app_dir'])
            applications_delete_mock.assert_called_once()
            self.assertEqual(application_delete_output.exit_code, 0)
            retrieve_appname_and_path_mock.assert_called_once()
            self.assertIn("Application files deleted, directory not empty",
                          application_delete_output.stdout)
    def test_experiment_delete_no_experiment_dir(self):
        """'experiments delete' reports full deletion when the processor returns
        True and partial deletion when it returns False."""
        with patch.object(CommandProcessor, 'experiments_delete', return_value=True) as experiments_delete_mock, \
                patch("adk.command_list.retrieve_experiment_name_and_path") as retrieve_expname_and_path_mock:
            retrieve_expname_and_path_mock.return_value = self.path, self.experiment_name
            experiment_delete_output = self.runner.invoke(experiments_app, ['delete'])
            self.assertEqual(experiment_delete_output.exit_code, 0)
            retrieve_expname_and_path_mock.assert_called_once()
            self.assertIn("Experiment deleted successfully",
                          experiment_delete_output.stdout)
            retrieve_expname_and_path_mock.reset_mock()
            # Processor returning False -> only the files were removed.
            experiments_delete_mock.return_value = False
            experiment_delete_output = self.runner.invoke(experiments_app, ['delete'])
            self.assertEqual(experiment_delete_output.exit_code, 0)
            retrieve_expname_and_path_mock.assert_called_once()
            self.assertIn("Experiment files deleted",
                          experiment_delete_output.stdout)
    def test_experiment_delete_with_experiment_dir(self):
        """'experiments delete <dir>' reports a non-empty directory when the
        processor returns False."""
        with patch.object(CommandProcessor, 'experiments_delete', return_value=False) as experiments_delete_mock, \
                patch("adk.command_list.retrieve_experiment_name_and_path") as retrieve_expname_and_path_mock:
            retrieve_expname_and_path_mock.return_value = self.path, self.experiment_name
            experiment_delete_output = self.runner.invoke(experiments_app, ['delete', 'exp_dir'])
            experiments_delete_mock.assert_called_once_with(experiment_name=self.experiment_name,
                                                            experiment_path=self.path)
            self.assertEqual(experiment_delete_output.exit_code, 0)
            retrieve_expname_and_path_mock.assert_called_once()
            self.assertIn("Experiment files deleted, directory not empty",
                          experiment_delete_output.stdout)
    def test_retrieve_application_name_and_path(self):
        """retrieve_application_name_and_path resolves a named or cwd-derived
        application and raises ApplicationNotFound for missing path/directory."""
        with patch("adk.command_list.validate_path_name") as validate_path_name_mock, \
                patch("adk.command_list.Path.cwd") as cwd_mock, \
                patch.object(ConfigManager, "get_application_path") as get_application_path_mock, \
                patch.object(ConfigManager, "get_application_from_path") as get_application_from_path_mock, \
                patch("adk.command_list.Path.is_dir") as is_dir_mock:
            get_application_path_mock.return_value = self.path
            # application name not None
            retrieve_application_name_and_path(application_name=self.application)
            is_dir_mock.assert_called_once()
            validate_path_name_mock.assert_called_once_with("Application", self.application)
            get_application_path_mock.assert_called_once_with(self.application)
            # application name is None -> derived from the current working directory
            is_dir_mock.reset_mock()
            cwd_mock.return_value = self.path
            get_application_from_path_mock.return_value = self.application, None
            retrieve_application_name_and_path(application_name=None)
            is_dir_mock.assert_called_once()
            cwd_mock.assert_called_once()
            get_application_from_path_mock.assert_called_once_with(self.path)
            # Raise ApplicationNotFound when application_path is None
            validate_path_name_mock.reset_mock()
            get_application_path_mock.reset_mock()
            get_application_path_mock.return_value = None
            self.assertRaises(ApplicationNotFound, retrieve_application_name_and_path, self.application)
            validate_path_name_mock.assert_called_once_with("Application", self.application)
            get_application_path_mock.assert_called_once_with(self.application)
            # Raise ApplicationNotFound when the application directory does not exist
            validate_path_name_mock.reset_mock()
            get_application_path_mock.reset_mock()
            is_dir_mock.reset_mock()
            is_dir_mock.return_value = False
            get_application_path_mock.return_value = self.path
            self.assertRaises(ApplicationNotFound, retrieve_application_name_and_path, self.application)
            is_dir_mock.assert_called_once()
            validate_path_name_mock.assert_called_once_with("Application", self.application)
            get_application_path_mock.assert_called_once_with(self.application)
    def test_applications_validate_all_ok(self):
        """'applications validate' reports a valid application when no errors or
        warnings are returned (info-only results still count as valid)."""
        with patch.object(CommandProcessor, 'applications_validate') as applications_validate_mock, \
                patch("adk.command_list.retrieve_application_name_and_path") as retrieve_appname_and_path_mock:
            retrieve_appname_and_path_mock.return_value = self.path, self.application
            # When application is valid (no items in error, warning and info)
            applications_validate_mock.return_value = {"error": {}, "warning": {}, "info": {}}
            application_validate_output = self.runner.invoke(applications_app, ['validate'])
            retrieve_appname_and_path_mock.assert_called_once()
            applications_validate_mock.assert_called_once_with(application_name=self.application,
                                                               application_path=self.path)
            self.assertIn(f"Application '{self.application}' is valid", application_validate_output.stdout)
            # When application is valid with item in 'info'
            retrieve_appname_and_path_mock.reset_mock()
            applications_validate_mock.reset_mock()
            applications_validate_mock.return_value = {"error": {}, "warning": {}, "info": {"info"}}
            application_validate_output = self.runner.invoke(applications_app, ['validate'])
            applications_validate_mock.assert_called_once_with(application_name=self.application,
                                                               application_path=self.path)
            retrieve_appname_and_path_mock.assert_called_once()
            self.assertIn(f"Application '{self.application}' is valid", application_validate_output.stdout)
            # When application name is given as input
            retrieve_appname_and_path_mock.reset_mock()
            applications_validate_mock.reset_mock()
            application_validate_output = self.runner.invoke(applications_app, ['validate', self.application])
            applications_validate_mock.assert_called_once_with(application_name=self.application,
                                                               application_path=self.path)
            retrieve_appname_and_path_mock.assert_called_once()
            self.assertIn(f"Application '{self.application}' is valid", application_validate_output.stdout)
    def test_applications_validate_invalid(self):
        """'applications validate' reports an invalid application whenever the
        'error' bucket is non-empty."""
        with patch.object(CommandProcessor, 'applications_validate') as applications_validate_mock, \
                patch("adk.command_list.retrieve_application_name_and_path") as retrieve_appname_and_path_mock:
            retrieve_appname_and_path_mock.return_value = self.path, self.application
            applications_validate_mock.return_value = {"error": {"error"}, "warning": {"warning"}, "info": {"info"}}
            application_validate_output = self.runner.invoke(applications_app, ['validate'])
            applications_validate_mock.assert_called_once_with(application_name=self.application,
                                                               application_path=self.path)
            retrieve_appname_and_path_mock.assert_called_once()
            self.assertIn(f"Application '{self.application}' is invalid", application_validate_output.stdout)
            # When only 'error' has items
            retrieve_appname_and_path_mock.reset_mock()
            applications_validate_mock.reset_mock()
            applications_validate_mock.return_value = {"error": {"error"}, "warning": {}, "info": {}}
            application_validate_output = self.runner.invoke(applications_app, ['validate'])
            applications_validate_mock.assert_called_once_with(application_name=self.application,
                                                               application_path=self.path)
            retrieve_appname_and_path_mock.assert_called_once()
            self.assertIn(f"Application '{self.application}' is invalid", application_validate_output.stdout)
    def test_experiment_create(self):
        """'experiments create' validates the experiment name, resolves the
        application, and delegates to experiments_create with local=True."""
        with patch("adk.command_list.Path.cwd") as mock_cwd, \
                patch.object(CommandProcessor, 'experiments_create') as experiment_create_mock, \
                patch.object(CommandProcessor, 'applications_validate') as app_validate_mock, \
                patch("adk.command_list.validate_path_name") as mock_validate_path, \
                patch("adk.command_list.retrieve_application_name_and_path") as retrieve_application_name_and_path_mock:
            retrieve_application_name_and_path_mock.return_value = self.path, "app_name"
            mock_cwd.return_value = 'test'
            app_validate_mock.return_value = {"error": [], "warning": [], "info": []}
            experiment_create_mock.return_value = True, ''
            experiment_create_output = self.runner.invoke(experiments_app, ['create', 'test_exp', 'app_name',
                                                                            'network_1'])
            mock_validate_path.assert_called_once_with('Experiment', 'test_exp')
            retrieve_application_name_and_path_mock.assert_called_once_with(application_name="app_name")
            self.assertEqual(experiment_create_output.exit_code, 0)
            self.assertIn("Experiment 'test_exp' created successfully in directory 'test'",
                          experiment_create_output.stdout)
            experiment_create_mock.assert_called_once_with(experiment_name='test_exp', application_name='app_name',
                                                           network_name='network_1', local=True, path='test')
    def test_retrieve_experiment_name_and_path(self):
        """retrieve_experiment_name_and_path validates a given name, falls back to
        the cwd, and raises ExperimentDirectoryNotValid for a missing directory."""
        with patch("adk.command_list.Path.cwd") as cwd_mock, \
                patch("adk.command_list.validate_path_name") as validate_path_name_mock, \
                patch("adk.command_list.Path.is_dir") as is_dir_mock:
            # if experiment name is not None
            cwd_mock.return_value = self.path
            retrieve_experiment_name_and_path(self.experiment_name)
            validate_path_name_mock.assert_called_once_with("Experiment", self.experiment_name)
            is_dir_mock.assert_called_once()
            # if experiment name is None
            is_dir_mock.reset_mock()
            retrieve_experiment_name_and_path(None)
            is_dir_mock.assert_called_once()
            # raise ExperimentDirectoryNotValid
            is_dir_mock.reset_mock()
            validate_path_name_mock.reset_mock()
            is_dir_mock.return_value = False
            self.assertRaises(ExperimentDirectoryNotValid, retrieve_experiment_name_and_path, self.experiment_name)
            validate_path_name_mock.assert_called_once_with("Experiment", self.experiment_name)
            is_dir_mock.assert_called_once()
    def test_experiment_validate(self):
        """'experiments validate' reports invalid when 'error' is non-empty and
        valid otherwise (info-only results still count as valid)."""
        with patch("adk.command_list.retrieve_experiment_name_and_path") as retrieve_experiment_name_and_path_mock, \
                patch.object(CommandProcessor, 'experiments_validate') as experiments_validate_mock, \
                patch("adk.command_list.show_validation_messages") as show_validation_messages_mock:
            experiments_validate_mock.return_value = {"error": {"error"}, "warning": {"warning"}, "info": {"info"}}
            retrieve_experiment_name_and_path_mock.return_value = (self.path, self.experiment_name)
            experiment_validate_output = self.runner.invoke(experiments_app, ['validate'])
            retrieve_experiment_name_and_path_mock.assert_called_once_with(experiment_name=None)
            experiments_validate_mock.assert_called_once_with(experiment_path=self.path)
            show_validation_messages_mock.assert_called_once()
            self.assertIn("Experiment is invalid", experiment_validate_output.stdout)
            # When only 'error' has items
            experiments_validate_mock.reset_mock()
            retrieve_experiment_name_and_path_mock.reset_mock()
            show_validation_messages_mock.reset_mock()
            experiments_validate_mock.return_value = {"error": {"error"}, "warning": {}, "info": {}}
            experiment_validate_output = self.runner.invoke(experiments_app, ['validate'])
            retrieve_experiment_name_and_path_mock.assert_called_once_with(experiment_name=None)
            experiments_validate_mock.assert_called_once_with(experiment_path=self.path)
            show_validation_messages_mock.assert_called_once()
            self.assertIn("Experiment is invalid", experiment_validate_output.stdout)
            # When application is valid (no items in error, warning and info)
            experiments_validate_mock.reset_mock()
            retrieve_experiment_name_and_path_mock.reset_mock()
            show_validation_messages_mock.reset_mock()
            experiments_validate_mock.return_value = {"error": {}, "warning": {}, "info": {}}
            experiment_validate_output = self.runner.invoke(experiments_app, ['validate'])
            retrieve_experiment_name_and_path_mock.assert_called_once_with(experiment_name=None)
            experiments_validate_mock.assert_called_once_with(experiment_path=self.path)
            show_validation_messages_mock.assert_called_once()
            self.assertIn("Experiment is valid", experiment_validate_output.stdout)
            # When application is valid with item in 'info'
            experiments_validate_mock.reset_mock()
            retrieve_experiment_name_and_path_mock.reset_mock()
            show_validation_messages_mock.reset_mock()
            experiments_validate_mock.return_value = {"error": {}, "warning": {}, "info": {"info"}}
            experiment_validate_output = self.runner.invoke(experiments_app, ['validate'])
            retrieve_experiment_name_and_path_mock.assert_called_once_with(experiment_name=None)
            experiments_validate_mock.assert_called_once_with(experiment_path=self.path)
            show_validation_messages_mock.assert_called_once()
            self.assertIn("Experiment is valid", experiment_validate_output.stdout)
def test_experiment_run(self):
    """'experiments run' first validates the experiment, then dispatches the
    run: block=False by default, block=True when --block is passed."""
    with patch.object(CommandProcessor, 'experiments_validate') as exp_validate_mock, \
         patch.object(CommandProcessor, 'experiments_run') as exp_run_mock, \
         patch("adk.command_list.retrieve_experiment_name_and_path") as retrieve_expname_and_path_mock:
        retrieve_expname_and_path_mock.return_value = self.path, None
        # Validation reports no issues, so the run is allowed to proceed.
        exp_validate_mock.return_value = {"error": [], "warning": [], "info": []}
        exp_run_output = self.runner.invoke(experiments_app, ['run'])
        exp_validate_mock.assert_called_once_with(experiment_path=self.path)
        retrieve_expname_and_path_mock.assert_called_once()
        # Default invocation must run non-blocking.
        exp_run_mock.assert_called_once_with(experiment_path=self.path, block=False)
        self.assertEqual(exp_run_output.exit_code, 0)
        # Second invocation with --block must run blocking.
        retrieve_expname_and_path_mock.reset_mock()
        exp_run_mock.reset_mock()
        exp_run_output = self.runner.invoke(experiments_app, ['run', '--block'])
        exp_run_mock.assert_called_once_with(experiment_path=self.path, block=True)
        retrieve_expname_and_path_mock.assert_called_once()
        self.assertEqual(exp_run_output.exit_code, 0)
def test_experiment_results(self):
    """'experiments results' fetches results for the current experiment;
    --all fetches every run, --show prints them instead of storing them."""
    with patch.object(CommandProcessor, 'experiments_results') as exp_results_mock, \
         patch("adk.command_list.retrieve_experiment_name_and_path") as retrieve_expname_and_path_mock:
        retrieve_expname_and_path_mock.return_value = self.path, None
        exp_results_output = self.runner.invoke(experiments_app, ['results'])
        exp_results_mock.assert_called_once_with(all_results=False, experiment_path=self.path)
        self.assertEqual(exp_results_output.exit_code, 0)
        retrieve_expname_and_path_mock.assert_called_once()
        # --all --show: the returned results must be echoed to stdout.
        retrieve_expname_and_path_mock.reset_mock()
        exp_results_mock.reset_mock()
        exp_results_mock.return_value = ['r1', 'r2']
        exp_results_output = self.runner.invoke(experiments_app, ['results', '--all', '--show'])
        exp_results_mock.assert_called_once_with(all_results=True, experiment_path=self.path)
        retrieve_expname_and_path_mock.assert_called_once()
        self.assertEqual(exp_results_output.exit_code, 0)
        self.assertIn("['r1', 'r2']", exp_results_output.stdout)
        # --all without --show: results are written to a file and the
        # location is reported instead.
        retrieve_expname_and_path_mock.reset_mock()
        exp_results_mock.reset_mock()
        exp_results_output = self.runner.invoke(experiments_app, ['results', '--all'])
        exp_results_mock.assert_called_once_with(all_results=True, experiment_path=self.path)
        retrieve_expname_and_path_mock.assert_called_once()
        self.assertEqual(exp_results_output.exit_code, 0)
        self.assertIn("Results are stored at location 'dummy/results/processed.json'", exp_results_output.stdout)
def test_applications_list(self):
    """'applications list' without flags reports local and remote
    availability based on whatever applications_list returns.

    app_dict_1..4 are fixtures presumably built in setUp (outside this
    view) covering the local/remote combinations — confirm against setUp.
    """
    with patch.object(CommandProcessor, "applications_list") as list_applications_mock:
        # One canned return value per invocation below, in order.
        list_applications_mock.side_effect = [self.app_dict_1, self.app_dict_2,
                                              self.app_dict_3, self.app_dict_4]
        # 1st: no local apps, remote section not reported as empty.
        result_both = self.runner.invoke(applications_app, ['list'])
        self.assertEqual(result_both.exit_code, 0)
        self.assertIn('There are no local applications available', result_both.stdout)
        self.assertNotIn('There are no remote applications available', result_both.stdout)
        # 2nd: no local apps and no remote count/names shown.
        result_both = self.runner.invoke(applications_app, ['list'])
        self.assertEqual(result_both.exit_code, 0)
        self.assertIn('There are no local applications available', result_both.stdout)
        self.assertNotIn('2 remote application(s)', result_both.stdout)
        self.assertNotIn('foo', result_both.stdout)
        self.assertNotIn('bar', result_both.stdout)
        # 3rd: one local app ('foo'), remote section not reported as empty.
        result_both = self.runner.invoke(applications_app, ['list'])
        self.assertEqual(result_both.exit_code, 0)
        self.assertIn('1 local application(s)', result_both.stdout)
        self.assertIn('foo', result_both.stdout)
        self.assertNotIn('There are no remote applications available', result_both.stdout)
        # 4th: one local app only; no remote count or remote names.
        result_both = self.runner.invoke(applications_app, ['list'])
        self.assertEqual(result_both.exit_code, 0)
        self.assertIn('1 local application(s)', result_both.stdout)
        self.assertNotIn('1 remote application(s)', result_both.stdout)
        self.assertIn('foo', result_both.stdout)
        self.assertNotIn('bar', result_both.stdout)
def test_applications_list_local(self):
    """'applications list --local' shows only the local section and never
    mentions remote applications."""
    with patch.object(CommandProcessor, "applications_list") as list_applications_mock:
        list_applications_mock.side_effect = [self.app_dict_5, self.app_dict_6]
        # 1st: no local applications at all.
        result_local = self.runner.invoke(applications_app, ['list', '--local'])
        self.assertEqual(result_local.exit_code, 0)
        self.assertIn('There are no local applications available', result_local.stdout)
        self.assertNotIn('remote', result_local.stdout)
        # 2nd: two local applications listed by name.
        result_local = self.runner.invoke(applications_app, ['list', '--local'])
        self.assertEqual(result_local.exit_code, 0)
        self.assertIn('2 local application(s)', result_local.stdout)
        self.assertIn('foo', result_local.stdout)
        self.assertIn('bar', result_local.stdout)
        self.assertNotIn('remote', result_local.stdout)
def test_applications_list_remote(self):
    """'applications list --remote' must not leak any local output.

    NOTE(review): exit_code 2 is asserted here (typer/click's usage-error
    code), unlike the other list tests which expect 0 — presumably the
    --remote flag is rejected or unsupported in this configuration;
    confirm the intent against the CLI definition.
    """
    with patch.object(CommandProcessor, "applications_list") as list_applications_mock:
        list_applications_mock.side_effect = [self.app_dict_7, self.app_dict_8]
        result_remote = self.runner.invoke(applications_app, ['list', '--remote'])
        self.assertEqual(result_remote.exit_code, 2)
        self.assertNotIn('There are no remote applications available', result_remote.stdout)
        self.assertNotIn('local', result_remote.stdout)
        result_remote = self.runner.invoke(applications_app, ['list', '--remote'])
        self.assertEqual(result_remote.exit_code, 2)
        self.assertNotIn('2 remote application(s)', result_remote.stdout)
        self.assertNotIn('foo', result_remote.stdout)
        self.assertNotIn('bar', result_remote.stdout)
        self.assertNotIn('local', result_remote.stdout)
|
<gh_stars>1-10
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CLI interface for barbican management
"""
from __future__ import print_function
from os_vm_expire.common import config
# from os_vm_expire.model import clean
# from os_vm_expire.model.migration import commands
from os_vm_expire.model.models import VmExclude
from os_vm_expire.model import repositories
import os_vm_expire.version
import argparse
import datetime
import prettytable
import six
import sys
import time
from oslo_config import cfg
from oslo_db import options
from oslo_log import log as logging
from oslo_utils import encodeutils
# Global oslo.config object for this tool; oslo.db's default options are
# registered on it before any repository/database access happens.
CONF = cfg.CONF
options.set_defaults(CONF)
LOG = logging.getLogger(__name__)
# Decorators for actions
def args(*posargs, **kwopts):
    """Decorator recording argparse argument specs on a command method.

    Each application prepends one ``(posargs, kwopts)`` pair to the
    function's ``args`` attribute; add_command_parsers later replays these
    pairs into ``parser.add_argument``. Prepending keeps the declaration
    order of stacked decorators.
    """
    def wrapper(func):
        spec_list = func.__dict__.setdefault('args', [])
        spec_list.insert(0, (posargs, kwopts))
        return func
    return wrapper
class VmExcludeCommands(object):
    """Class for managing VM excludes.

    Each public method is exposed as a CLI action by add_command_parsers;
    the @args decorators declare its command-line options.
    """

    # Symbolic exclusion type -> integer stored in the database ...
    EXCLUSION_TYPES = {
        'domain': 0,
        'project': 1,
        'user': 2
    }

    # ... and the reverse mapping, used when displaying rows.
    EXCLUSION_MAP = {
        0: 'domain',
        1: 'project',
        2: 'user'
    }

    description = "Subcommands for managing VM excludes"

    @staticmethod
    def _print_excludes(excludes):
        """Render exclude rows as an ASCII table on stdout.

        Shared by list() and add(); leading underscore keeps it out of the
        CLI actions discovered by methods_of().
        """
        headers = [
            'id',
            'type',
            'exclude_id'
        ]
        pt = prettytable.PrettyTable(headers)
        for instance in excludes:
            pt.add_row(
                [
                    instance.id,
                    VmExcludeCommands.EXCLUSION_MAP[instance.exclude_type],
                    instance.exclude_id
                ]
            )
        if six.PY3:
            print(encodeutils.safe_encode(pt.get_string()).decode())
        else:
            print(encodeutils.safe_encode(pt.get_string()))

    list_description = "List exclusions"

    @args('--type', metavar='<exclude-type>', dest='excludeType',
          default=None,
          help='Type of exclusion (domain, project, user)')
    def list(self, excludeType=None):
        """List exclusions, optionally filtered by exclusion type."""
        repositories.setup_database_engine_and_factory()
        repo = repositories.get_vmexclude_repository()
        exclude_type = None
        if excludeType is not None:
            print("Filter by exclude type %s" % (excludeType))
            # Use the shared mapping instead of a duplicated if/elif chain;
            # unknown names yield None (no filter), same as before.
            exclude_type = VmExcludeCommands.EXCLUSION_TYPES.get(excludeType)
        excludes = repo.get_type_entities(exclude_type=exclude_type)
        self._print_excludes(excludes)

    add_description = "Add exclusion"

    @args('--type', metavar='<exclude-type>', dest='excludeType',
          default=None,
          help='Type of exclusion (domain, project, user)')
    @args('--id', metavar='<exclude-id>', dest='excludeId',
          default=None,
          help='id of entity to exclude (domain, project, user)')
    def add(self, excludeType=None, excludeId=None):
        """Create an exclusion entry and display it."""
        if excludeId is None:
            print("id option is mandatory")
            return
        if excludeType not in VmExcludeCommands.EXCLUSION_TYPES:
            print("type is not valid")
            return
        repositories.setup_database_engine_and_factory()
        repo = repositories.get_vmexclude_repository()
        entity = VmExclude()
        entity.exclude_type = VmExcludeCommands.EXCLUSION_TYPES[excludeType]
        entity.exclude_id = excludeId
        try:
            entity = repo.create_exclude(entity)
        except Exception as e:
            # Report the failure but do not crash the management tool.
            print(str(e))
            return
        repositories.commit()
        self._print_excludes([entity])

    delete_description = "Delete exclusion"

    @args('--id', metavar='<exclude-id>', dest='excludeId',
          default=None,
          help='id of exclude')
    def delete(self, excludeId=None):
        """Delete one exclusion entry by its primary id."""
        if excludeId is None:
            print("Missing mandatory id parameter")
            return
        repositories.setup_database_engine_and_factory()
        repo = repositories.get_vmexclude_repository()
        repo.delete_entity_by_id(excludeId)
        repositories.commit()
        print("Exclude deleted")
class VmExpireCommands(object):
    """Class for managing VM expiration.

    Each public method is exposed as a CLI action by add_command_parsers;
    the @args decorators declare its command-line options.
    """

    description = "Subcommands for managing VM expiration"

    list_description = "List VM expirations"

    @args('--instance', metavar='<instance-id>', dest='instanceid',
          default=None,
          help='Instance id')
    @args('--days', metavar='<expire-in>', dest='days',
          default=None,
          help='Filter VM expiring in X days')
    def list(self, instanceid=None, days=None):
        """List expirations, optionally for one instance and/or only those
        expiring within the next *days* days."""
        repositories.setup_database_engine_and_factory()
        repo = repositories.get_vmexpire_repository()
        res = repo.get_all_by(instance_id=instanceid, project_id=None)
        headers = [
            'id',
            'expire',
            'instance.name',
            'instance.id',
            'project.id',
            'user.id',
            'notif',
            'notif.last'
        ]
        limit = None
        if days:
            try:
                # Epoch timestamp of "now + days"; rows expiring later than
                # this are skipped below.
                dt = datetime.datetime.now() + datetime.timedelta(days=int(days))
                limit = time.mktime(dt.timetuple())
            except Exception as e:
                print(str(e))
                return
        pt = prettytable.PrettyTable(headers)
        for instance in res:
            if limit and instance.expire > limit:
                continue
            pt.add_row(
                [
                    instance.id,
                    datetime.datetime.fromtimestamp(instance.expire),
                    instance.instance_name,
                    instance.instance_id,
                    instance.project_id,
                    instance.user_id,
                    instance.notified,
                    instance.notified_last
                ]
            )
        if six.PY3:
            print(encodeutils.safe_encode(pt.get_string()).decode())
        else:
            print(encodeutils.safe_encode(pt.get_string()))

    extend_description = "Extend a VM duration"

    @args('--id', metavar='<id>', dest='expirationid',
          help='Expiration id')
    def extend(self, expirationid):
        """Extend the expiration of one entry."""
        if not expirationid:
            print("Missing id parameter")
            return
        repositories.setup_database_engine_and_factory()
        repo = repositories.get_vmexpire_repository()
        repo.extend_vm(entity_id=expirationid)
        repositories.commit()
        print("VM expiration successfully extended!")

    remove_description = "Deletes a VM expiration"

    @args('--id', metavar='<expiration-id>', dest='expirationid',
          help='Expiration id')
    def remove(self, expirationid):
        """Delete one expiration entry."""
        if not expirationid:
            # Fixed typo: message was "Missing id paramerer"; now matches
            # extend()/add().
            print("Missing id parameter")
            return
        repositories.setup_database_engine_and_factory()
        repo = repositories.get_vmexpire_repository()
        repo.delete_entity_by_id(entity_id=expirationid)
        repositories.commit()
        print("VM expiration successfully removed!")

    add_description = "Add a VM to the expiration database"

    @args('--id', metavar='<instance-id>', dest='instanceid',
          help='Instance id')
    def add(self, instanceid):
        """Create an expiration entry for an existing instance."""
        if not instanceid:
            print("Missing id parameter")
            return
        repositories.setup_database_engine_and_factory()
        repo = repositories.get_vmexpire_repository()
        instance = repo.add_vm(instanceid)
        if not instance:
            print("Failure to add VM expiration, check logs")
            return
        repositories.commit()
        print("VM expiration successfully generated!")
# Maps each top-level CLI category name to the command class implementing
# its subcommands (consumed by add_command_parsers).
CATEGORIES = {
    'vm': VmExpireCommands,
    'exclude': VmExcludeCommands
}
# Modifying similar code from nova/cmd/manage.py
def methods_of(obj):
    """Collect the public callable attributes of *obj*.

    Returns a list of ``(name, method, description)`` tuples where the
    description comes from a matching ``<name>_description`` attribute on
    the object, or None when absent. Names starting with an underscore are
    skipped.
    """
    return [
        (name, getattr(obj, name), getattr(obj, name + '_description', None))
        for name in dir(obj)
        if not name.startswith('_') and callable(getattr(obj, name))
    ]
# Shamelessly taking same code from nova/cmd/manage.py
def add_command_parsers(subparsers):
    """Add subcommand parser to oslo_config object.

    For every category in CATEGORIES, builds a sub-parser whose actions are
    the public methods of that category's command class (discovered via
    methods_of). Argument specs recorded by the @args decorator are replayed
    into each action parser.
    """
    for category in CATEGORIES:
        command_object = CATEGORIES[category]()
        desc = getattr(command_object, 'description', None)
        parser = subparsers.add_parser(category, description=desc)
        parser.set_defaults(command_object=command_object)
        category_subparsers = parser.add_subparsers(dest='action')
        for (action, action_fn, action_desc) in methods_of(command_object):
            parser = category_subparsers.add_parser(action,
                                                    description=action_desc)
            action_kwargs = []
            for args, kwargs in getattr(action_fn, 'args', []):
                # Assuming dest is the arg name without the leading
                # hyphens if no dest is supplied
                kwargs.setdefault('dest', args[0][2:])
                # Every dest is namespaced with 'action_kwarg_' so option
                # names cannot collide with oslo.config's own attributes;
                # the unprefixed name is remembered so main() can rebuild
                # the real keyword arguments for the dispatch.
                if kwargs['dest'].startswith('action_kwarg_'):
                    action_kwargs.append(
                        kwargs['dest'][len('action_kwarg_'):])
                else:
                    action_kwargs.append(kwargs['dest'])
                    kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
                parser.add_argument(*args, **kwargs)
            parser.set_defaults(action_fn=action_fn)
            parser.set_defaults(action_kwargs=action_kwargs)
            # Remaining positionals are collected as raw action arguments
            # and hidden from the help output.
            parser.add_argument('action_args', nargs='*',
                                help=argparse.SUPPRESS)
# Define subcommand category.
# SubCommandOpt hooks the per-category argparse subparsers (built by
# add_command_parsers) into oslo.config's CLI handling; main() reads the
# selected action back from CONF.category.
category_opt = cfg.SubCommandOpt('category',
                                 title='Command categories',
                                 help='Available categories',
                                 handler=add_command_parsers)
def main():
    """Parse options and call the appropriate class/method.

    Positional action args and action keyword args are collected by
    add_command_parsers under CONF.category; this function rebuilds them
    and dispatches to the selected command method. Exits the process with
    an error message on configuration or action failure.
    """
    CONF = config.new_config()
    CONF.register_cli_opt(category_opt)

    try:
        logging.register_options(CONF)
        logging.setup(CONF, "osvmexpire-manage")
        cfg_files = cfg.find_config_files(project='os-vm-expire')
        CONF(args=sys.argv[1:],
             project='os-vm-expire',
             prog='osvmexpire-manage',
             version=os_vm_expire.version.__version__,
             default_config_files=cfg_files)
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)

    # find sub-command and its arguments
    fn = CONF.category.action_fn
    # BUG FIX: on Python 3 argv entries are already str and have no
    # .decode(); decode only actual bytes (same guard as the kwargs loop).
    fn_args = [arg.decode('utf-8') if isinstance(arg, bytes) else arg
               for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, 'action_kwarg_' + k)
        if v is None:
            continue
        if isinstance(v, bytes):
            v = v.decode('utf-8')
        fn_kwargs[k] = v

    # call the action with the remaining arguments
    try:
        return fn(*fn_args, **fn_kwargs)
    except Exception as e:
        sys.exit("ERROR: %s" % e)


if __name__ == '__main__':
    main()
|
<filename>texbox/cli_tables.py
import argparse
import secrets
from functools import partial
from pathlib import Path
import pandas as pd
from .constants import (
ABBREVIATION_TEMPLATE,
BEGIN_LANDSCAPE_MACRO,
BEGIN_TABLE_MACRO,
BEGIN_TABLE_PARAMS_MACRO,
BREAK_COLUMN_HEADING_TEMPLATE,
CITE_MACRO,
CUSTOM_FOOTER_LEGEND_TEMPLATE,
END_LANDSCAPE_MACRO,
END_TABULAR_MACRO,
LINE_BREAK,
SPACE_MACRO,
TABLE_LABEL_TEMPLATE,
UNICODE_2_MATH_SYMBOL,
)
from .utils import (
CustomHelpFormatter,
dreplace,
is_multi_word_string,
padify,
rreplace,
str2list,
strs2str,
templatify,
templatify_cell,
templatify_col_names,
)
def parse_args():
    """Build the command-line interface and parse ``sys.argv``.

    Returns the parsed argparse namespace. Arguments are registered from
    spec tables in the original declaration order, so the ``--help``
    output is unchanged.
    """
    parser = argparse.ArgumentParser(
        description="Generate a LaTeX table from a bibliography-based file.",
        add_help=True,
        formatter_class=CustomHelpFormatter,
    )
    # Drop argparse's built-in groups so only the two custom groups below
    # show up in the help text (private-API hack kept from the original).
    parser._action_groups.pop()
    required = parser.add_argument_group("required arguments")
    optional = parser.add_argument_group("optional arguments")

    # (flags, add_argument keyword arguments), in declaration order.
    required_specs = [
        (("-i", "--input"),
         dict(required=True, help="The path to the file to be tabulated.",
              metavar="PATH", type=str, dest="input_path")),
        (("-o", "--output"),
         dict(required=True,
              help="The path to the file for the generated LaTeX table.",
              metavar="PATH", type=str, dest="output_path")),
        (("-ck", "--cite-key-col"),
         dict(required=True, help="The column name for the cite key.",
              metavar="COL", type=str)),
        (("-t", "--title-col"),
         dict(required=True, help="The column name for the title.",
              metavar="COL", type=str)),
    ]
    optional_specs = [
        (("-c", "--cols"),
         dict(help="The subset of columns to maintain. By default, all columns are kept except the title column.",
              metavar="COLS", type=str, default=None)),
        (("-a", "--acronym-col-names"),
         dict(help="The subset of columns whose name is an acronym and which must be wrapped in a macro. By default, no column name is considered an acronym.",
              metavar="COLS", type=str, default=None)),
        (("-ac", "--acronym-cols"),
         dict(help="The subset of columns whose comma-separated values are acronyms and which must be wrapped in a macro. By default, no columns are considered to have acronyms.",
              metavar="COLS", type=str, default=None)),
        (("-r", "--rotate"),
         dict(help="Rotate the generated LaTeX table (landscape mode).",
              action="store_true")),
        (("-b", "--break-col-headings"),
         dict(help="Break the column headings of the generated LaTeX table with more than one word.",
              action="store_true")),
        (("-ca", "--caption"),
         dict(help="The caption for the generated LaTeX table.",
              metavar="STR", type=str, default=None)),
        (("-sb", "--sort-by"),
         dict(help="The subset of columns to sort by.",
              metavar="COLS", type=str, default=None)),
        (("-fl", "--footer-legend"),
         dict(help="The path to the file with the footer legend entries.",
              metavar="PATH", type=str, default=None, dest="footer_path")),
        (("-pp", "--table-position-params"),
         dict(help="The position parameters for the table environment. By default, no parameters are specified.",
              metavar="STR", type=str, default="")),
    ]
    for flags, kwargs in required_specs:
        required.add_argument(*flags, **kwargs)
    for flags, kwargs in optional_specs:
        optional.add_argument(*flags, **kwargs)
    return parser.parse_args()
def main():
    """Read a bibliography CSV, transform it, and write a LaTeX table.

    Pipeline (order matters: rename/templating steps depend on each other):
    load -> optional sort -> acronym templating -> cite-key to title rename
    -> LaTeX generation -> optional footer legend -> optional landscape wrap.
    """
    args = parse_args()
    input_path = Path(args.input_path)
    output_path = Path(args.output_path)
    # Either keep everything but the title column, or only the requested
    # columns plus the cite-key column.
    if args.cols is None:
        df = pd.read_csv(input_path)
        df = df.drop(columns=args.title_col)
    else:
        cols = str2list(args.cols) + [args.cite_key_col]
        df = pd.read_csv(input_path, usecols=cols)
    if args.sort_by is not None:
        df = df.sort_values(by=str2list(args.sort_by))
    # Wrap acronym cell values in the abbreviation macro.
    if args.acronym_cols is not None:
        acronym_cols = str2list(args.acronym_cols)
        df[acronym_cols] = df[acronym_cols].applymap(
            partial(templatify_cell, template=ABBREVIATION_TEMPLATE)
        )
    # The cite-key column takes over the title column's name and its values
    # become \cite{...} calls (must happen before column-name templating).
    df = df.rename(columns={args.cite_key_col: args.title_col})
    df[args.title_col] = CITE_MACRO + "{" + df[args.title_col] + "}"
    if args.acronym_col_names is not None:
        df = templatify_col_names(
            df, str2list(args.acronym_col_names), ABBREVIATION_TEMPLATE
        )
    if args.break_col_headings:
        # Only multi-word headings are wrapped with forced line breaks.
        cols = [col for col in df.columns if is_multi_word_string(col)]
        df = templatify_col_names(
            df,
            cols,
            BREAK_COLUMN_HEADING_TEMPLATE,
            pre_col_transform=lambda col: col.replace(" ", LINE_BREAK),
        )
    df = df.replace(UNICODE_2_MATH_SYMBOL, regex=True)
    # Random hex suffix keeps labels unique across multiple generated tables.
    latex_table = df.to_latex(
        index=False,
        escape=False,
        label=templatify(TABLE_LABEL_TEMPLATE, input_path.stem + secrets.token_hex(2)),
        caption=args.caption,
    ).rstrip()
    latex_table = latex_table.replace(
        BEGIN_TABLE_MACRO,
        templatify(BEGIN_TABLE_PARAMS_MACRO, args.table_position_params),
    )
    if args.footer_path is not None:
        # Inject the legend right after the last \end{tabular}.
        footer_legend_path = Path(args.footer_path)
        footer_legend = footer_legend_path.read_text().strip()
        footer_legend = dreplace(footer_legend, UNICODE_2_MATH_SYMBOL).replace(
            "\\\\", "\\"
        )
        footer_legend = templatify(
            CUSTOM_FOOTER_LEGEND_TEMPLATE,
            footer_legend.replace("\n", padify(SPACE_MACRO)),
        )
        latex_table = rreplace(
            latex_table, END_TABULAR_MACRO, strs2str(END_TABULAR_MACRO, footer_legend)
        )
    output_path.write_text(
        strs2str(
            BEGIN_LANDSCAPE_MACRO if args.rotate else None,
            latex_table,
            END_LANDSCAPE_MACRO if args.rotate else None,
        )
    )


if __name__ == "__main__":
    main()
|
<filename>games/necrowar/tile.py
# Tile: A Tile in the game that makes up the 2D map grid.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from typing import List, Optional
from games.necrowar.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Tile(GameObject):
    """The class representing the Tile in the Necrowar game.

    A Tile in the game that makes up the 2D map grid.
    """

    def __init__(self):
        """Initializes a Tile with basic logic as provided by the Creer code generator.
        """
        GameObject.__init__(self)

        # private attributes to hold the properties so they appear read only
        self._corpses = 0
        self._is_castle = False
        self._is_gold_mine = False
        self._is_grass = False
        self._is_island_gold_mine = False
        self._is_path = False
        self._is_river = False
        self._is_tower = False
        self._is_unit_spawn = False
        self._is_wall = False
        self._is_worker_spawn = False
        self._num_ghouls = 0
        self._num_hounds = 0
        self._num_zombies = 0
        self._owner = None
        # Neighbor links and occupants start as None; presumably populated
        # by the game client from server state — confirm in the framework.
        self._tile_east = None
        self._tile_north = None
        self._tile_south = None
        self._tile_west = None
        self._tower = None
        self._unit = None
        self._x = 0
        self._y = 0

    @property
    def corpses(self) -> int:
        """int: The amount of corpses on this tile.
        """
        return self._corpses

    @property
    def is_castle(self) -> bool:
        """bool: Whether or not the tile is a castle tile.
        """
        return self._is_castle

    @property
    def is_gold_mine(self) -> bool:
        """bool: Whether or not the tile is considered to be a gold mine or not.
        """
        return self._is_gold_mine

    @property
    def is_grass(self) -> bool:
        """bool: Whether or not the tile is considered grass or not (Workers can walk on grass).
        """
        return self._is_grass

    @property
    def is_island_gold_mine(self) -> bool:
        """bool: Whether or not the tile is considered to be the island gold mine or not.
        """
        return self._is_island_gold_mine

    @property
    def is_path(self) -> bool:
        """bool: Whether or not the tile is considered a path or not (Units can walk on paths).
        """
        return self._is_path

    @property
    def is_river(self) -> bool:
        """bool: Whether or not the tile is considered a river or not.
        """
        return self._is_river

    @property
    def is_tower(self) -> bool:
        """bool: Whether or not the tile is considered a tower or not.
        """
        return self._is_tower

    @property
    def is_unit_spawn(self) -> bool:
        """bool: Whether or not the tile is the unit spawn.
        """
        return self._is_unit_spawn

    @property
    def is_wall(self) -> bool:
        """bool: Whether or not the tile can be moved on by workers.
        """
        return self._is_wall

    @property
    def is_worker_spawn(self) -> bool:
        """bool: Whether or not the tile is the worker spawn.
        """
        return self._is_worker_spawn

    @property
    def num_ghouls(self) -> int:
        """int: The amount of Ghouls on this tile.
        """
        return self._num_ghouls

    @property
    def num_hounds(self) -> int:
        """int: The amount of Hounds on this tile.
        """
        return self._num_hounds

    @property
    def num_zombies(self) -> int:
        """int: The amount of Zombies on this tile.
        """
        return self._num_zombies

    @property
    def owner(self) -> Optional['games.necrowar.player.Player']:
        """games.necrowar.player.Player or None: Which player owns this tile, only applies to grass tiles for workers, NULL otherwise.
        """
        return self._owner

    @property
    def tile_east(self) -> Optional['games.necrowar.tile.Tile']:
        """games.necrowar.tile.Tile or None: The Tile to the 'East' of this one (x+1, y). None if out of bounds of the map.
        """
        return self._tile_east

    @property
    def tile_north(self) -> Optional['games.necrowar.tile.Tile']:
        """games.necrowar.tile.Tile or None: The Tile to the 'North' of this one (x, y-1). None if out of bounds of the map.
        """
        return self._tile_north

    @property
    def tile_south(self) -> Optional['games.necrowar.tile.Tile']:
        """games.necrowar.tile.Tile or None: The Tile to the 'South' of this one (x, y+1). None if out of bounds of the map.
        """
        return self._tile_south

    @property
    def tile_west(self) -> Optional['games.necrowar.tile.Tile']:
        """games.necrowar.tile.Tile or None: The Tile to the 'West' of this one (x-1, y). None if out of bounds of the map.
        """
        return self._tile_west

    @property
    def tower(self) -> Optional['games.necrowar.tower.Tower']:
        """games.necrowar.tower.Tower or None: The Tower on this Tile if present, otherwise None.
        """
        return self._tower

    @property
    def unit(self) -> Optional['games.necrowar.unit.Unit']:
        """games.necrowar.unit.Unit or None: The Unit on this Tile if present, otherwise None.
        """
        return self._unit

    @property
    def x(self) -> int:
        """int: The x (horizontal) position of this Tile.
        """
        return self._x

    @property
    def y(self) -> int:
        """int: The y (vertical) position of this Tile.
        """
        return self._y

    def res(self, num: int) -> bool:
        """Resurrect the corpses on this tile into Zombies.

        Args:
            num (int): Number of zombies to resurrect.

        Returns:
            bool: True if successful res, False otherwise.
        """
        # _run_on_server is inherited from GameObject (defined elsewhere).
        return self._run_on_server('res', {
            'num': num
        })

    def spawn_unit(self, title: str) -> bool:
        """Spawns a fighting unit on the correct tile.

        Args:
            title (str): The title of the desired unit type.

        Returns:
            bool: True if successfully spawned, False otherwise.
        """
        return self._run_on_server('spawnUnit', {
            'title': title
        })

    def spawn_worker(self) -> bool:
        """Spawns a worker on the correct tile.

        Returns:
            bool: True if successfully spawned, False otherwise.
        """
        return self._run_on_server('spawnWorker', {
        })

    directions = ["North", "East", "South", "West"]
    """list[str]: The valid directions that tiles can be in, "North", "East", "South", or "West"
    """

    def get_neighbors(self) -> List['games.necrowar.tile.Tile']:
        """Gets the neighbors of this Tile

        Returns:
            list[games.necrowar.tile.Tile]: The list of neighboring Tiles of this Tile.
        """
        neighbors = []
        # Dynamically resolves tile_north / tile_east / tile_south / tile_west.
        for direction in Tile.directions:
            neighbor = getattr(self, "tile_" + direction.lower())
            if neighbor:
                neighbors.append(neighbor)
        return neighbors

    def is_pathable(self) -> bool:
        """Checks if a Tile is pathable to units

        Returns:
            bool: True if pathable, False otherwise.
        """
        # <<-- Creer-Merge: is_pathable_builtin -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
        return False  # DEVELOPER ADD LOGIC HERE
        # <<-- /Creer-Merge: is_pathable_builtin -->>

    def has_neighbor(self, tile: 'games.necrowar.tile.Tile') -> bool:
        """Checks if this Tile has a specific neighboring Tile.

        Args:
            tile (games.necrowar.tile.Tile): The Tile to check against.

        Returns:
            bool: True if the tile is a neighbor of this Tile, False otherwise
        """
        return bool(tile and tile in self.get_neighbors())

    # <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
    # if you want to add any client side logic (such as state checking functions) this is where you can add them
    # <<-- /Creer-Merge: functions -->>
|
<filename>src/utility.py
import pickle
from typing import Tuple
from src.model import Piece, Board, State
from src.constant import ShapeConstant, GameConstant
def dump(obj, path):
    """
    [DESC]
    Function to dump Object to a file via pickle
    [PARAMS]
    obj: Object -> objects you want dump
    path: str -> destination file path
    """
    # Context manager ensures the handle is closed (and data flushed) even
    # if pickling raises; the original left the file open.
    with open(path, "wb") as f:
        pickle.dump(obj, f)
def is_out(board: Board, row: int, col: int) -> bool:
    """
    [DESC]
    Function to see if the piece (row, col) is outside of the board
    [PARAMS]
    board: Board -> current board
    row: int -> row to be checked
    col: int -> column to be checked
    [RETURN]
    True if outside board
    False if inside board
    """
    inside = 0 <= row < board.row and 0 <= col < board.col
    return not inside
def is_full(board: Board) -> bool:
    """
    [DESC]
    Function to see if current board is full of pieces
    [PARAMS]
    board: Board -> current board
    [RETURN]
    True if board is full
    False if board is not full
    """
    # Full iff no cell is still blank; all() short-circuits on the first
    # blank cell, matching the original early return.
    return all(
        board[r, c].shape != ShapeConstant.BLANK
        for r in range(board.row)
        for c in range(board.col)
    )
def check_streak(board: Board, row: int, col: int) -> Tuple[str, Tuple[str, str]]:
    """
    [DESC]
    Function to check streak from row, col in current board
    [PARAMS]
    board: Board -> current board
    row: int -> row
    col: int -> column
    [RETURN]
    None if the row, col in a board isn't filled with piece, or no streak found
    Tuple[prior, (shape, color)] match with player set if streak found and cause of win
    """
    piece = board[row, col]
    if piece.shape == ShapeConstant.BLANK:
        return None

    # The eight ray directions to walk from (row, col).
    streak_way = [(-1, 0), (1, 0), (0, -1), (0, 1),
                  (-1, -1), (-1, 1), (1, -1), (1, 1)]
    for prior in GameConstant.WIN_PRIOR:
        mark = 0
        for row_ax, col_ax in streak_way:
            row_ = row + row_ax
            col_ = col + col_ax
            # Walk N_COMPONENT_STREAK - 1 further cells along this ray;
            # any mismatch or out-of-bounds cell aborts this direction.
            for _ in range(GameConstant.N_COMPONENT_STREAK - 1):
                if is_out(board, row_, col_):
                    mark = 0
                    break

                # For SHAPE priority all cells must share the shape; for
                # COLOR priority all cells must share the color.
                shape_condition = (
                    prior == GameConstant.SHAPE
                    and piece.shape != board[row_, col_].shape
                )
                color_condition = (
                    prior == GameConstant.COLOR
                    and piece.color != board[row_, col_].color
                )
                if shape_condition or color_condition:
                    mark = 0
                    break

                row_ += row_ax
                col_ += col_ax
                mark += 1

            # NOTE(review): mark is not reset at the start of each direction,
            # only on a break. After a completed direction that matched no
            # player below, mark carries over and can exceed the threshold —
            # confirm whether this accumulation is intended.
            if mark == GameConstant.N_COMPONENT_STREAK - 1:
                player_set = [
                    (GameConstant.PLAYER1_SHAPE, GameConstant.PLAYER1_COLOR),
                    (GameConstant.PLAYER2_SHAPE, GameConstant.PLAYER2_COLOR),
                ]
                # Attribute the streak to the player owning the matching
                # shape (SHAPE priority) or color (COLOR priority).
                for player in player_set:
                    if prior == GameConstant.SHAPE:
                        if piece.shape == player[0]:
                            return (prior, player)
                    elif prior == GameConstant.COLOR:
                        if piece.color == player[1]:
                            return (prior, player)
def is_win(board: Board) -> Tuple[str, str]:
    """
    [DESC]
    Function to check if player won
    [PARAMS]
    board: Board -> current board
    [RETURN]
    None if there is no streak
    Tuple[shape, color] match with player set if there is a streak
    """
    # A streak on the highest-priority criterion wins immediately; a
    # lower-priority streak is kept as fallback until the scan finishes.
    fallback = None
    for r in range(board.row):
        for c in range(board.col):
            result = check_streak(board, r, c)
            if not result:
                continue
            if result[0] == GameConstant.WIN_PRIOR[0]:
                return result[1]
            fallback = result[1]
    return fallback
def place(state: State, n_player: int, shape: str, col: str) -> int:
    """
    [DESC]
    Function to place piece in board (bottom-most empty cell of a column)
    [PARAMS]
    state = current state in the game (mutated on success)
    n_player = which player (player 1 or 2)
    shape = shape
    col = which col
    [RETURN]
    -1 if placement is invalid (quota exhausted or column full)
    int(row) if placement is valid
    """
    player = state.players[n_player]
    if player.quota[shape] == 0:
        return -1
    # Scan bottom-up for the first blank cell in the column.
    row = state.board.row - 1
    while row >= 0:
        if state.board[row, col].shape == ShapeConstant.BLANK:
            piece = Piece(shape, GameConstant.PLAYER_COLOR[n_player])
            state.board.set_piece(row, col, piece)
            player.quota[shape] -= 1
            return row
        row -= 1
    return -1
def place_debug(state: State, n_player: int, shape: str, col: str) -> int:
    """
    [DESC]
    Function to place piece in board, printing a diagnostic when the
    placement fails (empty shape quota or full column)
    [PARAMS]
    state = current state in the game
    n_player = which player (player 1 or 2)
    shape = shape
    col = which col
    [RETURN]
    -1 if placement is invalid
    int(row) if placement is valid
    """
    # Check the quota first so the failure cause can be reported precisely;
    # the actual placement is delegated to place() instead of duplicating
    # its loop (same mutations, same return values).
    if state.players[n_player].quota[shape] == 0:
        print('Shape:', shape, 'is empty!')
        return -1
    row = place(state, n_player, shape, col)
    if row == -1:
        # Quota was fine, so the only remaining failure is a full column.
        print('Column', col, 'is full')
    return row
|
#pip install pysqlite3
# создаем базу
class Sql_modul():
    """Minimal SQLite helper storing rows in a single ``records`` table.

    db_name: path of the sqlite database file.
    config_db: comma-separated column declarations for CREATE TABLE, e.g.
        'id INT, name TEXT, val INT'. The first column is used as an
        auto-incremented row counter by insert_record(), and read_data()
        filters on a column named ``name``.
    """

    def __init__(self, db_name, config_db):
        self.db_name = db_name      # path to the sqlite file
        self.config_db = config_db  # column declarations for CREATE TABLE

    # create the database
    def create_db(self):
        """Create the database file, print the SQLite version, then create
        the ``records`` table. Exits the process on SQLite errors."""
        import sqlite3 as lite
        import sys

        connect = None
        try:
            connect = lite.connect(self.db_name)
            cur = connect.cursor()
            cur.execute('SELECT SQLITE_VERSION()')
            data = cur.fetchone()[0]
            print(f"SQLite version: {data}")
            connect.commit()
        except lite.Error as e:
            print(f"Error {e.args[0]}:")
            sys.exit(1)
        finally:
            # Always release the handle, even on error (the original
            # leaked it on the error path).
            if connect is not None:
                connect.close()
        self.create_db_table()

    # create the table
    def create_db_table(self):
        """Create the ``records`` table using the configured columns."""
        import sqlite3 as lite

        connect = lite.connect(self.db_name)
        try:
            cur = connect.cursor()
            # config_db is trusted configuration (DDL column definitions),
            # which cannot be passed as bound parameters.
            cur.execute("CREATE TABLE records(" + self.config_db + ")")
            connect.commit()
        finally:
            connect.close()

    # add a record to the database
    def insert_record(self, data):
        """Append *data* as one row; the first column is set to the current
        row count + 1. Creates the database on first use."""
        import sqlite3 as lite
        import sys
        import os.path

        if not os.path.exists(self.db_name):
            self.create_db()
        connect = None
        try:
            connect = lite.connect(self.db_name)
            cursor = connect.cursor()
            cursor.execute("select * from records")
            number_new_rec = len(cursor.fetchall())
            # Placeholder string "(?,...,?)" sized to the configured column
            # count, so inserts work for any schema.
            config_rec = self.create_config_rec()
            cursor.execute("INSERT INTO records VALUES" + config_rec,
                           [number_new_rec + 1] + data)
            connect.commit()
        except lite.Error as e:
            print(f"Error {e.args[0]}:")
            sys.exit(1)
        finally:
            if connect is not None:
                connect.close()

    # build the insert placeholder string; we do not know in advance how
    # many columns the database has, so this keeps inserts generic
    def create_config_rec(self):
        """Return the "(?,?,...)" placeholder string, one '?' per column."""
        n_cols = len(self.config_db.split(','))
        return "(" + ",".join("?" * n_cols) + ")"

    # read records from the database
    def read_data(self, uname):
        """Return all rows whose ``name`` column equals *uname*.

        Creates the database on first use; exits the process on SQLite
        errors (behavior kept from the original).
        """
        import sqlite3 as lite
        import sys
        import os.path

        if not os.path.exists(self.db_name):
            self.create_db()
        connect = None
        try:
            connect = lite.connect(self.db_name)
            cursor = connect.cursor()
            cursor.execute("SELECT * FROM records WHERE name=?", (uname,))
            rows = cursor.fetchall()
        except lite.Error as e:
            print(f"Error {e.args[0]}:")
            sys.exit(1)
        finally:
            if connect is not None:
                connect.close()
        return rows
#create_db("test.db", 'id INT, name TEXT, mid_salary INT, max_salary INT, min_salary INT, common_skills TEXT')
# config_db = 'id INT, name TEXT, mid_salary INT, max_salary INT, min_salary INT, common_skills TEXT'
# data = ["python", 70000 , 120000, 40000, "sqllite3"]
# Sql_clas = Sql_modul("test.db", config_db)
# Sql_clas.insert_record(data)
# Sql_clas.read_data("python") |
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
"""
C13815920 - Appropriate component dependencies are automatically added to node entities
C13767842 - All Gradient nodes can be added to a graph
C17461363 - All Gradient nodes can be removed from a graph
"""
import os
import pytest
# Bail on the test if ly_test_tools doesn't exist.
pytest.importorskip('ly_test_tools')
import ly_test_tools.environment.file_system as file_system
import editor_python_test_tools.hydra_test_utils as hydra
# Folder holding the in-Editor Python scripts that hydra launches for these tests.
test_directory = os.path.join(os.path.dirname(__file__), 'EditorScripts')
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('level', ['tmp_level'])
@pytest.mark.usefixtures("automatic_process_killer")
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
class TestGradientNodes(object):
    """Editor tests covering Landscape Canvas gradient nodes (C13815920, C13767842, C17461363)."""

    @pytest.fixture(autouse=True)
    def setup_teardown(self, request, workspace, project, level):
        # Delete the temp level after the test (finalizer) and also before it,
        # so a leftover level from a previous run does not interfere.
        def teardown():
            file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
        request.addfinalizer(teardown)
        file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)

    @pytest.mark.test_case_id('C13815920')
    @pytest.mark.SUITE_periodic
    def test_LandscapeCanvas_GradientNodes_DependentComponentsAdded(self, request, editor, level, launcher_platform):
        """
        Verifies component dependencies are automatically added to the entities
        created for gradient nodes.
        """
        cfg_args = [level]
        # Log lines the in-editor script must emit for the test to pass.
        expected_lines = [
            "Landscape Canvas pane is open",
            "New graph created",
            "Graph registered with Landscape Canvas",
            "FastNoiseGradientNode created new Entity with all required components",
            "ImageGradientNode created new Entity with all required components",
            "PerlinNoiseGradientNode created new Entity with all required components",
            "RandomNoiseGradientNode created new Entity with all required components"
        ]
        hydra.launch_and_validate_results(request, test_directory, editor, 'GradientNodes_DependentComponentsAdded.py',
                                          expected_lines, cfg_args=cfg_args)

    @pytest.mark.test_case_id('C13767842')
    @pytest.mark.SUITE_periodic
    def test_LandscapeCanvas_GradientNodes_EntityCreatedOnNodeAdd(self, request, editor, level, launcher_platform):
        """
        Verifies all Gradient nodes can be successfully added to a Landscape Canvas graph, and the proper entity
        creation occurs.
        """
        cfg_args = [level]
        expected_lines = [
            "Landscape Canvas pane is open",
            "New graph created",
            "Graph registered with Landscape Canvas",
            "AltitudeGradientNode created new Entity with Altitude Gradient Component",
            "ConstantGradientNode created new Entity with Constant Gradient Component",
            "FastNoiseGradientNode created new Entity with FastNoise Gradient Component",
            "ImageGradientNode created new Entity with Image Gradient Component",
            "PerlinNoiseGradientNode created new Entity with Perlin Noise Gradient Component",
            "RandomNoiseGradientNode created new Entity with Random Noise Gradient Component",
            "ShapeAreaFalloffGradientNode created new Entity with Shape Falloff Gradient Component",
            "SlopeGradientNode created new Entity with Slope Gradient Component",
            "SurfaceMaskGradientNode created new Entity with Surface Mask Gradient Component"
        ]
        hydra.launch_and_validate_results(request, test_directory, editor, 'GradientNodes_EntityCreatedOnNodeAdd.py',
                                          expected_lines, cfg_args=cfg_args)

    @pytest.mark.test_case_id('C17461363')
    @pytest.mark.SUITE_periodic
    def test_LandscapeCanvas_GradientNodes_EntityRemovedOnNodeDelete(self, request, editor, level, launcher_platform):
        """
        Verifies all Gradient nodes can be successfully removed from a Landscape Canvas graph, and the proper entity
        cleanup occurs.
        """
        cfg_args = [level]
        expected_lines = [
            "Landscape Canvas pane is open",
            "New graph created",
            "Graph registered with Landscape Canvas",
            "FastNoiseGradientNode corresponding Entity was deleted when node is removed",
            "AltitudeGradientNode corresponding Entity was deleted when node is removed",
            "ConstantGradientNode corresponding Entity was deleted when node is removed",
            "RandomNoiseGradientNode corresponding Entity was deleted when node is removed",
            "ShapeAreaFalloffGradientNode corresponding Entity was deleted when node is removed",
            "SlopeGradientNode corresponding Entity was deleted when node is removed",
            "PerlinNoiseGradientNode corresponding Entity was deleted when node is removed",
            "ImageGradientNode corresponding Entity was deleted when node is removed",
            "SurfaceMaskGradientNode corresponding Entity was deleted when node is removed"
        ]
        hydra.launch_and_validate_results(request, test_directory, editor, 'GradientNodes_EntityRemovedOnNodeDelete.py',
                                          expected_lines, cfg_args=cfg_args)
|
<filename>google/ads/google_ads/v2/proto/resources/ad_group_label_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/resources/ad_group_label.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/resources/ad_group_label.proto',
package='google.ads.googleads.v2.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v2.resourcesB\021AdGroupLabelProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v2/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V2.Resources\312\002!Google\\Ads\\GoogleAds\\V2\\Resources\352\002%Google::Ads::GoogleAds::V2::Resources'),
serialized_pb=_b('\n<google/ads/googleads_v2/proto/resources/ad_group_label.proto\x12!google.ads.googleads.v2.resources\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"\x82\x01\n\x0c\x41\x64GroupLabel\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12.\n\x08\x61\x64_group\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05label\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValueB\xfe\x01\n%com.google.ads.googleads.v2.resourcesB\x11\x41\x64GroupLabelProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v2/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V2.Resources\xca\x02!Google\\Ads\\GoogleAds\\V2\\Resources\xea\x02%Google::Ads::GoogleAds::V2::Resourcesb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUPLABEL = _descriptor.Descriptor(
name='AdGroupLabel',
full_name='google.ads.googleads.v2.resources.AdGroupLabel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.resources.AdGroupLabel.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ad_group', full_name='google.ads.googleads.v2.resources.AdGroupLabel.ad_group', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label', full_name='google.ads.googleads.v2.resources.AdGroupLabel.label', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=292,
)
_ADGROUPLABEL.fields_by_name['ad_group'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADGROUPLABEL.fields_by_name['label'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
DESCRIPTOR.message_types_by_name['AdGroupLabel'] = _ADGROUPLABEL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroupLabel = _reflection.GeneratedProtocolMessageType('AdGroupLabel', (_message.Message,), dict(
DESCRIPTOR = _ADGROUPLABEL,
__module__ = 'google.ads.googleads_v2.proto.resources.ad_group_label_pb2'
,
__doc__ = """A relationship between an ad group and a label.
Attributes:
resource_name:
The resource name of the ad group label. Ad group label
resource names have the form: ``customers/{customer_id}/adGrou
pLabels/{ad_group_id}~{label_id}``
ad_group:
The ad group to which the label is attached.
label:
The label assigned to the ad group.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.resources.AdGroupLabel)
))
_sym_db.RegisterMessage(AdGroupLabel)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from pytorch_sound.models import register_model, register_model_architecture
from typing import List
#
# Make blocks
#
def weights_init(m):
    """Initialize conv weights ~ N(0, 0.02); BatchNorm2d weights ~ N(1, 0.02) with zero bias."""
    layer_type = m.__class__.__name__
    if "Conv" in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif "BatchNorm2d" in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def WNConv1d(*args, **kwargs):
    """Build a Conv1d wrapped with weight normalization."""
    conv = nn.Conv1d(*args, **kwargs)
    return weight_norm(conv)
def WNConvTranspose1d(*args, **kwargs):
    """Build a ConvTranspose1d wrapped with weight normalization."""
    deconv = nn.ConvTranspose1d(*args, **kwargs)
    return weight_norm(deconv)
class ResnetBlock(nn.Module):
    """Residual block: LReLU -> reflection-padded dilated conv -> LReLU -> 1x1 conv,
    with a learned 1x1 shortcut.

    Padding is (kernel_size - 1) // 2 * dilation so any odd kernel size
    preserves the time dimension. The original hard-coded the pad to
    ``dilation``, which is only correct for kernel_size == 3; for k == 3 the
    two formulas coincide, so existing callers see identical behavior.
    """

    def __init__(self, dim, kernel_size: int, dilation: int = 1):
        super().__init__()
        # "same" padding for odd kernel sizes; equals `dilation` when kernel_size == 3.
        pad = (kernel_size - 1) // 2 * dilation
        self.block = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.ReflectionPad1d(pad),
            WNConv1d(dim, dim, kernel_size=kernel_size, dilation=dilation),
            nn.LeakyReLU(0.2),
            WNConv1d(dim, dim, kernel_size=1),
        )
        self.shortcut = WNConv1d(dim, dim, kernel_size=1)

    def forward(self, x):
        """Return shortcut(x) + block(x); shape-preserving."""
        return self.shortcut(x) + self.block(x)
#
# Build Generator
#
@register_model('generator')
class Generator(nn.Module):
    """Mel-spectrogram to waveform generator.

    A 7-tap input conv is followed by one upsampling stage per entry in
    ``res_kernels`` (transposed conv with that stride, then four dilated
    residual blocks), and a Tanh-bounded 7-tap output conv.
    """

    def __init__(self, mel_dim: int = 80, dim: int = 384, out_dim: int = 4, res_kernels: List[int] = [2, 4, 8]):
        super().__init__()
        # Input projection: reflection pad + 7-tap conv.
        self.in_conv = nn.Sequential(
            nn.ReflectionPad1d(3),
            WNConv1d(mel_dim, dim, kernel_size=7, padding=0)
        )
        # Body: one upsampling stage per ratio, halving channels each time.
        self.res_stack = nn.ModuleList()
        self.res_params = res_kernels
        dilations = [3 ** level for level in range(4)]
        channels = dim
        for up_ratio in self.res_params:
            layers = [
                nn.LeakyReLU(0.2),
                WNConvTranspose1d(
                    channels,
                    channels // 2,
                    kernel_size=up_ratio * 2,
                    stride=up_ratio,
                    padding=up_ratio // 2 + up_ratio % 2,
                    output_padding=up_ratio % 2,
                ),
            ]
            layers.extend(ResnetBlock(channels // 2, 3, dilation=d) for d in dilations)
            self.res_stack.append(nn.Sequential(*layers))
            channels //= 2
        # Output projection with Tanh to bound the waveform.
        self.out = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.ReflectionPad1d(3),
            WNConv1d(channels, out_dim, kernel_size=7, padding=0),
            nn.Tanh()
        )
        self.apply(weights_init)

    def forward(self, mel: torch.Tensor) -> torch.Tensor:
        """Map a mel tensor through the input conv, all upsampling stages and the output conv."""
        hidden = self.in_conv(mel)
        for up_stage in self.res_stack:
            hidden = up_stage(hidden)
        return self.out(hidden)
class DiscriminatorBlock(nn.Module):
    """Single-scale waveform discriminator; forward returns every layer's feature map."""

    def __init__(self):
        super().__init__()
        layers = [
            nn.Sequential(
                WNConv1d(1, 16, 15, padding=7),
                nn.LeakyReLU(0.2)
            ),
            nn.Sequential(
                WNConv1d(16, 64, 41, stride=4, groups=4, padding=4 * 5),
                nn.LeakyReLU(0.2)
            ),
            nn.Sequential(
                WNConv1d(64, 256, 41, stride=4, groups=16, padding=4 * 5),
                nn.LeakyReLU(0.2)
            ),
            nn.Sequential(
                WNConv1d(256, 512, 41, stride=4, groups=64, padding=4 * 5),
                nn.LeakyReLU(0.2)
            ),
            nn.Sequential(
                WNConv1d(512, 512, 5, padding=2),
                nn.LeakyReLU(0.2)
            ),
            # Final 1-channel projection (no activation).
            WNConv1d(512, 1, 3, padding=1),
        ]
        self.module_list = nn.ModuleList(layers)

    def forward(self, x):
        """Run all layers, collecting each intermediate output for feature matching."""
        feature_maps = []
        out = x
        for layer in self.module_list:
            out = layer(out)
            feature_maps.append(out)
        return feature_maps
@register_model('discriminator')
class Discriminator(nn.Module):
    """Multi-scale discriminator: three DiscriminatorBlocks on progressively
    average-pooled versions of the input.

    Bug fix: the original built the scales as ``[DiscriminatorBlock()] * 3``,
    which repeats the SAME module object three times, so all scales shared one
    set of weights. Each scale now gets its own independent block.
    """

    def __init__(self):
        super().__init__()
        # One independent block per scale (list-multiplication would alias them).
        self.blocks = nn.ModuleList([DiscriminatorBlock() for _ in range(3)])
        # 2x downsampling between scales.
        self.downsample = nn.AvgPool1d(4, stride=2, padding=1, count_include_pad=False)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Return the concatenated per-layer feature maps of every scale."""
        results = []
        for idx, block in enumerate(self.blocks):
            results.extend(block(x))
            if idx < len(self.blocks) - 1:
                x = self.downsample(x)
        return results
@register_model_architecture('generator', 'generator_mb_16k')
def generator_mb_16k():
    """Hyper-parameters for the 16k variant (upsample ratios 2x5x5).

    Bug fix: this function was previously also named ``generator_mb`` and was
    silently shadowed by the identically named definition below (F811); it is
    renamed to match its registered architecture name. Registration itself was
    unaffected because the decorator captures the function at definition time.
    """
    return {
        'mel_dim': 80,
        'dim': 384,
        'out_dim': 4,
        'res_kernels': [2, 5, 5]
    }
@register_model_architecture('generator', 'generator_mb')
def generator_mb():
    """Default hyper-parameters for the 'generator_mb' architecture (upsample ratios 2x4x8)."""
    return {
        'mel_dim': 80,
        'dim': 384,
        'out_dim': 4,
        'res_kernels': [2, 4, 8]
    }
@register_model_architecture('discriminator', 'discriminator_base')
def discriminator_base():
    """Base discriminator architecture: no hyper-parameter overrides."""
    return {}
|
# encoding: utf-8
from __future__ import unicode_literals
import copy
import unittest
from datetime import datetime
from io import BytesIO
from threading import Thread
from mock import Mock
import pdef
from pdef.rpc import *
from pdef.tests.messages.protocol import *
from pdef.tests.interfaces.protocol import *
class TestRpcProtocol(unittest.TestCase):
    """Tests mapping proxy invocations to RpcRequests and requests back to invocations."""

    def setUp(self):
        # Identity handler: the proxy hands back the captured invocation object.
        handler = lambda inv: inv
        self.proxy = pdef.proxy(TestInterface, handler)
        self.protocol = RpcProtocol()

    # get_request.
    def test_get_request(self):
        invocation = self.proxy.method(1, 2)
        request = self.protocol.get_request(invocation)
        assert request.method == GET
        assert request.path == '/method'
        assert request.query == {'arg0': '1', 'arg1': '2'}
        assert request.post == {}

    def test_get_request__query(self):
        invocation = self.proxy.query(1, 2)
        request = self.protocol.get_request(invocation)
        assert request.method == GET
        assert request.path == '/query'
        assert request.query == {'arg0': '1', 'arg1': '2'}
        assert request.post == {}

    def test_get_request__post(self):
        # @post methods carry their arguments in the POST body, not the query.
        invocation = self.proxy.post(1, 2)
        request = self.protocol.get_request(invocation)
        assert request.method == POST
        assert request.path == '/post'
        assert request.query == {}
        assert request.post == {'arg0': '1', 'arg1': '2'}

    def test_get_request__forbid_none_path_args(self):
        # None is not serializable into a path segment and must be rejected.
        invocation = self.proxy.interface0(None, None).string0("hello, world")
        self.assertRaises(ValueError, self.protocol.get_request, invocation)

    def test_get_request__chained_methods(self):
        # Chained interface calls embed their args as path segments.
        invocation = self.proxy.interface0(1, 2).method(3, 4)
        request = self.protocol.get_request(invocation)
        assert request.method == GET
        assert request.path == '/interface0/1/2/method'
        assert request.query == {'arg0': '3', 'arg1': '4'}
        assert request.post == {}

    def test_get_request__urlencode_args(self):
        # Non-ASCII argument values survive as query parameters.
        invocation = self.proxy.string0('Привет')
        request = self.protocol.get_request(invocation)
        assert request.path == '/string0'
        assert request.query == {'text': 'Привет'}

    # to_json.
    def test_to_json__string_no_quotes(self):
        # Primitive values are serialized without surrounding JSON quotes.
        result = self.protocol._to_json('Привет," мир!', descriptors.string0)
        assert result == 'Привет,\\\" мир!'

    def test_to_json__datetime_no_quotes(self):
        result = self.protocol._to_json(datetime(1970, 1, 1, 0, 0, 10), descriptors.datetime0)
        assert result == '1970-01-01T00:00:10Z'

    def test_to_json__enum_no_quotes(self):
        result = self.protocol._to_json(TestEnum.ONE, TestEnum.descriptor)
        assert result == 'one'

    # get_invocation.
    def test_get_invocation(self):
        request = RpcRequest(path='/method', query={'arg0': '1', 'arg1': '2'})
        invocation = self.protocol.get_invocation(request, TestInterface.descriptor)
        assert invocation.method.name == 'method'
        assert invocation.kwargs == {'arg0': 1, 'arg1': 2}

    def test_get_invocation__query_method(self):
        # Missing optional query args default to None.
        request = RpcRequest(path='/query', query={'arg0': '1'})
        invocation = self.protocol.get_invocation(request, TestInterface.descriptor)
        assert invocation.method.name == 'query'
        assert invocation.kwargs == {'arg0': 1, 'arg1': None}

    def test_get_invocation__post_method(self):
        request = RpcRequest(POST, path='/post', post={'arg0': '1'})
        invocation = self.protocol.get_invocation(request, TestInterface.descriptor)
        assert invocation.method.name == 'post'
        assert invocation.kwargs == {'arg0': 1, 'arg1': None}

    def test_get_invocation__post_method_not_allowed(self):
        # A @post method invoked via GET must fail with 405.
        request = RpcRequest(GET, path='/post', post={})
        try:
            self.protocol.get_invocation(request, TestInterface.descriptor)
            self.fail()
        except RpcException as e:
            assert e.status == http_codes.METHOD_NOT_ALLOWED

    def test_get_invocation__chained_method_index(self):
        # Path segments are parsed into a chain of invocations.
        request = RpcRequest(path='/interface0/1/2/query', query={'arg0': '3'})
        chain = self.protocol.get_invocation(request, TestInterface.descriptor).to_chain()
        invocation0 = chain[0]
        invocation1 = chain[1]
        assert len(chain) == 2
        assert invocation0.method.name == 'interface0'
        assert invocation0.kwargs == {'arg0': 1, 'arg1': 2}
        assert invocation1.method.name == 'query'
        assert invocation1.kwargs == {'arg0': 3, 'arg1': None}

    def test_get_invocation__last_method_not_terminal(self):
        # A chain that ends on an interface (non-terminal) method is a 400.
        request = RpcRequest(path='/interface0/1/2')
        try:
            self.protocol.get_invocation(request, TestInterface.descriptor)
            self.fail()
        except RpcException as e:
            assert e.status == http_codes.BAD_REQUEST

    def test_get_invocation__query_args(self):
        request = RpcRequest(path='/string0', query={'text': 'Привет'})
        invocation = self.protocol.get_invocation(request, TestInterface.descriptor)
        assert invocation.method.name == 'string0'
        assert invocation.kwargs == {'text': 'Привет'}

    def test_get_invocation__query_args_with_slashes(self):
        # Slashes inside query values must not be treated as path separators.
        request = RpcRequest(path='/string0', query={'text': 'Привет/мир'})
        invocation = self.protocol.get_invocation(request, TestInterface.descriptor)
        assert invocation.method.name == 'string0'
        assert invocation.kwargs == {'text': 'Привет/мир'}

    # from_json.
    def test_from_json(self):
        message = TestMessage(string0='Привет', bool0=True, int0=123)
        json = message.to_json()
        result = self.protocol._from_json(json, TestMessage.descriptor)
        assert result == message

    def test_from_json__unquoted_string(self):
        result = self.protocol._from_json('Привет', descriptors.string0)
        assert result == 'Привет'
class TestRpcClient(unittest.TestCase):
    """Tests for the HTTP client: building requests and parsing responses."""

    def setUp(self):
        self.session = Mock()
        self.client = rpc_client(TestInterface, 'http://localhost:8080', session=self.session)

    def test_build_request(self):
        rpc_req = RpcRequest(POST, path='/method',
                             query={'key': 'value', 'arg0': '1', 'arg1': '2'},
                             post={'key': 'value'})
        req = self.client._build_request(rpc_req)
        assert req.method == POST
        assert req.url == 'http://localhost:8080/method'
        assert req.data == {'key': 'value'}
        assert req.params == {'key': 'value', 'arg0': '1', 'arg1': '2'}

    def test_parse_response__ok(self):
        # Successful payloads arrive wrapped in a {"data": ...} envelope.
        response = requests.Response()
        response.status_code = http_codes.OK
        response._content = b'{"data": 123}'
        result = self.client._parse_response(response, descriptors.int32)
        assert result == 123

    def test_parse_response__application_exc(self):
        # 422 responses carry a serialized application exception in "error".
        exc = TestException('Test exception')
        response = requests.Response()
        response.status_code = http_codes.UNPROCESSABLE_ENTITY
        response._content = b'{"error": {"text": "Test exception"}}'
        try:
            self.client._parse_response(response, descriptors.int32, TestException.descriptor)
            self.fail()
        except TestException as e:
            assert e == exc

    def test_parse_response__server_error(self):
        # Any other HTTP error surfaces as an RpcException with the raw body text.
        response = requests.Response()
        response.status_code = http_codes.NOT_FOUND
        response._content = 'Method not found'.encode('utf-8')
        try:
            self.client._parse_response(response, None, None)
            self.fail()
        except RpcException as e:
            assert e.status == http_codes.NOT_FOUND
            assert e.message == 'Method not found'
class TestRpcHandler(unittest.TestCase):
    """Tests for the server-side handler dispatching RpcRequests to a service."""

    def setUp(self):
        self.service = Mock()
        self.handler = RpcHandler(TestInterface, self.service)

    def test_handle__rpc_exception(self):
        # Unknown paths are rejected with a 400 before touching the service.
        try:
            request = RpcRequest(path='/wrong/method')
            self.handler(request)
            self.fail()
        except RpcException as e:
            assert e.status == http_codes.BAD_REQUEST

    def test_handle__ok(self):
        self.service.method = Mock(return_value=3)
        request = RpcRequest(path='/method', query={'arg0': '1', 'arg1': '2'})
        success, result = self.handler(request)
        assert success is True
        assert result.data == 3
        assert not result.has_error
        # The result class is typed with the method's return descriptor.
        assert result.__class__.data.type is descriptors.int32

    def test_handle__application_exception(self):
        # Declared application exceptions become a (False, error-result) pair.
        e = TestException(text='Hello, world')
        self.service.method = Mock(side_effect=e)
        request = RpcRequest(path='/method', query={'arg0': '1', 'arg1': '2'})
        success, result = self.handler(request)
        assert success is False
        assert not result.has_data
        assert result.error == e
        assert result.__class__.error.type is TestException.descriptor

    def test_handle__unexpected_exception(self):
        # Undeclared exceptions propagate unchanged to the caller.
        self.service.method = Mock(side_effect=ValueError)
        request = RpcRequest(path='/method', query={'arg0': '1', 'arg1': '2'})
        try:
            self.handler(request)
            self.fail()
        except ValueError:
            pass
class TestWsgiRpcServer(unittest.TestCase):
    """Tests for the WSGI adapter: environ parsing and response generation."""

    def env(self):
        # Minimal WSGI environ for a GET request to /method0/method1.
        return {
            'REQUEST_METHOD': 'GET',
            'CONTENT_TYPE': 'application/x-www-form-urlencoded',
            'CONTENT_LENGTH': 0,
            'SCRIPT_NAME': '/myapp',
            'PATH_INFO': '/method0/method1'
        }

    def test_handle(self):
        # NOTE(review): '<NAME>' looks like an anonymized fixture placeholder.
        hello = '<NAME>'
        result_class = rpc_result_class(descriptors.string0)
        handler = lambda request: (True, result_class(hello))
        server = wsgi_app(handler)
        start_response = Mock()
        content = server(self.env(), start_response)[0]
        start_response.assert_called_with('200 OK',
                                          [('Content-Type', 'application/json; charset=utf-8'),
                                           ('Content-Length', '%s' % len(content))])

    def test_handle__rpc_exc(self):
        # RpcExceptions map to plain-text responses with their HTTP status.
        def handler(request):
            raise RpcException(http_codes.NOT_FOUND, 'Method not found')
        server = wsgi_app(handler)
        start_response = Mock()
        content = server(self.env(), start_response)[0]
        assert content.decode(UTF8) == 'Method not found'
        start_response.assert_called_with('404 Not Found',
                                          [('Content-Type', 'text/plain; charset=utf-8'),
                                           ('Content-Length', '%s' % len(content))])

    def test_parse_request(self):
        # Query string and urlencoded body are decoded into unicode dicts.
        query = urlencode('привет=мир', '=')
        body = urlencode('пока=мир', '=')
        env = {
            'REQUEST_METHOD': 'POST',
            'CONTENT_TYPE': 'application/x-www-form-urlencoded',
            'CONTENT_LENGTH': len(body),
            'SCRIPT_NAME': '/myapp',
            'PATH_INFO': '/method0/method1',
            'QUERY_STRING': query,
            'wsgi.input': BytesIO(body.encode('utf-8')),
        }
        server = WsgiRpcApp(Mock())
        request = server._parse_request(env)
        assert request.method == 'POST'
        assert request.path == '/method0/method1'
        assert request.query == {'привет': 'мир'}
        assert request.post == {'пока': 'мир'}
class TestIntegration(unittest.TestCase):
    """End-to-end test: real wsgiref server in a thread, real HTTP client against it."""

    def setUp(self):
        from wsgiref.simple_server import make_server
        self.service = Mock()
        handler = rpc_handler(TestSubInterface, self.service)
        app = wsgi_app(handler)
        # Port 0 lets the OS pick a free port; the client reads it back below.
        self.server = make_server('localhost', 0, app)
        self.server_thread = Thread(target=self.server.serve_forever)
        self.server_thread.start()
        url = 'http://localhost:%s' % self.server.server_port
        self.client = rpc_client(TestSubInterface, url).proxy()
        import logging
        FORMAT = '%(name)s %(levelname)s - %(message)s'
        logging.basicConfig(level=logging.WARN, format=FORMAT)

    def tearDown(self):
        # Stop serve_forever so the worker thread can exit.
        self.server.shutdown()

    def test(self):
        client = self.client
        service = self.service
        message = TestMessage('Привет', True, -123)
        exc = TestException('Test exception')
        dt = datetime(2013, 11, 17, 19, 41)
        string_in = 'Привет'
        string_out = 'Пока'
        # NOTE(review): string compare on sys.version is fragile; sys.version_info
        # would be safer, though it behaves correctly for current versions.
        if sys.version > '3':
            # Python 3 wsgiref uses 'iso-8859-1' to decode PATH_INFO
            string_in = 'Hello'
            string_out = 'Goodbye'
        # Stub every service method with a known return value / side effect.
        service.method = Mock(return_value=3)
        service.query = Mock(return_value=7)
        service.post = Mock(return_value=11)
        service.string0 = Mock(return_value=string_out)
        service.datetime0 = Mock(return_value=dt)
        service.enum0 = Mock(return_value=TestEnum.THREE)
        service.message0 = Mock(return_value=copy.deepcopy(message))
        service.interface0 = Mock(return_value=service)
        service.void0 = Mock(return_value=None)
        service.exc0 = Mock(side_effect=copy.deepcopy(exc))
        service.serverError = Mock(side_effect=ValueError('Test exception'))
        service.subMethod = Mock(return_value=None)
        # Round-trip every method type through the live HTTP stack.
        assert client.method(1, 2) == 3
        service.method.assert_called_with(arg0=1, arg1=2)
        assert client.query(3, 4) == 7
        service.query.assert_called_with(arg0=3, arg1=4)
        assert client.post(5, 6) == 11
        service.post.assert_called_with(arg0=5, arg1=6)
        assert client.string0(string_in) == string_out
        service.string0.assert_called_with(text=string_in)
        assert client.datetime0(dt) == dt
        service.datetime0.assert_called_with(dt=dt)
        assert client.enum0(TestEnum.THREE) == TestEnum.THREE
        service.enum0.assert_called_with(enum0=TestEnum.THREE)
        assert client.message0(message) == message
        service.message0.assert_called_with(msg=message)
        assert client.interface0(1, 2).query(3, 4) == 7
        service.interface0.assert_called_with(arg0=1, arg1=2)
        assert client.void0() is None
        service.void0.assert_called_with()
        try:
            client.exc0()
            self.fail()
        except TestException as e:
            assert e == exc
        self.assertRaises(RpcException, client.serverError)
        assert client.subMethod() is None
        service.subMethod.assert_called_with()
|
<gh_stars>1-10
from scipy.integrate import RK45, BDF
import numpy
class Block:
    """Base class for a simulation block with states ("A"), inputs ("U") and outputs ("O").

    Subclasses must override :meth:`state` and :meth:`output`; the base
    implementations now raise ``NotImplementedError`` (a subclass of
    ``Exception``, so existing ``except Exception`` callers still work).
    """

    def __init__(self, name="Block"):
        # dimension counts: "A" = number of states, "U" = inputs, "O" = outputs.
        self.dimension = {"A": 0, "U": 0, "O": 0}
        self.name = name
        self.inputs = []
        self.outputs = []

    def state(self, t, y, u):
        """Return dy/dt for this block's states; must be overridden."""
        raise NotImplementedError("state method should be implemented")

    def output(self, t, y, u, o):
        """Write this block's outputs into *o*; must be overridden."""
        raise NotImplementedError("output method should be implemented")

    def getDiminsion(self):
        # (sic) misspelled name kept: DynamicSystem and other callers use it.
        return self.dimension

    def addInput(self, input):
        """Register a named input and bump the "U" dimension."""
        self.inputs.append(input)
        self.dimension["U"] += 1

    def addOutput(self, output):
        """Register a named output and bump the "O" dimension."""
        self.outputs.append(output)
        self.dimension["O"] += 1

    def setStateDimension(self, dimension):
        """Set the number of continuous states ("A") this block owns."""
        self.dimension["A"] = dimension
class DynamicSystem:
    """Simulates a network of connected Blocks with a SciPy (or fixed-step) ODE solver.

    Fixes vs. the original:
    - ``blocks`` no longer uses a mutable default argument (the ``[]`` default
      was shared between every instance constructed without a block list).
    - The fixed-point convergence test in :meth:`evaluateOutputs` compares the
      ABSOLUTE difference; the signed test declared convergence whenever an
      output merely decreased between iterations.
    """

    def __init__(self, solverType, blocks=None, maxStep=numpy.inf, maxIterations=500):
        # solverType: 'RK45', 'BDF' or 'FixedStep'.
        self.solverType = solverType
        # Bug fix: `blocks=[]` as a default is shared across instances.
        self.blocks = [] if blocks is None else blocks
        self.connections = []      # (outputA, inputB) wiring tuples
        self.inputs = {}           # block name -> {input name -> Signal}
        self.outputs = {}          # block name -> {output name -> Signal}
        self.t = []                # shared time grid, appended after each step
        self.numberOfOutputs = 0
        self.maxIterations = maxIterations
        self.maxStep = maxStep

    def run(self, stopTime):
        """Integrate from t = 0 to *stopTime*, recording every block output."""
        self.uniqueBlockNames()
        self.prepareOutputs()
        self.prepareInputs()
        self.t.clear()
        solverClass = None
        if(self.solverType == 'RK45'):
            solverClass = RK45
        elif(self.solverType == 'BDF'):
            solverClass = BDF
        elif(self.solverType == 'FixedStep'):
            solverClass = FixedStepSolver
        else:
            raise Exception("Solver {} not supported".format(self.solverType))
        solver = solverClass(self.getStepFunction(), 0, numpy.zeros(self.getFullStateDimension()), stopTime, max_step = self.maxStep)
        previousDelta = 0
        # Record the initial outputs at t = 0 before stepping.
        self.evaluateOutputs(0, numpy.zeros(self.getFullStateDimension()))
        self.promoteAllOutputs(0)
        self.t.append(0)
        while solver.status == 'running':
            solver.step()
            t = solver.t
            y = solver.y
            self.evaluateOutputs(t, y)
            self.promoteAllOutputs(t)
            self.t.append(t)
            # Coarse progress report roughly every 5% of simulated time.
            if(int(t/stopTime*100) % 5 < previousDelta):
                print("Current time: {:.2g}s, {:.1f}% finished".format(t, t/stopTime*100))
            previousDelta = int(t/stopTime*100) % 5

    def getFullStateDimension(self):
        """Total number of continuous states across all blocks."""
        dim = 0
        for block in self.blocks:
            dim += block.getDiminsion()["A"]
        return dim

    def getStepFunction(self):
        """Return f(t, y) -> dy assembling each block's state derivative slice."""
        def stepFunction(t, y):
            dy = numpy.zeros(len(y))
            runningNumberOfStates = 0
            # Outputs must be consistent before derivatives are evaluated.
            self.evaluateOutputs(t, y)
            for block in self.blocks:
                blockDimension = block.getDiminsion()["A"]
                if(blockDimension > 0):
                    dy[runningNumberOfStates:runningNumberOfStates + blockDimension] = block.state(t, y[runningNumberOfStates:runningNumberOfStates + blockDimension], self.inputs[block.name])
                runningNumberOfStates += blockDimension
            return dy
        return stepFunction

    def addBlocks(self, blocks):
        """Append *blocks* to the simulated block list."""
        self.blocks.extend(blocks)

    def connect(self, outputA, inputB):
        """Wire a block output tuple to a block input tuple."""
        self.connections.append((outputA, inputB))

    def uniqueBlockNames(self):
        """Suffix duplicate block names with a counter so name lookups are unambiguous."""
        names = {}
        for block in self.blocks:
            if block.name not in names:
                names[block.name] = 1
            else:
                names[block.name] += 1
                block.name += str(names[block.name])

    def prepareInputs(self):
        """Point every wired input at the producing block's output Signal."""
        for connection in self.connections:
            if connection[1][1].name not in self.inputs:
                self.inputs[connection[1][1].name] = {}
            self.inputs[connection[1][1].name][connection[1][0]] = self.outputs[connection[0][1].name][connection[0][0]]

    def prepareOutputs(self):
        """Create one Signal per declared block output on the shared time grid."""
        self.numberOfOutputs = 0
        for block in self.blocks:
            tempDict = {}
            for output in block.outputs:
                tempDict[output] = Signal(self.t)
            self.outputs[block.name] = tempDict
            self.numberOfOutputs += len(block.outputs)

    def evaluateOutputs(self, t, y):
        """Fixed-point iterate all block outputs at (t, y) until they stop changing."""
        previousOutput = numpy.zeros(self.numberOfOutputs)
        converged = False
        iteration = 0
        while((not converged) and (iteration < self.maxIterations)):
            converged = True
            runningOutput = 0
            runningNumberOfStates = 0
            for block in self.blocks:
                tempDict = {}
                for oName in block.outputs:
                    tempDict[oName] = 0
                block.output(t, y[runningNumberOfStates:runningNumberOfStates + block.getDiminsion()["A"]], self.inputs[block.name] if block.name in self.inputs else None, tempDict)
                runningNumberOfStates += block.getDiminsion()["A"]
                for oName in block.outputs:
                    # Bug fix: compare the ABSOLUTE change; the original signed
                    # test treated any decrease as already converged.
                    if(abs(previousOutput[runningOutput] - tempDict[oName]) > 1e-15):
                        converged = False
                    previousOutput[runningOutput] = tempDict[oName]
                    self.outputs[block.name][oName].addCandidate(t, previousOutput[runningOutput])
                    runningOutput += 1
            iteration += 1
        if(iteration == self.maxIterations):
            raise Exception("Maximum number of iterations reached, possible infinite loop. Consider increasing the number of iterations or introduce a delay element in the loop.")

    def getOutputs(self, block):
        """Return the {output name -> Signal} dict for *block*."""
        return self.outputs[block.name]

    def promoteAllOutputs(self, t):
        """Commit every staged output sample for the accepted solver step at *t*."""
        for blockOutputs in self.outputs.values():
            for output in blockOutputs.values():
                output.promoteCandidate(t)
class Signal:
    """Piecewise-linear signal on a shared time grid, with one staged candidate sample."""

    def __init__(self, t):
        self.values = []
        self.t = t                  # shared time-grid list (owned by DynamicSystem)
        self._candidate = (0, 0)    # (time, value) of the not-yet-promoted sample

    def __getitem__(self, t):
        """Interpolated lookup: 0 before the origin, candidate value past the end."""
        if t < 0:
            return 0
        if not len(self.t):
            return self._candidate[1]
        last_t = self.t[-1]
        if t <= last_t:
            return numpy.interp(t, self.t, self.values)
        cand_t, cand_v = self._candidate
        if t < cand_t:
            # Linear interpolation between the last promoted sample and the candidate.
            last_v = self.values[-1]
            slope = (cand_v - last_v) / (cand_t - last_t)
            return last_v + slope * (t - last_t)
        return cand_v

    def addCandidate(self, t, value):
        """Stage a sample; it becomes permanent once promoteCandidate(t) is called."""
        self._candidate = (t, value)

    def promoteCandidate(self, t):
        """Commit the staged sample; *t* must match the candidate's timestamp."""
        if t != self._candidate[0]:
            raise Exception("Oops, something went wrong!")
        self.values.append(self._candidate[1])
class FixedStepSolver:
    """Fixed-step forward-Euler integrator mimicking the SciPy solver interface
    (``t``, ``y``, ``status``, ``step()``) used by DynamicSystem.run.

    Bug fix: the original advanced ``t`` BEFORE evaluating the derivative,
    computing y_{n+1} = y_n + f(t_{n+1}, y_n) * h. Forward Euler evaluates the
    derivative at the current point: y_{n+1} = y_n + f(t_n, y_n) * h.
    It also updated ``y`` in place, mutating the caller's y0 array; ``y`` is
    now rebound instead.
    """

    def __init__(self, fun, t0, y0, t_bound, max_step):
        # Mirrors the scipy solver constructor signature used by DynamicSystem.
        self.timeStep = max_step
        self.fun = fun
        self.t = t0
        self.y = y0
        self.t_bound = t_bound
        self.status = "running"

    def step(self):
        """Advance one fixed step; status becomes 'finished' once the next step would pass t_bound."""
        # Evaluate the derivative at the CURRENT (t, y), then advance time.
        self.y = self.y + self.fun(self.t, self.y) * self.timeStep
        self.t += self.timeStep
        if(self.t + self.timeStep >= self.t_bound):
            self.status = "finished"
<filename>recsys19_hybridsvd/hybrids.py
import scipy as sp
import numpy as np
from scipy.sparse.linalg import LinearOperator
from sksparse import __version__ as sk_sp_version
from sksparse.cholmod import cholesky as cholesky_decomp_sparse
# Version gate for the sksparse.cholmod features used below. Compare the
# components numerically: the old plain string comparison would wrongly
# report e.g. '0.10.0' as older than '0.4.3'.
assert tuple(int(p) for p in sk_sp_version.split('.') if p.isdigit()) >= (0, 4, 3)
SPARSE_MODE = True
from polara import SVDModel
from polara.recommender.coldstart.models import ItemColdStartEvaluationMixin
from polara.lib.similarity import stack_features
from polara.tools.timing import track_time
from string import Template
from scaledsvd import ScaledSVD
class CholeskyFactor:
    """Adapter around a CHOLMOD factor exposing ``.dot``, ``.T`` and ``.solve``.

    NOTE(review): ``.T`` does not build a new object — it flags *this*
    instance so that the next ``dot``/``solve`` acts as the transpose and
    then resets the flag. Callers must use the ``obj.T.dot(v)`` idiom.
    """

    def __init__(self, factor):
        self._factor = factor
        self._L = None          # lazily cached triangular factor
        self._transposed = False

    @property
    def L(self):
        # Pull (and cache) L from the underlying CHOLMOD factor on demand.
        if self._L is None:
            self._L = self._factor.L()
        return self._L

    @property
    def T(self):
        self._transposed = True
        return self

    def dot(self, v):
        was_transposed = self._transposed
        self._transposed = False
        if was_transposed:
            return self.L.T.dot(self._factor.apply_P(v))
        return self._factor.apply_Pt(self.L.dot(v))

    def solve(self, y):
        factor = self._factor
        was_transposed = self._transposed
        self._transposed = False
        if not was_transposed:
            # Only the transposed solve is needed by the callers.
            raise NotImplementedError
        # Solve L' x = y in permuted space, then undo the permutation.
        return factor.apply_Pt(factor.solve_Lt(y, use_LDLt_decomposition=False))

    def update_inplace(self, A, beta):
        """Refactor in place for A + beta*I and invalidate the cached L."""
        self._factor.cholesky_inplace(A, beta=beta)
        self._L = None
class CholeskyFactorsMixin:
    """Mixin that lazily computes and caches Cholesky factors of the
    user/item similarity matrices.

    Factors are invalidated whenever the underlying data changes (via the
    data object's change event) and recomputed when the feature weight is
    updated.
    """

    def __init__(self, *args, **kwargs):
        self._sparse_mode = SPARSE_MODE
        self.return_factors = True
        super().__init__(*args, **kwargs)
        # One cache slot per entity (user / item), filled lazily.
        entities = [self.data.fields.userid, self.data.fields.itemid]
        self._cholesky = dict.fromkeys(entities)
        self._features_weight = 0.999
        # Invalidate cached factors when the data object reports a change.
        self.data.subscribe(self.data.on_change_event, self._clean_cholesky)

    def _clean_cholesky(self):
        # Drop all cached factors; they are recomputed on next access.
        self._cholesky = {entity:None for entity in self._cholesky.keys()}

    def _update_cholesky(self):
        # Refresh only factors that were already computed.
        for entity, cholesky in self._cholesky.items():
            if cholesky is not None:
                self._update_cholesky_inplace(entity)

    @property
    def features_weight(self):
        """Blend weight between collaborative data and side features."""
        return self._features_weight

    @features_weight.setter
    def features_weight(self, new_val):
        # Changing the weight invalidates both the factors and the model.
        if new_val != self._features_weight:
            self._features_weight = new_val
            self._update_cholesky()
            self._renew_model()

    @property
    def item_cholesky_factor(self):
        """Cholesky factor of the item similarity matrix (or None)."""
        itemid = self.data.fields.itemid
        return self.get_cholesky_factor(itemid)

    @property
    def user_cholesky_factor(self):
        """Cholesky factor of the user similarity matrix (or None)."""
        userid = self.data.fields.userid
        return self.get_cholesky_factor(userid)

    def get_cholesky_factor(self, entity):
        # Compute on first access; None stays None for entities without
        # a similarity matrix.
        cholesky = self._cholesky.get(entity, None)
        if cholesky is None:
            self._update_cholesky_factor(entity)
        return self._cholesky[entity]

    def _update_cholesky_factor(self, entity):
        """Compute the Cholesky decomposition of (S + beta*I) for *entity*."""
        entity_similarity = self.data.get_relations_matrix(entity)
        if entity_similarity is None:
            self._cholesky[entity] = None
        else:
            if self._sparse_mode:
                cholesky_decomp = cholesky_decomp_sparse
                mode = 'sparse'
            else:
                raise NotImplementedError
            # beta shifts the matrix to (weight*S + (1-weight)*I) / weight,
            # keeping it positive definite.
            weight = self.features_weight
            beta = (1.0 - weight) / weight
            if self.verbose:
                print('Performing {} Cholesky decomposition for {} similarity'.format(mode, entity))
            msg = Template('Cholesky decomposition computation time: $time')
            with track_time(verbose=self.verbose, message=msg):
                self._cholesky[entity] = CholeskyFactor(cholesky_decomp(entity_similarity, beta=beta))

    def _update_cholesky_inplace(self, entity):
        """Re-run the decomposition in place after a weight change."""
        entity_similarity = self.data.get_relations_matrix(entity)
        if self._sparse_mode:
            weight = self.features_weight
            beta = (1.0 - weight) / weight
            if self.verbose:
                print('Updating Cholesky decomposition inplace for {} similarity'.format(entity))
            msg = Template(' Cholesky decomposition update time: $time')
            with track_time(verbose=self.verbose, message=msg):
                self._cholesky[entity].update_inplace(entity_similarity, beta)
        else:
            raise NotImplementedError

    def build_item_projector(self, v):
        """Precompute left/right item projectors from the item factors *v*."""
        cholesky_items = self.item_cholesky_factor
        if cholesky_items is not None:
            if self.verbose:
                print(f'Building {self.data.fields.itemid} projector for {self.method}')
            msg = Template(' Solving triangular system: $time')
            with track_time(verbose=self.verbose, message=msg):
                self.factors['items_projector_left'] = cholesky_items.T.solve(v)
            msg = Template(' Applying Cholesky factor: $time')
            with track_time(verbose=self.verbose, message=msg):
                self.factors['items_projector_right'] = cholesky_items.dot(v)

    def get_item_projector(self):
        """Return (left, right) item projectors, or (None, None) if unbuilt."""
        vl = self.factors.get('items_projector_left', None)
        vr = self.factors.get('items_projector_right', None)
        return vl, vr
class HybridSVD(CholeskyFactorsMixin, SVDModel):
    """SVD of the similarity-reweighted rating matrix L_user^T * A * L_item.

    The Cholesky factors come from the mixin; the product is either
    materialized explicitly or wrapped in a matrix-free LinearOperator.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.method = 'HybridSVD'
        # If True, materialize the reweighted matrix before the SVD;
        # otherwise pass a LinearOperator (lower memory, repeated matvecs).
        self.precompute_auxiliary_matrix = False

    def _check_reduced_rank(self, rank):
        # Keep the cached projectors consistent with a reduced rank.
        super()._check_reduced_rank(rank)
        self.round_item_projector(rank)

    def round_item_projector(self, rank):
        """Truncate the cached item projectors to the first *rank* columns."""
        vl, vr = self.get_item_projector()
        if (vl is not None) and (rank < vl.shape[1]):
            self.factors['items_projector_left'] = vl[:, :rank]
            self.factors['items_projector_right'] = vr[:, :rank]

    def build(self, *args, **kwargs):
        if not self._sparse_mode:
            raise(ValueError)
        # the order matters - trigger on_change events first
        svd_matrix = self.get_training_matrix(dtype=np.float64)
        cholesky_items = self.item_cholesky_factor
        cholesky_users = self.user_cholesky_factor
        if self.precompute_auxiliary_matrix:
            # Apply the factors once and hand the dense-ish result to SVD;
            # cached L's are dropped afterwards to free memory.
            if cholesky_items is not None:
                svd_matrix = cholesky_items.T.dot(svd_matrix.T).T
                cholesky_items._L = None
            if cholesky_users is not None:
                svd_matrix = cholesky_users.T.dot(svd_matrix)
                cholesky_users._L = None
            operator = svd_matrix
        else:
            # Matrix-free route: missing factors degrade to identities.
            if cholesky_items is not None:
                L_item = cholesky_items
            else:
                L_item = sp.sparse.eye(svd_matrix.shape[1])
            if cholesky_users is not None:
                L_user = cholesky_users
            else:
                L_user = sp.sparse.eye(svd_matrix.shape[0])
            def matvec(v):
                # (L_user^T A L_item) v
                return L_user.T.dot(svd_matrix.dot(L_item.dot(v)))
            def rmatvec(v):
                # (L_user^T A L_item)^T v
                return L_item.T.dot(svd_matrix.T.dot(L_user.dot(v)))
            operator = LinearOperator(svd_matrix.shape, matvec, rmatvec)
        super().build(*args, operator=operator, **kwargs)
        self.build_item_projector(self.factors[self.data.fields.itemid])

    def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
        """Score a slice of test users: project ratings through the item
        projectors (right then left)."""
        test_matrix, slice_data = self.get_test_matrix(test_data, shape, (start, stop))
        vl, vr = self.get_item_projector()
        scores = test_matrix.dot(vr).dot(vl.T)
        return scores, slice_data
class ScaledHybridSVD(ScaledSVD, HybridSVD):
    """HybridSVD combined with the row/column scaling scheme of ScaledSVD."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.method = 'HybridSVDs'
class HybridSVDColdStart(ItemColdStartEvaluationMixin, HybridSVD):
    """HybridSVD variant evaluated in the item cold-start setting.

    Cold items are folded into the latent space either through raw side
    features (when ``item_features`` is provided) or through the data's
    similarity relations.
    """

    def __init__(self, *args, item_features=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.method = 'HybridSVD(cs)'
        self.item_features = item_features
        self.use_raw_features = item_features is not None

    def build(self, *args, **kwargs):
        # Factors are needed afterwards to fold in cold items.
        super().build(*args, return_factors=True, **kwargs)

    def get_recommendations(self):
        """Rank users for each cold-start item.

        Computes a feature-to-factor map ``w``, folds cold items in via its
        pseudo-inverse, and scores them against all user factors.
        """
        userid = self.data.fields.userid
        u = self.factors[userid]
        v = self.factors['items_projector_right']
        s = self.factors['singular_values']
        if self.use_raw_features:
            item_info = self.item_features.reindex(self.data.index.itemid.training.old.values,
                                                   fill_value=[])
            item_features, feature_labels = stack_features(item_info, normalize=False)
            w = item_features.T.dot(v).T
            cold_info = self.item_features.reindex(self.data.index.itemid.cold_start.old.values,
                                                   fill_value=[])
            cold_item_features, _ = stack_features(cold_info, labels=feature_labels, normalize=False)
        else:
            # Fixed typo: was `item_relathions`, which raised AttributeError
            # whenever no raw item features were supplied.
            w = self.data.item_relations.T.dot(v).T
            cold_item_features = self.data.cold_items_similarity
        # Least-squares fold-in of cold items: F W^T (W W^T)^+
        wwt_inv = np.linalg.pinv(w @ w.T)
        cold_items_factors = cold_item_features.dot(w.T) @ wwt_inv
        scores = cold_items_factors @ (u * s[None, :]).T
        top_similar_users = self.get_topk_elements(scores)
        return top_similar_users
class ScaledHybridSVDColdStart(ScaledSVD, HybridSVDColdStart):
    """Cold-start HybridSVD combined with ScaledSVD's scaling scheme."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.method = 'HybridSVDs(cs)'
<gh_stars>0
# -*- coding=utf-8 -*-
import metadata_parser
try:
from urllib.parse import urlparse
# from urllib.parse import urlencode
except ImportError:
from urlparse import urlparse
# from urllib import urlencode
import unittest
import six
# Verbose logging for local debugging; flip DEBUG_LOGGING to enable.
# (Replaces the old `if False:` dead-code toggle with a named flag.)
DEBUG_LOGGING = False
if DEBUG_LOGGING:
    import logging

    root_logger = logging.getLogger()
    package_logger = logging.getLogger("metadata_parser")
    root_logger.setLevel(logging.DEBUG)
    package_logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()
    root_logger.addHandler(stream_handler)
    package_logger.addHandler(stream_handler)
# URLs that must always parse as valid.
URLS_VALID = [
    "http://example.com",
    "http://example.com/",
    "http://example.com/one",
    "http://example.com/one/two.html",
    "http://foo.example.com",
    "http://example.com:80",
    "http://example.com:80/",
    "http://example.com:80/one",
    "http://example.com:80/one/two.html",
    "http://192.168.1.1",
    "http://192.168.1.1/",
    "http://192.168.1.1:80",
    "http://192.168.1.1:8080",
    "http://192.168.1.1:80/",
    "http://192.168.1.1:8080/",
    "http://192.168.1.1:80/a.html",
    "http://192.168.1.1:8080/a.html",
    "https://example.com",
    "https://example.com/",
    "https://example.com/one",
    "https://example.com/one/two.html",
    "https://foo.example.com",
    "https://example.com:80",
    "https://example.com:80/",
    "https://example.com:80/one",
    "https://example.com:80/one/two.html",
    "https://192.168.1.1",
    "https://192.168.1.1/",
    "https://192.168.1.1:80",
    "https://192.168.1.1:8080",
    "https://192.168.1.1:80/",
    "https://192.168.1.1:8080/",
    "https://192.168.1.1:80/a.html",
    "https://192.168.1.1:8080/a.html",
]
# Localhost-style URLs: valid only when localhosts are explicitly allowed.
URLS_VALID_CONDITIONAL = [
    "http://localhost",
    "http://localhost:80",
    "http://localhost:8000",
    "http://localhost/foo",
    "http://localhost:80/foo",
    "http://localhost:8000/foo",
    "https://localhost",
    "https://localhost:80",
    "https://localhost:8000",
    "https://localhost/foo",
    "https://localhost:80/foo",
    "https://localhost:8000/foo",
    "http://127.0.0.1",
    "http://127.0.0.1:80",
    "http://127.0.0.1:8000",
    "http://127.0.0.1/foo",
    "http://127.0.0.1:80/foo",
    "http://127.0.0.1:8000/foo",
    "https://127.0.0.1",
    "https://127.0.0.1:80",
    "https://127.0.0.1:8000",
    "https://127.0.0.1/foo",
    "https://127.0.0.1:80/foo",
    "https://127.0.0.1:8000/foo",
    "http://0.0.0.0",
    "http://0.0.0.0:80",
    "http://0.0.0.0:8000",
    "http://0.0.0.0/foo",
    "http://0.0.0.0:80/foo",
    "http://0.0.0.0:8000/foo",
    "https://0.0.0.0",
    "https://0.0.0.0:80",
    "https://0.0.0.0:8000",
    "https://0.0.0.0/foo",
    "https://0.0.0.0:80/foo",
    "https://0.0.0.0:8000/foo",
]
# Malformed netlocs / impossible IPs: must always be rejected.
URLS_INVALID = [
    "http://example_com",
    "http://example_com/",
    "http://example_com/one",
    "http://999.999.999.999/",
    "http://999.999.999.999.999/",
    "http://999.999.999.999.999:8080:8080",
    "https://example_com",
    "https://example_com/",
    "https://example_com/one",
    "https://999.999.999.999/",
    "https://999.999.999.999.999/",
    "https://999.999.999.999.999:8080:8080",
]
# Strings that contain only RFC 3986 valid characters...
RFC_REGEX_VALID = [
    """http://user:password@one.example.com/foo/bar;one=two&three=four?foo=bar&biz=bash#foo"""
]
# ...and strings (e.g. stray HTML) that must not match the RFC regex.
RFC_REGEX_INVALID = ["""</p><br /><p>Then l""", """ccurl" style="display:none;" """]
class TestUrlRfcValid(unittest.TestCase):
    """
    python -m unittest tests.url_parsing.TestUrlRfcValid

    Ensures URLs contain rfc valid components
    """

    def test_urls_valid(self):
        for candidate in RFC_REGEX_VALID:
            match = metadata_parser.RE_rfc3986_valid_characters.match(candidate)
            self.assertTrue(match)

    def test_urls_invalid(self):
        for candidate in RFC_REGEX_INVALID:
            match = metadata_parser.RE_rfc3986_valid_characters.match(candidate)
            self.assertTrue(match is None)
class TestUrlParsing(unittest.TestCase):
    """
    python -m unittest tests.url_parsing.TestUrls

    Ensures URLs are parsed correctly as valid/invalid
    """

    def test_urls_valid(self):
        for url in URLS_VALID:
            self.assertTrue(metadata_parser.is_parsed_valid_url(urlparse(url)))

    def test_urls_invalid(self):
        for url in URLS_INVALID:
            self.assertFalse(metadata_parser.is_parsed_valid_url(urlparse(url)))

    def test_urls_valid_conditional(self):
        for url in URLS_VALID_CONDITIONAL:
            parts = urlparse(url)
            # Localhost-style URLs are rejected under strict settings...
            self.assertFalse(
                metadata_parser.is_parsed_valid_url(
                    parts, require_public_netloc=True, allow_localhosts=False
                )
            )
            # ...but accepted when localhosts are explicitly allowed.
            self.assertTrue(
                metadata_parser.is_parsed_valid_url(
                    parts, require_public_netloc=False, allow_localhosts=True
                )
            )
class TestAbsoluteUpgrades(unittest.TestCase):
    """
    python -m unittest tests.url_parsing.TestAbsoluteUpgrades

    Ensures URLs are parsed correctly as valid/invalid
    """

    def test_none_returns_none(self):
        result = metadata_parser.url_to_absolute_url(None, url_fallback=None)
        self.assertEqual(result, None)

    def test_nothing(self):
        result = metadata_parser.url_to_absolute_url(
            "http://example.com", url_fallback="http://example.com"
        )
        self.assertEqual(result, "http://example.com")

    def test_upgrade(self):
        result = metadata_parser.url_to_absolute_url(
            "a.html", url_fallback="http://example.com"
        )
        self.assertEqual(result, "http://example.com/a.html")

    def test_fallback(self):
        result = metadata_parser.url_to_absolute_url(
            None, url_fallback="http://example.com"
        )
        self.assertEqual(result, "http://example.com")
class _DocumentCanonicalsMixin(object):
def _MakeOne(self, url):
"""generates a canonical document"""
doc_base = """<html><head>%(head)s</head><body></body></html>"""
canonical_base = """<link rel='canonical' href='%(canonical)s' />"""
_canonical_html = canonical_base % {"canonical": url}
_doc_html = doc_base % {"head": _canonical_html}
return _doc_html
class TestDocumentCanonicals(unittest.TestCase, _DocumentCanonicalsMixin):
    """
    python -m unittest tests.url_parsing.TestDocumentCanonicals
    """

    def _check(self, url, rel_canonical, rel_expected, **parser_kwargs):
        """Shared scaffold: build a doc whose <link rel=canonical> is
        *rel_canonical*, parse it from *url*, and assert the discrete URL
        equals *rel_expected*. Returns the parser for extra assertions."""
        html_doc = self._MakeOne(rel_canonical)
        parsed = metadata_parser.MetadataParser(url=url, html=html_doc, **parser_kwargs)
        self.assertEqual(parsed.get_discrete_url(), rel_expected)
        return parsed

    def test_canonical_simple(self):
        """someone did their job"""
        self._check(None, "https://example.com/canonical", "https://example.com/canonical")

    def test_canonical_upgrade(self):
        """someone else did their job. not as good, but did their job"""
        self._check("https://example.com", "/canonical", "https://example.com/canonical")

    def test_upgrade_invalid_root(self):
        """
        you had one job...
        """
        self._check("https://example.com", "http://localhost:8080", "https://example.com")

    def test_upgrade_utf8_path(self):
        """
        you had one job... but you didn't read the RFC you shitty third rate enterprise cms
        """
        self._check(
            "https://example.com",
            r"https://example.com/canonical-ü",
            r"https://example.com/canonical-%C3%BC",
            derive_encoding=False,
            default_encoding="utf-8",
            html_encoding="utf-8",
        )

    def test_upgrade_invalid_file(self):
        """
        you had one job...
        if someone lists the canonical as an invalid domain, remount the right domain
        python -m unittest tests.url_parsing.TestDocumentCanonicals.test_upgrade_invalid_file
        """
        self._check("https://example.com/a", "http://localhost:8080", "https://example.com")

    def test_upgrade_invalid_file_b(self):
        """
        you had one job...
        if someone lists the canonical as a different file on an invalid domain, remount the right domain
        """
        self._check("https://example.com/a", "http://localhost:8080/b", "https://example.com/b")

    def test_readme_scenario(self):
        """
        you had one job...
        if someone lists the canonical as an invalid LOCAL domain, remount the right domain
        python -m unittest tests.url_parsing.TestDocumentCanonicals.test_readme_scenario
        """
        rel_canonical = "http://localhost:8000/alt-path/to/foo"
        # ensure we replace the bad domain with the right one
        parsed = self._check(
            "https://example.com/a", rel_canonical, "https://example.com/alt-path/to/foo"
        )
        # ensure support for the legacy behavior (canonical returned as-is)
        parsed_url = parsed.get_discrete_url(require_public_global=False)
        self.assertEqual(parsed_url, rel_canonical)
class TestDocumentCanonicalsRelative(unittest.TestCase, _DocumentCanonicalsMixin):
    """
    python -m unittest tests.url_parsing.TestDocumentCanonicalsRelative
    python -m unittest tests.url_parsing.TestDocumentCanonicalsRelative.test_upgrade_local_a
    python -m unittest tests.url_parsing.TestDocumentCanonicalsRelative.test_upgrade_local_b
    """

    # every case parses the same nested page URL
    _url = "https://example.com/nested/A.html"

    def _check_upgrade(self, rel_canonical, rel_expected):
        """Parse a doc carrying *rel_canonical* against the fixed page URL
        and assert the resolved discrete URL equals *rel_expected*."""
        html_doc = self._MakeOne(rel_canonical)
        parsed = metadata_parser.MetadataParser(url=self._url, html=html_doc)
        self.assertEqual(parsed.get_discrete_url(), rel_expected)

    def test_upgrade_local_a(self):
        self._check_upgrade("/nested/B.html", "https://example.com/nested/B.html")

    def test_upgrade_local_b(self):
        self._check_upgrade("B.html", "https://example.com/nested/B.html")

    def test_upgrade_local_bb(self):
        self._check_upgrade("path/to/B.html", "https://example.com/nested/path/to/B.html")

    def test_upgrade_local_c(self):
        self._check_upgrade("/B.html", "https://example.com/B.html")

    def test_noupgrade_a(self):
        """
        these tests currently require tldextract; otherwise they won't work right.
        """
        rel_canonical = "https://foo.local/B.html"
        rel_expected = None
        html_doc = self._MakeOne(rel_canonical)
        parsed = metadata_parser.MetadataParser(url=self._url, html=html_doc)
        # both getters must reject the non-public canonical, with and
        # without an explicit fallback URL
        for getter in (parsed.get_url_canonical, parsed.get_url_opengraph):
            self.assertEqual(getter(require_public_global=True), rel_expected)
            self.assertEqual(
                getter(require_public_global=True, url_fallback=self._url),
                rel_expected,
            )
class TestFixUnicodeUrls(unittest.TestCase):
    """Checks unicode path components are percent-encoded correctly."""

    def _check_pairs(self, pairs):
        # shared loop: each (raw, expected) pair must round-trip, plus a
        # bytes round-trip under Python 2
        for raw, expected in pairs:
            self.assertEqual(metadata_parser.fix_unicode_url(raw), expected)
            if six.PY2:
                round_tripped = metadata_parser.fix_unicode_url(
                    raw.decode("utf-8"), encoding="utf-8"
                ).encode("utf-8")
                self.assertEqual(round_tripped, expected)

    def test_fix_unicode_path(self):
        self._check_pairs(
            (
                (
                    "https://example.com/2017/12/abcdefgühijklmnop?a=%20foo",
                    "https://example.com/2017/12/abcdefg%C3%BChijklmnop?a=%20foo",
                ),
            )
        )

    def test_fix_unicode_path_leave_unicode_kwargs(self):
        self._check_pairs(
            (
                (
                    "https://example.com/2017/12/abcdefgühijklmnop?a=%20foo&b=ü",
                    "https://example.com/2017/12/abcdefg%C3%BChijklmnop?a=%20foo&b=ü",
                ),
            )
        )
class TestArgsExceptions(unittest.TestCase, _DocumentCanonicalsMixin):
    """
    python -m unittest tests.url_parsing.TestArgsExceptions

    Verifies which combinations of `og_first` / `canonical_first` kwargs
    are accepted by get_discrete_url().
    """

    def _make_parsed(self):
        """Build a parser over a fixed doc with canonical '/B.html'."""
        url = "https://example.com/nested/A.html"
        html_doc = self._MakeOne("/B.html")
        return metadata_parser.MetadataParser(url=url, html=html_doc)

    def test_no_args__good(self):
        self._make_parsed().get_discrete_url()

    def test_og_first__good(self):
        self._make_parsed().get_discrete_url(og_first=True)

    def test_og_first_canonical_first__bad(self):
        parsed = self._make_parsed()
        self.assertRaises(
            ValueError, parsed.get_discrete_url, og_first=True, canonical_first=True
        )

    def test_canonical_first__bad(self):
        parsed = self._make_parsed()
        self.assertRaises(ValueError, parsed.get_discrete_url, canonical_first=True)

    def test_canonical_first__good(self):
        self._make_parsed().get_discrete_url(og_first=False, canonical_first=True)
|
import feedparser
import numpy
import time
import os
import sys
from git import Repo
from datetime import datetime
# Accumulators shared by the feed-parsing helpers below.
shows_list = []
old_shows_list = []
# Markdown building blocks for the generated docs/index.md page.
h2 = "## "
h3 = "### "
dblel = "\n\n"
hoz_sep = "|"
vert_head = "|Show|Total Length|Number of Shows|Average Length|Average Gap|Standard Deviation|Shows Per Year|Monthly Show Output|"
vert_sep = "|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|"
vert_head_old = "|Show|Total Length|Number of Shows|Average Length|"
vert_sep_old = "|:---:|:---:|:---:|:---:|"
# Rendered markdown rows / summary lines, appended to by the parsers.
show_output = []
old_show_output = []
summary_output = []
new_show_output = []
shows_Checked = []
# (unit name, seconds per unit) pairs used by display_time(), largest first.
intervals = (
    ('years', 31536000),
    #('weeks', 604800), # 60 * 60 * 24 * 7
    ('days', 86400), # 60 * 60 * 24
    ('hours', 3600), # 60 * 60
    ('mins', 60),
    ('secs', 1),
)
class Show:
    """Attribute bag for a podcast show.

    Callers use differing attribute sets, e.g. (name, episode) or
    (name, lastCheckedEpisode).
    """

    def __init__(self, **kwds):
        # Accept arbitrary keyword attributes.
        self.__dict__.update(kwds)

    def __repr__(self):
        # Added for debuggability; instances previously printed as opaque objects.
        attrs = ", ".join("{}={!r}".format(k, v) for k, v in sorted(self.__dict__.items()))
        return "Show({})".format(attrs)
def display_time(seconds, granularity=2):
    """Format a duration in seconds as e.g. '1 day 2 hours', keeping at
    most *granularity* leading components."""
    # Unit table inlined from the module-level `intervals` definition so
    # the function is self-contained: (name, seconds per unit), largest first.
    units = (
        ('years', 31536000),
        ('days', 86400),
        ('hours', 3600),
        ('mins', 60),
        ('secs', 1),
    )
    parts = []
    remaining = seconds
    for unit_name, unit_seconds in units:
        count = remaining // unit_seconds
        if count:
            remaining -= count * unit_seconds
            # singularize the unit label when the count is exactly one
            label = unit_name.rstrip('s') if count == 1 else unit_name
            parts.append("{:.0f} {}".format(count, label))
    return ' '.join(parts[:granularity])
def parse_feed(feed_name):
    """Fetch a retired show's Relay FM feed and append its summary cells to
    the global `old_show_output`; returns the total length in seconds."""
    feed_url = 'https://www.relay.fm/' + feed_name + '/feed'
    parsed = feedparser.parse(feed_url)
    episodes = parsed['entries']
    episode_count = len(episodes)
    total_seconds = 0
    for episode in episodes:
        total_seconds += int(float(episode['itunes_duration'])) #1
    # one markdown cell per append; the final cell ends the table row
    old_show_output.append("|**" + parsed['feed']['title'] + "**|")
    old_show_output.append(display_time(total_seconds, 4) + "|")
    old_show_output.append(str(episode_count) + "|")
    average_seconds = total_seconds / episode_count
    old_show_output.append(display_time(average_seconds, 4) + "|\n")
    return total_seconds
def parse_prediction_feed(feed_name, last_checked):
    """Fetch an active show's feed, append its stats row to the global
    `show_output`, and record the newest episode in `shows_Checked`.

    Prints "Update Needed" when an episode newer than *last_checked* exists.
    Returns (total_length_seconds, projected_yearly_output_seconds).
    """
    # Manual corrections for episodes whose feed duration is missing or
    # wrong; previously three separate hard-coded if-statements.
    duration_fixes = {
        'http://relay.fm/parallel/39': '3929',
        'http://relay.fm/makedo/70': '2550',
        'http://relay.fm/penaddict/438': '5006',
    }
    total_feed = 'https://www.relay.fm/' + feed_name + '/feed'
    d = feedparser.parse(total_feed)
    ents = d['entries']
    total_len = 0
    num_shows = len(ents)
    time_list = []
    # Newest entry id looks like http://relay.fm/<show>/<episode>.
    s = ents[0]['id'].replace("http://relay.fm/", "")
    ss = s.split('/')
    if(len(ss) == 2):
        shows_Checked.append(Show(name=ss[0], episode=ss[1]))
        if int(ss[1]) > int(last_checked):
            print("Update Needed")
    for e in ents:
        length = duration_fixes.get(e['id'], e['itunes_duration'])
        if(not length):
            print(e['id'])
        total_len += int(float("".join(length.split()))) #2
        time_list.append(time.mktime(e['published_parsed']))
    # Gaps between consecutive publish times (oldest first).
    time_list = list(reversed(time_list))
    diff_gap = numpy.diff(time_list)
    avg_gap = numpy.average(diff_gap)
    avg_length = total_len / num_shows
    shows_per_year = 31536000 / avg_gap
    yearly_output = avg_length * shows_per_year
    monthly_output = yearly_output / 12
    std_dev = numpy.std(diff_gap)
    # One markdown cell per append; the final cell ends the table row.
    show_output.append("|**" + d['feed']['title'] + "**|")
    show_output.append(display_time(total_len, 4) + "|")
    show_output.append(str(num_shows) + "|")
    show_output.append(display_time(avg_length, 4) + "|")
    show_output.append(display_time(avg_gap, 3) + "|")
    show_output.append(display_time(std_dev, 3) + "|")
    show_output.append("{:.1f}".format(shows_per_year) + "|")
    show_output.append(display_time(monthly_output, 4) + "|\n")
    return total_len, yearly_output
def getShows():
    """Return one Show (name + newest episode) per current show, taken from
    the Relay FM master feed."""
    master_feed = "https://www.relay.fm/master/feed"
    entries = feedparser.parse(master_feed)['entries']
    newest_shows = []
    seen_names = []
    for entry in entries:
        ident = entry['id'].replace("http://relay.fm/", "")
        pieces = ident.split('/')
        if len(pieces) != 2:
            continue
        show_name, episode = pieces
        # Entries are newest first, so only the first hit per show is kept.
        if show_name not in seen_names:
            newest_shows.append(Show(name=show_name, episode=episode))
            seen_names.append(show_name)
    return newest_shows
def readShowList(path):
    """Read the tracked show lists from *path*.

    oldShows.txt holds one retired show name per line; currentShows.txt
    holds name:lastCheckedEpisode pairs. Returns (old_shows, current_shows).
    """
    old_shows = []
    current_shows = []
    # Context managers replace the previous unclosed open() calls, which
    # leaked both file handles.
    with open(path + "oldShows.txt", "r") as fh:
        for line in fh:
            old_shows.append(line.rstrip())
    with open(path + "currentShows.txt", "r") as fh:
        for line in fh:
            show_info = line.rstrip().split(":")
            if len(show_info) == 2:
                current_shows.append(Show(name=show_info[0], lastCheckedEpisode=show_info[1]))
    return old_shows, current_shows
def compareShows(latest_shows, shows_list):
    """Compare the newest episodes against the tracked show list.

    Returns (needs_update, new_shows): names whose newest episode number is
    ahead of the last checked one, and names that are not tracked at all.
    """
    needs_update = []
    new_shows = []
    # Index tracked shows by name once instead of the previous O(n*m)
    # nested scan; setdefault keeps the FIRST entry on duplicate names,
    # matching the old early-break behavior.
    last_checked_by_name = {}
    for show in shows_list:
        last_checked_by_name.setdefault(show.name, show.lastCheckedEpisode)
    for latest in latest_shows:
        last_episode = last_checked_by_name.get(latest.name)
        if last_episode is None:
            new_shows.append(latest.name)
        elif int(last_episode) < int(latest.episode):
            needs_update.append(latest.name)
    return needs_update, new_shows
def main():
    """Regenerate docs/index.md with Relay FM show statistics and rewrite
    the last-checked episode list; optionally pulls/commits/pushes via git
    when a repository path is supplied as argv[1]."""
    now = datetime.now()
    use_git = False
    path = ""
    if len(sys.argv) > 1:
        print("Using Git")
        use_git = True
        path = sys.argv[1] + "/"
        git_repo = Repo(path)
        git_repo.git.pull()
    old_shows_list, shows_list = readShowList(path)
    latest_shows = getShows()
    shows_to_update, new_shows = compareShows(latest_shows, shows_list)
    running_total = 0
    yearly_output = 0
    for show in shows_list:
        total, yearly = parse_prediction_feed(show.name, show.lastCheckedEpisode)
        running_total += total
        yearly_output += yearly
    for show in old_shows_list:
        running_total += parse_feed(show)
    # Seconds until the combined back catalogue reaches one year of audio,
    # at the current projected yearly output.
    time_to_one_year = ((31536000 - running_total) / yearly_output) * 31536000
    summary_output.append(h2 + 'Total shows: ' + str(len(shows_list) + len(old_shows_list)) + dblel)
    summary_output.append(h3 + 'Total shows length: ' + display_time(running_total, 4) + dblel)
    summary_output.append(h2 + "Total active shows: " + str(len(shows_list)) + dblel)
    summary_output.append(h3 + "Yearly output: " + display_time(yearly_output, 3) + dblel)
    summary_output.append(h3 + "Monthly output: " + display_time(yearly_output / 12, 3) + dblel)
    # NOTE(review): "untill" typo kept intact so the generated page content
    # does not change here; fix alongside the published docs if desired.
    summary_output.append(h2 + "Time untill 1 year of content: " + display_time(time_to_one_year, 2) + dblel)
    summary_output.append("\n-------------------------------------------------\n\n")
    # Context managers replace the previous unclosed open() calls.
    index_path = (path + "docs/index.md") if use_git else "docs/index.md"
    with open(index_path, "w") as out:
        for s in summary_output:
            out.write(s)
        out.write("\n")
        out.write(h2 + "Active Shows")
        out.write("\n")
        out.write(vert_head + "\n")
        out.write(vert_sep + "\n")
        for s in show_output:
            out.write(s)
        out.write("\n-------------------------------------------------\n\n")
        out.write(h2 + "Retired Shows")
        out.write("\n")
        out.write(vert_head_old + "\n")
        out.write(vert_sep_old + "\n")
        for s in old_show_output:
            out.write(s)
        for new_show in new_shows:
            out.write("\nNew show needs adding - " + new_show)
        current_time = now.strftime("%H:%M:%S %d/%m/%Y")
        out.write("\nGenerated at: " + current_time + "\n")
    # Persist the newest episode numbers seen during this run.
    current_path = (path + "currentShows.txt") if use_git else "currentShows.txt"
    with open(current_path, "w") as out:
        for checked in shows_Checked:
            out.write(str(checked.name) + ":" + str(checked.episode) + "\n")
    if use_git:
        git_repo.git.add('.')
        git_repo.git.commit(m="Updated Relay show stats")
        git_repo.git.push()
    sys.exit(0)


if __name__ == "__main__":
    main()
|
<reponame>riju-pal/QCoDeS_riju<filename>qcodes/instrument_drivers/Keysight/N9030B.py
import numpy as np
from typing import Any, Tuple, Dict, Union
from qcodes import (
VisaInstrument, InstrumentChannel, Parameter, ParameterWithSetpoints
)
from qcodes.instrument.parameter import ParamRawDataType
from qcodes.utils.validators import Enum, Numbers, Arrays, Ints
from qcodes.utils.helpers import create_on_off_val_mapping
class FrequencyAxis(Parameter):
    """Parameter that generates the sweep's frequency axis from the linked
    start, stop and npts parameters."""

    def __init__(self,
                 start: Parameter,
                 stop: Parameter,
                 npts: Parameter,
                 *args: Any,
                 **kwargs: Any
                 ) -> None:
        super().__init__(*args, **kwargs)
        self._start: Parameter = start
        self._stop: Parameter = stop
        self._npts: Parameter = npts

    def get_raw(self) -> ParamRawDataType:
        """Return np.linspace(start, stop, npts) from the live parameter values."""
        values = (self._start(), self._stop(), self._npts())
        for value in values:
            assert value is not None
        first, last, count = values
        return np.linspace(first, last, count)
class Trace(ParameterWithSetpoints):
    """Parameter that fetches the data of one numbered instrument trace."""

    def __init__(self, number: int, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Narrow the instrument attribute types for static checking.
        self.instrument: Union["SpectrumAnalyzerMode", "PhaseNoiseMode"]
        self.root_instrument: "N9030B"
        self.number = number

    def get_raw(self) -> ParamRawDataType:
        """Read this trace's data from the owning instrument channel."""
        return self.instrument._get_data(trace_num=self.number)
class SpectrumAnalyzerMode(InstrumentChannel):
"""
Spectrum Analyzer Mode for Keysight N9030B instrument.
"""
def __init__(self, parent: "N9030B", name: str, *arg: Any, **kwargs: Any):
    """Set up frequency and sweep parameters for Spectrum Analyzer mode.

    The maximum frequency is derived from the installed hardware option
    (503/508/513/526/544).
    """
    super().__init__(parent, name, *arg, **kwargs)
    self._min_freq = -8e7
    # Hardware option code -> maximum supported frequency (Hz).
    self._valid_max_freq: Dict[str, float] = {"503": 3.7e9,
                                              "508": 8.5e9,
                                              "513": 13.8e9,
                                              "526": 27e9,
                                              "544": 44.5e9}
    opt: str
    installed_options = self.root_instrument._options()
    for hw_opt_for_max_freq in self._valid_max_freq:
        if hw_opt_for_max_freq in installed_options:
            opt = hw_opt_for_max_freq
            break
    else:
        # Previously `opt` stayed unbound here and the lookup below raised
        # an opaque NameError; fail with a clear message instead.
        raise RuntimeError(
            f"None of the known frequency options "
            f"{sorted(self._valid_max_freq)} were reported by the instrument."
        )
    self._max_freq = self._valid_max_freq[opt]
    self.add_parameter(
        name="start",
        unit="Hz",
        get_cmd=":SENSe:FREQuency:STARt?",
        set_cmd=self._set_start,
        get_parser=float,
        vals=Numbers(self._min_freq, self._max_freq - 10),
        docstring="start frequency for the sweep"
    )
    self.add_parameter(
        name="stop",
        unit="Hz",
        get_cmd=":SENSe:FREQuency:STOP?",
        set_cmd=self._set_stop,
        get_parser=float,
        vals=Numbers(self._min_freq + 10, self._max_freq),
        docstring="stop frequency for the sweep"
    )
    self.add_parameter(
        name="center",
        unit="Hz",
        get_cmd=":SENSe:FREQuency:CENTer?",
        set_cmd=self._set_center,
        get_parser=float,
        vals=Numbers(self._min_freq + 5, self._max_freq - 5),
        docstring="Sets and gets center frequency"
    )
    self.add_parameter(
        name="span",
        unit="Hz",
        get_cmd=":SENSe:FREQuency:SPAN?",
        set_cmd=self._set_span,
        get_parser=float,
        vals=Numbers(10, self._max_freq - self._min_freq),
        docstring="Changes span of frequency"
    )
    self.add_parameter(
        name="npts",
        get_cmd=":SENSe:SWEep:POINts?",
        set_cmd=self._set_npts,
        get_parser=int,
        vals=Ints(1, 20001),
        docstring="Number of points for the sweep"
    )
    self.add_parameter(
        name="sweep_time",
        label="Sweep time",
        get_cmd=":SENSe:SWEep:TIME?",
        set_cmd=":SENSe:SWEep:TIME {}",
        get_parser=float,
        unit="s",
        docstring="gets sweep time"
    )
    self.add_parameter(
        name="auto_sweep_time_enabled",
        get_cmd=":SENSe:SWEep:TIME:AUTO?",
        set_cmd=self._enable_auto_sweep_time,
        val_mapping=create_on_off_val_mapping(on_val="ON", off_val="OFF"),
        docstring="enables auto sweep time"
    )
    self.add_parameter(
        name="auto_sweep_type_enabled",
        get_cmd=":SENSe:SWEep:TYPE:AUTO?",
        set_cmd=self._enable_auto_sweep_type,
        val_mapping=create_on_off_val_mapping(on_val="ON", off_val="OFF"),
        docstring="enables auto sweep type"
    )
    self.add_parameter(
        name="sweep_type",
        get_cmd=":SENSe:SWEep:TYPE?",
        set_cmd=self._set_sweep_type,
        val_mapping={
            "fft": "FFT",
            "sweep": "SWE",
        },
        docstring="Sets up sweep type. Possible options are 'fft' and "
                  "'sweep'."
    )
    # Derived axis/trace parameters built from the scalar parameters above.
    self.add_parameter(
        name="freq_axis",
        label="Frequency",
        unit="Hz",
        start=self.start,
        stop=self.stop,
        npts=self.npts,
        vals=Arrays(shape=(self.npts.get_latest,)),
        parameter_class=FrequencyAxis,
        docstring="Creates frequency axis for the sweep from start, "
                  "stop and npts values."
    )
    self.add_parameter(
        name="trace",
        label="Trace",
        unit="dB",
        number=1,
        vals=Arrays(shape=(self.npts.get_latest,)),
        setpoints=(self.freq_axis,),
        parameter_class=Trace,
        docstring="Gets trace data."
    )
def _set_start(self, val: float) -> None:
"""
Sets start frequency
"""
stop = self.stop()
if val >= stop:
raise ValueError(f"Start frequency must be smaller than stop "
f"frequency. Provided start freq is: {val} Hz and "
f"set stop freq is: {stop} Hz")
self.write(f":SENSe:FREQuency:STARt {val}")
start = self.start()
if abs(val - start) >= 1:
self.log.warning(
f"Could not set start to {val} setting it to {start}"
)
def _set_stop(self, val: float) -> None:
"""
Sets stop frequency
"""
start = self.start()
if val <= start:
raise ValueError(f"Stop frequency must be larger than start "
f"frequency. Provided stop freq is: {val} Hz and "
f"set start freq is: {start} Hz")
self.write(f":SENSe:FREQuency:STOP {val}")
stop = self.stop()
if abs(val - stop) >= 1:
self.log.warning(
f"Could not set stop to {val} setting it to {stop}"
)
def _set_center(self, val: float) -> None:
"""
Sets center frequency and updates start and stop frequencies if they
change.
"""
self.write(f":SENSe:FREQuency:CENTer {val}")
self.update_trace()
def _set_span(self, val: float) -> None:
"""
Sets frequency span and updates start and stop frequencies if they
change.
"""
self.write(f":SENSe:FREQuency:SPAN {val}")
self.update_trace()
def _set_npts(self, val: int) -> None:
"""
Sets number of points for sweep
"""
self.write(f":SENSe:SWEep:POINts {val}")
def _enable_auto_sweep_time(self, val: str) -> None:
"""
Enables auto sweep time
"""
self.write(f":SENSe:SWEep:TIME:AUTO {val}")
def _enable_auto_sweep_type(self, val: str) -> None:
"""
Enables auto sweep type
"""
self.write(f":SENSe:SWEep:TYPE:AUTO {val}")
def _set_sweep_type(self, val: str) -> None:
"""
Sets sweep type
"""
self.write(f":SENSe:SWEep:TYPE {val}")
    def _get_data(self, trace_num: int) -> ParamRawDataType:
        """
        Gets data from the measurement.

        Args:
            trace_num: trace number appended to the SCPI READ query.

        Returns:
            Numpy float64 array holding every second value of the
            comma-separated response. NOTE(review): the interleaved values
            being dropped are presumably the frequency setpoints -- confirm
            against the instrument manual.

        Raises:
            TimeoutError: if no response arrives within the current sweep
                time plus the root instrument's additional wait.
        """
        try:
            # Allow at least one full sweep (plus a safety margin) before
            # the VISA read times out.
            timeout = self.sweep_time() + self.root_instrument._additional_wait
            with self.root_instrument.timeout.set_to(timeout):
                data_str = self.ask(f":READ:"
                                    f"{self.root_instrument.measurement()}"
                                    f"{trace_num}?")
            data = np.array(data_str.rstrip().split(",")).astype("float64")
        except TimeoutError as e:
            raise TimeoutError("Couldn't receive any data. Command timed "
                               "out.") from e
        # The response interleaves two series; keep every second value.
        trace_data = data[1::2]
        return trace_data
def update_trace(self) -> None:
"""
Updates start and stop frequencies whenever span of/or center frequency
is updated.
"""
self.start()
self.stop()
def setup_swept_sa_sweep(self,
start: float,
stop: float,
npts: int) -> None:
"""
Sets up the Swept SA measurement sweep for Spectrum Analyzer Mode.
"""
self.root_instrument.mode("SA")
if "SAN" in self.root_instrument._available_meas():
self.root_instrument.measurement("SAN")
else:
raise RuntimeError("Swept SA measurement is not available on your "
"Keysight N9030B instrument with Spectrum "
"Analyzer mode.")
self.start(start)
self.stop(stop)
self.npts(npts)
def autotune(self) -> None:
"""
Autotune quickly get to the most likely signal of interest, and
position it optimally on the display.
"""
self.write(":SENS:FREQuency:TUNE:IMMediate")
self.center()
class PhaseNoiseMode(InstrumentChannel):
    """
    Phase Noise mode submodule for the Keysight N9030B instrument.

    Exposes Log Plot sweep parameters (number of points, start/stop
    frequency offsets, signal tracking), a derived frequency axis and the
    measured trace.
    """

    def __init__(self, parent: "N9030B", name: str, *arg: Any, **kwargs: Any):
        super().__init__(parent, name, *arg, **kwargs)

        self._min_freq = 1
        # Maximum supported frequency per installed hardware option number.
        self._valid_max_freq: Dict[str, float] = {"503": 3699999995,
                                                  "508": 8499999995,
                                                  "513": 13799999995,
                                                  "526": 26999999995,
                                                  "544": 44499999995}
        # BUG FIX: previously `opt` was declared but left unbound when none
        # of the known frequency options was reported by the instrument,
        # producing a confusing NameError on the dict lookup below. Detect
        # that case and fail fast with an explicit message instead.
        opt = None
        for hw_opt_for_max_freq in self._valid_max_freq:
            if hw_opt_for_max_freq in self.root_instrument._options():
                opt = hw_opt_for_max_freq
        if opt is None:
            raise RuntimeError(
                "None of the known frequency options "
                f"{sorted(self._valid_max_freq)} is installed on this "
                "instrument; cannot determine the maximum frequency."
            )
        self._max_freq = self._valid_max_freq[opt]

        self.add_parameter(
            name="npts",
            get_cmd=":SENSe:LPLot:SWEep:POINts?",
            set_cmd=":SENSe:LPLot:SWEep:POINts {}",
            get_parser=int,
            vals=Ints(601, 20001),
            docstring="Number of points for the sweep"
        )

        self.add_parameter(
            name="start_offset",
            unit="Hz",
            get_cmd=":SENSe:LPLot:FREQuency:OFFSet:STARt?",
            set_cmd=self._set_start_offset,
            get_parser=float,
            vals=Numbers(self._min_freq, self._max_freq - 10),
            docstring="start frequency offset for the plot"
        )

        self.add_parameter(
            name="stop_offset",
            unit="Hz",
            get_cmd=":SENSe:LPLot:FREQuency:OFFSet:STOP?",
            set_cmd=self._set_stop_offset,
            get_parser=float,
            vals=Numbers(self._min_freq + 99, self._max_freq),
            docstring="stop frequency offset for the plot"
        )

        self.add_parameter(
            name="signal_tracking_enabled",
            get_cmd=":SENSe:FREQuency:CARRier:TRACk?",
            set_cmd=":SENSe:FREQuency:CARRier:TRACk {}",
            val_mapping=create_on_off_val_mapping(on_val="ON", off_val="OFF"),
            docstring="Gets/Sets signal tracking. When signal tracking is "
                      "enabled carrier signal is repeatedly realigned. Signal "
                      "Tracking assumes the new acquisition occurs repeatedly "
                      "without pause."
        )

        self.add_parameter(
            name="freq_axis",
            label="Frequency",
            unit="Hz",
            start=self.start_offset,
            stop=self.stop_offset,
            npts=self.npts,
            vals=Arrays(shape=(self.npts.get_latest,)),
            parameter_class=FrequencyAxis,
            docstring="Creates frequency axis for the sweep from "
                      "start_offset, stop_offset and npts values."
        )

        self.add_parameter(
            name="trace",
            label="Trace",
            unit="dB",
            number=3,
            vals=Arrays(shape=(self.npts.get_latest,)),
            setpoints=(self.freq_axis,),
            parameter_class=Trace,
            docstring="Gets trace data."
        )

    def _set_start_offset(self, val: float) -> None:
        """
        Set the start frequency offset for the log plot, warning when the
        instrument adjusted the requested value or the stop offset.
        """
        stop_offset = self.stop_offset()
        self.write(f":SENSe:LPLot:FREQuency:OFFSet:STARt {val}")
        # The instrument may coerce the requested offset; read back and warn.
        start_offset = self.start_offset()
        if abs(val - start_offset) >= 1:
            self.log.warning(
                f"Could not set start offset to {val} setting it to "
                f"{start_offset}"
            )
        # NOTE(review): this also triggers when the gap to stop_offset is
        # below 10 Hz, not only when val exceeds it; the message describes
        # the common case.
        if val >= stop_offset or abs(val - stop_offset) < 10:
            self.log.warning(f"Provided start frequency offset {val} Hz was "
                             f"greater than preset stop frequency offset "
                             f"{stop_offset} Hz. Provided start frequency "
                             f"offset {val} Hz is set and new stop freq offset"
                             f" is: {self.stop_offset()} Hz.")

    def _set_stop_offset(self, val: float) -> None:
        """
        Set the stop frequency offset for the log plot, warning when the
        instrument adjusted the requested value or the start offset.
        """
        start_offset = self.start_offset()
        self.write(f":SENSe:LPLot:FREQuency:OFFSet:STOP {val}")
        # The instrument may coerce the requested offset; read back and warn.
        stop_offset = self.stop_offset()
        if abs(val - stop_offset) >= 1:
            self.log.warning(
                f"Could not set stop offset to {val} setting it to "
                f"{stop_offset}"
            )
        # NOTE(review): this also triggers when the gap to start_offset is
        # below 10 Hz, not only when val is below it.
        if val <= start_offset or abs(val-start_offset) < 10:
            self.log.warning(f"Provided stop frequency offset {val} Hz was "
                             f"less than preset start frequency offset "
                             f"{start_offset} Hz. Provided stop frequency "
                             f"offset {val} Hz is set and new start freq offset"
                             f" is: {self.start_offset()} Hz.")

    def _get_data(self, trace_num: int) -> ParamRawDataType:
        """
        Read trace data for the current Log Plot measurement.

        Trace 1 is queried first for a 7-element carrier summary; if the
        summary has the wrong length or its first value is below -50
        (presumably the carrier power in dB -- TODO confirm), the carrier is
        treated as missing and an array of -1s is returned.

        Args:
            trace_num: number of the trace to read.

        Returns:
            Numpy float64 array with every second value of the response.

        Raises:
            TimeoutError: if the instrument does not answer the trace read.
        """
        raw_data = self.ask(f":READ:{self.root_instrument.measurement()}{1}?")
        trace_res_details = np.array(
            raw_data.rstrip().split(",")
        ).astype("float64")

        if len(trace_res_details) != 7 or (
            len(trace_res_details) >= 1 and trace_res_details[0] < -50
        ):
            self.log.warning("Carrier(s) Incorrect or Missing!")
            return -1 * np.ones(self.npts())

        try:
            data_str = self.ask(f":READ:{self.root_instrument.measurement()}"
                                f"{trace_num}?")
            data = np.array(data_str.rstrip().split(",")).astype("float64")
        except TimeoutError as e:
            raise TimeoutError("Couldn't receive any data. Command timed "
                               "out.") from e
        # The response interleaves two series; keep every second value.
        trace_data = data[1::2]
        return trace_data

    def setup_log_plot_sweep(self,
                             start_offset: float,
                             stop_offset: float,
                             npts: int
                             ) -> None:
        """
        Sets up the Log Plot measurement sweep for Phase Noise Mode.

        Raises:
            RuntimeError: if the Log Plot ("LPL") measurement is not
                available on the instrument.
        """
        self.root_instrument.mode("PNOISE")
        if "LPL" in self.root_instrument._available_meas():
            self.root_instrument.measurement("LPL")
        else:
            raise RuntimeError("Log Plot measurement is not available on your "
                               "Keysight N9030B instrument with Phase Noise "
                               "mode.")
        self.start_offset(start_offset)
        self.stop_offset(stop_offset)
        self.npts(npts)

    def autotune(self) -> None:
        """
        On autotune, the measurement automatically searches for and tunes to
        the strongest signal in the full span of the analyzer.
        """
        self.write(":SENSe:FREQuency:CARRier:SEARch")
        # Refresh cached offsets, which the carrier search may change.
        self.start_offset()
        self.stop_offset()
class N9030B(VisaInstrument):
    """
    Driver for the Keysight N9030B PXA signal analyzer. The Keysight N9030B
    PXA signal analyzer is part of the Keysight X-Series Multi-touch Signal
    Analyzers.

    This driver allows Swept SA measurements in Spectrum Analyzer mode and
    Log Plot measurements in Phase Noise mode of the instrument.

    Args:
        name: name of this instrument instance
        address: VISA resource address of the instrument
    """

    def __init__(self, name: str, address: str, **kwargs: Any) -> None:
        super().__init__(name, address, terminator='\n', **kwargs)

        # Type declarations only; no value is assigned in this class. The
        # mode submodules set their own _min_freq/_max_freq -- presumably
        # these were meant to mirror them; TODO confirm.
        self._min_freq: float
        self._max_freq: float
        # Extra seconds added to the sweep time when waiting for trace data.
        self._additional_wait: float = 1

        self.add_parameter(
            name="mode",
            get_cmd=":INSTrument:SELect?",
            set_cmd=":INSTrument:SELect {}",
            vals=Enum(*self._available_modes()),
            docstring="Allows setting of different modes present and licensed "
                      "for the instrument."
        )

        self.add_parameter(
            name="measurement",
            get_cmd=":CONFigure?",
            set_cmd=":CONFigure:{}",
            vals=Enum("SAN", "LPL"),
            docstring="Sets measurement type from among the available "
                      "measurement types."
        )

        # initial_value=False switches continuous measurement off at connect.
        self.add_parameter(
            name="cont_meas",
            initial_value=False,
            get_cmd=":INITiate:CONTinuous?",
            set_cmd=self._enable_cont_meas,
            val_mapping=create_on_off_val_mapping(on_val="ON", off_val="OFF"),
            docstring="Enables or disables continuous measurement."
        )

        self.add_parameter(
            name="format",
            get_cmd=":FORMat:TRACe:DATA?",
            set_cmd=":FORMat:TRACe:DATA {}",
            val_mapping={
                "ascii": "ASCii",
                "int32": "INTeger,32",
                "real32": "REAL,32",
                "real64": "REAL,64"
            },
            docstring="Sets up format of data received"
        )

        # Attach only the mode submodules the instrument actually supports.
        if "SA" in self._available_modes():
            sa_mode = SpectrumAnalyzerMode(self, name="sa")
            self.add_submodule("sa", sa_mode)
        else:
            self.log.info("Spectrum Analyzer mode is not available on this "
                          "instrument.")
        if "PNOISE" in self._available_modes():
            pnoise_mode = PhaseNoiseMode(self, name="pn")
            self.add_submodule("pn", pnoise_mode)
        else:
            self.log.info("Phase Noise mode is not available on this "
                          "instrument.")

        self.connect_message()

    def _available_modes(self) -> Tuple[str, ...]:
        """
        Returns present and licensed modes for the instrument.

        Returns:
            Tuple of mode names, e.g. ("SA", "PNOISE", ...).
        """
        # Parsing assumes a quoted, comma-separated catalog where the first
        # entry leads with the mode name and later entries lead with a
        # number (hence split(' ')[0] vs [1]) -- TODO confirm against the
        # instrument's SCPI documentation.
        available_modes = self.ask(":INSTrument:CATalog?")
        av_modes = available_modes[1:-1].split(',')
        modes: Tuple[str, ...] = ()
        for i, mode in enumerate(av_modes):
            if i == 0:
                modes = modes + (mode.split(' ')[0], )
            else:
                modes = modes + (mode.split(' ')[1], )
        return modes

    def _available_meas(self) -> Tuple[str, ...]:
        """
        Gives available measurement with a given mode for the instrument

        Returns:
            Tuple of measurement names, e.g. ("SAN", "LPL", ...).
        """
        # As above, assumes a quoted list; entries after the first carry a
        # leading space that is stripped via meas[1:].
        available_meas = self.ask(":CONFigure:CATalog?")
        av_meas = available_meas[1:-1].split(',')
        measurements: Tuple[str, ...] = ()
        for i, meas in enumerate(av_meas):
            if i == 0:
                measurements = measurements + (meas, )
            else:
                measurements = measurements + (meas[1:], )
        return measurements

    def _enable_cont_meas(self, val: str) -> None:
        """
        Sets continuous measurement to ON or OFF.

        Args:
            val: instrument literal "ON" or "OFF" (supplied by the
                parameter's value mapping).
        """
        self.write(f":INITiate:CONTinuous {val}")

    def _options(self) -> Tuple[str, ...]:
        """
        Returns installed options numbers.
        """
        # Assumes the *OPT? response is quoted and comma-separated -- the
        # slice strips the surrounding quotes. TODO confirm response format.
        options_raw = self.ask('*OPT?')
        return tuple(options_raw[1:-1].split(','))

    def reset(self) -> None:
        """
        Reset the instrument by sending the RST command
        """
        self.write("*RST")

    def abort(self) -> None:
        """
        Aborts the measurement
        """
        self.write(":ABORt")
|
# -*- coding: utf-8 -*-
from configparser import ConfigParser
from flask import Flask, jsonify, request
import logging.handlers
import logging
from bin.ram import RAM
from bin.cpu import CPU
from bin.network import Network
from bin.load_avg import LoadAvg
from bin.boot_time import BootTime
from bin.disk import Disk
# convert human sizes to bytes
def convert_bytes(byts):
    """
    Convert a human-readable size string to a number of bytes.

    Args:
        byts: size string ending in 'kb', 'mb' or 'gb' (case-insensitive,
            surrounding whitespace ignored), e.g. '10gb' or '5mb'.

    Returns:
        int: the size in bytes.

    Raises:
        IOError: if the string has no recognized suffix or a non-numeric
            amount. (IOError is kept because the config-loading error
            handler below catches it.)
    """
    # Table of unit multipliers replaces the duplicated if/elif branches;
    # lowercasing generalizes the original to accept '10GB' etc.
    units = {'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3}
    text = str(byts).strip().lower()
    suffix = text[-2:]
    if suffix not in units:
        raise IOError('Invalid input. Correct format: #kb/#mb/#gb like 10gb or 5mb')
    try:
        amount = int(text[:-2])
    except ValueError:
        raise IOError('Invalid input. Correct format: #kb/#mb/#gb like 10gb or 5mb')
    return amount * units[suffix]
# load config
config = ConfigParser()
config.read('config.ini')

# Defaults, overwritten from config.ini below. err_type tracks the last key
# being read so the error message can name it on failure.
err_type = ''
log_file = ''
log_size_limit = ''
log_file_number_limit = 0
flsk_host = ''
flsk_port = 0
try:
    # log values
    err_type = 'Log > Name'
    log_file = config.get('Log', 'Name', fallback='agent.log')
    err_type = 'Log > Size_limit'
    log_size_limit = config.get('Log', 'Size_limit', fallback='5mb')
    log_size_limit = convert_bytes(log_size_limit)
    err_type = 'Log > File_Limit'
    log_file_number_limit = config.getint('Log', 'File_Limit', fallback=10)

    # flask values
    err_type = 'Flask > Host'
    flsk_host = config.get('Flask', 'Host', fallback='0.0.0.0')
    err_type = 'Flask > Port'
    flsk_port = config.getint('Flask', 'Port', fallback=5000)
except IOError as e:
    # BUG FIX: convert_bytes raises IOError with a single message argument,
    # so the previous e.args[1] raised IndexError here. Format the
    # exception itself instead.
    print('CONFIG ERROR: Unable to load values from \"{}\"! STACKTRACE: {}'.format(err_type, e))
    print('CONFIG ERROR: Force closing program...')
    exit()
# prepare logging: a rotating file handler (size/count from config.ini) plus
# a formatted stream handler. The stream formatter requires each record to
# carry a 'topic' attribute, supplied via extra={'topic': ...} at call sites.
try:
    logger = logging.getLogger('AtomicMonitor Agent')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.handlers.RotatingFileHandler(log_file, maxBytes=log_size_limit,
                                                           backupCount=log_file_number_limit))
    ch = logging.StreamHandler()
    ch.setFormatter(logging.Formatter('%(asctime)s | %(levelname)-8s | %(topic)-5s | %(message)s'))
    logger.addHandler(ch)
except IOError as e:
    # BUG FIX: corrected 'STACETRACE' typo and replaced e.args[1] (which
    # raises IndexError when the OSError carries a single argument) with
    # the exception itself.
    print('FILE ERROR: Unable to prepare log file! STACKTRACE: {}'.format(e))
    print('FILE ERROR: Force closing program...')
    exit()
# setup variables: one collector instance per metric source, shared by the
# Flask routes below.
sram = RAM()  # RAM and swap usage (get_memory_usage / get_swap_usage)
scpu = CPU()  # CPU usage percentage
net = Network()  # per-NIC sent/received counters
load = LoadAvg()  # 1/5/15-minute load averages (reported as Linux-only below)
boot = BootTime()  # system boot timestamp
sdisk = Disk()  # per-disk usage and disk I/O counters
app = Flask(__name__)
# display current specs
@app.route('/now')
def web_now():
    """Return a JSON snapshot of current RAM, CPU, boot time and disk I/O."""
    # retrieve current system specs
    ram_percent, ram_used, ram_total = sram.get_memory_usage()
    cpu_percent = scpu.get_usage()
    boot_time = boot.get_boot_time()
    disk_io = sdisk.get_disk_io()

    # create json object
    json_data = {
        'ram': {
            'percent_used': ram_percent,
            'used': ram_used,
            'total': ram_total
        },
        'cpu': {
            'percent_used': cpu_percent
        },
        'boot': {
            'start_timestamp': boot_time
        },
        'disk_io': disk_io
    }

    # BUG FIX: use the configured module logger. logging.info targets the
    # root logger, which has no handlers here and a default WARNING level,
    # so these INFO records never reached the log file or stream handler.
    logger.info('Retrieved now status for IP: {}'.format(request.remote_addr), extra={'topic': 'AGENT'})

    # print json data
    return jsonify(json_data)
# display full system specs
@app.route('/')
def web_all():
    """Return a full JSON report: memory, CPU, network, load, boot, disks."""
    # retrieve current system specs
    ram_percent, ram_used, ram_total = sram.get_memory_usage()
    swap_percent, swap_used, swap_total = sram.get_swap_usage()
    cpu_usage = scpu.get_usage()
    nics_bytes = net.get_nic_status()
    nic_names, nic_sent, nic_recvs = [], [], []
    for nic in nics_bytes:
        nic_names.append(nic.get_name())
        nic_sent.append(nic.get_sent())
        nic_recvs.append(nic.get_recv())
    islinux, load_1m, load_5m, load_15m = load.get_load()
    # Load averages are only meaningful on Linux; report NULL elsewhere.
    if not islinux:
        load_1m = 'NULL'
        load_5m = 'NULL'
        load_15m = 'NULL'
    boot_time = boot.get_boot_time()
    disks = sdisk.get_disks()
    disk_names, disk_percents, disk_uses, disk_totals = [], [], [], []
    for disk in disks:
        disk_names.append(disk.get_name())
        disk_percents.append(disk.get_percent())
        disk_uses.append(disk.get_used())
        disk_totals.append(disk.get_total())
    disk_io = sdisk.get_disk_io()

    # create json object
    # NOTE(review): 'mb_recieved' is misspelled but is part of the public
    # JSON API -- left unchanged so existing consumers keep working.
    json_data = {
        'memory': {
            'ram': {
                'percent_used': ram_percent,
                'used': ram_used,
                'total': ram_total
            },
            'swap': {
                'percent_used': swap_percent,
                'used': swap_used,
                'total': swap_total
            }
        },
        'cpu': {
            'percent_used': cpu_usage
        },
        'network': [
            {
                'name': name,
                'mb_sent': sent,
                'mb_recieved': recv
            }
            for name, sent, recv in zip(nic_names, nic_sent, nic_recvs)
        ],
        'load': {
            '1min': load_1m,
            '5min': load_5m,
            '15min': load_15m
        },
        'boot': {
            'time': {
                'timestamp': boot_time
            }
        },
        'disks': {
            'io': disk_io,
            'list': [
                {
                    'name': name,
                    'percent_used': percent,
                    'used': used,
                    'total': total
                }
                for name, percent, used, total in zip(disk_names, disk_percents, disk_uses, disk_totals)
            ]
        }
    }

    # BUG FIX: use the configured module logger. logging.info targets the
    # root logger, which has no handlers here and a default WARNING level,
    # so these INFO records never reached the log file or stream handler.
    logger.info('Retrieved all status for IP: {}'.format(request.remote_addr), extra={'topic': 'AGENT'})

    # print json data
    return jsonify(json_data)
# start flask process
if __name__ == '__main__':
    # BUG FIX: log via the configured logger; the root logger used before
    # has no handlers and drops INFO records at its default WARNING level.
    logger.info('Starting program...', extra={'topic': 'AGENT'})
    # start Flask service
    app.run(host=flsk_host, port=flsk_port)
|
"""TEST ENVIRONMENT UTILITIES"""
import collections
import json
import tempfile
from pathlib import Path
from mldock.platform_helpers import utils
from mldock.platform_helpers.mldock.configuration.environment.base import (
BaseEnvironment,
)
class TestBaseEnvironment:
    """Tests for BaseEnvironment directory setup, hyperparameters and channels."""

    @staticmethod
    def test_create_training_directories_success():
        """Test Environment class instantiates directories successfully"""
        with tempfile.TemporaryDirectory() as tempdir:
            container_opt = Path(tempdir)
            # Instantiation creates the directory tree as a side effect.
            BaseEnvironment(base_dir=container_opt)

            root_dir_tree = [
                p.relative_to(tempdir).as_posix() for p in container_opt.glob("*")
            ]
            input_dir_tree = [
                p.relative_to(tempdir).as_posix()
                for p in Path(container_opt, "input").glob("*")
            ]

            assert collections.Counter(root_dir_tree) == collections.Counter(
                ["input", "output", "model"]
            ), "Fail. Root directories were not created successfully"
            # BUG FIX: message previously repeated "Root directories".
            assert collections.Counter(input_dir_tree) == collections.Counter(
                ["input/data", "input/config"]
            ), "Fail. Input directories were not created successfully"

    @staticmethod
    def test_environment_properties_with_expected_paths():
        """Test Environment class provides the correct/expected paths for properties"""
        with tempfile.TemporaryDirectory() as tempdir:
            container_opt = Path(tempdir)
            environment = BaseEnvironment(base_dir=container_opt)

            assert environment.input_dir == Path(
                container_opt, "input"
            ), "Fail. Input directory did not match"
            assert environment.input_data_dir == Path(
                container_opt, "input/data"
            ), "Fail. Input Data directory did not match"
            assert environment.input_config_dir == Path(
                container_opt, "input/config"
            ), "Fail. Input Config directory did not match"
            assert environment.model_dir == Path(
                container_opt, "model"
            ), "Fail. Model directory did not match"
            assert environment.output_data_dir == Path(
                container_opt, "output"
            ), "Fail. Output directory did not match"

    @staticmethod
    def test_setup_hyperparameters_is_correct():
        """Test hyperparameters are read back from the environment variable"""
        with tempfile.TemporaryDirectory() as tempdir:
            container_opt = Path(tempdir)
            hyperparameters = {"key": "value", "factors": 1, "decision": False}
            env_vars = {"MLDOCK_HYPERPARAMETERS": json.dumps(hyperparameters)}

            with utils.set_env(**env_vars):
                environment = BaseEnvironment(base_dir=container_opt)

                assert (
                    environment.hyperparameters == hyperparameters
                ), "Fail. Hyperparameters did not match expected"

    @staticmethod
    def test_get_input_channel_iter():
        """Test Environment class provides the correct/expected input channels"""
        with tempfile.TemporaryDirectory() as tempdir:
            container_opt = Path(tempdir)
            env_vars = {"MLDOCK_INPUT_CHANNEL_EXAMPLE": "s3://bucket/data/example/"}

            with utils.set_env(**env_vars):
                environment = BaseEnvironment(base_dir=container_opt)

                assert environment.get_input_channel_iter()[0] == {
                    "key": "MLDOCK_INPUT_CHANNEL_EXAMPLE",
                    "value": "s3://bucket/data/example/",
                }, "Fail. Input Channel 'example' was not found"

    @staticmethod
    def test_get_output_channel_iter():
        """Test Environment class provides the correct/expected output channels"""
        with tempfile.TemporaryDirectory() as tempdir:
            container_opt = Path(tempdir)
            env_vars = {
                "MLDOCK_OUTPUT_CHANNEL_EXAMPLE": "s3://bucket/data/output/example"
            }

            with utils.set_env(**env_vars):
                environment = BaseEnvironment(base_dir=container_opt)

                assert environment.get_output_channel_iter()[0] == {
                    "key": "MLDOCK_OUTPUT_CHANNEL_EXAMPLE",
                    "value": "s3://bucket/data/output/example",
                }, "Fail. Output Channel 'example' was not found"

    @staticmethod
    def test_get_model_input_channel_iter():
        """Test Environment class provides the correct/expected model input channels"""
        # BUG FIX: this test was previously also named
        # test_get_model_output_channel_iter, so it was shadowed by the
        # method below and never collected by pytest.
        with tempfile.TemporaryDirectory() as tempdir:
            container_opt = Path(tempdir)
            env_vars = {
                "MLDOCK_MODEL_INPUT_CHANNEL_EXAMPLE": "s3://bucket/model/example"
            }

            with utils.set_env(**env_vars):
                environment = BaseEnvironment(base_dir=container_opt)

                assert environment.get_model_input_channel_iter()[0] == {
                    "key": "MLDOCK_MODEL_INPUT_CHANNEL_EXAMPLE",
                    "value": "s3://bucket/model/example",
                }, "Fail. Model Input Channel 'example' was not found"

    @staticmethod
    def test_get_model_output_channel_iter():
        """Test Environment class provides the correct/expected model output channels"""
        with tempfile.TemporaryDirectory() as tempdir:
            container_opt = Path(tempdir)
            env_vars = {
                "MLDOCK_MODEL_OUTPUT_CHANNEL_EXAMPLE": "s3://bucket/model/example"
            }

            with utils.set_env(**env_vars):
                environment = BaseEnvironment(base_dir=container_opt)

                assert environment.get_model_output_channel_iter()[0] == {
                    "key": "MLDOCK_MODEL_OUTPUT_CHANNEL_EXAMPLE",
                    "value": "s3://bucket/model/example",
                }, "Fail. Model Output Channel 'example' was not found"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.